Commits on Source (2)
......@@ -15,8 +15,7 @@
#
# Contact: common-workflow-language@googlegroups.com
# make pycodestyle to check for basic Python code compliance
# make autopep8 to fix most pep8 errors
# make format to fix most python formatting errors
# make pylint to check Python code for enhanced compliance including naming
# and documentation
# make coverage-report to check coverage of the python scripts by the tests
......@@ -27,7 +26,8 @@ PACKAGE=schema-salad
# `SHELL=bash` doesn't work for some, so don't use BASH-isms like
# `[[` conditional expressions.
PYSOURCES=$(wildcard ${MODULE}/**.py tests/*.py) setup.py
DEVPKGS=pycodestyle diff_cover autopep8 pylint coverage pep257 pytest-xdist flake8
DEVPKGS=diff_cover black pylint coverage pep257 pytest-xdist \
flake8 flake8-bugbear
COVBASE=coverage run --branch --append --source=${MODULE} \
--omit=schema_salad/tests/*
......@@ -73,19 +73,6 @@ clean: FORCE
sort_imports:
isort ${MODULE}/*.py ${MODULE}/tests/*.py setup.py
pep8: pycodestyle
## pycodestyle : check Python code style
pycodestyle: $(PYSOURCES)
pycodestyle --exclude=_version.py --show-source --show-pep8 $^ || true
pep8_report.txt: pycodestyle_report.txt
pycodestyle_report.txt: $(PYSOURCES)
pycodestyle --exclude=_version.py $^ > $@ || true
diff_pep8_report: diff_pycodestyle_report
diff_pycodestyle_report: pycodestyle_report.txt
diff-quality --violations=pycodestyle $^
pep257: pydocstyle
## pydocstyle : check Python code style
pydocstyle: $(PYSOURCES)
......@@ -97,14 +84,9 @@ pydocstyle_report.txt: $(PYSOURCES)
diff_pydocstyle_report: pydocstyle_report.txt
diff-quality --violations=pycodestyle --fail-under=100 $^
## autopep8 : fix most Python code indentation and formatting
autopep8: $(PYSOURCES)
autopep8 --recursive --in-place --ignore E309 $^
# A command to automatically run astyle and autopep8 on appropriate files
## format : check/fix all code indentation and formatting (runs autopep8)
format: autopep8
# Do nothing
## format : check/fix all code indentation and formatting (runs black)
format:
black --target-version py27 schema_salad
## pylint : run static code analysis on Python code
pylint: $(PYSOURCES)
......@@ -180,18 +162,24 @@ list-author-emails:
@git log --format='%aN,%aE' | sort -u | grep -v 'root'
mypy2: ${PYSOURCES}
rm -Rf typeshed/2and3/ruamel/yaml
ln -s $(shell python -c 'from __future__ import print_function; import ruamel.yaml; import os.path; print(os.path.dirname(ruamel.yaml.__file__))') \
typeshed/2and3/ruamel/
MYPYPATH=$MYPYPATH:typeshed/2.7:typeshed/2and3 mypy --py2 --disallow-untyped-calls \
if ! test -f $(shell python -c 'from __future__ import print_function; import ruamel.yaml; import os.path; print(os.path.dirname(ruamel.yaml.__file__))')/py.typed ; \
then \
rm -Rf typeshed/2and3/ruamel/yaml ; \
ln -s $(shell python -c 'from __future__ import print_function; import ruamel.yaml; import os.path; print(os.path.dirname(ruamel.yaml.__file__))') \
typeshed/2and3/ruamel/ ; \
fi # if the minimum required ruamel.yaml version is 0.15.99 or greater, then the above can be removed
MYPYPATH=$$MYPYPATH:typeshed/2.7:typeshed/2and3 mypy --py2 --disallow-untyped-calls \
--warn-redundant-casts \
schema_salad
mypy3: ${PYSOURCES}
rm -Rf typeshed/2and3/ruamel/yaml
ln -s $(shell python -c 'from __future__ import print_function; import ruamel.yaml; import os.path; print(os.path.dirname(ruamel.yaml.__file__))') \
typeshed/2and3/ruamel/
MYPYPATH=$MYPYPATH:typeshed/3:typeshed/2and3 mypy --disallow-untyped-calls \
if ! test -f $(shell python -c 'from __future__ import print_function; import ruamel.yaml; import os.path; print(os.path.dirname(ruamel.yaml.__file__))')/py.typed ; \
then \
rm -Rf typeshed/2and3/ruamel/yaml ; \
ln -s $(shell python -c 'from __future__ import print_function; import ruamel.yaml; import os.path; print(os.path.dirname(ruamel.yaml.__file__))') \
typeshed/2and3/ruamel/ ; \
fi # if the minimum required ruamel.yaml version is 0.15.99 or greater, then the above can be removed
MYPYPATH=$$MYPYPATH:typeshed/3:typeshed/2and3 mypy --disallow-untyped-calls \
--warn-redundant-casts \
schema_salad
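
The py.typed guard added to both mypy targets only symlinks ruamel.yaml into
typeshed/2and3/ when the installed copy lacks a PEP 561 marker. A minimal
Python sketch of the same check, assuming ruamel.yaml is importable in the
current environment::

    # What the mypy2/mypy3 recipes test before symlinking ruamel.yaml into
    # typeshed/2and3/: a PEP 561 "py.typed" marker means the package ships
    # its own type information and the bundled stubs are unnecessary.
    from __future__ import print_function

    import os.path

    import ruamel.yaml

    yaml_dir = os.path.dirname(ruamel.yaml.__file__)
    if os.path.isfile(os.path.join(yaml_dir, "py.typed")):
        print("ruamel.yaml ships py.typed; no typeshed symlink needed")
    else:
        print("no py.typed marker; symlink %s into typeshed/2and3/" % yaml_dir)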
......@@ -200,7 +188,7 @@ jenkins: FORCE
. env/bin/activate ; \
pip install -U setuptools pip wheel ; \
${MAKE} install-dep coverage.html coverage.xml pydocstyle_report.txt \
sloccount.sc pycodestyle_report.txt pylint_report.txt
sloccount.sc pylint_report.txt
if ! test -d env3 ; then virtualenv -p python3 env3 ; fi
. env3/bin/activate ; \
pip install -U setuptools pip wheel ; \
......
Metadata-Version: 1.1
Name: schema-salad
Version: 4.5.20190621200723
Version: 4.5.20190815125611
Summary: Schema Annotations for Linked Avro Data (SALAD)
Home-page: https://github.com/common-workflow-language/schema_salad
Author: Common workflow language working group
......@@ -51,8 +51,14 @@ Description: |Linux Build Status| |Windows Build status| |Code coverage| |CII Be
$ schema-salad-tool
usage: schema-salad-tool [-h] [--rdf-serializer RDF_SERIALIZER]
[--print-jsonld-context | --print-rdfs | --print-avro | --print-rdf | --print-pre | --print-index | --print-metadata | --print-inheritance-dot | --print-fieldrefs-dot | --codegen language | --print-oneline]
[--strict | --non-strict] [--verbose | --quiet | --debug] [--version]
[--print-jsonld-context | --print-rdfs | --print-avro
| --print-rdf | --print-pre | --print-index
| --print-metadata | --print-inheritance-dot
| --print-fieldrefs-dot | --codegen language
| --print-oneline]
[--strict | --non-strict] [--verbose | --quiet
| --debug]
[--version]
[schema] [document]
$ python
......@@ -82,7 +88,8 @@ Description: |Linux Build Status| |Windows Build status| |Code coverage| |CII Be
$ schema-salad-tool --codegen=python myschema.yml > myschema.py
Display inheritance relationship between classes as a graphviz 'dot' file and render as SVG::
Display inheritance relationship between classes as a graphviz 'dot' file and
render as SVG::
$ schema-salad-tool --print-inheritance-dot myschema.yml | dot -Tsvg > myschema.svg
......@@ -90,7 +97,8 @@ Description: |Linux Build Status| |Windows Build status| |Code coverage| |CII Be
Quick Start
-----------
Let's say you have a 'basket' record that can contain items measured either by weight or by count. Here's an example::
Let's say you have a 'basket' record that can contain items measured either by
weight or by count. Here's an example::
basket:
- product: bananas
......@@ -159,7 +167,8 @@ Description: |Linux Build Status| |Windows Build status| |Code coverage| |CII Be
type: array
items: Product
You can check the schema and document in schema_salad/tests/basket_schema.yml and schema_salad/tests/basket.yml::
You can check the schema and document in schema_salad/tests/basket_schema.yml
and schema_salad/tests/basket.yml::
$ schema-salad-tool basket_schema.yml basket.yml
Document `basket.yml` is valid
......@@ -225,3 +234,5 @@ Classifier: Programming Language :: Python :: 2.7
Classifier: Programming Language :: Python :: 3.5
Classifier: Programming Language :: Python :: 3.6
Classifier: Programming Language :: Python :: 3.7
Classifier: Programming Language :: Python :: 3.8
Classifier: Typing :: Typed
......@@ -42,8 +42,14 @@ Schema salad can be used as a command line tool or imported as a Python module::
$ schema-salad-tool
usage: schema-salad-tool [-h] [--rdf-serializer RDF_SERIALIZER]
[--print-jsonld-context | --print-rdfs | --print-avro | --print-rdf | --print-pre | --print-index | --print-metadata | --print-inheritance-dot | --print-fieldrefs-dot | --codegen language | --print-oneline]
[--strict | --non-strict] [--verbose | --quiet | --debug] [--version]
[--print-jsonld-context | --print-rdfs | --print-avro
| --print-rdf | --print-pre | --print-index
| --print-metadata | --print-inheritance-dot
| --print-fieldrefs-dot | --codegen language
| --print-oneline]
[--strict | --non-strict] [--verbose | --quiet
| --debug]
[--version]
[schema] [document]
$ python
......@@ -73,7 +79,8 @@ Generate Python classes for loading/generating documents described by the schema
$ schema-salad-tool --codegen=python myschema.yml > myschema.py
Display inheritance relationship between classes as a graphviz 'dot' file and render as SVG::
Display inheritance relationship between classes as a graphviz 'dot' file and
render as SVG::
$ schema-salad-tool --print-inheritance-dot myschema.yml | dot -Tsvg > myschema.svg
......@@ -81,7 +88,8 @@ Display inheritance relationship between classes as a graphviz 'dot' file and re
Quick Start
-----------
Let's say you have a 'basket' record that can contain items measured either by weight or by count. Here's an example::
Let's say you have a 'basket' record that can contain items measured either by
weight or by count. Here's an example::
basket:
- product: bananas
......@@ -150,7 +158,8 @@ Here is an example schema to do that::
type: array
items: Product
You can check the schema and document in schema_salad/tests/basket_schema.yml and schema_salad/tests/basket.yml::
You can check the schema and document in schema_salad/tests/basket_schema.yml
and schema_salad/tests/basket.yml::
$ schema-salad-tool basket_schema.yml basket.yml
Document `basket.yml` is valid
......
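
The README also notes that schema-salad can be imported as a Python module.
The same basket check can be driven programmatically through the CLI entry
point defined later in this diff (schema_salad.main.main accepts an argument
list); a hedged sketch, assuming both test files sit in the working
directory::

    # Validate the basket example without shelling out: main() parses the
    # argument list like the command line would and returns 0 on success,
    # 1 on a validation failure.
    from schema_salad.main import main

    exit_code = main(["basket_schema.yml", "basket.yml"])
    print("valid" if exit_code == 0 else "invalid")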
import subprocess
import time
import pkg_resources
from setuptools.command.egg_info import egg_info
SETUPTOOLS_VER = pkg_resources.get_distribution(
"setuptools").version.split('.')
RECENT_SETUPTOOLS = int(SETUPTOOLS_VER[0]) > 40 or (
int(SETUPTOOLS_VER[0]) == 40 and int(SETUPTOOLS_VER[1]) > 0) or (
int(SETUPTOOLS_VER[0]) == 40 and int(SETUPTOOLS_VER[1]) == 0 and
int(SETUPTOOLS_VER[2]) > 0)
class EggInfoFromGit(egg_info):
"""Tag the build with git commit timestamp.
......@@ -10,6 +17,7 @@ class EggInfoFromGit(egg_info):
If a build tag has already been set (e.g., "egg_info -b", building
from source package), leave it alone.
"""
def git_timestamp_tag(self):
gitinfo = subprocess.check_output(
['git', 'log', '--first-parent', '--max-count=1',
......@@ -20,7 +28,9 @@ class EggInfoFromGit(egg_info):
if self.tag_build is None:
try:
self.tag_build = self.git_timestamp_tag()
except (subprocess.CalledProcessError, OSError):
except subprocess.CalledProcessError:
pass
return egg_info.tags(self)
vtags = property(tags)
if RECENT_SETUPTOOLS:
vtags = property(tags)
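
The RECENT_SETUPTOOLS expression above spells out "installed setuptools is
strictly newer than 40.0.0" with chained integer comparisons. A roughly
equivalent sketch using pkg_resources.parse_version, shown only to illustrate
the intent (it is not part of the commit)::

    # True only when the installed setuptools version is strictly greater
    # than 40.0.0, which is what the RECENT_SETUPTOOLS boolean encodes.
    import pkg_resources

    installed = pkg_resources.get_distribution("setuptools").version
    recent_setuptools = pkg_resources.parse_version(
        installed
    ) > pkg_resources.parse_version("40.0.0")
    print(recent_setuptools)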
typing==3.7.4 ; python_version < "3.5"
ruamel.yaml>=0.12.4, <= 0.15.96
ruamel.yaml>=0.12.4, <= 0.16
rdflib==4.2.2
rdflib-jsonld==0.4.0
mistune>=0.8.1,<0.9
......
Metadata-Version: 1.1
Name: schema-salad
Version: 4.5.20190621200723
Version: 4.5.20190815125611
Summary: Schema Annotations for Linked Avro Data (SALAD)
Home-page: https://github.com/common-workflow-language/schema_salad
Author: Common workflow language working group
......@@ -51,8 +51,14 @@ Description: |Linux Build Status| |Windows Build status| |Code coverage| |CII Be
$ schema-salad-tool
usage: schema-salad-tool [-h] [--rdf-serializer RDF_SERIALIZER]
[--print-jsonld-context | --print-rdfs | --print-avro | --print-rdf | --print-pre | --print-index | --print-metadata | --print-inheritance-dot | --print-fieldrefs-dot | --codegen language | --print-oneline]
[--strict | --non-strict] [--verbose | --quiet | --debug] [--version]
[--print-jsonld-context | --print-rdfs | --print-avro
| --print-rdf | --print-pre | --print-index
| --print-metadata | --print-inheritance-dot
| --print-fieldrefs-dot | --codegen language
| --print-oneline]
[--strict | --non-strict] [--verbose | --quiet
| --debug]
[--version]
[schema] [document]
$ python
......@@ -82,7 +88,8 @@ Description: |Linux Build Status| |Windows Build status| |Code coverage| |CII Be
$ schema-salad-tool --codegen=python myschema.yml > myschema.py
Display inheritance relationship between classes as a graphviz 'dot' file and render as SVG::
Display inheritance relationship between classes as a graphviz 'dot' file and
render as SVG::
$ schema-salad-tool --print-inheritance-dot myschema.yml | dot -Tsvg > myschema.svg
......@@ -90,7 +97,8 @@ Description: |Linux Build Status| |Windows Build status| |Code coverage| |CII Be
Quick Start
-----------
Let's say you have a 'basket' record that can contain items measured either by weight or by count. Here's an example::
Let's say you have a 'basket' record that can contain items measured either by
weight or by count. Here's an example::
basket:
- product: bananas
......@@ -159,7 +167,8 @@ Description: |Linux Build Status| |Windows Build status| |Code coverage| |CII Be
type: array
items: Product
You can check the schema and document in schema_salad/tests/basket_schema.yml and schema_salad/tests/basket.yml::
You can check the schema and document in schema_salad/tests/basket_schema.yml
and schema_salad/tests/basket.yml::
$ schema-salad-tool basket_schema.yml basket.yml
Document `basket.yml` is valid
......@@ -225,3 +234,5 @@ Classifier: Programming Language :: Python :: 2.7
Classifier: Programming Language :: Python :: 3.5
Classifier: Programming Language :: Python :: 3.6
Classifier: Programming Language :: Python :: 3.7
Classifier: Programming Language :: Python :: 3.8
Classifier: Typing :: Typed
setuptools
requests>=1.0
ruamel.yaml<=0.15.96,>=0.12.4
ruamel.yaml<=0.16,>=0.12.4
rdflib<4.3.0,>=4.2.2
rdflib-jsonld<0.5.0,>=0.3.0
mistune<0.9,>=0.8.1
......
from __future__ import absolute_import
import logging
import os
import sys
import typing
import threading
import six
import logging
from .utils import onWindows
__author__ = 'peter.amstutz@curoverse.com'
__author__ = "peter.amstutz@curoverse.com"
_logger = logging.getLogger("salad")
_logger.addHandler(logging.StreamHandler())
......
......@@ -14,4 +14,4 @@
# See the License for the specific language governing permissions and
# limitations under the License.
__all__ = ['schema']
__all__ = ["schema"]
"""Generate langauge specific loaders for a particular SALAD schema."""
import sys
from typing import Any, Dict, List, MutableMapping, Optional, Union
from typing import Any, Dict, List, MutableMapping, Optional
from typing_extensions import Text # pylint: disable=unused-import
# move to a regular typing import when Python 3.3-3.6 is no longer supported
from . import schema
from .codegen_base import CodeGenBase
......@@ -13,12 +12,15 @@ from .ref_resolver import Loader # pylint: disable=unused-import
from .schema import shortname
from .utils import aslist
# move to a regular typing import when Python 3.3-3.6 is no longer supported
def codegen(lang, # type: str
i, # type: List[Dict[Text, Any]]
schema_metadata, # type: Dict[Text, Any]
loader # type: Loader
): # type: (...) -> None
def codegen(
lang, # type: str
i, # type: List[Dict[Text, Any]]
schema_metadata, # type: Dict[Text, Any]
loader, # type: Loader
): # type: (...) -> None
"""Generate classes with loaders for the given Schema Salad description."""
j = schema.extend_and_specialize(i, loader)
......@@ -29,7 +31,7 @@ def codegen(lang, # type: str
elif lang == "java":
gen = JavaCodeGen(schema_metadata.get("$base", schema_metadata.get("id")))
else:
raise Exception("Unsupported code generation language '%s'" % lang)
raise Exception("Unsupported code generation language '{}'".format(lang))
assert gen is not None
gen.prologue()
......@@ -59,16 +61,26 @@ def codegen(lang, # type: str
if field.get("jsonldPredicate") == "@id":
idfield = field.get("name")
gen.begin_class(rec["name"], aslist(rec.get("extends", [])), rec.get("doc", ""),
rec.get("abstract", False), field_names, idfield)
gen.begin_class(
rec["name"],
aslist(rec.get("extends", [])),
rec.get("doc", ""),
rec.get("abstract", False),
field_names,
idfield,
)
gen.add_vocab(shortname(rec["name"]), rec["name"])
for field in rec.get("fields", []):
if field.get("jsonldPredicate") == "@id":
fieldpred = field["name"]
optional = bool("https://w3id.org/cwl/salad#null" in field["type"])
uri_loader = gen.uri_loader(gen.type_loader(field["type"]), True, False, None)
gen.declare_id_field(fieldpred, uri_loader, field.get("doc"), optional)
uri_loader = gen.uri_loader(
gen.type_loader(field["type"]), True, False, None
)
gen.declare_id_field(
fieldpred, uri_loader, field.get("doc"), optional
)
break
for field in rec.get("fields", []):
......@@ -82,15 +94,22 @@ def codegen(lang, # type: str
if jld.get("typeDSL"):
type_loader = gen.typedsl_loader(type_loader, ref_scope)
elif jld.get("_type") == "@id":
type_loader = gen.uri_loader(type_loader, jld.get("identity", False),
False, ref_scope)
type_loader = gen.uri_loader(
type_loader, jld.get("identity", False), False, ref_scope
)
elif jld.get("_type") == "@vocab":
type_loader = gen.uri_loader(type_loader, False, True, ref_scope)
type_loader = gen.uri_loader(
type_loader, False, True, ref_scope
)
map_subject = jld.get("mapSubject")
if map_subject:
type_loader = gen.idmap_loader(
field["name"], type_loader, map_subject, jld.get("mapPredicate"))
field["name"],
type_loader,
map_subject,
jld.get("mapPredicate"),
)
if "_id" in jld and jld["_id"][0] != "@":
fieldpred = jld["_id"]
......@@ -103,9 +122,6 @@ def codegen(lang, # type: str
gen.end_class(rec["name"], field_names)
root_type = list(document_roots)
root_type.append({
"type": "array",
"items": document_roots
})
root_type.append({"type": "array", "items": document_roots})
gen.epilogue(gen.type_loader(root_type))
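
The epilogue call above hands the code generator a root type that accepts
either a single documentRoot record or an array of them. A small worked
sketch of the list it builds, using a hypothetical schema whose only
documentRoot is Basket::

    # root_type as constructed at the end of codegen(): the document loader
    # accepts any documentRoot record or an array of such records.
    # "Basket" is a hypothetical documentRoot name, not from a real schema.
    document_roots = ["Basket"]

    root_type = list(document_roots)
    root_type.append({"type": "array", "items": document_roots})

    print(root_type)
    # ['Basket', {'type': 'array', 'items': ['Basket']}]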
"""Base class for the generation of loaders from schema-salad definitions."""
import collections
from typing import (Any, Dict, List, MutableSequence, Optional, Union)
from typing import Any, Dict, List, MutableSequence, Optional, Union
from typing_extensions import Text # pylint: disable=unused-import
# move to a regular typing import when Python 3.3-3.6 is no longer supported
from . import schema
# move to a regular typing import when Python 3.3-3.6 is no longer supported
class TypeDef(object): # pylint: disable=too-few-public-methods
"""Schema Salad type description."""
__slots__ = ["name", "init", "is_uri", "scoped_id", "ref_scope"]
# switch to class-style typing.NamedTuple once support for Python < 3.6
# is dropped
def __init__(self, # pylint: disable=too-many-arguments
name, # type: Text
init, # type: Text
is_uri=False, # type: bool
scoped_id=False, # type: bool
ref_scope=0 # type: Optional[int]
): # type: (...) -> None
def __init__(
self, # pylint: disable=too-many-arguments
name, # type: Text
init, # type: Text
is_uri=False, # type: bool
scoped_id=False, # type: bool
ref_scope=0, # type: Optional[int]
): # type: (...) -> None
self.name = name
self.init = init
self.is_uri = is_uri
self.scoped_id = scoped_id
self.ref_scope = ref_scope
class CodeGenBase(object):
"""Abstract base class for schema salad code generators."""
def __init__(self): # type: () -> None
self.collected_types = collections.OrderedDict() # type: collections.OrderedDict[Text, TypeDef]
self.collected_types = (
collections.OrderedDict()
) # type: collections.OrderedDict[Text, TypeDef]
self.vocab = {} # type: Dict[Text, Text]
def declare_type(self, declared_type): # type: (TypeDef) -> TypeDef
......@@ -49,14 +59,15 @@ class CodeGenBase(object):
"""Generate a safe version of the given name."""
return schema.avro_name(name)
def begin_class(self, # pylint: disable=too-many-arguments
classname, # type: Text
extends, # type: MutableSequence[Text]
doc, # type: Text
abstract, # type: bool
field_names, # type: MutableSequence[Text]
idfield # type: Text
): # type: (...) -> None
def begin_class(
self, # pylint: disable=too-many-arguments
classname, # type: Text
extends, # type: MutableSequence[Text]
doc, # type: Text
abstract, # type: bool
field_names, # type: MutableSequence[Text]
idfield, # type: Text
): # type: (...) -> None
"""Produce the header for the given class."""
raise NotImplementedError()
......
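
TypeDef is a plain record describing how a generated loader refers to a type.
A short sketch of constructing instances in the style of the next file's
prims table (the names and init strings are illustrative placeholders, not
output of a real run)::

    # Two illustrative TypeDef instances: a plain string type and a scoped,
    # URI-valued variant. Field values are placeholders for demonstration.
    from schema_salad.codegen_base import TypeDef

    string_type = TypeDef("String", "Support.StringLoader()")
    id_type = TypeDef(
        "uri_String",
        "Support.UriLoader(Support.StringLoader())",
        is_uri=True,
        scoped_id=True,
        ref_scope=2,
    )
    print(string_type.name, id_type.is_uri, id_type.ref_scope)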
"""Work-in-progress Java code generator for a given schema salad definition."""
import os
from typing import MutableSequence
from typing import Any, Dict, List, MutableSequence, Union
from six import string_types
from six.moves import cStringIO, urllib
from typing_extensions import Text # pylint: disable=unused-import
# move to a regular typing import when Python 3.3-3.6 is no longer supported
from . import schema
from .codegen_base import CodeGenBase, TypeDef
# move to a regular typing import when Python 3.3-3.6 is no longer supported
class JavaCodeGen(CodeGenBase):
def __init__(self, base):
......@@ -17,7 +18,9 @@ class JavaCodeGen(CodeGenBase):
super(JavaCodeGen, self).__init__()
sp = urllib.parse.urlsplit(base)
self.package = ".".join(list(reversed(sp.netloc.split("."))) + sp.path.strip("/").split("/"))
self.package = ".".join(
list(reversed(sp.netloc.split("."))) + sp.path.strip("/").split("/")
)
self.outdir = self.package.replace(".", "/")
def prologue(self): # type: () -> None
......@@ -29,85 +32,116 @@ class JavaCodeGen(CodeGenBase):
avn = schema.avro_name(name)
if avn in ("class", "extends", "abstract"):
# reserved words
avn = avn+"_"
avn = avn + "_"
return avn
def interface_name(self, n):
# type: (Text) -> Text
return self.safe_name(n)
def begin_class(self,
classname, # type: Text
extends, # type: MutableSequence[Text]
doc, # type: Text
abstract, # type: bool
field_names, # type: MutableSequence[Text]
idfield # type: Text
): # type: (...) -> None
def begin_class(
self,
classname, # type: Text
extends, # type: MutableSequence[Text]
doc, # type: Text
abstract, # type: bool
field_names, # type: MutableSequence[Text]
idfield, # type: Text
): # type: (...) -> None
cls = self.interface_name(classname)
self.current_class = cls
self.current_class_is_abstract = abstract
self.current_loader = cStringIO()
self.current_fields = cStringIO()
with open(os.path.join(self.outdir, "%s.java" % cls), "w") as f:
with open(os.path.join(self.outdir, "{}.java".format(cls)), "w") as f:
if extends:
ext = "extends " + ", ".join(self.interface_name(e) for e in extends)
else:
ext = ""
f.write("""package {package};
f.write(
"""package {package};
public interface {cls} {ext} {{
""".
format(package=self.package,
cls=cls,
ext=ext))
""".format(
package=self.package, cls=cls, ext=ext
)
)
if self.current_class_is_abstract:
return
with open(os.path.join(self.outdir, "%sImpl.java" % cls), "w") as f:
f.write("""package {package};
with open(os.path.join(self.outdir, "{}Impl.java".format(cls)), "w") as f:
f.write(
"""package {package};
public class {cls}Impl implements {cls} {{
""".
format(package=self.package,
cls=cls,
ext=ext))
self.current_loader.write("""
""".format(
package=self.package, cls=cls, ext=ext
)
)
self.current_loader.write(
"""
void Load() {
""")
"""
)
def end_class(self, classname, field_names):
with open(os.path.join(self.outdir, "%s.java" % self.current_class), "a") as f:
f.write("""
# type: (Text, List[Text]) -> None
with open(
os.path.join(self.outdir, "{}.java".format(self.current_class)), "a"
) as f:
f.write(
"""
}
""")
"""
)
if self.current_class_is_abstract:
return
self.current_loader.write("""
self.current_loader.write(
"""
}
""")
"""
)
with open(os.path.join(self.outdir, "%sImpl.java" % self.current_class), "a") as f:
with open(
os.path.join(self.outdir, "{}Impl.java".format(self.current_class)), "a"
) as f:
f.write(self.current_fields.getvalue())
f.write(self.current_loader.getvalue())
f.write("""
f.write(
"""
}
""")
"""
)
prims = {
u"http://www.w3.org/2001/XMLSchema#string": TypeDef("String", "Support.StringLoader()"),
u"http://www.w3.org/2001/XMLSchema#int": TypeDef("Integer", "Support.IntLoader()"),
u"http://www.w3.org/2001/XMLSchema#long": TypeDef("Long", "Support.LongLoader()"),
u"http://www.w3.org/2001/XMLSchema#float": TypeDef("Float", "Support.FloatLoader()"),
u"http://www.w3.org/2001/XMLSchema#double": TypeDef("Double", "Support.DoubleLoader()"),
u"http://www.w3.org/2001/XMLSchema#boolean": TypeDef("Boolean", "Support.BoolLoader()"),
u"https://w3id.org/cwl/salad#null": TypeDef("null_type", "Support.NullLoader()"),
u"https://w3id.org/cwl/salad#Any": TypeDef("Any_type", "Support.AnyLoader()")
u"http://www.w3.org/2001/XMLSchema#string": TypeDef(
"String", "Support.StringLoader()"
),
u"http://www.w3.org/2001/XMLSchema#int": TypeDef(
"Integer", "Support.IntLoader()"
),
u"http://www.w3.org/2001/XMLSchema#long": TypeDef(
"Long", "Support.LongLoader()"
),
u"http://www.w3.org/2001/XMLSchema#float": TypeDef(
"Float", "Support.FloatLoader()"
),
u"http://www.w3.org/2001/XMLSchema#double": TypeDef(
"Double", "Support.DoubleLoader()"
),
u"http://www.w3.org/2001/XMLSchema#boolean": TypeDef(
"Boolean", "Support.BoolLoader()"
),
u"https://w3id.org/cwl/salad#null": TypeDef(
"null_type", "Support.NullLoader()"
),
u"https://w3id.org/cwl/salad#Any": TypeDef("Any_type", "Support.AnyLoader()"),
}
def type_loader(self, type_declaration):
# type: (Union[List[Any], Dict[Text, Any]]) -> TypeDef
if isinstance(type_declaration, MutableSequence) and len(type_declaration) == 2:
if type_declaration[0] == "https://w3id.org/cwl/salad#null":
type_declaration = type_declaration[1]
......@@ -117,46 +151,61 @@ public class {cls}Impl implements {cls} {{
return TypeDef("Object", "")
def declare_field(self, name, fieldtype, doc, optional):
# type: (Text, TypeDef, Text, bool) -> None
fieldname = self.safe_name(name)
with open(os.path.join(self.outdir, "%s.java" % self.current_class), "a") as f:
f.write("""
with open(
os.path.join(self.outdir, "{}.java".format(self.current_class)), "a"
) as f:
f.write(
"""
{type} get{capfieldname}();
""".
format(fieldname=fieldname,
capfieldname=fieldname[0].upper() + fieldname[1:],
type=fieldtype.name))
""".format(
fieldname=fieldname,
capfieldname=fieldname[0].upper() + fieldname[1:],
type=fieldtype.name,
)
)
if self.current_class_is_abstract:
return
self.current_fields.write("""
self.current_fields.write(
"""
private {type} {fieldname};
public {type} get{capfieldname}() {{
return this.{fieldname};
}}
""".
format(fieldname=fieldname,
capfieldname=fieldname[0].upper() + fieldname[1:],
type=fieldtype.name))
self.current_loader.write("""
""".format(
fieldname=fieldname,
capfieldname=fieldname[0].upper() + fieldname[1:],
type=fieldtype.name,
)
)
self.current_loader.write(
"""
this.{fieldname} = null; // TODO: loaders
""".
format(fieldname=fieldname))
""".format(
fieldname=fieldname
)
)
def declare_id_field(self, name, fieldtype, doc, optional):
# type: (Text, TypeDef, Text, bool) -> None
pass
def uri_loader(self, inner, scoped_id, vocab_term, ref_scope):
# type: (TypeDef, bool, bool, Union[int, None]) -> TypeDef
return inner
def idmap_loader(self, field, inner, map_subject, map_predicate):
# type: (Text, TypeDef, Text, Union[Text, None]) -> TypeDef
return inner
def typedsl_loader(self, inner, ref_scope):
# type: (TypeDef, Union[int, None]) -> TypeDef
return inner
def epilogue(self, root_loader):
def epilogue(self, root_loader): # type: (TypeDef) -> None
pass
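
The package computation in JavaCodeGen.__init__ reverses the host portion of
the schema's $base and appends its path segments to form a Java package name.
A worked sketch with the salad base URL used elsewhere in this module
(expected output shown in comments)::

    # Derive the Java package and output directory the way JavaCodeGen does:
    # reversed netloc components of $base followed by its path segments.
    from six.moves import urllib

    base = "https://w3id.org/cwl/salad#"
    sp = urllib.parse.urlsplit(base)
    package = ".".join(
        list(reversed(sp.netloc.split("."))) + sp.path.strip("/").split("/")
    )
    print(package)  # org.w3id.cwl.salad
    print(package.replace(".", "/"))  # org/w3id/cwl/salad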
from __future__ import absolute_import
import logging
from typing import (Any, Dict, Iterable, List, # pylint: disable=unused-import
MutableMapping, MutableSequence, Optional, Tuple, Union,
cast)
from typing import (
Any,
Dict,
Iterable,
List,
MutableMapping,
MutableSequence,
Optional,
Tuple,
Union,
cast,
)
import rdflib
import rdflib.namespace
import six
from rdflib import Graph, URIRef
from rdflib.namespace import RDF, RDFS
import six
from six.moves import urllib
from typing_extensions import Text # pylint: disable=unused-import
# move to a regular typing import when Python 3.3-3.6 is no longer supported
from .ref_resolver import ContextType # pylint: disable=unused-import
from .utils import aslist, json_dumps
# move to a regular typing import when Python 3.3-3.6 is no longer supported
_logger = logging.getLogger("salad")
def pred(datatype, # type: MutableMapping[Text, Union[Dict, Text]]
field, # type: Optional[Dict]
name, # type: str
context, # type: ContextType
defaultBase, # type: str
namespaces # type: Dict[Text, rdflib.namespace.Namespace]
): # type: (...) -> Union[Dict, Text]
def pred(
datatype, # type: MutableMapping[Text, Union[Dict[Text, Text], Text]]
field, # type: Optional[Dict[Text, Any]]
name, # type: str
context, # type: ContextType
defaultBase, # type: str
namespaces, # type: Dict[Text, rdflib.namespace.Namespace]
): # type: (...) -> Union[Dict[Text, Text], Text]
split = urllib.parse.urlsplit(name)
vee = None # type: Optional[Text]
if split.scheme != '':
if split.scheme != "":
vee = name
(ns, ln) = rdflib.namespace.split_uri(six.text_type(vee))
name = ln
......@@ -40,7 +51,7 @@ def pred(datatype, # type: MutableMapping[Text, Union[Dict, Text]]
vee = six.text_type(namespaces[ns[0:-1]][ln])
_logger.debug("name, v %s %s", name, vee)
v = None # type: Optional[Dict]
v = None # type: Optional[Dict[Text, Any]]
if field is not None and "jsonldPredicate" in field:
if isinstance(field["jsonldPredicate"], MutableMapping):
......@@ -59,8 +70,8 @@ def pred(datatype, # type: MutableMapping[Text, Union[Dict, Text]]
v = d["predicate"]
else:
raise Exception(
"entries in the jsonldPredicate List must be "
"Dictionaries")
"entries in the jsonldPredicate List must be " "Dictionaries"
)
else:
raise Exception("jsonldPredicate must be a List of Dictionaries.")
......@@ -71,8 +82,11 @@ def pred(datatype, # type: MutableMapping[Text, Union[Dict, Text]]
if name in context:
if context[name] != ret:
raise Exception("Predicate collision on %s, '%s' != '%s'" %
(name, context[name], ret))
raise Exception(
"Predicate collision on {}, '{}' != '{}'".format(
name, context[name], ret
)
)
else:
_logger.debug("Adding to context '%s' %s (%s)", name, ret, type(ret))
context[name] = ret
......@@ -80,13 +94,14 @@ def pred(datatype, # type: MutableMapping[Text, Union[Dict, Text]]
return ret
def process_type(t, # type: MutableMapping[Text, Any]
g, # type: Graph
context, # type: ContextType
defaultBase, # type: str
namespaces, # type: Dict[Text, rdflib.namespace.Namespace]
defaultPrefix # type: str
): # type: (...) -> None
def process_type(
t, # type: MutableMapping[Text, Any]
g, # type: Graph
context, # type: ContextType
defaultBase, # type: str
namespaces, # type: Dict[Text, rdflib.namespace.Namespace]
defaultPrefix, # type: str
): # type: (...) -> None
if t["type"] not in ("record", "enum"):
return
......@@ -106,17 +121,21 @@ def process_type(t, # type: MutableMapping[Text, Any]
predicate = recordname
recordname = ln
else:
predicate = "%s:%s" % (defaultPrefix, recordname)
predicate = "{}:{}".format(defaultPrefix, recordname)
if context.get(recordname, predicate) != predicate:
raise Exception("Predicate collision on '%s', '%s' != '%s'" % (
recordname, context[recordname], predicate))
raise Exception(
"Predicate collision on '{}', '{}' != '{}'".format(
recordname, context[recordname], predicate
)
)
if not recordname:
raise Exception()
_logger.debug("Adding to context '%s' %s (%s)",
recordname, predicate, type(predicate))
_logger.debug(
"Adding to context '%s' %s (%s)", recordname, predicate, type(predicate)
)
context[recordname] = predicate
if t["type"] == "record":
......@@ -125,8 +144,9 @@ def process_type(t, # type: MutableMapping[Text, Any]
_logger.debug("Processing field %s", i)
v = pred(t, i, fieldname, context, defaultPrefix,
namespaces) # type: Union[Dict[Any, Any], Text, None]
v = pred(
t, i, fieldname, context, defaultPrefix, namespaces
) # type: Union[Dict[Any, Any], Text, None]
if isinstance(v, six.string_types):
v = v if v[0] != "@" else None
......@@ -146,8 +166,9 @@ def process_type(t, # type: MutableMapping[Text, Any]
# TODO generate range from datatype.
if isinstance(i["type"], MutableMapping):
process_type(i["type"], g, context, defaultBase,
namespaces, defaultPrefix)
process_type(
i["type"], g, context, defaultBase, namespaces, defaultPrefix
)
if "extends" in t:
for e in aslist(t["extends"]):
......@@ -159,8 +180,10 @@ def process_type(t, # type: MutableMapping[Text, Any]
pred(t, None, i, context, defaultBase, namespaces)
def salad_to_jsonld_context(j, schema_ctx):
# type: (Iterable, MutableMapping[Text, Any]) -> Tuple[ContextType, Graph]
def salad_to_jsonld_context(
j, # type: Iterable[MutableMapping[Text, Any]]
schema_ctx, # type: MutableMapping[Text, Any]
): # type: (...) -> Tuple[ContextType, Graph]
context = {} # type: ContextType
namespaces = {}
g = Graph()
......@@ -185,9 +208,10 @@ def salad_to_jsonld_context(j, schema_ctx):
return (context, g)
def fix_jsonld_ids(obj, # type: Union[Dict[Text, Any], List[Dict[Text, Any]]]
ids # type: List[Text]
): # type: (...) -> None
def fix_jsonld_ids(
obj, # type: Union[List[Dict[Text, Any]], MutableMapping[Text, Any]]
ids, # type: List[Text]
): # type: (...) -> None
if isinstance(obj, MutableMapping):
for i in ids:
if i in obj:
......@@ -199,11 +223,12 @@ def fix_jsonld_ids(obj, # type: Union[Dict[Text, Any], List[Dict[Text, Any]]]
fix_jsonld_ids(entry, ids)
def makerdf(workflow, # type: Text
wf, # type: Union[List[Dict[Text, Any]], Dict[Text, Any]]
ctx, # type: ContextType
graph=None # type: Graph
): # type: (...) -> Graph
def makerdf(
workflow, # type: Text
wf, # type: Union[List[Dict[Text, Any]], MutableMapping[Text, Any]]
ctx, # type: ContextType
graph=None, # type: Optional[Graph]
): # type: (...) -> Graph
prefixes = {}
idfields = []
for k, v in six.iteritems(ctx):
......@@ -216,7 +241,7 @@ def makerdf(workflow, # type: Text
doc_url, frg = urllib.parse.urldefrag(url)
if "/" in frg:
p = frg.split("/")[0]
prefixes[p] = u"%s#%s/" % (doc_url, p)
prefixes[p] = u"{}#{}/".format(doc_url, p)
fix_jsonld_ids(wf, idfields)
......@@ -228,10 +253,10 @@ def makerdf(workflow, # type: Text
if isinstance(wf, MutableSequence):
for w in wf:
w["@context"] = ctx
g.parse(data=json_dumps(w), format='json-ld', publicID=str(workflow))
g.parse(data=json_dumps(w), format="json-ld", publicID=str(workflow))
else:
wf["@context"] = ctx
g.parse(data=json_dumps(wf), format='json-ld', publicID=str(workflow))
g.parse(data=json_dumps(wf), format="json-ld", publicID=str(workflow))
# Bug in json-ld loader causes @id fields to be added to the graph
for sub, pred, obj in g.triples((None, URIRef("@id"), None)):
......
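
Under the hood, makerdf attaches the JSON-LD context to the document and lets
the rdflib-jsonld parser turn it into triples, as the preceding hunk shows. A
self-contained sketch of that round trip with a tiny, purely illustrative
context and document (not taken from a real salad schema)::

    # The JSON-LD round trip makerdf() relies on: set "@context" on the
    # document, then parse the JSON text into an rdflib Graph.
    import json

    from rdflib import Graph
    from rdflib.parser import Parser
    from rdflib.plugin import register

    register("json-ld", Parser, "rdflib_jsonld.parser", "JsonLDParser")

    ctx = {"name": "http://example.com/vocab#name"}
    doc = {"@id": "http://example.com/doc#rec", "name": "hello"}
    doc["@context"] = ctx

    g = Graph()
    g.parse(data=json.dumps(doc), format="json-ld", publicID="http://example.com/doc")
    print(g.serialize(format="turtle").decode("utf-8"))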
......@@ -5,111 +5,175 @@ import argparse
import logging
import os
import sys
from typing import (Any, Dict, List, Mapping, MutableSequence,
Union, cast)
from typing import Any, Dict, List, Mapping, MutableSequence, Optional, Union, cast
import pkg_resources # part of setuptools
import six
from rdflib.parser import Parser
from rdflib.plugin import register
from ruamel.yaml.comments import CommentedMap
import six
from six.moves import urllib
from typing_extensions import Text # pylint: disable=unused-import
# move to a regular typing import when Python 3.3-3.6 is no longer supported
from ruamel.yaml.comments import CommentedSeq
from . import codegen, jsonld_context, schema, validate
from .ref_resolver import Loader, file_uri
from .sourceline import strip_dup_lineno, to_one_line_messages, reformat_yaml_exception_message
from .utils import json_dumps
from .avro.schema import SchemaParseException
from .makedoc import makedoc
from .ref_resolver import Loader, file_uri
from .sourceline import (
reformat_yaml_exception_message,
strip_dup_lineno,
to_one_line_messages,
)
from .utils import json_dumps
# move to a regular typing import when Python 3.3-3.6 is no longer supported
register('json-ld', Parser, 'rdflib_jsonld.parser', 'JsonLDParser')
register("json-ld", Parser, "rdflib_jsonld.parser", "JsonLDParser")
_logger = logging.getLogger("salad")
def printrdf(workflow, # type: str
wf, # type: Union[List[Dict[Text, Any]], Dict[Text, Any]]
ctx, # type: Dict[Text, Any]
sr # type: str
):
def printrdf(
workflow, # type: str
wf, # type: Union[List[Dict[Text, Any]], Dict[Text, Any]]
ctx, # type: Dict[Text, Any]
sr, # type: str
):
# type: (...) -> None
g = jsonld_context.makerdf(workflow, wf, ctx)
print(g.serialize(format=sr, encoding='utf-8').decode('utf-8')) # type: ignore
print (g.serialize(format=sr, encoding="utf-8").decode("utf-8"))
def main(argsl=None): # type: (List[str]) -> int
def main(argsl=None): # type: (Optional[List[str]]) -> int
if argsl is None:
argsl = sys.argv[1:]
parser = argparse.ArgumentParser()
parser.add_argument("--rdf-serializer",
help="Output RDF serialization format used by --print-rdf (one of turtle (default), n3, nt, xml)",
default="turtle")
parser.add_argument("--skip-schemas", action="store_true", default=False, help="If specified, ignore $schemas sections.")
parser.add_argument("--strict-foreign-properties", action="store_true", help="Strict checking of foreign properties",
default=False)
parser.add_argument(
"--rdf-serializer",
help="Output RDF serialization format used by --print-rdf"
"(one of turtle (default), n3, nt, xml)",
default="turtle",
)
parser.add_argument(
"--skip-schemas",
action="store_true",
default=False,
help="If specified, ignore $schemas sections.",
)
parser.add_argument(
"--strict-foreign-properties",
action="store_true",
help="Strict checking of foreign properties",
default=False,
)
exgroup = parser.add_mutually_exclusive_group()
exgroup.add_argument("--print-jsonld-context", action="store_true",
help="Print JSON-LD context for schema")
exgroup.add_argument(
"--print-rdfs", action="store_true", help="Print RDF schema")
exgroup.add_argument("--print-avro", action="store_true",
help="Print Avro schema")
exgroup.add_argument("--print-rdf", action="store_true",
help="Print corresponding RDF graph for document")
exgroup.add_argument("--print-pre", action="store_true",
help="Print document after preprocessing")
"--print-jsonld-context",
action="store_true",
help="Print JSON-LD context for schema",
)
exgroup.add_argument("--print-rdfs", action="store_true", help="Print RDF schema")
exgroup.add_argument("--print-avro", action="store_true", help="Print Avro schema")
exgroup.add_argument(
"--print-rdf",
action="store_true",
help="Print corresponding RDF graph for document",
)
exgroup.add_argument(
"--print-pre", action="store_true", help="Print document after preprocessing"
)
exgroup.add_argument("--print-index", action="store_true", help="Print node index")
exgroup.add_argument(
"--print-index", action="store_true", help="Print node index")
exgroup.add_argument("--print-metadata",
action="store_true", help="Print document metadata")
exgroup.add_argument("--print-inheritance-dot",
action="store_true", help="Print graphviz file of inheritance")
exgroup.add_argument("--print-fieldrefs-dot",
action="store_true", help="Print graphviz file of field refs")
"--print-metadata", action="store_true", help="Print document metadata"
)
exgroup.add_argument(
"--print-inheritance-dot",
action="store_true",
help="Print graphviz file of inheritance",
)
exgroup.add_argument(
"--print-fieldrefs-dot",
action="store_true",
help="Print graphviz file of field refs",
)
exgroup.add_argument("--codegen", type=str, metavar="language", help="Generate classes in target language, currently supported: python")
exgroup.add_argument(
"--codegen",
type=str,
metavar="language",
help="Generate classes in target language, currently supported: python",
)
exgroup.add_argument("--print-oneline", action="store_true",
help="Print each error message in oneline")
exgroup.add_argument(
"--print-oneline",
action="store_true",
help="Print each error message in oneline",
)
exgroup.add_argument("--print-doc", action="store_true",
help="Print HTML schema documentation page")
exgroup.add_argument(
"--print-doc", action="store_true", help="Print HTML schema documentation page"
)
exgroup = parser.add_mutually_exclusive_group()
exgroup.add_argument("--strict", action="store_true", help="Strict validation (unrecognized or out of place fields are error)",
default=True, dest="strict")
exgroup.add_argument("--non-strict", action="store_false", help="Lenient validation (ignore unrecognized fields)",
default=True, dest="strict")
exgroup.add_argument(
"--strict",
action="store_true",
help="Strict validation (unrecognized or out of place fields are error)",
default=True,
dest="strict",
)
exgroup.add_argument(
"--non-strict",
action="store_false",
help="Lenient validation (ignore unrecognized fields)",
default=True,
dest="strict",
)
exgroup = parser.add_mutually_exclusive_group()
exgroup.add_argument("--verbose", action="store_true",
help="Default logging")
exgroup.add_argument("--quiet", action="store_true",
help="Only print warnings and errors.")
exgroup.add_argument("--debug", action="store_true",
help="Print even more logging")
parser.add_argument('--only', action='append', help="Use with --print-doc, document only listed types")
parser.add_argument('--redirect', action='append', help="Use with --print-doc, override default link for type")
parser.add_argument('--brand', help="Use with --print-doc, set the 'brand' text in nav bar")
parser.add_argument('--brandlink', help="Use with --print-doc, set the link for 'brand' in nav bar")
parser.add_argument('--primtype', default="#PrimitiveType", help="Use with --print-doc, link to use for primitive types (string, int etc)")
exgroup.add_argument("--verbose", action="store_true", help="Default logging")
exgroup.add_argument(
"--quiet", action="store_true", help="Only print warnings and errors."
)
exgroup.add_argument("--debug", action="store_true", help="Print even more logging")
parser.add_argument(
"--only",
action="append",
help="Use with --print-doc, document only listed types",
)
parser.add_argument(
"--redirect",
action="append",
help="Use with --print-doc, override default link for type",
)
parser.add_argument(
"--brand", help="Use with --print-doc, set the 'brand' text in nav bar"
)
parser.add_argument(
"--brandlink", help="Use with --print-doc, set the link for 'brand' in nav bar"
)
parser.add_argument(
"--primtype",
default="#PrimitiveType",
help="Use with --print-doc, link to use for primitive types (string, int etc)",
)
parser.add_argument("schema", type=str, nargs="?", default=None)
parser.add_argument("document", type=str, nargs="?", default=None)
parser.add_argument("--version", "-v", action="store_true",
help="Print version", default=None)
parser.add_argument(
"--version", "-v", action="store_true", help="Print version", default=None
)
args = parser.parse_args(argsl)
if args.version is None and args.schema is None:
print('%s: error: too few arguments' % sys.argv[0])
print ("{}: error: too few arguments".format(sys.argv[0]))
return 1
if args.quiet:
......@@ -120,7 +184,7 @@ def main(argsl=None): # type: (List[str]) -> int
pkg = pkg_resources.require("schema_salad")
if pkg:
if args.version:
print("%s Current version: %s" % (sys.argv[0], pkg[0].version))
print ("{} Current version: {}".format(sys.argv[0], pkg[0].version))
return 0
else:
_logger.info("%s Current version: %s", sys.argv[0], pkg[0].version)
......@@ -131,22 +195,34 @@ def main(argsl=None): # type: (List[str]) -> int
# Load schema document and resolve refs
schema_uri = args.schema
if not (urllib.parse.urlparse(schema_uri)[0] and urllib.parse.urlparse(schema_uri)[0] in [u'http', u'https', u'file']):
if not (
urllib.parse.urlparse(schema_uri)[0]
and urllib.parse.urlparse(schema_uri)[0] in [u"http", u"https", u"file"]
):
schema_uri = file_uri(os.path.abspath(schema_uri))
schema_raw_doc = metaschema_loader.fetch(schema_uri)
try:
schema_doc, schema_metadata = metaschema_loader.resolve_all(
schema_raw_doc, schema_uri)
schema_raw_doc, schema_uri
)
except (validate.ValidationException) as e:
_logger.error("Schema `%s` failed link checking:\n%s",
args.schema, Text(e), exc_info=(True if args.debug else False))
_logger.error(
"Schema `%s` failed link checking:\n%s",
args.schema,
Text(e),
exc_info=(True if args.debug else False),
)
_logger.debug("Index is %s", list(metaschema_loader.idx.keys()))
_logger.debug("Vocabulary is %s", list(metaschema_loader.vocab.keys()))
return 1
except (RuntimeError) as e:
_logger.error("Schema `%s` read error:\n%s",
args.schema, Text(e), exc_info=(True if args.debug else False))
_logger.error(
"Schema `%s` read error:\n%s",
args.schema,
Text(e),
exc_info=(True if args.debug else False),
)
return 1
if args.print_doc:
......@@ -155,38 +231,43 @@ def main(argsl=None): # type: (List[str]) -> int
# Optionally print the schema after ref resolution
if not args.document and args.print_pre:
print(json_dumps(schema_doc, indent=4))
print (json_dumps(schema_doc, indent=4))
return 0
if not args.document and args.print_index:
print(json_dumps(list(metaschema_loader.idx.keys()), indent=4))
print (json_dumps(list(metaschema_loader.idx.keys()), indent=4))
return 0
# Validate the schema document against the metaschema
try:
schema.validate_doc(metaschema_names, schema_doc,
metaschema_loader, args.strict)
schema.validate_doc(
metaschema_names, schema_doc, metaschema_loader, args.strict
)
except validate.ValidationException as e:
_logger.error("While validating schema `%s`:\n%s",
args.schema, Text(e))
_logger.error("While validating schema `%s`:\n%s", args.schema, Text(e))
return 1
# Get the json-ld context and RDFS representation from the schema
metactx = schema.collect_namespaces(schema_metadata)
if "$base" in schema_metadata:
metactx["@base"] = schema_metadata["$base"]
if schema_doc is not None:
(schema_ctx, rdfs) = jsonld_context.salad_to_jsonld_context(
schema_doc, metactx)
if isinstance(schema_doc, CommentedSeq):
(schema_ctx, rdfs) = jsonld_context.salad_to_jsonld_context(schema_doc, metactx)
else:
raise Exception("schema_doc is None??")
raise Exception(
"Expected a CommentedSeq, got {}: {}.".format(type(schema_doc), schema_doc)
)
# Create the loader that will be used to load the target document.
document_loader = Loader(schema_ctx, skip_schemas=args.skip_schemas)
if args.codegen:
codegen.codegen(args.codegen, cast(List[Dict[Text, Any]], schema_doc),
schema_metadata, document_loader)
codegen.codegen(
args.codegen,
cast(List[Dict[Text, Any]], schema_doc),
schema_metadata,
document_loader,
)
return 0
# Make the Avro validation that will be used to validate the target
......@@ -196,11 +277,14 @@ def main(argsl=None): # type: (List[str]) -> int
try:
avsc_names = schema.make_avro_schema_from_avro(avsc_obj)
except SchemaParseException as err:
_logger.error("Schema `%s` error:\n%s", args.schema, Text(err),
exc_info=(
(type(err), err, None) if args.debug else None))
_logger.error(
"Schema `%s` error:\n%s",
args.schema,
Text(err),
exc_info=((type(err), err, None) if args.debug else None),
)
if args.print_avro:
print(json_dumps(avsc_obj, indent=4))
print (json_dumps(avsc_obj, indent=4))
return 1
else:
_logger.error("Schema `%s` must be a list.", args.schema)
......@@ -208,22 +292,22 @@ def main(argsl=None): # type: (List[str]) -> int
# Optionally print Avro-compatible schema from schema
if args.print_avro:
print(json_dumps(avsc_obj, indent=4))
print (json_dumps(avsc_obj, indent=4))
return 0
# Optionally print the json-ld context from the schema
if args.print_jsonld_context:
j = {"@context": schema_ctx}
print(json_dumps(j, indent=4, sort_keys=True))
print (json_dumps(j, indent=4, sort_keys=True))
return 0
# Optionally print the RDFS graph from the schema
if args.print_rdfs:
print(rdfs.serialize(format=args.rdf_serializer).decode('utf-8')) # type: ignore
print (rdfs.serialize(format=args.rdf_serializer).decode("utf-8"))
return 0
if args.print_metadata and not args.document:
print(json_dumps(schema_metadata, indent=4))
print (json_dumps(schema_metadata, indent=4))
return 0
if args.print_inheritance_dot:
......@@ -236,47 +320,58 @@ def main(argsl=None): # type: (List[str]) -> int
# If no document specified, all done.
if not args.document:
print("Schema `%s` is valid" % args.schema)
print ("Schema `{}` is valid".format(args.schema))
return 0
# Load target document and resolve refs
try:
uri = args.document
if not urllib.parse.urlparse(uri)[0]:
doc = "file://" + os.path.abspath(uri)
document, doc_metadata = document_loader.resolve_ref(uri, strict_foreign_properties=args.strict_foreign_properties)
document, doc_metadata = document_loader.resolve_ref(
uri, strict_foreign_properties=args.strict_foreign_properties
)
except validate.ValidationException as e:
msg = strip_dup_lineno(six.text_type(e))
msg = to_one_line_messages(str(msg)) if args.print_oneline else msg
_logger.error("Document `%s` failed validation:\n%s",
args.document, msg, exc_info=args.debug)
_logger.error(
"Document `%s` failed validation:\n%s",
args.document,
msg,
exc_info=args.debug,
)
return 1
except RuntimeError as e:
msg = strip_dup_lineno(six.text_type(e))
msg = reformat_yaml_exception_message(str(msg))
msg = to_one_line_messages(msg) if args.print_oneline else msg
_logger.error("Document `%s` failed validation:\n%s",
args.document, msg, exc_info=args.debug)
_logger.error(
"Document `%s` failed validation:\n%s",
args.document,
msg,
exc_info=args.debug,
)
return 1
# Optionally print the document after ref resolution
if args.print_pre:
print(json_dumps(document, indent=4))
print (json_dumps(document, indent=4))
return 0
if args.print_index:
print(json_dumps(list(document_loader.idx.keys()), indent=4))
print (json_dumps(list(document_loader.idx.keys()), indent=4))
return 0
# Validate the user document against the schema
try:
schema.validate_doc(avsc_names, document,
document_loader, args.strict,
strict_foreign_properties=args.strict_foreign_properties)
schema.validate_doc(
avsc_names,
document,
document_loader,
args.strict,
strict_foreign_properties=args.strict_foreign_properties,
)
except validate.ValidationException as e:
msg = to_one_line_messages(str(e)) if args.print_oneline else str(e)
_logger.error("While validating document `%s`:\n%s" %
(args.document, msg))
_logger.error("While validating document `%s`:\n%s" % (args.document, msg))
return 1
# Optionally convert the document to RDF
......@@ -285,14 +380,14 @@ def main(argsl=None): # type: (List[str]) -> int
printrdf(args.document, document, schema_ctx, args.rdf_serializer)
return 0
else:
print("Document must be a dictionary or list.")
print ("Document must be a dictionary or list.")
return 1
if args.print_metadata:
print(json_dumps(doc_metadata, indent=4))
print (json_dumps(doc_metadata, indent=4))
return 0
print("Document `%s` is valid" % args.document)
print ("Document `{}` is valid".format(args.document))
return 0
......