Commit 1be0c429 authored by Sebastian Humenda

New upstream version 0.5.0~beta.2

parent dc561545
# keep this list alphabetical, please
*~
*.bak
build
*/build/*
*.egg-info
freedict-database.xml
*.mo
*__pycache__*
*.pyc
*.swo
*.swp
#!/usr/bin/env python3
import multiprocessing
import re
import os
import subprocess
import sys
# make targets to run in each dictionary directory
MAKE_TARGETS = (
'report-duplicates EXIT=y',
'validation'
)
ITSADICT = re.compile(r"^[a-z]{3}-[a-z]{3}")
ERR_PATTERN = re.compile(r'^(E[0-9][0-9]?:\s+)')
def call(cmdline):
if isinstance(cmdline, str):
cmdline = cmdline.split(' ')
return subprocess.Popen(cmdline, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
#pylint: disable=inconsistent-return-statements
def validate_dictionary(directory):
"""directory == dictname"""
orig_cwd = os.getcwd()
os.chdir(directory)
try:
procs = [] # process handles
for target in MAKE_TARGETS:
procs.append(call(['make', '--silent',
'--no-print-directory'] + target.split(' ')))
# aggregate results
results = []
for handle in procs:
text = handle.communicate()
ret = handle.wait()
if ret:
text = '\n'.join(t.decode(sys.getdefaultencoding())
for t in text)
lines = text.split('\n')[:-1] # strip trailing newline
if lines and lines[-1].startswith('make:'):
text = '\n'.join(lines[:-1]) # strip make's own failure message
if text:
results.append(text)
if results:
return (directory, results)
finally:
os.chdir(orig_cwd)
def check_all_dicts(dictionaries):
"""ToDo: assumes to live in directory where list of supplied dicts
exists."""
with multiprocessing.Pool() as p:
results = p.map(validate_dictionary, list(dictionaries))
return dict(filter(bool, results))
def main():
res = check_all_dicts(d for d in os.listdir('.') if ITSADICT.search(d))
for name, err in res.items():
print(name + ':')
for text in err:
match = ERR_PATTERN.search(text)
if match:
errcode = match.groups()[0]
text = text[len(errcode):] # strip the matched "E<num>: " prefix
errcode = errcode.strip().ljust(4) + ' '
else:
errcode = ''
text = '\n '.join(text.split('\n')).lstrip()
print("- {}{}".format(errcode, text))
print("\nYou can learn more about errors starting with 'E<num>' by typing "
"`make description E=<num>`.")
if __name__ == '__main__':
main()
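As a rough, standalone illustration of what the error-prefix handling in main() is meant to produce (the ERR_PATTERN regex is the one defined above; the sample validator message is made up):

import re

ERR_PATTERN = re.compile(r'^(E[0-9][0-9]?:\s+)')

def format_error(text):
    """Split a leading 'E<num>:' code off a validation message, as main() does."""
    match = ERR_PATTERN.search(text)
    if match:
        prefix = match.groups()[0]
        return "- {} {}".format(prefix.strip().ljust(4), text[len(prefix):])
    return "- " + text

print(format_error("E7:  duplicated sense in entry 'foo'"))
# prints: - E7:  duplicated sense in entry 'foo'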
......@@ -32,6 +32,7 @@ import xml.etree.ElementTree as ET
# TEI name space, Python's parser doesn't handle them nicely
TEI_NS = '{http://www.tei-c.org/ns/1.0}'
NS = {'t': TEI_NS.strip('{}')}
# findall/iter with TEI namespace removed
findall = lambda x,y: x.findall(TEI_NS + y)
tei_iter = lambda x,y: x.iter(TEI_NS + y)
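For readers unfamiliar with ElementTree's namespace handling, a small self-contained sketch of why the prefixed helpers above are needed (the XML fragment is invented):

import xml.etree.ElementTree as ET

TEI_NS = '{http://www.tei-c.org/ns/1.0}'
root = ET.fromstring(
    '<TEI xmlns="http://www.tei-c.org/ns/1.0"><text><entry/></text></TEI>')
# plain tag names match nothing, because every element carries the namespace
print(root.findall('.//entry'))               # []
print(root.findall('.//' + TEI_NS + 'entry')) # [<Element '{...}entry'>]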
......@@ -77,6 +78,11 @@ def nodes_eq(node1, node2, tag=None):
return node1.text == node2.text
usages_match = lambda n1, n2: nodes_eq(n1, n2, 'usg')
def translations_of(sense):
trans = sense.findall('.//t:quote', NS)
if not trans:
trans = sense.findall('.//t:def', NS)
return trans
def rm_doubled_senses(entry):
"""Some entries have multiple senses. A few of them are exactly the same,
......@@ -85,21 +91,23 @@ def rm_doubled_senses(entry):
senses = list(findall(entry, 'sense'))
if len(senses) == 1:
return
# obtain a mapping from XML node -> list of words within `<quote>…</quote>`
senses = {sense: tuple(q.text.strip() for q in tei_iter(sense, 'quote')
if q.text) for sense in senses}
senses = {sense: translations_of(sense) for sense in senses}
changed = False
# pair each sense with another and compare their content
for s1, s2 in itertools.combinations(senses.items(), 2):
if len(s1[1]) == len(s2[1]):
# if two senses are *exactly* identical
if all(definition in s2[1] for definition in s1[1]) \
and usages_match(s1[0], s2[0]):
try:
entry.remove(s2[0]) # remove sense from entry
changed = True
except ValueError: # already removed?
pass
# check whether both translation sets match
if len(s1[1]) != len(s2[1]):
continue
if set(t.text for t in s1[1]) != set(t.text for t in s2[1]):
continue # translations don't match
# the two senses are *exactly* identical, remove the second one
try:
entry.remove(s2[0]) # remove sense from entry
changed = True
except ValueError: # already removed?
pass
return changed
......@@ -109,18 +117,22 @@ def rm_empty_nodes(entry):
they are generated by the output formatter anyway.
This function returns True, if an empty node has been removed."""
changed = False
is_empty = lambda node: (not node.text or node.text.strip() == '') \
and not node.getchildren()
# sometimes parent nodes are empty after their empty children have been
# removed, so repeat the pass a few times (deeper nestings still aren't handled…)
for _ in range(0, 2):
changed_here = False
nodes = [(None, entry)]
for parent, node in nodes:
if (node.text is None or node.text.strip() == '') \
and not node.getchildren():
if is_empty(node):
if parent:
parent.remove(node)
changed = True
changed_here = changed = True
else:
nodes.extend((node, c) for c in node.getchildren())
if not changed_here:
break # nothing removed, no further iterations
# try to strip the numbering of senses if the numbers aren't adjacent anymore;
# first build a map sense -> number, or discard it if there is no numbering
sense_numbers = {sense: int(sense.attrib.get('n'))
......@@ -150,11 +162,14 @@ def rm_doubled_quotes(entry):
changed = False
# pair each sense with another and compare their content
for trans1, trans2 in itertools.combinations(senses, 2):
# translation could have been removed by a previous pairing
sense1, _cit1, quote1 = trans1
sense2, cit2, quote2 = trans2
# translation could have been removed by a previous pairing
if quote1.text == quote2.text and usages_match(sense1, sense2):
cit2.remove(quote2)
try:
cit2.remove(quote2)
except ValueError:
continue # already removed
changed = True
return changed
......@@ -192,7 +207,7 @@ class XmlParserWrapper:
parser.feed(content)
except ET.ParseError as e:
sys.stderr.write("Error while parsing input file\n")
sys.stderr.write(str(e).encode(sys.getdefaultencoding()) + '\n')
sys.stderr.write(str(e) + '\n')
sys.exit(15)
self.root = parser.close()
......@@ -228,15 +243,30 @@ def main():
dictionary_path = args.dictionary_path[0]
tree = XmlParserWrapper(dictionary_path)
changed = False
for entry in tei_iter(tree.root, 'entry'):
changed = changed or rm_doubled_senses(entry)
changed = changed or rm_doubled_quotes(entry)
# the processing above might leave empty parent nodes, remove those
changed = changed or rm_empty_nodes(entry)
if args.detect_changes: # abort if first change detected
for entry in tei_iter(tree.root, 'entry'):
changed = changed or rm_doubled_senses(entry)
# this one is dangerous
#changed = changed or rm_doubled_quotes(entry)
# the processing above might leave empty parent nodes, remove those
changed = changed or rm_empty_nodes(entry)
if args.detect_changes and changed:
print(("E1: Found duplicated entries or empty XML nodes. Try "
"`make rm_duplicates`."))
sys.exit(42)
else: # always apply changes
for entry in tei_iter(tree.root, 'entry'):
changed1 = rm_doubled_senses(entry)
changed2 = rm_doubled_quotes(entry)
# the processing above might leave empty parent nodes, remove those
changed3 = rm_empty_nodes(entry)
if changed1 or changed2 or changed3:
changed = True
if args.detect_changes and changed:
print(("E1: Found duplicated entries or empty XML nodes. Try "
"`make rm_duplicates`."))
sys.exit(42)
if changed:
output_fn = os.path.join('build', 'tei',
dictionary_path.replace('.tei', '-dedup.tei'))
......
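To make the sense deduplication above concrete, a minimal, simplified sketch (the TEI fragment is invented; the real script additionally guards against double removal and compares usage labels):

import itertools
import xml.etree.ElementTree as ET

NS = {'t': 'http://www.tei-c.org/ns/1.0'}
entry = ET.fromstring(
    '<entry xmlns="http://www.tei-c.org/ns/1.0">'
    '<sense><cit><quote>house</quote></cit></sense>'
    '<sense><cit><quote>house</quote></cit></sense>'
    '</entry>')

def translations_of(sense):
    # prefer <quote>, fall back to <def>, as in the script above
    return sense.findall('.//t:quote', NS) or sense.findall('.//t:def', NS)

senses = {s: translations_of(s) for s in entry.findall('t:sense', NS)}
for s1, s2 in itertools.combinations(senses.items(), 2):
    if {t.text for t in s1[1]} == {t.text for t in s2[1]}:
        entry.remove(s2[0])  # drop the duplicated sense
print(len(entry.findall('t:sense', NS)))  # 1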
#!/usr/bin/env python3
SRC_URL = "http://wordnet.dk/DanNet-2.2_csv.zip"
try:
. ~/daten/quellen/freedict/venv/bin/activate; \
python3 ding2tei.py deu-eng.txt ~/daten/quellen/freedict/fd-dictionaries/deu-eng/deu-eng.tei /tmp 2>&1 | vim -R -
- m/f often appears without {}; report this as a bug
- 3136: straying /
- [orig source]: 118917: are translations correctly parsed?
- multiple pos values; currently (see the sketch after this list):
<gramGrp>
<gramGrp>
<pos>vt</pos>
</gramGrp>
<gramGrp>
<pos>vi</pos>
</gramGrp>
</gramGrp>
- doc (more likely than tests)
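Not part of the notes above, but one possible, purely illustrative way to flatten the nested <gramGrp> structure shown in those notes (assuming ElementTree and a fragment without namespaces):

import xml.etree.ElementTree as ET

fragment = ET.fromstring(
    '<gramGrp><gramGrp><pos>vt</pos></gramGrp>'
    '<gramGrp><pos>vi</pos></gramGrp></gramGrp>')
# pull each <pos> up into the outer <gramGrp> and drop the inner wrappers
for inner in list(fragment.findall('gramGrp')):
    fragment.extend(inner.findall('pos'))
    fragment.remove(inner)
print(ET.tostring(fragment, encoding='unicode'))
# <gramGrp><pos>vt</pos><pos>vi</pos></gramGrp>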
......@@ -80,7 +80,7 @@ FREEDICTRC = $(wildcard $(HOME)/.config/freedict/freedictrc $(LOCALAPPDATA)/free
PYTHON := $(shell command -v python3 2> /dev/null)
ifeq ($(PYTHON),)
PYTHON := $(shell command -v python 2> /dev/null)
ifeq ("$(PYTHON)" "")
ifeq ("$(PYTHON)","")
$(error No Python executable found, please make sure that a recent Python is in the search path)
endif
endif
......
......@@ -13,9 +13,8 @@ NOTE: For the targets `release` and `build`, there are targets called
one platform.
endef
#######################
#### set some variables
#### Common variable definitions
#######################
# let the tools from $(toolsdir) override tools
......@@ -33,9 +32,10 @@ xsldir ?= $(FREEDICT_TOOLS)/xsl
XMLLINT := /usr/bin/xmllint
dictname ?= $(shell basename "$(shell pwd)")
source_lang = $(shell echo $(dictname) | sed 's/-.*//g')
rdictname := $(shell export V=$(dictname); echo $${V:4:3}-$${V:0:3})
version1 := $(shell sed -e '100q;/<edition>/!d;s/.*<edition>\(.*\)<\/edition>.*/\1/;q'\
$(wildcard $(dictname).tei*) $(dictname)-nophon.tei)
$(wildcard $(dictname).tei))
version := $(subst $(space),,$(version1))
# these files are included in each of the platform releases which are *not* a
......@@ -49,6 +49,54 @@ DISTFILES_BINARY = $(foreach f, README README.md README.txt README.rst \
PREFIX ?= /usr
DESTDIR ?=
################
# Common Function Definitions
################
# Helper function to retrieve the release path. We cannot declare the value
# statically, because it is only required for the deploy target and this is only
# executed by admins. The first argument is "optional".
deploy_to = $(shell $(MAKE) --no-print-directory -C $(FREEDICT_TOOLS) release-path)/$(1)
# This function assists the release-% rules. It generates the release path for
# each platform; Arg1: platform
gen_release_path = $(RELEASE_DIR)/freedict-$(dictname)-$(version).$(if \
$(findstring slob,$(1)),slob,$(1).tar.xz)
gen_release_hashpath = $(call gen_release_path,$(1)).sha512
# dictionary source file -- normally just $(dictname).tei, but can be
# overridden, e.g. by the phonetics generator
dict_tei_source = $(dictname).tei
#######################
#### Phonetics import
####
#### This needs to come before all others, so that the relevant functions are
#### defined correctly.
#######################
TEIADDPHONETICS := $(shell which teiaddphonetics 2>/dev/null)
ifeq ($(TEIADDPHONETICS),)
TEIADDPHONETICS := $(FREEDICT_TOOLS)/teiaddphonetics
endif
supported_lang = $(shell $(TEIADDPHONETICS) --supports-lang $(source_lang);echo $$?)
ifeq ($(supported_lang),0) # supported language
dict_tei_source = build/tei/$(dictname)-phonetics.tei
$(BUILD_DIR)/tei:
mkdir -p $@
$(call dict_tei_source): $(dictname).tei | $(BUILD_DIR)/tei
$(TEIADDPHONETICS) --infile $< --outfile $@
else ifeq ($(shell echo '$(supported_lang)' |tr -d '[:space:]'|tail -c 1),1)
foo:
echo '$(supported_lang:%1=%k1)'
echo x$(shell echo '$(supported_lang)' |tail -c 1)x
dict_tei_source = $(error Espeak or espeak-ng is not installed, please install it and try again.)
endif
################
# General targets (default target, maintenance targets)
################
......@@ -83,10 +131,6 @@ clean:: #! clean build files
rm -rf build
rm -f valid.stamp
# Helper rule to retrieve the release path. We cannot declare the value
# statically, because it is only required for the deploy target and this is only
# executed by admins. The first argument is "optional".
deploy_to = $(shell $(MAKE) --no-print-directory -C $(FREEDICT_TOOLS) release-path)/$(1)
deploy: #! deploy all platforms of a release to the remote file hosting service
deploy: $(foreach r, $(available_platforms), release-$(r))
@MOUNTED=0; \
......@@ -123,7 +167,7 @@ deploy: $(foreach r, $(available_platforms), release-$(r))
find-homographs: #! find all homographs and list them, one per line
find-homographs: $(dictname).tei
@cat $< | grep orth | \
sed -e s:' <orth>':'':g -e s:'<\/orth>':'':g | sort -f | \
sed -e s:'[ ]*<orth>':'':g -e s:'<\/orth>':'':g | sort -f | \
uniq -i -d
list-platforms: #! list all available platforms, AKA output formats
......@@ -160,13 +204,6 @@ query-%: #! query platform support status; 0=dictd supported, 1=dictd unsupporte
release: #! build releases for all available platforms
release: $(foreach platform,$(available_platforms),release-$(platform))
# This function is here to assist the release-% rules. It generates the release
# path for each platform.
# Arg1: platform
gen_release_path = $(RELEASE_DIR)/freedict-$(dictname)-$(version).$(if \
$(findstring slob,$(1)),slob,$(1).tar.xz)
gen_release_hashpath = $(call gen_release_path,$(1)).sha512
version: #! output current (source) version number
@echo $(version)
......@@ -210,7 +247,7 @@ validation: $(dictname).tei
BUILD_DICTD=$(BUILD_DIR)/dictd
$(BUILD_DICTD)/$(dictname).c5: $(dictname).tei $(BUILD_DICTD) \
$(BUILD_DICTD)/$(dictname).c5: $(call dict_tei_source) $(BUILD_DICTD) \
$(xsldir)/tei2c5.xsl $(xsldir)/inc/teientry2txt.xsl \
$(xsldir)/inc/teiheader2txt.xsl \
$(xsldir)/inc/indent.xsl
......@@ -352,30 +389,13 @@ clean::
$(BUILD_DIR)/stardict/freedict-$(dictname)-$(version)-stardict.tar.bz2 \
dictd2dic.out authorresp.out title.out sourceurl.out
#######################
#### Phonetics import
#######################
# ToDo: should be scripted to let eSpeak emit ISO 639-3 codes
#supported_phonetics ?= $(shell PATH="$(FREEDICT_TOOLS):$(PATH)" teiaddphonetics -li)
la1 := $(shell export V=$(dictname); echo $${V:0:3})
#la2 := $(shell export V=$(dictname); echo $${V:4:3})
ifeq ($(la1),$(findstring $(la1),$(supported_phonetics)))
# TEIADDPHONETICS ?= -v
$(dictname).tei: $(dictname)-nophon.tei
teiaddphonetics $(TEIADDPHONETICS) -i $< -ou $@ -mbrdico-path $(MBRDICO_PATH)
endif
#######################
#### Slob format for the Aard Android dictionary client
#######################
build-slob: $(BUILD_DIR)/slob/$(dictname)-$(version).slob
$(BUILD_DIR)/slob/$(dictname)-$(version).slob: $(dictname).tei | $(BUILD_DIR)/slob
$(BUILD_DIR)/slob/$(dictname)-$(version).slob: $(call dict_tei_source) | $(BUILD_DIR)/slob
$(call exc_pyscript,tei2slob,-w,$(BUILD_DIR)/slob,-o,$@,$<)
$(call gen_release_path,slob): $(BUILD_DIR)/slob/$(dictname)-$(version).slob $(RELEASE_DIR)
......
#!/usr/bin/perl
#vim: set expandtab sts=2 ts=2 sw=2 tw=80:
# This script requires the following:
# Dependencies: libxml, libxslt, and /usr/share/xml/iso-codes/iso_639.xml
# from the iso-codes package
use v5.10;
use warnings;
use strict;
use open ':locale';
use Getopt::Long;
use Speech::eSpeak 0.5;
use strict; use open ':locale'; use Getopt::Long;
use XML::LibXML;
use XML::LibXSLT;
use XML::LibXML::XPathContext;
# an alternative could be: https://metacpan.org/pod/C::DynaLib
use IPC::Open2;
my $verbose = 0;
my $help = 0;
my $infile = 'fd/eng-ita/eng-ita.tei';
my $infile;
my $outfile;
my $inlang;
my $supports_lang;
my $dryrun = 0;
my $espeak_path;
sub iso639_2T_to_1
{
my $code_2T = shift;
my $isofile = '/usr/share/xml/iso-codes/iso_639.xml';
die "Missing $isofile (In Debian install package iso-codes)!" unless -r $isofile;
my $isodoc = XML::LibXML->load_xml(location => $isofile);
my $xpc = XML::LibXML::XPathContext->new($isodoc);
my $expr = '/iso_639_entries/iso_639_entry/@iso_639_1_code[ ../@iso_639_2T_code=\'' . $code_2T . '\' ]';
say "Evaluating $expr..." if $verbose>1;
my $code_1 = $xpc->findvalue($expr);
say "Translated '$code_2T' to '$code_1'" if $verbose;
return $code_1;
}
sub read_espeak_voices {
my $F;
open($F, "$espeak_path --voices|") or die "unable to execute espeak: $!";
my %voices;
while (<$F>) {
my $line = $_;
$line =~ s/^\s*[0-9]*\s*([a-z]+)\s.*$/$1/g;
$line =~ s/\s+$//;
# add only if a match occurred
$voices{$line} = 1 if length($line) <= 3;
}
return %voices;
}
sub language_exists {
my $language = shift;
my %espeak_langs = read_espeak_voices();
if(exists($espeak_langs{$language})) {
return 1;
}
$inlang = iso639_2T_to_1($language);
if(!defined($inlang) || $inlang eq '' || !exists($espeak_langs{$inlang})) {
return 0;
}
return 1;
}
GetOptions("verbose" => \$verbose,
"infile=s" => \$infile,
"outfile=s" => \$outfile,
"inlang=s" => \$inlang,
"help" => \$help,
"dry-run" => \$dryrun,
) or die("Error in command line arguments\n");
"infile=s" => \$infile,
"outfile=s" => \$outfile,
"inlang=s" => \$inlang,
"help" => \$help,
"dry-run" => \$dryrun,
"supports-lang=s" => \$supports_lang,
"espeak-path=s" => \$espeak_path,
) or die("Error in command line arguments\n");
if($help)
{
say <<"EOT";
$0
say <<"EOT";
$0 [OPTIONS]
This script adds pronunciation information to the headwords of a TEI file.
It goes through all <orth> elements and adds a <pron> element following them,
unless <pron> already exists. This allows overriding the espeak
pronunciation.
This script adds pronunciation information to the headwords of a TEI file.
It goes through all <orth> elements and adds a <pron> element after each one,
unless <pron> already exists.
It works similarly to 'espeak -v de --ipa ok', but uses the espeak
API directly.
This script uses 'espeak(-ng) -v <LANG> --ipa -q ok'. A previous version used the
espeak API by means of a modified version of Speech::eSpeak, but the
maintainer of that module is unresponsive. Experiments with the espeak API
via C::DynaLib (or Python's ctypes) were successful, but were not pursued
further because of cross-platform issues and API differences between espeak
and espeak-ng.
Options:
Espeak is started as a subprocess and connected via stdin/stdout. In this
line-based mode, espeak reads a line from STDIN and outputs a line on
STDOUT. Since espeak doesn't flush stdout, we would never get a reply. To
prevent this deadlock, we use the 'stdbuf' command, which uses LD_PRELOAD to
change libc behaviour. stdbuf is even available on MSYS2.
--dry-run
Options:
--dry-run
Don't write OUTFILE.
--help
--espeak-path <FULL-PATH-TO-ESPEAK-BINARY>
Give full path to espeak(-ng) in case autodetection fails.
--help
This help.
--infile, -i <INFILE>
Use INFILE as input.
Example: -i eng-ita.tei
--inlang, -l <CODE>
Language of the headwords in INFILE. The CODE is used to
select the speaker language of espeak. If not given,
the basename of INFILE is assumed to have the form la1-la2.tei,
......@@ -65,76 +122,92 @@ if($help)
Example: -l de
--outfile, -o <OUTFILE>
Use OUTFILE for output. If not given, "INFILE.withpron" is used.
Example: -o output.tei
--supports-lang <LANG>
Exit with 0 (success) if the language is supported and with 2
otherwise. This supports both 639-2 and 639-3 codes, but is
meant to be used with 639-3 codes to check whether a
dictionary's source language is supported.
--verbose
Say what is going on.
EOT
exit 0;
}
unless(defined $infile) { say "--infile not given"; exit 1 };
unless(-r $infile) { say "$infile not readable"; exit 1 };
$outfile ||= "$infile.withpron";
$espeak_path ||= `which espeak-ng`;
$espeak_path ||= `which espeak`;
chomp $espeak_path;
if(!defined $infile && !defined $supports_lang) {
say "Error: need to specify input file with --infile"; exit 1
};
unless(-x $espeak_path) {
if (not length $espeak_path) {
say "Espeak(-ng) executable missing"
} else {
say "$espeak_path not executable";
}
exit 1
};
say "espeak version: ", Speech::eSpeak::espeak_Info(0) if $verbose;
my $speaker = Speech::eSpeak::new;
sub iso639_2T_to_1
{
my $code_2T = shift;
my $isofile = '/usr/share/xml/iso-codes/iso_639.xml';
die "Missing $isofile (In Debian install package iso-codes)!" unless -r $isofile;
my $isodoc = XML::LibXML->load_xml(location => $isofile);
my $xpc = XML::LibXML::XPathContext->new($isodoc);
my $expr = '/iso_639_entries/iso_639_entry/@iso_639_1_code[ ../@iso_639_2T_code=\'' . $code_2T . '\' ]';
say "Evaluating $expr..." if $verbose>1;
my $code_1 = $xpc->findvalue($expr);
say "Translated '$code_2T' to '$code_1'" if $verbose;
return $code_1;
if(defined $supports_lang) {
exit(language_exists($supports_lang) ? 0 : 2);
}
unless(-r $infile) { say "$infile not readable"; exit 1 };
$outfile ||= "$infile.withpron";
say "Using espeak from '$espeak_path'" if $verbose;
unless(defined $inlang)
{
unless($infile =~ /(\w{3})-\w{3}\.tei$/)
{
say "Could not guess input language from $infile. Please provide --inlang.";
say "Could not guess input language from $infile. Please provide --inlang.";
exit 3;
}
my $in3 = $1;
$inlang = iso639_2T_to_1($in3);
if(!defined($inlang) or $inlang eq '')
{
say "Failed to find input languge from 3 letter code $in3";
if (!language_exists($in3)) {
say "No voice found for input language code $in3";
exit 4
}
}
$speaker->language($inlang);
say "Using language: ", $speaker->language if $verbose;
unless($speaker->language eq $inlang)
say "Using language: ", $inlang if $verbose;
say "espeak version: ", `"$espeak_path" --version` if $verbose;
$SIG{PIPE} = sub
{
say "Language not supported. Add --verbose to see the espeak supported languages.";
if($verbose)
{
say "espeak supported voices/languages:\n";
my $voices = Speech::eSpeak::espeak_ListVoices('');
foreach my $voice_spec (@{$voices}) {
foreach (sort keys %{$voice_spec}) {
print $_, '=', $voice_spec->{$_}, ' ';
}
print "\n";
}
}
exit 2;
}
die "Got SIGPIPE: $!";
};
my($chld_out, $chld_in);
# https://github.com/espeak-ng/espeak-ng/pull/536
#, '--fflush-stdout'
my @cmd = ('stdbuf', '-o0', $espeak_path, '--ipa', '-v', $inlang, '-q');
my $pid = open2($chld_out, $chld_in, @cmd);
die "open2 failed: $!" unless defined $pid;
say "espeak has PID $pid" if $verbose;
binmode $chld_in, ':utf8';
binmode $chld_out, ':utf8';
XML::LibXSLT->register_function("urn:espeak", "ipa",
sub { $speaker->ipa(shift); });
sub {
my $inword = shift;
my $count = kill 0, $pid;
die "espeak exited" if $count != 1;
say "Writing to espeak-ng: ", $inword if $verbose;
say $chld_in $inword;
say "Will read..." if $verbose;
my $ipa = <$chld_out>;
$ipa =~ s/^\s+|\s+$//g;# trim
chomp($ipa);
say "Got from espeak-ng: ", $ipa if $verbose;
return $ipa;
});
my $sdoc = XML::LibXML->load_xml(string => <<'EOT');
<xsl:stylesheet version="1.0"
xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
......@@ -162,5 +235,6 @@ my $stylesheet = $xslt->parse_stylesheet($sdoc);
print "Transforming $infile to $outfile... " if $verbose;
my $results = $stylesheet->transform_file($infile);
$stylesheet->output_file($results, $outfile) unless $dryrun;
close($chld_in);
say "done." if $verbose;