Commit a27f2e31 authored by josd's avatar josd

modalities producing Turtle or N3

parent 8880810c
from sympy import *
x = Symbol('x')
# Gaussian integral over the whole real line; evaluates to sqrt(pi).
y = integrate(exp(-x**2), (x, -oo, oo))
# NOTE(review): two PREFIX declarations bind the same ':' prefix
# (modality vs modalities) — this looks like old/new diff residue;
# confirm which one is intended.
print('PREFIX : <http://josd.github.io/eye/modality#>')
print('PREFIX : <http://josd.github.io/eye/modalities#>')
print('')
# Emit the result as an N3/Turtle triple.
print('"exp(-x**2), (x, -oo, oo)" :integrate "%s".' % (y))
# NOTE(review): recomputes the integral already held in y; the two print
# lines appear to be old/new diff residue — confirm which should remain.
print('"integrate(exp(-x**2), (x, -oo, oo))" :integral "%s".' % (integrate(exp(-x**2), (x, -oo, oo))))
PREFIX : <http://josd.github.io/eye/modality#>
PREFIX : <http://josd.github.io/eye/modalities#>
"exp(-x**2), (x, -oo, oo)" :integrate "sqrt(pi)".
"integrate(exp(-x**2), (x, -oo, oo))" :integral "sqrt(pi)".
......@@ -14,7 +14,7 @@ def calc_easter(year):
return date(year, month, day)
# Emit Easter dates as Turtle triples with xsd:date typed literals.
print('PREFIX xsd: <http://www.w3.org/2001/XMLSchema#>')
# NOTE(review): duplicate ':' prefix declarations (modality vs modalities)
# look like old/new diff residue — confirm which one is intended.
print('PREFIX : <http://josd.github.io/eye/modality#>')
print('PREFIX : <http://josd.github.io/eye/modalities#>')
print('')
print('2019 :easter "%s"^^xsd:date.' % (calc_easter(2019)))
print('2020 :easter "%s"^^xsd:date.' % (calc_easter(2020)))
......
......@@ -4,8 +4,8 @@ def fib(n, c={0:1, 1:1}):
c[n] = fib(x-1) * fib(n-x-1) + fib(x) * fib(n-x)
return c[n]
# Emit Fibonacci values as Turtle triples with the ':fib' predicate.
# NOTE(review): fib's definition above is only partially visible here
# (diff residue); it appears to be a memoised variant (c={0:1, 1:1}).
y = fib(91)
print('PREFIX : <http://josd.github.io/eye/modality#>')
# NOTE(review): duplicate ':' prefix declaration — diff residue; confirm.
print('PREFIX : <http://josd.github.io/eye/modalities#>')
print('')
# NOTE(review): y is printed here and fib(91) is recomputed two lines
# below — the two '91 :fib' prints look like old/new diff lines.
print('91 :fib "%d".' % (y))
print('0 :fib "%d".' % (fib(0)))
print('91 :fib "%d".' % (fib(91)))
print('283 :fib "%d".' % (fib(283)))
PREFIX : <http://josd.github.io/eye/modality#>
0 :fib "1".
91 :fib "7540113804746346429".
283 :fib "100694286476841731898333719576864360661213863366454327287613".
=== Transformer for wind turbines ===
This is about predicting wind turbine power from wind turbine observations.
The run.sh script does the following:
- generate data for the turbine model training
- train the turbine model
- export the turbine model
- test the exported turbine model using the test.sh script
from . import observation_prediction_turbine
#!/usr/bin/env python3
"""Compare turbine predictions with expectations and print metrics.

Reads test_turbine.prediction (predicted label in field 2, belief score in
the last field of each line) and test_turbine.expectation (expected label
in field 2), then prints right/wrong/excluded counts, accuracy and
exclusion rate.  An optional command-line argument gives the belief
threshold below which predictions are excluded.
"""
import sys

# predictions (second whitespace-separated field per line)
with open('test_turbine.prediction', 'r') as f:
    predictions = [line.split()[1] for line in f]
# expectations
with open('test_turbine.expectation', 'r') as f:
    expectations = [line.split()[1] for line in f]
# beliefs (last field per prediction line)
with open('test_turbine.prediction', 'r') as f:
    beliefs = [float(line.split()[-1]) for line in f]
# belief threshold
# BUG FIX: was eval(sys.argv[1]) — eval on untrusted argv is unsafe;
# float() parses the same numeric values.
threshold = 0.0
if len(sys.argv) > 1:
    threshold = float(sys.argv[1])
# metrics (BUG FIX: the excluded count previously overwrote the name 'e',
# shadowing the expectations list; distinct names keep intent clear)
pairs = list(zip(predictions, expectations, beliefs))
right = sum(1 for p, e, b in pairs if p == e and b >= threshold)
wrong = sum(1 for p, e, b in pairs if p != e and b >= threshold)
excluded = sum(1 for _, _, b in pairs if b < threshold)
print("right_predictions: %d" % (right))
print("wrong_predictions: %d" % (wrong))
print("excl_predictions: %d" % (excluded))
# accuracy over non-excluded predictions; NaN when nothing passes the threshold
print("accuracy: %.2f" % (right/(right+wrong) if right+wrong != 0 else float('NaN')))
print("excl_rate: %.2f" % (excluded/(right+wrong+excluded) if right+wrong+excluded != 0 else float('NaN')))
#!/usr/bin/env python3
import random
import sys
from tensor2tensor.data_generators import problem
from tensor2tensor.data_generators import text_problems
from tensor2tensor.utils import registry
@registry.register_problem
class ObservationPredictionTurbine(text_problems.Text2TextProblem):
  """Processing Observations into Propositions for wind turbines."""

  @property
  def approx_vocab_size(self):
    # Target subword vocabulary size.
    return 2**14  # ~16k

  @property
  def is_generate_per_split(self):
    # generate_data will shard the data into TRAIN and EVAL for us.
    return False

  @property
  def dataset_splits(self):
    """Splits of data to produce and number of output shards for each."""
    train_split = {"split": problem.DatasetSplit.TRAIN, "shards": 9}
    eval_split = {"split": problem.DatasetSplit.EVAL, "shards": 1}
    return [train_split, eval_split]

  def generate_samples(self, data_dir, tmp_dir, dataset_split):
    # Data is fully synthetic, so the directories and split are unused.
    del data_dir, tmp_dir, dataset_split
    for _ in range(100000):
      # wind turbine size factor
      size_factor = random.randint(1, 10)
      # wind speed, Gaussian around 25 km/h, clipped at zero
      wind_speed = max(0, int(random.gauss(25, 15)))
      # wind turbine power (cubic in wind speed)
      turbine_power = int(0.01*size_factor*wind_speed**3)
      observation = ("TURBINE_SIZE_FACTOR " + repr(size_factor) +
                     " WIND_SPEED_KM/H " + repr(wind_speed))
      proposition = "TURBINE_POWER_KW " + repr(turbine_power)
      yield {"inputs": observation, "targets": proposition}
if __name__ == '__main__':
  # Dump generated samples to stdout; optional argv[1] selects which
  # field to print ("inputs" or "targets").
  what = "inputs"
  if len(sys.argv) > 1:
    what = sys.argv[1]
  # BUG FIX: the original referenced ObservationPredictionDice, a name
  # that does not exist in this module (copy/paste from the dice example)
  # and would raise NameError.  generate_samples is called unbound on the
  # class, so None stands in for self/data_dir/tmp_dir/dataset_split.
  gen = ObservationPredictionTurbine.generate_samples(None, None, None, None)
  for sample in gen:
    print(sample.get(what))
#!/usr/bin/env python3
# coding=utf-8
# Copyright 2018 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Query an exported model. Py2 only. Install tensorflow-serving-api."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from oauth2client.client import GoogleCredentials
from six.moves import input # pylint: disable=redefined-builtin
from tensor2tensor import problems as problems_lib # pylint: disable=unused-import
from tensor2tensor.serving import serving_utils
from tensor2tensor.utils import registry
from tensor2tensor.utils import usr_dir
import tensorflow as tf
import math
# Command-line flags for querying a served tensor2tensor model.
flags = tf.flags
FLAGS = flags.FLAGS
# TF Serving connection (used when not querying Cloud ML Engine).
flags.DEFINE_string("server", None, "Address to Tensorflow Serving server.")
flags.DEFINE_string("servable_name", None, "Name of served model.")
# Problem registration and vocabulary location.
flags.DEFINE_string("problem", None, "Problem name.")
flags.DEFINE_string("data_dir", None, "Data directory, for vocab files.")
flags.DEFINE_string("t2t_usr_dir", None, "Usr dir for registrations.")
# If set, send a single query and exit instead of looping over stdin.
flags.DEFINE_string("inputs_once", None, "Query once with this input.")
flags.DEFINE_integer("timeout_secs", 10, "Timeout for query.")
# For Cloud ML Engine predictions.
flags.DEFINE_string("cloud_mlengine_model_name", None,
"Name of model deployed on Cloud ML Engine.")
flags.DEFINE_string(
"cloud_mlengine_model_version", None,
"Version of the model to use. If None, requests will be "
"sent to the default version.")
def validate_flags():
  """Validates flags are set to acceptable values.

  Exactly one backend must be configured: either a Cloud ML Engine model
  name, or a TF Serving --server/--servable_name pair.  Explicit raises
  replace the original bare asserts, which carried no message and are
  stripped entirely under ``python -O``.

  Raises:
    ValueError: if the flag combination is inconsistent or incomplete.
  """
  if FLAGS.cloud_mlengine_model_name:
    if FLAGS.server:
      raise ValueError("--server is not used with Cloud ML Engine.")
    if FLAGS.servable_name:
      raise ValueError("--servable_name is not used with Cloud ML Engine.")
  else:
    if not FLAGS.server:
      raise ValueError("--server must be set.")
    if not FLAGS.servable_name:
      raise ValueError("--servable_name must be set.")
def make_request_fn():
  """Returns a request function for the configured serving backend."""
  if not FLAGS.cloud_mlengine_model_name:
    # Plain TF Serving over gRPC.
    return serving_utils.make_grpc_request_fn(
        servable_name=FLAGS.servable_name,
        server=FLAGS.server,
        timeout_secs=FLAGS.timeout_secs)
  # Cloud ML Engine backend, authenticated with application-default creds.
  return serving_utils.make_cloud_mlengine_request_fn(
      credentials=GoogleCredentials.get_application_default(),
      model_name=FLAGS.cloud_mlengine_model_name,
      version=FLAGS.cloud_mlengine_model_version)
def main(_):
  """Interactive query loop against the served model.

  Reads observations from stdin (or uses --inputs_once for a single
  query), sends them through the serving backend, and prints the decoded
  output together with its belief, i.e. exp(score).
  """
  tf.logging.set_verbosity(tf.logging.INFO)
  validate_flags()
  usr_dir.import_usr_dir(FLAGS.t2t_usr_dir)
  problem = registry.problem(FLAGS.problem)
  hparams = tf.contrib.training.HParams(
      data_dir=os.path.expanduser(FLAGS.data_dir))
  problem.get_hparams(hparams)
  request_fn = make_request_fn()
  while True:
    try:
      if FLAGS.inputs_once:
        query = FLAGS.inputs_once
      else:
        query = input("")
      predictions = serving_utils.predict([query], problem, request_fn)
      prediction, = predictions
      decoded, score = prediction
      print('%s BELIEF %.2f' % (decoded, math.exp(score)))
      if FLAGS.inputs_once:
        break
    except EOFError:
      # End of stdin terminates the loop.
      break
if __name__ == "__main__":
flags.mark_flags_as_required(["problem", "data_dir"])
tf.app.run()
#!/bin/bash
# Train, export and package the wind-turbine transformer model, then test it.
PROBLEM=observation_prediction_turbine
MODEL=transformer
HPARAMS=transformer_small
USER_DIR=$PWD
DATA_DIR=/tmp/t2t_data/$PROBLEM
TRAIN_DIR=/tmp/t2t_train/$PROBLEM/$MODEL-$HPARAMS
# clear data and train directories
# BUG FIX: the original ran mkdir -p first and rm -fr second, so both
# directories were deleted right after being created; clear before create.
rm -fr "$DATA_DIR" "$TRAIN_DIR"
# create data and train directories
mkdir -p "$DATA_DIR" "$TRAIN_DIR"
# generate data for the turbine model training
t2t-datagen \
  --data_dir="$DATA_DIR" \
  --problem="$PROBLEM" \
  --t2t_usr_dir="$USER_DIR"
# train the turbine model
t2t-trainer \
  --data_dir="$DATA_DIR" \
  --eval_steps=200 \
  --eval_throttle_seconds=30 \
  --hparams_set="$HPARAMS" \
  --model="$MODEL" \
  --output_dir="$TRAIN_DIR" \
  --problem="$PROBLEM" \
  --t2t_usr_dir="$USER_DIR" \
  --train_steps=4000
# export the turbine model
t2t-exporter \
  --data_dir="$DATA_DIR" \
  --hparams_set="$HPARAMS" \
  --model="$MODEL" \
  --output_dir="$TRAIN_DIR" \
  --problem="$PROBLEM" \
  --t2t_usr_dir="$USER_DIR"
# zip the turbine model (exported graph + subword vocabulary)
rm -fr model_turbine.zip
zip -9r model_turbine.zip "/tmp/t2t_train/$PROBLEM/$MODEL-$HPARAMS/export/" \
  "/tmp/t2t_data/$PROBLEM/vocab.$PROBLEM.16384.subwords"
rm -fr "/tmp/t2t_train/$PROBLEM/$MODEL-$HPARAMS/export/"
# test the turbine model
./test.sh
#!/bin/bash
# Unpack the exported turbine model, serve it, query it with generated
# observations and compute accuracy metrics at several belief thresholds.
PROBLEM=observation_prediction_turbine
MODEL=transformer
HPARAMS=transformer_small
USER_DIR=$PWD
DATA_DIR=/tmp/t2t_data/$PROBLEM
TMP_DIR=/tmp/t2t_datagen/$PROBLEM
TRAIN_DIR=/tmp/t2t_train/$PROBLEM/$MODEL-$HPARAMS
# unzip the model
rm -fr "$TRAIN_DIR/export"
unzip -o model_turbine.zip -d /
# start the model server in the background
tensorflow_model_server \
  --port=9000 \
  --model_name="$PROBLEM" \
  --model_base_path="$TRAIN_DIR/export" &
# clear test results
echo -n > test.txt
# create test_turbine.observation
./test_turbine.py
# test the turbine model
./query.py < test_turbine.observation \
  --data_dir="$DATA_DIR" \
  --problem="$PROBLEM" \
  --server=localhost:9000 \
  --servable_name="$PROBLEM" \
  --t2t_usr_dir="$USER_DIR" > test_turbine.prediction
# calculate the metrics at each belief threshold
# NOTE: the 'treshold' spelling is kept byte-identical because it is part
# of the recorded test output.
for t in 0.0 0.2 0.4; do
  echo "belief_treshold: $t" |& tee -a test.txt
  ./calculate_test_turbine.py "$t" |& tee -a test.txt
  echo "" |& tee -a test.txt
done
# stop the model server
pgrep -f tensorflow_model_server | xargs kill -9
belief_treshold: 0.0
right_predictions: 1000
wrong_predictions: 0
excl_predictions: 0
accuracy: 1.00
excl_rate: 0.00
belief_treshold: 0.2
right_predictions: 1000
wrong_predictions: 0
excl_predictions: 0
accuracy: 1.00
excl_rate: 0.00
belief_treshold: 0.4
right_predictions: 1000
wrong_predictions: 0
excl_predictions: 0
accuracy: 1.00
excl_rate: 0.00
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
#!/usr/bin/env python3
"""Generate paired test files for the wind-turbine model.

Writes 1000 observations (size factor + wind speed) to
test_turbine.observation and the matching expected propositions
(turbine power) to test_turbine.expectation, using the same synthetic
model as the training data generator.
"""
import random

# Context managers replace the original manual open()/close() pair so the
# files are closed even if a write fails.
with open('test_turbine.observation', 'w') as obs_file, \
     open('test_turbine.expectation', 'w') as exp_file:
    for n in range(1000):
        # wind turbine size factor
        size_factor = random.randint(1, 10)
        # wind speed, Gaussian around 25 km/h, clipped at zero
        wind_speed = max(0, int(random.gauss(25, 15)))
        # wind turbine power (cubic in wind speed)
        turbine_power = int(0.01*size_factor*wind_speed**3)
        obs_file.write("TURBINE_SIZE_FACTOR " + repr(size_factor) + " WIND_SPEED_KM/H " + repr(wind_speed) + "\n")
        exp_file.write("TURBINE_POWER_KW " + repr(turbine_power) + "\n")
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment