From 288481365b126e7ece032a1fea628af0fc4e551b Mon Sep 17 00:00:00 2001 From: Thomas Goirand Date: Fri, 27 Mar 2015 16:08:31 +0100 Subject: [PATCH 0001/1483] Added initial debian folder. --- debian/changelog | 5 ++ debian/compat | 1 + debian/control | 109 +++++++++++++++++++++++++++++ debian/copyright | 31 ++++++++ debian/gbp.conf | 9 +++ debian/python-gnocchi-doc.doc-base | 9 +++ debian/rules | 58 +++++++++++++++ debian/source/format | 1 + debian/source/options | 1 + debian/watch | 3 + 10 files changed, 227 insertions(+) create mode 100644 debian/changelog create mode 100644 debian/compat create mode 100644 debian/control create mode 100644 debian/copyright create mode 100644 debian/gbp.conf create mode 100644 debian/python-gnocchi-doc.doc-base create mode 100755 debian/rules create mode 100644 debian/source/format create mode 100644 debian/source/options create mode 100644 debian/watch diff --git a/debian/changelog b/debian/changelog new file mode 100644 index 00000000..3b9aaa02 --- /dev/null +++ b/debian/changelog @@ -0,0 +1,5 @@ +gnocchi (1.0.0a1-1) unstable; urgency=medium + + * Initial release. 
(Closes: #XXXXXX) + + -- Thomas Goirand Fri, 27 Mar 2015 10:32:47 +0100 diff --git a/debian/compat b/debian/compat new file mode 100644 index 00000000..ec635144 --- /dev/null +++ b/debian/compat @@ -0,0 +1 @@ +9 diff --git a/debian/control b/debian/control new file mode 100644 index 00000000..7f4872f5 --- /dev/null +++ b/debian/control @@ -0,0 +1,109 @@ +Source: gnocchi +Section: python +Priority: optional +Maintainer: PKG OpenStack +Uploaders: Thomas Goirand +Build-Depends: debhelper (>= 9), + dh-python, + openstack-pkg-tools (>= 22~), + python-all (>= 2.6.6-3~), + python-pbr, + python-setuptools, + python-sphinx, +Build-Depends-Indep: python-ceilometer (>= 2015.1~b3), + python-concurrent.futures (>= 2.1.6), + python-coverage (>= 3.6), + python-fixtures, + python-flask, + python-future, + python-gabbi (>= 0.6.0), + python-jinja2, + python-keystonemiddleware, + python-mock, + python-msgpack, + python-mysqldb, + python-numpy, + python-oslo.config (>= 1.4.1), + python-oslo.db (>= 0.5.0), + python-oslo.log, + python-oslo.policy (>= 0.3.0), + python-oslo.serialization (>= 1.0.0), + python-oslo.utils (>= 0.3.0), + python-oslosphinx (>= 2.2.0.0), + python-oslotest, + python-pandas, + python-pecan, + python-psycopg2, + python-pytimeparse, + python-requests, + python-six, + python-sphinxcontrib.httpdomain, + python-sqlalchemy, + python-sqlalchemy-utils, + python-stevedore, + python-swiftclient, + python-sysv-ipc, + python-tempest-lib (>= 0.2.0), + python-testscenarios, + python-testtools (>= 0.9.38), + python-tooz (>= 0.4), + python-trollius, + python-voluptuous, + python-webtest (>= 2.0.16), + python-werkzeug, + python-yaml, + subunit (>= 0.0.18), + testrepository, +Standards-Version: 3.9.6 +Vcs-Browser: http://anonscm.debian.org/gitweb/?p=openstack/python-gnocchi.git +Vcs-Git: git://anonscm.debian.org/openstack/python-gnocchi.git +Homepage: https://github.com/stackforge/gnocchi + +Package: python-gnocchi +Architecture: all +Pre-Depends: dpkg (>= 1.15.6~) +Depends: 
python-concurrent.futures (>= 2.1.6), + python-flask, + python-future, + python-jinja2, + python-msgpack, + python-numpy, + python-oslo.config (>= 1.4.1), + python-oslo.db (>= 0.5.0), + python-oslo.log, + python-oslo.policy (>= 0.3.0), + python-oslo.serialization (>= 1.0.0), + python-oslo.utils (>= 0.3.0), + python-oslosphinx (>= 2.2.0.0), + python-pandas, + python-pecan, + python-pytimeparse, + python-requests, + python-six, + python-sqlalchemy, + python-sqlalchemy-utils, + python-stevedore, + python-swiftclient, + python-sysv-ipc, + python-tooz (>= 0.4), + python-trollius, + python-voluptuous, + python-werkzeug, + python-yaml, + ${misc:Depends}, + ${python:Depends}, +Suggests: python-gnocchi-doc +Description: Metric as a Service - Python 2.x + HTTP API to store metrics and index resources. + . + This package contains the Python 2.x module. + +Package: python-gnocchi-doc +Section: doc +Architecture: all +Pre-Depends: dpkg (>= 1.15.6~) +Depends: ${misc:Depends}, ${sphinxdoc:Depends} +Description: Metric as a Service - doc + HTTP API to store metrics and index resources. + . + This package contains the documentation. diff --git a/debian/copyright b/debian/copyright new file mode 100644 index 00000000..1fa2c9fe --- /dev/null +++ b/debian/copyright @@ -0,0 +1,31 @@ +Format: http://www.debian.org/doc/packaging-manuals/copyright-format/1.0/ +Upstream-Name: gnocchi +Upstream-Contact: Julien Danjou +Source: git://github.com/stackforge/gnocchi.git + +Files: debian/* +Copyright: (c) 2014, Thomas Goirand +License: Apache-2 + +Files: * +Copyright: (c) 2014-2015, Julien Danjou + (c) 2014-2015, Mirantis INC. + (c) 2014-2015, Red Hat INC. + (c) 2014-2015, Objectif Libre +License: Apache-2 + +License: Apache-2 + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + . + http://www.apache.org/licenses/LICENSE-2.0 + . 
+ Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + . + On Debian-based systems the full text of the Apache version 2.0 license + can be found in /usr/share/common-licenses/Apache-2.0. diff --git a/debian/gbp.conf b/debian/gbp.conf new file mode 100644 index 00000000..fd8ec27c --- /dev/null +++ b/debian/gbp.conf @@ -0,0 +1,9 @@ +[DEFAULT] +upstream-branch = master +debian-branch = debian/unstable +upstream-tag = %(version)s +compression = xz + +[git-buildpackage] +export-dir = ../build-area/ + diff --git a/debian/python-gnocchi-doc.doc-base b/debian/python-gnocchi-doc.doc-base new file mode 100644 index 00000000..67ee04ce --- /dev/null +++ b/debian/python-gnocchi-doc.doc-base @@ -0,0 +1,9 @@ +Document: python-gnocchi-doc +Title: gnocchi Documentation +Author: N/A +Abstract: Sphinx documentation for gnocchi +Section: Programming/Python + +Format: HTML +Index: /usr/share/doc/python-gnocchi-doc/html/index.html +Files: /usr/share/doc/python-gnocchi-doc/html/* diff --git a/debian/rules b/debian/rules new file mode 100755 index 00000000..5085ec1f --- /dev/null +++ b/debian/rules @@ -0,0 +1,58 @@ +#!/usr/bin/make -f + +PYTHONS:=$(shell pyversions -vr) +#PYTHON3S:=$(shell py3versions -vr) + +UPSTREAM_GIT = git://github.com/stackforge/gnocchi.git +include /usr/share/openstack-pkg-tools/pkgos.make + +export OSLO_PACKAGE_VERSION=$(VERSION) + +%: + dh $@ --buildsystem=python_distutils --with python2,python3,sphinxdoc + +override_dh_install: + set -e ; for pyvers in $(PYTHONS); do \ + python$$pyvers setup.py install --install-layout=deb \ + --root $(CURDIR)/debian/python-gnocchi; \ + done +# set -e ; for pyvers in $(PYTHON3S); do \ +# python$$pyvers setup.py install --install-layout=deb \ +# --root 
$(CURDIR)/debian/python3-gnocchi; \ +# done + rm -rf $(CURDIR)/debian/python*-gnocchi/usr/lib/python*/dist-packages/*.pth + +override_dh_auto_test: +ifeq (,$(findstring nocheck, $(DEB_BUILD_OPTIONS))) + @echo "===> Running tests" + set -e ; set -x ; for i in 2.7 ; do \ + PYMAJOR=`echo $$i | cut -d'.' -f1` ; \ + echo "===> Testing with python$$i (python$$PYMAJOR)" ; \ + rm -rf .testrepository ; \ + testr-python$$PYMAJOR init ; \ + TEMP_REZ=`mktemp -t` ; \ + PYTHONPATH=$(CURDIR) PYTHON=python$$i testr-python$$PYMAJOR run --subunit | tee $$TEMP_REZ | subunit2pyunit ; \ + cat $$TEMP_REZ | subunit-filter -s --no-passthrough | subunit-stats ; \ + rm -f $$TEMP_REZ ; \ + testr-python$$PYMAJOR slowest ; \ + done +endif + +override_dh_sphinxdoc: + sphinx-build -b html doc/source debian/python-gnocchi-doc/usr/share/doc/python-gnocchi-doc/html + dh_sphinxdoc -O--buildsystem=python_distutils + +override_dh_clean: + dh_clean -O--buildsystem=python_distutils + rm -rf build + +# Commands not to run +override_dh_installcatalogs: +override_dh_installemacsen override_dh_installifupdown: +override_dh_installinfo override_dh_installmenu override_dh_installmime: +override_dh_installmodules override_dh_installlogcheck: +override_dh_installpam override_dh_installppp override_dh_installudev override_dh_installwm: +override_dh_installxfonts override_dh_gconf override_dh_icons override_dh_perl override_dh_usrlocal: +override_dh_installcron override_dh_installdebconf: +override_dh_installlogrotate override_dh_installgsettings: + diff --git a/debian/source/format b/debian/source/format new file mode 100644 index 00000000..163aaf8d --- /dev/null +++ b/debian/source/format @@ -0,0 +1 @@ +3.0 (quilt) diff --git a/debian/source/options b/debian/source/options new file mode 100644 index 00000000..cb61fa52 --- /dev/null +++ b/debian/source/options @@ -0,0 +1 @@ +extend-diff-ignore = "^[^/]*[.]egg-info/" diff --git a/debian/watch b/debian/watch new file mode 100644 index 00000000..60a46dc8 --- 
/dev/null +++ b/debian/watch @@ -0,0 +1,3 @@ +version=3 +http://pypi.python.org/packages/source/g/gnocchi gnocchi-(.*).tar.gz + -- GitLab From 6d92fd6d6a22588114e2bb690fc0a5b94d23d252 Mon Sep 17 00:00:00 2001 From: Thomas Goirand Date: Fri, 27 Mar 2015 16:39:44 +0100 Subject: [PATCH 0002/1483] Do not run command tests. --- debian/rules | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/debian/rules b/debian/rules index 5085ec1f..bd885b33 100755 --- a/debian/rules +++ b/debian/rules @@ -31,7 +31,7 @@ ifeq (,$(findstring nocheck, $(DEB_BUILD_OPTIONS))) rm -rf .testrepository ; \ testr-python$$PYMAJOR init ; \ TEMP_REZ=`mktemp -t` ; \ - PYTHONPATH=$(CURDIR) PYTHON=python$$i testr-python$$PYMAJOR run --subunit | tee $$TEMP_REZ | subunit2pyunit ; \ + PYTHONPATH=$(CURDIR) PYTHON=python$$i testr-python$$PYMAJOR run --subunit '^(?!('"gnocchi.tests.test_carbonara.CarbonaraCmd.*"'))' | tee $$TEMP_REZ | subunit2pyunit ; \ cat $$TEMP_REZ | subunit-filter -s --no-passthrough | subunit-stats ; \ rm -f $$TEMP_REZ ; \ testr-python$$PYMAJOR slowest ; \ -- GitLab From 8826a5033dd29f1b721c74d48bd2945276c5914c Mon Sep 17 00:00:00 2001 From: Thomas Goirand Date: Fri, 27 Mar 2015 16:40:22 +0100 Subject: [PATCH 0003/1483] Fixed released version. --- debian/changelog | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/debian/changelog b/debian/changelog index 3b9aaa02..50c325bc 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,4 +1,4 @@ -gnocchi (1.0.0a1-1) unstable; urgency=medium +gnocchi (1.0~a1-1) unstable; urgency=medium * Initial release. (Closes: #XXXXXX) -- GitLab From 1b0364c349a91acc8f3d36c7d67320d24b3d18fb Mon Sep 17 00:00:00 2001 From: Thomas Goirand Date: Fri, 27 Mar 2015 16:42:42 +0100 Subject: [PATCH 0004/1483] Fixed sphinx-build with PYTHONPATH=. 
--- debian/rules | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/debian/rules b/debian/rules index bd885b33..8dcee2e4 100755 --- a/debian/rules +++ b/debian/rules @@ -39,7 +39,7 @@ ifeq (,$(findstring nocheck, $(DEB_BUILD_OPTIONS))) endif override_dh_sphinxdoc: - sphinx-build -b html doc/source debian/python-gnocchi-doc/usr/share/doc/python-gnocchi-doc/html + PYTHONPATH=. sphinx-build -b html doc/source debian/python-gnocchi-doc/usr/share/doc/python-gnocchi-doc/html dh_sphinxdoc -O--buildsystem=python_distutils override_dh_clean: -- GitLab From 939d3908abd5e7e40274994eb4e54bffeb78c0d5 Mon Sep 17 00:00:00 2001 From: Thomas Goirand Date: Fri, 27 Mar 2015 16:58:52 +0100 Subject: [PATCH 0005/1483] Starting pgsql on tests. --- debian/control | 5 ++++- debian/rules | 10 +++++++++ debian/start_pg.sh | 52 ++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 66 insertions(+), 1 deletion(-) create mode 100644 debian/start_pg.sh diff --git a/debian/control b/debian/control index 7f4872f5..99d32010 100644 --- a/debian/control +++ b/debian/control @@ -10,7 +10,10 @@ Build-Depends: debhelper (>= 9), python-pbr, python-setuptools, python-sphinx, -Build-Depends-Indep: python-ceilometer (>= 2015.1~b3), +Build-Depends-Indep: libpq-dev, + postgresql, + postgresql-server-dev-all, + python-ceilometer (>= 2015.1~b3), python-concurrent.futures (>= 2.1.6), python-coverage (>= 3.6), python-fixtures, diff --git a/debian/rules b/debian/rules index 8dcee2e4..7b789307 100755 --- a/debian/rules +++ b/debian/rules @@ -27,6 +27,14 @@ ifeq (,$(findstring nocheck, $(DEB_BUILD_OPTIONS))) @echo "===> Running tests" set -e ; set -x ; for i in 2.7 ; do \ PYMAJOR=`echo $$i | cut -d'.' 
-f1` ; \ + echo "===> Starting PGSQL" ; \ + BINDIR=`pg_config --bindir` ; \ + PG_MYTMPDIR=`mktemp -d` ; \ + chown postgres:postgres $$PG_MYTMPDIR || true ; \ + export PGHOST=$$PG_MYTMPDIR ; \ + chmod +x debian/start_pg.sh ; \ + debian/start_pg.sh $$PG_MYTMPDIR ; \ + export GNOCCHI_TEST_INDEXER_URL="postgresql:///template1?host=$$PG_MYTMPDIR&port=9823" ; \ echo "===> Testing with python$$i (python$$PYMAJOR)" ; \ rm -rf .testrepository ; \ testr-python$$PYMAJOR init ; \ @@ -35,6 +43,8 @@ ifeq (,$(findstring nocheck, $(DEB_BUILD_OPTIONS))) cat $$TEMP_REZ | subunit-filter -s --no-passthrough | subunit-stats ; \ rm -f $$TEMP_REZ ; \ testr-python$$PYMAJOR slowest ; \ + echo "===> Stopping PGSQL" ; \ + $$BINDIR/pg_ctl stop -D $$PG_MYTMPDIR ; \ done endif diff --git a/debian/start_pg.sh b/debian/start_pg.sh new file mode 100644 index 00000000..ef149f8a --- /dev/null +++ b/debian/start_pg.sh @@ -0,0 +1,52 @@ +#!/bin/sh +# +# Copyright (C) 2014 Thomas Goirand +# +# Runs pgsql server, then use that to run tests. +# + +set -e +set -x + +PG_MYTMPDIR=${1} + +############################## +### RUN THE PGSQL INSTANCE ### +############################## +MYUSER=`whoami` +# initdb refuses to run as root +if [ "${MYUSER}" = "root" ] ; then + echo dropping root privs.. + exec /bin/su postgres -- "$0" "$@" +fi + +BINDIR=`pg_config --bindir` + +# depends on language-pack-en | language-pack-en +# because initdb acquires encoding from locale +export LC_ALL="C" +export LANGUAGE=C +PGSQL_PORT=9823 +${BINDIR}/initdb -D ${PG_MYTMPDIR} + +${BINDIR}/pg_ctl -w -D ${PG_MYTMPDIR} -o "-k ${PG_MYTMPDIR} -p ${PGSQL_PORT}" start > /dev/null +#${BINDIR}/postgres -D ${PG_MYTMPDIR} -h '' -k ${PG_MYTMPDIR} & +attempts=0 +while ! [ -e ${PG_MYTMPDIR}/postmaster.pid ] ; do + attempts=$((attempts+1)) + if [ "${attempts}" -gt 10 ] ; then + echo "Exiting test: postgres pid file was not created after 30 seconds" + exit 1 + fi + sleep 3 + echo `date`: retrying.. +done + +# Set the env. 
var so that pgsql client doesn't use networking +# libpq uses this for all host params if not explicitly passed +export PGHOST=${PG_MYTMPDIR} + +## Create a new test db +#createuser --superuser nailgun +#createdb -O nailgun nailgun +#export TEST_NAILGUN_DB=nailgun -- GitLab From e1d14a7c19ed32e97f80c22a1077a32dc10cca71 Mon Sep 17 00:00:00 2001 From: Thomas Goirand Date: Fri, 27 Mar 2015 19:42:38 +0100 Subject: [PATCH 0006/1483] Bumped minimum version for tooz. --- debian/control | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/debian/control b/debian/control index 99d32010..c28aee7f 100644 --- a/debian/control +++ b/debian/control @@ -49,7 +49,7 @@ Build-Depends-Indep: libpq-dev, python-tempest-lib (>= 0.2.0), python-testscenarios, python-testtools (>= 0.9.38), - python-tooz (>= 0.4), + python-tooz (>= 0.13.1), python-trollius, python-voluptuous, python-webtest (>= 2.0.16), @@ -88,7 +88,7 @@ Depends: python-concurrent.futures (>= 2.1.6), python-stevedore, python-swiftclient, python-sysv-ipc, - python-tooz (>= 0.4), + python-tooz (>= 0.13.1), python-trollius, python-voluptuous, python-werkzeug, -- GitLab From b09d40685a772f30dc257843c3ca6e34c5a49d3d Mon Sep 17 00:00:00 2001 From: Thomas Goirand Date: Fri, 27 Mar 2015 23:41:03 +0100 Subject: [PATCH 0007/1483] Blacklists the failed tests. 
--- debian/rules | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/debian/rules b/debian/rules index 7b789307..a63dd925 100755 --- a/debian/rules +++ b/debian/rules @@ -7,6 +7,7 @@ UPSTREAM_GIT = git://github.com/stackforge/gnocchi.git include /usr/share/openstack-pkg-tools/pkgos.make export OSLO_PACKAGE_VERSION=$(VERSION) +UNIT_TEST_BLACKLIST = test_carbonara.CarbonaraCmd.*|test_gabbi_resource_list_instance_resources.test_request.* %: dh $@ --buildsystem=python_distutils --with python2,python3,sphinxdoc @@ -39,7 +40,7 @@ ifeq (,$(findstring nocheck, $(DEB_BUILD_OPTIONS))) rm -rf .testrepository ; \ testr-python$$PYMAJOR init ; \ TEMP_REZ=`mktemp -t` ; \ - PYTHONPATH=$(CURDIR) PYTHON=python$$i testr-python$$PYMAJOR run --subunit '^(?!('"gnocchi.tests.test_carbonara.CarbonaraCmd.*"'))' | tee $$TEMP_REZ | subunit2pyunit ; \ + PYTHONPATH=$(CURDIR) PYTHON=python$$i testr-python$$PYMAJOR run --subunit 'gnocchi\.tests\.(?!.*('"$(UNIT_TEST_BLACKLIST)"'))' | tee $$TEMP_REZ | subunit2pyunit ; \ cat $$TEMP_REZ | subunit-filter -s --no-passthrough | subunit-stats ; \ rm -f $$TEMP_REZ ; \ testr-python$$PYMAJOR slowest ; \ -- GitLab From 545b6681faf7e2001bd496eb63a8131e0875a55c Mon Sep 17 00:00:00 2001 From: Thomas Goirand Date: Sat, 28 Mar 2015 00:02:45 +0100 Subject: [PATCH 0008/1483] Starts PGSQL when generating sphinx doc. --- debian/rules | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/debian/rules b/debian/rules index a63dd925..d0d2a934 100755 --- a/debian/rules +++ b/debian/rules @@ -50,8 +50,17 @@ ifeq (,$(findstring nocheck, $(DEB_BUILD_OPTIONS))) endif override_dh_sphinxdoc: - PYTHONPATH=. 
sphinx-build -b html doc/source debian/python-gnocchi-doc/usr/share/doc/python-gnocchi-doc/html - dh_sphinxdoc -O--buildsystem=python_distutils + echo "===> Starting PGSQL" ; \ + BINDIR=`pg_config --bindir` ; \ + PG_MYTMPDIR=`mktemp -d` ; \ + chown postgres:postgres $$PG_MYTMPDIR || true ; \ + export PGHOST=$$PG_MYTMPDIR ; \ + chmod +x debian/start_pg.sh ; \ + debian/start_pg.sh $$PG_MYTMPDIR ; \ + export GNOCCHI_TEST_INDEXER_URL="postgresql:///template1?host=$$PG_MYTMPDIR&port=9823" ; \ + PYTHONPATH=. sphinx-build -b html doc/source debian/python-gnocchi-doc/usr/share/doc/python-gnocchi-doc/html ; \ + dh_sphinxdoc -O--buildsystem=python_distutils ; \ + $$BINDIR/pg_ctl stop -D $$PG_MYTMPDIR override_dh_clean: dh_clean -O--buildsystem=python_distutils -- GitLab From b1e4f54c78ab995053897b21f1661b919bd6169c Mon Sep 17 00:00:00 2001 From: Thomas Goirand Date: Mon, 30 Mar 2015 10:47:26 +0200 Subject: [PATCH 0009/1483] Do not blacklist the failed unit tests. --- debian/control | 2 ++ debian/rules | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/debian/control b/debian/control index c28aee7f..06df6fc1 100644 --- a/debian/control +++ b/debian/control @@ -21,6 +21,7 @@ Build-Depends-Indep: libpq-dev, python-future, python-gabbi (>= 0.6.0), python-jinja2, + python-jsonpatch (>= 1.9), python-keystonemiddleware, python-mock, python-msgpack, @@ -69,6 +70,7 @@ Depends: python-concurrent.futures (>= 2.1.6), python-flask, python-future, python-jinja2, + python-jsonpatch (>= 1.9), python-msgpack, python-numpy, python-oslo.config (>= 1.4.1), diff --git a/debian/rules b/debian/rules index d0d2a934..bf601ebd 100755 --- a/debian/rules +++ b/debian/rules @@ -7,7 +7,7 @@ UPSTREAM_GIT = git://github.com/stackforge/gnocchi.git include /usr/share/openstack-pkg-tools/pkgos.make export OSLO_PACKAGE_VERSION=$(VERSION) -UNIT_TEST_BLACKLIST = test_carbonara.CarbonaraCmd.*|test_gabbi_resource_list_instance_resources.test_request.* +UNIT_TEST_BLACKLIST = 
test_carbonara.CarbonaraCmd.* %: dh $@ --buildsystem=python_distutils --with python2,python3,sphinxdoc -- GitLab From e468107a3bd27eb05fe2d15df64d972cccb18a6a Mon Sep 17 00:00:00 2001 From: Thomas Goirand Date: Thu, 10 Sep 2015 14:46:12 +0200 Subject: [PATCH 0010/1483] Now packaging 1.1.0 --- debian/changelog | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/debian/changelog b/debian/changelog index 50c325bc..d194f4a6 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,4 +1,4 @@ -gnocchi (1.0~a1-1) unstable; urgency=medium +gnocchi (1.1.0-1) unstable; urgency=medium * Initial release. (Closes: #XXXXXX) -- GitLab From bdab49321421ea42daf19138a5c8e2e827276e4b Mon Sep 17 00:00:00 2001 From: Thomas Goirand Date: Thu, 10 Sep 2015 14:58:08 +0200 Subject: [PATCH 0011/1483] Fixed (build-)depends for this release. --- debian/control | 56 ++++++++++++++++++++++++++++---------------------- 1 file changed, 31 insertions(+), 25 deletions(-) diff --git a/debian/control b/debian/control index 06df6fc1..2b4c6ec5 100644 --- a/debian/control +++ b/debian/control @@ -5,40 +5,42 @@ Maintainer: PKG OpenStack Uploaders: Thomas Goirand Build-Depends: debhelper (>= 9), dh-python, - openstack-pkg-tools (>= 22~), - python-all (>= 2.6.6-3~), + openstack-pkg-tools (>= 23~), + python-all, python-pbr, python-setuptools, python-sphinx, -Build-Depends-Indep: libpq-dev, +Build-Depends-Indep: alembic (>= 0.7.6), + libpq-dev, postgresql, postgresql-server-dev-all, python-ceilometer (>= 2015.1~b3), python-concurrent.futures (>= 2.1.6), python-coverage (>= 3.6), python-fixtures, - python-flask, python-future, - python-gabbi (>= 0.6.0), + python-gabbi (>= 1), python-jinja2, python-jsonpatch (>= 1.9), - python-keystonemiddleware, + python-keystonemiddleware (>= 1.5.0), python-mock, python-msgpack, python-mysqldb, python-numpy, - python-oslo.config (>= 1.4.1), - python-oslo.db (>= 0.5.0), - python-oslo.log, + python-oslo.config (>= 1:2.3.0), + python-oslo.db (>= 1.8.0), + 
python-oslo.log (>= 1.0.0), python-oslo.policy (>= 0.3.0), - python-oslo.serialization (>= 1.0.0), - python-oslo.utils (>= 0.3.0), + python-oslo.serialization (>= 1.4.0), + python-oslo.utils (>= 1.6.0), python-oslosphinx (>= 2.2.0.0), python-oslotest, python-pandas, - python-pecan, + python-pecan (>= 0.9), python-psycopg2, - python-pytimeparse, + python-pymysql, + python-pytimeparse (>= 1.1.5), + python-retrying, python-requests, python-six, python-sphinxcontrib.httpdomain, @@ -52,8 +54,10 @@ Build-Depends-Indep: libpq-dev, python-testtools (>= 0.9.38), python-tooz (>= 0.13.1), python-trollius, + python-tz, python-voluptuous, python-webtest (>= 2.0.16), + python-webob (>= 1.4.1), python-werkzeug, python-yaml, subunit (>= 0.0.18), @@ -65,34 +69,37 @@ Homepage: https://github.com/stackforge/gnocchi Package: python-gnocchi Architecture: all -Pre-Depends: dpkg (>= 1.15.6~) -Depends: python-concurrent.futures (>= 2.1.6), - python-flask, +Depends: alembic (>= 0.7.6), + python-concurrent.futures (>= 2.1.6), python-future, python-jinja2, python-jsonpatch (>= 1.9), python-msgpack, python-numpy, - python-oslo.config (>= 1.4.1), - python-oslo.db (>= 0.5.0), - python-oslo.log, + python-oslo.config (>= 1:2.3.0), + python-oslo.db (>= 1.8.0), + python-oslo.log (>= 1.0.0), python-oslo.policy (>= 0.3.0), - python-oslo.serialization (>= 1.0.0), - python-oslo.utils (>= 0.3.0), + python-oslo.serialization (>= 1.4.0), + python-oslo.utils (>= 1.6.0), python-oslosphinx (>= 2.2.0.0), python-pandas, - python-pecan, - python-pytimeparse, + python-pecan (>= 0.9), + python-psycopg2, + python-pymysql, + python-pytimeparse (>= 1.1.5), + python-retrying, python-requests, python-six, python-sqlalchemy, python-sqlalchemy-utils, python-stevedore, python-swiftclient, - python-sysv-ipc, python-tooz (>= 0.13.1), python-trollius, + python-tz, python-voluptuous, + python-webob (>= 1.4.1), python-werkzeug, python-yaml, ${misc:Depends}, @@ -106,7 +113,6 @@ Description: Metric as a Service - Python 2.x 
Package: python-gnocchi-doc Section: doc Architecture: all -Pre-Depends: dpkg (>= 1.15.6~) Depends: ${misc:Depends}, ${sphinxdoc:Depends} Description: Metric as a Service - doc HTTP API to store metrics and index resources. -- GitLab From 392bbd3e08cb46350240ddc1c33dddd90fb32d3f Mon Sep 17 00:00:00 2001 From: Thomas Goirand Date: Thu, 10 Sep 2015 13:00:54 +0000 Subject: [PATCH 0012/1483] Uploading to experimental. --- debian/changelog | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/debian/changelog b/debian/changelog index d194f4a6..25769f35 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,4 +1,4 @@ -gnocchi (1.1.0-1) unstable; urgency=medium +gnocchi (1.1.0-1) experimental; urgency=medium * Initial release. (Closes: #XXXXXX) -- GitLab From 07b36991d188efcfff0c1dfd32f7006c440cb3e9 Mon Sep 17 00:00:00 2001 From: Thomas Goirand Date: Thu, 10 Sep 2015 13:11:43 +0000 Subject: [PATCH 0013/1483] More packages --- debian/control | 40 ++++++++++++++++++++++++++++++- debian/gnocchi-api.init.in | 17 +++++++++++++ debian/gnocchi-common.install | 2 ++ debian/gnocchi-common.postinst.in | 16 +++++++++++++ debian/gnocchi-common.postrm | 13 ++++++++++ debian/gnocchi-metricd.init.in | 17 +++++++++++++ debian/rules | 13 +++++++++- 7 files changed, 116 insertions(+), 2 deletions(-) create mode 100644 debian/gnocchi-api.init.in create mode 100644 debian/gnocchi-common.install create mode 100644 debian/gnocchi-common.postinst.in create mode 100644 debian/gnocchi-common.postrm create mode 100644 debian/gnocchi-metricd.init.in diff --git a/debian/control b/debian/control index 2b4c6ec5..a3c8d698 100644 --- a/debian/control +++ b/debian/control @@ -106,10 +106,48 @@ Depends: alembic (>= 0.7.6), ${python:Depends}, Suggests: python-gnocchi-doc Description: Metric as a Service - Python 2.x - HTTP API to store metrics and index resources. + Gnocchi is a service for managing a set of resources and storing metrics about + them, in a scalable and resilient way. 
Its functionalities are exposed over an + HTTP REST API. . This package contains the Python 2.x module. +Package: gnocchi-common +Architecture: all +Depends: python-gnocchi (= ${binary:Version}), + ${misc:Depends}, + ${python:Depends}, +Description: Metric as a Service - common files + Gnocchi is a service for managing a set of resources and storing metrics about + them, in a scalable and resilient way. Its functionalities are exposed over an + HTTP REST API. + . + This package contains the common files. + +Package: gnocchi-api +Architecture: all +Depends: gnocchi-common (= ${binary:Version}), + ${misc:Depends}, + ${python:Depends}, +Description: Metric as a Service - API daemon + Gnocchi is a service for managing a set of resources and storing metrics about + them, in a scalable and resilient way. Its functionalities are exposed over an + HTTP REST API. + . + This package contains the API server. + +Package: gnocchi-metricd +Architecture: all +Depends: gnocchi-common (= ${binary:Version}), + ${misc:Depends}, + ${python:Depends}, +Description: Metric as a Service - metric daemon + Gnocchi is a service for managing a set of resources and storing metrics about + them, in a scalable and resilient way. Its functionalities are exposed over an + HTTP REST API. + . + This package contains the metric daemon. 
+ Package: python-gnocchi-doc Section: doc Architecture: all diff --git a/debian/gnocchi-api.init.in b/debian/gnocchi-api.init.in new file mode 100644 index 00000000..b7adc13f --- /dev/null +++ b/debian/gnocchi-api.init.in @@ -0,0 +1,17 @@ +#!/bin/sh +### BEGIN INIT INFO +# Provides: gnocchi-api +# Required-Start: $network $local_fs $remote_fs $syslog +# Required-Stop: $remote_fs +# Should-Start: postgresql mysql keystone rabbitmq-server ntp +# Should-Stop: postgresql mysql keystone rabbitmq-server ntp +# Default-Start: 2 3 4 5 +# Default-Stop: 0 1 6 +# Short-Description: gnocchi Api +# Description: gnocchi-api +### END INIT INFO + +# Author: Thomas Goirand +DESC="OpenStack Gnocchi Api" +PROJECT_NAME=gnocchi +NAME=${PROJECT_NAME}-api diff --git a/debian/gnocchi-common.install b/debian/gnocchi-common.install new file mode 100644 index 00000000..5d4f0c8f --- /dev/null +++ b/debian/gnocchi-common.install @@ -0,0 +1,2 @@ +etc/gnocchi/policy.json /usr/share/gnocchi-common + diff --git a/debian/gnocchi-common.postinst.in b/debian/gnocchi-common.postinst.in new file mode 100644 index 00000000..e156c06b --- /dev/null +++ b/debian/gnocchi-common.postinst.in @@ -0,0 +1,16 @@ +#!/bin/sh + +set -e + +#PKGOS-INCLUDE# + +if [ "$1" = "configure" ] || [ "$1" = "reconfigure" ] ; then + pkgos_var_user_group gnocchi + + pkgos_write_new_conf gnocchi gnocchi.conf + pkgos_write_new_conf gnocchi policy.json +fi + +#DEBHELPER# + +exit 0 diff --git a/debian/gnocchi-common.postrm b/debian/gnocchi-common.postrm new file mode 100644 index 00000000..537ba729 --- /dev/null +++ b/debian/gnocchi-common.postrm @@ -0,0 +1,13 @@ +#!/bin/sh + +set -e + +if [ "$1" = "purge" ] ; then + rm -f /etc/gnocchi/gnocchi.conf /etc/gnocchi/policy.json + rmdir --ignore-fail-on-non-empty /etc/gnocchi || true + rm -rf /var/lib/gnocchi /var/log/gnocchi +fi + +#DEBHELPER# + +exit 0 diff --git a/debian/gnocchi-metricd.init.in b/debian/gnocchi-metricd.init.in new file mode 100644 index 00000000..7e8a159d --- /dev/null 
+++ b/debian/gnocchi-metricd.init.in @@ -0,0 +1,17 @@ +#!/bin/sh +### BEGIN INIT INFO +# Provides: gnocchi-metricd +# Required-Start: $network $local_fs $remote_fs $syslog +# Required-Stop: $remote_fs +# Should-Start: postgresql mysql keystone rabbitmq-server ntp mongodb +# Should-Stop: postgresql mysql keystone rabbitmq-server ntp mongodb +# Default-Start: 2 3 4 5 +# Default-Stop: 0 1 6 +# Short-Description: gnocchi Api +# Description: gnocchi-api +### END INIT INFO + +# Author: Thomas Goirand +DESC="OpenStack Gnocchi Api" +PROJECT_NAME=gnocchi +NAME=${PROJECT_NAME}-metricd diff --git a/debian/rules b/debian/rules index bf601ebd..fd9f85b8 100755 --- a/debian/rules +++ b/debian/rules @@ -23,6 +23,18 @@ override_dh_install: # done rm -rf $(CURDIR)/debian/python*-gnocchi/usr/lib/python*/dist-packages/*.pth +override_dh_auto_build: + dh_auto_build -O--buildsystem=python_distutils + mkdir -p $(CURDIR)/debian/gnocchi-common/usr/share/gnocchi-common + oslo-config-generator --output-file $(CURDIR)/debian/gnocchi-common/usr/share/gnocchi-common/gnocchi.conf \ + --wrap-width 140 \ + --namespace gnocchi \ + --namespace oslo.db \ + --namespace oslo.log \ + --namespace oslo.policy \ + --namespace keystonemiddleware.auth_token + + override_dh_auto_test: ifeq (,$(findstring nocheck, $(DEB_BUILD_OPTIONS))) @echo "===> Running tests" @@ -75,4 +87,3 @@ override_dh_installpam override_dh_installppp override_dh_installudev override_d override_dh_installxfonts override_dh_gconf override_dh_icons override_dh_perl override_dh_usrlocal: override_dh_installcron override_dh_installdebconf: override_dh_installlogrotate override_dh_installgsettings: - -- GitLab From 94b6da3c59c297a96e04303550fe5fc7484cd19b Mon Sep 17 00:00:00 2001 From: Thomas Goirand Date: Thu, 10 Sep 2015 13:26:48 +0000 Subject: [PATCH 0014/1483] Fixed gbp.conf --- debian/gbp.conf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/debian/gbp.conf b/debian/gbp.conf index fd8ec27c..10f9500d 100644 --- 
a/debian/gbp.conf +++ b/debian/gbp.conf @@ -4,6 +4,6 @@ debian-branch = debian/unstable upstream-tag = %(version)s compression = xz -[git-buildpackage] +[buildpackage] export-dir = ../build-area/ -- GitLab From db1b09e899bacad43c7e4cf37f049fb78d1516ef Mon Sep 17 00:00:00 2001 From: Thomas Goirand Date: Thu, 10 Sep 2015 13:27:57 +0000 Subject: [PATCH 0015/1483] Removed SQLAutils from build-depends. --- debian/control | 2 -- 1 file changed, 2 deletions(-) diff --git a/debian/control b/debian/control index a3c8d698..73fb005c 100644 --- a/debian/control +++ b/debian/control @@ -45,7 +45,6 @@ Build-Depends-Indep: alembic (>= 0.7.6), python-six, python-sphinxcontrib.httpdomain, python-sqlalchemy, - python-sqlalchemy-utils, python-stevedore, python-swiftclient, python-sysv-ipc, @@ -92,7 +91,6 @@ Depends: alembic (>= 0.7.6), python-requests, python-six, python-sqlalchemy, - python-sqlalchemy-utils, python-stevedore, python-swiftclient, python-tooz (>= 0.13.1), -- GitLab From 0912a49a0222e228eadd3ac0ecb0793ce995144c Mon Sep 17 00:00:00 2001 From: Thomas Goirand Date: Thu, 10 Sep 2015 14:17:56 +0000 Subject: [PATCH 0016/1483] Re-added python-sqlalchemy-utils as (build-)depends. --- debian/control | 2 ++ 1 file changed, 2 insertions(+) diff --git a/debian/control b/debian/control index 73fb005c..a3c8d698 100644 --- a/debian/control +++ b/debian/control @@ -45,6 +45,7 @@ Build-Depends-Indep: alembic (>= 0.7.6), python-six, python-sphinxcontrib.httpdomain, python-sqlalchemy, + python-sqlalchemy-utils, python-stevedore, python-swiftclient, python-sysv-ipc, @@ -91,6 +92,7 @@ Depends: alembic (>= 0.7.6), python-requests, python-six, python-sqlalchemy, + python-sqlalchemy-utils, python-stevedore, python-swiftclient, python-tooz (>= 0.13.1), -- GitLab From eb609d4d0877da00a77e401f086a4499178af853 Mon Sep 17 00:00:00 2001 From: Thomas Goirand Date: Thu, 10 Sep 2015 14:27:38 +0000 Subject: [PATCH 0017/1483] Do not generate the docs for now. 
--- debian/rules | 23 ++++++++++++----------- 1 file changed, 12 insertions(+), 11 deletions(-) diff --git a/debian/rules b/debian/rules index fd9f85b8..cbbecbc4 100755 --- a/debian/rules +++ b/debian/rules @@ -62,17 +62,18 @@ ifeq (,$(findstring nocheck, $(DEB_BUILD_OPTIONS))) endif override_dh_sphinxdoc: - echo "===> Starting PGSQL" ; \ - BINDIR=`pg_config --bindir` ; \ - PG_MYTMPDIR=`mktemp -d` ; \ - chown postgres:postgres $$PG_MYTMPDIR || true ; \ - export PGHOST=$$PG_MYTMPDIR ; \ - chmod +x debian/start_pg.sh ; \ - debian/start_pg.sh $$PG_MYTMPDIR ; \ - export GNOCCHI_TEST_INDEXER_URL="postgresql:///template1?host=$$PG_MYTMPDIR&port=9823" ; \ - PYTHONPATH=. sphinx-build -b html doc/source debian/python-gnocchi-doc/usr/share/doc/python-gnocchi-doc/html ; \ - dh_sphinxdoc -O--buildsystem=python_distutils ; \ - $$BINDIR/pg_ctl stop -D $$PG_MYTMPDIR +# echo "===> Starting PGSQL" ; \ +# BINDIR=`pg_config --bindir` ; \ +# PG_MYTMPDIR=`mktemp -d` ; \ +# chown postgres:postgres $$PG_MYTMPDIR || true ; \ +# export PGHOST=$$PG_MYTMPDIR ; \ +# chmod +x debian/start_pg.sh ; \ +# debian/start_pg.sh $$PG_MYTMPDIR ; \ +# export GNOCCHI_TEST_INDEXER_URL="postgresql:///template1?host=$$PG_MYTMPDIR&port=9823" ; \ +# PYTHONPATH=. sphinx-build -b html doc/source debian/python-gnocchi-doc/usr/share/doc/python-gnocchi-doc/html ; \ +# dh_sphinxdoc -O--buildsystem=python_distutils ; \ +# $$BINDIR/pg_ctl stop -D $$PG_MYTMPDIR + echo "Do nothing" override_dh_clean: dh_clean -O--buildsystem=python_distutils -- GitLab From 6f07e5c2bc02dbd3ad2d1208e4a1c1afa331a297 Mon Sep 17 00:00:00 2001 From: Thomas Goirand Date: Fri, 18 Sep 2015 14:36:29 +0200 Subject: [PATCH 0018/1483] Packaging 1.2.0, fixed (build-)depends. 
--- debian/changelog | 2 +- debian/control | 3 +++ 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/debian/changelog b/debian/changelog index d194f4a6..924bc3f5 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,4 +1,4 @@ -gnocchi (1.1.0-1) unstable; urgency=medium +gnocchi (1.2.0-1) unstable; urgency=medium * Initial release. (Closes: #XXXXXX) diff --git a/debian/control b/debian/control index 2b4c6ec5..14eb133d 100644 --- a/debian/control +++ b/debian/control @@ -11,6 +11,8 @@ Build-Depends: debhelper (>= 9), python-setuptools, python-sphinx, Build-Depends-Indep: alembic (>= 0.7.6), + influxdb, + influxdb-dev, libpq-dev, postgresql, postgresql-server-dev-all, @@ -74,6 +76,7 @@ Depends: alembic (>= 0.7.6), python-future, python-jinja2, python-jsonpatch (>= 1.9), + python-keystonemiddleware (>= 1.5.0), python-msgpack, python-numpy, python-oslo.config (>= 1:2.3.0), -- GitLab From 1d688843e84270c414d886ddaa965fa2c04aa3ac Mon Sep 17 00:00:00 2001 From: Thomas Goirand Date: Fri, 18 Sep 2015 12:48:41 +0000 Subject: [PATCH 0019/1483] Packaging 1.2.0 --- debian/changelog | 4 ++-- debian/control | 9 +++++++-- debian/copyright | 2 +- debian/rules | 2 +- 4 files changed, 11 insertions(+), 6 deletions(-) diff --git a/debian/changelog b/debian/changelog index 25769f35..ba1c9606 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,5 +1,5 @@ -gnocchi (1.1.0-1) experimental; urgency=medium +gnocchi (1.2.0-1) experimental; urgency=medium - * Initial release. (Closes: #XXXXXX) + * Initial release. 
(Closes: #799374) -- Thomas Goirand Fri, 27 Mar 2015 10:32:47 +0100 diff --git a/debian/control b/debian/control index a3c8d698..5d0a9958 100644 --- a/debian/control +++ b/debian/control @@ -11,6 +11,8 @@ Build-Depends: debhelper (>= 9), python-setuptools, python-sphinx, Build-Depends-Indep: alembic (>= 0.7.6), + influxdb, + influxdb-dev, libpq-dev, postgresql, postgresql-server-dev-all, @@ -65,7 +67,7 @@ Build-Depends-Indep: alembic (>= 0.7.6), Standards-Version: 3.9.6 Vcs-Browser: http://anonscm.debian.org/gitweb/?p=openstack/python-gnocchi.git Vcs-Git: git://anonscm.debian.org/openstack/python-gnocchi.git -Homepage: https://github.com/stackforge/gnocchi +Homepage: https://github.com/openstack/gnocchi Package: python-gnocchi Architecture: all @@ -74,6 +76,7 @@ Depends: alembic (>= 0.7.6), python-future, python-jinja2, python-jsonpatch (>= 1.9), + python-keystonemiddleware (>= 1.5.0), python-msgpack, python-numpy, python-oslo.config (>= 1:2.3.0), @@ -153,6 +156,8 @@ Section: doc Architecture: all Depends: ${misc:Depends}, ${sphinxdoc:Depends} Description: Metric as a Service - doc - HTTP API to store metrics and index resources. + Gnocchi is a service for managing a set of resources and storing metrics about + them, in a scalable and resilient way. Its functionalities are exposed over an + HTTP REST API. . This package contains the documentation. 
diff --git a/debian/copyright b/debian/copyright index 1fa2c9fe..85d755cc 100644 --- a/debian/copyright +++ b/debian/copyright @@ -1,7 +1,7 @@ Format: http://www.debian.org/doc/packaging-manuals/copyright-format/1.0/ Upstream-Name: gnocchi Upstream-Contact: Julien Danjou -Source: git://github.com/stackforge/gnocchi.git +Source: git://github.com/openstack/gnocchi.git Files: debian/* Copyright: (c) 2014, Thomas Goirand diff --git a/debian/rules b/debian/rules index cbbecbc4..445aaf65 100755 --- a/debian/rules +++ b/debian/rules @@ -3,7 +3,7 @@ PYTHONS:=$(shell pyversions -vr) #PYTHON3S:=$(shell py3versions -vr) -UPSTREAM_GIT = git://github.com/stackforge/gnocchi.git +UPSTREAM_GIT = git://github.com/openstack/gnocchi.git include /usr/share/openstack-pkg-tools/pkgos.make export OSLO_PACKAGE_VERSION=$(VERSION) -- GitLab From 63725a5fa6c7d0cbb9f03df3ff9073658e693ebb Mon Sep 17 00:00:00 2001 From: Thomas Goirand Date: Fri, 18 Sep 2015 13:17:22 +0000 Subject: [PATCH 0020/1483] Fixed python-keystoneclient version requirements. 
--- debian/control | 2 ++ 1 file changed, 2 insertions(+) diff --git a/debian/control b/debian/control index 5d0a9958..8db50cff 100644 --- a/debian/control +++ b/debian/control @@ -24,6 +24,7 @@ Build-Depends-Indep: alembic (>= 0.7.6), python-gabbi (>= 1), python-jinja2, python-jsonpatch (>= 1.9), + python-keystoneclient (>= 1:1.6.0), python-keystonemiddleware (>= 1.5.0), python-mock, python-msgpack, @@ -76,6 +77,7 @@ Depends: alembic (>= 0.7.6), python-future, python-jinja2, python-jsonpatch (>= 1.9), + python-keystoneclient (>= 1:1.6.0), python-keystonemiddleware (>= 1.5.0), python-msgpack, python-numpy, -- GitLab From e86481c14a08f2601374bcb54c4ebaced21b4f77 Mon Sep 17 00:00:00 2001 From: Thomas Goirand Date: Fri, 18 Sep 2015 13:18:07 +0000 Subject: [PATCH 0021/1483] Fixed version of keystonemiddleware --- debian/control | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/debian/control b/debian/control index 8db50cff..7f2c3af1 100644 --- a/debian/control +++ b/debian/control @@ -25,7 +25,7 @@ Build-Depends-Indep: alembic (>= 0.7.6), python-jinja2, python-jsonpatch (>= 1.9), python-keystoneclient (>= 1:1.6.0), - python-keystonemiddleware (>= 1.5.0), + python-keystonemiddleware (>= 2.1.0), python-mock, python-msgpack, python-mysqldb, @@ -78,7 +78,7 @@ Depends: alembic (>= 0.7.6), python-jinja2, python-jsonpatch (>= 1.9), python-keystoneclient (>= 1:1.6.0), - python-keystonemiddleware (>= 1.5.0), + python-keystonemiddleware (>= 2.1.0), python-msgpack, python-numpy, python-oslo.config (>= 1:2.3.0), -- GitLab From 3bd65294a811efeb919cbc5ced1db90fe5830930 Mon Sep 17 00:00:00 2001 From: Thomas Goirand Date: Fri, 18 Sep 2015 13:58:13 +0000 Subject: [PATCH 0022/1483] Added definition of GNOCCHI_TEST_STORAGE_DRIVER=file --- debian/rules | 1 + 1 file changed, 1 insertion(+) diff --git a/debian/rules b/debian/rules index 445aaf65..f273411b 100755 --- a/debian/rules +++ b/debian/rules @@ -48,6 +48,7 @@ ifeq (,$(findstring nocheck, $(DEB_BUILD_OPTIONS))) 
chmod +x debian/start_pg.sh ; \ debian/start_pg.sh $$PG_MYTMPDIR ; \ export GNOCCHI_TEST_INDEXER_URL="postgresql:///template1?host=$$PG_MYTMPDIR&port=9823" ; \ + export GNOCCHI_TEST_STORAGE_DRIVER=file ; \ echo "===> Testing with python$$i (python$$PYMAJOR)" ; \ rm -rf .testrepository ; \ testr-python$$PYMAJOR init ; \ -- GitLab From a431efe83d7c71ac7c34b76476ce926f7a033a79 Mon Sep 17 00:00:00 2001 From: Thomas Goirand Date: Wed, 23 Sep 2015 12:04:07 +0000 Subject: [PATCH 0023/1483] no --with python3 --- debian/rules | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/debian/rules b/debian/rules index f273411b..4d27f01f 100755 --- a/debian/rules +++ b/debian/rules @@ -10,7 +10,7 @@ export OSLO_PACKAGE_VERSION=$(VERSION) UNIT_TEST_BLACKLIST = test_carbonara.CarbonaraCmd.* %: - dh $@ --buildsystem=python_distutils --with python2,python3,sphinxdoc + dh $@ --buildsystem=python_distutils --with python2,sphinxdoc override_dh_install: set -e ; for pyvers in $(PYTHONS); do \ -- GitLab From 1dec8e1a9083d17542a2190ebd1b901ec6548943 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Fri, 6 Nov 2015 16:25:50 +0100 Subject: [PATCH 0024/1483] devstack: install PostgreSQL development headers It's needed for psycopg2. 
Change-Id: Iaa89c266569fefc6580fa1c0c17345bd1146853b (cherry picked from commit 3cbb645d2eb09c143d2e5111f9474b94f2969286) --- devstack/plugin.sh | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/devstack/plugin.sh b/devstack/plugin.sh index b08fa7e8..cb6e47d7 100644 --- a/devstack/plugin.sh +++ b/devstack/plugin.sh @@ -327,6 +327,15 @@ function init_gnocchi { fi } +function preinstall_gnocchi { + # Needed to build psycopg2 + if is_ubuntu; then + install_package libpq-dev + else + install_package postgresql-devel + fi +} + # install_gnocchi() - Collect source and prepare function install_gnocchi { if [ "${GNOCCHI_COORDINATOR_URL%%:*}" == "redis" ]; then @@ -426,7 +435,10 @@ function stop_gnocchi { } if is_service_enabled gnocchi-api; then - if [[ "$1" == "stack" && "$2" == "install" ]]; then + if [[ "$1" == "stack" && "$2" == "pre-install" ]]; then + echo_summary "Configuring system services for Gnocchi" + preinstall_gnocchi + elif [[ "$1" == "stack" && "$2" == "install" ]]; then echo_summary "Installing Gnocchi" stack_install_service gnocchi elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then -- GitLab From 38ee3f05c547961eee6853bdc32b68a0c7f7c229 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Thu, 19 Nov 2015 10:07:08 +0100 Subject: [PATCH 0025/1483] Update branch for git-review to stable/1.3 Change-Id: I9c15ee66baf3746e61a6073bcab30b23666096ba --- .gitreview | 1 + 1 file changed, 1 insertion(+) diff --git a/.gitreview b/.gitreview index e4b8477d..e93df263 100644 --- a/.gitreview +++ b/.gitreview @@ -2,3 +2,4 @@ host=review.openstack.org port=29418 project=openstack/gnocchi.git +defaultbranch=stable/1.3 -- GitLab From 7b6544f25bebfe1b30e5ac9b51548998a49c123f Mon Sep 17 00:00:00 2001 From: Pradeep Kilambi Date: Thu, 5 Nov 2015 15:32:55 -0500 Subject: [PATCH 0026/1483] Fix metricd TypeError's due to bad method signature Change-Id: I6052e5e2b8bd67bfd36f6f1dd1fbe752594bedf5 Closes-Bug: #1513604 (cherry picked from commit 
9169e18c702685d550570a09fdf9e19ea87a5dee) --- gnocchi/indexer/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/gnocchi/indexer/__init__.py b/gnocchi/indexer/__init__.py index 80566ed8..92b528ba 100644 --- a/gnocchi/indexer/__init__.py +++ b/gnocchi/indexer/__init__.py @@ -292,7 +292,7 @@ class IndexerDriver(object): raise exceptions.NotImplementedError @staticmethod - def list_metrics(user_id=None, project_id=None): + def list_metrics(user_id=None, project_id=None, details=False, **kwargs): raise exceptions.NotImplementedError @staticmethod -- GitLab From 81a04ef593fa790c2410a67c8c74e4287fd9e3b1 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Wed, 18 Nov 2015 17:47:52 +0100 Subject: [PATCH 0027/1483] ceph: fix computation of read offset Change-Id: I53caf7de791914a503033a72799b148b4dcdedab (cherry picked from commit 3a68df19aff9f0c69762594b5cd840d485812457) --- gnocchi/storage/ceph.py | 2 +- gnocchi/tests/test_storage.py | 12 ++++++++++++ 2 files changed, 13 insertions(+), 1 deletion(-) diff --git a/gnocchi/storage/ceph.py b/gnocchi/storage/ceph.py index c712acba..753dea58 100644 --- a/gnocchi/storage/ceph.py +++ b/gnocchi/storage/ceph.py @@ -200,5 +200,5 @@ class CephStorage(_carbonara.CarbonaraBasedStorage): if not data: break content += data - offset += len(content) + offset += len(data) return content diff --git a/gnocchi/tests/test_storage.py b/gnocchi/tests/test_storage.py index 02b069f8..1b4408d7 100644 --- a/gnocchi/tests/test_storage.py +++ b/gnocchi/tests/test_storage.py @@ -17,6 +17,7 @@ import datetime import uuid import mock +import six.moves from gnocchi import storage from gnocchi.storage import null @@ -125,6 +126,17 @@ class TestStorageDriver(tests_base.TestCase): report = self.storage.measures_report() self.assertEqual({}, report) + def test_add_measures_big(self): + m = storage.Metric(uuid.uuid4(), self.archive_policies['high']) + self.storage.add_measures(m, [ + storage.Measure(datetime.datetime(2014, 1, 1, 12, i, 
j), 100) + for i in six.moves.range(0, 60) for j in six.moves.range(0, 60)]) + with mock.patch.object(self.index, 'get_metrics') as f: + f.return_value = [m] + self.storage.process_background_tasks(self.index) + + self.assertEqual(3661, len(self.storage.get_measures(m))) + def test_add_and_get_measures(self): self.storage.add_measures(self.metric, [ storage.Measure(datetime.datetime(2014, 1, 1, 12, 0, 1), 69), -- GitLab From db32b91553478cf0d8e4ee06c648b0cd03ab60bd Mon Sep 17 00:00:00 2001 From: Thomas Goirand Date: Wed, 25 Nov 2015 08:01:40 +0000 Subject: [PATCH 0028/1483] Now packaging 1.3.0 --- debian/changelog | 2 +- debian/control | 24 ++++++++++++++---------- debian/gnocchi-common.install | 1 - 3 files changed, 15 insertions(+), 12 deletions(-) diff --git a/debian/changelog b/debian/changelog index ba1c9606..b1bcc21b 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,4 +1,4 @@ -gnocchi (1.2.0-1) experimental; urgency=medium +gnocchi (1.3.0-1) unstable; urgency=medium * Initial release. 
(Closes: #799374) diff --git a/debian/control b/debian/control index 7f2c3af1..9c00fb4f 100644 --- a/debian/control +++ b/debian/control @@ -2,7 +2,7 @@ Source: gnocchi Section: python Priority: optional Maintainer: PKG OpenStack -Uploaders: Thomas Goirand +Uploaders: Thomas Goirand , Build-Depends: debhelper (>= 9), dh-python, openstack-pkg-tools (>= 23~), @@ -25,7 +25,7 @@ Build-Depends-Indep: alembic (>= 0.7.6), python-jinja2, python-jsonpatch (>= 1.9), python-keystoneclient (>= 1:1.6.0), - python-keystonemiddleware (>= 2.1.0), + python-keystonemiddleware (>= 2.3.0), python-mock, python-msgpack, python-mysqldb, @@ -38,14 +38,16 @@ Build-Depends-Indep: alembic (>= 0.7.6), python-oslo.utils (>= 1.6.0), python-oslosphinx (>= 2.2.0.0), python-oslotest, - python-pandas, + python-pandas (>= 0.17), + python-pastedeploy, python-pecan (>= 0.9), python-psycopg2, python-pymysql, python-pytimeparse (>= 1.1.5), - python-retrying, python-requests, + python-retrying, python-six, + python-sphinx-bootstrap-theme, python-sphinxcontrib.httpdomain, python-sqlalchemy, python-sqlalchemy-utils, @@ -59,8 +61,8 @@ Build-Depends-Indep: alembic (>= 0.7.6), python-trollius, python-tz, python-voluptuous, - python-webtest (>= 2.0.16), python-webob (>= 1.4.1), + python-webtest (>= 2.0.16), python-werkzeug, python-yaml, subunit (>= 0.0.18), @@ -78,7 +80,7 @@ Depends: alembic (>= 0.7.6), python-jinja2, python-jsonpatch (>= 1.9), python-keystoneclient (>= 1:1.6.0), - python-keystonemiddleware (>= 2.1.0), + python-keystonemiddleware (>= 2.3.0), python-msgpack, python-numpy, python-oslo.config (>= 1:2.3.0), @@ -88,13 +90,14 @@ Depends: alembic (>= 0.7.6), python-oslo.serialization (>= 1.4.0), python-oslo.utils (>= 1.6.0), python-oslosphinx (>= 2.2.0.0), - python-pandas, + python-pandas (>= 0.17), + python-pastedeploy, python-pecan (>= 0.9), python-psycopg2, python-pymysql, python-pytimeparse (>= 1.1.5), - python-retrying, python-requests, + python-retrying, python-six, python-sqlalchemy, 
python-sqlalchemy-utils, @@ -109,7 +112,7 @@ Depends: alembic (>= 0.7.6), python-yaml, ${misc:Depends}, ${python:Depends}, -Suggests: python-gnocchi-doc +Suggests: python-gnocchi-doc, Description: Metric as a Service - Python 2.x Gnocchi is a service for managing a set of resources and storing metrics about them, in a scalable and resilient way. Its functionalities are exposed over an @@ -156,7 +159,8 @@ Description: Metric as a Service - metric daemon Package: python-gnocchi-doc Section: doc Architecture: all -Depends: ${misc:Depends}, ${sphinxdoc:Depends} +Depends: ${misc:Depends}, + ${sphinxdoc:Depends}, Description: Metric as a Service - doc Gnocchi is a service for managing a set of resources and storing metrics about them, in a scalable and resilient way. Its functionalities are exposed over an diff --git a/debian/gnocchi-common.install b/debian/gnocchi-common.install index 5d4f0c8f..59169d06 100644 --- a/debian/gnocchi-common.install +++ b/debian/gnocchi-common.install @@ -1,2 +1 @@ etc/gnocchi/policy.json /usr/share/gnocchi-common - -- GitLab From 2dcb706d3536590d058ac8d99f8384025abe2031 Mon Sep 17 00:00:00 2001 From: Thomas Goirand Date: Wed, 25 Nov 2015 08:18:33 +0000 Subject: [PATCH 0029/1483] Disable the doc package. --- debian/control | 22 +++++++++---------- ...cchi-doc.doc-base => gnocchi-doc.doc-base} | 0 2 files changed, 11 insertions(+), 11 deletions(-) rename debian/{python-gnocchi-doc.doc-base => gnocchi-doc.doc-base} (100%) diff --git a/debian/control b/debian/control index 9c00fb4f..d3008462 100644 --- a/debian/control +++ b/debian/control @@ -156,14 +156,14 @@ Description: Metric as a Service - metric daemon . This package contains the metric daemon. -Package: python-gnocchi-doc -Section: doc -Architecture: all -Depends: ${misc:Depends}, - ${sphinxdoc:Depends}, -Description: Metric as a Service - doc - Gnocchi is a service for managing a set of resources and storing metrics about - them, in a scalable and resilient way. 
Its functionalities are exposed over an - HTTP REST API. - . - This package contains the documentation. +#Package: gnocchi-doc +#Section: doc +#Architecture: all +#Depends: ${misc:Depends}, +# ${sphinxdoc:Depends}, +#Description: Metric as a Service - doc +# Gnocchi is a service for managing a set of resources and storing metrics about +# them, in a scalable and resilient way. Its functionalities are exposed over an +# HTTP REST API. +# . +# This package contains the documentation. diff --git a/debian/python-gnocchi-doc.doc-base b/debian/gnocchi-doc.doc-base similarity index 100% rename from debian/python-gnocchi-doc.doc-base rename to debian/gnocchi-doc.doc-base -- GitLab From d997f66c7876755dd6e10e47dfe04bbf976cf12b Mon Sep 17 00:00:00 2001 From: Thomas Goirand Date: Wed, 25 Nov 2015 08:21:55 +0000 Subject: [PATCH 0030/1483] Fixed watch file. --- debian/watch | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/debian/watch b/debian/watch index 60a46dc8..62628b6a 100644 --- a/debian/watch +++ b/debian/watch @@ -1,3 +1,4 @@ version=3 -http://pypi.python.org/packages/source/g/gnocchi gnocchi-(.*).tar.gz +opts="uversionmangle=s/\.(b|rc)/~$1/" \ +https://github.com/openstack/nova/gnocchi .*/(\d[\d\.]+)\.tar\.gz -- GitLab From 3f26ff3738d21d80908f2c3ca7e3e583f5a14f00 Mon Sep 17 00:00:00 2001 From: Thomas Goirand Date: Wed, 25 Nov 2015 08:38:30 +0000 Subject: [PATCH 0031/1483] Attempt fixing gnocchi.conf install. 
--- debian/rules | 2 -- 1 file changed, 2 deletions(-) diff --git a/debian/rules b/debian/rules index 4d27f01f..3de37d9d 100755 --- a/debian/rules +++ b/debian/rules @@ -23,8 +23,6 @@ override_dh_install: # done rm -rf $(CURDIR)/debian/python*-gnocchi/usr/lib/python*/dist-packages/*.pth -override_dh_auto_build: - dh_auto_build -O--buildsystem=python_distutils mkdir -p $(CURDIR)/debian/gnocchi-common/usr/share/gnocchi-common oslo-config-generator --output-file $(CURDIR)/debian/gnocchi-common/usr/share/gnocchi-common/gnocchi.conf \ --wrap-width 140 \ -- GitLab From aa6a189b817222ef46bfaed8593f75d96063a0ca Mon Sep 17 00:00:00 2001 From: Thomas Goirand Date: Wed, 25 Nov 2015 08:48:01 +0000 Subject: [PATCH 0032/1483] Fixed postinst setup. --- debian/rules | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/debian/rules b/debian/rules index 3de37d9d..34ba1c96 100755 --- a/debian/rules +++ b/debian/rules @@ -31,7 +31,7 @@ override_dh_install: --namespace oslo.log \ --namespace oslo.policy \ --namespace keystonemiddleware.auth_token - + /usr/share/openstack-pkg-tools/pkgos_insert_include pkgos_func gnocchi-common.postinst override_dh_auto_test: ifeq (,$(findstring nocheck, $(DEB_BUILD_OPTIONS))) @@ -76,7 +76,7 @@ override_dh_sphinxdoc: override_dh_clean: dh_clean -O--buildsystem=python_distutils - rm -rf build + rm -rf build debian/gnocchi-common.postinst # Commands not to run override_dh_installcatalogs: -- GitLab From 37f8c00fbb3b3e25e583e7b5e9ca7636dca870cc Mon Sep 17 00:00:00 2001 From: Thomas Goirand Date: Wed, 25 Nov 2015 09:24:33 +0000 Subject: [PATCH 0033/1483] Added debconf based setup. 
--- debian/control | 11 +- debian/gnocchi-api.config.in | 11 ++ debian/gnocchi-api.postinst.in | 15 +++ debian/gnocchi-api.templates | 48 +++++++ debian/gnocchi-common.config.in | 15 +++ debian/gnocchi-common.postinst.in | 10 ++ debian/gnocchi-common.postrm | 22 +++- debian/gnocchi-common.templates | 57 ++++++++ debian/po/POTFILES.in | 2 + debian/po/templates.pot | 211 ++++++++++++++++++++++++++++++ debian/rules | 13 +- 11 files changed, 403 insertions(+), 12 deletions(-) create mode 100644 debian/gnocchi-api.config.in create mode 100644 debian/gnocchi-api.postinst.in create mode 100644 debian/gnocchi-api.templates create mode 100644 debian/gnocchi-common.config.in create mode 100644 debian/gnocchi-common.templates create mode 100644 debian/po/POTFILES.in create mode 100644 debian/po/templates.pot diff --git a/debian/control b/debian/control index d3008462..9c90e5c9 100644 --- a/debian/control +++ b/debian/control @@ -5,7 +5,7 @@ Maintainer: PKG OpenStack Uploaders: Thomas Goirand , Build-Depends: debhelper (>= 9), dh-python, - openstack-pkg-tools (>= 23~), + openstack-pkg-tools (>= 37~), python-all, python-pbr, python-setuptools, @@ -122,7 +122,9 @@ Description: Metric as a Service - Python 2.x Package: gnocchi-common Architecture: all -Depends: python-gnocchi (= ${binary:Version}), +Depends: adduser, + debconf, + python-gnocchi (= ${binary:Version}), ${misc:Depends}, ${python:Depends}, Description: Metric as a Service - common files @@ -134,7 +136,10 @@ Description: Metric as a Service - common files Package: gnocchi-api Architecture: all -Depends: gnocchi-common (= ${binary:Version}), +Depends: adduser, + gnocchi-common (= ${binary:Version}), + python-openstackclient, + q-text-as-data, ${misc:Depends}, ${python:Depends}, Description: Metric as a Service - API daemon diff --git a/debian/gnocchi-api.config.in b/debian/gnocchi-api.config.in new file mode 100644 index 00000000..2cb26ee9 --- /dev/null +++ b/debian/gnocchi-api.config.in @@ -0,0 +1,11 @@ +#!/bin/sh + 
+set -e + +. /usr/share/debconf/confmodule + +#PKGOS-INCLUDE# + +pkgos_register_endpoint_config gnocchi + +exit 0 diff --git a/debian/gnocchi-api.postinst.in b/debian/gnocchi-api.postinst.in new file mode 100644 index 00000000..45507567 --- /dev/null +++ b/debian/gnocchi-api.postinst.in @@ -0,0 +1,15 @@ +#!/bin/sh + +set -e + +#PKGOS-INCLUDE# + +if [ "$1" = "configure" ] || [ "$1" = "reconfigure" ] ; then + . /usr/share/debconf/confmodule + pkgos_register_endpoint_postinst gnocchi gnocchi metric "OpenStack Metric Service" 8041 "" + db_stop +fi + +#DEBHELPER# + +exit 0 diff --git a/debian/gnocchi-api.templates b/debian/gnocchi-api.templates new file mode 100644 index 00000000..e913e49d --- /dev/null +++ b/debian/gnocchi-api.templates @@ -0,0 +1,48 @@ +# These templates have been reviewed by the debian-l10n-english +# team +# +# If modifications/additions/rewording are needed, please ask +# debian-l10n-english@lists.debian.org for advice. +# +# Even minor modifications require translation updates and such +# changes should be coordinated with translators and reviewers. + +Template: gnocchi/register-endpoint +Type: boolean +Default: false +_Description: Register Gnocchi in the Keystone endpoint catalog? + Each OpenStack service (each API) should be registered in order to be + accessible. This is done using "keystone service-create" and "keystone + endpoint-create". This can be done automatically now. + . + Note that you will need to have an up and running Keystone server on which to + connect using the Keystone authentication token. + +Template: gnocchi/keystone-ip +Type: string +_Description: Keystone server IP address: + Please enter the IP address of the Keystone server, so that gnocchi-api can + contact Keystone to do the Gnocchi service and endpoint creation. + +Template: gnocchi/keystone-auth-token +Type: password +_Description: Keystone authentication token: + To configure its endpoint in Keystone, gnocchi-api needs the Keystone + authentication token. 
+ +Template: gnocchi/endpoint-ip +Type: string +_Description: Gnocchi endpoint IP address: + Please enter the IP address that will be used to contact Gnocchi. + . + This IP address should be accessible from the clients that will use this + service, so if you are installing a public cloud, this should be a public + IP address. + +Template: gnocchi/region-name +Type: string +Default: regionOne +_Description: Name of the region to register: + OpenStack supports using availability zones, with each region representing + a location. Please enter the zone that you wish to use when registering the + endpoint. diff --git a/debian/gnocchi-common.config.in b/debian/gnocchi-common.config.in new file mode 100644 index 00000000..98cc527f --- /dev/null +++ b/debian/gnocchi-common.config.in @@ -0,0 +1,15 @@ +#!/bin/sh + +set -e + +. /usr/share/debconf/confmodule + +CONF=/etc/gnocchi/gnocchi.conf + +#PKGOS-INCLUDE# + +pkgos_var_user_group gnocchi +pkgos_dbc_read_conf -pkg gnocchi-common ${CONF} database connection gnocchi $@ +pkgos_read_admin_creds ${CONF} keystone_authtoken gnocchi + +exit 0 diff --git a/debian/gnocchi-common.postinst.in b/debian/gnocchi-common.postinst.in index e156c06b..1c992ecd 100644 --- a/debian/gnocchi-common.postinst.in +++ b/debian/gnocchi-common.postinst.in @@ -2,13 +2,23 @@ set -e +CONF=/etc/gnocchi/gnocchi.conf + #PKGOS-INCLUDE# if [ "$1" = "configure" ] || [ "$1" = "reconfigure" ] ; then + . /usr/share/debconf/confmodule + . 
/usr/share/dbconfig-common/dpkg/postinst + pkgos_var_user_group gnocchi pkgos_write_new_conf gnocchi gnocchi.conf pkgos_write_new_conf gnocchi policy.json + + db_get gnocchi/configure_db + if [ "$RET" = "true" ] ; then + pkgos_dbc_postinst ${CONF} database connection gnocchi $@ + fi fi #DEBHELPER# diff --git a/debian/gnocchi-common.postrm b/debian/gnocchi-common.postrm index 537ba729..e5ac092f 100644 --- a/debian/gnocchi-common.postrm +++ b/debian/gnocchi-common.postrm @@ -3,9 +3,25 @@ set -e if [ "$1" = "purge" ] ; then - rm -f /etc/gnocchi/gnocchi.conf /etc/gnocchi/policy.json - rmdir --ignore-fail-on-non-empty /etc/gnocchi || true - rm -rf /var/lib/gnocchi /var/log/gnocchi + if [ -f /usr/share/debconf/confmodule ] ; then + . /usr/share/debconf/confmodule + + db_get gnocchi/configure_db || true + if [ "$RET" = "true" ]; then + if [ -f /usr/share/dbconfig-common/dpkg/postrm ]; then + . /usr/share/dbconfig-common/dpkg/postrm + dbc_go gnocchi-common $@ + else + rm -f /etc/dbconfig-common/gnocchi-common.conf + if which ucf >/dev/null 2>&1; then + ucf --purge /etc/dbconfig-common/gnocchi-common.conf + ucfr --purge gnocchi-common /etc/dbconfig-common/gnocchi-common.conf + fi + fi + fi + fi + + rm -fr /etc/gnocchi fi #DEBHELPER# diff --git a/debian/gnocchi-common.templates b/debian/gnocchi-common.templates new file mode 100644 index 00000000..06a93e75 --- /dev/null +++ b/debian/gnocchi-common.templates @@ -0,0 +1,57 @@ +# These templates have been reviewed by the debian-l10n-english +# team +# +# If modifications/additions/rewording are needed, please ask +# debian-l10n-english@lists.debian.org for advice. +# +# Even minor modifications require translation updates and such +# changes should be coordinated with translators and reviewers. + +Template: gnocchi/auth-host +Type: string +Default: 127.0.0.1 +_Description: Authentication server hostname: + Please specify the hostname of the authentication server for Gnocchi. 
Typically + this is also the hostname of the OpenStack Identity Service (Keystone). + +Template: gnocchi/admin-tenant-name +Type: string +Default: service +# Translators: a "tenant" in OpenStack world is +# an entity that contains one or more username/password couples. +# It's typically the tenant that will be used for billing. Having more than one +# username/password is very helpful in larger organization. +# You're advised to either keep "tenant" without translating it +# or keep it parenthezised. Example for French: +# locataire ("tenant") +_Description: Authentication server tenant name: + Please specify the authentication server tenant name. + +Template: gnocchi/admin-user +Type: string +Default: admin +_Description: Authentication server username: + Please specify the username to use with the authentication server. + +Template: gnocchi/admin-password +Type: password +_Description: Authentication server password: + Please specify the password to use with the authentication server. + +Template: gnocchi/configure_db +Type: boolean +Default: false +_Description: Set up a database for Gnocchi? + No database has been set up for Gnocchi to use. Before + continuing, you should make sure you have the following information: + . + * the type of database that you want to use; + * the database server hostname (that server must allow TCP connections from this + machine); + * a username and password to access the database. + . + If some of these requirements are missing, do not choose this option and run with + regular SQLite support. + . + You can change this setting later on by running "dpkg-reconfigure -plow + gnocchi-common". 
diff --git a/debian/po/POTFILES.in b/debian/po/POTFILES.in new file mode 100644 index 00000000..d94abc48 --- /dev/null +++ b/debian/po/POTFILES.in @@ -0,0 +1,2 @@ +[type: gettext/rfc822deb] gnocchi-common.templates +[type: gettext/rfc822deb] gnocchi-api.templates diff --git a/debian/po/templates.pot b/debian/po/templates.pot new file mode 100644 index 00000000..916d4478 --- /dev/null +++ b/debian/po/templates.pot @@ -0,0 +1,211 @@ +# SOME DESCRIPTIVE TITLE. +# Copyright (C) YEAR THE PACKAGE'S COPYRIGHT HOLDER +# This file is distributed under the same license as the gnocchi package. +# FIRST AUTHOR , YEAR. +# +#, fuzzy +msgid "" +msgstr "" +"Project-Id-Version: gnocchi\n" +"Report-Msgid-Bugs-To: gnocchi@packages.debian.org\n" +"POT-Creation-Date: 2015-11-25 09:24+0000\n" +"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" +"Last-Translator: FULL NAME \n" +"Language-Team: LANGUAGE \n" +"Language: \n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=CHARSET\n" +"Content-Transfer-Encoding: 8bit\n" + +#. Type: string +#. Description +#: ../gnocchi-common.templates:2001 +msgid "Authentication server hostname:" +msgstr "" + +#. Type: string +#. Description +#: ../gnocchi-common.templates:2001 +msgid "" +"Please specify the hostname of the authentication server for Gnocchi. " +"Typically this is also the hostname of the OpenStack Identity Service " +"(Keystone)." +msgstr "" + +#. Type: string +#. Description +#. Translators: a "tenant" in OpenStack world is +#. an entity that contains one or more username/password couples. +#. It's typically the tenant that will be used for billing. Having more than one +#. username/password is very helpful in larger organization. +#. You're advised to either keep "tenant" without translating it +#. or keep it parenthezised. Example for French: +#. locataire ("tenant") +#: ../gnocchi-common.templates:3001 +msgid "Authentication server tenant name:" +msgstr "" + +#. Type: string +#. Description +#. 
Translators: a "tenant" in OpenStack world is +#. an entity that contains one or more username/password couples. +#. It's typically the tenant that will be used for billing. Having more than one +#. username/password is very helpful in larger organization. +#. You're advised to either keep "tenant" without translating it +#. or keep it parenthezised. Example for French: +#. locataire ("tenant") +#: ../gnocchi-common.templates:3001 +msgid "Please specify the authentication server tenant name." +msgstr "" + +#. Type: string +#. Description +#: ../gnocchi-common.templates:4001 +msgid "Authentication server username:" +msgstr "" + +#. Type: string +#. Description +#: ../gnocchi-common.templates:4001 +msgid "Please specify the username to use with the authentication server." +msgstr "" + +#. Type: password +#. Description +#: ../gnocchi-common.templates:5001 +msgid "Authentication server password:" +msgstr "" + +#. Type: password +#. Description +#: ../gnocchi-common.templates:5001 +msgid "Please specify the password to use with the authentication server." +msgstr "" + +#. Type: boolean +#. Description +#: ../gnocchi-common.templates:6001 +msgid "Set up a database for Gnocchi?" +msgstr "" + +#. Type: boolean +#. Description +#: ../gnocchi-common.templates:6001 +msgid "" +"No database has been set up for Gnocchi to use. Before continuing, you " +"should make sure you have the following information:" +msgstr "" + +#. Type: boolean +#. Description +#: ../gnocchi-common.templates:6001 +msgid "" +" * the type of database that you want to use;\n" +" * the database server hostname (that server must allow TCP connections from " +"this\n" +" machine);\n" +" * a username and password to access the database." +msgstr "" + +#. Type: boolean +#. Description +#: ../gnocchi-common.templates:6001 +msgid "" +"If some of these requirements are missing, do not choose this option and run " +"with regular SQLite support." +msgstr "" + +#. Type: boolean +#. 
Description +#: ../gnocchi-common.templates:6001 +msgid "" +"You can change this setting later on by running \"dpkg-reconfigure -plow " +"gnocchi-common\"." +msgstr "" + +#. Type: boolean +#. Description +#: ../gnocchi-api.templates:2001 +msgid "Register Gnocchi in the Keystone endpoint catalog?" +msgstr "" + +#. Type: boolean +#. Description +#: ../gnocchi-api.templates:2001 +msgid "" +"Each OpenStack service (each API) should be registered in order to be " +"accessible. This is done using \"keystone service-create\" and \"keystone " +"endpoint-create\". This can be done automatically now." +msgstr "" + +#. Type: boolean +#. Description +#: ../gnocchi-api.templates:2001 +msgid "" +"Note that you will need to have an up and running Keystone server on which " +"to connect using the Keystone authentication token." +msgstr "" + +#. Type: string +#. Description +#: ../gnocchi-api.templates:3001 +msgid "Keystone server IP address:" +msgstr "" + +#. Type: string +#. Description +#: ../gnocchi-api.templates:3001 +msgid "" +"Please enter the IP address of the Keystone server, so that gnocchi-api can " +"contact Keystone to do the Gnocchi service and endpoint creation." +msgstr "" + +#. Type: password +#. Description +#: ../gnocchi-api.templates:4001 +msgid "Keystone authentication token:" +msgstr "" + +#. Type: password +#. Description +#: ../gnocchi-api.templates:4001 +msgid "" +"To configure its endpoint in Keystone, gnocchi-api needs the Keystone " +"authentication token." +msgstr "" + +#. Type: string +#. Description +#: ../gnocchi-api.templates:5001 +msgid "Gnocchi endpoint IP address:" +msgstr "" + +#. Type: string +#. Description +#: ../gnocchi-api.templates:5001 +msgid "Please enter the IP address that will be used to contact Gnocchi." +msgstr "" + +#. Type: string +#. 
Description +#: ../gnocchi-api.templates:5001 +msgid "" +"This IP address should be accessible from the clients that will use this " +"service, so if you are installing a public cloud, this should be a public IP " +"address." +msgstr "" + +#. Type: string +#. Description +#: ../gnocchi-api.templates:6001 +msgid "Name of the region to register:" +msgstr "" + +#. Type: string +#. Description +#: ../gnocchi-api.templates:6001 +msgid "" +"OpenStack supports using availability zones, with each region representing a " +"location. Please enter the zone that you wish to use when registering the " +"endpoint." +msgstr "" diff --git a/debian/rules b/debian/rules index 34ba1c96..393faf93 100755 --- a/debian/rules +++ b/debian/rules @@ -12,7 +12,7 @@ UNIT_TEST_BLACKLIST = test_carbonara.CarbonaraCmd.* %: dh $@ --buildsystem=python_distutils --with python2,sphinxdoc -override_dh_install: +override_dh_auto_install: set -e ; for pyvers in $(PYTHONS); do \ python$$pyvers setup.py install --install-layout=deb \ --root $(CURDIR)/debian/python-gnocchi; \ @@ -32,6 +32,9 @@ override_dh_install: --namespace oslo.policy \ --namespace keystonemiddleware.auth_token /usr/share/openstack-pkg-tools/pkgos_insert_include pkgos_func gnocchi-common.postinst + /usr/share/openstack-pkg-tools/pkgos_insert_include pkgos_func gnocchi-common.config + /usr/share/openstack-pkg-tools/pkgos_insert_include pkgos_func gnocchi-api.postinst + /usr/share/openstack-pkg-tools/pkgos_insert_include pkgos_func gnocchi-api.config override_dh_auto_test: ifeq (,$(findstring nocheck, $(DEB_BUILD_OPTIONS))) @@ -76,14 +79,12 @@ override_dh_sphinxdoc: override_dh_clean: dh_clean -O--buildsystem=python_distutils - rm -rf build debian/gnocchi-common.postinst + rm -rf build debian/gnocchi-common.postinst debian/gnocchi-common.config debian/gnocchi-api.config debian/gnocchi-api.postinst # Commands not to run override_dh_installcatalogs: override_dh_installemacsen override_dh_installifupdown: override_dh_installinfo 
override_dh_installmenu override_dh_installmime: -override_dh_installmodules override_dh_installlogcheck: -override_dh_installpam override_dh_installppp override_dh_installudev override_dh_installwm: +override_dh_installmodules override_dh_installpam override_dh_installppp override_dh_installudev override_dh_installwm: override_dh_installxfonts override_dh_gconf override_dh_icons override_dh_perl override_dh_usrlocal: -override_dh_installcron override_dh_installdebconf: -override_dh_installlogrotate override_dh_installgsettings: +override_dh_installgsettings: -- GitLab From e5f9b8bf7e12578ee073771bb04ec3a65b1135ea Mon Sep 17 00:00:00 2001 From: Thomas Goirand Date: Wed, 25 Nov 2015 10:36:11 +0000 Subject: [PATCH 0034/1483] Fixed the db setup. --- debian/gnocchi-common.config.in | 2 +- debian/rules | 4 ++++ 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/debian/gnocchi-common.config.in b/debian/gnocchi-common.config.in index 98cc527f..3f7a1d9c 100644 --- a/debian/gnocchi-common.config.in +++ b/debian/gnocchi-common.config.in @@ -9,7 +9,7 @@ CONF=/etc/gnocchi/gnocchi.conf #PKGOS-INCLUDE# pkgos_var_user_group gnocchi -pkgos_dbc_read_conf -pkg gnocchi-common ${CONF} database connection gnocchi $@ +pkgos_dbc_read_conf -pkg gnocchi-common ${CONF} indexer url gnocchi $@ pkgos_read_admin_creds ${CONF} keystone_authtoken gnocchi exit 0 diff --git a/debian/rules b/debian/rules index 393faf93..8fd7f021 100755 --- a/debian/rules +++ b/debian/rules @@ -17,6 +17,8 @@ override_dh_auto_install: python$$pyvers setup.py install --install-layout=deb \ --root $(CURDIR)/debian/python-gnocchi; \ done + mkdir -p $(CURDIR)/debian/python-gnocchi/usr/lib/python2.7/dist-packages/gnocchi/indexer + cp -auxf gnocchi/indexer/alembic $(CURDIR)/debian/python-gnocchi/usr/lib/python2.7/dist-packages/gnocchi/indexer # set -e ; for pyvers in $(PYTHON3S); do \ # python$$pyvers setup.py install --install-layout=deb \ # --root $(CURDIR)/debian/python3-gnocchi; \ @@ -31,6 +33,8 @@ 
override_dh_auto_install: --namespace oslo.log \ --namespace oslo.policy \ --namespace keystonemiddleware.auth_token + sed -i 's|^[ \t#]*url[ \t#]*=.*|url = sqlite:////var/lib/gnocchi/gnocchidb|' $(CURDIR)/debian/gnocchi-common/usr/share/gnocchi-common/gnocchi.conf + /usr/share/openstack-pkg-tools/pkgos_insert_include pkgos_func gnocchi-common.postinst /usr/share/openstack-pkg-tools/pkgos_insert_include pkgos_func gnocchi-common.config /usr/share/openstack-pkg-tools/pkgos_insert_include pkgos_func gnocchi-api.postinst -- GitLab From 8a6111b4c7d2be538699d627496ae59667eba1d3 Mon Sep 17 00:00:00 2001 From: Thomas Goirand Date: Wed, 25 Nov 2015 10:40:54 +0000 Subject: [PATCH 0035/1483] Wired-in the dbsync --- debian/gnocchi-common.postinst.in | 3 +++ 1 file changed, 3 insertions(+) diff --git a/debian/gnocchi-common.postinst.in b/debian/gnocchi-common.postinst.in index 1c992ecd..55ee14fc 100644 --- a/debian/gnocchi-common.postinst.in +++ b/debian/gnocchi-common.postinst.in @@ -18,7 +18,10 @@ if [ "$1" = "configure" ] || [ "$1" = "reconfigure" ] ; then db_get gnocchi/configure_db if [ "$RET" = "true" ] ; then pkgos_dbc_postinst ${CONF} database connection gnocchi $@ + echo "Now calling gnocchi-dbsync: this may take a while..." + su -s /bin/sh -c 'gnocchi-dbsync' gnocchi fi + pkgos_write_admin_creds ${CONF} keystone_authtoken gnocchi fi #DEBHELPER# -- GitLab From ff3f23f061a395ccc66b24c6344660c890832339 Mon Sep 17 00:00:00 2001 From: Thomas Goirand Date: Wed, 25 Nov 2015 13:18:35 +0000 Subject: [PATCH 0036/1483] Adds missing config file: api-paste.ini. --- debian/changelog | 6 ++++++ debian/gnocchi-common.install | 1 + debian/gnocchi-common.postinst.in | 1 + 3 files changed, 8 insertions(+) diff --git a/debian/changelog b/debian/changelog index b1bcc21b..4e74e7d9 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +gnocchi (1.3.0-2) unstable; urgency=medium + + * Adds missing config file: api-paste.ini. 
+ + -- Thomas Goirand Wed, 25 Nov 2015 13:18:12 +0000 + gnocchi (1.3.0-1) unstable; urgency=medium * Initial release. (Closes: #799374) diff --git a/debian/gnocchi-common.install b/debian/gnocchi-common.install index 59169d06..cf6a7884 100644 --- a/debian/gnocchi-common.install +++ b/debian/gnocchi-common.install @@ -1 +1,2 @@ etc/gnocchi/policy.json /usr/share/gnocchi-common +etc/gnocchi/api-paste.ini /usr/share/gnocchi-common diff --git a/debian/gnocchi-common.postinst.in b/debian/gnocchi-common.postinst.in index 55ee14fc..ca04648d 100644 --- a/debian/gnocchi-common.postinst.in +++ b/debian/gnocchi-common.postinst.in @@ -14,6 +14,7 @@ if [ "$1" = "configure" ] || [ "$1" = "reconfigure" ] ; then pkgos_write_new_conf gnocchi gnocchi.conf pkgos_write_new_conf gnocchi policy.json + pkgos_write_new_conf gnocchi api-paste.ini db_get gnocchi/configure_db if [ "$RET" = "true" ] ; then -- GitLab From a763ec796725956f9868284bf078503b2268b8c5 Mon Sep 17 00:00:00 2001 From: Thomas Goirand Date: Wed, 25 Nov 2015 15:17:12 +0000 Subject: [PATCH 0037/1483] Fixed auth_protocol to be http by default.Fixed auth_protocol to be http by default. --- debian/changelog | 1 + debian/gnocchi-common.templates | 2 +- debian/rules | 1 + 3 files changed, 3 insertions(+), 1 deletion(-) diff --git a/debian/changelog b/debian/changelog index 4e74e7d9..81ff080f 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,6 +1,7 @@ gnocchi (1.3.0-2) unstable; urgency=medium * Adds missing config file: api-paste.ini. + * Fixed auth_protocol to be http by default. 
-- Thomas Goirand Wed, 25 Nov 2015 13:18:12 +0000 diff --git a/debian/gnocchi-common.templates b/debian/gnocchi-common.templates index 06a93e75..1c2bdbb1 100644 --- a/debian/gnocchi-common.templates +++ b/debian/gnocchi-common.templates @@ -16,7 +16,7 @@ _Description: Authentication server hostname: Template: gnocchi/admin-tenant-name Type: string -Default: service +Default: admin # Translators: a "tenant" in OpenStack world is # an entity that contains one or more username/password couples. # It's typically the tenant that will be used for billing. Having more than one diff --git a/debian/rules b/debian/rules index 8fd7f021..67247cbc 100755 --- a/debian/rules +++ b/debian/rules @@ -34,6 +34,7 @@ override_dh_auto_install: --namespace oslo.policy \ --namespace keystonemiddleware.auth_token sed -i 's|^[ \t#]*url[ \t#]*=.*|url = sqlite:////var/lib/gnocchi/gnocchidb|' $(CURDIR)/debian/gnocchi-common/usr/share/gnocchi-common/gnocchi.conf + sed -i 's|^[# \t]*auth_protocol[\t #]*=.*|auth_protocol = http|' $(CURDIR)/debian/gnocchi-common/usr/share/gnocchi-common/gnocchi.conf /usr/share/openstack-pkg-tools/pkgos_insert_include pkgos_func gnocchi-common.postinst /usr/share/openstack-pkg-tools/pkgos_insert_include pkgos_func gnocchi-common.config -- GitLab From b21267f8743687ae0c459e97d069d1a324486d2a Mon Sep 17 00:00:00 2001 From: Thomas Goirand Date: Wed, 25 Nov 2015 15:32:55 +0000 Subject: [PATCH 0038/1483] Added debconf translations imported from Glance. 
--- debian/changelog | 1 + debian/po/cs.po | 291 ++++++++++++++++++++++++++++++++++++++ debian/po/da.po | 252 +++++++++++++++++++++++++++++++++ debian/po/de.po | 338 +++++++++++++++++++++++++++++++++++++++++++++ debian/po/es.po | 281 +++++++++++++++++++++++++++++++++++++ debian/po/fr.po | 256 ++++++++++++++++++++++++++++++++++ debian/po/gl.po | 252 +++++++++++++++++++++++++++++++++ debian/po/it.po | 254 ++++++++++++++++++++++++++++++++++ debian/po/ja.po | 289 ++++++++++++++++++++++++++++++++++++++ debian/po/nl.po | 253 +++++++++++++++++++++++++++++++++ debian/po/pl.po | 256 ++++++++++++++++++++++++++++++++++ debian/po/pt.po | 252 +++++++++++++++++++++++++++++++++ debian/po/pt_BR.po | 260 ++++++++++++++++++++++++++++++++++ debian/po/ru.po | 251 +++++++++++++++++++++++++++++++++ debian/po/sv.po | 252 +++++++++++++++++++++++++++++++++ debian/po/zh_CN.po | 245 ++++++++++++++++++++++++++++++++ 16 files changed, 3983 insertions(+) create mode 100644 debian/po/cs.po create mode 100644 debian/po/da.po create mode 100644 debian/po/de.po create mode 100644 debian/po/es.po create mode 100644 debian/po/fr.po create mode 100644 debian/po/gl.po create mode 100644 debian/po/it.po create mode 100644 debian/po/ja.po create mode 100644 debian/po/nl.po create mode 100644 debian/po/pl.po create mode 100644 debian/po/pt.po create mode 100644 debian/po/pt_BR.po create mode 100644 debian/po/ru.po create mode 100644 debian/po/sv.po create mode 100644 debian/po/zh_CN.po diff --git a/debian/changelog b/debian/changelog index 81ff080f..127570cd 100644 --- a/debian/changelog +++ b/debian/changelog @@ -2,6 +2,7 @@ gnocchi (1.3.0-2) unstable; urgency=medium * Adds missing config file: api-paste.ini. * Fixed auth_protocol to be http by default. + * Added debconf translations imported from Glance. 
-- Thomas Goirand Wed, 25 Nov 2015 13:18:12 +0000 diff --git a/debian/po/cs.po b/debian/po/cs.po new file mode 100644 index 00000000..fce3884b --- /dev/null +++ b/debian/po/cs.po @@ -0,0 +1,291 @@ +# Czech PO debconf template translation of glance. +# Copyright (C) 2012 Michal Simunek +# This file is distributed under the same license as the glance package. +# Michal Simunek , 2012 - 2013. +# +msgid "" +msgstr "" +"Project-Id-Version: glance 2013.1.2-4\n" +"Report-Msgid-Bugs-To: gnocchi@packages.debian.org\n" +"POT-Creation-Date: 2015-11-25 09:24+0000\n" +"PO-Revision-Date: 2013-08-25 13:01+0200\n" +"Last-Translator: Michal Simunek \n" +"Language-Team: Czech \n" +"Language: cs\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=utf-8\n" +"Content-Transfer-Encoding: 8bit\n" + +#. Type: string +#. Description +#: ../gnocchi-common.templates:2001 +#, fuzzy +#| msgid "Auth server hostname:" +msgid "Authentication server hostname:" +msgstr "Název hostitele autentizačního serveru:" + +#. Type: string +#. Description +#: ../gnocchi-common.templates:2001 +msgid "" +"Please specify the hostname of the authentication server for Gnocchi. " +"Typically this is also the hostname of the OpenStack Identity Service " +"(Keystone)." +msgstr "" +"Zadejte prosím URL autentizačního serveru pro Gnocchi. Většinou je to také " +"URL OpenStack Identity Service (Keystone)." + +#. Type: string +#. Description +#. Translators: a "tenant" in OpenStack world is +#. an entity that contains one or more username/password couples. +#. It's typically the tenant that will be used for billing. Having more than one +#. username/password is very helpful in larger organization. +#. You're advised to either keep "tenant" without translating it +#. or keep it parenthezised. Example for French: +#. 
locataire ("tenant") +#: ../gnocchi-common.templates:3001 +#, fuzzy +#| msgid "Auth server tenant name:" +msgid "Authentication server tenant name:" +msgstr "Název nájemce pro autentizační server:" + +#. Type: string +#. Description +#. Translators: a "tenant" in OpenStack world is +#. an entity that contains one or more username/password couples. +#. It's typically the tenant that will be used for billing. Having more than one +#. username/password is very helpful in larger organization. +#. You're advised to either keep "tenant" without translating it +#. or keep it parenthezised. Example for French: +#. locataire ("tenant") +#: ../gnocchi-common.templates:3001 +msgid "Please specify the authentication server tenant name." +msgstr "" + +#. Type: string +#. Description +#: ../gnocchi-common.templates:4001 +#, fuzzy +#| msgid "Auth server username:" +msgid "Authentication server username:" +msgstr "Uživatel autentizačního serveru:" + +#. Type: string +#. Description +#: ../gnocchi-common.templates:4001 +msgid "Please specify the username to use with the authentication server." +msgstr "" + +#. Type: password +#. Description +#: ../gnocchi-common.templates:5001 +#, fuzzy +#| msgid "Auth server password:" +msgid "Authentication server password:" +msgstr "Heslo autentizačního serveru:" + +#. Type: password +#. Description +#: ../gnocchi-common.templates:5001 +msgid "Please specify the password to use with the authentication server." +msgstr "" + +#. Type: boolean +#. Description +#: ../gnocchi-common.templates:6001 +#, fuzzy +#| msgid "Set up a database for glance?" +msgid "Set up a database for Gnocchi?" +msgstr "Nastavit databázi pro glance?" + +#. Type: boolean +#. Description +#: ../gnocchi-common.templates:6001 +#, fuzzy +#| msgid "" +#| "No database has been set up for glance-registry or glance-api to use. " +#| "Before continuing, you should make sure you have:" +msgid "" +"No database has been set up for Gnocchi to use. 
Before continuing, you " +"should make sure you have the following information:" +msgstr "" +"glance-registry, nebo glance-api, nemá nastavenu žádnou databázi k " +"používání. Před tím, než budete pokračovat se ujistěte že máte:" + +#. Type: boolean +#. Description +#: ../gnocchi-common.templates:6001 +#, fuzzy +#| msgid "" +#| " - the server host name (that server must allow TCP connections from " +#| "this\n" +#| " machine);\n" +#| " - a username and password to access the database.\n" +#| " - A database type that you want to use." +msgid "" +" * the type of database that you want to use;\n" +" * the database server hostname (that server must allow TCP connections from " +"this\n" +" machine);\n" +" * a username and password to access the database." +msgstr "" +" - název hostitelského serveru (tento server musí přijímat TCP spojení\n" +" z tohoto počítače);\n" +" - uživatelské jméno a heslo pro přístup k databázi.\n" +" - Typ databáze, kterou chcete používat." + +#. Type: boolean +#. Description +#: ../gnocchi-common.templates:6001 +#, fuzzy +#| msgid "" +#| "If some of these requirements are missing, reject this option and run " +#| "with regular sqlite support." +msgid "" +"If some of these requirements are missing, do not choose this option and run " +"with regular SQLite support." +msgstr "" +"Pokud některou z těchto povinných voleb neznáte, přeskočte ji a glance " +"spouštějte s běžnou podporou sqlite." + +#. Type: boolean +#. Description +#: ../gnocchi-common.templates:6001 +#, fuzzy +#| msgid "" +#| "You can change this setting later on by running 'dpkg-reconfigure -plow " +#| "glance-common'." +msgid "" +"You can change this setting later on by running \"dpkg-reconfigure -plow " +"gnocchi-common\"." +msgstr "" +"Toto nastavení můžete později změnit spuštěním 'dpkg-reconfigure -plow " +"glance-common'." + +#. Type: boolean +#. Description +#: ../gnocchi-api.templates:2001 +msgid "Register Gnocchi in the Keystone endpoint catalog?" 
+msgstr "Zaregistrovat Gnocchi v katalogu koncových bodů keystone?" + +#. Type: boolean +#. Description +#: ../gnocchi-api.templates:2001 +#, fuzzy +#| msgid "" +#| "Each Openstack services (each API) should be registered in order to be " +#| "accessible. This is done using \"keystone service-create\" and \"keystone " +#| "endpoint-create\". Select if you want to run these commands now." +msgid "" +"Each OpenStack service (each API) should be registered in order to be " +"accessible. This is done using \"keystone service-create\" and \"keystone " +"endpoint-create\". This can be done automatically now." +msgstr "" +"Aby byla každá služba Openstack (každé API) přístupná, musí být " +"zaregistrována. To se provádí pomocí příkazů \"keystone service-create\" a " +"\"keystone endpoint-create\". Zvolte si, zda-li se tyto příkazy mají nyní " +"spustit." + +#. Type: boolean +#. Description +#: ../gnocchi-api.templates:2001 +#, fuzzy +#| msgid "" +#| "Note that you will need to have an up and running keystone server on " +#| "which to connect using the Keystone auth token." +msgid "" +"Note that you will need to have an up and running Keystone server on which " +"to connect using the Keystone authentication token." +msgstr "" +"Berte na vědomí, že musíte mít běžící server keystone, na který se lze " +"připojit pomocí ověřovacího klíče pro Keystone." + +#. Type: string +#. Description +#: ../gnocchi-api.templates:3001 +#, fuzzy +#| msgid "Keystone IP address:" +msgid "Keystone server IP address:" +msgstr "IP adresa serveru keystone:" + +#. Type: string +#. Description +#: ../gnocchi-api.templates:3001 +msgid "" +"Please enter the IP address of the Keystone server, so that gnocchi-api can " +"contact Keystone to do the Gnocchi service and endpoint creation." +msgstr "" +"Zadejte IP adresu serveru keystone, aby se mohlo glance-api spojit s " +"Keystone a provozovat službu Gnocchi a vytvářet koncové body." + +#. Type: password +#. 
Description +#: ../gnocchi-api.templates:4001 +#, fuzzy +#| msgid "Keystone Auth Token:" +msgid "Keystone authentication token:" +msgstr "Autentizační klíč pro Keystone:" + +#. Type: password +#. Description +#: ../gnocchi-api.templates:4001 +#, fuzzy +#| msgid "" +#| "To configure its endpoint in Keystone, glance-api needs the Keystone auth " +#| "token." +msgid "" +"To configure its endpoint in Keystone, gnocchi-api needs the Keystone " +"authentication token." +msgstr "" +"Aby mohlo glance-api nastavit v Keystone svůj koncový bod, potřebuje " +"autentizační klíč pro Keystone." + +#. Type: string +#. Description +#: ../gnocchi-api.templates:5001 +msgid "Gnocchi endpoint IP address:" +msgstr "IP adresa koncového bodu Gnocchi:" + +#. Type: string +#. Description +#: ../gnocchi-api.templates:5001 +msgid "Please enter the IP address that will be used to contact Gnocchi." +msgstr "" +"Zadejte IP adresu, která se bude používat ke spojení s Gnocchi (např: IP " +"adresa koncového bodu Gnocchi)." + +#. Type: string +#. Description +#: ../gnocchi-api.templates:5001 +msgid "" +"This IP address should be accessible from the clients that will use this " +"service, so if you are installing a public cloud, this should be a public IP " +"address." +msgstr "" +"Tato IP adresa musí být přístupná z klientů, kteří budou tuto službu " +"používat, takže pokud instalujete veřejný cloud, musí to být veřejná IP " +"adresa." + +#. Type: string +#. Description +#: ../gnocchi-api.templates:6001 +msgid "Name of the region to register:" +msgstr "Název registrované oblasti:" + +#. Type: string +#. Description +#: ../gnocchi-api.templates:6001 +#, fuzzy +#| msgid "" +#| "Openstack can be used using availability zones, with each region " +#| "representing a location. Please enter the zone that you wish to use when " +#| "registering the endpoint." +msgid "" +"OpenStack supports using availability zones, with each region representing a " +"location. 
Please enter the zone that you wish to use when registering the " +"endpoint." +msgstr "" +"Openstack lze využívat pomocí oblastí dostupnosti, přičemž každá oblast " +"představuje místo. Zadejte prosím oblast, kterou chcete použít při " +"registraci koncového bodu." diff --git a/debian/po/da.po b/debian/po/da.po new file mode 100644 index 00000000..a67a0ab0 --- /dev/null +++ b/debian/po/da.po @@ -0,0 +1,252 @@ +# Danish translation glance. +# Copyright (C) 2014 glance & nedenstående oversættere. +# This file is distributed under the same license as the glance package. +# Joe Hansen (joedalton2@yahoo.dk), 2012, 2013, 2014. +# +msgid "" +msgstr "" +"Project-Id-Version: glance\n" +"Report-Msgid-Bugs-To: gnocchi@packages.debian.org\n" +"POT-Creation-Date: 2015-11-25 09:24+0000\n" +"PO-Revision-Date: 2014-02-22 12:42+0000\n" +"Last-Translator: Joe Hansen \n" +"Language-Team: Danish \n" +"Language: da\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" + +#. Type: string +#. Description +#: ../gnocchi-common.templates:2001 +msgid "Authentication server hostname:" +msgstr "Værtsnavn for godkendelsesserver:" + +#. Type: string +#. Description +#: ../gnocchi-common.templates:2001 +msgid "" +"Please specify the hostname of the authentication server for Gnocchi. " +"Typically this is also the hostname of the OpenStack Identity Service " +"(Keystone)." +msgstr "" +"Angiv venligst adressen for din godkendelsesserver for Gnocchi. Typisk er " +"dette også adressen for din OpenStack Identity Service (Keystone)." + +#. Type: string +#. Description +#. Translators: a "tenant" in OpenStack world is +#. an entity that contains one or more username/password couples. +#. It's typically the tenant that will be used for billing. Having more than one +#. username/password is very helpful in larger organization. +#. You're advised to either keep "tenant" without translating it +#. or keep it parenthezised. Example for French: +#. 
locataire ("tenant") +#: ../gnocchi-common.templates:3001 +msgid "Authentication server tenant name:" +msgstr "Lejenavn (tenant) for godkendelsesserver:" + +#. Type: string +#. Description +#. Translators: a "tenant" in OpenStack world is +#. an entity that contains one or more username/password couples. +#. It's typically the tenant that will be used for billing. Having more than one +#. username/password is very helpful in larger organization. +#. You're advised to either keep "tenant" without translating it +#. or keep it parenthezised. Example for French: +#. locataire ("tenant") +#: ../gnocchi-common.templates:3001 +msgid "Please specify the authentication server tenant name." +msgstr "Angiv venligst lejenavn (tenant) for godkendelsesserveren." + +#. Type: string +#. Description +#: ../gnocchi-common.templates:4001 +msgid "Authentication server username:" +msgstr "Brugernavn for godkendelsesserver:" + +#. Type: string +#. Description +#: ../gnocchi-common.templates:4001 +msgid "Please specify the username to use with the authentication server." +msgstr "Angiv venligst brugernavnet der skal bruges med godkendelsesserveren." + +#. Type: password +#. Description +#: ../gnocchi-common.templates:5001 +msgid "Authentication server password:" +msgstr "Adgangskode for godkendelsesserver:" + +#. Type: password +#. Description +#: ../gnocchi-common.templates:5001 +msgid "Please specify the password to use with the authentication server." +msgstr "Angiv venligst adgangskoden der skal bruges med godkendelsesserveren." + +#. Type: boolean +#. Description +#: ../gnocchi-common.templates:6001 +msgid "Set up a database for Gnocchi?" +msgstr "Opsæt en database for Gnocchi?" + +#. Type: boolean +#. Description +#: ../gnocchi-common.templates:6001 +#, fuzzy +#| msgid "" +#| "No database has been set up for glance-registry or glance-api to use. 
" +#| "Before continuing, you should make sure you have the following " +#| "information:" +msgid "" +"No database has been set up for Gnocchi to use. Before continuing, you " +"should make sure you have the following information:" +msgstr "" +"Ingen database er blevet opsat som glance-registry eller glance-api kan " +"bruge. Før du fortsætter, skal du sikre dig, at du har den følgende " +"information:" + +#. Type: boolean +#. Description +#: ../gnocchi-common.templates:6001 +msgid "" +" * the type of database that you want to use;\n" +" * the database server hostname (that server must allow TCP connections from " +"this\n" +" machine);\n" +" * a username and password to access the database." +msgstr "" +" * databasetypen som du ønsker at bruge\n" +" * serverens værtsnavn (denne server skal tillade TCP-forbindelser\n" +" fra denne maskine)\n" +" * et brugernavn og adgangskode til at tilgå databasen" + +#. Type: boolean +#. Description +#: ../gnocchi-common.templates:6001 +msgid "" +"If some of these requirements are missing, do not choose this option and run " +"with regular SQLite support." +msgstr "" +"Hvis nogle af disse krav mangler så afvis denne indstilling og kør med " +"normal SQLite-understøttelse." + +#. Type: boolean +#. Description +#: ../gnocchi-common.templates:6001 +#, fuzzy +#| msgid "" +#| "You can change this setting later on by running \"dpkg-reconfigure -plow " +#| "glance-common\"." +msgid "" +"You can change this setting later on by running \"dpkg-reconfigure -plow " +"gnocchi-common\"." +msgstr "" +"Du kan ændre denne indstilling senere ved at køre »dpkg-reconfigure -plow " +"glance-common«." + +#. Type: boolean +#. Description +#: ../gnocchi-api.templates:2001 +msgid "Register Gnocchi in the Keystone endpoint catalog?" +msgstr "Registrer Gnocchi i Keystones slutpunktskatalog?" + +#. Type: boolean +#. Description +#: ../gnocchi-api.templates:2001 +msgid "" +"Each OpenStack service (each API) should be registered in order to be " +"accessible. 
This is done using \"keystone service-create\" and \"keystone " +"endpoint-create\". This can be done automatically now." +msgstr "" +"Hver Openstacktjeneste (hver API) skal være registreret for at kunne tilgås. " +"Dette gøres med »keystone service-create« og »keystone endpoint-create«. " +"Dette kan gøres automatiks nu." + +#. Type: boolean +#. Description +#: ../gnocchi-api.templates:2001 +msgid "" +"Note that you will need to have an up and running Keystone server on which " +"to connect using the Keystone authentication token." +msgstr "" +"Bemærk at du skal have en op og kørende Keystoneserver, som du skal forbinde " +"til med Keystones godkendelsessymbol." + +#. Type: string +#. Description +#: ../gnocchi-api.templates:3001 +msgid "Keystone server IP address:" +msgstr "IP-adresse for Keystoneserver:" + +#. Type: string +#. Description +#: ../gnocchi-api.templates:3001 +msgid "" +"Please enter the IP address of the Keystone server, so that gnocchi-api can " +"contact Keystone to do the Gnocchi service and endpoint creation." +msgstr "" +"Indtast venligst IP-adressen for Keystoneserveren, så at glance-api kan " +"kontakte Keystone for at udføre Gnocchitjenesten og slutpunktsoprettelse." + +#. Type: password +#. Description +#: ../gnocchi-api.templates:4001 +msgid "Keystone authentication token:" +msgstr "Godkendelsessymbol for Keystone:" + +#. Type: password +#. Description +#: ../gnocchi-api.templates:4001 +#, fuzzy +#| msgid "" +#| "To configure its endpoint in Keystone, glance-api needs the Keystone " +#| "authentication token." +msgid "" +"To configure its endpoint in Keystone, gnocchi-api needs the Keystone " +"authentication token." +msgstr "" +"For at konfigurere dets slutpunkt i Keystone, kræver glance-api Keystones " +"godkendelsessymbol." + +#. Type: string +#. Description +#: ../gnocchi-api.templates:5001 +msgid "Gnocchi endpoint IP address:" +msgstr "IP-adresse for Gnochis slutpunkt:" + +#. Type: string +#. 
Description +#: ../gnocchi-api.templates:5001 +msgid "Please enter the IP address that will be used to contact Gnocchi." +msgstr "" +"Indtast venligst IP-adressen som vil blive brugt til at kontakte Gnocchi." + +#. Type: string +#. Description +#: ../gnocchi-api.templates:5001 +msgid "" +"This IP address should be accessible from the clients that will use this " +"service, so if you are installing a public cloud, this should be a public IP " +"address." +msgstr "" +"Denne IP-adresse skal være tilgængelig fra klienterne, som vil bruge denne " +"tjeneste, så hvis du installerer en offentlig sky, skal dette være en " +"offentlig IP-adresse." + +#. Type: string +#. Description +#: ../gnocchi-api.templates:6001 +msgid "Name of the region to register:" +msgstr "Navn på regionen der skal registreres:" + +#. Type: string +#. Description +#: ../gnocchi-api.templates:6001 +msgid "" +"OpenStack supports using availability zones, with each region representing a " +"location. Please enter the zone that you wish to use when registering the " +"endpoint." +msgstr "" +"OpenStack understøtter brug af tilgængelighedszoner, hvor hver region " +"repræsenterer et sted. Indtast venligst zonen du ønsker at bruge, når " +"slutpunktet registreres." diff --git a/debian/po/de.po b/debian/po/de.po new file mode 100644 index 00000000..147bd249 --- /dev/null +++ b/debian/po/de.po @@ -0,0 +1,338 @@ +# German debconf translation of glance. +# This file is distributed under the same license as the glance package. +# Copyright (C) 2010 United States Government,2010-2011 OpenStack LLC. +# Copyright (C) of this file 2012-2014 Chris Leick . 
+# +msgid "" +msgstr "" +"Project-Id-Version: glance 2013.2.1-1\n" +"Report-Msgid-Bugs-To: gnocchi@packages.debian.org\n" +"POT-Creation-Date: 2015-11-25 09:24+0000\n" +"PO-Revision-Date: 2014-01-09 22:51+0100\n" +"Last-Translator: Chris Leick \n" +"Language-Team: German \n" +"Language: de\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" + +#. Type: string +#. Description +#: ../gnocchi-common.templates:2001 +msgid "Authentication server hostname:" +msgstr "Rechnername des Authentifizierungsservers:" + +#. Type: string +#. Description +#: ../gnocchi-common.templates:2001 +#, fuzzy +#| msgid "" +#| "Please specify the hostname of the authentication server for Glance. " +#| "Typically this is also the hostname of the OpenStack Identity Service " +#| "(Keystone)." +msgid "" +"Please specify the hostname of the authentication server for Gnocchi. " +"Typically this is also the hostname of the OpenStack Identity Service " +"(Keystone)." +msgstr "" +"Bitte geben Sie den Rechnernamen des Glance-Authentifizierungsservers an. " +"Typischerweise ist das gleichzeitig der Rechnername Ihres OpenStack-" +"Identitätsdienstes (Keystone)." + +#. Type: string +#. Description +#. Translators: a "tenant" in OpenStack world is +#. an entity that contains one or more username/password couples. +#. It's typically the tenant that will be used for billing. Having more than one +#. username/password is very helpful in larger organization. +#. You're advised to either keep "tenant" without translating it +#. or keep it parenthezised. Example for French: +#. locataire ("tenant") +#: ../gnocchi-common.templates:3001 +msgid "Authentication server tenant name:" +msgstr "Tenant-Name des Authentifizierungsservers:" + +#. Type: string +#. Description +#. Translators: a "tenant" in OpenStack world is +#. an entity that contains one or more username/password couples. +#. It's typically the tenant that will be used for billing. 
Having more than one +#. username/password is very helpful in larger organization. +#. You're advised to either keep "tenant" without translating it +#. or keep it parenthezised. Example for French: +#. locataire ("tenant") +#: ../gnocchi-common.templates:3001 +msgid "Please specify the authentication server tenant name." +msgstr "Bitte geben Sie den Tenant-Namen des Authentifizierungsservers an." + +#. Type: string +#. Description +#: ../gnocchi-common.templates:4001 +msgid "Authentication server username:" +msgstr "Benutzername des Authentifizierungsservers:" + +#. Type: string +#. Description +#: ../gnocchi-common.templates:4001 +msgid "Please specify the username to use with the authentication server." +msgstr "" +"Bitte geben Sie den Benutzernamen an, der für den Authentifizierungsserver " +"benutzt wird." + +#. Type: password +#. Description +#: ../gnocchi-common.templates:5001 +msgid "Authentication server password:" +msgstr "Passwort des Authentifizierungsservers:" + +#. Type: password +#. Description +#: ../gnocchi-common.templates:5001 +msgid "Please specify the password to use with the authentication server." +msgstr "" +"Bitte geben Sie das Passwort an, das für den Authentifizierungsserver " +"benutzt wird." + +#. Type: boolean +#. Description +#: ../gnocchi-common.templates:6001 +#, fuzzy +#| msgid "Set up a database for Glance?" +msgid "Set up a database for Gnocchi?" +msgstr "Eine Datenbank für Glance einrichten?" + +#. Type: boolean +#. Description +#: ../gnocchi-common.templates:6001 +#, fuzzy +#| msgid "" +#| "No database has been set up for glance-registry or glance-api to use. " +#| "Before continuing, you should make sure you have the following " +#| "information:" +msgid "" +"No database has been set up for Gnocchi to use. Before continuing, you " +"should make sure you have the following information:" +msgstr "" +"Es wurde keine Datenbank für die Benutzung mit der Glance-Registry oder das " +"Glance-API eingerichtet. 
Bevor Sie fortfahren, sollten Sie sicherstellen, " +"dass Sie die folgenden Informationen haben:" + +#. Type: boolean +#. Description +#: ../gnocchi-common.templates:6001 +msgid "" +" * the type of database that you want to use;\n" +" * the database server hostname (that server must allow TCP connections from " +"this\n" +" machine);\n" +" * a username and password to access the database." +msgstr "" +" * einen Datenbanktyp, den Sie verwenden möchten\n" +" * den Rechnernamen des Datenbankservers (dieser Server muss TCP-" +"Verbindungen\n" +" von diesem Rechner erlauben)\n" +" * einen Benutzernamen und ein Passwort, um auf die Datenbank zuzugreifen" + +#. Type: boolean +#. Description +#: ../gnocchi-common.templates:6001 +msgid "" +"If some of these requirements are missing, do not choose this option and run " +"with regular SQLite support." +msgstr "" +"Falls einige dieser Anforderungen nicht erfüllt sind, wählen Sie diese " +"Option nicht und verwenden Sie stattdessen die reguläre Sqlite-Unterstützung." + +#. Type: boolean +#. Description +#: ../gnocchi-common.templates:6001 +#, fuzzy +#| msgid "" +#| "You can change this setting later on by running \"dpkg-reconfigure -plow " +#| "glance-common\"." +msgid "" +"You can change this setting later on by running \"dpkg-reconfigure -plow " +"gnocchi-common\"." +msgstr "" +"Sie können diese Einstellung später ändern, indem Sie »dpkg-reconfigure -" +"plow glance-common« ausführen." + +#. Type: boolean +#. Description +#: ../gnocchi-api.templates:2001 +#, fuzzy +#| msgid "Register Glance in the Keystone endpoint catalog?" +msgid "Register Gnocchi in the Keystone endpoint catalog?" +msgstr "Glance im Keystone-Endpunktkatalog registrieren?" + +#. Type: boolean +#. Description +#: ../gnocchi-api.templates:2001 +msgid "" +"Each OpenStack service (each API) should be registered in order to be " +"accessible. This is done using \"keystone service-create\" and \"keystone " +"endpoint-create\". This can be done automatically now." 
+msgstr "" +"Jeder OpenStack-Dienst (jedes API) sollte registriert werden, damit darauf " +"zugegriffen werden kann. Dies wird mittels »keystone service-create« und " +"»keystone endpoint-create« erreicht und kann nun automatisch erledigt werden." + +#. Type: boolean +#. Description +#: ../gnocchi-api.templates:2001 +msgid "" +"Note that you will need to have an up and running Keystone server on which " +"to connect using the Keystone authentication token." +msgstr "" +"Beachten Sie, dass Sie einen gestarteten und laufenden Keystone-Server haben " +"müssen, mit dem Sie sich anhand des Keystone-Authentifizierungs-Tokens " +"verbinden." + +#. Type: string +#. Description +#: ../gnocchi-api.templates:3001 +msgid "Keystone server IP address:" +msgstr "IP-Adresse des Keystone-Servers:" + +#. Type: string +#. Description +#: ../gnocchi-api.templates:3001 +#, fuzzy +#| msgid "" +#| "Please enter the IP address of the Keystone server, so that glance-api " +#| "can contact Keystone to do the Glance service and endpoint creation." +msgid "" +"Please enter the IP address of the Keystone server, so that gnocchi-api can " +"contact Keystone to do the Gnocchi service and endpoint creation." +msgstr "" +"Bitte geben Sie die IP-Adresse des Keystone-Servers an, so dass Glance-API " +"Keystone kontaktieren kann, um den Glance-Dienst und den Endpunkt zu " +"erstellen." + +#. Type: password +#. Description +#: ../gnocchi-api.templates:4001 +msgid "Keystone authentication token:" +msgstr "Keystone-Authentifizierungs-Token:" + +#. Type: password +#. Description +#: ../gnocchi-api.templates:4001 +#, fuzzy +#| msgid "" +#| "To configure its endpoint in Keystone, glance-api needs the Keystone " +#| "authentication token." +msgid "" +"To configure its endpoint in Keystone, gnocchi-api needs the Keystone " +"authentication token." +msgstr "" +"Glance-API benötigt das Keystone-Authentifizierungs-Token, um seinen " +"Endpunkt in Keystone zu konfigurieren." + +#. Type: string +#. 
Description +#: ../gnocchi-api.templates:5001 +#, fuzzy +#| msgid "Glance endpoint IP address:" +msgid "Gnocchi endpoint IP address:" +msgstr "IP-Adresse des Glance-Endpunkts" + +#. Type: string +#. Description +#: ../gnocchi-api.templates:5001 +#, fuzzy +#| msgid "Please enter the IP address that will be used to contact Glance." +msgid "Please enter the IP address that will be used to contact Gnocchi." +msgstr "" +"Bitte geben Sie die IP-Adresse ein, die zum Kontaktieren von Glance benutzt " +"wird." + +#. Type: string +#. Description +#: ../gnocchi-api.templates:5001 +msgid "" +"This IP address should be accessible from the clients that will use this " +"service, so if you are installing a public cloud, this should be a public IP " +"address." +msgstr "" +"Auf diese IP-Adresse sollte von den Clients, die diesen Dienst verwenden, " +"zugegriffen werden können, daher sollte sie, falls Sie eine öffentliche " +"Cloud installieren, eine öffentliche IP-Adresse sein." + +#. Type: string +#. Description +#: ../gnocchi-api.templates:6001 +msgid "Name of the region to register:" +msgstr "Name der Region, die registriert wird:" + +#. Type: string +#. Description +#: ../gnocchi-api.templates:6001 +msgid "" +"OpenStack supports using availability zones, with each region representing a " +"location. Please enter the zone that you wish to use when registering the " +"endpoint." +msgstr "" +"OpenStack unterstützt die Verwendung von Verfügbarkeitszonen, bei der jede " +"Region einen Ort repräsentiert. Bitte geben Sie die Zone, die Sie benutzen " +"möchten, bei der Registrierung des Endpunkts an." 
+ +#~ msgid "keystone" +#~ msgstr "Keystone" + +#~ msgid "caching" +#~ msgstr "Zwischenspeichern" + +#~ msgid "keystone+caching" +#~ msgstr "Keystone+Zwischenspeichern" + +#~ msgid "cachemanagement" +#~ msgstr "Zwischenspeicherverwaltung" + +#~ msgid "keystone+cachemanagement" +#~ msgstr "Keystone+Zwischenspeicherverwaltung" + +#~ msgid "Pipeline flavor:" +#~ msgstr "Pipeline-Variante:" + +#~ msgid "Please specify the flavor of the pipeline to be used by Glance." +#~ msgstr "" +#~ "Bitte geben Sie die Variante der von Glance zu benutzenden Pipeline an." + +#~ msgid "" +#~ "If you use the OpenStack Identity Service (Keystone), you might want to " +#~ "select \"keystone\". If you don't use this service, you can safely choose " +#~ "\"caching\" only." +#~ msgstr "" +#~ "Falls Sie den OpenStack-Identitätsdienst (Keystone) verwenden, möchten " +#~ "Sie möglicherweise »Keystone« auswählen. Falls Sie diesen Dienst nicht " +#~ "nutzen, können Sie problemlos »Zwischenspeichern« auswählen." + +#~ msgid "IP address of your RabbitMQ host:" +#~ msgstr "IP-Adresse Ihres RabbitMQ-Rechners:" + +#~ msgid "" +#~ "In order to interoperate with other components of OpenStack, this package " +#~ "needs to connect to a central RabbitMQ server." +#~ msgstr "" +#~ "Um mit weiteren Bestandteilen von OpenStack zusammenzuarbeiten, muss sich " +#~ "dieses Paket mit einem zentralen RabbitMQ-Server verbinden." + +#~ msgid "Please specify the IP address of that server." +#~ msgstr "Bitte geben Sie die IP-Adresse dieses Servers an." + +#~ msgid "Username for connection to the RabbitMQ server:" +#~ msgstr "Benutzername für die Verbindung mit dem RabbitMQ-Server:" + +#~ msgid "Please specify the username used to connect to the RabbitMQ server." +#~ msgstr "" +#~ "Bitte geben Sie den Benutzernamen ein, den Sie zum Verbinden mit dem " +#~ "RabbitMQ-Server verwenden." 
+ +#~ msgid "Password for connection to the RabbitMQ server:" +#~ msgstr "Passwort für die Verbindung mit dem RabbitMQ-Server:" + +#~ msgid "Please specify the password used to connect to the RabbitMQ server." +#~ msgstr "" +#~ "Bitte geben Sie das Passwort ein, das Sie zum Verbinden mit dem RabbitMQ-" +#~ "Server verwenden." diff --git a/debian/po/es.po b/debian/po/es.po new file mode 100644 index 00000000..50ed8925 --- /dev/null +++ b/debian/po/es.po @@ -0,0 +1,281 @@ +# glance po-debconf translation to Spanish +# Copyright (C) 2010 Software in the Public Interest +# This file is distributed under the same license as the glance package. +# +# Changes: +# - Initial translation +# Camaleón , 2012, 2013. +# +# - Updates +# +# +# Traductores, si no conocen el formato PO, merece la pena leer la +# documentación de gettext, especialmente las secciones dedicadas a este +# formato, por ejemplo ejecutando: +# info -n '(gettext)PO Files' +# info -n '(gettext)Header Entry' +# +# Equipo de traducción al español, por favor lean antes de traducir +# los siguientes documentos: +# +# - El proyecto de traducción de Debian al español +# http://www.debian.org/intl/spanish/ +# especialmente las notas y normas de traducción en +# http://www.debian.org/intl/spanish/notas +# +# - La guía de traducción de po's de debconf: +# /usr/share/doc/po-debconf/README-trans +# o http://www.debian.org/intl/l10n/po-debconf/README-trans +# +msgid "" +msgstr "" +"Project-Id-Version: glance 2012.1-3\n" +"Report-Msgid-Bugs-To: gnocchi@packages.debian.org\n" +"POT-Creation-Date: 2015-11-25 09:24+0000\n" +"PO-Revision-Date: 2013-10-19 11:01+0200\n" +"Last-Translator: Camaleón \n" +"Language-Team: Debian Spanish \n" +"Language: es\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" +"X-Generator: Virtaal 0.7.1\n" + +#. Type: string +#. 
Description +#: ../gnocchi-common.templates:2001 +msgid "Authentication server hostname:" +msgstr "Nombre del equipo del servidor de autenticación:" + +#. Type: string +#. Description +#: ../gnocchi-common.templates:2001 +msgid "" +"Please specify the hostname of the authentication server for Gnocchi. " +"Typically this is also the hostname of the OpenStack Identity Service " +"(Keystone)." +msgstr "" +"Indique el nombre del equipo del servidor de autenticación de Gnocchi. Suele " +"ser el nombre del equipo del Servicio de Identidad de OpenStack (Keystone)." + +#. Type: string +#. Description +#. Translators: a "tenant" in OpenStack world is +#. an entity that contains one or more username/password couples. +#. It's typically the tenant that will be used for billing. Having more than one +#. username/password is very helpful in larger organization. +#. You're advised to either keep "tenant" without translating it +#. or keep it parenthezised. Example for French: +#. locataire ("tenant") +#: ../gnocchi-common.templates:3001 +msgid "Authentication server tenant name:" +msgstr "Nombre del inquilino («tenant») del servidor de autenticación:" + +#. Type: string +#. Description +#. Translators: a "tenant" in OpenStack world is +#. an entity that contains one or more username/password couples. +#. It's typically the tenant that will be used for billing. Having more than one +#. username/password is very helpful in larger organization. +#. You're advised to either keep "tenant" without translating it +#. or keep it parenthezised. Example for French: +#. locataire ("tenant") +#: ../gnocchi-common.templates:3001 +msgid "Please specify the authentication server tenant name." +msgstr "" +"Indique el nombre del inquilino («tenant») del servidor de autenticación." + +#. Type: string +#. Description +#: ../gnocchi-common.templates:4001 +msgid "Authentication server username:" +msgstr "Nombre de usuario del servidor de autenticación:" + +#. Type: string +#. 
Description +#: ../gnocchi-common.templates:4001 +msgid "Please specify the username to use with the authentication server." +msgstr "" +"Indique el nombre de usuario para usar con el servidor de autenticación." + +#. Type: password +#. Description +#: ../gnocchi-common.templates:5001 +msgid "Authentication server password:" +msgstr "Contraseña del servidor de autenticación:" + +#. Type: password +#. Description +#: ../gnocchi-common.templates:5001 +msgid "Please specify the password to use with the authentication server." +msgstr "Indique la contraseña para usar con del servidor de autenticación." + +#. Type: boolean +#. Description +#: ../gnocchi-common.templates:6001 +msgid "Set up a database for Gnocchi?" +msgstr "¿Desea configurar una base de datos para Gnocchi?" + +#. Type: boolean +#. Description +#: ../gnocchi-common.templates:6001 +#, fuzzy +#| msgid "" +#| "No database has been set up for glance-registry or glance-api to use. " +#| "Before continuing, you should make sure you have the following " +#| "information:" +msgid "" +"No database has been set up for Gnocchi to use. Before continuing, you " +"should make sure you have the following information:" +msgstr "" +"No se ha configurado ninguna base de datos para glance-registry o glance-" +"api. Antes de continuar debe asegurarse de que dispone de los siguientes " +"datos:" + +#. Type: boolean +#. Description +#: ../gnocchi-common.templates:6001 +msgid "" +" * the type of database that you want to use;\n" +" * the database server hostname (that server must allow TCP connections from " +"this\n" +" machine);\n" +" * a username and password to access the database." +msgstr "" +" * el tipo de base de datos que quiere utilizar;\n" +" * el nombre del equipo del servidor de la base de datos (el servidor debe " +"permitir conexiones TCP desde este equipo).\n" +" * el nombre de usuario y la contraseña para acceder a la base de datos." + +#. Type: boolean +#. 
Description +#: ../gnocchi-common.templates:6001 +msgid "" +"If some of these requirements are missing, do not choose this option and run " +"with regular SQLite support." +msgstr "" +"Si no dispone de alguno de estos datos, seleccione «no» en este apartado y " +"ejecute Gnocchi con SQLite." + +#. Type: boolean +#. Description +#: ../gnocchi-common.templates:6001 +#, fuzzy +#| msgid "" +#| "You can change this setting later on by running \"dpkg-reconfigure -plow " +#| "glance-common\"." +msgid "" +"You can change this setting later on by running \"dpkg-reconfigure -plow " +"gnocchi-common\"." +msgstr "" +"Podrá cambiar esta configuración más adelante ejecutando «dpkg-reconfigure -" +"plow glance-common»." + +#. Type: boolean +#. Description +#: ../gnocchi-api.templates:2001 +msgid "Register Gnocchi in the Keystone endpoint catalog?" +msgstr "¿Desea registrar Gnocchi en el catálogo de puntos finales de Keystone?" + +#. Type: boolean +#. Description +#: ../gnocchi-api.templates:2001 +msgid "" +"Each OpenStack service (each API) should be registered in order to be " +"accessible. This is done using \"keystone service-create\" and \"keystone " +"endpoint-create\". This can be done automatically now." +msgstr "" +"Debe registrar cada uno de los servicios OpenStack (cada API) para que sean " +"accesibles. Esto se lleva a cabo mediante las órdenes «keystone service-" +"create» y «keystone endpoint-create». Elija si desea ejecutar estas órdenes " +"ahora." + +#. Type: boolean +#. Description +#: ../gnocchi-api.templates:2001 +msgid "" +"Note that you will need to have an up and running Keystone server on which " +"to connect using the Keystone authentication token." +msgstr "" +"Tenga en cuenta que necesitará disponer de un servidor Keystone en ejecución " +"al que conectarse utilizando el token de autenticación de Keystone." + +#. Type: string +#. 
Description +#: ../gnocchi-api.templates:3001 +msgid "Keystone server IP address:" +msgstr "Dirección IP del servidor Keystone:" + +#. Type: string +#. Description +#: ../gnocchi-api.templates:3001 +msgid "" +"Please enter the IP address of the Keystone server, so that gnocchi-api can " +"contact Keystone to do the Gnocchi service and endpoint creation." +msgstr "" +"Introduzca la dirección IP del servidor Keystone para que gnocchi-api pueda " +"contactar con Keystone para realizar el servicio Gnocchi y crear el punto de " +"cierre." + +#. Type: password +#. Description +#: ../gnocchi-api.templates:4001 +msgid "Keystone authentication token:" +msgstr "Token de autenticación de Keystone:" + +#. Type: password +#. Description +#: ../gnocchi-api.templates:4001 +#, fuzzy +#| msgid "" +#| "To configure its endpoint in Keystone, glance-api needs the Keystone " +#| "authentication token." +msgid "" +"To configure its endpoint in Keystone, gnocchi-api needs the Keystone " +"authentication token." +msgstr "" +"Para configurar su punto final en Keystone, glance-api necesita el token de " +"autenticación de Keystone." + +#. Type: string +#. Description +#: ../gnocchi-api.templates:5001 +msgid "Gnocchi endpoint IP address:" +msgstr "Dirección IP del punto de cierre de Gnocchi:" + +#. Type: string +#. Description +#: ../gnocchi-api.templates:5001 +msgid "Please enter the IP address that will be used to contact Gnocchi." +msgstr "Introduzca la dirección IP que se utilizará para contactar con Gnocchi." + +#. Type: string +#. Description +#: ../gnocchi-api.templates:5001 +msgid "" +"This IP address should be accessible from the clients that will use this " +"service, so if you are installing a public cloud, this should be a public IP " +"address." +msgstr "" +"Esta dirección IP debe ser accesible desde los clientes que usarán este " +"servicio, por lo que si está instalando una nube pública, debería ser una " +"dirección IP pública." + +#. Type: string +#. 
Description +#: ../gnocchi-api.templates:6001 +msgid "Name of the region to register:" +msgstr "Nombre de la región a registrar:" + +#. Type: string +#. Description +#: ../gnocchi-api.templates:6001 +msgid "" +"OpenStack supports using availability zones, with each region representing a " +"location. Please enter the zone that you wish to use when registering the " +"endpoint." +msgstr "" +"OpenStack puede utilizarse con zonas de disponibilidad, donde cada región " +"representa una ubicación. Introduzca la zona que desea utilizar cuando \n" +"registre un punto de cierre." diff --git a/debian/po/fr.po b/debian/po/fr.po new file mode 100644 index 00000000..5e57e342 --- /dev/null +++ b/debian/po/fr.po @@ -0,0 +1,256 @@ +# Translation of glance debconf templates to French. +# Copyright (C) 2013, French l10n team +# This file is distributed under the same license as the GLANCE package. +# Julien Patriarca , 2013. +# +msgid "" +msgstr "" +"Project-Id-Version: glance\n" +"Report-Msgid-Bugs-To: gnocchi@packages.debian.org\n" +"POT-Creation-Date: 2015-11-25 09:24+0000\n" +"PO-Revision-Date: 2013-10-26 18:35+0100\n" +"Last-Translator: Julien Patriarca \n" +"Language-Team: FRENCH \n" +"Language: fr\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"X-Generator: Poedit 1.5.4\n" + +#. Type: string +#. Description +#: ../gnocchi-common.templates:2001 +msgid "Authentication server hostname:" +msgstr "Nom d'hôte du serveur d'authentification." + +#. Type: string +#. Description +#: ../gnocchi-common.templates:2001 +msgid "" +"Please specify the hostname of the authentication server for Gnocchi. " +"Typically this is also the hostname of the OpenStack Identity Service " +"(Keystone)." +msgstr "" +"Veuillez indiquer le nom d'hôte de votre serveur d'authentification pour " +"Gnocchi. Typiquement c'est également le nom d'hôte de votre Service " +"d'Identité OpenStack (Keystone)." + +#. Type: string +#. Description +#. 
Translators: a "tenant" in OpenStack world is +#. an entity that contains one or more username/password couples. +#. It's typically the tenant that will be used for billing. Having more than one +#. username/password is very helpful in larger organization. +#. You're advised to either keep "tenant" without translating it +#. or keep it parenthezised. Example for French: +#. locataire ("tenant") +#: ../gnocchi-common.templates:3001 +msgid "Authentication server tenant name:" +msgstr "Nom d'espace client du serveur d'authentification :" + +#. Type: string +#. Description +#. Translators: a "tenant" in OpenStack world is +#. an entity that contains one or more username/password couples. +#. It's typically the tenant that will be used for billing. Having more than one +#. username/password is very helpful in larger organization. +#. You're advised to either keep "tenant" without translating it +#. or keep it parenthezised. Example for French: +#. locataire ("tenant") +#: ../gnocchi-common.templates:3001 +msgid "Please specify the authentication server tenant name." +msgstr "" +"Veuillez indiquer le nom de l'espace client du serveur d'authentification." + +#. Type: string +#. Description +#: ../gnocchi-common.templates:4001 +msgid "Authentication server username:" +msgstr "Nom d'utilisateur pour le serveur d'authentification :" + +#. Type: string +#. Description +#: ../gnocchi-common.templates:4001 +msgid "Please specify the username to use with the authentication server." +msgstr "" +"Veuillez indiquer le nom d'utilisateur à utiliser sur le serveur " +"d'authentification." + +#. Type: password +#. Description +#: ../gnocchi-common.templates:5001 +msgid "Authentication server password:" +msgstr "Mot de passe pour le serveur d'authentification :" + +#. Type: password +#. Description +#: ../gnocchi-common.templates:5001 +msgid "Please specify the password to use with the authentication server." 
+msgstr "" +"Veuillez indiquer le mot de passe à utiliser sur le serveur " +"d'authentification." + +#. Type: boolean +#. Description +#: ../gnocchi-common.templates:6001 +msgid "Set up a database for Gnocchi?" +msgstr "Installer une base de données pour Gnocchi ?" + +#. Type: boolean +#. Description +#: ../gnocchi-common.templates:6001 +#, fuzzy +#| msgid "" +#| "No database has been set up for glance-registry or glance-api to use. " +#| "Before continuing, you should make sure you have the following " +#| "information:" +msgid "" +"No database has been set up for Gnocchi to use. Before continuing, you " +"should make sure you have the following information:" +msgstr "" +"Aucune base de données n'a été installée pour le registre de gnocchi ou pour " +"l'API de Gnocchi. Avant de continuer, assurez vous d'avoir :" + +#. Type: boolean +#. Description +#: ../gnocchi-common.templates:6001 +msgid "" +" * the type of database that you want to use;\n" +" * the database server hostname (that server must allow TCP connections from " +"this\n" +" machine);\n" +" * a username and password to access the database." +msgstr "" +" - Le type de base de données que vous souhaitez utiliser ;\n" +" - le nom d'hôte du serveur de base de données (ce serveur\n" +" doit accepter les connexions TCP depuis cette machine);\n" +" - un nom d'utilisateur et un mot de passe pour accéder\n" +" à cette base de données." + +#. Type: boolean +#. Description +#: ../gnocchi-common.templates:6001 +msgid "" +"If some of these requirements are missing, do not choose this option and run " +"with regular SQLite support." +msgstr "" +"Si certains de ces prérequis sont manquants, ignorer cette option et " +"exécutez l'application avec le support SQLite normal." + +#. Type: boolean +#. Description +#: ../gnocchi-common.templates:6001 +#, fuzzy +#| msgid "" +#| "You can change this setting later on by running \"dpkg-reconfigure -plow " +#| "glance-common\"." 
+msgid "" +"You can change this setting later on by running \"dpkg-reconfigure -plow " +"gnocchi-common\"." +msgstr "" +"Vous pouvez modifier ce réglage plus tard en lançant « dpkg-reconfigure -" +"plow glance-common »." + +#. Type: boolean +#. Description +#: ../gnocchi-api.templates:2001 +msgid "Register Gnocchi in the Keystone endpoint catalog?" +msgstr "Enregistrer Gnocchi dans le catalogue de points d'accès de Keystone ?" + +#. Type: boolean +#. Description +#: ../gnocchi-api.templates:2001 +msgid "" +"Each OpenStack service (each API) should be registered in order to be " +"accessible. This is done using \"keystone service-create\" and \"keystone " +"endpoint-create\". This can be done automatically now." +msgstr "" +"Chaque service OpenStack (chaque API) doit être enregistré pour être " +"accessible. Cela peut être fait en utilisant « keystone service-create » et " +"« keystone endpoint-create ». Cela peut maintenant être fait automatiquement." + +#. Type: boolean +#. Description +#: ../gnocchi-api.templates:2001 +msgid "" +"Note that you will need to have an up and running Keystone server on which " +"to connect using the Keystone authentication token." +msgstr "" +"Veuillez noter que vous aurez besoin d'avoir un serveur Keystone fonctionnel " +"sur lequel se connecter pour utiliser le jeton d'authentification Keystone." + +#. Type: string +#. Description +#: ../gnocchi-api.templates:3001 +msgid "Keystone server IP address:" +msgstr "Adresse IP du serveur Keystone : " + +#. Type: string +#. Description +#: ../gnocchi-api.templates:3001 +msgid "" +"Please enter the IP address of the Keystone server, so that gnocchi-api can " +"contact Keystone to do the Gnocchi service and endpoint creation." +msgstr "" +"Veuillez indiquer l'adresse IP du serveur Keystone, pour que l'API de Gnocchi " +"puisse contacter Keystone pour établir le service Gnocchi et créer le point " +"d'accès." + +#. Type: password +#. 
Description +#: ../gnocchi-api.templates:4001 +msgid "Keystone authentication token:" +msgstr "Jeton d'authentification Keystone : " + +#. Type: password +#. Description +#: ../gnocchi-api.templates:4001 +msgid "" +"To configure its endpoint in Keystone, gnocchi-api needs the Keystone " +"authentication token." +msgstr "" +"Pour configurer son point d'accès dans Keystone, l'API de Gnocchi a besoin du " +"jeton d'authentification Keystone." + +#. Type: string +#. Description +#: ../gnocchi-api.templates:5001 +msgid "Gnocchi endpoint IP address:" +msgstr "Adresse IP du point d'accès Gnocchi : " + +#. Type: string +#. Description +#: ../gnocchi-api.templates:5001 +msgid "Please enter the IP address that will be used to contact Gnocchi." +msgstr "" +"Veuillez indiquer l'adresse IP qui sera utilisée pour contacter Gnocchi." + +#. Type: string +#. Description +#: ../gnocchi-api.templates:5001 +msgid "" +"This IP address should be accessible from the clients that will use this " +"service, so if you are installing a public cloud, this should be a public IP " +"address." +msgstr "" +"Cette adresse IP doit être accessible depuis les clients qui utiliseront ce " +"service, donc si vous installez un nuage public, ce devra être une adresse " +"IP publique." + +#. Type: string +#. Description +#: ../gnocchi-api.templates:6001 +msgid "Name of the region to register:" +msgstr "Nom de la région à enregistrer : " + +#. Type: string +#. Description +#: ../gnocchi-api.templates:6001 +msgid "" +"OpenStack supports using availability zones, with each region representing a " +"location. Please enter the zone that you wish to use when registering the " +"endpoint." +msgstr "" +"OpenStack supporte l'utilisation de zones disponibles, avec chaque région " +"représentant un lieu. Veuillez entrer une zone que vous souhaitez utiliser " +"lors de l'enregistrement d'un point d'accès." 
diff --git a/debian/po/gl.po b/debian/po/gl.po new file mode 100644 index 00000000..2d623bf9 --- /dev/null +++ b/debian/po/gl.po @@ -0,0 +1,252 @@ +# Galician translations for glance package. +# Copyright (C) 2012 THE glance'S COPYRIGHT HOLDER +# This file is distributed under the same license as the glance package. +# +# Jorge Barreiro Gonzalez , 2012. +msgid "" +msgstr "" +"Project-Id-Version: glance\n" +"Report-Msgid-Bugs-To: gnocchi@packages.debian.org\n" +"POT-Creation-Date: 2015-11-25 09:24+0000\n" +"PO-Revision-Date: 2012-06-23 12:02+0200\n" +"Last-Translator: Jorge Barreiro \n" +"Language-Team: Galician \n" +"Language: gl\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"X-Generator: Lokalize 1.0\n" +"Plural-Forms: nplurals=2; plural=n != 1;\n" + +#. Type: string +#. Description +#: ../gnocchi-common.templates:2001 +#, fuzzy +#| msgid "Auth server admin token:" +msgid "Authentication server hostname:" +msgstr "Token do administrador do servidor de autenticación:" + +#. Type: string +#. Description +#: ../gnocchi-common.templates:2001 +msgid "" +"Please specify the hostname of the authentication server for Gnocchi. " +"Typically this is also the hostname of the OpenStack Identity Service " +"(Keystone)." +msgstr "" +"Indique o URL do seu servidor de autenticación de Gnocchi. Normalmente isto " +"será tamén a URL do seu servizo de identidade OpenStack (Keystone)." + +#. Type: string +#. Description +#. Translators: a "tenant" in OpenStack world is +#. an entity that contains one or more username/password couples. +#. It's typically the tenant that will be used for billing. Having more than one +#. username/password is very helpful in larger organization. +#. You're advised to either keep "tenant" without translating it +#. or keep it parenthezised. Example for French: +#. 
locataire ("tenant") +#: ../gnocchi-common.templates:3001 +#, fuzzy +#| msgid "Auth server admin token:" +msgid "Authentication server tenant name:" +msgstr "Token do administrador do servidor de autenticación:" + +#. Type: string +#. Description +#. Translators: a "tenant" in OpenStack world is +#. an entity that contains one or more username/password couples. +#. It's typically the tenant that will be used for billing. Having more than one +#. username/password is very helpful in larger organization. +#. You're advised to either keep "tenant" without translating it +#. or keep it parenthezised. Example for French: +#. locataire ("tenant") +#: ../gnocchi-common.templates:3001 +msgid "Please specify the authentication server tenant name." +msgstr "" + +#. Type: string +#. Description +#: ../gnocchi-common.templates:4001 +#, fuzzy +#| msgid "Auth server admin token:" +msgid "Authentication server username:" +msgstr "Token do administrador do servidor de autenticación:" + +#. Type: string +#. Description +#: ../gnocchi-common.templates:4001 +msgid "Please specify the username to use with the authentication server." +msgstr "" + +#. Type: password +#. Description +#: ../gnocchi-common.templates:5001 +#, fuzzy +#| msgid "Auth server admin token:" +msgid "Authentication server password:" +msgstr "Token do administrador do servidor de autenticación:" + +#. Type: password +#. Description +#: ../gnocchi-common.templates:5001 +msgid "Please specify the password to use with the authentication server." +msgstr "" + +#. Type: boolean +#. Description +#: ../gnocchi-common.templates:6001 +#, fuzzy +#| msgid "Set up a database for glance-registry?" +msgid "Set up a database for Gnocchi?" +msgstr "Quere configurar unha base de datos para «glance-registry»?" + +#. Type: boolean +#. Description +#: ../gnocchi-common.templates:6001 +#, fuzzy +#| msgid "" +#| "No database has been set up for glance-registry to use. 
Before " +#| "continuing, you should make sure you have:" +msgid "" +"No database has been set up for Gnocchi to use. Before continuing, you " +"should make sure you have the following information:" +msgstr "" +"Non se configurou ningunha base de datos para que «glance-registry» a use. " +"Antes de continuar, debería asegurarse de que ten:" + +#. Type: boolean +#. Description +#: ../gnocchi-common.templates:6001 +#, fuzzy +#| msgid "" +#| " - the server host name (that server must allow TCP connections\n" +#| " from this machine);\n" +#| " - a username and password to access the database.\n" +#| " - A database type that you want to use." +msgid "" +" * the type of database that you want to use;\n" +" * the database server hostname (that server must allow TCP connections from " +"this\n" +" machine);\n" +" * a username and password to access the database." +msgstr "" +" - o nome do servidor (o servidor debe permitir conexións TCP\n" +" desde esta máquina);\n" +" - un nome de usuario e contrasinal para acceder á base de datos.\n" +" - O tipo de base de datos que quere usar." + +#. Type: boolean +#. Description +#: ../gnocchi-common.templates:6001 +#, fuzzy +#| msgid "" +#| "If some of these requirements are missing, reject this option and run " +#| "with regular sqlite support." +msgid "" +"If some of these requirements are missing, do not choose this option and run " +"with regular SQLite support." +msgstr "" +"Se non cumpre algún destes requisitos, rexeite esta opción e use a " +"infraestrutura «sqlite» normal." + +#. Type: boolean +#. Description +#: ../gnocchi-common.templates:6001 +#, fuzzy +#| msgid "" +#| "You can change this setting later on by running 'dpkg-reconfigure -plow " +#| "glance-registry" +msgid "" +"You can change this setting later on by running \"dpkg-reconfigure -plow " +"gnocchi-common\"." +msgstr "" +"Pode cambiar esta opción máis tarde executando «dpkg-reconfigure -plow " +"glance-registry»." + +#. Type: boolean +#. 
Description +#: ../gnocchi-api.templates:2001 +msgid "Register Gnocchi in the Keystone endpoint catalog?" +msgstr "" + +#. Type: boolean +#. Description +#: ../gnocchi-api.templates:2001 +msgid "" +"Each OpenStack service (each API) should be registered in order to be " +"accessible. This is done using \"keystone service-create\" and \"keystone " +"endpoint-create\". This can be done automatically now." +msgstr "" + +#. Type: boolean +#. Description +#: ../gnocchi-api.templates:2001 +msgid "" +"Note that you will need to have an up and running Keystone server on which " +"to connect using the Keystone authentication token." +msgstr "" + +#. Type: string +#. Description +#: ../gnocchi-api.templates:3001 +msgid "Keystone server IP address:" +msgstr "" + +#. Type: string +#. Description +#: ../gnocchi-api.templates:3001 +msgid "" +"Please enter the IP address of the Keystone server, so that gnocchi-api can " +"contact Keystone to do the Gnocchi service and endpoint creation." +msgstr "" + +#. Type: password +#. Description +#: ../gnocchi-api.templates:4001 +msgid "Keystone authentication token:" +msgstr "" + +#. Type: password +#. Description +#: ../gnocchi-api.templates:4001 +msgid "" +"To configure its endpoint in Keystone, gnocchi-api needs the Keystone " +"authentication token." +msgstr "" + +#. Type: string +#. Description +#: ../gnocchi-api.templates:5001 +msgid "Gnocchi endpoint IP address:" +msgstr "" + +#. Type: string +#. Description +#: ../gnocchi-api.templates:5001 +msgid "Please enter the IP address that will be used to contact Gnocchi." +msgstr "" + +#. Type: string +#. Description +#: ../gnocchi-api.templates:5001 +msgid "" +"This IP address should be accessible from the clients that will use this " +"service, so if you are installing a public cloud, this should be a public IP " +"address." +msgstr "" + +#. Type: string +#. Description +#: ../gnocchi-api.templates:6001 +msgid "Name of the region to register:" +msgstr "" + +#. Type: string +#. 
Description +#: ../gnocchi-api.templates:6001 +msgid "" +"OpenStack supports using availability zones, with each region representing a " +"location. Please enter the zone that you wish to use when registering the " +"endpoint." +msgstr "" diff --git a/debian/po/it.po b/debian/po/it.po new file mode 100644 index 00000000..0740ac73 --- /dev/null +++ b/debian/po/it.po @@ -0,0 +1,254 @@ +# Italian description of glance debconf messages. +# Copyright (C) 2012, glance package copyright holder. +# This file is distributed under the same license as the glance package. +# Beatrice Torracca , 2012, 2013, 2014. +msgid "" +msgstr "" +"Project-Id-Version: glance\n" +"Report-Msgid-Bugs-To: gnocchi@packages.debian.org\n" +"POT-Creation-Date: 2015-11-25 09:24+0000\n" +"PO-Revision-Date: 2014-04-21 10:03+0200\n" +"Last-Translator: Beatrice Torracca \n" +"Language-Team: Italian \n" +"Language: it\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" +"X-Generator: Virtaal 0.7.1\n" + +#. Type: string +#. Description +#: ../gnocchi-common.templates:2001 +msgid "Authentication server hostname:" +msgstr "Nome host del server di autenticazione:" + +#. Type: string +#. Description +#: ../gnocchi-common.templates:2001 +msgid "" +"Please specify the hostname of the authentication server for Gnocchi. " +"Typically this is also the hostname of the OpenStack Identity Service " +"(Keystone)." +msgstr "" +"Specificare il nome host del server di autenticazione per Gnocchi. " +"Tipicamente, è anche il nome host dell'OpenStack Identity Service (Keystone)." + +#. Type: string +#. Description +#. Translators: a "tenant" in OpenStack world is +#. an entity that contains one or more username/password couples. +#. It's typically the tenant that will be used for billing. Having more than one +#. username/password is very helpful in larger organization. +#. 
You're advised to either keep "tenant" without translating it +#. or keep it parenthezised. Example for French: +#. locataire ("tenant") +#: ../gnocchi-common.templates:3001 +msgid "Authentication server tenant name:" +msgstr "Nome del locatario («tenant») per il server di autenticazione:" + +#. Type: string +#. Description +#. Translators: a "tenant" in OpenStack world is +#. an entity that contains one or more username/password couples. +#. It's typically the tenant that will be used for billing. Having more than one +#. username/password is very helpful in larger organization. +#. You're advised to either keep "tenant" without translating it +#. or keep it parenthezised. Example for French: +#. locataire ("tenant") +#: ../gnocchi-common.templates:3001 +msgid "Please specify the authentication server tenant name." +msgstr "" +"Inserire il nome del locatario («tenant») per il server di autenticazione." + +#. Type: string +#. Description +#: ../gnocchi-common.templates:4001 +msgid "Authentication server username:" +msgstr "Nome utente per il server di autenticazione:" + +#. Type: string +#. Description +#: ../gnocchi-common.templates:4001 +msgid "Please specify the username to use with the authentication server." +msgstr "Inserire il nome utente da usare con il server di autenticazione." + +#. Type: password +#. Description +#: ../gnocchi-common.templates:5001 +msgid "Authentication server password:" +msgstr "Password per il server di autenticazione:" + +#. Type: password +#. Description +#: ../gnocchi-common.templates:5001 +msgid "Please specify the password to use with the authentication server." +msgstr "Inserire la password da usare con il server di autenticazione." + +#. Type: boolean +#. Description +#: ../gnocchi-common.templates:6001 +msgid "Set up a database for Gnocchi?" +msgstr "Impostare un database per Gnocchi?" + +#. Type: boolean +#. 
Description +#: ../gnocchi-common.templates:6001 +#, fuzzy +#| msgid "" +#| "No database has been set up for glance-registry or glance-api to use. " +#| "Before continuing, you should make sure you have the following " +#| "information:" +msgid "" +"No database has been set up for Gnocchi to use. Before continuing, you " +"should make sure you have the following information:" +msgstr "" +"Non è stato impostato alcun database per essere usato da glance-registry o " +"glance-api. Prima di continuare assicurarsi di avere le seguenti " +"informazioni:" + +#. Type: boolean +#. Description +#: ../gnocchi-common.templates:6001 +msgid "" +" * the type of database that you want to use;\n" +" * the database server hostname (that server must allow TCP connections from " +"this\n" +" machine);\n" +" * a username and password to access the database." +msgstr "" +" * il tipo di database che si desidera usare;\n" +" * il nome host del server di database (che deve permettere le connessioni\n" +" TCP da questa macchina);\n" +" * un nome utente e una password per accedere al database." + +#. Type: boolean +#. Description +#: ../gnocchi-common.templates:6001 +msgid "" +"If some of these requirements are missing, do not choose this option and run " +"with regular SQLite support." +msgstr "" +"Se non si ha uno o più di questi requisiti, non scegliere questa opzione ed " +"eseguire con il regolare supporto per SQLite." + +#. Type: boolean +#. Description +#: ../gnocchi-common.templates:6001 +#, fuzzy +#| msgid "" +#| "You can change this setting later on by running \"dpkg-reconfigure -plow " +#| "glance-common\"." +msgid "" +"You can change this setting later on by running \"dpkg-reconfigure -plow " +"gnocchi-common\"." +msgstr "" +"È possibile cambiare questa impostazione successivamente eseguendo «dpkg-" +"reconfigure -plow glance-common»." + +#. Type: boolean +#. Description +#: ../gnocchi-api.templates:2001 +msgid "Register Gnocchi in the Keystone endpoint catalog?" 
+msgstr "Registrare Gnocchi nel catalogo dei punti terminali di Keystone?" + +#. Type: boolean +#. Description +#: ../gnocchi-api.templates:2001 +msgid "" +"Each OpenStack service (each API) should be registered in order to be " +"accessible. This is done using \"keystone service-create\" and \"keystone " +"endpoint-create\". This can be done automatically now." +msgstr "" +"Ogni servizio OpenStack (ogni API) dovrebbe essere registrato per poter " +"essere accessibile. Ciò viene fatto usando «keystone service-create» e " +"«keystone endpoint-create». Ciò può essere fatto ora automaticamente." + +#. Type: boolean +#. Description +#: ../gnocchi-api.templates:2001 +msgid "" +"Note that you will need to have an up and running Keystone server on which " +"to connect using the Keystone authentication token." +msgstr "" +"Notare che sarà necessario avere un server Keystone in funzione a cui " +"connettersi usando il token di autenticazione Keystone." + +#. Type: string +#. Description +#: ../gnocchi-api.templates:3001 +msgid "Keystone server IP address:" +msgstr "Indirizzo IP del server Keystone:" + +#. Type: string +#. Description +#: ../gnocchi-api.templates:3001 +msgid "" +"Please enter the IP address of the Keystone server, so that gnocchi-api can " +"contact Keystone to do the Gnocchi service and endpoint creation." +msgstr "" +"Inserire l'indirizzo IP del server Keystone, in modo che gnocchi-api possa " +"contattare Keystone per effettuare la creazione del servizio e del punto " +"terminale Gnocchi." + +#. Type: password +#. Description +#: ../gnocchi-api.templates:4001 +msgid "Keystone authentication token:" +msgstr "Token di autenticazione Keystone:" + +#. Type: password +#. Description +#: ../gnocchi-api.templates:4001 +#, fuzzy +#| msgid "" +#| "To configure its endpoint in Keystone, glance-api needs the Keystone " +#| "authentication token." +msgid "" +"To configure its endpoint in Keystone, gnocchi-api needs the Keystone " +"authentication token." 
+msgstr "" +"Per configurare il proprio punto terminale in Keystone, glance-api ha " +"bisogno del token di autenticazione Keystone." + +#. Type: string +#. Description +#: ../gnocchi-api.templates:5001 +msgid "Gnocchi endpoint IP address:" +msgstr "Indirizzo IP del punto terminale Gnocchi:" + +#. Type: string +#. Description +#: ../gnocchi-api.templates:5001 +msgid "Please enter the IP address that will be used to contact Gnocchi." +msgstr "Inserire l'indirizzo IP che verrà usato per contattare Gnocchi." + +#. Type: string +#. Description +#: ../gnocchi-api.templates:5001 +msgid "" +"This IP address should be accessible from the clients that will use this " +"service, so if you are installing a public cloud, this should be a public IP " +"address." +msgstr "" +"Questo indirizzo IP dovrebbe essere accessibile dai client che useranno il " +"servizio, perciò se si sta installando una cloud pubblica, questo dovrebbe " +"essere un indirizzo IP pubblico." + +#. Type: string +#. Description +#: ../gnocchi-api.templates:6001 +msgid "Name of the region to register:" +msgstr "Nome della regione da registrare:" + +#. Type: string +#. Description +#: ../gnocchi-api.templates:6001 +msgid "" +"OpenStack supports using availability zones, with each region representing a " +"location. Please enter the zone that you wish to use when registering the " +"endpoint." +msgstr "" +"OpenStack gestisce le zone di disponibilità, con ogni regione che " +"rappresenta una posizione. Inserire la zona che si desidera usare durante la " +"registrazione del punto terminale." diff --git a/debian/po/ja.po b/debian/po/ja.po new file mode 100644 index 00000000..db266ee8 --- /dev/null +++ b/debian/po/ja.po @@ -0,0 +1,289 @@ +# SOME DESCRIPTIVE TITLE. +# Copyright (C) YEAR THE PACKAGE'S COPYRIGHT HOLDER +# This file is distributed under the same license as the PACKAGE package. +# victory , 2012. 
+# +msgid "" +msgstr "" +"Project-Id-Version: glance\n" +"Report-Msgid-Bugs-To: gnocchi@packages.debian.org\n" +"POT-Creation-Date: 2015-11-25 09:24+0000\n" +"PO-Revision-Date: 2012-11-10 23:28+0900\n" +"Last-Translator: victory \n" +"Language-Team: Japanese \n" +"Language: ja\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" + +#. Type: string +#. Description +#: ../gnocchi-common.templates:2001 +#, fuzzy +#| msgid "Auth server hostname:" +msgid "Authentication server hostname:" +msgstr "認証サーバのホスト名:" + +#. Type: string +#. Description +#: ../gnocchi-common.templates:2001 +msgid "" +"Please specify the hostname of the authentication server for Gnocchi. " +"Typically this is also the hostname of the OpenStack Identity Service " +"(Keystone)." +msgstr "" +"Gnocchi 認証サーバの URL を指定してください。これは通常 OpenStack Identity " +"Service (Keystone) の URL にもなります。" + +#. Type: string +#. Description +#. Translators: a "tenant" in OpenStack world is +#. an entity that contains one or more username/password couples. +#. It's typically the tenant that will be used for billing. Having more than one +#. username/password is very helpful in larger organization. +#. You're advised to either keep "tenant" without translating it +#. or keep it parenthezised. Example for French: +#. locataire ("tenant") +#: ../gnocchi-common.templates:3001 +#, fuzzy +#| msgid "Auth server tenant name:" +msgid "Authentication server tenant name:" +msgstr "認証サーバの管理用アカウント (tenant) 名" + +#. Type: string +#. Description +#. Translators: a "tenant" in OpenStack world is +#. an entity that contains one or more username/password couples. +#. It's typically the tenant that will be used for billing. Having more than one +#. username/password is very helpful in larger organization. +#. You're advised to either keep "tenant" without translating it +#. or keep it parenthezised. Example for French: +#. 
locataire ("tenant") +#: ../gnocchi-common.templates:3001 +msgid "Please specify the authentication server tenant name." +msgstr "" + +#. Type: string +#. Description +#: ../gnocchi-common.templates:4001 +#, fuzzy +#| msgid "Auth server username:" +msgid "Authentication server username:" +msgstr "認証サーバのユーザ名:" + +#. Type: string +#. Description +#: ../gnocchi-common.templates:4001 +msgid "Please specify the username to use with the authentication server." +msgstr "" + +#. Type: password +#. Description +#: ../gnocchi-common.templates:5001 +#, fuzzy +#| msgid "Auth server password:" +msgid "Authentication server password:" +msgstr "認証サーバのパスワード:" + +#. Type: password +#. Description +#: ../gnocchi-common.templates:5001 +msgid "Please specify the password to use with the authentication server." +msgstr "" + +#. Type: boolean +#. Description +#: ../gnocchi-common.templates:6001 +#, fuzzy +#| msgid "Set up a database for glance?" +msgid "Set up a database for Gnocchi?" +msgstr "glance 用のデータベースを用意しますか?" + +#. Type: boolean +#. Description +#: ../gnocchi-common.templates:6001 +#, fuzzy +#| msgid "" +#| "No database has been set up for glance-registry or glance-api to use. " +#| "Before continuing, you should make sure you have:" +msgid "" +"No database has been set up for Gnocchi to use. Before continuing, you " +"should make sure you have the following information:" +msgstr "" +"glance-registry または glance-api で利用するデータベースが用意されていませ" +"ん。続ける前に以下の情報が揃っていることを確認してください:" + +#. Type: boolean +#. Description +#: ../gnocchi-common.templates:6001 +#, fuzzy +#| msgid "" +#| " - the server host name (that server must allow TCP connections from " +#| "this\n" +#| " machine);\n" +#| " - a username and password to access the database.\n" +#| " - A database type that you want to use." 
+msgid "" +" * the type of database that you want to use;\n" +" * the database server hostname (that server must allow TCP connections from " +"this\n" +" machine);\n" +" * a username and password to access the database." +msgstr "" +" - サーバのホスト名 (このサーバはこのマシンからの\n" +" TCP 接続を許可しなければなりません)\n" +" - データベースにアクセスするためのユーザ名とパスワード\n" +" - 使いたいデータベースの種類" + +#. Type: boolean +#. Description +#: ../gnocchi-common.templates:6001 +#, fuzzy +#| msgid "" +#| "If some of these requirements are missing, reject this option and run " +#| "with regular sqlite support." +msgid "" +"If some of these requirements are missing, do not choose this option and run " +"with regular SQLite support." +msgstr "" +"必要な情報が欠けている場合このオプションを却下して標準の SQLite を利用しま" +"す。" + +#. Type: boolean +#. Description +#: ../gnocchi-common.templates:6001 +#, fuzzy +#| msgid "" +#| "You can change this setting later on by running 'dpkg-reconfigure -plow " +#| "glance-common'." +msgid "" +"You can change this setting later on by running \"dpkg-reconfigure -plow " +"gnocchi-common\"." +msgstr "" +"この設定は後で「dpkg-reconfigure -plow glance-common」を実行することにより変" +"更できます。" + +#. Type: boolean +#. Description +#: ../gnocchi-api.templates:2001 +msgid "Register Gnocchi in the Keystone endpoint catalog?" +msgstr "Gnocchi を Keystone の端末リストに登録しますか?" + +#. Type: boolean +#. Description +#: ../gnocchi-api.templates:2001 +#, fuzzy +#| msgid "" +#| "Each Openstack services (each API) should be registered in order to be " +#| "accessible. This is done using \"keystone service-create\" and \"keystone " +#| "endpoint-create\". Select if you want to run these commands now." +msgid "" +"Each OpenStack service (each API) should be registered in order to be " +"accessible. This is done using \"keystone service-create\" and \"keystone " +"endpoint-create\". This can be done automatically now." 
+msgstr "" +"OpenStack のサービスごと (API ごと) に、アクセスできるようにするため登録すべ" +"きです。「keystone service-create」と「keystone endpoint-create」を使って登録" +"することができます。ここで自動的に行うことができます。" + +#. Type: boolean +#. Description +#: ../gnocchi-api.templates:2001 +#, fuzzy +#| msgid "" +#| "Note that you will need to have an up and running keystone server on " +#| "which to connect using the Keystone auth token." +msgid "" +"Note that you will need to have an up and running Keystone server on which " +"to connect using the Keystone authentication token." +msgstr "" +"Keystone 認証文字列を使って接続する先の Keystone サーバが必要なことに注意して" +"ください。" + +#. Type: string +#. Description +#: ../gnocchi-api.templates:3001 +#, fuzzy +#| msgid "Keystone IP address:" +msgid "Keystone server IP address:" +msgstr "Keystone の IP アドレス:" + +#. Type: string +#. Description +#: ../gnocchi-api.templates:3001 +msgid "" +"Please enter the IP address of the Keystone server, so that gnocchi-api can " +"contact Keystone to do the Gnocchi service and endpoint creation." +msgstr "" +"Keystone サーバの IP アドレスを入力してください。それにより gnocchi-api は " +"Keystone と通信し、Gnocchi サービスや端末の作成ができるようになります。" + +#. Type: password +#. Description +#: ../gnocchi-api.templates:4001 +#, fuzzy +#| msgid "Keystone Auth Token:" +msgid "Keystone authentication token:" +msgstr "Keystone 認証文字列:" + +#. Type: password +#. Description +#: ../gnocchi-api.templates:4001 +#, fuzzy +#| msgid "" +#| "To configure its endpoint in Keystone, glance-api needs the Keystone auth " +#| "token." +msgid "" +"To configure its endpoint in Keystone, gnocchi-api needs the Keystone " +"authentication token." +msgstr "" +"Keystone で端末を設定するには、glance-api は Keystone 認証文字列を必要としま" +"す。" + +#. Type: string +#. Description +#: ../gnocchi-api.templates:5001 +msgid "Gnocchi endpoint IP address:" +msgstr "Gnocchi 端末の IP アドレス:" + +#. Type: string +#. Description +#: ../gnocchi-api.templates:5001 +msgid "Please enter the IP address that will be used to contact Gnocchi." 
+msgstr "" +"Gnocchi への通信に利用する IP アドレス (例えば Gnocchi 端末の IP アドレス) を入" +"力してください。" + +#. Type: string +#. Description +#: ../gnocchi-api.templates:5001 +msgid "" +"This IP address should be accessible from the clients that will use this " +"service, so if you are installing a public cloud, this should be a public IP " +"address." +msgstr "" +"この IP アドレスはこのサービスを利用するクライアントからアクセスできないとい" +"けないので、パブリッククラウドをインストールしている場合、これは公開 IP アド" +"レスを使うようにしてください。" + +#. Type: string +#. Description +#: ../gnocchi-api.templates:6001 +msgid "Name of the region to register:" +msgstr "登録する領域の名前:" + +#. Type: string +#. Description +#: ../gnocchi-api.templates:6001 +#, fuzzy +#| msgid "" +#| "Openstack can be used using availability zones, with each region " +#| "representing a location. Please enter the zone that you wish to use when " +#| "registering the endpoint." +msgid "" +"OpenStack supports using availability zones, with each region representing a " +"location. Please enter the zone that you wish to use when registering the " +"endpoint." +msgstr "" +"OpenStack は位置を示す各領域による利用可能区分を利用することができます。端末" +"の登録時に利用したい区分を入力してください。" diff --git a/debian/po/nl.po b/debian/po/nl.po new file mode 100644 index 00000000..89ec6ec1 --- /dev/null +++ b/debian/po/nl.po @@ -0,0 +1,253 @@ +# Dutch translation of glance debconf templates. +# Copyright (C) 2012 THE PACKAGE'S COPYRIGHT HOLDER +# This file is distributed under the same license as the glance package. +# Jeroen Schot , 2012. +# Frans Spiesschaert , 2014. +# +msgid "" +msgstr "" +"Project-Id-Version: glance 2012.1~e3-4\n" +"Report-Msgid-Bugs-To: gnocchi@packages.debian.org\n" +"POT-Creation-Date: 2015-11-25 09:24+0000\n" +"PO-Revision-Date: 2014-09-26 22:55+0200\n" +"Last-Translator: Frans Spiesschaert \n" +"Language-Team: Debian Dutch l10n Team \n" +"Language: nl\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" + +#. Type: string +#. 
Description +#: ../gnocchi-common.templates:2001 +msgid "Authentication server hostname:" +msgstr "Computernaam van de authenticatieserver:" + +#. Type: string +#. Description +#: ../gnocchi-common.templates:2001 +msgid "" +"Please specify the hostname of the authentication server for Gnocchi. " +"Typically this is also the hostname of the OpenStack Identity Service " +"(Keystone)." +msgstr "" +"Geef de computernaam van de authenticatieserver voor Gnocchi. Meestal is dit " +"ook de computernaam van de OpenStack identiteitsserver (Keystone)." + +#. Type: string +#. Description +#. Translators: a "tenant" in OpenStack world is +#. an entity that contains one or more username/password couples. +#. It's typically the tenant that will be used for billing. Having more than one +#. username/password is very helpful in larger organization. +#. You're advised to either keep "tenant" without translating it +#. or keep it parenthezised. Example for French: +#. locataire ("tenant") +#: ../gnocchi-common.templates:3001 +msgid "Authentication server tenant name:" +msgstr "Naam van de clientruimte (tenant) op de authenticatieserver:" + +#. Type: string +#. Description +#. Translators: a "tenant" in OpenStack world is +#. an entity that contains one or more username/password couples. +#. It's typically the tenant that will be used for billing. Having more than one +#. username/password is very helpful in larger organization. +#. You're advised to either keep "tenant" without translating it +#. or keep it parenthezised. Example for French: +#. locataire ("tenant") +#: ../gnocchi-common.templates:3001 +msgid "Please specify the authentication server tenant name." +msgstr "" +"Gelieve de naam te vermelden van de clientruimte (tenant) op de " +"authenticatieserver" + +#. Type: string +#. Description +#: ../gnocchi-common.templates:4001 +msgid "Authentication server username:" +msgstr "Gebruikersnaam voor de authenticatieserver:" + +#. Type: string +#. 
 Description +#: ../gnocchi-common.templates:4001 +msgid "Please specify the username to use with the authentication server." +msgstr "Geef de gebruikersnaam op voor de authenticatieserver." + +#. Type: password +#. Description +#: ../gnocchi-common.templates:5001 +msgid "Authentication server password:" +msgstr "Wachtwoord voor de authenticatieserver:" + +#. Type: password +#. Description +#: ../gnocchi-common.templates:5001 +msgid "Please specify the password to use with the authentication server." +msgstr "Geef het wachtwoord op voor de authenticatieserver." + +#. Type: boolean +#. Description +#: ../gnocchi-common.templates:6001 +msgid "Set up a database for Gnocchi?" +msgstr "Een database voor Gnocchi opzetten?" + +#. Type: boolean +#. Description +#: ../gnocchi-common.templates:6001 +msgid "" +"No database has been set up for Gnocchi to use. Before continuing, you " +"should make sure you have the following information:" +msgstr "" +"Er werd geen database opgezet voor gebruik door Gnocchi. Voor u doorgaat " +"moet u beschikken over de volgende informatie:" + +#. Type: boolean +#. Description +#: ../gnocchi-common.templates:6001 +msgid "" +" * the type of database that you want to use;\n" +" * the database server hostname (that server must allow TCP connections from " +"this\n" +" machine);\n" +" * a username and password to access the database." +msgstr "" +" * het type database dat u wenst te gebruiken;\n" +" * de computernaam van de databaseserver (die server moet\n" +" TCP-verbindingen vanaf deze computer accepteren);\n" +" * een gebruikersnaam en wachtwoord voor toegang tot de database." + +#. Type: boolean +#. Description +#: ../gnocchi-common.templates:6001 +msgid "" +"If some of these requirements are missing, do not choose this option and run " +"with regular SQLite support." 
+msgstr "" +"Indien sommige van deze gegevens ontbreken, moet u deze optie niet kiezen en " +"de toepassing gebruiken met gewone SQLite-ondersteuning." + +#. Type: boolean +#. Description +#: ../gnocchi-common.templates:6001 +#, fuzzy +#| msgid "" +#| "You can change this setting later on by running \"dpkg-reconfigure -plow " +#| "glance-common\"." +msgid "" +"You can change this setting later on by running \"dpkg-reconfigure -plow " +"gnocchi-common\"." +msgstr "" +"U kunt deze instelling later wijzigen met het commando \"dpkg-reconfigure -" +"plow glance-common\"." + +#. Type: boolean +#. Description +#: ../gnocchi-api.templates:2001 +msgid "Register Gnocchi in the Keystone endpoint catalog?" +msgstr "Gnocchi registreren in de catalogus van toegangspunten van keystone?" + +#. Type: boolean +#. Description +#: ../gnocchi-api.templates:2001 +msgid "" +"Each OpenStack service (each API) should be registered in order to be " +"accessible. This is done using \"keystone service-create\" and \"keystone " +"endpoint-create\". This can be done automatically now." +msgstr "" +"Elke OpenStackdienst (elke API) moet geregistreerd worden om toegankelijk te " +"zijn. Dit gebeurt aan de hand van \"keystone service-create\" en \"keystone " +"endpoint-create\". Dit kan nu automatisch gedaan worden." + +#. Type: boolean +#. Description +#: ../gnocchi-api.templates:2001 +msgid "" +"Note that you will need to have an up and running Keystone server on which " +"to connect using the Keystone authentication token." +msgstr "" +"Merk op dat u hiervoor een volledig werkende keystone-server nodig heeft, " +"waarmee een verbinding gemaakt wordt met behulp van het authenticatiebewijs " +"voor Keystone." + +#. Type: string +#. Description +#: ../gnocchi-api.templates:3001 +msgid "Keystone server IP address:" +msgstr "IP-adres van de Keystone-server:" + +#. Type: string +#. 
 Description +#: ../gnocchi-api.templates:3001 +msgid "" +"Please enter the IP address of the Keystone server, so that gnocchi-api can " +"contact Keystone to do the Gnocchi service and endpoint creation." +msgstr "" +"Gelieve het IP-adres van de Keystone-server op te geven, zodat gnocchi-api " +"met Keystone kan verbinden om de Gnocchi-service en het toegangspunt aan te " +"maken." + +#. Type: password +#. Description +#: ../gnocchi-api.templates:4001 +msgid "Keystone authentication token:" +msgstr "Authenticatiebewijs voor Keystone:" + +#. Type: password +#. Description +#: ../gnocchi-api.templates:4001 +#, fuzzy +#| msgid "" +#| "To configure its endpoint in Keystone, glance-api needs the Keystone " +#| "authentication token." +msgid "" +"To configure its endpoint in Keystone, gnocchi-api needs the Keystone " +"authentication token." +msgstr "" +"Om zijn toegangspunt te kunnen aanmaken in Keystone, heeft glance-api het " +"authenticatiebewijs voor Keystone nodig." + +#. Type: string +#. Description +#: ../gnocchi-api.templates:5001 +msgid "Gnocchi endpoint IP address:" +msgstr "IP-adres van het toegangspunt voor Gnocchi:" + +#. Type: string +#. Description +#: ../gnocchi-api.templates:5001 +msgid "Please enter the IP address that will be used to contact Gnocchi." +msgstr "" +"Gelieve het IP-adres in te voeren dat gebruikt zal worden om contact te " +"maken met Gnocchi." + +#. Type: string +#. Description +#: ../gnocchi-api.templates:5001 +msgid "" +"This IP address should be accessible from the clients that will use this " +"service, so if you are installing a public cloud, this should be a public IP " +"address." +msgstr "" +"Dit IP-adres moet bereikbaar zijn voor de clients die van deze service " +"gebruik zullen maken. Indien u een openbare cloud installeert, moet dit dus " +"een algemeen bereikbaar IP-adres zijn." + +#. Type: string +#. 
Description +#: ../gnocchi-api.templates:6001 +msgid "Name of the region to register:" +msgstr "Naam van de te registreren regio:" + +#. Type: string +#. Description +#: ../gnocchi-api.templates:6001 +msgid "" +"OpenStack supports using availability zones, with each region representing a " +"location. Please enter the zone that you wish to use when registering the " +"endpoint." +msgstr "" +"Openstack ondersteunt het gebruik van zones van beschikbaarheid, waarbij " +"elke regio een locatie vertegenwoordigt. Geef aan welke zone u wenst te " +"gebruiken bij het registreren van het toegangspunt." diff --git a/debian/po/pl.po b/debian/po/pl.po new file mode 100644 index 00000000..2b2e8d8f --- /dev/null +++ b/debian/po/pl.po @@ -0,0 +1,256 @@ +# Translation of glance debconf templates to Polish. +# Copyright (C) 2012 +# This file is distributed under the same license as the glance package. +# +# Michał Kułach , 2012. +msgid "" +msgstr "" +"Project-Id-Version: glance\n" +"Report-Msgid-Bugs-To: gnocchi@packages.debian.org\n" +"POT-Creation-Date: 2015-11-25 09:24+0000\n" +"PO-Revision-Date: 2012-06-09 10:11+0200\n" +"Last-Translator: Michał Kułach \n" +"Language-Team: Polish \n" +"Language: pl\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"X-Generator: Lokalize 1.2\n" +"Plural-Forms: nplurals=3; plural=(n==1 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 " +"|| n%100>=20) ? 1 : 2);\n" + +#. Type: string +#. Description +#: ../gnocchi-common.templates:2001 +#, fuzzy +#| msgid "Auth server admin token:" +msgid "Authentication server hostname:" +msgstr "Token administratora serwera uwierzytelniania:" + +#. Type: string +#. Description +#: ../gnocchi-common.templates:2001 +msgid "" +"Please specify the hostname of the authentication server for Gnocchi. " +"Typically this is also the hostname of the OpenStack Identity Service " +"(Keystone)." +msgstr "" +"Proszę podać adres URL serwera uwierzytelniania Gnocchi. 
Z reguły jest to " +"adres OpenStack Identity Service (Keystone) danego użytkownika." + +#. Type: string +#. Description +#. Translators: a "tenant" in OpenStack world is +#. an entity that contains one or more username/password couples. +#. It's typically the tenant that will be used for billing. Having more than one +#. username/password is very helpful in larger organization. +#. You're advised to either keep "tenant" without translating it +#. or keep it parenthezised. Example for French: +#. locataire ("tenant") +#: ../gnocchi-common.templates:3001 +#, fuzzy +#| msgid "Auth server admin token:" +msgid "Authentication server tenant name:" +msgstr "Token administratora serwera uwierzytelniania:" + +#. Type: string +#. Description +#. Translators: a "tenant" in OpenStack world is +#. an entity that contains one or more username/password couples. +#. It's typically the tenant that will be used for billing. Having more than one +#. username/password is very helpful in larger organization. +#. You're advised to either keep "tenant" without translating it +#. or keep it parenthezised. Example for French: +#. locataire ("tenant") +#: ../gnocchi-common.templates:3001 +msgid "Please specify the authentication server tenant name." +msgstr "" + +#. Type: string +#. Description +#: ../gnocchi-common.templates:4001 +#, fuzzy +#| msgid "Auth server admin token:" +msgid "Authentication server username:" +msgstr "Token administratora serwera uwierzytelniania:" + +#. Type: string +#. Description +#: ../gnocchi-common.templates:4001 +msgid "Please specify the username to use with the authentication server." +msgstr "" + +#. Type: password +#. Description +#: ../gnocchi-common.templates:5001 +#, fuzzy +#| msgid "Auth server admin token:" +msgid "Authentication server password:" +msgstr "Token administratora serwera uwierzytelniania:" + +#. Type: password +#. 
Description +#: ../gnocchi-common.templates:5001 +msgid "Please specify the password to use with the authentication server." +msgstr "" + +#. Type: boolean +#. Description +#: ../gnocchi-common.templates:6001 +#, fuzzy +#| msgid "Set up a database for glance-registry?" +msgid "Set up a database for Gnocchi?" +msgstr "Skonfigurować bazę danych do glance-registry?" + +#. Type: boolean +#. Description +#: ../gnocchi-common.templates:6001 +#, fuzzy +#| msgid "" +#| "No database has been set up for glance-registry to use. Before " +#| "continuing, you should make sure you have:" +msgid "" +"No database has been set up for Gnocchi to use. Before continuing, you " +"should make sure you have the following information:" +msgstr "" +"Nie skonfigurowano bazy danych do użycia z glance-registry. Przed " +"kontynuowaniem, proszę upewnić się, że posiada się:" + +#. Type: boolean +#. Description +#: ../gnocchi-common.templates:6001 +#, fuzzy +#| msgid "" +#| " - the server host name (that server must allow TCP connections\n" +#| " from this machine);\n" +#| " - a username and password to access the database.\n" +#| " - A database type that you want to use." +msgid "" +" * the type of database that you want to use;\n" +" * the database server hostname (that server must allow TCP connections from " +"this\n" +" machine);\n" +" * a username and password to access the database." +msgstr "" +" - nazwę serwera (serwer musi pozwalać na połączenia TCP\n" +" z tego komputera),\n" +" - nazwę użytkownika i hasło dostępowe do bazy danych,\n" +" - typ bazy danych, który chce się wykorzystać." + +#. Type: boolean +#. Description +#: ../gnocchi-common.templates:6001 +#, fuzzy +#| msgid "" +#| "If some of these requirements are missing, reject this option and run " +#| "with regular sqlite support." +msgid "" +"If some of these requirements are missing, do not choose this option and run " +"with regular SQLite support." 
+msgstr "" +"Jeśli nie zna się któregoś z powyższych punktów, proszę wybrać \"nie\" i " +"skorzystać ze zwykłego trybu, używającego sqlite." + +#. Type: boolean +#. Description +#: ../gnocchi-common.templates:6001 +#, fuzzy +#| msgid "" +#| "You can change this setting later on by running 'dpkg-reconfigure -plow " +#| "glance-registry" +msgid "" +"You can change this setting later on by running \"dpkg-reconfigure -plow " +"gnocchi-common\"." +msgstr "" +"Można zmienić to ustawienie później, wykonując \"dpkg-reconfigure -plow " +"glance-registry\"." + +#. Type: boolean +#. Description +#: ../gnocchi-api.templates:2001 +msgid "Register Gnocchi in the Keystone endpoint catalog?" +msgstr "" + +#. Type: boolean +#. Description +#: ../gnocchi-api.templates:2001 +msgid "" +"Each OpenStack service (each API) should be registered in order to be " +"accessible. This is done using \"keystone service-create\" and \"keystone " +"endpoint-create\". This can be done automatically now." +msgstr "" + +#. Type: boolean +#. Description +#: ../gnocchi-api.templates:2001 +msgid "" +"Note that you will need to have an up and running Keystone server on which " +"to connect using the Keystone authentication token." +msgstr "" + +#. Type: string +#. Description +#: ../gnocchi-api.templates:3001 +msgid "Keystone server IP address:" +msgstr "" + +#. Type: string +#. Description +#: ../gnocchi-api.templates:3001 +msgid "" +"Please enter the IP address of the Keystone server, so that gnocchi-api can " +"contact Keystone to do the Gnocchi service and endpoint creation." +msgstr "" + +#. Type: password +#. Description +#: ../gnocchi-api.templates:4001 +msgid "Keystone authentication token:" +msgstr "" + +#. Type: password +#. Description +#: ../gnocchi-api.templates:4001 +msgid "" +"To configure its endpoint in Keystone, gnocchi-api needs the Keystone " +"authentication token." +msgstr "" + +#. Type: string +#. 
Description +#: ../gnocchi-api.templates:5001 +msgid "Gnocchi endpoint IP address:" +msgstr "" + +#. Type: string +#. Description +#: ../gnocchi-api.templates:5001 +msgid "Please enter the IP address that will be used to contact Gnocchi." +msgstr "" + +#. Type: string +#. Description +#: ../gnocchi-api.templates:5001 +msgid "" +"This IP address should be accessible from the clients that will use this " +"service, so if you are installing a public cloud, this should be a public IP " +"address." +msgstr "" + +#. Type: string +#. Description +#: ../gnocchi-api.templates:6001 +msgid "Name of the region to register:" +msgstr "" + +#. Type: string +#. Description +#: ../gnocchi-api.templates:6001 +msgid "" +"OpenStack supports using availability zones, with each region representing a " +"location. Please enter the zone that you wish to use when registering the " +"endpoint." +msgstr "" + +#~ msgid "Pipeline flavor:" +#~ msgstr "Odmiana potoku:" diff --git a/debian/po/pt.po b/debian/po/pt.po new file mode 100644 index 00000000..367d0291 --- /dev/null +++ b/debian/po/pt.po @@ -0,0 +1,252 @@ +# glance debconf portuguese messages +# Copyright (C) 2012 the glance'S COPYRIGHT HOLDER +# This file is distributed under the same license as the glance package. +# Pedro Ribeiro , 2012 +# +msgid "" +msgstr "" +"Project-Id-Version: glance\n" +"Report-Msgid-Bugs-To: gnocchi@packages.debian.org\n" +"POT-Creation-Date: 2015-11-25 09:24+0000\n" +"PO-Revision-Date: 2013-10-20 23:43+0100\n" +"Last-Translator: Pedro Ribeiro \n" +"Language-Team: Potuguese \n" +"Language: \n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" + +#. Type: string +#. Description +#: ../gnocchi-common.templates:2001 +msgid "Authentication server hostname:" +msgstr "Nome do servidor de autenticação:" + +#. Type: string +#. Description +#: ../gnocchi-common.templates:2001 +msgid "" +"Please specify the hostname of the authentication server for Gnocchi. 
" +"Typically this is also the hostname of the OpenStack Identity Service " +"(Keystone)." +msgstr "" +"Indique o nome do seu servidor de autenticação para o Gnocchi. Normalmente, é " +"o nome do seu Serviço de Identidade OpenStack (Keystone)." + +#. Type: string +#. Description +#. Translators: a "tenant" in OpenStack world is +#. an entity that contains one or more username/password couples. +#. It's typically the tenant that will be used for billing. Having more than one +#. username/password is very helpful in larger organization. +#. You're advised to either keep "tenant" without translating it +#. or keep it parenthezised. Example for French: +#. locataire ("tenant") +#: ../gnocchi-common.templates:3001 +msgid "Authentication server tenant name:" +msgstr "Nome do 'tenant' do servidor de autenticação:" + +#. Type: string +#. Description +#. Translators: a "tenant" in OpenStack world is +#. an entity that contains one or more username/password couples. +#. It's typically the tenant that will be used for billing. Having more than one +#. username/password is very helpful in larger organization. +#. You're advised to either keep "tenant" without translating it +#. or keep it parenthezised. Example for French: +#. locataire ("tenant") +#: ../gnocchi-common.templates:3001 +msgid "Please specify the authentication server tenant name." +msgstr "Indique, por favor, o nome do 'tenant' do servidor de autenticação." + +#. Type: string +#. Description +#: ../gnocchi-common.templates:4001 +msgid "Authentication server username:" +msgstr "Nome de utilizador para o servidor de autenticação:" + +#. Type: string +#. Description +#: ../gnocchi-common.templates:4001 +msgid "Please specify the username to use with the authentication server." +msgstr "" +"Indique, por favor, o nome de utilizador para o servidor de autenticação." + +#. Type: password +#. 
Description +#: ../gnocchi-common.templates:5001 +msgid "Authentication server password:" +msgstr "Palavra chave do servidor de autenticação:" + +#. Type: password +#. Description +#: ../gnocchi-common.templates:5001 +msgid "Please specify the password to use with the authentication server." +msgstr "" +"Indique, por favor, a palavra-chave para usar no servidor de autenticação." + +#. Type: boolean +#. Description +#: ../gnocchi-common.templates:6001 +msgid "Set up a database for Gnocchi?" +msgstr "Configurar uma base de dados para o Gnocchi?" + +#. Type: boolean +#. Description +#: ../gnocchi-common.templates:6001 +#, fuzzy +#| msgid "" +#| "No database has been set up for glance-registry or glance-api to use. " +#| "Before continuing, you should make sure you have the following " +#| "information:" +msgid "" +"No database has been set up for Gnocchi to use. Before continuing, you " +"should make sure you have the following information:" +msgstr "" +"Não foi definida nenhuma base de dados para ser usada pelo glance-registry " +"ou glance-api. Antes de continuar, certifique-se que tem:" + +#. Type: boolean +#. Description +#: ../gnocchi-common.templates:6001 +msgid "" +" * the type of database that you want to use;\n" +" * the database server hostname (that server must allow TCP connections from " +"this\n" +" machine);\n" +" * a username and password to access the database." +msgstr "" +" * o tipo de base de dados que quer usar;\n" +" * o nome do servidor (esse servidor deve aceitar ligações TCP a partir\n" +"desta máquina);\n" +" * o nome de utilizador e palavra passe para aceder à base de dados." + +#. Type: boolean +#. Description +#: ../gnocchi-common.templates:6001 +msgid "" +"If some of these requirements are missing, do not choose this option and run " +"with regular SQLite support." +msgstr "" +"Se algum destes requisitos estiver em falta, rejeite esta opção e execute " +"com o suporte SQLite normal." + +#. Type: boolean +#. 
Description +#: ../gnocchi-common.templates:6001 +#, fuzzy +#| msgid "" +#| "You can change this setting later on by running \"dpkg-reconfigure -plow " +#| "glance-common\"." +msgid "" +"You can change this setting later on by running \"dpkg-reconfigure -plow " +"gnocchi-common\"." +msgstr "" +"Pode mudar esta definição mais tarde ao executar \"dpkg-reconfigure -plow " +"glance-common\"." + +#. Type: boolean +#. Description +#: ../gnocchi-api.templates:2001 +msgid "Register Gnocchi in the Keystone endpoint catalog?" +msgstr "Registar o Gnocchi no catálogo de pontos finais do Keystone?" + +#. Type: boolean +#. Description +#: ../gnocchi-api.templates:2001 +msgid "" +"Each OpenStack service (each API) should be registered in order to be " +"accessible. This is done using \"keystone service-create\" and \"keystone " +"endpoint-create\". This can be done automatically now." +msgstr "" +"Cada serviço Openstack (cada API) deve estar registado para que seja " +"acessível. Isto é feito com \"keystone service-create\" e \"keystone " +"endpoint-create\". Pode correr estes comandos agora." + +#. Type: boolean +#. Description +#: ../gnocchi-api.templates:2001 +msgid "" +"Note that you will need to have an up and running Keystone server on which " +"to connect using the Keystone authentication token." +msgstr "" +"Note que irá necessitar de ter um servidor keystone a correr e pronto para " +"receber ligações autenticadas com o token de autenticação Keystone." + +#. Type: string +#. Description +#: ../gnocchi-api.templates:3001 +msgid "Keystone server IP address:" +msgstr "Endereço IP do keystone:" + +#. Type: string +#. Description +#: ../gnocchi-api.templates:3001 +msgid "" +"Please enter the IP address of the Keystone server, so that gnocchi-api can " +"contact Keystone to do the Gnocchi service and endpoint creation." 
+msgstr "" +"Indique o endereço IP do seu servidor keystone, de modo a que o gnocchi-api " +"possa contactar o Keystone para criar o serviço e ponto final Gnocchi." + +#. Type: password +#. Description +#: ../gnocchi-api.templates:4001 +msgid "Keystone authentication token:" +msgstr "Token de Autenticação Keystone:" + +#. Type: password +#. Description +#: ../gnocchi-api.templates:4001 +#, fuzzy +#| msgid "" +#| "To configure its endpoint in Keystone, glance-api needs the Keystone " +#| "authentication token." +msgid "" +"To configure its endpoint in Keystone, gnocchi-api needs the Keystone " +"authentication token." +msgstr "" +"Para configurar o seu ponto final no Keystone, o glance-api precisa do token " +"de autenticação do Keystone." + +#. Type: string +#. Description +#: ../gnocchi-api.templates:5001 +msgid "Gnocchi endpoint IP address:" +msgstr "Endereço IP do ponto final Gnocchi:" + +#. Type: string +#. Description +#: ../gnocchi-api.templates:5001 +msgid "Please enter the IP address that will be used to contact Gnocchi." +msgstr "Indique o endereço IP que irá ser usado para contactar o Gnocchi." + +#. Type: string +#. Description +#: ../gnocchi-api.templates:5001 +msgid "" +"This IP address should be accessible from the clients that will use this " +"service, so if you are installing a public cloud, this should be a public IP " +"address." +msgstr "" +"Este endereço IP deve ser acessível a partir dos clientes que irão usar este " +"serviço, portanto se está a instalar uma cloud pública, este deve ser um " +"endereço IP público." + +#. Type: string +#. Description +#: ../gnocchi-api.templates:6001 +msgid "Name of the region to register:" +msgstr "Nome da região a registar:" + +#. Type: string +#. Description +#: ../gnocchi-api.templates:6001 +msgid "" +"OpenStack supports using availability zones, with each region representing a " +"location. Please enter the zone that you wish to use when registering the " +"endpoint." 
+msgstr "" +"O Openstack pode ser usado com zonas de disponibilidade, com cada região a " +"representar uma localização. Por favor, indique a zona que quer usar ao " +"registar um ponto final." diff --git a/debian/po/pt_BR.po b/debian/po/pt_BR.po new file mode 100644 index 00000000..465caa75 --- /dev/null +++ b/debian/po/pt_BR.po @@ -0,0 +1,260 @@ +# Debconf translations for glance. +# Copyright (C) 2012 THE glance'S COPYRIGHT HOLDER +# This file is distributed under the same license as the glance package. +# Adriano Rafael Gomes , 2012-2014. +# +msgid "" +msgstr "" +"Project-Id-Version: glance 2014.1.2-1\n" +"Report-Msgid-Bugs-To: gnocchi@packages.debian.org\n" +"POT-Creation-Date: 2015-11-25 09:24+0000\n" +"PO-Revision-Date: 2014-09-04 08:49-0300\n" +"Last-Translator: Adriano Rafael Gomes \n" +"Language-Team: Brazilian Portuguese \n" +"Language: pt_BR\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" + +#. Type: string +#. Description +#: ../gnocchi-common.templates:2001 +msgid "Authentication server hostname:" +msgstr "Nome de máquina do servidor de autenticação:" + +#. Type: string +#. Description +#: ../gnocchi-common.templates:2001 +msgid "" +"Please specify the hostname of the authentication server for Gnocchi. " +"Typically this is also the hostname of the OpenStack Identity Service " +"(Keystone)." +msgstr "" +"Por favor, especifique o nome de máquina do seu servidor de autenticação " +"para o Gnocchi. Tipicamente, esse é também o nome de máquina do Serviço de " +"Identidade do OpenStack (Keystone)." + +#. Type: string +#. Description +#. Translators: a "tenant" in OpenStack world is +#. an entity that contains one or more username/password couples. +#. It's typically the tenant that will be used for billing. Having more than one +#. username/password is very helpful in larger organization. +#. You're advised to either keep "tenant" without translating it +#. or keep it parenthezised. 
Example for French: +#. locataire ("tenant") +#: ../gnocchi-common.templates:3001 +msgid "Authentication server tenant name:" +msgstr "Nome de locatário (\"tenant\") do servidor de autenticação:" + +#. Type: string +#. Description +#. Translators: a "tenant" in OpenStack world is +#. an entity that contains one or more username/password couples. +#. It's typically the tenant that will be used for billing. Having more than one +#. username/password is very helpful in larger organization. +#. You're advised to either keep "tenant" without translating it +#. or keep it parenthezised. Example for French: +#. locataire ("tenant") +#: ../gnocchi-common.templates:3001 +msgid "Please specify the authentication server tenant name." +msgstr "" +"Por favor, especifique o nome de locatário (\"tenant\") do servidor de " +"autenticação." + +#. Type: string +#. Description +#: ../gnocchi-common.templates:4001 +msgid "Authentication server username:" +msgstr "Nome de usuário do servidor de autenticação:" + +#. Type: string +#. Description +#: ../gnocchi-common.templates:4001 +msgid "Please specify the username to use with the authentication server." +msgstr "" +"Por favor, especifique o nome de usuário para usar com o servidor de " +"autenticação." + +#. Type: password +#. Description +#: ../gnocchi-common.templates:5001 +msgid "Authentication server password:" +msgstr "Senha do servidor de autenticação:" + +#. Type: password +#. Description +#: ../gnocchi-common.templates:5001 +msgid "Please specify the password to use with the authentication server." +msgstr "" +"Por favor, especifique a senha para usar com o servidor de autenticação." + +#. Type: boolean +#. Description +#: ../gnocchi-common.templates:6001 +msgid "Set up a database for Gnocchi?" +msgstr "Configurar um banco de dados para o Gnocchi?" + +#. Type: boolean +#. Description +#: ../gnocchi-common.templates:6001 +#, fuzzy +#| msgid "" +#| "No database has been set up for glance-registry or glance-api to use. 
" +#| "Before continuing, you should make sure you have the following " +#| "information:" +msgid "" +"No database has been set up for Gnocchi to use. Before continuing, you " +"should make sure you have the following information:" +msgstr "" +"Nenhum banco de dados foi configurado para o glance-registry ou para o " +"glance-api utilizar. Antes de continuar, você deve se certificar que você " +"tem as seguintes informações:" + +#. Type: boolean +#. Description +#: ../gnocchi-common.templates:6001 +msgid "" +" * the type of database that you want to use;\n" +" * the database server hostname (that server must allow TCP connections from " +"this\n" +" machine);\n" +" * a username and password to access the database." +msgstr "" +" * o tipo de banco de dados que você quer usar;\n" +" * o nome de máquina do servidor de banco de dados (tal servidor deve\n" +" permitir conexões TCP a partir deste computador);\n" +" * um usuário e uma senha para acessar o banco de dados." + +#. Type: boolean +#. Description +#: ../gnocchi-common.templates:6001 +msgid "" +"If some of these requirements are missing, do not choose this option and run " +"with regular SQLite support." +msgstr "" +"Se algum desses requisitos estiver faltando, rejeite essa opção e execute " +"com suporte regular ao SQLite." + +#. Type: boolean +#. Description +#: ../gnocchi-common.templates:6001 +#, fuzzy +#| msgid "" +#| "You can change this setting later on by running \"dpkg-reconfigure -plow " +#| "glance-common\"." +msgid "" +"You can change this setting later on by running \"dpkg-reconfigure -plow " +"gnocchi-common\"." +msgstr "" +"Você pode mudar essa configuração depois, executando \"dpkg-reconfigure -" +"plow glance-common\"." + +#. Type: boolean +#. Description +#: ../gnocchi-api.templates:2001 +msgid "Register Gnocchi in the Keystone endpoint catalog?" +msgstr "Registrar o Gnocchi no catálogo de \"endpoint\" do Keystone?" + +#. Type: boolean +#. 
Description +#: ../gnocchi-api.templates:2001 +msgid "" +"Each OpenStack service (each API) should be registered in order to be " +"accessible. This is done using \"keystone service-create\" and \"keystone " +"endpoint-create\". This can be done automatically now." +msgstr "" +"Cada serviço OpenStack (cada API) deve ser registrado para ser acessível. " +"Isso é feito usando \"keystone service-create\" e \"keystone endpoint-create" +"\". Isso pode ser feito automaticamente agora." + +#. Type: boolean +#. Description +#: ../gnocchi-api.templates:2001 +msgid "" +"Note that you will need to have an up and running Keystone server on which " +"to connect using the Keystone authentication token." +msgstr "" +"Note que você precisará ter um servidor Keystone configurado e em execução " +"no qual conectar usando o \"token\" de autenticação do Keystone." + +#. Type: string +#. Description +#: ../gnocchi-api.templates:3001 +msgid "Keystone server IP address:" +msgstr "Endereço IP do servidor Keystone:" + +#. Type: string +#. Description +#: ../gnocchi-api.templates:3001 +msgid "" +"Please enter the IP address of the Keystone server, so that gnocchi-api can " +"contact Keystone to do the Gnocchi service and endpoint creation." +msgstr "" +"Por favor, informe o endereço IP do servidor Keystone, de forma que o gnocchi-" +"api possa contatar o Keystone para efetuar a criação do \"endpoint\" e do " +"serviço Gnocchi." + +#. Type: password +#. Description +#: ../gnocchi-api.templates:4001 +msgid "Keystone authentication token:" +msgstr "\"Token\" de autenticação Keystone:" + +#. Type: password +#. Description +#: ../gnocchi-api.templates:4001 +#, fuzzy +#| msgid "" +#| "To configure its endpoint in Keystone, glance-api needs the Keystone " +#| "authentication token." +msgid "" +"To configure its endpoint in Keystone, gnocchi-api needs the Keystone " +"authentication token." 
+msgstr "" +"Para configurar o seu \"endpoint\" no Keystone, o glance-api precisa do " +"\"token\" de autenticação do Keystone." + +#. Type: string +#. Description +#: ../gnocchi-api.templates:5001 +msgid "Gnocchi endpoint IP address:" +msgstr "Endereço IP do \"endpoint\" Gnocchi:" + +#. Type: string +#. Description +#: ../gnocchi-api.templates:5001 +msgid "Please enter the IP address that will be used to contact Gnocchi." +msgstr "" +"Por favor, informe o endereço IP que será usado para contatar o Gnocchi." + +#. Type: string +#. Description +#: ../gnocchi-api.templates:5001 +msgid "" +"This IP address should be accessible from the clients that will use this " +"service, so if you are installing a public cloud, this should be a public IP " +"address." +msgstr "" +"Esse endereço IP deveria ser acessível a partir dos clientes que usarão esse " +"serviço, assim se você está instalando uma nuvem pública, ele deveria ser um " +"endereço IP público." + +#. Type: string +#. Description +#: ../gnocchi-api.templates:6001 +msgid "Name of the region to register:" +msgstr "Nome da região para registrar:" + +#. Type: string +#. Description +#: ../gnocchi-api.templates:6001 +msgid "" +"OpenStack supports using availability zones, with each region representing a " +"location. Please enter the zone that you wish to use when registering the " +"endpoint." +msgstr "" +"O OpenStack suporta usar zonas de disponibilidade, com cada região " +"representando uma localidade. Por favor, informe a zona que você deseja usar " +"ao registrar o \"endpoint\"." diff --git a/debian/po/ru.po b/debian/po/ru.po new file mode 100644 index 00000000..b37f8f15 --- /dev/null +++ b/debian/po/ru.po @@ -0,0 +1,251 @@ +# Copyright (C) YEAR THE PACKAGE'S COPYRIGHT HOLDER +# This file is distributed under the same license as the glance package. +# +# Yuri Kozlov , 2012, 2013. 
+msgid "" +msgstr "" +"Project-Id-Version: glance 2013.2-1\n" +"Report-Msgid-Bugs-To: gnocchi@packages.debian.org\n" +"POT-Creation-Date: 2015-11-25 09:24+0000\n" +"PO-Revision-Date: 2013-11-17 08:45+0400\n" +"Last-Translator: Yuri Kozlov \n" +"Language-Team: Russian \n" +"Language: ru\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"X-Generator: Lokalize 1.4\n" +"Plural-Forms: nplurals=3; plural=(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n" +"%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2);\n" + +#. Type: string +#. Description +#: ../gnocchi-common.templates:2001 +msgid "Authentication server hostname:" +msgstr "Имя узла сервера аутентификации:" + +#. Type: string +#. Description +#: ../gnocchi-common.templates:2001 +msgid "" +"Please specify the hostname of the authentication server for Gnocchi. " +"Typically this is also the hostname of the OpenStack Identity Service " +"(Keystone)." +msgstr "" +"Введите имя узла сервера аутентификации для Gnocchi. Данное имя обычно " +"совпадает с именем узла OpenStack Identity Service (Keystone)." + +#. Type: string +#. Description +#. Translators: a "tenant" in OpenStack world is +#. an entity that contains one or more username/password couples. +#. It's typically the tenant that will be used for billing. Having more than one +#. username/password is very helpful in larger organization. +#. You're advised to either keep "tenant" without translating it +#. or keep it parenthezised. Example for French: +#. locataire ("tenant") +#: ../gnocchi-common.templates:3001 +msgid "Authentication server tenant name:" +msgstr "Членское имя сервера аутентификации:" + +#. Type: string +#. Description +#. Translators: a "tenant" in OpenStack world is +#. an entity that contains one or more username/password couples. +#. It's typically the tenant that will be used for billing. Having more than one +#. username/password is very helpful in larger organization. +#. 
You're advised to either keep "tenant" without translating it +#. or keep it parenthezised. Example for French: +#. locataire ("tenant") +#: ../gnocchi-common.templates:3001 +msgid "Please specify the authentication server tenant name." +msgstr "Укажите членское (tenant) имя сервера аутентификации." + +#. Type: string +#. Description +#: ../gnocchi-common.templates:4001 +msgid "Authentication server username:" +msgstr "Имя пользователя для сервера аутентификации:" + +#. Type: string +#. Description +#: ../gnocchi-common.templates:4001 +msgid "Please specify the username to use with the authentication server." +msgstr "Введите имя пользователя для работы с сервером аутентификации." + +#. Type: password +#. Description +#: ../gnocchi-common.templates:5001 +msgid "Authentication server password:" +msgstr "Пароль для сервера аутентификации:" + +#. Type: password +#. Description +#: ../gnocchi-common.templates:5001 +msgid "Please specify the password to use with the authentication server." +msgstr "Введите пароль для работы с сервером аутентификации." + +#. Type: boolean +#. Description +#: ../gnocchi-common.templates:6001 +msgid "Set up a database for Gnocchi?" +msgstr "Настроить базу данных для Gnocchi?" + +#. Type: boolean +#. Description +#: ../gnocchi-common.templates:6001 +#, fuzzy +#| msgid "" +#| "No database has been set up for glance-registry or glance-api to use. " +#| "Before continuing, you should make sure you have the following " +#| "information:" +msgid "" +"No database has been set up for Gnocchi to use. Before continuing, you " +"should make sure you have the following information:" +msgstr "" +"Для использования glance-registry или glance-api требуется база данных, " +"которая пока не настроена. Перед тем как продолжить, проверьте:" + +#. Type: boolean +#. 
Description +#: ../gnocchi-common.templates:6001 +msgid "" +" * the type of database that you want to use;\n" +" * the database server hostname (that server must allow TCP connections from " +"this\n" +" machine);\n" +" * a username and password to access the database." +msgstr "" +" * тип базы данных, который хотите использовать;\n" +" * имя узла сервера базы данных (этот сервер должен принимать\n" +" TCP-соединения с этой машины);\n" +" * имя пользователя и пароль для доступа к базе данных." + +#. Type: boolean +#. Description +#: ../gnocchi-common.templates:6001 +msgid "" +"If some of these requirements are missing, do not choose this option and run " +"with regular SQLite support." +msgstr "" +"Если не хватает хотя бы одного параметра, ответьте отрицательно и включите " +"поддержку SQLite." + +#. Type: boolean +#. Description +#: ../gnocchi-common.templates:6001 +#, fuzzy +#| msgid "" +#| "You can change this setting later on by running \"dpkg-reconfigure -plow " +#| "glance-common\"." +msgid "" +"You can change this setting later on by running \"dpkg-reconfigure -plow " +"gnocchi-common\"." +msgstr "" +"Позднее, вы можете изменить эту настройку, запустив «dpkg-reconfigure -plow " +"glance-common»." + +#. Type: boolean +#. Description +#: ../gnocchi-api.templates:2001 +msgid "Register Gnocchi in the Keystone endpoint catalog?" +msgstr "Зарегистрировать Gnocchi в каталоге конечных точек Keystone?" + +#. Type: boolean +#. Description +#: ../gnocchi-api.templates:2001 +msgid "" +"Each OpenStack service (each API) should be registered in order to be " +"accessible. This is done using \"keystone service-create\" and \"keystone " +"endpoint-create\". This can be done automatically now." +msgstr "" +"Для доступа к службам Openstack (каждому API) их нужно регистрировать. Это " +"выполняется с помощью команды «keystone service-create» и «keystone endpoint-" +"create». Это может быть сделано автоматически прямо сейчас." + +#. Type: boolean +#. 
Description +#: ../gnocchi-api.templates:2001 +msgid "" +"Note that you will need to have an up and running Keystone server on which " +"to connect using the Keystone authentication token." +msgstr "" +"Заметим, что у вас должен быть работающий сервер Keystone, к которому будет " +"произведено подключение с помощью токена аутентификации Keystone." + +#. Type: string +#. Description +#: ../gnocchi-api.templates:3001 +msgid "Keystone server IP address:" +msgstr "IP-адрес сервера Keystone:" + +#. Type: string +#. Description +#: ../gnocchi-api.templates:3001 +msgid "" +"Please enter the IP address of the Keystone server, so that gnocchi-api can " +"contact Keystone to do the Gnocchi service and endpoint creation." +msgstr "" +"Введите IP-адрес сервера Keystone для того, чтобы gnocchi-api могла " +"подключиться к Keystone для запуска службы Gnocchi и создания конечной точки." + +#. Type: password +#. Description +#: ../gnocchi-api.templates:4001 +msgid "Keystone authentication token:" +msgstr "Токен аутентификации Keystone:" + +#. Type: password +#. Description +#: ../gnocchi-api.templates:4001 +#, fuzzy +#| msgid "" +#| "To configure its endpoint in Keystone, glance-api needs the Keystone " +#| "authentication token." +msgid "" +"To configure its endpoint in Keystone, gnocchi-api needs the Keystone " +"authentication token." +msgstr "" +"Для настройки собственной конечной точки в Keystone glance-api требуется " +"токен аутентификации Keystone." + +#. Type: string +#. Description +#: ../gnocchi-api.templates:5001 +msgid "Gnocchi endpoint IP address:" +msgstr "IP-адрес конечной точки Gnocchi:" + +#. Type: string +#. Description +msgid "Please enter the IP address that will be used to contact Gnocchi." +msgstr "Введите IP-адрес, который будет использован для подключения к Gnocchi." + +#. Type: string +#. 
Description +#: ../gnocchi-api.templates:5001 +msgid "" +"This IP address should be accessible from the clients that will use this " +"service, so if you are installing a public cloud, this should be a public IP " +"address." +msgstr "" +"Этот IP-адрес должен быть доступен клиентам, которые будут использовать эту " +"службу, поэтому если вы разворачиваете открытое облако, то это должен быть " +"публичный IP-адрес." + +#. Type: string +#. Description +#: ../gnocchi-api.templates:6001 +msgid "Name of the region to register:" +msgstr "Название области для регистрации:" + +#. Type: string +#. Description +#: ../gnocchi-api.templates:6001 +msgid "" +"OpenStack supports using availability zones, with each region representing a " +"location. Please enter the zone that you wish to use when registering the " +"endpoint." +msgstr "" +"Openstack поддерживает разделение на зоны доступности, где каждая область " +"представляет определённое расположение. Введите зону, которую вы хотите " +"использовать при регистрации конечной точки." diff --git a/debian/po/sv.po b/debian/po/sv.po new file mode 100644 index 00000000..a48d0eb6 --- /dev/null +++ b/debian/po/sv.po @@ -0,0 +1,252 @@ +# Translation of glance debconf template to Swedish +# Copyright (C) 2012-2014 Martin Bagge +# This file is distributed under the same license as the glance package. +# +# Martin Bagge , 2012, 2014 +msgid "" +msgstr "" +"Project-Id-Version: glance\n" +"Report-Msgid-Bugs-To: gnocchi@packages.debian.org\n" +"POT-Creation-Date: 2015-11-25 09:24+0000\n" +"PO-Revision-Date: 2014-01-09 10:35+0100\n" +"Last-Translator: Martin Bagge / brother \n" +"Language-Team: Swedish \n" +"Language: Swedish\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"X-Generator: Poedit 1.5.4\n" + +#. Type: string +#. 
Description +#: ../gnocchi-common.templates:2001 +msgid "Authentication server hostname:" +msgstr "Värdnamn för identifieringsserver:Värdnamn för identifieringsserver:" + +#. Type: string +#. Description +#: ../gnocchi-common.templates:2001 +msgid "" +"Please specify the hostname of the authentication server for Gnocchi. " +"Typically this is also the hostname of the OpenStack Identity Service " +"(Keystone)." +msgstr "" +"Ange värdnamn till din Gnocchi-identifieringsserver. Detta är vanligtvis " +"samma värdnamn som till din OpenStack-identitetstjänst (Keystone)." + +#. Type: string +#. Description +#. Translators: a "tenant" in OpenStack world is +#. an entity that contains one or more username/password couples. +#. It's typically the tenant that will be used for billing. Having more than one +#. username/password is very helpful in larger organization. +#. You're advised to either keep "tenant" without translating it +#. or keep it parenthezised. Example for French: +#. locataire ("tenant") +#: ../gnocchi-common.templates:3001 +msgid "Authentication server tenant name:" +msgstr "Namn för \"tenant\" (administratör) på identifieringsservern:" + +#. Type: string +#. Description +#. Translators: a "tenant" in OpenStack world is +#. an entity that contains one or more username/password couples. +#. It's typically the tenant that will be used for billing. Having more than one +#. username/password is very helpful in larger organization. +#. You're advised to either keep "tenant" without translating it +#. or keep it parenthezised. Example for French: +#. locataire ("tenant") +#: ../gnocchi-common.templates:3001 +msgid "Please specify the authentication server tenant name." +msgstr "" +"Ange \"tenant\"-namn för identifieringsservern. (\"Tenant\" är ungefär " +"översättningsbart till \"administratör\")." + +#. Type: string +#. Description +#: ../gnocchi-common.templates:4001 +msgid "Authentication server username:" +msgstr "Användarnamn på identifieringsservern:" + +#. 
Type: string +#. Description +#: ../gnocchi-common.templates:4001 +msgid "Please specify the username to use with the authentication server." +msgstr "" +"Ange användarnamnet som ska användas för att komma åt identifieringsservern." + +#. Type: password +#. Description +#: ../gnocchi-common.templates:5001 +msgid "Authentication server password:" +msgstr "Lösenord på identifieringsservern:" + +#. Type: password +#. Description +#: ../gnocchi-common.templates:5001 +msgid "Please specify the password to use with the authentication server." +msgstr "" +"Ange lösenordet som ska användas för att komma åt identifieringsservern." + +#. Type: boolean +#. Description +#: ../gnocchi-common.templates:6001 +msgid "Set up a database for Gnocchi?" +msgstr "Ska en databas installeras för Gnocchi?" + +#. Type: boolean +#. Description +#: ../gnocchi-common.templates:6001 +#, fuzzy +#| msgid "" +#| "No database has been set up for glance-registry or glance-api to use. " +#| "Before continuing, you should make sure you have the following " +#| "information:" +msgid "" +"No database has been set up for Gnocchi to use. Before continuing, you " +"should make sure you have the following information:" +msgstr "" +"Ingen databas har installerats för glance-registry. Innan du fortsätter " +"behöver följande finnas tillgängligt:" + +#. Type: boolean +#. Description +#: ../gnocchi-common.templates:6001 +msgid "" +" * the type of database that you want to use;\n" +" * the database server hostname (that server must allow TCP connections from " +"this\n" +" machine);\n" +" * a username and password to access the database." +msgstr "" +" * vilken databastyp du vill använda.\n" +" * serverns värdnamn (som måste kunna ta emot TCP-anslutningar\n" +" från den här maskinen)\n" +" * användarnamn och lösenord för att komma åt databasen." + +#. Type: boolean +#. 
Description +#: ../gnocchi-common.templates:6001 +msgid "" +"If some of these requirements are missing, do not choose this option and run " +"with regular SQLite support." +msgstr "" +"Om något av dessa krav saknar bör du avböja detta alternativ och fortsätta " +"använda SQLite-stödet." + +#. Type: boolean +#. Description +#: ../gnocchi-common.templates:6001 +#, fuzzy +#| msgid "" +#| "You can change this setting later on by running \"dpkg-reconfigure -plow " +#| "glance-common\"." +msgid "" +"You can change this setting later on by running \"dpkg-reconfigure -plow " +"gnocchi-common\"." +msgstr "" +"Denna inställning kan ändras senare genom att köra \"dpkg-reconfigure -plow " +"neutron\"." + +#. Type: boolean +#. Description +#: ../gnocchi-api.templates:2001 +msgid "Register Gnocchi in the Keystone endpoint catalog?" +msgstr "Ska Gnocchi registreras i keystones katalog med ändpunkter?" + +#. Type: boolean +#. Description +#: ../gnocchi-api.templates:2001 +msgid "" +"Each OpenStack service (each API) should be registered in order to be " +"accessible. This is done using \"keystone service-create\" and \"keystone " +"endpoint-create\". This can be done automatically now." +msgstr "" +"Alla OpenStack-tjänster (varje API) ska registreras för att kunna användas. " +"Detta görs med kommandona \"keystone service-create\" och \"keystone " +"endpoint-create\". Detta kan göras automatiskt nu." + +#. Type: boolean +#. Description +#: ../gnocchi-api.templates:2001 +msgid "" +"Note that you will need to have an up and running Keystone server on which " +"to connect using the Keystone authentication token." +msgstr "OBS. Du behöver ha en fungerande keystone-server att ansluta till." + +#. Type: string +#. Description +#: ../gnocchi-api.templates:3001 +msgid "Keystone server IP address:" +msgstr "IP-adress till Keystone:" + +#. Type: string +#. 
Description +#: ../gnocchi-api.templates:3001 +msgid "" +"Please enter the IP address of the Keystone server, so that gnocchi-api can " +"contact Keystone to do the Gnocchi service and endpoint creation." +msgstr "" +"Ange IP-adressen till din Keystone-server så att glance-api kan kontakta " +"Keystone för att lägga till Gnocchi-tjänsten som en ändpunkt." + +#. Type: password +#. Description +#: ../gnocchi-api.templates:4001 +msgid "Keystone authentication token:" +msgstr "Autetiseringsvärde för Keystone:" + +#. Type: password +#. Description +#: ../gnocchi-api.templates:4001 +#, fuzzy +#| msgid "" +#| "To configure its endpoint in Keystone, glance-api needs the Keystone " +#| "authentication token." +msgid "" +"To configure its endpoint in Keystone, gnocchi-api needs the Keystone " +"authentication token." +msgstr "" +"För att lägga till ändpunkt i Keystone behöver glance-api ett " +"autentiseringsvärde för Keystone." + +#. Type: string +#. Description +#: ../gnocchi-api.templates:5001 +msgid "Gnocchi endpoint IP address:" +msgstr "IP-adress för Gnocchi-ändpunkt:" + +#. Type: string +#. Description +#: ../gnocchi-api.templates:5001 +msgid "Please enter the IP address that will be used to contact Gnocchi." +msgstr "Ange den IP-adress som ska användas för att kontakta Gnocchi." + +#. Type: string +#. Description +#: ../gnocchi-api.templates:5001 +msgid "" +"This IP address should be accessible from the clients that will use this " +"service, so if you are installing a public cloud, this should be a public IP " +"address." +msgstr "" +"Denna IP-adress ska vara nåbar från klienterna som vill använda den här " +"tjänsten. Om detta är en publik molntjänst så ska det vara en publik IP-" +"adress." + +#. Type: string +#. Description +#: ../gnocchi-api.templates:6001 +msgid "Name of the region to register:" +msgstr "Regionnamn:" + +#. Type: string +#. 
Description +#: ../gnocchi-api.templates:6001 +msgid "" +"OpenStack supports using availability zones, with each region representing a " +"location. Please enter the zone that you wish to use when registering the " +"endpoint." +msgstr "" +"OpenStack kan användas med tillgänglighetszoner. Varje region representerar " +"en plats. Ange zonen som ska användas när ändpunkten registreras." diff --git a/debian/po/zh_CN.po b/debian/po/zh_CN.po new file mode 100644 index 00000000..f5c3d51c --- /dev/null +++ b/debian/po/zh_CN.po @@ -0,0 +1,245 @@ +# SOME DESCRIPTIVE TITLE. +# Copyright (C) YEAR THE PACKAGE'S COPYRIGHT HOLDER +# This file is distributed under the same license as the PACKAGE package. +# FIRST AUTHOR , YEAR. +# +msgid "" +msgstr "" +"Project-Id-Version: glance\n" +"Report-Msgid-Bugs-To: gnocchi@packages.debian.org\n" +"POT-Creation-Date: 2015-11-25 09:24+0000\n" +"PO-Revision-Date: 2012-08-27 17:14+0800\n" +"Last-Translator: ben \n" +"Language-Team: LANGUAGE \n" +"Language: \n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" + +#. Type: string +#. Description +#: ../gnocchi-common.templates:2001 +#, fuzzy +#| msgid "Auth server admin token:" +msgid "Authentication server hostname:" +msgstr "Auth 服务器管理token:" + +#. Type: string +#. Description +#: ../gnocchi-common.templates:2001 +msgid "" +"Please specify the hostname of the authentication server for Gnocchi. " +"Typically this is also the hostname of the OpenStack Identity Service " +"(Keystone)." +msgstr "" +"请指定您的Gnocchi认证服务器的URL。一般来说这个URL也是您的OpenStack身份服务的" +"URL(keystone)。" + +#. Type: string +#. Description +#. Translators: a "tenant" in OpenStack world is +#. an entity that contains one or more username/password couples. +#. It's typically the tenant that will be used for billing. Having more than one +#. username/password is very helpful in larger organization. +#. You're advised to either keep "tenant" without translating it +#. 
or keep it parenthezised. Example for French: +#. locataire ("tenant") +#: ../gnocchi-common.templates:3001 +#, fuzzy +#| msgid "Auth server admin token:" +msgid "Authentication server tenant name:" +msgstr "Auth 服务器管理token:" + +#. Type: string +#. Description +#. Translators: a "tenant" in OpenStack world is +#. an entity that contains one or more username/password couples. +#. It's typically the tenant that will be used for billing. Having more than one +#. username/password is very helpful in larger organization. +#. You're advised to either keep "tenant" without translating it +#. or keep it parenthezised. Example for French: +#. locataire ("tenant") +#: ../gnocchi-common.templates:3001 +msgid "Please specify the authentication server tenant name." +msgstr "" + +#. Type: string +#. Description +#: ../gnocchi-common.templates:4001 +#, fuzzy +#| msgid "Auth server admin token:" +msgid "Authentication server username:" +msgstr "Auth 服务器管理token:" + +#. Type: string +#. Description +#: ../gnocchi-common.templates:4001 +msgid "Please specify the username to use with the authentication server." +msgstr "" + +#. Type: password +#. Description +#: ../gnocchi-common.templates:5001 +#, fuzzy +#| msgid "Auth server admin token:" +msgid "Authentication server password:" +msgstr "Auth 服务器管理token:" + +#. Type: password +#. Description +#: ../gnocchi-common.templates:5001 +msgid "Please specify the password to use with the authentication server." +msgstr "" + +#. Type: boolean +#. Description +#: ../gnocchi-common.templates:6001 +#, fuzzy +#| msgid "Set up a database for glance-registry?" +msgid "Set up a database for Gnocchi?" +msgstr "为glance-registry设置数据库?" + +#. Type: boolean +#. Description +#: ../gnocchi-common.templates:6001 +#, fuzzy +#| msgid "" +#| "No database has been set up for glance-registry to use. Before " +#| "continuing, you should make sure you have:" +msgid "" +"No database has been set up for Gnocchi to use. 
Before continuing, you " +"should make sure you have the following information:" +msgstr "" +"未曾为glance-registry 设置数据库。如果你想现在设置,请确定你有以下信息:" + +#. Type: boolean +#. Description +#: ../gnocchi-common.templates:6001 +#, fuzzy +#| msgid "" +#| " - the server host name (that server must allow TCP connections\n" +#| " from this machine);\n" +#| " - a username and password to access the database.\n" +#| " - A database type that you want to use." +msgid "" +" * the type of database that you want to use;\n" +" * the database server hostname (that server must allow TCP connections from " +"this\n" +" machine);\n" +" * a username and password to access the database." +msgstr "" +" * 数据库服务器的主机名 (需要这台主机的TCP链接);\n" +" * 访问这个数据库的用户名及密码;\n" +" * 你希望使用的数据库管理软件的类型。" + +#. Type: boolean +#. Description +#: ../gnocchi-common.templates:6001 +#, fuzzy +#| msgid "" +#| "If some of these requirements are missing, reject this option and run " +#| "with regular sqlite support." +msgid "" +"If some of these requirements are missing, do not choose this option and run " +"with regular SQLite support." +msgstr "如果部分需求缺失,请运行通用的SQLite。" + +#. Type: boolean +#. Description +#: ../gnocchi-common.templates:6001 +#, fuzzy +#| msgid "" +#| "You can change this setting later on by running 'dpkg-reconfigure -plow " +#| "glance-registry" +msgid "" +"You can change this setting later on by running \"dpkg-reconfigure -plow " +"gnocchi-common\"." +msgstr "" +"您可以通过运行\"dpkg-reconfigure-plow glance-registry\" 命令来修改配置。" + +#. Type: boolean +#. Description +#: ../gnocchi-api.templates:2001 +msgid "Register Gnocchi in the Keystone endpoint catalog?" +msgstr "" + +#. Type: boolean +#. Description +#: ../gnocchi-api.templates:2001 +msgid "" +"Each OpenStack service (each API) should be registered in order to be " +"accessible. This is done using \"keystone service-create\" and \"keystone " +"endpoint-create\". This can be done automatically now." +msgstr "" + +#. Type: boolean +#. 
Description +#: ../gnocchi-api.templates:2001 +msgid "" +"Note that you will need to have an up and running Keystone server on which " +"to connect using the Keystone authentication token." +msgstr "" + +#. Type: string +#. Description +#: ../gnocchi-api.templates:3001 +msgid "Keystone server IP address:" +msgstr "" + +#. Type: string +#. Description +#: ../gnocchi-api.templates:3001 +msgid "" +"Please enter the IP address of the Keystone server, so that gnocchi-api can " +"contact Keystone to do the Gnocchi service and endpoint creation." +msgstr "" + +#. Type: password +#. Description +#: ../gnocchi-api.templates:4001 +msgid "Keystone authentication token:" +msgstr "" + +#. Type: password +#. Description +#: ../gnocchi-api.templates:4001 +msgid "" +"To configure its endpoint in Keystone, gnocchi-api needs the Keystone " +"authentication token." +msgstr "" + +#. Type: string +#. Description +#: ../gnocchi-api.templates:5001 +msgid "Gnocchi endpoint IP address:" +msgstr "" + +#. Type: string +#. Description +#: ../gnocchi-api.templates:5001 +msgid "Please enter the IP address that will be used to contact Gnocchi." +msgstr "" + +#. Type: string +#. Description +#: ../gnocchi-api.templates:5001 +msgid "" +"This IP address should be accessible from the clients that will use this " +"service, so if you are installing a public cloud, this should be a public IP " +"address." +msgstr "" + +#. Type: string +#. Description +#: ../gnocchi-api.templates:6001 +msgid "Name of the region to register:" +msgstr "" + +#. Type: string +#. Description +#: ../gnocchi-api.templates:6001 +msgid "" +"OpenStack supports using availability zones, with each region representing a " +"location. Please enter the zone that you wish to use when registering the " +"endpoint." 
+msgstr "" -- GitLab From d27f740ae3e4330d17669ffa27cc6f779fc0b60b Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Wed, 18 Nov 2015 09:58:42 +0100 Subject: [PATCH 0039/1483] Add missing PrettyTable dependency It's used by Carbonara CLI tools. Change-Id: Ifc2c0ce7cdede66ad80e7d36e5271463440de773 (cherry picked from commit f287c0912b324106a0148fb758211627197e1165) --- requirements.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/requirements.txt b/requirements.txt index 636049af..f7a1b443 100644 --- a/requirements.txt +++ b/requirements.txt @@ -32,3 +32,4 @@ pymysql keystonemiddleware>=2.3.0 PasteDeploy sphinx_bootstrap_theme +prettytable -- GitLab From 0a6e2c562f9aab35a10596a168d67dfb4b08d44b Mon Sep 17 00:00:00 2001 From: xialinjuan Date: Fri, 27 Nov 2015 02:50:18 +0800 Subject: [PATCH 0040/1483] typos in rest.j2 1. duplicate "first" when list the aggregation method 2. typo "retrived" to retrieved Change-Id: I13d5f409d0cef67324f0b347d971d2ea9d8a2304 (cherry picked from commit 27953779c5fdb256c33e3b65a00bffb4fcb7c53b) --- doc/source/rest.j2 | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/doc/source/rest.j2 b/doc/source/rest.j2 index 5fae3763..a80eed31 100644 --- a/doc/source/rest.j2 +++ b/doc/source/rest.j2 @@ -69,8 +69,7 @@ method. It is possible to request for any other method by specifying the {{ scenarios['get-measures-max']['doc'] }} The list of aggregation method available is: *mean*, *sum*, *last*, *max*, -*min*, *std*, *median*, *first*, *count*, *first* and *Npct* (with 0 < N < -100). +*min*, *std*, *median*, *first*, *count* and *Npct* (with 0 < N < 100). Archive Policy ============== @@ -101,7 +100,7 @@ process measures back to 14:00 with a `back_window` of 0. If the `back_window` is set to 2, it will be possible to send measures with timestamp back to 12:00 (14:00 minus 2 times 1 hour). 
-The REST API allows to create archive policies this way: +The REST API allows to create archive policies in this way: {{ scenarios['create-archive-policy']['doc'] }} @@ -142,7 +141,7 @@ Archive Policy Rule Gnocchi provides the ability to define a mapping called `archive_policy_rule`. An archive policy rule defines a mapping between a metric and an archive policy. -This gives users ability to pre-define rules so an archive policy is assigned to +This gives users the ability to pre-define rules so an archive policy is assigned to metrics based on a matched pattern. An archive policy rule has a few properties: a name to identify it, an archive @@ -373,7 +372,7 @@ aggregation methods from the API server: Status ====== -The overall status of the Gnocchi installation can be retrived via an API call +The overall status of the Gnocchi installation can be retrieved via an API call reporting values such as the number of new measures to process for each metric: {{ scenarios['get-status']['doc'] }} -- GitLab From 768db9f649f2432fa244eca7f07cddf47b8a2ecb Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Wed, 14 Oct 2015 18:01:29 +0200 Subject: [PATCH 0041/1483] Depend on oslo.middleware, add CORS default conf Change-Id: Ia02ea19dc03aa95cb84cb98bce9b25612cd4e611 (cherry picked from commit 4d77bb915603b89b5040f08cf1c8eda0a90a76b2) --- etc/gnocchi/gnocchi-config-generator.conf | 1 + requirements.txt | 1 + 2 files changed, 2 insertions(+) diff --git a/etc/gnocchi/gnocchi-config-generator.conf b/etc/gnocchi/gnocchi-config-generator.conf index 5b05ea80..fa6ae57b 100644 --- a/etc/gnocchi/gnocchi-config-generator.conf +++ b/etc/gnocchi/gnocchi-config-generator.conf @@ -4,5 +4,6 @@ wrap_width = 79 namespace = gnocchi namespace = oslo.db namespace = oslo.log +namespace = oslo.middleware namespace = oslo.policy namespace = keystonemiddleware.auth_token diff --git a/requirements.txt b/requirements.txt index f7a1b443..7b730b2f 100644 --- a/requirements.txt +++ b/requirements.txt @@ -5,6 
+5,7 @@ oslo.log>=1.0.0 oslo.policy>=0.3.0 oslo.serialization>=1.4.0 oslo.utils>=1.6.0 +oslo.middleware oslosphinx>=2.2.0 # Apache-2.0 pandas>=0.17.0 pecan>=0.9 -- GitLab From d117738585ffc0aa2dd22a448310024ae550a61a Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Thu, 5 Nov 2015 14:47:19 +0100 Subject: [PATCH 0042/1483] carbonara: fix exception when creating metric MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit _create_metric() never returns MetricDoesNotExist, which makes no sense. But it can raise MetricAlreadyExists. Now, the way we use this function is always inside a lock that is tied to the metric, so it's impossible that this exception is raised in theory – and we never saw it so far in practice. But still, for the sake of it, let's fix that mistake! Change-Id: I256c3d6e7a0c4ac1c50af145b1938106289bce88 (cherry picked from commit b810d2252418b7ad617e4d9d9aeb6de52ddfbef6) --- gnocchi/storage/_carbonara.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/gnocchi/storage/_carbonara.py b/gnocchi/storage/_carbonara.py index f8878352..20353a20 100644 --- a/gnocchi/storage/_carbonara.py +++ b/gnocchi/storage/_carbonara.py @@ -189,7 +189,7 @@ class CarbonaraBasedStorage(storage.StorageDriver): except storage.MetricDoesNotExist: try: self._create_metric(metric) - except storage.MetricDoesNotExist: + except storage.MetricAlreadyExists: # Created in the mean time, do not worry pass ts = None -- GitLab From 42b482fa9861166e2f97699ae2cbf431bbd310f5 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Fri, 6 Nov 2015 15:18:50 +0100 Subject: [PATCH 0043/1483] Remove Ceilometer test dependency It's unused now. 
Change-Id: I413912c84f2df549f02a480cefcc09f4928bb351 (cherry picked from commit 6430d104566babfbba4ec73023e75de6632a7590) --- test-requirements.txt | 1 - 1 file changed, 1 deletion(-) diff --git a/test-requirements.txt b/test-requirements.txt index 34a3e900..713355df 100644 --- a/test-requirements.txt +++ b/test-requirements.txt @@ -1,4 +1,3 @@ -http://tarballs.openstack.org/ceilometer/ceilometer-master.tar.gz#egg=ceilometer gabbi>=0.101.2 coverage>=3.6 fixtures -- GitLab From 5d9e48fd2fe79227f6a9688066fb9be7324cd7e1 Mon Sep 17 00:00:00 2001 From: Pradeep Kilambi Date: Fri, 6 Nov 2015 15:48:35 -0500 Subject: [PATCH 0044/1483] Make swift timeout configurable Change-Id: I512b01c82310d923537015abef2f6ac1cfd45569 Closes-Bug: #1512329 (cherry picked from commit e0b317787cf08ea072efba857488fd13be66ef68) --- gnocchi/storage/swift.py | 7 ++++++- requirements.txt | 2 +- 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/gnocchi/storage/swift.py b/gnocchi/storage/swift.py index a8f5966a..651aa565 100644 --- a/gnocchi/storage/swift.py +++ b/gnocchi/storage/swift.py @@ -52,6 +52,10 @@ OPTS = [ cfg.StrOpt('swift_container_prefix', default='gnocchi', help='Prefix to namespace metric containers.'), + cfg.IntOpt('swift_timeout', + min=0, + default=300, + help='Connection timeout in seconds.'), ] @@ -68,7 +72,8 @@ class SwiftStorage(_carbonara.CarbonaraBasedStorage): preauthtoken=conf.swift_preauthtoken, user=conf.swift_user, key=conf.swift_key, - tenant_name=conf.swift_tenant_name) + tenant_name=conf.swift_tenant_name, + timeout=conf.swift_timeout) self._lock = _carbonara.CarbonaraBasedStorageToozLock(conf) self._container_prefix = conf.swift_container_prefix self.swift.put_container(self.MEASURE_PREFIX) diff --git a/requirements.txt b/requirements.txt index 7b730b2f..ed1cc583 100644 --- a/requirements.txt +++ b/requirements.txt @@ -9,7 +9,7 @@ oslo.middleware oslosphinx>=2.2.0 # Apache-2.0 pandas>=0.17.0 pecan>=0.9 -python-swiftclient +python-swiftclient>=2.5.0 
pytimeparse>=1.1.5 futures requests -- GitLab From 33d3a1a1dad0fa3414090c5f024134f769c2c91e Mon Sep 17 00:00:00 2001 From: xialinjuan Date: Mon, 16 Nov 2015 21:24:34 +0800 Subject: [PATCH 0045/1483] copyright Openstack should be OpenStack small typos, should use OpenStack instead of Openstack Change-Id: I2525c2604aa8abec9c49b994e0ff16d009920110 (cherry picked from commit f0a580c2a4458578c6d9fd943dc92f679c55885f) --- gnocchi/aggregates/__init__.py | 2 +- gnocchi/aggregates/moving_stats.py | 2 +- gnocchi/tests/test_aggregates.py | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/gnocchi/aggregates/__init__.py b/gnocchi/aggregates/__init__.py index 5d016132..93cac35a 100644 --- a/gnocchi/aggregates/__init__.py +++ b/gnocchi/aggregates/__init__.py @@ -1,6 +1,6 @@ # -*- encoding: utf-8 -*- # -# Copyright 2014 Openstack Foundation +# Copyright 2014 OpenStack Foundation # # Authors: Ana Malagon # diff --git a/gnocchi/aggregates/moving_stats.py b/gnocchi/aggregates/moving_stats.py index 16084325..fa4290ae 100644 --- a/gnocchi/aggregates/moving_stats.py +++ b/gnocchi/aggregates/moving_stats.py @@ -1,6 +1,6 @@ # -*- encoding: utf-8 -*- # -# Copyright 2014-2015 Openstack Foundation +# Copyright 2014-2015 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain diff --git a/gnocchi/tests/test_aggregates.py b/gnocchi/tests/test_aggregates.py index 71e74ecd..c9bf266c 100644 --- a/gnocchi/tests/test_aggregates.py +++ b/gnocchi/tests/test_aggregates.py @@ -1,6 +1,6 @@ # -*- encoding: utf-8 -*- # -# Copyright 2014-2015 Openstack Foundation +# Copyright 2014-2015 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain -- GitLab From 850c55d0f1c69d7857dea93df191d6f3e32e5b1e Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Mon, 16 Nov 2015 15:32:30 +0100 Subject: [PATCH 0046/1483] carbonara: optimize _first_block_timestamp Leverage _round_timestamp which is a lot faster than faking a resample using Pandas. Change-Id: I05709a728f1605227a80e2347a9436fc6bfcf832 (cherry picked from commit f672c8c7de66062dcfb563b5a77a078036241246) --- gnocchi/carbonara.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/gnocchi/carbonara.py b/gnocchi/carbonara.py index 8ea3dfcd..c73621a6 100644 --- a/gnocchi/carbonara.py +++ b/gnocchi/carbonara.py @@ -138,6 +138,11 @@ class TimeSerie(SerializableMixin): if value: return six.text_type(value.n) + value.rule_code + @staticmethod + def _round_timestamp(ts, freq): + return pandas.Timestamp( + (ts.value // freq.delta.value) * freq.delta.value) + class BoundTimeSerie(TimeSerie): def __init__(self, timestamps=None, values=None, @@ -218,8 +223,8 @@ class BoundTimeSerie(TimeSerie): return basic def _first_block_timestamp(self): - ts = self.ts[-1:].resample(self.block_size) - return (ts.index[-1] - (self.block_size * self.back_window)) + rounded = self._round_timestamp(self.ts.index[-1], self.block_size) + return rounded - (self.block_size * self.back_window) def _truncate(self): """Truncate the timeserie.""" @@ -297,11 +302,6 @@ class AggregatedTimeSerie(TimeSerie): # Remove empty points if any that could be added by aggregation self.ts = self.ts.dropna()[-self.max_size:] - @staticmethod - def _round_timestamp(ts, freq): - return pandas.Timestamp( - (ts.value // freq.delta.value) * freq.delta.value) - def _resample(self, after): if self.sampling: # Group by the sampling, and then apply the aggregation method on -- GitLab From eec02bac51d5abcd2361dd3ae3f44ee62a3db8c5 Mon Sep 17 00:00:00 2001 From: Pradeep Kilambi Date: Fri, 13 Nov 2015 16:45:30 -0500 Subject: [PATCH 0047/1483] Ensure file basepath exists On a 
manual gnocchi install, i run into OSError on /var/lib/gnocchi/measure. This is because the basepath doesnt exist yet. On devstack this is handled. But we should handle it if gnocchi is installed manually. Change-Id: Iabe00a7cadede851ea3147d98bc2eba0708f92c3 (cherry picked from commit c5cf626c5fc578c5a89529bcc03bb029d4e044bb) --- gnocchi/storage/file.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/gnocchi/storage/file.py b/gnocchi/storage/file.py index a6c529f2..021ccf6c 100644 --- a/gnocchi/storage/file.py +++ b/gnocchi/storage/file.py @@ -45,6 +45,11 @@ class FileStorage(_carbonara.CarbonaraBasedStorage): self.basepath = conf.file_basepath self.basepath_tmp = conf.file_basepath_tmp self._lock = _carbonara.CarbonaraBasedStorageToozLock(conf) + try: + os.mkdir(self.basepath) + except OSError as e: + if e.errno != errno.EEXIST: + raise self.measure_path = os.path.join(self.basepath, self.MEASURE_PREFIX) try: os.mkdir(self.measure_path) -- GitLab From 5ab54d99e57970fa746607f6278a719ad5b79357 Mon Sep 17 00:00:00 2001 From: Thomas Goirand Date: Wed, 2 Dec 2015 08:22:52 +0000 Subject: [PATCH 0048/1483] Added missing dbconfig-common dependency (Closes: #806538). --- debian/changelog | 6 ++++++ debian/control | 1 + 2 files changed, 7 insertions(+) diff --git a/debian/changelog b/debian/changelog index 127570cd..fa7bf2f5 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +gnocchi (1.3.0-3) unstable; urgency=medium + + * Added missing dbconfig-common dependency (Closes: #806538). + + -- Thomas Goirand Wed, 02 Dec 2015 08:22:21 +0000 + gnocchi (1.3.0-2) unstable; urgency=medium * Adds missing config file: api-paste.ini. 
diff --git a/debian/control b/debian/control index 9c90e5c9..3ac01b08 100644 --- a/debian/control +++ b/debian/control @@ -123,6 +123,7 @@ Description: Metric as a Service - Python 2.x Package: gnocchi-common Architecture: all Depends: adduser, + dbconfig-common, debconf, python-gnocchi (= ${binary:Version}), ${misc:Depends}, -- GitLab From db06ed8a374864b882757eb15b1cb6c810bc98ee Mon Sep 17 00:00:00 2001 From: Thomas Goirand Date: Wed, 2 Dec 2015 10:21:45 +0100 Subject: [PATCH 0049/1483] French debconf templates translation update (Closes: #806506). --- debian/changelog | 6 ++++++ debian/po/fr.po | 23 +++++++---------------- 2 files changed, 13 insertions(+), 16 deletions(-) diff --git a/debian/changelog b/debian/changelog index fa7bf2f5..7b99ceea 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +gnocchi (1.3.0-4) unstable; urgency=medium + + * French debconf templates translation update (Closes: #806506). + + -- Thomas Goirand Wed, 02 Dec 2015 10:21:02 +0100 + gnocchi (1.3.0-3) unstable; urgency=medium * Added missing dbconfig-common dependency (Closes: #806538). diff --git a/debian/po/fr.po b/debian/po/fr.po index 5e57e342..cee8809b 100644 --- a/debian/po/fr.po +++ b/debian/po/fr.po @@ -31,8 +31,8 @@ msgid "" "Typically this is also the hostname of the OpenStack Identity Service " "(Keystone)." msgstr "" -"Veuillez indiquer le nom d'hôte de votre serveur d'authentification pour " -"Gnocchi. Typiquement c'est également le nom d'hôte de votre Service " +"Veuillez indiquer le nom d'hôte du serveur d'authentification pour " +"Gnocchi. Typiquement c'est également le nom d'hôte du Service " "d'Identité OpenStack (Keystone)." #. Type: string @@ -99,17 +99,12 @@ msgstr "Installer une base de données pour Gnocchi ?" #. Type: boolean #. Description #: ../gnocchi-common.templates:6001 -#, fuzzy -#| msgid "" -#| "No database has been set up for glance-registry or glance-api to use. 
" -#| "Before continuing, you should make sure you have the following " -#| "information:" msgid "" "No database has been set up for Gnocchi to use. Before continuing, you " "should make sure you have the following information:" msgstr "" -"Aucune base de données n'a été installée pour le registre de gnocchi ou pour " -"l'API de Gnocchi. Avant de continuer, assurez vous d'avoir :" +"Aucune base de données n'a été installée pour " +"Gnocchi. Avant de continuer, assurez-vous d'avoir :" #. Type: boolean #. Description @@ -135,21 +130,17 @@ msgid "" "with regular SQLite support." msgstr "" "Si certains de ces prérequis sont manquants, ignorer cette option et " -"exécutez l'application avec le support SQLite normal." +"exécutez l'application avec la gestion SQLite normale." #. Type: boolean #. Description #: ../gnocchi-common.templates:6001 -#, fuzzy -#| msgid "" -#| "You can change this setting later on by running \"dpkg-reconfigure -plow " -#| "glance-common\"." msgid "" "You can change this setting later on by running \"dpkg-reconfigure -plow " "gnocchi-common\"." msgstr "" "Vous pouvez modifier ce réglage plus tard en lançant « dpkg-reconfigure -" -"plow glance-common »." +"plow gnocchi-common »." #. Type: boolean #. Description @@ -251,6 +242,6 @@ msgid "" "location. Please enter the zone that you wish to use when registering the " "endpoint." msgstr "" -"OpenStack supporte l'utilisation de zones disponibles, avec chaque région " +"OpenStack gère l'utilisation de zones disponibles, avec chaque région " "représentant un lieu. Veuillez entrer une zone que vous souhaitez utiliser " "lors de l'enregistrement d'un point d'accès." 
-- GitLab From 82969971d5e44498843048b192a638f5408477ba Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Wed, 2 Dec 2015 10:38:44 +0100 Subject: [PATCH 0050/1483] Make the wheel universal Change-Id: I91c14863d9c921ba1e5e890114311efbe4da9ad8 --- setup.cfg | 3 +++ 1 file changed, 3 insertions(+) diff --git a/setup.cfg b/setup.cfg index 5996d17d..23ca8c03 100644 --- a/setup.cfg +++ b/setup.cfg @@ -88,3 +88,6 @@ oslo.config.opts = all_files = 1 build-dir = doc/build source-dir = doc/source + +[wheel] +universal = 1 -- GitLab From a3d0c7fd02bd7fb3c45f7f963076c64f0c3816b4 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Wed, 2 Dec 2015 12:29:29 +0100 Subject: [PATCH 0051/1483] Limit version of keystonemiddleware to before 4 Major version 4 fixed the way options were handled, but broke our code. We could probably fix into Gnocchi, but it's simpler to just limit people to an old Keystonemiddleware. Future version of Gnocchi won't depend on it anyway. Change-Id: Ie657c9dd874bed61118fec1b28abd4a6fa05c010 --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index ed1cc583..a8b5a431 100644 --- a/requirements.txt +++ b/requirements.txt @@ -30,7 +30,7 @@ WebOb>=1.4.1 alembic>=0.7.6,!=0.8.1 psycopg2 pymysql -keystonemiddleware>=2.3.0 +keystonemiddleware>=2.3.0,<4 PasteDeploy sphinx_bootstrap_theme prettytable -- GitLab From b9210d36c745b3637e5d6e7bed4adbf74a52e26a Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Thu, 26 Nov 2015 11:36:07 +0100 Subject: [PATCH 0052/1483] storage: retry to delete metric on failure Currently, if a deletion fails from the storage engine, the metric is still expunged from the indexer, meaning metricd will not retry to delete it. Let's make it retry by not expunging the metric from the indexer. 
Change-Id: Id2c14cbbedda66aa3be4327a142f26415debf5db (cherry picked from commit 63b0271856158414cbcb632bfed790fcb3d10917) --- gnocchi/storage/__init__.py | 1 + 1 file changed, 1 insertion(+) diff --git a/gnocchi/storage/__init__.py b/gnocchi/storage/__init__.py index f320628e..b903a4f2 100644 --- a/gnocchi/storage/__init__.py +++ b/gnocchi/storage/__init__.py @@ -171,6 +171,7 @@ class StorageDriver(object): except Exception: LOG.error("Unable to expunge metric %s from storage" % m, exc_info=True) + continue try: self.index.expunge_metric(m.id) except indexer.NoSuchMetric: -- GitLab From 8d5f31cf57d4fa4170f519697043c87817b839d7 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Thu, 26 Nov 2015 17:42:33 +0100 Subject: [PATCH 0053/1483] ceph: delete unaggregated timeserie when deleting metric Change-Id: Icf96c8f22f1e89792a4921f8b0d4cdca7fc72dfb (cherry picked from commit 0ad3562ceb54bffa0f580ab93a70a83a0ef04c63) --- gnocchi/storage/ceph.py | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/gnocchi/storage/ceph.py b/gnocchi/storage/ceph.py index 753dea58..312a57c7 100644 --- a/gnocchi/storage/ceph.py +++ b/gnocchi/storage/ceph.py @@ -159,12 +159,13 @@ class CephStorage(_carbonara.CarbonaraBasedStorage): def _delete_metric(self, metric): with self._get_ioctx() as ioctx: - name = self._get_object_name(metric, 'container') - try: - ioctx.remove_object(name) - except rados.ObjectNotFound: - # Maybe it never got measures - pass + for name in ('container', 'none'): + name = self._get_object_name(metric, name) + try: + ioctx.remove_object(name) + except rados.ObjectNotFound: + # Maybe it never got measures + pass for aggregation in metric.archive_policy.aggregation_methods: name = self._get_object_name(metric, aggregation) try: -- GitLab From c65cb62058ff63e5ed63643f0a99530d2776069d Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Wed, 25 Nov 2015 13:34:08 +0100 Subject: [PATCH 0054/1483] tests: improve fake Swift client delete_container You 
cannot delete a container that is not empty. Change-Id: I056212df060ff20325fbc55c8ad5fa6add6d9f40 (cherry picked from commit 2ff4b5136a15952f0303d20e883ca34ebb418baf) --- gnocchi/storage/swift.py | 8 ++++++++ gnocchi/tests/base.py | 3 +++ 2 files changed, 11 insertions(+) diff --git a/gnocchi/storage/swift.py b/gnocchi/storage/swift.py index 651aa565..4f51b5d3 100644 --- a/gnocchi/storage/swift.py +++ b/gnocchi/storage/swift.py @@ -139,6 +139,7 @@ class SwiftStorage(_carbonara.CarbonaraBasedStorage): self.swift.put_object(self._container_name(metric), aggregation, data) def _delete_metric(self, metric): + self._delete_unaggregated_timeserie(metric) for aggregation in metric.archive_policy.aggregation_methods: try: self.swift.delete_object(self._container_name(metric), @@ -171,3 +172,10 @@ class SwiftStorage(_carbonara.CarbonaraBasedStorage): raise storage.AggregationDoesNotExist(metric, aggregation) raise return contents + + def _delete_unaggregated_timeserie(self, metric): + try: + self.swift.delete_object(self._container_name(metric), "none") + except swclient.ClientException as e: + if e.http_status != 404: + raise diff --git a/gnocchi/tests/base.py b/gnocchi/tests/base.py index 2d46891a..b0daa1fa 100644 --- a/gnocchi/tests/base.py +++ b/gnocchi/tests/base.py @@ -243,6 +243,9 @@ class FakeSwiftClient(object): if container not in self.kvs: raise swexc.ClientException("No such container", http_status=404) + if self.kvs[container]: + raise swexc.ClientException("Container not empty", + http_status=409) del self.kvs[container] def head_container(self, container): -- GitLab From 45c286ccbf94a069caeaa18514f38c97cdf56c54 Mon Sep 17 00:00:00 2001 From: gordon chung Date: Mon, 30 Nov 2015 16:26:37 -0500 Subject: [PATCH 0055/1483] fix expunge_metrics method expunge_metrics method does not work properly. it uses list_metrics from indexer to get metrics with status == 'delete'. this makes sense but list_metrics itself is hardcoded to always search for 'active' status. 
because of that, the query will always be look for status == 'active' and status == 'delete' which can't happen so it never finds any match. Change-Id: I318fa5b4642a187776dce23ee9a933a5f852c4f8 (cherry picked from commit f6a61a289dfe20bd8cf1ce597ac415323ddf8c1a) --- gnocchi/indexer/sqlalchemy.py | 4 ++-- gnocchi/tests/test_indexer.py | 2 ++ 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/gnocchi/indexer/sqlalchemy.py b/gnocchi/indexer/sqlalchemy.py index a980d7ee..e97cb790 100644 --- a/gnocchi/indexer/sqlalchemy.py +++ b/gnocchi/indexer/sqlalchemy.py @@ -221,9 +221,9 @@ class SQLAlchemyIndexer(indexer.IndexerDriver): return m def list_metrics(self, user_id=None, project_id=None, details=False, - **kwargs): + status='active', **kwargs): session = self.engine_facade.get_session() - q = session.query(Metric).filter(Metric.status == 'active') + q = session.query(Metric).filter(Metric.status == status) if user_id is not None: q = q.filter(Metric.created_by_user_id == user_id) if project_id is not None: diff --git a/gnocchi/tests/test_indexer.py b/gnocchi/tests/test_indexer.py index 297f0827..abb440dc 100644 --- a/gnocchi/tests/test_indexer.py +++ b/gnocchi/tests/test_indexer.py @@ -907,3 +907,5 @@ class TestIndexerDriver(tests_base.TestCase): self.index.delete_metric(e1) metrics = self.index.list_metrics() self.assertNotIn(e1, [m.id for m in metrics]) + metrics = self.index.list_metrics(status='delete') + self.assertIn(e1, [m.id for m in metrics]) -- GitLab From d105c0fc94c67903f94e906e52c368bfe1d0d1c7 Mon Sep 17 00:00:00 2001 From: gordon chung Date: Mon, 30 Nov 2015 12:54:17 -0500 Subject: [PATCH 0056/1483] fix pecan _lookup usage _lookup routing in pecan is expected to be an object method. we incorrectly set it to static which throws a few warnings. this patch fixes it. 
Closes-Bug: #1521283 Change-Id: I0911cbe830eef84af613e08dc9495109d27a1646 (cherry picked from commit 96e43af961a3d1722425e6f03822f4a217c68c08) --- gnocchi/rest/__init__.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/gnocchi/rest/__init__.py b/gnocchi/rest/__init__.py index 5cc63fcb..b91402de 100644 --- a/gnocchi/rest/__init__.py +++ b/gnocchi/rest/__init__.py @@ -530,9 +530,9 @@ def UUID(value): class MetricsController(rest.RestController): - @staticmethod + @pecan.expose() - def _lookup(id, *remainder): + def _lookup(self, id, *remainder): try: metric_id = uuid.UUID(id) except ValueError: -- GitLab From 26641ac63c86fa2eee5769dcf4eaaf74bb1b892e Mon Sep 17 00:00:00 2001 From: Luo Gangyi Date: Sat, 28 Nov 2015 21:56:42 +0800 Subject: [PATCH 0057/1483] fix error in alembic when upgrade postgresql ALTER TYPE ... ADD cannot run inside a transaction block, therefore we use another way to update it. Change-Id: I8c6f62b7642917a2ea1bc0280e41261797aca172 Closes-Bug: #1520777 (cherry picked from commit 20418cd339af775842e84c2b6d2a820289888f00) --- ...ea2b8e_create_instance_disk_and_instance_.py | 17 +++++++++++++++-- 1 file changed, 15 insertions(+), 2 deletions(-) diff --git a/gnocchi/indexer/alembic/versions/3901f5ea2b8e_create_instance_disk_and_instance_.py b/gnocchi/indexer/alembic/versions/3901f5ea2b8e_create_instance_disk_and_instance_.py index 686fad22..2c221f70 100644 --- a/gnocchi/indexer/alembic/versions/3901f5ea2b8e_create_instance_disk_and_instance_.py +++ b/gnocchi/indexer/alembic/versions/3901f5ea2b8e_create_instance_disk_and_instance_.py @@ -47,10 +47,23 @@ def upgrade(): # just altering the column won't works. # https://bitbucket.org/zzzeek/alembic/issues/270/altering-enum-type # Does it break offline migration because we use get_bind() ? + + # NOTE(luogangyi): since we cannot use 'ALTER TYPE' in transaction, + # we split the 'ALTER TYPE' operation into several steps. 
bind = op.get_bind() if bind and bind.engine.name == "postgresql": - for value in ["instance_network_interface", "instance_disk"]: - op.execute("ALTER TYPE resource_type_enum ADD VALUE '%s'" % value) + op.execute("ALTER TYPE resource_type_enum RENAME TO \ + old_resource_type_enum") + op.execute("CREATE TYPE resource_type_enum AS ENUM \ + ('generic', 'instance', 'swift_account', \ + 'volume', 'ceph_account', 'network', \ + 'identity', 'ipmi', 'stack', 'image', \ + 'instance_network_interface', 'instance_disk')") + for table in ["resource", "resource_history"]: + op.execute("ALTER TABLE %s ALTER COLUMN type TYPE \ + resource_type_enum USING \ + type::text::resource_type_enum" % table) + op.execute("DROP TYPE old_resource_type_enum") for table in ['instance_disk', 'instance_net_int']: op.create_table( -- GitLab From 8b10497041e56664ead9ce5313f986f5a7dd37a7 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Tue, 8 Dec 2015 11:22:49 +0100 Subject: [PATCH 0058/1483] tox: exclude .eggs in flake8 Change-Id: I2b8d1daea3e4e80722f1b4917cba8e9b0d3a0189 --- tox.ini | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tox.ini b/tox.ini index e6902f11..0a47e699 100644 --- a/tox.ini +++ b/tox.ini @@ -54,7 +54,7 @@ setenv = GNOCCHI_TEST_STORAGE_DRIVER=file commands = {toxinidir}/setup-postgresql-tests.sh {posargs} [flake8] -exclude = .tox,doc,gnocchi/indexer/alembic/versions/1c98ac614015_initial_base.py +exclude = .tox,.eggs,doc,gnocchi/indexer/alembic/versions/1c98ac614015_initial_base.py show-source = true [testenv:genconfig] -- GitLab From 2572f929cbf28c00571e9ec373242a4a678d173d Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Tue, 8 Dec 2015 13:47:50 +0100 Subject: [PATCH 0059/1483] storage: fix expunge_metric Change-Id: I6dca54411268b791c87198e7634b30c44a06d4b0 --- gnocchi/storage/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/gnocchi/storage/__init__.py b/gnocchi/storage/__init__.py index f320628e..55048470 100644 --- 
a/gnocchi/storage/__init__.py +++ b/gnocchi/storage/__init__.py @@ -172,7 +172,7 @@ class StorageDriver(object): LOG.error("Unable to expunge metric %s from storage" % m, exc_info=True) try: - self.index.expunge_metric(m.id) + index.expunge_metric(m.id) except indexer.NoSuchMetric: # It's possible another process deleted the metric in the mean # time, not a big deal -- GitLab From 69387777d907629c3853123643d9460794d3fd1a Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Thu, 3 Dec 2015 15:45:48 +0100 Subject: [PATCH 0060/1483] Adds aggregation across metrics tests Related-bug: #1522434 Change-Id: I78b9712aabc31c26f39a5c9cc13e58c21a08942a (cherry picked from commit ff6e7f5433878c417d4e06ce1cf07b4864e6559e) --- gnocchi/tests/test_carbonara.py | 54 +++++++++++++++++++++++++++++++++ 1 file changed, 54 insertions(+) diff --git a/gnocchi/tests/test_carbonara.py b/gnocchi/tests/test_carbonara.py index 8d3c18d3..41c5cf94 100644 --- a/gnocchi/tests/test_carbonara.py +++ b/gnocchi/tests/test_carbonara.py @@ -838,6 +838,60 @@ class TestTimeSerieArchive(base.BaseTestCase): (pandas.Timestamp('2014-01-01 12:03:00'), 60.0, 4.0), ], output) + def test_aggregated_partial_overlap(self): + tsc1 = carbonara.TimeSerieArchive.from_definitions([(1, 86400)]) + tsb1 = carbonara.BoundTimeSerie(block_size=tsc1.max_block_size) + tsc2 = carbonara.TimeSerieArchive.from_definitions([(1, 86400)]) + tsb2 = carbonara.BoundTimeSerie(block_size=tsc2.max_block_size) + + tsb1.set_values([ + (datetime.datetime(2015, 12, 3, 13, 19, 15), 1), + (datetime.datetime(2015, 12, 3, 13, 20, 15), 1), + (datetime.datetime(2015, 12, 3, 13, 21, 15), 1), + (datetime.datetime(2015, 12, 3, 13, 22, 15), 1), + ], before_truncate_callback=tsc1.update) + + tsb2.set_values([ + (datetime.datetime(2015, 12, 3, 13, 21, 15), 10), + (datetime.datetime(2015, 12, 3, 13, 22, 15), 10), + (datetime.datetime(2015, 12, 3, 13, 23, 15), 10), + (datetime.datetime(2015, 12, 3, 13, 24, 15), 10), + ], before_truncate_callback=tsc2.update) + 
+ output = carbonara.TimeSerieArchive.aggregated( + [tsc1, tsc2], aggregation="sum") + + self.assertEqual([ + (pandas.Timestamp('2015-12-03 13:21:15'), 1.0, 11.0), + (pandas.Timestamp('2015-12-03 13:22:15'), 1.0, 11.0), + ], output) + + dtfrom = datetime.datetime(2015, 12, 3, 13, 17, 0) + dtto = datetime.datetime(2015, 12, 3, 13, 25, 0) + + output = carbonara.TimeSerieArchive.aggregated( + [tsc1, tsc2], from_timestamp=dtfrom, to_timestamp=dtto, + aggregation="sum", needed_percent_of_overlap=0) + + self.assertEqual([ + (pandas.Timestamp('2015-12-03 13:19:15'), 1.0, 1.0), + (pandas.Timestamp('2015-12-03 13:20:15'), 1.0, 1.0), + (pandas.Timestamp('2015-12-03 13:21:15'), 1.0, 11.0), + (pandas.Timestamp('2015-12-03 13:22:15'), 1.0, 11.0), + (pandas.Timestamp('2015-12-03 13:23:15'), 1.0, 10.0), + (pandas.Timestamp('2015-12-03 13:24:15'), 1.0, 10.0), + ], output) + + # FIXME(sileht): this doesn't raise an error when it should + # By default we require 100% of point that overlap + # so that fail if from or to is set + # self.assertRaises(carbonara.UnAggregableTimeseries, + # carbonara.TimeSerieArchive.aggregated, + # timeseries, to_timestamp=dtto) + # self.assertRaises(carbonara.UnAggregableTimeseries, + # carbonara.TimeSerieArchive.aggregated, + # timeseries, from_timestamp=dtfrom) + class CarbonaraCmd(base.BaseTestCase): -- GitLab From abf9ce3c201fa16b6bd96b226ac78ec77ff836ec Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Thu, 3 Dec 2015 16:38:32 +0100 Subject: [PATCH 0061/1483] Checks percent_of_overlap when one boundary is set Aggregation across metrics have different behavior depending on if boundary are set and if needed_percent_of_overlap is set. If boundaries are not set, Carbonara makes the aggregation only with points at timestamp present in all timeseries. But when boundaries are set, Carbonara expects that we have certain percent of timestamps common between timeseries, this percent is controlled by needed_percent_of_overlap (defaulted with 100%). 
This change fixes a weird behavior when only one boundary is set, the needed_percent_of_overlap wasn't checked. Change-Id: Ifc85a9004e864d14d42fd482ec144e4d27dd615b Closes-bug: #1522434 (cherry picked from commit 444656dcb2dbdb9d3d8fc5c4c83cecf687fe5a13) --- gnocchi/carbonara.py | 2 +- gnocchi/tests/test_carbonara.py | 13 ++++++------- 2 files changed, 7 insertions(+), 8 deletions(-) diff --git a/gnocchi/carbonara.py b/gnocchi/carbonara.py index c73621a6..b6a4a48c 100644 --- a/gnocchi/carbonara.py +++ b/gnocchi/carbonara.py @@ -460,7 +460,7 @@ class TimeSerieArchive(SerializableMixin): else: right_boundary_ts = timestamp - if to_timestamp is not None and from_timestamp is not None: + if to_timestamp is not None or from_timestamp is not None: maximum = len(grouped) percent_of_overlap = (float(maximum - holes) * 100.0 / float(maximum)) diff --git a/gnocchi/tests/test_carbonara.py b/gnocchi/tests/test_carbonara.py index 41c5cf94..b7c42415 100644 --- a/gnocchi/tests/test_carbonara.py +++ b/gnocchi/tests/test_carbonara.py @@ -882,15 +882,14 @@ class TestTimeSerieArchive(base.BaseTestCase): (pandas.Timestamp('2015-12-03 13:24:15'), 1.0, 10.0), ], output) - # FIXME(sileht): this doesn't raise an error when it should # By default we require 100% of point that overlap # so that fail if from or to is set - # self.assertRaises(carbonara.UnAggregableTimeseries, - # carbonara.TimeSerieArchive.aggregated, - # timeseries, to_timestamp=dtto) - # self.assertRaises(carbonara.UnAggregableTimeseries, - # carbonara.TimeSerieArchive.aggregated, - # timeseries, from_timestamp=dtfrom) + self.assertRaises(carbonara.UnAggregableTimeseries, + carbonara.TimeSerieArchive.aggregated, + [tsc1, tsc2], to_timestamp=dtto) + self.assertRaises(carbonara.UnAggregableTimeseries, + carbonara.TimeSerieArchive.aggregated, + [tsc1, tsc2], from_timestamp=dtfrom) class CarbonaraCmd(base.BaseTestCase): -- GitLab From 0d6399e2f2c5cc7d2f157a6e60d8f30b83a778f9 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: 
Tue, 1 Dec 2015 12:12:57 +0100 Subject: [PATCH 0062/1483] statsd: fix flush() scheduling Currently, flush() is scheduled only _once_. Indeed, loop.call_later() is not "recursive", as it does not reschedule every flush_delay seconds the call. It only does it once! So let's build a closure that reschedule itself each time. Change-Id: I2e16101890996a51cb2baa4ae7b21dd6d1e3c801 (cherry picked from commit 47c4d216b42df997becac229ccd91d0243a37252) --- gnocchi/statsd.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/gnocchi/statsd.py b/gnocchi/statsd.py index 0295f8e7..e1891455 100644 --- a/gnocchi/statsd.py +++ b/gnocchi/statsd.py @@ -166,7 +166,12 @@ def start(): listen = loop.create_datagram_endpoint( # TODO(jd) Add config options for host/port lambda: StatsdServer(stats), local_addr=('0.0.0.0', 8125)) - loop.call_later(conf.statsd.flush_delay, stats.flush) + + def _flush(): + loop.call_later(conf.statsd.flush_delay, _flush) + stats.flush() + + loop.call_later(conf.statsd.flush_delay, _flush) transport, protocol = loop.run_until_complete(listen) try: -- GitLab From 9c6b7f99896195bf5a03d0956194c3db13aa569f Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Tue, 24 Nov 2015 12:07:01 +0100 Subject: [PATCH 0063/1483] tests: block when acquiring processing lock In tests, not blocking when processing the data in background might trigger a race condition since all the tests run in parallel on the same database. 
Change-Id: Id808f9e659f242fdaecd52cad08a6d00c96ac051 (cherry picked from commit 9309cdc8f64bb0c3ea14a27490c0e81770206c8c) --- gnocchi/storage/__init__.py | 20 +++++++++++++++++--- gnocchi/storage/_carbonara.py | 6 ++++-- gnocchi/tests/test_aggregates.py | 2 +- gnocchi/tests/test_rest.py | 2 +- gnocchi/tests/test_statsd.py | 8 ++++---- gnocchi/tests/test_storage.py | 16 ++++++++-------- 6 files changed, 35 insertions(+), 19 deletions(-) diff --git a/gnocchi/storage/__init__.py b/gnocchi/storage/__init__.py index 7bc673a5..21a4b088 100644 --- a/gnocchi/storage/__init__.py +++ b/gnocchi/storage/__init__.py @@ -149,17 +149,31 @@ class StorageDriver(object): def stop(): pass - def process_background_tasks(self, index): + def process_background_tasks(self, index, sync=False): + """Process background tasks for this storage. + + This calls :func:`process_measures` to process new measures and + :func:`expunge_metrics` to expunge deleted metrics. + + :param index: An indexer to be used for querying metrics + :param sync: If True, then process everything synchronously and raise + on error + :type sync: bool + """ LOG.debug("Processing new and to delete measures") try: - self.process_measures(index) + self.process_measures(index, sync) except Exception: + if sync: + raise LOG.error("Unexpected error during measures processing", exc_info=True) LOG.debug("Expunging deleted metrics") try: self.expunge_metrics(index) except Exception: + if sync: + raise LOG.error("Unexpected error during deleting metrics", exc_info=True) @@ -189,7 +203,7 @@ class StorageDriver(object): raise exceptions.NotImplementedError @staticmethod - def process_measures(indexer=None): + def process_measures(indexer=None, sync=False): """Process added measures in background. 
Some drivers might need to have a background task running that process diff --git a/gnocchi/storage/_carbonara.py b/gnocchi/storage/_carbonara.py index 20353a20..7892aaf8 100644 --- a/gnocchi/storage/_carbonara.py +++ b/gnocchi/storage/_carbonara.py @@ -159,7 +159,7 @@ class CarbonaraBasedStorage(storage.StorageDriver): (metric_id, self._pending_measures_to_process_count(metric_id)) for metric_id in metrics_to_process) - def process_measures(self, indexer): + def process_measures(self, indexer, sync=False): metrics_to_process = self._list_metric_with_measures_to_process() metrics = indexer.get_metrics(metrics_to_process) # This build the list of deleted metrics, i.e. the metrics we have @@ -174,7 +174,7 @@ class CarbonaraBasedStorage(storage.StorageDriver): # Do not block if we cannot acquire the lock, that means some other # worker is doing the job. We'll just ignore this metric and may # get back later to it if needed. - if lock.acquire(blocking=False): + if lock.acquire(blocking=sync): try: LOG.debug("Processing measures for %s" % metric) with self._process_measure_for_metric(metric) as measures: @@ -231,6 +231,8 @@ class CarbonaraBasedStorage(storage.StorageDriver): self._store_metric_measures(metric, 'none', ts.serialize()) except Exception: + if sync: + raise LOG.error("Error processing new measures", exc_info=True) finally: lock.release() diff --git a/gnocchi/tests/test_aggregates.py b/gnocchi/tests/test_aggregates.py index c9bf266c..74da82f1 100644 --- a/gnocchi/tests/test_aggregates.py +++ b/gnocchi/tests/test_aggregates.py @@ -60,7 +60,7 @@ class TestAggregates(tests_base.TestCase): with mock.patch.object(self.index, 'get_metrics') as f: f.return_value = [metric] - self.storage.process_background_tasks(self.index) + self.storage.process_background_tasks(self.index, True) return metric diff --git a/gnocchi/tests/test_rest.py b/gnocchi/tests/test_rest.py index 97ed5f32..c09b50a2 100644 --- a/gnocchi/tests/test_rest.py +++ b/gnocchi/tests/test_rest.py @@ 
-148,7 +148,7 @@ class TestingApp(webtest.TestApp): if self.auth: req.headers['X-Auth-Token'] = self.token response = super(TestingApp, self).do_request(req, *args, **kwargs) - self.storage.process_background_tasks(self.indexer) + self.storage.process_background_tasks(self.indexer, True) return response diff --git a/gnocchi/tests/test_statsd.py b/gnocchi/tests/test_statsd.py index d1c08451..a4073b5b 100644 --- a/gnocchi/tests/test_statsd.py +++ b/gnocchi/tests/test_statsd.py @@ -67,7 +67,7 @@ class TestStatsd(tests_base.TestCase): metric = r.get_metric(metric_key) - self.stats.storage.process_background_tasks(self.stats.indexer) + self.stats.storage.process_background_tasks(self.stats.indexer, True) measures = self.stats.storage.get_measures(metric) self.assertEqual([ @@ -86,7 +86,7 @@ class TestStatsd(tests_base.TestCase): ("127.0.0.1", 12345)) self.stats.flush() - self.stats.storage.process_background_tasks(self.stats.indexer) + self.stats.storage.process_background_tasks(self.stats.indexer, True) measures = self.stats.storage.get_measures(metric) self.assertEqual([ @@ -117,7 +117,7 @@ class TestStatsd(tests_base.TestCase): with_metrics=True) metric = r.get_metric(metric_key) - self.stats.storage.process_background_tasks(self.stats.indexer) + self.stats.storage.process_background_tasks(self.stats.indexer, True) measures = self.stats.storage.get_measures(metric) self.assertEqual([ @@ -134,7 +134,7 @@ class TestStatsd(tests_base.TestCase): ("127.0.0.1", 12345)) self.stats.flush() - self.stats.storage.process_background_tasks(self.stats.indexer) + self.stats.storage.process_background_tasks(self.stats.indexer, True) measures = self.stats.storage.get_measures(metric) self.assertEqual([ diff --git a/gnocchi/tests/test_storage.py b/gnocchi/tests/test_storage.py index 1b4408d7..5d2b2b4c 100644 --- a/gnocchi/tests/test_storage.py +++ b/gnocchi/tests/test_storage.py @@ -44,7 +44,7 @@ class TestStorageDriver(tests_base.TestCase): ]) with mock.patch.object(self.index, 
'get_metrics') as f: f.return_value = [self.metric] - self.storage.process_background_tasks(self.index) + self.storage.process_background_tasks(self.index, True) self.storage.add_measures(self.metric, [ storage.Measure(datetime.datetime(2014, 1, 1, 13, 0, 1), 1), @@ -55,7 +55,7 @@ class TestStorageDriver(tests_base.TestCase): side_effect=ValueError("boom!")): with mock.patch('gnocchi.carbonara.msgpack.loads', side_effect=ValueError("boom!")): - self.storage.process_background_tasks(self.index) + self.storage.process_background_tasks(self.index, True) expected_calls = [ mock.call.debug('Processing measures for %s' % self.metric.id), @@ -81,7 +81,7 @@ class TestStorageDriver(tests_base.TestCase): ]) with mock.patch.object(self.index, 'get_metrics') as f: f.return_value = [self.metric] - self.storage.process_background_tasks(self.index) + self.storage.process_background_tasks(self.index, True) self.storage.delete_metric(self.metric) def test_delete_nonempty_metric_unprocessed(self): @@ -133,7 +133,7 @@ class TestStorageDriver(tests_base.TestCase): for i in six.moves.range(0, 60) for j in six.moves.range(0, 60)]) with mock.patch.object(self.index, 'get_metrics') as f: f.return_value = [m] - self.storage.process_background_tasks(self.index) + self.storage.process_background_tasks(self.index, True) self.assertEqual(3661, len(self.storage.get_measures(m))) @@ -146,7 +146,7 @@ class TestStorageDriver(tests_base.TestCase): ]) with mock.patch.object(self.index, 'get_metrics') as f: f.return_value = [self.metric] - self.storage.process_background_tasks(self.index) + self.storage.process_background_tasks(self.index, True) self.assertEqual([ (utils.datetime_utc(2014, 1, 1), 86400.0, 39.75), @@ -281,7 +281,7 @@ class TestStorageDriver(tests_base.TestCase): ]) with mock.patch.object(self.index, 'get_metrics') as f: f.return_value = [self.metric, metric2] - self.storage.process_background_tasks(self.index) + self.storage.process_background_tasks(self.index, True) values = 
self.storage.get_cross_metric_measures([self.metric, metric2]) self.assertEqual([ @@ -341,7 +341,7 @@ class TestStorageDriver(tests_base.TestCase): ]) with mock.patch.object(self.index, 'get_metrics') as f: f.return_value = [self.metric, metric2] - self.storage.process_background_tasks(self.index) + self.storage.process_background_tasks(self.index, True) values = self.storage.get_cross_metric_measures([self.metric, metric2]) self.assertEqual([ @@ -371,7 +371,7 @@ class TestStorageDriver(tests_base.TestCase): ]) with mock.patch.object(self.index, 'get_metrics') as f: f.return_value = [self.metric, metric2] - self.storage.process_background_tasks(self.index) + self.storage.process_background_tasks(self.index, True) self.assertEqual( {metric2: [], -- GitLab From 2476d245b731fbbde7ffa49ba3e42447bbeb96e7 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Sun, 13 Dec 2015 14:14:56 +0100 Subject: [PATCH 0064/1483] tox: create a target for each indexer Change-Id: Ifaab92b607cf5280a9b8556b6f9764c5b8533c01 (cherry picked from commit 6cab508acd71ca09e756d251add2adc2639cd924) --- tox.ini | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tox.ini b/tox.ini index 0a47e699..80c1d76b 100644 --- a/tox.ini +++ b/tox.ini @@ -1,6 +1,6 @@ [tox] minversion = 1.8 -envlist = py{27,34},py{27,34}-{postgresql,mysql}-{file,swift,ceph,influxdb},pep8,bashate +envlist = py{27,34},py{27,34}-{postgresql,mysql}{,-file,-swift,-ceph,-influxdb},pep8,bashate [testenv] usedevelop = True @@ -11,6 +11,8 @@ deps = -r{toxinidir}/requirements.txt setenv = GNOCCHI_TEST_STORAGE_DRIVERS=file swift ceph GNOCCHI_TEST_INDEXER_DRIVERS=postgresql mysql + py{27,34}-postgresql: GNOCCHI_TEST_INDEXER_DRIVERS=postgresql + py{27,34}-mysql: GNOCCHI_TEST_INDEXER_DRIVERS=mysql py{27,34}-{postgresql,mysql}-file: GNOCCHI_TEST_STORAGE_DRIVERS=file py{27,34}-{postgresql,mysql}-swift: GNOCCHI_TEST_STORAGE_DRIVERS=swift py{27,34}-{postgresql,mysql}-ceph: GNOCCHI_TEST_STORAGE_DRIVERS=ceph -- GitLab From 
72efaf3a0b33a92c7f611363acd4f2efe5386a73 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Tue, 22 Dec 2015 14:30:32 +0100 Subject: [PATCH 0065/1483] indexer: do not store useless revision If no attributes are modified, do not create a new revision history. Change-Id: I93b7a02e80eb782b5c6daacfb906c426363a6faa Closes-Bug: #1520324 (cherry picked from commit ac484806e79cd9fc366062d73aab68a803278d94) --- gnocchi/indexer/__init__.py | 1 + gnocchi/indexer/sqlalchemy.py | 21 +++---- gnocchi/rest/__init__.py | 31 +++++----- gnocchi/tests/gabbi/gabbits/history.yaml | 75 +++++++++++++++++++++++ gnocchi/tests/gabbi/gabbits/resource.yaml | 7 +-- gnocchi/tests/test_indexer.py | 18 ++++++ gnocchi/tests/test_rest.py | 3 +- 7 files changed, 124 insertions(+), 32 deletions(-) diff --git a/gnocchi/indexer/__init__.py b/gnocchi/indexer/__init__.py index 92b528ba..6cefac7f 100644 --- a/gnocchi/indexer/__init__.py +++ b/gnocchi/indexer/__init__.py @@ -309,6 +309,7 @@ class IndexerDriver(object): def update_resource(resource_type, resource_id, ended_at=_marker, metrics=_marker, append_metrics=False, + create_revision=True, **kwargs): raise exceptions.NotImplementedError diff --git a/gnocchi/indexer/sqlalchemy.py b/gnocchi/indexer/sqlalchemy.py index e97cb790..bca8eca6 100644 --- a/gnocchi/indexer/sqlalchemy.py +++ b/gnocchi/indexer/sqlalchemy.py @@ -281,10 +281,8 @@ class SQLAlchemyIndexer(indexer.IndexerDriver): def update_resource(self, resource_type, resource_id, ended_at=_marker, metrics=_marker, append_metrics=False, + create_revision=True, **kwargs): - - now = utils.utcnow() - resource_cls = self._resource_type_to_class(resource_type) resource_history_cls = self._resource_type_to_class(resource_type, "history") @@ -302,12 +300,15 @@ class SQLAlchemyIndexer(indexer.IndexerDriver): if r is None: raise indexer.NoSuchResource(resource_id) - # Build history - rh = resource_history_cls() - for col in sqlalchemy.inspect(resource_cls).columns: - setattr(rh, col.name, getattr(r, 
col.name)) - rh.revision_end = now - session.add(rh) + if create_revision: + # Build history + rh = resource_history_cls() + for col in sqlalchemy.inspect(resource_cls).columns: + setattr(rh, col.name, getattr(r, col.name)) + now = utils.utcnow() + rh.revision_end = now + session.add(rh) + r.revision_start = now # Update the resource if ended_at is not _marker: @@ -320,8 +321,6 @@ class SQLAlchemyIndexer(indexer.IndexerDriver): resource_type, "ended_at", ended_at) r.ended_at = ended_at - r.revision_start = now - if kwargs: for attribute, value in six.iteritems(kwargs): if hasattr(r, attribute): diff --git a/gnocchi/rest/__init__.py b/gnocchi/rest/__init__.py index b91402de..d664af2c 100644 --- a/gnocchi/rest/__init__.py +++ b/gnocchi/rest/__init__.py @@ -675,7 +675,8 @@ class NamedMetricController(rest.RestController): try: pecan.request.indexer.update_resource( self.resource_type, self.resource_id, metrics=metrics, - append_metrics=True) + append_metrics=True, + create_revision=False) except (indexer.NoSuchMetric, indexer.NoSuchArchivePolicy, ValueError) as e: @@ -807,20 +808,29 @@ class GenericResourceController(rest.RestController): body = deserialize_and_validate(self.Resource, required=False) - if not self._resource_need_update(resource, body): - # No need to go further, we assume the db resource - # doesn't change between the get and update - return resource if len(body) == 0: etag_set_headers(resource) return resource + create_revision = False + if 'metrics' not in body: + for k, v in six.iteritems(body): + if getattr(resource, k) != v: + create_revision = True + break + else: + # No need to go further, we assume the db resource + # doesn't change between the get and update + return resource + try: if 'metrics' in body: user, project = get_user_and_project() resource = pecan.request.indexer.update_resource( self._resource_type, - self.id, **body) + self.id, + create_revision=create_revision, + **body) except (indexer.NoSuchMetric, 
indexer.NoSuchArchivePolicy, ValueError) as e: @@ -830,15 +840,6 @@ class GenericResourceController(rest.RestController): etag_set_headers(resource) return resource - @staticmethod - def _resource_need_update(resource, new_attributes): - if 'metrics' in new_attributes: - return True - for k, v in new_attributes.items(): - if getattr(resource, k) != v: - return True - return False - @pecan.expose() def delete(self): resource = pecan.request.indexer.get_resource( diff --git a/gnocchi/tests/gabbi/gabbits/history.yaml b/gnocchi/tests/gabbi/gabbits/history.yaml index 01b62915..04878cb7 100644 --- a/gnocchi/tests/gabbi/gabbits/history.yaml +++ b/gnocchi/tests/gabbi/gabbits/history.yaml @@ -6,6 +6,19 @@ fixtures: - ConfigFixture tests: + - name: create archive policy + url: /v1/archive_policy + method: POST + request_headers: + content-type: application/json + x-roles: admin + data: + name: low + definition: + - granularity: 1 hour + response_headers: + location: $SCHEME://$NETLOC/v1/archive_policy/low + status: 201 # Try creating a new generic resource @@ -77,6 +90,68 @@ tests: x-user-id: 0fbb2314-8461-4b1a-8013-1fc22f6afc9c x-project-id: f3d41b77-0cc1-4f0b-b94a-1d5be9c0e3ea response_json_paths: + $.`len`: 3 + $[0].id: f93450f2-d8a5-4d67-9985-02511241e7d1 + $[0].user_id: 0fbb2314-8461-4b1a-8013-1fc22f6afc9c + $[0].project_id: f3d41b77-0cc1-4f0b-b94a-1d5be9c0e3ea + $[1].id: f93450f2-d8a5-4d67-9985-02511241e7d1 + $[1].user_id: f53c58a4-fdea-4c09-aac4-02135900be67 + $[1].project_id: f3d41b77-0cc1-4f0b-b94a-1d5be9c0e3ea + $[2].id: f93450f2-d8a5-4d67-9985-02511241e7d1 + $[2].user_id: f53c58a4-fdea-4c09-aac4-02135900be67 + $[2].project_id: fe20a931-1012-4cc6-addc-39556ec60907 + + - name: patch resource metrics + url: /v1/resource/generic/f93450f2-d8a5-4d67-9985-02511241e7d1 + method: patch + request_headers: + x-user-id: 0fbb2314-8461-4b1a-8013-1fc22f6afc9c + x-project-id: f3d41b77-0cc1-4f0b-b94a-1d5be9c0e3ea + content-type: application/json + data: + metrics: + foo: + 
archive_policy_name: low + status: 200 + + - name: list all resources with history no change after metrics update + url: /v1/resource/generic + request_headers: + accept: application/json; details=True; history=True + x-user-id: 0fbb2314-8461-4b1a-8013-1fc22f6afc9c + x-project-id: f3d41b77-0cc1-4f0b-b94a-1d5be9c0e3ea + response_json_paths: + $.`len`: 3 + $[0].id: f93450f2-d8a5-4d67-9985-02511241e7d1 + $[0].user_id: 0fbb2314-8461-4b1a-8013-1fc22f6afc9c + $[0].project_id: f3d41b77-0cc1-4f0b-b94a-1d5be9c0e3ea + $[1].id: f93450f2-d8a5-4d67-9985-02511241e7d1 + $[1].user_id: f53c58a4-fdea-4c09-aac4-02135900be67 + $[1].project_id: f3d41b77-0cc1-4f0b-b94a-1d5be9c0e3ea + $[2].id: f93450f2-d8a5-4d67-9985-02511241e7d1 + $[2].user_id: f53c58a4-fdea-4c09-aac4-02135900be67 + $[2].project_id: fe20a931-1012-4cc6-addc-39556ec60907 + + - name: create new metrics + url: /v1/resource/generic/f93450f2-d8a5-4d67-9985-02511241e7d1/metric + method: post + request_headers: + x-user-id: 0fbb2314-8461-4b1a-8013-1fc22f6afc9c + x-project-id: f3d41b77-0cc1-4f0b-b94a-1d5be9c0e3ea + content-type: application/json + data: + foobar: + archive_policy_name: low + status: 204 + + - name: list all resources with history no change after metrics creation + url: /v1/resource/generic + request_headers: + accept: application/json; details=True; history=True + x-user-id: 0fbb2314-8461-4b1a-8013-1fc22f6afc9c + x-project-id: f3d41b77-0cc1-4f0b-b94a-1d5be9c0e3ea + response_json_paths: + $.`len`: 3 $[0].id: f93450f2-d8a5-4d67-9985-02511241e7d1 $[0].user_id: 0fbb2314-8461-4b1a-8013-1fc22f6afc9c $[0].project_id: f3d41b77-0cc1-4f0b-b94a-1d5be9c0e3ea diff --git a/gnocchi/tests/gabbi/gabbits/resource.yaml b/gnocchi/tests/gabbi/gabbits/resource.yaml index d6266bb7..4650e06e 100644 --- a/gnocchi/tests/gabbi/gabbits/resource.yaml +++ b/gnocchi/tests/gabbi/gabbits/resource.yaml @@ -257,12 +257,11 @@ tests: x-project-id: f3d41b77-0cc1-4f0b-b94a-1d5be9c0e3ea content-type: application/json response_json_paths: - $.`len`: 3 
+ $.`len`: 2 $[0].host: compute1 $[1].host: compute2 - $[2].revision_end: null - $[2].host: compute2 - $[2].metrics.'disk.iops': $RESPONSE["metrics.'disk.iops'"] + $[1].revision_end: null + $[1].metrics.'disk.iops': $RESPONSE["metrics.'disk.iops'"] - name: patch instance bad metric association url: /v1/resource/instance/75C44741-CC60-4033-804E-2D3098C7D2E9 diff --git a/gnocchi/tests/test_indexer.py b/gnocchi/tests/test_indexer.py index abb440dc..ee25c2b3 100644 --- a/gnocchi/tests/test_indexer.py +++ b/gnocchi/tests/test_indexer.py @@ -407,6 +407,24 @@ class TestIndexerDriver(tests_base.TestCase): r = self.index.get_resource('instance', r1, with_metrics=True) self.assertEqual(rc, r) + def test_update_resource_no_change(self): + r1 = uuid.uuid4() + user = uuid.uuid4() + project = uuid.uuid4() + rc = self.index.create_resource('instance', r1, user, project, + flavor_id="1", + image_ref="http://foo/bar", + host="foo", + display_name="lol") + updated = self.index.update_resource('instance', r1, host="foo", + create_revision=False) + r = self.index.list_resources('instance', + {"=": {"id": r1}}, + history=True) + self.assertEqual(1, len(r)) + self.assertEqual(dict(rc), dict(r[0])) + self.assertEqual(dict(updated), dict(r[0])) + def test_update_resource_ended_at_fail(self): r1 = uuid.uuid4() user = uuid.uuid4() diff --git a/gnocchi/tests/test_rest.py b/gnocchi/tests/test_rest.py index c09b50a2..b032f5fc 100644 --- a/gnocchi/tests/test_rest.py +++ b/gnocchi/tests/test_rest.py @@ -1216,8 +1216,7 @@ class ResourceTest(RestTest): self.assertTrue(uuid.UUID(result['metrics']['foo'])) self.assertIsNone(result['revision_end']) self.assertIsNone(r['revision_end']) - self.assertEqual(result['revision_start'], - "2014-01-02T06:49:00+00:00") + self.assertEqual(result['revision_start'], "2014-01-01T10:23:00+00:00") self.assertEqual(r['revision_start'], "2014-01-01T10:23:00+00:00") del result['metrics'] -- GitLab From c262bd1682608dcb5a572bf6db9b5f24641d9ff3 Mon Sep 17 00:00:00 2001 
From: Julien Danjou Date: Wed, 6 Jan 2016 12:13:41 +0100 Subject: [PATCH 0066/1483] rest: fix revision creation Currently, there's a still an edge case if a user change both metrics and something else in a PATCH request: no resource will be created at all because 'metrics' is found. This fixes that. Change-Id: Ic17a798ecba4892a07517a08bde4b2d96a1f3967 Related-Bug: #1520324 --- gnocchi/rest/__init__.py | 14 +++++++------- gnocchi/tests/gabbi/gabbits/history.yaml | 3 +++ 2 files changed, 10 insertions(+), 7 deletions(-) diff --git a/gnocchi/rest/__init__.py b/gnocchi/rest/__init__.py index d664af2c..46b09b57 100644 --- a/gnocchi/rest/__init__.py +++ b/gnocchi/rest/__init__.py @@ -812,16 +812,16 @@ class GenericResourceController(rest.RestController): etag_set_headers(resource) return resource - create_revision = False - if 'metrics' not in body: - for k, v in six.iteritems(body): - if getattr(resource, k) != v: - create_revision = True - break - else: + for k, v in six.iteritems(body): + if k != 'metrics' and getattr(resource, k) != v: + create_revision = True + break + else: + if 'metrics' not in body: # No need to go further, we assume the db resource # doesn't change between the get and update return resource + create_revision = False try: if 'metrics' in body: diff --git a/gnocchi/tests/gabbi/gabbits/history.yaml b/gnocchi/tests/gabbi/gabbits/history.yaml index 04878cb7..f683e090 100644 --- a/gnocchi/tests/gabbi/gabbits/history.yaml +++ b/gnocchi/tests/gabbi/gabbits/history.yaml @@ -67,6 +67,9 @@ tests: content-type: application/json data: project_id: fe20a931-1012-4cc6-addc-39556ec60907 + metrics: + mymetric: + archive_policy_name: low status: 200 response_json_paths: user_id: f53c58a4-fdea-4c09-aac4-02135900be67 -- GitLab From acf2003095cc8166889b465d7e53877a876d25bb Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Tue, 5 Jan 2016 09:07:02 +0100 Subject: [PATCH 0067/1483] swift: fix typo in unprocessed measures deletion The method name has a typo that was 
never caught because the unit tests didn't run this function. This patch fixes the unit test by processing the measures after we delete the metric, making metricd deleting them, and executing that code. Change-Id: I20973a30856ad697baefadc736e45e6465a65176 --- gnocchi/storage/swift.py | 2 +- gnocchi/tests/test_storage.py | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/gnocchi/storage/swift.py b/gnocchi/storage/swift.py index 4f51b5d3..ceac8ff5 100644 --- a/gnocchi/storage/swift.py +++ b/gnocchi/storage/swift.py @@ -115,7 +115,7 @@ class SwiftStorage(_carbonara.CarbonaraBasedStorage): return len(self._list_measure_files_for_metric_id(metric_id)) def _delete_unprocessed_measures_for_metric_id(self, metric_id): - files = self._list_measure_files_for_metric(metric_id) + files = self._list_measure_files_for_metric_id(metric_id) for f in files: self.swift.delete_object(self.MEASURE_PREFIX, f['name']) diff --git a/gnocchi/tests/test_storage.py b/gnocchi/tests/test_storage.py index 5d2b2b4c..21052262 100644 --- a/gnocchi/tests/test_storage.py +++ b/gnocchi/tests/test_storage.py @@ -83,6 +83,7 @@ class TestStorageDriver(tests_base.TestCase): f.return_value = [self.metric] self.storage.process_background_tasks(self.index, True) self.storage.delete_metric(self.metric) + self.storage.process_background_tasks(self.index, True) def test_delete_nonempty_metric_unprocessed(self): self.storage.add_measures(self.metric, [ @@ -98,6 +99,7 @@ class TestStorageDriver(tests_base.TestCase): f.return_value = [self.metric] self.storage.process_background_tasks(self.index) self.storage.delete_metric(self.metric) + self.storage.process_background_tasks(self.index, True) def test_measures_reporting(self): self.storage.add_measures(self.metric, [ -- GitLab From 03d981c789454dedc0ec5db1496f46871d20717d Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Thu, 7 Jan 2016 16:00:25 +0100 Subject: [PATCH 0068/1483] sqlalchemy: fix metric expunge There's a typo in the current driver 
which prevents expunging the metric for real. This patches fixes that and ensures the code works. It also deletes a check in another test that might fail due to the fact that expunge can be run at the same time by another test process. Change-Id: I2d02b338bb5d98411a097ecba22d6934bd61ed3b --- gnocchi/indexer/sqlalchemy.py | 2 +- gnocchi/tests/test_indexer.py | 22 ++++++++++++++++++++-- 2 files changed, 21 insertions(+), 3 deletions(-) diff --git a/gnocchi/indexer/sqlalchemy.py b/gnocchi/indexer/sqlalchemy.py index bca8eca6..705e4d9d 100644 --- a/gnocchi/indexer/sqlalchemy.py +++ b/gnocchi/indexer/sqlalchemy.py @@ -543,7 +543,7 @@ class SQLAlchemyIndexer(indexer.IndexerDriver): def expunge_metric(self, id): session = self.engine_facade.get_session() - if session.query(Metric).filter(Metric.id == id).delete == 0: + if session.query(Metric).filter(Metric.id == id).delete() == 0: raise indexer.NoSuchMetric(id) def delete_metric(self, id): diff --git a/gnocchi/tests/test_indexer.py b/gnocchi/tests/test_indexer.py index ee25c2b3..ad17a983 100644 --- a/gnocchi/tests/test_indexer.py +++ b/gnocchi/tests/test_indexer.py @@ -95,6 +95,26 @@ class TestIndexerDriver(tests_base.TestCase): m2 = self.index.get_metrics([r1]) self.assertEqual([m], m2) + def test_expunge_metric(self): + r1 = uuid.uuid4() + user = uuid.uuid4() + project = uuid.uuid4() + m = self.index.create_metric(r1, user, project, "low") + self.index.delete_metric(m.id) + try: + self.index.expunge_metric(m.id) + except indexer.NoSuchMetric: + # It's possible another test process expunged the metric just + # before us; in that case, we're good, we'll just check that the + # next call actually really raises NoSuchMetric anyway + pass + self.assertRaises(indexer.NoSuchMetric, + self.index.delete_metric, + m.id) + self.assertRaises(indexer.NoSuchMetric, + self.index.expunge_metric, + m.id) + def test_create_resource(self): r1 = uuid.uuid4() user = uuid.uuid4() @@ -925,5 +945,3 @@ class 
TestIndexerDriver(tests_base.TestCase): self.index.delete_metric(e1) metrics = self.index.list_metrics() self.assertNotIn(e1, [m.id for m in metrics]) - metrics = self.index.list_metrics(status='delete') - self.assertIn(e1, [m.id for m in metrics]) -- GitLab From 144a08a65c8c1d0d18e3fba09ebde563ca419c35 Mon Sep 17 00:00:00 2001 From: Doug Hellmann Date: Wed, 23 Dec 2015 01:31:09 +0000 Subject: [PATCH 0069/1483] remove python 2.6 trove classifier OpenStack projects are no longer being tested under Python 2.6, so remove the trove classifier implying that this project supports 2.6. Change-Id: Ie8233fe674eb856d62bc7610864ef2f641bf6b47 (cherry picked from commit 0eacf5d53b1c7f51ee84e5761d93f544b64e56ed) --- setup.cfg | 1 - 1 file changed, 1 deletion(-) diff --git a/setup.cfg b/setup.cfg index 23ca8c03..837834ec 100644 --- a/setup.cfg +++ b/setup.cfg @@ -14,7 +14,6 @@ classifier = Operating System :: POSIX :: Linux Programming Language :: Python Programming Language :: Python :: 2 - Programming Language :: Python :: 2.6 Programming Language :: Python :: 2.7 Programming Language :: Python :: 3.4 Topic :: System :: Monitoring -- GitLab From ac6ae07024157cc3c274c1281979779ad3a7dc5b Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Tue, 15 Dec 2015 16:24:13 +0100 Subject: [PATCH 0070/1483] swift: make sure we retrieve full listing in containers Change-Id: Id38a5be4e6f69ce5213b502ac6848e8501c4bd08 (cherry picked from commit 3df54c049f2d12ab3a714b9ec3fba80286caae31) --- gnocchi/storage/swift.py | 6 ++++-- gnocchi/tests/base.py | 11 +++++++++-- 2 files changed, 13 insertions(+), 4 deletions(-) diff --git a/gnocchi/storage/swift.py b/gnocchi/storage/swift.py index ceac8ff5..71cf13fe 100644 --- a/gnocchi/storage/swift.py +++ b/gnocchi/storage/swift.py @@ -103,12 +103,14 @@ class SwiftStorage(_carbonara.CarbonaraBasedStorage): def _list_metric_with_measures_to_process(self): headers, files = self.swift.get_container(self.MEASURE_PREFIX, - delimiter='/') + delimiter='/', + 
full_listing=True) return set(f['subdir'][:-1] for f in files if 'subdir' in f) def _list_measure_files_for_metric_id(self, metric_id): headers, files = self.swift.get_container( - self.MEASURE_PREFIX, path=six.text_type(metric_id)) + self.MEASURE_PREFIX, path=six.text_type(metric_id), + full_listing=True) return files def _pending_measures_to_process_count(self, metric_id): diff --git a/gnocchi/tests/base.py b/gnocchi/tests/base.py index b0daa1fa..174e9f92 100644 --- a/gnocchi/tests/base.py +++ b/gnocchi/tests/base.py @@ -187,7 +187,7 @@ class FakeSwiftClient(object): self.kvs[container] = {} def get_container(self, container, delimiter=None, - path=None): + path=None, full_listing=False): try: container = self.kvs[container] except KeyError: @@ -212,7 +212,14 @@ class FakeSwiftClient(object): 'name': k, 'content_type': None}) - return {}, files + list(directories) + if full_listing: + end = None + else: + # In truth, it's 10000, but 1 is enough to make sure our test fails + # otherwise. + end = 1 + + return {}, (files + list(directories))[:end] def put_object(self, container, key, obj): if hasattr(obj, "seek"): -- GitLab From 85e1ca4e8d9cf3dc7d62fbf367b2eeffd8fceb86 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Fri, 8 Jan 2016 13:52:57 +0100 Subject: [PATCH 0071/1483] statsd: Use archive policy rule Currently when the archive policy configured for statd is empty, no user usable error is returned. This change uses archive policy rule to get an archive policy name in this case. And log the "NoArchivePolicyRuleMatch" exception if not. 
Change-Id: I6977382d4989934dbf7b6f4e9fa3713f20a8e1ca Closes-bug: #1532179 (cherry picked from commit 44b1a06abe454c5221be0f0b0d1c4aa4d1aa9e8c) --- gnocchi/indexer/__init__.py | 18 ++++++++++++++++++ gnocchi/rest/__init__.py | 14 +++++--------- gnocchi/statsd.py | 9 ++++++++- gnocchi/tests/test_statsd.py | 15 +++++++++++++++ 4 files changed, 46 insertions(+), 10 deletions(-) diff --git a/gnocchi/indexer/__init__.py b/gnocchi/indexer/__init__.py index 6cefac7f..107f1a93 100644 --- a/gnocchi/indexer/__init__.py +++ b/gnocchi/indexer/__init__.py @@ -13,6 +13,7 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. +import fnmatch import hashlib from oslo_config import cfg @@ -130,6 +131,15 @@ class NoSuchArchivePolicyRule(IndexerException): self.archive_policy_rule = archive_policy_rule +class NoArchivePolicyRuleMatch(IndexerException): + """Error raised when no archive policy rule found for metric.""" + def __init__(self, metric_name): + super(NoArchivePolicyRuleMatch, self).__init__( + "No Archive policy rule found for metric %s" % + str(metric_name)) + self.metric_name = metric_name + + class NamedMetricAlreadyExists(IndexerException): """Error raised when a named metric already exists.""" def __init__(self, metric): @@ -324,3 +334,11 @@ class IndexerDriver(object): @staticmethod def expunge_metric(id): raise exceptions.NotImplementedError + + def get_archive_policy_for_metric(self, metric_name): + """Helper to get the archive policy according archive policy rules.""" + rules = self.list_archive_policy_rules() + for rule in rules: + if fnmatch.fnmatch(metric_name or "", rule.metric_pattern): + return self.get_archive_policy(rule.archive_policy_name) + raise NoArchivePolicyRuleMatch(metric_name) diff --git a/gnocchi/rest/__init__.py b/gnocchi/rest/__init__.py index 46b09b57..f35b0634 100644 --- a/gnocchi/rest/__init__.py +++ b/gnocchi/rest/__init__.py 
@@ -13,7 +13,6 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. -import fnmatch import uuid from oslo_log import log @@ -559,14 +558,9 @@ class MetricsController(rest.RestController): name = definition.get('name') if archive_policy_name is None: - rules = pecan.request.indexer.list_archive_policy_rules() - for rule in rules: - if fnmatch.fnmatch(name or "", rule.metric_pattern): - ap = pecan.request.indexer.get_archive_policy( - rule.archive_policy_name) - definition['archive_policy_name'] = ap.name - break - else: + try: + ap = pecan.request.indexer.get_archive_policy_for_metric(name) + except indexer.NoArchivePolicyRuleMatch: # NOTE(jd) Since this is a schema-like function, we # should/could raise ValueError, but if we do so, voluptuous # just returns a "invalid value" with no useful message – so we @@ -575,6 +569,8 @@ class MetricsController(rest.RestController): abort(400, "No archive policy name specified " "and no archive policy rule found matching " "the metric name %s" % name) + else: + definition['archive_policy_name'] = ap.name user_id, project_id = get_user_and_project() diff --git a/gnocchi/statsd.py b/gnocchi/statsd.py index e1891455..5c2839c6 100644 --- a/gnocchi/statsd.py +++ b/gnocchi/statsd.py @@ -103,7 +103,7 @@ class Stats(object): # API at the same time. 
metric = resource.get_metric(metric_name) if not metric: - ap_name = self.conf.statsd.archive_policy_name + ap_name = self._get_archive_policy_name(metric_name) metric = self.indexer.create_metric( uuid.uuid4(), self.conf.statsd.user_id, @@ -118,6 +118,13 @@ class Stats(object): self.reset() + def _get_archive_policy_name(self, metric_name): + if self.conf.statsd.archive_policy_name: + return self.conf.statsd.archive_policy_name + # NOTE(sileht): We didn't catch NoArchivePolicyRuleMatch to log it + ap = self.indexer.get_archive_policy_for_metric(metric_name) + return ap.name + class StatsdServer(object): def __init__(self, stats): diff --git a/gnocchi/tests/test_statsd.py b/gnocchi/tests/test_statsd.py index a4073b5b..b01bfb35 100644 --- a/gnocchi/tests/test_statsd.py +++ b/gnocchi/tests/test_statsd.py @@ -19,6 +19,7 @@ import uuid import mock from oslo_utils import timeutils +from gnocchi import indexer from gnocchi import statsd from gnocchi.tests import base as tests_base from gnocchi import utils @@ -116,6 +117,7 @@ class TestStatsd(tests_base.TestCase): self.conf.statsd.resource_id, with_metrics=True) metric = r.get_metric(metric_key) + self.assertIsNotNone(metric) self.stats.storage.process_background_tasks(self.stats.indexer, True) @@ -142,3 +144,16 @@ class TestStatsd(tests_base.TestCase): (utils.datetime_utc(2015, 1, 7, 13), 3600.0, 28), (utils.datetime_utc(2015, 1, 7, 13, 58), 60.0, 1.0), (utils.datetime_utc(2015, 1, 7, 13, 59), 60.0, 55.0)], measures) + + +class TestStatsdArchivePolicyRule(TestStatsd): + STATSD_ARCHIVE_POLICY_NAME = "" + + def setUp(self): + super(TestStatsdArchivePolicyRule, self).setUp() + try: + self.stats.indexer.create_archive_policy_rule( + "statsd", "*", "medium") + except indexer.ArchivePolicyRuleAlreadyExists: + # Created by another test run + pass -- GitLab From 7ac05b77692c4c080da16b75aaaef85453993b67 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Mon, 11 Jan 2016 13:21:16 +0100 Subject: [PATCH 0072/1483] indexer: fix a 
possible test failure due to a race condition Change-Id: Id7dd0394f2160258749995a92be2cc9d6b55cfe1 --- gnocchi/tests/test_indexer.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/gnocchi/tests/test_indexer.py b/gnocchi/tests/test_indexer.py index ad17a983..673f1b9b 100644 --- a/gnocchi/tests/test_indexer.py +++ b/gnocchi/tests/test_indexer.py @@ -514,7 +514,12 @@ class TestIndexerDriver(tests_base.TestCase): metrics={'foo': e1, 'bar': e2}) self.index.delete_metric(e1) # It can be called twice - self.index.delete_metric(e1) + try: + self.index.delete_metric(e1) + except indexer.NoSuchMetric: + # It's possible that the metric has been expunged by another + # parallel test. No worry. + pass r = self.index.get_resource('generic', r1, with_metrics=True) self.assertIsNotNone(r.started_at) self.assertIsNotNone(r.revision_start) -- GitLab From 19e32b3859dc2433a44d920525a604ed2aced4a7 Mon Sep 17 00:00:00 2001 From: liusheng Date: Wed, 28 Oct 2015 11:40:19 +0800 Subject: [PATCH 0073/1483] Fix gnocchi resource update without change When use "gnocchi update resource " command to update a resource without any attribute change, a KeyError will be returned, because the resource metrics won't be returned if no attribute updated. 
Change-Id: I0da5987a2f295ba8b2f10a0349add4d21ab29ab5 (cherry picked from commit 4c59a98299901a4b48b4938753574abca6306c88) --- gnocchi/rest/__init__.py | 2 +- gnocchi/tests/gabbi/gabbits/resource.yaml | 13 +++++++++++++ 2 files changed, 14 insertions(+), 1 deletion(-) diff --git a/gnocchi/rest/__init__.py b/gnocchi/rest/__init__.py index f35b0634..075dbd00 100644 --- a/gnocchi/rest/__init__.py +++ b/gnocchi/rest/__init__.py @@ -796,7 +796,7 @@ class GenericResourceController(rest.RestController): @pecan.expose('json') def patch(self): resource = pecan.request.indexer.get_resource( - self._resource_type, self.id) + self._resource_type, self.id, with_metrics=True) if not resource: abort(404, indexer.NoSuchResource(self.id)) enforce("update resource", resource) diff --git a/gnocchi/tests/gabbi/gabbits/resource.yaml b/gnocchi/tests/gabbi/gabbits/resource.yaml index 4650e06e..71c72e65 100644 --- a/gnocchi/tests/gabbi/gabbits/resource.yaml +++ b/gnocchi/tests/gabbi/gabbits/resource.yaml @@ -340,6 +340,19 @@ tests: data: host: compute2 + - name: patch resource without change with metrics in response + desc: an empty dict in patch is an existence check + url: /v1/resource/instance/75C44741-CC60-4033-804E-2D3098C7D2E9 + method: PATCH + request_headers: + x-user-id: 0fbb2314-8461-4b1a-8013-1fc22f6afc9c + x-project-id: f3d41b77-0cc1-4f0b-b94a-1d5be9c0e3ea + content-type: application/json + data: "{}" + status: 200 + response_json_paths: + $.metrics.'disk.io.rate': $RESPONSE["$.metrics.'disk.io.rate'"] + # Failure modes for history - name: post instance history -- GitLab From ee1740bf12c13f546990f01ef76a4f0b29a78aeb Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Thu, 14 Jan 2016 17:31:14 +0100 Subject: [PATCH 0074/1483] ceph: fix the metric list to process with new measures Currently, the list returned in the Ceph driver contains a lot of doubloons because it returns a list and not a set. 
If 1 metric has N new measures batch waiting to be processed, the list return will be size N and not 1. Using a set() avoids that issue and the memory draining implied. Closes-Bug: #1533793 Change-Id: I3a0b726aae14a17a23a365babc1a2537fb4d1052 (cherry picked from commit 614e13d47fdcaeea9d41bebc214014e0c83a0e83) --- gnocchi/storage/ceph.py | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/gnocchi/storage/ceph.py b/gnocchi/storage/ceph.py index 312a57c7..f5219692 100644 --- a/gnocchi/storage/ceph.py +++ b/gnocchi/storage/ceph.py @@ -79,14 +79,13 @@ class CephStorage(_carbonara.CarbonaraBasedStorage): ioctx.write_full(name, data) ioctx.set_xattr(self.MEASURE_PREFIX, name, "") - @classmethod - def _list_object_names_to_process(cls, ioctx, prefix=None): + def _list_object_names_to_process(self, ioctx, prefix=None): try: - xattrs_iterator = ioctx.get_xattrs(cls.MEASURE_PREFIX) + xattrs = ioctx.get_xattrs(self.MEASURE_PREFIX) except rados.ObjectNotFound: return [] - return [name for name, __ in xattrs_iterator - if prefix is None or name.startswith(prefix)] + return set(name for name, __ in xattrs + if prefix is None or name.startswith(prefix)) def _pending_measures_to_process_count(self, metric_id): with self._get_ioctx() as ioctx: -- GitLab From e739a926c65085e33876f9af79339af1eb700edd Mon Sep 17 00:00:00 2001 From: Thomas Goirand Date: Thu, 21 Jan 2016 11:24:57 +0800 Subject: [PATCH 0075/1483] Fixed python-future version to be >= 0.15. --- debian/changelog | 6 ++++++ debian/control | 4 ++-- 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/debian/changelog b/debian/changelog index 7b99ceea..4825720e 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +gnocchi (1.3.0-5) UNRELEASED; urgency=medium + + * Fixed python-future version to be >= 0.15. + + -- Thomas Goirand Thu, 21 Jan 2016 11:24:39 +0800 + gnocchi (1.3.0-4) unstable; urgency=medium * French debconf templates translation update (Closes: #806506). 
diff --git a/debian/control b/debian/control index 3ac01b08..70d7e3cf 100644 --- a/debian/control +++ b/debian/control @@ -20,7 +20,7 @@ Build-Depends-Indep: alembic (>= 0.7.6), python-concurrent.futures (>= 2.1.6), python-coverage (>= 3.6), python-fixtures, - python-future, + python-future (>= 0.15), python-gabbi (>= 1), python-jinja2, python-jsonpatch (>= 1.9), @@ -76,7 +76,7 @@ Package: python-gnocchi Architecture: all Depends: alembic (>= 0.7.6), python-concurrent.futures (>= 2.1.6), - python-future, + python-future (>= 0.15), python-jinja2, python-jsonpatch (>= 1.9), python-keystoneclient (>= 1:1.6.0), -- GitLab From 4833fb3d6d966ff58327a600358d6cce558e6657 Mon Sep 17 00:00:00 2001 From: Thomas Goirand Date: Thu, 21 Jan 2016 12:19:55 +0800 Subject: [PATCH 0076/1483] Now packaging 1.3.3 --- debian/changelog | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/debian/changelog b/debian/changelog index 4825720e..b95aa141 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,8 +1,9 @@ -gnocchi (1.3.0-5) UNRELEASED; urgency=medium +gnocchi (1.3.3-1) unstable; urgency=medium + * New upstream release. * Fixed python-future version to be >= 0.15. - -- Thomas Goirand Thu, 21 Jan 2016 11:24:39 +0800 + -- Thomas Goirand Thu, 21 Jan 2016 12:19:33 +0800 gnocchi (1.3.0-4) unstable; urgency=medium -- GitLab From daa508eaa43b92a0a25e6434343a82bb8834557e Mon Sep 17 00:00:00 2001 From: Thomas Goirand Date: Thu, 21 Jan 2016 12:22:29 +0800 Subject: [PATCH 0077/1483] Fixed (build-)depends for this release. --- debian/changelog | 1 + debian/control | 10 +++++++--- 2 files changed, 8 insertions(+), 3 deletions(-) diff --git a/debian/changelog b/debian/changelog index b95aa141..664e5586 100644 --- a/debian/changelog +++ b/debian/changelog @@ -2,6 +2,7 @@ gnocchi (1.3.3-1) unstable; urgency=medium * New upstream release. * Fixed python-future version to be >= 0.15. + * Fixed (build-)depends for this release. 
-- Thomas Goirand Thu, 21 Jan 2016 12:19:33 +0800 diff --git a/debian/control b/debian/control index 70d7e3cf..2ec239f0 100644 --- a/debian/control +++ b/debian/control @@ -16,7 +16,7 @@ Build-Depends-Indep: alembic (>= 0.7.6), libpq-dev, postgresql, postgresql-server-dev-all, - python-ceilometer (>= 2015.1~b3), + python-ceilometer (>= 1:5.0.0), python-concurrent.futures (>= 2.1.6), python-coverage (>= 3.6), python-fixtures, @@ -33,6 +33,7 @@ Build-Depends-Indep: alembic (>= 0.7.6), python-oslo.config (>= 1:2.3.0), python-oslo.db (>= 1.8.0), python-oslo.log (>= 1.0.0), + python-oslo.middleware, python-oslo.policy (>= 0.3.0), python-oslo.serialization (>= 1.4.0), python-oslo.utils (>= 1.6.0), @@ -41,6 +42,7 @@ Build-Depends-Indep: alembic (>= 0.7.6), python-pandas (>= 0.17), python-pastedeploy, python-pecan (>= 0.9), + python-prettytable, python-psycopg2, python-pymysql, python-pytimeparse (>= 1.1.5), @@ -52,7 +54,7 @@ Build-Depends-Indep: alembic (>= 0.7.6), python-sqlalchemy, python-sqlalchemy-utils, python-stevedore, - python-swiftclient, + python-swiftclient (>= 2.5.0), python-sysv-ipc, python-tempest-lib (>= 0.2.0), python-testscenarios, @@ -86,6 +88,7 @@ Depends: alembic (>= 0.7.6), python-oslo.config (>= 1:2.3.0), python-oslo.db (>= 1.8.0), python-oslo.log (>= 1.0.0), + python-oslo.middleware, python-oslo.policy (>= 0.3.0), python-oslo.serialization (>= 1.4.0), python-oslo.utils (>= 1.6.0), @@ -93,6 +96,7 @@ Depends: alembic (>= 0.7.6), python-pandas (>= 0.17), python-pastedeploy, python-pecan (>= 0.9), + python-prettytable, python-psycopg2, python-pymysql, python-pytimeparse (>= 1.1.5), @@ -102,7 +106,7 @@ Depends: alembic (>= 0.7.6), python-sqlalchemy, python-sqlalchemy-utils, python-stevedore, - python-swiftclient, + python-swiftclient (>= 2.5.0), python-tooz (>= 0.13.1), python-trollius, python-tz, -- GitLab From bc706f533c990f2c0dd8c426568f0e06df9de063 Mon Sep 17 00:00:00 2001 From: Thomas Goirand Date: Thu, 21 Jan 2016 12:27:53 +0800 Subject: [PATCH 
0078/1483] * Install the app.wsgi into /usr/share/gnocchi-common * Fixed debian/copyright ordering. --- debian/changelog | 2 ++ debian/copyright | 8 ++++---- debian/gnocchi-common.install | 1 + 3 files changed, 7 insertions(+), 4 deletions(-) diff --git a/debian/changelog b/debian/changelog index 664e5586..5f2fce98 100644 --- a/debian/changelog +++ b/debian/changelog @@ -3,6 +3,8 @@ gnocchi (1.3.3-1) unstable; urgency=medium * New upstream release. * Fixed python-future version to be >= 0.15. * Fixed (build-)depends for this release. + * Pushes app.wsgi to /usr/share/gnocchi-common. + * Fixed debian/copyright ordering. -- Thomas Goirand Thu, 21 Jan 2016 12:19:33 +0800 diff --git a/debian/copyright b/debian/copyright index 85d755cc..91dd8ca1 100644 --- a/debian/copyright +++ b/debian/copyright @@ -3,10 +3,6 @@ Upstream-Name: gnocchi Upstream-Contact: Julien Danjou Source: git://github.com/openstack/gnocchi.git -Files: debian/* -Copyright: (c) 2014, Thomas Goirand -License: Apache-2 - Files: * Copyright: (c) 2014-2015, Julien Danjou (c) 2014-2015, Mirantis INC. @@ -14,6 +10,10 @@ Copyright: (c) 2014-2015, Julien Danjou (c) 2014-2015, Objectif Libre License: Apache-2 +Files: debian/* +Copyright: (c) 2014-2016, Thomas Goirand +License: Apache-2 + License: Apache-2 Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/debian/gnocchi-common.install b/debian/gnocchi-common.install index cf6a7884..339258a5 100644 --- a/debian/gnocchi-common.install +++ b/debian/gnocchi-common.install @@ -1,2 +1,3 @@ etc/gnocchi/policy.json /usr/share/gnocchi-common etc/gnocchi/api-paste.ini /usr/share/gnocchi-common +gnocchi/rest/app.wsgi /usr/share/gnocchi-common -- GitLab From 5581fcb56fe1193a718ada4c3ca6c8734609754a Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Tue, 19 Jan 2016 13:39:05 +0100 Subject: [PATCH 0079/1483] influxdb: do not try to create a database This is not the responsibility of the driver to do that. Change-Id: I72384a292b62e056f2082cee21bf916561f3d3d7 Closes-Bug: #1535487 (cherry picked from commit 8fa223e7658a21c30ca81757f312a98c2eea40d1) --- gnocchi/storage/influxdb.py | 12 ------------ gnocchi/tests/base.py | 1 + setup-influxdb-tests.sh | 2 ++ 3 files changed, 3 insertions(+), 12 deletions(-) diff --git a/gnocchi/storage/influxdb.py b/gnocchi/storage/influxdb.py index e02f0bb6..13c8409f 100644 --- a/gnocchi/storage/influxdb.py +++ b/gnocchi/storage/influxdb.py @@ -73,18 +73,6 @@ class InfluxDBStorage(storage.StorageDriver): conf.influxdb_password, conf.influxdb_database) self.database = conf.influxdb_database - try: - dbs = [db['name'] for db in self.influx.get_list_database()] - if conf.influxdb_database not in dbs: - self.influx.create_database(conf.influxdb_database) - except influxdb.client.InfluxDBClientError as e: - if "database already exists" in e.content: - LOG.warning("InfluxDB database \"%s\" already exists", - self.database) - else: - LOG.warning('InfluxDB database creation failed: %s %s' - % (e.message, e.code), exc_info=True) - raise @staticmethod def _get_metric_id(metric): diff --git a/gnocchi/tests/base.py b/gnocchi/tests/base.py index 174e9f92..08194b58 100644 --- a/gnocchi/tests/base.py +++ b/gnocchi/tests/base.py @@ -402,6 +402,7 @@ class TestCase(base.BaseTestCase): elif self.conf.storage.driver == 'influxdb': 
self.conf.set_override('influxdb_block_until_data_ingested', True, 'storage') + self.conf.set_override('influxdb_database', 'test', 'storage') self.conf.set_override('influxdb_password', 'root', 'storage') self.conf.set_override('influxdb_port', os.getenv("GNOCCHI_TEST_INFLUXDB_PORT", diff --git a/setup-influxdb-tests.sh b/setup-influxdb-tests.sh index ecf8038a..78296a31 100755 --- a/setup-influxdb-tests.sh +++ b/setup-influxdb-tests.sh @@ -36,6 +36,8 @@ EOF PATH=$PATH:/opt/influxdb influxd -config $INFLUXDB_DATA/config > ${INFLUXDB_DATA}/out 2>&1 & # Wait for InfluxDB to start listening to connections wait_for_line "Listening on HTTP" ${INFLUXDB_DATA}/out +influx -port $GNOCCHI_TEST_INFLUXDB_PORT -execute "CREATE DATABASE test;" + $* -- GitLab From c111a405eab9558d8905d907fb00508544627224 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Wed, 2 Dec 2015 11:19:34 +0100 Subject: [PATCH 0080/1483] Remove keystonemiddleware workaround This is a partial cherry-pick from master with a bit more so we support Keystonemiddleware 4 and beyond while keeping 2.3.0 support. 
Change-Id: Iaa8b18c52b7bc47182a0e61676356994047569a7 (cherry picked from commit 7e56cd671282c34f5f8c0efc215e2f1c77fe38a2) --- gnocchi/service.py | 15 --------------- gnocchi/tests/test_rest.py | 14 ++++++++++++-- requirements.txt | 2 +- 3 files changed, 13 insertions(+), 18 deletions(-) diff --git a/gnocchi/service.py b/gnocchi/service.py index c5049e81..d5ba1256 100644 --- a/gnocchi/service.py +++ b/gnocchi/service.py @@ -17,8 +17,6 @@ import logging import multiprocessing -import keystoneclient.auth -from keystonemiddleware import opts as ks_opts from oslo_config import cfg from oslo_db import options as db_options from oslo_log import log @@ -35,8 +33,6 @@ def prepare_service(args=None): # FIXME(jd) Use the pkg_entry info to register the options of these libs log.register_options(conf) db_options.set_defaults(conf) - for group, options in ks_opts.list_auth_token_opts(): - conf.register_opts(list(options), group=group) policy_opts.set_defaults(conf) # Register our own Gnocchi options @@ -61,15 +57,4 @@ def prepare_service(args=None): log.setup(conf, 'gnocchi') conf.log_opt_values(LOG, logging.DEBUG) - # NOTE(sileht): keystonemiddleware assume we use the global CONF object - # (LP#1428317). In gnocchi, this is not the case, so we have to register - # some keystoneclient options ourself. Missing options are hidden into - # private area of keystonemiddleware and keystoneclient, so we - # create a keystoneclient AuthPlugin object, that will register the options - # into our configuration object. This have to be done after the - # configuration files have been loaded because the authplugin options - # depends of the authplugin present in the configuration file. 
- keystoneclient.auth.register_conf_options(conf, 'keystone_authtoken') - keystoneclient.auth.load_from_conf_options(conf, 'keystone_authtoken') - return conf diff --git a/gnocchi/tests/test_rest.py b/gnocchi/tests/test_rest.py index b032f5fc..104c83f6 100644 --- a/gnocchi/tests/test_rest.py +++ b/gnocchi/tests/test_rest.py @@ -24,7 +24,9 @@ import json import uuid import keystonemiddleware.auth_token +from keystonemiddleware import opts as ks_opts import mock +import oslo_config from oslo_utils import timeutils import six from stevedore import extension @@ -183,14 +185,22 @@ class RestTest(tests_base.TestCase, testscenarios.TestWithScenarios): pecan_config['storage'] = self.storage pecan_config['not_implemented_middleware'] = False + # NOTE(sileht): We register keystonemiddleware options + for group, options in ks_opts.list_auth_token_opts(): + self.conf.register_opts(list(options), group=group) + self.conf.set_override("cache", TestingApp.CACHE_NAME, group='keystone_authtoken') # TODO(jd) Override these options with values. They are not used, but # if they are None (their defaults), the keystone authtoken middleware # prints a warning… :( When the bug is fixed we can remove that! 
# See https://bugs.launchpad.net/keystonemiddleware/+bug/1429179 - self.conf.set_override("identity_uri", "foobar", - group="keystone_authtoken") + try: + self.conf.set_override("identity_uri", "foobar", + group="keystone_authtoken") + except oslo_config.cfg.NoSuchOptError: + # This option does not exist in keystonemiddleware>=4 + pass self.conf.set_override("auth_uri", "foobar", group="keystone_authtoken") self.conf.set_override("delay_auth_decision", diff --git a/requirements.txt b/requirements.txt index a8b5a431..ed1cc583 100644 --- a/requirements.txt +++ b/requirements.txt @@ -30,7 +30,7 @@ WebOb>=1.4.1 alembic>=0.7.6,!=0.8.1 psycopg2 pymysql -keystonemiddleware>=2.3.0,<4 +keystonemiddleware>=2.3.0 PasteDeploy sphinx_bootstrap_theme prettytable -- GitLab From 6f74a33bec8e35edcaa898b680fff3b71bcc12b4 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Mon, 11 Jan 2016 14:43:10 +0100 Subject: [PATCH 0081/1483] rest: fix timestamp parsing for aggregates Currently, start/stop are not parsed as Timestamp like in the rest of the measures retrieve requests. This patches fixes that. 
Closes-Bug: #1523549 (partially cherry-picked from 7df853e61b6fc7feff206e95b542d51284220a62) Change-Id: Ie60f8f8140675964bb058ed733dfa7958eddd419 --- gnocchi/rest/__init__.py | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/gnocchi/rest/__init__.py b/gnocchi/rest/__init__.py index 075dbd00..a4ca9655 100644 --- a/gnocchi/rest/__init__.py +++ b/gnocchi/rest/__init__.py @@ -388,6 +388,18 @@ class AggregatedMetricController(rest.RestController): except ValueError: abort(400, 'needed_overlap must be a number') + if start is not None: + try: + start = Timestamp(start) + except Exception: + abort(400, "Invalid value for start") + + if stop is not None: + try: + stop = Timestamp(stop) + except Exception: + abort(400, "Invalid value for stop") + if (aggregation not in archive_policy.ArchivePolicy.VALID_AGGREGATION_METHODS): abort( -- GitLab From 577b403e41c101f84b8916a28cb8d58e8950786a Mon Sep 17 00:00:00 2001 From: heha Date: Sun, 20 Dec 2015 07:40:28 -0500 Subject: [PATCH 0082/1483] Resource list filtered by project_id or created_by user_id and project_id List some resources filtered by the project_id that you provide, and the resources that you have created. 
Change-Id: I30f473ea720835447da3282647d803305390941a Closes-Bug: #1520439 Closes-Bug: #1521895 (cherry picked from commit 396a93e7bdf246e5bbf19d43a49f21132969d8d8) --- etc/gnocchi/policy.json | 6 +-- gnocchi/rest/__init__.py | 81 ++++++++++++++++++++++-------------- gnocchi/tests/test_rest.py | 84 ++++++++++++++++++++++++++++++++++++++ 3 files changed, 136 insertions(+), 35 deletions(-) diff --git a/etc/gnocchi/policy.json b/etc/gnocchi/policy.json index 01aa6446..b1a52c05 100644 --- a/etc/gnocchi/policy.json +++ b/etc/gnocchi/policy.json @@ -9,10 +9,8 @@ "get resource": "rule:admin_or_creator or rule:resource_owner", "update resource": "rule:admin_or_creator", "delete resource": "rule:admin_or_creator", - "list resource": "", - "list all resource": "role:admin", - "search resource": "", - "search all resource": "role:admin", + "list resource": "rule:admin_or_creator or rule:resource_owner", + "search resource": "rule:admin_or_creator or rule:resource_owner", "get archive policy": "", "list archive policy": "", diff --git a/gnocchi/rest/__init__.py b/gnocchi/rest/__init__.py index a4ca9655..5ac7378f 100644 --- a/gnocchi/rest/__init__.py +++ b/gnocchi/rest/__init__.py @@ -111,6 +111,44 @@ def enforce(rule, target): abort(403) +def _get_list_resource_policy_filter(rule, resource_type, user, project): + try: + # Check if the policy allows the user to list any resource + enforce(rule, { + "resource_type": resource_type, + }) + except webob.exc.HTTPForbidden: + policy_filter = [] + try: + # Check if the policy allows the user to list resources linked + # to their project + enforce(rule, { + "resource_type": resource_type, + "project_id": project, + }) + except webob.exc.HTTPForbidden: + pass + else: + policy_filter.append({"=": {"project_id": project}}) + try: + # Check if the policy allows the user to list resources linked + # to their created_by_project + enforce(rule, { + "resource_type": resource_type, + "created_by_project_id": project, + }) + except 
webob.exc.HTTPForbidden: + pass + else: + policy_filter.append({"=": {"created_by_project_id": project}}) + + if not policy_filter: + # We need to have at least one policy filter in place + abort(403, "Insufficient privileges") + + return {"or": policy_filter} + + def set_resp_location_hdr(location): location = '%s%s' % (pecan.request.script_name, location) # NOTE(sileht): according the pep-3333 the headers must be @@ -976,27 +1014,16 @@ class GenericResourcesController(rest.RestController): history = get_history(kwargs) pagination_opts = get_pagination_options( kwargs, RESOURCE_DEFAULT_PAGINATION) - - try: - enforce("list all resource", { - "resource_type": self._resource_type, - }) - except webob.exc.HTTPForbidden: - enforce("list resource", { - "resource_type": self._resource_type, - }) - user, project = get_user_and_project() - attr_filter = {"and": [{"=": {"created_by_user_id": user}}, - {"=": {"created_by_project_id": project}}]} - else: - attr_filter = None + user, project = get_user_and_project() + policy_filter = _get_list_resource_policy_filter( + "list resource", self._resource_type, user, project) try: # FIXME(sileht): next API version should returns # {'resources': [...], 'links': [ ... 
pagination rel ...]} return pecan.request.indexer.list_resources( self._resource_type, - attribute_filter=attr_filter, + attribute_filter=policy_filter, details=details, history=history, **pagination_opts @@ -1136,25 +1163,17 @@ class SearchResourceTypeController(rest.RestController): pagination_opts = get_pagination_options( kwargs, RESOURCE_DEFAULT_PAGINATION) - try: - enforce("search all resource", { - "resource_type": self._resource_type, - }) - except webob.exc.HTTPForbidden: - enforce("search resource", { - "resource_type": self._resource_type, - }) - user, project = get_user_and_project() + user, project = get_user_and_project() + policy_filter = _get_list_resource_policy_filter( + "search resource", self._resource_type, user, project) + if policy_filter: if attr_filter: attr_filter = {"and": [ - {"=": {"created_by_user_id": user}}, - {"=": {"created_by_project_id": project}}, - attr_filter]} - else: - attr_filter = {"and": [ - {"=": {"created_by_user_id": user}}, - {"=": {"created_by_project_id": project}}, + policy_filter, + attr_filter ]} + else: + attr_filter = policy_filter try: return pecan.request.indexer.list_resources( diff --git a/gnocchi/tests/test_rest.py b/gnocchi/tests/test_rest.py index 104c83f6..1c3c2a91 100644 --- a/gnocchi/tests/test_rest.py +++ b/gnocchi/tests/test_rest.py @@ -1526,6 +1526,49 @@ class ResourceTest(RestTest): self.assertGreaterEqual(len(resources), 1) self.assertEqual(created_resource, resources[0]) + def test_search_resources_with_another_project_id(self): + u1 = str(uuid.uuid4()) + result = self.app.post_json( + "/v1/resource/generic", + params={ + "id": str(uuid.uuid4()), + "started_at": "2014-01-01 02:02:02", + "user_id": u1, + "project_id": FakeMemcache.PROJECT_ID_2, + }) + g = json.loads(result.text) + + with self.app.use_another_user(): + result = self.app.post_json( + "/v1/resource/generic", + params={ + "id": str(uuid.uuid4()), + "started_at": "2014-01-01 03:03:03", + "user_id": u1, + "project_id": 
str(uuid.uuid4()), + }) + j = json.loads(result.text) + g_found = False + j_found = False + + result = self.app.post_json( + "/v1/search/resource/generic", + params={"=": {"user_id": u1}}, + status=200) + resources = json.loads(result.text) + self.assertGreaterEqual(len(resources), 2) + for r in resources: + if r['id'] == str(g['id']): + self.assertEqual(g, r) + g_found = True + elif r['id'] == str(j['id']): + self.assertEqual(j, r) + j_found = True + if g_found and j_found: + break + else: + self.fail("Some resources were not found") + def test_search_resources_by_unknown_field(self): result = self.app.post_json( "/v1/search/resource/" + self.resource_type, @@ -1650,6 +1693,47 @@ class ResourceTest(RestTest): else: self.fail("Some resources were not found") + def test_list_resources_with_another_project_id(self): + result = self.app.post_json( + "/v1/resource/generic", + params={ + "id": str(uuid.uuid4()), + "started_at": "2014-01-01 02:02:02", + "user_id": FakeMemcache.USER_ID_2, + "project_id": FakeMemcache.PROJECT_ID_2, + }) + g = json.loads(result.text) + + with self.app.use_another_user(): + result = self.app.post_json( + "/v1/resource/generic", + params={ + "id": str(uuid.uuid4()), + "started_at": "2014-01-01 03:03:03", + "user_id": str(uuid.uuid4()), + "project_id": str(uuid.uuid4()), + }) + j = json.loads(result.text) + + g_found = False + j_found = False + + result = self.app.get("/v1/resource/generic") + self.assertEqual(200, result.status_code) + resources = json.loads(result.text) + self.assertGreaterEqual(len(resources), 2) + for r in resources: + if r['id'] == str(g['id']): + self.assertEqual(g, r) + g_found = True + elif r['id'] == str(j['id']): + self.assertEqual(j, r) + j_found = True + if g_found and j_found: + break + else: + self.fail("Some resources were not found") + def test_list_resources_with_details(self): self._do_test_list_resources_with_detail( lambda: self.app.get("/v1/resource/generic?details=true")) -- GitLab From 
804886e8e9db9b16e2f268eff4800b4763db4246 Mon Sep 17 00:00:00 2001 From: Mohammed Naser Date: Fri, 22 Jan 2016 14:44:32 -0500 Subject: [PATCH 0083/1483] Skip already processed measurements When using multiple workers, the list of metrics to process could possibly include those that have already been procesed by other workers. This causes the worker to do extra processing. This patch addresses this by ensuring that there is measurements in the system first before starting to process it. Change-Id: I0307d3ca679c087d141db6bd02d8de1706298bbb Partial-Bug: #1536909 (cherry picked from commit d8170f43af936bc1f56f5f18488294ca5858d7c1) --- gnocchi/storage/_carbonara.py | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/gnocchi/storage/_carbonara.py b/gnocchi/storage/_carbonara.py index 7892aaf8..d53645a3 100644 --- a/gnocchi/storage/_carbonara.py +++ b/gnocchi/storage/_carbonara.py @@ -178,6 +178,13 @@ class CarbonaraBasedStorage(storage.StorageDriver): try: LOG.debug("Processing measures for %s" % metric) with self._process_measure_for_metric(metric) as measures: + # NOTE(mnaser): The metric could have been handled by + # another worker, ignore if no measures. 
+ if len(measures) == 0: + LOG.debug("Skipping %s (already processed)" + % metric) + continue + try: with timeutils.StopWatch() as sw: raw_measures = self._get_measures(metric, -- GitLab From 6297156f053db9414d9aed7ae753bee030c2b230 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Tue, 19 Jan 2016 17:28:33 +0100 Subject: [PATCH 0084/1483] rest: fix metric access for linked projects (partial cherry-picked from 7178859adbe0e94719591c551f4592314cba5864 with only the test, as the regression was only introduced in master) Change-Id: I95da3decb69ae4d4f3d95fc749f39b7f7fe2a26c Closes-Bug: #1535415 --- gnocchi/tests/test_rest.py | 18 ++++++++++++++++-- 1 file changed, 16 insertions(+), 2 deletions(-) diff --git a/gnocchi/tests/test_rest.py b/gnocchi/tests/test_rest.py index 1c3c2a91..3cecd847 100644 --- a/gnocchi/tests/test_rest.py +++ b/gnocchi/tests/test_rest.py @@ -1,9 +1,8 @@ # -*- encoding: utf-8 -*- # +# Copyright © 2016 Red Hat, Inc. # Copyright © 2014-2015 eNovance # -# Authors: Julien Danjou -# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at @@ -335,6 +334,21 @@ class ArchivePolicyTest(RestTest): class MetricTest(RestTest): + def test_get_metric_with_another_user_linked_resource(self): + result = self.app.post_json( + "/v1/resource/generic", + params={ + "id": str(uuid.uuid4()), + "started_at": "2014-01-01 02:02:02", + "user_id": FakeMemcache.USER_ID_2, + "project_id": FakeMemcache.PROJECT_ID_2, + "metrics": {"foobar": {"archive_policy_name": "low"}}, + }) + resource = json.loads(result.text) + metric_id = resource["metrics"]["foobar"] + with self.app.use_another_user(): + self.app.get("/v1/metric/%s" % metric_id) + def test_get_metric_with_another_user(self): result = self.app.post_json("/v1/metric", params={"archive_policy_name": "medium"}, -- GitLab From 97334a0e68b892ee3a0f3c6d88205e202afcb7fb Mon Sep 17 00:00:00 2001 From: Guangyu Suo Date: Sat, 16 Jan 2016 01:59:06 +0000 Subject: [PATCH 0085/1483] Fix the wrong datetime format in _store_measures method The %m stands for Month, and the %M stands for Minute, now it uses %M to represent the Month, which may causes the measures to handle has no order, this patch fixed this. 
Change-Id: I7f770148e095c415fdc1f4ef0a1ac913945455a4 --- gnocchi/storage/ceph.py | 2 +- gnocchi/storage/file.py | 2 +- gnocchi/storage/swift.py | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/gnocchi/storage/ceph.py b/gnocchi/storage/ceph.py index f5219692..ee7c552b 100644 --- a/gnocchi/storage/ceph.py +++ b/gnocchi/storage/ceph.py @@ -74,7 +74,7 @@ class CephStorage(_carbonara.CarbonaraBasedStorage): self.MEASURE_PREFIX, str(metric.id), str(uuid.uuid4()), - datetime.datetime.utcnow().strftime("%Y%M%d_%H:%M:%S"))) + datetime.datetime.utcnow().strftime("%Y%m%d_%H:%M:%S"))) with self._get_ioctx() as ioctx: ioctx.write_full(name, data) ioctx.set_xattr(self.MEASURE_PREFIX, name, "") diff --git a/gnocchi/storage/file.py b/gnocchi/storage/file.py index 021ccf6c..ffbde963 100644 --- a/gnocchi/storage/file.py +++ b/gnocchi/storage/file.py @@ -80,7 +80,7 @@ class FileStorage(_carbonara.CarbonaraBasedStorage): path = os.path.join(self.measure_path, six.text_type(metric_id)) if random_id: if random_id is True: - now = datetime.datetime.utcnow().strftime("_%Y%M%d_%H:%M:%S") + now = datetime.datetime.utcnow().strftime("_%Y%m%d_%H:%M:%S") random_id = six.text_type(uuid.uuid4()) + now return os.path.join(path, random_id) return path diff --git a/gnocchi/storage/swift.py b/gnocchi/storage/swift.py index 71cf13fe..1b79e608 100644 --- a/gnocchi/storage/swift.py +++ b/gnocchi/storage/swift.py @@ -95,7 +95,7 @@ class SwiftStorage(_carbonara.CarbonaraBasedStorage): raise storage.MetricAlreadyExists(metric) def _store_measures(self, metric, data): - now = datetime.datetime.utcnow().strftime("_%Y%M%d_%H:%M:%S") + now = datetime.datetime.utcnow().strftime("_%Y%m%d_%H:%M:%S") self.swift.put_object( self.MEASURE_PREFIX, six.text_type(metric.id) + "/" + six.text_type(uuid.uuid4()) + now, -- GitLab From cffcc4d5082c29efc789f0cf4bc81100b82c2d33 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Tue, 26 Jan 2016 12:18:43 +0100 Subject: [PATCH 0086/1483] ceph, swift: do not 
fail if unprocess measures already deleted If several workers are trying to delete the same unprocessed metric at the same time, they might fail to do so. Ignore these error, as it's not a big deal in the end: they're deleted. Closes-Bug: #1537767 Change-Id: Ib8002b757698a8c6601bc63614cc0c63a18f2e83 (cherry picked from commit 115c275a667bf60c694d02f12800de52c857593e) --- gnocchi/storage/_carbonara.py | 18 +++++++++++++----- gnocchi/storage/ceph.py | 8 ++++++-- gnocchi/storage/swift.py | 8 +++++++- 3 files changed, 26 insertions(+), 8 deletions(-) diff --git a/gnocchi/storage/_carbonara.py b/gnocchi/storage/_carbonara.py index d53645a3..43af119f 100644 --- a/gnocchi/storage/_carbonara.py +++ b/gnocchi/storage/_carbonara.py @@ -54,8 +54,8 @@ class CarbonaraBasedStorageToozLock(object): def stop(self): self.coord.stop() - def __call__(self, metric): - lock_name = b"gnocchi-" + str(metric.id).encode('ascii') + def __call__(self, metric_id): + lock_name = b"gnocchi-" + str(metric_id).encode('ascii') return self.coord.get_lock(lock_name) @@ -146,7 +146,7 @@ class CarbonaraBasedStorage(storage.StorageDriver): raise NotImplementedError def delete_metric(self, metric): - with self._lock(metric): + with self._lock(metric.id): self._delete_metric(metric) @staticmethod @@ -167,9 +167,17 @@ class CarbonaraBasedStorage(storage.StorageDriver): deleted_metrics_id = (set(map(uuid.UUID, metrics_to_process)) - set(m.id for m in metrics)) for metric_id in deleted_metrics_id: - self._delete_unprocessed_measures_for_metric_id(metric_id) + # NOTE(jd): We need to lock the metric otherwise we might delete + # measures that another worker might be processing. Deleting + # measurement files under its feet is not nice! 
+ lock = self._lock(metric_id) + lock.acquire(blocking=sync) + try: + self._delete_unprocessed_measures_for_metric_id(metric_id) + finally: + lock.release() for metric in metrics: - lock = self._lock(metric) + lock = self._lock(metric.id) agg_methods = list(metric.archive_policy.aggregation_methods) # Do not block if we cannot acquire the lock, that means some other # worker is doing the job. We'll just ignore this metric and may diff --git a/gnocchi/storage/ceph.py b/gnocchi/storage/ceph.py index ee7c552b..1eaef052 100644 --- a/gnocchi/storage/ceph.py +++ b/gnocchi/storage/ceph.py @@ -104,8 +104,12 @@ class CephStorage(_carbonara.CarbonaraBasedStorage): object_names = self._list_object_names_to_process(ctx, object_prefix) for n in object_names: - ctx.rm_xattr(self.MEASURE_PREFIX, n) - ctx.remove_object(n) + try: + ctx.rm_xattr(self.MEASURE_PREFIX, n) + ctx.remove_object(n) + except rados.ObjectNotFound: + # Another worker may have removed it, don't worry. + pass @contextlib.contextmanager def _process_measure_for_metric(self, metric): diff --git a/gnocchi/storage/swift.py b/gnocchi/storage/swift.py index 1b79e608..2a907b3c 100644 --- a/gnocchi/storage/swift.py +++ b/gnocchi/storage/swift.py @@ -119,7 +119,13 @@ class SwiftStorage(_carbonara.CarbonaraBasedStorage): def _delete_unprocessed_measures_for_metric_id(self, metric_id): files = self._list_measure_files_for_metric_id(metric_id) for f in files: - self.swift.delete_object(self.MEASURE_PREFIX, f['name']) + try: + self.swift.delete_object(self.MEASURE_PREFIX, f['name']) + except swclient.ClientException as e: + # If the object has already been deleted by another worker, do + # not worry. 
+ if e.http_status != 404: + raise @contextlib.contextmanager def _process_measure_for_metric(self, metric): -- GitLab From 281ddfca2d4e5e49f61cd0b7e97f20f76a3cda43 Mon Sep 17 00:00:00 2001 From: Thomas Goirand Date: Wed, 27 Jan 2016 11:14:56 +0000 Subject: [PATCH 0087/1483] Now packaging from debian/mitaka --- debian/gbp.conf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/debian/gbp.conf b/debian/gbp.conf index 10f9500d..7436424b 100644 --- a/debian/gbp.conf +++ b/debian/gbp.conf @@ -1,6 +1,6 @@ [DEFAULT] upstream-branch = master -debian-branch = debian/unstable +debian-branch = debian/mitaka upstream-tag = %(version)s compression = xz -- GitLab From 38dfdb5a05ecfca0c697dd1095f175645aeb3024 Mon Sep 17 00:00:00 2001 From: Thomas Goirand Date: Wed, 27 Jan 2016 11:22:04 +0000 Subject: [PATCH 0088/1483] Now packaging 1.3.3+2016.01.27.git.e1339d77a9 --- debian/changelog | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/debian/changelog b/debian/changelog index 5f2fce98..70abdd14 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,4 +1,4 @@ -gnocchi (1.3.3-1) unstable; urgency=medium +gnocchi (1.3.3+2016.01.27.git.e1339d77a9-1) unstable; urgency=medium * New upstream release. * Fixed python-future version to be >= 0.15. 
-- GitLab From e734d010d10734630dc71eddcf1609cde828d361 Mon Sep 17 00:00:00 2001 From: Thomas Goirand Date: Wed, 27 Jan 2016 11:24:28 +0000 Subject: [PATCH 0089/1483] Fixed diff with upstream tag --- devstack/plugin.sh | 9 ------ gnocchi/carbonara.py | 5 ---- gnocchi/tests/test_carbonara.py | 53 --------------------------------- tox.ini | 2 -- 4 files changed, 69 deletions(-) diff --git a/devstack/plugin.sh b/devstack/plugin.sh index a30ff952..16cb7e1c 100644 --- a/devstack/plugin.sh +++ b/devstack/plugin.sh @@ -324,15 +324,6 @@ function preinstall_gnocchi { fi } -function preinstall_gnocchi { - # Needed to build psycopg2 - if is_ubuntu; then - install_package libpq-dev - else - install_package postgresql-devel - fi -} - # install_gnocchi() - Collect source and prepare function install_gnocchi { if [ "${GNOCCHI_COORDINATOR_URL%%:*}" == "redis" ]; then diff --git a/gnocchi/carbonara.py b/gnocchi/carbonara.py index 5c4820bc..46c262e9 100644 --- a/gnocchi/carbonara.py +++ b/gnocchi/carbonara.py @@ -153,11 +153,6 @@ class TimeSerie(SerializableMixin): return pandas.tseries.offsets.Nano(value * 10e8) return pandas.tseries.frequencies.to_offset(value) - @staticmethod - def _round_timestamp(ts, freq): - return pandas.Timestamp( - (ts.value // freq.delta.value) * freq.delta.value) - class BoundTimeSerie(TimeSerie): def __init__(self, ts=None, block_size=None, back_window=0): diff --git a/gnocchi/tests/test_carbonara.py b/gnocchi/tests/test_carbonara.py index 5381c960..6c78eb8a 100644 --- a/gnocchi/tests/test_carbonara.py +++ b/gnocchi/tests/test_carbonara.py @@ -907,59 +907,6 @@ class TestAggregatedTimeSerie(base.BaseTestCase): max_size=agg.max_size, aggregation_method=agg.aggregation_method)) - def test_aggregated_partial_overlap(self): - tsc1 = carbonara.TimeSerieArchive.from_definitions([(1, 86400)]) - tsb1 = carbonara.BoundTimeSerie(block_size=tsc1.max_block_size) - tsc2 = carbonara.TimeSerieArchive.from_definitions([(1, 86400)]) - tsb2 = 
carbonara.BoundTimeSerie(block_size=tsc2.max_block_size) - - tsb1.set_values([ - (datetime.datetime(2015, 12, 3, 13, 19, 15), 1), - (datetime.datetime(2015, 12, 3, 13, 20, 15), 1), - (datetime.datetime(2015, 12, 3, 13, 21, 15), 1), - (datetime.datetime(2015, 12, 3, 13, 22, 15), 1), - ], before_truncate_callback=tsc1.update) - - tsb2.set_values([ - (datetime.datetime(2015, 12, 3, 13, 21, 15), 10), - (datetime.datetime(2015, 12, 3, 13, 22, 15), 10), - (datetime.datetime(2015, 12, 3, 13, 23, 15), 10), - (datetime.datetime(2015, 12, 3, 13, 24, 15), 10), - ], before_truncate_callback=tsc2.update) - - output = carbonara.TimeSerieArchive.aggregated( - [tsc1, tsc2], aggregation="sum") - - self.assertEqual([ - (pandas.Timestamp('2015-12-03 13:21:15'), 1.0, 11.0), - (pandas.Timestamp('2015-12-03 13:22:15'), 1.0, 11.0), - ], output) - - dtfrom = datetime.datetime(2015, 12, 3, 13, 17, 0) - dtto = datetime.datetime(2015, 12, 3, 13, 25, 0) - - output = carbonara.TimeSerieArchive.aggregated( - [tsc1, tsc2], from_timestamp=dtfrom, to_timestamp=dtto, - aggregation="sum", needed_percent_of_overlap=0) - - self.assertEqual([ - (pandas.Timestamp('2015-12-03 13:19:15'), 1.0, 1.0), - (pandas.Timestamp('2015-12-03 13:20:15'), 1.0, 1.0), - (pandas.Timestamp('2015-12-03 13:21:15'), 1.0, 11.0), - (pandas.Timestamp('2015-12-03 13:22:15'), 1.0, 11.0), - (pandas.Timestamp('2015-12-03 13:23:15'), 1.0, 10.0), - (pandas.Timestamp('2015-12-03 13:24:15'), 1.0, 10.0), - ], output) - - # By default we require 100% of point that overlap - # so that fail if from or to is set - self.assertRaises(carbonara.UnAggregableTimeseries, - carbonara.TimeSerieArchive.aggregated, - [tsc1, tsc2], to_timestamp=dtto) - self.assertRaises(carbonara.UnAggregableTimeseries, - carbonara.TimeSerieArchive.aggregated, - [tsc1, tsc2], from_timestamp=dtfrom) - class CarbonaraCmd(base.BaseTestCase): diff --git a/tox.ini b/tox.ini index 591cef72..f1d9f928 100644 --- a/tox.ini +++ b/tox.ini @@ -15,8 +15,6 @@ setenv = 
GNOCCHI_TEST_INDEXER_DRIVER=postgresql GNOCCHI_TEST_STORAGE_DRIVERS=file swift ceph GNOCCHI_TEST_INDEXER_DRIVERS=postgresql mysql - py{27,34}-postgresql: GNOCCHI_TEST_INDEXER_DRIVERS=postgresql - py{27,34}-mysql: GNOCCHI_TEST_INDEXER_DRIVERS=mysql py{27,34}-{postgresql,mysql}-file: GNOCCHI_TEST_STORAGE_DRIVERS=file py{27,34}-{postgresql,mysql}-swift: GNOCCHI_TEST_STORAGE_DRIVERS=swift py{27,34}-{postgresql,mysql}-ceph: GNOCCHI_TEST_STORAGE_DRIVERS=ceph -- GitLab From 0d6918bcef2dbdf3275620bc15176f106d58df7a Mon Sep 17 00:00:00 2001 From: Thomas Goirand Date: Wed, 27 Jan 2016 13:11:29 +0000 Subject: [PATCH 0090/1483] Fixed (build-)depends --- debian/control | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/debian/control b/debian/control index 2ec239f0..08d2a2e4 100644 --- a/debian/control +++ b/debian/control @@ -19,18 +19,19 @@ Build-Depends-Indep: alembic (>= 0.7.6), python-ceilometer (>= 1:5.0.0), python-concurrent.futures (>= 2.1.6), python-coverage (>= 3.6), + python-doc8, python-fixtures, python-future (>= 0.15), python-gabbi (>= 1), python-jinja2, python-jsonpatch (>= 1.9), python-keystoneclient (>= 1:1.6.0), - python-keystonemiddleware (>= 2.3.0), + python-keystonemiddleware (>= 4.0.0), python-mock, python-msgpack, python-mysqldb, python-numpy, - python-oslo.config (>= 1:2.3.0), + python-oslo.config (>= 1:2.6.0), python-oslo.db (>= 1.8.0), python-oslo.log (>= 1.0.0), python-oslo.middleware, @@ -61,7 +62,6 @@ Build-Depends-Indep: alembic (>= 0.7.6), python-testtools (>= 0.9.38), python-tooz (>= 0.13.1), python-trollius, - python-tz, python-voluptuous, python-webob (>= 1.4.1), python-webtest (>= 2.0.16), @@ -82,10 +82,10 @@ Depends: alembic (>= 0.7.6), python-jinja2, python-jsonpatch (>= 1.9), python-keystoneclient (>= 1:1.6.0), - python-keystonemiddleware (>= 2.3.0), + python-keystonemiddleware (>= 4.0.0), python-msgpack, python-numpy, - python-oslo.config (>= 1:2.3.0), + python-oslo.config (>= 1:2.6.0), python-oslo.db (>= 1.8.0), 
python-oslo.log (>= 1.0.0), python-oslo.middleware, @@ -109,7 +109,6 @@ Depends: alembic (>= 0.7.6), python-swiftclient (>= 2.5.0), python-tooz (>= 0.13.1), python-trollius, - python-tz, python-voluptuous, python-webob (>= 1.4.1), python-werkzeug, -- GitLab From fc1addf22fb099d2a287b4b75923554bd4fef932 Mon Sep 17 00:00:00 2001 From: Thomas Goirand Date: Wed, 27 Jan 2016 15:35:02 +0000 Subject: [PATCH 0091/1483] Follow upstream rename of GNOCCHI_TEST_INDEXER_URL to GNOCCHI_INDEXER_URL. --- debian/changelog | 1 + debian/rules | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/debian/changelog b/debian/changelog index 70abdd14..942ca8a7 100644 --- a/debian/changelog +++ b/debian/changelog @@ -5,6 +5,7 @@ gnocchi (1.3.3+2016.01.27.git.e1339d77a9-1) unstable; urgency=medium * Fixed (build-)depends for this release. * Pushes app.wsgi to /usr/share/gnocchi-common. * Fixed debian/copyright ordering. + * Follow upstream rename of GNOCCHI_TEST_INDEXER_URL to GNOCCHI_INDEXER_URL. -- Thomas Goirand Thu, 21 Jan 2016 12:19:33 +0800 diff --git a/debian/rules b/debian/rules index 67247cbc..505d5647 100755 --- a/debian/rules +++ b/debian/rules @@ -53,7 +53,7 @@ ifeq (,$(findstring nocheck, $(DEB_BUILD_OPTIONS))) export PGHOST=$$PG_MYTMPDIR ; \ chmod +x debian/start_pg.sh ; \ debian/start_pg.sh $$PG_MYTMPDIR ; \ - export GNOCCHI_TEST_INDEXER_URL="postgresql:///template1?host=$$PG_MYTMPDIR&port=9823" ; \ + export GNOCCHI_INDEXER_URL="postgresql:///template1?host=$$PG_MYTMPDIR&port=9823" ; \ export GNOCCHI_TEST_STORAGE_DRIVER=file ; \ echo "===> Testing with python$$i (python$$PYMAJOR)" ; \ rm -rf .testrepository ; \ -- GitLab From 2efff2e5c304648d31e62e1f8e50a26466bc0ec1 Mon Sep 17 00:00:00 2001 From: Thomas Goirand Date: Wed, 27 Jan 2016 15:54:49 +0000 Subject: [PATCH 0092/1483] Fixed namespaces when generating config file. 
--- debian/changelog | 1 + debian/rules | 5 ++++- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/debian/changelog b/debian/changelog index 942ca8a7..a2766ed7 100644 --- a/debian/changelog +++ b/debian/changelog @@ -6,6 +6,7 @@ gnocchi (1.3.3+2016.01.27.git.e1339d77a9-1) unstable; urgency=medium * Pushes app.wsgi to /usr/share/gnocchi-common. * Fixed debian/copyright ordering. * Follow upstream rename of GNOCCHI_TEST_INDEXER_URL to GNOCCHI_INDEXER_URL. + * Fixed namespaces when generating config file. -- Thomas Goirand Thu, 21 Jan 2016 12:19:33 +0800 diff --git a/debian/rules b/debian/rules index 505d5647..1b9f1daa 100755 --- a/debian/rules +++ b/debian/rules @@ -24,13 +24,16 @@ override_dh_auto_install: # --root $(CURDIR)/debian/python3-gnocchi; \ # done rm -rf $(CURDIR)/debian/python*-gnocchi/usr/lib/python*/dist-packages/*.pth + rm -rf $(CURDIR)/debian/python*-gnocchi/usr/etc mkdir -p $(CURDIR)/debian/gnocchi-common/usr/share/gnocchi-common - oslo-config-generator --output-file $(CURDIR)/debian/gnocchi-common/usr/share/gnocchi-common/gnocchi.conf \ + PYTHONPATH=$(CURDIR)/debian/python-gnocchi/usr/lib/python2.7/dist-packages oslo-config-generator \ + --output-file $(CURDIR)/debian/gnocchi-common/usr/share/gnocchi-common/gnocchi.conf \ --wrap-width 140 \ --namespace gnocchi \ --namespace oslo.db \ --namespace oslo.log \ + --namespace oslo.middleware \ --namespace oslo.policy \ --namespace keystonemiddleware.auth_token sed -i 's|^[ \t#]*url[ \t#]*=.*|url = sqlite:////var/lib/gnocchi/gnocchidb|' $(CURDIR)/debian/gnocchi-common/usr/share/gnocchi-common/gnocchi.conf -- GitLab From 25bde5721aaa2645e62ec1b4ef530da5303e5aae Mon Sep 17 00:00:00 2001 From: Thomas Goirand Date: Wed, 27 Jan 2016 17:17:17 +0000 Subject: [PATCH 0093/1483] Removed influxdb and influxdb-dev from build-depends-indep. 
--- debian/changelog | 6 ++++++ debian/control | 2 -- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/debian/changelog b/debian/changelog index a2766ed7..1e376132 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +gnocchi (1.3.3+2016.01.27.git.e1339d77a9-2) UNRELEASED; urgency=medium + + * Removed influxdb and influxdb-dev from build-depends-indep. + + -- Thomas Goirand Wed, 27 Jan 2016 17:16:52 +0000 + gnocchi (1.3.3+2016.01.27.git.e1339d77a9-1) unstable; urgency=medium * New upstream release. diff --git a/debian/control b/debian/control index 08d2a2e4..f19ac551 100644 --- a/debian/control +++ b/debian/control @@ -11,8 +11,6 @@ Build-Depends: debhelper (>= 9), python-setuptools, python-sphinx, Build-Depends-Indep: alembic (>= 0.7.6), - influxdb, - influxdb-dev, libpq-dev, postgresql, postgresql-server-dev-all, -- GitLab From aaa68ab37e34706f632053b560a0fed490ceaad0 Mon Sep 17 00:00:00 2001 From: Thomas Goirand Date: Wed, 27 Jan 2016 17:23:35 +0000 Subject: [PATCH 0094/1483] Fixed diff with upstream tag. --- .gitreview | 1 - 1 file changed, 1 deletion(-) diff --git a/.gitreview b/.gitreview index e93df263..e4b8477d 100644 --- a/.gitreview +++ b/.gitreview @@ -2,4 +2,3 @@ host=review.openstack.org port=29418 project=openstack/gnocchi.git -defaultbranch=stable/1.3 -- GitLab From 22668e75edf5a83449b0b7cbb20a8ea62e1664b2 Mon Sep 17 00:00:00 2001 From: Thomas Goirand Date: Wed, 27 Jan 2016 17:27:51 +0000 Subject: [PATCH 0095/1483] Do not build-depends on python-ceilometer. --- debian/changelog | 1 + debian/control | 1 - 2 files changed, 1 insertion(+), 1 deletion(-) diff --git a/debian/changelog b/debian/changelog index 1e376132..c7b672ae 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,6 +1,7 @@ gnocchi (1.3.3+2016.01.27.git.e1339d77a9-2) UNRELEASED; urgency=medium * Removed influxdb and influxdb-dev from build-depends-indep. + * Do not build-depends on python-ceilometer. 
-- Thomas Goirand Wed, 27 Jan 2016 17:16:52 +0000 diff --git a/debian/control b/debian/control index f19ac551..d6f9b278 100644 --- a/debian/control +++ b/debian/control @@ -14,7 +14,6 @@ Build-Depends-Indep: alembic (>= 0.7.6), libpq-dev, postgresql, postgresql-server-dev-all, - python-ceilometer (>= 1:5.0.0), python-concurrent.futures (>= 2.1.6), python-coverage (>= 3.6), python-doc8, -- GitLab From 34c29672f9cb0199f6c8162484a6dda775eb67f8 Mon Sep 17 00:00:00 2001 From: Thomas Goirand Date: Thu, 28 Jan 2016 04:13:22 +0000 Subject: [PATCH 0096/1483] Updated debian/changelog and uploading. --- debian/changelog | 11 +++-------- 1 file changed, 3 insertions(+), 8 deletions(-) diff --git a/debian/changelog b/debian/changelog index c7b672ae..bf055f71 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,13 +1,8 @@ -gnocchi (1.3.3+2016.01.27.git.e1339d77a9-2) UNRELEASED; urgency=medium +gnocchi (1.3.3+2016.01.27.git.e1339d77a9-1) unstable; urgency=medium + * New upstream release based on commit e1339d77a9. * Removed influxdb and influxdb-dev from build-depends-indep. * Do not build-depends on python-ceilometer. - - -- Thomas Goirand Wed, 27 Jan 2016 17:16:52 +0000 - -gnocchi (1.3.3+2016.01.27.git.e1339d77a9-1) unstable; urgency=medium - - * New upstream release. * Fixed python-future version to be >= 0.15. * Fixed (build-)depends for this release. * Pushes app.wsgi to /usr/share/gnocchi-common. @@ -15,7 +10,7 @@ gnocchi (1.3.3+2016.01.27.git.e1339d77a9-1) unstable; urgency=medium * Follow upstream rename of GNOCCHI_TEST_INDEXER_URL to GNOCCHI_INDEXER_URL. * Fixed namespaces when generating config file. - -- Thomas Goirand Thu, 21 Jan 2016 12:19:33 +0800 + -- Thomas Goirand Thu, 28 Jan 2016 04:13:02 +0000 gnocchi (1.3.0-4) unstable; urgency=medium -- GitLab From a6e9c211565b19245259cd1e1825ee2863b80c2c Mon Sep 17 00:00:00 2001 From: Thomas Goirand Date: Tue, 2 Feb 2016 09:16:04 +0000 Subject: [PATCH 0097/1483] Clean /var/{lib,log}/gnocchi on purge (Closes: #810700). 
--- debian/changelog | 6 ++++++ debian/gnocchi-common.postrm | 1 + 2 files changed, 7 insertions(+) diff --git a/debian/changelog b/debian/changelog index bf055f71..29281344 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +gnocchi (1.3.3+2016.01.27.git.e1339d77a9-2) UNRELEASED; urgency=medium + + * Clean /var/{lib,log}/gnocchi on purge (Closes: #810700). + + -- Thomas Goirand Tue, 02 Feb 2016 09:15:03 +0000 + gnocchi (1.3.3+2016.01.27.git.e1339d77a9-1) unstable; urgency=medium * New upstream release based on commit e1339d77a9. diff --git a/debian/gnocchi-common.postrm b/debian/gnocchi-common.postrm index e5ac092f..531cc6de 100644 --- a/debian/gnocchi-common.postrm +++ b/debian/gnocchi-common.postrm @@ -22,6 +22,7 @@ if [ "$1" = "purge" ] ; then fi rm -fr /etc/gnocchi + rm -rf /var/lib/gnocchi /var/log/gnocchi fi #DEBHELPER# -- GitLab From 9e5e5c0aaa8296858d0738fb79665c441471b649 Mon Sep 17 00:00:00 2001 From: Thomas Goirand Date: Tue, 2 Feb 2016 09:18:51 +0000 Subject: [PATCH 0098/1483] Now packaging 1.3.4. --- debian/changelog | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/debian/changelog b/debian/changelog index 29281344..89b5535e 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,5 +1,6 @@ -gnocchi (1.3.3+2016.01.27.git.e1339d77a9-2) UNRELEASED; urgency=medium +gnocchi (1.3.4-1) experimental; urgency=medium + * New upstream release. * Clean /var/{lib,log}/gnocchi on purge (Closes: #810700). -- Thomas Goirand Tue, 02 Feb 2016 09:15:03 +0000 -- GitLab From 347684f1ead720bb89b55ff4dbf93cc289a674b4 Mon Sep 17 00:00:00 2001 From: Thomas Goirand Date: Tue, 2 Feb 2016 09:21:06 +0000 Subject: [PATCH 0099/1483] Added nl.po debconf translation (Closes: #812356). 
--- debian/changelog | 1 + debian/po/nl.po | 15 +++++++-------- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/debian/changelog b/debian/changelog index 89b5535e..1960411b 100644 --- a/debian/changelog +++ b/debian/changelog @@ -2,6 +2,7 @@ gnocchi (1.3.4-1) experimental; urgency=medium * New upstream release. * Clean /var/{lib,log}/gnocchi on purge (Closes: #810700). + * Added nl.po debconf translation (Closes: #812356). -- Thomas Goirand Tue, 02 Feb 2016 09:15:03 +0000 diff --git a/debian/po/nl.po b/debian/po/nl.po index 89ec6ec1..0efd1f9d 100644 --- a/debian/po/nl.po +++ b/debian/po/nl.po @@ -1,15 +1,15 @@ -# Dutch translation of glance debconf templates. +# Dutch translation of gnocchi debconf templates. # Copyright (C) 2012 THE PACKAGE'S COPYRIGHT HOLDER # This file is distributed under the same license as the glance package. # Jeroen Schot , 2012. -# Frans Spiesschaert , 2014. +# Frans Spiesschaert , 2014, 2016. # msgid "" msgstr "" -"Project-Id-Version: glance 2012.1~e3-4\n" +"Project-Id-Version: gnocchi_1.3.0-4\n" "Report-Msgid-Bugs-To: gnocchi@packages.debian.org\n" "POT-Creation-Date: 2015-11-25 09:24+0000\n" -"PO-Revision-Date: 2014-09-26 22:55+0200\n" +"PO-Revision-Date: 2016-01-12 16:31+0100\n" "Last-Translator: Frans Spiesschaert \n" "Language-Team: Debian Dutch l10n Team \n" "Language: nl\n" @@ -17,6 +17,7 @@ msgstr "" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "Plural-Forms: nplurals=2; plural=(n != 1);\n" +"X-Generator: Gtranslator 2.91.6\n" #. Type: string #. Description @@ -131,7 +132,6 @@ msgstr "" #. Type: boolean #. Description #: ../gnocchi-common.templates:6001 -#, fuzzy #| msgid "" #| "You can change this setting later on by running \"dpkg-reconfigure -plow " #| "glance-common\"." @@ -140,7 +140,7 @@ msgid "" "gnocchi-common\"." msgstr "" "U kunt deze instelling later wijzigen met het commando \"dpkg-reconfigure -" -"plow glance-common\"." +"plow gnocchi-common\"." #. Type: boolean #. 
Description @@ -197,7 +197,6 @@ msgstr "Authenticatiebewijs voor Keystone:" #. Type: password #. Description #: ../gnocchi-api.templates:4001 -#, fuzzy #| msgid "" #| "To configure its endpoint in Keystone, glance-api needs the Keystone " #| "authentication token." @@ -205,7 +204,7 @@ msgid "" "To configure its endpoint in Keystone, gnocchi-api needs the Keystone " "authentication token." msgstr "" -"Om zijn toegangspunt te kunnen aanmaken in Keystone, heeft glance-api het " +"Om zijn toegangspunt te kunnen aanmaken in Keystone, heeft gnocchi-api het " "authenticatiebewijs voor Keystone nodig." #. Type: string -- GitLab From a279721a776f47dc45ef418938939b6e488523c0 Mon Sep 17 00:00:00 2001 From: Thomas Goirand Date: Wed, 17 Feb 2016 16:31:17 +0000 Subject: [PATCH 0100/1483] Remove diff with upstream. --- .gitreview | 1 + MANIFEST.in | 1 - README.rst | 27 +- devstack/apache-gnocchi.template | 2 +- devstack/apache-ported-gnocchi.template | 2 +- devstack/plugin.sh | 50 +- devstack/settings | 17 +- doc/source/architecture.rst | 119 --- doc/source/client.rst | 13 - doc/source/conf.py | 26 +- doc/source/configuration.rst | 148 ---- doc/source/grafana-screenshot.png | Bin 82601 -> 0 bytes doc/source/grafana.rst | 63 -- doc/source/index.rst | 46 +- doc/source/install.rst | 215 +++-- doc/source/resource_types.rst | 2 +- doc/source/rest.j2 | 47 +- doc/source/rest.yaml | 42 - doc/source/running.rst | 71 -- doc/source/statsd.rst | 26 +- etc/gnocchi/api-paste.ini | 4 +- .../gnocchi/gnocchi-config-generator.conf | 0 gnocchi/archive_policy.py | 10 +- gnocchi/carbonara.py | 433 +++++----- gnocchi/cli.py | 28 +- gnocchi/genconfig.py | 21 - gnocchi/indexer/__init__.py | 17 +- gnocchi/indexer/alembic/env.py | 16 +- ...c2_allow_volume_display_name_to_be_null.py | 41 - ...469b308577a9_allow_image_ref_to_be_null.py | 41 - gnocchi/indexer/sqlalchemy.py | 421 +++++----- gnocchi/indexer/sqlalchemy_extension.py | 4 +- gnocchi/opts.py | 30 +- gnocchi/rest/__init__.py | 331 +++++--- 
gnocchi/rest/app.py | 19 + gnocchi/service.py | 9 +- gnocchi/statsd.py | 9 +- gnocchi/storage/__init__.py | 37 +- gnocchi/storage/_carbonara.py | 302 ++----- gnocchi/storage/ceph.py | 106 +-- gnocchi/storage/file.py | 120 +-- gnocchi/storage/influxdb.py | 13 +- gnocchi/storage/swift.py | 111 +-- gnocchi/tests/api-paste.ini | 3 +- gnocchi/tests/base.py | 31 +- gnocchi/tests/gabbi/fixtures.py | 22 +- gnocchi/tests/gabbi/gabbits-live/live.yaml | 7 +- gnocchi/tests/gabbi/gabbits/aggregation.yaml | 98 --- gnocchi/tests/gabbi/gabbits/archive.yaml | 6 +- .../tests/gabbi/gabbits/batch_measures.yaml | 88 -- .../gabbi/gabbits/metric_granularity.yaml | 6 +- gnocchi/tests/gabbi/gabbits/resource.yaml | 4 +- gnocchi/tests/gabbi/test_gabbi_live.py | 2 +- .../indexer/sqlalchemy/test_migrations.py | 2 +- gnocchi/tests/storage/test_carbonara.py | 152 ++-- gnocchi/tests/test_archive_policy.py | 3 +- gnocchi/tests/test_carbonara.py | 776 +++++++++--------- gnocchi/tests/test_indexer.py | 26 +- gnocchi/tests/test_rest.py | 122 ++- gnocchi/tests/test_storage.py | 172 +--- gnocchi/utils.py | 8 - requirements.txt | 13 +- run-tests.sh | 9 +- setup-influxdb-tests.sh | 47 ++ setup-mysql-tests.sh | 29 + setup-postgresql-tests.sh | 19 + setup-test-env.sh | 18 - setup.cfg | 86 +- test-requirements.txt | 16 + tox.ini | 43 +- 70 files changed, 1877 insertions(+), 2972 deletions(-) delete mode 100644 MANIFEST.in delete mode 100644 doc/source/architecture.rst delete mode 100644 doc/source/client.rst delete mode 100644 doc/source/configuration.rst delete mode 100644 doc/source/grafana-screenshot.png delete mode 100644 doc/source/grafana.rst delete mode 100644 doc/source/running.rst rename gnocchi-config-generator.conf => etc/gnocchi/gnocchi-config-generator.conf (100%) delete mode 100644 gnocchi/genconfig.py delete mode 100644 gnocchi/indexer/alembic/versions/1f21cbdd6bc2_allow_volume_display_name_to_be_null.py delete mode 100644 
gnocchi/indexer/alembic/versions/469b308577a9_allow_image_ref_to_be_null.py delete mode 100644 gnocchi/tests/gabbi/gabbits/aggregation.yaml delete mode 100644 gnocchi/tests/gabbi/gabbits/batch_measures.yaml create mode 100755 setup-influxdb-tests.sh create mode 100755 setup-mysql-tests.sh create mode 100755 setup-postgresql-tests.sh delete mode 100755 setup-test-env.sh create mode 100644 test-requirements.txt diff --git a/.gitreview b/.gitreview index e4b8477d..e93df263 100644 --- a/.gitreview +++ b/.gitreview @@ -2,3 +2,4 @@ host=review.openstack.org port=29418 project=openstack/gnocchi.git +defaultbranch=stable/1.3 diff --git a/MANIFEST.in b/MANIFEST.in deleted file mode 100644 index 8f248e6e..00000000 --- a/MANIFEST.in +++ /dev/null @@ -1 +0,0 @@ -include etc/gnocchi/gnocchi.conf diff --git a/README.rst b/README.rst index 8c5bfb57..dfcc6474 100644 --- a/README.rst +++ b/README.rst @@ -1,17 +1,20 @@ -=============================== - Gnocchi – Metric as a Service -=============================== +======= +Gnocchi +======= -.. image:: doc/source/gnocchi-logo.jpg +Gnocchi is a service for managing a set of resources and storing metrics about +them, in a scalable and resilient way. Its functionalities are exposed over an +HTTP REST API. -Gnocchi is a multi-tenant timeseries, metrics and resources database. It -provides an `HTTP REST`_ interface to create and manipulate the data. It is -designed to store metrics at a very large scale while providing access to -metrics and resources information to operators and users. +There is a more consistent presentation of Gnocchi: -Gnocchi is part of the `OpenStack` project. While Gnocchi has support for -OpenStack, it is fully able to work stand-alone. + https://julien.danjou.info/blog/2015/openstack-gnocchi-first-release -You can read the full documentation online at http://gnocchi.xyz. +And online documentation: -.. 
_`HTTP REST`: https://en.wikipedia.org/wiki/Representational_state_transfer + http://docs.openstack.org/developer/gnocchi/ + +Your are invited to file bug reports (if you find bugs) in +the bug tracker, available at: + + http://bugs.launchpad.net/gnocchi diff --git a/devstack/apache-gnocchi.template b/devstack/apache-gnocchi.template index bc288755..54fafbd1 100644 --- a/devstack/apache-gnocchi.template +++ b/devstack/apache-gnocchi.template @@ -1,5 +1,5 @@ -WSGIDaemonProcess gnocchi lang='en_US.UTF-8' locale='en_US.UTF-8' user=%USER% display-name=%{GROUP} processes=%APIWORKERS% threads=32 %VIRTUALENV% +WSGIDaemonProcess gnocchi lang='en_US.UTF-8' locale='en_US.UTF-8' user=%USER% display-name=%{GROUP} processes=%APIWORKERS% threads=10 %VIRTUALENV% WSGIProcessGroup gnocchi WSGIScriptAlias %SCRIPT_NAME% %WSGI% diff --git a/devstack/apache-ported-gnocchi.template b/devstack/apache-ported-gnocchi.template index 2a56fa8d..58f0c480 100644 --- a/devstack/apache-ported-gnocchi.template +++ b/devstack/apache-ported-gnocchi.template @@ -1,7 +1,7 @@ Listen %GNOCCHI_PORT% - WSGIDaemonProcess gnocchi lang='en_US.UTF-8' locale='en_US.UTF-8' user=%USER% display-name=%{GROUP} processes=%APIWORKERS% threads=32 %VIRTUALENV% + WSGIDaemonProcess gnocchi lang='en_US.UTF-8' locale='en_US.UTF-8' user=%USER% display-name=%{GROUP} processes=%APIWORKERS% threads=10 %VIRTUALENV% WSGIProcessGroup gnocchi WSGIScriptAlias / %WSGI% WSGIApplicationGroup %{GLOBAL} diff --git a/devstack/plugin.sh b/devstack/plugin.sh index 16cb7e1c..cb6e47d7 100644 --- a/devstack/plugin.sh +++ b/devstack/plugin.sh @@ -18,6 +18,7 @@ # - ``SERVICE_{TENANT_NAME|PASSWORD}`` must be defined # - ``SERVICE_HOST`` # - ``OS_AUTH_URL``, ``KEYSTONE_SERVICE_URI`` for auth in api +# - ``CEILOMETER_CONF`` for ceilometer dispatcher configuration # stack.sh # --------- @@ -65,9 +66,9 @@ function create_gnocchi_accounts { "metric" "OpenStack Metric Service") get_or_create_endpoint $gnocchi_service \ "$REGION_NAME" \ - 
"$(gnocchi_service_url)" \ - "$(gnocchi_service_url)" \ - "$(gnocchi_service_url)" + "$(gnocchi_service_url)/" \ + "$(gnocchi_service_url)/" \ + "$(gnocchi_service_url)/" fi if is_service_enabled swift && [[ "$GNOCCHI_STORAGE_BACKEND" = 'swift' ]] ; then get_or_create_project "gnocchi_swift" default @@ -237,12 +238,6 @@ function configure_gnocchi { # Configure auth token middleware configure_auth_token_middleware $GNOCCHI_CONF gnocchi $GNOCCHI_AUTH_CACHE_DIR - if is_service_enabled gnocchi-statsd ; then - iniset $GNOCCHI_CONF statsd resource_id $GNOCCHI_STATSD_RESOURCE_ID - iniset $GNOCCHI_CONF statsd project_id $GNOCCHI_STATSD_PROJECT_ID - iniset $GNOCCHI_CONF statsd user_id $GNOCCHI_STATSD_USER_ID - fi - # Configure the storage driver if is_service_enabled ceph && [[ "$GNOCCHI_STORAGE_BACKEND" = 'ceph' ]] ; then iniset $GNOCCHI_CONF storage driver ceph @@ -273,8 +268,6 @@ function configure_gnocchi { iniset $GNOCCHI_CONF cors allowed_origin ${GRAFANA_URL} iniset $GNOCCHI_CONF cors allow_methods GET,POST,PUT,DELETE,OPTIONS,HEAD,PATCH iniset $GNOCCHI_CONF cors allow_headers Content-Type,Cache-Control,Content-Language,Expires,Last-Modified,Pragma,X-Auth-Token,X-Subject-Token - else - iniset $GNOCCHI_PASTE_CONF pipeline:main pipeline "keystone_authtoken gnocchi" fi else iniset $GNOCCHI_PASTE_CONF pipeline:main pipeline gnocchi @@ -301,6 +294,25 @@ function configure_ceph_gnocchi { sudo chown ${STACK_USER}:$(id -g -n $whoami) ${CEPH_CONF_DIR}/ceph.client.${GNOCCHI_CEPH_USER}.keyring } +function configure_ceilometer_gnocchi { + gnocchi_url=$(gnocchi_service_url) + iniset $CEILOMETER_CONF DEFAULT dispatcher gnocchi + iniset $CEILOMETER_CONF alarms gnocchi_url $gnocchi_url + iniset $CEILOMETER_CONF dispatcher_gnocchi url $gnocchi_url + iniset $CEILOMETER_CONF dispatcher_gnocchi archive_policy ${GNOCCHI_ARCHIVE_POLICY} + if is_service_enabled swift && [[ "$GNOCCHI_STORAGE_BACKEND" = 'swift' ]] ; then + iniset $CEILOMETER_CONF dispatcher_gnocchi filter_service_activity 
"True" + iniset $CEILOMETER_CONF dispatcher_gnocchi filter_project "gnocchi_swift" + else + iniset $CEILOMETER_CONF dispatcher_gnocchi filter_service_activity "False" + fi +} + +function configure_aodh_gnocchi { + gnocchi_url=$(gnocchi_service_url) + iniset $AODH_CONF DEFAULT gnocchi_url $gnocchi_url +} + # init_gnocchi() - Initialize etc. function init_gnocchi { @@ -311,8 +323,8 @@ function init_gnocchi { if is_service_enabled mysql postgresql; then recreate_database gnocchi + $GNOCCHI_BIN_DIR/gnocchi-dbsync fi - $GNOCCHI_BIN_DIR/gnocchi-upgrade } function preinstall_gnocchi { @@ -342,10 +354,8 @@ function install_gnocchi { install_gnocchiclient - is_service_enabled key && EXTRA_FLAVOR=,keystonmiddleware - # We don't use setup_package because we don't follow openstack/requirements - sudo -H pip install -e "$GNOCCHI_DIR"[test,$GNOCCHI_STORAGE_BACKEND,${DATABASE_TYPE}${EXTRA_FLAVOR}] + sudo -H pip install -e "$GNOCCHI_DIR" if [ "$GNOCCHI_USE_MOD_WSGI" == "True" ]; then install_apache_wsgi @@ -386,6 +396,7 @@ function start_gnocchi { fi # Create a default policy + archive_policy_url="$(gnocchi_service_url)/v1/archive_policy" if ! 
is_service_enabled key; then export OS_AUTH_TYPE=gnocchi-noauth export GNOCCHI_USER_ID=`uuidgen` @@ -401,7 +412,6 @@ function start_gnocchi { # run metricd last so we are properly waiting for swift and friends run_process gnocchi-metricd "$GNOCCHI_BIN_DIR/gnocchi-metricd -d -v --config-file $GNOCCHI_CONF" - run_process gnocchi-statsd "$GNOCCHI_BIN_DIR/gnocchi-statsd -d -v --config-file $GNOCCHI_CONF" } # stop_gnocchi() - Stop running processes @@ -435,6 +445,14 @@ if is_service_enabled gnocchi-api; then echo_summary "Configuring Gnocchi" configure_gnocchi create_gnocchi_accounts + if is_service_enabled ceilometer; then + echo_summary "Configuring Ceilometer for gnocchi" + configure_ceilometer_gnocchi + fi + if is_service_enabled aodh; then + echo_summary "Configuring Aodh for gnocchi" + configure_aodh_gnocchi + fi if is_service_enabled ceph && [[ "$GNOCCHI_STORAGE_BACKEND" = 'ceph' ]] ; then echo_summary "Configuring Gnocchi for Ceph" configure_ceph_gnocchi diff --git a/devstack/settings b/devstack/settings index e28c40e6..77231dc7 100644 --- a/devstack/settings +++ b/devstack/settings @@ -1,6 +1,5 @@ enable_service gnocchi-api enable_service gnocchi-metricd -enable_service gnocchi-statsd # Set up default directories GNOCCHI_DIR=$DEST/gnocchi @@ -32,10 +31,8 @@ GNOCCHI_SERVICE_PORT=${GNOCCHI_SERVICE_PORT:-8041} GNOCCHI_SERVICE_PREFIX=${GNOCCHI_SERVICE_PREFIX:-'/metric'} GNOCCHI_SERVICE_HOST=$SERVICE_HOST -# Gnocchi statsd info -GNOCCHI_STATSD_RESOURCE_ID=${GNOCCHI_STATSD_RESOURCE_ID:-$(uuidgen)} -GNOCCHI_STATSD_USER_ID=${GNOCCHI_STATSD_USER_ID:-$(uuidgen)} -GNOCCHI_STATSD_PROJECT_ID=${GNOCCHI_STATSD_PROJECT_ID:-$(uuidgen)} +# Gnocchi ceilometer default archive_policy +GNOCCHI_ARCHIVE_POLICY=${GNOCCHI_ARCHIVE_POLICY:-low} # ceph gnochi info GNOCCHI_CEPH_USER=${GNOCCHI_CEPH_USER:-gnocchi} @@ -48,12 +45,12 @@ GNOCCHI_STORAGE_BACKEND=${GNOCCHI_STORAGE_BACKEND:-file} # InfluxDB Settings GNOCCHI_INFLUXDB_DBNAME=${GNOCCHI_INFLUXDB_DBNAME:-gnocchidevstack} 
-GNOCCHI_INFLUXDB_RPM_PKG=${GNOCCHI_INFLUXDB_RPM_PKG:-https://s3.amazonaws.com/influxdb/influxdb-0.9.4.2-1.x86_64.rpm} -GNOCCHI_INFLUXDB_DEB_PKG=${GNOCCHI_INFLUXDB_DEB_PKG:-https://s3.amazonaws.com/influxdb/influxdb_0.9.4.2_amd64.deb} +GNOCCHI_INFLUXDB_RPM_PKG=${GNOCCHI_INFLUXDB_RPM_PKG:-https://s3.amazonaws.com/influxdb/influxdb-0.9.4.1-1.x86_64.rpm} +GNOCCHI_INFLUXDB_DEB_PKG=${GNOCCHI_INFLUXDB_DEB_PKG:-https://s3.amazonaws.com/influxdb/influxdb_0.9.4.1_amd64.deb} # Grafana settings -GRAFANA_RPM_PKG=${GRAFANA_RPM_PKG:-https://grafanarel.s3.amazonaws.com/builds/grafana-2.6.0-1.x86_64.rpm} -GRAFANA_DEB_PKG=${GRAFANA_DEB_PKG:-https://grafanarel.s3.amazonaws.com/builds/grafana_2.6.0_amd64.deb} +GRAFANA_RPM_PKG=${GRAFANA_RPM_PKG:-https://grafanarel.s3.amazonaws.com/builds/grafana-2.1.3-1.x86_64.rpm} +GRAFANA_DEB_PKG=${GRAFANA_DEB_PKG:-https://grafanarel.s3.amazonaws.com/builds/grafana_2.1.3_amd64.deb} GRAFANA_PLUGINS_DIR=${GRAFANA_PLUGINS_DIR:-$DEST/grafana-plugins} -GRAFANA_PLUGINS_REPO=${GRAFANA_PLUGINS_REPO:-http://github.com/sileht/grafana-plugins-gnocchi.git} +GRAFANA_PLUGINS_REPO=${GRAFANA_PLUGINS_REPO:-http://github.com/grafana/grafana-plugins.git} GRAFANA_URL=${GRAFANA_URL:-http://$HOST_IP:3000} diff --git a/doc/source/architecture.rst b/doc/source/architecture.rst deleted file mode 100644 index 30e26979..00000000 --- a/doc/source/architecture.rst +++ /dev/null @@ -1,119 +0,0 @@ -====================== - Project Architecture -====================== - -Gnocchi consists of several services: a HTTP REST API (see :doc:`rest`), an -optional statsd-compatible daemon (see :doc:`statsd`), and an asynchronous -processing daemon. Data is received via the HTTP REST API and statsd daemon. -The asynchronous processing daemon, called `gnocchi-metricd`, performs -operations (statistics computing, metric cleanup, etc...) on the received data -in the background. - -Both the HTTP REST API and the asynchronous processing daemon are stateless and -are scalable. 
Additional workers can be added depending on load. - - -Back-ends ---------- - -Gnocchi uses two different back-end for storing data: one for storing the time -series (the storage driver) and one for indexing the data (the index driver). - -The *storage* is responsible for storing measures of created metrics. It -receives timestamps and values, and pre-computes aggregations according to -the defined archive policies. - -The *indexer* is responsible for storing the index of all resources, along with -their types and properties. Gnocchi only knows about resource types from the -OpenStack project, but also provides a *generic* type so you can create basic -resources and handle the resource properties yourself. The indexer is also -responsible for linking resources with metrics. - -How to choose back-ends -~~~~~~~~~~~~~~~~~~~~~~~ - -Gnocchi currently offers 4 storage drivers: - -* File -* Swift -* Ceph (preferred) -* InfluxDB (experimental) - -The first three drivers are based on an intermediate library, named -*Carbonara*, which handles the time series manipulation, since none of these -storage technologies handle time series natively. `InfluxDB`_ does not need -this layer since it is itself a time series database. However, The InfluxDB -driver is still experimental and suffers from bugs in InfluxDB itself that are -yet to be fixed as of this writing. - -The three *Carbonara* based drivers are working well and are as scalable as -their back-end technology permits. Ceph and Swift are inherently more scalable -than the file driver. - -Depending on the size of your architecture, using the file driver and storing -your data on a disk might be enough. If you need to scale the number of server -with the file driver, you can export and share the data via NFS among all -Gnocchi processes. In any case, it is obvious that Ceph and Swift drivers are -largely more scalable. Ceph also offers better consistency, and hence is the -recommended driver. - -.. 
_InfluxDB: http://influxdb.com - -How to plan for Gnocchi’s storage -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Gnocchi uses a custom file format based on its library *Carbonara*. In Gnocchi, -a time serie is a collection of points, where a point is a given measure, or -sample, in the lifespan of a time serie. The storage format is pretty -straightforward, therefore the computing of a time serie's size can be done -with the following formula:: - - number of points × (64 bits timestamp + 64 bits floating value) × 1.12 - = number of points × 16 bytes × 1.12 - = number of points × 17.92 - = size in bytes - -The number of points you want to keep is usually determined by the following -formula:: - - number of points = timespan ÷ granularity - -For example, if you want to keep a year of data with a one minute resolution:: - - number of points = (365 days × 24 hours × 60 minutes) ÷ 1 minute - number of points = 525 600 - -Then:: - - size in bytes = 525 600 × 17.92 = 9 418 752 bytes = 9 198 KiB - -This is just for a single aggregated time serie. If your archive policy uses -the 8 default aggregation methods (mean, min, max, sum, std, median, count, -95pct) with the same "one year, one minute aggregations" resolution, the space -used will go up to 8 × 9 MiB = 72 MiB. - -How to set the archive policy and granularity -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -In Gnocchi, the archive policy is expressed in number of points. If your -archive policy defines a policy of 10 points with a granularity of 1 second, -the time serie archive will keep up to 10 points, each representing an -aggregation over 1 second. This means the time serie will at maximum retain 10 -seconds of data, **but** that does not mean it will be 10 consecutive seconds: -there might be a gap if data is fed irregularly. - -Consequently, there is no expiry of data relative to the current timestamp, and -you cannot delete old data points (at least for now). 
- -Therefore, both the archive policy and the granularity entirely depends on your -use case. Depending on the usage of your data, you can define several archiving -policies. A typical low grained use case could be:: - - 3600 points with a granularity of 1 second = 1 hour - 1440 points with a granularity of 1 minute = 24 hours - 1800 points with a granularity of 1 hour = 30 days - 365 points with a granularity of 1 day = 1 year - -This would represent 7205 points × 17.92 = 126 KiB per aggregation method. If -you use the 8 standard aggregation method, your metric will take up to 8 × 126 -KiB = 0.98 MiB of disk space. diff --git a/doc/source/client.rst b/doc/source/client.rst deleted file mode 100644 index 6aa428a1..00000000 --- a/doc/source/client.rst +++ /dev/null @@ -1,13 +0,0 @@ -======== - Client -======== - -Gnocchi currently only provides a Python client and SDK which can be installed -using *pip*:: - - pip install gnocchiclient - -This package provides the `gnocchi` command line tool that can be used to send -requests to Gnocchi. You can read the `full documentation online`_. - -.. _full documentation online: http://gnocchi.xyz/gnocchiclient diff --git a/doc/source/conf.py b/doc/source/conf.py index ea782e69..8d947844 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -15,6 +15,7 @@ import os import subprocess import oslosphinx +import sphinx_bootstrap_theme # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the @@ -105,13 +106,24 @@ html_theme = 'openstack' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. 
-if html_theme == "sphinx_rtd_theme": - import sphinx_rtd_theme - html_theme_path = [sphinx_rtd_theme.get_html_theme_path()] -else: - html_theme_path = [os.path.join(os.path.dirname(oslosphinx.__file__), - 'theme')] - +if html_theme == "bootstrap": + html_theme_options = { + 'navbar_class': "navbar navbar-inverse", + 'navbar_site_name': "Documentation", + 'navbar_links': [ + ("Launchpad project", "https://launchpad.net/gnocchi", True), + ("Bug tracking", "https://bugs.launchpad.net/gnocchi", True), + ("Git", "http://github.com/openstack/gnocchi", True), + ], + 'navbar_sidebarrel': False, + 'navbar_pagenav': False, + 'globaltoc_depth': 2, + } + +# Add any paths that contain custom themes here, relative to this directory. +#html_theme_path = [] +html_theme_path = ([os.path.join(os.path.dirname(oslosphinx.__file__), 'theme')] + + sphinx_bootstrap_theme.get_html_theme_path()) # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". diff --git a/doc/source/configuration.rst b/doc/source/configuration.rst deleted file mode 100644 index debf5e26..00000000 --- a/doc/source/configuration.rst +++ /dev/null @@ -1,148 +0,0 @@ -=============== - Configuration -=============== - -Configure Gnocchi by editing `/etc/gnocchi/gnocchi.conf`. The configuration -file should be pretty explicit, but here are some of the base options you want -to change and configure: - - -+---------------------+---------------------------------------------------+ -| Option name | Help | -+=====================+===================================================+ -| storage.driver | The storage driver for metrics. | -+---------------------+---------------------------------------------------+ -| indexer.url | URL to your indexer. | -+---------------------+---------------------------------------------------+ -| storage.file_* | Configuration options to store files | -| | if you use the file storage driver. 
| -+---------------------+---------------------------------------------------+ -| storage.swift_* | Configuration options to access Swift | -| | if you use the Swift storage driver. | -+---------------------+---------------------------------------------------+ -| storage.ceph_* | Configuration options to access Ceph | -| | if you use the Ceph storage driver. | -+---------------------+---------------------------------------------------+ - - -Gnocchi provides these storage drivers: - -- File (default) -- `Swift`_ -- `Ceph`_ -- `InfluxDB`_ (experimental) - -Gnocchi provides these indexer drivers: - -- `PostgreSQL`_ (recommended) -- `MySQL`_ - -.. _`Swift`: https://launchpad.net/swift -.. _`Ceph`: http://ceph.com/ -.. _`PostgreSQL`: http://postgresql.org -.. _`MySQL`: http://mysql.com -.. _`InfluxDB`: http://influxdb.com - -Configuring the WSGI pipeline ------------------------------ - -The API server leverages `Paste Deployment`_ to manage its configuration. You -can edit the `/etc/gnocchi/api-paste.ini` to tweak the WSGI pipeline of the -Gnocchi REST HTTP server. By default, no authentication middleware is enabled, -meaning your request will have to provides the authentication headers. - -Gnocchi is easily connectable with `OpenStack Keystone`_. If you successfully -installed the `keystone` flavor using `pip` (see :ref:`installation`), you can -edit the `api-paste.ini` file to add the Keystone authentication middleware:: - - [pipeline:main] - pipeline = keystone_authtoken gnocchi - -Also, if you're planning on using `CORS`_ (e.g. to use `Grafana`_), you an also -add the CORS middleware in the server pipeline:: - - [pipeline:main] - pipeline = keystone_authtoken cors gnocchi - -With or without Keystone support. - -.. _`Paste Deployment`: http://pythonpaste.org/deploy/ -.. _`OpenStack Keystone`: http://launchpad.net/keystone -.. _`CORS`: https://en.wikipedia.org/wiki/Cross-origin_resource_sharing -.. 
_`Grafana`: http://grafana.org/ - - -Driver notes -============ - -Carbonara based drivers (file, swift, ceph) -------------------------------------------- - -To ensure consistency across all *gnocchi-api* and *gnocchi-metricd* workers, -these drivers need a distributed locking mechanism. This is provided by the -'coordinator' of the `tooz`_ library. - -By default, the configured backend for `tooz`_ is `file`, this allows locking -across workers on the same node. - -In a multi-nodes deployment, the coordinator needs to be changed via -the storage/coordination_url configuration options to one of the other -`tooz backends`_. - -For example to use Redis backend:: - - coordination_url = redis://?sentinel= - -or alternatively, to use the Zookeeper backend:: - - coordination_url = zookeeper:///hosts=&hosts= - -.. _`tooz`: http://docs.openstack.org/developer/tooz/ -.. _`tooz backends`: http://docs.openstack.org/developer/tooz/drivers.html - - -Ceph driver implementation details ----------------------------------- - -Each batch of measurements to process is stored into one rados object. -These objects are named `measures___` - -Also a special empty object called `measures` has the list of measures to -process stored in its xattr attributes. - -Because of the asynchronous nature of how we store measurements in Gnocchi, -`gnocchi-metricd` needs to know the list of objects that are waiting to be -processed: - -- Listing rados objects for this is not a solution since it takes too much - time. -- Using a custom format into a rados object, would force us to use a lock - each time we would change it. - -Instead, the xattrs of one empty rados object are used. No lock is needed to -add/remove a xattr. - -But depending on the filesystem used by ceph OSDs, this xattrs can have a -limitation in terms of numbers and size if Ceph is not correctly configured. -See `Ceph extended attributes documentation`_ for more details. 
- -Then, each Carbonara generated file is stored in *one* rados object. -So each metric has one rados object per aggregation in the archive policy. - -Because of this, the filling of OSDs can look less balanced compared to RBD. -Some objects will be big and others small, depending on how archive policies -are set up. - -We can imagine an unrealistic case such as retaining 1 point per second over -a year, in which case the rados object size will be ~384MB. - -Whereas in a more realistic scenario, a 4MB rados object (like RBD uses) could -result from: - -- 20 days with 1 point every second -- 100 days with 1 point every 5 seconds - -So, in realistic scenarios, the direct relation between the archive policy and -the size of the rados objects created by Gnocchi is not a problem. - -.. _`Ceph extended attributes documentation`: http://docs.ceph.com/docs/master/rados/configuration/filestore-config-ref/#extended-attributes diff --git a/doc/source/grafana-screenshot.png b/doc/source/grafana-screenshot.png deleted file mode 100644 index eff160321972884e6811d4dfa7fe7dd26fac2912..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 82601 zcma&N1ys~s7dMKcgv5YI=g^%3l0&F;gLId4H_WKCv~&p44blzLNJ=*&-CYCBz#X4A zp6C6(yY9NoTCAD>*(dfs`|Pvh_YYT9mce;O_6!9D1xHTywK@vQ6F3UWV}A@Z&)RIHrB&smrD;@Movdu^Em2Tp!xNLyRpaN0`j4;kLxYm=J}3N2r+L$SK_=nb zfd2(g3KRI0Pz@hn+nFOQjHdb<8_lx{c|$C-&Uy^iIm6gDl7_@sI;uX@Cm6?VJ|a#l zsZHky*De2t3^%vqVGv5}+my$Wtn2^@GGlh^^UH7jJ$>UY9jNTz9?||niB?RK{Y~}! zImW#JmK`9cYuWZp{PubE7 zDwfKV3>s79^Gfn75Hd}ANGT4-IDZWzK=FPa&$97YLW<7EmoXw@C-wrb{&$U!e-Is> zrzr!^JO(jcD>ny6K$-}ifIN(t@~FdxQIiKhq0xeY(}=^Nom-zDVk|B@hkEoAj< z8@d@zitom*2#DWvd%d!%9)L@yx*%;En?T?r9w{^`|2ZskUC~JM7e$167wVRsXueN2 zz2bsf&Mz5Z^R)Nvf<$iZ=q~C(P=6<$392~?E+?BzYL;^1FtNO}Pg8g{VocN8X~dQx z3Q$5ibo=P|)O+B$FmM9#2%iR17KQrzBXkVO$Fk_f64*OGuueW;V8p5@exLyGSVwvL zVvMH%ZY=?hxu8p5QlPjt7icHa>t37{xKf6iyY#T>ROBy0E2n<8;hD4xaAdx`f? 
zJ^gklgpv?`lW`=n(RlFqw>?0Cb~s~1iCX`Iq+>J1x5rxFFx@}mbECfdLGTCaG``pfO6k5Lb48wL5;R7% z?%uM#4Rd*J8E&PNp%n39ZmY);y%XO4b|vP zUCVIke8s37In;mHPucyof4L85U3}H@M9-V86(Ei~*1x@`a3XWMebR8sbrS!?=aYD6 z9G;>H!OU~d=ePt5m<<@+L2@~d$76RCBWV~31o0KZae9V&`g^WCn~ujv7(X5(jnPy(5`f!32%J- zrc)iL15`3?=~4@}dq(&9)f?0|PH*bXGS`5;5uc@Bym*tmCTFT9pnoHS%CZkChSgRImcIGx z-{cNHCKM!$mj5W9E1xw;tkqo7VEcK`ahhnF$@V=zL7i&dv@QBHXK9;}gyHYn4|U&6 zI)6R|Oq+TTTykG}?1fG_mBiS+w}shayu7kAnDQ={n{=O4oME34EfgwwGwwRRWkao;eyA(!vW+F?wRP>aFrLGt(>P^p`1HPV3l9{xk z-e%wC+ZKBJB)|_rj|jLmyrss$#>v5{C#xliBq<@`#je5L$Dt;Vi!zAfjVy^eA~TOV zjM9i?i}XWoDtSD8ylcFE!S~{Fma*EdS}&kgcDK}h*Iw}Tu&mXQRjSpcRp8dkbBS|< zbE0#>hzp`#j)k@&yKyfp@(9g{ZlV}=LJkCn8^!U8EXp!p-zJf8uCfcYS`6s+r|ueTivE`J3irDCY57BIF>zP#@OD^f zHtNu`=Pm4DM)6B= z^LByueSZ-+H~0~_1#!gll7#&&rTH*)6w>yPkD@N&Eus6PMj~Fq)Z|XxyZB*3Oo1AG z7yUF)?~^1qowv$KJYIKfWUR4tRW>ktGkdI?y}P=*xr=4Bv@0e=<+b@M8pv7{I$uiF;}iajzO-{C$p7zWznKzV{jverIUr9MWcAUILZcA3#%1qrv(K= zSPc0Blzc0HXvT7p3^)(ZrSmXi<5A&D<`Cw*<00e$2{OF>!tte3F?slGSfw=gU6x+% z_mGaQExq`<#hLbjv!Ay7B{mQxyBUiAwWdJ?#3 z3>H9LIb0)LLH5xrOPPgn$?<}u|Jo=(x|_jL8QnAIdI`w@rF(cN)$a!VM8EhF-HoEH5kxaSnBx3!74m3{>H! 
z$0cpE@<-J74UpOUN=rG*fK95+DD)|Sy~s(`CqtLJZ;PEF@==3b zgIu6faI^8I{oAr#>F+j?{*JH-Fn=7whKnBzfvFLxwp3eXycx0 zDmfqTO^NX03O3qnr*OX6Bg|qWZdqy>HTG!t-AY@ptKYwx)pwj*dEaj1RT$yfl z|CAAm;fcY@900NSl-$?edaXmr(e2SkaeZ-C{E6-l#_{ZSrm1s4?YP@wOdu|w?8_X- z9@~xKqOtd5LzY~j#+XJ*?V}GGt7E86kt-SZLx+{=<~-)zPQgxvD-Mv#+xD&QHNe+( z;JOOa1S2De$X^|yc{hLWHk-V|#l@%%=?&mOSi#E?em}1JHEc9A9v8$4nLkw>_(VYP z$Xgo~MTqDWH>{FzaMX7z^@CNwJ@&_rbzNgIf_jvx#z%Oms1h`5YPhrtAnO@@75xGX zK@@#`g;|hv!g~k)(l=5WOk0yvQ+_xo&W9-e1jl@ZYM^oqohhB?c{1n!_WDWlbcrj z84V4MsH=sQu=?w_|L%_bB}V(+-Q8K3i_6Q)i_?pb)5+DEi$_REh>M$-iyL^Y97^i2l{#|7!Ys%71j# z{ky9G-~aUdN6Wu^igNwo;2#|QOI&|FMaoP3nJCwPWiS3rd_A-nz_3JFUBOlkFzaDl)FP#wRZ-!5Ep&V{9>p5XToWpeM^)?~ z(dYK=dsA}i>gx1#bb+z9fOagH^2waUzo(y;fK8X{YlpWi)Y-9RU*}RI2DrXsC-n+{ z=LCn*>sT23`?uCQF6%AS+Rz;?)Y0YX1s*?cCrAMQdnxL{!N%PoI9i@YUxhv3N&ZJa zn!hWTrBgE&%f^np9+wGYgDL-R=`?@xZ$W5Eu7*BeGKQ}amnaIVR;vVhm@TuPGZ13R zldyPWuyDdpmVfhn`nTXpQcnC1s`@wX?~bpT56KV*oK4&P`2-MeoK_-`rqrs8?1{vx z%)dF&QiMtV$es^jQWMN|p^R(4+%Q>0-%R#u8AGv$*QB}TuX9=aFt1x44V+zT1P67+ z;5`HVdqc8M*m8M|R8j-@kKm9T#MmZv8#VFnWn(K9;(P6Vp`NQ(6!p(B+Zk0+o&A2^ z`unysNIaF6*@=9Qbe|i)sMT#%YqBZ~AGx4^ujK{!(G#VY+!dLE^Ve9?OKo^J3O6#h z1ZN#TW5V;qI6IW!b!84N9S4qxZwGeg-$a*Bf%wuG_c1((EzCvcGL_5Q+nY`M@jl&QZNeH;>PL4WKUm0Hi$0J=tHVD~%)ZVtq3s~4F%CF9kmTH=>Nj4 z|G0h=XYvBfrd^TH)g=`pacjE5qEX=emH4lzJHCuF=4E4tRW9LV6)1kCU2gVRuS)rs za;STD9V1W2S#jx6m@g2qlh zxVs)2U+>S;S62^qbUNP}u{=xoX{#^B<2y`ZGhL$f>xkfKe8fukZ(yL$D1A^Yt2+YZ zJN!PC$F3kU>y_l~^)VL^2#mx5($jpC;bH0v znxzIDKI}Tm-A$`7Je+3zoI|_*nUn+=IF8>hv7#I}>V^gEOCKe_^LmoWw0ECZ5LxV1 z39qRdwtmZK5}&oa>1kw-MedEqrjX2;c--+|M8vLxJWsOx#9-5MvcZRga$v`zk;V+} zz0WTPXhxHp<=rSHuji5usf7h%t?vmjLEGI$Tr1q4l*cuw*+-|5g2>K~w_!={v zCJUm3XMU$lmg&|Evcb6>jYiV=4fFI@2tXS1-Vat5r-*6h{zOG{#rFHlDJVn$rebd2 zK8|z5!HDTc^2ynD^3FnT2UPsPw?C6)ynJr%mdR~S(E;#<>#ZpH_BhP0v>Y`@nBT;i zRUfw;x5r}=FlySZUc=AFuJ@DWuq}IBul-|f+zvNOaY)19wK@?6Jx!0*I=JR@4aE0`C7{wcH=4oLxTNn7kFnC z8bliJo&4%-ks4jU>k8?}e%QuratAJV^$nUh5JGhl1Rsapsh~jwsZ{KYUanC0U3Wil zP!}=Ct}jXaaqe+FP*yYYzO^{JHF73gOc_5 
zmy$2d78|A2uWaT8=b2|qju_hXV2FMQJhZ33UjwEF(-K)FULl(g03|Is#>lVtMQyC} zdkv+*fB!t2JS%fq?|V)~Rl)h9^{4mhXx z{u^|`#aESU#~Dio8n^k^hx;C1;~9xby2Y?vd~oN5UmdS8Qa^WDxUiXMR zuT`^G;E?-m* zqZHj4#J5M)%$#9i_?(k*%Y0B0R%T&gy_}pJ>{UU--a(tPigwZf%f+k1B`6LpHc&tF zELZz@rP>Q>bV+({;D`_IOG)Z%je@m0-om#$Zr==7p`!%N1iKIuXFy@lAUl z&g@jk*`3R!g=7z6;&MDowqNEb&R=?~Z3&9^Q6p-TiR`hnFRD=1)%U5k$|C>9eBd+V zod-6X3l;u|wyR|?XnB2Ne{H%mlj9@T~0f{vTa+wQ)WT#n+}#+v=Q?5%Z%*T!+c{!haswOu-`^g?D7JVI`2BK zOj^FT6B~I;_UeqeUF@jw17^Gq7x-^3CRL`-!^k(bt+N6cA-9uR8-g7-QO|o`fT@%R zfH)ibg|oPu0n|y_$#BhkmmL{i2E^*K zWb4M=31UnvA`VU6rsgT)#c~C-+Jn*}EZuzpP@PYFZeGv%ObQ)=SyvAxA>F5_h}RT! z7fL{h8%L-0f^W>{Cf``&W+s32ph68Z^)Z2#m#9IBQ&XBkCI;K9txhA&-QRJP`)P-h z=Qz}&2=lY-mYeJJ&>q%F%K1v-mLq+ZEw#&an$)B4?b<%}`o{7UrbA6BJwKvW6;!xH zn%l_B%LiLJuLta&)~*pex1Si%f~_KU*?sM60t~~LMzK(abc3n;yn9_6E2dmNHBOH; z5r7BSm%L0mC`A2bzXp~~JRXu(HzVsQzPwad*RU90d&9Zk(`0kd=(6Vo2hsR_umDgL zS-5NujM`!^6bd+~o0-J`TSIWk_XHfr4Rb9o)A(8GFhRe71W;C{?+Eg&g`#?g`f*k4 zW9bfoAN>2bQRGu+DS$5{yB-t1`Kjxyjyx5oRc+o>kI}!7n&F;G+KV1a&ry}3ALE=a zT9#SC!dCa%%(LcdeQKW9c(;eTEVp&eDMyypTg&F{lsiJQN?nk#S7mVxRh(K&$H@mhBHq#4d$C^P?!uih)T$p|(+mFC zi;*@k$F-W^Z(aN;MBtVBMO?Ctr12hOTw)&9E}6>j?*jjPEb{tK06lUK5e-r`VL4+t8&_ zBR(tzShqjQzLp24`$c#h^a_|RdIKfrwUa70V@|p|Ir;cBT%!W33FP{|-Ob$iUG5=W zJw>+gpz$sMMEW^mu>+CbL&n76)de7yz%zAfJq;nd2ACc1S9T;hbs*lYt*uRoQU_eb zrBx)$@!`U6jX$|mu|-8F!lk!>Mpz6(Fbd#US(j7uDHiL!VVkek2)g*q z1_8spb5)R!QNX1Z19LLFE-gV+v>aX@p^Yt?dtYDV~Ixk?OAU~%Vw$& zh>!Q4Oz8eu;dU@y5JkrHak30iImgqZ~UMMq89St8Y%2bk@8N5ma3PDxS&V@a-N`{qg-!|S0R%thXkfK1l-Pe{6f zI>A84BbSV<29x&NswZZ&LiuKcAAoAkts$-7trvtaLH2xG)Mjk6brEw6eQm*ptdbad zV{M4YQ=`-7qbG!BffMQ&+6Hu{Muuk3)N=aCdM^35`pv~O(XSr)h5UX(7eO^2YFmW8 zu4j_!q7*)1=KpR^V1V-FgH*$>VQ{Mat5^IvQW-SP-=klxhrS6j^e-4V>T#DPAHcE@ z=_6YValxx{L@dZY{s9hTG{iHFwNIfQ68gEj)SBaJYfo_H#gGf>rT)7M|CK9e_Q6qV~sz>>l;`Ge{x`lNq^@Fq6AiROMzg&V+1ev(^Gwa_zZXC~a9jOXKh9P5K;gIC(#L!6rb zO8j@g+Igpc7 z0V)ike3vGwsQ?yi_y~7#k-q7_WwKPclz!HtPai z27RHM2MmD(#91H$TZrYZ+lkxpmWV!-mpxFr>-)yJeY5nr#z3UQWjsc`N{p=&Os!0V 
zXMksY6a;k}6D!P%=Vt?aI)OeQpq7jE4tqTG3&!_n#2yoU>%myWth!HALn6gQL(g!_ zh*N!(7lU^Ps%x+$EnZ6I=>|g+*|eu_%NK(t4B$fo%6Z<{H(q;FEXf{}ubd?;$8>|U z5SKJ-CjjC|J=m_I#J+z75r_I%43GTdI?>)M{Y{p}fV=uZ-l;0dkd58R!icu5hx_A) zW(*L#`}|fYL3|BLwmz%9U+o<95MsRzer>mBmP6K^jdSLG3iF99*k)UIj zi(AEPghzdY1zw4HlZzn+&rYoaGD3YfFQ*674MyLH9d!>Za`qZ!1<+_XxAuANTz|Hw z=y}{V%nGl??u3)9bx!&oc8!4@1H(STe%J{aN=y8fG}7t6`DD`F^!+_~D2of$O+n#0 zUkH5=vokAZrVjVdH!w+N$@ez%DC0Syc_^fBCmsdGYB17SR3L-tmnF{_N%yt$heBTU zk%Y`#NA43K7E&rU6EdT(>-w_kDJRPq+Ptwz(XxZLJr2G*(Hbd#s@7K*WN0q03`Gcy ze6g_#5DR-_U#Yc18!;^m=rcXYnl_H=3`3AF<|s_z>$gERHn&MrlOiZV=3&U7r$2o% zHg2)M5&G*cre(Ir`qON!RfJU7D&Gl1`#_5HRgMQF=tpou_a*QO3-0hWdOd^)oQb^I z=(*|ZjI@)BIQMFLs<5)?#QFBYLF;G*(y*I5A#*CPiAq|_tjTL=^6=s3xs?x?pqKqc zYPsGG1{?=ckbnmG*h0uzs7KTrPyWQj;tpokrZ&D1Xk*3o5L7@gDfcGk)idd@r^NFo zg=fpvXlJt=eZE(M@w&^@?rfyf`{av34(m@}Up``zvyhjBWe-;c2)Tc}kb!NIpvxsB zHnWYt2_cua@JL$HP!aock9CXYXLRLK^n=qzR=twS?!tiRs|d z=oX^w-lsVfLawjFg*X~wEkiHYX6XtKyM)X4zsKZmJ7R+j=CYTrX3F(#eIolmZL2Xx z%!+{FjjWf!s_tanV`%~Cs%w#u<=vs zA;V)OFt-q`%w2MPuYBz{g4pL%Tz#F@BRlU!0{Yw=zLh;WE~E{n=43y`60A18=%s7E z&>LfD#A`^pquqZ@+9NvqTx#%H*b|FmT_il}rWP-95gFd2s~eh?W>pFSy{ ztm2YP`exvE^1E|SM#^hm_RXE%m*+{ZppwHT(5`cIT%wX{t9{t`g0oLx@iO4;s!r93#6;R*hpgi3qc9%ln zY`+KQOw<=c3mmDmHQ`5QhC^=tGCU)OSLE&F15<%C2ewPOJJ%b#{8bd<`geD;F>SrX zU1JL$P=|nSxOXqxBf@ZA*LXDRZAG7)*Dfp55RlCJ&OMw^)t-1Xn zmUu8B$DMm$`Q-vpR&L8W6)z7@j0TZQB0iXMZG&tyf1wZf$Uyww0}oEY2uNY$7kcN}Ay7&KgZGFL`cm0zd^PbQQ&m zA*xcl%z4!GvbqW&NKh8jd?Bgz61KTf!l-Zhthl3*Tu@dWoUp3q8EC1;?>1Cjvkf=W zt*~aBJA|^DE$(UPgt4gC!5q(}@KHS$!*^Kd7wfxPj<@sSS}@ZVRkAy-hchl){f?V0 za{jA79@e*rNO60tdsFe&`wx#$MFkFjbpblcY*s_7&NAvrbB_9=$XPX>KV&pBt|NX^ zuTWFbq2gyf;$;J3#);h=wHy->fJw4`HB-rI+qE99GpNIE7!lxxwt@|M@x50#%f48* zq7<54Di3G7HnC7GyLaj4zn^pq(6u}fd=%@W{5#od*f_3B#aPywEMH{CIMO9scs%xm zM8EI0b+l~N1R4}uJuLcmD-(fEHNe`kGoAxb2dwo9ka$9wiJ@BU4u`Jk-Pftq$}4D*ax8a-#RxTDynr@E-ckqE#Y z(G>xlxu5~vjD??Lnh{=*S{b$LIX{zY4Y>4e#{dmbwYCR{zr1$+QU{$8k?hO_M;Nd-Wx2cixz7k;dp`8ssrXeTC0-hvj 
zBHqnVvDMk~QQySx)t04pP6b~U#Gv%;4IR7{Q7FCk3vn`0{t(12>zWsdCL*Cce)#x#x*%PjAJ!DGOSMSzfNEeAJcE1X_ducKH zCkDmtu@)I-hWrp%6NE(B;JomElTVL;XRlAHGrb4sZ6TVs3w!779(OFMr)3ipVhc3!-(6r zW$U#qOx*qXcOQXec%#qkddCg&j#y~438knK!0zV;FVMK$=f=EE&O5)$+=+qJ^VlVZ`8_>LdbbW#j?JW{R{S zv>wxQV}h$=+XRgb3VYIzj>lRk=jVGHTD;b)Km z$g!^w$b0ipX7O1Nuavg1)*O7Cw>AE?AA}c3(SMmyINQTdu~Mpam0L!Xn%dqo8-LP$ zimFaNYdEuu=NIdVjG*tutFmZtZ{2!debj?xpJ8X^Udq^t76xZ^WYr+%UMLzSA-ykO z`?}BWaFkr~$OM^9)8LnfvY0K}_wL!L@Jlnk^@lCmvETs(yS<+T9qi9NKozIbj3(7Q z#|r!VL&wpAaSv_>a!s{G6}3+A_#89}uR%y5Y1c8ep#I>4QCG7R1Jf$Fp({p7g!zvK zYdu}Hc1U^TW5Kce1{JGtSTyo3bXWxOij7TyBW6OwqI`Ed&NOtJ<`MAek^?cgT)Hnk zz+WVT=6>wq25G{(pEBNO=gkXKS5wxVbj#uBx^;&Fyi(D-@E^EoDnVUkB1+@bY=)f^lp;S+%CgfKups|^z*_BK0!U9uIBaGRtFlT4%aR5tM| z{c#C?>t(Ff0Z`i7AfyxXgz z5DQ_Z^Z0VuIMDT!n$vZtWj~p#YizkSr80*PexY=pDU#|xaP*@$<#KIAPyEwZZjQMk zMsc$VjUPpzGrTs?9FQhTqwr*&4-Y@q=XQ(2+;&xH>GyH_B(S0AXUIdBN>*TOIO?Nt z>I53{xQ~HuaqaUVPdcMSqNA7@g2E!S&D=8IqLB$!k9-!ryv4qTe?F|Y zDyCf-AnCc5Gamb0)5Uv~rfWj#51`p{uOSKJ^N(q5W4xl4b&t#~B+DWl%WjG|N;>3d z&kV|PR77133y>E~mTaYgw<4oVd!}mEx?f!`Zxn3oMa231JySxiKU2T+Kr2Toc+12& zve4O_M-3YO?Hx6S3-uDC~BQD8%bcJcc>IN}EHWUHl+vZSrZ?j?J&DTu1Ebw5Dv&$G0wzIogrvn37k!@DkrDAj1E z6i+f59Hc`iHVCVq0;_5IR7A-lS5 z%}2a`7a_TGD)!?>s-K8>rcvjOOxI?gw~jB0ZKgIRaFYk4Rm7QnLRaT%vwVc+!2H9_ z*_@4j>}DZ3+gx%lPVdh=mkGb#SRZ8S#3o( zEo+%H%OdCVjQk?u+v0pb!;qa4jV;lNW&}7x-Q2k1{CS1VR|eTAireGqTj;wo)%}1i z6~<9GhNe_clZgc4Dp}AFo znc4EFJTd|qj#rzz{Er6f)jXqib%>Tfo;Jar8>@7w2;O(zM~vYvh9JQ0z(<$c+cqs< z(>zT;o$Vid?yLzdLo0M!X5bV+;!{2Eb`@W0kG7KIPQ*<{ASnjOU;7MwyL4aAtN-!A zy57>e9#1sI{}!s62I?0;#Xzn@-oe5Zdj^Q_ICEzJi{|~Uyu!`-Q1)5OwSKV;0Hibd z<`%f9^58IXFD=A%|B%n~P=1K>S{&H}%&&QWrdKq8UcNbZ*33C%vo}?oi_~uXe`)uZ zlkVWYtosWl;?+WO$DOJZ)pyT$-jBVU!~vZE&iA)}B@#c(R2n18S|{`0+zHhcI%aCd z#7py1>hg%A1h~#;xX{k?C}*kRIqdVaa}J*R`^=$z+{})2zyn;=- zO#}rWVUZa#;r1+s?eC4zq=xjsyUHiFv0A|uBQvALNA;w$j_*248LIDH^u5C08Tv;J zcjME^5cvTP*>zo4f~qI4*7-X9F6u=sh36-@cEhz& z4T)lune%J~$XZ+A?5|^6F!yb~&R2fpX#FF|IQMOorW-e>=ncN+?U 
zG5SN$6-O3G0G%eH&8Dw%GqWWkEnrFOJd|A=UilDmQ0h@=(?8G-7^H^U6wjOzo3}xH z`84mHMS5bv)rxQkhnLzX&Rz$3a*4K2;1hl-fTqJu&Sfqk8JdY10xG#9ozV~*% z1BJR0_|0>J!vmMPQKs^j8Cx0qR3$$*7v4BmPj>ynwCCF>se=)Mf^?FDv7mdcZF6%dpb}x$6s3*Y_xoOoN7B!X-lX*CZbBcK6SeyS|qhmA*`EWekt{%EACOl$(C7(BXBvgEswhkS)jL8D#ft!E4!rnA|hgYyyBoB$}9(Sa%XA zdrFVsj;2DIoE%M*^{J+G;z@T*X5dE@OLFFbiV`Ie+1}*br+zaOHMbbRyuGYk62QYR zhWr7OuDJ#_?Mn4!2&sbELo|@*$vRt32C?~voQuVUp4?^%>ra+`k6GbV5V%XRVN3lY z8X2gqFr?P*N~5WIn)xT9H3~i->j~lSTMH~}X>DrH>wkzBRW0-`g!UuDZvu32dSmkW zPiN@@XXv>J+Tu1GPUPmOjw$zcx}sb7i8o$e99>Y%P3G+OzhqhB(e&L ztCF2&rsHyWOIh3wdd$bDB>{U>SZ$QBbw9`3z7nsp#Nk9j=^d9HTD=6y-IyJixU1Bb z+DPLOhlbe1$aj7-9++x19-Ntv8N|9HCr$kjLh_RL?tYQv0Z@GsQ944;-qU6z!$?-o5Lz(7O1ixztxF{9&_QB%k0Waz2yA8cg9aG1{L{b(Iptf~qT{ZvPk9 z2wW}9&ni>|88^1PQ?oW!Ij5 z#zrC#w=CVV^*;bVB!(~{{si@X&d1RvkG(0(l!I<0rcz*s=5K<$jNmLZ%LahhMeJ0C zq5e_xK4%95by8=w4!VLj=D(qqNSs`;$*MKS`5RBF9?CR_rGV>Y_0o47#kw^Gtca&D z@-b7g>KksZB-0ztzq8R0LZ$oRi7WqKpS`VD*^VH~ss!zCgdnHO;muyhoKVO^+s_6lnC(v#ks<6+qG)v86HZVaaOW9wPJygcJ;TBZRQC4Rop{sB zOiTOepBvT1)ff!D*qvnH=hvE;n9$6PRZ16Nx$)W>CiU5flVOJc`TVU!tJ>d-GX3HPn`}FjbA+4h|nEDmuzO@xujNV#DCjOa8uRGDH z2Z)XRfr5@pv5}G3ZoaazVSgYf;_wscn-M24T{5SkdZj5bIG#~SJ-<5zT{j4cnPy{z zhhUTD%sia-Qx73aX$(9(tWSxI|;3(-Dox|txX3D zbqa~R>o?6F`-4bm_U5#~`K9Hc_7d5W2$x?=6Vmn9J=e#mHb`aA{q)az(T zh~eP*^XJ1iG-oGEtv=@#zrIOUJ3+<&xgQC}z7Qn4Pp`$NVC3^TP z^FHi4aSojF{}(PQdDoO+Nc$tN^dVu)JI*c@AkVQ z668E|C3V||ge=Kl`79c7z)$oB&L+(WtA2>amd|)li~G;L0Jpbl|HB5v%5%b1%@%5` zRi-bx!jW#jf~KZs_;w}6svsEd&2MPkquW5aW`~5G+WN*&i4B(tCNwIi^7guviC}af z5uiKR|-_T@Cu?K=kt6#$JtJBsGxk@$+~Idu_QjwXYcNkpZjl&C)%77X-i6(vwTU zd#;-6IR^QFBSUjDSSrCEqf--~n7BEa;*Ugnv;L((NgVnG^7@zN38O}>-fD>xQl7x| zQPy8+{B{L>UEPgd-c<9R%vd4LZnL%0SRL=m^-{Y4=IiqfPtA{aZ@xz(QC9VHqOl<< zg~$@WxC2)ZROGmIy&AXdZnrRtv)<7k_ucCDVVnA2Cf||?E|GQ7zEz7X%?T<`T^nGncqApbp+Eq=4Q)>!Vc&Ck5<_JGQ8>nT`Ja~a-FuB3<2*Q?#rcA ztmMy_9Z%MqP4=S=S=pTLD%vIrokD!u8{&HFAAaNWKlGNWlpeH<_QlfjS&wN!q`?kL ztyKJx~AN)h`IP@7a3UfG8d|qex{lij>>l?*z{K8dRRd_bNi?FGx)Z8eaQqJ z%L**7_PJWF2}@?7Z5^s3rOD*pOmui=1dHyggOlr 
zz1(BL_kdsMUf#wQbia+fXsm=i*H+;*A4fUjtzX2mNCp@ z4?1pr8nKap53ulG6VtucWm-j+{pnN1yxJE+GwNq2{jOd}i^d>`D?g4|FDefjAuq=l zU`}?O??0G6qPu8)xf#-j5x#g)WV53j(XS$QnP98FO27Y|3Zo@gCPtoo{nLo>Se@PM zFt3quFm?47(%aqPrLNxn3+yOJW+SoXakMmVnJMJ9Rb(%2@Y-+D2nnJ3G%a_)pFZPx3pDX{{UVJ_84`CL?ysK!KBzvNq2Rma!hYpJEf=0{rkKF1~^4SzPl}` z_(zqtkoXXQVc0agDB^yIo8NVcKSj9v6DIcs9MzxW#?+u$lrv#e@2C*%8k>IKmM1o7Cv~gP$d}&h?G-Mh+kSVU|2qB)f(k6 zY}WM1j%wnoN9SE}Txl)dl1|opSuHWoF1(OahL-Atyv9pfIC|3lBy#q$a~);Y&B&-< z_8oV-?bMg1d$*nqvh7`(o10Gu)2}^IYjoX|&#EU@63z&so+)p+qcz7P-q?8ozuC@8 z?*dr1to-LNdv;Zt3{KI0{352RM>dNWtAocjAb_mVV&E!H&RY!(cU^q-yDIKiW+>A$ z0lJk&Am5Y&%hoDTTa>b7^R~(sef#`-qRu+;z;*B}(4+^>#-STtIBKZ5h=v*3f4JzS zI)RlM)>uEDgTCLB;u|Y+xi@_+2Rjt;)^R}pK;Pw7<>E2{viopLevCgEn=fcOVL{4k z-?-=c{#EY583bopt`%0Ufr_E>m9*!J}{9mK(LK7%;8HY(H;dASJbm zpN5i1SIFWCsCPX(Zt*!MlF!i_QsOC~swd^~_g%K5~Ag5{Pi%3WiVSH6B@D`o><{&n<_VU7*WwCd>R=-ODb z86PzwKg6wv+sdpl#86Huj7PN3eL%!wde`TVgEHUbRw#N8TL)u;TJMopj%GZrV)v_0 zd{)EGx|~zYIGwlc_qTTZfDI~o>Y%Tci~IC&6y9s@D`eK$M@?MRevUhN802)E_?L^w zf_yz-xvn%7m|v5u+J!77#+naw1$s-}RHCKorW8sBqEQ7?zfKg{&E=bsiaHGpnD=Zs zk;Y`BA|@l%nz?bmqIi-|Bo^zS0(;e-VT3H|`BEh?tP9C9C7{spDJCD%PO=L^zO(#4 zyuD>qoLkc-9K{0!lHl$h+}+(ZNPx!O9l}9_yG!sOf#43oEx5b8djk!8caw9T_slcj ztob*8vRHxc-gi~)lB?>frdkYNg|!Qi`N?_yA!*h$N`1=pa2$s&qs`suFIj9OQf)9$hPx5IB3Yfa$R0~v8(q}SR^ig9)6CD=5WqyN8Jb1 z^^_VXMdPjLb!&chiB^2x*hy~$HzZAMl^X8RA16T8(9&dL!$xXB@;C$~?O2{A7hRg~Fy(C(c# zW#-fXVWkXW>t^xP_g3Wv39j_RpvzhQoI3C;R(T-<_T*aIXsQ=K?0<*w+g-jRk!KWR zOTVuaPo~YlVKlFzzHyp^dDETKtr659gKbDz+Higgv|$<#FE@ETLW!P2EdoD^ObSaM zh3|BMd?wnU{GZAPp_KqMQH50*0#D;1S!ONvrF`eYolvsMbL=mR>81NF|4h}-vw0{| z0Zx)d2^>-Ez1|KnUjQNklXS2IXkIX6l@l`Zwfy`efw#g%9(QqTJIXQqdc&@3XLnaB zRwU(db*kAJ&9^g8P%F_`Ra?O0aaC}#olO4xF!Vo3V?proI)K0w6yiJ|I;h2Am(}>&;MAOt`f|Sf7!26e9 zRK(25mNs$;qf|&CGC1;GiGr&wmUY8>V4nw; zzfgQY5Dbvv>{Sf&D8Tu;axTRDwu#RGaW$C9fGj6}SYbZMy4tmAMz+vFa&0g2L>T=8 zkC-NVio@r|izLAJC&9`poZxoHirbv=<@!VO?1WhduP z8kdT`WMdqF;tGK7pK4l(0C@gh!nkCi534~c6stip6maCc`M3>9LiRi7*xjjqxNTe^ 
zDTJMOq!5*axY3`&10Q>7;*0|WYc4dw`Zzz4Hq;e&PtU36WlhH715Sj06P`Kp{E9{| z_{%5hEwqnr3>te&N(SuJMNOr7U~)FUJyPb}chwq_P*$ zMYWPHsa+awN7iVN@M0hc325(Kf4`P&Zhl7WkU^Aq$@cAzuHqWiM`l`9Z=%Jpm+#*7 zp}mU?&3kF7vKG?-S0L>9cj*-8`UZaffA?np3n~OXG8RV}6mN}FLpmkn=}TD`qk=_~|l(mp19s8!amg;SoN(Ak%1fe*&0l=i_7aa zXQT_d2@rSOcpe2@uP3Mss=m0|6C8H^7@1_Y4$p-WP3C&%d$IDH1lmvf5-ojuHlWMH znPz#TdoogIW+~5Wk2t^oU~EsX&N&gz&+uMo5=9gS^Xgghg{`{m?7+{~I7Fhk_aeMP z_4c^JX!zY&hT6`sbb7fKzU>zcfk3`3*7Uzx@6}+62qV*{#S-t1u_Fy51O7ve2QWbu zhtj@XK{{VdmQU-x@Vsg_QpMIz_D-fO zdugB=WCwbBvyTa3B!+z)Xu#WoXp{e_50M{^n`II1ja4DOdC)Z)#P|Qsw@jljbsG~Vcni>e~o&2Ye`J$?&xw5 zEgV^?tf zsl6;47!bos<#s96iy6j~PM}qs^^KDSJQDPo{{K`-1)-JzhvRXx5mv}ETg|cblMAi( zyyRH#1+)Ns#(%5jJsAX`KcgzF*1BJs|KI=CcsK(O_*=)_F>|_PD3(_kNMULTKw(0s zVox6<@uc=YhRPK2=V=gH&*F(kM@K7s9v{B+wg70G1hAJO+1lp%XmRuIxRw3pYTjn^ za#}A@xj?ROrOoeN7rI;_5BPI)+Kw^n`QW!C8;7;a&a*8R)1`%&%D)uBr_YA|w%z|S zhOz}yfF~xC%p?lm7!%wA(Cd>q9r6K=f7b6>Rm4^Rh?F)rFUrl!qZ+L-8z(1w97^LP ziR3v89^MZP2~pY?PFx$a#K{D47cX`gknq z_9w~yZHr|SB4T1iOe&D@@UZZ3)nt1pCc5aM{zHol==>-%+1_0pnuoQxoo`7B3i=}9 zajAICaB(ANzj^V4=f7}T^`my1YSiuCIC%&GiT^LgR`kxrBy2G&S&$nc&`FH>%580vqD>_9#l!usbbJBql zf6y7UW8D}*z+WmAO5hFFPuxCr-RO-TtF};2w0j^_K23Roc8!~5Qvinjdk^cGpZfME zD}q2BP1_$(UTQRuPnAHDs1#606|j~Uj73jYY0yfEFnsHGd6qQ-^OHQl#-H7)fx|CUwW5E0FkP2 zJ8Cg0J)JI=LOx-b|53ZEYtcCzi&>1$;p#v$-fTw_Sx7*Ll=!L_GGA+x=?NgpF`1b* zL+5~T6(LFO+f$}`A68FKEW19sHbc2Cg|!aba+fJzzkW4eq>#^uBN7h$j%L^^$1}A* zTWyg@&5Y0Q-Q+$9ERvwqoCP8i>pg(cNx=}0JFNWj-QyL6WXU8$eZ?OeHSE(!2t1f= z-Y5(e-dHcTJ8QJS+dLmoJR6D^sdwbmiO|L-OPH@dXXP475B5QW~|PP zq&xDC^gqs#3gSrytls&i?OaoNAn$y4z1F}}ap{UdRf*ePT@k<9evStdoGzBb= zsmr6;${x`aPG7s%{I@$K)h$5RL(8aJFpSM~UDkoB=OW}=037+leHnBAr^EKpmSfXaaal_d7u)0QG{D_!xe=3;*$qHfL zrZfo$*PX@6X7ZBd=kImTb@@M zA|n3%8yy$G(hM+5AVELq?qZh&I7vQr^JR@!l!+<%Uk5z^2CVi22g@1Qkf1UiZ5O1Z z>2fR`>AO>WB1~12fvgFe9Aqi%p~;wwXYkQv2seH(XsAt1VG0TtFn6HL2aC}MWA*&! 
zfg~^o#=h9yB3z+F=Ok9hWVtxH;q)C$N>(VP_JJ2ZRM@%Gtk>7r8+v$@F`*KlRMRZR z!!1;`M#|_+DH|{o7azKQS)hOhmvT~QBv@4s8yQhJKW4L<%F_1SNAXBuVqdUcF>e+?n#HMGbV;SzgXWvS186{IXEhBTNa!(a1{6k zCvHQy2B-U`(P=+H9>AMPDRtEuLd%~Ky*NB4vYtWiTqamGIRu;#A5V~yk;^>5Y~Ydm z%)|fNZ%jAt-ur6SWX}zRpRF>N!YTm`CHhZsu$;7Igp5o(etW4Q#sGo4w>kKP!bm_Z z#_?!EW|S(s&>h)7h&3V3pPY%bvYs{_mAY}eHUVdTZh#$wm?z{HL#eYW(}{*Yie^16 zKt>`LYUgI#=KCiIV!?J-+yb;3 zLiDjb4bQXtYM7OZN-}&u_%eGV@qf9$w+K=;jQ9kkxc&>%+r*%$UBv@&qGRDX#(Qn4JfZ+_{jr8?&o-cZvCpcTnDT`l>(gpE^4`KbSw`PP>$bW{b(eGziPTuf3-rrwWuo0X`6y|?50 z0y&{T()IfFYkDl}C`VI1{o%TEQb-l5NF~C)Jtod@I?&5U&gR~kYXf`sqINC^cz=vDs;!h8_WV0n3d@t9Np%po%JRf`AwR&Q=by<>{8P^!J(+_;rG^1McEF>d zH?<(AIRSQ=YeaI$1|c!eP#C0OjxHH&2?U2_n|~rVP3wst5&~!(*)fm~WiiUFw!3ck8tcWP?tNi4(u6K&120Sz`N+!aewwoHhXUJ%AlPgidZuUU zrP^ktDE4p9ySlolID^j2KyTgAy`!8dg~iU%4s4q}t`oL1eRNqDM+~vD1!1R_fq9Jh zNuV72I0eHzfs;lBeX^RK%DqUH77!rdvguo|-S@PYjrYEGQa z#pV?43^2(ub}*N6C0;n2lH$#=$6e^s<$m@|I+(+x!$9foYfN0*sMW;|^0cMZF6En) z`}%A2K2_=%+>JI?!gnp{-3tGwFv-gtxR5lZ^$1_({OgiO%P4@Lx3^azyM!eh#jwT5 z#3b?4IPx11_NytNe;8`CnH7eSSMRRNQv8L~zplejCTOL-&YLo8QkDkj#phP6d>mHS z^U5Q|XF5N@G`y>yXA5Qq-D=hzr@JgECI?;Fp^w62#=JQfkP?y;g3*%pRJ;w{MRi5B zMW>LvpoS>4gjx!gDBHYbdKq1Bf>->u9i_bEVmrfQC~p+Df-oAZFi{-I`DkIcCO7q8 zcS`bEit#t}vSvCJVMdaXsBpEnn6dAu3tzSy^__750nGH7J-O!BU-G4$ zuJ3&HGN{meaSCcw07v^Xd%&+={_pI%=X1oSM^MKEr(<6d(CSqUb zKA6dpQdxxOgClg@!eh5i!`_xPY1zGejow=&OoDKaxqhZe!2P`bHDi9y$@m~`iP8`} zK>ONRmHuK(o`J1HXsq^jyR=YB(lEq{M0#}GY2RPDwgUigGHQ0a?iV*e{fb$R0|PzjBujNi8hHkW%NB)hy*> zn|p34--6d}NZqdO)27cp2mgwtnTzvq`* zOctQAgtD4XNPK08^T|e}?M`xt(Et`X6@|T%rJ~NkC00d2;LkvU7Fa-7Ho8Gg#o8T9 zTTeYN9po$#>2ZnDPqgj@yR_Op9~OK;zGnk$3in%2JpNx#ZW3BIBc|ck^1F%Zwpm+EmZJI_5==nAj6l`$h3)Xm$6QO|cula=4l21$xEMo5 zLYZExuZSoPJ7IM6-Z4SThHMzq&J}r97M&Z@#^2?)CYhi^L=zd7=pbjQoAuE1G4e?E z0NsaO8nj7fwcG7HX^^M*?mHZ!#lUUpN7;GM%;cx8ibpXXmrq8Im$=D#t7pTKFPx>V zXkPa83?z}6F?oGTpygr|`GCtP@>xIP_Z+*#6v6K(oq$4n1Dhu8x%X5I2cdALP+ReC z{iEwP%mFX49OSg|DLI5_XF8l-Sn<&AGe5Z86B^j4dUp2<)5*;mz*ESQk;^2l)P&Lj z3y8sk$+ZFqCkh 
z$6kibW@jCA!gXt}wyKV;%kBSvlwH2Kvt z&G#Z`*X7mH>cmsdtBLmeZsKj+Ce)k_*2A?9_>ITZ%b(p10%qfHhg7OTY^amj7?X`n zy;@sJH~~=%Z7;)ZoACrn4JM$noCVT<`52qWhx-cCQDQ6>Q-yNta(@-J)u1q53BI_W z>rlwcXmakdYinSTfR;wBjFrG;9SO5!_|G7Q=mBPp-p6c84;3o2v2D6^YT z-PEqN!wJ8Nh$1UMdh6=>lNtrTwI~vaIyliRmb)JUZ#6OaR%FG(gWGiwt|SBt9pcRl zWY|DCpf;qjchDjn`iW{5HoG0W=uDSKPdEGrC1knFXz1vw=g2HO zzZ|8=l;ZuDSF$=lZEJa3>py?ma(W#Zl;60M7xV(4o(h!>&}-co8AV6K$W(Atpy45f z|GEfLu+F%_{g-|bK&kY6r zE1j#%0;Lu2jvOQK0*LInx-PC+1!MtE)7`xec*@GAT16&YiasnnkU-fMQ-dBl*$Wf~ zGrD0bEAi8we=g^DfVQE6SumBdSTM`jnCFf!6a~P(O7!hE3;vSyWOcw0OsvJU4aZQU zWfwIkfQhBGda8b)*UZllR!%J*UW&@=u3}v89%v)!F&f(_#7Bv1lkoQT9Gynx`81 z5U%n8r{1rI+s(`9mX{uqTmFhMjn>WXc#RMo3%Isb*htLzN`Z=VcZ~U=&)<9=Mf!S! zV_Uoo^W-y2W7r#l!jeo~<`&6>Nn9Zyv>%A#}X1VjVVVyB; zsr^SCRna*;lzp{^*Sf4fnAnb1`l9{@W3xGXAm|-X*g`keAy*dvQMe;)TwO);2SY_! zlYgepgXrShFFNBMMF*{E!W5ybk0Y}_rVol(6^|c35N;6k`;5No)Vh-X&#f+RGDwb{ z5=l4YE1_#S>2|(E1N&@yu=UG zX;!7Zv*~oGRlU+1j=mA3fs{lN86U3MJgoBc;oy+%7X&InPBox#c^C;*tj1c}Uk(G= zii5J{7#~KJx|is0^{sZ2rRg=jQT9FQnvA9obL>i|?<95{eJD@t-C&@1>l#o^rn!J0Dg zq}BBi(-`Uovls{M!wCIx)z3OH19k*>Y+bSpuk;?iB2vqlg3xH&hhxE??9DN|2qNR3 z{7Q^c7gM&?%X3KdyUCF_ulo-&ird}rWvvvFFJnLp`S1%wp<1~77k^~dop$ziFWVYu)=oBl z1jx?-QNa-HQ$LVHb#*Rpaat8ugV)QUodGQIH6L1bIl36b=tqb?ZWh;V`_Hq(Vd}y;lTJU-SGc(2Em3dQ35h;WR zNZrytsu?|IEzHtCd)pJ1C;c+x9_SlahP2LD{{S@c*ZaZ>E5SSS|4{`PV<5`-a&v%r zwX5Rpqgp1{WAi1>*W=XhL&Wk3VbHgv|#i|RLXT%RyD{kMGZMvW3Vx2cUo6X|Bour9u4Y2 zGQnK@+RI~VbAeOr0m~cP!-eNMM?-`3+kF?Jp`oQT^vh9Hskg-rL$|Sh6I_JIM5s^( z1poqLJ@v+bT%BLQA!5}67Ce#G!KF*kFqz=o)B=oJ^RPY&ToLqVEdqkks}+o~XCBZ2 zYj)`+?rZ_NIQW58Vc{lHjq1ar27RM-2}tNu?dC?rekiGYwK>?wG;C9zT-Y@fPGa4Z zmA@*^kZ$yFl$X6KO8CDWfj|%%sV=9|I{1JSjKdfJCjcJ#jDfmb;+eDTabd+Hp>le~ zqdhY2CFdQ3Li;fNLq$(lA4J$~QtCg~{**Vt@D?My^}22JBXc84I7j|6k0Oina$85l zc86}IagyQUTAJVaV9WWexlU%rzRZ7_<*ksb=O@4-W_tF8#k9V4?*mJU((b3NTwD4d zgmIb1sWq|2P}+@ufw&L~&0>2nR6lGuCNR)DDeljE{Y)&9`u1P8x%j8fds)FiQ*Bnhg=@sC zO8~4SxG8u|Sn;T2ETcZ_gIebNJotb=PJ92LwF1pstqPo4ZSjrj;lshM-e%{Y+p2l> 
z4SoOH1bKy{WiTQ$4140zX?U|z4C7LrJdAf{XZcdbB1Yzf*4ENO&XB6#ux_j=%H(tR z9aPBoykS=9{2KNBPFCr*2cjY_-=|J}Pbk9Qb+TE<^cL2}A$eYkh;#SSN|PI3l*spoB;53yePQb)dD2oG z(Ak~#NR0P?dsehMiy_76^-D!?Jgz6c)-$oouy|kz6=>zC-C))SOe;NqTFbI#v)krt z?`q+g_u-G-8ubZG;Z+<*71p|VTez92d(^fguYcjItxiW1-A53Tb^dK#$knXX=BcJT zKi|evW1Cr0=Y`h(KC@#w(DQICOX4c;Dl+PZz2^J*Xz%v&$u59i{B^oDQ(Z^icC%TC zWi`>D$B9pOP9LURWjY@AM~a9ne{!kI2r~MR!$HmjIXF7{o^mePSUm&zm(GrG8N0)1CcWySsXl^eyx5xa<^!J40nwIsW(!b;d7LZd}D>tp1(8R8dB*tC&oN+mdCzZNKHj58NZBy{2fOeD0@7ukMgaVmV)xzI>l>3VRA{v{Yq0b@7x>^K{5M3;uIE#@>StHKpx& zh^Ji~108@$pa6to+uzO2fma?55h#&C z0BZgH`{^wuq@OQh&*2~y``wxiP|s+3+M5E4;i3L(k~QHWZiBc3*&apHBj=7a+FA!Y zE_uNLwgRS{jzYa^l}~f%9et=$Ie%4M{4FhUdxlES9TtP0%63`{%{15xA286%qtW(- zWpb;2r7;F7_Q3yAb%;@*;@;Xs8c7n=tCfnmj$q4M(2RjGjGI>9E!L9|afv@c z!xc0|10?3#{S*^RK9q~NNiW490TJmheW*2_MH)tfrS0DO)Ft<8G*k(7E(0DnAI$Ag z_etT;>54u`gAVmexP7GT3pQD4#$y%)%3U25KVbTkiKzTMh^0`kVKm&cVPE|Wdxy4! zfnD_3X9sOk=*{}a{Vruutj)%&M6kB;@EHyxp-S_Vdbg{0DDo!tZjd?5pUjD21X`tk zj8NrDi1KP-*@jDiDw3K$0^HP?Pf?6^6!OjoQ@UdLWarzTD;>|8FcqU6SMN^K7x2?g zJ?4|5rU%*ck}j2W++ZMH4G1jIsD>%$qa=i?%G@Y!17Bo@(FqwlTS(^aZG#sl|kfVYjKo;7~F(@a`(J z>9`%nHl$ro@*c`!!QbrBy)0)!XgzA4dvA2D!+=fyYCB;J89keMtg`l4Z9B9xY0Eqb zjg{?m6Q~Z_*XN;%1jbN${&6jnp=AA(l$ls5@RQAd3>2<#(D1t&ZKwde#d!(vm7Qu9G@=ZALCMU zR;ZYvU{`Hln`6R__v|5;w@jrpl_w>U{~ROR_@X4PJ6;Mc?HP)rf2n z)w~I(b98vyTTw&|xhmQF4qE;_fDwi%4A^=oX=4z-&9cSWnPLLiyrGBq#vni)65@EiLrjO@-zwfo=UY zZi;(uM<(&-cW-XOy%s+?L}cW^z#h7Kz4LCv5|7h+9SZK?@L5!vhbFM^$sb%FjBy9B zd8hC@-&M3Uv5vW#$x~F4VuZ64X-Qgt>)lsXk-FQ|+-5xQH_Y^}niQI9u-cg_=9kyw zLQscUb=j2x0zC=+d)7ILbWW1b+f~*t-2tJ$eCf{0&h#sE&2?7*>1j0W!AFtX5iYir zkr6+!Om7)XMm5I)77;6I)sb5xB0|H#D0tX@jm_Zr_1dnNaCgMWakWjbV%;D^yCIE! 
zSobK;yLntwhSYW?(PZH>Vd$5;&zoTHyScQNn@Mdv(2#H2qZry62L#uRa~rifaHyoE zrVPYO9T}Jux9Mgad0|>b}$Xl?&+dES1pp{ESHLGPd zxKDJXG!EMUqpDO_BDN3KBzf|cZ_AQx|Jh5w%K2WKGH12PnP6qGSu`MTI!Z) zSIR|X@=>I7Vc6U)0&`)r>aH4WmUG0PTn-X=?-27OW3zF#OP_D}9Tdf!(9 zE)G%0;JK!Y@^)um4R4|SgSL&^p_EJIu7K#-(x$8U64$)y5|2*jK1KFMd`G&h^&vca zeff`OZ1w_{6=MDDo^Q( zmAUrw@Jo3(-IVagmhxOl-q(}NkC@#d(^;$Trn;7-jCH^Bb^})Np9?R?TfFer6;4=A zQV->NB;UMr;AE%O9$$>}Zew8sRsa$S(n6xjVf%oE`=bo>!hvrRf$2evR0d%7yab>V zc{W*XY^=$wfxb&R&FLG)!)`voqHxTWY-{f;Kx3#~F1iFA15f zEo~SY!_lw1cYlB+j2$E;Es1#>#3fvVx~<_YmU{z`53!|!uJ}h;yf{n7c?J4no+u9}be^VWkg@)&pmJd(_#Zw~Hh^!&F+1Pl$y>1o(GG zyfHLZ^}iPfStP9;Z=tL)Jaz^cu_G?2A$# z&a>_qsXioipf34g&30ZuR%nTiL|@j1NuKvyXNcV9pa8R#Nyna?Jc%@~n)xf7M@1*b zuPGcD?cgq=b40u##LFko;a4X? znV&1(g}*TRn*;TX6w@8#dQ^{{D2u_l%8=%mxD8;%9||?zN&UCZnx-pBZRSPu)?{elW8%hj3BC~9uoJ1_- zZC}WNujHPwKIFULo)7nKVLA0(nVwc|3FQv$u;rol!MSJi&&u0_Amrf9>qgv`WfJwR z@8z^kkYTHx+!ltZVv|2bsM2tv&3X5(v^R_s`l5 zt6BPWYajC?pB#D~lKSzU#5+5zo~5_rV<6qEN^nQLd6V7IniFc7UsH4fu8r1`2bv2llx?JR|sBHASmPvcpq)EC~(Tz+s?lAn5gko|Z zX|ZB$V-+9IU2caCvVWvdcRrdelXA|b|zefZk6;@@3_fpoIy@Y&-r8`Vc$I^(ow|67q49;kVW$_)LDPrseCu&0tm z{9+`*S%sLRQy;i?jI8K&=!n5b&@5$cF=!u+K6%T0K64*nWua8{gkpB}!B z8!u2YnvZU&$o|0{r3nwpMkZz@GaaZuZ$;vWuJ|wRVf@=I99LiNJ3_%%lf4us~Q|;USfq% z7gN;jq@c+*N{Lt)M8tFAhZ}hHtG>QUIKcG6mdN>_BS@o-)t9?oBPC0eI@>p>#ahs0 zwqk)oQ7&Y<4VCJ_lH`iB;1tGB2R!JpAD(TD-2t08a3hv!Xp0BpJwm}HD7!lU>rJw; za9qg2KO*`+PH)>dp6N7M1~3NdjS9L=rMWR#ZP8w08nrBXT&N6F6yz3>69VJo>(GRX zMzm%r^p3m#9JaA{*Y9h^u`9u>Gl!1xAs%JrH3}0wpM%kD1T7X{YXJx;G$zSE!KHi7 z0?oD!7C_gajc}&V^Yp6pfjAOMTw-aBL0(NQrMtW|K-T}xTa`KDsFNUK`8^pFP1f#Q zwz)_3X+kdYoEF?%6v=7I#qXxHyvZ+nClLAlhqX2?m`IZx*O_>{mv4>=5uyu z2d}AkJXN5a<@m?FKzx8*td!T(d$m8xoQz@GH1ej%l2-y764LCV_{pt*=UbzxS;=qR z%l!UH=z@jsG?a#!9FZ^;7<5xP$oPZWmZfi7Eab=>!Ec3;h1Jmbw zsXm=32K4C*r*~gm>M_JtlOws-M;NTcCpMKJ=s-Cpt{p#K#HCDPNCC6 zf+d}pk~t5wOM$t7QmI}b2`PFnPWBMyRr#22Mc3UW)q6F!9Qs21AYb%jEAuA9J-~#4w_k;Y?(w11@^?)ylN`@SxI6FNL4<$v!!g+_ z&9ipWVuW>e$a_m1zmHFZs?Hu4U 
z89DH4y0Ykf$iyU;9!5&s+!p_ip4YO{8St&87pZEcLpC$PqnFEpA}6ZtRr#pND6wQF zi*7u9%vdz5VZ~lWvcnFj+%?jBQ`kv4gVCnqf?!%l=b&)_^OtPc5x%w#_XEt{T{>@L zqO?Oq&cM9NE)+E=632}XJ4&UV72EFq{MHARvL%Ecp*4hGZmmJ_@f-Z}HEB*L$TGj7zfck9o>5x+g171`FhIgh_C6G>uQAZs5_?YSG5Mm8VS#}pvt{>6%jWl> zItVhDs&sOTgYWb0H?R8;m`Fb;S0=rvVym54>sDv3xN$9^3v8`HF?GL}akY2()5|by zpSt-T{>HD#+7Q5Nw%FF(sWe)rqi;wiJW_7P5G%Z8ukCqj!#p2kdn%T|M^?X&u>%X* zT<#ZAro{Z(;GW8&U_2XQDg`hJ8c}1Kh=FRu2%T(A2EIeLRtj3`gw$Q6( zQkE;-5JPY*fuOZq#p+|Fft^kzKCB+Sn$_ z_nv0t?zX#7bWqTi*Lv^>$7{O*<7_z5Qj`~&@T6c=!HsFJetN=#%iXGlR%5YIdzeZy zwU#sC@`ZN6nnw~djgP!+N7-uP&~NM0dDJ47ol5zTHyToeH9&%(wcTTDx!#m-}5B-ttb z1N-E$HAwL{1>FN_>xM>KS)(!B{GVdN2qKkDh7-e_Qpt`tHUVV5CE;NYs+R_OI`M=I z)D_^_$`%=$R=(*8sAtMvW`zF(6A^!4(7hVc0lUP@|Q7Cld z7C?^F0|+vTiz3;zmb?X_pWs?9nk1as_DYLWTR7-KVQ^X!EOtU$2sU)r-u(S0Mi^myL)>-?mp-gQ?cPk&viM1KtXCTC#=2E#+u5m;XyIyeF&4^BI`Ze zR75NAJ!4BL4k|%Vc^TtAkDx<+c~emKDjeK#T6{c0&tzf|Sh5&RJ0Edz@0;Ug)EGcQ z5mwGFe@(k4W6vme6za12D^LQX)cQ^ACy#d73#Z_Ftxmj6P1d&(o>tk4x=JV=DJu=| z{9$WNd1dL;QhTa#CzZ&pYmo~;`=xBcemE5@eSmXF{P64h<}6z3f+!a@6CH6&X#Xv9 zB#D@@&NX9BKd?BziMIq@73u1L&ypl~H_^IZQhvUmZ|4e>9KorPeb)hTtiF$3eh|qY zI?Un=4MFv8C}H=wJ2JSlch|)@4>JDa$8iE>k8&UIFjUnDv-*{h)YziIV;ORM zM*EyP3*8jeYC4Lec#}HY6BXt^^$(Nw3k${EbT1U)R4j2rrK}vU^h(Sj04oY)J9mw^ zKr4XULn@y70+no|H^z4FRa3huCi5_+q5Vu#1`|lcza)9KRLK!?sSY-JcX2V3meUY6 z4V~G-{Jn;BejcwZ0vt{~V@^I{qWCeK(~G9o1@TAIoKDAD3Hh znpf^X6VohVJ)!C=jMTa9;8y}(JKm7}YyqiefrwAqSpJ!1hBS%SP8qcpQJpP1iy^Hi zLx-f)Ig!$Z4E>%tN@xS3;N3CDuvU}dqkYpX@ivgM6qi*ho~MiBrLgGZ$=7K;NPAwk z{gR)B(a1&1dvb`QCc~9ZftNOW+I}n!w4D_|U}w-as;|qCz2J-ZQ9F3H9I>;A{B633 zfmS4CrcZgdhqw$Xjf>sn8@3mm4;d{d=LLq=g#E0Puc&KLf#H}LSRfgXu{t2>y226u ztiLQ2_`Uj^;7WfHu5%9FwJRXHVq-$V=_QYsk&9++E+slSCierdB7G#yaO2D|a*(rV z9QYC(U=UDFge#aIgtgjJ5E4ame@D@9XuK!At+ZIE;umJ}?DUmm-}n67Y3(E1CYV%( z&TsIdcvG=TKEKAR!x`%Ch^k%efv=DDU6AC}Te47%argb;-O|sy4Uki|%fE@`@UQ=7R zGtNL?Z;wrvpmiD8ux$t*LDEL}@IsSM)5t$7$bwF<)URJ^PqP?@x^aSLkk71a=tTX< zsRpSP?O>ab?xDK`A0*YvDc&u0OeCp0;Q5jc;;62JG-)_^wnx((qC 
zYF?6djI>tC&ugxegEF0pP=O8x*N~reTcaKwwhODx{)bCGYAvuE_5LAH+!#Nj$gL`P z;)1{9{(1fN+TCu?(HG(3*~1dO+M=Y0o)impsK^9H!_?_@6YC7z1lXemxh4#e--mna zOe{g@m&JM{H(*)?w>X;u85NHue)Rk6Z(3en$^9A_!ah6JXkmO!jay>$H&I#hN`{oo!(U}$U?R|`fAjQFl9h&i+rEnTP%bC+8t zT($zMO;L}b`E?_MVUXAOh`=bM^{*eogG|dqeUkJ>Exkn^>)lab5L#nt=D%Oc;*2i+ zJ_>b+T`c%2b!cr&1bnIT)}^<>>COiuwYgL-CdDlsOcSPcutZ7=Co>0bAh!xpL$ig2os>l?k(i*Kf!;UW3e<^b8;vGwpt*Tb;VK#WG-_dti^Pos z@HbTN-B651y8YLfSc;}Sm+ej z1SV3(LU1teTEZ>yd#x5QXB(ZhUK6%h%&xB)9h@yOIUEaU+I`DR<4Rtme9V}Cue+B| z0IY5qOUyb~=4IRdbzRpG`6n2fo(4G>e(M5yUMs1YD(uMxL8_y|#h>||)O}YAsfE>z zk+fy&w~N91oP=`u2bf1nTl2gmYDRavV3=$k=S)S>Z?6_I^1O|%HQSgPihnv`exuDf z_1C%GJ$=#2(y3>bnn1SM{Vx6_1Cxqwjx{zvOI9YupNGn7o|j4wtIT09uUeGRV>N-c znPWCjQ%V$(UuTMY4mCgGT48ZsV(a_=$JblOMV-Fi!!`nv1|3Q&B_Ps_h;)N=i7+57 z-NT}Ubc3`?Nr`}TDKSHeG)Osg*AN5Dz;lnTyYA=reLa6qzVEp5T<7Ro4QAs=Y=B{I zE|~%KCq;xIZWW-O{3Q56Yml{B6@Y!QXKWeDz*=7atCmb|`eqEDiwcR}$DAlyi44{% z9_DLtehze-oQk=wTXoC9TIlOGUmg1@gz@ap6i+X&fPNpavSx20uQyO|x>ylkliLQh z8%O;NK2?&67cy4T*QumOT(wJ+GA6^EdVBe%i}3StHbiA0Q<70+`su_I^Uo2!L#JyJ z%8LQ)D53j@JH|-_Z*TH@$IDyse9*hA%@AOsBx6Z2mM7d$+2G|6H_h28w0@t{BqB-Z zsa0jS=zVVXfXu2g<8L2~%<>JYxPN6{EYiN)4^a={p^fd^7rj}Dd0Z4ncy*HOE=c`a zfq6%%C>mN9qHu=S`4_0{!_xqQtbN;PC%%Gkdssl?(b1f1;@g3z&;u}od$)a{_t+Y zu-B8^OH3uPzMCRef$$DiIH6MP288_SA-4Aex4IhxAMs5_%Qd>&Uq3V&x-&?jKDnJR6@?;anp`z)(ezp^#Q zE&p4uZqnYuHF{@^N|l4j3HM*WG9JshHcQ{ekN6j=qx;R0JkC>?sPOgEUB7*MFVJ3I zbU{1sY?3Eam(Wo^`usG0JnH6guh&b5$PP1~rf4kQ+l_n|254patyDQDi`PZE^jsrR z2&$cu@-WQtjJ1yEg1MmHCoxIa!g7+ME65QRmWdo;1WHsCcpJG?v5a5-M*9ugx${Eq zR$&IvYFb6o`mgd{x$f`VI-uLW0hb*%V;1eIXMnw~aBt!25ulBV^S_M}D5-JvTuY8k zK!8cE0=^%t4cjrjW4;ZS*lu?OmtHJ}mbO2n^?Wvv&T7G+Fp{BOkRL&hCnk|!5w_d4 zI@HR!zE>r3Q>?K#)|X#3Ja#x;Dy*Y%pDv|SXYtnH5nIEqm-_xht7D`i-CgFaW{82& zFk#xptcc6AhsZn}IWhJHUx*r7%V_7mgMeScAdud(F`h%e80hiPYNn7EUTNmnDl_AD zFsDL10eK$Lb$DT``(-{L9L8Y3B{q5C)-j2)fXF8Y_Ny|LNpS{D^8jf{=XZ1W?)~XX zvt;WuiOZkUZYR^HIzO+M0FZyg4^lr;(G1vyH(Q06W+0VK50{3Me1wEY7~bcLn%>;u zKn-QOT6LM3d>@*ko&Kun#=txcQV#B%wliQM#JVXHIMqYQtjC>*)9=xcn>NvxGb_tC 
zQHfrX10Y-Oeh0)_qV;$w-HEL`K7JcSiWjw{|q`S3sUqyW1RTeDQFRFoyQ_FD^eQa)E{$}rlpFFzr^>te&1}&J zkz>$o(HHU>BCg%*N;L>VBRKDyF~1I(eFpB=>(fy4F@WABdR}jNDiHILI3gR&k&ZPA zDNTuw)r?mAl^895_!ebJOv3wfK{k@Y%}8HCq&^-PX*mIFLLt^nqL%{YK{bna)-N$u z%gi2#-d)0;)pKUyijtOLEIZC^Tu+-!;=4PgzSA=SQIMA~r?Kv^Y&eNpPk3`7Rl-;r z5zt;_bUtC1rs8geuGJ=sA2)K;5e-BGW@}O8x;bQ=uqP6+n5XCs=<2R0o*Tov71sjp$_MqHZjCAY@+0q_ z4Z>VsKG269FPpNyjhP5(G{LUuz$CYSvLP27doWHhL$zTho#Mtze36pT*x*u>Mg+|R zcYr}XQ;poc7s)oqP= zITad;=gA|WoQskjF1Uq4W4Zm1f(NaS%6Z}Cmfq}rd;1;S3PGNAsv@=bHF6t_T)6t& zH7c*!(yi!zSS_3@kGt<2RFhs^xdeGvSW~>ejq%}Hrvua#vA!=C4%}=9w|5iYX-pB) zd{3TMzY?XUw=W;=zwvn3U_BsHu^#+{pW z6R?8)OE1sLFDva5kzsu1%Wr89XPo**8z@uV0W(A_C`)Hy2(7fI zyEjsICp`7MNm|u)DA~9j>-vm&#>Ipp zl`7wSI{YeAj&z+xoB4EoUoqImxwQ<71ROFUXN>KblI2td+AySm9)o7aEqQ~{df#Sn z#>N0s4nSj}+QRt3JnEHU8b6zT2Kd-O6Y)5WA0y#q!B&-rp8@*1>@o-v4;100J7G;b zH(+1bXBA%9%jHseE(^Qb4)O7HNG}9>#(UC^zn(BFA>r!yO7Fxv!H?>qIP@Uj9~AG_2FkS-r_Wv;U}mjx&S%L?f2^3xzz5y)fPqBrC@GvLYJT7#F$D_UtjgI>n_VxCw;m7{YeSv z!NqZjLq))d*W2q~rd3anJ@B5^qRGcM$63VnMdF;+GhUFNx;Mm^#vi-b+_G(^=!L#i= zz2iORx<@&I&e*sg&NseaLy*AQBxDVe8UGN~oklajTU z_vQ1zt-Og5>yY5uI6BzeyoEM~9YRqK$Id*B=yxd;UAnmE%2#MEkqW-MOnr6M?Jzo; z)N15I10mpVatGtBh8ZM8cJfF>7(b5NCZT8>&t7{L&!S)rGC}HnM}F4BcY=osp}=_( zzA4I_xVO+$PX)z%{=q!+{QRm4T(UJm20liUtVfgSX_J_XxQiRRb%#bW?eEdgBHG&* z{m}jnL&aBj#>(@{pl&on9hF*nr1JFiJWPV-iW{9b7knC=G&+?H4RWU6Xq9(wV`>kq z7jI!dI9L_VP1?vTOgS5jcMy8F%vZ~RB<($%(xyQ!G9MS|`L+*paB@B-@8w1pEiI#`2@>sw3a*1y z(P(z~e?VihH&XMSblw{)Z~Art#bs2gpbU5-!>`c`vjXl~HNaB#5(g^BaW8Rr&XUN9 zr(^SI{D2$Bd95J|zYO!VxK}9V+2^#kxo2+~zT%7sS0wnbCJg9LpdS%r(-b}kwdR7} zb@nmwy?*MB4TiVyYN$g2`hS9hOaMfjicA66T}N^(x#N5YUu+c6V?PxqUhj zFr$-H9BnT@sn@UQ;RpxELhByLBj>GN;yL|#((CaUv@zS>#zIwin8Fn$ZQ29*X5BIVFeixv9P1f5B%oM2WmERYJZZ0q4D0#m{IZW2h z?~LNR_ebf$vib1;mw`??s*98Nf{9Gi$-=B}U@-H|($p)RJ#l~#lmKGv z)4m8j2ma^RD7qGVB&lCJJlz?Ua(m4zDiT-0ocWjS~`yKWNQsW0-PLy{x za;tN7m!4?wwKQ}}m>(=}OV>qFYUCftd+RwQP{8&zjE&&ldn;NjXk!oL#d0x=&a{<>CSB zx?F?T-r(#dBI@=bONaM6Q-4gkUW5G}kNZX1h9joZej${*x54b)+Z%V=!EZ)m=5)8C 
zh@f|{LZJx^Ah4GvS;sTjAcKn_VuFs!CYfc&(L{OFaa1i-Xw3rJqT_7t9vScu*I^lD z$Gfk6-57U_efP8utNe}y9S=4kC$o^biVTG#|&Evnh+cDAoNpIrK-eY7KMNHQ%xGK3#-m-ufkpkI?( z;IEen+3P0W;>nbc>lwL!$N$rE&7_l$P462I24RlG8o8#NydcH>&&*W!Zz^{*=Hvy_ z9K6TpW8~=IIxlOkc+!87Z$VpqAa*TTv#?b{c&%N24;EBp#6?9lX}IB77{BG*xzW|x z>0}Ta=6H0n+H|5A($IumcL;4d#cl#>h|`lrzY|$w&-v>~<6(oKSdO zWeJ5nh;$~V`2gPB%z^r7Pl^%=E*ygmSpX7A#*Ips@RN7NLW3Cmq<`_U{jndBLK)ww zc`^+ZVma_?yVkGb^I0i9{#H-nojVQY&VXNDb98LuJH%l-s?>Kh8LEF8ETKk3e90Xb498G_y_W(V1}u`|Rk{loEY z14G`<6c`;-RC4;XOPkCC2BD%cS1AnVGA`wmJag(Bk#pOw;CFzIu>plIbB)JCS7G~3 z#(6Sy+R^F3;SG?J?I?u}Fi3y(^+_HCE1$5h&wpI>B<+EBqKRJ89?+yPX^4vDOifH= z-K<9uU!)4meEO5%kWR#_`PpYj-S4053Nv_K^9?P*>$=h$03%~Ezn^63ylfQfl2#@TC2+yELN&%>QJ+<>9>fzYi&k}{Gp*cWt0QNP1?zy|+*(Zv0lPs@j? zSpPag7P|$P<&=yfk(gDL*(E;Xg#8YGZ-TJeJ#KwN-+<}vWd1Pd+%j=}w9&AVMEaN| zLIjIFnAYjiAIU$3+h_%BwSWN!y&Dl!N4v{Xk}^ok{*R_oR(-d!TTNWdcM~i;fhkHO z`}u!2swI})3m3Rx5V3OG3_d=7!NgeY+8fE2kH>a`8DQ)Nm9{Ym zdhhUa#nkvsE5v~eJYR9Fg@CpZM;eBs|1zf))$y=2HdzujO=DI~W!*l1Oy$JW-97bk zf+K95WFP0eE7z!X!~-(vn|iqy8C$1d$LmjX>Q!cXMb;@?R-R)HBU zHJeX$XjDJvfI)AQTZXsnPdNAH?~N;Zm2zHK>WE{@0n}-+si_>9Km{b6S|IdXL^HS; zTQlL{0l>8E&x`SsMvHgp^5cDDtyekRtLrt-T=z&}UiIRblG08Qm+8HGL75waVV);j z!H{Z~m5L9lrzX_MU~&WVUA>oDy1MLu2~W*W%J_px(-brr(69kEfAj4j0Lo80neyK3 zD1&x&Bihk*KqDn<>v*NG>lV)1u`JpsD9DLf&cXT9GOuWGDlI}IZvo6 z_sGHx{2Mo3%LM0kAE}_U3dIu5+a(+Qgf|xka|G3zNuHc+OxAV9-_|srZuGq)O54yixdR#RBMO$I~lNsPn z1f(+#1<*ZbpxDy93A97BXZQ7zk|BN)fj@J=*^ua8&jhyn7vE;HkTy!!ZSDJ8YXS%t zwy4=*p*z`L(d*{c8NNl}BTuq)XNj?t1K+NkM}0@EmA@D%BZM>85M2AsKWV0wEe% z0WLW2>dBtTsn#xp>u5q8Uat$!Sk4pO!!ayAZYGg@D9ppm@MsFqN}3NB=@szv%+U_x z;Ri31Mn}H{6sLeR7e})!M)qsU8MkY{DOCYHjSW)-ZHTthUJl-<)E3TzPWxd;{BFk_ zj5M88=(Z4hqlm>5r#Jmhy)}>Yx;(e3wZ|;4!%iB1x{o)@h~cj_XI9~2*9^eqC16c@ z_>+|gM`gnI+vCTS7Tsv>&6p2fpcvLLY_Ih@J@6Bxan)_h__|bXaiEts%aB-+bUDjY z#!7UT{O8Ql)tkST;qSxs%fa5ph{f;e=P-yYy<5A{BZwpWPx?^rws!8@QPbC5sbsF< zK-ceesKo5FB%nD3IFc^^O=ZgRi~8=@Gb0ye6B*zmxa&31=xcDYlhpKj=j)x_H9rFy z_fgS}8EtKPQ>%fJRzWB$5`-4fg2$05eHdR*(B_s#?X_qd32pCx|t@uQ- 
zx-weI^nZT%Z`Me_6bMMMZg=(G!28_J5-tav0@$1Ro$Ab3%q<=j_ z2+8X==E4G=*twS^5?iCmBbtaQAbV%h+}x}*w)nX7pM&+k5ejZFJJ94zU~t@Sxy6w` z{#Dj?KnuF)?8dOr#^>efp6na%`+Fz-f0>xy@c{0!XKxXH+(NNpi)Kiu{6yW>qLYlh zI#^JfEhLb~MjpIGC|zj==-;K3_rEB2e;>7dNZ1n(t>ZDeN}VGduo>UB!s z4v{RF`ut_F|GrT_Kd6D$irzvAjQ3KoAN?QO`_B$hFykh=i^jb4F*O|X6}^i8|9_Q{ z47Se+NkowoG~EK?1-LmY`y`-*5Ch1L>qyC|SNOzn4Bx6q#P6g1b6I}#Mc>B29+S=2 z(;}kO(ARk_r_SLJuG$~;*bu6jaVqIdVLh66p($%)CeL0R0yUhRTqsd{;#K%8PUhwG zc(3=}7O+}>FEnnOev>;~gF$(YLHVv9_@PipC#weF8T&gCw9cj{p}KFVU2L@Jn9Qo2 z+Br;Dm*X2u+z<4ygx!}5ZvUPF%7$S9#OF5?)c>*ruYrov+^>p~{E7NeOeZ(y5(ynE z;SC-EhTy=!u4QKmaqe>SF@lbz5fkjBFrP+bhS)kEogNHHMt2+IaqIs+pocDV6=99Q zSgLI_92nOC(z$FP2(^>NfR*!a+NRAlrLRM0?J{Xeq9ao8%I7Mjno8IY7bP3k!CKZa z+xQ`6Hk)VjbxJb4(xOLuJnE;R84Y;E@`gU=0P#k^-&Xy<=b9t6TY{}G7y+abW2gnJ zU%%+3c}>yvMDOSC07eSsRNgM1zd6S@-A-I-1m*qAx+SobK&0wOZ*ffLGLmAG^Gd|x zKV<0|Zja@Wi^}_xey3Pihy>eq9{&kDRt`!5t2unBRZfJZWT|EEa80QK1int&D+4(F zE>z8{NV>3uGnqj4DB+S0+`i!->T`TPRWYm67dpy^;do3r>%kmZen^|03rvUuF_9QdZ73D zuTJDO*xBw4?;rkn6AHEO-?NW>vWl}EuT%y+#mjCIN-bhwo2yevO4>LlIEXD4Zk_i~204!wDQxs)YwNkya1m7wHE3e~ z{thtBEQ21(*F?-#`|QU~0V?<8r>ZX(eYln{di3-YGy$&TE}Yt--SE zC-&WaiLqF}%B{`^=n&!vk4}qwM@pt2++wPeTWpuxb9W7AnsNscAId->0wI*>lzD}a z@SqyPmzXg-`d!FG|3Dr`;{z`ZI5r3D+>gVbr_D*|b?Qdd@QhPBxnNrr4a7u|2Ve#KG0k+_F87~T*=Q1Pwl(AXE<5wic@{e@ZJ#CtF)!M zNE2wH#`jh487`9SmPSWjYDDgis3#b|Uk*V`#p5w$X)61ND8}HCLxI4Bg zkK{X3Y_ysN+fLLcIC7eD-aKEqKEae)X`eKCOaMjBhR59N?lk@++)oP3vMR;1bm`O@C6?~eA_7|Noz~{4u4hUiRN8lw z%c&6LaK3zR5?AF$pCRh&S@?0RVBOLGtUC}8Ihb&c%8EbkP=#qItw_$)T!pC&d?e0u zofSBL6Qibq`EFBkJ=F~`kqh+P!PL4e-vVU4aX58JiNm?834M6-Ay!vem7-Y`6W=#^ zA8c@e>?#3BCUx|r3ML4FQc;-L{-{~xcq!1T^jt``oCtknev_4zRiP`9YaSRQ_}Se2 zYd3Bblv#EU)HquTd+ucWfYt>r9qnjuj|L>AYMp@E$z&$*(DRsJe5owab9sGBLPS*ewkme={bss74&%ZQ#a>#bPf`Wl3C_vrxV_MW1w&UoYnt%BCf$wib24Z0G_Z)eZ^Q=#TuAT@j-BQ;gJ0uVZaelk#U3OCtPU5a z&Mho-=4Ha}0C9DviAZ3zy3&N5C+rtFO6qTc3(luuG~R(DmIXa)(E#1n%7^R^Rp4-V zC&;gdW`V3`oJM4Gi4v8V%WKY$=JLpyvw}uPjzApHl{AyjH^`H()g2$t8`Jq}(8m0! 
z?nsDV`v1~fuiP>`2aSM~7jeep(Rcu(J;kCQ; zmTw*`w4FrMuP9{|r5jwgho^w> z5;5$$@k^~`kVo=%zSROs2)|kp^`X=8k=GFj0-?L3jw@J)$x=n^yEdUC3dL5!G0nme zJGGp8_EqWckjVA$9KYl2OUH9S=cOAFo^@KZ~Ce>e>VfhTIsltlBBUG}dX8FshG0V3wlwGlHWQ@lyfY1(% zL-8fH&ow-sw~|+ppO?^kA+$r!_8jL4fJG9p8Ft~AR|`r>Pq$M@)+*G-sap&sU5q+= z0(X8Lg!mb;xvv(3S3U=raAf7uQu)f!w4s2~t{Svd8yUP<3Nyzl12K#J^>MaoA5;W+ zyb6FCcn*&=3;Ag0Kf6d31R8^`b?KuS$^4cn%{;emHK$$TC7^ni&QMxd&`h#(mG|Sz zgD{gbAd@**=<;fr26O|i1EHIJwGpY^gEp%j-w23*un&Eds45}F+wyQdBkL9fYjn?a z(R!hmKJ%)HDLfX2wV_afy;6;?w$5| zBd)$T|C||M{CpB#^@M9EkhbNu`{$H(yt^e;dl#**KqfC9tc{iP0iNuT$Vlz^YTXim za<_cMI$7Q;QJCUm4uy7PhTPXOpgtY37Mcg`^?w=45BK1p zbjOEE4$nJAS0&*;wY?E-6;a@`;@Y$PeYovPv7&J z4NZ?yIKB;{1n03$J?wTJClu$ixP6?RP(8lnlUeGK&ALYniZ1KxaZ@_jh_y4c9NV+( zX3Lp|{%Yy>JPVR925?URomoo6nWSC5#6|=eLb|Trl2(ybCpx*LJTJP#8$BvN)cUe? zmo2g$fp1>P{L`d*Z+*dQAP{lM-vcMi*kQHB=8^{5R95o9h}3M&17I?PXdIfbmB~g) zv9a&?Hg~Ii0Q>rn=0L(7P%ABf(|GWVhHR&p_+cx;>BaSXW|=YH+9-0`NE2OGhZ8=1 zx|i_weIUc|OS?yLF$JvH`wmlvrnrT?eEHhMUvI&jmX_n4ZD+wZT=ZEeYT|vl9(al1 z(ZdVV{4V;}P`UZ|ytcvOPj8=scyh+e@XYMY^?A@32JXxj65bUe!`EK@P4E&hsZJM_J@)Cu zIPs!rb;=xC-}8o>{S^q#oi}aYGeYP>^-1Xq&d2b+Q(y9P7fZjCULET*L#W~cZATW* zD9u{k9f>^h%McThy_M~Kp9EiO&RKikec}dxQrH7=ebq+f|_(S;9C z`U6zyy*J?md^y=&QVPKVK?SGl1K-e>9xn=cg_6uJol1n5R5Tb}xtoZwPqO6PF1!zy z2E>XCit)vcO`PJdc%tIw@jyK-(LEfEU69bDdfKY1{nr-iUZe>SiWak&1A453mSe%5 z;>Cm|THm*i6$onEbOTc>t~xM&_I?KX8ojH5-&$S=qiME3pW?zxe#@`8IFRh$EV9HL z+Mr+W*|{TSIYvZt-y^}7M$rB-%Bypuyvbg^?6`mc^f2ylkWNNy+bBCZmDQuZigG81 z{h2dr>7vDVze#Cm>%perJB51Xxz-Z?kCoU!!hsfbGw}pS6ONach%}TrzkQzROyDTU zPI`!&Hv9c2hUCyzoliNzcI+o!^SZ|TK@n51xg@*4H)Vd28G9~y*|KL1j3eNHz5o6YNNy8h!~MpquVER~o*w7B^bu=6 zYh}N4Nx-s+0#UM7E(1N2R4>DJuxIIj2T?T>eGyxrmm0W5tz+}O1lRZGmx_|y1z{H(HQnV{U3U|vxAr87pZ~ym8bG8WMaj(^3Pywz~ ztF`R7g6bbht^%k$aA3tkASTbrf0Z5oSwArF$q8_0O|h3Dq5oZzrZWUh{R)sLJ%a<) z9P_RzNlyW4^N-Ev`Ky^nEvKOySM&bo32!E-p&bDy@gIvzKG)o zz#{uZNgTab1@rU@|M?6kGm^qoSnA!z@}#Fg;gubj=MvCE64xly<~r<)f@Oc6_8=a( z8)bx1KO!S&irq$EjZ(x0?fGU&iX&VDf`Sx4iv8(X-$M;e%^cAB;T`}rRCeFv{e%xj 
zbvFzyo#tg{zlU!7n44$o%YyK`+jDiOkd2p@mz)9+H0>XV-?v{9W%2YY)~|?_@I8_P zLh;!kqk=U-Na^Xh#l83A;Sn^)*pn53XY6}40{bCsv@^jYwj&?Xj~K?k91yxm?Mf4IRp2(Rf1g<6dt8ar#aZF4cAO1LDFG2KF2fVp1l^1| zQ266ujT0+1Rd|ot(3i~=Zmh2`+2mnklLh@jI~?bGN~GPvBG!MXP^a)9(DjKCX#Pnw zHN84{X}*?ff|Ehm4tWcN(2nlsUnVV zX3~-~xu3{vR6nTQ9!8avsm2NL52?Qr$L4a|FelCu=KaVDA78(afyHCa<9o{E!Pl?CaP8jCuPw7cQLAHbbws=o z!im~OFr@BW&m1T=sOolo7#&H;`>^L2h=e7t+K-jx;4Gj8oayhoiK9U-0tX;`9L^`{3f0KPY0{mx7n_sCS@(c zH;02cu;8ACkb*lN*I4lJ3k(e00gHTGzuBttdLwC#=HTl{ztATwUZiD&)#JzUo$aXI zlaE&(TpS zx@aJ7H|wuk*I;eSqyzfoSA5L0PqM<3M2PnGT%wqPM^T=HK##TNm!nmpS0Vq;WG(e8|HUb!? zx`0fa?i$vVPVDyYlgDxuwxQs8K7DNQ_CW9V_LsO~XUUbW7n7GucXwSpk%1K5i#Mwn z{V+i9JXcwB7V%b$vX_a;9yBc_w@=^*vsdqC5m8CrhT>2(Fn!7b6%XXs zfY}2hZ|o-fdsyUVzGhy5E2DP%dnwtOC#0~9smzjQ59RW?Ocxvbb z4fJw?P{ib-gPVzlMl48(Wc~Nhsq=_Z%zr&w=L584y{VzN$s;2}!!)HdA$90J(2yLy z2BXv@powxU1)^96Go=^>CX8CpD6q>~v6OPR1MJh{&HnUSl3Wl8po<_Adr0S|qAeCkzK3qanU4@gKu zVW7^(=YxKZSdMnZRX6;2^gqdgCuU`m30HICaFKL$EV#Ir z%x^UGWI{|rlx$nTmBYDYS3QcEa9f#fev58O3S_X|mt$^#C@l#dd6^{_$AX2U^- zQl`YD;WQ{FuyAlFVu1s;y)M{Fh{ zfyV*BjZ(ujPSR0cIsOWh<`xdw^6tv)g@=4I6|R?%T|7;kz*`i=LP;}@4>onml$t2O zV}2^ml8s7Kl{kEa4+jpp3nZa)z`zRTnbf%{Cm{FN#xz63fu4^X7v>|Ehh2Nm>(`W? 
z0KGV5cKLxh3ZUTR?YwdE0|p3f@hWM33{<$^LcbGA` zD(Nd4m4w^S^pkKBZsw4t#=FRoh&u&t$BwgNK z8@W?g)J!s1sH56;R?k6~hnbe%ro+N6#ov>6QsjO{-bmT`w|M@+SEPz)R$qK<*lKv* z_FI798UK$%RF?ZNU>Zc+OIqJd4MoINGF|H>h~V8Vr|kIH!(P7)S6^IX3}Sxp*0gP9 zsIce9TiP4x*1Q&(qDTsQ`VY+1Df-XOqWcwjItY2{f3Rb=8lxxksz}TzpL&W{rh^CW z0tcv^)SPT(@6A{cJqhXd3(`l6PcTHKHK02okpAa7IFUa`;xR^9557lg^}lW5?DS&r z_;EG?H02o-j6q0Olw$s-CZKBXM@3OtNj6BMZMd@sLA$OH4nN-M;u)fR@hleLo0f|j ze(H9vNPd=l{q|;%y@%;_7`tZBt7G(RlvWKFwaXKw&$w=iys!gH!<2E?)Ti@Q-Z-Ok zhlyUq<$bJAyG846d%K+H$>HLVmWLU zhmpNy9>x9P(~B3FX*x|xjZKoQ2H(frv+8g42`|&LjT@*R{hG*SkTOtO+?Z{iI9Gua zas(-}qo*y`?9lvsj|>xnf9^NPQUihO*j`Xf%)B(a`?J{{_OLz(*3%!4BLDYo50REJ zFp)x)EMi=iNN8laoq;UYKwz^Gds|cJ#!iI z24KUxR27?W&1%Da({%lP-nxIYVIbxL;)ZM!Mt*EjvabrL>-_PW8v5evG-!+$qRYD= zs=7&~AHx3gJ5JX?nCG)T(@T-|2taE7{fjsM%#8p2r5BITRz=BP)b0Q@iQ19+V^ssF z{z!xYRSPZ={`Yh2n9vRhw`G<;b|IGV|4D-XeLr|2;e@U4(99d2BogR16W~NT^@<iZXVk* zf+XpmiUN4>gtD{|<4&5TZVA<7*su(J_s$=ybPbks4F(8X#4SwpIwT@ol^`Jc`*)qd z;d5QAuslGX_TO9D%8LoE2zU)5?|;9(ndkF-eS38wXdfhZNe~%_{pZzqT;%wm$Z8B= zQa#NN^vW?piGS`Xlo7)?dG|vGo8$<<-%tT9!?o`fiohFZJsbV_Hf1^5rKMVT-XFvA8252s1^>f29Ln>Om$GE7N*>|nL!FrT}sDkga-^M~~U(gy@hDFQaN z*PTJ&5ra!Bqsq(6q0IsKtN=uNH!u^@Ujkr3Ipi=9SZOue0*z+wUO}5-r5zMX%ZL7mXs%m9B~6=3!gx#MA6jrZ5IXzw3a>jat_ylrZD!9w1dn6t!}2UUca$91s5Ur-QXWn=3C5QYuDN#lOP77ybruX z7l`Qc;5S>*ey)HiP z1|Xsi@KVB~*4M8*#M?Unu7p77k2nlizml!-I`@(h8AebOW)Dv1w~VX@g&Bnm$&>Zz zCXfpiAM_%e8?4*>K9Yd#{+(MKcJd{3@#~S)0t)i?<413WeFbDUj8akSmOO-& z1WjNj&DjfJka0nPi%-L3oqHE3UWGwHvdd1GyM1= zelK|Y03OA#c?)wFHU(Nx2DbH(x~+1HGhXB{JQDi&1P&dML{c_pX0QaUp*Upf@9zS{ z2xqUu^pC)iZ8(@R*f8?iPH{u#L9=HLo&vtIddd?wEs^pZ8eJ-iB_Gq&ML) z`lbQN>{omZ&&pHQEhMuqWeSLk_zF^gKqBJ!$;@=E5mw+Px)TL=! 
zE7d>$=RffE(Fvy-rb0;dMm`)7<&x8;9--D5{&UQXuMf`J_(DtF4%lA^#&T&&Wufx?TYXAt*# zoUlAq?I}*V2dP$U1h!Aj+|w??l1D!8%|#UE86)gy7^wiiKrHI?;G?Jrgd8V ziDnI-iryt=C)xddu7Uc98hWhDt6~>5SSt7}r+z+RQdZ8Bu!&n;ozHv2=i*7FDgvcT ze_`f_KP#Uq1pA5lDkx9(@VP9h$tx+vudUe%Br<0p&znCV7p6OweinW!hgiUDL) z;SAjkdU`Of{|sr_vop~Dv#^%rtDBeTdp!pCGz1W_5|SAe)CGO?I!O8&F*}Mw&)`-^ z0Gbo;k}BvxADBs@cCNu{fK}kaXD|?uOTF`B6PFZki@bD9z3ELzKyYWSqE86_G*>lq zLkOE}nV~*_2&bAII0Aaope{NvyZF*?>57RS&+a!g!F0%{PoD_JgGNVnYuwgTtla`n z)lW9J{v~S>teKYSwP} zdzkxXjb%ZB=Hj44Q-K%XumEmozNM;huXt;$7j{3KM)1S37clScqo1raTjtH+;rN?P zrzd9>!+3mw()?nT zRDFDMH6ikIfL+f?U){^H{T7CkZvc zw%-b)fQ^LHUmdm^otSUl+@`CmeT42wuX=zSQAds_9l1iYP3XsTSdXv4-fgW40oP`l zW|tCVR6Z@f)6RtaP`6Rwr&2D%REo~9-$*j9J=W!^nT!1ha41^|B|bS*XEGpuC&uG- zrzgi*pPt;Xw#b4lrn@(ToOGspL>eh*-X8tYX})u7p2x37!g)`4F>PS|RuULD91@zn zPZoRIYx!Lmcc{h%^#M8t+GuOM0@IV-0oCA4?3$%I=2}v^THQ7lK=rTShAQ=Mz~aL! zmX$I-7H3d5xODR(9(4q<&r^%2A^fWi>fG$&Jq!_#&Ilgk4hvPuB)Al9yblNVoO%RC zT1_vos%A_)H~PTx;jKmUCV%E(H*N=D+hN=Fq^3$6ZKZrun~RXs!`kj_&L~>pvT@^* zcKH?S$QYXdImiZ48V%3g{>}D6^(-0JW3Qa7J;A;R{8oCh&1L@i{p>iEP@d1W&qH6e z`(J~pLJnr8B*_IMNygp9Q6ov^6~;0gIwi%@r@ptu4*C%-v2}?RRH?p8^DXb*dm|Hc zT7r5ETA~$$6A79?cPC(;MC@AbG@nh0uS%V#%;@$Y5S5E+x>4o0pHWE_r&&h))Tyg- z-LyYGU%7X=ASP?($kwv$CC6;dTyx1-O`2M&fxmC*+VY|aL0IC1o}XUGRw^FhjkbBw zOfPU6A#WqL%uEirn;>GxGhN%1GLf~lJ*7mA=5o5_Ff&8H$slE9nSSkjmqQ9nablyZ zq>-%Gwf$Sy3ZiW@Dqr}exApj!qvJ5}dKSLhOgz*I7AjW&na(7kk#4%wp zix3AU%&SL7{Yj12nq==W1)xugtENJyv1kM&(hBaL23ZyZQ<=KM(Y%igZ@uZ?kM4fN z4@^N5!Di#Jy?9FeZm!bF92*3`>13~@1>{d78qDL3=3s0PdAOJvwYXhT;#;gN=zR;> zj$%%^O$e$4UPi5`&oUIg%*2byvTHjA%=9rv*>v+IV*%nMVp|WoX*xw#OD8Q5SkR0N z1TF7)ZW`XFk6wdD$VTvJUsl#=p2RytHOuZ40ggzf6c_}CLBAduJX^2(8k8*0Qoxqc z&i0^0>=B+Vy7)3&Wu73Wm5{7IbyL%mStP-EhBjaGu*WP){6?hcMs@${-l$nrge$Kn zypgoTHq4jQR90Nwr;iVG6#LojR~!Sh)i00>bRA?1Yed(kuZFIxO@;DA3rPSnN*(GU z_n!XQ2RjX$zC4YiolSK!p&U=eqnWu~@jbhQ)eqD%xB2{S;(N2LxG&cj%a=1?mt=XH zcASg?FpGhdhhdzHq7s_J`B5HJ%`kQoPN{kx-H(h5JtVNLms{9PKgbjh$}aZnpetuq z;}b*cPJR8an|fc5CUp8Mr5|M6>-Q?ziIQeTZoTGjS)`v 
z>r|Nii-cKeH1{!`=%*>(lLNg?lpD2+jo+H~m3-&5m}VAJExT<;H|BLo9c>*}ln1iV zip?b}hC=tjw0{ih7Tu@}In2d9*I=_|ajyiwbGfb;uxz;^gSYBE;arZ>f-0{mqgdn{ z&YtH9&(zyYbM7U)%BC8zeDB#7%U=D12gFS~1R@XnD`Su9oUQxv5>K`?C|k}b{}2Yi%cdZKRJT^U zA6Aw{=^JAH@JBTFt2_%t#KaTZZv`kOhUn_G%-R6H6!+yEF(&3{*?27IWh3m|!LZRT z6Ewtzd$T|6*}MnU!?Thyjf{e3Rj>71nMQb^iO$eUmlcWrY$Y_AvQqMs<7~Wfc~6y; z`MwW^Oty7O`kGam^sf}MGk#Q8U+Q<7qaC5}RDnHTt8e{*d1c+5g92&fzQ|XLJtLq< zV&BXIeujCGfro*8+)%R#-Ny5d6E_c0RQP@zoi@Cvf75}i&Je*l`~NZb)?ra??cew@ z0TD?hC5BW=8l(hd#(GvKcD-vWYs4xbdNx`W5?sDb0z}XrX1BAgJ)Vqp=-3=5qwGU z7f5C|>fb~7kpM>#=91qv^jb;e=_^pKOVa7bFL3lfpXSNZ?Z+2Df9v=JS2(6^%WT3=7MgphVKMeH{Kk4xj(FB=RcAIYpQ&HviTgn>`>)I8;<4dlXW zwm9Z;tW4i3HL9=g>ppeiICUN(!0Vn@=(O#`9G)ieGj4$;lRB8re>FB%r9n|*Wt!Tp z?~}L;c7m&8a^aGwGuTfVx?6OZ?>HNQ(aom_iwD_dY-b66&X4dVAKpOHK2zURTaeF* zkkjXR$Qkw-vKA#7r6@sOUoiNS1MxoOHa+TL^i8RgK~D}qN}5 z$00Y*ui>Fx0O`S=Y7qJS&egY7a+ZViy*F+}IZ+MZN?!EeU{{agpID44WU&kC(biR4jqnZ*Lj1bjoGa-bJSwJ-!ir3jji@^L zZPX@QNc%c>b2bN`3@~Q?tPWL>v7~>FX460&uB@S&rD&Y)=XQxQDV0k}I4!oFP$8U+AuB6zn5_;6ZIS?Z7g9k>`iYI&e zK{AsRuw_To9W)O?ea>!JsUH98y`&Dx9eQM#2qC86jt?T~ybRPT}dY zC$asfg7D>~+&|oIx*5{;$r~PbN?!2Xk{rrhflmJz-=YJ_){T%du*Ornu9dhM=K3Yl zn?{wd!v%8;>*}AnSy6Z{$eLiK@-p$#+wu)e=rBl8dw(&aIiOGY~yP?e2}whtIxRGgVso7)F3xnHuq&PieO@r<}p^)wi-Gu+iF}gL(UcJuLgByP7n{o ze}2b=IkVuFQ%qZAf)vk}1RGZ>G#i|)ES81)-nYm)xVaG8rhJvKu0}GLE~>;d;U@h{ zD_o{bkR{O=L^t?{#jLjVH+p>dHxqH_wT*kh#yMZJy-XOQd+kn*Cz(HMww=1iteE3X^ZmTtP}&q-@)UR{$npUz3EZrd(tEv2B{ZaB|GF_~d%`7MnZ7vx>3?Vl}^ z$G@$5h@l|l-pZ@(+vlD*7a{ga>S$z7tIAn7SnO$;QttWsJTF+&cC7Ok@@I2}qG)(a znXq+dFu5U3FeEP4j2$s4?_q>mmz&gy55|OWp`honA7mGjuiGh6x4n!`smifil3h!V zXwJsi$~CDraav@tVNgaVxm;C>7yN-dji9_@6Q$N9wrf;d{Lr5cP;x1zr|KtzTUD+d z$kYkQ86!EYUDq2d2}`xq8Kf;yOJL19S#)Hgot1Ne2TGstbm)QBoDJER)pBhxFC@#y ze)|&~?Q9lfMS}?Firr?x#0 zs+W{g-ivwa#=@YTUA^Cpb7(k`ADEXhWsy=TlAD7!$na8oUUk$hd2=RFD$Gj8kQ$WX zYIG9~8d`KpQz1*+aB(}(jLx^czOmVBXb)CcD9jn00?bodfmoJAO4;x3%!w8F8-Bp9S7q3ph)qg9jC`<^Io7F%rHq|A?08bNa#1E^f3C$UsE=J)6`|n6GG)!R#X3BHYLm8eN~9 
ztQ-rWc-4;?O^9=rg;pTsut*U-7EFplgn#?j@GjA$JO$`BPrF$UBOlg2y(P&K`s4+% zeaJUc-YCoss#qt*ID20^{Y~%bVr6a;PQ>r}?LlEVqoNkV_n&O~P=d`S9FoHUMK%V_6U);~ve;{t8Vy%_7}(Ezp=HESx$SX=H*2wx`e7|Zx5nKZYI1^N zOEZ?i-E3k#kXuYJ?0=fi{zYscsj4<2NS-P$IDa4Tk zHgg^R;i23DkJJ@n3eA@2DrGa8AI_b%@1q(j_&4iUXl!YhQz>5`R86;G?poBpccI!-XG&QexKypP>f zjlaSlmuv0$h6$t5rt@x>aVpa3JbZfFp(WboJqItXp*L|{5M2vz^HBk}!Z1G<^;#wq z2a#;wp_0cI#+&(JnrlzzvlU_I1$ptWs7=y$b3bVS?vOd;^T({jKJG?^u6L{ot_7Mi z3-jRqD^0&~4)HrI3!TFL#c9TGi1^+!;`KtDTT+i0rtoUL)L?x`ASIcC zV-?niZfH_I7keLk6Ap)O{#F!Cxgi+vtiE3G-u+mgkyEo$I_mnZ4?lU4L&{m6oHG*s z$2wk1tg7LpCh2)xtdO0&%wH&?Z&l9EtQfPOAJ%nBF-+smCT0VjC6dQ z1HPtG`kH5VRoVuzZ||JXcBa$7T3j!Z#>{bI_G0F66aSEFpsG>0lS!~Ah;G(r4VG?s zmBs#N_)JcaapH+BVK~<9@L5PUCy7GI5fDOxyE}H-Z#9#(Yo+@JgjmmQ@n%)!2uCmx zFd=oR@`_ji>5RsjjFeKQ6_LiW`a~PrW@FPfq>$qTf0=znUgo}fcVD5>34?4*?yDV2 z9L7E|<(iMY3!$5gsA&6ZEs^uD4Q3TjuEd7AGWK-F_XN|;E}|WZk$md@zm6J$p&#i5 zHJFt77Lq$8pj`Vt-HsBpG}dzFHmP{Yt`|MV7Zut$p8lUF3=;w>s3ljzaXGuMsk^VG zW=F8D;9ARzAbaUM*^<*qR~{G^xiY-c_14K2`|wUGg6N7uW~`AN^{L9~5*4@H{0by7 z`HBLwxlJMWPYIO~@F_>LJ!0zkEKX<%XER>{evhla>>oTAG(-5OfmNz40_3pXXUddM z$JyMd-Dro{qZT>^ly!3*o;)VbNH)qwZX}}O=0Es78$g=|^QO_!4JIeNWxCx%fB%7e zLID4tg6E7a5oa8VEZs!s{q3AUk|n|`W}Llmz20)o=@Fh}sT4XwH^1(tqH(`ymQEg+ z{oe6AaVe{MAj}3M2f;|K2bee-WU+=eWbdT+%-SpUJA5l%9(wLR;`GIgkf9spJ~x3{OF54Ww+7Lk{IMm=f&%wWOau*i zV6309U*EP>+oBF5+e3h&D4`TTDQLA3vA zk8$;yM1Av<7=|ZQQAMH4%V$iyg3%7ks2|=;-t7H)TJ<05Zq_vsDRixoDTW}Wf6y1i zT!)GXhMOZJiLE=`*_P}`%VQyY`;@5ZJ4(I=Gpt^6y5>L35owPD4#A4GEIIpPQfT_m z()YGM!3ULFqBqs1HEQ$pypLlM?>76e~3J=50kFM7IU9Fg>$O3(CvP1|PvFr3xj zKm$#1%+^3Ys)L9Vj4PjD-1aNUbKT83HNUntY8vgtUB<{WFhl?g7@%UY$wWMX;yEHxJ!R40IE?mVu*D3D%smmhG zbIdH#qN+hyaOd5Z)rv`Y#Ons!_M_ZaLS(SMHe)_ArJ=MSV~;mVNmkTU-%g7g;H>7F zBz9n*3pm}k|Mn(lQ2H7dUON-Uil5|D49npYn@2EhTB*_DAY=7Nekwlp9j@6yYEG{) z8xM9U6TwJxQ~6`_vMVz_BsT$eC@$e)!hiN@6`0 zTcJ$3d{#p8Omn4GNR!lef`7BM_O4QBK+V;RNUgp5!27o9jbz7X4ewS4=ZAeYA9A{CdpT|WTWkN=rxkQNt@+hu(z>cpky#k zX;)+II7^N72g@yLP^f#fqN2xn>BK$K#&4r_gPUE3Ux3ILZGG4WC(7J&>2VYAJy1sf 
zG!yHq?0?)-MNCYre)0eVMd_U!x|6{mfBkMr5(m#qwE=F!2I*7qqRSsBeF02?%O7Ac zWx=tpyZn*Cu>X+t_j8xE`ujP^f8N0U!~cg8a2|vbP~5v%YyTa4Ge#nblz4+_L-ew~ zVF!QwQ%*&h`bpTP{K7He?_I)wz4iR|e^os)iR|6VMNrb^??0ae)yv)`{pZGiRbOn& z{(11&Qs#1`z%|FBAynu-!YY zeDVt@fGxpR-hwa_Y#yL+*k9L|i6c#H<^E$5URLQ}Bb~kb{6BAFFTuS1fB7_M=l_Ai zJYKtusLQ{*!+fwd%u3_^^Zf_xR|!|({th%X2#d5zZWU>lA^h2~ZMxO$uH`)-`Pu&6p#?|)2w!dCg-n9L`jn-yOvvp5f6Fw5 zkEE)?4C$^zit3(m-0a$=k>|@oP-YF0=8Llp6TmRAN0Lo5q9f-SIfd+}$lA$FNdpeT z)O@r1$^}HZ&BuU5p(z>dYRAz@Zy(^Nne0W8T35NO_CIppoCwi~Nh8}ppI12!CGPtI zWg0Wun7OEK*&J8So|;l&k7Z|=ov(2c`8Aw7k?Nk-`U<6h?-2}CBlt?eyQ)4|yzWkT zus74y`N?*CjG3}iL4*Mny(GMv?(*2Lw69y2Yt6DR_3pU;`Bu#m8l>I{f$Rz%xzpSV zmC2>|Z!o@xquQ<42*rqob5&06do9PBtPSPzm?pW{C$ZGby6Y!IB`_cPbx^+I=gfji z0p^!5Z7YBz@9r;F4XYM$_!QVlg1k?k#9%B=xmMnr!ox_5@1@nmo&bs~taH7z5 z+Dcb^oy%%Revx84OZyf`kvKivVz48+%OTGjlLTit3LANO4rqNvV%$SC?*b=YQf5t( z8>+0WE1Aw!?kjtU@LwB1KdHLoe36=G<0VjmtGZH zI0IrS#SF1_P$$U#st&oN1ISV&AlYC=2k?#={;5xVWOkD zkI$QEPycz6wTHMqvV39_Ra|)Oa^aYiKPS`BHp-;*>evbAY80zsslyQ~Tg}pS2m%{N z8E_7V?12(WIYdT-i1MvLmj&I&$oXCUu{EGRsv2-hFvS3cA)nnv??E#y!uW3Rk*2CJzH9)oJ z5_L`YLP9z__FiO=YXmhvKhPp!z_F8juiJ{b&i z&nZ#a88S|Ecv?+loc_v4A_ium{;9xj@BGDFAsSvG`Y4Ip2(H;S#6;})9t>9ICRcN5b&$$|M=;hcSy02tBe}xb%h%v3g^#(BxPzKeNQJ~* zEZ<~fHii_*xv7;fG$qiF>ow5rOd18t~z14SXsJ zHlBns!#&OQn>e!x>$LU{a#cPy*r}*8Q3EZXLNRH!A-{V83Q#7m#OubIleV%aiT9Pc znq3M@do11FU{`mxb7#Cx7n>q8ubRi!IE|{|v3|Y|Zdc{7+j5x*rvbl`y z`~+cv37L1d2%_^RefM_x2|Ybq{(LW^BUIf!&RJI%`JwkS$%hj1Y|x$@{%a|N#W@;wc~ z`Fpc^idm*-((i<2DnRm6^F>{vn5k$lS)bb%H(A3kJTDR(D=PY$To#h)@ZDr8D~#L> z1&b~}Af!jPM9A)1+5+nkW5;ezHe>irycYW3@T=b)o6IPirGi{nC}5bU<4W0` z@NY`$dH~vr{S9(QK80;7nFjI8kSq^AdEiXUBg^z$Vw*nk0Xpc{-n(1(b)P5D)a{xr zzBOnfJ8pj*)uzfv{KCx$EVLNGj8}nsBRZj_?=)M=bW!eFftndUj0?%DvS4OCTC#lS z81a=0?NC!^XEuL25&Pu$+lvd}3J9>j+PVvCcbMz)L}3Mhi4&$&nY*!yOa85mfg(7NRa?% z9*hKS0nqgSvi#!&r}iaiwt`c|Q)y{J1qO3uT59mN08q6LnCOXy92ZSa3OhFLt;%@{UlbfV4D8zF`w>}RNW-5z^?mEuY7q> zBtt1`q}S;m=!8_6i>wA&nHUDKY@qd%7`-RtK^>OaHa>D*4BSxik$qOVUkGB)NVtY{ 
zG1$p-_%nuki)q-UgZx$oP5Oe2HL-I@K-A;O$?IMqb5%wLdt)H&>r@Oc1&OQz9R3OH z{lQy#zP+-R+W5_uE5&S>6U4?fMOkmMV7h6G#e3H&r2nH3dg~PS=(-Z$xCEu$?4l+0N zXWolH>kPj8Bqe{n1B+0xve*owL9tYOWE^E`Ykw(sUuMxtES=q(c|KyqPDDAGIqwmI zNUBWty)B8fT~9Tb88Hl~sk;jQNHN1UP%mT^2iu0f>f7w_5aVgZ+IV><_&@Lb z)zC|?^zW8j`k|Me9cX6s-wnhDDnR#KMi>5aXa8OJ|3eX2&-T)dy!=1xP^e-195m{G zEaK7y{;L*rG~l~j0O9{zq4(W&qVK_gJ*`Ud==J9-_AgvR&0>C}J9j_$@D}dsgY}qR zFNU$YuL%Nvw>h@V%NII-#BTHJ!8uahzv+`r*(Bn%{QT0w&-w)LCDiVY55r|`Nd$xsSAUNk(i zJt`KQHWt=IE{XI5G_ zOB>b;U;lnf*yg&VM-4=>(pI!@H)khhDz~P(*_x%sYnLQ7TfWHfpxiVY8P(SQz`pjp z`ZiZH2AxzkdzWyAZrH+lORsN(7CF$X%h$?iJKpA6cUY}IoiKWYRMyqylBSK7E59@O zoIGi8=U{ftgZNN|lUry6@hd=Y&d6o!lYO-i$Wz$;E<-rBj{zQtOza#LL{My zAE2`$6`|$DfGya)hp~&kwdS;^fr&?Btj2z(NU;3wEp>(hEis8m3LXeHLILvt4Cmm<%2%P_P>iK=p2_&2 z`4(CHR^-?Csqj*tBY0`<78y)HN{-Q^{nv5mm1}y@W9+ooAa4c~Dw#6lc5?!nxB;xk z<4EJif4>`%AxFa-&@p{Vo=!*YiJRfIzuLK?5Y0p#BP_#ppG30e;V&X8g}Vq5C#@2b ziNf6?J%7uRFG|-a2a~DFBW$DkrQYI%29O(3P;cVfcRoAIg{Z)UWQA$)7Q zp&{q+PK3{H@=T{8BpaEZ+m|cDUDE0%H%Cu*^l7(f>Nvg5#dPvRBx9oGXsK7wjqI!{ zMbqn2-KXNU==XxFFpH;ljGsru6g} z@wn*Ry~G5&8HTPdXlh;*TDi{=1kbOJs<&z8vacm^zuhD4kbf^V;>bQuy(;G&%Vp+R z%g~)!#WnuO>$8^&=6GbCt;u!;Rs67rz(W1$2-7QNkgs;~3xJ-}8f~tX_VLltV?!w`2@L39 zQa3Dr`POqm)z`}Nn&VXhboX^5!+rEmI;w*%Sw!fPj(UXY*Y`C~?bo(DYBICIpvy&`mAyrwAoLY>s|17*6 z<6mHsB|oCtY$1oCrhFdxO=&|bv6)Q7^|-=fk4E@Tw+#Dg20AF85q&+@ z*4L%gwpQ0GuztI#{;5|(p%(XZ_aVnF`SrU)!8>)9!{wS=^*Pf&JVoT!N!RNJ&E%4y zGh2;2a$a3MqTS?fc!B7#X@{Z)L{Ya&rP#_;*Tza!#=-0f9_bP4*g#;h&~tWA2=b8v zuTMM*-qI{LOc;GAJkGtU<@+@kyC+|+UgNi#*=?d;2F-OCtwrfpaH9S0m>UB?5>rIUT)tCFmUz0R?m1b6t?nfDt#G2Fk^`uMIh;Sa-BA zKuw8XEGa2?KCZeVs)Ox3uY6|C>M?BKjQ`_vnfRMG43kr3F?XW_LewC#Fy@8O-uL*A zR@HJ4KP#&45YRyetZE}s-Y+oeR31FixnuD91H=Igr9}&DpQ$2X}1I4o^ z_B`1Hk>-LVR@Hm<<3xO=z-C@iCDRa|j^qGqgPYG--2+-`>yU$~1`B2Cx_1h^EWb+& zZ-09&PjQ!Vhwb+=Pp4c)Am&N+o`3gzAzEHU`U3-@h+q(oMaY5uO2b;F!U&5+AnKM^ zAkza_q-(nh`n-4QKL$XbY_WRP;W95Q4$8Vs8NBuNc@VZ_xF~;VN;A2<_zJ6}{%6Qa= zPrq){O+F4CZ4#}t?~k$#FHtc7`L zHhY)sraiNL*@!{+5F`^83OoT4TEB$f$tUjW8``$@77KWySD<@Qr2;QJf4&l}8D)B+ 
zCkSHowW?>4^yWSpTSIBlpGH+oiAB_JZg^A|S+IA;P%YM_OT8KNR54o}ROq4Ro?beA3d@IKpWew{*@qQ2$9t9ox)8^Mid+FHA48M)d6QL;8p>X?(`&45J@eRt{ z1d3GOFo0;vbCW)5K+dqg_NLX(dpW9>;}G*$>NUPa9P1cJ^-qV?-KX?@!1cM!$1fxhM`siTb`eqFN6;Lk{_Ux&~y_zmhed8Eh=!F#T4 zg$`zGbT$5QLx$&JjFl83VrLljhM%=BV%6K?D?Z^r{zs$gNfeC;Z^N&n>BJsFk>wbr zg-~IG8VAE#prpSpw%^OAzKrcwX}#p;u(J)2+=EeZHP-Z)d$`a!Vb@h%ICS18T8B!P|3OL6+F{JJ$9~f!oU(i@+M$H zM6lzI%oLd{P9V3e3(;1~gc&~#X~CCt2yw)$jL7(iVJf_F@#TI&tj;GG5L@xm5lR+R zz@f^v3xx}|*(gne@L3`|gojvP7B491VR*Oi?MEOHXJk@WBb;nZI0s{ zZq{RQv-2$*qjW(bCYPslNg^|-gX zG{mPaB-Sr#J#Kc<-+hLbSY2~XLoUa5|&Ax=VV zEdF2v0U(LSgZ4MbSVimwYpCLP_OoOgM<-*RC~-DBmle_Up9qOCx9j=FkvI$SLK zfXZzx!`LScB#&2~OMoU{W>3`6;^GJwTB%J}BzS+Ic=?;rt>TkuqJOSGIJ*Edsso!|TGkd>oU+z$cz zWa>0#HuS?EnU&aZbjPf2_VVZOthMx(^|5SORf8EY$XsjJ62^3eSYClV+wu*Zor%y( z=2~)yx6=>3HVT4qj3_GXUgwk&X~;J?^VL|}09+&>azKZ1Kj5?R{;?LY+!bu>G=$(s z=yUn^96g;>L<|4U+=jt9sWvx7=T+l)WxqaS!OjAW^!zT6e8 zO7up+{JkD>zJ09>p-yOmdZnUhA(|hLx&M=}15MyN3>Pgm67{A*PHu&i9Vx~hZr)8F zn7kdOxi(bHk+;Dcp^xL5?Jb55JUO)zS&fiwVLV*I{$=H)1@Vi*QJ-u{9t7_~GANgLa`h|y zJlv0xZFbS06zyfeu0RZN}&Pd^|kGUQN2`Y@-<}nuF&F$1zO6} zx$^=BWPC|r^57hDBfjgE1a05@K83hw_YNt$sf_?zXv1pxNro z&@xF^Qg%KvAU$c-tB=CaLf!pz!T@PV0>k>dBpk*#c**$9x;Gxs;QTDZgBSYc*{>}9 z%$?^oq8V&d$TKitJ$-WaA}p|w_ervGc3n8Q_wzn16vK9BLXAvc^Hhd}7;nlF9jP~| z6z|M0>e#Kc*Fdb_6>M-KpgcbDt0@Uq{5<_M*`X<2pG)I`&zk2D@`kKbLPt}ix}oli zN7D9SGs@{P^YB9|khTX<4G&VnY;fB7=1zo#jI$YhPtYkI0G|t7d>a zmcPviGT5X!rE5t>}#Ni+~R_a zFDDDtSALBLSI$tSwArWCfzm@!Q8rdW-sZ#flNHswLWzZqT8>wKMYCB!c_36~v`*kv|! 
zGgfab_>&6Ri^MBI6dUx?3N9$jo=D#_l{UuHxKhIFkj{7sA$ETQQQSGEhPp9|EIzy} z%hkT}*92(liysYZHmX!VuyDzTudlniiKcTlNw0KY)WRnHN#xRTxK&RiLd_QZQ9LB# zUq$xrpUNChF}+C5Ro2vHiHeP~)_F^20+`;=q`NtG@x{nHi~jxbX3#siJS)QT-LD?* zKwV<<5^hK^c2OipfiESFpCzNPy3bAiejFmhuMPL3_@IO4N|-%EibkWVD>HA_-nCQC zwSK2u^4lEBv!9BvQGjGT$#vKD&!$YqMP3tgJebtW4W{Fzd3`%NeO{h|E(XIAm{O(S zc60nnCxR*O_A_H=`wx`SULtS0EX0$dC-}~tl$8H%dl&(ZFQ;pPA7G|rBYglJAm_jb zuuV+OSblA1>b<~OM9kaG)bjL}H2SHvL{GY{UF}Wr=kN#QkL>H_x&T1PzAiLQi`FmM zd>A}`f3%{+WmZ>yGl`$z=DOn{SbL1Adb4g4L{VJ7X?XW}%?pV9&#AWKH6azN*fEO_ zx=B26pUR~_zZlT$m)x~d7}7DL1Jk3sYasZYVeCU1@@aV+WC(2(A5=C~HhByAm1dEN z=93jGl{ZJuURQ64R}gMo+zKvp66s2;$6ASAGaE)HTIz6;wlhfQ>0&|Qmxu}J1_ z-8rWm_s|NbZ1r6RNU)p})rq7Vj~p(sct>xo9Xb5$y`57eoF$0P^bK3y3pt`Mb<6hE zjziapr7p^!LZ29w)*V^*RvIQTY7IlLm%sxpE$9^~#u%d&&c)X66ukXo(PwOxW}h;! zsGClMKllEu+dgh_;XVr$kzyb*mwGzIO8epdW{H zCU!$5ya+qrg|NO9G3D6_nYNp@{^5*HUo!jU+uKx#4pNbxiNC;vG(5HLcVfU7F{ierusWvIA60Ecb$qy(ebftUngs_#)Y5)62uHLuxri zwN+7Mnh^t%mJ;twCmSx8_h5++(rh05lv;C7qVbTl}1*9qv!>Dsv)DGWE* zn){4%Z>-Oz!`If`)M??4ZJI!lWl;6{-S7hRPEDX_l zxw@3!Q@-ZOopyP9O;y!koQ$HX4c+GKe2ft|N~v11-vvdhW|vdontfBnagj`v=<%4@5`=FOJp4?9z3%iifS;Zc{QIgdvwv;dZdwWBxr8nL`ywve_wORWzg@bw|9wTF z;a3mya}Xr4s((Hq+R!83n99@>%55eT-Tau+4=|xBZrsOT7eeW1CC&z;JC0ITRHl45 zA7Tk-wd5VPx0ctdXd-tzbni|&s4x#=Sxg|7W2P+#DAToXS}Ji zg8g5()PKCE1Utb043eOoUwu{V@Zq9stdw#R@w7!`XXDhV7x%s;jx0cFQLrfzheJsb}OI$jd6rp=0;mX5wO6o!^z zXtZSqwNV=3w*FLTc4TQITA8bk={Ch9Ahpu>Nlz0bS9AXso&g zkY(Neh60(`o()aav`02E2q4`c=dgYjEA`5gDIsC(Np9snK} zE>M=yOZhkd@ss>X-TI~@jddVMuKgG1b*rw91E@F@8`f7}cmXMJ6F>)8I2<>|a!$hue_2y* zkT#;Rq(&f4o%572z8eF?l_G`SCL6;S!AS&kH^ ziVf;`9aser1rzh&IO&sV42#Qhtb7O%lIz$9YI@o!g7$iAUc)c3C&DcfIA9e3z&o7I z0UOaJZJzi_*KxZ1V+ zlV??@<(v!eL$y10VoLDuao*OimbjM*a2AL75au2oN3fcjEa$769Hz>*m`zuMUS4hI z-5akd1Zam&@{$t4vp-E9zuHd!Xx;N;w<}e^WDyxAk*-uj1oN12k}qbD=*h>?1^f<+ zL%eZjw?QybsB7AfE!v_FN;N?U~M=fa6m_>bFrv3q6Pz+>$5X-A^rg8xUU>462#)W*-agv2~Lw-0%!&6JI!*c8u2YS&w;*Z4THKvso-N{nqr z--j}PH-4J~(=WUfsWbkOmiu}BTl9+jeYY10BqO0C%E@EkoMwZs`!ei3<0bm!8()Gy 
zCBIYT-)*7!q99&f=bmxTxC<+bGPWv4I2>UK4ge@OGN)XX6IYtWPP4Yvo?%r{PA+}J z`lE>-tP*=+yKSUuBQ-*~?pqo{m65B524e~Ljb8;Bd*7t-TnJtWz@dW=xQ=Mg_;@BQ z?N1$UtPl8=h($cDx7M5ZXl>?eiOi&QZ@+7QG`i_kDf;HSas{`O;+9c0ckn!r5j4vj zo`X<5a;&89V|kiNwlklAmq{8$GC|p2JO^CzL=#hFYrYDGozh>z(NzQXx3p z=G4>QMWM8v?bJ*Xd1+*cqC`slQEzUp2I-jg4uxj`lsgph(@xn?PEmgi^? zB3ggelxF19k)0_UzU$M!0~i$WwHNNbfULJhx%(xNP}Yg}pyItcTAf@ai$^?@^iZ>q zx-heyg5}MjwP?Ekm(vA+u0;*_oiYmA7i`ZXU z^kyIsCSN`UBysZS;Hpv-?OkA@#Tv0kW8u~!Lbn|>PRqYptqA9jh$J<e?17O*h% z_R8t0%gYz1;-g_Fe5ZJF*Ridk5p_7G--AX%Og- zP88~+x#pN9f@q-Toi8hsZJT%*w@r`9r*j#UBcqcB{jYfPNh537(5idA+u&p`rspKU zVEB|mG|EWf88_)lhD6@h^D-BjhFLW?iH{klpKKqAr|f^slArqMKzsU>3=;LW@LbuE z7}oG;vToyRiq@Janp<{@0b|9H=X7A2z!$qMfY;SiFMVS)PtYN4p)>KLYOTN=sbuqiB_u}!@M}91`Cj^Jmqm>lm z(nHDyPtMA@4s}QOE!A=>TcX`bEgt^9vlbDfIu!XT%va=ih0J_#(+E)QG-vnq*M=f? zd#1xJ%uWPEj{r}$SlO|1Lmg{?t|hiaDjH>}&gN`T3mUBrxsPVnc*_2CPC~UZ!&#OmL~y8|I{z6=H!gc& z8pv?N^W{LWG5ew7yWO4*FxKiq8frCq+|Rg;UHSxa4|%hQ2f^gmgtwGF9WPJaWtXc#p1s4h4$jgUzc8Z~k>A1he#Mk88n3Xx zH}Jkald}L(q?ftQ2}@35gy#3`dq?7a0mp-)E{1Z1CD`4ZN$x!vp_oSAc{)qgb6si5 z=;A6XUerPFUxPSfdvTWSW|WdBTeadQ%RXS5H{SpD{I=UD;4~eRz@jpRe2R9JK@vL8 z;+^-(A_@>nYjtc&LHR3TSm-}Wi!_$f1N#NK@qPj0S=v&2w3 zD11A}Zum1NE$g!5hg3XCDn8eoB}Y_eu9FamG=K3W$Kxdr_o{h?FPHlia?kV5TuGA#Ry18rCUH+ z#(;E*6h(*-1JYaQ354*@O<-W&Z@stHTkpO6W%_Tb(oz<*92Vs{Ca!&u)Yx>uHFX}-wPmw z3L*~*KdV;<`Uk^PWBnNOmvAdl`!i3zBN0yd0Bi8 z^^%WVxZD@_3oUV4MzI&VhwRCerj2Q7;~8{c=iJ_nIXGWk`-NZDc9T-aY1oyI=JRGU&t%p|~Web0&+ z@Y`ew*eebVej94>9UU1q3sMtlO(|LRDZb8m;c#Go+fNP+tGTN8rcp-ycZQ$%-y9vr zDMsy1%CPg{1Q08Rzx=!4Tv@4aSt!UBR%EdvOUp|@)zm{FDr`VC;vHg=thk!twWFGyM}jFB^dzb{f9xWV5&!bbb2L=V{~Ziu_Wy~ zt=-YWw}Ol+qR5$M15;WY#baW}`OpPjGC-}Q=*V4C!`lzFXdR1Pv378oGHalvKB-yc z>;m_+qNr737kEkDjs-jP2i0dvHS@kE+3(4hoHTB}EFBfCII+Cp7L0HjI1_*RN^MlhBREutk}C9sDE}$o&y#8c)-C zc&tADdApGw&o^gPQp4q=Uvpaocy#urbZJLVy@u@ya6CkG@06?IX%^YjsdurTb67eg zthM2uAueX-ygJ)$L9OC!ck7PlZ)gc?xx3$8tUot4hC^CS?G;1Z@|{Sopmh9_-J+1@ za;}3S`Kd>kAl}!ZC*G&gL6HZ9>qAF;AN@S4&Jh>CgTG}mZrA*ZEPw7t$9Rjc_N}J` 
zOgj+4A}H>?#^MQ`N$Ky&uGu>2g_-PoP%cl%gMQ=pgzxuUtTMaFeVC_V-bJ6H+_eLB zCtNzKy@_(EYYJo|KTRuE>r>ndF=cKKSiQ&Jyg-o*QjB_Y@7?OrZn?=N zz5KyvhQcc}QLizeq*PE(jTIIwPLQ@eaXci=bEG2SWP!yoRibJ+ z4DZLnHZDJ|+^mCgeHx#6(>M9tpvGh6=bheNC0dF(M(muDEQykBRMYB*dDm8r&PZz* zb?b@$(zz5X)P))QU2S=HTd3<}yjFz7j?M@uiUo02-i);AGtTwl_m5_cZ4#YVQ^jfJ zWwVpX4z{mHYnE8Bhg6;Fjy2f@{Hh!_Z4&3)em>lzwobcJ-QEu>5h*ay8{o*uemTmIUNLuNILSK5=8nyoY}27+C*siX zUwVd5?UY0Ql`A(u-K(Cx;B;U|ogOQZFW1yONba>r!tA>T(IrI;(GIF95-G`DZ>$HH@O!(h>Ap4W?Q zGH&e4vAHLp{m1Mn_2s4a?(eSf2YFT^tS~5b?e8j@iX3M=Pn&T09y|O{^7PVdakI1V zK0r)Tzb(w)!xyBh8Ykc=8sqQlv-r|y&*WW~TTUjG^Tyu$(Q@G@k)L@&Y%c`hYE>6S z2MUcTJAV5nF=vTr`Awst|0=1$z5Q+IZuG7_GB1Ny;szbkpw|;vB`yj5AV2-Fg44pF zy3youd_N1e@+S4|{bViIMShf&Dn?b)UdNpabqpn}N{344K!_QjvO6_e=d7 zpfOHy7a8mw;XjKv+IWd)S+io7{`z!3biLG(Md!BHM`dFVyNiNI$v{KBfj2VyqwQV3 zUPuT{_sqPZ_+8%TQge)qNG-?MH~E#|HQwFM(=VbvFX>j0pFb0(JpJI7Nj0tVZ>xNB z;R9yMNjzq%UG#0bWZ!8DD!#DL{Df0Rx@M(NVL?^R%;e|ap=kp0Uw!@Ii`R4>h;S)I z@hAA3S9iyHw0adNW~+C-?RsH&lWv9*D`jgsrET@L>gH;f{l^iTuemi20SP6D`pP!v z;p|lWgoTdvmoHi|IkMQcuCuEuM1Jjv*=I@Stsh1%rV3RGL=Gk5WxpRHbvFk*4#Qe0 z$eK7VUq#0)Ul|YgyJbzB$Dmq}o|A&!egrSa?o1|HI+1-xLSo?2j>Y*ftW%uVLc*E6 zFu{W)nXhj_8N{3B#@_t4$lovRYN>jVtmn*Vw!W>N?n)T zUH1)FY!_m7F8c_;daXw)mb|{mR}b91&&1jZp`4^sCp5(7KB^lwp33o@nZ#HD>^gIyHcU{FD0+L!qs3jBx1j?=%p%i1!y_%r+!5+ z!Q_#hem=?xhh*?78^i3BY^{09gMa? 
z@`aIZ*(}q6G4necqz^`dgxU7LH@1oQZxDAFCtc)X=_#!ujU-;_V!+W-YsHB>q z7AB1PX`@s*cuLbdI0~a4ViYISop`i*qU{c#!G}tDE9{NAMc^-MFS5ldIgWP~+jZ6# z19%m}L~ku=pmCI9r@r1|m-%5O@Qxq>AkXSBG~0|1lwO)(BY6YCfl!1wj7QjK^n((p zw5|P|u2=ig;9_V9P!S(m!_SC#X}wo9*39~mG*MqioY!vQ@(!zRx33S5P^ggAS|B&e z{+=k0;D2!Cq$C7>%`O9Uby~PbT`b?vGoal6+mIbuDkxL*N~z~VOCts^2r%TJX|S~h z23EE!`+S!hd(~!l*n2=P#4SV^U}YrE?4%sqhZmhW zAwzG&s;+Q~(+c^OGw1}@!s_E^ScX?cf4u{Me&yyE)QDn|$Z%U_^}fC^gJ}NjOZzt# z3s`D^J9Pm!16cD%c+h~%&j7B0#9^V5-9WHX7H`V(jaxP#{!9A95r+hh((bmZPW}6H z@Jao5d&>6!gVp^V--eT^V50>G_jTJRD{Mxv0( zbHX7-*s=QF^3m-9zzEMRxJi?ut-Acqnlif#kwE%d5<&nTtr0y2Rh*ZwD-kZr|6`^J z4Y-+s(?Pi5qn8I91eESBeRMt=NlIrKx&?jukB zIjA9I*Wblw+q5R~?*R3O20VOl=zg)pvpYD}TTXL4|9He!jkZaBZ063kCZ4e^{iAW_ zh6v_wGj$5eshCdf40yU4rOsWYL$?mY?+hH7$2@Klb?@VGH46|80LuebU(nZuVNE#pLu23 zSfyP)Zw>86Oz9pMqa~h@pkH(R*wZiWjP>;+4>4Gl%=KU?xBB?fx$*_m)A14M87EYb z9EG&0{DrNiv-nzSu&b5WxZB1Oa$TkelgqSWft9)|TT9I25j(cDu~<@wPF!CNTwL!Y zdR%Lq7sW?hKW0l83dEG{wx;uDGq{q>Yj!9BrP##~PYMCMMlht837(kq&kJX927{tRVr>nZ(;{R6QNwikKU9;BcJ(@YIJmEP`u@k;G>#B11(W=*cS=d@uTdIF=tZ zb(9S@|IT@2+W>Wx0=x&6shM7yJ#q@2JxlCx;CN3qA!49$I1ii&7!?Z88qUTQxzTj8 z&*6D=9beZBr+p%%reP6k(cA948t8e(UNcTll?k@&muj+JD)S}G&zH%rG4k(>RTyhb zaz1bgKht4?PGE)2FZ~#Uc+Q*K!5c~1{rCaFZY z*us3;x|aX+-86P?Y12G|ASjOyRgRaKj6p1N3I;%sCm~kr;v(=<6{rC+=no)1J_%?w zdL$ilvBXS4^e+FQBeoA47z1ZVqs%Tk$~?QH`<=eNMqCd`qy3bzI3^TGn?;d!1IeE- z0-e5$6`8JConEN`o_1H_&DpT^5ZRDM4u$OjF2pDw$kYV*auMWVz!s$zB#~>qfh7qU zTvP=l$+?xQ?~>H-4!0%&%d;tDtqhFXbO-#G<2-2KXYC?y@N>DlX6daG5b7>RW6^6k zh?2UlGnN%Mjve_nH(Ea45A}jD0{I5z5On^DZJ>V*J=9g zcY(%AFr;0NC$9D{j}JDA4RGF{oJqKR8?X*g+4^5OH69pN0JcXhWZ>c~DcSrLk;yh6mQ^3 zT|Z=4?}WH#G|1uq<2R7|Akxm3aid*8Wmn+Tp-v8)grDC*wZwt>#W0EJJjFoZ|C{NS z0@CyGb&6P;2L*UL*z%#(`+e+NVekpy9hBh%r>TL8PwlzL#ikjE1`1h4%$5Vg%*i`I z-lq;L=S@}4ne+PY-tckI1~_iXn>XseYpAiI1Ens}Glled*!o@hS3vBD9c;bpgyDm- ze84=(5_;(|cjbG|iC5u*T-H?YNK7I-ESMwK#)S&(Lpk3eiNfpBDo0o=c5+=0)1pgT zM&PL);ww3($@`^*?ORenF~FiDB!S}t$`2=SMLuqD>e)1%YAI5;;R*`AGzFA63>Ge| 
zxvC97IkP;2zl2a-7Romjn!edbYe<87>4lvRKfG(^Q!2=Mg1J>~^F^^_Kj36u3u#Fm zCyG74%!3AnF6@v!&NBb`M*-3#gD%_02{NfZ*AK)Sv6Kb`1Dn3I{H?8Lp-rJ0DwH{J znCdOJU!E14@x7!@U64Pu6$AQ zp8lQ?Vr#L2vRYcKW5EK)XYJ-*VD;A!rv6-D63U8~Ka(|~52>hU`|U@==(92Q3$``S zWbpvinX`I!V+i;cyV+2xDFYECAeIIjpmR44A_FEaqe#|~{St@Wfn(O}43N%w04gs! zmA5wK>M|2W4+mgleygUEI*cK(U^C)@IM4N>6HX#r4saX5y;A{jd*}^?omPI*Q=?&I zla3z$oK z25O$M;Kj-yV+%}^bQ5pZ+qEWkIvxk;pqo1pzUTOUbbJ)ve^~Wajex3?DFXOLB+<=U z4&^F7QU57yai6*Zq`V%+HAs>|e;hG0)y zT9di;S=)V0JKA58^<Kw572 zM1+@3B)^*^ZB#lE?ohf<=QTpw(dcnZ$#T*{NC};?3QTJDt}oiIUrxpM_mvbPcD2sO+^Ae~iz7KUn%3Y)iuq7GO#Y~cfiB+nrl^xms%>iB4BkxiaBe6VQt zZrbVTmNTlX3tZCdP=LjOJp~^g%Eq zEJ)oR{9LB|X_+^|njY^RB~EK41W5!!qEXDFNURqttTI4Y5TV2EHKt9OQ^x`;`oF)d z212Q{o~ijCqaB!t^ec2Ic1-LaP}{b@;fky%QW{<09goSH(CZrr7iJtB-=PxbKi-*ZE+A&b3-wxSd#{f@L4Q>K;Gqn6;Ob+| z_JhLL3$Xzp6ivjY9Kvozud!G80n9i3w4P%yP^0)K^;~5C5w`R*D&196Jyb%^C_^)a z2{qjNliVVu+XDho(Zj|pJ%K-cAC2f+?BvN@0fTZRFrs6tRW6r6~(5(@Y zC$?5l>{Z#dcwKD+7PYw3;FeOb+tzSscG`kqE200%wtozpE8;T|?R>dxyzO@2@O_qB zLF?A3lCek_N5e22KHPx$C_~})#*~A2dE2SzAL!8*7{xr=F;1cz4g8N<7}^*lINZTD zbK^h-njF*Le8+=E8HJ|I>o6r$7!fJsc)EGXty86V#{^|v?sentinel= + coordination_url = zookeeper:///hosts=&hosts= + +.. _`tooz`: http://docs.openstack.org/developer/tooz/ +.. _`tooz backends`: http://docs.openstack.org/developer/tooz/drivers.html + + +Ceph driver implementation details +---------------------------------- +Each batch of measurements to process is stored into one rados object. +These objects are named `measures___` -Upgrading -========= -In order to upgrade from a previous version of Gnocchi, you need to make sure -that your indexer and storage are properly upgraded. Run the following: +Also a special empty object called `measures` has the list of measures to +process stored in its xattr attributes. -1. 
Stop the old version of Gnocchi API server and metric daemon +Because of the asynchronous nature of how we store measurements in Gnocchi, +`gnocchi-metricd` need to known the list of objects that wait to be processed: -2. Install the new version of Gnocchi +- Listing rados objects for this is not a solution since it takes too much + time. +- Using a custom format into a rados object, would force us to use a lock + each time we would change it. -2. Run `gnocchi-upgrade` - This can take several hours depending on the size of your index and - storage. +Instead, the xattrs of one empty rados object are used. No lock is needed to +add/remove a xattr. -3. Start the new Gnocchi API server and metric daemon +But depending of the filesystem used by ceph OSDs, this xattrs can have +limitation in term of numbers and size if Ceph if not correctly configured. +See `Ceph extended attributes documentation`_ for more details. -Minimal interruption upgrade -============================ -Gnocchi supports online upgrade of its storage system, which avoids -interrupting Gnocchi for a long time. In order to upgrade from previous -versions, you need to follow the following steps: +Then, each Carbonara generated file is stored in *one* rados object. +So each metric has one rados object per aggregation in the archive policy. -1. Stop the old Gnocchi API server and metric daemon +Because of this, the OSDs filling can look less balanced comparing of the RBD. +Some other objects will be big and some others small depending on how archive +policies are set up. -2. Run `gnocchi-upgrade --skip-storage` with the new version of Gnocchi. - This can take several minutes depending on the size of your index. +We can imagine an unrealisting case like 1 point per second during one year, +the rados object size will be ~384MB. -3. Start the new Gnocchi API server. +And a more realistic scenario, a 4MB rados object (like rbd uses) could +come from: -4. 
Run `gnocchi-upgrade` with the new version of Gnocchi - This can take several hours depending on the size of your storage. +- 20 days with 1 point every seconds +- 100 days with 1 point every 5 seconds -5. Start the new Gnocchi metric daemon. +So, in realistic scenarios, the direct relation between the archive policy and +the size of the rados objects created by Gnocchi is not a problem. -This will upgrade the indexer and storage in two passes. While a new version of -Gnocchi API cannot run with an old version of the indexer, it can run with an -old version of its storage back-end. For performance reasons, _metricd_ needs -to run an upgraded storage back-end, otherwise it would spend too much time -checking for upgrade pattern on each run. +.. _`Ceph extended attributes documentation`: http://docs.ceph.com/docs/master/rados/configuration/filestore-config-ref/#extended-attributes diff --git a/doc/source/resource_types.rst b/doc/source/resource_types.rst index 0409572d..fba2723d 100644 --- a/doc/source/resource_types.rst +++ b/doc/source/resource_types.rst @@ -3,7 +3,7 @@ ================ Gnocchi offers different resource types to manage your resources. Each resource -type has strongly typed attributes. All resource types are subtypes of the +type has its specific typed attributes. All resource types are subtype of the `generic` type. Immutable attributes are attributes that cannot be modified after the resource diff --git a/doc/source/rest.j2 b/doc/source/rest.j2 index 07d5212c..a80eed31 100644 --- a/doc/source/rest.j2 +++ b/doc/source/rest.j2 @@ -5,19 +5,10 @@ Authentication ============== -By default, no authentication is configured in Gnocchi. You need to provides -these headers in your HTTP requests: - -* X-User-Id -* X-Project-Id - -The `X-Roles` header can also be provided in order to match role based ACL -specified in `policy.json`. 
- -If you enable the OpenStack Keystone middleware, you only need to authenticate -against Keystone and provide `X-Auth-Token` header with a valid token for each -request sent to Gnocchi. The headers mentionned above will be filled -automatically based on your Keystone authorizations. +By default, the `api.middleware` configuration option is set to use the +Keystone middleware. Therefore you must authenticate using Keystone to use the +API and provide an `X-Auth-Token` header with a valid token for each request +sent to Gnocchi. Metrics ======= @@ -80,20 +71,6 @@ method. It is possible to request for any other method by specifying the The list of aggregation method available is: *mean*, *sum*, *last*, *max*, *min*, *std*, *median*, *first*, *count* and *Npct* (with 0 < N < 100). -It's possible to provide the `granularity` argument to specify the granularity -to retrieve, rather than all the granularities available: - -{{ scenarios['get-measures-granularity']['doc'] }} - - -Measures batching -================= -It is also possible to batch measures sending, i.e. send several measures for -different metrics in a simple call: - -{{ scenarios['post-measures-batch']['doc'] }} - - Archive Policy ============== @@ -372,7 +349,7 @@ It can also be done by providing the list of metrics to aggregate: .. Note:: This aggregation is done against the aggregates built and updated for - a metric when new measurements are posted in Gnocchi. Therefore, the aggregate + a metric when new measurements are posted in Gnocchi. Therefore the aggregate of this already aggregated data may not have sense for certain kind of aggregation method (e.g. stdev). @@ -383,22 +360,12 @@ the one described in `Searching for resources`_. {{ scenarios['get-across-metrics-measures-by-attributes-lookup']['doc'] }} -Also aggregation across metrics have different behavior depending -on if boundary are set ('start' and 'stop') and if 'needed_overlap' is set. 
- -If boundaries are not set, Gnocchi makes the aggregation only with points -at timestamp present in all timeseries. - -But when boundaries are set, Gnocchi expects that we have certain -percent of timestamps common between timeseries, this percent is controlled -by needed_overlap (defaulted with 100%). If this percent is not reached an -error is returned. Capabilities ============ -The list aggregation methods that can be used in Gnocchi are extendable and -can differ between deployments. It is possible to get the supported list of +The list aggregation methods that can be used in Gnocchi is extendable and +can differ between deployement. It is possible to get the supported list of aggregation methods from the API server: {{ scenarios['get-capabilities']['doc'] }} diff --git a/doc/source/rest.yaml b/doc/source/rest.yaml index b97548ca..03d5c41b 100644 --- a/doc/source/rest.yaml +++ b/doc/source/rest.yaml @@ -77,15 +77,6 @@ "archive_policy_name": "low" } -- name: create-metric-2 - request: | - POST /v1/metric HTTP/1.1 - Content-Type: application/json - - { - "archive_policy_name": "low" - } - - name: create-archive-policy-rule request: | POST /v1/archive_policy_rule HTTP/1.1 @@ -144,36 +135,6 @@ } ] -- name: post-measures-batch - request: | - POST /v1/batch/measures HTTP/1.1 - Content-Type: application/json - - { - "{{ scenarios['create-metric']['response'].json['id'] }}": - [ - { - "timestamp": "2014-10-06T14:34:12", - "value": 12 - }, - { - "timestamp": "2014-10-06T14:34:20", - "value": 2 - } - ], - "{{ scenarios['create-metric-2']['response'].json['id'] }}": - [ - { - "timestamp": "2014-10-06T16:12:12", - "value": 3 - }, - { - "timestamp": "2014-10-06T18:14:52", - "value": 4 - } - ] - } - - name: search-value-in-metric request: | POST /v1/search/metric?metric_id={{ scenarios['create-metric']['response'].json['id'] }} HTTP/1.1 @@ -190,9 +151,6 @@ - name: get-measures-max request: GET /v1/metric/{{ scenarios['create-metric']['response'].json['id'] 
}}/measures?aggregation=max HTTP/1.1 -- name: get-measures-granularity - request: GET /v1/metric/{{ scenarios['create-metric']['response'].json['id'] }}/measures?granularity=1 HTTP/1.1 - - name: create-resource-generic request: | POST /v1/resource/generic HTTP/1.1 diff --git a/doc/source/running.rst b/doc/source/running.rst deleted file mode 100644 index 253b6f9e..00000000 --- a/doc/source/running.rst +++ /dev/null @@ -1,71 +0,0 @@ -=============== -Running Gnocchi -=============== - -To run Gnocchi, simply run the HTTP server and metric daemon: - -:: - - gnocchi-api - gnocchi-metricd - - -Running As A WSGI Application -============================= - -It's possible – and strongly advised – to run Gnocchi through a WSGI -service such as `mod_wsgi`_ or any other WSGI application. The file -`gnocchi/rest/app.wsgi` provided with Gnocchi allows you to enable Gnocchi as -a WSGI application. -For other WSGI setup you can refer to the `pecan deployment`_ documentation. - -.. _`pecan deployment`: http://pecan.readthedocs.org/en/latest/deployment.html#deployment - - -How to scale out the Gnocchi HTTP REST API tier -=============================================== - -The Gnocchi API tier runs using WSGI. This means it can be run using `Apache -httpd`_ and `mod_wsgi`_, or other HTTP daemon such as `uwsgi`_. You should -configure the number of process and threads according to the number of CPU you -have, usually around 1.5 × number of CPU. If one server is not enough, you can -spawn any number of new API server to scale Gnocchi out, even on different -machines. - -.. _Apache httpd: http://httpd.apache.org/ -.. _mod_wsgi: https://modwsgi.readthedocs.org/ -.. _uwsgi: https://uwsgi-docs.readthedocs.org/ - - -How many metricd workers do we need to run -========================================== - -By default, `gnocchi-metricd` daemon spans all your CPU power in order to -maximize CPU utilisation when computing metric aggregation. 
You can use the -`gnocchi status` command to query the HTTP API and get the cluster status for -metric processing. It’ll show you the number of metric to process, known as the -processing backlog for `gnocchi-metricd`. As long as this backlog is not -continuously increasing, that means that `gnocchi-metricd` is able to cope with -the amount of metric that are being sent. In case this number of measure to -process is continuously increasing, you will need to (maybe temporarily) -increase the number of `gnocchi-metricd` daemons. You can run any number of -metricd daemon on any number of servers. - -How to monitor Gnocchi -====================== - -The `/v1/status` endpoint of the HTTP API returns various information, such as -the number of measures to process (measures backlog), which you can easily -monitor (see `How many metricd workers do we need to run`_). Making sure that -the HTTP server and `gnocchi-metricd` daemon are running and are not writing -anything alarming in their logs is a sign of good health of the overall system. - -How to backup and restore Gnocchi -================================= - -In order to be able to recover from an unfortunate event, you need to backup -both the index and the storage. That means creating a database dump (PostgreSQL -or MySQL) and doing snapshots or copy of your data storage (Ceph, Swift or your -file system). The procedure to restore is no more complicated than initial -deployment: restore your index and storage backups, reinstall Gnocchi if -necessary, and restart it. diff --git a/doc/source/statsd.rst b/doc/source/statsd.rst index 88405b8a..7e39968a 100644 --- a/doc/source/statsd.rst +++ b/doc/source/statsd.rst @@ -5,7 +5,7 @@ Statsd Daemon Usage What Is It? =========== `Statsd`_ is a network daemon that listens for statistics sent over the network -using TCP or UDP, and then sends aggregates to another backend. +using TCP or UDP and that send aggregates to another backend. 
Gnocchi provides a daemon that is compatible with the statsd protocol and can listen to metrics sent over the network, named `gnocchi-statsd`. @@ -16,28 +16,16 @@ How It Works? ============= In order to enable statsd support in Gnocchi, you need to configure the `[statsd]` option group in the configuration file. You need to provide a -resource ID that will be used as the main generic resource where all the -metrics will be attached, a user and project id that will be associated with -the resource and metrics, and an archive policy name that will be used to +resource id that will be used as a the main generic resource where all the +metrics will be attached, a user and project id that will be used to create the +resource and metrics for, and an archive policy name that will be used to create the metrics. All the metrics will be created dynamically as the metrics are sent to -`gnocchi-statsd`, and attached with the provided name to the resource ID you -configured. +`gnocchi-statsd`, and attached with the provided name to the resource id you +provided. -The `gnocchi-statsd` may be scaled, but trade-offs have to been made due to the -nature of the statsd protocol. That means that if you use metrics of type -`counter`_ or sampling (`c` in the protocol), you should always send those -metrics to the same daemon – or not use them at all. The other supported -types (`timing`_ and `gauges`_) does not suffer this limitation, but be aware -that you might have more measures that expected if you send the same metric to -different `gnocchi-statsd` server, as their cache nor their flush delay are -synchronized. - -.. _`counter`: https://github.com/etsy/statsd/blob/master/docs/metric_types.md#counting -.. _`timing`: https://github.com/etsy/statsd/blob/master/docs/metric_types.md#timing -.. _`gauges`: https://github.com/etsy/statsd/blob/master/docs/metric_types.md#gauges .. 
note :: - The statsd protocol support is incomplete: relative gauge values with +/- + The statsd protocol support is incomplete: relative gauges values with +/- and sets are not supported yet. diff --git a/etc/gnocchi/api-paste.ini b/etc/gnocchi/api-paste.ini index e82ccb2b..7b6a9e1b 100644 --- a/etc/gnocchi/api-paste.ini +++ b/etc/gnocchi/api-paste.ini @@ -1,6 +1,6 @@ -# Add keystone_authtoken in the pipeline if you want to use keystone authentication +# Remove keystone_authtoken from the pipeline if you don't want to use keystone authentication [pipeline:main] -pipeline = gnocchi +pipeline = keystone_authtoken gnocchi [app:gnocchi] paste.app_factory = gnocchi.rest.app:app_factory diff --git a/gnocchi-config-generator.conf b/etc/gnocchi/gnocchi-config-generator.conf similarity index 100% rename from gnocchi-config-generator.conf rename to etc/gnocchi/gnocchi-config-generator.conf diff --git a/gnocchi/archive_policy.py b/gnocchi/archive_policy.py index 9b460efb..5a5a27eb 100644 --- a/gnocchi/archive_policy.py +++ b/gnocchi/archive_policy.py @@ -137,12 +137,13 @@ class ArchivePolicy(object): OPTS = [ - cfg.ListOpt( + cfg.Opt( 'default_aggregation_methods', - item_type=types.String( - choices=ArchivePolicy.VALID_AGGREGATION_METHODS), default=['mean', 'min', 'max', 'sum', 'std', 'median', 'count', '95pct'], + type=types.List( + item_type=types.String( + choices=ArchivePolicy.VALID_AGGREGATION_METHODS)), help='Default aggregation methods to use in created archive policies'), ] @@ -168,8 +169,6 @@ class ArchivePolicyItem(dict): "At least two of granularity/points/timespan " "must be provided") granularity = round(timespan / float(points)) - else: - granularity = float(granularity) if points is None: if timespan is None: @@ -178,7 +177,6 @@ class ArchivePolicyItem(dict): points = int(timespan / granularity) self['timespan'] = granularity * points else: - points = int(points) self['timespan'] = granularity * points self['points'] = points diff --git a/gnocchi/carbonara.py 
b/gnocchi/carbonara.py index 46c262e9..b6a4a48c 100644 --- a/gnocchi/carbonara.py +++ b/gnocchi/carbonara.py @@ -2,6 +2,8 @@ # # Copyright © 2014-2015 eNovance # +# Authors: Julien Danjou +# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at @@ -17,7 +19,6 @@ import functools import logging -import numbers import operator import re @@ -25,11 +26,12 @@ import msgpack import pandas import six -from gnocchi import utils - LOG = logging.getLogger(__name__) +AGGREGATION_METHODS = set(('mean', 'sum', 'last', 'max', 'min', + 'std', 'median', 'first', 'count')) + class NoDeloreanAvailable(Exception): """Error raised when trying to insert a value that is too old.""" @@ -80,18 +82,13 @@ class TimeSerie(SerializableMixin): last in the group when the TimeSerie is created or extended. """ - def __init__(self, ts=None): - if ts is None: - ts = pandas.Series() - self.ts = ts.groupby(level=0).last().sort_index() - - @classmethod - def from_data(cls, timestamps=None, values=None): - return cls(pandas.Series(values, timestamps)) + def __init__(self, timestamps=None, values=None): + self.ts = pandas.Series(values, timestamps).groupby( + level=0).last().sort_index() @classmethod def from_tuples(cls, timestamps_values): - return cls.from_data(*zip(*timestamps_values)) + return cls(*zip(*timestamps_values)) def __eq__(self, other): return (isinstance(other, TimeSerie) @@ -127,8 +124,7 @@ class TimeSerie(SerializableMixin): :param d: The dict. 
:returns: A TimeSerie object """ - return cls.from_data( - *cls._timestamps_and_values_from_dict(d['values'])) + return cls(*cls._timestamps_and_values_from_dict(d['values'])) def to_dict(self): return { @@ -140,22 +136,17 @@ class TimeSerie(SerializableMixin): @staticmethod def _serialize_time_period(value): if value: - return value.nanos / 10e8 + return six.text_type(value.n) + value.rule_code @staticmethod def _round_timestamp(ts, freq): return pandas.Timestamp( - (pandas.Timestamp(ts).value // freq) * freq) - - @staticmethod - def _to_offset(value): - if isinstance(value, numbers.Real): - return pandas.tseries.offsets.Nano(value * 10e8) - return pandas.tseries.frequencies.to_offset(value) + (ts.value // freq.delta.value) * freq.delta.value) class BoundTimeSerie(TimeSerie): - def __init__(self, ts=None, block_size=None, back_window=0): + def __init__(self, timestamps=None, values=None, + block_size=None, back_window=0): """A time serie that is limited in size. Used to represent the full-resolution buffer of incoming raw @@ -171,17 +162,14 @@ class BoundTimeSerie(TimeSerie): used. 
""" - super(BoundTimeSerie, self).__init__(ts) - self.block_size = self._to_offset(block_size) + super(BoundTimeSerie, self).__init__(timestamps, values) + if isinstance(block_size, (float, six.integer_types)): + self.block_size = pandas.tseries.offsets.Nano(block_size * 10e8) + else: + self.block_size = pandas.tseries.frequencies.to_offset(block_size) self.back_window = back_window self._truncate() - @classmethod - def from_data(cls, timestamps=None, values=None, - block_size=None, back_window=0): - return cls(pandas.Series(values, timestamps), - block_size=block_size, back_window=back_window) - def __eq__(self, other): return (isinstance(other, BoundTimeSerie) and super(BoundTimeSerie, self).__eq__(other) @@ -222,9 +210,9 @@ class BoundTimeSerie(TimeSerie): :returns: A TimeSerie object """ timestamps, values = cls._timestamps_and_values_from_dict(d['values']) - return cls.from_data(timestamps, values, - block_size=d.get('block_size'), - back_window=d.get('back_window')) + return cls(timestamps, values, + block_size=d.get('block_size'), + back_window=d.get('back_window')) def to_dict(self): basic = super(BoundTimeSerie, self).to_dict() @@ -235,8 +223,7 @@ class BoundTimeSerie(TimeSerie): return basic def _first_block_timestamp(self): - rounded = self._round_timestamp(self.ts.index[-1], - self.block_size.delta.value) + rounded = self._round_timestamp(self.ts.index[-1], self.block_size) return rounded - (self.block_size * self.back_window) def _truncate(self): @@ -252,9 +239,8 @@ class AggregatedTimeSerie(TimeSerie): _AGG_METHOD_PCT_RE = re.compile(r"([1-9][0-9]?)pct") - POINTS_PER_SPLIT = 14400 - - def __init__(self, ts=None, max_size=None, + def __init__(self, timestamps=None, values=None, + max_size=None, sampling=None, aggregation_method='mean'): """A time serie that is downsampled. @@ -262,7 +248,7 @@ class AggregatedTimeSerie(TimeSerie): granularity/aggregation-function pair stored for a metric. 
""" - super(AggregatedTimeSerie, self).__init__(ts) + super(AggregatedTimeSerie, self).__init__(timestamps, values) m = self._AGG_METHOD_PCT_RE.match(aggregation_method) @@ -275,62 +261,10 @@ class AggregatedTimeSerie(TimeSerie): raise UnknownAggregationMethod(aggregation_method) self.aggregation_method_func_name = aggregation_method - if sampling is None: - self._sampling = None - else: - self._sampling = self._to_offset(sampling) + self.sampling = pandas.tseries.frequencies.to_offset(sampling) self.max_size = max_size self.aggregation_method = aggregation_method - @classmethod - def from_data(cls, timestamps=None, values=None, - max_size=None, sampling=None, aggregation_method='mean'): - return cls(pandas.Series(values, timestamps), - max_size=max_size, sampling=sampling, - aggregation_method=aggregation_method) - - @classmethod - def get_split_key_datetime(cls, timestamp, sampling, - chunk_size=POINTS_PER_SPLIT): - return cls._round_timestamp(timestamp, - freq=sampling * chunk_size * 10e8) - - @staticmethod - def _split_key_to_string(timestamp): - return str(utils.datetime_to_unix(timestamp)) - - @classmethod - def get_split_key(cls, timestamp, sampling, chunk_size=POINTS_PER_SPLIT): - return cls._split_key_to_string( - cls.get_split_key_datetime( - timestamp, sampling, chunk_size)) - - def split(self, chunk_size=POINTS_PER_SPLIT): - groupby = self.ts.groupby(functools.partial( - self.get_split_key_datetime, sampling=self.sampling, - chunk_size=chunk_size)) - keys = sorted(groupby.groups.keys()) - for i, ts in enumerate(keys): - if i + 1 == len(keys): - yield self._split_key_to_string(ts), TimeSerie(self.ts[ts:]) - elif i + 1 < len(keys): - t = self.ts[ts:keys[i + 1]] - del t[t.index[-1]] - yield self._split_key_to_string(ts), TimeSerie(t) - - @classmethod - def from_timeseries(cls, timeseries, sampling=None, max_size=None, - aggregation_method='mean'): - ts = pandas.Series() - for t in timeseries: - ts = ts.combine_first(t.ts) - return cls(ts, 
sampling=sampling, max_size=max_size, - aggregation_method=aggregation_method) - - @property - def sampling(self): - return self._sampling.nanos / 10e8 - def __eq__(self, other): return (isinstance(other, AggregatedTimeSerie) and super(AggregatedTimeSerie, self).__eq__(other) @@ -338,15 +272,6 @@ class AggregatedTimeSerie(TimeSerie): and self.sampling == other.sampling and self.aggregation_method == other.aggregation_method) - def __repr__(self): - return "<%s 0x%x sampling=%fs max_size=%s agg_method=%s>" % ( - self.__class__.__name__, - id(self), - self.sampling, - self.max_size, - self.aggregation_method, - ) - @classmethod def from_dict(cls, d): """Build a time series from a dict. @@ -357,18 +282,17 @@ class AggregatedTimeSerie(TimeSerie): :returns: A TimeSerie object """ timestamps, values = cls._timestamps_and_values_from_dict(d['values']) - return cls.from_data( - timestamps, values, - max_size=d.get('max_size'), - sampling=d.get('sampling'), - aggregation_method=d.get('aggregation_method', 'mean')) + return cls(timestamps, values, + max_size=d.get('max_size'), + sampling=d.get('sampling'), + aggregation_method=d.get('aggregation_method', 'mean')) def to_dict(self): d = super(AggregatedTimeSerie, self).to_dict() d.update({ 'aggregation_method': self.aggregation_method, 'max_size': self.max_size, - 'sampling': self._serialize_time_period(self._sampling), + 'sampling': self._serialize_time_period(self.sampling), }) return d @@ -379,12 +303,12 @@ class AggregatedTimeSerie(TimeSerie): self.ts = self.ts.dropna()[-self.max_size:] def _resample(self, after): - if self._sampling: + if self.sampling: # Group by the sampling, and then apply the aggregation method on # the points after `after' groupedby = self.ts[after:].groupby( functools.partial(self._round_timestamp, - freq=self.sampling * 10e8)) + freq=self.sampling)) agg_func = getattr(groupedby, self.aggregation_method_func_name) if self.aggregation_method_func_name == 'quantile': aggregated = agg_func(self.q) @@ 
-394,28 +318,6 @@ class AggregatedTimeSerie(TimeSerie): # that is before `after' self.ts = aggregated.combine_first(self.ts[:after][:-1]) - def fetch(self, from_timestamp=None, to_timestamp=None): - """Fetch aggregated time value. - - Returns a sorted list of tuples (timestamp, granularity, value). - """ - # Round timestamp to our granularity so we're sure that if e.g. 17:02 - # is requested and we have points for 17:00 and 17:05 in a 5min - # granularity, we do return the 17:00 point and not nothing - if from_timestamp is None: - from_ = None - else: - from_ = self._round_timestamp(from_timestamp, self.sampling * 10e8) - points = self[from_:to_timestamp] - try: - # Do not include stop timestamp - del points[to_timestamp] - except KeyError: - pass - return [(timestamp, self.sampling, value) - for timestamp, value - in six.iteritems(points)] - def update(self, ts): if ts.ts.empty: return @@ -439,6 +341,80 @@ class AggregatedTimeSerie(TimeSerie): self._resample(first_timestamp) self._truncate() + +class TimeSerieArchive(SerializableMixin): + + def __init__(self, agg_timeseries): + """A raw data buffer and a collection of downsampled timeseries. + + Used to represent the set of AggregatedTimeSeries for the range of + granularities supported for a metric (for a particular aggregation + function). + + """ + self.agg_timeseries = sorted(agg_timeseries, + key=operator.attrgetter("sampling")) + + @property + def max_block_size(self): + return max(agg.sampling for agg in self.agg_timeseries) + + @classmethod + def from_definitions(cls, definitions, aggregation_method='mean'): + """Create a new collection of archived time series. + + :param definition: A list of tuple (sampling, max_size) + :param aggregation_method: Aggregation function to use. 
+ """ + # Limit the main timeserie to a timespan mapping + return cls( + [AggregatedTimeSerie( + max_size=size, + sampling=pandas.tseries.offsets.Nano(sampling * 10e8), + aggregation_method=aggregation_method) + for sampling, size in definitions] + ) + + def fetch(self, from_timestamp=None, to_timestamp=None, + timeserie_filter=None): + """Fetch aggregated time value. + + Returns a sorted list of tuples (timestamp, granularity, value). + """ + result = [] + end_timestamp = to_timestamp + for ts in reversed(self.agg_timeseries): + if timeserie_filter and not timeserie_filter(ts): + continue + granularity = ts.sampling.nanos / 1000000000.0 + points = ts[from_timestamp:to_timestamp] + try: + # Do not include stop timestamp + del points[end_timestamp] + except KeyError: + pass + result.extend([(timestamp, granularity, value) + for timestamp, value + in six.iteritems(points)]) + return result + + def __eq__(self, other): + return (isinstance(other, TimeSerieArchive) + and self.agg_timeseries == other.agg_timeseries) + + def update(self, timeserie): + for agg in self.agg_timeseries: + agg.update(timeserie) + + def to_dict(self): + return { + "archives": [ts.to_dict() for ts in self.agg_timeseries], + } + + @classmethod + def from_dict(cls, d): + return cls([AggregatedTimeSerie.from_dict(a) for a in d['archives']]) + @staticmethod def aggregated(timeseries, from_timestamp=None, to_timestamp=None, aggregation='mean', needed_percent_of_overlap=100.0): @@ -450,8 +426,16 @@ class AggregatedTimeSerie(TimeSerie): if not timeseries: return [] + granularities = [set(ts.sampling for ts in timeserie.agg_timeseries) + for timeserie in timeseries] + granularities = granularities[0].intersection(*granularities[1:]) + if len(granularities) == 0: + raise UnAggregableTimeseries('No granularity match') + for timeserie in timeseries: - timeserie_raw = timeserie.fetch(from_timestamp, to_timestamp) + timeserie_raw = timeserie.fetch( + from_timestamp, to_timestamp, + lambda ts: ts.sampling in 
granularities) if timeserie_raw: dataframe = pandas.DataFrame(timeserie_raw, columns=columns) @@ -461,37 +445,20 @@ class AggregatedTimeSerie(TimeSerie): if not dataframes: return [] - number_of_distinct_datasource = len(timeseries) / len( - set(ts.sampling for ts in timeseries) - ) - grouped = pandas.concat(dataframes).groupby(level=index) left_boundary_ts = None right_boundary_ts = None maybe_next_timestamp_is_left_boundary = False - - left_holes = 0 - right_holes = 0 holes = 0 for (timestamp, __), group in grouped: - if group.count()['value'] != number_of_distinct_datasource: + if group.count()['value'] != len(timeseries): maybe_next_timestamp_is_left_boundary = True - if left_boundary_ts is not None: - right_holes += 1 - else: - left_holes += 1 + holes += 1 elif maybe_next_timestamp_is_left_boundary: left_boundary_ts = timestamp maybe_next_timestamp_is_left_boundary = False else: right_boundary_ts = timestamp - holes += right_holes - right_holes = 0 - - if to_timestamp is not None: - holes += left_holes - if from_timestamp is not None: - holes += right_holes if to_timestamp is not None or from_timestamp is not None: maximum = len(grouped) @@ -502,7 +469,7 @@ class AggregatedTimeSerie(TimeSerie): 'Less than %f%% of datapoints overlap in this ' 'timespan (%.2f%%)' % (needed_percent_of_overlap, percent_of_overlap)) - if (needed_percent_of_overlap > 0 and + elif (needed_percent_of_overlap > 0 and (right_boundary_ts == left_boundary_ts or (right_boundary_ts is None and maybe_next_timestamp_is_left_boundary))): @@ -519,7 +486,7 @@ class AggregatedTimeSerie(TimeSerie): # NOTE(sileht): this call the aggregation method on already # aggregated values, for some kind of aggregation this can - # result can looks weird, but this is the best we can do + # result can looks wierd, but this is the best we can do # because we don't have anymore the raw datapoints in those case. # FIXME(sileht): so should we bailout is case of stddev, percentile # and median? 
@@ -539,98 +506,112 @@ class AggregatedTimeSerie(TimeSerie): for __, timestamp, granularity, value in points] -class TimeSerieArchive(SerializableMixin): +import argparse +import datetime - def __init__(self, agg_timeseries): - """A raw data buffer and a collection of downsampled timeseries. +from oslo_utils import timeutils +import prettytable - Used to represent the set of AggregatedTimeSeries for the range of - granularities supported for a metric (for a particular aggregation - function). - """ - self.agg_timeseries = sorted(agg_timeseries, - key=operator.attrgetter("sampling")) +def _definition(value): + result = value.split(",") + if len(result) != 2: + raise ValueError("Format is: seconds,points") + return int(result[0]), int(result[1]) - @classmethod - def from_definitions(cls, definitions, aggregation_method='mean'): - """Create a new collection of archived time series. - :param definition: A list of tuple (sampling, max_size) - :param aggregation_method: Aggregation function to use. 
- """ - # Limit the main timeserie to a timespan mapping - return cls( - [AggregatedTimeSerie( - max_size=size, - sampling=sampling, - aggregation_method=aggregation_method) - for sampling, size in definitions] - ) +def create_archive_file(): + parser = argparse.ArgumentParser( + description="Create a Carbonara file", + ) + parser.add_argument("--aggregation-method", + type=six.text_type, + default="mean", + choices=AGGREGATION_METHODS, + help="aggregation method to use") + parser.add_argument("--back-window", + type=int, + default=0, + help="back window to keep") + parser.add_argument("definition", + type=_definition, + nargs='+', + help="archive definition as granularity,points") + parser.add_argument("filename", + nargs=1, + type=argparse.FileType(mode="wb"), + help="File name to create") + args = parser.parse_args() + ts = TimeSerieArchive.from_definitions(args.definition, + args.aggregation_method) + args.filename[0].write(ts.serialize()) - def fetch(self, from_timestamp=None, to_timestamp=None, - timeserie_filter=None): - """Fetch aggregated time value. - Returns a sorted list of tuples (timestamp, granularity, value). 
- """ - result = [] - end_timestamp = to_timestamp - for ts in reversed(self.agg_timeseries): - if timeserie_filter and not timeserie_filter(ts): - continue - points = ts[from_timestamp:to_timestamp] - try: - # Do not include stop timestamp - del points[end_timestamp] - except KeyError: - pass - result.extend([(timestamp, ts.sampling, value) - for timestamp, value - in six.iteritems(points)]) - return result +def dump_archive_file(): + parser = argparse.ArgumentParser( + description="Dump a Carbonara file", + ) + parser.add_argument("filename", + nargs=1, + type=argparse.FileType(mode="rb"), + help="File name to read") + args = parser.parse_args() - def update(self, timeserie): - for agg in self.agg_timeseries: - agg.update(timeserie) + ts = TimeSerieArchive.unserialize_from_file(args.filename[0]) - def to_dict(self): - return { - "archives": [ts.to_dict() for ts in self.agg_timeseries], - } + print("Aggregation method: %s" + % (ts.agg_timeseries[0].aggregation_method)) - def __eq__(self, other): - return (isinstance(other, TimeSerieArchive) - and self.agg_timeseries == other.agg_timeseries) + print("Number of aggregated timeseries: %d" % len(ts.agg_timeseries)) - @classmethod - def from_dict(cls, d): - return cls([AggregatedTimeSerie.from_dict(a) for a in d['archives']]) + for idx, agg_ts in enumerate(ts.agg_timeseries): + sampling = agg_ts.sampling.nanos / 1000000000 + timespan = datetime.timedelta(seconds=sampling * agg_ts.max_size) + print("\nAggregated timeserie #%d: %ds × %d = %s" + % (idx + 1, sampling, agg_ts.max_size, timespan)) + print("Number of measures: %d" % len(agg_ts)) + table = prettytable.PrettyTable(("Timestamp", "Value")) + for k, v in agg_ts.ts.iteritems(): + table.add_row((k, v)) + print(table.get_string()) -import argparse -import datetime +def _timestamp_value(value): + result = value.split(",") + if len(result) != 2: + raise ValueError("Format is: timestamp,value") + try: + timestamp = float(result[0]) + except (ValueError, TypeError): + 
timestamp = timeutils.normalize_time( + timeutils.parse_isotime(result[0])) + else: + timestamp = datetime.datetime.utcfromtimestamp(timestamp) -import prettytable + return timestamp, float(result[1]) -def dump_archive_file(): +def update_archive_file(): parser = argparse.ArgumentParser( - description="Dump a Carbonara aggregated file", + description="Insert values in a Carbonara file", ) + parser.add_argument("timestamp,value", + nargs='+', + type=_timestamp_value, + help="Timestamp and value to set") parser.add_argument("filename", nargs=1, - type=argparse.FileType(mode="rb"), - help="File name to read") + type=argparse.FileType(mode="rb+"), + help="File name to update") args = parser.parse_args() - ts = AggregatedTimeSerie.unserialize_from_file(args.filename[0]) + ts = TimeSerieArchive.unserialize_from_file(args.filename[0]) + + try: + ts.update(TimeSerie.from_tuples(getattr(args, 'timestamp,value'))) + except Exception as e: + print("E: %s: %s" % (e.__class__.__name__, e)) + return 1 - print("Aggregation method: %s" % (ts.aggregation_method)) - timespan = datetime.timedelta(seconds=ts.sampling * ts.max_size) - print("Timespan: %ds × %d = %s" % (ts.sampling, ts.max_size, timespan)) - print("Number of measures: %d" % len(ts)) - table = prettytable.PrettyTable(("Timestamp", "Value")) - for k, v in ts.ts.iteritems(): - table.add_row((k, v)) - print(table.get_string()) + args.filename[0].seek(0) + ts.serialize_to_file(args.filename[0]) diff --git a/gnocchi/cli.py b/gnocchi/cli.py index f7626274..74f77fb0 100644 --- a/gnocchi/cli.py +++ b/gnocchi/cli.py @@ -1,5 +1,4 @@ # Copyright (c) 2013 Mirantis Inc. -# Copyright (c) 2015 Red Hat # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -19,11 +18,11 @@ import signal import sys import time -from oslo_config import cfg from oslo_utils import timeutils import retrying from gnocchi import indexer +from gnocchi.indexer import sqlalchemy as sql_db from gnocchi.rest import app from gnocchi import service from gnocchi import statsd as statsd_service @@ -33,24 +32,11 @@ from gnocchi import storage LOG = logging.getLogger(__name__) -def upgrade(): - conf = cfg.ConfigOpts() - conf.register_cli_opts([ - cfg.BoolOpt("skip-index", default=False, - help="Skip index upgrade."), - cfg.BoolOpt("skip-storage", default=False, - help="Skip storage upgrade.") - ]) - conf = service.prepare_service(conf=conf) - if not conf.skip_index: - index = indexer.get_driver(conf) - index.connect() - LOG.info("Upgrading indexer %s" % index) - index.upgrade() - if not conf.skip_storage: - s = storage.get_driver(conf) - LOG.info("Upgrading storage %s" % s) - s.upgrade(index) +def storage_dbsync(): + conf = service.prepare_service() + index = sql_db.SQLAlchemyIndexer(conf) + index.connect() + index.upgrade() def api(): @@ -138,7 +124,7 @@ def metricd(): _metricd_cleanup(workers) sys.exit(0) except Exception: - LOG.warning("exiting", exc_info=True) + LOG.warn("exiting", exc_info=True) _metricd_cleanup(workers) sys.exit(1) diff --git a/gnocchi/genconfig.py b/gnocchi/genconfig.py deleted file mode 100644 index cb08b7c3..00000000 --- a/gnocchi/genconfig.py +++ /dev/null @@ -1,21 +0,0 @@ -# -*- encoding: utf-8 -*- -# -# Copyright © 2016 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. -from oslo_config import generator - - -def sdist_prehook(sdist): - generator.main(['--config-file', - 'gnocchi-config-generator.conf']) diff --git a/gnocchi/indexer/__init__.py b/gnocchi/indexer/__init__.py index b138a525..107f1a93 100644 --- a/gnocchi/indexer/__init__.py +++ b/gnocchi/indexer/__init__.py @@ -15,11 +15,10 @@ # under the License. import fnmatch import hashlib -import os -import iso8601 from oslo_config import cfg from oslo_utils import netutils +import pytz import six from stevedore import driver @@ -28,8 +27,7 @@ from gnocchi import exceptions OPTS = [ cfg.StrOpt('url', secret=True, - required=True, - default=os.getenv("GNOCCHI_INDEXER_URL"), + default="null://", help='Indexer driver to use'), ] @@ -68,7 +66,7 @@ class Resource(object): def lastmodified(self): # less precise revision start for Last-Modified http header return self.revision_start.replace(microsecond=0, - tzinfo=iso8601.iso8601.UTC) + tzinfo=pytz.UTC) def get_driver(conf): @@ -83,11 +81,11 @@ class IndexerException(Exception): """Base class for all exceptions raised by an indexer.""" -class NoSuchResourceType(IndexerException): +class UnknownResourceType(IndexerException): """Error raised when the resource type is unknown.""" def __init__(self, type): - super(NoSuchResourceType, self).__init__( - "Resource type %s does not exist" % str(type)) + super(UnknownResourceType, self).__init__( + "Resource type %s is unknown" % type) self.type = type @@ -290,12 +288,11 @@ class IndexerDriver(object): raise exceptions.NotImplementedError @staticmethod - def get_metrics(uuids, active_only=True, with_resource=False): + def get_metrics(uuids, active_only=True): """Get metrics informations from the indexer. :param uuids: A list of metric UUID. 
:param active_only: Whether to only get active metrics - :param with_resource: Include resource details """ raise exceptions.NotImplementedError diff --git a/gnocchi/indexer/alembic/env.py b/gnocchi/indexer/alembic/env.py index cf636cfa..dc577200 100644 --- a/gnocchi/indexer/alembic/env.py +++ b/gnocchi/indexer/alembic/env.py @@ -71,16 +71,16 @@ def run_migrations_online(): conf = config.conf indexer = sqlalchemy.SQLAlchemyIndexer(conf) indexer.connect() - with indexer.facade.writer_connection() as connectable: + connectable = indexer.engine_facade.get_engine() - with connectable.connect() as connection: - context.configure( - connection=connection, - target_metadata=target_metadata - ) + with connectable.connect() as connection: + context.configure( + connection=connection, + target_metadata=target_metadata + ) - with context.begin_transaction(): - context.run_migrations() + with context.begin_transaction(): + context.run_migrations() indexer.disconnect() diff --git a/gnocchi/indexer/alembic/versions/1f21cbdd6bc2_allow_volume_display_name_to_be_null.py b/gnocchi/indexer/alembic/versions/1f21cbdd6bc2_allow_volume_display_name_to_be_null.py deleted file mode 100644 index e2e48d9b..00000000 --- a/gnocchi/indexer/alembic/versions/1f21cbdd6bc2_allow_volume_display_name_to_be_null.py +++ /dev/null @@ -1,41 +0,0 @@ -# Copyright 2015 OpenStack Foundation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-# - -"""allow volume display name to be null - -Revision ID: 1f21cbdd6bc2 -Revises: 469b308577a9 -Create Date: 2015-12-08 02:12:20.273880 - -""" - -from alembic import op -import sqlalchemy as sa - - -# revision identifiers, used by Alembic. -revision = '1f21cbdd6bc2' -down_revision = '469b308577a9' -branch_labels = None -depends_on = None - - -def upgrade(): - op.alter_column('volume', 'display_name', - existing_type=sa.String(length=255), - nullable=True) - op.alter_column('volume_history', 'display_name', - existing_type=sa.String(length=255), - nullable=True) diff --git a/gnocchi/indexer/alembic/versions/469b308577a9_allow_image_ref_to_be_null.py b/gnocchi/indexer/alembic/versions/469b308577a9_allow_image_ref_to_be_null.py deleted file mode 100644 index 5ac8dfcf..00000000 --- a/gnocchi/indexer/alembic/versions/469b308577a9_allow_image_ref_to_be_null.py +++ /dev/null @@ -1,41 +0,0 @@ -# Copyright 2015 OpenStack Foundation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -"""allow image_ref to be null - -Revision ID: 469b308577a9 -Revises: 39b7d449d46a -Create Date: 2015-11-29 00:23:39.998256 - -""" - -from alembic import op -import sqlalchemy as sa - - -# revision identifiers, used by Alembic. 
-revision = '469b308577a9' -down_revision = '39b7d449d46a' -branch_labels = None -depends_on = None - - -def upgrade(): - op.alter_column('instance', 'image_ref', - existing_type=sa.String(length=255), - nullable=True) - op.alter_column('instance_history', 'image_ref', - existing_type=sa.String(length=255), - nullable=True) diff --git a/gnocchi/indexer/sqlalchemy.py b/gnocchi/indexer/sqlalchemy.py index 514c69af..705e4d9d 100644 --- a/gnocchi/indexer/sqlalchemy.py +++ b/gnocchi/indexer/sqlalchemy.py @@ -17,13 +17,12 @@ from __future__ import absolute_import import itertools import operator import os.path -import threading import uuid import oslo_db.api from oslo_db import exception -from oslo_db.sqlalchemy import enginefacade from oslo_db.sqlalchemy import models +from oslo_db.sqlalchemy import session from oslo_db.sqlalchemy import utils as oslo_db_utils import six import sqlalchemy @@ -64,44 +63,6 @@ def get_resource_mappers(ext): 'history': resource_history_ext} -class PerInstanceFacade(object): - def __init__(self, conf): - self.trans = enginefacade.transaction_context() - self.trans.configure( - **dict(conf.database.items()) - ) - self._context = threading.local() - - def independent_writer(self): - return self.trans.independent.writer.using(self._context) - - def independent_reader(self): - return self.trans.independent.reader.using(self._context) - - def writer_connection(self): - return self.trans.connection.writer.using(self._context) - - def reader_connection(self): - return self.trans.connection.reader.using(self._context) - - def writer(self): - return self.trans.writer.using(self._context) - - def reader(self): - return self.trans.reader.using(self._context) - - def get_engine(self): - # TODO(mbayer): add get_engine() to enginefacade - if not self.trans._factory._started: - self.trans._factory._start() - return self.trans._factory._writer_engine - - def dispose(self): - # TODO(mbayer): add dispose() to enginefacade - if self.trans._factory._started: - 
self.trans._factory._writer_engine.dispose() - - class SQLAlchemyIndexer(indexer.IndexerDriver): resources = extension.ExtensionManager('gnocchi.indexer.resources') @@ -111,10 +72,12 @@ class SQLAlchemyIndexer(indexer.IndexerDriver): def __init__(self, conf): conf.set_override("connection", conf.indexer.url, "database") self.conf = conf - self.facade = PerInstanceFacade(conf) + + def connect(self): + self.engine_facade = session.EngineFacade.from_config(self.conf) def disconnect(self): - self.facade.dispose() + self.engine_facade.get_engine().dispose() def _get_alembic_config(self): from alembic import config @@ -125,9 +88,6 @@ class SQLAlchemyIndexer(indexer.IndexerDriver): self.conf.database.connection) return cfg - def get_engine(self): - return self.facade.get_engine() - def upgrade(self, nocreate=False): from alembic import command from alembic import migration @@ -137,51 +97,56 @@ class SQLAlchemyIndexer(indexer.IndexerDriver): if nocreate: command.upgrade(cfg, "head") else: - with self.facade.writer_connection() as connection: - ctxt = migration.MigrationContext.configure(connection) - current_version = ctxt.get_current_revision() - if current_version is None: - Base.metadata.create_all(connection) - command.stamp(cfg, "head") - else: - command.upgrade(cfg, "head") + engine = self.engine_facade.get_engine() + ctxt = migration.MigrationContext.configure(engine.connect()) + current_version = ctxt.get_current_revision() + if current_version is None: + Base.metadata.create_all(engine) + command.stamp(cfg, "head") + else: + command.upgrade(cfg, "head") def _resource_type_to_class(self, resource_type, purpose="resource"): if resource_type not in self._RESOURCE_CLASS_MAPPER: - raise indexer.NoSuchResourceType(resource_type) + raise indexer.UnknownResourceType(resource_type) return self._RESOURCE_CLASS_MAPPER[resource_type][purpose] def list_archive_policies(self): - with self.facade.independent_reader() as session: - return list(session.query(ArchivePolicy).all()) 
+ session = self.engine_facade.get_session() + aps = list(session.query(ArchivePolicy).all()) + session.expunge_all() + return aps def get_archive_policy(self, name): - with self.facade.independent_reader() as session: - return session.query(ArchivePolicy).get(name) + session = self.engine_facade.get_session() + ap = session.query(ArchivePolicy).get(name) + session.expunge_all() + return ap def delete_archive_policy(self, name): - with self.facade.writer() as session: - try: - if session.query(ArchivePolicy).filter( - ArchivePolicy.name == name).delete() == 0: - raise indexer.NoSuchArchivePolicy(name) - except exception.DBReferenceError as e: - if (e.constraint == - 'fk_metric_archive_policy_name_archive_policy_name'): - raise indexer.ArchivePolicyInUse(name) - raise - - def get_metrics(self, uuids, active_only=True, with_resource=False): + session = self.engine_facade.get_session() + try: + if session.query(ArchivePolicy).filter( + ArchivePolicy.name == name).delete() == 0: + raise indexer.NoSuchArchivePolicy(name) + except exception.DBReferenceError as e: + if (e.constraint == + 'fk_metric_archive_policy_name_archive_policy_name'): + raise indexer.ArchivePolicyInUse(name) + raise + + def get_metrics(self, uuids, active_only=True): if not uuids: return [] - with self.facade.independent_reader() as session: - query = session.query(Metric).filter(Metric.id.in_(uuids)) - if active_only: - query = query.filter(Metric.status == 'active') - if with_resource: - query = query.options(sqlalchemy.orm.joinedload('resource')) + session = self.engine_facade.get_session() + query = session.query(Metric).filter(Metric.id.in_(uuids)).options( + sqlalchemy.orm.joinedload('resource')) + if active_only: + query = query.filter(Metric.status == 'active') - return list(query.all()) + metrics = list(query.all()) + session.expunge_all() + return metrics def create_archive_policy(self, archive_policy): ap = ArchivePolicy( @@ -190,27 +155,33 @@ class 
SQLAlchemyIndexer(indexer.IndexerDriver): definition=archive_policy.definition, aggregation_methods=list(archive_policy.aggregation_methods), ) + session = self.engine_facade.get_session() + session.add(ap) try: - with self.facade.writer() as session: - session.add(ap) + session.flush() except exception.DBDuplicateEntry: raise indexer.ArchivePolicyAlreadyExists(archive_policy.name) + session.expunge_all() return ap def list_archive_policy_rules(self): - with self.facade.independent_reader() as session: - return session.query(ArchivePolicyRule).order_by( - ArchivePolicyRule.metric_pattern.desc()).all() + session = self.engine_facade.get_session() + aps = session.query(ArchivePolicyRule).order_by( + ArchivePolicyRule.metric_pattern.desc()).all() + session.expunge_all() + return aps def get_archive_policy_rule(self, name): - with self.facade.independent_reader() as session: - return session.query(ArchivePolicyRule).get(name) + session = self.engine_facade.get_session() + ap = session.query(ArchivePolicyRule).get(name) + session.expunge_all() + return ap def delete_archive_policy_rule(self, name): - with self.facade.writer() as session: - if session.query(ArchivePolicyRule).filter( - ArchivePolicyRule.name == name).delete() == 0: - raise indexer.NoSuchArchivePolicyRule(name) + session = self.engine_facade.get_session() + if session.query(ArchivePolicyRule).filter( + ArchivePolicyRule.name == name).delete() == 0: + raise indexer.NoSuchArchivePolicyRule(name) def create_archive_policy_rule(self, name, metric_pattern, archive_policy_name): @@ -219,11 +190,13 @@ class SQLAlchemyIndexer(indexer.IndexerDriver): archive_policy_name=archive_policy_name, metric_pattern=metric_pattern ) + session = self.engine_facade.get_session() + session.add(apr) try: - with self.facade.writer() as session: - session.add(apr) + session.flush() except exception.DBDuplicateEntry: raise indexer.ArchivePolicyRuleAlreadyExists(name) + session.expunge_all() return apr def create_metric(self, id, 
created_by_user_id, created_by_project_id, @@ -235,31 +208,34 @@ class SQLAlchemyIndexer(indexer.IndexerDriver): archive_policy_name=archive_policy_name, name=name, resource_id=resource_id) + session = self.engine_facade.get_session() + session.add(m) try: - with self.facade.writer() as session: - session.add(m) + session.flush() except exception.DBReferenceError as e: if (e.constraint == 'fk_metric_archive_policy_name_archive_policy_name'): raise indexer.NoSuchArchivePolicy(archive_policy_name) raise + session.expunge_all() return m def list_metrics(self, user_id=None, project_id=None, details=False, status='active', **kwargs): - with self.facade.independent_reader() as session: - q = session.query(Metric).filter( - Metric.status == status).order_by(Metric.id) - if user_id is not None: - q = q.filter(Metric.created_by_user_id == user_id) - if project_id is not None: - q = q.filter(Metric.created_by_project_id == project_id) - for attr in kwargs: - q = q.filter(getattr(Metric, attr) == kwargs[attr]) - if details: - q = q.options(sqlalchemy.orm.joinedload('resource')) - - return list(q.all()) + session = self.engine_facade.get_session() + q = session.query(Metric).filter(Metric.status == status) + if user_id is not None: + q = q.filter(Metric.created_by_user_id == user_id) + if project_id is not None: + q = q.filter(Metric.created_by_project_id == project_id) + for attr in kwargs: + q = q.filter(getattr(Metric, attr) == kwargs[attr]) + if details: + q = q.options(sqlalchemy.orm.joinedload('resource')) + + metrics = list(q.all()) + session.expunge_all() + return metrics def create_resource(self, resource_type, id, created_by_user_id, created_by_project_id, @@ -270,19 +246,19 @@ class SQLAlchemyIndexer(indexer.IndexerDriver): if (started_at is not None and ended_at is not None and started_at > ended_at): - raise ValueError( - "Start timestamp cannot be after end timestamp") - with self.facade.writer() as session: - r = resource_cls( - id=id, - type=resource_type, - 
created_by_user_id=created_by_user_id, - created_by_project_id=created_by_project_id, - user_id=user_id, - project_id=project_id, - started_at=started_at, - ended_at=ended_at, - **kwargs) + raise ValueError("Start timestamp cannot be after end timestamp") + r = resource_cls( + id=id, + type=resource_type, + created_by_user_id=created_by_user_id, + created_by_project_id=created_by_project_id, + user_id=user_id, + project_id=project_id, + started_at=started_at, + ended_at=ended_at, + **kwargs) + session = self.engine_facade.get_session() + with session.begin(): session.add(r) try: session.flush() @@ -295,10 +271,11 @@ class SQLAlchemyIndexer(indexer.IndexerDriver): if metrics is not None: self._set_metrics_for_resource(session, r, metrics) - # NOTE(jd) Force load of metrics :) - r.metrics + # NOTE(jd) Force load of metrics :) + r.metrics - return r + session.expunge_all() + return r @oslo_db.api.retry_on_deadlock def update_resource(self, resource_type, @@ -309,8 +286,9 @@ class SQLAlchemyIndexer(indexer.IndexerDriver): resource_cls = self._resource_type_to_class(resource_type) resource_history_cls = self._resource_type_to_class(resource_type, "history") - with self.facade.writer() as session: - try: + session = self.engine_facade.get_session() + try: + with session.begin(): # NOTE(sileht): We use FOR UPDATE that is not galera friendly, # but they are no other way to cleanly patch a resource and # store the history that safe when two concurrent calls are @@ -335,7 +313,7 @@ class SQLAlchemyIndexer(indexer.IndexerDriver): # Update the resource if ended_at is not _marker: # NOTE(jd) MySQL does not honor checks. I hate it. 
- engine = session.connection() + engine = self.engine_facade.get_engine() if engine.dialect.name == "mysql": if r.started_at is not None and ended_at is not None: if r.started_at > ended_at: @@ -358,18 +336,17 @@ class SQLAlchemyIndexer(indexer.IndexerDriver): Metric.status == 'active').update( {"resource_id": None}) self._set_metrics_for_resource(session, r, metrics) + except exception.DBConstraintError as e: + if e.check_name == "ck_started_before_ended": + raise indexer.ResourceValueError( + resource_type, "ended_at", ended_at) + raise - session.flush() - except exception.DBConstraintError as e: - if e.check_name == "ck_started_before_ended": - raise indexer.ResourceValueError( - resource_type, "ended_at", ended_at) - raise - - # NOTE(jd) Force load of metrics – do it outside the session! - r.metrics + # NOTE(jd) Force load of metrics – do it outside the session! + r.metrics - return r + session.expunge_all() + return r @staticmethod def _set_metrics_for_resource(session, r, metrics): @@ -410,7 +387,8 @@ class SQLAlchemyIndexer(indexer.IndexerDriver): session.expire(r, ['metrics']) def delete_resource(self, resource_id): - with self.facade.writer() as session: + session = self.engine_facade.get_session() + with session.begin(): # We are going to delete the resource; the on delete will set the # resource_id of the attached metrics to NULL, we just have to mark # their status as 'delete' @@ -418,18 +396,21 @@ class SQLAlchemyIndexer(indexer.IndexerDriver): Metric.resource_id == resource_id).update( {"status": "delete"}) if session.query(Resource).filter( - Resource.id == resource_id).delete() == 0: + Resource.id == resource_id).options( + sqlalchemy.orm.joinedload('metrics')).delete() == 0: raise indexer.NoSuchResource(resource_id) def get_resource(self, resource_type, resource_id, with_metrics=False): resource_cls = self._resource_type_to_class(resource_type) - with self.facade.independent_reader() as session: - q = session.query( - resource_cls).filter( - 
resource_cls.id == resource_id) - if with_metrics: - q = q.options(sqlalchemy.orm.joinedload('metrics')) - return q.first() + session = self.engine_facade.get_session() + q = session.query( + resource_cls).filter( + resource_cls.id == resource_id) + if with_metrics: + q = q.options(sqlalchemy.orm.joinedload('metrics')) + r = q.first() + session.expunge_all() + return r def _get_history_result_mapper(self, resource_type): resource_cls = self._resource_type_to_class(resource_type) @@ -479,95 +460,97 @@ class SQLAlchemyIndexer(indexer.IndexerDriver): sorts=None): sorts = sorts or [] + session = self.engine_facade.get_session() + if history: target_cls = self._get_history_result_mapper(resource_type) else: target_cls = self._resource_type_to_class(resource_type) - with self.facade.independent_reader() as session: - q = session.query(target_cls) - - if attribute_filter: - engine = session.connection() - try: - f = QueryTransformer.build_filter(engine.dialect.name, - target_cls, - attribute_filter) - except indexer.QueryAttributeError as e: - # NOTE(jd) The QueryAttributeError does not know about - # resource_type, so convert it - raise indexer.ResourceAttributeError(resource_type, - e.attribute) - - q = q.filter(f) - - # transform the api-wg representation to the oslo.db one - sort_keys = [] - sort_dirs = [] - for sort in sorts: - sort_key, __, sort_dir = sort.partition(":") - sort_keys.append(sort_key.strip()) - sort_dirs.append(sort_dir or 'asc') - - # paginate_query require at list one uniq column - if 'id' not in sort_keys: - sort_keys.append('id') - sort_dirs.append('asc') - - if marker: - resource_marker = self.get_resource(resource_type, marker) - if resource_marker is None: - raise indexer.InvalidPagination( - "Invalid marker: `%s'" % marker) - else: - resource_marker = None + q = session.query(target_cls) + if attribute_filter: + engine = self.engine_facade.get_engine() try: - q = oslo_db_utils.paginate_query(q, target_cls, limit=limit, - sort_keys=sort_keys, - 
marker=resource_marker, - sort_dirs=sort_dirs) - except (exception.InvalidSortKey, ValueError) as e: - raise indexer.InvalidPagination(e) - - # Always include metrics - q = q.options(sqlalchemy.orm.joinedload("metrics")) - all_resources = q.all() - - if details: - grouped_by_type = itertools.groupby( - all_resources, lambda r: (r.revision != -1, r.type)) - all_resources = [] - for (is_history, type), resources in grouped_by_type: - if type == 'generic': - # No need for a second query - all_resources.extend(resources) + f = QueryTransformer.build_filter(engine.dialect.name, + target_cls, + attribute_filter) + except indexer.QueryAttributeError as e: + # NOTE(jd) The QueryAttributeError does not know about + # resource_type, so convert it + raise indexer.ResourceAttributeError(resource_type, + e.attribute) + + q = q.filter(f) + + # transform the api-wg representation to the oslo.db one + sort_keys = [] + sort_dirs = [] + for sort in sorts: + sort_key, __, sort_dir = sort.partition(":") + sort_keys.append(sort_key.strip()) + sort_dirs.append(sort_dir or 'asc') + + # paginate_query require at list one uniq column + if 'id' not in sort_keys: + sort_keys.append('id') + sort_dirs.append('asc') + + if marker: + resource_marker = self.get_resource(resource_type, marker) + if resource_marker is None: + raise indexer.InvalidPagination( + "Invalid marker: `%s'" % marker) + else: + resource_marker = None + + try: + q = oslo_db_utils.paginate_query(q, target_cls, limit=limit, + sort_keys=sort_keys, + marker=resource_marker, + sort_dirs=sort_dirs) + except (exception.InvalidSortKey, ValueError) as e: + raise indexer.InvalidPagination(e) + + # Always include metrics + q = q.options(sqlalchemy.orm.joinedload("metrics")) + all_resources = q.all() + + if details: + grouped_by_type = itertools.groupby( + all_resources, lambda r: (r.revision != -1, r.type)) + all_resources = [] + for (is_history, type), resources in grouped_by_type: + if type == 'generic': + # No need for a second 
query + all_resources.extend(resources) + else: + if is_history: + target_cls = self._resource_type_to_class(type, + "history") + f = target_cls.revision.in_( + [r.revision for r in resources]) else: - if is_history: - target_cls = self._resource_type_to_class( - type, "history") - f = target_cls.revision.in_( - [r.revision for r in resources]) - else: - target_cls = self._resource_type_to_class(type) - f = target_cls.id.in_([r.id for r in resources]) + target_cls = self._resource_type_to_class(type) + f = target_cls.id.in_([r.id for r in resources]) - q = session.query(target_cls).filter(f) - # Always include metrics - q = q.options(sqlalchemy.orm.joinedload('metrics')) - all_resources.extend(q.all()) - return all_resources + q = session.query(target_cls).filter(f) + # Always include metrics + q = q.options(sqlalchemy.orm.joinedload('metrics')) + all_resources.extend(q.all()) + session.expunge_all() + return all_resources def expunge_metric(self, id): - with self.facade.writer() as session: - if session.query(Metric).filter(Metric.id == id).delete() == 0: - raise indexer.NoSuchMetric(id) + session = self.engine_facade.get_session() + if session.query(Metric).filter(Metric.id == id).delete() == 0: + raise indexer.NoSuchMetric(id) def delete_metric(self, id): - with self.facade.writer() as session: - if session.query(Metric).filter( - Metric.id == id).update({"status": "delete"}) == 0: - raise indexer.NoSuchMetric(id) + session = self.engine_facade.get_session() + if session.query(Metric).filter( + Metric.id == id).update({"status": "delete"}) == 0: + raise indexer.NoSuchMetric(id) class QueryTransformer(object): diff --git a/gnocchi/indexer/sqlalchemy_extension.py b/gnocchi/indexer/sqlalchemy_extension.py index 3fb5d8eb..9a66b83c 100644 --- a/gnocchi/indexer/sqlalchemy_extension.py +++ b/gnocchi/indexer/sqlalchemy_extension.py @@ -27,7 +27,7 @@ class Image(object): class Instance(object): flavor_id = sqlalchemy.Column(sqlalchemy.String(255), nullable=False) - 
image_ref = sqlalchemy.Column(sqlalchemy.String(255)) + image_ref = sqlalchemy.Column(sqlalchemy.String(255), nullable=False) host = sqlalchemy.Column(sqlalchemy.String(255), nullable=False) display_name = sqlalchemy.Column(sqlalchemy.String(255), nullable=False) server_group = sqlalchemy.Column(sqlalchemy.String(255)) @@ -47,4 +47,4 @@ class InstanceNetworkInterface(object): class Volume(object): - display_name = sqlalchemy.Column(sqlalchemy.String(255), nullable=True) + display_name = sqlalchemy.Column(sqlalchemy.String(255), nullable=False) diff --git a/gnocchi/opts.py b/gnocchi/opts.py index 435f783c..8bd5209c 100644 --- a/gnocchi/opts.py +++ b/gnocchi/opts.py @@ -1,5 +1,7 @@ # -*- encoding: utf-8 -*- # +# Copyright © 2014-2015 eNovance +# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at @@ -14,7 +16,6 @@ import itertools from oslo_config import cfg -import uuid import gnocchi.archive_policy import gnocchi.indexer @@ -37,15 +38,20 @@ def list_opts(): cfg.StrOpt('paste_config', default='api-paste.ini', help='Path to API Paste configuration.'), - cfg.PortOpt('port', - default=8041, - help='The port for the Gnocchi API server.'), + cfg.IntOpt('port', + default=8041, + help='The port for the Gnocchi API server.'), cfg.StrOpt('host', default='0.0.0.0', help='The listen IP for the Gnocchi API server.'), cfg.BoolOpt('pecan_debug', default=False, help='Toggle Pecan Debug Middleware.'), + cfg.MultiStrOpt( + 'middlewares', + deprecated_for_removal=True, + default=[], + help='Middlewares to use. Use Paste config instead.',), cfg.IntOpt('workers', min=1, help='Number of workers for Gnocchi API server. 
' 'By default the available number of CPU is used.'), @@ -61,30 +67,20 @@ def list_opts(): gnocchi.storage.swift.OPTS, gnocchi.storage.influxdb.OPTS)), ("statsd", ( - cfg.StrOpt('host', - default='0.0.0.0', - help='The listen IP for statsd'), - cfg.PortOpt('port', - default=8125, - help='The port for statsd'), - cfg.Opt( + cfg.StrOpt( 'resource_id', - type=uuid.UUID, help='Resource UUID to use to identify statsd in Gnocchi'), - cfg.Opt( + cfg.StrOpt( 'user_id', - type=uuid.UUID, help='User UUID to use to identify statsd in Gnocchi'), - cfg.Opt( + cfg.StrOpt( 'project_id', - type=uuid.UUID, help='Project UUID to use to identify statsd in Gnocchi'), cfg.StrOpt( 'archive_policy_name', help='Archive policy name to use when creating metrics'), cfg.FloatOpt( 'flush_delay', - default=10, help='Delay between flushes'), )), ("archive_policy", gnocchi.archive_policy.OPTS), diff --git a/gnocchi/rest/__init__.py b/gnocchi/rest/__init__.py index 650d6893..5ac7378f 100644 --- a/gnocchi/rest/__init__.py +++ b/gnocchi/rest/__init__.py @@ -15,6 +15,7 @@ # under the License. import uuid +from oslo_log import log from oslo_utils import strutils import pecan from pecan import rest @@ -32,6 +33,8 @@ from gnocchi import json from gnocchi import storage from gnocchi import utils +LOG = log.getLogger(__name__) + def arg_to_list(value): if isinstance(value, list): @@ -51,9 +54,9 @@ def abort(status_code, detail='', headers=None, comment=None, **kw): def get_user_and_project(): headers = pecan.request.headers - # NOTE(jd) If user_id or project_id is UUID, try to convert them into - # the proper dashed format. It's indeed possible that a middleware passes - # these UUIDs without the dash representation. It's valid, we can parse, + # NOTE(jd) If user_id or project_id are UUID, try to convert them in the + # proper dashed format. It's indeed possible that a middleware passes + # theses UUID without the dash representation. 
It's valid, we can parse, # but the policy module won't see the equality in the string # representations. user_id = headers.get("X-User-Id") @@ -239,7 +242,9 @@ def get_pagination_options(params, default): sorts = [sorts] try: - limit = PositiveNotNullInt(limit) + limit = int(limit) + if limit <= 0: + raise ValueError except ValueError: abort(400, "Invalid 'limit' value: %s" % params.get('limit')) @@ -393,15 +398,13 @@ class AggregatedMetricController(rest.RestController): @pecan.expose('json') def get_measures(self, start=None, stop=None, aggregation='mean', - granularity=None, needed_overlap=100.0): + needed_overlap=100.0): return self.get_cross_metric_measures_from_ids( - self.metric_ids, start, stop, - aggregation, granularity, needed_overlap) + self.metric_ids, start, stop, aggregation, needed_overlap) @classmethod def get_cross_metric_measures_from_ids(cls, metric_ids, start=None, stop=None, aggregation='mean', - granularity=None, needed_overlap=100.0): # Check RBAC policy metrics = pecan.request.indexer.get_metrics(metric_ids) @@ -412,12 +415,11 @@ class AggregatedMetricController(rest.RestController): abort(404, storage.MetricDoesNotExist( missing_metric_ids.pop())) return cls.get_cross_metric_measures_from_objs( - metrics, start, stop, aggregation, granularity, needed_overlap) + metrics, start, stop, aggregation, needed_overlap) @staticmethod def get_cross_metric_measures_from_objs(metrics, start=None, stop=None, aggregation='mean', - granularity=None, needed_overlap=100.0): try: needed_overlap = float(needed_overlap) @@ -447,26 +449,15 @@ class AggregatedMetricController(rest.RestController): for metric in metrics: enforce("get metric", metric) - number_of_metrics = len(metrics) try: - if number_of_metrics == 0: - return [] - if granularity is not None: - try: - granularity = float(granularity) - except ValueError as e: - abort(400, "granularity must be a float: %s" % e) - if number_of_metrics == 1: + if len(metrics) == 1: # NOTE(sileht): don't do the 
aggregation if we only have one # metric measures = pecan.request.storage.get_measures( - metrics[0], start, stop, aggregation, - granularity) + metrics[0], start, stop, aggregation) else: measures = pecan.request.storage.get_cross_metric_measures( - metrics, start, stop, aggregation, - granularity, - needed_overlap) + metrics, start, stop, aggregation, needed_overlap) # Replace timestamp keys by their string versions return [(timestamp.isoformat(), offset, v) for timestamp, offset, v in measures] @@ -479,21 +470,6 @@ class AggregatedMetricController(rest.RestController): abort(404, e) -def MeasureSchema(m): - # NOTE(sileht): don't use voluptuous for performance reasons - try: - value = float(m['value']) - except Exception: - abort(400, "Invalid input for a value") - - try: - timestamp = utils.to_timestamp(m['timestamp']) - except Exception: - abort(400, "Invalid input for a timestamp") - - return storage.Measure(timestamp, value) - - class MetricController(rest.RestController): _custom_actions = { 'measures': ['POST', 'GET'] @@ -505,6 +481,23 @@ class MetricController(rest.RestController): invoke_on_load=True) self.custom_agg = dict((x.name, x.obj) for x in mgr) + @staticmethod + def to_measure(m): + # NOTE(sileht): we do the input validation + # during the iteration for not loop just for this + # and don't use voluptuous for performance reason + try: + value = float(m['value']) + except Exception: + abort(400, "Invalid input for a value") + + try: + timestamp = utils.to_timestamp(m['timestamp']) + except Exception: + abort(400, "Invalid input for a timestamp") + + return storage.Measure(timestamp, value) + def enforce_metric(self, rule): enforce(rule, json.to_primitive(self.metric)) @@ -521,7 +514,7 @@ class MetricController(rest.RestController): abort(400, "Invalid input for measures") if params: pecan.request.storage.add_measures( - self.metric, six.moves.map(MeasureSchema, params)) + self.metric, six.moves.map(self.to_measure, params)) pecan.response.status = 
202 @pecan.expose('json') @@ -558,13 +551,13 @@ class MetricController(rest.RestController): else: measures = pecan.request.storage.get_measures( self.metric, start, stop, aggregation, - float(granularity) if granularity is not None else None) + int(granularity) if granularity is not None else None) # Replace timestamp keys by their string versions return [(timestamp.isoformat(), offset, v) for timestamp, offset, v in measures] - except (storage.MetricDoesNotExist, - storage.GranularityDoesNotExist, - storage.AggregationDoesNotExist) as e: + except storage.MetricDoesNotExist as e: + abort(404, e) + except storage.AggregationDoesNotExist as e: abort(404, e) except aggregates.CustomAggFailure as e: abort(400, e) @@ -593,8 +586,7 @@ class MetricsController(rest.RestController): metric_id = uuid.UUID(id) except ValueError: abort(404, indexer.NoSuchMetric(id)) - metrics = pecan.request.indexer.get_metrics( - [metric_id], with_resource=True) + metrics = pecan.request.indexer.get_metrics([metric_id]) if not metrics: abort(404, indexer.NoSuchMetric(id)) return MetricController(metrics[0]), remainder @@ -705,8 +697,7 @@ class NamedMetricController(rest.RestController): @pecan.expose() def _lookup(self, name, *remainder): - details = True if pecan.request.method == 'GET' else False - m = pecan.request.indexer.list_metrics(details=details, + m = pecan.request.indexer.list_metrics(details=True, name=name, resource_id=self.resource_id) if m: @@ -827,10 +818,12 @@ def ResourceSchema(schema): return base_schema -class ResourceController(rest.RestController): +class GenericResourceController(rest.RestController): + _resource_type = 'generic' - def __init__(self, resource_type, id): - self._resource_type = resource_type + Resource = ResourceSchema({}) + + def __init__(self, id): try: self.id = utils.ResourceUUID(id) except ValueError: @@ -859,9 +852,7 @@ class ResourceController(rest.RestController): enforce("update resource", resource) etag_precondition_check(resource) - body = 
deserialize_and_validate( - schema_for(self._resource_type), - required=False) + body = deserialize_and_validate(self.Resource, required=False) if len(body) == 0: etag_set_headers(resource) @@ -879,6 +870,8 @@ class ResourceController(rest.RestController): create_revision = False try: + if 'metrics' in body: + user, project = get_user_and_project() resource = pecan.request.indexer.update_resource( self._resource_type, self.id, @@ -907,58 +900,89 @@ class ResourceController(rest.RestController): abort(404, e) -GenericSchema = ResourceSchema({}) +class SwiftAccountResourceController(GenericResourceController): + _resource_type = 'swift_account' -InstanceDiskSchema = ResourceSchema({ - "name": six.text_type, - "instance_id": UUID, -}) -InstanceNetworkInterfaceSchema = ResourceSchema({ - "name": six.text_type, - "instance_id": UUID, -}) +class InstanceDiskResourceController(GenericResourceController): + _resource_type = 'instance_disk' + Resource = ResourceSchema({ + "name": six.text_type, + "instance_id": UUID, + }) -InstanceSchema = ResourceSchema({ - "flavor_id": six.text_type, - voluptuous.Optional("image_ref"): six.text_type, - "host": six.text_type, - "display_name": six.text_type, - voluptuous.Optional("server_group"): six.text_type, -}) -VolumeSchema = ResourceSchema({ - voluptuous.Optional("display_name"): voluptuous.Any(None, - six.text_type), -}) +class InstanceNetworkInterfaceResourceController(GenericResourceController): + _resource_type = 'instance_network_interface' + Resource = ResourceSchema({ + "name": six.text_type, + "instance_id": UUID, + }) -ImageSchema = ResourceSchema({ - "name": six.text_type, - "container_format": six.text_type, - "disk_format": six.text_type, -}) +class InstanceResourceController(GenericResourceController): + _resource_type = 'instance' + + Resource = ResourceSchema({ + "flavor_id": six.text_type, + "image_ref": six.text_type, + "host": six.text_type, + "display_name": six.text_type, + voluptuous.Optional("server_group"): 
six.text_type, + }) -# NOTE(sileht): Must be loaded after all ResourceSchema -RESOURCE_SCHEMA_MANAGER = extension.ExtensionManager( - 'gnocchi.controller.schemas') +class VolumeResourceController(GenericResourceController): + _resource_type = 'volume' -def schema_for(resource_type): - return RESOURCE_SCHEMA_MANAGER[resource_type].plugin + Resource = ResourceSchema({ + "display_name": six.text_type, + }) -class ResourcesController(rest.RestController): - def __init__(self, resource_type): - self._resource_type = resource_type +class CephAccountResourceController(GenericResourceController): + _resource_type = 'ceph_account' + + +class NetworkResourceController(GenericResourceController): + _resource_type = 'network' + + +class IdentityResourceController(GenericResourceController): + _resource_type = 'identity' + + +class IPMIResourceController(GenericResourceController): + _resource_type = 'ipmi' + + +class StackResourceController(GenericResourceController): + _resource_type = 'stack' + + +class ImageResourceController(GenericResourceController): + _resource_type = 'image' + + Resource = ResourceSchema({ + "name": six.text_type, + "container_format": six.text_type, + "disk_format": six.text_type, + }) + + +class GenericResourcesController(rest.RestController): + _resource_type = 'generic' + _resource_rest_class = GenericResourceController + + Resource = GenericResourceController.Resource @pecan.expose() def _lookup(self, id, *remainder): - return ResourceController(self._resource_type, id), remainder + return self._resource_rest_class(id), remainder @pecan.expose('json') def post(self): - body = deserialize_and_validate(schema_for(self._resource_type)) + body = deserialize_and_validate(self.Resource) target = { "resource_type": self._resource_type, } @@ -1008,20 +1032,91 @@ class ResourcesController(rest.RestController): abort(400, e) -class ResourcesByTypeController(rest.RestController): +class SwiftAccountsResourcesController(GenericResourcesController): + 
_resource_type = 'swift_account' + _resource_rest_class = SwiftAccountResourceController + + +class InstanceDisksResourcesController(GenericResourcesController): + _resource_type = 'instance_disk' + _resource_rest_class = InstanceDiskResourceController + + Resource = InstanceDiskResourceController.Resource + + +class InstanceNetworkInterfacesResourcesController(GenericResourcesController): + _resource_type = 'instance_network_interface' + _resource_rest_class = InstanceNetworkInterfaceResourceController + + Resource = InstanceNetworkInterfaceResourceController.Resource + + +class InstancesResourcesController(GenericResourcesController): + _resource_type = 'instance' + _resource_rest_class = InstanceResourceController + + Resource = InstanceResourceController.Resource + + +class VolumesResourcesController(GenericResourcesController): + _resource_type = 'volume' + _resource_rest_class = VolumeResourceController + + Resource = VolumeResourceController.Resource + + +class CephAccountsResourcesController(GenericResourcesController): + _resource_type = 'ceph_account' + _resource_rest_class = CephAccountResourceController + + +class NetworkResourcesController(GenericResourcesController): + _resource_type = 'network' + _resource_rest_class = NetworkResourceController + + +class IdentityResourcesController(GenericResourcesController): + _resource_type = 'identity' + _resource_rest_class = IdentityResourceController + + +class IPMIResourcesController(GenericResourcesController): + _resource_type = 'ipmi' + _resource_rest_class = IPMIResourceController + + +class StackResourcesController(GenericResourcesController): + _resource_type = 'stack' + _resource_rest_class = StackResourceController + + +class ImageResourcesController(GenericResourcesController): + _resource_type = 'image' + _resource_rest_class = ImageResourceController + + Resource = ImageResourceController.Resource + + +class ResourcesController(rest.RestController): + resources_ctrl_by_type = dict( + (ext.name, 
ext.plugin()) + for ext in extension.ExtensionManager( + 'gnocchi.controller.resources').extensions) + @pecan.expose('json') def get_all(self): return dict( - (ext.name, - pecan.request.application_url + '/v1/resource/' + ext.name) - for ext in RESOURCE_SCHEMA_MANAGER) + (type_name, + pecan.request.application_url + '/v1/resource/' + type_name) + for type_name in self.resources_ctrl_by_type.keys()) @pecan.expose() def _lookup(self, resource_type, *remainder): - if resource_type in RESOURCE_SCHEMA_MANAGER: - return ResourcesController(resource_type), remainder + ctrl = self.resources_ctrl_by_type.get(resource_type) + if ctrl: + return ctrl, remainder else: - abort(404, indexer.NoSuchResourceType(resource_type)) + abort(404, indexer.UnknownResourceType(resource_type)) def _ResourceSearchSchema(v): @@ -1094,10 +1189,10 @@ class SearchResourceTypeController(rest.RestController): class SearchResourceController(rest.RestController): @pecan.expose() def _lookup(self, resource_type, *remainder): - if resource_type in RESOURCE_SCHEMA_MANAGER: + if resource_type in ResourcesController.resources_ctrl_by_type: return SearchResourceTypeController(resource_type), remainder else: - abort(404, indexer.NoSuchResourceType(resource_type)) + abort(404, indexer.UnknownResourceType(resource_type)) def _MetricSearchSchema(v): @@ -1193,30 +1288,6 @@ class SearchMetricController(rest.RestController): abort(400, e) -class MeasuresBatchController(rest.RestController): - MeasuresBatchSchema = voluptuous.Schema({ - UUID: [MeasureSchema], - }) - - @pecan.expose() - def post(self): - body = deserialize_and_validate(self.MeasuresBatchSchema) - metrics = pecan.request.indexer.get_metrics(body.keys()) - - if len(metrics) != len(body): - missing_metrics = sorted(set(body) - set(m.id for m in metrics)) - abort(400, "Unknown metrics: %s" % ", ".join( - six.moves.map(str, missing_metrics))) - - for metric in metrics: - enforce("post measures", metric) - - for metric in metrics: - 
pecan.request.storage.add_measures(metric, body[metric.id]) - - pecan.response.status = 202 - - class SearchController(object): resource = SearchResourceController() metric = SearchMetricController() @@ -1229,7 +1300,7 @@ class AggregationResource(rest.RestController): @pecan.expose('json') def post(self, start=None, stop=None, aggregation='mean', - granularity=None, needed_overlap=100.0): + needed_overlap=100.0): resources = SearchResourceTypeController(self.resource_type).post() metrics = [] for r in resources: @@ -1237,7 +1308,7 @@ class AggregationResource(rest.RestController): if m: metrics.append(m) return AggregatedMetricController.get_cross_metric_measures_from_objs( - metrics, start, stop, aggregation, granularity, needed_overlap) + metrics, start, stop, aggregation, needed_overlap) class Aggregation(rest.RestController): @@ -1255,10 +1326,9 @@ class Aggregation(rest.RestController): @pecan.expose('json') def get_metric(self, metric=None, start=None, stop=None, aggregation='mean', - granularity=None, needed_overlap=100.0): + needed_overlap=100.0): return AggregatedMetricController.get_cross_metric_measures_from_ids( - arg_to_list(metric), start, stop, aggregation, - granularity, needed_overlap) + arg_to_list(metric), start, stop, aggregation, needed_overlap) class CapabilityController(rest.RestController): @@ -1282,10 +1352,6 @@ class StatusController(rest.RestController): return {"storage": {"measures_to_process": report}} -class BatchController(object): - measures = MeasuresBatchController() - - class V1Controller(object): def __init__(self): @@ -1294,8 +1360,7 @@ class V1Controller(object): "archive_policy": ArchivePoliciesController(), "archive_policy_rule": ArchivePolicyRulesController(), "metric": MetricsController(), - "batch": BatchController(), - "resource": ResourcesByTypeController(), + "resource": ResourcesController(), "aggregation": Aggregation(), "capabilities": CapabilityController(), "status": StatusController(), diff --git 
a/gnocchi/rest/app.py b/gnocchi/rest/app.py index 4b0ededb..44505b45 100644 --- a/gnocchi/rest/app.py +++ b/gnocchi/rest/app.py @@ -15,9 +15,11 @@ # under the License. import os +import keystonemiddleware.auth_token from oslo_config import cfg from oslo_log import log from oslo_policy import policy +from oslo_utils import importutils from paste import deploy import pecan import webob.exc @@ -123,6 +125,23 @@ def setup_app(config=None, cfg=None): if config.get('not_implemented_middleware', True): app = webob.exc.HTTPExceptionMiddleware(NotImplementedMiddleware(app)) + for middleware in reversed(cfg.api.middlewares): + if not middleware: + continue + klass = importutils.import_class(middleware) + # FIXME(jd) Find a way to remove that special handling… + # next version of keystonemiddleware > 2.1.0 will support + # 'oslo_config_project' option, so we could remove this + # workaround. + if klass == keystonemiddleware.auth_token.AuthProtocol: + middleware_config = dict(cfg.keystone_authtoken) + else: + middleware_config = dict(cfg) + # NOTE(sileht): Allow oslo.config compatible middleware to load + # our configuration file. 
+ middleware_config['oslo_config_project'] = 'gnocchi' + app = klass(app, middleware_config) + return app diff --git a/gnocchi/service.py b/gnocchi/service.py index bd9ea8b3..d5ba1256 100644 --- a/gnocchi/service.py +++ b/gnocchi/service.py @@ -28,10 +28,8 @@ from gnocchi import opts LOG = log.getLogger(__name__) -def prepare_service(args=None, conf=None, - default_config_files=None): - if conf is None: - conf = cfg.ConfigOpts() +def prepare_service(args=None): + conf = cfg.ConfigOpts() # FIXME(jd) Use the pkg_entry info to register the options of these libs log.register_options(conf) db_options.set_defaults(conf) @@ -55,8 +53,7 @@ def prepare_service(args=None, conf=None, conf.set_default("workers", default_workers, group="api") conf.set_default("workers", default_workers, group="metricd") - conf(args, project='gnocchi', validate_default_values=True, - default_config_files=default_config_files) + conf(args, project='gnocchi', validate_default_values=True) log.setup(conf, 'gnocchi') conf.log_opt_values(LOG, logging.DEBUG) diff --git a/gnocchi/statsd.py b/gnocchi/statsd.py index 693a063a..5c2839c6 100644 --- a/gnocchi/statsd.py +++ b/gnocchi/statsd.py @@ -19,7 +19,6 @@ try: import asyncio except ImportError: import trollius as asyncio -from oslo_config import cfg from oslo_log import log import six @@ -167,17 +166,13 @@ class StatsdServer(object): def start(): conf = service.prepare_service() - for field in ["resource_id", "user_id", "project_id"]: - if conf.statsd[field] is None: - raise cfg.RequiredOptError(field, cfg.OptGroup("statsd")) - stats = Stats(conf) loop = asyncio.get_event_loop() # TODO(jd) Add TCP support listen = loop.create_datagram_endpoint( - lambda: StatsdServer(stats), - local_addr=(conf.statsd.host, conf.statsd.port)) + # TODO(jd) Add config options for host/port + lambda: StatsdServer(stats), local_addr=('0.0.0.0', 8125)) def _flush(): loop.call_later(conf.statsd.flush_delay, _flush) diff --git a/gnocchi/storage/__init__.py 
b/gnocchi/storage/__init__.py index f81dc1c0..21a4b088 100644 --- a/gnocchi/storage/__init__.py +++ b/gnocchi/storage/__init__.py @@ -82,15 +82,11 @@ class Metric(object): __hash__ = object.__hash__ -class StorageError(Exception): +class InvalidQuery(Exception): pass -class InvalidQuery(StorageError): - pass - - -class MetricDoesNotExist(StorageError): +class MetricDoesNotExist(Exception): """Error raised when this metric does not exist.""" def __init__(self, metric): @@ -99,7 +95,7 @@ class MetricDoesNotExist(StorageError): "Metric %s does not exist" % metric) -class AggregationDoesNotExist(StorageError): +class AggregationDoesNotExist(Exception): """Error raised when the aggregation method doesn't exists for a metric.""" def __init__(self, metric, method): @@ -110,18 +106,7 @@ class AggregationDoesNotExist(StorageError): (method, metric)) -class GranularityDoesNotExist(StorageError): - """Error raised when the granularity doesn't exist for a metric.""" - - def __init__(self, metric, granularity): - self.metric = metric - self.granularity = granularity - super(GranularityDoesNotExist, self).__init__( - "Granularity '%s' for metric %s does not exist" % - (granularity, metric)) - - -class MetricAlreadyExists(StorageError): +class MetricAlreadyExists(Exception): """Error raised when this metric already exists.""" def __init__(self, metric): @@ -130,7 +115,7 @@ class MetricAlreadyExists(StorageError): "Metric %s already exists" % metric) -class MetricUnaggregatable(StorageError): +class MetricUnaggregatable(Exception): """Error raised when metrics can't be aggregated.""" def __init__(self, metrics, reason): @@ -164,10 +149,6 @@ class StorageDriver(object): def stop(): pass - @staticmethod - def upgrade(index): - pass - def process_background_tasks(self, index, sync=False): """Process background tasks for this storage. @@ -233,7 +214,7 @@ class StorageDriver(object): def measures_report(): """Return a report of pending to process measures. 
- Only useful for drivers that process measurements in background + Only usefull for drivers that process measurements in background :return: {metric_id: pending_measures_count} """ @@ -260,23 +241,17 @@ class StorageDriver(object): @staticmethod def get_cross_metric_measures(metrics, from_timestamp=None, to_timestamp=None, aggregation='mean', - granularity=None, needed_overlap=None): """Get aggregated measures of multiple entities. :param entities: The entities measured to aggregate. :param from timestamp: The timestamp to get the measure from. :param to timestamp: The timestamp to get the measure to. - :param granularity: The granularity to retrieve. :param aggregation: The type of aggregation to retrieve. """ for metric in metrics: if aggregation not in metric.archive_policy.aggregation_methods: raise AggregationDoesNotExist(metric, aggregation) - if (granularity is not None and granularity - not in set(d.granularity - for d in metric.archive_policy.definition)): - raise GranularityDoesNotExist(metric, granularity) @staticmethod def search_value(metrics, query, from_timestamp=None, diff --git a/gnocchi/storage/_carbonara.py b/gnocchi/storage/_carbonara.py index f18058e5..43af119f 100644 --- a/gnocchi/storage/_carbonara.py +++ b/gnocchi/storage/_carbonara.py @@ -2,6 +2,8 @@ # # Copyright © 2014-2015 eNovance # +# Authors: Julien Danjou +# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at @@ -13,7 +15,6 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
-import collections import logging import multiprocessing import uuid @@ -23,7 +24,6 @@ import iso8601 from oslo_config import cfg from oslo_serialization import msgpackutils from oslo_utils import timeutils -import six from tooz import coordination from gnocchi import carbonara @@ -31,6 +31,7 @@ from gnocchi import storage OPTS = [ cfg.IntOpt('aggregation_workers_number', + default=None, help='Number of workers to run during adding new measures for ' 'pre-aggregation needs.'), cfg.StrOpt('coordination_url', @@ -43,22 +44,12 @@ OPTS = [ LOG = logging.getLogger(__name__) -class CarbonaraBasedStorage(storage.StorageDriver): - MEASURE_PREFIX = "measure" - +class CarbonaraBasedStorageToozLock(object): def __init__(self, conf): - super(CarbonaraBasedStorage, self).__init__(conf) self.coord = coordination.get_coordinator( conf.coordination_url, str(uuid.uuid4()).encode('ascii')) self.coord.start() - if conf.aggregation_workers_number is None: - try: - self.aggregation_workers_number = multiprocessing.cpu_count() - except NotImplementedError: - self.aggregation_workers_number = 2 - else: - self.aggregation_workers_number = conf.aggregation_workers_number def stop(self): self.coord.stop() @@ -67,128 +58,72 @@ class CarbonaraBasedStorage(storage.StorageDriver): lock_name = b"gnocchi-" + str(metric_id).encode('ascii') return self.coord.get_lock(lock_name) - @staticmethod - def _get_measures(metric, timestamp_key, aggregation, granularity): - raise NotImplementedError + +class CarbonaraBasedStorage(storage.StorageDriver): + MEASURE_PREFIX = "measure" + + def __init__(self, conf): + super(CarbonaraBasedStorage, self).__init__(conf) + self.executor = futures.ThreadPoolExecutor( + max_workers=(conf.aggregation_workers_number or + multiprocessing.cpu_count())) @staticmethod - def _get_unaggregated_timeserie(metric): - raise NotImplementedError + def _create_metric_container(metric, archive_policy): + pass @staticmethod - def _store_unaggregated_timeserie(metric, data): + def 
_lock(metric): raise NotImplementedError @staticmethod - def _store_metric_measures(metric, aggregation, granularity, data): + def _get_measures(metric, aggregation): raise NotImplementedError @staticmethod - def _list_split_keys_for_metric(metric, aggregation, granularity): + def _store_metric_measures(metric, aggregation, data): raise NotImplementedError def get_measures(self, metric, from_timestamp=None, to_timestamp=None, aggregation='mean', granularity=None): super(CarbonaraBasedStorage, self).get_measures( metric, from_timestamp, to_timestamp, aggregation) - if granularity is None: - agg_timeseries = self._map_in_thread( - self._get_measures_timeserie, - ((metric, aggregation, ap.granularity, - from_timestamp, to_timestamp) - for ap in reversed(metric.archive_policy.definition))) - else: - agg_timeseries = [self._get_measures_timeserie( - metric, aggregation, granularity, - from_timestamp, to_timestamp)] + archive = self._get_measures_archive(metric, aggregation) return [(timestamp.replace(tzinfo=iso8601.iso8601.UTC), r, v) - for ts in agg_timeseries - for timestamp, r, v in ts.fetch(from_timestamp, to_timestamp)] + for timestamp, r, v + in archive.fetch(from_timestamp, to_timestamp) + if granularity is None or r == granularity] - def _get_measures_and_unserialize(self, metric, key, - aggregation, granularity): - data = self._get_measures(metric, key, aggregation, granularity) - try: - return carbonara.TimeSerie.unserialize(data) - except ValueError: - LOG.error("Data corruption detected for %s " - "aggregated `%s' timeserie, granularity `%s' " - "around time `%s', ignoring." 
- % (metric.id, aggregation, granularity, key)) - - def _get_measures_timeserie(self, metric, - aggregation, granularity, - from_timestamp=None, to_timestamp=None): - - # Find the number of point - for d in metric.archive_policy.definition: - if d.granularity == granularity: - points = d.points - break - else: - raise storage.GranularityDoesNotExist(metric, granularity) + @staticmethod + def _log_data_corruption(metric, aggregation): + LOG.error("Data are corrupted for metric %(metric)s and aggregation " + "%(aggregation)s, recreating an empty timeserie." % + dict(metric=metric.id, aggregation=aggregation)) - all_keys = None + def _get_measures_archive(self, metric, aggregation): try: - all_keys = self._list_split_keys_for_metric( - metric, aggregation, granularity) - except storage.MetricDoesNotExist: - # This can happen if it's an old metric with a TimeSerieArchive - all_keys = None - - if not all_keys: - # It does not mean we have no data: it can be an old metric with a - # TimeSerieArchive. + contents = self._get_measures(metric, aggregation) + except (storage.MetricDoesNotExist, storage.AggregationDoesNotExist): + ts = None + else: try: - data = self._get_metric_archive(metric, aggregation) - except (storage.MetricDoesNotExist, - storage.AggregationDoesNotExist): - # It really does not exist - for d in metric.archive_policy.definition: - if d.granularity == granularity: - return carbonara.AggregatedTimeSerie( - aggregation_method=aggregation, - sampling=granularity, - max_size=d.points) - raise storage.GranularityDoesNotExist(metric, granularity) - else: - archive = carbonara.TimeSerieArchive.unserialize(data) - # It's an old metric with an TimeSerieArchive! 
- for ts in archive.agg_timeseries: - if ts.sampling == granularity: - return ts - raise storage.GranularityDoesNotExist(metric, granularity) - - if from_timestamp: - from_timestamp = carbonara.AggregatedTimeSerie.get_split_key( - from_timestamp, granularity) - - if to_timestamp: - to_timestamp = carbonara.AggregatedTimeSerie.get_split_key( - to_timestamp, granularity) - - timeseries = filter( - lambda x: x is not None, - self._map_in_thread( - self._get_measures_and_unserialize, - ((metric, key, aggregation, granularity) - for key in all_keys - if ((not from_timestamp or key >= from_timestamp) - and (not to_timestamp or key <= to_timestamp)))) - ) - - return carbonara.AggregatedTimeSerie.from_timeseries( - timeseries, - sampling=granularity, - max_size=points) - - def _add_measures(self, aggregation, granularity, metric, timeserie): - # TODO(jd) only retrieve the part we update - ts = self._get_measures_timeserie(metric, aggregation, granularity) - ts.update(timeserie) - for key, split in ts.split(): - self._store_metric_measures(metric, key, aggregation, granularity, - split.serialize()) + ts = carbonara.TimeSerieArchive.unserialize(contents) + except ValueError: + self._log_data_corruption(metric, aggregation) + ts = None + + if ts is None: + ts = carbonara.TimeSerieArchive.from_definitions( + [(v.granularity, v.points) + for v in metric.archive_policy.definition], + aggregation_method=aggregation) + return ts + + def _add_measures(self, aggregation, metric, timeserie): + archive = self._get_measures_archive(metric, aggregation) + archive.update(timeserie) + self._store_metric_measures(metric, aggregation, + archive.serialize()) def add_measures(self, metric, measures): self._store_measures(metric, msgpackutils.dumps( @@ -224,42 +159,6 @@ class CarbonaraBasedStorage(storage.StorageDriver): (metric_id, self._pending_measures_to_process_count(metric_id)) for metric_id in metrics_to_process) - def _check_for_metric_upgrade(self, metric): - lock = 
self._lock(metric.id) - with lock: - for agg_method in metric.archive_policy.aggregation_methods: - LOG.debug( - "Checking if the metric %s needs migration for %s" - % (metric, agg_method)) - try: - data = self._get_metric_archive(metric, agg_method) - except storage.MetricDoesNotExist: - # Just try the next metric, this one has no measures - break - except storage.AggregationDoesNotExist: - # This should not happen, but you never know. - LOG.warn( - "Metric %s does not have an archive " - "for aggregation %s, " - "no migration can be done" % (metric, agg_method)) - else: - LOG.info("Migrating metric %s to new format" % metric) - archive = carbonara.TimeSerieArchive.unserialize(data) - for ts in archive.agg_timeseries: - # Store each AggregatedTimeSerie independently - for key, split in ts.split(): - self._store_metric_measures(metric, key, - ts.aggregation_method, - ts.sampling, - split.serialize()) - self._delete_metric_archives(metric) - LOG.info("Migrated metric %s to new format" % metric) - - def upgrade(self, index): - self._map_in_thread( - self._check_for_metric_upgrade, - ((metric,) for metric in index.list_metrics())) - def process_measures(self, indexer, sync=False): metrics_to_process = self._list_metric_with_measures_to_process() metrics = indexer.get_metrics(metrics_to_process) @@ -296,10 +195,8 @@ class CarbonaraBasedStorage(storage.StorageDriver): try: with timeutils.StopWatch() as sw: - raw_measures = ( - self._get_unaggregated_timeserie( - metric) - ) + raw_measures = self._get_measures(metric, + 'none') LOG.debug( "Retrieve unaggregated measures " "for %s in %.2fs" @@ -311,21 +208,20 @@ class CarbonaraBasedStorage(storage.StorageDriver): # Created in the mean time, do not worry pass ts = None + except storage.AggregationDoesNotExist: + ts = None else: try: ts = carbonara.BoundTimeSerie.unserialize( raw_measures) except ValueError: ts = None - LOG.error( - "Data corruption detected for %s " - "unaggregated timeserie, " - "recreating an empty one." 
- % metric.id) + self._log_data_corruption(metric, "none") if ts is None: # This is the first time we treat measures for this - # metric, or data are corrupted, create a new one + # metric, or data are corrupted, + # create a new one mbs = metric.archive_policy.max_block_size ts = carbonara.BoundTimeSerie( block_size=mbs, @@ -334,10 +230,8 @@ class CarbonaraBasedStorage(storage.StorageDriver): def _map_add_measures(bound_timeserie): self._map_in_thread( self._add_measures, - ((aggregation, d.granularity, - metric, bound_timeserie) - for aggregation in agg_methods - for d in metric.archive_policy.definition)) + list((aggregation, metric, bound_timeserie) + for aggregation in agg_methods)) with timeutils.StopWatch() as sw: ts.set_values( @@ -349,8 +243,8 @@ class CarbonaraBasedStorage(storage.StorageDriver): "in %.2f seconds" % (metric.id, len(measures), sw.elapsed())) - self._store_unaggregated_timeserie(metric, - ts.serialize()) + self._store_metric_measures(metric, 'none', + ts.serialize()) except Exception: if sync: raise @@ -360,84 +254,46 @@ class CarbonaraBasedStorage(storage.StorageDriver): def get_cross_metric_measures(self, metrics, from_timestamp=None, to_timestamp=None, aggregation='mean', - granularity=None, needed_overlap=100.0): super(CarbonaraBasedStorage, self).get_cross_metric_measures( - metrics, from_timestamp, to_timestamp, - aggregation, granularity, needed_overlap) - - if granularity is None: - granularities = ( - definition.granularity - for metric in metrics - for definition in metric.archive_policy.definition - ) - granularities_in_common = [ - g - for g, occurence in six.iteritems( - collections.Counter(granularities)) - if occurence == len(metrics) - ] - - if not granularities_in_common: - raise storage.MetricUnaggregatable( - metrics, 'No granularity match') - else: - granularities_in_common = [granularity] + metrics, from_timestamp, to_timestamp, aggregation, needed_overlap) - tss = self._map_in_thread(self._get_measures_timeserie, - 
[(metric, aggregation, g, - from_timestamp, to_timestamp) - for metric in metrics - for g in granularities_in_common]) + tss = self._map_in_thread(self._get_measures_archive, + [(metric, aggregation) + for metric in metrics]) try: return [(timestamp.replace(tzinfo=iso8601.iso8601.UTC), r, v) for timestamp, r, v - in carbonara.AggregatedTimeSerie.aggregated( + in carbonara.TimeSerieArchive.aggregated( tss, from_timestamp, to_timestamp, aggregation, needed_overlap)] except carbonara.UnAggregableTimeseries as e: raise storage.MetricUnaggregatable(metrics, e.reason) - def _find_measure(self, metric, aggregation, granularity, predicate, + def _find_measure(self, metric, aggregation, predicate, from_timestamp, to_timestamp): - timeserie = self._get_measures_timeserie( - metric, aggregation, granularity, - from_timestamp, to_timestamp) + timeserie = self._get_measures_archive(metric, aggregation) values = timeserie.fetch(from_timestamp, to_timestamp) return {metric: [(timestamp.replace(tzinfo=iso8601.iso8601.UTC), - g, value) - for timestamp, g, value in values + granularity, value) + for timestamp, granularity, value in values if predicate(value)]} - # TODO(jd) Add granularity parameter here and in the REST API - # rather than fetching all granularities def search_value(self, metrics, query, from_timestamp=None, to_timestamp=None, aggregation='mean'): + result = {} predicate = storage.MeasureQuery(query) - results = self._map_in_thread( - self._find_measure, - [(metric, aggregation, - ap.granularity, predicate, - from_timestamp, to_timestamp) - for metric in metrics - for ap in metric.archive_policy.definition]) - result = collections.defaultdict(list) + results = self._map_in_thread(self._find_measure, + [(metric, aggregation, predicate, + from_timestamp, to_timestamp) + for metric in metrics]) for r in results: - for metric, metric_result in six.iteritems(r): - result[metric].extend(metric_result) - - # Sort the result - for metric, r in six.iteritems(result): - # Sort 
by timestamp asc, granularity desc - r.sort(key=lambda t: (t[0], - t[1])) - + result.update(r) return result def _map_in_thread(self, method, list_of_args): - with futures.ThreadPoolExecutor( - max_workers=self.aggregation_workers_number) as executor: - # We use 'list' to iterate all threads here to raise the first - # exception now, not much choice - return list(executor.map(lambda args: method(*args), list_of_args)) + # We use 'list' to iterate all threads here to raise the first + # exception now , not much choice + return list(self.executor.map(lambda args: method(*args), + list_of_args)) diff --git a/gnocchi/storage/ceph.py b/gnocchi/storage/ceph.py index 08e3cb91..1eaef052 100644 --- a/gnocchi/storage/ceph.py +++ b/gnocchi/storage/ceph.py @@ -32,8 +32,10 @@ OPTS = [ default='gnocchi', help='Ceph pool name to use.'), cfg.StrOpt('ceph_username', + default=None, help='Ceph username (ie: client.admin).'), cfg.StrOpt('ceph_keyring', + default=None, help='Ceph keyring path.'), cfg.StrOpt('ceph_conffile', default='/etc/ceph/ceph.conf', @@ -45,6 +47,7 @@ class CephStorage(_carbonara.CarbonaraBasedStorage): def __init__(self, conf): super(CephStorage, self).__init__(conf) self.pool = conf.ceph_pool + self._lock = _carbonara.CarbonaraBasedStorageToozLock(conf) options = {} if conf.ceph_keyring: options['keyring'] = conf.ceph_keyring @@ -58,6 +61,9 @@ class CephStorage(_carbonara.CarbonaraBasedStorage): conf=options) self.rados.connect() + def stop(self): + self._lock.stop() + def _store_measures(self, metric, data): # NOTE(sileht): list all objects in a pool is too slow with # many objects (2min for 20000 objects in 50osds cluster), @@ -128,9 +134,8 @@ class CephStorage(_carbonara.CarbonaraBasedStorage): return self.rados.open_ioctx(self.pool) @staticmethod - def _get_object_name(metric, timestamp_key, aggregation, granularity): - return str("gnocchi_%s_%s_%s_%s" % ( - metric.id, timestamp_key, aggregation, granularity)) + def _get_object_name(metric, lock_name): + 
return str("gnocchi_%s_%s" % (metric.id, lock_name)) @staticmethod def _object_exists(ioctx, name): @@ -143,42 +148,38 @@ class CephStorage(_carbonara.CarbonaraBasedStorage): return False def _create_metric(self, metric): - name = "gnocchi_%s_container" % metric.id + name = self._get_object_name(metric, 'container') with self._get_ioctx() as ioctx: if self._object_exists(ioctx, name): raise storage.MetricAlreadyExists(metric) else: ioctx.write_full(name, "metric created") - def _store_metric_measures(self, metric, timestamp_key, - aggregation, granularity, data): - name = self._get_object_name(metric, timestamp_key, - aggregation, granularity) + def _store_metric_measures(self, metric, aggregation, data): + name = self._get_object_name(metric, aggregation) with self._get_ioctx() as ioctx: ioctx.write_full(name, data) - ioctx.set_xattr("gnocchi_%s_container" % metric.id, name, "") def _delete_metric(self, metric): with self._get_ioctx() as ioctx: - try: - xattrs = ioctx.get_xattrs("gnocchi_%s_container" % metric.id) - except rados.ObjectNotFound: - pass - else: - for xattr, _ in xattrs: - ioctx.remove_object(xattr) for name in ('container', 'none'): + name = self._get_object_name(metric, name) try: - ioctx.remove_object("gnocchi_%s_%s" % (metric.id, name)) + ioctx.remove_object(name) except rados.ObjectNotFound: # Maybe it never got measures pass + for aggregation in metric.archive_policy.aggregation_methods: + name = self._get_object_name(metric, aggregation) + try: + ioctx.remove_object(name) + except rados.ObjectNotFound: + pass - def _get_measures(self, metric, timestamp_key, aggregation, granularity): + def _get_measures(self, metric, aggregation): try: with self._get_ioctx() as ioctx: - name = self._get_object_name(metric, timestamp_key, - aggregation, granularity) + name = self._get_object_name(metric, aggregation) content = self._get_object_content(ioctx, name) if len(content) == 0: # NOTE(sileht: the object have been created by @@ -187,44 +188,13 @@ class 
CephStorage(_carbonara.CarbonaraBasedStorage): else: return content except rados.ObjectNotFound: + name = self._get_object_name(metric, 'container') with self._get_ioctx() as ioctx: - if self._object_exists( - ioctx, "gnocchi_%s_container" % metric.id): + if self._object_exists(ioctx, name): raise storage.AggregationDoesNotExist(metric, aggregation) else: raise storage.MetricDoesNotExist(metric) - def _list_split_keys_for_metric(self, metric, aggregation, granularity): - with self._get_ioctx() as ioctx: - try: - xattrs = ioctx.get_xattrs("gnocchi_%s_container" % metric.id) - except rados.ObjectNotFound: - raise storage.MetricDoesNotExist(metric) - keys = [] - for xattr, value in xattrs: - _, metric_id, key, agg, g = xattr.split('_', 4) - if aggregation == agg and granularity == float(g): - keys.append(key) - - return keys - - def _get_unaggregated_timeserie(self, metric): - try: - with self._get_ioctx() as ioctx: - content = self._get_object_content( - ioctx, "gnocchi_%s_none" % metric.id) - if len(content) == 0: - # NOTE(sileht: the object have been created by - # the lock code - raise rados.ObjectNotFound - return content - except rados.ObjectNotFound: - raise storage.MetricDoesNotExist(metric) - - def _store_unaggregated_timeserie(self, metric, data): - with self._get_ioctx() as ioctx: - ioctx.write_full("gnocchi_%s_none" % metric.id, data) - @staticmethod def _get_object_content(ioctx, name): offset = 0 @@ -236,33 +206,3 @@ class CephStorage(_carbonara.CarbonaraBasedStorage): content += data offset += len(data) return content - - # The following methods deal with Gnocchi <= 1.3 archives - def _get_metric_archive(self, metric, aggregation): - """Retrieve data in the place we used to store TimeSerieArchive.""" - try: - with self._get_ioctx() as ioctx: - content = self._get_object_content( - ioctx, str("gnocchi_%s_%s" % (metric.id, aggregation))) - if len(content) == 0: - # NOTE(sileht: the object have been created by - # the lock code - raise rados.ObjectNotFound 
- return content - except rados.ObjectNotFound: - raise storage.AggregationDoesNotExist(metric, aggregation) - - def _store_metric_archive(self, metric, aggregation, data): - """Stores data in the place we used to store TimeSerieArchive.""" - with self._get_ioctx() as ioctx: - ioctx.write_full( - str("gnocchi_%s_%s" % (metric.id, aggregation)), data) - - def _delete_metric_archives(self, metric): - with self._get_ioctx() as ioctx: - for aggregation in metric.archive_policy.aggregation_methods: - try: - ioctx.remove_object( - str("gnocchi_%s_%s" % (metric.id, aggregation))) - except rados.ObjectNotFound: - pass diff --git a/gnocchi/storage/file.py b/gnocchi/storage/file.py index 8fa6ed88..ffbde963 100644 --- a/gnocchi/storage/file.py +++ b/gnocchi/storage/file.py @@ -44,6 +44,7 @@ class FileStorage(_carbonara.CarbonaraBasedStorage): super(FileStorage, self).__init__(conf) self.basepath = conf.file_basepath self.basepath_tmp = conf.file_basepath_tmp + self._lock = _carbonara.CarbonaraBasedStorageToozLock(conf) try: os.mkdir(self.basepath) except OSError as e: @@ -66,26 +67,14 @@ class FileStorage(_carbonara.CarbonaraBasedStorage): dir=self.basepath_tmp, delete=False) - def _atomic_file_store(self, dest, data): - tmpfile = self._get_tempfile() - tmpfile.write(data) - tmpfile.close() - os.rename(tmpfile.name, dest) - - def _build_metric_dir(self, metric): - return os.path.join(self.basepath, str(metric.id)) - - def _build_unaggregated_timeserie_path(self, metric): - return os.path.join(self._build_metric_dir(metric), 'none') + def stop(self): + self._lock.stop() - def _build_metric_path(self, metric, aggregation): - return os.path.join(self._build_metric_dir(metric), - "agg_" + aggregation) - - def _build_metric_path_for_split(self, metric, aggregation, - timestamp_key, granularity): - return os.path.join(self._build_metric_path(metric, aggregation), - timestamp_key + "_" + str(granularity)) + def _build_metric_path(self, metric, aggregation=None): + path = 
os.path.join(self.basepath, str(metric.id)) + if aggregation: + return os.path.join(path, aggregation) + return path def _build_measure_path(self, metric_id, random_id=None): path = os.path.join(self.measure_path, six.text_type(metric_id)) @@ -97,19 +86,13 @@ class FileStorage(_carbonara.CarbonaraBasedStorage): return path def _create_metric(self, metric): - path = self._build_metric_dir(metric) + path = self._build_metric_path(metric) try: os.mkdir(path, 0o750) except OSError as e: if e.errno == errno.EEXIST: raise storage.MetricAlreadyExists(metric) raise - for agg in metric.archive_policy.aggregation_methods: - try: - os.mkdir(self._build_metric_path(metric, agg), 0o750) - except OSError as e: - if e.errno != errno.EEXIST: - raise def _store_measures(self, metric, data): tmpfile = self._get_tempfile() @@ -182,44 +165,14 @@ class FileStorage(_carbonara.CarbonaraBasedStorage): self._delete_measures_files_for_metric_id(metric.id, files) - def _store_unaggregated_timeserie(self, metric, data): - self._atomic_file_store( - self._build_unaggregated_timeserie_path(metric), - data) - - def _get_unaggregated_timeserie(self, metric): - path = self._build_unaggregated_timeserie_path(metric) - try: - with open(path, 'rb') as f: - return f.read() - except IOError as e: - if e.errno == errno.ENOENT: - raise storage.MetricDoesNotExist(metric) - raise - - def _list_split_keys_for_metric(self, metric, aggregation, granularity): - try: - files = os.listdir(self._build_metric_path(metric, aggregation)) - except OSError as e: - if e.errno == errno.ENOENT: - raise storage.MetricDoesNotExist(metric) - raise - keys = [] - for f in files: - key, sep, file_granularity = f.partition("_") - if file_granularity == str(granularity): - keys.append(key) - return keys - - def _store_metric_measures(self, metric, timestamp_key, aggregation, - granularity, data): - self._atomic_file_store( - self._build_metric_path_for_split(metric, aggregation, - timestamp_key, granularity), - data) + def 
_store_metric_measures(self, metric, aggregation, data): + tmpfile = self._get_tempfile() + tmpfile.write(data) + tmpfile.close() + os.rename(tmpfile.name, self._build_metric_path(metric, aggregation)) def _delete_metric(self, metric): - path = self._build_metric_dir(metric) + path = self._build_metric_path(metric) try: shutil.rmtree(path) except OSError as e: @@ -228,48 +181,15 @@ class FileStorage(_carbonara.CarbonaraBasedStorage): # measures) raise - def _get_measures(self, metric, timestamp_key, aggregation, granularity): - path = self._build_metric_path_for_split(metric, aggregation, - timestamp_key, granularity) - try: - with open(path, 'rb') as aggregation_file: - return aggregation_file.read() - except IOError as e: - if e.errno == errno.ENOENT: - if os.path.exists(self._build_metric_dir(metric)): - raise storage.AggregationDoesNotExist(metric, aggregation) - raise storage.MetricDoesNotExist(metric) - raise - - # The following methods deal with Gnocchi <= 1.3 archives - def _build_metric_archive_path(self, metric, aggregation): - return os.path.join(self._build_metric_dir(metric), aggregation) - - def _get_metric_archive(self, metric, aggregation): - """Retrieve data in the place we used to store TimeSerieArchive.""" - path = self._build_metric_archive_path(metric, aggregation) + def _get_measures(self, metric, aggregation): + path = self._build_metric_path(metric, aggregation) try: with open(path, 'rb') as aggregation_file: return aggregation_file.read() except IOError as e: if e.errno == errno.ENOENT: - if os.path.exists(self._build_metric_dir(metric)): + if os.path.exists(self._build_metric_path(metric)): raise storage.AggregationDoesNotExist(metric, aggregation) - raise storage.MetricDoesNotExist(metric) + else: + raise storage.MetricDoesNotExist(metric) raise - - def _store_metric_archive(self, metric, aggregation, data): - """Stores data in the place we used to store TimeSerieArchive.""" - self._atomic_file_store( - 
self._build_metric_archive_path(metric, aggregation), - data) - - def _delete_metric_archives(self, metric): - for agg in metric.archive_policy.aggregation_methods: - try: - os.unlink(self._build_metric_archive_path(metric, agg)) - except OSError as e: - if e.errno != errno.ENOENT: - # NOTE(jd) Maybe the metric has never been created (no - # measures) - raise diff --git a/gnocchi/storage/influxdb.py b/gnocchi/storage/influxdb.py index 8e7f9d5d..13c8409f 100644 --- a/gnocchi/storage/influxdb.py +++ b/gnocchi/storage/influxdb.py @@ -36,9 +36,9 @@ OPTS = [ cfg.StrOpt('influxdb_host', default='localhost', help='InfluxDB host'), - cfg.PortOpt('influxdb_port', - default=8086, - help='InfluxDB port'), + cfg.IntOpt('influxdb_port', + default=8086, + help='InfluxDB port'), cfg.StrOpt('influxdb_username', default='root', help='InfluxDB username'), @@ -150,12 +150,13 @@ class InfluxDBStorage(storage.StorageDriver): metric_id = self._get_metric_id(metric) + result = self._query(metric, "select * from \"%(metric_id)s\"" % + dict(metric_id=metric_id)) + result = list(result[metric_id]) + if from_timestamp: first_measure_timestamp = from_timestamp else: - result = self._query(metric, "select * from \"%(metric_id)s\"" % - dict(metric_id=metric_id)) - result = list(result[metric_id]) if result: first_measure_timestamp = self._timestamp_to_utc( timeutils.parse_isotime(result[0]['time'])) diff --git a/gnocchi/storage/swift.py b/gnocchi/storage/swift.py index a6ba688b..2a907b3c 100644 --- a/gnocchi/storage/swift.py +++ b/gnocchi/storage/swift.py @@ -20,10 +20,7 @@ import uuid from oslo_config import cfg import retrying import six -try: - from swiftclient import client as swclient -except ImportError: - swclient = None +from swiftclient import client as swclient from gnocchi import storage from gnocchi.storage import _carbonara @@ -34,12 +31,14 @@ OPTS = [ default='1', help='Swift authentication version to user.'), cfg.StrOpt('swift_preauthurl', + default=None, help='Swift pre-auth 
URL.'), cfg.StrOpt('swift_authurl', default="http://localhost:8080/auth/v1.0", help='Swift auth URL.'), cfg.StrOpt('swift_preauthtoken', secret=True, + default=None, help='Swift token to user to authenticate.'), cfg.StrOpt('swift_user', default="admin:admin", @@ -67,8 +66,6 @@ def retry_if_result_empty(result): class SwiftStorage(_carbonara.CarbonaraBasedStorage): def __init__(self, conf): super(SwiftStorage, self).__init__(conf) - if swclient is None: - raise RuntimeError("python-swiftclient unavailable") self.swift = swclient.Connection( auth_version=conf.swift_auth_version, authurl=conf.swift_authurl, @@ -77,16 +74,16 @@ class SwiftStorage(_carbonara.CarbonaraBasedStorage): key=conf.swift_key, tenant_name=conf.swift_tenant_name, timeout=conf.swift_timeout) + self._lock = _carbonara.CarbonaraBasedStorageToozLock(conf) self._container_prefix = conf.swift_container_prefix self.swift.put_container(self.MEASURE_PREFIX) + def stop(self): + self._lock.stop() + def _container_name(self, metric): return '%s.%s' % (self._container_prefix, str(metric.id)) - @staticmethod - def _object_name(split_key, aggregation, granularity): - return '%s_%s_%s' % (split_key, aggregation, granularity) - def _create_metric(self, metric): # TODO(jd) A container per user in their account? 
resp = {} @@ -146,41 +143,32 @@ class SwiftStorage(_carbonara.CarbonaraBasedStorage): for f in files: self.swift.delete_object(self.MEASURE_PREFIX, f['name']) - def _store_metric_measures(self, metric, timestamp_key, - aggregation, granularity, data): - self.swift.put_object( - self._container_name(metric), - self._object_name(timestamp_key, aggregation, granularity), - data) + def _store_metric_measures(self, metric, aggregation, data): + self.swift.put_object(self._container_name(metric), aggregation, data) def _delete_metric(self, metric): self._delete_unaggregated_timeserie(metric) - container = self._container_name(metric) + for aggregation in metric.archive_policy.aggregation_methods: + try: + self.swift.delete_object(self._container_name(metric), + aggregation) + except swclient.ClientException as e: + if e.http_status != 404: + raise try: - headers, files = self.swift.get_container( - container, full_listing=True) + self.swift.delete_container(self._container_name(metric)) except swclient.ClientException as e: if e.http_status != 404: # Maybe it never has been created (no measure) raise - else: - for obj in files: - self.swift.delete_object(container, obj['name']) - try: - self.swift.delete_container(container) - except swclient.ClientException as e: - if e.http_status != 404: - # Deleted in the meantime? Whatever. 
- raise @retrying.retry(stop_max_attempt_number=4, wait_fixed=500, retry_on_result=retry_if_result_empty) - def _get_measures(self, metric, timestamp_key, aggregation, granularity): + def _get_measures(self, metric, aggregation): try: headers, contents = self.swift.get_object( - self._container_name(metric), self._object_name( - timestamp_key, aggregation, granularity)) + self._container_name(metric), aggregation) except swclient.ClientException as e: if e.http_status == 404: try: @@ -193,70 +181,9 @@ class SwiftStorage(_carbonara.CarbonaraBasedStorage): raise return contents - def _list_split_keys_for_metric(self, metric, aggregation, granularity): - container = self._container_name(metric) - try: - headers, files = self.swift.get_container( - container, full_listing=True) - except swclient.ClientException as e: - if e.http_status == 404: - raise storage.MetricDoesNotExist(metric) - raise - keys = [] - for f in files: - try: - key, agg, g = f['name'].split('_', 2) - except ValueError: - # Might be "none", or any other file. Be resilient. 
- continue - if aggregation == agg and granularity == float(g): - keys.append(key) - return keys - - @retrying.retry(stop_max_attempt_number=4, - wait_fixed=500, - retry_on_result=retry_if_result_empty) - def _get_unaggregated_timeserie(self, metric): - try: - headers, contents = self.swift.get_object( - self._container_name(metric), "none") - except swclient.ClientException as e: - if e.http_status == 404: - raise storage.MetricDoesNotExist(metric) - raise - return contents - - def _store_unaggregated_timeserie(self, metric, data): - self.swift.put_object(self._container_name(metric), "none", data) - def _delete_unaggregated_timeserie(self, metric): try: self.swift.delete_object(self._container_name(metric), "none") except swclient.ClientException as e: if e.http_status != 404: raise - - # The following methods deal with Gnocchi <= 1.3 archives - def _get_metric_archive(self, metric, aggregation): - """Retrieve data in the place we used to store TimeSerieArchive.""" - try: - headers, contents = self.swift.get_object( - self._container_name(metric), aggregation) - except swclient.ClientException as e: - if e.http_status == 404: - raise storage.AggregationDoesNotExist(metric, aggregation) - raise - return contents - - def _store_metric_archive(self, metric, aggregation, data): - """Stores data in the place we used to store TimeSerieArchive.""" - self.swift.put_object(self._container_name(metric), aggregation, data) - - def _delete_metric_archives(self, metric): - for aggregation in metric.archive_policy.aggregation_methods: - try: - self.swift.delete_object(self._container_name(metric), - aggregation) - except swclient.ClientException as e: - if e.http_status != 404: - raise diff --git a/gnocchi/tests/api-paste.ini b/gnocchi/tests/api-paste.ini index 6a30433b..2d44d589 100644 --- a/gnocchi/tests/api-paste.ini +++ b/gnocchi/tests/api-paste.ini @@ -8,5 +8,4 @@ pipeline = keystone_authtoken_testing gnocchi_testing paste.app_factory = 
gnocchi.tests.test_rest:RestTest.app_factory [filter:keystone_authtoken_testing] -paste.filter_factory = keystonemiddleware.auth_token:filter_factory -oslo_config_project = gnocchi +paste.filter_factory = gnocchi.tests.test_rest:RestTest.keystone_authtoken_filter_factory diff --git a/gnocchi/tests/base.py b/gnocchi/tests/base.py index 8b6ca560..08194b58 100644 --- a/gnocchi/tests/base.py +++ b/gnocchi/tests/base.py @@ -22,10 +22,7 @@ from oslotest import base from oslotest import mockpatch import six from stevedore import extension -try: - from swiftclient import exceptions as swexc -except ImportError: - swexc = None +from swiftclient import exceptions as swexc from testtools import testcase from tooz import coordination @@ -125,7 +122,8 @@ class FakeRadosModule(object): def get_xattrs(self, key): if key not in self.kvs: raise FakeRadosModule.ObjectNotFound - return six.iteritems(self.kvs_xattrs.get(key, {}).copy()) + return iter((k, v) for k, v in + self.kvs_xattrs.get(key, {}).items()) def set_xattr(self, key, attr, value): self._ensure_key_exists(key) @@ -198,7 +196,7 @@ class FakeSwiftClient(object): files = [] directories = set() - for k, v in six.iteritems(container.copy()): + for k, v in six.iteritems(container): if path and not k.startswith(path): continue @@ -336,12 +334,16 @@ class TestCase(base.BaseTestCase): def setUp(self): super(TestCase, self).setUp() - self.conf = service.prepare_service([], - default_config_files=[]) + self.conf = service.prepare_service([]) self.conf.set_override('policy_file', self.path_get('etc/gnocchi/policy.json'), group="oslo_policy") + self.conf.set_override( + 'url', + os.environ.get("GNOCCHI_TEST_INDEXER_URL", "null://"), + 'indexer') + self.index = indexer.get_driver(self.conf) self.index.connect() @@ -380,10 +382,9 @@ class TestCase(base.BaseTestCase): except indexer.ArchivePolicyAlreadyExists: pass - if swexc: - self.useFixture(mockpatch.Patch( - 'swiftclient.client.Connection', - FakeSwiftClient)) + 
self.useFixture(mockpatch.Patch( + 'swiftclient.client.Connection', + FakeSwiftClient)) self.useFixture(mockpatch.Patch('gnocchi.storage.ceph.rados', FakeRadosModule())) @@ -414,12 +415,6 @@ class TestCase(base.BaseTestCase): 'storage') self.storage = storage.get_driver(self.conf) - # NOTE(jd) Do not upgrade the storage. We don't really need the storage - # upgrade for now, and the code that upgrade from pre-1.3 - # (TimeSerieArchive) uses a lot of parallel lock, which makes tooz - # explodes because MySQL does not support that many connections in real - # life. - # self.storage.upgrade(self.index) self.mgr = extension.ExtensionManager('gnocchi.aggregates', invoke_on_load=True) diff --git a/gnocchi/tests/gabbi/fixtures.py b/gnocchi/tests/gabbi/fixtures.py index 76019176..462f9fc6 100644 --- a/gnocchi/tests/gabbi/fixtures.py +++ b/gnocchi/tests/gabbi/fixtures.py @@ -76,16 +76,18 @@ class ConfigFixture(fixture.GabbiFixture): data_tmp_dir = tempfile.mkdtemp(prefix='gnocchi') - if os.getenv("GABBI_LIVE"): - dcf = None - else: - dcf = [] - conf = service.prepare_service([], - default_config_files=dcf) + conf = service.prepare_service([]) CONF = self.conf = conf self.tmp_dir = data_tmp_dir + # Use the indexer set in the conf, unless we have set an + # override via the environment. + if 'GNOCCHI_TEST_INDEXER_URL' in os.environ: + conf.set_override('url', + os.environ.get("GNOCCHI_TEST_INDEXER_URL"), + 'indexer') + # TODO(jd) It would be cool if Gabbi was able to use the null:// # indexer, but this makes the API returns a lot of 501 error, which # Gabbi does not want to see, so let's just disable it. @@ -123,16 +125,16 @@ class ConfigFixture(fixture.GabbiFixture): conf.set_override('pecan_debug', False, 'api') + # Turn off any middleware. 
+ conf.set_override('middlewares', [], 'api') + # Set pagination to a testable value conf.set_override('max_limit', 7, 'api') self.index = index - s = storage.get_driver(conf) - s.upgrade(index) - # start up a thread to async process measures - self.metricd_thread = MetricdThread(index, s) + self.metricd_thread = MetricdThread(index, storage.get_driver(conf)) self.metricd_thread.start() def stop_fixture(self): diff --git a/gnocchi/tests/gabbi/gabbits-live/live.yaml b/gnocchi/tests/gabbi/gabbits-live/live.yaml index bca36cdf..169410cb 100644 --- a/gnocchi/tests/gabbi/gabbits-live/live.yaml +++ b/gnocchi/tests/gabbi/gabbits-live/live.yaml @@ -21,13 +21,12 @@ tests: - '{"definition": [{"points": 12, "timespan": "1:00:00", "granularity": "0:05:00"}, {"points": 24, "timespan": "1 day, 0:00:00", "granularity": "1:00:00"}, {"points": 30, "timespan": "30 days, 0:00:00", "granularity": "1 day, 0:00:00"}], "back_window": 0, "name": "low", "aggregation_methods": ["std", "count", "95pct", "min", "max", "sum", "median", "mean"]}' - '{"definition": [{"points": 60, "timespan": "1:00:00", "granularity": "0:01:00"}, {"points": 168, "timespan": "7 days, 0:00:00", "granularity": "1:00:00"}, {"points": 365, "timespan": "365 days, 0:00:00", "granularity": "1 day, 0:00:00"}], "back_window": 0, "name": "medium", "aggregation_methods": ["std", "count", "95pct", "min", "max", "sum", "median", "mean"]}' - - name: check generic resources with the default one for statsd + - name: check generic resources url: /v1/resource/generic response_headers: content-type: /application/json/ - response_json_paths: - $[0].type: generic - $.`len`: 1 + response_strings: + - "[]" - name: post unicode archive policy url: /v1/archive_policy diff --git a/gnocchi/tests/gabbi/gabbits/aggregation.yaml b/gnocchi/tests/gabbi/gabbits/aggregation.yaml deleted file mode 100644 index f85c2521..00000000 --- a/gnocchi/tests/gabbi/gabbits/aggregation.yaml +++ /dev/null @@ -1,98 +0,0 @@ -fixtures: - - ConfigFixture - -tests: 
- - name: create archive policy - desc: for later use - url: /v1/archive_policy - method: POST - request_headers: - content-type: application/json - x-roles: admin - data: - name: low - definition: - - granularity: 1 second - - granularity: 300 seconds - status: 201 - - - name: create metric 1 - url: /v1/metric - request_headers: - content-type: application/json - method: post - data: - archive_policy_name: low - status: 201 - - - name: create metric 2 - url: /v1/metric - request_headers: - content-type: application/json - method: post - data: - archive_policy_name: low - status: 201 - - - name: get metric list to push metric 1 - url: /v1/metric - - - name: push measurements to metric 1 - url: /v1/metric/$RESPONSE['$[0].id']/measures - request_headers: - content-type: application/json - method: post - data: - - timestamp: "2015-03-06T14:33:57" - value: 43.1 - - timestamp: "2015-03-06T14:34:12" - value: 12 - status: 202 - - - name: get metric list to push metric 2 - url: /v1/metric - - - name: push measurements to metric 2 - url: /v1/metric/$RESPONSE['$[1].id']/measures - request_headers: - content-type: application/json - method: post - data: - - timestamp: "2015-03-06T14:33:57" - value: 3.1 - - timestamp: "2015-03-06T14:34:12" - value: 2 - status: 202 - - - name: get metric list to get aggregates - url: /v1/metric - - - name: get measure aggregates by granularity not float - url: /v1/aggregation/metric?metric=$RESPONSE['$[0].id']&metric=$RESPONSE['$[1].id']&granularity=foobar - status: 400 - - - name: get metric list to get aggregates 2 - url: /v1/metric - - - name: get measure aggregates by granularity - url: /v1/aggregation/metric?metric=$RESPONSE['$[0].id']&metric=$RESPONSE['$[1].id']&granularity=1 - poll: - count: 10 - delay: 1 - response_json_paths: - $: - - ['2015-03-06T14:33:57+00:00', 1.0, 23.1] - - ['2015-03-06T14:34:12+00:00', 1.0, 7.0] - - - name: get metric list to push metric 3 - url: /v1/metric - - - name: get measure aggregates by granularity with 
timestamps - url: /v1/aggregation/metric?metric=$RESPONSE['$[0].id']&metric=$RESPONSE['$[1].id']&start=2015-03-06T15:33:57%2B01:00&stop=2015-03-06T15:34:00%2B01:00 - poll: - count: 10 - delay: 1 - response_json_paths: - $: - - ['2015-03-06T14:30:00+00:00', 300.0, 15.05] - - ['2015-03-06T14:33:57+00:00', 1.0, 23.1] diff --git a/gnocchi/tests/gabbi/gabbits/archive.yaml b/gnocchi/tests/gabbi/gabbits/archive.yaml index 36e22b1d..4b59ab02 100644 --- a/gnocchi/tests/gabbi/gabbits/archive.yaml +++ b/gnocchi/tests/gabbi/gabbits/archive.yaml @@ -18,9 +18,9 @@ tests: # http://haacked.com/archive/2009/06/25/json-hijacking.aspx/ # The caveats point out that this is only an issue if your data is # sensitive, which in this case...? -# However, the api-wg has made it recommendation that collections -# should be returned as an object with a named key with a value of -# a list as follows: {"archive_policies": [...]} +# However, the api-wg has made it recommedation that collections +# be returned as an object with a named key with a value of a list +# as follows: {"archive_policies": [...]} # This allows for extensibility such as future support for pagination. # Do we care? 
diff --git a/gnocchi/tests/gabbi/gabbits/batch_measures.yaml b/gnocchi/tests/gabbi/gabbits/batch_measures.yaml deleted file mode 100644 index 77c33051..00000000 --- a/gnocchi/tests/gabbi/gabbits/batch_measures.yaml +++ /dev/null @@ -1,88 +0,0 @@ -fixtures: - - ConfigFixture - -tests: - - name: create archive policy - desc: for later use - url: /v1/archive_policy - method: POST - request_headers: - content-type: application/json - x-roles: admin - data: - name: simple - definition: - - granularity: 1 second - status: 201 - - - name: create metric - url: /v1/metric - request_headers: - content-type: application/json - method: post - data: - archive_policy_name: simple - status: 201 - - - name: push measurements to metric - url: /v1/batch/measures - request_headers: - content-type: application/json - method: post - data: - $RESPONSE['$.id']: - - timestamp: "2015-03-06T14:33:57" - value: 43.1 - - timestamp: "2015-03-06T14:34:12" - value: 12 - status: 202 - - - name: push measurements to unknown metrics - url: /v1/batch/measures - request_headers: - content-type: application/json - method: post - data: - 37AEC8B7-C0D9-445B-8AB9-D3C6312DCF5C: - - timestamp: "2015-03-06T14:33:57" - value: 43.1 - - timestamp: "2015-03-06T14:34:12" - value: 12 - 37AEC8B7-C0D9-445B-8AB9-D3C6312DCF5D: - - timestamp: "2015-03-06T14:33:57" - value: 43.1 - - timestamp: "2015-03-06T14:34:12" - value: 12 - status: 400 - response_strings: - - "Unknown metrics: 37aec8b7-c0d9-445b-8ab9-d3c6312dcf5c, 37aec8b7-c0d9-445b-8ab9-d3c6312dcf5d" - - - name: create second metric - url: /v1/metric - request_headers: - content-type: application/json - method: post - data: - archive_policy_name: simple - status: 201 - - - name: list metrics - url: /v1/metric - - - name: push measurements to two metrics - url: /v1/batch/measures - request_headers: - content-type: application/json - method: post - data: - $RESPONSE['$[0].id']: - - timestamp: "2015-03-06T14:33:57" - value: 43.1 - - timestamp: "2015-03-06T14:34:12" - 
value: 12 - $RESPONSE['$[1].id']: - - timestamp: "2015-03-06T14:33:57" - value: 43.1 - - timestamp: "2015-03-06T14:34:12" - value: 12 - status: 202 \ No newline at end of file diff --git a/gnocchi/tests/gabbi/gabbits/metric_granularity.yaml b/gnocchi/tests/gabbi/gabbits/metric_granularity.yaml index e132190c..232a8adb 100644 --- a/gnocchi/tests/gabbi/gabbits/metric_granularity.yaml +++ b/gnocchi/tests/gabbi/gabbits/metric_granularity.yaml @@ -42,9 +42,9 @@ tests: - name: get measurements invalid granularity url: /v1/metric/$RESPONSE['$[0].id']/measures?granularity=42 - status: 404 - response_strings: - - Granularity '42.0' for metric $RESPONSE['$[0].id'] does not exist + status: 200 + response_json_paths: + $: [] - name: get metric list for granularity url: /v1/metric diff --git a/gnocchi/tests/gabbi/gabbits/resource.yaml b/gnocchi/tests/gabbi/gabbits/resource.yaml index f259e5a3..71c72e65 100644 --- a/gnocchi/tests/gabbi/gabbits/resource.yaml +++ b/gnocchi/tests/gabbi/gabbits/resource.yaml @@ -54,9 +54,9 @@ tests: redirects: true response_json_paths: $.version: "1.0" - $.links.`len`: 10 + $.links.`len`: 9 $.links[0].href: $SCHEME://$NETLOC/v1 - $.links[7].href: $SCHEME://$NETLOC/v1/resource + $.links[7].href: $SCHEME://$NETLOC/v1/search - name: root of resource url: /v1/resource diff --git a/gnocchi/tests/gabbi/test_gabbi_live.py b/gnocchi/tests/gabbi/test_gabbi_live.py index 63bc7c08..b9425631 100644 --- a/gnocchi/tests/gabbi/test_gabbi_live.py +++ b/gnocchi/tests/gabbi/test_gabbi_live.py @@ -44,5 +44,5 @@ def load_tests(loader, tests, pattern): host=parsed_url.hostname, port=port, prefix=prefix) - elif os.getenv("GABBI_LIVE"): + elif os.getenv("GABBI_LIVE_FAIL_IF_NO_TEST"): raise RuntimeError('"GNOCCHI_SERVICE_URL" is not set') diff --git a/gnocchi/tests/indexer/sqlalchemy/test_migrations.py b/gnocchi/tests/indexer/sqlalchemy/test_migrations.py index 63f22f47..0b917e39 100644 --- a/gnocchi/tests/indexer/sqlalchemy/test_migrations.py +++ 
b/gnocchi/tests/indexer/sqlalchemy/test_migrations.py @@ -40,7 +40,7 @@ class ModelsMigrationsSync( return sqlalchemy_base.Base.metadata def get_engine(self): - return self.index.get_engine() + return self.index.engine_facade.get_engine() @staticmethod def db_sync(engine): diff --git a/gnocchi/tests/storage/test_carbonara.py b/gnocchi/tests/storage/test_carbonara.py index 64acb469..b0fddc65 100644 --- a/gnocchi/tests/storage/test_carbonara.py +++ b/gnocchi/tests/storage/test_carbonara.py @@ -1,6 +1,6 @@ # -*- encoding: utf-8 -*- # -# Copyright © 2015 Red Hat, Inc. +# Copyright © 2015 eNovance # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain @@ -13,100 +13,66 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. -import datetime -import uuid +from oslotest import base -import mock - -from gnocchi import carbonara from gnocchi import storage -from gnocchi.storage import _carbonara -from gnocchi.tests import base as tests_base -from gnocchi import utils - - -class TestCarbonaraMigration(tests_base.TestCase): - def setUp(self): - super(TestCarbonaraMigration, self).setUp() - if not isinstance(self.storage, _carbonara.CarbonaraBasedStorage): - self.skipTest("This driver is not based on Carbonara") - self.metric = storage.Metric(uuid.uuid4(), - self.archive_policies['low']) - - archive = carbonara.TimeSerieArchive.from_definitions( - [(v.granularity, v.points) - for v in self.metric.archive_policy.definition] - ) - archive_max = carbonara.TimeSerieArchive.from_definitions( - [(v.granularity, v.points) - for v in self.metric.archive_policy.definition], - aggregation_method='max', +class TestMeasureQuery(base.BaseTestCase): + def test_equal(self): + q = storage.MeasureQuery({"=": 4}) + self.assertTrue(q(4)) + self.assertFalse(q(40)) + + def test_gt(self): + 
q = storage.MeasureQuery({">": 4}) + self.assertTrue(q(40)) + self.assertFalse(q(4)) + + def test_and(self): + q = storage.MeasureQuery({"and": [{">": 4}, {"<": 10}]}) + self.assertTrue(q(5)) + self.assertFalse(q(40)) + self.assertFalse(q(1)) + + def test_or(self): + q = storage.MeasureQuery({"or": [{"=": 4}, {"=": 10}]}) + self.assertTrue(q(4)) + self.assertTrue(q(10)) + self.assertFalse(q(-1)) + + def test_modulo(self): + q = storage.MeasureQuery({"=": [{"%": 5}, 0]}) + self.assertTrue(q(5)) + self.assertTrue(q(10)) + self.assertFalse(q(-1)) + self.assertFalse(q(6)) + + def test_math(self): + q = storage.MeasureQuery( + { + u"and": [ + # v+5 is bigger 0 + {u"≥": [{u"+": 5}, 0]}, + # v-6 is not 5 + {u"≠": [5, {u"-": 6}]}, + ], + } ) - - for a in (archive, archive_max): - a.update(carbonara.TimeSerie.from_data( - [datetime.datetime(2014, 1, 1, 12, 0, 0), - datetime.datetime(2014, 1, 1, 12, 0, 4), - datetime.datetime(2014, 1, 1, 12, 0, 9)], - [4, 5, 6])) - - self.storage._create_metric(self.metric) - - self.storage._store_metric_archive( - self.metric, - archive.agg_timeseries[0].aggregation_method, - archive.serialize()) - - self.storage._store_metric_archive( - self.metric, - archive_max.agg_timeseries[0].aggregation_method, - archive_max.serialize()) - - def upgrade(self): - with mock.patch.object(self.index, 'list_metrics') as f: - f.return_value = [self.metric] - self.storage.upgrade(self.index) - - def test_get_measures(self): - self.assertEqual([ - (utils.datetime_utc(2014, 1, 1), 86400, 5), - (utils.datetime_utc(2014, 1, 1, 12), 3600, 5), - (utils.datetime_utc(2014, 1, 1, 12), 300, 5) - ], self.storage.get_measures(self.metric)) - - self.assertEqual([ - (utils.datetime_utc(2014, 1, 1), 86400, 6), - (utils.datetime_utc(2014, 1, 1, 12), 3600, 6), - (utils.datetime_utc(2014, 1, 1, 12), 300, 6) - ], self.storage.get_measures(self.metric, aggregation='max')) - - # This is to make gordc safer - self.assertIsNotNone(self.storage._get_metric_archive( - self.metric, 
"mean")) - - self.upgrade() - - self.assertEqual([ - (utils.datetime_utc(2014, 1, 1), 86400, 5), - (utils.datetime_utc(2014, 1, 1, 12), 3600, 5), - (utils.datetime_utc(2014, 1, 1, 12), 300, 5) - ], self.storage.get_measures(self.metric)) - - self.assertEqual([ - (utils.datetime_utc(2014, 1, 1), 86400, 6), - (utils.datetime_utc(2014, 1, 1, 12), 3600, 6), - (utils.datetime_utc(2014, 1, 1, 12), 300, 6) - ], self.storage.get_measures(self.metric, aggregation='max')) - - self.assertRaises( - storage.AggregationDoesNotExist, - self.storage._get_metric_archive, - self.metric, "mean") - - def test_delete_metric_not_upgraded(self): - # Make sure that we delete everything (e.g. objects + container) - # correctly even if the metric has not been upgraded. - self.storage.delete_metric(self.metric) - self.assertEqual([], self.storage.get_measures(self.metric)) + self.assertTrue(q(5)) + self.assertTrue(q(10)) + self.assertFalse(q(11)) + + def test_empty(self): + q = storage.MeasureQuery({}) + self.assertFalse(q(5)) + self.assertFalse(q(10)) + + def test_bad_format(self): + self.assertRaises(storage.InvalidQuery, + storage.MeasureQuery, + {"foo": [{"=": 4}, {"=": 10}]}) + + self.assertRaises(storage.InvalidQuery, + storage.MeasureQuery, + {"=": [1, 2, 3]}) diff --git a/gnocchi/tests/test_archive_policy.py b/gnocchi/tests/test_archive_policy.py index 3b2afb08..d1ae6b63 100644 --- a/gnocchi/tests/test_archive_policy.py +++ b/gnocchi/tests/test_archive_policy.py @@ -28,8 +28,7 @@ class TestArchivePolicy(base.BaseTestCase): ["*"]) def test_aggregation_methods(self): - conf = service.prepare_service([], - default_config_files=[]) + conf = service.prepare_service([]) ap = archive_policy.ArchivePolicy("foobar", 0, diff --git a/gnocchi/tests/test_carbonara.py b/gnocchi/tests/test_carbonara.py index 6c78eb8a..b7c42415 100644 --- a/gnocchi/tests/test_carbonara.py +++ b/gnocchi/tests/test_carbonara.py @@ -2,6 +2,8 @@ # # Copyright © 2014-2015 eNovance # +# Authors: Julien Danjou +# # 
Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at @@ -14,12 +16,11 @@ # License for the specific language governing permissions and limitations # under the License. import datetime -import math +import os import subprocess import tempfile import fixtures -from oslo_utils import timeutils from oslotest import base # TODO(jd) We shouldn't use pandas here import pandas @@ -31,14 +32,13 @@ from gnocchi import carbonara class TestBoundTimeSerie(base.BaseTestCase): @staticmethod def test_base(): - carbonara.BoundTimeSerie.from_data( - [datetime.datetime(2014, 1, 1, 12, 0, 0), - datetime.datetime(2014, 1, 1, 12, 0, 4), - datetime.datetime(2014, 1, 1, 12, 0, 9)], - [3, 5, 6]) + carbonara.BoundTimeSerie([datetime.datetime(2014, 1, 1, 12, 0, 0), + datetime.datetime(2014, 1, 1, 12, 0, 4), + datetime.datetime(2014, 1, 1, 12, 0, 9)], + [3, 5, 6]) def test_block_size(self): - ts = carbonara.BoundTimeSerie.from_data( + ts = carbonara.BoundTimeSerie( [datetime.datetime(2014, 1, 1, 12, 0, 0), datetime.datetime(2014, 1, 1, 12, 0, 4), datetime.datetime(2014, 1, 1, 12, 0, 9)], @@ -50,7 +50,7 @@ class TestBoundTimeSerie(base.BaseTestCase): self.assertEqual(2, len(ts)) def test_block_size_back_window(self): - ts = carbonara.BoundTimeSerie.from_data( + ts = carbonara.BoundTimeSerie( [datetime.datetime(2014, 1, 1, 12, 0, 0), datetime.datetime(2014, 1, 1, 12, 0, 4), datetime.datetime(2014, 1, 1, 12, 0, 9)], @@ -63,7 +63,7 @@ class TestBoundTimeSerie(base.BaseTestCase): self.assertEqual(3, len(ts)) def test_block_size_unordered(self): - ts = carbonara.BoundTimeSerie.from_data( + ts = carbonara.BoundTimeSerie( [datetime.datetime(2014, 1, 1, 12, 0, 0), datetime.datetime(2014, 1, 1, 12, 0, 9), datetime.datetime(2014, 1, 1, 12, 0, 5)], @@ -75,7 +75,7 @@ class TestBoundTimeSerie(base.BaseTestCase): self.assertEqual(2, len(ts)) def test_duplicate_timestamps(self): - ts = 
carbonara.BoundTimeSerie.from_data( + ts = carbonara.BoundTimeSerie( [datetime.datetime(2014, 1, 1, 12, 0, 0), datetime.datetime(2014, 1, 1, 12, 0, 9), datetime.datetime(2014, 1, 1, 12, 0, 9)], @@ -99,50 +99,11 @@ class TestBoundTimeSerie(base.BaseTestCase): class TestAggregatedTimeSerie(base.BaseTestCase): @staticmethod def test_base(): - carbonara.AggregatedTimeSerie.from_data( + carbonara.AggregatedTimeSerie( [datetime.datetime(2014, 1, 1, 12, 0, 0), datetime.datetime(2014, 1, 1, 12, 0, 4), datetime.datetime(2014, 1, 1, 12, 0, 9)], [3, 5, 6]) - carbonara.AggregatedTimeSerie.from_data( - [datetime.datetime(2014, 1, 1, 12, 0, 0), - datetime.datetime(2014, 1, 1, 12, 0, 4), - datetime.datetime(2014, 1, 1, 12, 0, 9)], - [3, 5, 6], sampling=3) - carbonara.AggregatedTimeSerie.from_data( - [datetime.datetime(2014, 1, 1, 12, 0, 0), - datetime.datetime(2014, 1, 1, 12, 0, 4), - datetime.datetime(2014, 1, 1, 12, 0, 9)], - [3, 5, 6], sampling="4s") - - def test_fetch_basic(self): - ts = carbonara.AggregatedTimeSerie.from_data( - [datetime.datetime(2014, 1, 1, 12, 0, 0), - datetime.datetime(2014, 1, 1, 12, 0, 4), - datetime.datetime(2014, 1, 1, 12, 0, 9)], - [3, 5, 6], - sampling="1s") - self.assertEqual( - [(datetime.datetime(2014, 1, 1, 12), 1, 3), - (datetime.datetime(2014, 1, 1, 12, 0, 4), 1, 5), - (datetime.datetime(2014, 1, 1, 12, 0, 9), 1, 6)], - ts.fetch()) - self.assertEqual( - [(datetime.datetime(2014, 1, 1, 12, 0, 4), 1, 5), - (datetime.datetime(2014, 1, 1, 12, 0, 9), 1, 6)], - ts.fetch(from_timestamp=datetime.datetime(2014, 1, 1, 12, 0, 4))) - self.assertEqual( - [(datetime.datetime(2014, 1, 1, 12, 0, 4), 1, 5), - (datetime.datetime(2014, 1, 1, 12, 0, 9), 1, 6)], - ts.fetch( - from_timestamp=timeutils.parse_isotime( - "2014-01-01 12:00:04"))) - self.assertEqual( - [(datetime.datetime(2014, 1, 1, 12, 0, 4), 1, 5), - (datetime.datetime(2014, 1, 1, 12, 0, 9), 1, 6)], - ts.fetch( - from_timestamp=timeutils.parse_isotime( - "2014-01-01 13:00:04+01:00"))) def 
test_bad_percentile(self): for bad_percentile in ('0pct', '100pct', '-1pct', '123pct'): @@ -187,7 +148,7 @@ class TestAggregatedTimeSerie(base.BaseTestCase): def test_different_length_in_timestamps_and_data(self): self.assertRaises(ValueError, - carbonara.AggregatedTimeSerie.from_data, + carbonara.AggregatedTimeSerie, [datetime.datetime(2014, 1, 1, 12, 0, 0), datetime.datetime(2014, 1, 1, 12, 0, 4), datetime.datetime(2014, 1, 1, 12, 0, 9)], @@ -196,7 +157,7 @@ class TestAggregatedTimeSerie(base.BaseTestCase): def test_max_size(self): ts = carbonara.AggregatedTimeSerie( max_size=2) - ts.update(carbonara.TimeSerie.from_data( + ts.update(carbonara.TimeSerie( [datetime.datetime(2014, 1, 1, 12, 0, 0), datetime.datetime(2014, 1, 1, 12, 0, 4), datetime.datetime(2014, 1, 1, 12, 0, 9)], @@ -207,7 +168,7 @@ class TestAggregatedTimeSerie(base.BaseTestCase): def test_down_sampling(self): ts = carbonara.AggregatedTimeSerie(sampling='5Min') - ts.update(carbonara.TimeSerie.from_data( + ts.update(carbonara.TimeSerie( [datetime.datetime(2014, 1, 1, 12, 0, 0), datetime.datetime(2014, 1, 1, 12, 0, 4), datetime.datetime(2014, 1, 1, 12, 0, 9)], @@ -219,7 +180,7 @@ class TestAggregatedTimeSerie(base.BaseTestCase): ts = carbonara.AggregatedTimeSerie( sampling='1Min', max_size=2) - ts.update(carbonara.TimeSerie.from_data( + ts.update(carbonara.TimeSerie( [datetime.datetime(2014, 1, 1, 12, 0, 0), datetime.datetime(2014, 1, 1, 12, 1, 4), datetime.datetime(2014, 1, 1, 12, 1, 9), @@ -234,7 +195,7 @@ class TestAggregatedTimeSerie(base.BaseTestCase): sampling='1Min', max_size=2, aggregation_method='max') - ts.update(carbonara.TimeSerie.from_data( + ts.update(carbonara.TimeSerie( [datetime.datetime(2014, 1, 1, 12, 0, 0), datetime.datetime(2014, 1, 1, 12, 1, 4), datetime.datetime(2014, 1, 1, 12, 1, 9), @@ -249,7 +210,7 @@ class TestAggregatedTimeSerie(base.BaseTestCase): sampling='1Min', max_size=2, aggregation_method='max') - ts.update(carbonara.TimeSerie.from_data( + 
ts.update(carbonara.TimeSerie( [datetime.datetime(2014, 1, 1, 12, 0, 0), datetime.datetime(2014, 1, 1, 12, 1, 4), datetime.datetime(2014, 1, 1, 12, 1, 9), @@ -258,153 +219,23 @@ class TestAggregatedTimeSerie(base.BaseTestCase): ts2 = carbonara.AggregatedTimeSerie.from_dict(ts.to_dict()) self.assertEqual(ts, ts2) - def test_aggregated_different_archive_no_overlap(self): - tsc1 = carbonara.AggregatedTimeSerie(sampling=60, max_size=50) - tsb1 = carbonara.BoundTimeSerie(block_size=tsc1.sampling) - tsc2 = carbonara.AggregatedTimeSerie(sampling=60, max_size=50) - tsb2 = carbonara.BoundTimeSerie(block_size=tsc2.sampling) - tsb1.set_values([(datetime.datetime(2014, 1, 1, 11, 46, 4), 4)], - before_truncate_callback=tsc1.update) - tsb2.set_values([(datetime.datetime(2014, 1, 1, 9, 1, 4), 4)], - before_truncate_callback=tsc2.update) +class TestTimeSerieArchive(base.BaseTestCase): - dtfrom = datetime.datetime(2014, 1, 1, 11, 0, 0) - self.assertRaises(carbonara.UnAggregableTimeseries, - carbonara.AggregatedTimeSerie.aggregated, - [tsc1, tsc2], from_timestamp=dtfrom) + def test_empty_update(self): + tsc = carbonara.TimeSerieArchive.from_definitions( + [(60, 10), + (300, 6)]) + tsb = carbonara.BoundTimeSerie(block_size=tsc.max_block_size) + tsb.set_values([], before_truncate_callback=tsc.update) - def test_aggregated_different_archive_no_overlap2(self): - tsc1 = carbonara.AggregatedTimeSerie(sampling=60, max_size=50) - tsb1 = carbonara.BoundTimeSerie(block_size=tsc1.sampling) - tsc2 = carbonara.AggregatedTimeSerie(sampling=60, max_size=50) - - tsb1.set_values([(datetime.datetime(2014, 1, 1, 12, 3, 0), 4)], - before_truncate_callback=tsc1.update) - self.assertRaises(carbonara.UnAggregableTimeseries, - carbonara.AggregatedTimeSerie.aggregated, - [tsc1, tsc2]) - - def test_aggregated_different_archive_overlap(self): - tsc1 = carbonara.AggregatedTimeSerie(sampling=60, max_size=10) - tsb1 = carbonara.BoundTimeSerie(block_size=tsc1.sampling) - tsc2 = 
carbonara.AggregatedTimeSerie(sampling=60, max_size=10) - tsb2 = carbonara.BoundTimeSerie(block_size=tsc2.sampling) - - # NOTE(sileht): minute 8 is missing in both and - # minute 7 in tsc2 too, but it looks like we have - # enough point to do the aggregation - tsb1.set_values([ - (datetime.datetime(2014, 1, 1, 11, 0, 0), 4), - (datetime.datetime(2014, 1, 1, 12, 1, 0), 3), - (datetime.datetime(2014, 1, 1, 12, 2, 0), 2), - (datetime.datetime(2014, 1, 1, 12, 3, 0), 4), - (datetime.datetime(2014, 1, 1, 12, 4, 0), 2), - (datetime.datetime(2014, 1, 1, 12, 5, 0), 3), - (datetime.datetime(2014, 1, 1, 12, 6, 0), 4), - (datetime.datetime(2014, 1, 1, 12, 7, 0), 10), - (datetime.datetime(2014, 1, 1, 12, 9, 0), 2), - ], before_truncate_callback=tsc1.update) - - tsb2.set_values([ - (datetime.datetime(2014, 1, 1, 12, 1, 0), 3), - (datetime.datetime(2014, 1, 1, 12, 2, 0), 4), - (datetime.datetime(2014, 1, 1, 12, 3, 0), 4), - (datetime.datetime(2014, 1, 1, 12, 4, 0), 6), - (datetime.datetime(2014, 1, 1, 12, 5, 0), 3), - (datetime.datetime(2014, 1, 1, 12, 6, 0), 6), - (datetime.datetime(2014, 1, 1, 12, 9, 0), 2), - (datetime.datetime(2014, 1, 1, 12, 11, 0), 2), - (datetime.datetime(2014, 1, 1, 12, 12, 0), 2), - ], before_truncate_callback=tsc2.update) - - dtfrom = datetime.datetime(2014, 1, 1, 12, 0, 0) - dtto = datetime.datetime(2014, 1, 1, 12, 10, 0) - - # By default we require 100% of point that overlap - # so that fail - self.assertRaises(carbonara.UnAggregableTimeseries, - carbonara.AggregatedTimeSerie.aggregated, - [tsc1, tsc2], from_timestamp=dtfrom, - to_timestamp=dtto) - - # Retry with 80% and it works - output = carbonara.AggregatedTimeSerie.aggregated([ - tsc1, tsc2], from_timestamp=dtfrom, to_timestamp=dtto, - needed_percent_of_overlap=80.0) - - self.assertEqual([ - (pandas.Timestamp('2014-01-01 12:01:00'), 60.0, 3.0), - (pandas.Timestamp('2014-01-01 12:02:00'), 60.0, 3.0), - (pandas.Timestamp('2014-01-01 12:03:00'), 60.0, 4.0), - (pandas.Timestamp('2014-01-01 
12:04:00'), 60.0, 4.0), - (pandas.Timestamp('2014-01-01 12:05:00'), 60.0, 3.0), - (pandas.Timestamp('2014-01-01 12:06:00'), 60.0, 5.0), - (pandas.Timestamp('2014-01-01 12:07:00'), 60.0, 10.0), - (pandas.Timestamp('2014-01-01 12:09:00'), 60.0, 2.0), - ], output) - - def test_aggregated_different_archive_overlap_edge_missing1(self): - tsc1 = carbonara.AggregatedTimeSerie(sampling=60, max_size=10) - tsb1 = carbonara.BoundTimeSerie(block_size=tsc1.sampling) - tsc2 = carbonara.AggregatedTimeSerie(sampling=60, max_size=10) - tsb2 = carbonara.BoundTimeSerie(block_size=tsc2.sampling) - - tsb1.set_values([ - (datetime.datetime(2014, 1, 1, 12, 3, 0), 9), - (datetime.datetime(2014, 1, 1, 12, 4, 0), 1), - (datetime.datetime(2014, 1, 1, 12, 5, 0), 2), - (datetime.datetime(2014, 1, 1, 12, 6, 0), 7), - (datetime.datetime(2014, 1, 1, 12, 7, 0), 5), - (datetime.datetime(2014, 1, 1, 12, 8, 0), 3), - ], before_truncate_callback=tsc1.update) - - tsb2.set_values([ - (datetime.datetime(2014, 1, 1, 11, 0, 0), 6), - (datetime.datetime(2014, 1, 1, 12, 1, 0), 2), - (datetime.datetime(2014, 1, 1, 12, 2, 0), 13), - (datetime.datetime(2014, 1, 1, 12, 3, 0), 24), - (datetime.datetime(2014, 1, 1, 12, 4, 0), 4), - (datetime.datetime(2014, 1, 1, 12, 5, 0), 16), - (datetime.datetime(2014, 1, 1, 12, 6, 0), 12), - ], before_truncate_callback=tsc2.update) - - # By default we require 100% of point that overlap - # but we allow that the last datapoint is missing - # of the precisest granularity - output = carbonara.AggregatedTimeSerie.aggregated([ - tsc1, tsc2], aggregation='sum') - - self.assertEqual([ - (pandas.Timestamp('2014-01-01 12:03:00'), 60.0, 33.0), - (pandas.Timestamp('2014-01-01 12:04:00'), 60.0, 5.0), - (pandas.Timestamp('2014-01-01 12:05:00'), 60.0, 18.0), - (pandas.Timestamp('2014-01-01 12:06:00'), 60.0, 19.0), - ], output) - - def test_aggregated_different_archive_overlap_edge_missing2(self): - tsc1 = carbonara.AggregatedTimeSerie(sampling=60, max_size=10) - tsb1 = 
carbonara.BoundTimeSerie(block_size=tsc1.sampling) - tsc2 = carbonara.AggregatedTimeSerie(sampling=60, max_size=10) - tsb2 = carbonara.BoundTimeSerie(block_size=tsc2.sampling) - - tsb1.set_values([ - (datetime.datetime(2014, 1, 1, 12, 3, 0), 4), - ], before_truncate_callback=tsc1.update) - - tsb2.set_values([ - (datetime.datetime(2014, 1, 1, 11, 0, 0), 4), - (datetime.datetime(2014, 1, 1, 12, 3, 0), 4), - ], before_truncate_callback=tsc2.update) - - output = carbonara.AggregatedTimeSerie.aggregated([tsc1, tsc2]) - self.assertEqual([ - (pandas.Timestamp('2014-01-01 12:03:00'), 60.0, 4.0), - ], output) + self.assertEqual([], tsc.fetch()) def test_fetch(self): - ts = carbonara.AggregatedTimeSerie(sampling=60, max_size=10) - tsb = carbonara.BoundTimeSerie(block_size=ts.sampling) + tsc = carbonara.TimeSerieArchive.from_definitions( + [(60, 10), + (300, 6)]) + tsb = carbonara.BoundTimeSerie(block_size=tsc.max_block_size) tsb.set_values([ (datetime.datetime(2014, 1, 1, 11, 46, 4), 4), @@ -422,14 +253,19 @@ class TestAggregatedTimeSerie(base.BaseTestCase): (datetime.datetime(2014, 1, 1, 12, 4, 9), 7), (datetime.datetime(2014, 1, 1, 12, 5, 1), 15), (datetime.datetime(2014, 1, 1, 12, 5, 12), 1), - (datetime.datetime(2014, 1, 1, 12, 6, 0, 2), 3), - ], before_truncate_callback=ts.update) + (datetime.datetime(2014, 1, 1, 12, 6, 0), 3), + ], before_truncate_callback=tsc.update) tsb.set_values([ - (datetime.datetime(2014, 1, 1, 12, 6), 5), - ], before_truncate_callback=ts.update) + (datetime.datetime(2014, 1, 1, 12, 5, 13), 5), + ], before_truncate_callback=tsc.update) self.assertEqual([ + (datetime.datetime(2014, 1, 1, 11, 45), 300.0, 6.0), + (datetime.datetime(2014, 1, 1, 11, 50), 300.0, 27.0), + (datetime.datetime(2014, 1, 1, 11, 55), 300.0, 5.0), + (datetime.datetime(2014, 1, 1, 12, 00), 300.0, 6.166666666666667), + (datetime.datetime(2014, 1, 1, 12, 5), 300.0, 6.0), (datetime.datetime(2014, 1, 1, 11, 54), 60.0, 4.0), (datetime.datetime(2014, 1, 1, 11, 56), 60.0, 4.0), 
(datetime.datetime(2014, 1, 1, 11, 57), 60.0, 6.0), @@ -438,31 +274,39 @@ class TestAggregatedTimeSerie(base.BaseTestCase): (datetime.datetime(2014, 1, 1, 12, 2), 60.0, 8.0), (datetime.datetime(2014, 1, 1, 12, 3), 60.0, 3.0), (datetime.datetime(2014, 1, 1, 12, 4), 60.0, 7.0), - (datetime.datetime(2014, 1, 1, 12, 5), 60.0, 8.0), - (datetime.datetime(2014, 1, 1, 12, 6), 60.0, 4.0) - ], ts.fetch()) + (datetime.datetime(2014, 1, 1, 12, 5), 60.0, 7.0), + (datetime.datetime(2014, 1, 1, 12, 6), 60.0, 3.0) + ], tsc.fetch()) self.assertEqual([ + (datetime.datetime(2014, 1, 1, 12), 300.0, 6.166666666666667), + (datetime.datetime(2014, 1, 1, 12, 5), 300.0, 6.0), (datetime.datetime(2014, 1, 1, 12, 1), 60.0, 5.5), (datetime.datetime(2014, 1, 1, 12, 2), 60.0, 8.0), (datetime.datetime(2014, 1, 1, 12, 3), 60.0, 3.0), (datetime.datetime(2014, 1, 1, 12, 4), 60.0, 7.0), - (datetime.datetime(2014, 1, 1, 12, 5), 60.0, 8.0), - (datetime.datetime(2014, 1, 1, 12, 6), 60.0, 4.0) - ], ts.fetch(datetime.datetime(2014, 1, 1, 12, 0, 0))) + (datetime.datetime(2014, 1, 1, 12, 5), 60.0, 7.0), + (datetime.datetime(2014, 1, 1, 12, 6), 60.0, 3.0) + ], tsc.fetch(datetime.datetime(2014, 1, 1, 12, 0, 0))) def test_fetch_agg_pct(self): - ts = carbonara.AggregatedTimeSerie(sampling=1, max_size=3600 * 24, - aggregation_method='90pct') - tsb = carbonara.BoundTimeSerie(block_size=ts.sampling) - + tsc = carbonara.TimeSerieArchive.from_definitions( + [(1, 3600 * 24), + (60, 24 * 60 * 30)], + aggregation_method='90pct') + tsb = carbonara.BoundTimeSerie(block_size=tsc.max_block_size) + + # NOTE(jd) What's interesting in this test is that we lack a point for + # a second, so we have an interval with no value tsb.set_values([(datetime.datetime(2014, 1, 1, 12, 0, 0), 3), (datetime.datetime(2014, 1, 1, 12, 0, 0, 123), 4), (datetime.datetime(2014, 1, 1, 12, 0, 2), 4)], - before_truncate_callback=ts.update) + before_truncate_callback=tsc.update) - result = ts.fetch(datetime.datetime(2014, 1, 1, 12, 0, 0)) + result = 
tsc.fetch(datetime.datetime(2014, 1, 1, 12, 0, 0)) reference = [ + (pandas.Timestamp('2014-01-01 12:00:00'), + 60.0, 4), (pandas.Timestamp('2014-01-01 12:00:00'), 1.0, 3.9), (pandas.Timestamp('2014-01-01 12:00:02'), @@ -478,10 +322,12 @@ class TestAggregatedTimeSerie(base.BaseTestCase): self.assertAlmostEqual(ref[2], res[2]) tsb.set_values([(datetime.datetime(2014, 1, 1, 12, 0, 2, 113), 110)], - before_truncate_callback=ts.update) + before_truncate_callback=tsc.update) - result = ts.fetch(datetime.datetime(2014, 1, 1, 12, 0, 0)) + result = tsc.fetch(datetime.datetime(2014, 1, 1, 12, 0, 0)) reference = [ + (pandas.Timestamp('2014-01-01 12:00:00'), + 60.0, 78.2), (pandas.Timestamp('2014-01-01 12:00:00'), 1.0, 3.9), (pandas.Timestamp('2014-01-01 12:00:02'), @@ -497,8 +343,10 @@ class TestAggregatedTimeSerie(base.BaseTestCase): self.assertAlmostEqual(ref[2], res[2]) def test_fetch_nano(self): - ts = carbonara.AggregatedTimeSerie(sampling=0.2, max_size=10) - tsb = carbonara.BoundTimeSerie(block_size=ts.sampling) + tsc = carbonara.TimeSerieArchive.from_definitions( + [(0.2, 10), + (0.5, 6)]) + tsb = carbonara.BoundTimeSerie(block_size=tsc.max_block_size) tsb.set_values([ (datetime.datetime(2014, 1, 1, 11, 46, 0, 200123), 4), @@ -506,78 +354,95 @@ class TestAggregatedTimeSerie(base.BaseTestCase): (datetime.datetime(2014, 1, 1, 11, 47, 0, 323154), 50), (datetime.datetime(2014, 1, 1, 11, 48, 0, 590903), 4), (datetime.datetime(2014, 1, 1, 11, 48, 0, 903291), 4), - ], before_truncate_callback=ts.update) + ], before_truncate_callback=tsc.update) tsb.set_values([ (datetime.datetime(2014, 1, 1, 11, 48, 0, 821312), 5), - ], before_truncate_callback=ts.update) + ], before_truncate_callback=tsc.update) self.assertEqual([ + (datetime.datetime(2014, 1, 1, 11, 46), 0.5, 6.0), + (datetime.datetime(2014, 1, 1, 11, 47), 0.5, 50.0), + (datetime.datetime(2014, 1, 1, 11, 48, 0, 500000), 0.5, + 4.333333333333333), (datetime.datetime(2014, 1, 1, 11, 46, 0, 200000), 0.2, 6.0), 
(datetime.datetime(2014, 1, 1, 11, 47, 0, 200000), 0.2, 50.0), (datetime.datetime(2014, 1, 1, 11, 48, 0, 400000), 0.2, 4.0), (datetime.datetime(2014, 1, 1, 11, 48, 0, 800000), 0.2, 4.5) - ], ts.fetch()) + ], tsc.fetch()) def test_fetch_agg_std(self): - ts = carbonara.AggregatedTimeSerie(sampling=60, max_size=60, - aggregation_method='std') - tsb = carbonara.BoundTimeSerie(block_size=ts.sampling) + tsc = carbonara.TimeSerieArchive.from_definitions( + [(60, 60), + (300, 24)], + aggregation_method='std') + tsb = carbonara.BoundTimeSerie(block_size=tsc.max_block_size) tsb.set_values([(datetime.datetime(2014, 1, 1, 12, 0, 0), 3), (datetime.datetime(2014, 1, 1, 12, 1, 4), 4), (datetime.datetime(2014, 1, 1, 12, 1, 9), 7), (datetime.datetime(2014, 1, 1, 12, 2, 1), 15), (datetime.datetime(2014, 1, 1, 12, 2, 12), 1)], - before_truncate_callback=ts.update) + before_truncate_callback=tsc.update) self.assertEqual([ + (pandas.Timestamp('2014-01-01 12:00:00'), + 300.0, 5.4772255750516612), (pandas.Timestamp('2014-01-01 12:01:00'), 60.0, 2.1213203435596424), (pandas.Timestamp('2014-01-01 12:02:00'), 60.0, 9.8994949366116654), - ], ts.fetch(datetime.datetime(2014, 1, 1, 12, 0, 0))) + ], tsc.fetch(datetime.datetime(2014, 1, 1, 12, 0, 0))) tsb.set_values([(datetime.datetime(2014, 1, 1, 12, 2, 13), 110)], - before_truncate_callback=ts.update) + before_truncate_callback=tsc.update) self.assertEqual([ + (pandas.Timestamp('2014-01-01 12:00:00'), + 300.0, 42.739521132865619), (pandas.Timestamp('2014-01-01 12:01:00'), 60.0, 2.1213203435596424), (pandas.Timestamp('2014-01-01 12:02:00'), 60.0, 59.304300012730948), - ], ts.fetch(datetime.datetime(2014, 1, 1, 12, 0, 0))) + ], tsc.fetch(datetime.datetime(2014, 1, 1, 12, 0, 0))) def test_fetch_agg_max(self): - ts = carbonara.AggregatedTimeSerie(sampling=60, max_size=60, - aggregation_method='max') - tsb = carbonara.BoundTimeSerie(block_size=ts.sampling) + tsc = carbonara.TimeSerieArchive.from_definitions( + [(60, 60), + (300, 24)], + 
aggregation_method='max') + tsb = carbonara.BoundTimeSerie(block_size=tsc.max_block_size) tsb.set_values([(datetime.datetime(2014, 1, 1, 12, 0, 0), 3), (datetime.datetime(2014, 1, 1, 12, 1, 4), 4), (datetime.datetime(2014, 1, 1, 12, 1, 9), 7), (datetime.datetime(2014, 1, 1, 12, 2, 1), 15), (datetime.datetime(2014, 1, 1, 12, 2, 12), 1)], - before_truncate_callback=ts.update) + before_truncate_callback=tsc.update) self.assertEqual([ + (pandas.Timestamp('2014-01-01 12:00:00'), 300.0, 15), (pandas.Timestamp('2014-01-01 12:00:00'), 60.0, 3), (pandas.Timestamp('2014-01-01 12:01:00'), 60.0, 7), (pandas.Timestamp('2014-01-01 12:02:00'), 60.0, 15), - ], ts.fetch(datetime.datetime(2014, 1, 1, 12, 0, 0))) + ], tsc.fetch(datetime.datetime(2014, 1, 1, 12, 0, 0))) tsb.set_values([(datetime.datetime(2014, 1, 1, 12, 2, 13), 110)], - before_truncate_callback=ts.update) + before_truncate_callback=tsc.update) self.assertEqual([ + (pandas.Timestamp('2014-01-01 12:00:00'), 300.0, 110), (pandas.Timestamp('2014-01-01 12:00:00'), 60.0, 3), (pandas.Timestamp('2014-01-01 12:01:00'), 60.0, 7), (pandas.Timestamp('2014-01-01 12:02:00'), 60.0, 110), - ], ts.fetch(datetime.datetime(2014, 1, 1, 12, 0, 0))) + ], tsc.fetch(datetime.datetime(2014, 1, 1, 12, 0, 0))) def test_serialize(self): - ts = carbonara.AggregatedTimeSerie(sampling=0.5) - tsb = carbonara.BoundTimeSerie(block_size=ts.sampling) + tsc = carbonara.TimeSerieArchive.from_definitions( + [(0.5, None), + (2, None)]) + + tsb = carbonara.BoundTimeSerie(block_size=tsc.max_block_size) tsb.set_values([ (datetime.datetime(2014, 1, 1, 12, 0, 0, 1234), 3), @@ -585,14 +450,43 @@ class TestAggregatedTimeSerie(base.BaseTestCase): (datetime.datetime(2014, 1, 1, 12, 1, 4, 234), 5), (datetime.datetime(2014, 1, 1, 12, 1, 9, 32), 7), (datetime.datetime(2014, 1, 1, 12, 2, 12, 532), 1), - ], before_truncate_callback=ts.update) - - self.assertEqual(ts, - carbonara.AggregatedTimeSerie.unserialize( - ts.serialize())) + ], before_truncate_callback=tsc.update) 
+ + self.assertEqual(tsc, + carbonara.TimeSerieArchive.unserialize( + tsc.serialize())) + + def test_from_dict_resampling_stddev(self): + d = {'timeserie': {'values': {u'2013-01-01 23:45:01.182000': 1.0, + u'2013-01-01 23:45:02.975000': 2.0, + u'2013-01-01 23:45:03.689000': 3.0, + u'2013-01-01 23:45:04.292000': 4.0, + u'2013-01-01 23:45:05.416000': 5.0, + u'2013-01-01 23:45:06.995000': 6.0, + u'2013-01-01 23:45:07.065000': 7.0, + u'2013-01-01 23:45:08.634000': 8.0, + u'2013-01-01 23:45:09.572000': 9.0, + u'2013-01-01 23:45:10.672000': 10.0}, + 'timespan': u'120S'}, + 'archives': [{'aggregation_method': u'std', + 'values': {u'2013-01-01 23:40:00': + 3.0276503540974917, + u'2013-01-01 23:45:00': + 3.0276503540974917}, + 'max_size': 3600, + 'sampling': u'60S'}]} + timeseries = carbonara.TimeSerieArchive.from_dict(d) + measure = timeseries.fetch() + self.assertEqual(2, len(measure)) + measure = timeseries.fetch('2013-01-01 23:45:00', + '2013-01-01 23:46:00') + self.assertEqual(pandas.Timestamp('2013-01-01 23:45:00'), + measure[0][0]) + self.assertAlmostEquals(measure[0][2], 3.0276503540974917) def test_no_truncation(self): - ts = carbonara.AggregatedTimeSerie(sampling=60) + ts = carbonara.TimeSerieArchive.from_definitions( + [(60, None)]) tsb = carbonara.BoundTimeSerie() for i in six.moves.range(1, 11): @@ -607,11 +501,12 @@ class TestAggregatedTimeSerie(base.BaseTestCase): def test_back_window(self): """Back window testing. - Test the back window on an archive is not longer than the window we + Test the the back window on an archive is not longer than the window we aggregate on. 
""" - ts = carbonara.AggregatedTimeSerie(sampling=1, max_size=60) - tsb = carbonara.BoundTimeSerie(block_size=ts.sampling) + ts = carbonara.TimeSerieArchive.from_definitions( + [(1, 60)]) + tsb = carbonara.BoundTimeSerie(block_size=ts.max_block_size) tsb.set_values([ (datetime.datetime(2014, 1, 1, 12, 0, 1, 2300), 1), @@ -647,11 +542,12 @@ class TestAggregatedTimeSerie(base.BaseTestCase): def test_back_window_ignore(self): """Back window testing. - Test the back window on an archive is not longer than the window we + Test the the back window on an archive is not longer than the window we aggregate on. """ - ts = carbonara.AggregatedTimeSerie(sampling=1, max_size=60) - tsb = carbonara.BoundTimeSerie(block_size=ts.sampling) + ts = carbonara.TimeSerieArchive.from_definitions( + [(1, 60)]) + tsb = carbonara.BoundTimeSerie(block_size=1) tsb.set_values([ (datetime.datetime(2014, 1, 1, 12, 0, 1, 2300), 1), @@ -695,20 +591,14 @@ class TestAggregatedTimeSerie(base.BaseTestCase): ts.fetch()) def test_aggregated_nominal(self): - tsc1 = carbonara.AggregatedTimeSerie(sampling=60, max_size=10) - tsc12 = carbonara.AggregatedTimeSerie(sampling=300, max_size=6) - tsb1 = carbonara.BoundTimeSerie(block_size=tsc12.sampling) - tsc2 = carbonara.AggregatedTimeSerie(sampling=60, max_size=10) - tsc22 = carbonara.AggregatedTimeSerie(sampling=300, max_size=6) - tsb2 = carbonara.BoundTimeSerie(block_size=tsc22.sampling) - - def ts1_update(ts): - tsc1.update(ts) - tsc12.update(ts) - - def ts2_update(ts): - tsc2.update(ts) - tsc22.update(ts) + tsc1 = carbonara.TimeSerieArchive.from_definitions( + [(60, 10), + (300, 6)]) + tsb1 = carbonara.BoundTimeSerie(block_size=tsc1.max_block_size) + tsc2 = carbonara.TimeSerieArchive.from_definitions( + [(60, 10), + (300, 6)]) + tsb2 = carbonara.BoundTimeSerie(block_size=tsc2.max_block_size) tsb1.set_values([ (datetime.datetime(2014, 1, 1, 11, 46, 4), 4), @@ -727,7 +617,7 @@ class TestAggregatedTimeSerie(base.BaseTestCase): (datetime.datetime(2014, 1, 1, 12, 
5, 1), 15), (datetime.datetime(2014, 1, 1, 12, 5, 12), 1), (datetime.datetime(2014, 1, 1, 12, 6, 0), 3), - ], before_truncate_callback=ts1_update) + ], before_truncate_callback=tsc1.update) tsb2.set_values([ (datetime.datetime(2014, 1, 1, 11, 46, 4), 6), @@ -746,10 +636,9 @@ class TestAggregatedTimeSerie(base.BaseTestCase): (datetime.datetime(2014, 1, 1, 12, 5, 1), 10), (datetime.datetime(2014, 1, 1, 12, 5, 12), 1), (datetime.datetime(2014, 1, 1, 12, 6, 0), 1), - ], before_truncate_callback=ts2_update) + ], before_truncate_callback=tsc2.update) - output = carbonara.AggregatedTimeSerie.aggregated([tsc1, tsc12, - tsc2, tsc22]) + output = carbonara.TimeSerieArchive.aggregated([tsc1, tsc2]) self.assertEqual([ (datetime.datetime(2014, 1, 1, 11, 45), 300.0, 5.75), (datetime.datetime(2014, 1, 1, 11, 50), 300.0, 27.5), @@ -768,11 +657,192 @@ class TestAggregatedTimeSerie(base.BaseTestCase): (datetime.datetime(2014, 1, 1, 12, 6), 60.0, 2.0), ], output) + def test_aggregated_different_archive(self): + tsc1 = carbonara.TimeSerieArchive.from_definitions( + [(60, 50), + (120, 24)]) + tsc2 = carbonara.TimeSerieArchive.from_definitions( + [(180, 50), + (300, 24)]) + + self.assertRaises(carbonara.UnAggregableTimeseries, + carbonara.TimeSerieArchive.aggregated, + [tsc1, tsc2]) + + def test_aggregated_different_archive_no_overlap(self): + tsc1 = carbonara.TimeSerieArchive.from_definitions( + [(60, 50), + (120, 24)]) + tsb1 = carbonara.BoundTimeSerie(block_size=tsc1.max_block_size) + tsc2 = carbonara.TimeSerieArchive.from_definitions( + [(60, 50)]) + tsb2 = carbonara.BoundTimeSerie(block_size=tsc2.max_block_size) + + tsb1.set_values([(datetime.datetime(2014, 1, 1, 11, 46, 4), 4)], + before_truncate_callback=tsc1.update) + tsb2.set_values([(datetime.datetime(2014, 1, 1, 9, 1, 4), 4)], + before_truncate_callback=tsc2.update) + + dtfrom = datetime.datetime(2014, 1, 1, 11, 0, 0) + self.assertRaises(carbonara.UnAggregableTimeseries, + carbonara.TimeSerieArchive.aggregated, + [tsc1, tsc2], 
from_timestamp=dtfrom) + + def test_aggregated_different_archive_no_overlap2(self): + tsc1 = carbonara.TimeSerieArchive.from_definitions( + [(60, 50), + (120, 24)]) + tsb1 = carbonara.BoundTimeSerie(block_size=tsc1.max_block_size) + tsc2 = carbonara.TimeSerieArchive.from_definitions( + [(60, 50)]) + + tsb1.set_values([(datetime.datetime(2014, 1, 1, 12, 3, 0), 4)], + before_truncate_callback=tsc1.update) + self.assertRaises(carbonara.UnAggregableTimeseries, + carbonara.TimeSerieArchive.aggregated, + [tsc1, tsc2]) + + def test_aggregated_different_archive_no_overlap_but_dont_care(self): + tsc1 = carbonara.TimeSerieArchive.from_definitions( + [(60, 50), + (120, 24)]) + tsb1 = carbonara.BoundTimeSerie(block_size=tsc1.max_block_size) + tsc2 = carbonara.TimeSerieArchive.from_definitions( + [(60, 50)]) + + tsb1.set_values([(datetime.datetime(2014, 1, 1, 12, 3, 0), 4)], + before_truncate_callback=tsc1.update) + + res = carbonara.TimeSerieArchive.aggregated( + [tsc1, tsc2], needed_percent_of_overlap=0) + self.assertEqual([(pandas.Timestamp('2014-01-01 12:03:00'), + 60.0, 4.0)], res) + + def test_aggregated_different_archive_overlap(self): + tsc1 = carbonara.TimeSerieArchive.from_definitions( + [(60, 10), + (600, 6)]) + tsb1 = carbonara.BoundTimeSerie(block_size=tsc1.max_block_size) + tsc2 = carbonara.TimeSerieArchive.from_definitions( + [(60, 10)]) + tsb2 = carbonara.BoundTimeSerie(block_size=tsc2.max_block_size) + + # NOTE(sileht): minute 8 is missing in both and + # minute 7 in tsc2 too, but it looks like we have + # enough point to do the aggregation + tsb1.set_values([ + (datetime.datetime(2014, 1, 1, 11, 0, 0), 4), + (datetime.datetime(2014, 1, 1, 12, 1, 0), 3), + (datetime.datetime(2014, 1, 1, 12, 2, 0), 2), + (datetime.datetime(2014, 1, 1, 12, 3, 0), 4), + (datetime.datetime(2014, 1, 1, 12, 4, 0), 2), + (datetime.datetime(2014, 1, 1, 12, 5, 0), 3), + (datetime.datetime(2014, 1, 1, 12, 6, 0), 4), + (datetime.datetime(2014, 1, 1, 12, 7, 0), 10), + 
(datetime.datetime(2014, 1, 1, 12, 9, 0), 2), + ], before_truncate_callback=tsc1.update) + + tsb2.set_values([ + (datetime.datetime(2014, 1, 1, 12, 1, 0), 3), + (datetime.datetime(2014, 1, 1, 12, 2, 0), 4), + (datetime.datetime(2014, 1, 1, 12, 3, 0), 4), + (datetime.datetime(2014, 1, 1, 12, 4, 0), 6), + (datetime.datetime(2014, 1, 1, 12, 5, 0), 3), + (datetime.datetime(2014, 1, 1, 12, 6, 0), 6), + (datetime.datetime(2014, 1, 1, 12, 9, 0), 2), + (datetime.datetime(2014, 1, 1, 12, 11, 0), 2), + (datetime.datetime(2014, 1, 1, 12, 12, 0), 2), + ], before_truncate_callback=tsc2.update) + + dtfrom = datetime.datetime(2014, 1, 1, 12, 0, 0) + dtto = datetime.datetime(2014, 1, 1, 12, 10, 0) + + # By default we require 100% of point that overlap + # so that fail + self.assertRaises(carbonara.UnAggregableTimeseries, + carbonara.TimeSerieArchive.aggregated, + [tsc1, tsc2], from_timestamp=dtfrom, + to_timestamp=dtto) + + # Retry with 80% and it works + output = carbonara.TimeSerieArchive.aggregated([ + tsc1, tsc2], from_timestamp=dtfrom, to_timestamp=dtto, + needed_percent_of_overlap=80.0) + + self.assertEqual([ + (pandas.Timestamp('2014-01-01 12:01:00'), 60.0, 3.0), + (pandas.Timestamp('2014-01-01 12:02:00'), 60.0, 3.0), + (pandas.Timestamp('2014-01-01 12:03:00'), 60.0, 4.0), + (pandas.Timestamp('2014-01-01 12:04:00'), 60.0, 4.0), + (pandas.Timestamp('2014-01-01 12:05:00'), 60.0, 3.0), + (pandas.Timestamp('2014-01-01 12:06:00'), 60.0, 5.0), + (pandas.Timestamp('2014-01-01 12:07:00'), 60.0, 10.0), + (pandas.Timestamp('2014-01-01 12:09:00'), 60.0, 2.0), + ], output) + + def test_aggregated_different_archive_overlap_edge_missing1(self): + tsc1 = carbonara.TimeSerieArchive.from_definitions([(60, 10)]) + tsb1 = carbonara.BoundTimeSerie(block_size=tsc1.max_block_size) + tsc2 = carbonara.TimeSerieArchive.from_definitions([(60, 10)]) + tsb2 = carbonara.BoundTimeSerie(block_size=tsc2.max_block_size) + + tsb1.set_values([ + (datetime.datetime(2014, 1, 1, 12, 3, 0), 9), + 
(datetime.datetime(2014, 1, 1, 12, 4, 0), 1), + (datetime.datetime(2014, 1, 1, 12, 5, 0), 2), + (datetime.datetime(2014, 1, 1, 12, 6, 0), 7), + (datetime.datetime(2014, 1, 1, 12, 7, 0), 5), + (datetime.datetime(2014, 1, 1, 12, 8, 0), 3), + ], before_truncate_callback=tsc1.update) + + tsb2.set_values([ + (datetime.datetime(2014, 1, 1, 11, 0, 0), 6), + (datetime.datetime(2014, 1, 1, 12, 1, 0), 2), + (datetime.datetime(2014, 1, 1, 12, 2, 0), 13), + (datetime.datetime(2014, 1, 1, 12, 3, 0), 24), + (datetime.datetime(2014, 1, 1, 12, 4, 0), 4), + (datetime.datetime(2014, 1, 1, 12, 5, 0), 16), + (datetime.datetime(2014, 1, 1, 12, 6, 0), 12), + ], before_truncate_callback=tsc2.update) + + # By default we require 100% of point that overlap + # but we allow that the last datapoint is missing + # of the precisest granularity + output = carbonara.TimeSerieArchive.aggregated([ + tsc1, tsc2], aggregation='sum') + + self.assertEqual([ + (pandas.Timestamp('2014-01-01 12:03:00'), 60.0, 33.0), + (pandas.Timestamp('2014-01-01 12:04:00'), 60.0, 5.0), + (pandas.Timestamp('2014-01-01 12:05:00'), 60.0, 18.0), + (pandas.Timestamp('2014-01-01 12:06:00'), 60.0, 19.0), + ], output) + + def test_aggregated_different_archive_overlap_edge_missing2(self): + tsc1 = carbonara.TimeSerieArchive.from_definitions([(60, 10)]) + tsb1 = carbonara.BoundTimeSerie(block_size=tsc1.max_block_size) + tsc2 = carbonara.TimeSerieArchive.from_definitions([(60, 10)]) + tsb2 = carbonara.BoundTimeSerie(block_size=tsc2.max_block_size) + + tsb1.set_values([ + (datetime.datetime(2014, 1, 1, 12, 3, 0), 4), + ], before_truncate_callback=tsc1.update) + + tsb2.set_values([ + (datetime.datetime(2014, 1, 1, 11, 0, 0), 4), + (datetime.datetime(2014, 1, 1, 12, 3, 0), 4), + ], before_truncate_callback=tsc2.update) + + output = carbonara.TimeSerieArchive.aggregated([tsc1, tsc2]) + self.assertEqual([ + (pandas.Timestamp('2014-01-01 12:03:00'), 60.0, 4.0), + ], output) + def test_aggregated_partial_overlap(self): - tsc1 = 
carbonara.AggregatedTimeSerie(sampling=1, max_size=86400) - tsb1 = carbonara.BoundTimeSerie(block_size=tsc1.sampling) - tsc2 = carbonara.AggregatedTimeSerie(sampling=1, max_size=86400) - tsb2 = carbonara.BoundTimeSerie(block_size=tsc2.sampling) + tsc1 = carbonara.TimeSerieArchive.from_definitions([(1, 86400)]) + tsb1 = carbonara.BoundTimeSerie(block_size=tsc1.max_block_size) + tsc2 = carbonara.TimeSerieArchive.from_definitions([(1, 86400)]) + tsb2 = carbonara.BoundTimeSerie(block_size=tsc2.max_block_size) tsb1.set_values([ (datetime.datetime(2015, 12, 3, 13, 19, 15), 1), @@ -788,7 +858,7 @@ class TestAggregatedTimeSerie(base.BaseTestCase): (datetime.datetime(2015, 12, 3, 13, 24, 15), 10), ], before_truncate_callback=tsc2.update) - output = carbonara.AggregatedTimeSerie.aggregated( + output = carbonara.TimeSerieArchive.aggregated( [tsc1, tsc2], aggregation="sum") self.assertEqual([ @@ -799,7 +869,7 @@ class TestAggregatedTimeSerie(base.BaseTestCase): dtfrom = datetime.datetime(2015, 12, 3, 13, 17, 0) dtto = datetime.datetime(2015, 12, 3, 13, 25, 0) - output = carbonara.AggregatedTimeSerie.aggregated( + output = carbonara.TimeSerieArchive.aggregated( [tsc1, tsc2], from_timestamp=dtfrom, to_timestamp=dtto, aggregation="sum", needed_percent_of_overlap=0) @@ -815,98 +885,12 @@ class TestAggregatedTimeSerie(base.BaseTestCase): # By default we require 100% of point that overlap # so that fail if from or to is set self.assertRaises(carbonara.UnAggregableTimeseries, - carbonara.AggregatedTimeSerie.aggregated, + carbonara.TimeSerieArchive.aggregated, [tsc1, tsc2], to_timestamp=dtto) self.assertRaises(carbonara.UnAggregableTimeseries, - carbonara.AggregatedTimeSerie.aggregated, + carbonara.TimeSerieArchive.aggregated, [tsc1, tsc2], from_timestamp=dtfrom) - # Retry with 50% and it works - output = carbonara.AggregatedTimeSerie.aggregated( - [tsc1, tsc2], from_timestamp=dtfrom, - aggregation="sum", - needed_percent_of_overlap=50.0) - self.assertEqual([ - 
(pandas.Timestamp('2015-12-03 13:19:15'), 1.0, 1.0), - (pandas.Timestamp('2015-12-03 13:20:15'), 1.0, 1.0), - (pandas.Timestamp('2015-12-03 13:21:15'), 1.0, 11.0), - (pandas.Timestamp('2015-12-03 13:22:15'), 1.0, 11.0), - ], output) - - output = carbonara.AggregatedTimeSerie.aggregated( - [tsc1, tsc2], to_timestamp=dtto, - aggregation="sum", - needed_percent_of_overlap=50.0) - self.assertEqual([ - (pandas.Timestamp('2015-12-03 13:21:15'), 1.0, 11.0), - (pandas.Timestamp('2015-12-03 13:22:15'), 1.0, 11.0), - (pandas.Timestamp('2015-12-03 13:23:15'), 1.0, 10.0), - (pandas.Timestamp('2015-12-03 13:24:15'), 1.0, 10.0), - ], output) - - def test_split_key(self): - self.assertEqual( - "1420128000.0", - carbonara.AggregatedTimeSerie.get_split_key( - datetime.datetime(2015, 1, 1, 23, 34), 5)) - self.assertEqual( - "1420056000.0", - carbonara.AggregatedTimeSerie.get_split_key( - datetime.datetime(2015, 1, 1, 15, 3), 5)) - - def test_split_key_datetime(self): - self.assertEqual( - datetime.datetime(2014, 5, 10), - carbonara.AggregatedTimeSerie.get_split_key_datetime( - datetime.datetime(2015, 1, 1, 15, 3), 3600)) - self.assertEqual( - datetime.datetime(2014, 12, 29, 8), - carbonara.AggregatedTimeSerie.get_split_key_datetime( - datetime.datetime(2015, 1, 1, 15, 3), 58)) - - def test_split(self): - sampling = 5 - points = 100000 - ts = carbonara.TimeSerie.from_data( - timestamps=map(datetime.datetime.utcfromtimestamp, - six.moves.range(points)), - values=six.moves.range(points)) - agg = carbonara.AggregatedTimeSerie(sampling=sampling) - agg.update(ts) - - grouped_points = list(agg.split()) - - self.assertEqual( - math.ceil((points / float(sampling)) - / carbonara.AggregatedTimeSerie.POINTS_PER_SPLIT), - len(grouped_points)) - self.assertEqual("0.0", - grouped_points[0][0]) - # 14400 × 5s = 20 hours - self.assertEqual("72000.0", - grouped_points[1][0]) - self.assertEqual(carbonara.AggregatedTimeSerie.POINTS_PER_SPLIT, - len(grouped_points[0][1])) - - def 
test_from_timeseries(self): - sampling = 5 - points = 100000 - ts = carbonara.TimeSerie.from_data( - timestamps=map(datetime.datetime.utcfromtimestamp, - six.moves.range(points)), - values=six.moves.range(points)) - agg = carbonara.AggregatedTimeSerie(sampling=sampling) - agg.update(ts) - - split = [t[1] for t in list(agg.split())] - - self.assertEqual(agg, - carbonara.AggregatedTimeSerie.from_timeseries( - split, - sampling=agg.sampling, - max_size=agg.max_size, - aggregation_method=agg.aggregation_method)) - class CarbonaraCmd(base.BaseTestCase): @@ -914,21 +898,53 @@ class CarbonaraCmd(base.BaseTestCase): super(CarbonaraCmd, self).setUp() self.useFixture(fixtures.NestedTempfile()) + def test_create(self): + filename = tempfile.mktemp() + subp = subprocess.Popen(['carbonara-create', + '1,2', + filename], + stdout=subprocess.PIPE, + stderr=subprocess.PIPE) + out, err = subp.communicate() + subp.wait() + os.stat(filename) + self.assertEqual(0, subp.returncode) + self.assertEqual(b"", out) + def test_dump(self): - ts = carbonara.AggregatedTimeSerie(sampling=60, max_size=60, - aggregation_method='max') - tsb = carbonara.BoundTimeSerie(block_size=ts.sampling) - tsb.set_values([(datetime.datetime(2014, 1, 1, 12, 0, 0), 3), - (datetime.datetime(2014, 1, 1, 12, 1, 4), 4), - (datetime.datetime(2014, 1, 1, 12, 1, 9), 7), - (datetime.datetime(2014, 1, 1, 12, 2, 1), 15), - (datetime.datetime(2014, 1, 1, 12, 2, 12), 1)], - before_truncate_callback=ts.update) + filename = tempfile.mktemp() + subp = subprocess.Popen(['carbonara-create', + '1,2', + filename]) + subp.wait() + subp = subprocess.Popen(['carbonara-dump', + filename], + stdout=subprocess.PIPE, + stderr=subprocess.PIPE) + out, err = subp.communicate() + subp.wait() + self.assertIn(b"Aggregation method", out) + def test_update(self): filename = tempfile.mktemp() + subp = subprocess.Popen(['carbonara-create', + '2,2', + filename]) + subp.wait() + self.assertEqual(0, subp.returncode) - with open(filename, "wb") as f: - 
f.write(ts.serialize()) + subp = subprocess.Popen(['carbonara-update', + '2014-12-23 23:23:23,1', + '2014-12-23 23:23:24,10', + filename]) + subp.wait() + self.assertEqual(0, subp.returncode) + + subp = subprocess.Popen(['carbonara-update', + '2014-12-23 23:23:25,7', + filename]) + subp.wait() + self.assertEqual(0, subp.returncode) subp = subprocess.Popen(['carbonara-dump', filename], @@ -936,4 +952,16 @@ class CarbonaraCmd(base.BaseTestCase): stderr=subprocess.PIPE) out, err = subp.communicate() subp.wait() - self.assertIn(b"Aggregation method", out) + self.assertEqual(0, subp.returncode) + self.assertEqual(u"""Aggregation method: mean +Number of aggregated timeseries: 1 + +Aggregated timeserie #1: 2s × 2 = 0:00:04 +Number of measures: 2 ++---------------------+-------+ +| Timestamp | Value | ++---------------------+-------+ +| 2014-12-23 23:23:22 | 1.0 | +| 2014-12-23 23:23:24 | 7.0 | ++---------------------+-------+ +""", out.decode('utf-8')) diff --git a/gnocchi/tests/test_indexer.py b/gnocchi/tests/test_indexer.py index 065a38a1..673f1b9b 100644 --- a/gnocchi/tests/test_indexer.py +++ b/gnocchi/tests/test_indexer.py @@ -169,7 +169,7 @@ class TestIndexerDriver(tests_base.TestCase): m = self.index.get_metrics([rc.metrics[0].id]) self.assertEqual(m[0], rc.metrics[0]) - def _do_test_create_instance(self, server_group=None, image_ref=None): + def _do_test_create_instance(self, server_group=None): r1 = uuid.uuid4() user = uuid.uuid4() project = uuid.uuid4() @@ -177,7 +177,7 @@ class TestIndexerDriver(tests_base.TestCase): rc = self.index.create_resource('instance', r1, user, project, flavor_id="1", - image_ref=image_ref, + image_ref="http://foo/bar", host="foo", display_name="lol", **kwargs) self.assertIsNotNone(rc.started_at) @@ -195,7 +195,7 @@ class TestIndexerDriver(tests_base.TestCase): "display_name": "lol", "server_group": server_group, "host": "foo", - "image_ref": image_ref, + "image_ref": "http://foo/bar", "flavor_id": "1", "metrics": {}}, rc.jsonify()) @@ 
-205,14 +205,10 @@ class TestIndexerDriver(tests_base.TestCase): self.assertEqual(rc.metrics, rg.metrics) def test_create_instance(self): - self._do_test_create_instance(image_ref='http://foo/bar') + self._do_test_create_instance() def test_create_instance_with_server_group(self): - self._do_test_create_instance('my_autoscaling_group', - image_ref='http://foo/bar') - - def test_create_instance_without_image_ref(self): - self._do_test_create_instance(image_ref=None) + self._do_test_create_instance('my_autoscaling_group') def test_delete_resource(self): r1 = uuid.uuid4() @@ -941,18 +937,8 @@ class TestIndexerDriver(tests_base.TestCase): self.index.create_metric(e1, user, project, archive_policy_name="low") - e2 = uuid.uuid4() - self.index.create_metric(e2, - user, project, - archive_policy_name="low") metrics = self.index.list_metrics() - id_list = [m.id for m in metrics] - self.assertIn(e1, id_list) - # Test ordering - if e1 < e2: - self.assertLess(id_list.index(e1), id_list.index(e2)) - else: - self.assertLess(id_list.index(e2), id_list.index(e1)) + self.assertIn(e1, [m.id for m in metrics]) def test_list_metrics_delete_status(self): e1 = uuid.uuid4() diff --git a/gnocchi/tests/test_rest.py b/gnocchi/tests/test_rest.py index 7ed9e49e..3cecd847 100644 --- a/gnocchi/tests/test_rest.py +++ b/gnocchi/tests/test_rest.py @@ -42,18 +42,77 @@ from gnocchi import utils load_tests = testscenarios.load_tests_apply_scenarios -class TestingApp(webtest.TestApp): - VALID_TOKEN_ADMIN = str(uuid.uuid4()) +class FakeMemcache(object): + VALID_TOKEN_ADMIN = '4562138218392830' + ADMIN_TOKEN_HASH = hashlib.sha256( + VALID_TOKEN_ADMIN.encode('utf-8')).hexdigest() USER_ID_ADMIN = str(uuid.uuid4()) PROJECT_ID_ADMIN = str(uuid.uuid4()) - VALID_TOKEN = str(uuid.uuid4()) + VALID_TOKEN = '4562138218392831' + TOKEN_HASH = hashlib.sha256(VALID_TOKEN.encode('utf-8')).hexdigest() USER_ID = str(uuid.uuid4()) PROJECT_ID = str(uuid.uuid4()) - VALID_TOKEN_2 = str(uuid.uuid4()) - USER_ID_2 = 
str(uuid.uuid4()) - PROJECT_ID_2 = str(uuid.uuid4()) + VALID_TOKEN_2 = '4562138218392832' + TOKEN_2_HASH = hashlib.sha256(VALID_TOKEN_2.encode('utf-8')).hexdigest() + # We replace "-" to simulate a middleware that would send UUID in a non + # normalized format. + USER_ID_2 = str(uuid.uuid4()).replace("-", "") + PROJECT_ID_2 = str(uuid.uuid4()).replace("-", "") + + def get(self, key): + dt = "2100-01-01T23:59:59" + if (key == "tokens/%s" % self.ADMIN_TOKEN_HASH or + key == "tokens/%s" % self.VALID_TOKEN_ADMIN): + return json.dumps(({'access': { + 'token': {'id': self.VALID_TOKEN_ADMIN, + 'expires': dt}, + 'user': { + 'id': self.USER_ID_ADMIN, + 'name': 'adminusername', + 'tenantId': self.PROJECT_ID_ADMIN, + 'tenantName': 'myadmintenant', + 'roles': [ + {'name': 'admin'}, + ]}, + }}, dt)) + elif (key == "tokens/%s" % self.TOKEN_HASH or + key == "tokens/%s" % self.VALID_TOKEN): + return json.dumps(({'access': { + 'token': {'id': self.VALID_TOKEN, + 'expires': dt}, + 'user': { + 'id': self.USER_ID, + 'name': 'myusername', + 'tenantId': self.PROJECT_ID, + 'tenantName': 'mytenant', + 'roles': [ + {'name': 'member'}, + ]}, + }}, dt)) + elif (key == "tokens/%s" % self.TOKEN_2_HASH or + key == "tokens/%s" % self.VALID_TOKEN_2): + return json.dumps(({'access': { + 'token': {'id': self.VALID_TOKEN_2, + 'expires': dt}, + 'user': { + 'id': self.USER_ID_2, + 'name': 'myusername2', + 'tenantId': self.PROJECT_ID_2, + 'tenantName': 'mytenant2', + 'roles': [ + {'name': 'member'}, + ]}, + }}, dt)) + + @staticmethod + def set(key, value, **kwargs): + pass + + +class TestingApp(webtest.TestApp): + CACHE_NAME = 'fake.cache' def __init__(self, *args, **kwargs): self.auth = kwargs.pop('auth') @@ -61,14 +120,15 @@ class TestingApp(webtest.TestApp): self.indexer = kwargs.pop('indexer') super(TestingApp, self).__init__(*args, **kwargs) # Setup Keystone auth_token fake cache - self.token = self.VALID_TOKEN + self.extra_environ.update({self.CACHE_NAME: FakeMemcache()}) + self.token = 
FakeMemcache.VALID_TOKEN @contextlib.contextmanager def use_admin_user(self): if not self.auth: raise testcase.TestSkipped("No auth enabled") old_token = self.token - self.token = self.VALID_TOKEN_ADMIN + self.token = FakeMemcache.VALID_TOKEN_ADMIN try: yield finally: @@ -79,7 +139,7 @@ class TestingApp(webtest.TestApp): if not self.auth: raise testcase.TestSkipped("No auth enabled") old_token = self.token - self.token = self.VALID_TOKEN_2 + self.token = FakeMemcache.VALID_TOKEN_2 try: yield finally: @@ -104,6 +164,15 @@ class RestTest(tests_base.TestCase, testscenarios.TestWithScenarios): def app_factory(cls, global_config, **local_conf): return app.setup_app(cls.pecan_config, cls.conf) + @classmethod + def keystone_authtoken_filter_factory(cls, global_conf, **local_conf): + def auth_filter(app): + return keystonemiddleware.auth_token.AuthProtocol(app, { + "oslo_config_project": "gnocchi", + "oslo_config_config": cls.conf, + }) + return auth_filter + def setUp(self): super(RestTest, self).setUp() self.conf.set_override('paste_config', @@ -140,30 +209,6 @@ class RestTest(tests_base.TestCase, testscenarios.TestWithScenarios): RestTest.pecan_config = pecan_config RestTest.conf = self.conf - self.auth_token_fixture = self.useFixture( - ksm_fixture.AuthTokenFixture()) - self.auth_token_fixture.add_token_data( - is_v2=True, - token_id=TestingApp.VALID_TOKEN_ADMIN, - user_id=TestingApp.USER_ID_ADMIN, - user_name='adminusername', - project_id=TestingApp.PROJECT_ID_ADMIN, - role_list=['admin']) - self.auth_token_fixture.add_token_data( - is_v2=True, - token_id=TestingApp.VALID_TOKEN, - user_id=TestingApp.USER_ID, - user_name='myusername', - project_id=TestingApp.PROJECT_ID, - role_list=["member"]) - self.auth_token_fixture.add_token_data( - is_v2=True, - token_id=TestingApp.VALID_TOKEN_2, - user_id=TestingApp.USER_ID_2, - user_name='myusername2', - project_id=TestingApp.PROJECT_ID_2, - role_list=["member"]) - # TODO(chdent) Linting is turned off until a # 
keystonemiddleware bug is resolved. # See: https://bugs.launchpad.net/keystonemiddleware/+bug/1466499 @@ -436,7 +481,6 @@ class MetricTest(RestTest): result = json.loads(ret.text) now = utils.utcnow() self.assertEqual([ - ['2014-01-01T10:00:00+00:00', 3600.0, 1234.2], [(now - datetime.timedelta( seconds=now.second, @@ -807,8 +851,8 @@ class ResourceTest(RestTest): self.attributes['id'] = str(uuid.uuid4()) self.resource = self.attributes.copy() if self.auth: - self.resource['created_by_user_id'] = TestingApp.USER_ID - self.resource['created_by_project_id'] = TestingApp.PROJECT_ID + self.resource['created_by_user_id'] = FakeMemcache.USER_ID + self.resource['created_by_project_id'] = FakeMemcache.PROJECT_ID else: self.resource['created_by_user_id'] = None self.resource['created_by_project_id'] = None @@ -1982,10 +2026,8 @@ class GenericResourceTest(RestTest): params={ "id": resource_id, "started_at": "2014-01-01 02:02:02", - # We replace "-" to simulate a middleware that would send UUID - # in a non normalized format. 
- "user_id": TestingApp.USER_ID_2.replace("-", ""), - "project_id": TestingApp.PROJECT_ID_2.replace("-", ""), + "user_id": FakeMemcache.USER_ID_2, + "project_id": FakeMemcache.PROJECT_ID_2, "metrics": {"foobar": {"archive_policy_name": "low"}}, }) diff --git a/gnocchi/tests/test_storage.py b/gnocchi/tests/test_storage.py index 495882b3..21052262 100644 --- a/gnocchi/tests/test_storage.py +++ b/gnocchi/tests/test_storage.py @@ -17,12 +17,9 @@ import datetime import uuid import mock -from oslo_utils import timeutils -from oslotest import base import six.moves from gnocchi import storage -from gnocchi.storage import _carbonara from gnocchi.storage import null from gnocchi.tests import base as tests_base from gnocchi import utils @@ -42,9 +39,6 @@ class TestStorageDriver(tests_base.TestCase): @mock.patch('gnocchi.storage._carbonara.LOG') def test_corrupted_data(self, logger): - if not isinstance(self.storage, _carbonara.CarbonaraBasedStorage): - self.skipTest("This driver is not based on Carbonara") - self.storage.add_measures(self.metric, [ storage.Measure(datetime.datetime(2014, 1, 1, 12, 0, 1), 69), ]) @@ -63,6 +57,18 @@ class TestStorageDriver(tests_base.TestCase): side_effect=ValueError("boom!")): self.storage.process_background_tasks(self.index, True) + expected_calls = [ + mock.call.debug('Processing measures for %s' % self.metric.id), + mock.call.debug('Processing measures for %s' % self.metric.id), + ] + aggs = ["none"] + self.conf.archive_policy.default_aggregation_methods + for agg in aggs: + expected_calls.append(mock.call.error( + 'Data are corrupted for metric %s and aggregation %s, ' + 'recreating an empty timeserie.' 
% (self.metric.id, agg))) + + logger.assert_has_calls(expected_calls, any_order=True) + self.assertEqual([ (utils.datetime_utc(2014, 1, 1), 86400.0, 1), (utils.datetime_utc(2014, 1, 1, 13), 3600.0, 1), @@ -84,6 +90,15 @@ class TestStorageDriver(tests_base.TestCase): storage.Measure(datetime.datetime(2014, 1, 1, 12, 0, 1), 69), ]) self.storage.delete_metric(self.metric) + + def test_delete_nonempty_metric_with_process(self): + self.storage.add_measures(self.metric, [ + storage.Measure(datetime.datetime(2014, 1, 1, 12, 0, 1), 69), + ]) + with mock.patch.object(self.index, 'get_metrics') as f: + f.return_value = [self.metric] + self.storage.process_background_tasks(self.index) + self.storage.delete_metric(self.metric) self.storage.process_background_tasks(self.index, True) def test_measures_reporting(self): @@ -144,8 +159,6 @@ class TestStorageDriver(tests_base.TestCase): ], self.storage.get_measures(self.metric)) self.assertEqual([ - (utils.datetime_utc(2014, 1, 1), 86400.0, 39.75), - (utils.datetime_utc(2014, 1, 1, 12), 3600.0, 39.75), (utils.datetime_utc(2014, 1, 1, 12, 10), 300.0, 44.0), ], self.storage.get_measures( self.metric, @@ -160,17 +173,14 @@ class TestStorageDriver(tests_base.TestCase): self.metric, to_timestamp=datetime.datetime(2014, 1, 1, 12, 6, 0))) - self.assertEqual([ - (utils.datetime_utc(2014, 1, 1), 86400.0, 39.75), - (utils.datetime_utc(2014, 1, 1, 12), 3600.0, 39.75), - (utils.datetime_utc(2014, 1, 1, 12, 10), 300.0, 44.0), - ], self.storage.get_measures( - self.metric, - to_timestamp=datetime.datetime(2014, 1, 1, 12, 10, 10), - from_timestamp=datetime.datetime(2014, 1, 1, 12, 10, 10))) + self.assertEqual( + [], + self.storage.get_measures( + self.metric, + to_timestamp=datetime.datetime(2014, 1, 1, 12, 10, 10), + from_timestamp=datetime.datetime(2014, 1, 1, 12, 10, 10))) self.assertEqual([ - (utils.datetime_utc(2014, 1, 1), 86400.0, 39.75), (utils.datetime_utc(2014, 1, 1, 12), 3600.0, 39.75), (utils.datetime_utc(2014, 1, 1, 12), 300.0, 69.0), 
], self.storage.get_measures( @@ -178,22 +188,13 @@ class TestStorageDriver(tests_base.TestCase): from_timestamp=datetime.datetime(2014, 1, 1, 12, 0, 0), to_timestamp=datetime.datetime(2014, 1, 1, 12, 0, 2))) - self.assertEqual([ - (utils.datetime_utc(2014, 1, 1), 86400.0, 39.75), - (utils.datetime_utc(2014, 1, 1, 12), 3600.0, 39.75), - (utils.datetime_utc(2014, 1, 1, 12), 300.0, 69.0), - ], self.storage.get_measures( - self.metric, - from_timestamp=timeutils.parse_isotime("2014-1-1 13:00:00+01:00"), - to_timestamp=datetime.datetime(2014, 1, 1, 12, 0, 2))) - self.assertEqual([ (utils.datetime_utc(2014, 1, 1, 12), 3600.0, 39.75), ], self.storage.get_measures( self.metric, from_timestamp=datetime.datetime(2014, 1, 1, 12, 0, 0), to_timestamp=datetime.datetime(2014, 1, 1, 12, 0, 2), - granularity=3600.0)) + granularity=3600)) self.assertEqual([ (utils.datetime_utc(2014, 1, 1, 12), 300.0, 69.0), @@ -201,12 +202,10 @@ class TestStorageDriver(tests_base.TestCase): self.metric, from_timestamp=datetime.datetime(2014, 1, 1, 12, 0, 0), to_timestamp=datetime.datetime(2014, 1, 1, 12, 0, 2), - granularity=300.0)) + granularity=300)) - self.assertRaises(storage.GranularityDoesNotExist, - self.storage.get_measures, - self.metric, - granularity=42) + self.assertEqual([], self.storage.get_measures(self.metric, + granularity=42)) def test_get_cross_metric_measures_unknown_metric(self): self.assertEqual([], @@ -247,26 +246,6 @@ class TestStorageDriver(tests_base.TestCase): [self.metric, metric2], aggregation='last') - def test_get_cross_metric_measures_unknown_granularity(self): - metric2 = storage.Metric(uuid.uuid4(), - self.archive_policies['low']) - self.storage.add_measures(self.metric, [ - storage.Measure(datetime.datetime(2014, 1, 1, 12, 0, 1), 69), - storage.Measure(datetime.datetime(2014, 1, 1, 12, 7, 31), 42), - storage.Measure(datetime.datetime(2014, 1, 1, 12, 9, 31), 4), - storage.Measure(datetime.datetime(2014, 1, 1, 12, 12, 45), 44), - ]) - 
self.storage.add_measures(metric2, [ - storage.Measure(datetime.datetime(2014, 1, 1, 12, 0, 1), 69), - storage.Measure(datetime.datetime(2014, 1, 1, 12, 7, 31), 42), - storage.Measure(datetime.datetime(2014, 1, 1, 12, 9, 31), 4), - storage.Measure(datetime.datetime(2014, 1, 1, 12, 12, 45), 44), - ]) - self.assertRaises(storage.GranularityDoesNotExist, - self.storage.get_cross_metric_measures, - [self.metric, metric2], - granularity=12345.456) - def test_add_and_get_cross_metric_measures_different_archives(self): metric2 = storage.Metric(uuid.uuid4(), self.archive_policies['no_granularity_match']) @@ -318,9 +297,7 @@ class TestStorageDriver(tests_base.TestCase): values = self.storage.get_cross_metric_measures( [self.metric, metric2], from_timestamp='2014-01-01 12:10:00') self.assertEqual([ - (utils.datetime_utc(2014, 1, 1), 86400.0, 22.25), - (utils.datetime_utc(2014, 1, 1, 12), 3600.0, 22.25), - (utils.datetime_utc(2014, 1, 1, 12, 10, 0), 300.0, 24.0), + (utils.datetime_utc(2014, 1, 1, 12, 10, 0), 300.0, 24.0) ], values) values = self.storage.get_cross_metric_measures( @@ -336,11 +313,7 @@ class TestStorageDriver(tests_base.TestCase): [self.metric, metric2], to_timestamp='2014-01-01 12:10:10', from_timestamp='2014-01-01 12:10:10') - self.assertEqual([ - (utils.datetime_utc(2014, 1, 1), 86400.0, 22.25), - (utils.datetime_utc(2014, 1, 1, 12), 3600.0, 22.25), - (utils.datetime_utc(2014, 1, 1, 12, 10), 300.0, 24.0), - ], values) + self.assertEqual([], values) values = self.storage.get_cross_metric_measures( [self.metric, metric2], @@ -348,21 +321,10 @@ class TestStorageDriver(tests_base.TestCase): to_timestamp='2014-01-01 12:00:01') self.assertEqual([ - (utils.datetime_utc(2014, 1, 1), 86400.0, 22.25), (utils.datetime_utc(2014, 1, 1, 12, 0, 0), 3600.0, 22.25), (utils.datetime_utc(2014, 1, 1, 12, 0, 0), 300.0, 39.0), ], values) - values = self.storage.get_cross_metric_measures( - [self.metric, metric2], - from_timestamp='2014-01-01 12:00:00', - to_timestamp='2014-01-01 
12:00:01', - granularity=300.0) - - self.assertEqual([ - (utils.datetime_utc(2014, 1, 1, 12, 0, 0), 300.0, 39.0), - ], values) - def test_add_and_get_cross_metric_measures_with_holes(self): metric2 = storage.Metric(uuid.uuid4(), self.archive_policies['low']) @@ -415,14 +377,10 @@ class TestStorageDriver(tests_base.TestCase): self.assertEqual( {metric2: [], - self.metric: [ - (utils.datetime_utc(2014, 1, 1), 86400, 33), - (utils.datetime_utc(2014, 1, 1, 12), 3600, 33), - (utils.datetime_utc(2014, 1, 1, 12), 300, 69), - (utils.datetime_utc(2014, 1, 1, 12, 10), 300, 42)]}, + self.metric: [(utils.datetime_utc(2014, 1, 1, 12), 300, 69)]}, self.storage.search_value( [metric2, self.metric], - {u"≥": 30})) + {u"≥": 50})) self.assertEqual( {metric2: [], self.metric: []}, @@ -431,63 +389,3 @@ class TestStorageDriver(tests_base.TestCase): {u"∧": [ {u"eq": 100}, {u"≠": 50}]})) - - -class TestMeasureQuery(base.BaseTestCase): - def test_equal(self): - q = storage.MeasureQuery({"=": 4}) - self.assertTrue(q(4)) - self.assertFalse(q(40)) - - def test_gt(self): - q = storage.MeasureQuery({">": 4}) - self.assertTrue(q(40)) - self.assertFalse(q(4)) - - def test_and(self): - q = storage.MeasureQuery({"and": [{">": 4}, {"<": 10}]}) - self.assertTrue(q(5)) - self.assertFalse(q(40)) - self.assertFalse(q(1)) - - def test_or(self): - q = storage.MeasureQuery({"or": [{"=": 4}, {"=": 10}]}) - self.assertTrue(q(4)) - self.assertTrue(q(10)) - self.assertFalse(q(-1)) - - def test_modulo(self): - q = storage.MeasureQuery({"=": [{"%": 5}, 0]}) - self.assertTrue(q(5)) - self.assertTrue(q(10)) - self.assertFalse(q(-1)) - self.assertFalse(q(6)) - - def test_math(self): - q = storage.MeasureQuery( - { - u"and": [ - # v+5 is bigger 0 - {u"≥": [{u"+": 5}, 0]}, - # v-6 is not 5 - {u"≠": [5, {u"-": 6}]}, - ], - } - ) - self.assertTrue(q(5)) - self.assertTrue(q(10)) - self.assertFalse(q(11)) - - def test_empty(self): - q = storage.MeasureQuery({}) - self.assertFalse(q(5)) - self.assertFalse(q(10)) - - def 
test_bad_format(self): - self.assertRaises(storage.InvalidQuery, - storage.MeasureQuery, - {"foo": [{"=": 4}, {"=": 10}]}) - - self.assertRaises(storage.InvalidQuery, - storage.MeasureQuery, - {"=": [1, 2, 3]}) diff --git a/gnocchi/utils.py b/gnocchi/utils.py index cd2e2bf4..a266c2ec 100644 --- a/gnocchi/utils.py +++ b/gnocchi/utils.py @@ -14,7 +14,6 @@ # License for the specific language governing permissions and limitations # under the License. import datetime -import time import iso8601 from oslo_utils import timeutils @@ -94,10 +93,3 @@ def utcnow(): def datetime_utc(*args): return datetime.datetime(*args, tzinfo=iso8601.iso8601.UTC) - - -def datetime_to_unix(timestamp): - return (time.mktime(timestamp.utctimetuple()) - + timestamp.microsecond / 10e5 - # mktime() returns for the current timezone - - time.timezone) diff --git a/requirements.txt b/requirements.txt index 25480332..ed1cc583 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,25 +1,36 @@ numpy -oslo.config>=2.6.0 +oslo.config>=1.15.0 +oslo.db>=1.8.0 oslo.log>=1.0.0 oslo.policy>=0.3.0 oslo.serialization>=1.4.0 oslo.utils>=1.6.0 oslo.middleware +oslosphinx>=2.2.0 # Apache-2.0 pandas>=0.17.0 pecan>=0.9 +python-swiftclient>=2.5.0 pytimeparse>=1.1.5 futures requests six +sqlalchemy +sqlalchemy-utils stevedore +tooz>=0.11 voluptuous werkzeug +Jinja2 +PyYAML +msgpack-python trollius retrying +pytz WebOb>=1.4.1 alembic>=0.7.6,!=0.8.1 psycopg2 pymysql keystonemiddleware>=2.3.0 PasteDeploy +sphinx_bootstrap_theme prettytable diff --git a/run-tests.sh b/run-tests.sh index 492725da..61b75872 100755 --- a/run-tests.sh +++ b/run-tests.sh @@ -6,8 +6,11 @@ for storage in ${GNOCCHI_TEST_STORAGE_DRIVERS} do for indexer in ${GNOCCHI_TEST_INDEXER_DRIVERS} do - export GNOCCHI_TEST_INDEXER_DRIVER=$indexer - export GNOCCHI_TEST_STORAGE_DRIVER=$storage - ./setup-test-env.sh ./tools/pretty_tox.sh $* + storage_setup_script=./setup-${storage}-tests.sh + if [ ! 
-x "$storage_setup_script" ] + then + unset storage_setup_script + fi + GNOCCHI_TEST_STORAGE_DRIVER=$storage ./setup-${indexer}-tests.sh "${storage_setup_script}" ./tools/pretty_tox.sh $* done done diff --git a/setup-influxdb-tests.sh b/setup-influxdb-tests.sh new file mode 100755 index 00000000..78296a31 --- /dev/null +++ b/setup-influxdb-tests.sh @@ -0,0 +1,47 @@ +#!/bin/bash -x + +wait_for_line () { + while read line + do + echo "$line" | grep -q "$1" && break + done < "$2" + # Read the fifo for ever otherwise process would block + cat "$2" >/dev/null & +} + +INFLUXDB_DATA=`mktemp -d /tmp/gnocchi-influxdb-XXXXX` +export GNOCCHI_TEST_INFLUXDB_PORT=51234 + +mkdir ${INFLUXDB_DATA}/{broker,data,meta,hh,wal} +mkfifo ${INFLUXDB_DATA}/out + +cat > $INFLUXDB_DATA/config < ${INFLUXDB_DATA}/out 2>&1 & +# Wait for InfluxDB to start listening to connections +wait_for_line "Listening on HTTP" ${INFLUXDB_DATA}/out +influx -port $GNOCCHI_TEST_INFLUXDB_PORT -execute "CREATE DATABASE test;" + + +$* + +ret=$? +kill $(jobs -p) +rm -rf "${INFLUXDB_DATA}" +exit $ret diff --git a/setup-mysql-tests.sh b/setup-mysql-tests.sh new file mode 100755 index 00000000..50feea3d --- /dev/null +++ b/setup-mysql-tests.sh @@ -0,0 +1,29 @@ +#!/bin/bash -x +wait_for_line () { + while read line + do + echo "$line" | grep -q "$1" && break + done < "$2" + # Read the fifo for ever otherwise process would block + cat "$2" >/dev/null & +} + +# Start MySQL process for tests +MYSQL_DATA=`mktemp -d /tmp/gnocchi-mysql-XXXXX` +mkfifo ${MYSQL_DATA}/out +PATH=$PATH:/usr/libexec +mysqld --no-defaults --datadir=${MYSQL_DATA} --pid-file=${MYSQL_DATA}/mysql.pid --socket=${MYSQL_DATA}/mysql.socket --skip-networking --skip-grant-tables &> ${MYSQL_DATA}/out & +# Wait for MySQL to start listening to connections +wait_for_line "mysqld: ready for connections." 
${MYSQL_DATA}/out +export GNOCCHI_TEST_INDEXER_URL="mysql+pymysql://root@localhost/test?unix_socket=${MYSQL_DATA}/mysql.socket&charset=utf8" +mysql --no-defaults -S ${MYSQL_DATA}/mysql.socket -e 'CREATE DATABASE test;' + +mkdir $MYSQL_DATA/tooz +export GNOCCHI_COORDINATION_URL="mysql://root@localhost/test?unix_socket=${MYSQL_DATA}/mysql.socket&charset=utf8" + +$* + +ret=$? +kill $(jobs -p) +rm -rf "${MYSQL_DATA}" +exit $ret diff --git a/setup-postgresql-tests.sh b/setup-postgresql-tests.sh new file mode 100755 index 00000000..47487ee6 --- /dev/null +++ b/setup-postgresql-tests.sh @@ -0,0 +1,19 @@ +#!/bin/bash -x + +# Start PostgreSQL process for tests +PGSQL_DATA=`mktemp -d /tmp/gnocchi-psql-XXXXX` +PGSQL_PATH=`pg_config --bindir` +PGSQL_PORT=9824 +${PGSQL_PATH}/pg_ctl initdb -D ${PGSQL_DATA} +LANGUAGE=C ${PGSQL_PATH}/pg_ctl -w -D ${PGSQL_DATA} -o "-k ${PGSQL_DATA} -p ${PGSQL_PORT}" start > /dev/null +export GNOCCHI_TEST_INDEXER_URL="postgresql:///template1?host=${PGSQL_DATA}&port=${PGSQL_PORT}" + +mkdir $PGSQL_DATA/tooz +export GNOCCHI_COORDINATION_URL="${GNOCCHI_TEST_INDEXER_URL}" + +$* + +ret=$? 
+${PGSQL_PATH}/pg_ctl -w -D ${PGSQL_DATA} -o "-p $PGSQL_PORT" stop +rm -rf ${PGSQL_DATA} +exit $ret diff --git a/setup-test-env.sh b/setup-test-env.sh deleted file mode 100755 index d6c52624..00000000 --- a/setup-test-env.sh +++ /dev/null @@ -1,18 +0,0 @@ -#!/bin/bash -set -e -set -x -# Activate overtest for indexer -GNOCCHI_TEST_INDEXER_DRIVER=${GNOCCHI_TEST_INDEXER_DRIVER:-postgresql} -source $(which overtest) $GNOCCHI_TEST_INDEXER_DRIVER -export GNOCCHI_INDEXER_URL=${OVERTEST_URL/#mysql:/mysql+pymysql:} -export GNOCCHI_COORDINATION_URL=${OVERTEST_URL} -# Activate overtest for storage -case $GNOCCHI_TEST_STORAGE_DRIVER in - influxdb) - source $(which overtest) $GNOCCHI_TEST_STORAGE_DRIVER - GNOCCHI_TEST_INFLUXDB_PORT=${OVERTEST_INFLUXDB_PORT} - ;; - *) - ;; -esac -$* diff --git a/setup.cfg b/setup.cfg index 58afb5c5..837834ec 100644 --- a/setup.cfg +++ b/setup.cfg @@ -18,69 +18,13 @@ classifier = Programming Language :: Python :: 3.4 Topic :: System :: Monitoring -[extras] -keystone = - keystonemiddleware>=4.0.0 -mysql = - pymysql - oslo.db>=1.8.0 - sqlalchemy - sqlalchemy-utils - alembic>=0.7.6,!=0.8.1 -postgresql = - psycopg2 - oslo.db>=1.8.0 - sqlalchemy - sqlalchemy-utils - alembic>=0.7.6,!=0.8.1 -influxdb = - influxdb>=2.4 -swift = - python-swiftclient>=2.5.0 - msgpack-python - tooz>=0.11 -ceph = - msgpack-python - tooz>=0.11 -file = - msgpack-python - tooz>=0.11 -doc = - oslosphinx>=2.2.0 - sphinx - sphinxcontrib-httpdomain - PyYAML - Jinja2 -test = - overtest>=0.8.0 - gabbi>=0.101.2 - coverage>=3.6 - fixtures - mock - oslotest - python-subunit>=0.0.18 - tempest-lib>=0.2.0 - testrepository - testscenarios - testtools>=0.9.38 - WebTest>=2.0.16 - doc8 - sysv_ipc - tooz>=0.11 - keystonemiddleware>=4.0.0 - [global] setup-hooks = pbr.hooks.setup_hook -[sdist] -pre-hook.build_config = gnocchi.genconfig.sdist_prehook - [files] packages = gnocchi -data_files = - etc/gnocchi = etc/gnocchi/* [entry_points] gnocchi.indexer.resources = @@ -97,19 +41,19 @@ 
gnocchi.indexer.resources = stack = gnocchi.indexer.sqlalchemy_base:ResourceExt image = gnocchi.indexer.sqlalchemy_extension:Image -gnocchi.controller.schemas = - generic = gnocchi.rest:GenericSchema - instance = gnocchi.rest:InstanceSchema - instance_disk = gnocchi.rest:InstanceDiskSchema - instance_network_interface = gnocchi.rest:InstanceNetworkInterfaceSchema - swift_account = gnocchi.rest:GenericSchema - volume = gnocchi.rest:VolumeSchema - ceph_account = gnocchi.rest:GenericSchema - network = gnocchi.rest:GenericSchema - identity = gnocchi.rest:GenericSchema - ipmi = gnocchi.rest:GenericSchema - stack = gnocchi.rest:GenericSchema - image = gnocchi.rest:ImageSchema +gnocchi.controller.resources = + generic = gnocchi.rest:GenericResourcesController + instance = gnocchi.rest:InstancesResourcesController + instance_disk = gnocchi.rest:InstanceDisksResourcesController + instance_network_interface = gnocchi.rest:InstanceNetworkInterfacesResourcesController + swift_account = gnocchi.rest:SwiftAccountsResourcesController + volume = gnocchi.rest:VolumesResourcesController + ceph_account = gnocchi.rest:CephAccountsResourcesController + network = gnocchi.rest:NetworkResourcesController + identity = gnocchi.rest:IdentityResourcesController + ipmi = gnocchi.rest:IPMIResourcesController + stack = gnocchi.rest:StackResourcesController + image = gnocchi.rest:ImageResourcesController gnocchi.storage = null = gnocchi.storage.null:NullStorage @@ -129,10 +73,12 @@ gnocchi.aggregates = console_scripts = gnocchi-api = gnocchi.cli:api - gnocchi-upgrade = gnocchi.cli:upgrade + gnocchi-dbsync = gnocchi.cli:storage_dbsync gnocchi-statsd = gnocchi.cli:statsd gnocchi-metricd = gnocchi.cli:metricd + carbonara-create = gnocchi.carbonara:create_archive_file carbonara-dump = gnocchi.carbonara:dump_archive_file + carbonara-update = gnocchi.carbonara:update_archive_file oslo.config.opts = gnocchi = gnocchi.opts:list_opts diff --git a/test-requirements.txt b/test-requirements.txt new file mode 
100644 index 00000000..713355df --- /dev/null +++ b/test-requirements.txt @@ -0,0 +1,16 @@ +gabbi>=0.101.2 +coverage>=3.6 +fixtures +mock +oslotest +sphinx +python-subunit>=0.0.18 +tempest-lib>=0.2.0 +testrepository +testscenarios +testtools>=0.9.38 +WebTest>=2.0.16 +doc8 +sphinxcontrib-httpdomain +influxdb>=2.4 +sysv_ipc diff --git a/tox.ini b/tox.ini index f1d9f928..80c1d76b 100644 --- a/tox.ini +++ b/tox.ini @@ -6,25 +6,23 @@ envlist = py{27,34},py{27,34}-{postgresql,mysql}{,-file,-swift,-ceph,-influxdb}, usedevelop = True sitepackages = False passenv = LANG OS_TEST_TIMEOUT OS_STDOUT_CAPTURE OS_STDERR_CAPTURE OS_LOG_CAPTURE GNOCCHI_TEST_* -deps = .[test] - py{27,34}-postgresql: .[postgresql,swift,ceph,file] - py{27,34}-mysql: .[mysql,swift,ceph,file] - py{27,34}-{postgresql,mysql}-influxdb: .[influxdb] +deps = -r{toxinidir}/requirements.txt + -r{toxinidir}/test-requirements.txt setenv = - GNOCCHI_TEST_STORAGE_DRIVER=file - GNOCCHI_TEST_INDEXER_DRIVER=postgresql GNOCCHI_TEST_STORAGE_DRIVERS=file swift ceph GNOCCHI_TEST_INDEXER_DRIVERS=postgresql mysql + py{27,34}-postgresql: GNOCCHI_TEST_INDEXER_DRIVERS=postgresql + py{27,34}-mysql: GNOCCHI_TEST_INDEXER_DRIVERS=mysql py{27,34}-{postgresql,mysql}-file: GNOCCHI_TEST_STORAGE_DRIVERS=file py{27,34}-{postgresql,mysql}-swift: GNOCCHI_TEST_STORAGE_DRIVERS=swift py{27,34}-{postgresql,mysql}-ceph: GNOCCHI_TEST_STORAGE_DRIVERS=ceph py{27,34}-{postgresql,mysql}-influxdb: GNOCCHI_TEST_STORAGE_DRIVERS=influxdb - py{27,34}-postgresql{,-file,-swift,-ceph,-influxdb}: GNOCCHI_TEST_INDEXER_DRIVERS=postgresql - py{27,34}-mysql{,-file,-swift,-ceph,-influxdb}: GNOCCHI_TEST_INDEXER_DRIVERS=mysql + py{27,34}-postgresql-{file,swift,ceph,influxdb}: GNOCCHI_TEST_INDEXER_DRIVERS=postgresql + py{27,34}-mysql-{file,swift,ceph,influxdb}: GNOCCHI_TEST_INDEXER_DRIVERS=mysql commands = doc8 --ignore-path doc/source/rest.rst doc/source - oslo-config-generator --config-file=gnocchi-config-generator.conf + oslo-config-generator 
--config-file=etc/gnocchi/gnocchi-config-generator.conf {toxinidir}/run-tests.sh {posargs} [testenv:bashate] @@ -38,7 +36,7 @@ commands = flake8 [testenv:py27-gate] setenv = OS_TEST_PATH=gnocchi/tests/gabbi - GABBI_LIVE=1 + GABBI_LIVE_FAIL_IF_NO_TEST=1 passenv = {[testenv]passenv} GNOCCHI_SERVICE* sitepackages = True basepython = python2.7 @@ -46,38 +44,29 @@ commands = {toxinidir}/tools/pretty_tox.sh '{posargs}' # This target provides a shortcut to running just the gabbi tests. [testenv:py27-gabbi] -deps = .[test,postgresql,file] setenv = OS_TEST_PATH=gnocchi/tests/gabbi basepython = python2.7 -commands = {toxinidir}/setup-test-env.sh {toxinidir}/tools/pretty_tox.sh '{posargs}' +commands = {toxinidir}/setup-mysql-tests.sh {toxinidir}/tools/pretty_tox.sh '{posargs}' [testenv:py27-cover] -commands = {toxinidir}/setup-test-env.sh python setup.py testr --coverage --testr-args="{posargs}" +commands = {toxinidir}/setup-mysql-tests.sh python setup.py testr --coverage --testr-args="{posargs}" [testenv:venv] -# This is used by the doc job on the gate -deps = {[testenv:docs]deps} -commands = {toxinidir}/setup-test-env.sh {posargs} +setenv = GNOCCHI_TEST_STORAGE_DRIVER=file +commands = {toxinidir}/setup-postgresql-tests.sh {posargs} [flake8] exclude = .tox,.eggs,doc,gnocchi/indexer/alembic/versions/1c98ac614015_initial_base.py show-source = true [testenv:genconfig] -deps = .[mysql,postgresql,test,file,influxdb,ceph,swift] -commands = oslo-config-generator --config-file=gnocchi-config-generator.conf +commands = oslo-config-generator --config-file=etc/gnocchi/gnocchi-config-generator.conf [testenv:docs] -# This does not work, see: https://bitbucket.org/hpk42/tox/issues/302 -# deps = {[testenv]deps} -# .[doc] -deps = .[test,postgresql,file,doc] setenv = GNOCCHI_TEST_STORAGE_DRIVER=file - GNOCCHI_TEST_INDEXER_DRIVER=postgresql commands = doc8 --ignore-path doc/source/rest.rst doc/source - {toxinidir}/setup-test-env.sh python setup.py build_sphinx + 
{toxinidir}/setup-postgresql-tests.sh python setup.py build_sphinx [testenv:docs-gnocchi.xyz] -deps = .[file,postgresql,test,doc] - sphinx_rtd_theme -commands = {toxinidir}/setup-test-env.sh sphinx-build -D html_theme=sphinx_rtd_theme doc/source doc/build/html +setenv = GNOCCHI_TEST_STORAGE_DRIVER=file +commands = {toxinidir}/setup-postgresql-tests.sh sphinx-build -D html_theme=bootstrap doc/source doc/build -- GitLab From 0225646bb0f3541f056b6067f5ac3f505535d9f1 Mon Sep 17 00:00:00 2001 From: Thomas Goirand Date: Wed, 17 Feb 2016 16:42:02 +0000 Subject: [PATCH 0101/1483] Fixed running tests with pgsql (ie: GNOCCHI_TEST_INDEXER_URL env var). --- debian/changelog | 1 + debian/rules | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/debian/changelog b/debian/changelog index 1960411b..b1ab0e35 100644 --- a/debian/changelog +++ b/debian/changelog @@ -3,6 +3,7 @@ gnocchi (1.3.4-1) experimental; urgency=medium * New upstream release. * Clean /var/{lib,log}/gnocchi on purge (Closes: #810700). * Added nl.po debconf translation (Closes: #812356). + * Fixed running tests with pgsql (ie: GNOCCHI_TEST_INDEXER_URL env var). -- Thomas Goirand Tue, 02 Feb 2016 09:15:03 +0000 diff --git a/debian/rules b/debian/rules index 1b9f1daa..396b75bd 100755 --- a/debian/rules +++ b/debian/rules @@ -56,7 +56,7 @@ ifeq (,$(findstring nocheck, $(DEB_BUILD_OPTIONS))) export PGHOST=$$PG_MYTMPDIR ; \ chmod +x debian/start_pg.sh ; \ debian/start_pg.sh $$PG_MYTMPDIR ; \ - export GNOCCHI_INDEXER_URL="postgresql:///template1?host=$$PG_MYTMPDIR&port=9823" ; \ + export GNOCCHI_TEST_INDEXER_URL="postgresql:///template1?host=$$PG_MYTMPDIR&port=9823" ; \ export GNOCCHI_TEST_STORAGE_DRIVER=file ; \ echo "===> Testing with python$$i (python$$PYMAJOR)" ; \ rm -rf .testrepository ; \ -- GitLab From e752d071a36f8856144c6999eb1919934453afe5 Mon Sep 17 00:00:00 2001 From: Thomas Goirand Date: Wed, 17 Feb 2016 16:42:54 +0000 Subject: [PATCH 0102/1483] HTTPS VCS URLs. 
--- debian/changelog | 1 + debian/control | 4 ++-- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/debian/changelog b/debian/changelog index b1ab0e35..4b87a2ce 100644 --- a/debian/changelog +++ b/debian/changelog @@ -4,6 +4,7 @@ gnocchi (1.3.4-1) experimental; urgency=medium * Clean /var/{lib,log}/gnocchi on purge (Closes: #810700). * Added nl.po debconf translation (Closes: #812356). * Fixed running tests with pgsql (ie: GNOCCHI_TEST_INDEXER_URL env var). + * HTTPS VCS URLs. -- Thomas Goirand Tue, 02 Feb 2016 09:15:03 +0000 diff --git a/debian/control b/debian/control index d6f9b278..36fe3d9a 100644 --- a/debian/control +++ b/debian/control @@ -67,8 +67,8 @@ Build-Depends-Indep: alembic (>= 0.7.6), subunit (>= 0.0.18), testrepository, Standards-Version: 3.9.6 -Vcs-Browser: http://anonscm.debian.org/gitweb/?p=openstack/python-gnocchi.git -Vcs-Git: git://anonscm.debian.org/openstack/python-gnocchi.git +Vcs-Browser: https://anonscm.debian.org/gitweb/?p=openstack/python-gnocchi.git +Vcs-Git: https://anonscm.debian.org/git/openstack/python-gnocchi.git Homepage: https://github.com/openstack/gnocchi Package: python-gnocchi -- GitLab From eef41775801b880b61d33f52e8975ea9d969e87f Mon Sep 17 00:00:00 2001 From: gordon chung Date: Fri, 19 Feb 2016 12:46:58 -0500 Subject: [PATCH 0103/1483] fix partitioning i broke the partitioning when i rebased on new measures report. 
this patch: - correctly reads in number of metrics in report - adds a log to show block size of worker - adds missing cleanup Change-Id: I9fdc05fd495800761a68d09e93fd5e0d55be9b9c --- gnocchi/cli.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/gnocchi/cli.py b/gnocchi/cli.py index 20cc0d01..7a4bda53 100644 --- a/gnocchi/cli.py +++ b/gnocchi/cli.py @@ -108,7 +108,8 @@ class MetricReporting(MetricProcessBase): try: report = self.store.measures_report(details=False) if self.queues: - block_size = max(16, min(256, len(report) // len(self.queues))) + block_size = max(16, min( + 256, report['summary']['metrics'] // len(self.queues))) for queue in self.queues: queue.put(block_size) LOG.info("Metricd reporting: %d measurements bundles across %d " @@ -131,6 +132,8 @@ class MetricProcessor(MetricProcessBase): if self.queue: while not self.queue.empty(): self.block_size = self.queue.get() + LOG.debug("Re-configuring worker to handle up to %s " + "metrics", self.block_size) self.store.process_background_tasks(self.index, self.block_size) except Exception: LOG.error("Unexpected error during measures processing", @@ -169,7 +172,7 @@ def metricd(): sys.exit(0) except Exception: LOG.warning("exiting", exc_info=True) - _metricd_cleanup(workers) + _metricd_cleanup(workers, queues) sys.exit(1) -- GitLab From e7739cb4ed6cfc1a15169a6872b8b835d904b026 Mon Sep 17 00:00:00 2001 From: gordon chung Date: Wed, 24 Feb 2016 22:56:52 -0500 Subject: [PATCH 0104/1483] fix migration from v1.3 to v2.0 previous tests never really tested migration, it just took a v2.0 TimeSerieArchive and changed to v2.0 AggregatedTimeSerie. this changes it so we take v1.3 TimeSerieArchive and convert it to V2.0 AggregatedTimeSerie. 
Related-Bug: #1548367 Change-Id: I8eea10912517b1b0a77ad7c1a3c9037d00bf1960 (cherry picked from commit 6ca313dec3f4980defa3af9bd58921fe331849d0) --- gnocchi/carbonara.py | 17 +++++---- gnocchi/tests/storage/test_carbonara.py | 47 ++++++++++++++----------- 2 files changed, 38 insertions(+), 26 deletions(-) diff --git a/gnocchi/carbonara.py b/gnocchi/carbonara.py index 8aadb8e8..914fe9c8 100644 --- a/gnocchi/carbonara.py +++ b/gnocchi/carbonara.py @@ -361,12 +361,17 @@ class AggregatedTimeSerie(TimeSerie): :returns: A TimeSerie object """ sampling = d.get('sampling') - prev_timestamp = pandas.Timestamp(d.get('first_timestamp') * 10e8) - timestamps = [] - for delta in d.get('timestamps'): - prev_timestamp = datetime.timedelta( - seconds=delta * sampling) + prev_timestamp - timestamps.append(prev_timestamp) + if 'first_timestamp' in d: + prev_timestamp = pandas.Timestamp(d.get('first_timestamp') * 10e8) + timestamps = [] + for delta in d.get('timestamps'): + prev_timestamp = datetime.timedelta( + seconds=delta * sampling) + prev_timestamp + timestamps.append(prev_timestamp) + else: + # migrate from v1.3, remove with TimeSerieArchive + timestamps, d['values'] = ( + cls._timestamps_and_values_from_dict(d['values'])) return cls.from_data( timestamps=timestamps, diff --git a/gnocchi/tests/storage/test_carbonara.py b/gnocchi/tests/storage/test_carbonara.py index 64acb469..63a8bd33 100644 --- a/gnocchi/tests/storage/test_carbonara.py +++ b/gnocchi/tests/storage/test_carbonara.py @@ -17,6 +17,8 @@ import datetime import uuid import mock +import pandas +import six from gnocchi import carbonara from gnocchi import storage @@ -25,6 +27,18 @@ from gnocchi.tests import base as tests_base from gnocchi import utils +def _to_dict_v1_3(self): + d = {'values': dict((timestamp.value, float(v)) + for timestamp, v + in six.iteritems(self.ts.dropna()))} + sampling = pandas.tseries.offsets.Nano(self.sampling * 10e8) + d.update({ + 'aggregation_method': self.aggregation_method, + 'max_size': 
self.max_size, + 'sampling': six.text_type(sampling.n) + sampling.rule_code}) + return d + + class TestCarbonaraMigration(tests_base.TestCase): def setUp(self): super(TestCarbonaraMigration, self).setUp() @@ -54,15 +68,20 @@ class TestCarbonaraMigration(tests_base.TestCase): self.storage._create_metric(self.metric) - self.storage._store_metric_archive( - self.metric, - archive.agg_timeseries[0].aggregation_method, - archive.serialize()) + # serialise in old format + with mock.patch('gnocchi.carbonara.AggregatedTimeSerie.to_dict', + autospec=True) as f: + f.side_effect = _to_dict_v1_3 - self.storage._store_metric_archive( - self.metric, - archive_max.agg_timeseries[0].aggregation_method, - archive_max.serialize()) + self.storage._store_metric_archive( + self.metric, + archive.agg_timeseries[0].aggregation_method, + archive.serialize()) + + self.storage._store_metric_archive( + self.metric, + archive_max.agg_timeseries[0].aggregation_method, + archive_max.serialize()) def upgrade(self): with mock.patch.object(self.index, 'list_metrics') as f: @@ -70,18 +89,6 @@ class TestCarbonaraMigration(tests_base.TestCase): self.storage.upgrade(self.index) def test_get_measures(self): - self.assertEqual([ - (utils.datetime_utc(2014, 1, 1), 86400, 5), - (utils.datetime_utc(2014, 1, 1, 12), 3600, 5), - (utils.datetime_utc(2014, 1, 1, 12), 300, 5) - ], self.storage.get_measures(self.metric)) - - self.assertEqual([ - (utils.datetime_utc(2014, 1, 1), 86400, 6), - (utils.datetime_utc(2014, 1, 1, 12), 3600, 6), - (utils.datetime_utc(2014, 1, 1, 12), 300, 6) - ], self.storage.get_measures(self.metric, aggregation='max')) - # This is to make gordc safer self.assertIsNotNone(self.storage._get_metric_archive( self.metric, "mean")) -- GitLab From 1ed960afdcc286dea41edb900b20922ae89f1a16 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Fri, 4 Mar 2016 16:31:08 +0100 Subject: [PATCH 0105/1483] Pass aggregation when create AggregatedTimeSerie Change-Id: 
I66d01023d3fe78038aa61a5be9c93a346e3d4726 Closes-bug: #1552437 (cherry picked from commit 23262820683421c604c8be636cfe5afa584b1356) --- gnocchi/storage/_carbonara.py | 1 + gnocchi/tests/test_storage.py | 37 +++++++++++++++++++++++++++++++++++ 2 files changed, 38 insertions(+) diff --git a/gnocchi/storage/_carbonara.py b/gnocchi/storage/_carbonara.py index f6410419..006bf5a1 100644 --- a/gnocchi/storage/_carbonara.py +++ b/gnocchi/storage/_carbonara.py @@ -195,6 +195,7 @@ class CarbonaraBasedStorage(storage.StorageDriver): return carbonara.AggregatedTimeSerie.from_timeseries( timeseries, + aggregation_method=aggregation, sampling=granularity, max_size=points) diff --git a/gnocchi/tests/test_storage.py b/gnocchi/tests/test_storage.py index c554bf8f..c7f80eca 100644 --- a/gnocchi/tests/test_storage.py +++ b/gnocchi/tests/test_storage.py @@ -163,6 +163,43 @@ class TestStorageDriver(tests_base.TestCase): (utils.datetime_utc(2015, 1, 1, 12), 300.0, 69), ], self.storage.get_measures(self.metric)) + def test_updated_measures(self): + self.storage.add_measures(self.metric, [ + storage.Measure(datetime.datetime(2014, 1, 1, 12, 0, 1), 69), + storage.Measure(datetime.datetime(2014, 1, 1, 12, 7, 31), 42), + ]) + self.storage.process_background_tasks(self.index, sync=True) + + self.storage.add_measures(self.metric, [ + storage.Measure(datetime.datetime(2014, 1, 1, 12, 9, 31), 4), + storage.Measure(datetime.datetime(2014, 1, 1, 12, 12, 45), 44), + ]) + self.storage.process_background_tasks(self.index, sync=True) + + self.assertEqual([ + (utils.datetime_utc(2014, 1, 1), 86400.0, 39.75), + (utils.datetime_utc(2014, 1, 1, 12), 3600.0, 39.75), + (utils.datetime_utc(2014, 1, 1, 12), 300.0, 69.0), + (utils.datetime_utc(2014, 1, 1, 12, 5), 300.0, 23.0), + (utils.datetime_utc(2014, 1, 1, 12, 10), 300.0, 44.0), + ], self.storage.get_measures(self.metric)) + + self.assertEqual([ + (utils.datetime_utc(2014, 1, 1), 86400.0, 69), + (utils.datetime_utc(2014, 1, 1, 12), 3600.0, 69.0), + 
(utils.datetime_utc(2014, 1, 1, 12), 300.0, 69.0), + (utils.datetime_utc(2014, 1, 1, 12, 5), 300.0, 42.0), + (utils.datetime_utc(2014, 1, 1, 12, 10), 300.0, 44.0), + ], self.storage.get_measures(self.metric, aggregation='max')) + + self.assertEqual([ + (utils.datetime_utc(2014, 1, 1), 86400.0, 4), + (utils.datetime_utc(2014, 1, 1, 12), 3600.0, 4), + (utils.datetime_utc(2014, 1, 1, 12), 300.0, 69.0), + (utils.datetime_utc(2014, 1, 1, 12, 5), 300.0, 4.0), + (utils.datetime_utc(2014, 1, 1, 12, 10), 300.0, 44.0), + ], self.storage.get_measures(self.metric, aggregation='min')) + def test_add_and_get_measures(self): self.storage.add_measures(self.metric, [ storage.Measure(datetime.datetime(2014, 1, 1, 12, 0, 1), 69), -- GitLab From 9cf6c7d9fbb4fa9e34e4087ba347f4822c9c766c Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Fri, 26 Feb 2016 15:23:00 +0100 Subject: [PATCH 0106/1483] ceph: fix help string Change-Id: I0ebf80dfa8ae0b5ca4263d5d90808e94517803cb (cherry picked from commit 902758be1000b7f02b0dad87caec5021199ccb01) --- gnocchi/storage/ceph.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/gnocchi/storage/ceph.py b/gnocchi/storage/ceph.py index 23236657..fe749d7d 100644 --- a/gnocchi/storage/ceph.py +++ b/gnocchi/storage/ceph.py @@ -46,7 +46,7 @@ OPTS = [ default='gnocchi', help='Ceph pool name to use.'), cfg.StrOpt('ceph_username', - help='Ceph username (ie: client.admin).'), + help='Ceph username (ie: admin without "client." prefix).'), cfg.StrOpt('ceph_keyring', help='Ceph keyring path.'), cfg.StrOpt('ceph_conffile', -- GitLab From f45bd17517c354634a35b9ef869fdda51a773efb Mon Sep 17 00:00:00 2001 From: gordon chung Date: Mon, 29 Feb 2016 14:46:10 -0500 Subject: [PATCH 0107/1483] close queues metricd service catches SIGTERM and kills all children multiprocessing handles. we can't detect queues at this step so inspect the children and kill queues as required. 
Change-Id: I226a0ba317ec4412159c2afa8f68895e00535e51 --- gnocchi/cli.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/gnocchi/cli.py b/gnocchi/cli.py index 7a4bda53..9bcb942f 100644 --- a/gnocchi/cli.py +++ b/gnocchi/cli.py @@ -168,18 +168,18 @@ def metricd(): for worker in workers: worker.join() except KeyboardInterrupt: - _metricd_cleanup(workers, queues) + _metricd_cleanup(workers) sys.exit(0) except Exception: LOG.warning("exiting", exc_info=True) - _metricd_cleanup(workers, queues) + _metricd_cleanup(workers) sys.exit(1) -def _metricd_cleanup(workers, queues): - for queue in queues: - queue.close() +def _metricd_cleanup(workers): for worker in workers: + if hasattr(worker, 'queue'): + worker.queue.close() worker.terminate() for worker in workers: worker.join() -- GitLab From 43f0775e4caf54c40564c08c7913f9d31f058434 Mon Sep 17 00:00:00 2001 From: gordon chung Date: Tue, 1 Mar 2016 22:53:37 -0500 Subject: [PATCH 0108/1483] optimise timeseries we don't need necessarily need to use to sort_index when using set_values because the ts is sorted and the ts passed in may be sorted (via BoundedTimeSerie). we should sort only when required. also, it's much more efficient to remove duplicates by leveraging duplicated() vs groupby(level=0).last()[1] this patch also uses drop rather than overly complicated double slice and merging that was happening previously. 
[1] http://paste.openstack.org/show/489078/ Change-Id: Iec84957eceee57a430655379ef4a94a20cd6a6ff Related-Bug: #1548448 --- gnocchi/carbonara.py | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/gnocchi/carbonara.py b/gnocchi/carbonara.py index 914fe9c8..12d78c41 100644 --- a/gnocchi/carbonara.py +++ b/gnocchi/carbonara.py @@ -80,7 +80,9 @@ class TimeSerie(SerializableMixin): def __init__(self, ts=None): if ts is None: ts = pandas.Series() - self.ts = ts.groupby(level=0).last().sort_index() + self.ts = ts[~ts.index.duplicated(keep='last')] + if not self.ts.index.is_monotonic: + self.ts = self.ts.sort_index() @classmethod def from_data(cls, timestamps=None, values=None): @@ -98,9 +100,11 @@ class TimeSerie(SerializableMixin): return self.ts[key] def set_values(self, values): - t = pandas.Series(*reversed(list(zip(*values)))).groupby( - level=0).last() - self.ts = t.combine_first(self.ts).sort_index() + t = pandas.Series(*reversed(list(zip(*values)))) + t = t[~t.index.duplicated(keep='last')] + if not t.index.is_monotonic: + t = t.sort_index() + self.ts = t.combine_first(self.ts) def __len__(self): return len(self.ts) @@ -469,8 +473,7 @@ class AggregatedTimeSerie(TimeSerie): last_timestamp = index[-1] # Build a new time serie excluding all data points in the range of the # timeserie passed as argument - new_ts = self.ts[:first_timestamp].combine_first( - self.ts[last_timestamp:]) + new_ts = self.ts.drop(self.ts[first_timestamp:last_timestamp].index) # Build a new timeserie where we replaced the timestamp range covered # by the timeserie passed as argument -- GitLab From 0e85d8ca2db09c1d8659af715b800f1a3634bfb5 Mon Sep 17 00:00:00 2001 From: liusheng Date: Mon, 7 Mar 2016 14:38:06 +0800 Subject: [PATCH 0109/1483] Make the content of PKG-INFO can be decoded The README.rst contain a dash which is \xe2\x80\x93, the PKG-INFO file will generated by the content of README.rst when install gnocchi package. 
And the pbr will read the PKG-INFO file to get package info, in py34 environment, the dash cannot be decoded and will raise an error. Change-Id: I6a3c85beb260115e7ad32bfe101ced25442aa8d1 Closes-Bug: #1553893 (cherry picked from commit cd7813c9bd800918dd18516e733698cd7f15f493) --- README.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.rst b/README.rst index 8c5bfb57..a6cc77ea 100644 --- a/README.rst +++ b/README.rst @@ -1,5 +1,5 @@ =============================== - Gnocchi – Metric as a Service + Gnocchi - Metric as a Service =============================== .. image:: doc/source/gnocchi-logo.jpg -- GitLab From f665a4da8f7dba929009661aaff3abea009e5e44 Mon Sep 17 00:00:00 2001 From: gordon chung Date: Sat, 27 Feb 2016 17:21:37 -0500 Subject: [PATCH 0110/1483] synchronously delete archives it's safe to delete upgraded metrics asynchronously because they are removed from indexer and not accessible afterwards. this is not the case for archives as they may relate to active metrics. we should delete them completely so we don't access them again. 
Change-Id: I3b8baaf1609a003c7d09b59a84570c66035a8183 --- gnocchi/storage/ceph.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/gnocchi/storage/ceph.py b/gnocchi/storage/ceph.py index fe749d7d..2758aae1 100644 --- a/gnocchi/storage/ceph.py +++ b/gnocchi/storage/ceph.py @@ -297,5 +297,8 @@ class CephStorage(_carbonara.CarbonaraBasedStorage): def _delete_metric_archives(self, metric): with self._get_ioctx() as ioctx: for aggregation in metric.archive_policy.aggregation_methods: - ioctx.aio_remove( - str("gnocchi_%s_%s" % (metric.id, aggregation))) + try: + ioctx.remove_object( + str("gnocchi_%s_%s" % (metric.id, aggregation))) + except rados.ObjectNotFound: + pass -- GitLab From 0c876c4eed8be8763478cb1ca494f91904ddabc8 Mon Sep 17 00:00:00 2001 From: gordon chung Date: Thu, 3 Mar 2016 18:19:43 -0500 Subject: [PATCH 0111/1483] ensure timeseries has unique index this is a safety mechanism to ensure we always have ordered, unique indices when working with timeseries. 
Change-Id: Ie4a7002bd6aa769c12ed98ceeeb78aaf4d138e34 Closes-Bug: #1548448 (cherry picked from commit bea6989d4872da3aa5466840c8f17ea575dfdd5f) --- gnocchi/carbonara.py | 18 +++++++++++------- 1 file changed, 11 insertions(+), 7 deletions(-) diff --git a/gnocchi/carbonara.py b/gnocchi/carbonara.py index 12d78c41..87f22c22 100644 --- a/gnocchi/carbonara.py +++ b/gnocchi/carbonara.py @@ -80,9 +80,15 @@ class TimeSerie(SerializableMixin): def __init__(self, ts=None): if ts is None: ts = pandas.Series() - self.ts = ts[~ts.index.duplicated(keep='last')] - if not self.ts.index.is_monotonic: - self.ts = self.ts.sort_index() + self.ts = self.clean_ts(ts) + + @staticmethod + def clean_ts(ts): + if ts.index.has_duplicates: + ts = ts[~ts.index.duplicated(keep='last')] + if not ts.index.is_monotonic: + ts = ts.sort_index() + return ts @classmethod def from_data(cls, timestamps=None, values=None): @@ -101,10 +107,7 @@ class TimeSerie(SerializableMixin): def set_values(self, values): t = pandas.Series(*reversed(list(zip(*values)))) - t = t[~t.index.duplicated(keep='last')] - if not t.index.is_monotonic: - t = t.sort_index() - self.ts = t.combine_first(self.ts) + self.ts = self.clean_ts(t).combine_first(self.ts) def __len__(self): return len(self.ts) @@ -468,6 +471,7 @@ class AggregatedTimeSerie(TimeSerie): def update(self, ts): if ts.ts.empty: return + ts.ts = self.clean_ts(ts.ts) index = ts.ts.index first_timestamp = index[0] last_timestamp = index[-1] -- GitLab From 501a4004cb8fc678cca463a59a9237668590b8cb Mon Sep 17 00:00:00 2001 From: Thomas Goirand Date: Wed, 9 Mar 2016 14:05:57 +0100 Subject: [PATCH 0112/1483] Now packaging 2.0.2. --- debian/changelog | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/debian/changelog b/debian/changelog index 4b87a2ce..9d3fffac 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +gnocchi (2.0.2-1) experimental; urgency=medium + + * New upstream release. 
+ + -- Thomas Goirand Wed, 09 Mar 2016 14:05:32 +0100 + gnocchi (1.3.4-1) experimental; urgency=medium * New upstream release. -- GitLab From 27332060f878f224f206f0a1a23fbdae93fe6618 Mon Sep 17 00:00:00 2001 From: Thomas Goirand Date: Wed, 9 Mar 2016 14:09:09 +0100 Subject: [PATCH 0113/1483] Fixed diff with upstream tag. --- .gitreview | 1 - MANIFEST.in | 1 + README.rst | 21 +- devstack/apache-gnocchi.template | 2 +- devstack/apache-ported-gnocchi.template | 2 +- devstack/plugin.sh | 42 +- devstack/settings | 17 +- doc/source/architecture.rst | 117 ++++ doc/source/client.rst | 13 + doc/source/conf.py | 26 +- doc/source/configuration.rst | 187 ++++++ doc/source/grafana-screenshot.png | Bin 0 -> 82601 bytes doc/source/grafana.rst | 63 ++ doc/source/index.rst | 46 +- doc/source/install.rst | 215 ++----- doc/source/resource_types.rst | 2 +- doc/source/rest.j2 | 23 +- doc/source/rest.yaml | 12 + doc/source/running.rst | 71 +++ doc/source/statsd.rst | 26 +- ...ator.conf => gnocchi-config-generator.conf | 0 gnocchi/archive_policy.py | 10 +- gnocchi/carbonara.py | 308 +++++----- gnocchi/cli.py | 28 +- gnocchi/genconfig.py | 24 + gnocchi/indexer/__init__.py | 14 +- gnocchi/indexer/alembic/env.py | 16 +- ...c2_allow_volume_display_name_to_be_null.py | 41 ++ ...469b308577a9_allow_image_ref_to_be_null.py | 41 ++ gnocchi/indexer/sqlalchemy.py | 278 +++++---- gnocchi/indexer/sqlalchemy_extension.py | 4 +- gnocchi/opts.py | 24 +- gnocchi/rest/__init__.py | 241 +++----- gnocchi/rest/app.py | 19 - gnocchi/service.py | 6 +- gnocchi/statsd.py | 9 +- gnocchi/storage/__init__.py | 33 +- gnocchi/storage/_carbonara.py | 194 ++++-- gnocchi/storage/ceph.py | 25 +- gnocchi/storage/file.py | 80 ++- gnocchi/storage/influxdb.py | 13 +- gnocchi/storage/swift.py | 111 +++- gnocchi/tests/base.py | 31 +- gnocchi/tests/gabbi/fixtures.py | 19 +- gnocchi/tests/gabbi/gabbits-live/live.yaml | 7 +- gnocchi/tests/gabbi/gabbits/aggregation.yaml | 243 ++++++++ gnocchi/tests/gabbi/gabbits/archive.yaml | 6 
+- .../tests/gabbi/gabbits/batch_measures.yaml | 173 ++++++ .../gabbi/gabbits/metric_granularity.yaml | 6 +- gnocchi/tests/gabbi/gabbits/resource.yaml | 4 +- gnocchi/tests/gabbi/test_gabbi_live.py | 2 +- .../indexer/sqlalchemy/test_migrations.py | 2 +- gnocchi/tests/storage/test_carbonara.py | 56 +- gnocchi/tests/test_archive_policy.py | 3 +- gnocchi/tests/test_carbonara.py | 574 +++++++----------- gnocchi/tests/test_indexer.py | 26 +- gnocchi/tests/test_rest.py | 97 +-- gnocchi/tests/test_storage.py | 147 ++++- requirements.txt | 10 +- run-tests.sh | 9 +- setup-influxdb-tests.sh | 47 -- setup-mysql-tests.sh | 29 - setup-postgresql-tests.sh | 19 - setup.cfg | 32 +- test-requirements.txt | 16 - tox.ini | 43 +- 66 files changed, 2445 insertions(+), 1562 deletions(-) create mode 100644 MANIFEST.in create mode 100644 doc/source/architecture.rst create mode 100644 doc/source/client.rst create mode 100644 doc/source/configuration.rst create mode 100644 doc/source/grafana-screenshot.png create mode 100644 doc/source/grafana.rst create mode 100644 doc/source/running.rst rename etc/gnocchi/gnocchi-config-generator.conf => gnocchi-config-generator.conf (100%) create mode 100644 gnocchi/genconfig.py create mode 100644 gnocchi/indexer/alembic/versions/1f21cbdd6bc2_allow_volume_display_name_to_be_null.py create mode 100644 gnocchi/indexer/alembic/versions/469b308577a9_allow_image_ref_to_be_null.py create mode 100644 gnocchi/tests/gabbi/gabbits/aggregation.yaml create mode 100644 gnocchi/tests/gabbi/gabbits/batch_measures.yaml delete mode 100755 setup-influxdb-tests.sh delete mode 100755 setup-mysql-tests.sh delete mode 100755 setup-postgresql-tests.sh delete mode 100644 test-requirements.txt diff --git a/.gitreview b/.gitreview index e93df263..e4b8477d 100644 --- a/.gitreview +++ b/.gitreview @@ -2,4 +2,3 @@ host=review.openstack.org port=29418 project=openstack/gnocchi.git -defaultbranch=stable/1.3 diff --git a/MANIFEST.in b/MANIFEST.in new file mode 100644 index 
00000000..8f248e6e --- /dev/null +++ b/MANIFEST.in @@ -0,0 +1 @@ +include etc/gnocchi/gnocchi.conf diff --git a/README.rst b/README.rst index 9f6920d1..a6cc77ea 100644 --- a/README.rst +++ b/README.rst @@ -2,19 +2,16 @@ Gnocchi - Metric as a Service =============================== -Gnocchi is a service for managing a set of resources and storing metrics about -them, in a scalable and resilient way. Its functionalities are exposed over an -HTTP REST API. +.. image:: doc/source/gnocchi-logo.jpg -There is a more consistent presentation of Gnocchi: +Gnocchi is a multi-tenant timeseries, metrics and resources database. It +provides an `HTTP REST`_ interface to create and manipulate the data. It is +designed to store metrics at a very large scale while providing access to +metrics and resources information to operators and users. - https://julien.danjou.info/blog/2015/openstack-gnocchi-first-release +Gnocchi is part of the `OpenStack` project. While Gnocchi has support for +OpenStack, it is fully able to work stand-alone. -And online documentation: +You can read the full documentation online at http://gnocchi.xyz. - http://docs.openstack.org/developer/gnocchi/ - -Your are invited to file bug reports (if you find bugs) in -the bug tracker, available at: - - http://bugs.launchpad.net/gnocchi +.. 
_`HTTP REST`: https://en.wikipedia.org/wiki/Representational_state_transfer diff --git a/devstack/apache-gnocchi.template b/devstack/apache-gnocchi.template index 54fafbd1..bc288755 100644 --- a/devstack/apache-gnocchi.template +++ b/devstack/apache-gnocchi.template @@ -1,5 +1,5 @@ -WSGIDaemonProcess gnocchi lang='en_US.UTF-8' locale='en_US.UTF-8' user=%USER% display-name=%{GROUP} processes=%APIWORKERS% threads=10 %VIRTUALENV% +WSGIDaemonProcess gnocchi lang='en_US.UTF-8' locale='en_US.UTF-8' user=%USER% display-name=%{GROUP} processes=%APIWORKERS% threads=32 %VIRTUALENV% WSGIProcessGroup gnocchi WSGIScriptAlias %SCRIPT_NAME% %WSGI% diff --git a/devstack/apache-ported-gnocchi.template b/devstack/apache-ported-gnocchi.template index 58f0c480..2a56fa8d 100644 --- a/devstack/apache-ported-gnocchi.template +++ b/devstack/apache-ported-gnocchi.template @@ -1,7 +1,7 @@ Listen %GNOCCHI_PORT% - WSGIDaemonProcess gnocchi lang='en_US.UTF-8' locale='en_US.UTF-8' user=%USER% display-name=%{GROUP} processes=%APIWORKERS% threads=10 %VIRTUALENV% + WSGIDaemonProcess gnocchi lang='en_US.UTF-8' locale='en_US.UTF-8' user=%USER% display-name=%{GROUP} processes=%APIWORKERS% threads=32 %VIRTUALENV% WSGIProcessGroup gnocchi WSGIScriptAlias / %WSGI% WSGIApplicationGroup %{GLOBAL} diff --git a/devstack/plugin.sh b/devstack/plugin.sh index 76459415..91066e76 100644 --- a/devstack/plugin.sh +++ b/devstack/plugin.sh @@ -18,7 +18,6 @@ # - ``SERVICE_{TENANT_NAME|PASSWORD}`` must be defined # - ``SERVICE_HOST`` # - ``OS_AUTH_URL``, ``KEYSTONE_SERVICE_URI`` for auth in api -# - ``CEILOMETER_CONF`` for ceilometer dispatcher configuration # stack.sh # --------- @@ -235,6 +234,12 @@ function configure_gnocchi { # Configure auth token middleware configure_auth_token_middleware $GNOCCHI_CONF gnocchi $GNOCCHI_AUTH_CACHE_DIR + if is_service_enabled gnocchi-statsd ; then + iniset $GNOCCHI_CONF statsd resource_id $GNOCCHI_STATSD_RESOURCE_ID + iniset $GNOCCHI_CONF statsd project_id 
$GNOCCHI_STATSD_PROJECT_ID + iniset $GNOCCHI_CONF statsd user_id $GNOCCHI_STATSD_USER_ID + fi + # Configure the storage driver if is_service_enabled ceph && [[ "$GNOCCHI_STORAGE_BACKEND" = 'ceph' ]] ; then iniset $GNOCCHI_CONF storage driver ceph @@ -293,25 +298,6 @@ function configure_ceph_gnocchi { sudo chown ${STACK_USER}:$(id -g -n $whoami) ${CEPH_CONF_DIR}/ceph.client.${GNOCCHI_CEPH_USER}.keyring } -function configure_ceilometer_gnocchi { - gnocchi_url=$(gnocchi_service_url) - iniset $CEILOMETER_CONF DEFAULT dispatcher gnocchi - iniset $CEILOMETER_CONF alarms gnocchi_url $gnocchi_url - iniset $CEILOMETER_CONF dispatcher_gnocchi url $gnocchi_url - iniset $CEILOMETER_CONF dispatcher_gnocchi archive_policy ${GNOCCHI_ARCHIVE_POLICY} - if is_service_enabled swift && [[ "$GNOCCHI_STORAGE_BACKEND" = 'swift' ]] ; then - iniset $CEILOMETER_CONF dispatcher_gnocchi filter_service_activity "True" - iniset $CEILOMETER_CONF dispatcher_gnocchi filter_project "gnocchi_swift" - else - iniset $CEILOMETER_CONF dispatcher_gnocchi filter_service_activity "False" - fi -} - -function configure_aodh_gnocchi { - gnocchi_url=$(gnocchi_service_url) - iniset $AODH_CONF DEFAULT gnocchi_url $gnocchi_url -} - # init_gnocchi() - Initialize etc. 
function init_gnocchi { @@ -322,8 +308,8 @@ function init_gnocchi { if is_service_enabled mysql postgresql; then recreate_database gnocchi - $GNOCCHI_BIN_DIR/gnocchi-dbsync fi + $GNOCCHI_BIN_DIR/gnocchi-upgrade } function preinstall_gnocchi { @@ -361,8 +347,10 @@ function install_gnocchi { install_gnocchiclient + is_service_enabled key && EXTRA_FLAVOR=,keystonmiddleware + # We don't use setup_package because we don't follow openstack/requirements - sudo -H pip install -e "$GNOCCHI_DIR" + sudo -H pip install -e "$GNOCCHI_DIR"[test,$GNOCCHI_STORAGE_BACKEND,${DATABASE_TYPE}${EXTRA_FLAVOR}] if [ "$GNOCCHI_USE_MOD_WSGI" == "True" ]; then install_apache_wsgi @@ -403,7 +391,6 @@ function start_gnocchi { fi # Create a default policy - archive_policy_url="$(gnocchi_service_url)/v1/archive_policy" if ! is_service_enabled key; then export OS_AUTH_TYPE=gnocchi-noauth export GNOCCHI_USER_ID=`uuidgen` @@ -419,6 +406,7 @@ function start_gnocchi { # run metricd last so we are properly waiting for swift and friends run_process gnocchi-metricd "$GNOCCHI_BIN_DIR/gnocchi-metricd -d -v --config-file $GNOCCHI_CONF" + run_process gnocchi-statsd "$GNOCCHI_BIN_DIR/gnocchi-statsd -d -v --config-file $GNOCCHI_CONF" } # stop_gnocchi() - Stop running processes @@ -452,14 +440,6 @@ if is_service_enabled gnocchi-api; then echo_summary "Configuring Gnocchi" configure_gnocchi create_gnocchi_accounts - if is_service_enabled ceilometer; then - echo_summary "Configuring Ceilometer for gnocchi" - configure_ceilometer_gnocchi - fi - if is_service_enabled aodh; then - echo_summary "Configuring Aodh for gnocchi" - configure_aodh_gnocchi - fi if is_service_enabled ceph && [[ "$GNOCCHI_STORAGE_BACKEND" = 'ceph' ]] ; then echo_summary "Configuring Gnocchi for Ceph" configure_ceph_gnocchi diff --git a/devstack/settings b/devstack/settings index 062b4181..2a74165e 100644 --- a/devstack/settings +++ b/devstack/settings @@ -1,5 +1,6 @@ enable_service gnocchi-api enable_service gnocchi-metricd +enable_service 
gnocchi-statsd # Set up default directories GNOCCHI_DIR=$DEST/gnocchi @@ -30,8 +31,10 @@ GNOCCHI_SERVICE_PORT=${GNOCCHI_SERVICE_PORT:-8041} GNOCCHI_SERVICE_PREFIX=${GNOCCHI_SERVICE_PREFIX:-'/metric'} GNOCCHI_SERVICE_HOST=$SERVICE_HOST -# Gnocchi ceilometer default archive_policy -GNOCCHI_ARCHIVE_POLICY=${GNOCCHI_ARCHIVE_POLICY:-low} +# Gnocchi statsd info +GNOCCHI_STATSD_RESOURCE_ID=${GNOCCHI_STATSD_RESOURCE_ID:-$(uuidgen)} +GNOCCHI_STATSD_USER_ID=${GNOCCHI_STATSD_USER_ID:-$(uuidgen)} +GNOCCHI_STATSD_PROJECT_ID=${GNOCCHI_STATSD_PROJECT_ID:-$(uuidgen)} # ceph gnocchi info GNOCCHI_CEPH_USER=${GNOCCHI_CEPH_USER:-gnocchi} @@ -44,12 +47,12 @@ GNOCCHI_STORAGE_BACKEND=${GNOCCHI_STORAGE_BACKEND:-file} # InfluxDB Settings GNOCCHI_INFLUXDB_DBNAME=${GNOCCHI_INFLUXDB_DBNAME:-gnocchidevstack} -GNOCCHI_INFLUXDB_RPM_PKG=${GNOCCHI_INFLUXDB_RPM_PKG:-https://s3.amazonaws.com/influxdb/influxdb-0.9.4.1-1.x86_64.rpm} -GNOCCHI_INFLUXDB_DEB_PKG=${GNOCCHI_INFLUXDB_DEB_PKG:-https://s3.amazonaws.com/influxdb/influxdb_0.9.4.1_amd64.deb} +GNOCCHI_INFLUXDB_RPM_PKG=${GNOCCHI_INFLUXDB_RPM_PKG:-https://s3.amazonaws.com/influxdb/influxdb-0.9.4.2-1.x86_64.rpm} +GNOCCHI_INFLUXDB_DEB_PKG=${GNOCCHI_INFLUXDB_DEB_PKG:-https://s3.amazonaws.com/influxdb/influxdb_0.9.4.2_amd64.deb} # Grafana settings -GRAFANA_RPM_PKG=${GRAFANA_RPM_PKG:-https://grafanarel.s3.amazonaws.com/builds/grafana-2.1.3-1.x86_64.rpm} -GRAFANA_DEB_PKG=${GRAFANA_DEB_PKG:-https://grafanarel.s3.amazonaws.com/builds/grafana_2.1.3_amd64.deb} +GRAFANA_RPM_PKG=${GRAFANA_RPM_PKG:-https://grafanarel.s3.amazonaws.com/builds/grafana-2.6.0-1.x86_64.rpm} +GRAFANA_DEB_PKG=${GRAFANA_DEB_PKG:-https://grafanarel.s3.amazonaws.com/builds/grafana_2.6.0_amd64.deb} GRAFANA_PLUGINS_DIR=${GRAFANA_PLUGINS_DIR:-$DEST/grafana-plugins} -GRAFANA_PLUGINS_REPO=${GRAFANA_PLUGINS_REPO:-http://github.com/grafana/grafana-plugins.git} +GRAFANA_PLUGINS_REPO=${GRAFANA_PLUGINS_REPO:-http://github.com/sileht/grafana-plugins-gnocchi.git} 
GRAFANA_URL=${GRAFANA_URL:-http://$HOST_IP:3000} diff --git a/doc/source/architecture.rst b/doc/source/architecture.rst new file mode 100644 index 00000000..fb5a6eca --- /dev/null +++ b/doc/source/architecture.rst @@ -0,0 +1,117 @@ +====================== + Project Architecture +====================== + +Gnocchi consists of several services: a HTTP REST API (see :doc:`rest`), an +optional statsd-compatible daemon (see :doc:`statsd`), and an asynchronous +processing daemon. Data is received via the HTTP REST API and statsd daemon. +The asynchronous processing daemon, called `gnocchi-metricd`, performs +operations (statistics computing, metric cleanup, etc...) on the received data +in the background. + +Both the HTTP REST API and the asynchronous processing daemon are stateless and +are scalable. Additional workers can be added depending on load. + + +Back-ends +--------- + +Gnocchi uses two different back-end for storing data: one for storing the time +series (the storage driver) and one for indexing the data (the index driver). + +The *storage* is responsible for storing measures of created metrics. It +receives timestamps and values, and pre-computes aggregations according to +the defined archive policies. + +The *indexer* is responsible for storing the index of all resources, along with +their types and properties. Gnocchi only knows about resource types from the +OpenStack project, but also provides a *generic* type so you can create basic +resources and handle the resource properties yourself. The indexer is also +responsible for linking resources with metrics. + +How to choose back-ends +~~~~~~~~~~~~~~~~~~~~~~~ + +Gnocchi currently offers 4 storage drivers: + +* File +* Swift +* Ceph (preferred) +* InfluxDB (experimental) + +The first three drivers are based on an intermediate library, named +*Carbonara*, which handles the time series manipulation, since none of these +storage technologies handle time series natively. 
`InfluxDB`_ does not need +this layer since it is itself a time series database. However, The InfluxDB +driver is still experimental and suffers from bugs in InfluxDB itself that are +yet to be fixed as of this writing. + +The three *Carbonara* based drivers are working well and are as scalable as +their back-end technology permits. Ceph and Swift are inherently more scalable +than the file driver. + +Depending on the size of your architecture, using the file driver and storing +your data on a disk might be enough. If you need to scale the number of server +with the file driver, you can export and share the data via NFS among all +Gnocchi processes. In any case, it is obvious that Ceph and Swift drivers are +largely more scalable. Ceph also offers better consistency, and hence is the +recommended driver. + +.. _InfluxDB: http://influxdb.com + +How to plan for Gnocchi’s storage +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Gnocchi uses a custom file format based on its library *Carbonara*. In Gnocchi, +a time serie is a collection of points, where a point is a given measure, or +sample, in the lifespan of a time serie. The storage format is compressed using +various techniques, therefore the computing of a time serie's size can be +estimated based on its worst case scenario with the following formula:: + + number of points × 9 bytes = size in bytes + +The number of points you want to keep is usually determined by the following +formula:: + + number of points = timespan ÷ granularity + +For example, if you want to keep a year of data with a one minute resolution:: + + number of points = (365 days × 24 hours × 60 minutes) ÷ 1 minute + number of points = 525 600 + +Then:: + + size in bytes = 525 600 × 9 = 4 730 400 bytes = 4 620 KiB + +This is just for a single aggregated time serie. 
If your archive policy uses +the 8 default aggregation methods (mean, min, max, sum, std, median, count, +95pct) with the same "one year, one minute aggregations" resolution, the space +used will go up to a maximum of 8 × 4.5 MiB = 36 MiB. + +How to set the archive policy and granularity +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +In Gnocchi, the archive policy is expressed in number of points. If your +archive policy defines a policy of 10 points with a granularity of 1 second, +the time serie archive will keep up to 10 seconds, each representing an +aggregation over 1 second. This means the time serie will at maximum retain 10 +seconds of data (sometimes a bit more) between the more recent point and the +oldest point. That does not mean it will be 10 consecutive seconds: there might +be a gap if data is fed irregularly. + +There is no expiry of data relative to the current timestamp. Also, you cannot +delete old data points (at least for now). + +Therefore, both the archive policy and the granularity entirely depends on your +use case. Depending on the usage of your data, you can define several archiving +policies. A typical low grained use case could be:: + + 3600 points with a granularity of 1 second = 1 hour + 1440 points with a granularity of 1 minute = 24 hours + 1800 points with a granularity of 1 hour = 30 days + 365 points with a granularity of 1 day = 1 year + +This would represent 7205 points × 17.92 = 126 KiB per aggregation method. If +you use the 8 standard aggregation method, your metric will take up to 8 × 126 +KiB = 0.98 MiB of disk space. 
diff --git a/doc/source/client.rst b/doc/source/client.rst new file mode 100644 index 00000000..6aa428a1 --- /dev/null +++ b/doc/source/client.rst @@ -0,0 +1,13 @@ +======== + Client +======== + +Gnocchi currently only provides a Python client and SDK which can be installed +using *pip*:: + + pip install gnocchiclient + +This package provides the `gnocchi` command line tool that can be used to send +requests to Gnocchi. You can read the `full documentation online`_. + +.. _full documentation online: http://gnocchi.xyz/gnocchiclient diff --git a/doc/source/conf.py b/doc/source/conf.py index 8d947844..ea782e69 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -15,7 +15,6 @@ import os import subprocess import oslosphinx -import sphinx_bootstrap_theme # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the @@ -106,24 +105,13 @@ html_theme = 'openstack' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. -if html_theme == "bootstrap": - html_theme_options = { - 'navbar_class': "navbar navbar-inverse", - 'navbar_site_name': "Documentation", - 'navbar_links': [ - ("Launchpad project", "https://launchpad.net/gnocchi", True), - ("Bug tracking", "https://bugs.launchpad.net/gnocchi", True), - ("Git", "http://github.com/openstack/gnocchi", True), - ], - 'navbar_sidebarrel': False, - 'navbar_pagenav': False, - 'globaltoc_depth': 2, - } - -# Add any paths that contain custom themes here, relative to this directory. 
-#html_theme_path = [] -html_theme_path = ([os.path.join(os.path.dirname(oslosphinx.__file__), 'theme')] - + sphinx_bootstrap_theme.get_html_theme_path()) +if html_theme == "sphinx_rtd_theme": + import sphinx_rtd_theme + html_theme_path = [sphinx_rtd_theme.get_html_theme_path()] +else: + html_theme_path = [os.path.join(os.path.dirname(oslosphinx.__file__), + 'theme')] + # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". diff --git a/doc/source/configuration.rst b/doc/source/configuration.rst new file mode 100644 index 00000000..8005820c --- /dev/null +++ b/doc/source/configuration.rst @@ -0,0 +1,187 @@ +=============== + Configuration +=============== + +Configure Gnocchi by editing `/etc/gnocchi/gnocchi.conf`. + +No config file is provided with the source code; it will be created during the +installation. In case where no configuration file was installed, one can be +easily created by running: + +:: + + tox -e genconfig + +This command will create an `etc/gnocchi/gnocchi.conf` file which can be used +as a base for the default configuration file at `/etc/gnocchi/gnocchi.conf`. If +you're using _devstack_, this file is already generated and put in place. + +If you installed Gnocchi using pip, you can create a sample `gnocchi.conf` file +using the following commands: + +:: + + curl -O "https://raw.githubusercontent.com/openstack/gnocchi/master/etc/gnocchi/gnocchi-config-generator.conf" + oslo-config-generator --config-file=gnocchi-config-generator.conf --output-file=gnocchi.conf + +The configuration file should be pretty explicit, but here are some of the base +options you want to change and configure: + + ++---------------------+---------------------------------------------------+ +| Option name | Help | ++=====================+===================================================+ +| storage.driver | The storage driver for metrics. 
| ++---------------------+---------------------------------------------------+ +| indexer.url | URL to your indexer. | ++---------------------+---------------------------------------------------+ +| storage.file_* | Configuration options to store files | +| | if you use the file storage driver. | ++---------------------+---------------------------------------------------+ +| storage.swift_* | Configuration options to access Swift | +| | if you use the Swift storage driver. | ++---------------------+---------------------------------------------------+ +| storage.ceph_* | Configuration options to access Ceph | +| | if you use the Ceph storage driver. | ++---------------------+---------------------------------------------------+ + + +Gnocchi provides these storage drivers: + +- File (default) +- `Swift`_ +- `Ceph`_ +- `InfluxDB`_ (experimental) + +Gnocchi provides these indexer drivers: + +- `PostgreSQL`_ (recommended) +- `MySQL`_ + +.. _`Swift`: https://launchpad.net/swift +.. _`Ceph`: http://ceph.com/ +.. _`PostgreSQL`: http://postgresql.org +.. _`MySQL`: http://mysql.com +.. _`InfluxDB`: http://influxdb.com + +Configuring the WSGI pipeline +----------------------------- + +The API server leverages `Paste Deployment`_ to manage its configuration. You +can edit the `/etc/gnocchi/api-paste.ini` to tweak the WSGI pipeline of the +Gnocchi REST HTTP server. By default, no authentication middleware is enabled, +meaning your request will have to provides the authentication headers. + +Gnocchi is easily connectable with `OpenStack Keystone`_. If you successfully +installed the `keystone` flavor using `pip` (see :ref:`installation`), you can +edit the `api-paste.ini` file to add the Keystone authentication middleware:: + + [pipeline:main] + pipeline = keystone_authtoken gnocchi + +Also, if you're planning on using `CORS`_ (e.g. 
to use `Grafana`_), you an also +add the CORS middleware in the server pipeline:: + + [pipeline:main] + pipeline = keystone_authtoken cors gnocchi + +With or without Keystone support. + +.. _`Paste Deployment`: http://pythonpaste.org/deploy/ +.. _`OpenStack Keystone`: http://launchpad.net/keystone +.. _`CORS`: https://en.wikipedia.org/wiki/Cross-origin_resource_sharing +.. _`Grafana`: http://grafana.org/ + + +Driver notes +============ + +Carbonara based drivers (file, swift, ceph) +------------------------------------------- + +To ensure consistency across all *gnocchi-api* and *gnocchi-metricd* workers, +these drivers need a distributed locking mechanism. This is provided by the +'coordinator' of the `tooz`_ library. + +By default, the configured backend for `tooz`_ is the same as the indexer +(*PostgreSQL* or *MySQL*). This allows locking across workers from different +nodes. + +For a more robust multi-nodes deployment, the coordinator may be changed via +the `storage.coordination_url` configuration option to one of the other `tooz +backends`_. + +For example to use Redis backend:: + + coordination_url = redis://?sentinel= + +or alternatively, to use the Zookeeper backend:: + + coordination_url = zookeeper:///hosts=&hosts= + +.. _`tooz`: http://docs.openstack.org/developer/tooz/ +.. _`tooz backends`: http://docs.openstack.org/developer/tooz/drivers.html + + +Ceph driver implementation details +---------------------------------- + +Each batch of measurements to process is stored into one rados object. +These objects are named `measures___` + +Also a special empty object called `measures` has the list of measures to +process stored in its xattr attributes. + +Because of the asynchronous nature of how we store measurements in Gnocchi, +`gnocchi-metricd` needs to know the list of objects that are waiting to be +processed: + +- Listing rados objects for this is not a solution since it takes too much + time. 
+- Using a custom format into a rados object, would force us to use a lock + each time we would change it. + +Instead, the xattrs of one empty rados object are used. No lock is needed to +add/remove a xattr. + +But depending on the filesystem used by ceph OSDs, this xattrs can have a +limitation in terms of numbers and size if Ceph is not correctly configured. +See `Ceph extended attributes documentation`_ for more details. + +Then, each Carbonara generated file is stored in *one* rados object. +So each metric has one rados object per aggregation in the archive policy. + +Because of this, the filling of OSDs can look less balanced compared to RBD. +Some objects will be big and others small, depending on how archive policies +are set up. + +We can imagine an unrealistic case such as retaining 1 point per second over +a year, in which case the rados object size will be ~384MB. + +Whereas in a more realistic scenario, a 4MB rados object (like RBD uses) could +result from: + +- 20 days with 1 point every second +- 100 days with 1 point every 5 seconds + +So, in realistic scenarios, the direct relation between the archive policy and +the size of the rados objects created by Gnocchi is not a problem. + + +Also Gnocchi can use `cradox`_ Python libary if installed. This library is a +Python binding to librados written with `Cython`_, aiming to replace the one +written with `ctypes`_ provided by Ceph. +This new library will be part of next Ceph release (10.0.4). + +The new Cython binding divides the gnocchi-metricd times to process measures +by a large factor. + +So, if the Ceph installation doesn't use latest Ceph version, `cradox`_ can be +installed to improve the Ceph backend performance. + + +.. _`Ceph extended attributes documentation`: http://docs.ceph.com/docs/master/rados/configuration/filestore-config-ref/#extended-attributes +.. _`cradox`: https://pypi.python.org/pypi/cradox +.. _`Cython`: http://cython.org/ +.. 
_`ctypes`: https://docs.python.org/2/library/ctypes.html +.. _`rados.py`: https://docs.python.org/2/library/ctypes.htm://github.com/ceph/ceph/blob/hammer/src/pybind/rados.py diff --git a/doc/source/grafana-screenshot.png b/doc/source/grafana-screenshot.png new file mode 100644 index 0000000000000000000000000000000000000000..eff160321972884e6811d4dfa7fe7dd26fac2912 GIT binary patch literal 82601 zcma&N1ys~s7dMKcgv5YI=g^%3l0&F;gLId4H_WKCv~&p44blzLNJ=*&-CYCBz#X4A zp6C6(yY9NoTCAD>*(dfs`|Pvh_YYT9mce;O_6!9D1xHTywK@vQ6F3UWV}A@Z&)RIHrB&smrD;@Movdu^Em2Tp!xNLyRpaN0`j4;kLxYm=J}3N2r+L$SK_=nb zfd2(g3KRI0Pz@hn+nFOQjHdb<8_lx{c|$C-&Uy^iIm6gDl7_@sI;uX@Cm6?VJ|a#l zsZHky*De2t3^%vqVGv5}+my$Wtn2^@GGlh^^UH7jJ$>UY9jNTz9?||niB?RK{Y~}! zImW#JmK`9cYuWZp{PubE7 zDwfKV3>s79^Gfn75Hd}ANGT4-IDZWzK=FPa&$97YLW<7EmoXw@C-wrb{&$U!e-Is> zrzr!^JO(jcD>ny6K$-}ifIN(t@~FdxQIiKhq0xeY(}=^Nom-zDVk|B@hkEoAj< z8@d@zitom*2#DWvd%d!%9)L@yx*%;En?T?r9w{^`|2ZskUC~JM7e$167wVRsXueN2 zz2bsf&Mz5Z^R)Nvf<$iZ=q~C(P=6<$392~?E+?BzYL;^1FtNO}Pg8g{VocN8X~dQx z3Q$5ibo=P|)O+B$FmM9#2%iR17KQrzBXkVO$Fk_f64*OGuueW;V8p5@exLyGSVwvL zVvMH%ZY=?hxu8p5QlPjt7icHa>t37{xKf6iyY#T>ROBy0E2n<8;hD4xaAdx`f? 
zJ^gklgpv?`lW`=n(RlFqw>?0Cb~s~1iCX`Iq+>J1x5rxFFx@}mbECfdLGTCaG``pfO6k5Lb48wL5;R7% z?%uM#4Rd*J8E&PNp%n39ZmY);y%XO4b|vP zUCVIke8s37In;mHPucyof4L85U3}H@M9-V86(Ei~*1x@`a3XWMebR8sbrS!?=aYD6 z9G;>H!OU~d=ePt5m<<@+L2@~d$76RCBWV~31o0KZae9V&`g^WCn~ujv7(X5(jnPy(5`f!32%J- zrc)iL15`3?=~4@}dq(&9)f?0|PH*bXGS`5;5uc@Bym*tmCTFT9pnoHS%CZkChSgRImcIGx z-{cNHCKM!$mj5W9E1xw;tkqo7VEcK`ahhnF$@V=zL7i&dv@QBHXK9;}gyHYn4|U&6 zI)6R|Oq+TTTykG}?1fG_mBiS+w}shayu7kAnDQ={n{=O4oME34EfgwwGwwRRWkao;eyA(!vW+F?wRP>aFrLGt(>P^p`1HPV3l9{xk z-e%wC+ZKBJB)|_rj|jLmyrss$#>v5{C#xliBq<@`#je5L$Dt;Vi!zAfjVy^eA~TOV zjM9i?i}XWoDtSD8ylcFE!S~{Fma*EdS}&kgcDK}h*Iw}Tu&mXQRjSpcRp8dkbBS|< zbE0#>hzp`#j)k@&yKyfp@(9g{ZlV}=LJkCn8^!U8EXp!p-zJf8uCfcYS`6s+r|ueTivE`J3irDCY57BIF>zP#@OD^f zHtNu`=Pm4DM)6B= z^LByueSZ-+H~0~_1#!gll7#&&rTH*)6w>yPkD@N&Eus6PMj~Fq)Z|XxyZB*3Oo1AG z7yUF)?~^1qowv$KJYIKfWUR4tRW>ktGkdI?y}P=*xr=4Bv@0e=<+b@M8pv7{I$uiF;}iajzO-{C$p7zWznKzV{jverIUr9MWcAUILZcA3#%1qrv(K= zSPc0Blzc0HXvT7p3^)(ZrSmXi<5A&D<`Cw*<00e$2{OF>!tte3F?slGSfw=gU6x+% z_mGaQExq`<#hLbjv!Ay7B{mQxyBUiAwWdJ?#3 z3>H9LIb0)LLH5xrOPPgn$?<}u|Jo=(x|_jL8QnAIdI`w@rF(cN)$a!VM8EhF-HoEH5kxaSnBx3!74m3{>H! 
z$0cpE@<-J74UpOUN=rG*fK95+DD)|Sy~s(`CqtLJZ;PEF@==3b zgIu6faI^8I{oAr#>F+j?{*JH-Fn=7whKnBzfvFLxwp3eXycx0 zDmfqTO^NX03O3qnr*OX6Bg|qWZdqy>HTG!t-AY@ptKYwx)pwj*dEaj1RT$yfl z|CAAm;fcY@900NSl-$?edaXmr(e2SkaeZ-C{E6-l#_{ZSrm1s4?YP@wOdu|w?8_X- z9@~xKqOtd5LzY~j#+XJ*?V}GGt7E86kt-SZLx+{=<~-)zPQgxvD-Mv#+xD&QHNe+( z;JOOa1S2De$X^|yc{hLWHk-V|#l@%%=?&mOSi#E?em}1JHEc9A9v8$4nLkw>_(VYP z$Xgo~MTqDWH>{FzaMX7z^@CNwJ@&_rbzNgIf_jvx#z%Oms1h`5YPhrtAnO@@75xGX zK@@#`g;|hv!g~k)(l=5WOk0yvQ+_xo&W9-e1jl@ZYM^oqohhB?c{1n!_WDWlbcrj z84V4MsH=sQu=?w_|L%_bB}V(+-Q8K3i_6Q)i_?pb)5+DEi$_REh>M$-iyL^Y97^i2l{#|7!Ys%71j# z{ky9G-~aUdN6Wu^igNwo;2#|QOI&|FMaoP3nJCwPWiS3rd_A-nz_3JFUBOlkFzaDl)FP#wRZ-!5Ep&V{9>p5XToWpeM^)?~ z(dYK=dsA}i>gx1#bb+z9fOagH^2waUzo(y;fK8X{YlpWi)Y-9RU*}RI2DrXsC-n+{ z=LCn*>sT23`?uCQF6%AS+Rz;?)Y0YX1s*?cCrAMQdnxL{!N%PoI9i@YUxhv3N&ZJa zn!hWTrBgE&%f^np9+wGYgDL-R=`?@xZ$W5Eu7*BeGKQ}amnaIVR;vVhm@TuPGZ13R zldyPWuyDdpmVfhn`nTXpQcnC1s`@wX?~bpT56KV*oK4&P`2-MeoK_-`rqrs8?1{vx z%)dF&QiMtV$es^jQWMN|p^R(4+%Q>0-%R#u8AGv$*QB}TuX9=aFt1x44V+zT1P67+ z;5`HVdqc8M*m8M|R8j-@kKm9T#MmZv8#VFnWn(K9;(P6Vp`NQ(6!p(B+Zk0+o&A2^ z`unysNIaF6*@=9Qbe|i)sMT#%YqBZ~AGx4^ujK{!(G#VY+!dLE^Ve9?OKo^J3O6#h z1ZN#TW5V;qI6IW!b!84N9S4qxZwGeg-$a*Bf%wuG_c1((EzCvcGL_5Q+nY`M@jl&QZNeH;>PL4WKUm0Hi$0J=tHVD~%)ZVtq3s~4F%CF9kmTH=>Nj4 z|G0h=XYvBfrd^TH)g=`pacjE5qEX=emH4lzJHCuF=4E4tRW9LV6)1kCU2gVRuS)rs za;STD9V1W2S#jx6m@g2qlh zxVs)2U+>S;S62^qbUNP}u{=xoX{#^B<2y`ZGhL$f>xkfKe8fukZ(yL$D1A^Yt2+YZ zJN!PC$F3kU>y_l~^)VL^2#mx5($jpC;bH0v znxzIDKI}Tm-A$`7Je+3zoI|_*nUn+=IF8>hv7#I}>V^gEOCKe_^LmoWw0ECZ5LxV1 z39qRdwtmZK5}&oa>1kw-MedEqrjX2;c--+|M8vLxJWsOx#9-5MvcZRga$v`zk;V+} zz0WTPXhxHp<=rSHuji5usf7h%t?vmjLEGI$Tr1q4l*cuw*+-|5g2>K~w_!={v zCJUm3XMU$lmg&|Evcb6>jYiV=4fFI@2tXS1-Vat5r-*6h{zOG{#rFHlDJVn$rebd2 zK8|z5!HDTc^2ynD^3FnT2UPsPw?C6)ynJr%mdR~S(E;#<>#ZpH_BhP0v>Y`@nBT;i zRUfw;x5r}=FlySZUc=AFuJ@DWuq}IBul-|f+zvNOaY)19wK@?6Jx!0*I=JR@4aE0`C7{wcH=4oLxTNn7kFnC z8bliJo&4%-ks4jU>k8?}e%QuratAJV^$nUh5JGhl1Rsapsh~jwsZ{KYUanC0U3Wil zP!}=Ct}jXaaqe+FP*yYYzO^{JHF73gOc_5 
zmy$2d78|A2uWaT8=b2|qju_hXV2FMQJhZ33UjwEF(-K)FULl(g03|Is#>lVtMQyC} zdkv+*fB!t2JS%fq?|V)~Rl)h9^{4mhXx z{u^|`#aESU#~Dio8n^k^hx;C1;~9xby2Y?vd~oN5UmdS8Qa^WDxUiXMR zuT`^G;E?-m* zqZHj4#J5M)%$#9i_?(k*%Y0B0R%T&gy_}pJ>{UU--a(tPigwZf%f+k1B`6LpHc&tF zELZz@rP>Q>bV+({;D`_IOG)Z%je@m0-om#$Zr==7p`!%N1iKIuXFy@lAUl z&g@jk*`3R!g=7z6;&MDowqNEb&R=?~Z3&9^Q6p-TiR`hnFRD=1)%U5k$|C>9eBd+V zod-6X3l;u|wyR|?XnB2Ne{H%mlj9@T~0f{vTa+wQ)WT#n+}#+v=Q?5%Z%*T!+c{!haswOu-`^g?D7JVI`2BK zOj^FT6B~I;_UeqeUF@jw17^Gq7x-^3CRL`-!^k(bt+N6cA-9uR8-g7-QO|o`fT@%R zfH)ibg|oPu0n|y_$#BhkmmL{i2E^*K zWb4M=31UnvA`VU6rsgT)#c~C-+Jn*}EZuzpP@PYFZeGv%ObQ)=SyvAxA>F5_h}RT! z7fL{h8%L-0f^W>{Cf``&W+s32ph68Z^)Z2#m#9IBQ&XBkCI;K9txhA&-QRJP`)P-h z=Qz}&2=lY-mYeJJ&>q%F%K1v-mLq+ZEw#&an$)B4?b<%}`o{7UrbA6BJwKvW6;!xH zn%l_B%LiLJuLta&)~*pex1Si%f~_KU*?sM60t~~LMzK(abc3n;yn9_6E2dmNHBOH; z5r7BSm%L0mC`A2bzXp~~JRXu(HzVsQzPwad*RU90d&9Zk(`0kd=(6Vo2hsR_umDgL zS-5NujM`!^6bd+~o0-J`TSIWk_XHfr4Rb9o)A(8GFhRe71W;C{?+Eg&g`#?g`f*k4 zW9bfoAN>2bQRGu+DS$5{yB-t1`Kjxyjyx5oRc+o>kI}!7n&F;G+KV1a&ry}3ALE=a zT9#SC!dCa%%(LcdeQKW9c(;eTEVp&eDMyypTg&F{lsiJQN?nk#S7mVxRh(K&$H@mhBHq#4d$C^P?!uih)T$p|(+mFC zi;*@k$F-W^Z(aN;MBtVBMO?Ctr12hOTw)&9E}6>j?*jjPEb{tK06lUK5e-r`VL4+t8&_ zBR(tzShqjQzLp24`$c#h^a_|RdIKfrwUa70V@|p|Ir;cBT%!W33FP{|-Ob$iUG5=W zJw>+gpz$sMMEW^mu>+CbL&n76)de7yz%zAfJq;nd2ACc1S9T;hbs*lYt*uRoQU_eb zrBx)$@!`U6jX$|mu|-8F!lk!>Mpz6(Fbd#US(j7uDHiL!VVkek2)g*q z1_8spb5)R!QNX1Z19LLFE-gV+v>aX@p^Yt?dtYDV~Ixk?OAU~%Vw$& zh>!Q4Oz8eu;dU@y5JkrHak30iImgqZ~UMMq89St8Y%2bk@8N5ma3PDxS&V@a-N`{qg-!|S0R%thXkfK1l-Pe{6f zI>A84BbSV<29x&NswZZ&LiuKcAAoAkts$-7trvtaLH2xG)Mjk6brEw6eQm*ptdbad zV{M4YQ=`-7qbG!BffMQ&+6Hu{Muuk3)N=aCdM^35`pv~O(XSr)h5UX(7eO^2YFmW8 zu4j_!q7*)1=KpR^V1V-FgH*$>VQ{Mat5^IvQW-SP-=klxhrS6j^e-4V>T#DPAHcE@ z=_6YValxx{L@dZY{s9hTG{iHFwNIfQ68gEj)SBaJYfo_H#gGf>rT)7M|CK9e_Q6qV~sz>>l;`Ge{x`lNq^@Fq6AiROMzg&V+1ev(^Gwa_zZXC~a9jOXKh9P5K;gIC(#L!6rb zO8j@g+Igpc7 z0V)ike3vGwsQ?yi_y~7#k-q7_WwKPclz!HtPai z27RHM2MmD(#91H$TZrYZ+lkxpmWV!-mpxFr>-)yJeY5nr#z3UQWjsc`N{p=&Os!0V 
zXMksY6a;k}6D!P%=Vt?aI)OeQpq7jE4tqTG3&!_n#2yoU>%myWth!HALn6gQL(g!_ zh*N!(7lU^Ps%x+$EnZ6I=>|g+*|eu_%NK(t4B$fo%6Z<{H(q;FEXf{}ubd?;$8>|U z5SKJ-CjjC|J=m_I#J+z75r_I%43GTdI?>)M{Y{p}fV=uZ-l;0dkd58R!icu5hx_A) zW(*L#`}|fYL3|BLwmz%9U+o<95MsRzer>mBmP6K^jdSLG3iF99*k)UIj zi(AEPghzdY1zw4HlZzn+&rYoaGD3YfFQ*674MyLH9d!>Za`qZ!1<+_XxAuANTz|Hw z=y}{V%nGl??u3)9bx!&oc8!4@1H(STe%J{aN=y8fG}7t6`DD`F^!+_~D2of$O+n#0 zUkH5=vokAZrVjVdH!w+N$@ez%DC0Syc_^fBCmsdGYB17SR3L-tmnF{_N%yt$heBTU zk%Y`#NA43K7E&rU6EdT(>-w_kDJRPq+Ptwz(XxZLJr2G*(Hbd#s@7K*WN0q03`Gcy ze6g_#5DR-_U#Yc18!;^m=rcXYnl_H=3`3AF<|s_z>$gERHn&MrlOiZV=3&U7r$2o% zHg2)M5&G*cre(Ir`qON!RfJU7D&Gl1`#_5HRgMQF=tpou_a*QO3-0hWdOd^)oQb^I z=(*|ZjI@)BIQMFLs<5)?#QFBYLF;G*(y*I5A#*CPiAq|_tjTL=^6=s3xs?x?pqKqc zYPsGG1{?=ckbnmG*h0uzs7KTrPyWQj;tpokrZ&D1Xk*3o5L7@gDfcGk)idd@r^NFo zg=fpvXlJt=eZE(M@w&^@?rfyf`{av34(m@}Up``zvyhjBWe-;c2)Tc}kb!NIpvxsB zHnWYt2_cua@JL$HP!aock9CXYXLRLK^n=qzR=twS?!tiRs|d z=oX^w-lsVfLawjFg*X~wEkiHYX6XtKyM)X4zsKZmJ7R+j=CYTrX3F(#eIolmZL2Xx z%!+{FjjWf!s_tanV`%~Cs%w#u<=vs zA;V)OFt-q`%w2MPuYBz{g4pL%Tz#F@BRlU!0{Yw=zLh;WE~E{n=43y`60A18=%s7E z&>LfD#A`^pquqZ@+9NvqTx#%H*b|FmT_il}rWP-95gFd2s~eh?W>pFSy{ ztm2YP`exvE^1E|SM#^hm_RXE%m*+{ZppwHT(5`cIT%wX{t9{t`g0oLx@iO4;s!r93#6;R*hpgi3qc9%ln zY`+KQOw<=c3mmDmHQ`5QhC^=tGCU)OSLE&F15<%C2ewPOJJ%b#{8bd<`geD;F>SrX zU1JL$P=|nSxOXqxBf@ZA*LXDRZAG7)*Dfp55RlCJ&OMw^)t-1Xn zmUu8B$DMm$`Q-vpR&L8W6)z7@j0TZQB0iXMZG&tyf1wZf$Uyww0}oEY2uNY$7kcN}Ay7&KgZGFL`cm0zd^PbQQ&m zA*xcl%z4!GvbqW&NKh8jd?Bgz61KTf!l-Zhthl3*Tu@dWoUp3q8EC1;?>1Cjvkf=W zt*~aBJA|^DE$(UPgt4gC!5q(}@KHS$!*^Kd7wfxPj<@sSS}@ZVRkAy-hchl){f?V0 za{jA79@e*rNO60tdsFe&`wx#$MFkFjbpblcY*s_7&NAvrbB_9=$XPX>KV&pBt|NX^ zuTWFbq2gyf;$;J3#);h=wHy->fJw4`HB-rI+qE99GpNIE7!lxxwt@|M@x50#%f48* zq7<54Di3G7HnC7GyLaj4zn^pq(6u}fd=%@W{5#od*f_3B#aPywEMH{CIMO9scs%xm zM8EI0b+l~N1R4}uJuLcmD-(fEHNe`kGoAxb2dwo9ka$9wiJ@BU4u`Jk-Pftq$}4D*ax8a-#RxTDynr@E-ckqE#Y z(G>xlxu5~vjD??Lnh{=*S{b$LIX{zY4Y>4e#{dmbwYCR{zr1$+QU{$8k?hO_M;Nd-Wx2cixz7k;dp`8ssrXeTC0-hvj 
zBHqnVvDMk~QQySx)t04pP6b~U#Gv%;4IR7{Q7FCk3vn`0{t(12>zWsdCL*Cce)#x#x*%PjAJ!DGOSMSzfNEeAJcE1X_ducKH zCkDmtu@)I-hWrp%6NE(B;JomElTVL;XRlAHGrb4sZ6TVs3w!779(OFMr)3ipVhc3!-(6r zW$U#qOx*qXcOQXec%#qkddCg&j#y~438knK!0zV;FVMK$=f=EE&O5)$+=+qJ^VlVZ`8_>LdbbW#j?JW{R{S zv>wxQV}h$=+XRgb3VYIzj>lRk=jVGHTD;b)Km z$g!^w$b0ipX7O1Nuavg1)*O7Cw>AE?AA}c3(SMmyINQTdu~Mpam0L!Xn%dqo8-LP$ zimFaNYdEuu=NIdVjG*tutFmZtZ{2!debj?xpJ8X^Udq^t76xZ^WYr+%UMLzSA-ykO z`?}BWaFkr~$OM^9)8LnfvY0K}_wL!L@Jlnk^@lCmvETs(yS<+T9qi9NKozIbj3(7Q z#|r!VL&wpAaSv_>a!s{G6}3+A_#89}uR%y5Y1c8ep#I>4QCG7R1Jf$Fp({p7g!zvK zYdu}Hc1U^TW5Kce1{JGtSTyo3bXWxOij7TyBW6OwqI`Ed&NOtJ<`MAek^?cgT)Hnk zz+WVT=6>wq25G{(pEBNO=gkXKS5wxVbj#uBx^;&Fyi(D-@E^EoDnVUkB1+@bY=)f^lp;S+%CgfKups|^z*_BK0!U9uIBaGRtFlT4%aR5tM| z{c#C?>t(Ff0Z`i7AfyxXgz z5DQ_Z^Z0VuIMDT!n$vZtWj~p#YizkSr80*PexY=pDU#|xaP*@$<#KIAPyEwZZjQMk zMsc$VjUPpzGrTs?9FQhTqwr*&4-Y@q=XQ(2+;&xH>GyH_B(S0AXUIdBN>*TOIO?Nt z>I53{xQ~HuaqaUVPdcMSqNA7@g2E!S&D=8IqLB$!k9-!ryv4qTe?F|Y zDyCf-AnCc5Gamb0)5Uv~rfWj#51`p{uOSKJ^N(q5W4xl4b&t#~B+DWl%WjG|N;>3d z&kV|PR77133y>E~mTaYgw<4oVd!}mEx?f!`Zxn3oMa231JySxiKU2T+Kr2Toc+12& zve4O_M-3YO?Hx6S3-uDC~BQD8%bcJcc>IN}EHWUHl+vZSrZ?j?J&DTu1Ebw5Dv&$G0wzIogrvn37k!@DkrDAj1E z6i+f59Hc`iHVCVq0;_5IR7A-lS5 z%}2a`7a_TGD)!?>s-K8>rcvjOOxI?gw~jB0ZKgIRaFYk4Rm7QnLRaT%vwVc+!2H9_ z*_@4j>}DZ3+gx%lPVdh=mkGb#SRZ8S#3o( zEo+%H%OdCVjQk?u+v0pb!;qa4jV;lNW&}7x-Q2k1{CS1VR|eTAireGqTj;wo)%}1i z6~<9GhNe_clZgc4Dp}AFo znc4EFJTd|qj#rzz{Er6f)jXqib%>Tfo;Jar8>@7w2;O(zM~vYvh9JQ0z(<$c+cqs< z(>zT;o$Vid?yLzdLo0M!X5bV+;!{2Eb`@W0kG7KIPQ*<{ASnjOU;7MwyL4aAtN-!A zy57>e9#1sI{}!s62I?0;#Xzn@-oe5Zdj^Q_ICEzJi{|~Uyu!`-Q1)5OwSKV;0Hibd z<`%f9^58IXFD=A%|B%n~P=1K>S{&H}%&&QWrdKq8UcNbZ*33C%vo}?oi_~uXe`)uZ zlkVWYtosWl;?+WO$DOJZ)pyT$-jBVU!~vZE&iA)}B@#c(R2n18S|{`0+zHhcI%aCd z#7py1>hg%A1h~#;xX{k?C}*kRIqdVaa}J*R`^=$z+{})2zyn;=- zO#}rWVUZa#;r1+s?eC4zq=xjsyUHiFv0A|uBQvALNA;w$j_*248LIDH^u5C08Tv;J zcjME^5cvTP*>zo4f~qI4*7-X9F6u=sh36-@cEhz& z4T)lune%J~$XZ+A?5|^6F!yb~&R2fpX#FF|IQMOorW-e>=ncN+?U 
zG5SN$6-O3G0G%eH&8Dw%GqWWkEnrFOJd|A=UilDmQ0h@=(?8G-7^H^U6wjOzo3}xH z`84mHMS5bv)rxQkhnLzX&Rz$3a*4K2;1hl-fTqJu&Sfqk8JdY10xG#9ozV~*% z1BJR0_|0>J!vmMPQKs^j8Cx0qR3$$*7v4BmPj>ynwCCF>se=)Mf^?FDv7mdcZF6%dpb}x$6s3*Y_xoOoN7B!X-lX*CZbBcK6SeyS|qhmA*`EWekt{%EACOl$(C7(BXBvgEswhkS)jL8D#ft!E4!rnA|hgYyyBoB$}9(Sa%XA zdrFVsj;2DIoE%M*^{J+G;z@T*X5dE@OLFFbiV`Ie+1}*br+zaOHMbbRyuGYk62QYR zhWr7OuDJ#_?Mn4!2&sbELo|@*$vRt32C?~voQuVUp4?^%>ra+`k6GbV5V%XRVN3lY z8X2gqFr?P*N~5WIn)xT9H3~i->j~lSTMH~}X>DrH>wkzBRW0-`g!UuDZvu32dSmkW zPiN@@XXv>J+Tu1GPUPmOjw$zcx}sb7i8o$e99>Y%P3G+OzhqhB(e&L ztCF2&rsHyWOIh3wdd$bDB>{U>SZ$QBbw9`3z7nsp#Nk9j=^d9HTD=6y-IyJixU1Bb z+DPLOhlbe1$aj7-9++x19-Ntv8N|9HCr$kjLh_RL?tYQv0Z@GsQ944;-qU6z!$?-o5Lz(7O1ixztxF{9&_QB%k0Waz2yA8cg9aG1{L{b(Iptf~qT{ZvPk9 z2wW}9&ni>|88^1PQ?oW!Ij5 z#zrC#w=CVV^*;bVB!(~{{si@X&d1RvkG(0(l!I<0rcz*s=5K<$jNmLZ%LahhMeJ0C zq5e_xK4%95by8=w4!VLj=D(qqNSs`;$*MKS`5RBF9?CR_rGV>Y_0o47#kw^Gtca&D z@-b7g>KksZB-0ztzq8R0LZ$oRi7WqKpS`VD*^VH~ss!zCgdnHO;muyhoKVO^+s_6lnC(v#ks<6+qG)v86HZVaaOW9wPJygcJ;TBZRQC4Rop{sB zOiTOepBvT1)ff!D*qvnH=hvE;n9$6PRZ16Nx$)W>CiU5flVOJc`TVU!tJ>d-GX3HPn`}FjbA+4h|nEDmuzO@xujNV#DCjOa8uRGDH z2Z)XRfr5@pv5}G3ZoaazVSgYf;_wscn-M24T{5SkdZj5bIG#~SJ-<5zT{j4cnPy{z zhhUTD%sia-Qx73aX$(9(tWSxI|;3(-Dox|txX3D zbqa~R>o?6F`-4bm_U5#~`K9Hc_7d5W2$x?=6Vmn9J=e#mHb`aA{q)az(T zh~eP*^XJ1iG-oGEtv=@#zrIOUJ3+<&xgQC}z7Qn4Pp`$NVC3^TP z^FHi4aSojF{}(PQdDoO+Nc$tN^dVu)JI*c@AkVQ z668E|C3V||ge=Kl`79c7z)$oB&L+(WtA2>amd|)li~G;L0Jpbl|HB5v%5%b1%@%5` zRi-bx!jW#jf~KZs_;w}6svsEd&2MPkquW5aW`~5G+WN*&i4B(tCNwIi^7guviC}af z5uiKR|-_T@Cu?K=kt6#$JtJBsGxk@$+~Idu_QjwXYcNkpZjl&C)%77X-i6(vwTU zd#;-6IR^QFBSUjDSSrCEqf--~n7BEa;*Ugnv;L((NgVnG^7@zN38O}>-fD>xQl7x| zQPy8+{B{L>UEPgd-c<9R%vd4LZnL%0SRL=m^-{Y4=IiqfPtA{aZ@xz(QC9VHqOl<< zg~$@WxC2)ZROGmIy&AXdZnrRtv)<7k_ucCDVVnA2Cf||?E|GQ7zEz7X%?T<`T^nGncqApbp+Eq=4Q)>!Vc&Ck5<_JGQ8>nT`Ja~a-FuB3<2*Q?#rcA ztmMy_9Z%MqP4=S=S=pTLD%vIrokD!u8{&HFAAaNWKlGNWlpeH<_QlfjS&wN!q`?kL ztyKJx~AN)h`IP@7a3UfG8d|qex{lij>>l?*z{K8dRRd_bNi?FGx)Z8eaQqJ z%L**7_PJWF2}@?7Z5^s3rOD*pOmui=1dHyggOlr 
zz1(BL_kdsMUf#wQbia+fXsm=i*H+;*A4fUjtzX2mNCp@ z4?1pr8nKap53ulG6VtucWm-j+{pnN1yxJE+GwNq2{jOd}i^d>`D?g4|FDefjAuq=l zU`}?O??0G6qPu8)xf#-j5x#g)WV53j(XS$QnP98FO27Y|3Zo@gCPtoo{nLo>Se@PM zFt3quFm?47(%aqPrLNxn3+yOJW+SoXakMmVnJMJ9Rb(%2@Y-+D2nnJ3G%a_)pFZPx3pDX{{UVJ_84`CL?ysK!KBzvNq2Rma!hYpJEf=0{rkKF1~^4SzPl}` z_(zqtkoXXQVc0agDB^yIo8NVcKSj9v6DIcs9MzxW#?+u$lrv#e@2C*%8k>IKmM1o7Cv~gP$d}&h?G-Mh+kSVU|2qB)f(k6 zY}WM1j%wnoN9SE}Txl)dl1|opSuHWoF1(OahL-Atyv9pfIC|3lBy#q$a~);Y&B&-< z_8oV-?bMg1d$*nqvh7`(o10Gu)2}^IYjoX|&#EU@63z&so+)p+qcz7P-q?8ozuC@8 z?*dr1to-LNdv;Zt3{KI0{352RM>dNWtAocjAb_mVV&E!H&RY!(cU^q-yDIKiW+>A$ z0lJk&Am5Y&%hoDTTa>b7^R~(sef#`-qRu+;z;*B}(4+^>#-STtIBKZ5h=v*3f4JzS zI)RlM)>uEDgTCLB;u|Y+xi@_+2Rjt;)^R}pK;Pw7<>E2{viopLevCgEn=fcOVL{4k z-?-=c{#EY583bopt`%0Ufr_E>m9*!J}{9mK(LK7%;8HY(H;dASJbm zpN5i1SIFWCsCPX(Zt*!MlF!i_QsOC~swd^~_g%K5~Ag5{Pi%3WiVSH6B@D`o><{&n<_VU7*WwCd>R=-ODb z86PzwKg6wv+sdpl#86Huj7PN3eL%!wde`TVgEHUbRw#N8TL)u;TJMopj%GZrV)v_0 zd{)EGx|~zYIGwlc_qTTZfDI~o>Y%Tci~IC&6y9s@D`eK$M@?MRevUhN802)E_?L^w zf_yz-xvn%7m|v5u+J!77#+naw1$s-}RHCKorW8sBqEQ7?zfKg{&E=bsiaHGpnD=Zs zk;Y`BA|@l%nz?bmqIi-|Bo^zS0(;e-VT3H|`BEh?tP9C9C7{spDJCD%PO=L^zO(#4 zyuD>qoLkc-9K{0!lHl$h+}+(ZNPx!O9l}9_yG!sOf#43oEx5b8djk!8caw9T_slcj ztob*8vRHxc-gi~)lB?>frdkYNg|!Qi`N?_yA!*h$N`1=pa2$s&qs`suFIj9OQf)9$hPx5IB3Yfa$R0~v8(q}SR^ig9)6CD=5WqyN8Jb1 z^^_VXMdPjLb!&chiB^2x*hy~$HzZAMl^X8RA16T8(9&dL!$xXB@;C$~?O2{A7hRg~Fy(C(c# zW#-fXVWkXW>t^xP_g3Wv39j_RpvzhQoI3C;R(T-<_T*aIXsQ=K?0<*w+g-jRk!KWR zOTVuaPo~YlVKlFzzHyp^dDETKtr659gKbDz+Higgv|$<#FE@ETLW!P2EdoD^ObSaM zh3|BMd?wnU{GZAPp_KqMQH50*0#D;1S!ONvrF`eYolvsMbL=mR>81NF|4h}-vw0{| z0Zx)d2^>-Ez1|KnUjQNklXS2IXkIX6l@l`Zwfy`efw#g%9(QqTJIXQqdc&@3XLnaB zRwU(db*kAJ&9^g8P%F_`Ra?O0aaC}#olO4xF!Vo3V?proI)K0w6yiJ|I;h2Am(}>&;MAOt`f|Sf7!26e9 zRK(25mNs$;qf|&CGC1;GiGr&wmUY8>V4nw; zzfgQY5Dbvv>{Sf&D8Tu;axTRDwu#RGaW$C9fGj6}SYbZMy4tmAMz+vFa&0g2L>T=8 zkC-NVio@r|izLAJC&9`poZxoHirbv=<@!VO?1WhduP z8kdT`WMdqF;tGK7pK4l(0C@gh!nkCi534~c6stip6maCc`M3>9LiRi7*xjjqxNTe^ 
zDTJMOq!5*axY3`&10Q>7;*0|WYc4dw`Zzz4Hq;e&PtU36WlhH715Sj06P`Kp{E9{| z_{%5hEwqnr3>te&N(SuJMNOr7U~)FUJyPb}chwq_P*$ zMYWPHsa+awN7iVN@M0hc325(Kf4`P&Zhl7WkU^Aq$@cAzuHqWiM`l`9Z=%Jpm+#*7 zp}mU?&3kF7vKG?-S0L>9cj*-8`UZaffA?np3n~OXG8RV}6mN}FLpmkn=}TD`qk=_~|l(mp19s8!amg;SoN(Ak%1fe*&0l=i_7aa zXQT_d2@rSOcpe2@uP3Mss=m0|6C8H^7@1_Y4$p-WP3C&%d$IDH1lmvf5-ojuHlWMH znPz#TdoogIW+~5Wk2t^oU~EsX&N&gz&+uMo5=9gS^Xgghg{`{m?7+{~I7Fhk_aeMP z_4c^JX!zY&hT6`sbb7fKzU>zcfk3`3*7Uzx@6}+62qV*{#S-t1u_Fy51O7ve2QWbu zhtj@XK{{VdmQU-x@Vsg_QpMIz_D-fO zdugB=WCwbBvyTa3B!+z)Xu#WoXp{e_50M{^n`II1ja4DOdC)Z)#P|Qsw@jljbsG~Vcni>e~o&2Ye`J$?&xw5 zEgV^?tf zsl6;47!bos<#s96iy6j~PM}qs^^KDSJQDPo{{K`-1)-JzhvRXx5mv}ETg|cblMAi( zyyRH#1+)Ns#(%5jJsAX`KcgzF*1BJs|KI=CcsK(O_*=)_F>|_PD3(_kNMULTKw(0s zVox6<@uc=YhRPK2=V=gH&*F(kM@K7s9v{B+wg70G1hAJO+1lp%XmRuIxRw3pYTjn^ za#}A@xj?ROrOoeN7rI;_5BPI)+Kw^n`QW!C8;7;a&a*8R)1`%&%D)uBr_YA|w%z|S zhOz}yfF~xC%p?lm7!%wA(Cd>q9r6K=f7b6>Rm4^Rh?F)rFUrl!qZ+L-8z(1w97^LP ziR3v89^MZP2~pY?PFx$a#K{D47cX`gknq z_9w~yZHr|SB4T1iOe&D@@UZZ3)nt1pCc5aM{zHol==>-%+1_0pnuoQxoo`7B3i=}9 zajAICaB(ANzj^V4=f7}T^`my1YSiuCIC%&GiT^LgR`kxrBy2G&S&$nc&`FH>%580vqD>_9#l!usbbJBql zf6y7UW8D}*z+WmAO5hFFPuxCr-RO-TtF};2w0j^_K23Roc8!~5Qvinjdk^cGpZfME zD}q2BP1_$(UTQRuPnAHDs1#606|j~Uj73jYY0yfEFnsHGd6qQ-^OHQl#-H7)fx|CUwW5E0FkP2 zJ8Cg0J)JI=LOx-b|53ZEYtcCzi&>1$;p#v$-fTw_Sx7*Ll=!L_GGA+x=?NgpF`1b* zL+5~T6(LFO+f$}`A68FKEW19sHbc2Cg|!aba+fJzzkW4eq>#^uBN7h$j%L^^$1}A* zTWyg@&5Y0Q-Q+$9ERvwqoCP8i>pg(cNx=}0JFNWj-QyL6WXU8$eZ?OeHSE(!2t1f= z-Y5(e-dHcTJ8QJS+dLmoJR6D^sdwbmiO|L-OPH@dXXP475B5QW~|PP zq&xDC^gqs#3gSrytls&i?OaoNAn$y4z1F}}ap{UdRf*ePT@k<9evStdoGzBb= zsmr6;${x`aPG7s%{I@$K)h$5RL(8aJFpSM~UDkoB=OW}=037+leHnBAr^EKpmSfXaaal_d7u)0QG{D_!xe=3;*$qHfL zrZfo$*PX@6X7ZBd=kImTb@@M zA|n3%8yy$G(hM+5AVELq?qZh&I7vQr^JR@!l!+<%Uk5z^2CVi22g@1Qkf1UiZ5O1Z z>2fR`>AO>WB1~12fvgFe9Aqi%p~;wwXYkQv2seH(XsAt1VG0TtFn6HL2aC}MWA*&! 
zfg~^o#=h9yB3z+F=Ok9hWVtxH;q)C$N>(VP_JJ2ZRM@%Gtk>7r8+v$@F`*KlRMRZR z!!1;`M#|_+DH|{o7azKQS)hOhmvT~QBv@4s8yQhJKW4L<%F_1SNAXBuVqdUcF>e+?n#HMGbV;SzgXWvS186{IXEhBTNa!(a1{6k zCvHQy2B-U`(P=+H9>AMPDRtEuLd%~Ky*NB4vYtWiTqamGIRu;#A5V~yk;^>5Y~Ydm z%)|fNZ%jAt-ur6SWX}zRpRF>N!YTm`CHhZsu$;7Igp5o(etW4Q#sGo4w>kKP!bm_Z z#_?!EW|S(s&>h)7h&3V3pPY%bvYs{_mAY}eHUVdTZh#$wm?z{HL#eYW(}{*Yie^16 zKt>`LYUgI#=KCiIV!?J-+yb;3 zLiDjb4bQXtYM7OZN-}&u_%eGV@qf9$w+K=;jQ9kkxc&>%+r*%$UBv@&qGRDX#(Qn4JfZ+_{jr8?&o-cZvCpcTnDT`l>(gpE^4`KbSw`PP>$bW{b(eGziPTuf3-rrwWuo0X`6y|?50 z0y&{T()IfFYkDl}C`VI1{o%TEQb-l5NF~C)Jtod@I?&5U&gR~kYXf`sqINC^cz=vDs;!h8_WV0n3d@t9Np%po%JRf`AwR&Q=by<>{8P^!J(+_;rG^1McEF>d zH?<(AIRSQ=YeaI$1|c!eP#C0OjxHH&2?U2_n|~rVP3wst5&~!(*)fm~WiiUFw!3ck8tcWP?tNi4(u6K&120Sz`N+!aewwoHhXUJ%AlPgidZuUU zrP^ktDE4p9ySlolID^j2KyTgAy`!8dg~iU%4s4q}t`oL1eRNqDM+~vD1!1R_fq9Jh zNuV72I0eHzfs;lBeX^RK%DqUH77!rdvguo|-S@PYjrYEGQa z#pV?43^2(ub}*N6C0;n2lH$#=$6e^s<$m@|I+(+x!$9foYfN0*sMW;|^0cMZF6En) z`}%A2K2_=%+>JI?!gnp{-3tGwFv-gtxR5lZ^$1_({OgiO%P4@Lx3^azyM!eh#jwT5 z#3b?4IPx11_NytNe;8`CnH7eSSMRRNQv8L~zplejCTOL-&YLo8QkDkj#phP6d>mHS z^U5Q|XF5N@G`y>yXA5Qq-D=hzr@JgECI?;Fp^w62#=JQfkP?y;g3*%pRJ;w{MRi5B zMW>LvpoS>4gjx!gDBHYbdKq1Bf>->u9i_bEVmrfQC~p+Df-oAZFi{-I`DkIcCO7q8 zcS`bEit#t}vSvCJVMdaXsBpEnn6dAu3tzSy^__750nGH7J-O!BU-G4$ zuJ3&HGN{meaSCcw07v^Xd%&+={_pI%=X1oSM^MKEr(<6d(CSqUb zKA6dpQdxxOgClg@!eh5i!`_xPY1zGejow=&OoDKaxqhZe!2P`bHDi9y$@m~`iP8`} zK>ONRmHuK(o`J1HXsq^jyR=YB(lEq{M0#}GY2RPDwgUigGHQ0a?iV*e{fb$R0|PzjBujNi8hHkW%NB)hy*> zn|p34--6d}NZqdO)27cp2mgwtnTzvq`* zOctQAgtD4XNPK08^T|e}?M`xt(Et`X6@|T%rJ~NkC00d2;LkvU7Fa-7Ho8Gg#o8T9 zTTeYN9po$#>2ZnDPqgj@yR_Op9~OK;zGnk$3in%2JpNx#ZW3BIBc|ck^1F%Zwpm+EmZJI_5==nAj6l`$h3)Xm$6QO|cula=4l21$xEMo5 zLYZExuZSoPJ7IM6-Z4SThHMzq&J}r97M&Z@#^2?)CYhi^L=zd7=pbjQoAuE1G4e?E z0NsaO8nj7fwcG7HX^^M*?mHZ!#lUUpN7;GM%;cx8ibpXXmrq8Im$=D#t7pTKFPx>V zXkPa83?z}6F?oGTpygr|`GCtP@>xIP_Z+*#6v6K(oq$4n1Dhu8x%X5I2cdALP+ReC z{iEwP%mFX49OSg|DLI5_XF8l-Sn<&AGe5Z86B^j4dUp2<)5*;mz*ESQk;^2l)P&Lj z3y8sk$+ZFqCkh 
z$6kibW@jCA!gXt}wyKV;%kBSvlwH2Kvt z&G#Z`*X7mH>cmsdtBLmeZsKj+Ce)k_*2A?9_>ITZ%b(p10%qfHhg7OTY^amj7?X`n zy;@sJH~~=%Z7;)ZoACrn4JM$noCVT<`52qWhx-cCQDQ6>Q-yNta(@-J)u1q53BI_W z>rlwcXmakdYinSTfR;wBjFrG;9SO5!_|G7Q=mBPp-p6c84;3o2v2D6^YT z-PEqN!wJ8Nh$1UMdh6=>lNtrTwI~vaIyliRmb)JUZ#6OaR%FG(gWGiwt|SBt9pcRl zWY|DCpf;qjchDjn`iW{5HoG0W=uDSKPdEGrC1knFXz1vw=g2HO zzZ|8=l;ZuDSF$=lZEJa3>py?ma(W#Zl;60M7xV(4o(h!>&}-co8AV6K$W(Atpy45f z|GEfLu+F%_{g-|bK&kY6r zE1j#%0;Lu2jvOQK0*LInx-PC+1!MtE)7`xec*@GAT16&YiasnnkU-fMQ-dBl*$Wf~ zGrD0bEAi8we=g^DfVQE6SumBdSTM`jnCFf!6a~P(O7!hE3;vSyWOcw0OsvJU4aZQU zWfwIkfQhBGda8b)*UZllR!%J*UW&@=u3}v89%v)!F&f(_#7Bv1lkoQT9Gynx`81 z5U%n8r{1rI+s(`9mX{uqTmFhMjn>WXc#RMo3%Isb*htLzN`Z=VcZ~U=&)<9=Mf!S! zV_Uoo^W-y2W7r#l!jeo~<`&6>Nn9Zyv>%A#}X1VjVVVyB; zsr^SCRna*;lzp{^*Sf4fnAnb1`l9{@W3xGXAm|-X*g`keAy*dvQMe;)TwO);2SY_! zlYgepgXrShFFNBMMF*{E!W5ybk0Y}_rVol(6^|c35N;6k`;5No)Vh-X&#f+RGDwb{ z5=l4YE1_#S>2|(E1N&@yu=UG zX;!7Zv*~oGRlU+1j=mA3fs{lN86U3MJgoBc;oy+%7X&InPBox#c^C;*tj1c}Uk(G= zii5J{7#~KJx|is0^{sZ2rRg=jQT9FQnvA9obL>i|?<95{eJD@t-C&@1>l#o^rn!J0Dg zq}BBi(-`Uovls{M!wCIx)z3OH19k*>Y+bSpuk;?iB2vqlg3xH&hhxE??9DN|2qNR3 z{7Q^c7gM&?%X3KdyUCF_ulo-&ird}rWvvvFFJnLp`S1%wp<1~77k^~dop$ziFWVYu)=oBl z1jx?-QNa-HQ$LVHb#*Rpaat8ugV)QUodGQIH6L1bIl36b=tqb?ZWh;V`_Hq(Vd}y;lTJU-SGc(2Em3dQ35h;WR zNZrytsu?|IEzHtCd)pJ1C;c+x9_SlahP2LD{{S@c*ZaZ>E5SSS|4{`PV<5`-a&v%r zwX5Rpqgp1{WAi1>*W=XhL&Wk3VbHgv|#i|RLXT%RyD{kMGZMvW3Vx2cUo6X|Bour9u4Y2 zGQnK@+RI~VbAeOr0m~cP!-eNMM?-`3+kF?Jp`oQT^vh9Hskg-rL$|Sh6I_JIM5s^( z1poqLJ@v+bT%BLQA!5}67Ce#G!KF*kFqz=o)B=oJ^RPY&ToLqVEdqkks}+o~XCBZ2 zYj)`+?rZ_NIQW58Vc{lHjq1ar27RM-2}tNu?dC?rekiGYwK>?wG;C9zT-Y@fPGa4Z zmA@*^kZ$yFl$X6KO8CDWfj|%%sV=9|I{1JSjKdfJCjcJ#jDfmb;+eDTabd+Hp>le~ zqdhY2CFdQ3Li;fNLq$(lA4J$~QtCg~{**Vt@D?My^}22JBXc84I7j|6k0Oina$85l zc86}IagyQUTAJVaV9WWexlU%rzRZ7_<*ksb=O@4-W_tF8#k9V4?*mJU((b3NTwD4d zgmIb1sWq|2P}+@ufw&L~&0>2nR6lGuCNR)DDeljE{Y)&9`u1P8x%j8fds)FiQ*Bnhg=@sC zO8~4SxG8u|Sn;T2ETcZ_gIebNJotb=PJ92LwF1pstqPo4ZSjrj;lshM-e%{Y+p2l> 
z4SoOH1bKy{WiTQ$4140zX?U|z4C7LrJdAf{XZcdbB1Yzf*4ENO&XB6#ux_j=%H(tR z9aPBoykS=9{2KNBPFCr*2cjY_-=|J}Pbk9Qb+TE<^cL2}A$eYkh;#SSN|PI3l*spoB;53yePQb)dD2oG z(Ak~#NR0P?dsehMiy_76^-D!?Jgz6c)-$oouy|kz6=>zC-C))SOe;NqTFbI#v)krt z?`q+g_u-G-8ubZG;Z+<*71p|VTez92d(^fguYcjItxiW1-A53Tb^dK#$knXX=BcJT zKi|evW1Cr0=Y`h(KC@#w(DQICOX4c;Dl+PZz2^J*Xz%v&$u59i{B^oDQ(Z^icC%TC zWi`>D$B9pOP9LURWjY@AM~a9ne{!kI2r~MR!$HmjIXF7{o^mePSUm&zm(GrG8N0)1CcWySsXl^eyx5xa<^!J40nwIsW(!b;d7LZd}D>tp1(8R8dB*tC&oN+mdCzZNKHj58NZBy{2fOeD0@7ukMgaVmV)xzI>l>3VRA{v{Yq0b@7x>^K{5M3;uIE#@>StHKpx& zh^Ji~108@$pa6to+uzO2fma?55h#&C z0BZgH`{^wuq@OQh&*2~y``wxiP|s+3+M5E4;i3L(k~QHWZiBc3*&apHBj=7a+FA!Y zE_uNLwgRS{jzYa^l}~f%9et=$Ie%4M{4FhUdxlES9TtP0%63`{%{15xA286%qtW(- zWpb;2r7;F7_Q3yAb%;@*;@;Xs8c7n=tCfnmj$q4M(2RjGjGI>9E!L9|afv@c z!xc0|10?3#{S*^RK9q~NNiW490TJmheW*2_MH)tfrS0DO)Ft<8G*k(7E(0DnAI$Ag z_etT;>54u`gAVmexP7GT3pQD4#$y%)%3U25KVbTkiKzTMh^0`kVKm&cVPE|Wdxy4! zfnD_3X9sOk=*{}a{Vruutj)%&M6kB;@EHyxp-S_Vdbg{0DDo!tZjd?5pUjD21X`tk zj8NrDi1KP-*@jDiDw3K$0^HP?Pf?6^6!OjoQ@UdLWarzTD;>|8FcqU6SMN^K7x2?g zJ?4|5rU%*ck}j2W++ZMH4G1jIsD>%$qa=i?%G@Y!17Bo@(FqwlTS(^aZG#sl|kfVYjKo;7~F(@a`(J z>9`%nHl$ro@*c`!!QbrBy)0)!XgzA4dvA2D!+=fyYCB;J89keMtg`l4Z9B9xY0Eqb zjg{?m6Q~Z_*XN;%1jbN${&6jnp=AA(l$ls5@RQAd3>2<#(D1t&ZKwde#d!(vm7Qu9G@=ZALCMU zR;ZYvU{`Hln`6R__v|5;w@jrpl_w>U{~ROR_@X4PJ6;Mc?HP)rf2n z)w~I(b98vyTTw&|xhmQF4qE;_fDwi%4A^=oX=4z-&9cSWnPLLiyrGBq#vni)65@EiLrjO@-zwfo=UY zZi;(uM<(&-cW-XOy%s+?L}cW^z#h7Kz4LCv5|7h+9SZK?@L5!vhbFM^$sb%FjBy9B zd8hC@-&M3Uv5vW#$x~F4VuZ64X-Qgt>)lsXk-FQ|+-5xQH_Y^}niQI9u-cg_=9kyw zLQscUb=j2x0zC=+d)7ILbWW1b+f~*t-2tJ$eCf{0&h#sE&2?7*>1j0W!AFtX5iYir zkr6+!Om7)XMm5I)77;6I)sb5xB0|H#D0tX@jm_Zr_1dnNaCgMWakWjbV%;D^yCIE! 
zSobK;yLntwhSYW?(PZH>Vd$5;&zoTHyScQNn@Mdv(2#H2qZry62L#uRa~rifaHyoE zrVPYO9T}Jux9Mgad0|>b}$Xl?&+dES1pp{ESHLGPd zxKDJXG!EMUqpDO_BDN3KBzf|cZ_AQx|Jh5w%K2WKGH12PnP6qGSu`MTI!Z) zSIR|X@=>I7Vc6U)0&`)r>aH4WmUG0PTn-X=?-27OW3zF#OP_D}9Tdf!(9 zE)G%0;JK!Y@^)um4R4|SgSL&^p_EJIu7K#-(x$8U64$)y5|2*jK1KFMd`G&h^&vca zeff`OZ1w_{6=MDDo^Q( zmAUrw@Jo3(-IVagmhxOl-q(}NkC@#d(^;$Trn;7-jCH^Bb^})Np9?R?TfFer6;4=A zQV->NB;UMr;AE%O9$$>}Zew8sRsa$S(n6xjVf%oE`=bo>!hvrRf$2evR0d%7yab>V zc{W*XY^=$wfxb&R&FLG)!)`voqHxTWY-{f;Kx3#~F1iFA15f zEo~SY!_lw1cYlB+j2$E;Es1#>#3fvVx~<_YmU{z`53!|!uJ}h;yf{n7c?J4no+u9}be^VWkg@)&pmJd(_#Zw~Hh^!&F+1Pl$y>1o(GG zyfHLZ^}iPfStP9;Z=tL)Jaz^cu_G?2A$# z&a>_qsXioipf34g&30ZuR%nTiL|@j1NuKvyXNcV9pa8R#Nyna?Jc%@~n)xf7M@1*b zuPGcD?cgq=b40u##LFko;a4X? znV&1(g}*TRn*;TX6w@8#dQ^{{D2u_l%8=%mxD8;%9||?zN&UCZnx-pBZRSPu)?{elW8%hj3BC~9uoJ1_- zZC}WNujHPwKIFULo)7nKVLA0(nVwc|3FQv$u;rol!MSJi&&u0_Amrf9>qgv`WfJwR z@8z^kkYTHx+!ltZVv|2bsM2tv&3X5(v^R_s`l5 zt6BPWYajC?pB#D~lKSzU#5+5zo~5_rV<6qEN^nQLd6V7IniFc7UsH4fu8r1`2bv2llx?JR|sBHASmPvcpq)EC~(Tz+s?lAn5gko|Z zX|ZB$V-+9IU2caCvVWvdcRrdelXA|b|zefZk6;@@3_fpoIy@Y&-r8`Vc$I^(ow|67q49;kVW$_)LDPrseCu&0tm z{9+`*S%sLRQy;i?jI8K&=!n5b&@5$cF=!u+K6%T0K64*nWua8{gkpB}!B z8!u2YnvZU&$o|0{r3nwpMkZz@GaaZuZ$;vWuJ|wRVf@=I99LiNJ3_%%lf4us~Q|;USfq% z7gN;jq@c+*N{Lt)M8tFAhZ}hHtG>QUIKcG6mdN>_BS@o-)t9?oBPC0eI@>p>#ahs0 zwqk)oQ7&Y<4VCJ_lH`iB;1tGB2R!JpAD(TD-2t08a3hv!Xp0BpJwm}HD7!lU>rJw; za9qg2KO*`+PH)>dp6N7M1~3NdjS9L=rMWR#ZP8w08nrBXT&N6F6yz3>69VJo>(GRX zMzm%r^p3m#9JaA{*Y9h^u`9u>Gl!1xAs%JrH3}0wpM%kD1T7X{YXJx;G$zSE!KHi7 z0?oD!7C_gajc}&V^Yp6pfjAOMTw-aBL0(NQrMtW|K-T}xTa`KDsFNUK`8^pFP1f#Q zwz)_3X+kdYoEF?%6v=7I#qXxHyvZ+nClLAlhqX2?m`IZx*O_>{mv4>=5uyu z2d}AkJXN5a<@m?FKzx8*td!T(d$m8xoQz@GH1ej%l2-y764LCV_{pt*=UbzxS;=qR z%l!UH=z@jsG?a#!9FZ^;7<5xP$oPZWmZfi7Eab=>!Ec3;h1Jmbw zsXm=32K4C*r*~gm>M_JtlOws-M;NTcCpMKJ=s-Cpt{p#K#HCDPNCC6 zf+d}pk~t5wOM$t7QmI}b2`PFnPWBMyRr#22Mc3UW)q6F!9Qs21AYb%jEAuA9J-~#4w_k;Y?(w11@^?)ylN`@SxI6FNL4<$v!!g+_ z&9ipWVuW>e$a_m1zmHFZs?Hu4U 
z89DH4y0Ykf$iyU;9!5&s+!p_ip4YO{8St&87pZEcLpC$PqnFEpA}6ZtRr#pND6wQF zi*7u9%vdz5VZ~lWvcnFj+%?jBQ`kv4gVCnqf?!%l=b&)_^OtPc5x%w#_XEt{T{>@L zqO?Oq&cM9NE)+E=632}XJ4&UV72EFq{MHARvL%Ecp*4hGZmmJ_@f-Z}HEB*L$TGj7zfck9o>5x+g171`FhIgh_C6G>uQAZs5_?YSG5Mm8VS#}pvt{>6%jWl> zItVhDs&sOTgYWb0H?R8;m`Fb;S0=rvVym54>sDv3xN$9^3v8`HF?GL}akY2()5|by zpSt-T{>HD#+7Q5Nw%FF(sWe)rqi;wiJW_7P5G%Z8ukCqj!#p2kdn%T|M^?X&u>%X* zT<#ZAro{Z(;GW8&U_2XQDg`hJ8c}1Kh=FRu2%T(A2EIeLRtj3`gw$Q6( zQkE;-5JPY*fuOZq#p+|Fft^kzKCB+Sn$_ z_nv0t?zX#7bWqTi*Lv^>$7{O*<7_z5Qj`~&@T6c=!HsFJetN=#%iXGlR%5YIdzeZy zwU#sC@`ZN6nnw~djgP!+N7-uP&~NM0dDJ47ol5zTHyToeH9&%(wcTTDx!#m-}5B-ttb z1N-E$HAwL{1>FN_>xM>KS)(!B{GVdN2qKkDh7-e_Qpt`tHUVV5CE;NYs+R_OI`M=I z)D_^_$`%=$R=(*8sAtMvW`zF(6A^!4(7hVc0lUP@|Q7Cld z7C?^F0|+vTiz3;zmb?X_pWs?9nk1as_DYLWTR7-KVQ^X!EOtU$2sU)r-u(S0Mi^myL)>-?mp-gQ?cPk&viM1KtXCTC#=2E#+u5m;XyIyeF&4^BI`Ze zR75NAJ!4BL4k|%Vc^TtAkDx<+c~emKDjeK#T6{c0&tzf|Sh5&RJ0Edz@0;Ug)EGcQ z5mwGFe@(k4W6vme6za12D^LQX)cQ^ACy#d73#Z_Ftxmj6P1d&(o>tk4x=JV=DJu=| z{9$WNd1dL;QhTa#CzZ&pYmo~;`=xBcemE5@eSmXF{P64h<}6z3f+!a@6CH6&X#Xv9 zB#D@@&NX9BKd?BziMIq@73u1L&ypl~H_^IZQhvUmZ|4e>9KorPeb)hTtiF$3eh|qY zI?Un=4MFv8C}H=wJ2JSlch|)@4>JDa$8iE>k8&UIFjUnDv-*{h)YziIV;ORM zM*EyP3*8jeYC4Lec#}HY6BXt^^$(Nw3k${EbT1U)R4j2rrK}vU^h(Sj04oY)J9mw^ zKr4XULn@y70+no|H^z4FRa3huCi5_+q5Vu#1`|lcza)9KRLK!?sSY-JcX2V3meUY6 z4V~G-{Jn;BejcwZ0vt{~V@^I{qWCeK(~G9o1@TAIoKDAD3Hh znpf^X6VohVJ)!C=jMTa9;8y}(JKm7}YyqiefrwAqSpJ!1hBS%SP8qcpQJpP1iy^Hi zLx-f)Ig!$Z4E>%tN@xS3;N3CDuvU}dqkYpX@ivgM6qi*ho~MiBrLgGZ$=7K;NPAwk z{gR)B(a1&1dvb`QCc~9ZftNOW+I}n!w4D_|U}w-as;|qCz2J-ZQ9F3H9I>;A{B633 zfmS4CrcZgdhqw$Xjf>sn8@3mm4;d{d=LLq=g#E0Puc&KLf#H}LSRfgXu{t2>y226u ztiLQ2_`Uj^;7WfHu5%9FwJRXHVq-$V=_QYsk&9++E+slSCierdB7G#yaO2D|a*(rV z9QYC(U=UDFge#aIgtgjJ5E4ame@D@9XuK!At+ZIE;umJ}?DUmm-}n67Y3(E1CYV%( z&TsIdcvG=TKEKAR!x`%Ch^k%efv=DDU6AC}Te47%argb;-O|sy4Uki|%fE@`@UQ=7R zGtNL?Z;wrvpmiD8ux$t*LDEL}@IsSM)5t$7$bwF<)URJ^PqP?@x^aSLkk71a=tTX< zsRpSP?O>ab?xDK`A0*YvDc&u0OeCp0;Q5jc;;62JG-)_^wnx((qC 
zYF?6djI>tC&ugxegEF0pP=O8x*N~reTcaKwwhODx{)bCGYAvuE_5LAH+!#Nj$gL`P z;)1{9{(1fN+TCu?(HG(3*~1dO+M=Y0o)impsK^9H!_?_@6YC7z1lXemxh4#e--mna zOe{g@m&JM{H(*)?w>X;u85NHue)Rk6Z(3en$^9A_!ah6JXkmO!jay>$H&I#hN`{oo!(U}$U?R|`fAjQFl9h&i+rEnTP%bC+8t zT($zMO;L}b`E?_MVUXAOh`=bM^{*eogG|dqeUkJ>Exkn^>)lab5L#nt=D%Oc;*2i+ zJ_>b+T`c%2b!cr&1bnIT)}^<>>COiuwYgL-CdDlsOcSPcutZ7=Co>0bAh!xpL$ig2os>l?k(i*Kf!;UW3e<^b8;vGwpt*Tb;VK#WG-_dti^Pos z@HbTN-B651y8YLfSc;}Sm+ej z1SV3(LU1teTEZ>yd#x5QXB(ZhUK6%h%&xB)9h@yOIUEaU+I`DR<4Rtme9V}Cue+B| z0IY5qOUyb~=4IRdbzRpG`6n2fo(4G>e(M5yUMs1YD(uMxL8_y|#h>||)O}YAsfE>z zk+fy&w~N91oP=`u2bf1nTl2gmYDRavV3=$k=S)S>Z?6_I^1O|%HQSgPihnv`exuDf z_1C%GJ$=#2(y3>bnn1SM{Vx6_1Cxqwjx{zvOI9YupNGn7o|j4wtIT09uUeGRV>N-c znPWCjQ%V$(UuTMY4mCgGT48ZsV(a_=$JblOMV-Fi!!`nv1|3Q&B_Ps_h;)N=i7+57 z-NT}Ubc3`?Nr`}TDKSHeG)Osg*AN5Dz;lnTyYA=reLa6qzVEp5T<7Ro4QAs=Y=B{I zE|~%KCq;xIZWW-O{3Q56Yml{B6@Y!QXKWeDz*=7atCmb|`eqEDiwcR}$DAlyi44{% z9_DLtehze-oQk=wTXoC9TIlOGUmg1@gz@ap6i+X&fPNpavSx20uQyO|x>ylkliLQh z8%O;NK2?&67cy4T*QumOT(wJ+GA6^EdVBe%i}3StHbiA0Q<70+`su_I^Uo2!L#JyJ z%8LQ)D53j@JH|-_Z*TH@$IDyse9*hA%@AOsBx6Z2mM7d$+2G|6H_h28w0@t{BqB-Z zsa0jS=zVVXfXu2g<8L2~%<>JYxPN6{EYiN)4^a={p^fd^7rj}Dd0Z4ncy*HOE=c`a zfq6%%C>mN9qHu=S`4_0{!_xqQtbN;PC%%Gkdssl?(b1f1;@g3z&;u}od$)a{_t+Y zu-B8^OH3uPzMCRef$$DiIH6MP288_SA-4Aex4IhxAMs5_%Qd>&Uq3V&x-&?jKDnJR6@?;anp`z)(ezp^#Q zE&p4uZqnYuHF{@^N|l4j3HM*WG9JshHcQ{ekN6j=qx;R0JkC>?sPOgEUB7*MFVJ3I zbU{1sY?3Eam(Wo^`usG0JnH6guh&b5$PP1~rf4kQ+l_n|254patyDQDi`PZE^jsrR z2&$cu@-WQtjJ1yEg1MmHCoxIa!g7+ME65QRmWdo;1WHsCcpJG?v5a5-M*9ugx${Eq zR$&IvYFb6o`mgd{x$f`VI-uLW0hb*%V;1eIXMnw~aBt!25ulBV^S_M}D5-JvTuY8k zK!8cE0=^%t4cjrjW4;ZS*lu?OmtHJ}mbO2n^?Wvv&T7G+Fp{BOkRL&hCnk|!5w_d4 zI@HR!zE>r3Q>?K#)|X#3Ja#x;Dy*Y%pDv|SXYtnH5nIEqm-_xht7D`i-CgFaW{82& zFk#xptcc6AhsZn}IWhJHUx*r7%V_7mgMeScAdud(F`h%e80hiPYNn7EUTNmnDl_AD zFsDL10eK$Lb$DT``(-{L9L8Y3B{q5C)-j2)fXF8Y_Ny|LNpS{D^8jf{=XZ1W?)~XX zvt;WuiOZkUZYR^HIzO+M0FZyg4^lr;(G1vyH(Q06W+0VK50{3Me1wEY7~bcLn%>;u zKn-QOT6LM3d>@*ko&Kun#=txcQV#B%wliQM#JVXHIMqYQtjC>*)9=xcn>NvxGb_tC 
zQHfrX10Y-Oeh0)_qV;$w-HEL`K7JcSiWjw{|q`S3sUqyW1RTeDQFRFoyQ_FD^eQa)E{$}rlpFFzr^>te&1}&J zkz>$o(HHU>BCg%*N;L>VBRKDyF~1I(eFpB=>(fy4F@WABdR}jNDiHILI3gR&k&ZPA zDNTuw)r?mAl^895_!ebJOv3wfK{k@Y%}8HCq&^-PX*mIFLLt^nqL%{YK{bna)-N$u z%gi2#-d)0;)pKUyijtOLEIZC^Tu+-!;=4PgzSA=SQIMA~r?Kv^Y&eNpPk3`7Rl-;r z5zt;_bUtC1rs8geuGJ=sA2)K;5e-BGW@}O8x;bQ=uqP6+n5XCs=<2R0o*Tov71sjp$_MqHZjCAY@+0q_ z4Z>VsKG269FPpNyjhP5(G{LUuz$CYSvLP27doWHhL$zTho#Mtze36pT*x*u>Mg+|R zcYr}XQ;poc7s)oqP= zITad;=gA|WoQskjF1Uq4W4Zm1f(NaS%6Z}Cmfq}rd;1;S3PGNAsv@=bHF6t_T)6t& zH7c*!(yi!zSS_3@kGt<2RFhs^xdeGvSW~>ejq%}Hrvua#vA!=C4%}=9w|5iYX-pB) zd{3TMzY?XUw=W;=zwvn3U_BsHu^#+{pW z6R?8)OE1sLFDva5kzsu1%Wr89XPo**8z@uV0W(A_C`)Hy2(7fI zyEjsICp`7MNm|u)DA~9j>-vm&#>Ipp zl`7wSI{YeAj&z+xoB4EoUoqImxwQ<71ROFUXN>KblI2td+AySm9)o7aEqQ~{df#Sn z#>N0s4nSj}+QRt3JnEHU8b6zT2Kd-O6Y)5WA0y#q!B&-rp8@*1>@o-v4;100J7G;b zH(+1bXBA%9%jHseE(^Qb4)O7HNG}9>#(UC^zn(BFA>r!yO7Fxv!H?>qIP@Uj9~AG_2FkS-r_Wv;U}mjx&S%L?f2^3xzz5y)fPqBrC@GvLYJT7#F$D_UtjgI>n_VxCw;m7{YeSv z!NqZjLq))d*W2q~rd3anJ@B5^qRGcM$63VnMdF;+GhUFNx;Mm^#vi-b+_G(^=!L#i= zz2iORx<@&I&e*sg&NseaLy*AQBxDVe8UGN~oklajTU z_vQ1zt-Og5>yY5uI6BzeyoEM~9YRqK$Id*B=yxd;UAnmE%2#MEkqW-MOnr6M?Jzo; z)N15I10mpVatGtBh8ZM8cJfF>7(b5NCZT8>&t7{L&!S)rGC}HnM}F4BcY=osp}=_( zzA4I_xVO+$PX)z%{=q!+{QRm4T(UJm20liUtVfgSX_J_XxQiRRb%#bW?eEdgBHG&* z{m}jnL&aBj#>(@{pl&on9hF*nr1JFiJWPV-iW{9b7knC=G&+?H4RWU6Xq9(wV`>kq z7jI!dI9L_VP1?vTOgS5jcMy8F%vZ~RB<($%(xyQ!G9MS|`L+*paB@B-@8w1pEiI#`2@>sw3a*1y z(P(z~e?VihH&XMSblw{)Z~Art#bs2gpbU5-!>`c`vjXl~HNaB#5(g^BaW8Rr&XUN9 zr(^SI{D2$Bd95J|zYO!VxK}9V+2^#kxo2+~zT%7sS0wnbCJg9LpdS%r(-b}kwdR7} zb@nmwy?*MB4TiVyYN$g2`hS9hOaMfjicA66T}N^(x#N5YUu+c6V?PxqUhj zFr$-H9BnT@sn@UQ;RpxELhByLBj>GN;yL|#((CaUv@zS>#zIwin8Fn$ZQ29*X5BIVFeixv9P1f5B%oM2WmERYJZZ0q4D0#m{IZW2h z?~LNR_ebf$vib1;mw`??s*98Nf{9Gi$-=B}U@-H|($p)RJ#l~#lmKGv z)4m8j2ma^RD7qGVB&lCJJlz?Ua(m4zDiT-0ocWjS~`yKWNQsW0-PLy{x za;tN7m!4?wwKQ}}m>(=}OV>qFYUCftd+RwQP{8&zjE&&ldn;NjXk!oL#d0x=&a{<>CSB zx?F?T-r(#dBI@=bONaM6Q-4gkUW5G}kNZX1h9joZej${*x54b)+Z%V=!EZ)m=5)8C 
zh@f|{LZJx^Ah4GvS;sTjAcKn_VuFs!CYfc&(L{OFaa1i-Xw3rJqT_7t9vScu*I^lD z$Gfk6-57U_efP8utNe}y9S=4kC$o^biVTG#|&Evnh+cDAoNpIrK-eY7KMNHQ%xGK3#-m-ufkpkI?( z;IEen+3P0W;>nbc>lwL!$N$rE&7_l$P462I24RlG8o8#NydcH>&&*W!Zz^{*=Hvy_ z9K6TpW8~=IIxlOkc+!87Z$VpqAa*TTv#?b{c&%N24;EBp#6?9lX}IB77{BG*xzW|x z>0}Ta=6H0n+H|5A($IumcL;4d#cl#>h|`lrzY|$w&-v>~<6(oKSdO zWeJ5nh;$~V`2gPB%z^r7Pl^%=E*ygmSpX7A#*Ips@RN7NLW3Cmq<`_U{jndBLK)ww zc`^+ZVma_?yVkGb^I0i9{#H-nojVQY&VXNDb98LuJH%l-s?>Kh8LEF8ETKk3e90Xb498G_y_W(V1}u`|Rk{loEY z14G`<6c`;-RC4;XOPkCC2BD%cS1AnVGA`wmJag(Bk#pOw;CFzIu>plIbB)JCS7G~3 z#(6Sy+R^F3;SG?J?I?u}Fi3y(^+_HCE1$5h&wpI>B<+EBqKRJ89?+yPX^4vDOifH= z-K<9uU!)4meEO5%kWR#_`PpYj-S4053Nv_K^9?P*>$=h$03%~Ezn^63ylfQfl2#@TC2+yELN&%>QJ+<>9>fzYi&k}{Gp*cWt0QNP1?zy|+*(Zv0lPs@j? zSpPag7P|$P<&=yfk(gDL*(E;Xg#8YGZ-TJeJ#KwN-+<}vWd1Pd+%j=}w9&AVMEaN| zLIjIFnAYjiAIU$3+h_%BwSWN!y&Dl!N4v{Xk}^ok{*R_oR(-d!TTNWdcM~i;fhkHO z`}u!2swI})3m3Rx5V3OG3_d=7!NgeY+8fE2kH>a`8DQ)Nm9{Ym zdhhUa#nkvsE5v~eJYR9Fg@CpZM;eBs|1zf))$y=2HdzujO=DI~W!*l1Oy$JW-97bk zf+K95WFP0eE7z!X!~-(vn|iqy8C$1d$LmjX>Q!cXMb;@?R-R)HBU zHJeX$XjDJvfI)AQTZXsnPdNAH?~N;Zm2zHK>WE{@0n}-+si_>9Km{b6S|IdXL^HS; zTQlL{0l>8E&x`SsMvHgp^5cDDtyekRtLrt-T=z&}UiIRblG08Qm+8HGL75waVV);j z!H{Z~m5L9lrzX_MU~&WVUA>oDy1MLu2~W*W%J_px(-brr(69kEfAj4j0Lo80neyK3 zD1&x&Bihk*KqDn<>v*NG>lV)1u`JpsD9DLf&cXT9GOuWGDlI}IZvo6 z_sGHx{2Mo3%LM0kAE}_U3dIu5+a(+Qgf|xka|G3zNuHc+OxAV9-_|srZuGq)O54yixdR#RBMO$I~lNsPn z1f(+#1<*ZbpxDy93A97BXZQ7zk|BN)fj@J=*^ua8&jhyn7vE;HkTy!!ZSDJ8YXS%t zwy4=*p*z`L(d*{c8NNl}BTuq)XNj?t1K+NkM}0@EmA@D%BZM>85M2AsKWV0wEe% z0WLW2>dBtTsn#xp>u5q8Uat$!Sk4pO!!ayAZYGg@D9ppm@MsFqN}3NB=@szv%+U_x z;Ri31Mn}H{6sLeR7e})!M)qsU8MkY{DOCYHjSW)-ZHTthUJl-<)E3TzPWxd;{BFk_ zj5M88=(Z4hqlm>5r#Jmhy)}>Yx;(e3wZ|;4!%iB1x{o)@h~cj_XI9~2*9^eqC16c@ z_>+|gM`gnI+vCTS7Tsv>&6p2fpcvLLY_Ih@J@6Bxan)_h__|bXaiEts%aB-+bUDjY z#!7UT{O8Ql)tkST;qSxs%fa5ph{f;e=P-yYy<5A{BZwpWPx?^rws!8@QPbC5sbsF< zK-ceesKo5FB%nD3IFc^^O=ZgRi~8=@Gb0ye6B*zmxa&31=xcDYlhpKj=j)x_H9rFy z_fgS}8EtKPQ>%fJRzWB$5`-4fg2$05eHdR*(B_s#?X_qd32pCx|t@uQ- 
zx-weI^nZT%Z`Me_6bMMMZg=(G!28_J5-tav0@$1Ro$Ab3%q<=j_ z2+8X==E4G=*twS^5?iCmBbtaQAbV%h+}x}*w)nX7pM&+k5ejZFJJ94zU~t@Sxy6w` z{#Dj?KnuF)?8dOr#^>efp6na%`+Fz-f0>xy@c{0!XKxXH+(NNpi)Kiu{6yW>qLYlh zI#^JfEhLb~MjpIGC|zj==-;K3_rEB2e;>7dNZ1n(t>ZDeN}VGduo>UB!s z4v{RF`ut_F|GrT_Kd6D$irzvAjQ3KoAN?QO`_B$hFykh=i^jb4F*O|X6}^i8|9_Q{ z47Se+NkowoG~EK?1-LmY`y`-*5Ch1L>qyC|SNOzn4Bx6q#P6g1b6I}#Mc>B29+S=2 z(;}kO(ARk_r_SLJuG$~;*bu6jaVqIdVLh66p($%)CeL0R0yUhRTqsd{;#K%8PUhwG zc(3=}7O+}>FEnnOev>;~gF$(YLHVv9_@PipC#weF8T&gCw9cj{p}KFVU2L@Jn9Qo2 z+Br;Dm*X2u+z<4ygx!}5ZvUPF%7$S9#OF5?)c>*ruYrov+^>p~{E7NeOeZ(y5(ynE z;SC-EhTy=!u4QKmaqe>SF@lbz5fkjBFrP+bhS)kEogNHHMt2+IaqIs+pocDV6=99Q zSgLI_92nOC(z$FP2(^>NfR*!a+NRAlrLRM0?J{Xeq9ao8%I7Mjno8IY7bP3k!CKZa z+xQ`6Hk)VjbxJb4(xOLuJnE;R84Y;E@`gU=0P#k^-&Xy<=b9t6TY{}G7y+abW2gnJ zU%%+3c}>yvMDOSC07eSsRNgM1zd6S@-A-I-1m*qAx+SobK&0wOZ*ffLGLmAG^Gd|x zKV<0|Zja@Wi^}_xey3Pihy>eq9{&kDRt`!5t2unBRZfJZWT|EEa80QK1int&D+4(F zE>z8{NV>3uGnqj4DB+S0+`i!->T`TPRWYm67dpy^;do3r>%kmZen^|03rvUuF_9QdZ73D zuTJDO*xBw4?;rkn6AHEO-?NW>vWl}EuT%y+#mjCIN-bhwo2yevO4>LlIEXD4Zk_i~204!wDQxs)YwNkya1m7wHE3e~ z{thtBEQ21(*F?-#`|QU~0V?<8r>ZX(eYln{di3-YGy$&TE}Yt--SE zC-&WaiLqF}%B{`^=n&!vk4}qwM@pt2++wPeTWpuxb9W7AnsNscAId->0wI*>lzD}a z@SqyPmzXg-`d!FG|3Dr`;{z`ZI5r3D+>gVbr_D*|b?Qdd@QhPBxnNrr4a7u|2Ve#KG0k+_F87~T*=Q1Pwl(AXE<5wic@{e@ZJ#CtF)!M zNE2wH#`jh487`9SmPSWjYDDgis3#b|Uk*V`#p5w$X)61ND8}HCLxI4Bg zkK{X3Y_ysN+fLLcIC7eD-aKEqKEae)X`eKCOaMjBhR59N?lk@++)oP3vMR;1bm`O@C6?~eA_7|Noz~{4u4hUiRN8lw z%c&6LaK3zR5?AF$pCRh&S@?0RVBOLGtUC}8Ihb&c%8EbkP=#qItw_$)T!pC&d?e0u zofSBL6Qibq`EFBkJ=F~`kqh+P!PL4e-vVU4aX58JiNm?834M6-Ay!vem7-Y`6W=#^ zA8c@e>?#3BCUx|r3ML4FQc;-L{-{~xcq!1T^jt``oCtknev_4zRiP`9YaSRQ_}Se2 zYd3Bblv#EU)HquTd+ucWfYt>r9qnjuj|L>AYMp@E$z&$*(DRsJe5owab9sGBLPS*ewkme={bss74&%ZQ#a>#bPf`Wl3C_vrxV_MW1w&UoYnt%BCf$wib24Z0G_Z)eZ^Q=#TuAT@j-BQ;gJ0uVZaelk#U3OCtPU5a z&Mho-=4Ha}0C9DviAZ3zy3&N5C+rtFO6qTc3(luuG~R(DmIXa)(E#1n%7^R^Rp4-V zC&;gdW`V3`oJM4Gi4v8V%WKY$=JLpyvw}uPjzApHl{AyjH^`H()g2$t8`Jq}(8m0! 
z?nsDV`v1~fuiP>`2aSM~7jeep(Rcu(J;kCQ; zmTw*`w4FrMuP9{|r5jwgho^w> z5;5$$@k^~`kVo=%zSROs2)|kp^`X=8k=GFj0-?L3jw@J)$x=n^yEdUC3dL5!G0nme zJGGp8_EqWckjVA$9KYl2OUH9S=cOAFo^@KZ~Ce>e>VfhTIsltlBBUG}dX8FshG0V3wlwGlHWQ@lyfY1(% zL-8fH&ow-sw~|+ppO?^kA+$r!_8jL4fJG9p8Ft~AR|`r>Pq$M@)+*G-sap&sU5q+= z0(X8Lg!mb;xvv(3S3U=raAf7uQu)f!w4s2~t{Svd8yUP<3Nyzl12K#J^>MaoA5;W+ zyb6FCcn*&=3;Ag0Kf6d31R8^`b?KuS$^4cn%{;emHK$$TC7^ni&QMxd&`h#(mG|Sz zgD{gbAd@**=<;fr26O|i1EHIJwGpY^gEp%j-w23*un&Eds45}F+wyQdBkL9fYjn?a z(R!hmKJ%)HDLfX2wV_afy;6;?w$5| zBd)$T|C||M{CpB#^@M9EkhbNu`{$H(yt^e;dl#**KqfC9tc{iP0iNuT$Vlz^YTXim za<_cMI$7Q;QJCUm4uy7PhTPXOpgtY37Mcg`^?w=45BK1p zbjOEE4$nJAS0&*;wY?E-6;a@`;@Y$PeYovPv7&J z4NZ?yIKB;{1n03$J?wTJClu$ixP6?RP(8lnlUeGK&ALYniZ1KxaZ@_jh_y4c9NV+( zX3Lp|{%Yy>JPVR925?URomoo6nWSC5#6|=eLb|Trl2(ybCpx*LJTJP#8$BvN)cUe? zmo2g$fp1>P{L`d*Z+*dQAP{lM-vcMi*kQHB=8^{5R95o9h}3M&17I?PXdIfbmB~g) zv9a&?Hg~Ii0Q>rn=0L(7P%ABf(|GWVhHR&p_+cx;>BaSXW|=YH+9-0`NE2OGhZ8=1 zx|i_weIUc|OS?yLF$JvH`wmlvrnrT?eEHhMUvI&jmX_n4ZD+wZT=ZEeYT|vl9(al1 z(ZdVV{4V;}P`UZ|ytcvOPj8=scyh+e@XYMY^?A@32JXxj65bUe!`EK@P4E&hsZJM_J@)Cu zIPs!rb;=xC-}8o>{S^q#oi}aYGeYP>^-1Xq&d2b+Q(y9P7fZjCULET*L#W~cZATW* zD9u{k9f>^h%McThy_M~Kp9EiO&RKikec}dxQrH7=ebq+f|_(S;9C z`U6zyy*J?md^y=&QVPKVK?SGl1K-e>9xn=cg_6uJol1n5R5Tb}xtoZwPqO6PF1!zy z2E>XCit)vcO`PJdc%tIw@jyK-(LEfEU69bDdfKY1{nr-iUZe>SiWak&1A453mSe%5 z;>Cm|THm*i6$onEbOTc>t~xM&_I?KX8ojH5-&$S=qiME3pW?zxe#@`8IFRh$EV9HL z+Mr+W*|{TSIYvZt-y^}7M$rB-%Bypuyvbg^?6`mc^f2ylkWNNy+bBCZmDQuZigG81 z{h2dr>7vDVze#Cm>%perJB51Xxz-Z?kCoU!!hsfbGw}pS6ONach%}TrzkQzROyDTU zPI`!&Hv9c2hUCyzoliNzcI+o!^SZ|TK@n51xg@*4H)Vd28G9~y*|KL1j3eNHz5o6YNNy8h!~MpquVER~o*w7B^bu=6 zYh}N4Nx-s+0#UM7E(1N2R4>DJuxIIj2T?T>eGyxrmm0W5tz+}O1lRZGmx_|y1z{H(HQnV{U3U|vxAr87pZ~ym8bG8WMaj(^3Pywz~ ztF`R7g6bbht^%k$aA3tkASTbrf0Z5oSwArF$q8_0O|h3Dq5oZzrZWUh{R)sLJ%a<) z9P_RzNlyW4^N-Ev`Ky^nEvKOySM&bo32!E-p&bDy@gIvzKG)o zz#{uZNgTab1@rU@|M?6kGm^qoSnA!z@}#Fg;gubj=MvCE64xly<~r<)f@Oc6_8=a( z8)bx1KO!S&irq$EjZ(x0?fGU&iX&VDf`Sx4iv8(X-$M;e%^cAB;T`}rRCeFv{e%xj 
zbvFzyo#tg{zlU!7n44$o%YyK`+jDiOkd2p@mz)9+H0>XV-?v{9W%2YY)~|?_@I8_P zLh;!kqk=U-Na^Xh#l83A;Sn^)*pn53XY6}40{bCsv@^jYwj&?Xj~K?k91yxm?Mf4IRp2(Rf1g<6dt8ar#aZF4cAO1LDFG2KF2fVp1l^1| zQ266ujT0+1Rd|ot(3i~=Zmh2`+2mnklLh@jI~?bGN~GPvBG!MXP^a)9(DjKCX#Pnw zHN84{X}*?ff|Ehm4tWcN(2nlsUnVV zX3~-~xu3{vR6nTQ9!8avsm2NL52?Qr$L4a|FelCu=KaVDA78(afyHCa<9o{E!Pl?CaP8jCuPw7cQLAHbbws=o z!im~OFr@BW&m1T=sOolo7#&H;`>^L2h=e7t+K-jx;4Gj8oayhoiK9U-0tX;`9L^`{3f0KPY0{mx7n_sCS@(c zH;02cu;8ACkb*lN*I4lJ3k(e00gHTGzuBttdLwC#=HTl{ztATwUZiD&)#JzUo$aXI zlaE&(TpS zx@aJ7H|wuk*I;eSqyzfoSA5L0PqM<3M2PnGT%wqPM^T=HK##TNm!nmpS0Vq;WG(e8|HUb!? zx`0fa?i$vVPVDyYlgDxuwxQs8K7DNQ_CW9V_LsO~XUUbW7n7GucXwSpk%1K5i#Mwn z{V+i9JXcwB7V%b$vX_a;9yBc_w@=^*vsdqC5m8CrhT>2(Fn!7b6%XXs zfY}2hZ|o-fdsyUVzGhy5E2DP%dnwtOC#0~9smzjQ59RW?Ocxvbb z4fJw?P{ib-gPVzlMl48(Wc~Nhsq=_Z%zr&w=L584y{VzN$s;2}!!)HdA$90J(2yLy z2BXv@powxU1)^96Go=^>CX8CpD6q>~v6OPR1MJh{&HnUSl3Wl8po<_Adr0S|qAeCkzK3qanU4@gKu zVW7^(=YxKZSdMnZRX6;2^gqdgCuU`m30HICaFKL$EV#Ir z%x^UGWI{|rlx$nTmBYDYS3QcEa9f#fev58O3S_X|mt$^#C@l#dd6^{_$AX2U^- zQl`YD;WQ{FuyAlFVu1s;y)M{Fh{ zfyV*BjZ(ujPSR0cIsOWh<`xdw^6tv)g@=4I6|R?%T|7;kz*`i=LP;}@4>onml$t2O zV}2^ml8s7Kl{kEa4+jpp3nZa)z`zRTnbf%{Cm{FN#xz63fu4^X7v>|Ehh2Nm>(`W? 
z0KGV5cKLxh3ZUTR?YwdE0|p3f@hWM33{<$^LcbGA` zD(Nd4m4w^S^pkKBZsw4t#=FRoh&u&t$BwgNK z8@W?g)J!s1sH56;R?k6~hnbe%ro+N6#ov>6QsjO{-bmT`w|M@+SEPz)R$qK<*lKv* z_FI798UK$%RF?ZNU>Zc+OIqJd4MoINGF|H>h~V8Vr|kIH!(P7)S6^IX3}Sxp*0gP9 zsIce9TiP4x*1Q&(qDTsQ`VY+1Df-XOqWcwjItY2{f3Rb=8lxxksz}TzpL&W{rh^CW z0tcv^)SPT(@6A{cJqhXd3(`l6PcTHKHK02okpAa7IFUa`;xR^9557lg^}lW5?DS&r z_;EG?H02o-j6q0Olw$s-CZKBXM@3OtNj6BMZMd@sLA$OH4nN-M;u)fR@hleLo0f|j ze(H9vNPd=l{q|;%y@%;_7`tZBt7G(RlvWKFwaXKw&$w=iys!gH!<2E?)Ti@Q-Z-Ok zhlyUq<$bJAyG846d%K+H$>HLVmWLU zhmpNy9>x9P(~B3FX*x|xjZKoQ2H(frv+8g42`|&LjT@*R{hG*SkTOtO+?Z{iI9Gua zas(-}qo*y`?9lvsj|>xnf9^NPQUihO*j`Xf%)B(a`?J{{_OLz(*3%!4BLDYo50REJ zFp)x)EMi=iNN8laoq;UYKwz^Gds|cJ#!iI z24KUxR27?W&1%Da({%lP-nxIYVIbxL;)ZM!Mt*EjvabrL>-_PW8v5evG-!+$qRYD= zs=7&~AHx3gJ5JX?nCG)T(@T-|2taE7{fjsM%#8p2r5BITRz=BP)b0Q@iQ19+V^ssF z{z!xYRSPZ={`Yh2n9vRhw`G<;b|IGV|4D-XeLr|2;e@U4(99d2BogR16W~NT^@<iZXVk* zf+XpmiUN4>gtD{|<4&5TZVA<7*su(J_s$=ybPbks4F(8X#4SwpIwT@ol^`Jc`*)qd z;d5QAuslGX_TO9D%8LoE2zU)5?|;9(ndkF-eS38wXdfhZNe~%_{pZzqT;%wm$Z8B= zQa#NN^vW?piGS`Xlo7)?dG|vGo8$<<-%tT9!?o`fiohFZJsbV_Hf1^5rKMVT-XFvA8252s1^>f29Ln>Om$GE7N*>|nL!FrT}sDkga-^M~~U(gy@hDFQaN z*PTJ&5ra!Bqsq(6q0IsKtN=uNH!u^@Ujkr3Ipi=9SZOue0*z+wUO}5-r5zMX%ZL7mXs%m9B~6=3!gx#MA6jrZ5IXzw3a>jat_ylrZD!9w1dn6t!}2UUca$91s5Ur-QXWn=3C5QYuDN#lOP77ybruX z7l`Qc;5S>*ey)HiP z1|Xsi@KVB~*4M8*#M?Unu7p77k2nlizml!-I`@(h8AebOW)Dv1w~VX@g&Bnm$&>Zz zCXfpiAM_%e8?4*>K9Yd#{+(MKcJd{3@#~S)0t)i?<413WeFbDUj8akSmOO-& z1WjNj&DjfJka0nPi%-L3oqHE3UWGwHvdd1GyM1= zelK|Y03OA#c?)wFHU(Nx2DbH(x~+1HGhXB{JQDi&1P&dML{c_pX0QaUp*Upf@9zS{ z2xqUu^pC)iZ8(@R*f8?iPH{u#L9=HLo&vtIddd?wEs^pZ8eJ-iB_Gq&ML) z`lbQN>{omZ&&pHQEhMuqWeSLk_zF^gKqBJ!$;@=E5mw+Px)TL=! 
zE7d>$=RffE(Fvy-rb0;dMm`)7<&x8;9--D5{&UQXuMf`J_(DtF4%lA^#&T&&Wufx?TYXAt*# zoUlAq?I}*V2dP$U1h!Aj+|w??l1D!8%|#UE86)gy7^wiiKrHI?;G?Jrgd8V ziDnI-iryt=C)xddu7Uc98hWhDt6~>5SSt7}r+z+RQdZ8Bu!&n;ozHv2=i*7FDgvcT ze_`f_KP#Uq1pA5lDkx9(@VP9h$tx+vudUe%Br<0p&znCV7p6OweinW!hgiUDL) z;SAjkdU`Of{|sr_vop~Dv#^%rtDBeTdp!pCGz1W_5|SAe)CGO?I!O8&F*}Mw&)`-^ z0Gbo;k}BvxADBs@cCNu{fK}kaXD|?uOTF`B6PFZki@bD9z3ELzKyYWSqE86_G*>lq zLkOE}nV~*_2&bAII0Aaope{NvyZF*?>57RS&+a!g!F0%{PoD_JgGNVnYuwgTtla`n z)lW9J{v~S>teKYSwP} zdzkxXjb%ZB=Hj44Q-K%XumEmozNM;huXt;$7j{3KM)1S37clScqo1raTjtH+;rN?P zrzd9>!+3mw()?nT zRDFDMH6ikIfL+f?U){^H{T7CkZvc zw%-b)fQ^LHUmdm^otSUl+@`CmeT42wuX=zSQAds_9l1iYP3XsTSdXv4-fgW40oP`l zW|tCVR6Z@f)6RtaP`6Rwr&2D%REo~9-$*j9J=W!^nT!1ha41^|B|bS*XEGpuC&uG- zrzgi*pPt;Xw#b4lrn@(ToOGspL>eh*-X8tYX})u7p2x37!g)`4F>PS|RuULD91@zn zPZoRIYx!Lmcc{h%^#M8t+GuOM0@IV-0oCA4?3$%I=2}v^THQ7lK=rTShAQ=Mz~aL! zmX$I-7H3d5xODR(9(4q<&r^%2A^fWi>fG$&Jq!_#&Ilgk4hvPuB)Al9yblNVoO%RC zT1_vos%A_)H~PTx;jKmUCV%E(H*N=D+hN=Fq^3$6ZKZrun~RXs!`kj_&L~>pvT@^* zcKH?S$QYXdImiZ48V%3g{>}D6^(-0JW3Qa7J;A;R{8oCh&1L@i{p>iEP@d1W&qH6e z`(J~pLJnr8B*_IMNygp9Q6ov^6~;0gIwi%@r@ptu4*C%-v2}?RRH?p8^DXb*dm|Hc zT7r5ETA~$$6A79?cPC(;MC@AbG@nh0uS%V#%;@$Y5S5E+x>4o0pHWE_r&&h))Tyg- z-LyYGU%7X=ASP?($kwv$CC6;dTyx1-O`2M&fxmC*+VY|aL0IC1o}XUGRw^FhjkbBw zOfPU6A#WqL%uEirn;>GxGhN%1GLf~lJ*7mA=5o5_Ff&8H$slE9nSSkjmqQ9nablyZ zq>-%Gwf$Sy3ZiW@Dqr}exApj!qvJ5}dKSLhOgz*I7AjW&na(7kk#4%wp zix3AU%&SL7{Yj12nq==W1)xugtENJyv1kM&(hBaL23ZyZQ<=KM(Y%igZ@uZ?kM4fN z4@^N5!Di#Jy?9FeZm!bF92*3`>13~@1>{d78qDL3=3s0PdAOJvwYXhT;#;gN=zR;> zj$%%^O$e$4UPi5`&oUIg%*2byvTHjA%=9rv*>v+IV*%nMVp|WoX*xw#OD8Q5SkR0N z1TF7)ZW`XFk6wdD$VTvJUsl#=p2RytHOuZ40ggzf6c_}CLBAduJX^2(8k8*0Qoxqc z&i0^0>=B+Vy7)3&Wu73Wm5{7IbyL%mStP-EhBjaGu*WP){6?hcMs@${-l$nrge$Kn zypgoTHq4jQR90Nwr;iVG6#LojR~!Sh)i00>bRA?1Yed(kuZFIxO@;DA3rPSnN*(GU z_n!XQ2RjX$zC4YiolSK!p&U=eqnWu~@jbhQ)eqD%xB2{S;(N2LxG&cj%a=1?mt=XH zcASg?FpGhdhhdzHq7s_J`B5HJ%`kQoPN{kx-H(h5JtVNLms{9PKgbjh$}aZnpetuq z;}b*cPJR8an|fc5CUp8Mr5|M6>-Q?ziIQeTZoTGjS)`v 
z>r|Nii-cKeH1{!`=%*>(lLNg?lpD2+jo+H~m3-&5m}VAJExT<;H|BLo9c>*}ln1iV zip?b}hC=tjw0{ih7Tu@}In2d9*I=_|ajyiwbGfb;uxz;^gSYBE;arZ>f-0{mqgdn{ z&YtH9&(zyYbM7U)%BC8zeDB#7%U=D12gFS~1R@XnD`Su9oUQxv5>K`?C|k}b{}2Yi%cdZKRJT^U zA6Aw{=^JAH@JBTFt2_%t#KaTZZv`kOhUn_G%-R6H6!+yEF(&3{*?27IWh3m|!LZRT z6Ewtzd$T|6*}MnU!?Thyjf{e3Rj>71nMQb^iO$eUmlcWrY$Y_AvQqMs<7~Wfc~6y; z`MwW^Oty7O`kGam^sf}MGk#Q8U+Q<7qaC5}RDnHTt8e{*d1c+5g92&fzQ|XLJtLq< zV&BXIeujCGfro*8+)%R#-Ny5d6E_c0RQP@zoi@Cvf75}i&Je*l`~NZb)?ra??cew@ z0TD?hC5BW=8l(hd#(GvKcD-vWYs4xbdNx`W5?sDb0z}XrX1BAgJ)Vqp=-3=5qwGU z7f5C|>fb~7kpM>#=91qv^jb;e=_^pKOVa7bFL3lfpXSNZ?Z+2Df9v=JS2(6^%WT3=7MgphVKMeH{Kk4xj(FB=RcAIYpQ&HviTgn>`>)I8;<4dlXW zwm9Z;tW4i3HL9=g>ppeiICUN(!0Vn@=(O#`9G)ieGj4$;lRB8re>FB%r9n|*Wt!Tp z?~}L;c7m&8a^aGwGuTfVx?6OZ?>HNQ(aom_iwD_dY-b66&X4dVAKpOHK2zURTaeF* zkkjXR$Qkw-vKA#7r6@sOUoiNS1MxoOHa+TL^i8RgK~D}qN}5 z$00Y*ui>Fx0O`S=Y7qJS&egY7a+ZViy*F+}IZ+MZN?!EeU{{agpID44WU&kC(biR4jqnZ*Lj1bjoGa-bJSwJ-!ir3jji@^L zZPX@QNc%c>b2bN`3@~Q?tPWL>v7~>FX460&uB@S&rD&Y)=XQxQDV0k}I4!oFP$8U+AuB6zn5_;6ZIS?Z7g9k>`iYI&e zK{AsRuw_To9W)O?ea>!JsUH98y`&Dx9eQM#2qC86jt?T~ybRPT}dY zC$asfg7D>~+&|oIx*5{;$r~PbN?!2Xk{rrhflmJz-=YJ_){T%du*Ornu9dhM=K3Yl zn?{wd!v%8;>*}AnSy6Z{$eLiK@-p$#+wu)e=rBl8dw(&aIiOGY~yP?e2}whtIxRGgVso7)F3xnHuq&PieO@r<}p^)wi-Gu+iF}gL(UcJuLgByP7n{o ze}2b=IkVuFQ%qZAf)vk}1RGZ>G#i|)ES81)-nYm)xVaG8rhJvKu0}GLE~>;d;U@h{ zD_o{bkR{O=L^t?{#jLjVH+p>dHxqH_wT*kh#yMZJy-XOQd+kn*Cz(HMww=1iteE3X^ZmTtP}&q-@)UR{$npUz3EZrd(tEv2B{ZaB|GF_~d%`7MnZ7vx>3?Vl}^ z$G@$5h@l|l-pZ@(+vlD*7a{ga>S$z7tIAn7SnO$;QttWsJTF+&cC7Ok@@I2}qG)(a znXq+dFu5U3FeEP4j2$s4?_q>mmz&gy55|OWp`honA7mGjuiGh6x4n!`smifil3h!V zXwJsi$~CDraav@tVNgaVxm;C>7yN-dji9_@6Q$N9wrf;d{Lr5cP;x1zr|KtzTUD+d z$kYkQ86!EYUDq2d2}`xq8Kf;yOJL19S#)Hgot1Ne2TGstbm)QBoDJER)pBhxFC@#y ze)|&~?Q9lfMS}?Firr?x#0 zs+W{g-ivwa#=@YTUA^Cpb7(k`ADEXhWsy=TlAD7!$na8oUUk$hd2=RFD$Gj8kQ$WX zYIG9~8d`KpQz1*+aB(}(jLx^czOmVBXb)CcD9jn00?bodfmoJAO4;x3%!w8F8-Bp9S7q3ph)qg9jC`<^Io7F%rHq|A?08bNa#1E^f3C$UsE=J)6`|n6GG)!R#X3BHYLm8eN~9 
ztQ-rWc-4;?O^9=rg;pTsut*U-7EFplgn#?j@GjA$JO$`BPrF$UBOlg2y(P&K`s4+% zeaJUc-YCoss#qt*ID20^{Y~%bVr6a;PQ>r}?LlEVqoNkV_n&O~P=d`S9FoHUMK%V_6U);~ve;{t8Vy%_7}(Ezp=HESx$SX=H*2wx`e7|Zx5nKZYI1^N zOEZ?i-E3k#kXuYJ?0=fi{zYscsj4<2NS-P$IDa4Tk zHgg^R;i23DkJJ@n3eA@2DrGa8AI_b%@1q(j_&4iUXl!YhQz>5`R86;G?poBpccI!-XG&QexKypP>f zjlaSlmuv0$h6$t5rt@x>aVpa3JbZfFp(WboJqItXp*L|{5M2vz^HBk}!Z1G<^;#wq z2a#;wp_0cI#+&(JnrlzzvlU_I1$ptWs7=y$b3bVS?vOd;^T({jKJG?^u6L{ot_7Mi z3-jRqD^0&~4)HrI3!TFL#c9TGi1^+!;`KtDTT+i0rtoUL)L?x`ASIcC zV-?niZfH_I7keLk6Ap)O{#F!Cxgi+vtiE3G-u+mgkyEo$I_mnZ4?lU4L&{m6oHG*s z$2wk1tg7LpCh2)xtdO0&%wH&?Z&l9EtQfPOAJ%nBF-+smCT0VjC6dQ z1HPtG`kH5VRoVuzZ||JXcBa$7T3j!Z#>{bI_G0F66aSEFpsG>0lS!~Ah;G(r4VG?s zmBs#N_)JcaapH+BVK~<9@L5PUCy7GI5fDOxyE}H-Z#9#(Yo+@JgjmmQ@n%)!2uCmx zFd=oR@`_ji>5RsjjFeKQ6_LiW`a~PrW@FPfq>$qTf0=znUgo}fcVD5>34?4*?yDV2 z9L7E|<(iMY3!$5gsA&6ZEs^uD4Q3TjuEd7AGWK-F_XN|;E}|WZk$md@zm6J$p&#i5 zHJFt77Lq$8pj`Vt-HsBpG}dzFHmP{Yt`|MV7Zut$p8lUF3=;w>s3ljzaXGuMsk^VG zW=F8D;9ARzAbaUM*^<*qR~{G^xiY-c_14K2`|wUGg6N7uW~`AN^{L9~5*4@H{0by7 z`HBLwxlJMWPYIO~@F_>LJ!0zkEKX<%XER>{evhla>>oTAG(-5OfmNz40_3pXXUddM z$JyMd-Dro{qZT>^ly!3*o;)VbNH)qwZX}}O=0Es78$g=|^QO_!4JIeNWxCx%fB%7e zLID4tg6E7a5oa8VEZs!s{q3AUk|n|`W}Llmz20)o=@Fh}sT4XwH^1(tqH(`ymQEg+ z{oe6AaVe{MAj}3M2f;|K2bee-WU+=eWbdT+%-SpUJA5l%9(wLR;`GIgkf9spJ~x3{OF54Ww+7Lk{IMm=f&%wWOau*i zV6309U*EP>+oBF5+e3h&D4`TTDQLA3vA zk8$;yM1Av<7=|ZQQAMH4%V$iyg3%7ks2|=;-t7H)TJ<05Zq_vsDRixoDTW}Wf6y1i zT!)GXhMOZJiLE=`*_P}`%VQyY`;@5ZJ4(I=Gpt^6y5>L35owPD4#A4GEIIpPQfT_m z()YGM!3ULFqBqs1HEQ$pypLlM?>76e~3J=50kFM7IU9Fg>$O3(CvP1|PvFr3xj zKm$#1%+^3Ys)L9Vj4PjD-1aNUbKT83HNUntY8vgtUB<{WFhl?g7@%UY$wWMX;yEHxJ!R40IE?mVu*D3D%smmhG zbIdH#qN+hyaOd5Z)rv`Y#Ons!_M_ZaLS(SMHe)_ArJ=MSV~;mVNmkTU-%g7g;H>7F zBz9n*3pm}k|Mn(lQ2H7dUON-Uil5|D49npYn@2EhTB*_DAY=7Nekwlp9j@6yYEG{) z8xM9U6TwJxQ~6`_vMVz_BsT$eC@$e)!hiN@6`0 zTcJ$3d{#p8Omn4GNR!lef`7BM_O4QBK+V;RNUgp5!27o9jbz7X4ewS4=ZAeYA9A{CdpT|WTWkN=rxkQNt@+hu(z>cpky#k zX;)+II7^N72g@yLP^f#fqN2xn>BK$K#&4r_gPUE3Ux3ILZGG4WC(7J&>2VYAJy1sf 
zG!yHq?0?)-MNCYre)0eVMd_U!x|6{mfBkMr5(m#qwE=F!2I*7qqRSsBeF02?%O7Ac zWx=tpyZn*Cu>X+t_j8xE`ujP^f8N0U!~cg8a2|vbP~5v%YyTa4Ge#nblz4+_L-ew~ zVF!QwQ%*&h`bpTP{K7He?_I)wz4iR|e^os)iR|6VMNrb^??0ae)yv)`{pZGiRbOn& z{(11&Qs#1`z%|FBAynu-!YY zeDVt@fGxpR-hwa_Y#yL+*k9L|i6c#H<^E$5URLQ}Bb~kb{6BAFFTuS1fB7_M=l_Ai zJYKtusLQ{*!+fwd%u3_^^Zf_xR|!|({th%X2#d5zZWU>lA^h2~ZMxO$uH`)-`Pu&6p#?|)2w!dCg-n9L`jn-yOvvp5f6Fw5 zkEE)?4C$^zit3(m-0a$=k>|@oP-YF0=8Llp6TmRAN0Lo5q9f-SIfd+}$lA$FNdpeT z)O@r1$^}HZ&BuU5p(z>dYRAz@Zy(^Nne0W8T35NO_CIppoCwi~Nh8}ppI12!CGPtI zWg0Wun7OEK*&J8So|;l&k7Z|=ov(2c`8Aw7k?Nk-`U<6h?-2}CBlt?eyQ)4|yzWkT zus74y`N?*CjG3}iL4*Mny(GMv?(*2Lw69y2Yt6DR_3pU;`Bu#m8l>I{f$Rz%xzpSV zmC2>|Z!o@xquQ<42*rqob5&06do9PBtPSPzm?pW{C$ZGby6Y!IB`_cPbx^+I=gfji z0p^!5Z7YBz@9r;F4XYM$_!QVlg1k?k#9%B=xmMnr!ox_5@1@nmo&bs~taH7z5 z+Dcb^oy%%Revx84OZyf`kvKivVz48+%OTGjlLTit3LANO4rqNvV%$SC?*b=YQf5t( z8>+0WE1Aw!?kjtU@LwB1KdHLoe36=G<0VjmtGZH zI0IrS#SF1_P$$U#st&oN1ISV&AlYC=2k?#={;5xVWOkD zkI$QEPycz6wTHMqvV39_Ra|)Oa^aYiKPS`BHp-;*>evbAY80zsslyQ~Tg}pS2m%{N z8E_7V?12(WIYdT-i1MvLmj&I&$oXCUu{EGRsv2-hFvS3cA)nnv??E#y!uW3Rk*2CJzH9)oJ z5_L`YLP9z__FiO=YXmhvKhPp!z_F8juiJ{b&i z&nZ#a88S|Ecv?+loc_v4A_ium{;9xj@BGDFAsSvG`Y4Ip2(H;S#6;})9t>9ICRcN5b&$$|M=;hcSy02tBe}xb%h%v3g^#(BxPzKeNQJ~* zEZ<~fHii_*xv7;fG$qiF>ow5rOd18t~z14SXsJ zHlBns!#&OQn>e!x>$LU{a#cPy*r}*8Q3EZXLNRH!A-{V83Q#7m#OubIleV%aiT9Pc znq3M@do11FU{`mxb7#Cx7n>q8ubRi!IE|{|v3|Y|Zdc{7+j5x*rvbl`y z`~+cv37L1d2%_^RefM_x2|Ybq{(LW^BUIf!&RJI%`JwkS$%hj1Y|x$@{%a|N#W@;wc~ z`Fpc^idm*-((i<2DnRm6^F>{vn5k$lS)bb%H(A3kJTDR(D=PY$To#h)@ZDr8D~#L> z1&b~}Af!jPM9A)1+5+nkW5;ezHe>irycYW3@T=b)o6IPirGi{nC}5bU<4W0` z@NY`$dH~vr{S9(QK80;7nFjI8kSq^AdEiXUBg^z$Vw*nk0Xpc{-n(1(b)P5D)a{xr zzBOnfJ8pj*)uzfv{KCx$EVLNGj8}nsBRZj_?=)M=bW!eFftndUj0?%DvS4OCTC#lS z81a=0?NC!^XEuL25&Pu$+lvd}3J9>j+PVvCcbMz)L}3Mhi4&$&nY*!yOa85mfg(7NRa?% z9*hKS0nqgSvi#!&r}iaiwt`c|Q)y{J1qO3uT59mN08q6LnCOXy92ZSa3OhFLt;%@{UlbfV4D8zF`w>}RNW-5z^?mEuY7q> zBtt1`q}S;m=!8_6i>wA&nHUDKY@qd%7`-RtK^>OaHa>D*4BSxik$qOVUkGB)NVtY{ 
zG1$p-_%nuki)q-UgZx$oP5Oe2HL-I@K-A;O$?IMqb5%wLdt)H&>r@Oc1&OQz9R3OH z{lQy#zP+-R+W5_uE5&S>6U4?fMOkmMV7h6G#e3H&r2nH3dg~PS=(-Z$xCEu$?4l+0N zXWolH>kPj8Bqe{n1B+0xve*owL9tYOWE^E`Ykw(sUuMxtES=q(c|KyqPDDAGIqwmI zNUBWty)B8fT~9Tb88Hl~sk;jQNHN1UP%mT^2iu0f>f7w_5aVgZ+IV><_&@Lb z)zC|?^zW8j`k|Me9cX6s-wnhDDnR#KMi>5aXa8OJ|3eX2&-T)dy!=1xP^e-195m{G zEaK7y{;L*rG~l~j0O9{zq4(W&qVK_gJ*`Ud==J9-_AgvR&0>C}J9j_$@D}dsgY}qR zFNU$YuL%Nvw>h@V%NII-#BTHJ!8uahzv+`r*(Bn%{QT0w&-w)LCDiVY55r|`Nd$xsSAUNk(i zJt`KQHWt=IE{XI5G_ zOB>b;U;lnf*yg&VM-4=>(pI!@H)khhDz~P(*_x%sYnLQ7TfWHfpxiVY8P(SQz`pjp z`ZiZH2AxzkdzWyAZrH+lORsN(7CF$X%h$?iJKpA6cUY}IoiKWYRMyqylBSK7E59@O zoIGi8=U{ftgZNN|lUry6@hd=Y&d6o!lYO-i$Wz$;E<-rBj{zQtOza#LL{My zAE2`$6`|$DfGya)hp~&kwdS;^fr&?Btj2z(NU;3wEp>(hEis8m3LXeHLILvt4Cmm<%2%P_P>iK=p2_&2 z`4(CHR^-?Csqj*tBY0`<78y)HN{-Q^{nv5mm1}y@W9+ooAa4c~Dw#6lc5?!nxB;xk z<4EJif4>`%AxFa-&@p{Vo=!*YiJRfIzuLK?5Y0p#BP_#ppG30e;V&X8g}Vq5C#@2b ziNf6?J%7uRFG|-a2a~DFBW$DkrQYI%29O(3P;cVfcRoAIg{Z)UWQA$)7Q zp&{q+PK3{H@=T{8BpaEZ+m|cDUDE0%H%Cu*^l7(f>Nvg5#dPvRBx9oGXsK7wjqI!{ zMbqn2-KXNU==XxFFpH;ljGsru6g} z@wn*Ry~G5&8HTPdXlh;*TDi{=1kbOJs<&z8vacm^zuhD4kbf^V;>bQuy(;G&%Vp+R z%g~)!#WnuO>$8^&=6GbCt;u!;Rs67rz(W1$2-7QNkgs;~3xJ-}8f~tX_VLltV?!w`2@L39 zQa3Dr`POqm)z`}Nn&VXhboX^5!+rEmI;w*%Sw!fPj(UXY*Y`C~?bo(DYBICIpvy&`mAyrwAoLY>s|17*6 z<6mHsB|oCtY$1oCrhFdxO=&|bv6)Q7^|-=fk4E@Tw+#Dg20AF85q&+@ z*4L%gwpQ0GuztI#{;5|(p%(XZ_aVnF`SrU)!8>)9!{wS=^*Pf&JVoT!N!RNJ&E%4y zGh2;2a$a3MqTS?fc!B7#X@{Z)L{Ya&rP#_;*Tza!#=-0f9_bP4*g#;h&~tWA2=b8v zuTMM*-qI{LOc;GAJkGtU<@+@kyC+|+UgNi#*=?d;2F-OCtwrfpaH9S0m>UB?5>rIUT)tCFmUz0R?m1b6t?nfDt#G2Fk^`uMIh;Sa-BA zKuw8XEGa2?KCZeVs)Ox3uY6|C>M?BKjQ`_vnfRMG43kr3F?XW_LewC#Fy@8O-uL*A zR@HJ4KP#&45YRyetZE}s-Y+oeR31FixnuD91H=Igr9}&DpQ$2X}1I4o^ z_B`1Hk>-LVR@Hm<<3xO=z-C@iCDRa|j^qGqgPYG--2+-`>yU$~1`B2Cx_1h^EWb+& zZ-09&PjQ!Vhwb+=Pp4c)Am&N+o`3gzAzEHU`U3-@h+q(oMaY5uO2b;F!U&5+AnKM^ zAkza_q-(nh`n-4QKL$XbY_WRP;W95Q4$8Vs8NBuNc@VZ_xF~;VN;A2<_zJ6}{%6Qa= zPrq){O+F4CZ4#}t?~k$#FHtc7`L zHhY)sraiNL*@!{+5F`^83OoT4TEB$f$tUjW8``$@77KWySD<@Qr2;QJf4&l}8D)B+ 
zCkSHowW?>4^yWSpTSIBlpGH+oiAB_JZg^A|S+IA;P%YM_OT8KNR54o}ROq4Ro?beA3d@IKpWew{*@qQ2$9t9ox)8^Mid+FHA48M)d6QL;8p>X?(`&45J@eRt{ z1d3GOFo0;vbCW)5K+dqg_NLX(dpW9>;}G*$>NUPa9P1cJ^-qV?-KX?@!1cM!$1fxhM`siTb`eqFN6;Lk{_Ux&~y_zmhed8Eh=!F#T4 zg$`zGbT$5QLx$&JjFl83VrLljhM%=BV%6K?D?Z^r{zs$gNfeC;Z^N&n>BJsFk>wbr zg-~IG8VAE#prpSpw%^OAzKrcwX}#p;u(J)2+=EeZHP-Z)d$`a!Vb@h%ICS18T8B!P|3OL6+F{JJ$9~f!oU(i@+M$H zM6lzI%oLd{P9V3e3(;1~gc&~#X~CCt2yw)$jL7(iVJf_F@#TI&tj;GG5L@xm5lR+R zz@f^v3xx}|*(gne@L3`|gojvP7B491VR*Oi?MEOHXJk@WBb;nZI0s{ zZq{RQv-2$*qjW(bCYPslNg^|-gX zG{mPaB-Sr#J#Kc<-+hLbSY2~XLoUa5|&Ax=VV zEdF2v0U(LSgZ4MbSVimwYpCLP_OoOgM<-*RC~-DBmle_Up9qOCx9j=FkvI$SLK zfXZzx!`LScB#&2~OMoU{W>3`6;^GJwTB%J}BzS+Ic=?;rt>TkuqJOSGIJ*Edsso!|TGkd>oU+z$cz zWa>0#HuS?EnU&aZbjPf2_VVZOthMx(^|5SORf8EY$XsjJ62^3eSYClV+wu*Zor%y( z=2~)yx6=>3HVT4qj3_GXUgwk&X~;J?^VL|}09+&>azKZ1Kj5?R{;?LY+!bu>G=$(s z=yUn^96g;>L<|4U+=jt9sWvx7=T+l)WxqaS!OjAW^!zT6e8 zO7up+{JkD>zJ09>p-yOmdZnUhA(|hLx&M=}15MyN3>Pgm67{A*PHu&i9Vx~hZr)8F zn7kdOxi(bHk+;Dcp^xL5?Jb55JUO)zS&fiwVLV*I{$=H)1@Vi*QJ-u{9t7_~GANgLa`h|y zJlv0xZFbS06zyfeu0RZN}&Pd^|kGUQN2`Y@-<}nuF&F$1zO6} zx$^=BWPC|r^57hDBfjgE1a05@K83hw_YNt$sf_?zXv1pxNro z&@xF^Qg%KvAU$c-tB=CaLf!pz!T@PV0>k>dBpk*#c**$9x;Gxs;QTDZgBSYc*{>}9 z%$?^oq8V&d$TKitJ$-WaA}p|w_ervGc3n8Q_wzn16vK9BLXAvc^Hhd}7;nlF9jP~| z6z|M0>e#Kc*Fdb_6>M-KpgcbDt0@Uq{5<_M*`X<2pG)I`&zk2D@`kKbLPt}ix}oli zN7D9SGs@{P^YB9|khTX<4G&VnY;fB7=1zo#jI$YhPtYkI0G|t7d>a zmcPviGT5X!rE5t>}#Ni+~R_a zFDDDtSALBLSI$tSwArWCfzm@!Q8rdW-sZ#flNHswLWzZqT8>wKMYCB!c_36~v`*kv|! 
zGgfab_>&6Ri^MBI6dUx?3N9$jo=D#_l{UuHxKhIFkj{7sA$ETQQQSGEhPp9|EIzy} z%hkT}*92(liysYZHmX!VuyDzTudlniiKcTlNw0KY)WRnHN#xRTxK&RiLd_QZQ9LB# zUq$xrpUNChF}+C5Ro2vHiHeP~)_F^20+`;=q`NtG@x{nHi~jxbX3#siJS)QT-LD?* zKwV<<5^hK^c2OipfiESFpCzNPy3bAiejFmhuMPL3_@IO4N|-%EibkWVD>HA_-nCQC zwSK2u^4lEBv!9BvQGjGT$#vKD&!$YqMP3tgJebtW4W{Fzd3`%NeO{h|E(XIAm{O(S zc60nnCxR*O_A_H=`wx`SULtS0EX0$dC-}~tl$8H%dl&(ZFQ;pPA7G|rBYglJAm_jb zuuV+OSblA1>b<~OM9kaG)bjL}H2SHvL{GY{UF}Wr=kN#QkL>H_x&T1PzAiLQi`FmM zd>A}`f3%{+WmZ>yGl`$z=DOn{SbL1Adb4g4L{VJ7X?XW}%?pV9&#AWKH6azN*fEO_ zx=B26pUR~_zZlT$m)x~d7}7DL1Jk3sYasZYVeCU1@@aV+WC(2(A5=C~HhByAm1dEN z=93jGl{ZJuURQ64R}gMo+zKvp66s2;$6ASAGaE)HTIz6;wlhfQ>0&|Qmxu}J1_ z-8rWm_s|NbZ1r6RNU)p})rq7Vj~p(sct>xo9Xb5$y`57eoF$0P^bK3y3pt`Mb<6hE zjziapr7p^!LZ29w)*V^*RvIQTY7IlLm%sxpE$9^~#u%d&&c)X66ukXo(PwOxW}h;! zsGClMKllEu+dgh_;XVr$kzyb*mwGzIO8epdW{H zCU!$5ya+qrg|NO9G3D6_nYNp@{^5*HUo!jU+uKx#4pNbxiNC;vG(5HLcVfU7F{ierusWvIA60Ecb$qy(ebftUngs_#)Y5)62uHLuxri zwN+7Mnh^t%mJ;twCmSx8_h5++(rh05lv;C7qVbTl}1*9qv!>Dsv)DGWE* zn){4%Z>-Oz!`If`)M??4ZJI!lWl;6{-S7hRPEDX_l zxw@3!Q@-ZOopyP9O;y!koQ$HX4c+GKe2ft|N~v11-vvdhW|vdontfBnagj`v=<%4@5`=FOJp4?9z3%iifS;Zc{QIgdvwv;dZdwWBxr8nL`ywve_wORWzg@bw|9wTF z;a3mya}Xr4s((Hq+R!83n99@>%55eT-Tau+4=|xBZrsOT7eeW1CC&z;JC0ITRHl45 zA7Tk-wd5VPx0ctdXd-tzbni|&s4x#=Sxg|7W2P+#DAToXS}Ji zg8g5()PKCE1Utb043eOoUwu{V@Zq9stdw#R@w7!`XXDhV7x%s;jx0cFQLrfzheJsb}OI$jd6rp=0;mX5wO6o!^z zXtZSqwNV=3w*FLTc4TQITA8bk={Ch9Ahpu>Nlz0bS9AXso&g zkY(Neh60(`o()aav`02E2q4`c=dgYjEA`5gDIsC(Np9snK} zE>M=yOZhkd@ss>X-TI~@jddVMuKgG1b*rw91E@F@8`f7}cmXMJ6F>)8I2<>|a!$hue_2y* zkT#;Rq(&f4o%572z8eF?l_G`SCL6;S!AS&kH^ ziVf;`9aser1rzh&IO&sV42#Qhtb7O%lIz$9YI@o!g7$iAUc)c3C&DcfIA9e3z&o7I z0UOaJZJzi_*KxZ1V+ zlV??@<(v!eL$y10VoLDuao*OimbjM*a2AL75au2oN3fcjEa$769Hz>*m`zuMUS4hI z-5akd1Zam&@{$t4vp-E9zuHd!Xx;N;w<}e^WDyxAk*-uj1oN12k}qbD=*h>?1^f<+ zL%eZjw?QybsB7AfE!v_FN;N?U~M=fa6m_>bFrv3q6Pz+>$5X-A^rg8xUU>462#)W*-agv2~Lw-0%!&6JI!*c8u2YS&w;*Z4THKvso-N{nqr z--j}PH-4J~(=WUfsWbkOmiu}BTl9+jeYY10BqO0C%E@EkoMwZs`!ei3<0bm!8()Gy 
zCBIYT-)*7!q99&f=bmxTxC<+bGPWv4I2>UK4ge@OGN)XX6IYtWPP4Yvo?%r{PA+}J z`lE>-tP*=+yKSUuBQ-*~?pqo{m65B524e~Ljb8;Bd*7t-TnJtWz@dW=xQ=Mg_;@BQ z?N1$UtPl8=h($cDx7M5ZXl>?eiOi&QZ@+7QG`i_kDf;HSas{`O;+9c0ckn!r5j4vj zo`X<5a;&89V|kiNwlklAmq{8$GC|p2JO^CzL=#hFYrYDGozh>z(NzQXx3p z=G4>QMWM8v?bJ*Xd1+*cqC`slQEzUp2I-jg4uxj`lsgph(@xn?PEmgi^? zB3ggelxF19k)0_UzU$M!0~i$WwHNNbfULJhx%(xNP}Yg}pyItcTAf@ai$^?@^iZ>q zx-heyg5}MjwP?Ekm(vA+u0;*_oiYmA7i`ZXU z^kyIsCSN`UBysZS;Hpv-?OkA@#Tv0kW8u~!Lbn|>PRqYptqA9jh$J<e?17O*h% z_R8t0%gYz1;-g_Fe5ZJF*Ridk5p_7G--AX%Og- zP88~+x#pN9f@q-Toi8hsZJT%*w@r`9r*j#UBcqcB{jYfPNh537(5idA+u&p`rspKU zVEB|mG|EWf88_)lhD6@h^D-BjhFLW?iH{klpKKqAr|f^slArqMKzsU>3=;LW@LbuE z7}oG;vToyRiq@Janp<{@0b|9H=X7A2z!$qMfY;SiFMVS)PtYN4p)>KLYOTN=sbuqiB_u}!@M}91`Cj^Jmqm>lm z(nHDyPtMA@4s}QOE!A=>TcX`bEgt^9vlbDfIu!XT%va=ih0J_#(+E)QG-vnq*M=f? zd#1xJ%uWPEj{r}$SlO|1Lmg{?t|hiaDjH>}&gN`T3mUBrxsPVnc*_2CPC~UZ!&#OmL~y8|I{z6=H!gc& z8pv?N^W{LWG5ew7yWO4*FxKiq8frCq+|Rg;UHSxa4|%hQ2f^gmgtwGF9WPJaWtXc#p1s4h4$jgUzc8Z~k>A1he#Mk88n3Xx zH}Jkald}L(q?ftQ2}@35gy#3`dq?7a0mp-)E{1Z1CD`4ZN$x!vp_oSAc{)qgb6si5 z=;A6XUerPFUxPSfdvTWSW|WdBTeadQ%RXS5H{SpD{I=UD;4~eRz@jpRe2R9JK@vL8 z;+^-(A_@>nYjtc&LHR3TSm-}Wi!_$f1N#NK@qPj0S=v&2w3 zD11A}Zum1NE$g!5hg3XCDn8eoB}Y_eu9FamG=K3W$Kxdr_o{h?FPHlia?kV5TuGA#Ry18rCUH+ z#(;E*6h(*-1JYaQ354*@O<-W&Z@stHTkpO6W%_Tb(oz<*92Vs{Ca!&u)Yx>uHFX}-wPmw z3L*~*KdV;<`Uk^PWBnNOmvAdl`!i3zBN0yd0Bi8 z^^%WVxZD@_3oUV4MzI&VhwRCerj2Q7;~8{c=iJ_nIXGWk`-NZDc9T-aY1oyI=JRGU&t%p|~Web0&+ z@Y`ew*eebVej94>9UU1q3sMtlO(|LRDZb8m;c#Go+fNP+tGTN8rcp-ycZQ$%-y9vr zDMsy1%CPg{1Q08Rzx=!4Tv@4aSt!UBR%EdvOUp|@)zm{FDr`VC;vHg=thk!twWFGyM}jFB^dzb{f9xWV5&!bbb2L=V{~Ziu_Wy~ zt=-YWw}Ol+qR5$M15;WY#baW}`OpPjGC-}Q=*V4C!`lzFXdR1Pv378oGHalvKB-yc z>;m_+qNr737kEkDjs-jP2i0dvHS@kE+3(4hoHTB}EFBfCII+Cp7L0HjI1_*RN^MlhBREutk}C9sDE}$o&y#8c)-C zc&tADdApGw&o^gPQp4q=Uvpaocy#urbZJLVy@u@ya6CkG@06?IX%^YjsdurTb67eg zthM2uAueX-ygJ)$L9OC!ck7PlZ)gc?xx3$8tUot4hC^CS?G;1Z@|{Sopmh9_-J+1@ za;}3S`Kd>kAl}!ZC*G&gL6HZ9>qAF;AN@S4&Jh>CgTG}mZrA*ZEPw7t$9Rjc_N}J` 
zOgj+4A}H>?#^MQ`N$Ky&uGu>2g_-PoP%cl%gMQ=pgzxuUtTMaFeVC_V-bJ6H+_eLB zCtNzKy@_(EYYJo|KTRuE>r>ndF=cKKSiQ&Jyg-o*QjB_Y@7?OrZn?=N zz5KyvhQcc}QLizeq*PE(jTIIwPLQ@eaXci=bEG2SWP!yoRibJ+ z4DZLnHZDJ|+^mCgeHx#6(>M9tpvGh6=bheNC0dF(M(muDEQykBRMYB*dDm8r&PZz* zb?b@$(zz5X)P))QU2S=HTd3<}yjFz7j?M@uiUo02-i);AGtTwl_m5_cZ4#YVQ^jfJ zWwVpX4z{mHYnE8Bhg6;Fjy2f@{Hh!_Z4&3)em>lzwobcJ-QEu>5h*ay8{o*uemTmIUNLuNILSK5=8nyoY}27+C*siX zUwVd5?UY0Ql`A(u-K(Cx;B;U|ogOQZFW1yONba>r!tA>T(IrI;(GIF95-G`DZ>$HH@O!(h>Ap4W?Q zGH&e4vAHLp{m1Mn_2s4a?(eSf2YFT^tS~5b?e8j@iX3M=Pn&T09y|O{^7PVdakI1V zK0r)Tzb(w)!xyBh8Ykc=8sqQlv-r|y&*WW~TTUjG^Tyu$(Q@G@k)L@&Y%c`hYE>6S z2MUcTJAV5nF=vTr`Awst|0=1$z5Q+IZuG7_GB1Ny;szbkpw|;vB`yj5AV2-Fg44pF zy3youd_N1e@+S4|{bViIMShf&Dn?b)UdNpabqpn}N{344K!_QjvO6_e=d7 zpfOHy7a8mw;XjKv+IWd)S+io7{`z!3biLG(Md!BHM`dFVyNiNI$v{KBfj2VyqwQV3 zUPuT{_sqPZ_+8%TQge)qNG-?MH~E#|HQwFM(=VbvFX>j0pFb0(JpJI7Nj0tVZ>xNB z;R9yMNjzq%UG#0bWZ!8DD!#DL{Df0Rx@M(NVL?^R%;e|ap=kp0Uw!@Ii`R4>h;S)I z@hAA3S9iyHw0adNW~+C-?RsH&lWv9*D`jgsrET@L>gH;f{l^iTuemi20SP6D`pP!v z;p|lWgoTdvmoHi|IkMQcuCuEuM1Jjv*=I@Stsh1%rV3RGL=Gk5WxpRHbvFk*4#Qe0 z$eK7VUq#0)Ul|YgyJbzB$Dmq}o|A&!egrSa?o1|HI+1-xLSo?2j>Y*ftW%uVLc*E6 zFu{W)nXhj_8N{3B#@_t4$lovRYN>jVtmn*Vw!W>N?n)T zUH1)FY!_m7F8c_;daXw)mb|{mR}b91&&1jZp`4^sCp5(7KB^lwp33o@nZ#HD>^gIyHcU{FD0+L!qs3jBx1j?=%p%i1!y_%r+!5+ z!Q_#hem=?xhh*?78^i3BY^{09gMa? 
z@`aIZ*(}q6G4necqz^`dgxU7LH@1oQZxDAFCtc)X=_#!ujU-;_V!+W-YsHB>q z7AB1PX`@s*cuLbdI0~a4ViYISop`i*qU{c#!G}tDE9{NAMc^-MFS5ldIgWP~+jZ6# z19%m}L~ku=pmCI9r@r1|m-%5O@Qxq>AkXSBG~0|1lwO)(BY6YCfl!1wj7QjK^n((p zw5|P|u2=ig;9_V9P!S(m!_SC#X}wo9*39~mG*MqioY!vQ@(!zRx33S5P^ggAS|B&e z{+=k0;D2!Cq$C7>%`O9Uby~PbT`b?vGoal6+mIbuDkxL*N~z~VOCts^2r%TJX|S~h z23EE!`+S!hd(~!l*n2=P#4SV^U}YrE?4%sqhZmhW zAwzG&s;+Q~(+c^OGw1}@!s_E^ScX?cf4u{Me&yyE)QDn|$Z%U_^}fC^gJ}NjOZzt# z3s`D^J9Pm!16cD%c+h~%&j7B0#9^V5-9WHX7H`V(jaxP#{!9A95r+hh((bmZPW}6H z@Jao5d&>6!gVp^V--eT^V50>G_jTJRD{Mxv0( zbHX7-*s=QF^3m-9zzEMRxJi?ut-Acqnlif#kwE%d5<&nTtr0y2Rh*ZwD-kZr|6`^J z4Y-+s(?Pi5qn8I91eESBeRMt=NlIrKx&?jukB zIjA9I*Wblw+q5R~?*R3O20VOl=zg)pvpYD}TTXL4|9He!jkZaBZ063kCZ4e^{iAW_ zh6v_wGj$5eshCdf40yU4rOsWYL$?mY?+hH7$2@Klb?@VGH46|80LuebU(nZuVNE#pLu23 zSfyP)Zw>86Oz9pMqa~h@pkH(R*wZiWjP>;+4>4Gl%=KU?xBB?fx$*_m)A14M87EYb z9EG&0{DrNiv-nzSu&b5WxZB1Oa$TkelgqSWft9)|TT9I25j(cDu~<@wPF!CNTwL!Y zdR%Lq7sW?hKW0l83dEG{wx;uDGq{q>Yj!9BrP##~PYMCMMlht837(kq&kJX927{tRVr>nZ(;{R6QNwikKU9;BcJ(@YIJmEP`u@k;G>#B11(W=*cS=d@uTdIF=tZ zb(9S@|IT@2+W>Wx0=x&6shM7yJ#q@2JxlCx;CN3qA!49$I1ii&7!?Z88qUTQxzTj8 z&*6D=9beZBr+p%%reP6k(cA948t8e(UNcTll?k@&muj+JD)S}G&zH%rG4k(>RTyhb zaz1bgKht4?PGE)2FZ~#Uc+Q*K!5c~1{rCaFZY z*us3;x|aX+-86P?Y12G|ASjOyRgRaKj6p1N3I;%sCm~kr;v(=<6{rC+=no)1J_%?w zdL$ilvBXS4^e+FQBeoA47z1ZVqs%Tk$~?QH`<=eNMqCd`qy3bzI3^TGn?;d!1IeE- z0-e5$6`8JConEN`o_1H_&DpT^5ZRDM4u$OjF2pDw$kYV*auMWVz!s$zB#~>qfh7qU zTvP=l$+?xQ?~>H-4!0%&%d;tDtqhFXbO-#G<2-2KXYC?y@N>DlX6daG5b7>RW6^6k zh?2UlGnN%Mjve_nH(Ea45A}jD0{I5z5On^DZJ>V*J=9g zcY(%AFr;0NC$9D{j}JDA4RGF{oJqKR8?X*g+4^5OH69pN0JcXhWZ>c~DcSrLk;yh6mQ^3 zT|Z=4?}WH#G|1uq<2R7|Akxm3aid*8Wmn+Tp-v8)grDC*wZwt>#W0EJJjFoZ|C{NS z0@CyGb&6P;2L*UL*z%#(`+e+NVekpy9hBh%r>TL8PwlzL#ikjE1`1h4%$5Vg%*i`I z-lq;L=S@}4ne+PY-tckI1~_iXn>XseYpAiI1Ens}Glled*!o@hS3vBD9c;bpgyDm- ze84=(5_;(|cjbG|iC5u*T-H?YNK7I-ESMwK#)S&(Lpk3eiNfpBDo0o=c5+=0)1pgT zM&PL);ww3($@`^*?ORenF~FiDB!S}t$`2=SMLuqD>e)1%YAI5;;R*`AGzFA63>Ge| 
zxvC97IkP;2zl2a-7Romjn!edbYe<87>4lvRKfG(^Q!2=Mg1J>~^F^^_Kj36u3u#Fm zCyG74%!3AnF6@v!&NBb`M*-3#gD%_02{NfZ*AK)Sv6Kb`1Dn3I{H?8Lp-rJ0DwH{J znCdOJU!E14@x7!@U64Pu6$AQ zp8lQ?Vr#L2vRYcKW5EK)XYJ-*VD;A!rv6-D63U8~Ka(|~52>hU`|U@==(92Q3$``S zWbpvinX`I!V+i;cyV+2xDFYECAeIIjpmR44A_FEaqe#|~{St@Wfn(O}43N%w04gs! zmA5wK>M|2W4+mgleygUEI*cK(U^C)@IM4N>6HX#r4saX5y;A{jd*}^?omPI*Q=?&I zla3z$oK z25O$M;Kj-yV+%}^bQ5pZ+qEWkIvxk;pqo1pzUTOUbbJ)ve^~Wajex3?DFXOLB+<=U z4&^F7QU57yai6*Zq`V%+HAs>|e;hG0)y zT9di;S=)V0JKA58^<Kw572 zM1+@3B)^*^ZB#lE?ohf<=QTpw(dcnZ$#T*{NC};?3QTJDt}oiIUrxpM_mvbPcD2sO+^Ae~iz7KUn%3Y)iuq7GO#Y~cfiB+nrl^xms%>iB4BkxiaBe6VQt zZrbVTmNTlX3tZCdP=LjOJp~^g%Eq zEJ)oR{9LB|X_+^|njY^RB~EK41W5!!qEXDFNURqttTI4Y5TV2EHKt9OQ^x`;`oF)d z212Q{o~ijCqaB!t^ec2Ic1-LaP}{b@;fky%QW{<09goSH(CZrr7iJtB-=PxbKi-*ZE+A&b3-wxSd#{f@L4Q>K;Gqn6;Ob+| z_JhLL3$Xzp6ivjY9Kvozud!G80n9i3w4P%yP^0)K^;~5C5w`R*D&196Jyb%^C_^)a z2{qjNliVVu+XDho(Zj|pJ%K-cAC2f+?BvN@0fTZRFrs6tRW6r6~(5(@Y zC$?5l>{Z#dcwKD+7PYw3;FeOb+tzSscG`kqE200%wtozpE8;T|?R>dxyzO@2@O_qB zLF?A3lCek_N5e22KHPx$C_~})#*~A2dE2SzAL!8*7{xr=F;1cz4g8N<7}^*lINZTD zbK^h-njF*Le8+=E8HJ|I>o6r$7!fJsc)EGXty86V#{^|v?sentinel= - coordination_url = zookeeper:///hosts=&hosts= - -.. _`tooz`: http://docs.openstack.org/developer/tooz/ -.. _`tooz backends`: http://docs.openstack.org/developer/tooz/drivers.html - - -Ceph driver implementation details ----------------------------------- + gnocchi-upgrade -Each batch of measurements to process is stored into one rados object. -These objects are named `measures___` -Also a special empty object called `measures` has the list of measures to -process stored in its xattr attributes. +Upgrading +========= +In order to upgrade from a previous version of Gnocchi, you need to make sure +that your indexer and storage are properly upgraded. Run the following: -Because of the asynchronous nature of how we store measurements in Gnocchi, -`gnocchi-metricd` need to known the list of objects that wait to be processed: +1. 
Stop the old version of Gnocchi API server and metric daemon -- Listing rados objects for this is not a solution since it takes too much - time. -- Using a custom format into a rados object, would force us to use a lock - each time we would change it. +2. Install the new version of Gnocchi -Instead, the xattrs of one empty rados object are used. No lock is needed to -add/remove a xattr. +2. Run `gnocchi-upgrade` + This can take several hours depending on the size of your index and + storage. -But depending of the filesystem used by ceph OSDs, this xattrs can have -limitation in term of numbers and size if Ceph if not correctly configured. -See `Ceph extended attributes documentation`_ for more details. +3. Start the new Gnocchi API server and metric daemon -Then, each Carbonara generated file is stored in *one* rados object. -So each metric has one rados object per aggregation in the archive policy. +Minimal interruption upgrade +============================ +Gnocchi supports online upgrade of its storage system, which avoids +interrupting Gnocchi for a long time. In order to upgrade from previous +versions, you need to follow the following steps: -Because of this, the OSDs filling can look less balanced comparing of the RBD. -Some other objects will be big and some others small depending on how archive -policies are set up. +1. Stop the old Gnocchi API server and metric daemon -We can imagine an unrealisting case like 1 point per second during one year, -the rados object size will be ~384MB. +2. Run `gnocchi-upgrade --skip-storage` with the new version of Gnocchi. + This can take several minutes depending on the size of your index. -And a more realistic scenario, a 4MB rados object (like rbd uses) could -come from: +3. Start the new Gnocchi API server. -- 20 days with 1 point every seconds -- 100 days with 1 point every 5 seconds +4. Run `gnocchi-upgrade` with the new version of Gnocchi + This can take several hours depending on the size of your storage. 
-So, in realistic scenarios, the direct relation between the archive policy and -the size of the rados objects created by Gnocchi is not a problem. +5. Start the new Gnocchi metric daemon. -.. _`Ceph extended attributes documentation`: http://docs.ceph.com/docs/master/rados/configuration/filestore-config-ref/#extended-attributes +This will upgrade the indexer and storage in two passes. While a new version of +Gnocchi API cannot run with an old version of the indexer, it can run with an +old version of its storage back-end. For performance reasons, _metricd_ needs +to run an upgraded storage back-end, otherwise it would spend too much time +checking for upgrade pattern on each run. diff --git a/doc/source/resource_types.rst b/doc/source/resource_types.rst index fba2723d..0409572d 100644 --- a/doc/source/resource_types.rst +++ b/doc/source/resource_types.rst @@ -3,7 +3,7 @@ ================ Gnocchi offers different resource types to manage your resources. Each resource -type has its specific typed attributes. All resource types are subtype of the +type has strongly typed attributes. All resource types are subtypes of the `generic` type. Immutable attributes are attributes that cannot be modified after the resource diff --git a/doc/source/rest.j2 b/doc/source/rest.j2 index fae9bf20..52e99817 100644 --- a/doc/source/rest.j2 +++ b/doc/source/rest.j2 @@ -5,10 +5,19 @@ Authentication ============== -By default, the `api.middleware` configuration option is set to use the -Keystone middleware. Therefore you must authenticate using Keystone to use the -API and provide an `X-Auth-Token` header with a valid token for each request -sent to Gnocchi. +By default, no authentication is configured in Gnocchi. You need to provides +these headers in your HTTP requests: + +* X-User-Id +* X-Project-Id + +The `X-Roles` header can also be provided in order to match role based ACL +specified in `policy.json`. 
+ +If you enable the OpenStack Keystone middleware, you only need to authenticate +against Keystone and provide `X-Auth-Token` header with a valid token for each +request sent to Gnocchi. The headers mentionned above will be filled +automatically based on your Keystone authorizations. Metrics ======= @@ -367,7 +376,7 @@ It can also be done by providing the list of metrics to aggregate: .. Note:: This aggregation is done against the aggregates built and updated for - a metric when new measurements are posted in Gnocchi. Therefore the aggregate + a metric when new measurements are posted in Gnocchi. Therefore, the aggregate of this already aggregated data may not have sense for certain kind of aggregation method (e.g. stdev). @@ -396,8 +405,8 @@ error is returned. Capabilities ============ -The list aggregation methods that can be used in Gnocchi is extendable and -can differ between deployement. It is possible to get the supported list of +The list aggregation methods that can be used in Gnocchi are extendable and +can differ between deployments. 
It is possible to get the supported list of aggregation methods from the API server: {{ scenarios['get-capabilities']['doc'] }} diff --git a/doc/source/rest.yaml b/doc/source/rest.yaml index 59f7c37e..d0aef1a9 100644 --- a/doc/source/rest.yaml +++ b/doc/source/rest.yaml @@ -77,6 +77,15 @@ "archive_policy_name": "low" } +- name: create-metric-2 + request: | + POST /v1/metric HTTP/1.1 + Content-Type: application/json + + { + "archive_policy_name": "low" + } + - name: create-archive-policy-rule request: | POST /v1/archive_policy_rule HTTP/1.1 @@ -181,6 +190,9 @@ - name: get-measures-max request: GET /v1/metric/{{ scenarios['create-metric']['response'].json['id'] }}/measures?aggregation=max HTTP/1.1 +- name: get-measures-granularity + request: GET /v1/metric/{{ scenarios['create-metric']['response'].json['id'] }}/measures?granularity=1 HTTP/1.1 + - name: create-resource-generic request: | POST /v1/resource/generic HTTP/1.1 diff --git a/doc/source/running.rst b/doc/source/running.rst new file mode 100644 index 00000000..253b6f9e --- /dev/null +++ b/doc/source/running.rst @@ -0,0 +1,71 @@ +=============== +Running Gnocchi +=============== + +To run Gnocchi, simply run the HTTP server and metric daemon: + +:: + + gnocchi-api + gnocchi-metricd + + +Running As A WSGI Application +============================= + +It's possible – and strongly advised – to run Gnocchi through a WSGI +service such as `mod_wsgi`_ or any other WSGI application. The file +`gnocchi/rest/app.wsgi` provided with Gnocchi allows you to enable Gnocchi as +a WSGI application. +For other WSGI setup you can refer to the `pecan deployment`_ documentation. + +.. _`pecan deployment`: http://pecan.readthedocs.org/en/latest/deployment.html#deployment + + +How to scale out the Gnocchi HTTP REST API tier +=============================================== + +The Gnocchi API tier runs using WSGI. This means it can be run using `Apache +httpd`_ and `mod_wsgi`_, or other HTTP daemon such as `uwsgi`_. 
You should +configure the number of process and threads according to the number of CPU you +have, usually around 1.5 × number of CPU. If one server is not enough, you can +spawn any number of new API server to scale Gnocchi out, even on different +machines. + +.. _Apache httpd: http://httpd.apache.org/ +.. _mod_wsgi: https://modwsgi.readthedocs.org/ +.. _uwsgi: https://uwsgi-docs.readthedocs.org/ + + +How many metricd workers do we need to run +========================================== + +By default, `gnocchi-metricd` daemon spans all your CPU power in order to +maximize CPU utilisation when computing metric aggregation. You can use the +`gnocchi status` command to query the HTTP API and get the cluster status for +metric processing. It’ll show you the number of metric to process, known as the +processing backlog for `gnocchi-metricd`. As long as this backlog is not +continuously increasing, that means that `gnocchi-metricd` is able to cope with +the amount of metric that are being sent. In case this number of measure to +process is continuously increasing, you will need to (maybe temporarily) +increase the number of `gnocchi-metricd` daemons. You can run any number of +metricd daemon on any number of servers. + +How to monitor Gnocchi +====================== + +The `/v1/status` endpoint of the HTTP API returns various information, such as +the number of measures to process (measures backlog), which you can easily +monitor (see `How many metricd workers do we need to run`_). Making sure that +the HTTP server and `gnocchi-metricd` daemon are running and are not writing +anything alarming in their logs is a sign of good health of the overall system. + +How to backup and restore Gnocchi +================================= + +In order to be able to recover from an unfortunate event, you need to backup +both the index and the storage. 
That means creating a database dump (PostgreSQL +or MySQL) and doing snapshots or copy of your data storage (Ceph, Swift or your +file system). The procedure to restore is no more complicated than initial +deployment: restore your index and storage backups, reinstall Gnocchi if +necessary, and restart it. diff --git a/doc/source/statsd.rst b/doc/source/statsd.rst index 7e39968a..88405b8a 100644 --- a/doc/source/statsd.rst +++ b/doc/source/statsd.rst @@ -5,7 +5,7 @@ Statsd Daemon Usage What Is It? =========== `Statsd`_ is a network daemon that listens for statistics sent over the network -using TCP or UDP and that send aggregates to another backend. +using TCP or UDP, and then sends aggregates to another backend. Gnocchi provides a daemon that is compatible with the statsd protocol and can listen to metrics sent over the network, named `gnocchi-statsd`. @@ -16,16 +16,28 @@ How It Works? ============= In order to enable statsd support in Gnocchi, you need to configure the `[statsd]` option group in the configuration file. You need to provide a -resource id that will be used as a the main generic resource where all the -metrics will be attached, a user and project id that will be used to create the -resource and metrics for, and an archive policy name that will be used to +resource ID that will be used as the main generic resource where all the +metrics will be attached, a user and project id that will be associated with +the resource and metrics, and an archive policy name that will be used to create the metrics. All the metrics will be created dynamically as the metrics are sent to -`gnocchi-statsd`, and attached with the provided name to the resource id you -provided. +`gnocchi-statsd`, and attached with the provided name to the resource ID you +configured. +The `gnocchi-statsd` may be scaled, but trade-offs have to been made due to the +nature of the statsd protocol. 
That means that if you use metrics of type +`counter`_ or sampling (`c` in the protocol), you should always send those +metrics to the same daemon – or not use them at all. The other supported +types (`timing`_ and `gauges`_) does not suffer this limitation, but be aware +that you might have more measures that expected if you send the same metric to +different `gnocchi-statsd` server, as their cache nor their flush delay are +synchronized. + +.. _`counter`: https://github.com/etsy/statsd/blob/master/docs/metric_types.md#counting +.. _`timing`: https://github.com/etsy/statsd/blob/master/docs/metric_types.md#timing +.. _`gauges`: https://github.com/etsy/statsd/blob/master/docs/metric_types.md#gauges .. note :: - The statsd protocol support is incomplete: relative gauges values with +/- + The statsd protocol support is incomplete: relative gauge values with +/- and sets are not supported yet. diff --git a/etc/gnocchi/gnocchi-config-generator.conf b/gnocchi-config-generator.conf similarity index 100% rename from etc/gnocchi/gnocchi-config-generator.conf rename to gnocchi-config-generator.conf diff --git a/gnocchi/archive_policy.py b/gnocchi/archive_policy.py index 5a5a27eb..9b460efb 100644 --- a/gnocchi/archive_policy.py +++ b/gnocchi/archive_policy.py @@ -137,13 +137,12 @@ class ArchivePolicy(object): OPTS = [ - cfg.Opt( + cfg.ListOpt( 'default_aggregation_methods', + item_type=types.String( + choices=ArchivePolicy.VALID_AGGREGATION_METHODS), default=['mean', 'min', 'max', 'sum', 'std', 'median', 'count', '95pct'], - type=types.List( - item_type=types.String( - choices=ArchivePolicy.VALID_AGGREGATION_METHODS)), help='Default aggregation methods to use in created archive policies'), ] @@ -169,6 +168,8 @@ class ArchivePolicyItem(dict): "At least two of granularity/points/timespan " "must be provided") granularity = round(timespan / float(points)) + else: + granularity = float(granularity) if points is None: if timespan is None: @@ -177,6 +178,7 @@ class 
ArchivePolicyItem(dict): points = int(timespan / granularity) self['timespan'] = granularity * points else: + points = int(points) self['timespan'] = granularity * points self['points'] = points diff --git a/gnocchi/carbonara.py b/gnocchi/carbonara.py index 47153293..87f22c22 100644 --- a/gnocchi/carbonara.py +++ b/gnocchi/carbonara.py @@ -3,8 +3,6 @@ # Copyright © 2016 Red Hat, Inc. # Copyright © 2014-2015 eNovance # -# Authors: Julien Danjou -# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at @@ -21,6 +19,7 @@ import datetime import functools import logging +import numbers import operator import re @@ -30,11 +29,10 @@ import msgpack import pandas import six +from gnocchi import utils -LOG = logging.getLogger(__name__) -AGGREGATION_METHODS = set(('mean', 'sum', 'last', 'max', 'min', - 'std', 'median', 'first', 'count')) +LOG = logging.getLogger(__name__) class NoDeloreanAvailable(Exception): @@ -98,7 +96,7 @@ class TimeSerie(SerializableMixin): @classmethod def from_tuples(cls, timestamps_values): - return cls(*zip(*timestamps_values)) + return cls.from_data(*zip(*timestamps_values)) def __eq__(self, other): return (isinstance(other, TimeSerie) @@ -133,7 +131,8 @@ class TimeSerie(SerializableMixin): :param d: The dict. 
:returns: A TimeSerie object """ - return cls(*cls._timestamps_and_values_from_dict(d['values'])) + return cls.from_data( + *cls._timestamps_and_values_from_dict(d['values'])) def to_dict(self): return { @@ -145,12 +144,18 @@ class TimeSerie(SerializableMixin): @staticmethod def _serialize_time_period(value): if value: - return six.text_type(value.n) + value.rule_code + return value.nanos / 10e8 @staticmethod def _round_timestamp(ts, freq): return pandas.Timestamp( - (ts.value // freq.delta.value) * freq.delta.value) + (pandas.Timestamp(ts).value // freq) * freq) + + @staticmethod + def _to_offset(value): + if isinstance(value, numbers.Real): + return pandas.tseries.offsets.Nano(value * 10e8) + return pandas.tseries.frequencies.to_offset(value) @property def first(self): @@ -168,8 +173,7 @@ class TimeSerie(SerializableMixin): class BoundTimeSerie(TimeSerie): - def __init__(self, timestamps=None, values=None, - block_size=None, back_window=0): + def __init__(self, ts=None, block_size=None, back_window=0): """A time serie that is limited in size. Used to represent the full-resolution buffer of incoming raw @@ -185,14 +189,17 @@ class BoundTimeSerie(TimeSerie): used. 
""" - super(BoundTimeSerie, self).__init__(timestamps, values) - if isinstance(block_size, (float, six.integer_types)): - self.block_size = pandas.tseries.offsets.Nano(block_size * 10e8) - else: - self.block_size = pandas.tseries.frequencies.to_offset(block_size) + super(BoundTimeSerie, self).__init__(ts) + self.block_size = self._to_offset(block_size) self.back_window = back_window self._truncate() + @classmethod + def from_data(cls, timestamps=None, values=None, + block_size=None, back_window=0): + return cls(pandas.Series(values, timestamps), + block_size=block_size, back_window=back_window) + def __eq__(self, other): return (isinstance(other, BoundTimeSerie) and super(BoundTimeSerie, self).__eq__(other) @@ -233,9 +240,9 @@ class BoundTimeSerie(TimeSerie): :returns: A TimeSerie object """ timestamps, values = cls._timestamps_and_values_from_dict(d['values']) - return cls(timestamps, values, - block_size=d.get('block_size'), - back_window=d.get('back_window')) + return cls.from_data(timestamps, values, + block_size=d.get('block_size'), + back_window=d.get('back_window')) def to_dict(self): basic = super(BoundTimeSerie, self).to_dict() @@ -246,7 +253,8 @@ class BoundTimeSerie(TimeSerie): return basic def _first_block_timestamp(self): - rounded = self._round_timestamp(self.ts.index[-1], self.block_size) + rounded = self._round_timestamp(self.ts.index[-1], + self.block_size.delta.value) return rounded - (self.block_size * self.back_window) def _truncate(self): @@ -272,7 +280,7 @@ class AggregatedTimeSerie(TimeSerie): granularity/aggregation-function pair stored for a metric. 
""" - super(AggregatedTimeSerie, self).__init__(timestamps, values) + super(AggregatedTimeSerie, self).__init__(ts) m = self._AGG_METHOD_PCT_RE.match(aggregation_method) @@ -341,6 +349,15 @@ class AggregatedTimeSerie(TimeSerie): and self.sampling == other.sampling and self.aggregation_method == other.aggregation_method) + def __repr__(self): + return "<%s 0x%x sampling=%fs max_size=%s agg_method=%s>" % ( + self.__class__.__name__, + id(self), + self.sampling, + self.max_size, + self.aggregation_method, + ) + @classmethod def from_dict(cls, d): """Build a time series from a dict. @@ -429,6 +446,28 @@ class AggregatedTimeSerie(TimeSerie): # that is before `after' self.ts = aggregated.combine_first(self.ts[:after][:-1]) + def fetch(self, from_timestamp=None, to_timestamp=None): + """Fetch aggregated time value. + + Returns a sorted list of tuples (timestamp, granularity, value). + """ + # Round timestamp to our granularity so we're sure that if e.g. 17:02 + # is requested and we have points for 17:00 and 17:05 in a 5min + # granularity, we do return the 17:00 point and not nothing + if from_timestamp is None: + from_ = None + else: + from_ = self._round_timestamp(from_timestamp, self.sampling * 10e8) + points = self[from_:to_timestamp] + try: + # Do not include stop timestamp + del points[to_timestamp] + except KeyError: + pass + return [(timestamp, self.sampling, value) + for timestamp, value + in six.iteritems(points)] + def update(self, ts): if ts.ts.empty: return @@ -452,80 +491,6 @@ class AggregatedTimeSerie(TimeSerie): self._resample(first_timestamp) self._truncate() - -class TimeSerieArchive(SerializableMixin): - - def __init__(self, agg_timeseries): - """A raw data buffer and a collection of downsampled timeseries. - - Used to represent the set of AggregatedTimeSeries for the range of - granularities supported for a metric (for a particular aggregation - function). 
- - """ - self.agg_timeseries = sorted(agg_timeseries, - key=operator.attrgetter("sampling")) - - @property - def max_block_size(self): - return max(agg.sampling for agg in self.agg_timeseries) - - @classmethod - def from_definitions(cls, definitions, aggregation_method='mean'): - """Create a new collection of archived time series. - - :param definition: A list of tuple (sampling, max_size) - :param aggregation_method: Aggregation function to use. - """ - # Limit the main timeserie to a timespan mapping - return cls( - [AggregatedTimeSerie( - max_size=size, - sampling=pandas.tseries.offsets.Nano(sampling * 10e8), - aggregation_method=aggregation_method) - for sampling, size in definitions] - ) - - def fetch(self, from_timestamp=None, to_timestamp=None, - timeserie_filter=None): - """Fetch aggregated time value. - - Returns a sorted list of tuples (timestamp, granularity, value). - """ - result = [] - end_timestamp = to_timestamp - for ts in reversed(self.agg_timeseries): - if timeserie_filter and not timeserie_filter(ts): - continue - granularity = ts.sampling.nanos / 1000000000.0 - points = ts[from_timestamp:to_timestamp] - try: - # Do not include stop timestamp - del points[end_timestamp] - except KeyError: - pass - result.extend([(timestamp, granularity, value) - for timestamp, value - in six.iteritems(points)]) - return result - - def __eq__(self, other): - return (isinstance(other, TimeSerieArchive) - and self.agg_timeseries == other.agg_timeseries) - - def update(self, timeserie): - for agg in self.agg_timeseries: - agg.update(timeserie) - - def to_dict(self): - return { - "archives": [ts.to_dict() for ts in self.agg_timeseries], - } - - @classmethod - def from_dict(cls, d): - return cls([AggregatedTimeSerie.from_dict(a) for a in d['archives']]) - @staticmethod def aggregated(timeseries, from_timestamp=None, to_timestamp=None, aggregation='mean', needed_percent_of_overlap=100.0): @@ -537,16 +502,8 @@ class TimeSerieArchive(SerializableMixin): if not 
timeseries: return [] - granularities = [set(ts.sampling for ts in timeserie.agg_timeseries) - for timeserie in timeseries] - granularities = granularities[0].intersection(*granularities[1:]) - if len(granularities) == 0: - raise UnAggregableTimeseries('No granularity match') - for timeserie in timeseries: - timeserie_raw = timeserie.fetch( - from_timestamp, to_timestamp, - lambda ts: ts.sampling in granularities) + timeserie_raw = timeserie.fetch(from_timestamp, to_timestamp) if timeserie_raw: dataframe = pandas.DataFrame(timeserie_raw, columns=columns) @@ -556,20 +513,37 @@ class TimeSerieArchive(SerializableMixin): if not dataframes: return [] + number_of_distinct_datasource = len(timeseries) / len( + set(ts.sampling for ts in timeseries) + ) + grouped = pandas.concat(dataframes).groupby(level=index) left_boundary_ts = None right_boundary_ts = None maybe_next_timestamp_is_left_boundary = False + + left_holes = 0 + right_holes = 0 holes = 0 for (timestamp, __), group in grouped: - if group.count()['value'] != len(timeseries): + if group.count()['value'] != number_of_distinct_datasource: maybe_next_timestamp_is_left_boundary = True - holes += 1 + if left_boundary_ts is not None: + right_holes += 1 + else: + left_holes += 1 elif maybe_next_timestamp_is_left_boundary: left_boundary_ts = timestamp maybe_next_timestamp_is_left_boundary = False else: right_boundary_ts = timestamp + holes += right_holes + right_holes = 0 + + if to_timestamp is not None: + holes += left_holes + if from_timestamp is not None: + holes += right_holes if to_timestamp is not None or from_timestamp is not None: maximum = len(grouped) @@ -580,7 +554,7 @@ class TimeSerieArchive(SerializableMixin): 'Less than %f%% of datapoints overlap in this ' 'timespan (%.2f%%)' % (needed_percent_of_overlap, percent_of_overlap)) - elif (needed_percent_of_overlap > 0 and + if (needed_percent_of_overlap > 0 and (right_boundary_ts == left_boundary_ts or (right_boundary_ts is None and 
maybe_next_timestamp_is_left_boundary))): @@ -597,7 +571,7 @@ class TimeSerieArchive(SerializableMixin): # NOTE(sileht): this call the aggregation method on already # aggregated values, for some kind of aggregation this can - # result can looks wierd, but this is the best we can do + # result can looks weird, but this is the best we can do # because we don't have anymore the raw datapoints in those case. # FIXME(sileht): so should we bailout is case of stddev, percentile # and median? @@ -617,63 +591,69 @@ class TimeSerieArchive(SerializableMixin): for __, timestamp, granularity, value in points] -import argparse -import datetime +class TimeSerieArchive(SerializableMixin): -from oslo_utils import timeutils -import prettytable - - -def _definition(value): - result = value.split(",") - if len(result) != 2: - raise ValueError("Format is: seconds,points") - return int(result[0]), int(result[1]) - - -def create_archive_file(): - parser = argparse.ArgumentParser( - description="Create a Carbonara file", - ) - parser.add_argument("--aggregation-method", - type=six.text_type, - default="mean", - choices=AGGREGATION_METHODS, - help="aggregation method to use") - parser.add_argument("--back-window", - type=int, - default=0, - help="back window to keep") - parser.add_argument("definition", - type=_definition, - nargs='+', - help="archive definition as granularity,points") - parser.add_argument("filename", - nargs=1, - type=argparse.FileType(mode="wb"), - help="File name to create") - args = parser.parse_args() - ts = TimeSerieArchive.from_definitions(args.definition, - args.aggregation_method) - args.filename[0].write(ts.serialize()) - - -def dump_archive_file(): - parser = argparse.ArgumentParser( - description="Dump a Carbonara file", - ) - parser.add_argument("filename", - nargs=1, - type=argparse.FileType(mode="rb"), - help="File name to read") - args = parser.parse_args() - - ts = TimeSerieArchive.unserialize_from_file(args.filename[0]) - - print("Aggregation method: %s" 
- % (ts.agg_timeseries[0].aggregation_method)) - - print("Number of aggregated timeseries: %d" % len(ts.agg_timeseries)) + def __init__(self, agg_timeseries): + """A raw data buffer and a collection of downsampled timeseries. + + Used to represent the set of AggregatedTimeSeries for the range of + granularities supported for a metric (for a particular aggregation + function). + + """ + self.agg_timeseries = sorted(agg_timeseries, + key=operator.attrgetter("sampling")) + + @classmethod + def from_definitions(cls, definitions, aggregation_method='mean'): + """Create a new collection of archived time series. + + :param definition: A list of tuple (sampling, max_size) + :param aggregation_method: Aggregation function to use. + """ + # Limit the main timeserie to a timespan mapping + return cls( + [AggregatedTimeSerie( + max_size=size, + sampling=sampling, + aggregation_method=aggregation_method) + for sampling, size in definitions] + ) + + def fetch(self, from_timestamp=None, to_timestamp=None, + timeserie_filter=None): + """Fetch aggregated time value. + + Returns a sorted list of tuples (timestamp, granularity, value). 
+ """ + result = [] + end_timestamp = to_timestamp + for ts in reversed(self.agg_timeseries): + if timeserie_filter and not timeserie_filter(ts): + continue + points = ts[from_timestamp:to_timestamp] + try: + # Do not include stop timestamp + del points[end_timestamp] + except KeyError: + pass + result.extend([(timestamp, ts.sampling, value) + for timestamp, value + in six.iteritems(points)]) + return result + + def update(self, timeserie): + for agg in self.agg_timeseries: + agg.update(timeserie) + + def to_dict(self): + return { + "archives": [ts.to_dict() for ts in self.agg_timeseries], + } + + def __eq__(self, other): + return (isinstance(other, TimeSerieArchive) + and self.agg_timeseries == other.agg_timeseries) @classmethod def from_dict(cls, d): diff --git a/gnocchi/cli.py b/gnocchi/cli.py index 0bea0f7b..9bcb942f 100644 --- a/gnocchi/cli.py +++ b/gnocchi/cli.py @@ -1,4 +1,5 @@ # Copyright (c) 2013 Mirantis Inc. +# Copyright (c) 2015 Red Hat # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -18,11 +19,11 @@ import signal import sys import time +from oslo_config import cfg from oslo_utils import timeutils import retrying from gnocchi import indexer -from gnocchi.indexer import sqlalchemy as sql_db from gnocchi.rest import app from gnocchi import service from gnocchi import statsd as statsd_service @@ -32,11 +33,24 @@ from gnocchi import storage LOG = logging.getLogger(__name__) -def storage_dbsync(): - conf = service.prepare_service() - index = sql_db.SQLAlchemyIndexer(conf) - index.connect() - index.upgrade() +def upgrade(): + conf = cfg.ConfigOpts() + conf.register_cli_opts([ + cfg.BoolOpt("skip-index", default=False, + help="Skip index upgrade."), + cfg.BoolOpt("skip-storage", default=False, + help="Skip storage upgrade.") + ]) + conf = service.prepare_service(conf=conf) + if not conf.skip_index: + index = indexer.get_driver(conf) + index.connect() + LOG.info("Upgrading indexer %s" % index) + index.upgrade() + if not conf.skip_storage: + s = storage.get_driver(conf) + LOG.info("Upgrading storage %s" % s) + s.upgrade(index) def api(): @@ -157,7 +171,7 @@ def metricd(): _metricd_cleanup(workers) sys.exit(0) except Exception: - LOG.warn("exiting", exc_info=True) + LOG.warning("exiting", exc_info=True) _metricd_cleanup(workers) sys.exit(1) diff --git a/gnocchi/genconfig.py b/gnocchi/genconfig.py new file mode 100644 index 00000000..ec4491e9 --- /dev/null +++ b/gnocchi/genconfig.py @@ -0,0 +1,24 @@ +# -*- encoding: utf-8 -*- +# +# Copyright © 2016 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. + + +def prehook(cmd): + try: + from oslo_config import generator + generator.main(['--config-file', + 'gnocchi-config-generator.conf']) + except Exception as e: + print("Unable to build sample configuration file: %s" % e) diff --git a/gnocchi/indexer/__init__.py b/gnocchi/indexer/__init__.py index c27dae63..c1004c20 100644 --- a/gnocchi/indexer/__init__.py +++ b/gnocchi/indexer/__init__.py @@ -15,10 +15,11 @@ # under the License. import fnmatch import hashlib +import os +import iso8601 from oslo_config import cfg from oslo_utils import netutils -import pytz import six from stevedore import driver @@ -27,7 +28,8 @@ from gnocchi import exceptions OPTS = [ cfg.StrOpt('url', secret=True, - default="null://", + required=True, + default=os.getenv("GNOCCHI_INDEXER_URL"), help='Indexer driver to use'), ] @@ -66,7 +68,7 @@ class Resource(object): def lastmodified(self): # less precise revision start for Last-Modified http header return self.revision_start.replace(microsecond=0, - tzinfo=pytz.UTC) + tzinfo=iso8601.iso8601.UTC) def get_driver(conf): @@ -81,11 +83,11 @@ class IndexerException(Exception): """Base class for all exceptions raised by an indexer.""" -class UnknownResourceType(IndexerException): +class NoSuchResourceType(IndexerException): """Error raised when the resource type is unknown.""" def __init__(self, type): - super(UnknownResourceType, self).__init__( - "Resource type %s is unknown" % type) + super(NoSuchResourceType, self).__init__( + "Resource type %s does not exist" % str(type)) self.type = type diff --git a/gnocchi/indexer/alembic/env.py b/gnocchi/indexer/alembic/env.py index dc577200..cf636cfa 100644 --- a/gnocchi/indexer/alembic/env.py +++ b/gnocchi/indexer/alembic/env.py @@ -71,16 +71,16 @@ def run_migrations_online(): conf = config.conf indexer = sqlalchemy.SQLAlchemyIndexer(conf) indexer.connect() - connectable = 
indexer.engine_facade.get_engine() + with indexer.facade.writer_connection() as connectable: - with connectable.connect() as connection: - context.configure( - connection=connection, - target_metadata=target_metadata - ) + with connectable.connect() as connection: + context.configure( + connection=connection, + target_metadata=target_metadata + ) - with context.begin_transaction(): - context.run_migrations() + with context.begin_transaction(): + context.run_migrations() indexer.disconnect() diff --git a/gnocchi/indexer/alembic/versions/1f21cbdd6bc2_allow_volume_display_name_to_be_null.py b/gnocchi/indexer/alembic/versions/1f21cbdd6bc2_allow_volume_display_name_to_be_null.py new file mode 100644 index 00000000..e2e48d9b --- /dev/null +++ b/gnocchi/indexer/alembic/versions/1f21cbdd6bc2_allow_volume_display_name_to_be_null.py @@ -0,0 +1,41 @@ +# Copyright 2015 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""allow volume display name to be null + +Revision ID: 1f21cbdd6bc2 +Revises: 469b308577a9 +Create Date: 2015-12-08 02:12:20.273880 + +""" + +from alembic import op +import sqlalchemy as sa + + +# revision identifiers, used by Alembic. 
+revision = '1f21cbdd6bc2' +down_revision = '469b308577a9' +branch_labels = None +depends_on = None + + +def upgrade(): + op.alter_column('volume', 'display_name', + existing_type=sa.String(length=255), + nullable=True) + op.alter_column('volume_history', 'display_name', + existing_type=sa.String(length=255), + nullable=True) diff --git a/gnocchi/indexer/alembic/versions/469b308577a9_allow_image_ref_to_be_null.py b/gnocchi/indexer/alembic/versions/469b308577a9_allow_image_ref_to_be_null.py new file mode 100644 index 00000000..5ac8dfcf --- /dev/null +++ b/gnocchi/indexer/alembic/versions/469b308577a9_allow_image_ref_to_be_null.py @@ -0,0 +1,41 @@ +# Copyright 2015 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""allow image_ref to be null + +Revision ID: 469b308577a9 +Revises: 39b7d449d46a +Create Date: 2015-11-29 00:23:39.998256 + +""" + +from alembic import op +import sqlalchemy as sa + + +# revision identifiers, used by Alembic. 
+revision = '469b308577a9' +down_revision = '39b7d449d46a' +branch_labels = None +depends_on = None + + +def upgrade(): + op.alter_column('instance', 'image_ref', + existing_type=sa.String(length=255), + nullable=True) + op.alter_column('instance_history', 'image_ref', + existing_type=sa.String(length=255), + nullable=True) diff --git a/gnocchi/indexer/sqlalchemy.py b/gnocchi/indexer/sqlalchemy.py index 38d1d11e..0286393f 100644 --- a/gnocchi/indexer/sqlalchemy.py +++ b/gnocchi/indexer/sqlalchemy.py @@ -17,12 +17,13 @@ from __future__ import absolute_import import itertools import operator import os.path +import threading import uuid import oslo_db.api from oslo_db import exception +from oslo_db.sqlalchemy import enginefacade from oslo_db.sqlalchemy import models -from oslo_db.sqlalchemy import session from oslo_db.sqlalchemy import utils as oslo_db_utils import six import sqlalchemy @@ -63,6 +64,44 @@ def get_resource_mappers(ext): 'history': resource_history_ext} +class PerInstanceFacade(object): + def __init__(self, conf): + self.trans = enginefacade.transaction_context() + self.trans.configure( + **dict(conf.database.items()) + ) + self._context = threading.local() + + def independent_writer(self): + return self.trans.independent.writer.using(self._context) + + def independent_reader(self): + return self.trans.independent.reader.using(self._context) + + def writer_connection(self): + return self.trans.connection.writer.using(self._context) + + def reader_connection(self): + return self.trans.connection.reader.using(self._context) + + def writer(self): + return self.trans.writer.using(self._context) + + def reader(self): + return self.trans.reader.using(self._context) + + def get_engine(self): + # TODO(mbayer): add get_engine() to enginefacade + if not self.trans._factory._started: + self.trans._factory._start() + return self.trans._factory._writer_engine + + def dispose(self): + # TODO(mbayer): add dispose() to enginefacade + if self.trans._factory._started: + 
self.trans._factory._writer_engine.dispose() + + class SQLAlchemyIndexer(indexer.IndexerDriver): resources = extension.ExtensionManager('gnocchi.indexer.resources') @@ -72,12 +111,10 @@ class SQLAlchemyIndexer(indexer.IndexerDriver): def __init__(self, conf): conf.set_override("connection", conf.indexer.url, "database") self.conf = conf - - def connect(self): - self.engine_facade = session.EngineFacade.from_config(self.conf) + self.facade = PerInstanceFacade(conf) def disconnect(self): - self.engine_facade.get_engine().dispose() + self.facade.dispose() def _get_alembic_config(self): from alembic import config @@ -88,6 +125,9 @@ class SQLAlchemyIndexer(indexer.IndexerDriver): self.conf.database.connection) return cfg + def get_engine(self): + return self.facade.get_engine() + def upgrade(self, nocreate=False): from alembic import command from alembic import migration @@ -97,31 +137,27 @@ class SQLAlchemyIndexer(indexer.IndexerDriver): if nocreate: command.upgrade(cfg, "head") else: - engine = self.engine_facade.get_engine() - ctxt = migration.MigrationContext.configure(engine.connect()) - current_version = ctxt.get_current_revision() - if current_version is None: - Base.metadata.create_all(engine) - command.stamp(cfg, "head") - else: - command.upgrade(cfg, "head") + with self.facade.writer_connection() as connection: + ctxt = migration.MigrationContext.configure(connection) + current_version = ctxt.get_current_revision() + if current_version is None: + Base.metadata.create_all(connection) + command.stamp(cfg, "head") + else: + command.upgrade(cfg, "head") def _resource_type_to_class(self, resource_type, purpose="resource"): if resource_type not in self._RESOURCE_CLASS_MAPPER: - raise indexer.UnknownResourceType(resource_type) + raise indexer.NoSuchResourceType(resource_type) return self._RESOURCE_CLASS_MAPPER[resource_type][purpose] def list_archive_policies(self): - session = self.engine_facade.get_session() - aps = list(session.query(ArchivePolicy).all()) - 
session.expunge_all() - return aps + with self.facade.independent_reader() as session: + return list(session.query(ArchivePolicy).all()) def get_archive_policy(self, name): - session = self.engine_facade.get_session() - ap = session.query(ArchivePolicy).get(name) - session.expunge_all() - return ap + with self.facade.independent_reader() as session: + return session.query(ArchivePolicy).get(name) def delete_archive_policy(self, name): with self.facade.writer() as session: @@ -142,33 +178,27 @@ class SQLAlchemyIndexer(indexer.IndexerDriver): definition=archive_policy.definition, aggregation_methods=list(archive_policy.aggregation_methods), ) - session = self.engine_facade.get_session() - session.add(ap) try: - session.flush() + with self.facade.writer() as session: + session.add(ap) except exception.DBDuplicateEntry: raise indexer.ArchivePolicyAlreadyExists(archive_policy.name) - session.expunge_all() return ap def list_archive_policy_rules(self): - session = self.engine_facade.get_session() - aps = session.query(ArchivePolicyRule).order_by( - ArchivePolicyRule.metric_pattern.desc()).all() - session.expunge_all() - return aps + with self.facade.independent_reader() as session: + return session.query(ArchivePolicyRule).order_by( + ArchivePolicyRule.metric_pattern.desc()).all() def get_archive_policy_rule(self, name): - session = self.engine_facade.get_session() - ap = session.query(ArchivePolicyRule).get(name) - session.expunge_all() - return ap + with self.facade.independent_reader() as session: + return session.query(ArchivePolicyRule).get(name) def delete_archive_policy_rule(self, name): - session = self.engine_facade.get_session() - if session.query(ArchivePolicyRule).filter( - ArchivePolicyRule.name == name).delete() == 0: - raise indexer.NoSuchArchivePolicyRule(name) + with self.facade.writer() as session: + if session.query(ArchivePolicyRule).filter( + ArchivePolicyRule.name == name).delete() == 0: + raise indexer.NoSuchArchivePolicyRule(name) def 
create_archive_policy_rule(self, name, metric_pattern, archive_policy_name): @@ -177,13 +207,11 @@ class SQLAlchemyIndexer(indexer.IndexerDriver): archive_policy_name=archive_policy_name, metric_pattern=metric_pattern ) - session = self.engine_facade.get_session() - session.add(apr) try: - session.flush() + with self.facade.writer() as session: + session.add(apr) except exception.DBDuplicateEntry: raise indexer.ArchivePolicyRuleAlreadyExists(name) - session.expunge_all() return apr def create_metric(self, id, created_by_user_id, created_by_project_id, @@ -195,16 +223,14 @@ class SQLAlchemyIndexer(indexer.IndexerDriver): archive_policy_name=archive_policy_name, name=name, resource_id=resource_id) - session = self.engine_facade.get_session() - session.add(m) try: - session.flush() + with self.facade.writer() as session: + session.add(m) except exception.DBReferenceError as e: if (e.constraint == 'fk_metric_archive_policy_name_archive_policy_name'): raise indexer.NoSuchArchivePolicy(archive_policy_name) raise - session.expunge_all() return m def list_metrics(self, names=None, ids=None, details=False, @@ -234,19 +260,19 @@ class SQLAlchemyIndexer(indexer.IndexerDriver): if (started_at is not None and ended_at is not None and started_at > ended_at): - raise ValueError("Start timestamp cannot be after end timestamp") - r = resource_cls( - id=id, - type=resource_type, - created_by_user_id=created_by_user_id, - created_by_project_id=created_by_project_id, - user_id=user_id, - project_id=project_id, - started_at=started_at, - ended_at=ended_at, - **kwargs) - session = self.engine_facade.get_session() - with session.begin(): + raise ValueError( + "Start timestamp cannot be after end timestamp") + with self.facade.writer() as session: + r = resource_cls( + id=id, + type=resource_type, + created_by_user_id=created_by_user_id, + created_by_project_id=created_by_project_id, + user_id=user_id, + project_id=project_id, + started_at=started_at, + ended_at=ended_at, + **kwargs) 
session.add(r) try: session.flush() @@ -259,11 +285,10 @@ class SQLAlchemyIndexer(indexer.IndexerDriver): if metrics is not None: self._set_metrics_for_resource(session, r, metrics) - # NOTE(jd) Force load of metrics :) - r.metrics + # NOTE(jd) Force load of metrics :) + r.metrics - session.expunge_all() - return r + return r @oslo_db.api.retry_on_deadlock def update_resource(self, resource_type, @@ -274,9 +299,8 @@ class SQLAlchemyIndexer(indexer.IndexerDriver): resource_cls = self._resource_type_to_class(resource_type) resource_history_cls = self._resource_type_to_class(resource_type, "history") - session = self.engine_facade.get_session() - try: - with session.begin(): + with self.facade.writer() as session: + try: # NOTE(sileht): We use FOR UPDATE that is not galera friendly, # but they are no other way to cleanly patch a resource and # store the history that safe when two concurrent calls are @@ -301,7 +325,7 @@ class SQLAlchemyIndexer(indexer.IndexerDriver): # Update the resource if ended_at is not _marker: # NOTE(jd) MySQL does not honor checks. I hate it. - engine = self.engine_facade.get_engine() + engine = session.connection() if engine.dialect.name == "mysql": if r.started_at is not None and ended_at is not None: if r.started_at > ended_at: @@ -324,17 +348,18 @@ class SQLAlchemyIndexer(indexer.IndexerDriver): Metric.status == 'active').update( {"resource_id": None}) self._set_metrics_for_resource(session, r, metrics) - except exception.DBConstraintError as e: - if e.check_name == "ck_started_before_ended": - raise indexer.ResourceValueError( - resource_type, "ended_at", ended_at) - raise - # NOTE(jd) Force load of metrics – do it outside the session! - r.metrics + session.flush() + except exception.DBConstraintError as e: + if e.check_name == "ck_started_before_ended": + raise indexer.ResourceValueError( + resource_type, "ended_at", ended_at) + raise + + # NOTE(jd) Force load of metrics – do it outside the session! 
+ r.metrics - session.expunge_all() - return r + return r @staticmethod def _set_metrics_for_resource(session, r, metrics): @@ -375,8 +400,7 @@ class SQLAlchemyIndexer(indexer.IndexerDriver): session.expire(r, ['metrics']) def delete_resource(self, resource_id): - session = self.engine_facade.get_session() - with session.begin(): + with self.facade.writer() as session: # We are going to delete the resource; the on delete will set the # resource_id of the attached metrics to NULL, we just have to mark # their status as 'delete' @@ -384,21 +408,18 @@ class SQLAlchemyIndexer(indexer.IndexerDriver): Metric.resource_id == resource_id).update( {"status": "delete"}) if session.query(Resource).filter( - Resource.id == resource_id).options( - sqlalchemy.orm.joinedload('metrics')).delete() == 0: + Resource.id == resource_id).delete() == 0: raise indexer.NoSuchResource(resource_id) def get_resource(self, resource_type, resource_id, with_metrics=False): resource_cls = self._resource_type_to_class(resource_type) - session = self.engine_facade.get_session() - q = session.query( - resource_cls).filter( - resource_cls.id == resource_id) - if with_metrics: - q = q.options(sqlalchemy.orm.joinedload('metrics')) - r = q.first() - session.expunge_all() - return r + with self.facade.independent_reader() as session: + q = session.query( + resource_cls).filter( + resource_cls.id == resource_id) + if with_metrics: + q = q.options(sqlalchemy.orm.joinedload('metrics')) + return q.first() def _get_history_result_mapper(self, resource_type): resource_cls = self._resource_type_to_class(resource_type) @@ -448,17 +469,49 @@ class SQLAlchemyIndexer(indexer.IndexerDriver): sorts=None): sorts = sorts or [] - session = self.engine_facade.get_session() - if history: target_cls = self._get_history_result_mapper(resource_type) else: target_cls = self._resource_type_to_class(resource_type) - q = session.query(target_cls) + with self.facade.independent_reader() as session: + q = session.query(target_cls) 
+ + if attribute_filter: + engine = session.connection() + try: + f = QueryTransformer.build_filter(engine.dialect.name, + target_cls, + attribute_filter) + except indexer.QueryAttributeError as e: + # NOTE(jd) The QueryAttributeError does not know about + # resource_type, so convert it + raise indexer.ResourceAttributeError(resource_type, + e.attribute) + + q = q.filter(f) + + # transform the api-wg representation to the oslo.db one + sort_keys = [] + sort_dirs = [] + for sort in sorts: + sort_key, __, sort_dir = sort.partition(":") + sort_keys.append(sort_key.strip()) + sort_dirs.append(sort_dir or 'asc') + + # paginate_query require at list one uniq column + if 'id' not in sort_keys: + sort_keys.append('id') + sort_dirs.append('asc') + + if marker: + resource_marker = self.get_resource(resource_type, marker) + if resource_marker is None: + raise indexer.InvalidPagination( + "Invalid marker: `%s'" % marker) + else: + resource_marker = None - if attribute_filter: - engine = self.engine_facade.get_engine() try: q = oslo_db_utils.paginate_query(q, target_cls, limit=limit, sort_keys=sort_keys, @@ -484,26 +537,31 @@ class SQLAlchemyIndexer(indexer.IndexerDriver): # No need for a second query all_resources.extend(resources) else: - target_cls = self._resource_type_to_class(type) - f = target_cls.id.in_([r.id for r in resources]) + if is_history: + target_cls = self._resource_type_to_class( + type, "history") + f = target_cls.revision.in_( + [r.revision for r in resources]) + else: + target_cls = self._resource_type_to_class(type) + f = target_cls.id.in_([r.id for r in resources]) - q = session.query(target_cls).filter(f) - # Always include metrics - q = q.options(sqlalchemy.orm.joinedload('metrics')) - all_resources.extend(q.all()) - session.expunge_all() - return all_resources + q = session.query(target_cls).filter(f) + # Always include metrics + q = q.options(sqlalchemy.orm.joinedload('metrics')) + all_resources.extend(q.all()) + return all_resources def 
expunge_metric(self, id): - session = self.engine_facade.get_session() - if session.query(Metric).filter(Metric.id == id).delete() == 0: - raise indexer.NoSuchMetric(id) + with self.facade.writer() as session: + if session.query(Metric).filter(Metric.id == id).delete() == 0: + raise indexer.NoSuchMetric(id) def delete_metric(self, id): - session = self.engine_facade.get_session() - if session.query(Metric).filter( - Metric.id == id).update({"status": "delete"}) == 0: - raise indexer.NoSuchMetric(id) + with self.facade.writer() as session: + if session.query(Metric).filter( + Metric.id == id).update({"status": "delete"}) == 0: + raise indexer.NoSuchMetric(id) class QueryTransformer(object): diff --git a/gnocchi/indexer/sqlalchemy_extension.py b/gnocchi/indexer/sqlalchemy_extension.py index 9a66b83c..3fb5d8eb 100644 --- a/gnocchi/indexer/sqlalchemy_extension.py +++ b/gnocchi/indexer/sqlalchemy_extension.py @@ -27,7 +27,7 @@ class Image(object): class Instance(object): flavor_id = sqlalchemy.Column(sqlalchemy.String(255), nullable=False) - image_ref = sqlalchemy.Column(sqlalchemy.String(255), nullable=False) + image_ref = sqlalchemy.Column(sqlalchemy.String(255)) host = sqlalchemy.Column(sqlalchemy.String(255), nullable=False) display_name = sqlalchemy.Column(sqlalchemy.String(255), nullable=False) server_group = sqlalchemy.Column(sqlalchemy.String(255)) @@ -47,4 +47,4 @@ class InstanceNetworkInterface(object): class Volume(object): - display_name = sqlalchemy.Column(sqlalchemy.String(255), nullable=False) + display_name = sqlalchemy.Column(sqlalchemy.String(255), nullable=True) diff --git a/gnocchi/opts.py b/gnocchi/opts.py index aca2afd6..312d9023 100644 --- a/gnocchi/opts.py +++ b/gnocchi/opts.py @@ -1,7 +1,5 @@ # -*- encoding: utf-8 -*- # -# Copyright © 2014-2015 eNovance -# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at @@ -16,6 +14,7 @@ import itertools from oslo_config import cfg +import uuid import gnocchi.archive_policy import gnocchi.indexer @@ -38,20 +37,15 @@ def list_opts(): cfg.StrOpt('paste_config', default='api-paste.ini', help='Path to API Paste configuration.'), - cfg.IntOpt('port', - default=8041, - help='The port for the Gnocchi API server.'), + cfg.PortOpt('port', + default=8041, + help='The port for the Gnocchi API server.'), cfg.StrOpt('host', default='0.0.0.0', help='The listen IP for the Gnocchi API server.'), cfg.BoolOpt('pecan_debug', default=False, help='Toggle Pecan Debug Middleware.'), - cfg.MultiStrOpt( - 'middlewares', - deprecated_for_removal=True, - default=[], - help='Middlewares to use. Use Paste config instead.',), cfg.IntOpt('workers', min=1, help='Number of workers for Gnocchi API server. ' 'By default the available number of CPU is used.'), @@ -67,8 +61,15 @@ def list_opts(): gnocchi.storage.swift.OPTS, gnocchi.storage.influxdb.OPTS)), ("statsd", ( - cfg.StrOpt( + cfg.StrOpt('host', + default='0.0.0.0', + help='The listen IP for statsd'), + cfg.PortOpt('port', + default=8125, + help='The port for statsd'), + cfg.Opt( 'resource_id', + type=uuid.UUID, help='Resource UUID to use to identify statsd in Gnocchi'), cfg.StrOpt( 'user_id', @@ -81,6 +82,7 @@ def list_opts(): help='Archive policy name to use when creating metrics'), cfg.FloatOpt( 'flush_delay', + default=10, help='Delay between flushes'), )), ("archive_policy", gnocchi.archive_policy.OPTS), diff --git a/gnocchi/rest/__init__.py b/gnocchi/rest/__init__.py index fd12e4fe..d9514eb7 100644 --- a/gnocchi/rest/__init__.py +++ b/gnocchi/rest/__init__.py @@ -17,7 +17,6 @@ import itertools import uuid -from oslo_log import log from oslo_utils import strutils import pecan from pecan import rest @@ -35,8 +34,6 @@ from gnocchi import json from gnocchi import storage from gnocchi import utils -LOG = log.getLogger(__name__) - def arg_to_list(value): if 
isinstance(value, list): @@ -228,9 +225,7 @@ def get_pagination_options(params, default): sorts = [sorts] try: - limit = int(limit) - if limit <= 0: - raise ValueError + limit = PositiveNotNullInt(limit) except ValueError: abort(400, "Invalid 'limit' value: %s" % params.get('limit')) @@ -384,13 +379,15 @@ class AggregatedMetricController(rest.RestController): @pecan.expose('json') def get_measures(self, start=None, stop=None, aggregation='mean', - needed_overlap=100.0): + granularity=None, needed_overlap=100.0): return self.get_cross_metric_measures_from_ids( - self.metric_ids, start, stop, aggregation, needed_overlap) + self.metric_ids, start, stop, + aggregation, granularity, needed_overlap) @classmethod def get_cross_metric_measures_from_ids(cls, metric_ids, start=None, stop=None, aggregation='mean', + granularity=None, needed_overlap=100.0): # Check RBAC policy metrics = pecan.request.indexer.list_metrics(ids=metric_ids) @@ -401,11 +398,12 @@ class AggregatedMetricController(rest.RestController): abort(404, storage.MetricDoesNotExist( missing_metric_ids.pop())) return cls.get_cross_metric_measures_from_objs( - metrics, start, stop, aggregation, needed_overlap) + metrics, start, stop, aggregation, granularity, needed_overlap) @staticmethod def get_cross_metric_measures_from_objs(metrics, start=None, stop=None, aggregation='mean', + granularity=None, needed_overlap=100.0): try: needed_overlap = float(needed_overlap) @@ -435,15 +433,26 @@ class AggregatedMetricController(rest.RestController): for metric in metrics: enforce("get metric", metric) + number_of_metrics = len(metrics) try: - if len(metrics) == 1: + if number_of_metrics == 0: + return [] + if granularity is not None: + try: + granularity = float(granularity) + except ValueError as e: + abort(400, "granularity must be a float: %s" % e) + if number_of_metrics == 1: # NOTE(sileht): don't do the aggregation if we only have one # metric measures = pecan.request.storage.get_measures( - metrics[0], start, stop, 
aggregation) + metrics[0], start, stop, aggregation, + granularity) else: measures = pecan.request.storage.get_cross_metric_measures( - metrics, start, stop, aggregation, needed_overlap) + metrics, start, stop, aggregation, + granularity, + needed_overlap) # Replace timestamp keys by their string versions return [(timestamp.isoformat(), offset, v) for timestamp, offset, v in measures] @@ -456,6 +465,21 @@ class AggregatedMetricController(rest.RestController): abort(404, e) +def MeasureSchema(m): + # NOTE(sileht): don't use voluptuous for performance reasons + try: + value = float(m['value']) + except Exception: + abort(400, "Invalid input for a value") + + try: + timestamp = utils.to_timestamp(m['timestamp']) + except Exception: + abort(400, "Invalid input for a timestamp") + + return storage.Measure(timestamp, value) + + class MetricController(rest.RestController): _custom_actions = { 'measures': ['POST', 'GET'] @@ -467,23 +491,6 @@ class MetricController(rest.RestController): invoke_on_load=True) self.custom_agg = dict((x.name, x.obj) for x in mgr) - @staticmethod - def to_measure(m): - # NOTE(sileht): we do the input validation - # during the iteration for not loop just for this - # and don't use voluptuous for performance reason - try: - value = float(m['value']) - except Exception: - abort(400, "Invalid input for a value") - - try: - timestamp = utils.to_timestamp(m['timestamp']) - except Exception: - abort(400, "Invalid input for a timestamp") - - return storage.Measure(timestamp, value) - def enforce_metric(self, rule): enforce(rule, json.to_primitive(self.metric)) @@ -500,7 +507,7 @@ class MetricController(rest.RestController): abort(400, "Invalid input for measures") if params: pecan.request.storage.add_measures( - self.metric, six.moves.map(self.to_measure, params)) + self.metric, six.moves.map(MeasureSchema, params)) pecan.response.status = 202 @pecan.expose('json') @@ -537,13 +544,13 @@ class MetricController(rest.RestController): else: measures = 
pecan.request.storage.get_measures( self.metric, start, stop, aggregation, - int(granularity) if granularity is not None else None) + float(granularity) if granularity is not None else None) # Replace timestamp keys by their string versions return [(timestamp.isoformat(), offset, v) for timestamp, offset, v in measures] - except storage.MetricDoesNotExist as e: - abort(404, e) - except storage.AggregationDoesNotExist as e: + except (storage.MetricDoesNotExist, + storage.GranularityDoesNotExist, + storage.AggregationDoesNotExist) as e: abort(404, e) except aggregates.CustomAggFailure as e: abort(400, e) @@ -688,7 +695,8 @@ class NamedMetricController(rest.RestController): @pecan.expose() def _lookup(self, name, *remainder): - m = pecan.request.indexer.list_metrics(details=True, + details = True if pecan.request.method == 'GET' else False + m = pecan.request.indexer.list_metrics(details=details, name=name, resource_id=self.resource_id) if m: @@ -808,12 +816,10 @@ def ResourceSchema(schema): return base_schema -class GenericResourceController(rest.RestController): - _resource_type = 'generic' - - Resource = ResourceSchema({}) +class ResourceController(rest.RestController): - def __init__(self, id): + def __init__(self, resource_type, id): + self._resource_type = resource_type try: self.id = utils.ResourceUUID(id) except ValueError: @@ -842,7 +848,9 @@ class GenericResourceController(rest.RestController): enforce("update resource", resource) etag_precondition_check(resource) - body = deserialize_and_validate(self.Resource, required=False) + body = deserialize_and_validate( + schema_for(self._resource_type), + required=False) if len(body) == 0: etag_set_headers(resource) @@ -860,8 +868,6 @@ class GenericResourceController(rest.RestController): create_revision = False try: - if 'metrics' in body: - user, project = get_user_and_project() resource = pecan.request.indexer.update_resource( self._resource_type, self.id, @@ -890,44 +896,45 @@ class 
GenericResourceController(rest.RestController): abort(404, e) -class SwiftAccountResourceController(GenericResourceController): - _resource_type = 'swift_account' - +GenericSchema = ResourceSchema({}) -class InstanceDiskResourceController(GenericResourceController): - _resource_type = 'instance_disk' - Resource = ResourceSchema({ - "name": six.text_type, - "instance_id": UUID, - }) +InstanceDiskSchema = ResourceSchema({ + "name": six.text_type, + "instance_id": UUID, +}) +InstanceNetworkInterfaceSchema = ResourceSchema({ + "name": six.text_type, + "instance_id": UUID, +}) -class InstanceNetworkInterfaceResourceController(GenericResourceController): - _resource_type = 'instance_network_interface' - Resource = ResourceSchema({ - "name": six.text_type, - "instance_id": UUID, - }) +InstanceSchema = ResourceSchema({ + "flavor_id": six.text_type, + voluptuous.Optional("image_ref"): six.text_type, + "host": six.text_type, + "display_name": six.text_type, + voluptuous.Optional("server_group"): six.text_type, +}) +VolumeSchema = ResourceSchema({ + voluptuous.Optional("display_name"): voluptuous.Any(None, + six.text_type), +}) -class InstanceResourceController(GenericResourceController): - _resource_type = 'instance' +ImageSchema = ResourceSchema({ + "name": six.text_type, + "container_format": six.text_type, + "disk_format": six.text_type, +}) - Resource = ResourceSchema({ - "flavor_id": six.text_type, - "image_ref": six.text_type, - "host": six.text_type, - "display_name": six.text_type, - voluptuous.Optional("server_group"): six.text_type, - }) +# NOTE(sileht): Must be loaded after all ResourceSchema +RESOURCE_SCHEMA_MANAGER = extension.ExtensionManager( + 'gnocchi.controller.schemas') -class VolumeResourceController(GenericResourceController): - _resource_type = 'volume' - Resource = ResourceSchema({ - "display_name": six.text_type, - }) +def schema_for(resource_type): + return RESOURCE_SCHEMA_MANAGER[resource_type].plugin def ResourceID(value): @@ -940,7 +947,7 @@ class 
ResourcesController(rest.RestController): @pecan.expose() def _lookup(self, id, *remainder): - return self._resource_rest_class(id), remainder + return ResourceController(self._resource_type, id), remainder @pecan.expose('json') def post(self): @@ -1001,77 +1008,7 @@ class ResourcesController(rest.RestController): abort(400, e) -class SwiftAccountsResourcesController(GenericResourcesController): - _resource_type = 'swift_account' - _resource_rest_class = SwiftAccountResourceController - - -class InstanceDisksResourcesController(GenericResourcesController): - _resource_type = 'instance_disk' - _resource_rest_class = InstanceDiskResourceController - - Resource = InstanceDiskResourceController.Resource - - -class InstanceNetworkInterfacesResourcesController(GenericResourcesController): - _resource_type = 'instance_network_interface' - _resource_rest_class = InstanceNetworkInterfaceResourceController - - Resource = InstanceNetworkInterfaceResourceController.Resource - - -class InstancesResourcesController(GenericResourcesController): - _resource_type = 'instance' - _resource_rest_class = InstanceResourceController - - Resource = InstanceResourceController.Resource - - -class VolumesResourcesController(GenericResourcesController): - _resource_type = 'volume' - _resource_rest_class = VolumeResourceController - - Resource = VolumeResourceController.Resource - - -class CephAccountsResourcesController(GenericResourcesController): - _resource_type = 'ceph_account' - _resource_rest_class = CephAccountResourceController - - -class NetworkResourcesController(GenericResourcesController): - _resource_type = 'network' - _resource_rest_class = NetworkResourceController - - -class IdentityResourcesController(GenericResourcesController): - _resource_type = 'identity' - _resource_rest_class = IdentityResourceController - - -class IPMIResourcesController(GenericResourcesController): - _resource_type = 'ipmi' - _resource_rest_class = IPMIResourceController - - -class 
StackResourcesController(GenericResourcesController): - _resource_type = 'stack' - _resource_rest_class = StackResourceController - - -class ImageResourcesController(GenericResourcesController): - _resource_type = 'image' - _resource_rest_class = ImageResourceController - - Resource = ImageResourceController.Resource - - -class ResourcesController(rest.RestController): - resources_ctrl_by_type = dict( - (ext.name, ext.plugin()) - for ext in extension.ExtensionManager( - 'gnocchi.controller.resources').extensions) - +class ResourcesByTypeController(rest.RestController): @pecan.expose('json') def get_all(self): return dict( @@ -1081,11 +1018,10 @@ class ResourcesController(rest.RestController): @pecan.expose() def _lookup(self, resource_type, *remainder): - ctrl = self.resources_ctrl_by_type.get(resource_type) - if ctrl: - return ctrl, remainder + if resource_type in RESOURCE_SCHEMA_MANAGER: + return ResourcesController(resource_type), remainder else: - abort(404, indexer.UnknownResourceType(resource_type)) + abort(404, indexer.NoSuchResourceType(resource_type)) def _ResourceSearchSchema(v): @@ -1161,10 +1097,10 @@ class SearchResourceTypeController(rest.RestController): class SearchResourceController(rest.RestController): @pecan.expose() def _lookup(self, resource_type, *remainder): - if resource_type in ResourcesController.resources_ctrl_by_type: + if resource_type in RESOURCE_SCHEMA_MANAGER: return SearchResourceTypeController(resource_type), remainder else: - abort(404, indexer.UnknownResourceType(resource_type)) + abort(404, indexer.NoSuchResourceType(resource_type)) def _MetricSearchSchema(v): @@ -1405,9 +1341,10 @@ class AggregationController(rest.RestController): @pecan.expose('json') def get_metric(self, metric=None, start=None, stop=None, aggregation='mean', - needed_overlap=100.0): + granularity=None, needed_overlap=100.0): return AggregatedMetricController.get_cross_metric_measures_from_ids( - arg_to_list(metric), start, stop, aggregation, needed_overlap) 
+ arg_to_list(metric), start, stop, aggregation, + granularity, needed_overlap) class CapabilityController(rest.RestController): diff --git a/gnocchi/rest/app.py b/gnocchi/rest/app.py index 50db0f2e..587d245c 100644 --- a/gnocchi/rest/app.py +++ b/gnocchi/rest/app.py @@ -16,11 +16,9 @@ import os import uuid -import keystonemiddleware.auth_token from oslo_config import cfg from oslo_log import log from oslo_policy import policy -from oslo_utils import importutils from paste import deploy import pecan import webob.exc @@ -132,23 +130,6 @@ def _setup_app(root, conf, indexer, storage, not_implemented_middleware): if not_implemented_middleware: app = webob.exc.HTTPExceptionMiddleware(NotImplementedMiddleware(app)) - for middleware in reversed(cfg.api.middlewares): - if not middleware: - continue - klass = importutils.import_class(middleware) - # FIXME(jd) Find a way to remove that special handling… - # next version of keystonemiddleware > 2.1.0 will support - # 'oslo_config_project' option, so we could remove this - # workaround. - if klass == keystonemiddleware.auth_token.AuthProtocol: - middleware_config = dict(cfg.keystone_authtoken) - else: - middleware_config = dict(cfg) - # NOTE(sileht): Allow oslo.config compatible middleware to load - # our configuration file. 
- middleware_config['oslo_config_project'] = 'gnocchi' - app = klass(app, middleware_config) - return app diff --git a/gnocchi/service.py b/gnocchi/service.py index fab9d348..c33d2378 100644 --- a/gnocchi/service.py +++ b/gnocchi/service.py @@ -30,8 +30,10 @@ from gnocchi import opts LOG = log.getLogger(__name__) -def prepare_service(args=None): - conf = cfg.ConfigOpts() +def prepare_service(args=None, conf=None, + default_config_files=None): + if conf is None: + conf = cfg.ConfigOpts() # FIXME(jd) Use the pkg_entry info to register the options of these libs log.register_options(conf) db_options.set_defaults(conf) diff --git a/gnocchi/statsd.py b/gnocchi/statsd.py index 5c2839c6..693a063a 100644 --- a/gnocchi/statsd.py +++ b/gnocchi/statsd.py @@ -19,6 +19,7 @@ try: import asyncio except ImportError: import trollius as asyncio +from oslo_config import cfg from oslo_log import log import six @@ -166,13 +167,17 @@ class StatsdServer(object): def start(): conf = service.prepare_service() + for field in ["resource_id", "user_id", "project_id"]: + if conf.statsd[field] is None: + raise cfg.RequiredOptError(field, cfg.OptGroup("statsd")) + stats = Stats(conf) loop = asyncio.get_event_loop() # TODO(jd) Add TCP support listen = loop.create_datagram_endpoint( - # TODO(jd) Add config options for host/port - lambda: StatsdServer(stats), local_addr=('0.0.0.0', 8125)) + lambda: StatsdServer(stats), + local_addr=(conf.statsd.host, conf.statsd.port)) def _flush(): loop.call_later(conf.statsd.flush_delay, _flush) diff --git a/gnocchi/storage/__init__.py b/gnocchi/storage/__init__.py index 057ae69f..c1d20dc1 100644 --- a/gnocchi/storage/__init__.py +++ b/gnocchi/storage/__init__.py @@ -82,11 +82,15 @@ class Metric(object): __hash__ = object.__hash__ -class InvalidQuery(Exception): +class StorageError(Exception): pass -class MetricDoesNotExist(Exception): +class InvalidQuery(StorageError): + pass + + +class MetricDoesNotExist(StorageError): """Error raised when this metric does not 
exist.""" def __init__(self, metric): @@ -95,7 +99,7 @@ class MetricDoesNotExist(Exception): "Metric %s does not exist" % metric) -class AggregationDoesNotExist(Exception): +class AggregationDoesNotExist(StorageError): """Error raised when the aggregation method doesn't exists for a metric.""" def __init__(self, metric, method): @@ -106,7 +110,18 @@ class AggregationDoesNotExist(Exception): (method, metric)) -class MetricAlreadyExists(Exception): +class GranularityDoesNotExist(StorageError): + """Error raised when the granularity doesn't exist for a metric.""" + + def __init__(self, metric, granularity): + self.metric = metric + self.granularity = granularity + super(GranularityDoesNotExist, self).__init__( + "Granularity '%s' for metric %s does not exist" % + (granularity, metric)) + + +class MetricAlreadyExists(StorageError): """Error raised when this metric already exists.""" def __init__(self, metric): @@ -115,7 +130,7 @@ class MetricAlreadyExists(Exception): "Metric %s already exists" % metric) -class MetricUnaggregatable(Exception): +class MetricUnaggregatable(StorageError): """Error raised when metrics can't be aggregated.""" def __init__(self, metrics, reason): @@ -221,7 +236,7 @@ class StorageDriver(object): def measures_report(details=True): """Return a report of pending to process measures. - Only usefull for drivers that process measurements in background + Only useful for drivers that process measurements in background :return: {'summary': {'metrics': count, 'measures': count}, 'details': {metric_id: pending_measures_count}} @@ -249,17 +264,23 @@ class StorageDriver(object): @staticmethod def get_cross_metric_measures(metrics, from_timestamp=None, to_timestamp=None, aggregation='mean', + granularity=None, needed_overlap=None): """Get aggregated measures of multiple entities. :param entities: The entities measured to aggregate. :param from timestamp: The timestamp to get the measure from. :param to timestamp: The timestamp to get the measure to. 
+ :param granularity: The granularity to retrieve. :param aggregation: The type of aggregation to retrieve. """ for metric in metrics: if aggregation not in metric.archive_policy.aggregation_methods: raise AggregationDoesNotExist(metric, aggregation) + if (granularity is not None and granularity + not in set(d.granularity + for d in metric.archive_policy.definition)): + raise GranularityDoesNotExist(metric, granularity) @staticmethod def search_value(metrics, query, from_timestamp=None, diff --git a/gnocchi/storage/_carbonara.py b/gnocchi/storage/_carbonara.py index 566257cb..006bf5a1 100644 --- a/gnocchi/storage/_carbonara.py +++ b/gnocchi/storage/_carbonara.py @@ -3,8 +3,6 @@ # Copyright © 2016 Red Hat, Inc. # Copyright © 2014-2015 eNovance # -# Authors: Julien Danjou -# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at @@ -29,6 +27,7 @@ import iso8601 from oslo_config import cfg from oslo_serialization import msgpackutils from oslo_utils import timeutils +import six from tooz import coordination from gnocchi import carbonara @@ -36,7 +35,6 @@ from gnocchi import storage OPTS = [ cfg.IntOpt('aggregation_workers_number', - default=None, help='Number of workers to run during adding new measures for ' 'pre-aggregation needs.'), cfg.StrOpt('coordination_url', @@ -48,8 +46,11 @@ OPTS = [ LOG = logging.getLogger(__name__) -class CarbonaraBasedStorageToozLock(object): +class CarbonaraBasedStorage(storage.StorageDriver): + MEASURE_PREFIX = "measure" + def __init__(self, conf): + super(CarbonaraBasedStorage, self).__init__(conf) self.coord = coordination.get_coordinator( conf.coordination_url, str(uuid.uuid4()).encode('ascii')) @@ -78,58 +79,82 @@ class CarbonaraBasedStorageToozLock(object): def stop(self): self.coord.stop() - def __call__(self, metric_id): + def _lock(self, metric_id): lock_name = b"gnocchi-" + str(metric_id).encode('ascii') return 
self.coord.get_lock(lock_name) - -class CarbonaraBasedStorage(storage.StorageDriver): - MEASURE_PREFIX = "measure" - - def __init__(self, conf): - super(CarbonaraBasedStorage, self).__init__(conf) - self.executor = futures.ThreadPoolExecutor( - max_workers=(conf.aggregation_workers_number or - multiprocessing.cpu_count())) + @staticmethod + def _get_measures(metric, timestamp_key, aggregation, granularity): + raise NotImplementedError @staticmethod - def _create_metric_container(metric, archive_policy): - pass + def _get_unaggregated_timeserie(metric): + raise NotImplementedError @staticmethod - def _lock(metric): + def _store_unaggregated_timeserie(metric, data): raise NotImplementedError @staticmethod - def _get_measures(metric, aggregation): + def _store_metric_measures(metric, aggregation, granularity, data): raise NotImplementedError @staticmethod - def _store_metric_measures(metric, aggregation, data): + def _list_split_keys_for_metric(metric, aggregation, granularity): raise NotImplementedError def get_measures(self, metric, from_timestamp=None, to_timestamp=None, aggregation='mean', granularity=None): super(CarbonaraBasedStorage, self).get_measures( metric, from_timestamp, to_timestamp, aggregation) - archive = self._get_measures_archive(metric, aggregation) + if granularity is None: + agg_timeseries = self._map_in_thread( + self._get_measures_timeserie, + ((metric, aggregation, ap.granularity, + from_timestamp, to_timestamp) + for ap in reversed(metric.archive_policy.definition))) + else: + agg_timeseries = [self._get_measures_timeserie( + metric, aggregation, granularity, + from_timestamp, to_timestamp)] return [(timestamp.replace(tzinfo=iso8601.iso8601.UTC), r, v) - for timestamp, r, v - in archive.fetch(from_timestamp, to_timestamp) - if granularity is None or r == granularity] + for ts in agg_timeseries + for timestamp, r, v in ts.fetch(from_timestamp, to_timestamp)] - @staticmethod - def _log_data_corruption(metric, aggregation): - LOG.error("Data are 
corrupted for metric %(metric)s and aggregation " - "%(aggregation)s, recreating an empty timeserie." % - dict(metric=metric.id, aggregation=aggregation)) - - def _get_measures_archive(self, metric, aggregation): + def _get_measures_and_unserialize(self, metric, key, + aggregation, granularity): + data = self._get_measures(metric, key, aggregation, granularity) try: - contents = self._get_measures(metric, aggregation) - except (storage.MetricDoesNotExist, storage.AggregationDoesNotExist): - ts = None + return carbonara.TimeSerie.unserialize(data) + except ValueError: + LOG.error("Data corruption detected for %s " + "aggregated `%s' timeserie, granularity `%s' " + "around time `%s', ignoring." + % (metric.id, aggregation, granularity, key)) + + def _get_measures_timeserie(self, metric, + aggregation, granularity, + from_timestamp=None, to_timestamp=None): + + # Find the number of point + for d in metric.archive_policy.definition: + if d.granularity == granularity: + points = d.points + break else: + raise storage.GranularityDoesNotExist(metric, granularity) + + all_keys = None + try: + all_keys = self._list_split_keys_for_metric( + metric, aggregation, granularity) + except storage.MetricDoesNotExist: + # This can happen if it's an old metric with a TimeSerieArchive + all_keys = None + + if not all_keys: + # It does not mean we have no data: it can be an old metric with a + # TimeSerieArchive. try: data = self._get_metric_archive(metric, aggregation) except (storage.MetricDoesNotExist, @@ -311,12 +336,8 @@ class CarbonaraBasedStorage(storage.StorageDriver): # NOTE(jd): We need to lock the metric otherwise we might delete # measures that another worker might be processing. Deleting # measurement files under its feet is not nice! 
- lock = self._lock(metric_id) - lock.acquire(blocking=sync) - try: + with self._lock(metric_id)(blocking=sync): self._delete_unprocessed_measures_for_metric_id(metric_id) - finally: - lock.release() for metric in metrics: lock = self._lock(metric.id) agg_methods = list(metric.archive_policy.aggregation_methods) @@ -336,8 +357,10 @@ class CarbonaraBasedStorage(storage.StorageDriver): try: with timeutils.StopWatch() as sw: - raw_measures = self._get_measures(metric, - 'none') + raw_measures = ( + self._get_unaggregated_timeserie( + metric) + ) LOG.debug( "Retrieve unaggregated measures " "for %s in %.2fs" @@ -349,20 +372,21 @@ class CarbonaraBasedStorage(storage.StorageDriver): # Created in the mean time, do not worry pass ts = None - except storage.AggregationDoesNotExist: - ts = None else: try: ts = carbonara.BoundTimeSerie.unserialize( raw_measures) except ValueError: ts = None - self._log_data_corruption(metric, "none") + LOG.error( + "Data corruption detected for %s " + "unaggregated timeserie, " + "recreating an empty one." 
+ % metric.id) if ts is None: # This is the first time we treat measures for this - # metric, or data are corrupted, - # create a new one + # metric, or data are corrupted, create a new one mbs = metric.archive_policy.max_block_size ts = carbonara.BoundTimeSerie( block_size=mbs, @@ -385,8 +409,8 @@ class CarbonaraBasedStorage(storage.StorageDriver): "in %.2f seconds" % (metric.id, len(measures), sw.elapsed())) - self._store_metric_measures(metric, 'none', - ts.serialize()) + self._store_unaggregated_timeserie(metric, + ts.serialize()) except Exception: if sync: raise @@ -396,46 +420,84 @@ class CarbonaraBasedStorage(storage.StorageDriver): def get_cross_metric_measures(self, metrics, from_timestamp=None, to_timestamp=None, aggregation='mean', + granularity=None, needed_overlap=100.0): super(CarbonaraBasedStorage, self).get_cross_metric_measures( - metrics, from_timestamp, to_timestamp, aggregation, needed_overlap) + metrics, from_timestamp, to_timestamp, + aggregation, granularity, needed_overlap) + + if granularity is None: + granularities = ( + definition.granularity + for metric in metrics + for definition in metric.archive_policy.definition + ) + granularities_in_common = [ + g + for g, occurence in six.iteritems( + collections.Counter(granularities)) + if occurence == len(metrics) + ] + + if not granularities_in_common: + raise storage.MetricUnaggregatable( + metrics, 'No granularity match') + else: + granularities_in_common = [granularity] - tss = self._map_in_thread(self._get_measures_archive, - [(metric, aggregation) - for metric in metrics]) + tss = self._map_in_thread(self._get_measures_timeserie, + [(metric, aggregation, g, + from_timestamp, to_timestamp) + for metric in metrics + for g in granularities_in_common]) try: return [(timestamp.replace(tzinfo=iso8601.iso8601.UTC), r, v) for timestamp, r, v - in carbonara.TimeSerieArchive.aggregated( + in carbonara.AggregatedTimeSerie.aggregated( tss, from_timestamp, to_timestamp, aggregation, needed_overlap)] 
except carbonara.UnAggregableTimeseries as e: raise storage.MetricUnaggregatable(metrics, e.reason) - def _find_measure(self, metric, aggregation, predicate, + def _find_measure(self, metric, aggregation, granularity, predicate, from_timestamp, to_timestamp): - timeserie = self._get_measures_archive(metric, aggregation) + timeserie = self._get_measures_timeserie( + metric, aggregation, granularity, + from_timestamp, to_timestamp) values = timeserie.fetch(from_timestamp, to_timestamp) return {metric: [(timestamp.replace(tzinfo=iso8601.iso8601.UTC), - granularity, value) - for timestamp, granularity, value in values + g, value) + for timestamp, g, value in values if predicate(value)]} + # TODO(jd) Add granularity parameter here and in the REST API + # rather than fetching all granularities def search_value(self, metrics, query, from_timestamp=None, to_timestamp=None, aggregation='mean'): - result = {} predicate = storage.MeasureQuery(query) - results = self._map_in_thread(self._find_measure, - [(metric, aggregation, predicate, - from_timestamp, to_timestamp) - for metric in metrics]) + results = self._map_in_thread( + self._find_measure, + [(metric, aggregation, + ap.granularity, predicate, + from_timestamp, to_timestamp) + for metric in metrics + for ap in metric.archive_policy.definition]) + result = collections.defaultdict(list) for r in results: - result.update(r) + for metric, metric_result in six.iteritems(r): + result[metric].extend(metric_result) + + # Sort the result + for metric, r in six.iteritems(result): + # Sort by timestamp asc, granularity desc + r.sort(key=lambda t: (t[0], - t[1])) + return result def _map_in_thread(self, method, list_of_args): - # We use 'list' to iterate all threads here to raise the first - # exception now , not much choice - return list(self.executor.map(lambda args: method(*args), - list_of_args)) + with futures.ThreadPoolExecutor( + max_workers=self.aggregation_workers_number) as executor: + # We use 'list' to iterate all 
threads here to raise the first + # exception now, not much choice + return list(executor.map(lambda args: method(*args), list_of_args)) diff --git a/gnocchi/storage/ceph.py b/gnocchi/storage/ceph.py index 98e6dc99..2758aae1 100644 --- a/gnocchi/storage/ceph.py +++ b/gnocchi/storage/ceph.py @@ -48,7 +48,6 @@ OPTS = [ cfg.StrOpt('ceph_username', help='Ceph username (ie: admin without "client." prefix).'), cfg.StrOpt('ceph_keyring', - default=None, help='Ceph keyring path.'), cfg.StrOpt('ceph_conffile', default='/etc/ceph/ceph.conf', @@ -61,7 +60,6 @@ class CephStorage(_carbonara.CarbonaraBasedStorage): def __init__(self, conf): super(CephStorage, self).__init__(conf) self.pool = conf.ceph_pool - self._lock = _carbonara.CarbonaraBasedStorageToozLock(conf) options = {} if conf.ceph_keyring: options['keyring'] = conf.ceph_keyring @@ -81,9 +79,6 @@ class CephStorage(_carbonara.CarbonaraBasedStorage): conf=options) self.rados.connect() - def stop(self): - self._lock.stop() - def _store_measures(self, metric, data): # NOTE(sileht): list all objects in a pool is too slow with # many objects (2min for 20000 objects in 50osds cluster), @@ -183,8 +178,9 @@ class CephStorage(_carbonara.CarbonaraBasedStorage): return self.rados.open_ioctx(self.pool) @staticmethod - def _get_object_name(metric, lock_name): - return str("gnocchi_%s_%s" % (metric.id, lock_name)) + def _get_object_name(metric, timestamp_key, aggregation, granularity): + return str("gnocchi_%s_%s_%s_%s" % ( + metric.id, timestamp_key, aggregation, granularity)) @staticmethod def _object_exists(ioctx, name): @@ -195,17 +191,20 @@ class CephStorage(_carbonara.CarbonaraBasedStorage): return False def _create_metric(self, metric): - name = self._get_object_name(metric, 'container') + name = "gnocchi_%s_container" % metric.id with self._get_ioctx() as ioctx: if self._object_exists(ioctx, name): raise storage.MetricAlreadyExists(metric) else: ioctx.write_full(name, "metric created") - def _store_metric_measures(self, 
metric, aggregation, data): - name = self._get_object_name(metric, aggregation) + def _store_metric_measures(self, metric, timestamp_key, + aggregation, granularity, data): + name = self._get_object_name(metric, timestamp_key, + aggregation, granularity) with self._get_ioctx() as ioctx: ioctx.write_full(name, data) + ioctx.set_xattr("gnocchi_%s_container" % metric.id, name, "") def _delete_metric_measures(self, metric, timestamp_key, aggregation, granularity): @@ -227,16 +226,16 @@ class CephStorage(_carbonara.CarbonaraBasedStorage): for name in ('container', 'none'): ioctx.aio_remove("gnocchi_%s_%s" % (metric.id, name)) - def _get_measures(self, metric, aggregation): + def _get_measures(self, metric, timestamp_key, aggregation, granularity): try: with self._get_ioctx() as ioctx: name = self._get_object_name(metric, timestamp_key, aggregation, granularity) return self._get_object_content(ioctx, name) except rados.ObjectNotFound: - name = self._get_object_name(metric, 'container') with self._get_ioctx() as ioctx: - if self._object_exists(ioctx, name): + if self._object_exists( + ioctx, "gnocchi_%s_container" % metric.id): raise storage.AggregationDoesNotExist(metric, aggregation) else: raise storage.MetricDoesNotExist(metric) diff --git a/gnocchi/storage/file.py b/gnocchi/storage/file.py index bc927e93..6ea11326 100644 --- a/gnocchi/storage/file.py +++ b/gnocchi/storage/file.py @@ -44,7 +44,6 @@ class FileStorage(_carbonara.CarbonaraBasedStorage): super(FileStorage, self).__init__(conf) self.basepath = conf.file_basepath self.basepath_tmp = conf.file_basepath_tmp - self._lock = _carbonara.CarbonaraBasedStorageToozLock(conf) try: os.mkdir(self.basepath) except OSError as e: @@ -67,14 +66,26 @@ class FileStorage(_carbonara.CarbonaraBasedStorage): dir=self.basepath_tmp, delete=False) - def stop(self): - self._lock.stop() + def _atomic_file_store(self, dest, data): + tmpfile = self._get_tempfile() + tmpfile.write(data) + tmpfile.close() + os.rename(tmpfile.name, dest) - 
def _build_metric_path(self, metric, aggregation=None): - path = os.path.join(self.basepath, str(metric.id)) - if aggregation: - return os.path.join(path, aggregation) - return path + def _build_metric_dir(self, metric): + return os.path.join(self.basepath, str(metric.id)) + + def _build_unaggregated_timeserie_path(self, metric): + return os.path.join(self._build_metric_dir(metric), 'none') + + def _build_metric_path(self, metric, aggregation): + return os.path.join(self._build_metric_dir(metric), + "agg_" + aggregation) + + def _build_metric_path_for_split(self, metric, aggregation, + timestamp_key, granularity): + return os.path.join(self._build_metric_path(metric, aggregation), + timestamp_key + "_" + str(granularity)) def _build_measure_path(self, metric_id, random_id=None): path = os.path.join(self.measure_path, six.text_type(metric_id)) @@ -86,13 +97,19 @@ class FileStorage(_carbonara.CarbonaraBasedStorage): return path def _create_metric(self, metric): - path = self._build_metric_path(metric) + path = self._build_metric_dir(metric) try: os.mkdir(path, 0o750) except OSError as e: if e.errno == errno.EEXIST: raise storage.MetricAlreadyExists(metric) raise + for agg in metric.archive_policy.aggregation_methods: + try: + os.mkdir(self._build_metric_path(metric, agg), 0o750) + except OSError as e: + if e.errno != errno.EEXIST: + raise def _store_measures(self, metric, data): tmpfile = self._get_tempfile() @@ -218,7 +235,7 @@ class FileStorage(_carbonara.CarbonaraBasedStorage): data) def _delete_metric(self, metric): - path = self._build_metric_path(metric) + path = self._build_metric_dir(metric) try: shutil.rmtree(path) except OSError as e: @@ -227,15 +244,48 @@ class FileStorage(_carbonara.CarbonaraBasedStorage): # measures) raise - def _get_measures(self, metric, aggregation): - path = self._build_metric_path(metric, aggregation) + def _get_measures(self, metric, timestamp_key, aggregation, granularity): + path = self._build_metric_path_for_split(metric, 
aggregation, + timestamp_key, granularity) try: with open(path, 'rb') as aggregation_file: return aggregation_file.read() except IOError as e: if e.errno == errno.ENOENT: - if os.path.exists(self._build_metric_path(metric)): + if os.path.exists(self._build_metric_dir(metric)): raise storage.AggregationDoesNotExist(metric, aggregation) - else: - raise storage.MetricDoesNotExist(metric) + raise storage.MetricDoesNotExist(metric) raise + + # The following methods deal with Gnocchi <= 1.3 archives + def _build_metric_archive_path(self, metric, aggregation): + return os.path.join(self._build_metric_dir(metric), aggregation) + + def _get_metric_archive(self, metric, aggregation): + """Retrieve data in the place we used to store TimeSerieArchive.""" + path = self._build_metric_archive_path(metric, aggregation) + try: + with open(path, 'rb') as aggregation_file: + return aggregation_file.read() + except IOError as e: + if e.errno == errno.ENOENT: + if os.path.exists(self._build_metric_dir(metric)): + raise storage.AggregationDoesNotExist(metric, aggregation) + raise storage.MetricDoesNotExist(metric) + raise + + def _store_metric_archive(self, metric, aggregation, data): + """Stores data in the place we used to store TimeSerieArchive.""" + self._atomic_file_store( + self._build_metric_archive_path(metric, aggregation), + data) + + def _delete_metric_archives(self, metric): + for agg in metric.archive_policy.aggregation_methods: + try: + os.unlink(self._build_metric_archive_path(metric, agg)) + except OSError as e: + if e.errno != errno.ENOENT: + # NOTE(jd) Maybe the metric has never been created (no + # measures) + raise diff --git a/gnocchi/storage/influxdb.py b/gnocchi/storage/influxdb.py index 13c8409f..8e7f9d5d 100644 --- a/gnocchi/storage/influxdb.py +++ b/gnocchi/storage/influxdb.py @@ -36,9 +36,9 @@ OPTS = [ cfg.StrOpt('influxdb_host', default='localhost', help='InfluxDB host'), - cfg.IntOpt('influxdb_port', - default=8086, - help='InfluxDB port'), + 
cfg.PortOpt('influxdb_port', + default=8086, + help='InfluxDB port'), cfg.StrOpt('influxdb_username', default='root', help='InfluxDB username'), @@ -150,13 +150,12 @@ class InfluxDBStorage(storage.StorageDriver): metric_id = self._get_metric_id(metric) - result = self._query(metric, "select * from \"%(metric_id)s\"" % - dict(metric_id=metric_id)) - result = list(result[metric_id]) - if from_timestamp: first_measure_timestamp = from_timestamp else: + result = self._query(metric, "select * from \"%(metric_id)s\"" % + dict(metric_id=metric_id)) + result = list(result[metric_id]) if result: first_measure_timestamp = self._timestamp_to_utc( timeutils.parse_isotime(result[0]['time'])) diff --git a/gnocchi/storage/swift.py b/gnocchi/storage/swift.py index 6d755d2d..0a0972b7 100644 --- a/gnocchi/storage/swift.py +++ b/gnocchi/storage/swift.py @@ -21,7 +21,10 @@ import uuid from oslo_config import cfg import retrying import six -from swiftclient import client as swclient +try: + from swiftclient import client as swclient +except ImportError: + swclient = None from gnocchi import storage from gnocchi.storage import _carbonara @@ -32,14 +35,12 @@ OPTS = [ default='1', help='Swift authentication version to user.'), cfg.StrOpt('swift_preauthurl', - default=None, help='Swift pre-auth URL.'), cfg.StrOpt('swift_authurl', default="http://localhost:8080/auth/v1.0", help='Swift auth URL.'), cfg.StrOpt('swift_preauthtoken', secret=True, - default=None, help='Swift token to user to authenticate.'), cfg.StrOpt('swift_user', default="admin:admin", @@ -67,6 +68,8 @@ def retry_if_result_empty(result): class SwiftStorage(_carbonara.CarbonaraBasedStorage): def __init__(self, conf): super(SwiftStorage, self).__init__(conf) + if swclient is None: + raise RuntimeError("python-swiftclient unavailable") self.swift = swclient.Connection( auth_version=conf.swift_auth_version, authurl=conf.swift_authurl, @@ -75,16 +78,16 @@ class SwiftStorage(_carbonara.CarbonaraBasedStorage): key=conf.swift_key, 
tenant_name=conf.swift_tenant_name, timeout=conf.swift_timeout) - self._lock = _carbonara.CarbonaraBasedStorageToozLock(conf) self._container_prefix = conf.swift_container_prefix self.swift.put_container(self.MEASURE_PREFIX) - def stop(self): - self._lock.stop() - def _container_name(self, metric): return '%s.%s' % (self._container_prefix, str(metric.id)) + @staticmethod + def _object_name(split_key, aggregation, granularity): + return '%s_%s_%s' % (split_key, aggregation, granularity) + def _create_metric(self, metric): # TODO(jd) A container per user in their account? resp = {} @@ -165,8 +168,12 @@ class SwiftStorage(_carbonara.CarbonaraBasedStorage): for f in files: self.swift.delete_object(self.MEASURE_PREFIX, f['name']) - def _store_metric_measures(self, metric, aggregation, data): - self.swift.put_object(self._container_name(metric), aggregation, data) + def _store_metric_measures(self, metric, timestamp_key, + aggregation, granularity, data): + self.swift.put_object( + self._container_name(metric), + self._object_name(timestamp_key, aggregation, granularity), + data) def _delete_metric_measures(self, metric, timestamp_key, aggregation, granularity): @@ -176,27 +183,32 @@ class SwiftStorage(_carbonara.CarbonaraBasedStorage): def _delete_metric(self, metric): self._delete_unaggregated_timeserie(metric) - for aggregation in metric.archive_policy.aggregation_methods: - try: - self.swift.delete_object(self._container_name(metric), - aggregation) - except swclient.ClientException as e: - if e.http_status != 404: - raise + container = self._container_name(metric) try: - self.swift.delete_container(self._container_name(metric)) + headers, files = self.swift.get_container( + container, full_listing=True) except swclient.ClientException as e: if e.http_status != 404: # Maybe it never has been created (no measure) raise + else: + for obj in files: + self.swift.delete_object(container, obj['name']) + try: + self.swift.delete_container(container) + except 
swclient.ClientException as e: + if e.http_status != 404: + # Deleted in the meantime? Whatever. + raise @retrying.retry(stop_max_attempt_number=4, wait_fixed=500, retry_on_result=retry_if_result_empty) - def _get_measures(self, metric, aggregation): + def _get_measures(self, metric, timestamp_key, aggregation, granularity): try: headers, contents = self.swift.get_object( - self._container_name(metric), aggregation) + self._container_name(metric), self._object_name( + timestamp_key, aggregation, granularity)) except swclient.ClientException as e: if e.http_status == 404: try: @@ -209,9 +221,70 @@ class SwiftStorage(_carbonara.CarbonaraBasedStorage): raise return contents + def _list_split_keys_for_metric(self, metric, aggregation, granularity): + container = self._container_name(metric) + try: + headers, files = self.swift.get_container( + container, full_listing=True) + except swclient.ClientException as e: + if e.http_status == 404: + raise storage.MetricDoesNotExist(metric) + raise + keys = [] + for f in files: + try: + key, agg, g = f['name'].split('_', 2) + except ValueError: + # Might be "none", or any other file. Be resilient. 
+ continue + if aggregation == agg and granularity == float(g): + keys.append(key) + return keys + + @retrying.retry(stop_max_attempt_number=4, + wait_fixed=500, + retry_on_result=retry_if_result_empty) + def _get_unaggregated_timeserie(self, metric): + try: + headers, contents = self.swift.get_object( + self._container_name(metric), "none") + except swclient.ClientException as e: + if e.http_status == 404: + raise storage.MetricDoesNotExist(metric) + raise + return contents + + def _store_unaggregated_timeserie(self, metric, data): + self.swift.put_object(self._container_name(metric), "none", data) + def _delete_unaggregated_timeserie(self, metric): try: self.swift.delete_object(self._container_name(metric), "none") except swclient.ClientException as e: if e.http_status != 404: raise + + # The following methods deal with Gnocchi <= 1.3 archives + def _get_metric_archive(self, metric, aggregation): + """Retrieve data in the place we used to store TimeSerieArchive.""" + try: + headers, contents = self.swift.get_object( + self._container_name(metric), aggregation) + except swclient.ClientException as e: + if e.http_status == 404: + raise storage.AggregationDoesNotExist(metric, aggregation) + raise + return contents + + def _store_metric_archive(self, metric, aggregation, data): + """Stores data in the place we used to store TimeSerieArchive.""" + self.swift.put_object(self._container_name(metric), aggregation, data) + + def _delete_metric_archives(self, metric): + for aggregation in metric.archive_policy.aggregation_methods: + try: + self.swift.delete_object(self._container_name(metric), + aggregation) + except swclient.ClientException as e: + if e.http_status != 404: + raise diff --git a/gnocchi/tests/base.py b/gnocchi/tests/base.py index 2f46e711..76b82db5 100644 --- a/gnocchi/tests/base.py +++ b/gnocchi/tests/base.py @@ -22,7 +22,10 @@ from oslotest import base from oslotest import mockpatch import six from stevedore import extension -from swiftclient import 
exceptions as swexc +try: + from swiftclient import exceptions as swexc +except ImportError: + swexc = None from testtools import testcase from tooz import coordination @@ -122,8 +125,7 @@ class FakeRadosModule(object): def get_xattrs(self, key): if key not in self.kvs: raise FakeRadosModule.ObjectNotFound - return iter((k, v) for k, v in - self.kvs_xattrs.get(key, {}).items()) + return six.iteritems(self.kvs_xattrs.get(key, {}).copy()) def set_xattr(self, key, attr, value): self._ensure_key_exists(key) @@ -201,7 +203,7 @@ class FakeSwiftClient(object): files = [] directories = set() - for k, v in six.iteritems(container): + for k, v in six.iteritems(container.copy()): if path and not k.startswith(path): continue @@ -342,16 +344,12 @@ class TestCase(base.BaseTestCase): def setUp(self): super(TestCase, self).setUp() - self.conf = service.prepare_service([]) + self.conf = service.prepare_service([], + default_config_files=[]) self.conf.set_override('policy_file', self.path_get('etc/gnocchi/policy.json'), group="oslo_policy") - self.conf.set_override( - 'url', - os.environ.get("GNOCCHI_TEST_INDEXER_URL", "null://"), - 'indexer') - self.index = indexer.get_driver(self.conf) self.index.connect() @@ -386,9 +384,10 @@ class TestCase(base.BaseTestCase): except indexer.ArchivePolicyAlreadyExists: pass - self.useFixture(mockpatch.Patch( - 'swiftclient.client.Connection', - FakeSwiftClient)) + if swexc: + self.useFixture(mockpatch.Patch( + 'swiftclient.client.Connection', + FakeSwiftClient)) self.useFixture(mockpatch.Patch('gnocchi.storage.ceph.rados', FakeRadosModule())) @@ -419,6 +418,12 @@ class TestCase(base.BaseTestCase): 'storage') self.storage = storage.get_driver(self.conf) + # NOTE(jd) Do not upgrade the storage. We don't really need the storage + # upgrade for now, and the code that upgrade from pre-1.3 + # (TimeSerieArchive) uses a lot of parallel lock, which makes tooz + # explodes because MySQL does not support that many connections in real + # life. 
+ # self.storage.upgrade(self.index) self.mgr = extension.ExtensionManager('gnocchi.aggregates', invoke_on_load=True) diff --git a/gnocchi/tests/gabbi/fixtures.py b/gnocchi/tests/gabbi/fixtures.py index 99dee746..03e926ce 100644 --- a/gnocchi/tests/gabbi/fixtures.py +++ b/gnocchi/tests/gabbi/fixtures.py @@ -70,7 +70,12 @@ class ConfigFixture(fixture.GabbiFixture): data_tmp_dir = tempfile.mkdtemp(prefix='gnocchi') - conf = service.prepare_service([]) + if os.getenv("GABBI_LIVE"): + dcf = None + else: + dcf = [] + conf = service.prepare_service([], + default_config_files=dcf) conf.set_override('paste_config', os.path.abspath('etc/gnocchi/api-paste.ini'), @@ -79,13 +84,6 @@ class ConfigFixture(fixture.GabbiFixture): self.conf = conf self.tmp_dir = data_tmp_dir - # Use the indexer set in the conf, unless we have set an - # override via the environment. - if 'GNOCCHI_TEST_INDEXER_URL' in os.environ: - conf.set_override('url', - os.environ.get("GNOCCHI_TEST_INDEXER_URL"), - 'indexer') - # TODO(jd) It would be cool if Gabbi was able to use the null:// # indexer, but this makes the API returns a lot of 501 error, which # Gabbi does not want to see, so let's just disable it. @@ -117,9 +115,6 @@ class ConfigFixture(fixture.GabbiFixture): conf.set_override('pecan_debug', False, 'api') - # Turn off any middleware. 
- conf.set_override('middlewares', [], 'api') - # Set pagination to a testable value conf.set_override('max_limit', 7, 'api') @@ -136,7 +131,7 @@ class ConfigFixture(fixture.GabbiFixture): } # start up a thread to async process measures - self.metricd_thread = MetricdThread(index, storage.get_driver(conf)) + self.metricd_thread = MetricdThread(index, s) self.metricd_thread.start() def stop_fixture(self): diff --git a/gnocchi/tests/gabbi/gabbits-live/live.yaml b/gnocchi/tests/gabbi/gabbits-live/live.yaml index 169410cb..bca36cdf 100644 --- a/gnocchi/tests/gabbi/gabbits-live/live.yaml +++ b/gnocchi/tests/gabbi/gabbits-live/live.yaml @@ -21,12 +21,13 @@ tests: - '{"definition": [{"points": 12, "timespan": "1:00:00", "granularity": "0:05:00"}, {"points": 24, "timespan": "1 day, 0:00:00", "granularity": "1:00:00"}, {"points": 30, "timespan": "30 days, 0:00:00", "granularity": "1 day, 0:00:00"}], "back_window": 0, "name": "low", "aggregation_methods": ["std", "count", "95pct", "min", "max", "sum", "median", "mean"]}' - '{"definition": [{"points": 60, "timespan": "1:00:00", "granularity": "0:01:00"}, {"points": 168, "timespan": "7 days, 0:00:00", "granularity": "1:00:00"}, {"points": 365, "timespan": "365 days, 0:00:00", "granularity": "1 day, 0:00:00"}], "back_window": 0, "name": "medium", "aggregation_methods": ["std", "count", "95pct", "min", "max", "sum", "median", "mean"]}' - - name: check generic resources + - name: check generic resources with the default one for statsd url: /v1/resource/generic response_headers: content-type: /application/json/ - response_strings: - - "[]" + response_json_paths: + $[0].type: generic + $.`len`: 1 - name: post unicode archive policy url: /v1/archive_policy diff --git a/gnocchi/tests/gabbi/gabbits/aggregation.yaml b/gnocchi/tests/gabbi/gabbits/aggregation.yaml new file mode 100644 index 00000000..7c91cc3d --- /dev/null +++ b/gnocchi/tests/gabbi/gabbits/aggregation.yaml @@ -0,0 +1,243 @@ +fixtures: + - ConfigFixture + +tests: + - 
name: create archive policy + desc: for later use + url: /v1/archive_policy + method: POST + request_headers: + content-type: application/json + x-roles: admin + data: + name: low + definition: + - granularity: 1 second + - granularity: 300 seconds + status: 201 + +# Aggregation by metric ids + + - name: create metric 1 + url: /v1/metric + request_headers: + content-type: application/json + method: post + data: + archive_policy_name: low + status: 201 + + - name: create metric 2 + url: /v1/metric + request_headers: + content-type: application/json + method: post + data: + archive_policy_name: low + status: 201 + + - name: get metric list to push metric 1 + url: /v1/metric + + - name: push measurements to metric 1 + url: /v1/metric/$RESPONSE['$[0].id']/measures + request_headers: + content-type: application/json + method: post + data: + - timestamp: "2015-03-06T14:33:57" + value: 43.1 + - timestamp: "2015-03-06T14:34:12" + value: 12 + status: 202 + + - name: get metric list to push metric 2 + url: /v1/metric + + - name: push measurements to metric 2 + url: /v1/metric/$RESPONSE['$[1].id']/measures + request_headers: + content-type: application/json + method: post + data: + - timestamp: "2015-03-06T14:33:57" + value: 3.1 + - timestamp: "2015-03-06T14:34:12" + value: 2 + status: 202 + + - name: get metric list to get aggregates + url: /v1/metric + + - name: get measure aggregates by granularity not float + url: /v1/aggregation/metric?metric=$RESPONSE['$[0].id']&metric=$RESPONSE['$[1].id']&granularity=foobar + status: 400 + + - name: get metric list to get aggregates 2 + url: /v1/metric + + - name: get measure aggregates by granularity + url: /v1/aggregation/metric?metric=$RESPONSE['$[0].id']&metric=$RESPONSE['$[1].id']&granularity=1 + poll: + count: 10 + delay: 1 + response_json_paths: + $: + - ['2015-03-06T14:33:57+00:00', 1.0, 23.1] + - ['2015-03-06T14:34:12+00:00', 1.0, 7.0] + + - name: get metric list to push metric 3 + url: /v1/metric + + - name: get measure 
aggregates by granularity with timestamps + url: /v1/aggregation/metric?metric=$RESPONSE['$[0].id']&metric=$RESPONSE['$[1].id']&start=2015-03-06T15:33:57%2B01:00&stop=2015-03-06T15:34:00%2B01:00 + poll: + count: 10 + delay: 1 + response_json_paths: + $: + - ['2015-03-06T14:30:00+00:00', 300.0, 15.05] + - ['2015-03-06T14:33:57+00:00', 1.0, 23.1] + +# Aggregation by resource and metric_name + + - name: post a resource + url: /v1/resource/generic + method: post + request_headers: + x-user-id: 0fbb231484614b1a80131fc22f6afc9c + x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea + content-type: application/json + data: + id: bcd3441c-b5aa-4d1b-af9a-5a72322bb269 + metrics: + agg_meter: + archive_policy_name: low + status: 201 + + - name: post another resource + url: /v1/resource/generic + method: post + request_headers: + x-user-id: 0fbb231484614b1a80131fc22f6afc9c + x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea + content-type: application/json + data: + id: 1b0a8345-b279-4cb8-bd7a-2cb83193624f + metrics: + agg_meter: + archive_policy_name: low + status: 201 + + - name: push measurements to resource 1 + url: /v1/resource/generic/bcd3441c-b5aa-4d1b-af9a-5a72322bb269/metric/agg_meter/measures + request_headers: + x-user-id: 0fbb231484614b1a80131fc22f6afc9c + x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea + content-type: application/json + method: post + data: + - timestamp: "2015-03-06T14:33:57" + value: 43.1 + - timestamp: "2015-03-06T14:34:12" + value: 12 + status: 202 + + - name: push measurements to resource 2 + url: /v1/resource/generic/1b0a8345-b279-4cb8-bd7a-2cb83193624f/metric/agg_meter/measures + request_headers: + x-user-id: 0fbb231484614b1a80131fc22f6afc9c + x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea + content-type: application/json + method: post + data: + - timestamp: "2015-03-06T14:33:57" + value: 3.1 + - timestamp: "2015-03-06T14:34:12" + value: 2 + status: 202 + + - name: get measure aggregates by granularity from resources + method: POST + url: 
/v1/aggregation/resource/generic/metric/agg_meter?granularity=1 + request_headers: + x-user-id: 0fbb231484614b1a80131fc22f6afc9c + x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea + content-type: application/json + poll: + count: 10 + delay: 1 + response_json_paths: + $: + - ['2015-03-06T14:33:57+00:00', 1.0, 23.1] + - ['2015-03-06T14:34:12+00:00', 1.0, 7.0] + + - name: get measure aggregates by granularity with timestamps from resources + method: POST + url: /v1/aggregation/resource/generic/metric/agg_meter?start=2015-03-06T15:33:57%2B01:00&stop=2015-03-06T15:34:00%2B01:00 + request_headers: + x-user-id: 0fbb231484614b1a80131fc22f6afc9c + x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea + content-type: application/json + poll: + count: 10 + delay: 1 + response_json_paths: + $: + - ['2015-03-06T14:30:00+00:00', 300.0, 15.05] + - ['2015-03-06T14:33:57+00:00', 1.0, 23.1] + +# Some negative tests + + - name: get measure aggregates with wrong GET + url: /v1/aggregation/resource/generic/metric/agg_meter + status: 405 + + - name: get measure aggregates with wrong metric_name + method: POST + url: /v1/aggregation/resource/generic/metric/notexists + request_headers: + x-user-id: 0fbb231484614b1a80131fc22f6afc9c + x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea + content-type: application/json + status: 200 + response_json_paths: + $.`len`: 0 + + - name: get measure aggregates with wrong resource + method: POST + url: /v1/aggregation/resource/notexits/metric/agg_meter + request_headers: + x-user-id: 0fbb231484614b1a80131fc22f6afc9c + x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea + content-type: application/json + status: 404 + response_strings: + - Resource type notexits does not exist + + - name: get measure aggregates with wrong path + method: POST + url: /v1/aggregation/re/generic/metric/agg_meter + request_headers: + x-user-id: 0fbb231484614b1a80131fc22f6afc9c + x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea + content-type: application/json + status: 404 + + - name: get 
measure aggregates with wrong path 2 + method: POST + url: /v1/aggregation/resource/generic/notexists/agg_meter + request_headers: + x-user-id: 0fbb231484614b1a80131fc22f6afc9c + x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea + content-type: application/json + status: 404 + + - name: get measure aggregates with no resource name + method: POST + url: /v1/aggregation/resource/generic/metric + request_headers: + x-user-id: 0fbb231484614b1a80131fc22f6afc9c + x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea + content-type: application/json + status: 405 diff --git a/gnocchi/tests/gabbi/gabbits/archive.yaml b/gnocchi/tests/gabbi/gabbits/archive.yaml index 4b59ab02..36e22b1d 100644 --- a/gnocchi/tests/gabbi/gabbits/archive.yaml +++ b/gnocchi/tests/gabbi/gabbits/archive.yaml @@ -18,9 +18,9 @@ tests: # http://haacked.com/archive/2009/06/25/json-hijacking.aspx/ # The caveats point out that this is only an issue if your data is # sensitive, which in this case...? -# However, the api-wg has made it recommedation that collections -# be returned as an object with a named key with a value of a list -# as follows: {"archive_policies": [...]} +# However, the api-wg has made it recommendation that collections +# should be returned as an object with a named key with a value of +# a list as follows: {"archive_policies": [...]} # This allows for extensibility such as future support for pagination. # Do we care? 
diff --git a/gnocchi/tests/gabbi/gabbits/batch_measures.yaml b/gnocchi/tests/gabbi/gabbits/batch_measures.yaml new file mode 100644 index 00000000..dda6cc32 --- /dev/null +++ b/gnocchi/tests/gabbi/gabbits/batch_measures.yaml @@ -0,0 +1,173 @@ +fixtures: + - ConfigFixture + +tests: + - name: create archive policy + desc: for later use + url: /v1/archive_policy + method: POST + request_headers: + content-type: application/json + x-roles: admin + data: + name: simple + definition: + - granularity: 1 second + status: 201 + + - name: create metric + url: /v1/metric + request_headers: + content-type: application/json + method: post + data: + archive_policy_name: simple + status: 201 + + - name: push measurements to metric + url: /v1/batch/metrics/measures + request_headers: + content-type: application/json + method: post + data: + $RESPONSE['$.id']: + - timestamp: "2015-03-06T14:33:57" + value: 43.1 + - timestamp: "2015-03-06T14:34:12" + value: 12 + status: 202 + + - name: push measurements to unknown metrics + url: /v1/batch/metrics/measures + request_headers: + content-type: application/json + method: post + data: + 37AEC8B7-C0D9-445B-8AB9-D3C6312DCF5C: + - timestamp: "2015-03-06T14:33:57" + value: 43.1 + - timestamp: "2015-03-06T14:34:12" + value: 12 + 37AEC8B7-C0D9-445B-8AB9-D3C6312DCF5D: + - timestamp: "2015-03-06T14:33:57" + value: 43.1 + - timestamp: "2015-03-06T14:34:12" + value: 12 + status: 400 + response_strings: + - "Unknown metrics: 37aec8b7-c0d9-445b-8ab9-d3c6312dcf5c, 37aec8b7-c0d9-445b-8ab9-d3c6312dcf5d" + + - name: push measurements to unknown named metrics + url: /v1/batch/resources/metrics/measures + request_headers: + content-type: application/json + method: post + data: + 37AEC8B7-C0D9-445B-8AB9-D3C6312DCF5D: + cpu_util: + - timestamp: "2015-03-06T14:33:57" + value: 43.1 + - timestamp: "2015-03-06T14:34:12" + value: 12 + 46c9418d-d63b-4cdd-be89-8f57ffc5952e: + disk.iops: + - timestamp: "2015-03-06T14:33:57" + value: 43.1 + - timestamp: 
"2015-03-06T14:34:12" + value: 12 + status: 400 + response_strings: + - "Unknown metrics: 37aec8b7-c0d9-445b-8ab9-d3c6312dcf5d/cpu_util, 46c9418d-d63b-4cdd-be89-8f57ffc5952e/disk.iops" + + - name: create second metric + url: /v1/metric + request_headers: + content-type: application/json + method: post + data: + archive_policy_name: simple + status: 201 + + - name: post a resource + url: /v1/resource/generic + method: post + request_headers: + content-type: application/json + data: + id: 46c9418d-d63b-4cdd-be89-8f57ffc5952e + user_id: 0fbb2314-8461-4b1a-8013-1fc22f6afc9c + project_id: f3d41b77-0cc1-4f0b-b94a-1d5be9c0e3ea + metrics: + disk.iops: + archive_policy_name: simple + cpu_util: + archive_policy_name: simple + status: 201 + + - name: post a second resource + url: /v1/resource/generic + method: post + request_headers: + content-type: application/json + data: + id: f0f6038f-f82c-4f30-8d81-65db8be249fe + user_id: 0fbb2314-8461-4b1a-8013-1fc22f6afc9c + project_id: f3d41b77-0cc1-4f0b-b94a-1d5be9c0e3ea + metrics: + net.speed: + archive_policy_name: simple + mem_usage: + archive_policy_name: simple + status: 201 + + - name: list metrics + url: /v1/metric + + - name: push measurements to two metrics + url: /v1/batch/metrics/measures + request_headers: + content-type: application/json + method: post + data: + $RESPONSE['$[0].id']: + - timestamp: "2015-03-06T14:33:57" + value: 43.1 + - timestamp: "2015-03-06T14:34:12" + value: 12 + $RESPONSE['$[1].id']: + - timestamp: "2015-03-06T14:33:57" + value: 43.1 + - timestamp: "2015-03-06T14:34:12" + value: 12 + status: 202 + + - name: push measurements to two named metrics + url: /v1/batch/resources/metrics/measures + request_headers: + content-type: application/json + method: post + data: + 46c9418d-d63b-4cdd-be89-8f57ffc5952e: + disk.iops: + - timestamp: "2015-03-06T14:33:57" + value: 43.1 + - timestamp: "2015-03-06T14:34:12" + value: 12 + cpu_util: + - timestamp: "2015-03-06T14:33:57" + value: 43.1 + - timestamp: 
"2015-03-06T14:34:12" + value: 12 + f0f6038f-f82c-4f30-8d81-65db8be249fe: + mem_usage: + - timestamp: "2015-03-06T14:33:57" + value: 43.1 + - timestamp: "2015-03-06T14:34:12" + value: 12 + net.speed: + - timestamp: "2015-03-06T14:33:57" + value: 43.1 + - timestamp: "2015-03-06T14:34:12" + value: 12 + + status: 202 diff --git a/gnocchi/tests/gabbi/gabbits/metric_granularity.yaml b/gnocchi/tests/gabbi/gabbits/metric_granularity.yaml index 232a8adb..e132190c 100644 --- a/gnocchi/tests/gabbi/gabbits/metric_granularity.yaml +++ b/gnocchi/tests/gabbi/gabbits/metric_granularity.yaml @@ -42,9 +42,9 @@ tests: - name: get measurements invalid granularity url: /v1/metric/$RESPONSE['$[0].id']/measures?granularity=42 - status: 200 - response_json_paths: - $: [] + status: 404 + response_strings: + - Granularity '42.0' for metric $RESPONSE['$[0].id'] does not exist - name: get metric list for granularity url: /v1/metric diff --git a/gnocchi/tests/gabbi/gabbits/resource.yaml b/gnocchi/tests/gabbi/gabbits/resource.yaml index 7c256eec..c43c14a7 100644 --- a/gnocchi/tests/gabbi/gabbits/resource.yaml +++ b/gnocchi/tests/gabbi/gabbits/resource.yaml @@ -54,9 +54,9 @@ tests: redirects: true response_json_paths: $.version: "1.0" - $.links.`len`: 9 + $.links.`len`: 10 $.links[0].href: $SCHEME://$NETLOC/v1 - $.links[7].href: $SCHEME://$NETLOC/v1/search + $.links[7].href: $SCHEME://$NETLOC/v1/resource - name: root of resource url: /v1/resource diff --git a/gnocchi/tests/gabbi/test_gabbi_live.py b/gnocchi/tests/gabbi/test_gabbi_live.py index b9425631..63bc7c08 100644 --- a/gnocchi/tests/gabbi/test_gabbi_live.py +++ b/gnocchi/tests/gabbi/test_gabbi_live.py @@ -44,5 +44,5 @@ def load_tests(loader, tests, pattern): host=parsed_url.hostname, port=port, prefix=prefix) - elif os.getenv("GABBI_LIVE_FAIL_IF_NO_TEST"): + elif os.getenv("GABBI_LIVE"): raise RuntimeError('"GNOCCHI_SERVICE_URL" is not set') diff --git a/gnocchi/tests/indexer/sqlalchemy/test_migrations.py 
b/gnocchi/tests/indexer/sqlalchemy/test_migrations.py index 0b917e39..63f22f47 100644 --- a/gnocchi/tests/indexer/sqlalchemy/test_migrations.py +++ b/gnocchi/tests/indexer/sqlalchemy/test_migrations.py @@ -40,7 +40,7 @@ class ModelsMigrationsSync( return sqlalchemy_base.Base.metadata def get_engine(self): - return self.index.engine_facade.get_engine() + return self.index.get_engine() @staticmethod def db_sync(engine): diff --git a/gnocchi/tests/storage/test_carbonara.py b/gnocchi/tests/storage/test_carbonara.py index 4e673035..63a8bd33 100644 --- a/gnocchi/tests/storage/test_carbonara.py +++ b/gnocchi/tests/storage/test_carbonara.py @@ -1,6 +1,6 @@ # -*- encoding: utf-8 -*- # -# Copyright © 2015 eNovance +# Copyright © 2015 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain @@ -13,7 +13,8 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
-from oslotest import base +import datetime +import uuid import mock import pandas @@ -44,47 +45,18 @@ class TestCarbonaraMigration(tests_base.TestCase): if not isinstance(self.storage, _carbonara.CarbonaraBasedStorage): self.skipTest("This driver is not based on Carbonara") + self.metric = storage.Metric(uuid.uuid4(), + self.archive_policies['low']) -class TestMeasureQuery(base.BaseTestCase): - def test_equal(self): - q = storage.MeasureQuery({"=": 4}) - self.assertTrue(q(4)) - self.assertFalse(q(40)) - - def test_gt(self): - q = storage.MeasureQuery({">": 4}) - self.assertTrue(q(40)) - self.assertFalse(q(4)) - - def test_and(self): - q = storage.MeasureQuery({"and": [{">": 4}, {"<": 10}]}) - self.assertTrue(q(5)) - self.assertFalse(q(40)) - self.assertFalse(q(1)) - - def test_or(self): - q = storage.MeasureQuery({"or": [{"=": 4}, {"=": 10}]}) - self.assertTrue(q(4)) - self.assertTrue(q(10)) - self.assertFalse(q(-1)) - - def test_modulo(self): - q = storage.MeasureQuery({"=": [{"%": 5}, 0]}) - self.assertTrue(q(5)) - self.assertTrue(q(10)) - self.assertFalse(q(-1)) - self.assertFalse(q(6)) - - def test_math(self): - q = storage.MeasureQuery( - { - u"and": [ - # v+5 is bigger 0 - {u"≥": [{u"+": 5}, 0]}, - # v-6 is not 5 - {u"≠": [5, {u"-": 6}]}, - ], - } + archive = carbonara.TimeSerieArchive.from_definitions( + [(v.granularity, v.points) + for v in self.metric.archive_policy.definition] + ) + + archive_max = carbonara.TimeSerieArchive.from_definitions( + [(v.granularity, v.points) + for v in self.metric.archive_policy.definition], + aggregation_method='max', ) for a in (archive, archive_max): diff --git a/gnocchi/tests/test_archive_policy.py b/gnocchi/tests/test_archive_policy.py index d1ae6b63..3b2afb08 100644 --- a/gnocchi/tests/test_archive_policy.py +++ b/gnocchi/tests/test_archive_policy.py @@ -28,7 +28,8 @@ class TestArchivePolicy(base.BaseTestCase): ["*"]) def test_aggregation_methods(self): - conf = service.prepare_service([]) + conf = 
service.prepare_service([], + default_config_files=[]) ap = archive_policy.ArchivePolicy("foobar", 0, diff --git a/gnocchi/tests/test_carbonara.py b/gnocchi/tests/test_carbonara.py index 5f3791cc..a5d17260 100644 --- a/gnocchi/tests/test_carbonara.py +++ b/gnocchi/tests/test_carbonara.py @@ -2,8 +2,6 @@ # # Copyright © 2014-2015 eNovance # -# Authors: Julien Danjou -# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at @@ -30,13 +28,14 @@ from gnocchi import carbonara class TestBoundTimeSerie(base.BaseTestCase): @staticmethod def test_base(): - carbonara.BoundTimeSerie([datetime.datetime(2014, 1, 1, 12, 0, 0), - datetime.datetime(2014, 1, 1, 12, 0, 4), - datetime.datetime(2014, 1, 1, 12, 0, 9)], - [3, 5, 6]) + carbonara.BoundTimeSerie.from_data( + [datetime.datetime(2014, 1, 1, 12, 0, 0), + datetime.datetime(2014, 1, 1, 12, 0, 4), + datetime.datetime(2014, 1, 1, 12, 0, 9)], + [3, 5, 6]) def test_block_size(self): - ts = carbonara.BoundTimeSerie( + ts = carbonara.BoundTimeSerie.from_data( [datetime.datetime(2014, 1, 1, 12, 0, 0), datetime.datetime(2014, 1, 1, 12, 0, 4), datetime.datetime(2014, 1, 1, 12, 0, 9)], @@ -48,7 +47,7 @@ class TestBoundTimeSerie(base.BaseTestCase): self.assertEqual(2, len(ts)) def test_block_size_back_window(self): - ts = carbonara.BoundTimeSerie( + ts = carbonara.BoundTimeSerie.from_data( [datetime.datetime(2014, 1, 1, 12, 0, 0), datetime.datetime(2014, 1, 1, 12, 0, 4), datetime.datetime(2014, 1, 1, 12, 0, 9)], @@ -61,7 +60,7 @@ class TestBoundTimeSerie(base.BaseTestCase): self.assertEqual(3, len(ts)) def test_block_size_unordered(self): - ts = carbonara.BoundTimeSerie( + ts = carbonara.BoundTimeSerie.from_data( [datetime.datetime(2014, 1, 1, 12, 0, 0), datetime.datetime(2014, 1, 1, 12, 0, 9), datetime.datetime(2014, 1, 1, 12, 0, 5)], @@ -73,7 +72,7 @@ class TestBoundTimeSerie(base.BaseTestCase): self.assertEqual(2, 
len(ts)) def test_duplicate_timestamps(self): - ts = carbonara.BoundTimeSerie( + ts = carbonara.BoundTimeSerie.from_data( [datetime.datetime(2014, 1, 1, 12, 0, 0), datetime.datetime(2014, 1, 1, 12, 0, 9), datetime.datetime(2014, 1, 1, 12, 0, 9)], @@ -202,7 +201,7 @@ class TestAggregatedTimeSerie(base.BaseTestCase): def test_down_sampling(self): ts = carbonara.AggregatedTimeSerie(sampling='5Min') - ts.update(carbonara.TimeSerie( + ts.update(carbonara.TimeSerie.from_data( [datetime.datetime(2014, 1, 1, 12, 0, 0), datetime.datetime(2014, 1, 1, 12, 0, 4), datetime.datetime(2014, 1, 1, 12, 0, 9)], @@ -214,7 +213,7 @@ class TestAggregatedTimeSerie(base.BaseTestCase): ts = carbonara.AggregatedTimeSerie( sampling='1Min', max_size=2) - ts.update(carbonara.TimeSerie( + ts.update(carbonara.TimeSerie.from_data( [datetime.datetime(2014, 1, 1, 12, 0, 0), datetime.datetime(2014, 1, 1, 12, 1, 4), datetime.datetime(2014, 1, 1, 12, 1, 9), @@ -229,7 +228,7 @@ class TestAggregatedTimeSerie(base.BaseTestCase): sampling='1Min', max_size=2, aggregation_method='max') - ts.update(carbonara.TimeSerie( + ts.update(carbonara.TimeSerie.from_data( [datetime.datetime(2014, 1, 1, 12, 0, 0), datetime.datetime(2014, 1, 1, 12, 1, 4), datetime.datetime(2014, 1, 1, 12, 1, 9), @@ -244,7 +243,7 @@ class TestAggregatedTimeSerie(base.BaseTestCase): sampling='1Min', max_size=2, aggregation_method='max') - ts.update(carbonara.TimeSerie( + ts.update(carbonara.TimeSerie.from_data( [datetime.datetime(2014, 1, 1, 12, 0, 0), datetime.datetime(2014, 1, 1, 12, 1, 4), datetime.datetime(2014, 1, 1, 12, 1, 9), @@ -253,23 +252,153 @@ class TestAggregatedTimeSerie(base.BaseTestCase): ts2 = carbonara.AggregatedTimeSerie.from_dict(ts.to_dict()) self.assertEqual(ts, ts2) + def test_aggregated_different_archive_no_overlap(self): + tsc1 = carbonara.AggregatedTimeSerie(sampling=60, max_size=50) + tsb1 = carbonara.BoundTimeSerie(block_size=tsc1.sampling) + tsc2 = carbonara.AggregatedTimeSerie(sampling=60, max_size=50) + tsb2 
= carbonara.BoundTimeSerie(block_size=tsc2.sampling) + + tsb1.set_values([(datetime.datetime(2014, 1, 1, 11, 46, 4), 4)], + before_truncate_callback=tsc1.update) + tsb2.set_values([(datetime.datetime(2014, 1, 1, 9, 1, 4), 4)], + before_truncate_callback=tsc2.update) + + dtfrom = datetime.datetime(2014, 1, 1, 11, 0, 0) + self.assertRaises(carbonara.UnAggregableTimeseries, + carbonara.AggregatedTimeSerie.aggregated, + [tsc1, tsc2], from_timestamp=dtfrom) + + def test_aggregated_different_archive_no_overlap2(self): + tsc1 = carbonara.AggregatedTimeSerie(sampling=60, max_size=50) + tsb1 = carbonara.BoundTimeSerie(block_size=tsc1.sampling) + tsc2 = carbonara.AggregatedTimeSerie(sampling=60, max_size=50) + + tsb1.set_values([(datetime.datetime(2014, 1, 1, 12, 3, 0), 4)], + before_truncate_callback=tsc1.update) + self.assertRaises(carbonara.UnAggregableTimeseries, + carbonara.AggregatedTimeSerie.aggregated, + [tsc1, tsc2]) + + def test_aggregated_different_archive_overlap(self): + tsc1 = carbonara.AggregatedTimeSerie(sampling=60, max_size=10) + tsb1 = carbonara.BoundTimeSerie(block_size=tsc1.sampling) + tsc2 = carbonara.AggregatedTimeSerie(sampling=60, max_size=10) + tsb2 = carbonara.BoundTimeSerie(block_size=tsc2.sampling) + + # NOTE(sileht): minute 8 is missing in both and + # minute 7 in tsc2 too, but it looks like we have + # enough point to do the aggregation + tsb1.set_values([ + (datetime.datetime(2014, 1, 1, 11, 0, 0), 4), + (datetime.datetime(2014, 1, 1, 12, 1, 0), 3), + (datetime.datetime(2014, 1, 1, 12, 2, 0), 2), + (datetime.datetime(2014, 1, 1, 12, 3, 0), 4), + (datetime.datetime(2014, 1, 1, 12, 4, 0), 2), + (datetime.datetime(2014, 1, 1, 12, 5, 0), 3), + (datetime.datetime(2014, 1, 1, 12, 6, 0), 4), + (datetime.datetime(2014, 1, 1, 12, 7, 0), 10), + (datetime.datetime(2014, 1, 1, 12, 9, 0), 2), + ], before_truncate_callback=tsc1.update) + + tsb2.set_values([ + (datetime.datetime(2014, 1, 1, 12, 1, 0), 3), + (datetime.datetime(2014, 1, 1, 12, 2, 0), 4), + 
(datetime.datetime(2014, 1, 1, 12, 3, 0), 4), + (datetime.datetime(2014, 1, 1, 12, 4, 0), 6), + (datetime.datetime(2014, 1, 1, 12, 5, 0), 3), + (datetime.datetime(2014, 1, 1, 12, 6, 0), 6), + (datetime.datetime(2014, 1, 1, 12, 9, 0), 2), + (datetime.datetime(2014, 1, 1, 12, 11, 0), 2), + (datetime.datetime(2014, 1, 1, 12, 12, 0), 2), + ], before_truncate_callback=tsc2.update) + + dtfrom = datetime.datetime(2014, 1, 1, 12, 0, 0) + dtto = datetime.datetime(2014, 1, 1, 12, 10, 0) + + # By default we require 100% of point that overlap + # so that fail + self.assertRaises(carbonara.UnAggregableTimeseries, + carbonara.AggregatedTimeSerie.aggregated, + [tsc1, tsc2], from_timestamp=dtfrom, + to_timestamp=dtto) + + # Retry with 80% and it works + output = carbonara.AggregatedTimeSerie.aggregated([ + tsc1, tsc2], from_timestamp=dtfrom, to_timestamp=dtto, + needed_percent_of_overlap=80.0) + + self.assertEqual([ + (pandas.Timestamp('2014-01-01 12:01:00'), 60.0, 3.0), + (pandas.Timestamp('2014-01-01 12:02:00'), 60.0, 3.0), + (pandas.Timestamp('2014-01-01 12:03:00'), 60.0, 4.0), + (pandas.Timestamp('2014-01-01 12:04:00'), 60.0, 4.0), + (pandas.Timestamp('2014-01-01 12:05:00'), 60.0, 3.0), + (pandas.Timestamp('2014-01-01 12:06:00'), 60.0, 5.0), + (pandas.Timestamp('2014-01-01 12:07:00'), 60.0, 10.0), + (pandas.Timestamp('2014-01-01 12:09:00'), 60.0, 2.0), + ], output) + + def test_aggregated_different_archive_overlap_edge_missing1(self): + tsc1 = carbonara.AggregatedTimeSerie(sampling=60, max_size=10) + tsb1 = carbonara.BoundTimeSerie(block_size=tsc1.sampling) + tsc2 = carbonara.AggregatedTimeSerie(sampling=60, max_size=10) + tsb2 = carbonara.BoundTimeSerie(block_size=tsc2.sampling) + + tsb1.set_values([ + (datetime.datetime(2014, 1, 1, 12, 3, 0), 9), + (datetime.datetime(2014, 1, 1, 12, 4, 0), 1), + (datetime.datetime(2014, 1, 1, 12, 5, 0), 2), + (datetime.datetime(2014, 1, 1, 12, 6, 0), 7), + (datetime.datetime(2014, 1, 1, 12, 7, 0), 5), + (datetime.datetime(2014, 1, 1, 12, 8, 
0), 3), + ], before_truncate_callback=tsc1.update) + + tsb2.set_values([ + (datetime.datetime(2014, 1, 1, 11, 0, 0), 6), + (datetime.datetime(2014, 1, 1, 12, 1, 0), 2), + (datetime.datetime(2014, 1, 1, 12, 2, 0), 13), + (datetime.datetime(2014, 1, 1, 12, 3, 0), 24), + (datetime.datetime(2014, 1, 1, 12, 4, 0), 4), + (datetime.datetime(2014, 1, 1, 12, 5, 0), 16), + (datetime.datetime(2014, 1, 1, 12, 6, 0), 12), + ], before_truncate_callback=tsc2.update) + + # By default we require 100% of point that overlap + # but we allow that the last datapoint is missing + # of the precisest granularity + output = carbonara.AggregatedTimeSerie.aggregated([ + tsc1, tsc2], aggregation='sum') + + self.assertEqual([ + (pandas.Timestamp('2014-01-01 12:03:00'), 60.0, 33.0), + (pandas.Timestamp('2014-01-01 12:04:00'), 60.0, 5.0), + (pandas.Timestamp('2014-01-01 12:05:00'), 60.0, 18.0), + (pandas.Timestamp('2014-01-01 12:06:00'), 60.0, 19.0), + ], output) + + def test_aggregated_different_archive_overlap_edge_missing2(self): + tsc1 = carbonara.AggregatedTimeSerie(sampling=60, max_size=10) + tsb1 = carbonara.BoundTimeSerie(block_size=tsc1.sampling) + tsc2 = carbonara.AggregatedTimeSerie(sampling=60, max_size=10) + tsb2 = carbonara.BoundTimeSerie(block_size=tsc2.sampling) -class TestTimeSerieArchive(base.BaseTestCase): + tsb1.set_values([ + (datetime.datetime(2014, 1, 1, 12, 3, 0), 4), + ], before_truncate_callback=tsc1.update) - def test_empty_update(self): - tsc = carbonara.TimeSerieArchive.from_definitions( - [(60, 10), - (300, 6)]) - tsb = carbonara.BoundTimeSerie(block_size=tsc.max_block_size) - tsb.set_values([], before_truncate_callback=tsc.update) + tsb2.set_values([ + (datetime.datetime(2014, 1, 1, 11, 0, 0), 4), + (datetime.datetime(2014, 1, 1, 12, 3, 0), 4), + ], before_truncate_callback=tsc2.update) - self.assertEqual([], tsc.fetch()) + output = carbonara.AggregatedTimeSerie.aggregated([tsc1, tsc2]) + self.assertEqual([ + (pandas.Timestamp('2014-01-01 12:03:00'), 60.0, 4.0), + 
], output) def test_fetch(self): - tsc = carbonara.TimeSerieArchive.from_definitions( - [(60, 10), - (300, 6)]) - tsb = carbonara.BoundTimeSerie(block_size=tsc.max_block_size) + ts = carbonara.AggregatedTimeSerie(sampling=60, max_size=10) + tsb = carbonara.BoundTimeSerie(block_size=ts.sampling) tsb.set_values([ (datetime.datetime(2014, 1, 1, 11, 46, 4), 4), @@ -287,19 +416,14 @@ class TestTimeSerieArchive(base.BaseTestCase): (datetime.datetime(2014, 1, 1, 12, 4, 9), 7), (datetime.datetime(2014, 1, 1, 12, 5, 1), 15), (datetime.datetime(2014, 1, 1, 12, 5, 12), 1), - (datetime.datetime(2014, 1, 1, 12, 6, 0), 3), - ], before_truncate_callback=tsc.update) + (datetime.datetime(2014, 1, 1, 12, 6, 0, 2), 3), + ], before_truncate_callback=ts.update) tsb.set_values([ - (datetime.datetime(2014, 1, 1, 12, 5, 13), 5), - ], before_truncate_callback=tsc.update) + (datetime.datetime(2014, 1, 1, 12, 6), 5), + ], before_truncate_callback=ts.update) self.assertEqual([ - (datetime.datetime(2014, 1, 1, 11, 45), 300.0, 6.0), - (datetime.datetime(2014, 1, 1, 11, 50), 300.0, 27.0), - (datetime.datetime(2014, 1, 1, 11, 55), 300.0, 5.0), - (datetime.datetime(2014, 1, 1, 12, 00), 300.0, 6.166666666666667), - (datetime.datetime(2014, 1, 1, 12, 5), 300.0, 6.0), (datetime.datetime(2014, 1, 1, 11, 54), 60.0, 4.0), (datetime.datetime(2014, 1, 1, 11, 56), 60.0, 4.0), (datetime.datetime(2014, 1, 1, 11, 57), 60.0, 6.0), @@ -308,39 +432,31 @@ class TestTimeSerieArchive(base.BaseTestCase): (datetime.datetime(2014, 1, 1, 12, 2), 60.0, 8.0), (datetime.datetime(2014, 1, 1, 12, 3), 60.0, 3.0), (datetime.datetime(2014, 1, 1, 12, 4), 60.0, 7.0), - (datetime.datetime(2014, 1, 1, 12, 5), 60.0, 7.0), - (datetime.datetime(2014, 1, 1, 12, 6), 60.0, 3.0) - ], tsc.fetch()) + (datetime.datetime(2014, 1, 1, 12, 5), 60.0, 8.0), + (datetime.datetime(2014, 1, 1, 12, 6), 60.0, 4.0) + ], ts.fetch()) self.assertEqual([ - (datetime.datetime(2014, 1, 1, 12), 300.0, 6.166666666666667), - (datetime.datetime(2014, 1, 1, 12, 
5), 300.0, 6.0), (datetime.datetime(2014, 1, 1, 12, 1), 60.0, 5.5), (datetime.datetime(2014, 1, 1, 12, 2), 60.0, 8.0), (datetime.datetime(2014, 1, 1, 12, 3), 60.0, 3.0), (datetime.datetime(2014, 1, 1, 12, 4), 60.0, 7.0), - (datetime.datetime(2014, 1, 1, 12, 5), 60.0, 7.0), - (datetime.datetime(2014, 1, 1, 12, 6), 60.0, 3.0) - ], tsc.fetch(datetime.datetime(2014, 1, 1, 12, 0, 0))) + (datetime.datetime(2014, 1, 1, 12, 5), 60.0, 8.0), + (datetime.datetime(2014, 1, 1, 12, 6), 60.0, 4.0) + ], ts.fetch(datetime.datetime(2014, 1, 1, 12, 0, 0))) def test_fetch_agg_pct(self): - tsc = carbonara.TimeSerieArchive.from_definitions( - [(1, 3600 * 24), - (60, 24 * 60 * 30)], - aggregation_method='90pct') - tsb = carbonara.BoundTimeSerie(block_size=tsc.max_block_size) - - # NOTE(jd) What's interesting in this test is that we lack a point for - # a second, so we have an interval with no value + ts = carbonara.AggregatedTimeSerie(sampling=1, max_size=3600 * 24, + aggregation_method='90pct') + tsb = carbonara.BoundTimeSerie(block_size=ts.sampling) + tsb.set_values([(datetime.datetime(2014, 1, 1, 12, 0, 0), 3), (datetime.datetime(2014, 1, 1, 12, 0, 0, 123), 4), (datetime.datetime(2014, 1, 1, 12, 0, 2), 4)], - before_truncate_callback=tsc.update) + before_truncate_callback=ts.update) - result = tsc.fetch(datetime.datetime(2014, 1, 1, 12, 0, 0)) + result = ts.fetch(datetime.datetime(2014, 1, 1, 12, 0, 0)) reference = [ - (pandas.Timestamp('2014-01-01 12:00:00'), - 60.0, 4), (pandas.Timestamp('2014-01-01 12:00:00'), 1.0, 3.9), (pandas.Timestamp('2014-01-01 12:00:02'), @@ -356,12 +472,10 @@ class TestTimeSerieArchive(base.BaseTestCase): self.assertAlmostEqual(ref[2], res[2]) tsb.set_values([(datetime.datetime(2014, 1, 1, 12, 0, 2, 113), 110)], - before_truncate_callback=tsc.update) + before_truncate_callback=ts.update) - result = tsc.fetch(datetime.datetime(2014, 1, 1, 12, 0, 0)) + result = ts.fetch(datetime.datetime(2014, 1, 1, 12, 0, 0)) reference = [ - (pandas.Timestamp('2014-01-01 
12:00:00'), - 60.0, 78.2), (pandas.Timestamp('2014-01-01 12:00:00'), 1.0, 3.9), (pandas.Timestamp('2014-01-01 12:00:02'), @@ -377,10 +491,8 @@ class TestTimeSerieArchive(base.BaseTestCase): self.assertAlmostEqual(ref[2], res[2]) def test_fetch_nano(self): - tsc = carbonara.TimeSerieArchive.from_definitions( - [(0.2, 10), - (0.5, 6)]) - tsb = carbonara.BoundTimeSerie(block_size=tsc.max_block_size) + ts = carbonara.AggregatedTimeSerie(sampling=0.2, max_size=10) + tsb = carbonara.BoundTimeSerie(block_size=ts.sampling) tsb.set_values([ (datetime.datetime(2014, 1, 1, 11, 46, 0, 200123), 4), @@ -388,95 +500,78 @@ class TestTimeSerieArchive(base.BaseTestCase): (datetime.datetime(2014, 1, 1, 11, 47, 0, 323154), 50), (datetime.datetime(2014, 1, 1, 11, 48, 0, 590903), 4), (datetime.datetime(2014, 1, 1, 11, 48, 0, 903291), 4), - ], before_truncate_callback=tsc.update) + ], before_truncate_callback=ts.update) tsb.set_values([ (datetime.datetime(2014, 1, 1, 11, 48, 0, 821312), 5), - ], before_truncate_callback=tsc.update) + ], before_truncate_callback=ts.update) self.assertEqual([ - (datetime.datetime(2014, 1, 1, 11, 46), 0.5, 6.0), - (datetime.datetime(2014, 1, 1, 11, 47), 0.5, 50.0), - (datetime.datetime(2014, 1, 1, 11, 48, 0, 500000), 0.5, - 4.333333333333333), (datetime.datetime(2014, 1, 1, 11, 46, 0, 200000), 0.2, 6.0), (datetime.datetime(2014, 1, 1, 11, 47, 0, 200000), 0.2, 50.0), (datetime.datetime(2014, 1, 1, 11, 48, 0, 400000), 0.2, 4.0), (datetime.datetime(2014, 1, 1, 11, 48, 0, 800000), 0.2, 4.5) - ], tsc.fetch()) + ], ts.fetch()) def test_fetch_agg_std(self): - tsc = carbonara.TimeSerieArchive.from_definitions( - [(60, 60), - (300, 24)], - aggregation_method='std') - tsb = carbonara.BoundTimeSerie(block_size=tsc.max_block_size) + ts = carbonara.AggregatedTimeSerie(sampling=60, max_size=60, + aggregation_method='std') + tsb = carbonara.BoundTimeSerie(block_size=ts.sampling) tsb.set_values([(datetime.datetime(2014, 1, 1, 12, 0, 0), 3), (datetime.datetime(2014, 1, 1, 
12, 1, 4), 4), (datetime.datetime(2014, 1, 1, 12, 1, 9), 7), (datetime.datetime(2014, 1, 1, 12, 2, 1), 15), (datetime.datetime(2014, 1, 1, 12, 2, 12), 1)], - before_truncate_callback=tsc.update) + before_truncate_callback=ts.update) self.assertEqual([ - (pandas.Timestamp('2014-01-01 12:00:00'), - 300.0, 5.4772255750516612), (pandas.Timestamp('2014-01-01 12:01:00'), 60.0, 2.1213203435596424), (pandas.Timestamp('2014-01-01 12:02:00'), 60.0, 9.8994949366116654), - ], tsc.fetch(datetime.datetime(2014, 1, 1, 12, 0, 0))) + ], ts.fetch(datetime.datetime(2014, 1, 1, 12, 0, 0))) tsb.set_values([(datetime.datetime(2014, 1, 1, 12, 2, 13), 110)], - before_truncate_callback=tsc.update) + before_truncate_callback=ts.update) self.assertEqual([ - (pandas.Timestamp('2014-01-01 12:00:00'), - 300.0, 42.739521132865619), (pandas.Timestamp('2014-01-01 12:01:00'), 60.0, 2.1213203435596424), (pandas.Timestamp('2014-01-01 12:02:00'), 60.0, 59.304300012730948), - ], tsc.fetch(datetime.datetime(2014, 1, 1, 12, 0, 0))) + ], ts.fetch(datetime.datetime(2014, 1, 1, 12, 0, 0))) def test_fetch_agg_max(self): - tsc = carbonara.TimeSerieArchive.from_definitions( - [(60, 60), - (300, 24)], - aggregation_method='max') - tsb = carbonara.BoundTimeSerie(block_size=tsc.max_block_size) + ts = carbonara.AggregatedTimeSerie(sampling=60, max_size=60, + aggregation_method='max') + tsb = carbonara.BoundTimeSerie(block_size=ts.sampling) tsb.set_values([(datetime.datetime(2014, 1, 1, 12, 0, 0), 3), (datetime.datetime(2014, 1, 1, 12, 1, 4), 4), (datetime.datetime(2014, 1, 1, 12, 1, 9), 7), (datetime.datetime(2014, 1, 1, 12, 2, 1), 15), (datetime.datetime(2014, 1, 1, 12, 2, 12), 1)], - before_truncate_callback=tsc.update) + before_truncate_callback=ts.update) self.assertEqual([ - (pandas.Timestamp('2014-01-01 12:00:00'), 300.0, 15), (pandas.Timestamp('2014-01-01 12:00:00'), 60.0, 3), (pandas.Timestamp('2014-01-01 12:01:00'), 60.0, 7), (pandas.Timestamp('2014-01-01 12:02:00'), 60.0, 15), - ], 
tsc.fetch(datetime.datetime(2014, 1, 1, 12, 0, 0))) + ], ts.fetch(datetime.datetime(2014, 1, 1, 12, 0, 0))) tsb.set_values([(datetime.datetime(2014, 1, 1, 12, 2, 13), 110)], - before_truncate_callback=tsc.update) + before_truncate_callback=ts.update) self.assertEqual([ - (pandas.Timestamp('2014-01-01 12:00:00'), 300.0, 110), (pandas.Timestamp('2014-01-01 12:00:00'), 60.0, 3), (pandas.Timestamp('2014-01-01 12:01:00'), 60.0, 7), (pandas.Timestamp('2014-01-01 12:02:00'), 60.0, 110), - ], tsc.fetch(datetime.datetime(2014, 1, 1, 12, 0, 0))) + ], ts.fetch(datetime.datetime(2014, 1, 1, 12, 0, 0))) def test_serialize(self): - tsc = carbonara.TimeSerieArchive.from_definitions( - [(0.5, None), - (2, None)]) - - tsb = carbonara.BoundTimeSerie(block_size=tsc.max_block_size) + ts = carbonara.AggregatedTimeSerie(sampling=0.5) + tsb = carbonara.BoundTimeSerie(block_size=ts.sampling) tsb.set_values([ (datetime.datetime(2014, 1, 1, 12, 0, 0, 1234), 3), @@ -484,43 +579,14 @@ class TestTimeSerieArchive(base.BaseTestCase): (datetime.datetime(2014, 1, 1, 12, 1, 4, 234), 5), (datetime.datetime(2014, 1, 1, 12, 1, 9, 32), 7), (datetime.datetime(2014, 1, 1, 12, 2, 12, 532), 1), - ], before_truncate_callback=tsc.update) - - self.assertEqual(tsc, - carbonara.TimeSerieArchive.unserialize( - tsc.serialize())) - - def test_from_dict_resampling_stddev(self): - d = {'timeserie': {'values': {u'2013-01-01 23:45:01.182000': 1.0, - u'2013-01-01 23:45:02.975000': 2.0, - u'2013-01-01 23:45:03.689000': 3.0, - u'2013-01-01 23:45:04.292000': 4.0, - u'2013-01-01 23:45:05.416000': 5.0, - u'2013-01-01 23:45:06.995000': 6.0, - u'2013-01-01 23:45:07.065000': 7.0, - u'2013-01-01 23:45:08.634000': 8.0, - u'2013-01-01 23:45:09.572000': 9.0, - u'2013-01-01 23:45:10.672000': 10.0}, - 'timespan': u'120S'}, - 'archives': [{'aggregation_method': u'std', - 'values': {u'2013-01-01 23:40:00': - 3.0276503540974917, - u'2013-01-01 23:45:00': - 3.0276503540974917}, - 'max_size': 3600, - 'sampling': u'60S'}]} - timeseries = 
carbonara.TimeSerieArchive.from_dict(d) - measure = timeseries.fetch() - self.assertEqual(2, len(measure)) - measure = timeseries.fetch('2013-01-01 23:45:00', - '2013-01-01 23:46:00') - self.assertEqual(pandas.Timestamp('2013-01-01 23:45:00'), - measure[0][0]) - self.assertAlmostEquals(measure[0][2], 3.0276503540974917) + ], before_truncate_callback=ts.update) + + self.assertEqual(ts, + carbonara.AggregatedTimeSerie.unserialize( + ts.serialize())) def test_no_truncation(self): - ts = carbonara.TimeSerieArchive.from_definitions( - [(60, None)]) + ts = carbonara.AggregatedTimeSerie(sampling=60) tsb = carbonara.BoundTimeSerie() for i in six.moves.range(1, 11): @@ -535,12 +601,11 @@ class TestTimeSerieArchive(base.BaseTestCase): def test_back_window(self): """Back window testing. - Test the the back window on an archive is not longer than the window we + Test the back window on an archive is not longer than the window we aggregate on. """ - ts = carbonara.TimeSerieArchive.from_definitions( - [(1, 60)]) - tsb = carbonara.BoundTimeSerie(block_size=ts.max_block_size) + ts = carbonara.AggregatedTimeSerie(sampling=1, max_size=60) + tsb = carbonara.BoundTimeSerie(block_size=ts.sampling) tsb.set_values([ (datetime.datetime(2014, 1, 1, 12, 0, 1, 2300), 1), @@ -576,12 +641,11 @@ class TestTimeSerieArchive(base.BaseTestCase): def test_back_window_ignore(self): """Back window testing. - Test the the back window on an archive is not longer than the window we + Test the back window on an archive is not longer than the window we aggregate on. 
""" - ts = carbonara.TimeSerieArchive.from_definitions( - [(1, 60)]) - tsb = carbonara.BoundTimeSerie(block_size=1) + ts = carbonara.AggregatedTimeSerie(sampling=1, max_size=60) + tsb = carbonara.BoundTimeSerie(block_size=ts.sampling) tsb.set_values([ (datetime.datetime(2014, 1, 1, 12, 0, 1, 2300), 1), @@ -625,14 +689,20 @@ class TestTimeSerieArchive(base.BaseTestCase): ts.fetch()) def test_aggregated_nominal(self): - tsc1 = carbonara.TimeSerieArchive.from_definitions( - [(60, 10), - (300, 6)]) - tsb1 = carbonara.BoundTimeSerie(block_size=tsc1.max_block_size) - tsc2 = carbonara.TimeSerieArchive.from_definitions( - [(60, 10), - (300, 6)]) - tsb2 = carbonara.BoundTimeSerie(block_size=tsc2.max_block_size) + tsc1 = carbonara.AggregatedTimeSerie(sampling=60, max_size=10) + tsc12 = carbonara.AggregatedTimeSerie(sampling=300, max_size=6) + tsb1 = carbonara.BoundTimeSerie(block_size=tsc12.sampling) + tsc2 = carbonara.AggregatedTimeSerie(sampling=60, max_size=10) + tsc22 = carbonara.AggregatedTimeSerie(sampling=300, max_size=6) + tsb2 = carbonara.BoundTimeSerie(block_size=tsc22.sampling) + + def ts1_update(ts): + tsc1.update(ts) + tsc12.update(ts) + + def ts2_update(ts): + tsc2.update(ts) + tsc22.update(ts) tsb1.set_values([ (datetime.datetime(2014, 1, 1, 11, 46, 4), 4), @@ -651,7 +721,7 @@ class TestTimeSerieArchive(base.BaseTestCase): (datetime.datetime(2014, 1, 1, 12, 5, 1), 15), (datetime.datetime(2014, 1, 1, 12, 5, 12), 1), (datetime.datetime(2014, 1, 1, 12, 6, 0), 3), - ], before_truncate_callback=tsc1.update) + ], before_truncate_callback=ts1_update) tsb2.set_values([ (datetime.datetime(2014, 1, 1, 11, 46, 4), 6), @@ -670,9 +740,10 @@ class TestTimeSerieArchive(base.BaseTestCase): (datetime.datetime(2014, 1, 1, 12, 5, 1), 10), (datetime.datetime(2014, 1, 1, 12, 5, 12), 1), (datetime.datetime(2014, 1, 1, 12, 6, 0), 1), - ], before_truncate_callback=tsc2.update) + ], before_truncate_callback=ts2_update) - output = carbonara.TimeSerieArchive.aggregated([tsc1, tsc2]) + 
output = carbonara.AggregatedTimeSerie.aggregated([tsc1, tsc12, + tsc2, tsc22]) self.assertEqual([ (datetime.datetime(2014, 1, 1, 11, 45), 300.0, 5.75), (datetime.datetime(2014, 1, 1, 11, 50), 300.0, 27.5), @@ -691,192 +762,11 @@ class TestTimeSerieArchive(base.BaseTestCase): (datetime.datetime(2014, 1, 1, 12, 6), 60.0, 2.0), ], output) - def test_aggregated_different_archive(self): - tsc1 = carbonara.TimeSerieArchive.from_definitions( - [(60, 50), - (120, 24)]) - tsc2 = carbonara.TimeSerieArchive.from_definitions( - [(180, 50), - (300, 24)]) - - self.assertRaises(carbonara.UnAggregableTimeseries, - carbonara.TimeSerieArchive.aggregated, - [tsc1, tsc2]) - - def test_aggregated_different_archive_no_overlap(self): - tsc1 = carbonara.TimeSerieArchive.from_definitions( - [(60, 50), - (120, 24)]) - tsb1 = carbonara.BoundTimeSerie(block_size=tsc1.max_block_size) - tsc2 = carbonara.TimeSerieArchive.from_definitions( - [(60, 50)]) - tsb2 = carbonara.BoundTimeSerie(block_size=tsc2.max_block_size) - - tsb1.set_values([(datetime.datetime(2014, 1, 1, 11, 46, 4), 4)], - before_truncate_callback=tsc1.update) - tsb2.set_values([(datetime.datetime(2014, 1, 1, 9, 1, 4), 4)], - before_truncate_callback=tsc2.update) - - dtfrom = datetime.datetime(2014, 1, 1, 11, 0, 0) - self.assertRaises(carbonara.UnAggregableTimeseries, - carbonara.TimeSerieArchive.aggregated, - [tsc1, tsc2], from_timestamp=dtfrom) - - def test_aggregated_different_archive_no_overlap2(self): - tsc1 = carbonara.TimeSerieArchive.from_definitions( - [(60, 50), - (120, 24)]) - tsb1 = carbonara.BoundTimeSerie(block_size=tsc1.max_block_size) - tsc2 = carbonara.TimeSerieArchive.from_definitions( - [(60, 50)]) - - tsb1.set_values([(datetime.datetime(2014, 1, 1, 12, 3, 0), 4)], - before_truncate_callback=tsc1.update) - self.assertRaises(carbonara.UnAggregableTimeseries, - carbonara.TimeSerieArchive.aggregated, - [tsc1, tsc2]) - - def test_aggregated_different_archive_no_overlap_but_dont_care(self): - tsc1 = 
carbonara.TimeSerieArchive.from_definitions( - [(60, 50), - (120, 24)]) - tsb1 = carbonara.BoundTimeSerie(block_size=tsc1.max_block_size) - tsc2 = carbonara.TimeSerieArchive.from_definitions( - [(60, 50)]) - - tsb1.set_values([(datetime.datetime(2014, 1, 1, 12, 3, 0), 4)], - before_truncate_callback=tsc1.update) - - res = carbonara.TimeSerieArchive.aggregated( - [tsc1, tsc2], needed_percent_of_overlap=0) - self.assertEqual([(pandas.Timestamp('2014-01-01 12:03:00'), - 60.0, 4.0)], res) - - def test_aggregated_different_archive_overlap(self): - tsc1 = carbonara.TimeSerieArchive.from_definitions( - [(60, 10), - (600, 6)]) - tsb1 = carbonara.BoundTimeSerie(block_size=tsc1.max_block_size) - tsc2 = carbonara.TimeSerieArchive.from_definitions( - [(60, 10)]) - tsb2 = carbonara.BoundTimeSerie(block_size=tsc2.max_block_size) - - # NOTE(sileht): minute 8 is missing in both and - # minute 7 in tsc2 too, but it looks like we have - # enough point to do the aggregation - tsb1.set_values([ - (datetime.datetime(2014, 1, 1, 11, 0, 0), 4), - (datetime.datetime(2014, 1, 1, 12, 1, 0), 3), - (datetime.datetime(2014, 1, 1, 12, 2, 0), 2), - (datetime.datetime(2014, 1, 1, 12, 3, 0), 4), - (datetime.datetime(2014, 1, 1, 12, 4, 0), 2), - (datetime.datetime(2014, 1, 1, 12, 5, 0), 3), - (datetime.datetime(2014, 1, 1, 12, 6, 0), 4), - (datetime.datetime(2014, 1, 1, 12, 7, 0), 10), - (datetime.datetime(2014, 1, 1, 12, 9, 0), 2), - ], before_truncate_callback=tsc1.update) - - tsb2.set_values([ - (datetime.datetime(2014, 1, 1, 12, 1, 0), 3), - (datetime.datetime(2014, 1, 1, 12, 2, 0), 4), - (datetime.datetime(2014, 1, 1, 12, 3, 0), 4), - (datetime.datetime(2014, 1, 1, 12, 4, 0), 6), - (datetime.datetime(2014, 1, 1, 12, 5, 0), 3), - (datetime.datetime(2014, 1, 1, 12, 6, 0), 6), - (datetime.datetime(2014, 1, 1, 12, 9, 0), 2), - (datetime.datetime(2014, 1, 1, 12, 11, 0), 2), - (datetime.datetime(2014, 1, 1, 12, 12, 0), 2), - ], before_truncate_callback=tsc2.update) - - dtfrom = 
datetime.datetime(2014, 1, 1, 12, 0, 0) - dtto = datetime.datetime(2014, 1, 1, 12, 10, 0) - - # By default we require 100% of point that overlap - # so that fail - self.assertRaises(carbonara.UnAggregableTimeseries, - carbonara.TimeSerieArchive.aggregated, - [tsc1, tsc2], from_timestamp=dtfrom, - to_timestamp=dtto) - - # Retry with 80% and it works - output = carbonara.TimeSerieArchive.aggregated([ - tsc1, tsc2], from_timestamp=dtfrom, to_timestamp=dtto, - needed_percent_of_overlap=80.0) - - self.assertEqual([ - (pandas.Timestamp('2014-01-01 12:01:00'), 60.0, 3.0), - (pandas.Timestamp('2014-01-01 12:02:00'), 60.0, 3.0), - (pandas.Timestamp('2014-01-01 12:03:00'), 60.0, 4.0), - (pandas.Timestamp('2014-01-01 12:04:00'), 60.0, 4.0), - (pandas.Timestamp('2014-01-01 12:05:00'), 60.0, 3.0), - (pandas.Timestamp('2014-01-01 12:06:00'), 60.0, 5.0), - (pandas.Timestamp('2014-01-01 12:07:00'), 60.0, 10.0), - (pandas.Timestamp('2014-01-01 12:09:00'), 60.0, 2.0), - ], output) - - def test_aggregated_different_archive_overlap_edge_missing1(self): - tsc1 = carbonara.TimeSerieArchive.from_definitions([(60, 10)]) - tsb1 = carbonara.BoundTimeSerie(block_size=tsc1.max_block_size) - tsc2 = carbonara.TimeSerieArchive.from_definitions([(60, 10)]) - tsb2 = carbonara.BoundTimeSerie(block_size=tsc2.max_block_size) - - tsb1.set_values([ - (datetime.datetime(2014, 1, 1, 12, 3, 0), 9), - (datetime.datetime(2014, 1, 1, 12, 4, 0), 1), - (datetime.datetime(2014, 1, 1, 12, 5, 0), 2), - (datetime.datetime(2014, 1, 1, 12, 6, 0), 7), - (datetime.datetime(2014, 1, 1, 12, 7, 0), 5), - (datetime.datetime(2014, 1, 1, 12, 8, 0), 3), - ], before_truncate_callback=tsc1.update) - - tsb2.set_values([ - (datetime.datetime(2014, 1, 1, 11, 0, 0), 6), - (datetime.datetime(2014, 1, 1, 12, 1, 0), 2), - (datetime.datetime(2014, 1, 1, 12, 2, 0), 13), - (datetime.datetime(2014, 1, 1, 12, 3, 0), 24), - (datetime.datetime(2014, 1, 1, 12, 4, 0), 4), - (datetime.datetime(2014, 1, 1, 12, 5, 0), 16), - 
(datetime.datetime(2014, 1, 1, 12, 6, 0), 12), - ], before_truncate_callback=tsc2.update) - - # By default we require 100% of point that overlap - # but we allow that the last datapoint is missing - # of the precisest granularity - output = carbonara.TimeSerieArchive.aggregated([ - tsc1, tsc2], aggregation='sum') - - self.assertEqual([ - (pandas.Timestamp('2014-01-01 12:03:00'), 60.0, 33.0), - (pandas.Timestamp('2014-01-01 12:04:00'), 60.0, 5.0), - (pandas.Timestamp('2014-01-01 12:05:00'), 60.0, 18.0), - (pandas.Timestamp('2014-01-01 12:06:00'), 60.0, 19.0), - ], output) - - def test_aggregated_different_archive_overlap_edge_missing2(self): - tsc1 = carbonara.TimeSerieArchive.from_definitions([(60, 10)]) - tsb1 = carbonara.BoundTimeSerie(block_size=tsc1.max_block_size) - tsc2 = carbonara.TimeSerieArchive.from_definitions([(60, 10)]) - tsb2 = carbonara.BoundTimeSerie(block_size=tsc2.max_block_size) - - tsb1.set_values([ - (datetime.datetime(2014, 1, 1, 12, 3, 0), 4), - ], before_truncate_callback=tsc1.update) - - tsb2.set_values([ - (datetime.datetime(2014, 1, 1, 11, 0, 0), 4), - (datetime.datetime(2014, 1, 1, 12, 3, 0), 4), - ], before_truncate_callback=tsc2.update) - - output = carbonara.TimeSerieArchive.aggregated([tsc1, tsc2]) - self.assertEqual([ - (pandas.Timestamp('2014-01-01 12:03:00'), 60.0, 4.0), - ], output) - def test_aggregated_partial_overlap(self): - tsc1 = carbonara.TimeSerieArchive.from_definitions([(1, 86400)]) - tsb1 = carbonara.BoundTimeSerie(block_size=tsc1.max_block_size) - tsc2 = carbonara.TimeSerieArchive.from_definitions([(1, 86400)]) - tsb2 = carbonara.BoundTimeSerie(block_size=tsc2.max_block_size) + tsc1 = carbonara.AggregatedTimeSerie(sampling=1, max_size=86400) + tsb1 = carbonara.BoundTimeSerie(block_size=tsc1.sampling) + tsc2 = carbonara.AggregatedTimeSerie(sampling=1, max_size=86400) + tsb2 = carbonara.BoundTimeSerie(block_size=tsc2.sampling) tsb1.set_values([ (datetime.datetime(2015, 12, 3, 13, 19, 15), 1), @@ -892,7 +782,7 @@ class 
TestTimeSerieArchive(base.BaseTestCase): (datetime.datetime(2015, 12, 3, 13, 24, 15), 10), ], before_truncate_callback=tsc2.update) - output = carbonara.TimeSerieArchive.aggregated( + output = carbonara.AggregatedTimeSerie.aggregated( [tsc1, tsc2], aggregation="sum") self.assertEqual([ @@ -903,7 +793,7 @@ class TestTimeSerieArchive(base.BaseTestCase): dtfrom = datetime.datetime(2015, 12, 3, 13, 17, 0) dtto = datetime.datetime(2015, 12, 3, 13, 25, 0) - output = carbonara.TimeSerieArchive.aggregated( + output = carbonara.AggregatedTimeSerie.aggregated( [tsc1, tsc2], from_timestamp=dtfrom, to_timestamp=dtto, aggregation="sum", needed_percent_of_overlap=0) @@ -919,10 +809,10 @@ class TestTimeSerieArchive(base.BaseTestCase): # By default we require 100% of point that overlap # so that fail if from or to is set self.assertRaises(carbonara.UnAggregableTimeseries, - carbonara.TimeSerieArchive.aggregated, + carbonara.AggregatedTimeSerie.aggregated, [tsc1, tsc2], to_timestamp=dtto) self.assertRaises(carbonara.UnAggregableTimeseries, - carbonara.TimeSerieArchive.aggregated, + carbonara.AggregatedTimeSerie.aggregated, [tsc1, tsc2], from_timestamp=dtfrom) # Retry with 50% and it works diff --git a/gnocchi/tests/test_indexer.py b/gnocchi/tests/test_indexer.py index 904e9f4b..3766ed2f 100644 --- a/gnocchi/tests/test_indexer.py +++ b/gnocchi/tests/test_indexer.py @@ -170,7 +170,7 @@ class TestIndexerDriver(tests_base.TestCase): m = self.index.list_metrics(id=rc.metrics[0].id) self.assertEqual(m[0], rc.metrics[0]) - def _do_test_create_instance(self, server_group=None): + def _do_test_create_instance(self, server_group=None, image_ref=None): r1 = uuid.uuid4() user = str(uuid.uuid4()) project = str(uuid.uuid4()) @@ -178,7 +178,7 @@ class TestIndexerDriver(tests_base.TestCase): rc = self.index.create_resource('instance', r1, user, project, flavor_id="1", - image_ref="http://foo/bar", + image_ref=image_ref, host="foo", display_name="lol", **kwargs) self.assertIsNotNone(rc.started_at) 
@@ -196,7 +196,7 @@ class TestIndexerDriver(tests_base.TestCase): "display_name": "lol", "server_group": server_group, "host": "foo", - "image_ref": "http://foo/bar", + "image_ref": image_ref, "flavor_id": "1", "original_resource_id": None, "metrics": {}}, @@ -207,10 +207,14 @@ class TestIndexerDriver(tests_base.TestCase): self.assertEqual(rc.metrics, rg.metrics) def test_create_instance(self): - self._do_test_create_instance() + self._do_test_create_instance(image_ref='http://foo/bar') def test_create_instance_with_server_group(self): - self._do_test_create_instance('my_autoscaling_group') + self._do_test_create_instance('my_autoscaling_group', + image_ref='http://foo/bar') + + def test_create_instance_without_image_ref(self): + self._do_test_create_instance(image_ref=None) def test_delete_resource(self): r1 = uuid.uuid4() @@ -945,8 +949,18 @@ class TestIndexerDriver(tests_base.TestCase): self.index.create_metric(e1, user, project, archive_policy_name="low") + e2 = uuid.uuid4() + self.index.create_metric(e2, + user, project, + archive_policy_name="low") metrics = self.index.list_metrics() - self.assertIn(e1, [m.id for m in metrics]) + id_list = [m.id for m in metrics] + self.assertIn(e1, id_list) + # Test ordering + if e1 < e2: + self.assertLess(id_list.index(e1), id_list.index(e2)) + else: + self.assertLess(id_list.index(e2), id_list.index(e1)) def test_list_metrics_delete_status(self): e1 = uuid.uuid4() diff --git a/gnocchi/tests/test_rest.py b/gnocchi/tests/test_rest.py index 850257a0..aeba5055 100644 --- a/gnocchi/tests/test_rest.py +++ b/gnocchi/tests/test_rest.py @@ -22,10 +22,8 @@ import hashlib import json import uuid -import keystonemiddleware.auth_token -from keystonemiddleware import opts as ks_opts +from keystonemiddleware import fixture as ksm_fixture import mock -import oslo_config from oslo_utils import timeutils import six from stevedore import extension @@ -42,77 +40,18 @@ from gnocchi import utils load_tests = 
testscenarios.load_tests_apply_scenarios -class FakeMemcache(object): - VALID_TOKEN_ADMIN = '4562138218392830' - ADMIN_TOKEN_HASH = hashlib.sha256( - VALID_TOKEN_ADMIN.encode('utf-8')).hexdigest() +class TestingApp(webtest.TestApp): + VALID_TOKEN_ADMIN = str(uuid.uuid4()) USER_ID_ADMIN = str(uuid.uuid4()) PROJECT_ID_ADMIN = str(uuid.uuid4()) - VALID_TOKEN = '4562138218392831' - TOKEN_HASH = hashlib.sha256(VALID_TOKEN.encode('utf-8')).hexdigest() + VALID_TOKEN = str(uuid.uuid4()) USER_ID = str(uuid.uuid4()) PROJECT_ID = str(uuid.uuid4()) - VALID_TOKEN_2 = '4562138218392832' - TOKEN_2_HASH = hashlib.sha256(VALID_TOKEN_2.encode('utf-8')).hexdigest() - # We replace "-" to simulate a middleware that would send UUID in a non - # normalized format. - USER_ID_2 = str(uuid.uuid4()).replace("-", "") - PROJECT_ID_2 = str(uuid.uuid4()).replace("-", "") - - def get(self, key): - dt = "2100-01-01T23:59:59" - if (key == "tokens/%s" % self.ADMIN_TOKEN_HASH or - key == "tokens/%s" % self.VALID_TOKEN_ADMIN): - return json.dumps(({'access': { - 'token': {'id': self.VALID_TOKEN_ADMIN, - 'expires': dt}, - 'user': { - 'id': self.USER_ID_ADMIN, - 'name': 'adminusername', - 'tenantId': self.PROJECT_ID_ADMIN, - 'tenantName': 'myadmintenant', - 'roles': [ - {'name': 'admin'}, - ]}, - }}, dt)) - elif (key == "tokens/%s" % self.TOKEN_HASH or - key == "tokens/%s" % self.VALID_TOKEN): - return json.dumps(({'access': { - 'token': {'id': self.VALID_TOKEN, - 'expires': dt}, - 'user': { - 'id': self.USER_ID, - 'name': 'myusername', - 'tenantId': self.PROJECT_ID, - 'tenantName': 'mytenant', - 'roles': [ - {'name': 'member'}, - ]}, - }}, dt)) - elif (key == "tokens/%s" % self.TOKEN_2_HASH or - key == "tokens/%s" % self.VALID_TOKEN_2): - return json.dumps(({'access': { - 'token': {'id': self.VALID_TOKEN_2, - 'expires': dt}, - 'user': { - 'id': self.USER_ID_2, - 'name': 'myusername2', - 'tenantId': self.PROJECT_ID_2, - 'tenantName': 'mytenant2', - 'roles': [ - {'name': 'member'}, - ]}, - }}, dt)) - - 
@staticmethod - def set(key, value, **kwargs): - pass - - -class TestingApp(webtest.TestApp): - CACHE_NAME = 'fake.cache' + VALID_TOKEN_2 = str(uuid.uuid4()) + USER_ID_2 = str(uuid.uuid4()) + PROJECT_ID_2 = str(uuid.uuid4()) def __init__(self, *args, **kwargs): self.auth = kwargs.pop('auth') @@ -120,15 +59,14 @@ class TestingApp(webtest.TestApp): self.indexer = kwargs.pop('indexer') super(TestingApp, self).__init__(*args, **kwargs) # Setup Keystone auth_token fake cache - self.extra_environ.update({self.CACHE_NAME: FakeMemcache()}) - self.token = FakeMemcache.VALID_TOKEN + self.token = self.VALID_TOKEN @contextlib.contextmanager def use_admin_user(self): if not self.auth: raise testcase.TestSkipped("No auth enabled") old_token = self.token - self.token = FakeMemcache.VALID_TOKEN_ADMIN + self.token = self.VALID_TOKEN_ADMIN try: yield finally: @@ -139,7 +77,7 @@ class TestingApp(webtest.TestApp): if not self.auth: raise testcase.TestSkipped("No auth enabled") old_token = self.token - self.token = FakeMemcache.VALID_TOKEN_2 + self.token = self.VALID_TOKEN_2 try: yield finally: @@ -320,8 +258,8 @@ class MetricTest(RestTest): params={ "id": str(uuid.uuid4()), "started_at": "2014-01-01 02:02:02", - "user_id": FakeMemcache.USER_ID_2, - "project_id": FakeMemcache.PROJECT_ID_2, + "user_id": TestingApp.USER_ID_2, + "project_id": TestingApp.PROJECT_ID_2, "metrics": {"foobar": {"archive_policy_name": "low"}}, }) resource = json.loads(result.text) @@ -461,6 +399,7 @@ class MetricTest(RestTest): result = json.loads(ret.text) now = utils.utcnow() self.assertEqual([ + ['2014-01-01T10:00:00+00:00', 3600.0, 1234.2], [(now - datetime.timedelta( seconds=now.second, @@ -833,8 +772,8 @@ class ResourceTest(RestTest): # Set original_resource_id self.resource['original_resource_id'] = self.resource['id'] if self.auth: - self.resource['created_by_user_id'] = FakeMemcache.USER_ID - self.resource['created_by_project_id'] = FakeMemcache.PROJECT_ID + self.resource['created_by_user_id'] = 
TestingApp.USER_ID + self.resource['created_by_project_id'] = TestingApp.PROJECT_ID else: self.resource['created_by_user_id'] = None self.resource['created_by_project_id'] = None @@ -1544,7 +1483,7 @@ class ResourceTest(RestTest): "id": str(uuid.uuid4()), "started_at": "2014-01-01 02:02:02", "user_id": u1, - "project_id": FakeMemcache.PROJECT_ID_2, + "project_id": TestingApp.PROJECT_ID_2, }) g = json.loads(result.text) @@ -1709,8 +1648,8 @@ class ResourceTest(RestTest): params={ "id": str(uuid.uuid4()), "started_at": "2014-01-01 02:02:02", - "user_id": FakeMemcache.USER_ID_2, - "project_id": FakeMemcache.PROJECT_ID_2, + "user_id": TestingApp.USER_ID_2, + "project_id": TestingApp.PROJECT_ID_2, }) g = json.loads(result.text) diff --git a/gnocchi/tests/test_storage.py b/gnocchi/tests/test_storage.py index 52dd5322..c7f80eca 100644 --- a/gnocchi/tests/test_storage.py +++ b/gnocchi/tests/test_storage.py @@ -17,9 +17,12 @@ import datetime import uuid import mock +from oslo_utils import timeutils +from oslotest import base import six.moves from gnocchi import storage +from gnocchi.storage import _carbonara from gnocchi.storage import null from gnocchi.tests import base as tests_base from gnocchi import utils @@ -46,6 +49,9 @@ class TestStorageDriver(tests_base.TestCase): @mock.patch('gnocchi.storage._carbonara.LOG') def test_corrupted_data(self, logger): + if not isinstance(self.storage, _carbonara.CarbonaraBasedStorage): + self.skipTest("This driver is not based on Carbonara") + self.storage.add_measures(self.metric, [ storage.Measure(datetime.datetime(2014, 1, 1, 12, 0, 1), 69), ]) @@ -60,18 +66,6 @@ class TestStorageDriver(tests_base.TestCase): side_effect=ValueError("boom!")): self.storage.process_background_tasks(self.index, sync=True) - expected_calls = [ - mock.call.debug('Processing measures for %s' % self.metric.id), - mock.call.debug('Processing measures for %s' % self.metric.id), - ] - aggs = ["none"] + self.conf.archive_policy.default_aggregation_methods - for 
agg in aggs: - expected_calls.append(mock.call.error( - 'Data are corrupted for metric %s and aggregation %s, ' - 'recreating an empty timeserie.' % (self.metric.id, agg))) - - logger.assert_has_calls(expected_calls, any_order=True) - self.assertEqual([ (utils.datetime_utc(2014, 1, 1), 86400.0, 1), (utils.datetime_utc(2014, 1, 1, 13), 3600.0, 1), @@ -224,6 +218,8 @@ class TestStorageDriver(tests_base.TestCase): ], self.storage.get_measures(self.metric)) self.assertEqual([ + (utils.datetime_utc(2014, 1, 1), 86400.0, 39.75), + (utils.datetime_utc(2014, 1, 1, 12), 3600.0, 39.75), (utils.datetime_utc(2014, 1, 1, 12, 10), 300.0, 44.0), ], self.storage.get_measures( self.metric, @@ -238,14 +234,17 @@ class TestStorageDriver(tests_base.TestCase): self.metric, to_timestamp=datetime.datetime(2014, 1, 1, 12, 6, 0))) - self.assertEqual( - [], - self.storage.get_measures( - self.metric, - to_timestamp=datetime.datetime(2014, 1, 1, 12, 10, 10), - from_timestamp=datetime.datetime(2014, 1, 1, 12, 10, 10))) + self.assertEqual([ + (utils.datetime_utc(2014, 1, 1), 86400.0, 39.75), + (utils.datetime_utc(2014, 1, 1, 12), 3600.0, 39.75), + (utils.datetime_utc(2014, 1, 1, 12, 10), 300.0, 44.0), + ], self.storage.get_measures( + self.metric, + to_timestamp=datetime.datetime(2014, 1, 1, 12, 10, 10), + from_timestamp=datetime.datetime(2014, 1, 1, 12, 10, 10))) self.assertEqual([ + (utils.datetime_utc(2014, 1, 1), 86400.0, 39.75), (utils.datetime_utc(2014, 1, 1, 12), 3600.0, 39.75), (utils.datetime_utc(2014, 1, 1, 12), 300.0, 69.0), ], self.storage.get_measures( @@ -253,13 +252,22 @@ class TestStorageDriver(tests_base.TestCase): from_timestamp=datetime.datetime(2014, 1, 1, 12, 0, 0), to_timestamp=datetime.datetime(2014, 1, 1, 12, 0, 2))) + self.assertEqual([ + (utils.datetime_utc(2014, 1, 1), 86400.0, 39.75), + (utils.datetime_utc(2014, 1, 1, 12), 3600.0, 39.75), + (utils.datetime_utc(2014, 1, 1, 12), 300.0, 69.0), + ], self.storage.get_measures( + self.metric, + 
from_timestamp=timeutils.parse_isotime("2014-1-1 13:00:00+01:00"), + to_timestamp=datetime.datetime(2014, 1, 1, 12, 0, 2))) + self.assertEqual([ (utils.datetime_utc(2014, 1, 1, 12), 3600.0, 39.75), ], self.storage.get_measures( self.metric, from_timestamp=datetime.datetime(2014, 1, 1, 12, 0, 0), to_timestamp=datetime.datetime(2014, 1, 1, 12, 0, 2), - granularity=3600)) + granularity=3600.0)) self.assertEqual([ (utils.datetime_utc(2014, 1, 1, 12), 300.0, 69.0), @@ -267,10 +275,12 @@ class TestStorageDriver(tests_base.TestCase): self.metric, from_timestamp=datetime.datetime(2014, 1, 1, 12, 0, 0), to_timestamp=datetime.datetime(2014, 1, 1, 12, 0, 2), - granularity=300)) + granularity=300.0)) - self.assertEqual([], self.storage.get_measures(self.metric, - granularity=42)) + self.assertRaises(storage.GranularityDoesNotExist, + self.storage.get_measures, + self.metric, + granularity=42) def test_get_cross_metric_measures_unknown_metric(self): self.assertEqual([], @@ -311,6 +321,26 @@ class TestStorageDriver(tests_base.TestCase): [self.metric, metric2], aggregation='last') + def test_get_cross_metric_measures_unknown_granularity(self): + metric2 = storage.Metric(uuid.uuid4(), + self.archive_policies['low']) + self.storage.add_measures(self.metric, [ + storage.Measure(datetime.datetime(2014, 1, 1, 12, 0, 1), 69), + storage.Measure(datetime.datetime(2014, 1, 1, 12, 7, 31), 42), + storage.Measure(datetime.datetime(2014, 1, 1, 12, 9, 31), 4), + storage.Measure(datetime.datetime(2014, 1, 1, 12, 12, 45), 44), + ]) + self.storage.add_measures(metric2, [ + storage.Measure(datetime.datetime(2014, 1, 1, 12, 0, 1), 69), + storage.Measure(datetime.datetime(2014, 1, 1, 12, 7, 31), 42), + storage.Measure(datetime.datetime(2014, 1, 1, 12, 9, 31), 4), + storage.Measure(datetime.datetime(2014, 1, 1, 12, 12, 45), 44), + ]) + self.assertRaises(storage.GranularityDoesNotExist, + self.storage.get_cross_metric_measures, + [self.metric, metric2], + granularity=12345.456) + def 
test_add_and_get_cross_metric_measures_different_archives(self): metric2 = storage.Metric(uuid.uuid4(), self.archive_policies['no_granularity_match']) @@ -360,7 +390,9 @@ class TestStorageDriver(tests_base.TestCase): [self.metric, metric2], from_timestamp=utils.to_timestamp('2014-01-01 12:10:00')) self.assertEqual([ - (utils.datetime_utc(2014, 1, 1, 12, 10, 0), 300.0, 24.0) + (utils.datetime_utc(2014, 1, 1), 86400.0, 22.25), + (utils.datetime_utc(2014, 1, 1, 12), 3600.0, 22.25), + (utils.datetime_utc(2014, 1, 1, 12, 10, 0), 300.0, 24.0), ], values) values = self.storage.get_cross_metric_measures( @@ -389,6 +421,7 @@ class TestStorageDriver(tests_base.TestCase): to_timestamp=utils.to_timestamp('2014-01-01 12:00:01')) self.assertEqual([ + (utils.datetime_utc(2014, 1, 1), 86400.0, 22.25), (utils.datetime_utc(2014, 1, 1, 12, 0, 0), 3600.0, 22.25), (utils.datetime_utc(2014, 1, 1, 12, 0, 0), 300.0, 39.0), ], values) @@ -449,10 +482,14 @@ class TestStorageDriver(tests_base.TestCase): self.assertEqual( {metric2: [], - self.metric: [(utils.datetime_utc(2014, 1, 1, 12), 300, 69)]}, + self.metric: [ + (utils.datetime_utc(2014, 1, 1), 86400, 33), + (utils.datetime_utc(2014, 1, 1, 12), 3600, 33), + (utils.datetime_utc(2014, 1, 1, 12), 300, 69), + (utils.datetime_utc(2014, 1, 1, 12, 10), 300, 42)]}, self.storage.search_value( [metric2, self.metric], - {u"≥": 50})) + {u"≥": 30})) self.assertEqual( {metric2: [], self.metric: []}, @@ -461,3 +498,63 @@ class TestStorageDriver(tests_base.TestCase): {u"∧": [ {u"eq": 100}, {u"≠": 50}]})) + + +class TestMeasureQuery(base.BaseTestCase): + def test_equal(self): + q = storage.MeasureQuery({"=": 4}) + self.assertTrue(q(4)) + self.assertFalse(q(40)) + + def test_gt(self): + q = storage.MeasureQuery({">": 4}) + self.assertTrue(q(40)) + self.assertFalse(q(4)) + + def test_and(self): + q = storage.MeasureQuery({"and": [{">": 4}, {"<": 10}]}) + self.assertTrue(q(5)) + self.assertFalse(q(40)) + self.assertFalse(q(1)) + + def test_or(self): + q = 
storage.MeasureQuery({"or": [{"=": 4}, {"=": 10}]}) + self.assertTrue(q(4)) + self.assertTrue(q(10)) + self.assertFalse(q(-1)) + + def test_modulo(self): + q = storage.MeasureQuery({"=": [{"%": 5}, 0]}) + self.assertTrue(q(5)) + self.assertTrue(q(10)) + self.assertFalse(q(-1)) + self.assertFalse(q(6)) + + def test_math(self): + q = storage.MeasureQuery( + { + u"and": [ + # v+5 is bigger 0 + {u"≥": [{u"+": 5}, 0]}, + # v-6 is not 5 + {u"≠": [5, {u"-": 6}]}, + ], + } + ) + self.assertTrue(q(5)) + self.assertTrue(q(10)) + self.assertFalse(q(11)) + + def test_empty(self): + q = storage.MeasureQuery({}) + self.assertFalse(q(5)) + self.assertFalse(q(10)) + + def test_bad_format(self): + self.assertRaises(storage.InvalidQuery, + storage.MeasureQuery, + {"foo": [{"=": 4}, {"=": 10}]}) + + self.assertRaises(storage.InvalidQuery, + storage.MeasureQuery, + {"=": [1, 2, 3]}) diff --git a/requirements.txt b/requirements.txt index 1b40b129..cc9667e3 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,30 +1,22 @@ numpy -oslo.config>=1.15.0 -oslo.db>=1.8.0 +oslo.config>=2.6.0 oslo.log>=1.0.0 oslo.policy>=0.3.0 oslo.serialization>=1.4.0 oslo.utils>=1.6.0 oslo.middleware -oslosphinx>=2.2.0 # Apache-2.0 pandas>=0.17.0 pecan>=0.9 -python-swiftclient>=2.5.0 pytimeparse>=1.1.5 futures requests six -sqlalchemy -sqlalchemy-utils stevedore -tooz>=0.11 voluptuous werkzeug trollius; python_version < '3.4' retrying -pytz WebOb>=1.4.1 Paste PasteDeploy -sphinx_bootstrap_theme prettytable diff --git a/run-tests.sh b/run-tests.sh index 61b75872..492725da 100755 --- a/run-tests.sh +++ b/run-tests.sh @@ -6,11 +6,8 @@ for storage in ${GNOCCHI_TEST_STORAGE_DRIVERS} do for indexer in ${GNOCCHI_TEST_INDEXER_DRIVERS} do - storage_setup_script=./setup-${storage}-tests.sh - if [ ! 
-x "$storage_setup_script" ] - then - unset storage_setup_script - fi - GNOCCHI_TEST_STORAGE_DRIVER=$storage ./setup-${indexer}-tests.sh "${storage_setup_script}" ./tools/pretty_tox.sh $* + export GNOCCHI_TEST_INDEXER_DRIVER=$indexer + export GNOCCHI_TEST_STORAGE_DRIVER=$storage + ./setup-test-env.sh ./tools/pretty_tox.sh $* done done diff --git a/setup-influxdb-tests.sh b/setup-influxdb-tests.sh deleted file mode 100755 index 78296a31..00000000 --- a/setup-influxdb-tests.sh +++ /dev/null @@ -1,47 +0,0 @@ -#!/bin/bash -x - -wait_for_line () { - while read line - do - echo "$line" | grep -q "$1" && break - done < "$2" - # Read the fifo for ever otherwise process would block - cat "$2" >/dev/null & -} - -INFLUXDB_DATA=`mktemp -d /tmp/gnocchi-influxdb-XXXXX` -export GNOCCHI_TEST_INFLUXDB_PORT=51234 - -mkdir ${INFLUXDB_DATA}/{broker,data,meta,hh,wal} -mkfifo ${INFLUXDB_DATA}/out - -cat > $INFLUXDB_DATA/config < ${INFLUXDB_DATA}/out 2>&1 & -# Wait for InfluxDB to start listening to connections -wait_for_line "Listening on HTTP" ${INFLUXDB_DATA}/out -influx -port $GNOCCHI_TEST_INFLUXDB_PORT -execute "CREATE DATABASE test;" - - -$* - -ret=$? -kill $(jobs -p) -rm -rf "${INFLUXDB_DATA}" -exit $ret diff --git a/setup-mysql-tests.sh b/setup-mysql-tests.sh deleted file mode 100755 index 50feea3d..00000000 --- a/setup-mysql-tests.sh +++ /dev/null @@ -1,29 +0,0 @@ -#!/bin/bash -x -wait_for_line () { - while read line - do - echo "$line" | grep -q "$1" && break - done < "$2" - # Read the fifo for ever otherwise process would block - cat "$2" >/dev/null & -} - -# Start MySQL process for tests -MYSQL_DATA=`mktemp -d /tmp/gnocchi-mysql-XXXXX` -mkfifo ${MYSQL_DATA}/out -PATH=$PATH:/usr/libexec -mysqld --no-defaults --datadir=${MYSQL_DATA} --pid-file=${MYSQL_DATA}/mysql.pid --socket=${MYSQL_DATA}/mysql.socket --skip-networking --skip-grant-tables &> ${MYSQL_DATA}/out & -# Wait for MySQL to start listening to connections -wait_for_line "mysqld: ready for connections." 
${MYSQL_DATA}/out -export GNOCCHI_TEST_INDEXER_URL="mysql+pymysql://root@localhost/test?unix_socket=${MYSQL_DATA}/mysql.socket&charset=utf8" -mysql --no-defaults -S ${MYSQL_DATA}/mysql.socket -e 'CREATE DATABASE test;' - -mkdir $MYSQL_DATA/tooz -export GNOCCHI_COORDINATION_URL="mysql://root@localhost/test?unix_socket=${MYSQL_DATA}/mysql.socket&charset=utf8" - -$* - -ret=$? -kill $(jobs -p) -rm -rf "${MYSQL_DATA}" -exit $ret diff --git a/setup-postgresql-tests.sh b/setup-postgresql-tests.sh deleted file mode 100755 index 47487ee6..00000000 --- a/setup-postgresql-tests.sh +++ /dev/null @@ -1,19 +0,0 @@ -#!/bin/bash -x - -# Start PostgreSQL process for tests -PGSQL_DATA=`mktemp -d /tmp/gnocchi-psql-XXXXX` -PGSQL_PATH=`pg_config --bindir` -PGSQL_PORT=9824 -${PGSQL_PATH}/pg_ctl initdb -D ${PGSQL_DATA} -LANGUAGE=C ${PGSQL_PATH}/pg_ctl -w -D ${PGSQL_DATA} -o "-k ${PGSQL_DATA} -p ${PGSQL_PORT}" start > /dev/null -export GNOCCHI_TEST_INDEXER_URL="postgresql:///template1?host=${PGSQL_DATA}&port=${PGSQL_PORT}" - -mkdir $PGSQL_DATA/tooz -export GNOCCHI_COORDINATION_URL="${GNOCCHI_TEST_INDEXER_URL}" - -$* - -ret=$? 
-${PGSQL_PATH}/pg_ctl -w -D ${PGSQL_DATA} -o "-p $PGSQL_PORT" stop -rm -rf ${PGSQL_DATA} -exit $ret diff --git a/setup.cfg b/setup.cfg index d05103cc..5bd6cc1c 100644 --- a/setup.cfg +++ b/setup.cfg @@ -81,6 +81,8 @@ pre-hook.build_config = gnocchi.genconfig.prehook [files] packages = gnocchi +data_files = + etc/gnocchi = etc/gnocchi/* [entry_points] gnocchi.indexer.resources = @@ -97,19 +99,19 @@ gnocchi.indexer.resources = stack = gnocchi.indexer.sqlalchemy_base:ResourceExt image = gnocchi.indexer.sqlalchemy_extension:Image -gnocchi.controller.resources = - generic = gnocchi.rest:GenericResourcesController - instance = gnocchi.rest:InstancesResourcesController - instance_disk = gnocchi.rest:InstanceDisksResourcesController - instance_network_interface = gnocchi.rest:InstanceNetworkInterfacesResourcesController - swift_account = gnocchi.rest:SwiftAccountsResourcesController - volume = gnocchi.rest:VolumesResourcesController - ceph_account = gnocchi.rest:CephAccountsResourcesController - network = gnocchi.rest:NetworkResourcesController - identity = gnocchi.rest:IdentityResourcesController - ipmi = gnocchi.rest:IPMIResourcesController - stack = gnocchi.rest:StackResourcesController - image = gnocchi.rest:ImageResourcesController +gnocchi.controller.schemas = + generic = gnocchi.rest:GenericSchema + instance = gnocchi.rest:InstanceSchema + instance_disk = gnocchi.rest:InstanceDiskSchema + instance_network_interface = gnocchi.rest:InstanceNetworkInterfaceSchema + swift_account = gnocchi.rest:GenericSchema + volume = gnocchi.rest:VolumeSchema + ceph_account = gnocchi.rest:GenericSchema + network = gnocchi.rest:GenericSchema + identity = gnocchi.rest:GenericSchema + ipmi = gnocchi.rest:GenericSchema + stack = gnocchi.rest:GenericSchema + image = gnocchi.rest:ImageSchema gnocchi.storage = null = gnocchi.storage.null:NullStorage @@ -129,12 +131,10 @@ gnocchi.aggregates = console_scripts = gnocchi-api = gnocchi.cli:api - gnocchi-dbsync = gnocchi.cli:storage_dbsync + 
gnocchi-upgrade = gnocchi.cli:upgrade gnocchi-statsd = gnocchi.cli:statsd gnocchi-metricd = gnocchi.cli:metricd - carbonara-create = gnocchi.carbonara:create_archive_file carbonara-dump = gnocchi.carbonara:dump_archive_file - carbonara-update = gnocchi.carbonara:update_archive_file oslo.config.opts = gnocchi = gnocchi.opts:list_opts diff --git a/test-requirements.txt b/test-requirements.txt deleted file mode 100644 index 713355df..00000000 --- a/test-requirements.txt +++ /dev/null @@ -1,16 +0,0 @@ -gabbi>=0.101.2 -coverage>=3.6 -fixtures -mock -oslotest -sphinx -python-subunit>=0.0.18 -tempest-lib>=0.2.0 -testrepository -testscenarios -testtools>=0.9.38 -WebTest>=2.0.16 -doc8 -sphinxcontrib-httpdomain -influxdb>=2.4 -sysv_ipc diff --git a/tox.ini b/tox.ini index 18e5e0b1..31ed496d 100644 --- a/tox.ini +++ b/tox.ini @@ -6,23 +6,25 @@ envlist = py{27,34},py{27,34}-{postgresql,mysql}{,-file,-swift,-ceph,-influxdb}, usedevelop = True sitepackages = False passenv = LANG OS_TEST_TIMEOUT OS_STDOUT_CAPTURE OS_STDERR_CAPTURE OS_LOG_CAPTURE GNOCCHI_TEST_* -deps = -r{toxinidir}/requirements.txt - -r{toxinidir}/test-requirements.txt +deps = .[test] + py{27,34}-postgresql: .[postgresql,swift,ceph,file] + py{27,34}-mysql: .[mysql,swift,ceph,file] + py{27,34}-{postgresql,mysql}-influxdb: .[influxdb] setenv = + GNOCCHI_TEST_STORAGE_DRIVER=file + GNOCCHI_TEST_INDEXER_DRIVER=postgresql GNOCCHI_TEST_STORAGE_DRIVERS=file swift ceph GNOCCHI_TEST_INDEXER_DRIVERS=postgresql mysql - py{27,34}-postgresql: GNOCCHI_TEST_INDEXER_DRIVERS=postgresql - py{27,34}-mysql: GNOCCHI_TEST_INDEXER_DRIVERS=mysql py{27,34}-{postgresql,mysql}-file: GNOCCHI_TEST_STORAGE_DRIVERS=file py{27,34}-{postgresql,mysql}-swift: GNOCCHI_TEST_STORAGE_DRIVERS=swift py{27,34}-{postgresql,mysql}-ceph: GNOCCHI_TEST_STORAGE_DRIVERS=ceph py{27,34}-{postgresql,mysql}-influxdb: GNOCCHI_TEST_STORAGE_DRIVERS=influxdb - py{27,34}-postgresql-{file,swift,ceph,influxdb}: GNOCCHI_TEST_INDEXER_DRIVERS=postgresql - 
py{27,34}-mysql-{file,swift,ceph,influxdb}: GNOCCHI_TEST_INDEXER_DRIVERS=mysql + py{27,34}-postgresql{,-file,-swift,-ceph,-influxdb}: GNOCCHI_TEST_INDEXER_DRIVERS=postgresql + py{27,34}-mysql{,-file,-swift,-ceph,-influxdb}: GNOCCHI_TEST_INDEXER_DRIVERS=mysql commands = doc8 --ignore-path doc/source/rest.rst doc/source - oslo-config-generator --config-file=etc/gnocchi/gnocchi-config-generator.conf + oslo-config-generator --config-file=gnocchi-config-generator.conf {toxinidir}/run-tests.sh {posargs} [testenv:bashate] @@ -36,7 +38,7 @@ commands = flake8 [testenv:py27-gate] setenv = OS_TEST_PATH=gnocchi/tests/gabbi - GABBI_LIVE_FAIL_IF_NO_TEST=1 + GABBI_LIVE=1 passenv = {[testenv]passenv} GNOCCHI_SERVICE* sitepackages = True basepython = python2.7 @@ -44,29 +46,38 @@ commands = {toxinidir}/tools/pretty_tox.sh '{posargs}' # This target provides a shortcut to running just the gabbi tests. [testenv:py27-gabbi] +deps = .[test,postgresql,file] setenv = OS_TEST_PATH=gnocchi/tests/gabbi basepython = python2.7 -commands = {toxinidir}/setup-mysql-tests.sh {toxinidir}/tools/pretty_tox.sh '{posargs}' +commands = {toxinidir}/setup-test-env.sh {toxinidir}/tools/pretty_tox.sh '{posargs}' [testenv:py27-cover] -commands = {toxinidir}/setup-mysql-tests.sh python setup.py testr --coverage --testr-args="{posargs}" +commands = {toxinidir}/setup-test-env.sh python setup.py testr --coverage --testr-args="{posargs}" [testenv:venv] -setenv = GNOCCHI_TEST_STORAGE_DRIVER=file -commands = {toxinidir}/setup-postgresql-tests.sh {posargs} +# This is used by the doc job on the gate +deps = {[testenv:docs]deps} +commands = {toxinidir}/setup-test-env.sh {posargs} [flake8] exclude = .tox,.eggs,doc show-source = true [testenv:genconfig] -commands = oslo-config-generator --config-file=etc/gnocchi/gnocchi-config-generator.conf +deps = .[mysql,postgresql,test,file,influxdb,ceph,swift] +commands = oslo-config-generator --config-file=gnocchi-config-generator.conf [testenv:docs] +# This does not work, see: 
https://bitbucket.org/hpk42/tox/issues/302 +# deps = {[testenv]deps} +# .[doc] +deps = .[test,postgresql,file,doc] setenv = GNOCCHI_TEST_STORAGE_DRIVER=file + GNOCCHI_TEST_INDEXER_DRIVER=postgresql commands = doc8 --ignore-path doc/source/rest.rst doc/source - {toxinidir}/setup-postgresql-tests.sh python setup.py build_sphinx + {toxinidir}/setup-test-env.sh python setup.py build_sphinx [testenv:docs-gnocchi.xyz] -setenv = GNOCCHI_TEST_STORAGE_DRIVER=file -commands = {toxinidir}/setup-postgresql-tests.sh sphinx-build -D html_theme=bootstrap doc/source doc/build +deps = .[file,postgresql,test,doc] + sphinx_rtd_theme +commands = {toxinidir}/setup-test-env.sh sphinx-build -D html_theme=sphinx_rtd_theme doc/source doc/build/html -- GitLab From b9ac1221e775a656b4e740a07bf9e722dcb4e23e Mon Sep 17 00:00:00 2001 From: Thomas Goirand Date: Wed, 9 Mar 2016 14:19:20 +0100 Subject: [PATCH 0114/1483] Fixed (build-)depends for this release. --- debian/changelog | 1 + debian/control | 8 ++++---- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/debian/changelog b/debian/changelog index 9d3fffac..6f9c35d1 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,6 +1,7 @@ gnocchi (2.0.2-1) experimental; urgency=medium * New upstream release. + * Fixed (build-)depends for this release. 
-- Thomas Goirand Wed, 09 Mar 2016 14:05:32 +0100 diff --git a/debian/control b/debian/control index 36fe3d9a..62abfacc 100644 --- a/debian/control +++ b/debian/control @@ -20,7 +20,6 @@ Build-Depends-Indep: alembic (>= 0.7.6), python-fixtures, python-future (>= 0.15), python-gabbi (>= 1), - python-jinja2, python-jsonpatch (>= 1.9), python-keystoneclient (>= 1:1.6.0), python-keystonemiddleware (>= 4.0.0), @@ -38,6 +37,7 @@ Build-Depends-Indep: alembic (>= 0.7.6), python-oslosphinx (>= 2.2.0.0), python-oslotest, python-pandas (>= 0.17), + python-paste, python-pastedeploy, python-pecan (>= 0.9), python-prettytable, @@ -57,7 +57,7 @@ Build-Depends-Indep: alembic (>= 0.7.6), python-tempest-lib (>= 0.2.0), python-testscenarios, python-testtools (>= 0.9.38), - python-tooz (>= 0.13.1), + python-tooz (>= 1.30), python-trollius, python-voluptuous, python-webob (>= 1.4.1), @@ -76,7 +76,6 @@ Architecture: all Depends: alembic (>= 0.7.6), python-concurrent.futures (>= 2.1.6), python-future (>= 0.15), - python-jinja2, python-jsonpatch (>= 1.9), python-keystoneclient (>= 1:1.6.0), python-keystonemiddleware (>= 4.0.0), @@ -91,6 +90,7 @@ Depends: alembic (>= 0.7.6), python-oslo.utils (>= 1.6.0), python-oslosphinx (>= 2.2.0.0), python-pandas (>= 0.17), + python-paste, python-pastedeploy, python-pecan (>= 0.9), python-prettytable, @@ -104,7 +104,7 @@ Depends: alembic (>= 0.7.6), python-sqlalchemy-utils, python-stevedore, python-swiftclient (>= 2.5.0), - python-tooz (>= 0.13.1), + python-tooz (>= 1.30), python-trollius, python-voluptuous, python-webob (>= 1.4.1), -- GitLab From 9e28f35a2bfd8f9651ad4f5da7caa194c166bac4 Mon Sep 17 00:00:00 2001 From: zhangguoqing Date: Fri, 11 Mar 2016 08:56:03 +0000 Subject: [PATCH 0115/1483] Add note to the docs regarding archive-policy deletion. 
Change-Id: I973ebb7bf65e2774213b5f8ad970e180c20838a1 Closes-Bug: #1545696 --- doc/source/rest.j2 | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/doc/source/rest.j2 b/doc/source/rest.j2 index 52e99817..4fb6e3d2 100644 --- a/doc/source/rest.j2 +++ b/doc/source/rest.j2 @@ -161,6 +161,10 @@ It is also possible to list archive policies: It is possible to delete an archive policy if it is not used by any metric: +.. note:: + Archive policy cannot be deleted until all metrics associated with it + ain't removed by metricd daemon. + {{ scenarios['delete-archive-policy']['doc'] }} Archive Policy Rule -- GitLab From 20761c57948efdac4632bc761bfe7c76efe70aa1 Mon Sep 17 00:00:00 2001 From: Thomas Goirand Date: Fri, 11 Mar 2016 15:41:16 +0000 Subject: [PATCH 0116/1483] (build-)depends on python-tooz >= 1.34 --- debian/control | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/debian/control b/debian/control index 62abfacc..6114932b 100644 --- a/debian/control +++ b/debian/control @@ -57,7 +57,7 @@ Build-Depends-Indep: alembic (>= 0.7.6), python-tempest-lib (>= 0.2.0), python-testscenarios, python-testtools (>= 0.9.38), - python-tooz (>= 1.30), + python-tooz (>= 1.34), python-trollius, python-voluptuous, python-webob (>= 1.4.1), @@ -104,7 +104,7 @@ Depends: alembic (>= 0.7.6), python-sqlalchemy-utils, python-stevedore, python-swiftclient (>= 2.5.0), - python-tooz (>= 1.30), + python-tooz (>= 1.34), python-trollius, python-voluptuous, python-webob (>= 1.4.1), -- GitLab From e8f3d2b9ce27f766fcb7803059d4b1c4658f970d Mon Sep 17 00:00:00 2001 From: Thomas Goirand Date: Fri, 11 Mar 2016 15:44:56 +0000 Subject: [PATCH 0117/1483] Rename again GNOCCHI_INDEXER_URL env var (upstream constantly changes it). 
--- debian/changelog | 1 + debian/rules | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/debian/changelog b/debian/changelog index 6f9c35d1..c432669c 100644 --- a/debian/changelog +++ b/debian/changelog @@ -2,6 +2,7 @@ gnocchi (2.0.2-1) experimental; urgency=medium * New upstream release. * Fixed (build-)depends for this release. + * Rename again GNOCCHI_INDEXER_URL env var (upstream constantly changes it). -- Thomas Goirand Wed, 09 Mar 2016 14:05:32 +0100 diff --git a/debian/rules b/debian/rules index 396b75bd..1b9f1daa 100755 --- a/debian/rules +++ b/debian/rules @@ -56,7 +56,7 @@ ifeq (,$(findstring nocheck, $(DEB_BUILD_OPTIONS))) export PGHOST=$$PG_MYTMPDIR ; \ chmod +x debian/start_pg.sh ; \ debian/start_pg.sh $$PG_MYTMPDIR ; \ - export GNOCCHI_TEST_INDEXER_URL="postgresql:///template1?host=$$PG_MYTMPDIR&port=9823" ; \ + export GNOCCHI_INDEXER_URL="postgresql:///template1?host=$$PG_MYTMPDIR&port=9823" ; \ export GNOCCHI_TEST_STORAGE_DRIVER=file ; \ echo "===> Testing with python$$i (python$$PYMAJOR)" ; \ rm -rf .testrepository ; \ -- GitLab From c693743b1f78e74674ec71509a08d5eb9ed3ed69 Mon Sep 17 00:00:00 2001 From: Thomas Goirand Date: Fri, 11 Mar 2016 15:55:20 +0000 Subject: [PATCH 0118/1483] Standards-Version: 3.9.7 --- debian/changelog | 1 + debian/control | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/debian/changelog b/debian/changelog index c432669c..0dfc5368 100644 --- a/debian/changelog +++ b/debian/changelog @@ -3,6 +3,7 @@ gnocchi (2.0.2-1) experimental; urgency=medium * New upstream release. * Fixed (build-)depends for this release. * Rename again GNOCCHI_INDEXER_URL env var (upstream constantly changes it). + * Standards-Version: 3.9.7 (no change). 
-- Thomas Goirand Wed, 09 Mar 2016 14:05:32 +0100 diff --git a/debian/control b/debian/control index 6114932b..29127679 100644 --- a/debian/control +++ b/debian/control @@ -66,7 +66,7 @@ Build-Depends-Indep: alembic (>= 0.7.6), python-yaml, subunit (>= 0.0.18), testrepository, -Standards-Version: 3.9.6 +Standards-Version: 3.9.7 Vcs-Browser: https://anonscm.debian.org/gitweb/?p=openstack/python-gnocchi.git Vcs-Git: https://anonscm.debian.org/git/openstack/python-gnocchi.git Homepage: https://github.com/openstack/gnocchi -- GitLab From e2f5cb572ad8814aaae7e20c7964d8d484c29b8d Mon Sep 17 00:00:00 2001 From: Thomas Goirand Date: Mon, 14 Mar 2016 14:13:16 +0100 Subject: [PATCH 0119/1483] Added missing build-depends: python-lz4 --- debian/changelog | 6 ++++++ debian/control | 2 ++ 2 files changed, 8 insertions(+) diff --git a/debian/changelog b/debian/changelog index 0dfc5368..ca0eeb6d 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +gnocchi (2.0.2-2) UNRELEASED; urgency=medium + + * Added missing (build-)depends: python-lz4. + + -- Thomas Goirand Mon, 14 Mar 2016 14:12:42 +0100 + gnocchi (2.0.2-1) experimental; urgency=medium * New upstream release. 
diff --git a/debian/control b/debian/control index 29127679..4b6bb430 100644 --- a/debian/control +++ b/debian/control @@ -23,6 +23,7 @@ Build-Depends-Indep: alembic (>= 0.7.6), python-jsonpatch (>= 1.9), python-keystoneclient (>= 1:1.6.0), python-keystonemiddleware (>= 4.0.0), + python-lz4, python-mock, python-msgpack, python-mysqldb, @@ -79,6 +80,7 @@ Depends: alembic (>= 0.7.6), python-jsonpatch (>= 1.9), python-keystoneclient (>= 1:1.6.0), python-keystonemiddleware (>= 4.0.0), + python-lz4, python-msgpack, python-numpy, python-oslo.config (>= 1:2.6.0), -- GitLab From 9086e8ce09fd580cefc87cf41a8c677f135e4a50 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Mon, 14 Mar 2016 16:42:26 +0100 Subject: [PATCH 0120/1483] workaround to _strptime import issue on py2 This is a workaround to avoid to trigger: http://bugs.python.org/issue7980 Change-Id: I154d4eca3f5c9350df169b931df3bd5154a487b7 Closes-bug: #1557021 --- gnocchi/carbonara.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/gnocchi/carbonara.py b/gnocchi/carbonara.py index d4e994b9..0cac5d95 100644 --- a/gnocchi/carbonara.py +++ b/gnocchi/carbonara.py @@ -22,6 +22,7 @@ import logging import numbers import operator import re +import time import iso8601 import lz4 @@ -31,6 +32,11 @@ import six from gnocchi import utils +# NOTE(sileht): pandas relies on time.strptime() +# and often triggers http://bugs.python.org/issue7980 +# its dues to our heavy threads usage, this is the workaround +# to ensure the module is correctly loaded before we use really it. +time.strptime("2016-02-19", "%Y-%m-%d") LOG = logging.getLogger(__name__) -- GitLab From 00c2972d2386c6d21c9c86ca0e2f87acadbdc148 Mon Sep 17 00:00:00 2001 From: gordon chung Date: Tue, 15 Mar 2016 17:01:30 +0000 Subject: [PATCH 0121/1483] Revert "Log retrieve/store data speed in Carbonara based drivers" This reverts commit edf9fb363d844b868d1cce5b9aef49b909784c99. 
remove super granular logging as it is flooding logs where it's growing at hundreds/thousands MB in seconds/minutes. this seems to be logging for an extremely exceptional scenario where such granular logging is required and is not valuable for the vast majority of time. Change-Id: Ief11515cedc9f61eee1397226998b53b0b2c1a60 --- gnocchi/storage/_carbonara.py | 37 ++++++++++++----------------------- 1 file changed, 12 insertions(+), 25 deletions(-) diff --git a/gnocchi/storage/_carbonara.py b/gnocchi/storage/_carbonara.py index 21a83ee8..e1b33851 100644 --- a/gnocchi/storage/_carbonara.py +++ b/gnocchi/storage/_carbonara.py @@ -201,34 +201,21 @@ class CarbonaraBasedStorage(storage.StorageDriver): def _add_measures(self, aggregation, archive_policy_def, metric, timeserie): - with timeutils.StopWatch() as sw: - ts = self._get_measures_timeserie(metric, aggregation, - archive_policy_def.granularity, - timeserie.first, timeserie.last) - LOG.debug("Retrieve measures" - "for %s/%s/%s in %.2fs" - % (metric.id, aggregation, archive_policy_def. 
- granularity, sw.elapsed())) + ts = self._get_measures_timeserie(metric, aggregation, + archive_policy_def.granularity, + timeserie.first, timeserie.last) ts.update(timeserie) - with timeutils.StopWatch() as sw: - for key, split in ts.split(): - self._store_metric_measures(metric, key, aggregation, - archive_policy_def.granularity, - split.serialize()) - LOG.debug("Store measures for %s/%s/%s in %.2fs" - % (metric.id, aggregation, - archive_policy_def.granularity, sw.elapsed())) + for key, split in ts.split(): + self._store_metric_measures(metric, key, aggregation, + archive_policy_def.granularity, + split.serialize()) if ts.last and archive_policy_def.timespan: - with timeutils.StopWatch() as sw: - oldest_point_to_keep = ts.last - datetime.timedelta( - seconds=archive_policy_def.timespan) - self._delete_metric_measures_before( - metric, aggregation, archive_policy_def.granularity, - oldest_point_to_keep) - LOG.debug("Expire measures for %s/%s/%s in %.2fs" - % (metric.id, aggregation, - archive_policy_def.granularity, sw.elapsed())) + oldest_point_to_keep = ts.last - datetime.timedelta( + seconds=archive_policy_def.timespan) + self._delete_metric_measures_before( + metric, aggregation, archive_policy_def.granularity, + oldest_point_to_keep) def add_measures(self, metric, measures): self._store_measures(metric, msgpackutils.dumps( -- GitLab From ac64064ba57c0774f8c9e23a4240b731940c3dcc Mon Sep 17 00:00:00 2001 From: Lianhao Lu Date: Thu, 17 Mar 2016 18:05:19 +0800 Subject: [PATCH 0122/1483] Added docs about new snmp related resource types Added docs about the newly added snmp related resource types: host, host_disk, host_network_interface. 
Change-Id: I3f638fc4332a992d7238444c1b41bcb00268d047 --- doc/source/resource_types.rst | 34 ++++++++++++++++++++++++++++++++++ 1 file changed, 34 insertions(+) diff --git a/doc/source/resource_types.rst b/doc/source/resource_types.rst index 0409572d..17073bf0 100644 --- a/doc/source/resource_types.rst +++ b/doc/source/resource_types.rst @@ -106,3 +106,37 @@ volume +==============+=========+===========+ | display_name | String | No | +--------------+---------+-----------+ + + +host +==== + ++--------------+---------+-----------+ +| Attribute | Type | Immutable | ++==============+=========+===========+ +| host_name | String | No | ++--------------+---------+-----------+ + + +host_disk +========= + ++--------------+---------+-----------+ +| Attribute | Type | Immutable | ++==============+=========+===========+ +| host_name | String | No | ++--------------+---------+-----------+ +| device_name | String | No | ++------------------------------------+ + + +host_network_interface +====================== + ++--------------+---------+-----------+ +| Attribute | Type | Immutable | ++==============+=========+===========+ +| host_name | String | No | ++--------------+---------+-----------+ +| device_name | String | No | ++------------------------------------+ -- GitLab From c75174d6e53acc824f128962408fdf10b93b3cac Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Fri, 18 Mar 2016 17:45:10 +0100 Subject: [PATCH 0123/1483] Fix an IN-predicate SAWarning Some times sqlalchemy complains about: The IN-predicate on "metric.id" was invoked with an empty sequence. This is due when ids in list_metrics are a empty tuple() or set(). This change fixes that. 
Change-Id: Iabe79862c73a296c28752e54cd4e46d624ac4727 --- gnocchi/indexer/sqlalchemy.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/gnocchi/indexer/sqlalchemy.py b/gnocchi/indexer/sqlalchemy.py index d0da8805..6ddef6c1 100644 --- a/gnocchi/indexer/sqlalchemy.py +++ b/gnocchi/indexer/sqlalchemy.py @@ -243,7 +243,7 @@ class SQLAlchemyIndexer(indexer.IndexerDriver): def list_metrics(self, names=None, ids=None, details=False, status='active', **kwargs): - if ids == []: + if ids and len(ids) == 0: return [] with self.facade.independent_reader() as session: q = session.query(Metric).filter( -- GitLab From 996ade6c8a5d539f9de660f1c8290d31b64f2765 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Fri, 18 Mar 2016 12:36:22 +0100 Subject: [PATCH 0124/1483] tools: Add tools to quickly add measurements Change-Id: I743f0b33f014411d7ac07d0c11f40870413bed27 --- devstack/gate/post_test_hook.sh | 3 ++ tools/measures_injector.py | 60 +++++++++++++++++++++++++++++++++ 2 files changed, 63 insertions(+) create mode 100755 tools/measures_injector.py diff --git a/devstack/gate/post_test_hook.sh b/devstack/gate/post_test_hook.sh index 1cb80da3..3e0c6b52 100755 --- a/devstack/gate/post_test_hook.sh +++ b/devstack/gate/post_test_hook.sh @@ -43,6 +43,9 @@ export GNOCCHI_SERVICE_URL=$(openstack catalog show metric -c endpoints -f value curl -X GET ${GNOCCHI_SERVICE_URL}/v1/archive_policy -H "Content-Type: application/json" +# Just ensure tools still works +gnocchi metric create +sudo -E -H -u stack $GNOCCHI_DIR/tools/measures_injector.py --metrics 1 --batch-of-measures 2 --measures-per-batch 2 # Run tests echo "Running gnocchi functional test suite" diff --git a/tools/measures_injector.py b/tools/measures_injector.py new file mode 100755 index 00000000..d4b0a582 --- /dev/null +++ b/tools/measures_injector.py @@ -0,0 +1,60 @@ +#!/usr/bin/env python +# Copyright (c) 2016 Red Hat +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this 
file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import datetime +import random + +from concurrent import futures +from oslo_config import cfg +import six + +from gnocchi import indexer +from gnocchi import service +from gnocchi import storage +from gnocchi import utils + + +def injector(): + conf = cfg.ConfigOpts() + conf.register_cli_opts([ + cfg.IntOpt("metrics", default=None), + cfg.IntOpt("batch-of-measures", default=1000), + cfg.IntOpt("measures-per-batch", default=10), + ]) + conf = service.prepare_service(conf=conf) + index = indexer.get_driver(conf) + index.connect() + s = storage.get_driver(conf) + + metrics = index.list_metrics() + if conf.metrics: + metrics = metrics[:conf.metrics] + + def todo(metric): + for _ in six.moves.range(conf.batch_of_measures): + measures = [ + storage.Measure(utils.to_timestamp(datetime.datetime.now()), + random.random()) + for __ in six.moves.range(conf.measures_per_batch)] + s.add_measures(metric, measures) + + with futures.ThreadPoolExecutor(max_workers=len(metrics)) as executor: + # We use 'list' to iterate all threads here to raise the first + # exception now, not much choice + list(executor.map(todo, metrics)) + + +if __name__ == '__main__': + injector() -- GitLab From 27128183a405c9d9f88745642883250ade33923d Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Sun, 13 Mar 2016 09:09:03 +0100 Subject: [PATCH 0125/1483] devstack: allow uwsgi deployments This change allows to use uwsgi as web server, and uses uwsgi instead of apache in gate jobs. 
Change-Id: I0683fec4df74f6700d404f94178e76d44b85e568 --- devstack/gate/gate_hook.sh | 4 +++ devstack/plugin.sh | 66 +++++++++++++++++++++++++++++++++++--- devstack/settings | 8 +++-- 3 files changed, 71 insertions(+), 7 deletions(-) diff --git a/devstack/gate/gate_hook.sh b/devstack/gate/gate_hook.sh index 905f8fa5..85633b0d 100755 --- a/devstack/gate/gate_hook.sh +++ b/devstack/gate/gate_hook.sh @@ -19,6 +19,10 @@ SQL_DRIVER="$2" ENABLED_SERVICES="key,gnocchi-api,gnocchi-metricd," +# Use efficient wsgi web server +DEVSTACK_LOCAL_CONFIG+=$'\nexport GNOCCHI_DEPLOY=uwsgi' +DEVSTACK_LOCAL_CONFIG+=$'\nexport KEYSTONE_DEPLOY=uwsgi' + export DEVSTACK_GATE_INSTALL_TESTONLY=1 export DEVSTACK_GATE_NO_SERVICES=1 export DEVSTACK_GATE_TEMPEST=0 diff --git a/devstack/plugin.sh b/devstack/plugin.sh index 4c91e7a1..5bf21e66 100644 --- a/devstack/plugin.sh +++ b/devstack/plugin.sh @@ -38,6 +38,23 @@ set -o xtrace GITDIR["python-gnocchiclient"]=$DEST/python-gnocchiclient GITREPO["python-gnocchiclient"]=${GNOCCHICLIENT_REPO:-${GIT_BASE}/openstack/python-gnocchiclient.git} +if [ -z "$GNOCCHI_DEPLOY" ]; then + # Default + GNOCCHI_DEPLOY=werkzeug + + # Fallback to common wsgi devstack configuration + if [ "$ENABLE_HTTPD_MOD_WSGI_SERVICES" == "True" ]; then + GNOCCHI_DEPLOY=mod_wsgi + + # Deprecated config + elif [ -n "$GNOCCHI_USE_MOD_WSGI" ] ; then + echo_summary "GNOCCHI_USE_MOD_WSGI is deprecated, use GNOCCHI_DEPLOY instead" + if [ "$GNOCCHI_USE_MOD_WSGI" == True ]; then + GNOCCHI_DEPLOY=mod_wsgi + fi + fi +fi + # Functions # --------- @@ -209,7 +226,7 @@ function _config_gnocchi_apache_wsgi { # cleanup_gnocchi() - Remove residual data files, anything left over from previous # runs that a clean run would need to clean up function cleanup_gnocchi { - if [ "$GNOCCHI_USE_MOD_WSGI" == "True" ]; then + if [ "$GNOCCHI_DEPLOY" == "mod_wsgi" ]; then _cleanup_gnocchi_apache_wsgi fi } @@ -227,6 +244,16 @@ function configure_gnocchi { cp $GNOCCHI_DIR/etc/gnocchi/* $GNOCCHI_CONF_DIR + # Set up 
logging + if [ "$SYSLOG" != "False" ]; then + iniset $GNOCCHI_CONF DEFAULT use_syslog "True" + fi + + # Format logging + if [ "$LOG_COLOR" == "True" ] && [ "$SYSLOG" == "False" ] && [ "$GNOCCHI_DEPLOY" != "mod_wsgi" ]; then + setup_colorized_logging $GNOCCHI_CONF DEFAULT + fi + if [ -n "$GNOCCHI_COORDINATOR_URL" ]; then iniset $GNOCCHI_CONF storage coordination_url "$GNOCCHI_COORDINATOR_URL" fi @@ -280,8 +307,33 @@ function configure_gnocchi { # Configure the indexer database iniset $GNOCCHI_CONF indexer url `database_connection_url gnocchi` - if [ "$GNOCCHI_USE_MOD_WSGI" == "True" ]; then + if [ "$GNOCCHI_DEPLOY" == "mod_wsgi" ]; then _config_gnocchi_apache_wsgi + elif [ "$GNOCCHI_DEPLOY" == "uwsgi" ]; then + # iniset creates these files when it's called if they don't exist. + GNOCCHI_UWSGI_FILE=$GNOCCHI_CONF_DIR/gnocchi-uwsgi.ini + + rm -f "$GNOCCHI_UWSGI_FILE" + + iniset "$GNOCCHI_UWSGI_FILE" uwsgi http $GNOCCHI_SERVICE_HOST:$GNOCCHI_SERVICE_PORT + iniset "$GNOCCHI_UWSGI_FILE" uwsgi wsgi-file "$GNOCCHI_DIR/gnocchi/rest/app.wsgi" + # This is running standalone + iniset "$GNOCCHI_UWSGI_FILE" uwsgi master true + # Set die-on-term & exit-on-reload so that uwsgi shuts down + iniset "$GNOCCHI_UWSGI_FILE" uwsgi die-on-term true + iniset "$GNOCCHI_UWSGI_FILE" uwsgi exit-on-reload true + iniset "$GNOCCHI_UWSGI_FILE" uwsgi threads 32 + iniset "$GNOCCHI_UWSGI_FILE" uwsgi processes $API_WORKERS + iniset "$GNOCCHI_UWSGI_FILE" uwsgi enable-threads true + iniset "$GNOCCHI_UWSGI_FILE" uwsgi plugins python + # uwsgi recommends this to prevent thundering herd on accept. + iniset "$GNOCCHI_UWSGI_FILE" uwsgi thunder-lock true + # Override the default size for headers from the 4k default. + iniset "$GNOCCHI_UWSGI_FILE" uwsgi buffer-size 65535 + # Make sure the client doesn't try to re-use the connection. 
+ iniset "$GNOCCHI_UWSGI_FILE" uwsgi add-header "Connection: close" + # Don't share rados resources and python-requests globals between processes + iniset "$GNOCCHI_UWSGI_FILE" uwsgi lazy-apps true fi } @@ -352,8 +404,10 @@ function install_gnocchi { # We don't use setup_package because we don't follow openstack/requirements sudo -H pip install -e "$GNOCCHI_DIR"[test,$GNOCCHI_STORAGE_BACKEND,${DATABASE_TYPE}${EXTRA_FLAVOR}] - if [ "$GNOCCHI_USE_MOD_WSGI" == "True" ]; then + if [ "$GNOCCHI_DEPLOY" == "mod_wsgi" ]; then install_apache_wsgi + elif [ "$GNOCCHI_DEPLOY" == "uwsgi" ]; then + pip_install uwsgi fi # Create configuration directory @@ -364,7 +418,7 @@ function install_gnocchi { # start_gnocchi() - Start running processes, including screen function start_gnocchi { - if [ "$GNOCCHI_USE_MOD_WSGI" == "True" ]; then + if [ "$GNOCCHI_DEPLOY" == "mod_wsgi" ]; then enable_apache_site gnocchi restart_apache_server if [[ -n $GNOCCHI_SERVICE_PORT ]]; then @@ -378,6 +432,8 @@ function start_gnocchi { tail_log gnocchi /var/log/$APACHE_NAME/error[_\.]log tail_log gnocchi-api /var/log/$APACHE_NAME/access[_\.]log fi + elif [ "$GNOCCHI_DEPLOY" == "uwsgi" ]; then + run_process gnocchi-api "$GNOCCHI_BIN_DIR/uwsgi $GNOCCHI_UWSGI_FILE" else run_process gnocchi-api "$GNOCCHI_BIN_DIR/gnocchi-api -d -v --config-file $GNOCCHI_CONF" fi @@ -405,7 +461,7 @@ function start_gnocchi { # stop_gnocchi() - Stop running processes function stop_gnocchi { - if [ "$GNOCCHI_USE_MOD_WSGI" == "True" ]; then + if [ "$GNOCCHI_DEPLOY" == "mod_wsgi" ]; then disable_apache_site gnocchi restart_apache_server fi diff --git a/devstack/settings b/devstack/settings index 2a74165e..1b473dae 100644 --- a/devstack/settings +++ b/devstack/settings @@ -12,8 +12,12 @@ GNOCCHI_AUTH_CACHE_DIR=${GNOCCHI_AUTH_CACHE_DIR:-/var/cache/gnocchi} GNOCCHI_WSGI_DIR=${GNOCCHI_WSGI_DIR:-/var/www/gnocchi} GNOCCHI_DATA_DIR=${GNOCCHI_DATA_DIR:-${DATA_DIR}/gnocchi} -# Toggle for deploying Gnocchi under HTTPD + mod_wsgi 
-GNOCCHI_USE_MOD_WSGI=${GNOCCHI_USE_MOD_WSGI:-${ENABLE_HTTPD_MOD_WSGI_SERVICES}} +# GNOCCHI_DEPLOY defines how Gnocchi is deployed, allowed values: +# - mod_wsgi : Run Gnocchi under Apache HTTPd mod_wsgi +# - werkzeug : Run gnocchi-api +# - uwsgi : Run Gnocchi under uwsgi +# - : Fallback to GNOCCHI_USE_MOD_WSGI or ENABLE_HTTPD_MOD_WSGI_SERVICES +GNOCCHI_DEPLOY=${GNOCCHI_DEPLOY} # Support potential entry-points console scripts and venvs if [[ ${USE_VENV} = True ]]; then -- GitLab From cc27c5d38fb8c0224d1403fdfaa6363258931748 Mon Sep 17 00:00:00 2001 From: gordon chung Date: Tue, 22 Mar 2016 09:58:05 -0400 Subject: [PATCH 0126/1483] clean up doc phrasing Change-Id: I6469801035fd6db01ff92562b68568614b1e967a --- doc/source/rest.j2 | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/doc/source/rest.j2 b/doc/source/rest.j2 index 4fb6e3d2..b8990489 100644 --- a/doc/source/rest.j2 +++ b/doc/source/rest.j2 @@ -161,11 +161,13 @@ It is also possible to list archive policies: It is possible to delete an archive policy if it is not used by any metric: +{{ scenarios['delete-archive-policy']['doc'] }} + .. note:: - Archive policy cannot be deleted until all metrics associated with it - ain't removed by metricd daemon. -{{ scenarios['delete-archive-policy']['doc'] }} + An archive policy cannot be deleted until all metrics associated with it + are removed by a metricd daemon. + Archive Policy Rule =================== -- GitLab From 346011bd5fb90aaf6f5c4882c2b3b58e8fd980b0 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Fri, 18 Mar 2016 13:50:29 +0100 Subject: [PATCH 0127/1483] Use omap insteads of xattr This change move the ingestion parts of ceph driver from xattr to omap. This allows to configure the OSDs behavior on objects. 
Closes-bug: #1557724 Change-Id: Ib9b10ed93b0102eb0016b4f4786cda54715aff69 --- gnocchi/storage/ceph.py | 114 ++++++++++++++++++++++++++++++---------- gnocchi/tests/base.py | 98 +++++++++++++++++++++++++++------- 2 files changed, 166 insertions(+), 46 deletions(-) diff --git a/gnocchi/storage/ceph.py b/gnocchi/storage/ceph.py index 765991f6..560559dd 100644 --- a/gnocchi/storage/ceph.py +++ b/gnocchi/storage/ceph.py @@ -16,6 +16,7 @@ from collections import defaultdict import contextlib import datetime +import errno import itertools import logging import uuid @@ -56,7 +57,6 @@ OPTS = [ class CephStorage(_carbonara.CarbonaraBasedStorage): - def __init__(self, conf): super(CephStorage, self).__init__(conf) self.pool = conf.ceph_pool @@ -69,6 +69,11 @@ class CephStorage(_carbonara.CarbonaraBasedStorage): if not rados: raise ImportError("No module named 'rados' nor 'cradox'") + if not hasattr(rados, 'OmapIterator'): + raise ImportError("Your rados python module does not support " + "omap feature. Upgrade 'python-rados' or " + "install 'cradox'") + LOG.info("Ceph storage backend use '%s' python library" % RADOS_MODULE_NAME) @@ -81,12 +86,52 @@ class CephStorage(_carbonara.CarbonaraBasedStorage): conf=options) self.rados.connect() + # NOTE(sileht): constants can't be class attributes because + # they rely on presence of rados module + + # NOTE(sileht): We allow to read the measure object on + # outdated replicats, that safe for us, we will + # get the new stuffs on next metricd pass. + self.OMAP_READ_FLAGS = (rados.LIBRADOS_OPERATION_BALANCE_READS | + rados.LIBRADOS_OPERATION_SKIPRWLOCKS) + + # NOTE(sileht): That should be safe to manipulate the omap keys + # with any OSDs at the same times, each osd should replicate the + # new key to others and same thing for deletion. + # I wonder how ceph handle rm_omap and set_omap run at same time + # on the same key. I assume the operation are timestamped so that will + # be same. 
If not, they are still one acceptable race here, a rm_omap + # can finish before all replicats of set_omap are done, but we don't + # care, if that occurs next metricd run, will just remove it again, no + # object with the measure have already been delected by previous, so + # we are safe and good. + self.OMAP_WRITE_FLAGS = rados.LIBRADOS_OPERATION_SKIPRWLOCKS + + def upgrade(self, index): + super(CephStorage, self).upgrade(index) + + # Move names stored in xattrs to omap + with self._get_ioctx() as ioctx: + try: + xattrs = tuple(k for k, v in + ioctx.get_xattrs(self.MEASURE_PREFIX)) + except rados.ObjectNotFound: + return + with rados.WriteOpCtx() as op: + ioctx.set_omap(op, xattrs, xattrs) + ioctx.operate_write_op(op, self.MEASURE_PREFIX, + flags=self.OMAP_WRITE_FLAGS) + + for xattr in xattrs: + ioctx.rm_xattr(self.MEASURE_PREFIX, xattr) + def _store_measures(self, metric, data): # NOTE(sileht): list all objects in a pool is too slow with # many objects (2min for 20000 objects in 50osds cluster), # and enforce us to iterrate over all objects # So we create an object MEASURE_PREFIX, that have as - # xattr the list of objects to process + # omap the list of objects to process (not xattr because + # it doesn't allow to configure the locking behavior) name = "_".join(( self.MEASURE_PREFIX, str(metric.id), @@ -94,18 +139,19 @@ class CephStorage(_carbonara.CarbonaraBasedStorage): datetime.datetime.utcnow().strftime("%Y%m%d_%H:%M:%S"))) with self._get_ioctx() as ioctx: ioctx.write_full(name, data) - ioctx.set_xattr(self.MEASURE_PREFIX, name, "") + + with rados.WriteOpCtx() as op: + ioctx.set_omap(op, (name,), ("",)) + ioctx.operate_write_op(op, self.MEASURE_PREFIX, + flags=self.OMAP_WRITE_FLAGS) def _build_report(self, details): with self._get_ioctx() as ioctx: - try: - xattrs = ioctx.get_xattrs(self.MEASURE_PREFIX) - except rados.ObjectNotFound: - return 0, 0, {} if details else None + names = self._list_object_names_to_process(ioctx) metrics = set() count = 0 
metric_details = defaultdict(int) - for name, __ in xattrs: + for name in names: count += 1 metric = name.split("_")[1] metrics.add(metric) @@ -113,12 +159,20 @@ class CephStorage(_carbonara.CarbonaraBasedStorage): metric_details[metric] += 1 return len(metrics), count, metric_details if details else None - def _list_object_names_to_process(self, ioctx, prefix): - try: - xattrs = ioctx.get_xattrs(self.MEASURE_PREFIX) - except rados.ObjectNotFound: - return () - return (name for name, __ in xattrs if name.startswith(prefix)) + def _list_object_names_to_process(self, ioctx, prefix=""): + with rados.ReadOpCtx() as op: + omaps, ret = ioctx.get_omap_vals(op, "", prefix, -1) + ioctx.operate_read_op( + op, self.MEASURE_PREFIX, flag=self.OMAP_READ_FLAGS) + # NOTE(sileht): after reading the libradospy, I'm + # not sure that ret will have the correct value + # get_omap_vals transforms the C int to python int + # before operate_read_op is called, I dunno if the int + # content is copied during this transformation or if + # this is a pointer to the C int, I think it's copied... 
+ if ret == errno.ENOENT: + return () + return (k for k, v in omaps) def _pending_measures_to_process_count(self, metric_id): with self._get_ioctx() as ioctx: @@ -128,17 +182,14 @@ class CephStorage(_carbonara.CarbonaraBasedStorage): def _list_metric_with_measures_to_process(self, block_size, full=False): with self._get_ioctx() as ioctx: - try: - xattrs = ioctx.get_xattrs(self.MEASURE_PREFIX) - except rados.ObjectNotFound: - return [] + names = self._list_object_names_to_process(ioctx) metrics = set() if full: - objs_it = xattrs + objs_it = names else: objs_it = itertools.islice( - xattrs, block_size * self.partition, None) - for name, __ in objs_it: + names, block_size * self.partition, None) + for name in objs_it: metrics.add(name.split("_")[1]) if full is False and len(metrics) >= block_size: break @@ -149,12 +200,15 @@ class CephStorage(_carbonara.CarbonaraBasedStorage): object_prefix = self.MEASURE_PREFIX + "_" + str(metric_id) object_names = self._list_object_names_to_process(ctx, object_prefix) + # Now clean objects and xattrs + with rados.WriteOpCtx() as op: + # NOTE(sileht): come on Ceph, no return code + # for this operation ?!! + ctx.remove_omap_keys(op, tuple(object_names)) + ctx.operate_write_op(op, self.MEASURE_PREFIX, + flags=self.OMAP_WRITE_FLAGS) + for n in object_names: - try: - ctx.rm_xattr(self.MEASURE_PREFIX, n) - except rados.ObjectNotFound: - # Another worker may have removed it, don't worry. - pass ctx.aio_remove(n) @contextlib.contextmanager @@ -172,8 +226,14 @@ class CephStorage(_carbonara.CarbonaraBasedStorage): yield measures # Now clean objects and xattrs + with rados.WriteOpCtx() as op: + # NOTE(sileht): come on Ceph, no return code + # for this operation ?!! 
+ ctx.remove_omap_keys(op, tuple(object_names)) + ctx.operate_write_op(op, self.MEASURE_PREFIX, + flags=self.OMAP_WRITE_FLAGS) + for n in object_names: - ctx.rm_xattr(self.MEASURE_PREFIX, n) ctx.aio_remove(n) def _get_ioctx(self): diff --git a/gnocchi/tests/base.py b/gnocchi/tests/base.py index 3c370412..6cb01215 100644 --- a/gnocchi/tests/base.py +++ b/gnocchi/tests/base.py @@ -13,6 +13,7 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. +import errno import functools import json import os @@ -59,13 +60,53 @@ def _skip_decorator(func): class FakeRadosModule(object): + class OpCtx(object): + def __enter__(self): + self.ops = [] + return self + + def __exit__(self, *args, **kwargs): + pass + + WriteOpCtx = ReadOpCtx = OpCtx + + class OmapIterator(object): + class OpRetCode(object): + def __init__(self): + self.ret = 0 + + def __eq__(self, other): + return self.ret == other + + def __init__(self, start_filter, prefix_filter, number): + self.start_filter = start_filter + self.prefix_filter = prefix_filter + self.number = number + self.data = {} + self.op_ret = self.OpRetCode() + + def set_data(self, data): + if not data: + self.op_ret.ret = errno.ENOENT + else: + self.data = data + + def __iter__(self): + # NOTE(sileht): we use only the prefix for now + return ((k, v) for k, v in self.data.items() + if k.startswith(self.prefix_filter)) + + LIBRADOS_OPERATION_BALANCE_READS = 1 + LIBRADOS_OPERATION_SKIPRWLOCKS = 16 + class ObjectNotFound(Exception): pass class ioctx(object): - def __init__(self, kvs, kvs_xattrs): + def __init__(self, kvs, kvs_xattrs, kvs_omaps): self.kvs = kvs self.kvs_xattrs = kvs_xattrs + self.kvs_omaps = kvs_omaps self.librados = self self.io = self @@ -80,21 +121,7 @@ class FakeRadosModule(object): if key not in self.kvs: self.kvs[key] = "" self.kvs_xattrs[key] = {} - - def rados_lock_exclusive(self, ctx, name, lock, locker, 
desc, timeval, - flags): - # Locking a not existing object create an empty one - # so, do the same in test - key = name.value.decode('ascii') - self._ensure_key_exists(key) - return 0 - - def rados_unlock(self, ctx, name, lock, locker): - # Locking a not existing object create an empty one - # so, do the same in test - key = name.value.decode('ascii') - self._ensure_key_exists(key) - return 0 + self.kvs_omaps[key] = {} @staticmethod def close(): @@ -124,6 +151,33 @@ class FakeRadosModule(object): else: return self.kvs[key][offset:offset+length] + def operate_read_op(self, op, key, flag=0): + for op in op.ops: + op(key) + + def get_omap_vals(self, op, start_filter, prefix_filter, number): + oi = FakeRadosModule.OmapIterator(start_filter, prefix_filter, + number) + op.ops.append(lambda oid: oi.set_data(self.kvs_omaps.get(oid))) + return oi, oi.op_ret + + def operate_write_op(self, op, key, flags=0): + for op in op.ops: + op(key) + + def set_omap(self, op, keys, values): + def add(oid): + self._ensure_key_exists(oid) + omaps = self.kvs_omaps.setdefault(oid, {}) + omaps.update(dict(zip(keys, values))) + op.ops.append(add) + + def remove_omap_keys(self, op, keys): + def rm(oid): + for key in keys: + del self.kvs_omaps[oid][key] + op.ops.append(rm) + def get_xattrs(self, key): if key not in self.kvs: raise FakeRadosModule.ObjectNotFound @@ -145,16 +199,19 @@ class FakeRadosModule(object): raise FakeRadosModule.ObjectNotFound del self.kvs[key] del self.kvs_xattrs[key] + del self.kvs_omaps[key] def aio_remove(self, key): self._validate_key(key) self.kvs.pop(key, None) self.kvs_xattrs.pop(key, None) + self.kvs_omaps.pop(key, None) class FakeRados(object): - def __init__(self, kvs, kvs_xattrs): + def __init__(self, kvs, kvs_xattrs, kvs_omaps): self.kvs = kvs self.kvs_xattrs = kvs_xattrs + self.kvs_omaps = kvs_omaps @staticmethod def connect(): @@ -165,14 +222,17 @@ class FakeRadosModule(object): pass def open_ioctx(self, pool): - return FakeRadosModule.ioctx(self.kvs, 
self.kvs_xattrs) + return FakeRadosModule.ioctx(self.kvs, self.kvs_xattrs, + self.kvs_omaps) def __init__(self): self.kvs = {} self.kvs_xattrs = {} + self.kvs_omaps = {} def Rados(self, *args, **kwargs): - return FakeRadosModule.FakeRados(self.kvs, self.kvs_xattrs) + return FakeRadosModule.FakeRados(self.kvs, self.kvs_xattrs, + self.kvs_omaps) @staticmethod def run_in_thread(method, args): -- GitLab From 0f2538db6a1279c7e0c8b17f54794b280ab0c4ad Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Wed, 23 Mar 2016 09:03:46 +0100 Subject: [PATCH 0128/1483] (Really) Remove sqlalchemy warning Change-Id: If274d134fc5aeb432e310ab12d816cd1fc03d68f --- gnocchi/indexer/sqlalchemy.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/gnocchi/indexer/sqlalchemy.py b/gnocchi/indexer/sqlalchemy.py index 6ddef6c1..5480df95 100644 --- a/gnocchi/indexer/sqlalchemy.py +++ b/gnocchi/indexer/sqlalchemy.py @@ -243,7 +243,7 @@ class SQLAlchemyIndexer(indexer.IndexerDriver): def list_metrics(self, names=None, ids=None, details=False, status='active', **kwargs): - if ids and len(ids) == 0: + if ids is not None and not ids: return [] with self.facade.independent_reader() as session: q = session.query(Metric).filter( -- GitLab From 420b1c424b8defa15ab1be2b38b39c1172b8b8ba Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Tue, 19 Jan 2016 12:39:21 +0100 Subject: [PATCH 0129/1483] Implements resource type CRUD. This is the boiler plate for resource type CRUD. 
Legacy Ceilometer rule are still loaded from entry point The CRUD interface doesn't allow to add attributes yet Blueprint resource-type-rest-api Change-Id: I762e42b1b5f9ed78fdaef51bb601e97468b8cf61 --- doc/source/rest.j2 | 22 ++ doc/source/rest.yaml | 24 ++ etc/gnocchi/policy.json | 5 + gnocchi/indexer/__init__.py | 40 +++ ...ed97e5b3_add_tablename_to_resource_type.py | 54 ++++ gnocchi/indexer/sqlalchemy.py | 250 ++++++++++++++---- gnocchi/indexer/sqlalchemy_base.py | 28 +- gnocchi/rest/__init__.py | 93 ++++++- gnocchi/tests/gabbi/gabbits/resource.yaml | 2 +- .../tests/gabbi/gabbits/resource_type.yaml | 115 ++++++++ .../indexer/sqlalchemy/test_migrations.py | 16 ++ gnocchi/tests/test_indexer.py | 41 +++ 12 files changed, 626 insertions(+), 64 deletions(-) create mode 100644 gnocchi/indexer/alembic/versions/0735ed97e5b3_add_tablename_to_resource_type.py create mode 100644 gnocchi/tests/gabbi/gabbits/resource_type.yaml diff --git a/doc/source/rest.j2 b/doc/source/rest.j2 index 4fb6e3d2..25f18694 100644 --- a/doc/source/rest.j2 +++ b/doc/source/rest.j2 @@ -300,6 +300,28 @@ The same endpoint can be used to append metrics to a resource: .. _Nova: http://launchpad.net/nova +Resource Types +============== + +Gnocchi is able to manage resource types with custom attributes. 
+ +To create a new resource type: + +{{ scenarios['create-resource-type']['doc'] }} + +Then to retrieve its description: + +{{ scenarios['get-resource-type']['doc'] }} + +All resource types can be listed like this: + +{{ scenarios['list-resource-type']['doc'] }} + +It can also be deleted if no more resources are associated to it: + +{{ scenarios['delete-resource-type']['doc'] }} + + Searching for resources ======================= diff --git a/doc/source/rest.yaml b/doc/source/rest.yaml index d0aef1a9..a37a957d 100644 --- a/doc/source/rest.yaml +++ b/doc/source/rest.yaml @@ -309,6 +309,30 @@ - name: get-patched-instance request: GET /v1/resource/instance/{{ scenarios['create-resource-instance']['response'].json['id'] }} HTTP/1.1 + +- name: create-resource-type + request: | + POST /v1/resource_type HTTP/1.1 + Content-Type: application/json + + {"name": "my_custom_type"} + +- name: create-resource-type-2 + request: | + POST /v1/resource_type HTTP/1.1 + Content-Type: application/json + + {"name": "my_other_type"} + +- name: get-resource-type + request: GET /v1/resource_type/my_custom_type HTTP/1.1 + +- name: list-resource-type + request: GET /v1/resource_type HTTP/1.1 + +- name: delete-resource-type + request: DELETE /v1/resource_type/my_custom_type HTTP/1.1 + - name: search-resource-history request: | POST /v1/search/resource/instance?history=true HTTP/1.1 diff --git a/etc/gnocchi/policy.json b/etc/gnocchi/policy.json index b1a52c05..7987664e 100644 --- a/etc/gnocchi/policy.json +++ b/etc/gnocchi/policy.json @@ -12,6 +12,11 @@ "list resource": "rule:admin_or_creator or rule:resource_owner", "search resource": "rule:admin_or_creator or rule:resource_owner", + "create resource type": "role:admin", + "delete resource type": "role:admin", + "list resource type": "", + "get resource type": "", + "get archive policy": "", "list archive policy": "", "create archive policy": "role:admin", diff --git a/gnocchi/indexer/__init__.py b/gnocchi/indexer/__init__.py index 
c1004c20..66b3542e 100644 --- a/gnocchi/indexer/__init__.py +++ b/gnocchi/indexer/__init__.py @@ -37,6 +37,11 @@ OPTS = [ _marker = object() +class ResourceType(object): + def __eq__(self, other): + return self.name == other.name + + class Resource(object): def get_metric(self, metric_name): for m in self.metrics: @@ -124,6 +129,14 @@ class ArchivePolicyInUse(IndexerException): self.archive_policy = archive_policy +class ResourceTypeInUse(IndexerException): + """Error raised when an resource type is still being used.""" + def __init__(self, resource_type): + super(ResourceTypeInUse, self).__init__( + "Resource type %s is still in use" % resource_type) + self.resource_type = resource_type + + class NoSuchArchivePolicyRule(IndexerException): """Error raised when an archive policy rule does not exist.""" def __init__(self, archive_policy_rule): @@ -158,6 +171,14 @@ class ResourceAlreadyExists(IndexerException): self.resource = resource +class ResourceTypeAlreadyExists(IndexerException): + """Error raised when a resource type already exists.""" + def __init__(self, resource_type): + super(ResourceTypeAlreadyExists, self).__init__( + "Resource type %s already exists" % resource_type) + self.resource_type = resource_type + + class ResourceAttributeError(IndexerException, AttributeError): """Error raised when an attribute does not exist for a resource type.""" def __init__(self, resource, attribute): @@ -336,3 +357,22 @@ class IndexerDriver(object): if fnmatch.fnmatch(metric_name or "", rule.metric_pattern): return self.get_archive_policy(rule.archive_policy_name) raise NoArchivePolicyRuleMatch(metric_name) + + @staticmethod + def create_resource_type(resource_type): + raise exceptions.NotImplementedError + + @staticmethod + def get_resource_type(name): + """Get a resource type from the indexer. 
+ + :param name: name of the resource type + """ + raise exceptions.NotImplementedError + + @staticmethod + def list_resource_types(attribute_filter=None, + limit=None, + marker=None, + sorts=None): + raise exceptions.NotImplementedError diff --git a/gnocchi/indexer/alembic/versions/0735ed97e5b3_add_tablename_to_resource_type.py b/gnocchi/indexer/alembic/versions/0735ed97e5b3_add_tablename_to_resource_type.py new file mode 100644 index 00000000..5827b4cd --- /dev/null +++ b/gnocchi/indexer/alembic/versions/0735ed97e5b3_add_tablename_to_resource_type.py @@ -0,0 +1,54 @@ +# Copyright 2016 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""Add tablename to resource_type + +Revision ID: 0718ed97e5b3 +Revises: 828c16f70cce +Create Date: 2016-01-20 08:14:04.893783 + +""" + +from alembic import op +import sqlalchemy as sa + + +# revision identifiers, used by Alembic. 
+revision = '0718ed97e5b3' +down_revision = '828c16f70cce' +branch_labels = None +depends_on = None + + +def upgrade(): + op.add_column("resource_type", sa.Column('tablename', sa.String(18), + nullable=True)) + + resource_type = sa.Table( + 'resource_type', sa.MetaData(), + sa.Column('name', sa.String(255), nullable=False), + sa.Column('tablename', sa.String(18), nullable=False) + ) + op.execute(resource_type.update().where( + resource_type.c.name == "instance_network_interface" + ).values({'tablename': op.inline_literal("'instance_net_int'")})) + op.execute(resource_type.update().where( + resource_type.c.name != "instance_network_interface" + ).values({'tablename': op.inline_literal('name')})) + + op.alter_column("resource_type", "tablename", type_=sa.String(18), + nullable=False) + op.create_unique_constraint("uniq_resource_type0tablename", + "resource_type", ["tablename"]) diff --git a/gnocchi/indexer/sqlalchemy.py b/gnocchi/indexer/sqlalchemy.py index 5480df95..5534512d 100644 --- a/gnocchi/indexer/sqlalchemy.py +++ b/gnocchi/indexer/sqlalchemy.py @@ -25,6 +25,7 @@ from oslo_db import exception from oslo_db.sqlalchemy import enginefacade from oslo_db.sqlalchemy import models from oslo_db.sqlalchemy import utils as oslo_db_utils +from oslo_log import log import six import sqlalchemy import sqlalchemy_utils @@ -45,24 +46,7 @@ ResourceType = base.ResourceType _marker = indexer._marker - -def get_resource_mappers(ext): - if ext.name == "generic": - resource_ext = ext.plugin - resource_history_ext = ResourceHistory - else: - tablename = getattr(ext.plugin, '__tablename__', ext.name) - resource_ext = type(str(ext.name), - (ext.plugin, base.ResourceExtMixin, Resource), - {"__tablename__": tablename}) - resource_history_ext = type(str("%s_history" % ext.name), - (ext.plugin, base.ResourceHistoryExtMixin, - ResourceHistory), - {"__tablename__": ( - "%s_history" % tablename)}) - - return {'resource': resource_ext, - 'history': resource_history_ext} +LOG = 
log.getLogger(__name__) class PerInstanceFacade(object): @@ -103,11 +87,123 @@ class PerInstanceFacade(object): self.trans._factory._writer_engine.dispose() -class SQLAlchemyIndexer(indexer.IndexerDriver): - resources = extension.ExtensionManager('gnocchi.indexer.resources') +class ResourceClassMapper(object): + def __init__(self): + self._resources = extension.ExtensionManager( + 'gnocchi.indexer.resources') + self._cache = self.load_legacy_mappers() + self._lock = threading.RLock() + + @staticmethod + def _build_class_mappers(resource_type, baseclass=None): + tablename = resource_type.tablename + # TODO(sileht): Add columns + if not baseclass: + baseclass = type(str("%s_base" % tablename), (object, ), {}) + resource_ext = type( + str("%s_resource" % tablename), + (baseclass, base.ResourceExtMixin, base.Resource), + {"__tablename__": tablename}) + resource_history_ext = type( + str("%s_history" % tablename), + (baseclass, base.ResourceHistoryExtMixin, base.ResourceHistory), + {"__tablename__": ("%s_history" % tablename)}) + return {'resource': resource_ext, + 'history': resource_history_ext} + + def is_legacy(self, resource_type_name): + return resource_type_name in self._resources + + def load_legacy_mappers(self): + mappers = {} + for ext in self._resources.extensions: + tablename = getattr(ext.plugin, '__tablename__', ext.name) + if ext.name == "generic": + mappers[tablename] = {'resource': base.Resource, + 'history': base.ResourceHistory} + else: + resource_type = base.ResourceType(name=ext.name, + tablename=tablename) + mappers[tablename] = self._build_class_mappers(resource_type, + ext.plugin) + return mappers + + def get_legacy_resource_types(self): + resource_types = [] + for ext in self._resources.extensions: + tablename = getattr(ext.plugin, '__tablename__', ext.name) + resource_types.append(base.ResourceType(name=ext.name, + tablename=tablename)) + return resource_types + + def get_classes(self, resource_type): + # NOTE(sileht): Most of the times we can 
bypass the lock so do it + try: + return self._cache[resource_type.tablename] + except KeyError: + pass + # TODO(sileht): if the table doesn't exis + with self._lock: + try: + return self._cache[resource_type.tablename] + except KeyError: + mapper = self._build_class_mappers(resource_type) + self._cache[resource_type.tablename] = mapper + return mapper + + @oslo_db.api.wrap_db_retry(retry_on_deadlock=True) + def map_and_create_tables(self, resource_type, connection): + with self._lock: + # NOTE(sileht): map this resource_type to have + # Base.metadata filled with sa.Table objects + mappers = self.get_classes(resource_type) + tables = [Base.metadata.tables[klass.__tablename__] + for klass in mappers.values()] + Base.metadata.create_all(connection, tables=tables) + + def unmap_and_delete_tables(self, resource_type, connection): + with self._lock: + # NOTE(sileht): map this resource_type to have + # Base.metadata filled with sa.Table objects + mappers = self.get_classes(resource_type) + tables = [Base.metadata.tables[klass.__tablename__] + for klass in mappers.values()] + + if connection is not None: + # NOTE(sileht): Base.metadata.drop_all doesn't + # issue CASCADE stuffs correctly at least on postgresql + # We drop foreign keys manually to not lock the destination + # table for too long during drop table. + # It's safe to not use a transaction since + # the resource_type table is already cleaned and commited + # so this code cannot be triggerred anymore for this + # resource_type + for table in tables: + for fk in table.foreign_key_constraints: + self._safe_execute( + connection, + sqlalchemy.schema.DropConstraint(fk)) + for table in tables: + self._safe_execute(connection, + sqlalchemy.schema.DropTable(table)) + + # TODO(sileht): Remove this resource on other workers + # by using expiration on cache ? 
+ for table in tables: + Base.metadata.remove(table) + del self._cache[resource_type.tablename] + + @oslo_db.api.wrap_db_retry(retry_on_deadlock=True) + def _safe_execute(self, connection, works): + # NOTE(sileht): we create a transaction to ensure mysql + # create locks on other transaction... + trans = connection.begin() + connection.execute(works) + trans.commit() + - _RESOURCE_CLASS_MAPPER = {ext.name: get_resource_mappers(ext) - for ext in resources.extensions} +class SQLAlchemyIndexer(indexer.IndexerDriver): + _RESOURCE_TYPE_MANAGER = ResourceClassMapper() def __init__(self, conf): conf.set_override("connection", conf.indexer.url, "database") @@ -147,17 +243,73 @@ class SQLAlchemyIndexer(indexer.IndexerDriver): else: command.upgrade(cfg, "head") - for resource_type in self._RESOURCE_CLASS_MAPPER: + for rt in self._RESOURCE_TYPE_MANAGER.get_legacy_resource_types(): try: with self.facade.writer() as session: - session.add(ResourceType(name=resource_type)) + session.add(rt) except exception.DBDuplicateEntry: pass - def _resource_type_to_class(self, resource_type, purpose="resource"): - if resource_type not in self._RESOURCE_CLASS_MAPPER: - raise indexer.NoSuchResourceType(resource_type) - return self._RESOURCE_CLASS_MAPPER[resource_type][purpose] + def create_resource_type(self, name): + # NOTE(sileht): mysql have a stupid and small length limitation on the + # foreign key and index name, so we can't use the resource type name as + # tablename, the limit is 64. 
The longest name we have is + # fk__history_revision_resource_history_revision, + # so 64 - 46 = 18 + tablename = "rt_%s" % uuid.uuid4().hex[:15] + resource_type = ResourceType(name=name, + tablename=tablename) + + try: + with self.facade.writer() as session: + session.add(resource_type) + except exception.DBDuplicateEntry: + raise indexer.ResourceTypeAlreadyExists(name) + + with self.facade.writer_connection() as connection: + self._RESOURCE_TYPE_MANAGER.map_and_create_tables(resource_type, + connection) + return resource_type + + def get_resource_type(self, name): + with self.facade.independent_reader() as session: + return self._get_resource_type(session, name) + + def _get_resource_type(self, session, name): + resource_type = session.query(ResourceType).get(name) + if not resource_type: + raise indexer.NoSuchResourceType(name) + return resource_type + + def list_resource_types(self): + with self.facade.independent_reader() as session: + return list(session.query(ResourceType).order_by( + ResourceType.name.asc()).all()) + + def delete_resource_type(self, name): + # FIXME(sileht) this type have special handling + # until we remove this special thing we reject its deletion + if self._RESOURCE_TYPE_MANAGER.is_legacy(name): + raise indexer.ResourceTypeInUse(name) + + try: + with self.facade.writer() as session: + resource_type = self._get_resource_type(session, name) + session.delete(resource_type) + except exception.DBReferenceError as e: + if (e.constraint in [ + 'fk_resource_resource_type_name', + 'fk_resource_history_resource_type_name']): + raise indexer.ResourceTypeInUse(name) + raise + + with self.facade.writer_connection() as connection: + self._RESOURCE_TYPE_MANAGER.unmap_and_delete_tables(resource_type, + connection) + + def _resource_type_to_classes(self, session, name): + resource_type = self._get_resource_type(session, name) + return self._RESOURCE_TYPE_MANAGER.get_classes(resource_type) def list_archive_policies(self): with 
self.facade.independent_reader() as session: @@ -264,13 +416,14 @@ class SQLAlchemyIndexer(indexer.IndexerDriver): user_id=None, project_id=None, started_at=None, ended_at=None, metrics=None, **kwargs): - resource_cls = self._resource_type_to_class(resource_type) if (started_at is not None and ended_at is not None and started_at > ended_at): raise ValueError( "Start timestamp cannot be after end timestamp") with self.facade.writer() as session: + resource_cls = self._resource_type_to_classes( + session, resource_type)['resource'] r = resource_cls( id=id, type=resource_type, @@ -298,16 +451,17 @@ class SQLAlchemyIndexer(indexer.IndexerDriver): return r - @oslo_db.api.retry_on_deadlock + @oslo_db.api.wrap_db_retry(retry_on_deadlock=True) def update_resource(self, resource_type, resource_id, ended_at=_marker, metrics=_marker, append_metrics=False, create_revision=True, **kwargs): - resource_cls = self._resource_type_to_class(resource_type) - resource_history_cls = self._resource_type_to_class(resource_type, - "history") with self.facade.writer() as session: + classes = self._resource_type_to_classes(session, resource_type) + resource_cls = classes["resource"] + resource_history_cls = classes["history"] + try: # NOTE(sileht): We use FOR UPDATE that is not galera friendly, # but they are no other way to cleanly patch a resource and @@ -420,8 +574,9 @@ class SQLAlchemyIndexer(indexer.IndexerDriver): raise indexer.NoSuchResource(resource_id) def get_resource(self, resource_type, resource_id, with_metrics=False): - resource_cls = self._resource_type_to_class(resource_type) with self.facade.independent_reader() as session: + resource_cls = self._resource_type_to_classes( + session, resource_type)['resource'] q = session.query( resource_cls).filter( resource_cls.id == resource_id) @@ -429,9 +584,10 @@ class SQLAlchemyIndexer(indexer.IndexerDriver): q = q.options(sqlalchemy.orm.joinedload('metrics')) return q.first() - def _get_history_result_mapper(self, resource_type): - 
resource_cls = self._resource_type_to_class(resource_type) - history_cls = self._resource_type_to_class(resource_type, 'history') + def _get_history_result_mapper(self, session, resource_type): + classes = self._resource_type_to_classes(session, resource_type) + resource_cls = classes['resource'] + history_cls = classes['history'] resource_cols = {} history_cols = {} @@ -468,6 +624,7 @@ class SQLAlchemyIndexer(indexer.IndexerDriver): return Result + @oslo_db.api.wrap_db_retry(retry_on_deadlock=True) def list_resources(self, resource_type='generic', attribute_filter=None, details=False, @@ -477,12 +634,14 @@ class SQLAlchemyIndexer(indexer.IndexerDriver): sorts=None): sorts = sorts or [] - if history: - target_cls = self._get_history_result_mapper(resource_type) - else: - target_cls = self._resource_type_to_class(resource_type) - with self.facade.independent_reader() as session: + if history: + target_cls = self._get_history_result_mapper( + session, resource_type) + else: + target_cls = self._resource_type_to_classes( + session, resource_type)["resource"] + q = session.query(target_cls) if attribute_filter: @@ -546,12 +705,13 @@ class SQLAlchemyIndexer(indexer.IndexerDriver): all_resources.extend(resources) else: if is_history: - target_cls = self._resource_type_to_class( - type, "history") + target_cls = self._resource_type_to_classes( + session, type)['history'] f = target_cls.revision.in_( [r.revision for r in resources]) else: - target_cls = self._resource_type_to_class(type) + target_cls = self._resource_type_to_classes( + session, type)["resource"] f = target_cls.id.in_([r.id for r in resources]) q = session.query(target_cls).filter(f) diff --git a/gnocchi/indexer/sqlalchemy_base.py b/gnocchi/indexer/sqlalchemy_base.py index e803cc1d..8506f8b5 100644 --- a/gnocchi/indexer/sqlalchemy_base.py +++ b/gnocchi/indexer/sqlalchemy_base.py @@ -199,14 +199,22 @@ class Metric(Base, GnocchiBase, storage.Metric): __hash__ = storage.Metric.__hash__ -class 
ResourceType(Base, GnocchiBase): +class ResourceType(Base, GnocchiBase, indexer.ResourceType): __tablename__ = 'resource_type' __table_args__ = ( + sqlalchemy.UniqueConstraint("tablename", + name="uniq_resource_type0tablename"), COMMON_TABLES_ARGS, ) name = sqlalchemy.Column(sqlalchemy.String(255), primary_key=True, nullable=False) + tablename = sqlalchemy.Column(sqlalchemy.String(18), nullable=False) + + def jsonify(self): + d = dict(self) + del d['tablename'] + return d class ResourceJsonifier(indexer.Resource): @@ -232,7 +240,7 @@ class ResourceMixin(ResourceJsonifier): sqlalchemy.String(255), sqlalchemy.ForeignKey('resource_type.name', ondelete="RESTRICT", - name="fk_%s_type_resource_type_name" % + name="fk_%s_resource_type_name" % cls.__tablename__), nullable=False) @@ -315,8 +323,12 @@ class ResourceExtMixin(object): sqlalchemy.ForeignKey( 'resource.id', ondelete="CASCADE", - name="fk_%s_id_resource_id" % cls.__tablename__), - primary_key=True) + name="fk_%s_id_resource_id" % cls.__tablename__, + # NOTE(sileht): We use to ensure that postgresql + # does not use AccessExclusiveLock on destination table + use_alter=True), + primary_key=True + ) class ResourceHistoryExtMixin(object): @@ -332,8 +344,12 @@ class ResourceHistoryExtMixin(object): 'resource_history.revision', ondelete="CASCADE", name="fk_%s_revision_resource_history_revision" - % cls.__tablename__), - primary_key=True) + % cls.__tablename__, + # NOTE(sileht): We use to ensure that postgresql + # does not use AccessExclusiveLock on destination table + use_alter=True), + primary_key=True + ) class ArchivePolicyRule(Base, GnocchiBase): diff --git a/gnocchi/rest/__init__.py b/gnocchi/rest/__init__.py index ee722554..cadb496f 100644 --- a/gnocchi/rest/__init__.py +++ b/gnocchi/rest/__init__.py @@ -806,6 +806,67 @@ def etag_set_headers(obj): pecan.response.last_modified = obj.lastmodified +class ResourceTypeController(rest.RestController): + def __init__(self, name): + self._name = name + + 
@pecan.expose('json') + def get(self): + try: + resource_type = pecan.request.indexer.get_resource_type(self._name) + except indexer.NoSuchResourceType as e: + abort(404, e) + enforce("get resource type", resource_type) + return resource_type + + @pecan.expose() + def delete(self): + try: + resource_type = pecan.request.indexer.get_resource_type(self._name) + except indexer.NoSuchResourceType as e: + abort(404, e) + enforce("delete resource type", resource_type) + try: + pecan.request.indexer.delete_resource_type(self._name) + except (indexer.NoSuchResourceType, + indexer.ResourceTypeInUse) as e: + abort(400, e) + + +def ResourceTypeSchema(definition): + # FIXME(sileht): Add resource type attributes from the indexer + return voluptuous.Schema({ + "name": six.text_type, + })(definition) + + +class ResourceTypesController(rest.RestController): + + @pecan.expose() + def _lookup(self, name, *remainder): + return ResourceTypeController(name), remainder + + @pecan.expose('json') + def post(self): + body = deserialize_and_validate(ResourceTypeSchema) + enforce("create resource type", body) + try: + resource_type = pecan.request.indexer.create_resource_type(**body) + except indexer.ResourceTypeAlreadyExists as e: + abort(409, e) + set_resp_location_hdr("/resource_type/" + resource_type.name) + pecan.response.status = 201 + return resource_type + + @pecan.expose('json') + def get_all(self, **kwargs): + enforce("list resource type", {}) + try: + return pecan.request.indexer.list_resource_types() + except indexer.IndexerException as e: + abort(400, e) + + def ResourceSchema(schema): base_schema = { voluptuous.Optional('started_at'): Timestamp, @@ -951,7 +1012,12 @@ RESOURCE_SCHEMA_MANAGER = extension.ExtensionManager( def schema_for(resource_type): - return RESOURCE_SCHEMA_MANAGER[resource_type].plugin + if resource_type in RESOURCE_SCHEMA_MANAGER: + # TODO(sileht): Remove this legacy resource schema loading + return RESOURCE_SCHEMA_MANAGER[resource_type].plugin + else: + # 
TODO(sileht): Load schema from indexer + return GenericSchema def ResourceID(value): @@ -1029,16 +1095,17 @@ class ResourcesByTypeController(rest.RestController): @pecan.expose('json') def get_all(self): return dict( - (ext.name, - pecan.request.application_url + '/resource/' + ext.name) - for ext in RESOURCE_SCHEMA_MANAGER) + (rt.name, + pecan.request.application_url + '/resource/' + rt.name) + for rt in pecan.request.indexer.list_resource_types()) @pecan.expose() def _lookup(self, resource_type, *remainder): - if resource_type in RESOURCE_SCHEMA_MANAGER: - return ResourcesController(resource_type), remainder - else: - abort(404, indexer.NoSuchResourceType(resource_type)) + try: + pecan.request.indexer.get_resource_type(resource_type) + except indexer.NoSuchResourceType as e: + abort(404, e) + return ResourcesController(resource_type), remainder def _ResourceSearchSchema(v): @@ -1114,10 +1181,11 @@ class SearchResourceTypeController(rest.RestController): class SearchResourceController(rest.RestController): @pecan.expose() def _lookup(self, resource_type, *remainder): - if resource_type in RESOURCE_SCHEMA_MANAGER: - return SearchResourceTypeController(resource_type), remainder - else: - abort(404, indexer.NoSuchResourceType(resource_type)) + try: + pecan.request.indexer.get_resource_type(resource_type) + except indexer.NoSuchResourceType as e: + abort(404, e) + return SearchResourceTypeController(resource_type), remainder def _MetricSearchSchema(v): @@ -1415,6 +1483,7 @@ class V1Controller(object): "metric": MetricsController(), "batch": BatchController(), "resource": ResourcesByTypeController(), + "resource_type": ResourceTypesController(), "aggregation": AggregationController(), "capabilities": CapabilityController(), "status": StatusController(), diff --git a/gnocchi/tests/gabbi/gabbits/resource.yaml b/gnocchi/tests/gabbi/gabbits/resource.yaml index c43c14a7..b08eb004 100644 --- a/gnocchi/tests/gabbi/gabbits/resource.yaml +++ 
b/gnocchi/tests/gabbi/gabbits/resource.yaml @@ -54,7 +54,7 @@ tests: redirects: true response_json_paths: $.version: "1.0" - $.links.`len`: 10 + $.links.`len`: 11 $.links[0].href: $SCHEME://$NETLOC/v1 $.links[7].href: $SCHEME://$NETLOC/v1/resource diff --git a/gnocchi/tests/gabbi/gabbits/resource_type.yaml b/gnocchi/tests/gabbi/gabbits/resource_type.yaml new file mode 100644 index 00000000..a910dad8 --- /dev/null +++ b/gnocchi/tests/gabbi/gabbits/resource_type.yaml @@ -0,0 +1,115 @@ +# +# Test the resource type API to achieve coverage of just the +# ResourceTypesController and ResourceTypeController class code. +# + +fixtures: + - ConfigFixture + +tests: + + - name: list resource type + desc: only legacy resource types are present + url: /v1/resource_type + response_json_paths: + $.`len`: 15 + + - name: post resource type as non-admin + url: /v1/resource_type + method: post + data: + name: my_custom_resource + request_headers: + content-type: application/json + status: 403 + + - name: post resource type + url: /v1/resource_type + method: post + request_headers: + x-roles: admin + content-type: application/json + data: + name: my_custom_resource + status: 201 + response_json_paths: + $.name: my_custom_resource + response_headers: + location: $SCHEME://$NETLOC/v1/resource_type/my_custom_resource + + - name: relist resource types + desc: we have a resource type now + url: /v1/resource_type + response_json_paths: + $.`len`: 16 + $.[11].name: my_custom_resource + + - name: get the custom resource type + url: /v1/resource_type/my_custom_resource + response_json_paths: + $.name: my_custom_resource + + - name: delete as non-admin + url: /v1/resource_type/my_custom_resource + method: DELETE + status: 403 + + - name: post custom resource + url: /v1/resource/my_custom_resource + method: post + request_headers: + x-user-id: 0fbb2314-8461-4b1a-8013-1fc22f6afc9c + x-project-id: f3d41b77-0cc1-4f0b-b94a-1d5be9c0e3ea + content-type: application/json + data: + id: 
d11edfca-4393-4fda-b94d-b05a3a1b3747 + status: 201 + + - name: delete in use resource_type + url: /v1/resource_type/my_custom_resource + method: delete + request_headers: + x-roles: admin + status: 400 + response_strings: + - Resource type my_custom_resource is still in use + + - name: delete the resource + url: /v1/resource/my_custom_resource/d11edfca-4393-4fda-b94d-b05a3a1b3747 + request_headers: + x-roles: admin + method: DELETE + status: 204 + + - name: delete the custom resource type + method: delete + request_headers: + x-roles: admin + url: /v1/resource_type/my_custom_resource + status: 204 + + - name: delete non-existing custom resource type + method: delete + request_headers: + x-roles: admin + url: /v1/resource_type/my_custom_resource + status: 404 + +# Can we readd and delete the same resource type again + + - name: post resource type again + url: /v1/resource_type + method: post + request_headers: + x-roles: admin + content-type: application/json + data: + name: my_custom_resource + status: 201 + + - name: delete the custom resource type again + method: delete + request_headers: + x-roles: admin + url: /v1/resource_type/my_custom_resource + status: 204 diff --git a/gnocchi/tests/indexer/sqlalchemy/test_migrations.py b/gnocchi/tests/indexer/sqlalchemy/test_migrations.py index 63f22f47..f456394e 100644 --- a/gnocchi/tests/indexer/sqlalchemy/test_migrations.py +++ b/gnocchi/tests/indexer/sqlalchemy/test_migrations.py @@ -17,6 +17,7 @@ import abc import mock from oslo_db.sqlalchemy import test_migrations import six +import sqlalchemy from gnocchi.indexer import sqlalchemy_base from gnocchi.tests import base @@ -47,3 +48,18 @@ class ModelsMigrationsSync( # NOTE(jd) Nothing to do here as setUp() in the base class is already # creating table using upgrade pass + + @staticmethod + def filter_metadata_diff(diff): + new_diff = [] + for line in diff: + item = line[1] + # NOTE(sileht): skip resource types created dynamically + if (isinstance(item, sqlalchemy.Table) 
+ and item.name.startswith("rt_")): + continue + elif (isinstance(item, sqlalchemy.Index) + and item.name.startswith("ix_rt_")): + continue + new_diff.append(line) + return new_diff diff --git a/gnocchi/tests/test_indexer.py b/gnocchi/tests/test_indexer.py index 3766ed2f..5fc065ef 100644 --- a/gnocchi/tests/test_indexer.py +++ b/gnocchi/tests/test_indexer.py @@ -972,3 +972,44 @@ class TestIndexerDriver(tests_base.TestCase): self.index.delete_metric(e1) metrics = self.index.list_metrics() self.assertNotIn(e1, [m.id for m in metrics]) + + def test_resource_type_crud(self): + # Create + self.index.create_resource_type("indexer_test") + self.assertRaises(indexer.ResourceTypeAlreadyExists, + self.index.create_resource_type, + "indexer_test") + + # Get and List + rtype = self.index.get_resource_type("indexer_test") + self.assertEqual("indexer_test", rtype.name) + rtypes = self.index.list_resource_types() + for rtype in rtypes: + if rtype.name == "indexer_test": + break + else: + self.fail("indexer_test not found") + + # Test resource itself + rid = uuid.uuid4() + self.index.create_resource("indexer_test", rid, + str(uuid.uuid4()), + str(uuid.uuid4())) + r = self.index.get_resource("indexer_test", rid) + self.assertEqual("indexer_test", r.type) + + # Deletion + self.assertRaises(indexer.ResourceTypeInUse, + self.index.delete_resource_type, + "indexer_test") + self.index.delete_resource(rid) + self.index.delete_resource_type("indexer_test") + + # Ensure it's deleted + self.assertRaises(indexer.NoSuchResourceType, + self.index.get_resource_type, + "indexer_test") + + self.assertRaises(indexer.NoSuchResourceType, + self.index.delete_resource_type, + "indexer_test") -- GitLab From 65a9ac994187b1f6951dc27c1aebaea1e79d5d76 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Tue, 19 Jan 2016 22:55:59 +0100 Subject: [PATCH 0130/1483] Implements resource attribute string This change allows to create resource attributes of type string. 
The choosen solution is that the indexer is responsible to providing voluptuous schemas and build resource columns according the the voluptuous schema that it proposes. Another alternative could be that rest API provides jsonschema for each times and indexer is only responsible for the schema to sql column convertion. Blueprint resource-type-rest-api Change-Id: I7877b6ea97dc70f3629e63abe5ef1ddf61d200b3 --- doc/source/rest.yaml | 8 +- gnocchi/indexer/__init__.py | 13 +- ...7c22ab0_add_attributes_to_resource_type.py | 38 +++++ gnocchi/indexer/sqlalchemy.py | 41 +++-- gnocchi/indexer/sqlalchemy_base.py | 39 ++++- gnocchi/indexer/sqlalchemy_extension.py | 10 +- gnocchi/resource_type.py | 151 ++++++++++++++++++ gnocchi/rest/__init__.py | 30 ++-- .../tests/gabbi/gabbits/resource_type.yaml | 102 ++++++++++++ gnocchi/tests/test_indexer.py | 23 ++- setup.cfg | 3 + 11 files changed, 412 insertions(+), 46 deletions(-) create mode 100644 gnocchi/indexer/alembic/versions/d24877c22ab0_add_attributes_to_resource_type.py create mode 100644 gnocchi/resource_type.py diff --git a/doc/source/rest.yaml b/doc/source/rest.yaml index a37a957d..00d884a9 100644 --- a/doc/source/rest.yaml +++ b/doc/source/rest.yaml @@ -315,7 +315,13 @@ POST /v1/resource_type HTTP/1.1 Content-Type: application/json - {"name": "my_custom_type"} + { + "name": "my_custom_type", + "attributes": { + "display_name": {"type": "string", "required": true}, + "prefix": {"type": "string", "required": false, "max_length": 8, "min_length": 3} + } + } - name: create-resource-type-2 request: | diff --git a/gnocchi/indexer/__init__.py b/gnocchi/indexer/__init__.py index 66b3542e..a83e4348 100644 --- a/gnocchi/indexer/__init__.py +++ b/gnocchi/indexer/__init__.py @@ -37,11 +37,6 @@ OPTS = [ _marker = object() -class ResourceType(object): - def __eq__(self, other): - return self.name == other.name - - class Resource(object): def get_metric(self, metric_name): for m in self.metrics: @@ -376,3 +371,11 @@ class 
IndexerDriver(object): marker=None, sorts=None): raise exceptions.NotImplementedError + + @staticmethod + def get_resource_attributes_schemas(): + raise exceptions.NotImplementedError + + @staticmethod + def get_resource_type_schema(): + raise exceptions.NotImplementedError diff --git a/gnocchi/indexer/alembic/versions/d24877c22ab0_add_attributes_to_resource_type.py b/gnocchi/indexer/alembic/versions/d24877c22ab0_add_attributes_to_resource_type.py new file mode 100644 index 00000000..dda81e50 --- /dev/null +++ b/gnocchi/indexer/alembic/versions/d24877c22ab0_add_attributes_to_resource_type.py @@ -0,0 +1,38 @@ +# Copyright 2016 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""Add attributes to resource_type + +Revision ID: d24877c22ab0 +Revises: 0718ed97e5b3 +Create Date: 2016-01-19 22:45:06.431190 + +""" + +from alembic import op +import sqlalchemy as sa +import sqlalchemy_utils as sa_utils + + +# revision identifiers, used by Alembic. 
+revision = 'd24877c22ab0' +down_revision = '0718ed97e5b3' +branch_labels = None +depends_on = None + + +def upgrade(): + op.add_column("resource_type", + sa.Column('attributes', sa_utils.JSONType(),)) diff --git a/gnocchi/indexer/sqlalchemy.py b/gnocchi/indexer/sqlalchemy.py index 5534512d..02726644 100644 --- a/gnocchi/indexer/sqlalchemy.py +++ b/gnocchi/indexer/sqlalchemy.py @@ -23,7 +23,6 @@ import uuid import oslo_db.api from oslo_db import exception from oslo_db.sqlalchemy import enginefacade -from oslo_db.sqlalchemy import models from oslo_db.sqlalchemy import utils as oslo_db_utils from oslo_log import log import six @@ -34,6 +33,7 @@ from stevedore import extension from gnocchi import exceptions from gnocchi import indexer from gnocchi.indexer import sqlalchemy_base as base +from gnocchi import resource_type from gnocchi import utils Base = base.Base @@ -99,7 +99,7 @@ class ResourceClassMapper(object): tablename = resource_type.tablename # TODO(sileht): Add columns if not baseclass: - baseclass = type(str("%s_base" % tablename), (object, ), {}) + baseclass = resource_type.to_baseclass() resource_ext = type( str("%s_resource" % tablename), (baseclass, base.ResourceExtMixin, base.Resource), @@ -122,18 +122,20 @@ class ResourceClassMapper(object): mappers[tablename] = {'resource': base.Resource, 'history': base.ResourceHistory} else: - resource_type = base.ResourceType(name=ext.name, - tablename=tablename) - mappers[tablename] = self._build_class_mappers(resource_type, - ext.plugin) + rt = base.ResourceType( + name=ext.name, tablename=tablename, + attributes=resource_type.ResourceTypeAttributes()) + mappers[tablename] = self._build_class_mappers(rt, ext.plugin) return mappers def get_legacy_resource_types(self): resource_types = [] for ext in self._resources.extensions: tablename = getattr(ext.plugin, '__tablename__', ext.name) - resource_types.append(base.ResourceType(name=ext.name, - tablename=tablename)) + resource_types.append(base.ResourceType( + 
name=ext.name, + tablename=tablename, + attributes=resource_type.ResourceTypeAttributes())) return resource_types def get_classes(self, resource_type): @@ -250,21 +252,26 @@ class SQLAlchemyIndexer(indexer.IndexerDriver): except exception.DBDuplicateEntry: pass - def create_resource_type(self, name): + def create_resource_type(self, resource_type): # NOTE(sileht): mysql have a stupid and small length limitation on the # foreign key and index name, so we can't use the resource type name as # tablename, the limit is 64. The longest name we have is # fk__history_revision_resource_history_revision, # so 64 - 46 = 18 tablename = "rt_%s" % uuid.uuid4().hex[:15] - resource_type = ResourceType(name=name, - tablename=tablename) + resource_type = ResourceType(name=resource_type.name, + tablename=tablename, + attributes=resource_type.attributes) + + # NOTE(sileht): ensure the driver is able to store the request + # resource_type + resource_type.to_baseclass() try: with self.facade.writer() as session: session.add(resource_type) except exception.DBDuplicateEntry: - raise indexer.ResourceTypeAlreadyExists(name) + raise indexer.ResourceTypeAlreadyExists(resource_type.name) with self.facade.writer_connection() as connection: self._RESOURCE_TYPE_MANAGER.map_and_create_tables(resource_type, @@ -281,6 +288,14 @@ class SQLAlchemyIndexer(indexer.IndexerDriver): raise indexer.NoSuchResourceType(name) return resource_type + @staticmethod + def get_resource_type_schema(): + return base.RESOURCE_TYPE_SCHEMA_MANAGER + + @staticmethod + def get_resource_attributes_schemas(): + return [ext.plugin.schema() for ext in ResourceType.RESOURCE_SCHEMAS] + def list_resource_types(self): with self.facade.independent_reader() as session: return list(session.query(ResourceType).order_by( @@ -609,7 +624,7 @@ class SQLAlchemyIndexer(indexer.IndexerDriver): class Result(base.ResourceJsonifier, base.GnocchiBase): def __iter__(self): - return models.ModelIterator(self, iter(stmt.c.keys())) + return 
iter((key, getattr(self, key)) for key in stmt.c.keys()) sqlalchemy.orm.mapper( Result, stmt, primary_key=[stmt.c.id, stmt.c.revision], diff --git a/gnocchi/indexer/sqlalchemy_base.py b/gnocchi/indexer/sqlalchemy_base.py index 8506f8b5..6e239192 100644 --- a/gnocchi/indexer/sqlalchemy_base.py +++ b/gnocchi/indexer/sqlalchemy_base.py @@ -32,6 +32,7 @@ import sqlalchemy_utils from gnocchi import archive_policy from gnocchi import indexer +from gnocchi import resource_type from gnocchi import storage from gnocchi import utils @@ -199,7 +200,22 @@ class Metric(Base, GnocchiBase, storage.Metric): __hash__ = storage.Metric.__hash__ -class ResourceType(Base, GnocchiBase, indexer.ResourceType): +RESOURCE_TYPE_SCHEMA_MANAGER = resource_type.ResourceTypeSchemaManager( + "gnocchi.indexer.sqlalchemy.resource_type_attribute") + + +class ResourceTypeAttributes(sqlalchemy_utils.JSONType): + def process_bind_param(self, attributes, dialect): + return super(ResourceTypeAttributes, self).process_bind_param( + attributes.jsonify(), dialect) + + def process_result_value(self, value, dialect): + attributes = super(ResourceTypeAttributes, self).process_result_value( + value, dialect) + return RESOURCE_TYPE_SCHEMA_MANAGER.attributes_from_dict(attributes) + + +class ResourceType(Base, GnocchiBase, resource_type.ResourceType): __tablename__ = 'resource_type' __table_args__ = ( sqlalchemy.UniqueConstraint("tablename", @@ -210,11 +226,14 @@ class ResourceType(Base, GnocchiBase, indexer.ResourceType): name = sqlalchemy.Column(sqlalchemy.String(255), primary_key=True, nullable=False) tablename = sqlalchemy.Column(sqlalchemy.String(18), nullable=False) + attributes = sqlalchemy.Column(ResourceTypeAttributes) - def jsonify(self): - d = dict(self) - del d['tablename'] - return d + def to_baseclass(self): + cols = {} + for attr in self.attributes: + cols[attr.name] = sqlalchemy.Column(attr.satype, + nullable=not attr.required) + return type(str("%s_base" % self.tablename), (object, ), cols) class 
ResourceJsonifier(indexer.Resource): @@ -352,6 +371,16 @@ class ResourceHistoryExtMixin(object): ) +class HistoryModelIterator(models.ModelIterator): + def __next__(self): + # NOTE(sileht): Our custom resource attribute columns don't + # have the same name in database than in sqlalchemy model + # so remove the additional "f_" for the model name + n = six.advance_iterator(self.i) + model_attr = n[2:] if n[:2] == "f_" else n + return model_attr, getattr(self.model, n) + + class ArchivePolicyRule(Base, GnocchiBase): __tablename__ = 'archive_policy_rule' diff --git a/gnocchi/indexer/sqlalchemy_extension.py b/gnocchi/indexer/sqlalchemy_extension.py index 2f55e753..a9dd6055 100644 --- a/gnocchi/indexer/sqlalchemy_extension.py +++ b/gnocchi/indexer/sqlalchemy_extension.py @@ -1,5 +1,5 @@ # -*- encoding: utf-8 -*- -# + # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at @@ -17,6 +17,8 @@ from __future__ import absolute_import import sqlalchemy import sqlalchemy_utils +from gnocchi import resource_type + class Image(object): name = sqlalchemy.Column(sqlalchemy.String(255), nullable=False) @@ -65,3 +67,9 @@ class HostDisk(object): __tablename__ = 'host_disk' host_name = sqlalchemy.Column(sqlalchemy.String(255), nullable=False) device_name = sqlalchemy.Column(sqlalchemy.String(255), nullable=True) + + +class StringSchema(resource_type.StringSchema): + @property + def satype(self): + return sqlalchemy.String(self.max_length) diff --git a/gnocchi/resource_type.py b/gnocchi/resource_type.py new file mode 100644 index 00000000..0854544e --- /dev/null +++ b/gnocchi/resource_type.py @@ -0,0 +1,151 @@ +# -*- encoding: utf-8 -*- +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import re +import six +import stevedore +import voluptuous + + +INVALID_NAMES = [ + "id", "type", "metrics", + "revision", "revision_start", "revision_end", + "started_at", "ended_at", + "user_id", "project_id", + "created_by_user_id", "created_by_project_id", "get_metric" +] + +VALID_CHARS = re.compile("[a-zA-Z0-9][a-zA-Z0-9_]*") + + +class InvalidResourceAttributeName(Exception): + """Error raised when the resource attribute name is invalid.""" + def __init__(self, name): + super(InvalidResourceAttributeName, self).__init__( + "Resource attribute name %s is invalid" % str(name)) + self.name = name + + +class CommonAttributeSchema(object): + meta_schema_ext = {} + schema_ext = None + + def __init__(self, type, name, required): + if (len(name) > 63 or name in INVALID_NAMES + or not VALID_CHARS.match(name)): + raise InvalidResourceAttributeName(name) + + self.name = name + self.required = required + + @classmethod + def meta_schema(cls): + d = { + voluptuous.Required('type'): cls.typename, + voluptuous.Required('required', default=True): bool + } + d.update(cls.meta_schema_ext) + return d + + def schema(self): + if self.required: + return {self.name: self.schema_ext} + else: + return {voluptuous.Optional(self.name): self.schema_ext} + + def jsonify(self): + return {"type": self.typename, + "required": self.required} + + +class StringSchema(CommonAttributeSchema): + typename = "string" + + def __init__(self, min_length, max_length, *args, **kwargs): + super(StringSchema, self).__init__(*args, **kwargs) + self.min_length = min_length + self.max_length = 
max_length + + # TODO(sileht): ensure min_length <= max_length + meta_schema_ext = { + voluptuous.Required('min_length', default=0): + voluptuous.All(int, voluptuous.Range(min=0, max=255)), + voluptuous.Required('max_length', default=255): + voluptuous.All(int, voluptuous.Range(min=1, max=255)) + } + + @property + def schema_ext(self): + return voluptuous.All(six.text_type, + voluptuous.Length( + min=self.min_length, + max=self.max_length)) + + def jsonify(self): + d = super(StringSchema, self).jsonify() + d.update({"max_length": self.max_length, + "min_length": self.min_length}) + return d + + +class ResourceTypeAttributes(list): + def jsonify(self): + d = {} + for attr in self: + d[attr.name] = attr.jsonify() + return d + + +class ResourceTypeSchemaManager(stevedore.ExtensionManager): + def __init__(self, *args, **kwargs): + super(ResourceTypeSchemaManager, self).__init__(*args, **kwargs) + type_schemas = tuple([ext.plugin.meta_schema() + for ext in self.extensions]) + self._schema = voluptuous.Schema({ + "name": six.text_type, + voluptuous.Required("attributes", default={}): { + six.text_type: voluptuous.Any(*tuple(type_schemas)) + } + }) + + def __call__(self, definition): + return self._schema(definition) + + def attributes_from_dict(self, attributes): + return ResourceTypeAttributes( + self[attr["type"]].plugin(name=name, **attr) + for name, attr in attributes.items()) + + def resource_type_from_dict(self, name, attributes): + return ResourceType(name, self.attributes_from_dict(attributes)) + + +class ResourceType(object): + def __init__(self, name, attributes): + self.name = name + self.attributes = attributes + + @property + def schema(self): + schema = {} + for attr in self.attributes: + schema.update(attr.schema()) + return schema + + def __eq__(self, other): + return self.name == other.name + + def jsonify(self): + return {"name": self.name, + "attributes": self.attributes.jsonify()} diff --git a/gnocchi/rest/__init__.py b/gnocchi/rest/__init__.py index 
cadb496f..75c5869d 100644 --- a/gnocchi/rest/__init__.py +++ b/gnocchi/rest/__init__.py @@ -31,6 +31,7 @@ from gnocchi import aggregates from gnocchi import archive_policy from gnocchi import indexer from gnocchi import json +from gnocchi import resource_type from gnocchi import storage from gnocchi import utils @@ -813,16 +814,16 @@ class ResourceTypeController(rest.RestController): @pecan.expose('json') def get(self): try: - resource_type = pecan.request.indexer.get_resource_type(self._name) + rt = pecan.request.indexer.get_resource_type(self._name) except indexer.NoSuchResourceType as e: abort(404, e) - enforce("get resource type", resource_type) - return resource_type + enforce("get resource type", rt) + return rt @pecan.expose() def delete(self): try: - resource_type = pecan.request.indexer.get_resource_type(self._name) + pecan.request.indexer.get_resource_type(self._name) except indexer.NoSuchResourceType as e: abort(404, e) enforce("delete resource type", resource_type) @@ -833,13 +834,6 @@ class ResourceTypeController(rest.RestController): abort(400, e) -def ResourceTypeSchema(definition): - # FIXME(sileht): Add resource type attributes from the indexer - return voluptuous.Schema({ - "name": six.text_type, - })(definition) - - class ResourceTypesController(rest.RestController): @pecan.expose() @@ -848,15 +842,17 @@ class ResourceTypesController(rest.RestController): @pecan.expose('json') def post(self): - body = deserialize_and_validate(ResourceTypeSchema) + schema = pecan.request.indexer.get_resource_type_schema() + body = deserialize_and_validate(schema) + rt = schema.resource_type_from_dict(**body) enforce("create resource type", body) try: - resource_type = pecan.request.indexer.create_resource_type(**body) + rt = pecan.request.indexer.create_resource_type(rt) except indexer.ResourceTypeAlreadyExists as e: abort(409, e) - set_resp_location_hdr("/resource_type/" + resource_type.name) + set_resp_location_hdr("/resource_type/" + rt.name) 
pecan.response.status = 201 - return resource_type + return rt @pecan.expose('json') def get_all(self, **kwargs): @@ -1016,8 +1012,8 @@ def schema_for(resource_type): # TODO(sileht): Remove this legacy resource schema loading return RESOURCE_SCHEMA_MANAGER[resource_type].plugin else: - # TODO(sileht): Load schema from indexer - return GenericSchema + resource_type = pecan.request.indexer.get_resource_type(resource_type) + return ResourceSchema(resource_type.schema) def ResourceID(value): diff --git a/gnocchi/tests/gabbi/gabbits/resource_type.yaml b/gnocchi/tests/gabbi/gabbits/resource_type.yaml index a910dad8..f02d1017 100644 --- a/gnocchi/tests/gabbi/gabbits/resource_type.yaml +++ b/gnocchi/tests/gabbi/gabbits/resource_type.yaml @@ -23,6 +23,28 @@ tests: content-type: application/json status: 403 + - name: post resource type bad string + url: /v1/resource_type + method: post + request_headers: + x-roles: admin + content-type: application/json + data: + name: my_custom_resource + attributes: + foo: + type: string + max_length: 32 + min_length: 5 + noexist: foo + status: 400 + response_strings: + # NOTE(sileht): We would prefer to have a better message but voluptuous seems a bit lost when + # an Any have many dict with the same key, here "type" + # - "Invalid input: extra keys not allowed @ data[u'attributes'][u'foo'][u'noexist']" + # - "Invalid input: not a valid value for dictionary value @ data[u'attributes'][u'foo'][u'type']" + - "Invalid input:" + - name: post resource type url: /v1/resource_type method: post @@ -31,9 +53,29 @@ tests: content-type: application/json data: name: my_custom_resource + attributes: + name: + type: string + required: true + max_length: 5 + min_length: 2 + foobar: + type: string + required: false status: 201 response_json_paths: $.name: my_custom_resource + $.attributes: + name: + type: string + required: True + max_length: 5 + min_length: 2 + foobar: + type: string + required: False + max_length: 255 + min_length: 0 response_headers: 
location: $SCHEME://$NETLOC/v1/resource_type/my_custom_resource @@ -54,6 +96,23 @@ tests: method: DELETE status: 403 + - name: post invalid resource + url: /v1/resource/my_custom_resource + method: post + request_headers: + x-user-id: 0fbb2314-8461-4b1a-8013-1fc22f6afc9c + x-project-id: f3d41b77-0cc1-4f0b-b94a-1d5be9c0e3ea + content-type: application/json + data: + id: d11edfca-4393-4fda-b94d-b05a3a1b3747 + name: toolong!!! + foobar: what + status: 400 + response_strings: + # split to not match the u' in py2 + - "Invalid input: length of value must be at most 5 for dictionary value @ data[" + - "'name']" + - name: post custom resource url: /v1/resource/my_custom_resource method: post @@ -63,7 +122,50 @@ tests: content-type: application/json data: id: d11edfca-4393-4fda-b94d-b05a3a1b3747 + name: bar + foobar: what status: 201 + response_json_paths: + $.id: d11edfca-4393-4fda-b94d-b05a3a1b3747 + $.name: bar + $.foobar: what + + - name: patch custom resource + url: /v1/resource/my_custom_resource/d11edfca-4393-4fda-b94d-b05a3a1b3747 + method: patch + request_headers: + x-user-id: 0fbb2314-8461-4b1a-8013-1fc22f6afc9c + x-project-id: f3d41b77-0cc1-4f0b-b94a-1d5be9c0e3ea + content-type: application/json + data: + name: foo + status: 200 + response_json_paths: + $.id: d11edfca-4393-4fda-b94d-b05a3a1b3747 + $.name: foo + $.foobar: what + + - name: get resource + url: /v1/resource/my_custom_resource/d11edfca-4393-4fda-b94d-b05a3a1b3747 + request_headers: + content-type: application/json + response_json_paths: + $.id: d11edfca-4393-4fda-b94d-b05a3a1b3747 + $.name: foo + $.foobar: what + + - name: list resource history + url: /v1/resource/my_custom_resource/d11edfca-4393-4fda-b94d-b05a3a1b3747/history?sort=revision_end:asc-nullslast + request_headers: + content-type: application/json + response_json_paths: + $.`len`: 2 + $[0].id: d11edfca-4393-4fda-b94d-b05a3a1b3747 + $[0].name: bar + $[0].foobar: what + $[1].id: d11edfca-4393-4fda-b94d-b05a3a1b3747 + $[1].name: foo + 
$[1].foobar: what - name: delete in use resource_type url: /v1/resource_type/my_custom_resource diff --git a/gnocchi/tests/test_indexer.py b/gnocchi/tests/test_indexer.py index 5fc065ef..2f049ded 100644 --- a/gnocchi/tests/test_indexer.py +++ b/gnocchi/tests/test_indexer.py @@ -974,15 +974,28 @@ class TestIndexerDriver(tests_base.TestCase): self.assertNotIn(e1, [m.id for m in metrics]) def test_resource_type_crud(self): + mgr = self.index.get_resource_type_schema() + rtype = mgr.resource_type_from_dict("indexer_test", { + "col1": {"type": "string", "required": True, + "min_length": 2, "max_length": 15} + }) + # Create - self.index.create_resource_type("indexer_test") + self.index.create_resource_type(rtype) self.assertRaises(indexer.ResourceTypeAlreadyExists, self.index.create_resource_type, - "indexer_test") + rtype) - # Get and List + # Get rtype = self.index.get_resource_type("indexer_test") self.assertEqual("indexer_test", rtype.name) + self.assertEqual(1, len(rtype.attributes)) + self.assertEqual("col1", rtype.attributes[0].name) + self.assertEqual("string", rtype.attributes[0].typename) + self.assertEqual(15, rtype.attributes[0].max_length) + self.assertEqual(2, rtype.attributes[0].min_length) + + # List rtypes = self.index.list_resource_types() for rtype in rtypes: if rtype.name == "indexer_test": @@ -994,9 +1007,11 @@ class TestIndexerDriver(tests_base.TestCase): rid = uuid.uuid4() self.index.create_resource("indexer_test", rid, str(uuid.uuid4()), - str(uuid.uuid4())) + str(uuid.uuid4()), + col1="col1_value") r = self.index.get_resource("indexer_test", rid) self.assertEqual("indexer_test", r.type) + self.assertEqual("col1_value", r.col1) # Deletion self.assertRaises(indexer.ResourceTypeInUse, diff --git a/setup.cfg b/setup.cfg index 9654e7d8..bb5e7803 100644 --- a/setup.cfg +++ b/setup.cfg @@ -85,6 +85,9 @@ data_files = etc/gnocchi = etc/gnocchi/* [entry_points] +gnocchi.indexer.sqlalchemy.resource_type_attribute = + string = 
gnocchi.indexer.sqlalchemy_extension:StringSchema + gnocchi.indexer.resources = generic = gnocchi.indexer.sqlalchemy_base:Resource instance = gnocchi.indexer.sqlalchemy_extension:Instance -- GitLab From 46e34d4d9a8a752699155a72489b2de05852fa4c Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Wed, 20 Jan 2016 09:27:49 +0100 Subject: [PATCH 0131/1483] Implements resource attribute uuid This change allows to create resource attributes of type uuid. Blueprint resource-type-rest-api Change-Id: I52a063d816ad38b7901741abfefe1b6206425c2a --- doc/source/rest.yaml | 1 + gnocchi/indexer/sqlalchemy_extension.py | 4 +++ gnocchi/resource_type.py | 7 +++++ gnocchi/rest/__init__.py | 15 +++------- .../tests/gabbi/gabbits/resource_type.yaml | 28 +++++++++++++++++++ gnocchi/utils.py | 7 +++++ setup.cfg | 1 + 7 files changed, 52 insertions(+), 11 deletions(-) diff --git a/doc/source/rest.yaml b/doc/source/rest.yaml index 00d884a9..0bc1b7cf 100644 --- a/doc/source/rest.yaml +++ b/doc/source/rest.yaml @@ -318,6 +318,7 @@ { "name": "my_custom_type", "attributes": { + "myid": {"type": "uuid"}, "display_name": {"type": "string", "required": true}, "prefix": {"type": "string", "required": false, "max_length": 8, "min_length": 3} } diff --git a/gnocchi/indexer/sqlalchemy_extension.py b/gnocchi/indexer/sqlalchemy_extension.py index a9dd6055..a4d1bf8e 100644 --- a/gnocchi/indexer/sqlalchemy_extension.py +++ b/gnocchi/indexer/sqlalchemy_extension.py @@ -73,3 +73,7 @@ class StringSchema(resource_type.StringSchema): @property def satype(self): return sqlalchemy.String(self.max_length) + + +class UUIDSchema(resource_type.UUIDSchema): + satype = sqlalchemy_utils.UUIDType() diff --git a/gnocchi/resource_type.py b/gnocchi/resource_type.py index 0854544e..6e32f0fd 100644 --- a/gnocchi/resource_type.py +++ b/gnocchi/resource_type.py @@ -17,6 +17,8 @@ import six import stevedore import voluptuous +from gnocchi import utils + INVALID_NAMES = [ "id", "type", "metrics", @@ -99,6 +101,11 @@ class 
StringSchema(CommonAttributeSchema): return d +class UUIDSchema(CommonAttributeSchema): + typename = "uuid" + schema_ext = staticmethod(utils.UUID) + + class ResourceTypeAttributes(list): def jsonify(self): d = {} diff --git a/gnocchi/rest/__init__.py b/gnocchi/rest/__init__.py index 75c5869d..10fbde02 100644 --- a/gnocchi/rest/__init__.py +++ b/gnocchi/rest/__init__.py @@ -567,13 +567,6 @@ class MetricController(rest.RestController): abort(404, e) -def UUID(value): - try: - return uuid.UUID(value) - except Exception as e: - raise ValueError(e) - - class MetricsController(rest.RestController): @pecan.expose() @@ -674,7 +667,7 @@ class MetricsController(rest.RestController): _MetricsSchema = voluptuous.Schema({ - six.text_type: voluptuous.Any(UUID, + six.text_type: voluptuous.Any(utils.UUID, MetricsController.MetricSchema), }) @@ -959,12 +952,12 @@ GenericSchema = ResourceSchema({}) InstanceDiskSchema = ResourceSchema({ "name": six.text_type, - "instance_id": UUID, + "instance_id": utils.UUID, }) InstanceNetworkInterfaceSchema = ResourceSchema({ "name": six.text_type, - "instance_id": UUID, + "instance_id": utils.UUID, }) InstanceSchema = ResourceSchema({ @@ -1324,7 +1317,7 @@ class MetricsMeasuresBatchController(rest.RestController): # only the last key will be retain by json python module to # build the python dict. 
MeasuresBatchSchema = voluptuous.Schema( - {UUID: [MeasureSchema]} + {utils.UUID: [MeasureSchema]} ) @pecan.expose() diff --git a/gnocchi/tests/gabbi/gabbits/resource_type.yaml b/gnocchi/tests/gabbi/gabbits/resource_type.yaml index f02d1017..d78436b6 100644 --- a/gnocchi/tests/gabbi/gabbits/resource_type.yaml +++ b/gnocchi/tests/gabbi/gabbits/resource_type.yaml @@ -62,6 +62,8 @@ tests: foobar: type: string required: false + uuid: + type: uuid status: 201 response_json_paths: $.name: my_custom_resource @@ -76,6 +78,10 @@ tests: required: False max_length: 255 min_length: 0 + uuid: + type: uuid + required: True + response_headers: location: $SCHEME://$NETLOC/v1/resource_type/my_custom_resource @@ -107,12 +113,31 @@ tests: id: d11edfca-4393-4fda-b94d-b05a3a1b3747 name: toolong!!! foobar: what + uuid: 07eb339e-23c0-4be2-be43-cd8247afae3b status: 400 response_strings: # split to not match the u' in py2 - "Invalid input: length of value must be at most 5 for dictionary value @ data[" - "'name']" + - name: post invalid resource uuid + url: /v1/resource/my_custom_resource + method: post + request_headers: + x-user-id: 0fbb2314-8461-4b1a-8013-1fc22f6afc9c + x-project-id: f3d41b77-0cc1-4f0b-b94a-1d5be9c0e3ea + content-type: application/json + data: + id: d11edfca-4393-4fda-b94d-b05a3a1b3747 + name: too + foobar: what + uuid: really! 
+ status: 400 + response_strings: + # split to not match the u' in py2 + - "Invalid input: not a valid value for dictionary value @ data[" + - "'uuid']" + - name: post custom resource url: /v1/resource/my_custom_resource method: post @@ -124,6 +149,7 @@ tests: id: d11edfca-4393-4fda-b94d-b05a3a1b3747 name: bar foobar: what + uuid: e495ebad-be64-46c0-81d6-b079beb48df9 status: 201 response_json_paths: $.id: d11edfca-4393-4fda-b94d-b05a3a1b3747 @@ -144,6 +170,7 @@ tests: $.id: d11edfca-4393-4fda-b94d-b05a3a1b3747 $.name: foo $.foobar: what + $.uuid: e495ebad-be64-46c0-81d6-b079beb48df9 - name: get resource url: /v1/resource/my_custom_resource/d11edfca-4393-4fda-b94d-b05a3a1b3747 @@ -153,6 +180,7 @@ tests: $.id: d11edfca-4393-4fda-b94d-b05a3a1b3747 $.name: foo $.foobar: what + $.uuid: e495ebad-be64-46c0-81d6-b079beb48df9 - name: list resource history url: /v1/resource/my_custom_resource/d11edfca-4393-4fda-b94d-b05a3a1b3747/history?sort=revision_end:asc-nullslast diff --git a/gnocchi/utils.py b/gnocchi/utils.py index 8e2b139f..63646717 100644 --- a/gnocchi/utils.py +++ b/gnocchi/utils.py @@ -43,6 +43,13 @@ def ResourceUUID(value): raise ValueError(e) +def UUID(value): + try: + return uuid.UUID(value) + except Exception as e: + raise ValueError(e) + + def to_timestamp(v): if isinstance(v, datetime.datetime): return v diff --git a/setup.cfg b/setup.cfg index bb5e7803..409ce35f 100644 --- a/setup.cfg +++ b/setup.cfg @@ -87,6 +87,7 @@ data_files = [entry_points] gnocchi.indexer.sqlalchemy.resource_type_attribute = string = gnocchi.indexer.sqlalchemy_extension:StringSchema + uuid = gnocchi.indexer.sqlalchemy_extension:UUIDSchema gnocchi.indexer.resources = generic = gnocchi.indexer.sqlalchemy_base:Resource -- GitLab From e5bd6595b0624b8d9a3e62952a73a8137be321af Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Wed, 20 Jan 2016 09:45:18 +0100 Subject: [PATCH 0132/1483] Implements resource attribute number This change allows to create resource attributes of type number 
Blueprint resource-type-rest-api Change-Id: I3ada1bc8055fe63253bbf2c329ddbe6437f29d3f --- doc/source/rest.yaml | 3 +- gnocchi/indexer/sqlalchemy_extension.py | 4 ++ gnocchi/resource_type.py | 34 +++++++++- .../tests/gabbi/gabbits/resource_type.yaml | 66 +++++++++++++++++++ setup.cfg | 1 + 5 files changed, 106 insertions(+), 2 deletions(-) diff --git a/doc/source/rest.yaml b/doc/source/rest.yaml index 0bc1b7cf..158713d2 100644 --- a/doc/source/rest.yaml +++ b/doc/source/rest.yaml @@ -320,7 +320,8 @@ "attributes": { "myid": {"type": "uuid"}, "display_name": {"type": "string", "required": true}, - "prefix": {"type": "string", "required": false, "max_length": 8, "min_length": 3} + "prefix": {"type": "string", "required": false, "max_length": 8, "min_length": 3}, + "size": {"type": "number", "min": 5, "max": 32.8} } } diff --git a/gnocchi/indexer/sqlalchemy_extension.py b/gnocchi/indexer/sqlalchemy_extension.py index a4d1bf8e..a071326c 100644 --- a/gnocchi/indexer/sqlalchemy_extension.py +++ b/gnocchi/indexer/sqlalchemy_extension.py @@ -77,3 +77,7 @@ class StringSchema(resource_type.StringSchema): class UUIDSchema(resource_type.UUIDSchema): satype = sqlalchemy_utils.UUIDType() + + +class NumberSchema(resource_type.NumberSchema): + satype = sqlalchemy.Float(53) diff --git a/gnocchi/resource_type.py b/gnocchi/resource_type.py index 6e32f0fd..91356022 100644 --- a/gnocchi/resource_type.py +++ b/gnocchi/resource_type.py @@ -12,6 +12,7 @@ # License for the specific language governing permissions and limitations # under the License. 
+import numbers import re import six import stevedore @@ -57,7 +58,10 @@ class CommonAttributeSchema(object): voluptuous.Required('type'): cls.typename, voluptuous.Required('required', default=True): bool } - d.update(cls.meta_schema_ext) + if callable(cls.meta_schema_ext): + d.update(cls.meta_schema_ext()) + else: + d.update(cls.meta_schema_ext) return d def schema(self): @@ -106,6 +110,34 @@ class UUIDSchema(CommonAttributeSchema): schema_ext = staticmethod(utils.UUID) +class NumberSchema(CommonAttributeSchema): + typename = "number" + + def __init__(self, min, max, *args, **kwargs): + super(NumberSchema, self).__init__(*args, **kwargs) + self.min = min + self.max = max + + # TODO(sileht): ensure min_length <= max_length + meta_schema_ext = { + voluptuous.Required('min', default=None): voluptuous.Any( + None, numbers.Real), + voluptuous.Required('max', default=None): voluptuous.Any( + None, numbers.Real) + } + + @property + def schema_ext(self): + return voluptuous.All(numbers.Real, + voluptuous.Range(min=self.min, + max=self.max)) + + def jsonify(self): + d = super(NumberSchema, self).jsonify() + d.update({"min": self.min, "max": self.max}) + return d + + class ResourceTypeAttributes(list): def jsonify(self): d = {} diff --git a/gnocchi/tests/gabbi/gabbits/resource_type.yaml b/gnocchi/tests/gabbi/gabbits/resource_type.yaml index d78436b6..c704a8ff 100644 --- a/gnocchi/tests/gabbi/gabbits/resource_type.yaml +++ b/gnocchi/tests/gabbi/gabbits/resource_type.yaml @@ -14,6 +14,8 @@ tests: response_json_paths: $.`len`: 15 +# Some bad cases + - name: post resource type as non-admin url: /v1/resource_type method: post @@ -45,6 +47,8 @@ tests: # - "Invalid input: not a valid value for dictionary value @ data[u'attributes'][u'foo'][u'type']" - "Invalid input:" +# Create a type + - name: post resource type url: /v1/resource_type method: post @@ -64,6 +68,15 @@ tests: required: false uuid: type: uuid + int: + type: number + required: false + min: -2 + max: 3 + float: + type: 
number + required: false + min: -2.3 status: 201 response_json_paths: $.name: my_custom_resource @@ -81,10 +94,22 @@ tests: uuid: type: uuid required: True + int: + type: number + required: False + min: -2 + max: 3 + float: + type: number + required: false + min: -2.3 + max: response_headers: location: $SCHEME://$NETLOC/v1/resource_type/my_custom_resource +# Control the created type + - name: relist resource types desc: we have a resource type now url: /v1/resource_type @@ -97,11 +122,15 @@ tests: response_json_paths: $.name: my_custom_resource +# Some bad case case on the type + - name: delete as non-admin url: /v1/resource_type/my_custom_resource method: DELETE status: 403 +# Bad resources for this type + - name: post invalid resource url: /v1/resource/my_custom_resource method: post @@ -138,6 +167,8 @@ tests: - "Invalid input: not a valid value for dictionary value @ data[" - "'uuid']" +# Good resources for this type + - name: post custom resource url: /v1/resource/my_custom_resource method: post @@ -150,6 +181,7 @@ tests: name: bar foobar: what uuid: e495ebad-be64-46c0-81d6-b079beb48df9 + int: 1 status: 201 response_json_paths: $.id: d11edfca-4393-4fda-b94d-b05a3a1b3747 @@ -171,6 +203,7 @@ tests: $.name: foo $.foobar: what $.uuid: e495ebad-be64-46c0-81d6-b079beb48df9 + $.int: 1 - name: get resource url: /v1/resource/my_custom_resource/d11edfca-4393-4fda-b94d-b05a3a1b3747 @@ -181,6 +214,28 @@ tests: $.name: foo $.foobar: what $.uuid: e495ebad-be64-46c0-81d6-b079beb48df9 + $.int: 1 + + - name: post resource with default + url: /v1/resource/my_custom_resource + method: post + request_headers: + x-user-id: 0fbb2314-8461-4b1a-8013-1fc22f6afc9c + x-project-id: f3d41b77-0cc1-4f0b-b94a-1d5be9c0e3ea + content-type: application/json + data: + id: c4110aec-6e5c-43fa-b8c5-ffdfbca3ce59 + name: foo + uuid: e495ebad-be64-46c0-81d6-b079beb48df9 + status: 201 + response_json_paths: + $.id: c4110aec-6e5c-43fa-b8c5-ffdfbca3ce59 + $.name: foo + $.foobar: + $.uuid: 
e495ebad-be64-46c0-81d6-b079beb48df9 + $.int: + +# Ensure we can't delete the type - name: list resource history url: /v1/resource/my_custom_resource/d11edfca-4393-4fda-b94d-b05a3a1b3747/history?sort=revision_end:asc-nullslast @@ -204,6 +259,8 @@ tests: response_strings: - Resource type my_custom_resource is still in use +# Delete associated resources + - name: delete the resource url: /v1/resource/my_custom_resource/d11edfca-4393-4fda-b94d-b05a3a1b3747 request_headers: @@ -211,6 +268,15 @@ tests: method: DELETE status: 204 + - name: delete the second resource + url: /v1/resource/my_custom_resource/c4110aec-6e5c-43fa-b8c5-ffdfbca3ce59 + request_headers: + x-roles: admin + method: DELETE + status: 204 + +# Now we can deleted the type + - name: delete the custom resource type method: delete request_headers: diff --git a/setup.cfg b/setup.cfg index 409ce35f..68f073df 100644 --- a/setup.cfg +++ b/setup.cfg @@ -88,6 +88,7 @@ data_files = gnocchi.indexer.sqlalchemy.resource_type_attribute = string = gnocchi.indexer.sqlalchemy_extension:StringSchema uuid = gnocchi.indexer.sqlalchemy_extension:UUIDSchema + number = gnocchi.indexer.sqlalchemy_extension:NumberSchema gnocchi.indexer.resources = generic = gnocchi.indexer.sqlalchemy_base:Resource -- GitLab From ce5fbaf053b748d631f0265f481ee1746e0c6b2e Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Wed, 20 Jan 2016 19:39:17 +0100 Subject: [PATCH 0133/1483] Implements resource attribute bool This change allows to create resource attributes of type bool. 
Blueprint resource-type-rest-api Change-Id: I70be54554b05a6991a0ef9db52de820995239ef6 --- doc/source/rest.yaml | 3 ++- gnocchi/indexer/sqlalchemy_extension.py | 4 ++++ gnocchi/resource_type.py | 5 +++++ gnocchi/tests/gabbi/gabbits/resource_type.yaml | 6 ++++++ setup.cfg | 1 + 5 files changed, 18 insertions(+), 1 deletion(-) diff --git a/doc/source/rest.yaml b/doc/source/rest.yaml index 158713d2..abe62166 100644 --- a/doc/source/rest.yaml +++ b/doc/source/rest.yaml @@ -321,7 +321,8 @@ "myid": {"type": "uuid"}, "display_name": {"type": "string", "required": true}, "prefix": {"type": "string", "required": false, "max_length": 8, "min_length": 3}, - "size": {"type": "number", "min": 5, "max": 32.8} + "size": {"type": "number", "min": 5, "max": 32.8}, + "enabled": {"type": "bool", "required": false} } } diff --git a/gnocchi/indexer/sqlalchemy_extension.py b/gnocchi/indexer/sqlalchemy_extension.py index a071326c..c627d9ff 100644 --- a/gnocchi/indexer/sqlalchemy_extension.py +++ b/gnocchi/indexer/sqlalchemy_extension.py @@ -81,3 +81,7 @@ class UUIDSchema(resource_type.UUIDSchema): class NumberSchema(resource_type.NumberSchema): satype = sqlalchemy.Float(53) + + +class BoolSchema(resource_type.BoolSchema): + satype = sqlalchemy.Boolean diff --git a/gnocchi/resource_type.py b/gnocchi/resource_type.py index 91356022..093f3acf 100644 --- a/gnocchi/resource_type.py +++ b/gnocchi/resource_type.py @@ -138,6 +138,11 @@ class NumberSchema(CommonAttributeSchema): return d +class BoolSchema(CommonAttributeSchema): + typename = "bool" + schema_ext = bool + + class ResourceTypeAttributes(list): def jsonify(self): d = {} diff --git a/gnocchi/tests/gabbi/gabbits/resource_type.yaml b/gnocchi/tests/gabbi/gabbits/resource_type.yaml index c704a8ff..d35b01dc 100644 --- a/gnocchi/tests/gabbi/gabbits/resource_type.yaml +++ b/gnocchi/tests/gabbi/gabbits/resource_type.yaml @@ -77,6 +77,9 @@ tests: type: number required: false min: -2.3 + bool: + type: bool + required: false status: 201 
response_json_paths: $.name: my_custom_resource @@ -104,6 +107,9 @@ tests: required: false min: -2.3 max: + bool: + type: bool + required: false response_headers: location: $SCHEME://$NETLOC/v1/resource_type/my_custom_resource diff --git a/setup.cfg b/setup.cfg index 68f073df..5d13e79f 100644 --- a/setup.cfg +++ b/setup.cfg @@ -89,6 +89,7 @@ gnocchi.indexer.sqlalchemy.resource_type_attribute = string = gnocchi.indexer.sqlalchemy_extension:StringSchema uuid = gnocchi.indexer.sqlalchemy_extension:UUIDSchema number = gnocchi.indexer.sqlalchemy_extension:NumberSchema + bool = gnocchi.indexer.sqlalchemy_extension:BoolSchema gnocchi.indexer.resources = generic = gnocchi.indexer.sqlalchemy_base:Resource -- GitLab From b4dc28a044a4db0e9ed6f01fc7c0ab89c20c1e8b Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Wed, 20 Jan 2016 15:25:33 +0100 Subject: [PATCH 0134/1483] Move legacy Ceilometer resource into indexer. Blueprint resource-type-rest-api Change-Id: I26a01366165fa276ba7c7072a13fd2046658cb99 --- ...6189b9eb_migrate_legacy_resources_to_db.py | 48 ++++++++++++ gnocchi/indexer/sqlalchemy.py | 46 +++-------- gnocchi/indexer/sqlalchemy_base.py | 13 ++++ gnocchi/indexer/sqlalchemy_extension.py | 49 ------------ .../indexer/sqlalchemy_legacy_resources.py | 78 +++++++++++++++++++ gnocchi/rest/__init__.py | 66 ++-------------- gnocchi/tests/gabbi/gabbits/resource.yaml | 3 +- setup.cfg | 34 -------- 8 files changed, 157 insertions(+), 180 deletions(-) create mode 100644 gnocchi/indexer/alembic/versions/8f376189b9eb_migrate_legacy_resources_to_db.py create mode 100644 gnocchi/indexer/sqlalchemy_legacy_resources.py diff --git a/gnocchi/indexer/alembic/versions/8f376189b9eb_migrate_legacy_resources_to_db.py b/gnocchi/indexer/alembic/versions/8f376189b9eb_migrate_legacy_resources_to_db.py new file mode 100644 index 00000000..f1a83bd4 --- /dev/null +++ b/gnocchi/indexer/alembic/versions/8f376189b9eb_migrate_legacy_resources_to_db.py @@ -0,0 +1,48 @@ +# Copyright 2016 OpenStack 
Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""Migrate legacy resources to DB + +Revision ID: 8f376189b9eb +Revises: d24877c22ab0 +Create Date: 2016-01-20 15:03:28.115656 + +""" +import json + +from alembic import op +import sqlalchemy as sa + +from gnocchi.indexer import sqlalchemy_legacy_resources as legacy + +# revision identifiers, used by Alembic. +revision = '8f376189b9eb' +down_revision = 'd24877c22ab0' +branch_labels = None +depends_on = None + + +def upgrade(): + resource_type = sa.Table( + 'resource_type', sa.MetaData(), + sa.Column('name', sa.String(255), nullable=False), + sa.Column('attributes', sa.Text, nullable=False) + ) + + for name, attributes in legacy.ceilometer_resources.items(): + text_attributes = json.dumps(attributes) + op.execute(resource_type.update().where( + resource_type.c.name == name + ).values({resource_type.c.attributes: text_attributes})) diff --git a/gnocchi/indexer/sqlalchemy.py b/gnocchi/indexer/sqlalchemy.py index 02726644..86a9988c 100644 --- a/gnocchi/indexer/sqlalchemy.py +++ b/gnocchi/indexer/sqlalchemy.py @@ -28,12 +28,10 @@ from oslo_log import log import six import sqlalchemy import sqlalchemy_utils -from stevedore import extension from gnocchi import exceptions from gnocchi import indexer from gnocchi.indexer import sqlalchemy_base as base -from gnocchi import resource_type from gnocchi import utils Base = base.Base @@ -89,9 +87,8 @@ class PerInstanceFacade(object): class ResourceClassMapper(object): 
def __init__(self): - self._resources = extension.ExtensionManager( - 'gnocchi.indexer.resources') - self._cache = self.load_legacy_mappers() + self._cache = {'generic': {'resource': base.Resource, + 'history': base.ResourceHistory}} self._lock = threading.RLock() @staticmethod @@ -111,33 +108,6 @@ class ResourceClassMapper(object): return {'resource': resource_ext, 'history': resource_history_ext} - def is_legacy(self, resource_type_name): - return resource_type_name in self._resources - - def load_legacy_mappers(self): - mappers = {} - for ext in self._resources.extensions: - tablename = getattr(ext.plugin, '__tablename__', ext.name) - if ext.name == "generic": - mappers[tablename] = {'resource': base.Resource, - 'history': base.ResourceHistory} - else: - rt = base.ResourceType( - name=ext.name, tablename=tablename, - attributes=resource_type.ResourceTypeAttributes()) - mappers[tablename] = self._build_class_mappers(rt, ext.plugin) - return mappers - - def get_legacy_resource_types(self): - resource_types = [] - for ext in self._resources.extensions: - tablename = getattr(ext.plugin, '__tablename__', ext.name) - resource_types.append(base.ResourceType( - name=ext.name, - tablename=tablename, - attributes=resource_type.ResourceTypeAttributes())) - return resource_types - def get_classes(self, resource_type): # NOTE(sileht): Most of the times we can bypass the lock so do it try: @@ -245,12 +215,18 @@ class SQLAlchemyIndexer(indexer.IndexerDriver): else: command.upgrade(cfg, "head") - for rt in self._RESOURCE_TYPE_MANAGER.get_legacy_resource_types(): + # TODO(sileht): generic shouldn't be a particular case + # we must create a rt_generic and rt_generic_history table + # like other type + for rt in base.get_legacy_resource_types(): try: with self.facade.writer() as session: session.add(rt) except exception.DBDuplicateEntry: pass + with self.facade.writer_connection() as connection: + self._RESOURCE_TYPE_MANAGER.map_and_create_tables( + rt, connection) def 
create_resource_type(self, resource_type): # NOTE(sileht): mysql have a stupid and small length limitation on the @@ -302,9 +278,7 @@ class SQLAlchemyIndexer(indexer.IndexerDriver): ResourceType.name.asc()).all()) def delete_resource_type(self, name): - # FIXME(sileht) this type have special handling - # until we remove this special thing we reject its deletion - if self._RESOURCE_TYPE_MANAGER.is_legacy(name): + if name == "generic": raise indexer.ResourceTypeInUse(name) try: diff --git a/gnocchi/indexer/sqlalchemy_base.py b/gnocchi/indexer/sqlalchemy_base.py index 6e239192..f59122b1 100644 --- a/gnocchi/indexer/sqlalchemy_base.py +++ b/gnocchi/indexer/sqlalchemy_base.py @@ -32,6 +32,7 @@ import sqlalchemy_utils from gnocchi import archive_policy from gnocchi import indexer +from gnocchi.indexer import sqlalchemy_legacy_resources as legacy from gnocchi import resource_type from gnocchi import storage from gnocchi import utils @@ -204,6 +205,18 @@ RESOURCE_TYPE_SCHEMA_MANAGER = resource_type.ResourceTypeSchemaManager( "gnocchi.indexer.sqlalchemy.resource_type_attribute") +def get_legacy_resource_types(): + resource_types = [] + for name, attributes in legacy.ceilometer_resources.items(): + tablename = legacy.ceilometer_tablenames.get(name, name) + attrs = RESOURCE_TYPE_SCHEMA_MANAGER.attributes_from_dict( + attributes) + resource_types.append(ResourceType(name=name, + tablename=tablename, + attributes=attrs)) + return resource_types + + class ResourceTypeAttributes(sqlalchemy_utils.JSONType): def process_bind_param(self, attributes, dialect): return super(ResourceTypeAttributes, self).process_bind_param( diff --git a/gnocchi/indexer/sqlalchemy_extension.py b/gnocchi/indexer/sqlalchemy_extension.py index c627d9ff..058d31b2 100644 --- a/gnocchi/indexer/sqlalchemy_extension.py +++ b/gnocchi/indexer/sqlalchemy_extension.py @@ -20,55 +20,6 @@ import sqlalchemy_utils from gnocchi import resource_type -class Image(object): - name = sqlalchemy.Column(sqlalchemy.String(255), 
nullable=False) - container_format = sqlalchemy.Column(sqlalchemy.String(255), - nullable=False) - disk_format = sqlalchemy.Column(sqlalchemy.String(255), nullable=False) - - -class Instance(object): - flavor_id = sqlalchemy.Column(sqlalchemy.String(255), nullable=False) - image_ref = sqlalchemy.Column(sqlalchemy.String(255)) - host = sqlalchemy.Column(sqlalchemy.String(255), nullable=False) - display_name = sqlalchemy.Column(sqlalchemy.String(255), nullable=False) - server_group = sqlalchemy.Column(sqlalchemy.String(255)) - - -class InstanceDisk(object): - name = sqlalchemy.Column(sqlalchemy.String(255), nullable=False) - instance_id = sqlalchemy.Column(sqlalchemy_utils.UUIDType(), - nullable=False) - - -class InstanceNetworkInterface(object): - __tablename__ = 'instance_net_int' - name = sqlalchemy.Column(sqlalchemy.String(255), nullable=False) - instance_id = sqlalchemy.Column(sqlalchemy_utils.UUIDType(), - nullable=False) - - -class Volume(object): - display_name = sqlalchemy.Column(sqlalchemy.String(255), nullable=True) - - -class Host(object): - __tablename__ = 'host' - host_name = sqlalchemy.Column(sqlalchemy.String(255), nullable=False) - - -class HostNetworkInterface(object): - __tablename__ = 'host_net_int' - host_name = sqlalchemy.Column(sqlalchemy.String(255), nullable=False) - device_name = sqlalchemy.Column(sqlalchemy.String(255), nullable=True) - - -class HostDisk(object): - __tablename__ = 'host_disk' - host_name = sqlalchemy.Column(sqlalchemy.String(255), nullable=False) - device_name = sqlalchemy.Column(sqlalchemy.String(255), nullable=True) - - class StringSchema(resource_type.StringSchema): @property def satype(self): diff --git a/gnocchi/indexer/sqlalchemy_legacy_resources.py b/gnocchi/indexer/sqlalchemy_legacy_resources.py new file mode 100644 index 00000000..8390476b --- /dev/null +++ b/gnocchi/indexer/sqlalchemy_legacy_resources.py @@ -0,0 +1,78 @@ +# -*- encoding: utf-8 -*- + +# Licensed under the Apache License, Version 2.0 (the 
"License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +# NOTE(sileht): this code is also in alembic migration +ceilometer_tablenames = { + "instance_network_interface": "instance_net_int", + "host_network_interface": "host_net_int", +} +ceilometer_resources = { + "generic": {}, + "image": { + "name": {"type": "string", "min_length": 0, "max_length": 255, + "required": True}, + "container_format": {"type": "string", "min_length": 0, + "max_length": 255, "required": True}, + "disk_format": {"type": "string", "min_length": 0, "max_length": 255, + "required": True}, + }, + "instance": { + "flavor_id": {"type": "string", "min_length": 0, "max_length": 255, + "required": True}, + "image_ref": {"type": "string", "min_length": 0, "max_length": 255, + "required": False}, + "host": {"type": "string", "min_length": 0, "max_length": 255, + "required": True}, + "display_name": {"type": "string", "min_length": 0, "max_length": 255, + "required": True}, + "server_group": {"type": "string", "min_length": 0, "max_length": 255, + "required": False}, + }, + "instance_disk": { + "name": {"type": "string", "min_length": 0, "max_length": 255, + "required": True}, + "instance_id": {"type": "uuid", "required": True}, + }, + "instance_network_interface": { + "name": {"type": "string", "min_length": 0, "max_length": 255, + "required": True}, + "instance_id": {"type": "uuid", "required": True}, + }, + "volume": { + "display_name": {"type": "string", "min_length": 0, "max_length": 255, + "required": False}, + }, + "swift_account": {}, + "ceph_account": {}, 
+ "network": {}, + "identity": {}, + "ipmi": {}, + "stack": {}, + "host": { + "host_name": {"type": "string", "min_length": 0, "max_length": 255, + "required": True}, + }, + "host_network_interface": { + "host_name": {"type": "string", "min_length": 0, "max_length": 255, + "required": True}, + "device_name": {"type": "string", "min_length": 0, "max_length": 255, + "required": False}, + }, + "host_disk": { + "host_name": {"type": "string", "min_length": 0, "max_length": 255, + "required": True}, + "device_name": {"type": "string", "min_length": 0, "max_length": 255, + "required": False}, + }, +} diff --git a/gnocchi/rest/__init__.py b/gnocchi/rest/__init__.py index 10fbde02..d9163ffe 100644 --- a/gnocchi/rest/__init__.py +++ b/gnocchi/rest/__init__.py @@ -948,65 +948,9 @@ class ResourceController(rest.RestController): abort(404, e) -GenericSchema = ResourceSchema({}) - -InstanceDiskSchema = ResourceSchema({ - "name": six.text_type, - "instance_id": utils.UUID, -}) - -InstanceNetworkInterfaceSchema = ResourceSchema({ - "name": six.text_type, - "instance_id": utils.UUID, -}) - -InstanceSchema = ResourceSchema({ - "flavor_id": six.text_type, - voluptuous.Optional("image_ref"): six.text_type, - "host": six.text_type, - "display_name": six.text_type, - voluptuous.Optional("server_group"): six.text_type, -}) - -VolumeSchema = ResourceSchema({ - voluptuous.Optional("display_name"): voluptuous.Any(None, - six.text_type), -}) - -ImageSchema = ResourceSchema({ - "name": six.text_type, - "container_format": six.text_type, - "disk_format": six.text_type, -}) - -HostSchema = ResourceSchema({ - "host_name": six.text_type, -}) - -HostDiskSchema = ResourceSchema({ - "host_name": six.text_type, - voluptuous.Optional("device_name"): voluptuous.Any(None, - six.text_type), -}) - -HostNetworkInterfaceSchema = ResourceSchema({ - "host_name": six.text_type, - voluptuous.Optional("device_name"): voluptuous.Any(None, - six.text_type), -}) - -# NOTE(sileht): Must be loaded after all 
ResourceSchema -RESOURCE_SCHEMA_MANAGER = extension.ExtensionManager( - 'gnocchi.controller.schemas') - - def schema_for(resource_type): - if resource_type in RESOURCE_SCHEMA_MANAGER: - # TODO(sileht): Remove this legacy resource schema loading - return RESOURCE_SCHEMA_MANAGER[resource_type].plugin - else: - resource_type = pecan.request.indexer.get_resource_type(resource_type) - return ResourceSchema(resource_type.schema) + resource_type = pecan.request.indexer.get_resource_type(resource_type) + return ResourceSchema(resource_type.schema) def ResourceID(value): @@ -1407,8 +1351,10 @@ class AggregationController(rest.RestController): # NOTE(sileht): we want the raw 404 message here # so use directly pecan pecan.abort(404) - elif resource_type not in RESOURCE_SCHEMA_MANAGER: - abort(404, indexer.NoSuchResourceType(resource_type)) + try: + pecan.request.indexer.get_resource_type(resource_type) + except indexer.NoSuchResourceType as e: + abort(404, e) return AggregationResourceController(resource_type, metric_name), remainder diff --git a/gnocchi/tests/gabbi/gabbits/resource.yaml b/gnocchi/tests/gabbi/gabbits/resource.yaml index b08eb004..c7882494 100644 --- a/gnocchi/tests/gabbi/gabbits/resource.yaml +++ b/gnocchi/tests/gabbi/gabbits/resource.yaml @@ -198,7 +198,8 @@ tests: host: compute1 status: 400 response_strings: - - "Invalid input: required key not provided @ data['display_name']" + - "Invalid input: required key not provided @ data[" + - "'display_name']" - name: post instance resource url: /v1/resource/instance diff --git a/setup.cfg b/setup.cfg index 5d13e79f..686750c5 100644 --- a/setup.cfg +++ b/setup.cfg @@ -91,40 +91,6 @@ gnocchi.indexer.sqlalchemy.resource_type_attribute = number = gnocchi.indexer.sqlalchemy_extension:NumberSchema bool = gnocchi.indexer.sqlalchemy_extension:BoolSchema -gnocchi.indexer.resources = - generic = gnocchi.indexer.sqlalchemy_base:Resource - instance = gnocchi.indexer.sqlalchemy_extension:Instance - instance_disk = 
gnocchi.indexer.sqlalchemy_extension:InstanceDisk - instance_network_interface = gnocchi.indexer.sqlalchemy_extension:InstanceNetworkInterface - swift_account = gnocchi.indexer.sqlalchemy_base:ResourceExt - volume = gnocchi.indexer.sqlalchemy_extension:Volume - ceph_account = gnocchi.indexer.sqlalchemy_base:ResourceExt - network = gnocchi.indexer.sqlalchemy_base:ResourceExt - identity = gnocchi.indexer.sqlalchemy_base:ResourceExt - ipmi = gnocchi.indexer.sqlalchemy_base:ResourceExt - stack = gnocchi.indexer.sqlalchemy_base:ResourceExt - image = gnocchi.indexer.sqlalchemy_extension:Image - host = gnocchi.indexer.sqlalchemy_extension:Host - host_disk = gnocchi.indexer.sqlalchemy_extension:HostDisk - host_network_interface = gnocchi.indexer.sqlalchemy_extension:HostNetworkInterface - -gnocchi.controller.schemas = - generic = gnocchi.rest:GenericSchema - instance = gnocchi.rest:InstanceSchema - instance_disk = gnocchi.rest:InstanceDiskSchema - instance_network_interface = gnocchi.rest:InstanceNetworkInterfaceSchema - swift_account = gnocchi.rest:GenericSchema - volume = gnocchi.rest:VolumeSchema - ceph_account = gnocchi.rest:GenericSchema - network = gnocchi.rest:GenericSchema - identity = gnocchi.rest:GenericSchema - ipmi = gnocchi.rest:GenericSchema - stack = gnocchi.rest:GenericSchema - image = gnocchi.rest:ImageSchema - host = gnocchi.rest:HostSchema - host_disk = gnocchi.rest:HostDiskSchema - host_network_interface = gnocchi.rest:HostNetworkInterfaceSchema - gnocchi.storage = null = gnocchi.storage.null:NullStorage swift = gnocchi.storage.swift:SwiftStorage -- GitLab From b1b7c1730bff0875089399dc66469d05d1ce5ba6 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Wed, 20 Jan 2016 17:59:44 +0100 Subject: [PATCH 0135/1483] Don't create Ceilometer resource types by default. 
Blueprint resource-type-rest-api Closes-bug: #1513623 Change-Id: Ie6ff8049860ca976e8d4b6abfbd2ff483cd60d2f --- devstack/gate/post_test_hook.sh | 3 +++ devstack/plugin.sh | 6 +++++- gnocchi/cli.py | 7 +++++-- gnocchi/indexer/__init__.py | 2 +- gnocchi/indexer/sqlalchemy.py | 4 +++- gnocchi/tests/base.py | 3 ++- gnocchi/tests/gabbi/fixtures.py | 2 +- 7 files changed, 20 insertions(+), 7 deletions(-) diff --git a/devstack/gate/post_test_hook.sh b/devstack/gate/post_test_hook.sh index 3e0c6b52..6e4ab014 100755 --- a/devstack/gate/post_test_hook.sh +++ b/devstack/gate/post_test_hook.sh @@ -43,6 +43,9 @@ export GNOCCHI_SERVICE_URL=$(openstack catalog show metric -c endpoints -f value curl -X GET ${GNOCCHI_SERVICE_URL}/v1/archive_policy -H "Content-Type: application/json" +# NOTE(sileht): gabbi tests needs assert on some Ceilometer resource types +sudo gnocchi-upgrade --create-legacy-resource-types + # Just ensure tools still works gnocchi metric create sudo -E -H -u stack $GNOCCHI_DIR/tools/measures_injector.py --metrics 1 --batch-of-measures 2 --measures-per-batch 2 diff --git a/devstack/plugin.sh b/devstack/plugin.sh index 4c91e7a1..8522be3e 100644 --- a/devstack/plugin.sh +++ b/devstack/plugin.sh @@ -309,7 +309,11 @@ function init_gnocchi { if is_service_enabled mysql postgresql; then recreate_database gnocchi fi - $GNOCCHI_BIN_DIR/gnocchi-upgrade + if is_service_enabled ceilometer; then + $GNOCCHI_BIN_DIR/gnocchi-upgrade --create-legacy-resource-types + else + $GNOCCHI_BIN_DIR/gnocchi-upgrade + fi } function preinstall_gnocchi { diff --git a/gnocchi/cli.py b/gnocchi/cli.py index c068cf0c..38f7d1b5 100644 --- a/gnocchi/cli.py +++ b/gnocchi/cli.py @@ -43,14 +43,17 @@ def upgrade(): cfg.BoolOpt("skip-storage", default=False, help="Skip storage upgrade."), cfg.BoolOpt("skip-archive-policies-creation", default=False, - help="Skip default archive policies creation.") + help="Skip default archive policies creation."), + cfg.BoolOpt("create-legacy-resource-types", 
default=False, + help="Creation of Ceilometer legacy resource types.") ]) conf = service.prepare_service(conf=conf) index = indexer.get_driver(conf) index.connect() if not conf.skip_index: LOG.info("Upgrading indexer %s" % index) - index.upgrade() + index.upgrade( + create_legacy_resource_types=conf.create_legacy_resource_types) if not conf.skip_storage: s = storage.get_driver(conf) LOG.info("Upgrading storage %s" % s) diff --git a/gnocchi/indexer/__init__.py b/gnocchi/indexer/__init__.py index a83e4348..761794a4 100644 --- a/gnocchi/indexer/__init__.py +++ b/gnocchi/indexer/__init__.py @@ -254,7 +254,7 @@ class IndexerDriver(object): pass @staticmethod - def upgrade(nocreate=False): + def upgrade(nocreate=False, create_legacy_resource_types=False): pass @staticmethod diff --git a/gnocchi/indexer/sqlalchemy.py b/gnocchi/indexer/sqlalchemy.py index 86a9988c..f8ecaca4 100644 --- a/gnocchi/indexer/sqlalchemy.py +++ b/gnocchi/indexer/sqlalchemy.py @@ -197,7 +197,7 @@ class SQLAlchemyIndexer(indexer.IndexerDriver): def get_engine(self): return self.facade.get_engine() - def upgrade(self, nocreate=False): + def upgrade(self, nocreate=False, create_legacy_resource_types=False): from alembic import command from alembic import migration @@ -219,6 +219,8 @@ class SQLAlchemyIndexer(indexer.IndexerDriver): # we must create a rt_generic and rt_generic_history table # like other type for rt in base.get_legacy_resource_types(): + if not (rt.name == "generic" or create_legacy_resource_types): + continue try: with self.facade.writer() as session: session.add(rt) diff --git a/gnocchi/tests/base.py b/gnocchi/tests/base.py index 3c370412..d4782fce 100644 --- a/gnocchi/tests/base.py +++ b/gnocchi/tests/base.py @@ -343,7 +343,8 @@ class TestCase(base.BaseTestCase): # in the Alembic upgrades. We have a test to check that # upgrades == create but it misses things such as custom CHECK # constraints. 
- self.index.upgrade(nocreate=True) + self.index.upgrade(nocreate=True, + create_legacy_resource_types=True) self.coord.stop() diff --git a/gnocchi/tests/gabbi/fixtures.py b/gnocchi/tests/gabbi/fixtures.py index 03e926ce..b6ee59c5 100644 --- a/gnocchi/tests/gabbi/fixtures.py +++ b/gnocchi/tests/gabbi/fixtures.py @@ -111,7 +111,7 @@ class ConfigFixture(fixture.GabbiFixture): index = indexer.get_driver(conf) index.connect() - index.upgrade() + index.upgrade(create_legacy_resource_types=True) conf.set_override('pecan_debug', False, 'api') -- GitLab From 1224377c67a0e7184755501cfe0d9e3a4c372f93 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Wed, 20 Jan 2016 19:42:43 +0100 Subject: [PATCH 0136/1483] Add some resource types tests Blueprint resource-type-rest-api Change-Id: I09d6a6357a706b961ecd34db7654518b433757a5 --- .../tests/gabbi/gabbits/resource_type.yaml | 27 +++++++++++++++++++ 1 file changed, 27 insertions(+) diff --git a/gnocchi/tests/gabbi/gabbits/resource_type.yaml b/gnocchi/tests/gabbi/gabbits/resource_type.yaml index d35b01dc..66d2729f 100644 --- a/gnocchi/tests/gabbi/gabbits/resource_type.yaml +++ b/gnocchi/tests/gabbi/gabbits/resource_type.yaml @@ -127,6 +127,33 @@ tests: url: /v1/resource_type/my_custom_resource response_json_paths: $.name: my_custom_resource + $.attributes: + name: + type: string + required: True + min_length: 2 + max_length: 5 + foobar: + type: string + required: False + min_length: 0 + max_length: 255 + uuid: + type: uuid + required: True + int: + type: number + required: False + min: -2 + max: 3 + float: + type: number + required: false + min: -2.3 + max: + bool: + type: bool + required: false # Some bad case case on the type -- GitLab From 6043e8791cd7dbf84f2f9165b260a5d167cdd530 Mon Sep 17 00:00:00 2001 From: Lianhao Lu Date: Fri, 18 Mar 2016 06:03:35 +0000 Subject: [PATCH 0137/1483] devstack: allow gnocchi-api to run on different host from keystone User now can specify GNOCCHI_SERVICE_HOST to a different host so gnocchi-api 
and keystone can run on different machine. User also can specify GNOCCHI_USE_KEYSTONE to indicate whether need to run gnocchi-api with keystone. Change-Id: I95bd8775c95430695eb952aa902b30459553c2eb --- devstack/plugin.sh | 16 +++++++++++----- devstack/settings | 5 ++++- 2 files changed, 15 insertions(+), 6 deletions(-) diff --git a/devstack/plugin.sh b/devstack/plugin.sh index 4c91e7a1..030992d3 100644 --- a/devstack/plugin.sh +++ b/devstack/plugin.sh @@ -56,8 +56,12 @@ function is_gnocchi_enabled { # gnocchi_swift gnocchi_swift ResellerAdmin (if Swift is enabled) function create_gnocchi_accounts { # Gnocchi - if is_service_enabled key && is_service_enabled gnocchi-api - then + if [ "$GNOCCHI_USE_KEYSTONE" == "True" ] && is_service_enabled gnocchi-api ; then + # At this time, the /etc/openstack/clouds.yaml is available, + # we could leverage that by setting OS_CLOUD + OLD_OS_CLOUD=$OS_CLOUD + export OS_CLOUD='devstack-admin' + create_service_user "gnocchi" local gnocchi_service=$(get_or_create_service "gnocchi" \ @@ -74,6 +78,8 @@ function create_gnocchi_accounts { "$SERVICE_PASSWORD" default "gnocchi_swift@example.com") get_or_add_user_project_role "ResellerAdmin" $gnocchi_swift_user "gnocchi_swift" fi + + export OS_CLOUD=$OLD_OS_CLOUD fi } @@ -263,7 +269,7 @@ function configure_gnocchi { exit 1 fi - if is_service_enabled key; then + if [ "$GNOCCHI_USE_KEYSTONE" == "True" ] ; then if is_service_enabled gnocchi-grafana; then iniset $GNOCCHI_PASTE_CONF pipeline:main pipeline "cors gnocchi+auth" iniset $KEYSTONE_CONF cors allowed_origin ${GRAFANA_URL} @@ -347,7 +353,7 @@ function install_gnocchi { install_gnocchiclient - is_service_enabled key && EXTRA_FLAVOR=,keystonmiddleware + [ "$GNOCCHI_USE_KEYSTONE" == "True" ] && EXTRA_FLAVOR=,keystonmiddleware # We don't use setup_package because we don't follow openstack/requirements sudo -H pip install -e "$GNOCCHI_DIR"[test,$GNOCCHI_STORAGE_BACKEND,${DATABASE_TYPE}${EXTRA_FLAVOR}] @@ -391,7 +397,7 @@ function start_gnocchi 
{ fi # Create a default policy - if ! is_service_enabled key; then + if [ "$GNOCCHI_USE_KEYSTONE" == "False" ]; then export OS_AUTH_TYPE=gnocchi-noauth export GNOCCHI_USER_ID=`uuidgen` export GNOCCHI_PROJECT_ID=`uuidgen` diff --git a/devstack/settings b/devstack/settings index 2a74165e..b39e86b9 100644 --- a/devstack/settings +++ b/devstack/settings @@ -15,6 +15,9 @@ GNOCCHI_DATA_DIR=${GNOCCHI_DATA_DIR:-${DATA_DIR}/gnocchi} # Toggle for deploying Gnocchi under HTTPD + mod_wsgi GNOCCHI_USE_MOD_WSGI=${GNOCCHI_USE_MOD_WSGI:-${ENABLE_HTTPD_MOD_WSGI_SERVICES}} +# Toggle for deploying Gnocchi with/without Keystone +GNOCCHI_USE_KEYSTONE=$(trueorfalse True GNOCCHI_USE_KEYSTONE) + # Support potential entry-points console scripts and venvs if [[ ${USE_VENV} = True ]]; then PROJECT_VENV["gnocchi"]=${GNOCCHI_DIR}.venv @@ -29,7 +32,7 @@ GNOCCHI_SERVICE_PROTOCOL=http # NOTE(chdent): If you are not using mod wsgi you need to set port! GNOCCHI_SERVICE_PORT=${GNOCCHI_SERVICE_PORT:-8041} GNOCCHI_SERVICE_PREFIX=${GNOCCHI_SERVICE_PREFIX:-'/metric'} -GNOCCHI_SERVICE_HOST=$SERVICE_HOST +GNOCCHI_SERVICE_HOST=${GNOCCHI_SERVICE_HOST:-${SERVICE_HOST}} # Gnocchi statsd info GNOCCHI_STATSD_RESOURCE_ID=${GNOCCHI_STATSD_RESOURCE_ID:-$(uuidgen)} -- GitLab From c6de04dca966ae3535b527da8ca7dee4c6276caf Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Thu, 24 Mar 2016 12:18:57 +0100 Subject: [PATCH 0138/1483] InfluxDB: drop support The driver has been broken for a while, and there's no interest shown in fixing it. No known user of Gnocchi rely on it. Since there's little interest in using InfluxDB behind Gnocchi, let's drop that code altogether. 
Change-Id: I25d30a35e218ac3c6c531e3a943b1d2023978110 --- devstack/gate/gate_hook.sh | 3 - devstack/plugin.sh | 43 ------ devstack/settings | 5 - doc/source/architecture.rst | 12 +- doc/source/configuration.rst | 2 - doc/source/install.rst | 1 - gnocchi/opts.py | 4 +- gnocchi/storage/influxdb.py | 281 ----------------------------------- gnocchi/tests/base.py | 14 -- gnocchi/tests/test_rest.py | 5 - setup-test-env.sh | 9 -- setup.cfg | 3 - tox.ini | 10 +- 13 files changed, 8 insertions(+), 384 deletions(-) delete mode 100644 gnocchi/storage/influxdb.py diff --git a/devstack/gate/gate_hook.sh b/devstack/gate/gate_hook.sh index 905f8fa5..7915d260 100755 --- a/devstack/gate/gate_hook.sh +++ b/devstack/gate/gate_hook.sh @@ -39,9 +39,6 @@ case $STORAGE_DRIVER in ENABLED_SERVICES+="ceph" DEVSTACK_LOCAL_CONFIG+=$'\nexport GNOCCHI_STORAGE_BACKEND=ceph' ;; - influxdb) - DEVSTACK_LOCAL_CONFIG+=$'\nexport GNOCCHI_STORAGE_BACKEND=influxdb' - ;; esac diff --git a/devstack/plugin.sh b/devstack/plugin.sh index 4c91e7a1..8264c09b 100644 --- a/devstack/plugin.sh +++ b/devstack/plugin.sh @@ -113,32 +113,6 @@ function _gnocchi_install_redis { pip_install_gr redis } -# install influxdb -# NOTE(chdent): InfluxDB is not currently packaged by the distro at the -# version that gnocchi needs. Until that is true we're downloading -# the debs and rpms packaged by the InfluxDB company. When it is -# true this method can be changed to be similar to -# _gnocchi_install_redis above. 
-function _gnocchi_install_influxdb { - if is_package_installed influxdb; then - echo "influxdb already installed" - else - local file=$(mktemp /tmp/influxpkg-XXXXX) - - if is_ubuntu; then - wget -O $file $GNOCCHI_INFLUXDB_DEB_PKG - sudo dpkg -i $file - elif is_fedora; then - wget -O $file $GNOCCHI_INFLUXDB_RPM_PKG - sudo rpm -i $file - fi - rm $file - fi - - # restart influxdb via its initscript - sudo /opt/influxdb/init.sh restart -} - function _gnocchi_install_grafana { if is_ubuntu; then local file=$(mktemp /tmp/grafanapkg-XXXXX) @@ -157,11 +131,6 @@ function _gnocchi_install_grafana { sudo service grafana-server restart } -# remove the influxdb database -function _gnocchi_cleanup_influxdb { - curl -G 'http://localhost:8086/query' --data-urlencode "q=DROP DATABASE $GNOCCHI_INFLUXDB_DBNAME" -} - function _cleanup_gnocchi_apache_wsgi { sudo rm -f $GNOCCHI_WSGI_DIR/*.wsgi sudo rm -f $(apache_site_config_for gnocchi) @@ -255,9 +224,6 @@ function configure_gnocchi { elif [[ "$GNOCCHI_STORAGE_BACKEND" = 'file' ]] ; then iniset $GNOCCHI_CONF storage driver file iniset $GNOCCHI_CONF storage file_basepath $GNOCCHI_DATA_DIR/ - elif [[ "$GNOCCHI_STORAGE_BACKEND" == 'influxdb' ]] ; then - iniset $GNOCCHI_CONF storage driver influxdb - iniset $GNOCCHI_CONF storage influxdb_database $GNOCCHI_INFLUXDB_DBNAME else echo "ERROR: could not configure storage driver" exit 1 @@ -331,11 +297,6 @@ function install_gnocchi { _gnocchi_install_redis fi - if [[ "${GNOCCHI_STORAGE_BACKEND}" == 'influxdb' ]] ; then - _gnocchi_install_influxdb - pip_install influxdb - fi - if [[ "$GNOCCHI_STORAGE_BACKEND" = 'ceph' ]] ; then pip_install cradox fi @@ -414,10 +375,6 @@ function stop_gnocchi { stop_process $serv done - if [[ "${GNOCCHI_STORAGE_BACKEND}" == 'influxdb' ]] ; then - _gnocchi_cleanup_influxdb - fi - if is_service_enabled gnocchi-grafana; then sudo umount /usr/share/grafana/public/app/plugins/datasource/gnocchi fi diff --git a/devstack/settings b/devstack/settings index 
2a74165e..9ad8f51a 100644 --- a/devstack/settings +++ b/devstack/settings @@ -45,11 +45,6 @@ GNOCCHI_CEPH_POOL_PGP=${GNOCCHI_CEPH_POOL_PGP:-8} # Gnocchi backend GNOCCHI_STORAGE_BACKEND=${GNOCCHI_STORAGE_BACKEND:-file} -# InfluxDB Settings -GNOCCHI_INFLUXDB_DBNAME=${GNOCCHI_INFLUXDB_DBNAME:-gnocchidevstack} -GNOCCHI_INFLUXDB_RPM_PKG=${GNOCCHI_INFLUXDB_RPM_PKG:-https://s3.amazonaws.com/influxdb/influxdb-0.9.4.2-1.x86_64.rpm} -GNOCCHI_INFLUXDB_DEB_PKG=${GNOCCHI_INFLUXDB_DEB_PKG:-https://s3.amazonaws.com/influxdb/influxdb_0.9.4.2_amd64.deb} - # Grafana settings GRAFANA_RPM_PKG=${GRAFANA_RPM_PKG:-https://grafanarel.s3.amazonaws.com/builds/grafana-2.6.0-1.x86_64.rpm} GRAFANA_DEB_PKG=${GRAFANA_DEB_PKG:-https://grafanarel.s3.amazonaws.com/builds/grafana_2.6.0_amd64.deb} diff --git a/doc/source/architecture.rst b/doc/source/architecture.rst index 22f7e0b2..aa4aee3a 100644 --- a/doc/source/architecture.rst +++ b/doc/source/architecture.rst @@ -37,14 +37,10 @@ Gnocchi currently offers 4 storage drivers: * File * Swift * Ceph (preferred) -* InfluxDB (experimental) -The first three drivers are based on an intermediate library, named -*Carbonara*, which handles the time series manipulation, since none of these -storage technologies handle time series natively. `InfluxDB`_ does not need -this layer since it is itself a time series database. However, The InfluxDB -driver is still experimental and suffers from bugs in InfluxDB itself that are -yet to be fixed as of this writing. +The drivers are based on an intermediate library, named *Carbonara*, which +handles the time series manipulation, since none of these storage technologies +handle time series natively. The three *Carbonara* based drivers are working well and are as scalable as their back-end technology permits. Ceph and Swift are inherently more scalable @@ -57,8 +53,6 @@ Gnocchi processes. In any case, it is obvious that Ceph and Swift drivers are largely more scalable. 
Ceph also offers better consistency, and hence is the recommended driver. -.. _InfluxDB: http://influxdb.com - How to plan for Gnocchi’s storage ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ diff --git a/doc/source/configuration.rst b/doc/source/configuration.rst index f09d2595..a24f141b 100644 --- a/doc/source/configuration.rst +++ b/doc/source/configuration.rst @@ -51,7 +51,6 @@ Gnocchi provides these storage drivers: - File (default) - `Swift`_ - `Ceph`_ -- `InfluxDB`_ (experimental) Gnocchi provides these indexer drivers: @@ -62,7 +61,6 @@ Gnocchi provides these indexer drivers: .. _`Ceph`: http://ceph.com/ .. _`PostgreSQL`: http://postgresql.org .. _`MySQL`: http://mysql.com -.. _`InfluxDB`: http://influxdb.com Configuring the WSGI pipeline ----------------------------- diff --git a/doc/source/install.rst b/doc/source/install.rst index 1d09774c..9806be40 100644 --- a/doc/source/install.rst +++ b/doc/source/install.rst @@ -45,7 +45,6 @@ The list of variants available is: * keystone – provides Keystone authentication support * mysql - provides MySQL indexer support * postgresql – provides PostgreSQL indexer support -* influxdb – provides InfluxDB storage support * swift – provides OpenStack Swift storage support * ceph – provides Ceph storage support * file – provides file driver support diff --git a/gnocchi/opts.py b/gnocchi/opts.py index 312d9023..08c7bdff 100644 --- a/gnocchi/opts.py +++ b/gnocchi/opts.py @@ -21,7 +21,6 @@ import gnocchi.indexer import gnocchi.storage import gnocchi.storage.ceph import gnocchi.storage.file -import gnocchi.storage.influxdb import gnocchi.storage.swift @@ -58,8 +57,7 @@ def list_opts(): gnocchi.storage.OPTS, gnocchi.storage.ceph.OPTS, gnocchi.storage.file.OPTS, - gnocchi.storage.swift.OPTS, - gnocchi.storage.influxdb.OPTS)), + gnocchi.storage.swift.OPTS)), ("statsd", ( cfg.StrOpt('host', default='0.0.0.0', diff --git a/gnocchi/storage/influxdb.py b/gnocchi/storage/influxdb.py deleted file mode 100644 index 8e7f9d5d..00000000 --- 
a/gnocchi/storage/influxdb.py +++ /dev/null @@ -1,281 +0,0 @@ -# -*- encoding: utf-8 -*- -# -# Copyright © 2015 eNovance -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -from __future__ import absolute_import -import datetime -import logging -import operator - -try: - import influxdb -except ImportError: - influxdb = None -import iso8601 -from oslo_config import cfg -from oslo_utils import timeutils -import retrying - -from gnocchi import exceptions -from gnocchi import storage -from gnocchi import utils - - -OPTS = [ - cfg.StrOpt('influxdb_host', - default='localhost', - help='InfluxDB host'), - cfg.PortOpt('influxdb_port', - default=8086, - help='InfluxDB port'), - cfg.StrOpt('influxdb_username', - default='root', - help='InfluxDB username'), - cfg.StrOpt('influxdb_password', - secret=True, - help='InfluxDB password'), - cfg.StrOpt('influxdb_database', - default='gnocchi', - help='InfluxDB database'), - cfg.BoolOpt('influxdb_block_until_data_ingested', - default=False, - help='InfluxDB ingests data in asynchroneous ways. 
' - 'Set to True to wait data are ingested.'), -] - - -LOG = logging.getLogger(__name__) -START_EPOCH = datetime.datetime(1, 1, 1, tzinfo=iso8601.iso8601.UTC) - - -class InfluxDBStorage(storage.StorageDriver): - - def __init__(self, conf): - if not influxdb: - raise ImportError("Module influxdb could not be loaded") - super(InfluxDBStorage, self).__init__(conf) - self._block_until_data_ingested = ( - conf.influxdb_block_until_data_ingested) - self.influx = influxdb.InfluxDBClient(conf.influxdb_host, - conf.influxdb_port, - conf.influxdb_username, - conf.influxdb_password, - conf.influxdb_database) - self.database = conf.influxdb_database - - @staticmethod - def _get_metric_id(metric): - return str(metric.id) - - def _metric_exists(self, metric): - list_series = [s['name'] for s in self.influx.get_list_series()] - return self._get_metric_id(metric) in list_series - - def _query(self, metric, query): - try: - return self.influx.query(query, database=self.database) - except influxdb.client.InfluxDBClientError as e: - # NOTE(ityaptin) If metric exists but doesn't have any measures - # with `value` field influxdb client may raise exception for - # (aggregate) query. It's not error in Gnocchi context and we - # should to return empty list in this case. - if ("unknown field or tag name" in e.content - or "measurement not found" in e.content): - return {self._get_metric_id(metric): []} - raise - - @retrying.retry(stop_max_delay=5000, wait_fixed=500, - retry_on_exception=utils.retry_if_retry_raised) - def _wait_points_exists(self, metric_id, where): - # NOTE(sileht): influxdb query returns even the data is not yet insert - # in the asked series, the work is done in an async fashion, so a - # immediate get_measures after an add_measures will not returns the - # just inserted data. perhaps related: - # https://github.com/influxdb/influxdb/issues/2450 This is a workaround - # to wait that data appear in influxdb... 
- if not self._block_until_data_ingested: - return - try: - result = self.influx.query("SELECT * FROM \"%(metric_id)s\" WHERE " - "%(where)s LIMIT 1" % - dict(metric_id=metric_id, where=where), - database=self.database) - except influxdb.client.InfluxDBClientError as e: - if "measurement not found" in e.content: - raise utils.Retry - raise - - result = list(result[metric_id]) - if not result: - raise utils.Retry - - def delete_metric(self, metric): - metric_id = self._get_metric_id(metric) - self._query(metric, "DROP MEASUREMENT \"%s\"" % metric_id) - - def add_measures(self, metric, measures): - metric_id = self._get_metric_id(metric) - points = [dict(measurement=metric_id, - time=self._timestamp_to_utc(m.timestamp).isoformat(), - fields=dict(value=float(m.value))) - for m in measures] - self.influx.write_points(points=points, time_precision='n', - database=self.database, - retention_policy="default") - self._wait_points_exists(metric_id, "time = '%(time)s' AND " - "value = %(value)s" % - dict(time=points[-1]['time'], - value=points[-1]["fields"]["value"])) - - def get_measures(self, metric, from_timestamp=None, to_timestamp=None, - aggregation='mean', granularity=None): - super(InfluxDBStorage, self).get_measures( - metric, from_timestamp, to_timestamp, aggregation) - - if from_timestamp: - from_timestamp = self._timestamp_to_utc(from_timestamp) - if to_timestamp: - to_timestamp = self._timestamp_to_utc(to_timestamp) - - metric_id = self._get_metric_id(metric) - - if from_timestamp: - first_measure_timestamp = from_timestamp - else: - result = self._query(metric, "select * from \"%(metric_id)s\"" % - dict(metric_id=metric_id)) - result = list(result[metric_id]) - if result: - first_measure_timestamp = self._timestamp_to_utc( - timeutils.parse_isotime(result[0]['time'])) - else: - first_measure_timestamp = None - - query = ("SELECT %(aggregation)s(value) FROM \"%(metric_id)s\"" - % dict(aggregation=aggregation, - metric_id=metric_id)) - - # NOTE(jd) So this is 
totally suboptimal as we CANNOT limit the range - # on time. InfluxDB is not smart enough yet to limit the result of the - # time we want based on the GROUP BY result, not based on the time - # value. If we do from_timestamp < t < to_timestamp, InfluxDB will - # limit the datapoints to those, and then run the aggregate function. - # What we want instead, is something like: - # SELECT mean(value) FROM serie - # GROUP BY time(5s) as groupedtime - # WHERE from_timestamp <= groupedtime < to_timestamp - # Since we cannot do that, we aggregate everything and then limit - # the returned result. - # see https://github.com/influxdb/influxdb/issues/1973 - # NOTE(sileht): But we have to set one time boundary to have the - # request accept by influxdb. - # see https://github.com/influxdb/influxdb/issues/2444 - # - # That's good enough until we support continuous query or the like. - - results = [] - defs = sorted( - (d - for d in metric.archive_policy.definition - if granularity is None or granularity == d.granularity), - key=operator.attrgetter('granularity')) - - for definition in defs: - time_query = self._make_time_query( - first_measure_timestamp, - to_timestamp, - definition.granularity) - subquery = (query + - " WHERE %(times)s GROUP BY time(%(granularity)ds) " - "fill(none) LIMIT %(points)d" % - dict(times=time_query, - granularity=definition.granularity, - points=definition.points)) - - result = self._query(metric, subquery) - - subresults = [] - for point in result[metric_id]: - timestamp = self._timestamp_to_utc( - timeutils.parse_isotime(point['time'])) - if (point[aggregation] is not None and - ((from_timestamp is None or timestamp >= from_timestamp) - and (to_timestamp is None or timestamp < to_timestamp))): - subresults.insert(0, (timestamp, - definition.granularity, - point[aggregation])) - results.extend(subresults) - - return list(reversed(results)) - - def search_value(self, metrics, query, from_timestamp=None, - to_timestamp=None, - aggregation='mean'): - 
results = {} - predicate = storage.MeasureQuery(query) - - for metric in metrics: - measures = self.get_measures(metric, from_timestamp, to_timestamp, - aggregation) - results[metric] = [ - (timestamp, granularity, value) - for timestamp, granularity, value in measures - if predicate(value)] - return results - - @staticmethod - def _timestamp_to_utc(ts): - return timeutils.normalize_time(ts).replace(tzinfo=iso8601.iso8601.UTC) - - def _make_time_query(self, from_timestamp, to_timestamp, granularity): - if from_timestamp: - from_timestamp = find_nearest_stable_point(from_timestamp, - granularity) - left_time = self._timestamp_to_utc(from_timestamp).isoformat() - else: - left_time = "now()" - - if to_timestamp and to_timestamp >= from_timestamp: - right_time = self._timestamp_to_utc(to_timestamp).isoformat() - else: - right_time = None - - return ("time >= '%s'" % left_time) + (" and time < '%s'" % right_time - if right_time else "") - - def get_cross_metric_measures(self, metrics, from_timestamp=None, - to_timestamp=None, aggregation='mean', - needed_overlap=None): - super(InfluxDBStorage, self).get_cross_metric_measures( - metrics, from_timestamp, to_timestamp, aggregation, needed_overlap) - raise exceptions.NotImplementedError - - -def find_nearest_stable_point(timestamp, granularity, next=False): - """Find the timetamp before another one for a particular granularity. - - e.g. 
the nearest timestamp for 14:23:45 - with a granularity of 60 is 14:23:00 - - :param timestamp: The timestamp to use as a reference point - :param granularity: Granularity to use to look for the nearest timestamp - :param next: Whatever to run the next timestamp - rather than the previous one - """ - seconds = timeutils.delta_seconds(START_EPOCH, timestamp) - seconds = int(seconds - seconds % granularity) - stable_point = START_EPOCH + datetime.timedelta(seconds=seconds) - if next: - stable_point += datetime.timedelta(seconds=granularity) - return stable_point diff --git a/gnocchi/tests/base.py b/gnocchi/tests/base.py index 3c370412..416ac902 100644 --- a/gnocchi/tests/base.py +++ b/gnocchi/tests/base.py @@ -376,20 +376,6 @@ class TestCase(base.BaseTestCase): self.conf.set_override('file_basepath', tempdir.path, 'storage') - elif self.conf.storage.driver == 'influxdb': - self.conf.set_override('influxdb_block_until_data_ingested', True, - 'storage') - self.conf.set_override('influxdb_database', 'test', 'storage') - self.conf.set_override('influxdb_password', 'root', 'storage') - self.conf.set_override('influxdb_port', - os.getenv("GNOCCHI_TEST_INFLUXDB_PORT", - 51234), 'storage') - # NOTE(ityaptin) Creating unique database for every test may cause - # tests failing by timeout, but in may be useful in some cases - if os.getenv("GNOCCHI_TEST_INFLUXDB_UNIQUE_DATABASES"): - self.conf.set_override("influxdb_database", - "gnocchi_%s" % uuid.uuid4().hex, - 'storage') self.storage = storage.get_driver(self.conf) # NOTE(jd) Do not upgrade the storage. 
We don't really need the storage diff --git a/gnocchi/tests/test_rest.py b/gnocchi/tests/test_rest.py index 4d0f7df9..d1466cad 100644 --- a/gnocchi/tests/test_rest.py +++ b/gnocchi/tests/test_rest.py @@ -322,11 +322,6 @@ class MetricTest(RestTest): status=403) def test_add_measures_back_window(self): - if self.conf.storage.driver == 'influxdb': - # FIXME(sileht): Won't pass with influxdb because it doesn't - # check archive policy - raise testcase.TestSkipped("InfluxDB issue") - ap_name = str(uuid.uuid4()) with self.app.use_admin_user(): self.app.post_json( diff --git a/setup-test-env.sh b/setup-test-env.sh index d4fe1cd0..dbbf8297 100755 --- a/setup-test-env.sh +++ b/setup-test-env.sh @@ -5,13 +5,4 @@ set -x GNOCCHI_TEST_INDEXER_DRIVER=${GNOCCHI_TEST_INDEXER_DRIVER:-postgresql} source $(which overtest) $GNOCCHI_TEST_INDEXER_DRIVER export GNOCCHI_INDEXER_URL=${OVERTEST_URL/#mysql:/mysql+pymysql:} -# Activate overtest for storage -case $GNOCCHI_TEST_STORAGE_DRIVER in - influxdb) - source $(which overtest) $GNOCCHI_TEST_STORAGE_DRIVER - GNOCCHI_TEST_INFLUXDB_PORT=${OVERTEST_INFLUXDB_PORT} - ;; - *) - ;; -esac $* diff --git a/setup.cfg b/setup.cfg index 9654e7d8..7976d4dc 100644 --- a/setup.cfg +++ b/setup.cfg @@ -33,8 +33,6 @@ postgresql = sqlalchemy sqlalchemy-utils alembic>=0.7.6,!=0.8.1 -influxdb = - influxdb>=2.4 swift = python-swiftclient>=3.0.0 msgpack-python @@ -124,7 +122,6 @@ gnocchi.storage = swift = gnocchi.storage.swift:SwiftStorage ceph = gnocchi.storage.ceph:CephStorage file = gnocchi.storage.file:FileStorage - influxdb = gnocchi.storage.influxdb:InfluxDBStorage gnocchi.indexer = null = gnocchi.indexer.null:NullIndexer diff --git a/tox.ini b/tox.ini index 31ed496d..cb85323f 100644 --- a/tox.ini +++ b/tox.ini @@ -1,6 +1,6 @@ [tox] minversion = 1.8 -envlist = py{27,34},py{27,34}-{postgresql,mysql}{,-file,-swift,-ceph,-influxdb},pep8,bashate +envlist = py{27,34},py{27,34}-{postgresql,mysql}{,-file,-swift,-ceph},pep8,bashate [testenv] usedevelop = True @@ 
-9,7 +9,6 @@ passenv = LANG OS_TEST_TIMEOUT OS_STDOUT_CAPTURE OS_STDERR_CAPTURE OS_LOG_CAPTUR deps = .[test] py{27,34}-postgresql: .[postgresql,swift,ceph,file] py{27,34}-mysql: .[mysql,swift,ceph,file] - py{27,34}-{postgresql,mysql}-influxdb: .[influxdb] setenv = GNOCCHI_TEST_STORAGE_DRIVER=file GNOCCHI_TEST_INDEXER_DRIVER=postgresql @@ -18,9 +17,8 @@ setenv = py{27,34}-{postgresql,mysql}-file: GNOCCHI_TEST_STORAGE_DRIVERS=file py{27,34}-{postgresql,mysql}-swift: GNOCCHI_TEST_STORAGE_DRIVERS=swift py{27,34}-{postgresql,mysql}-ceph: GNOCCHI_TEST_STORAGE_DRIVERS=ceph - py{27,34}-{postgresql,mysql}-influxdb: GNOCCHI_TEST_STORAGE_DRIVERS=influxdb - py{27,34}-postgresql{,-file,-swift,-ceph,-influxdb}: GNOCCHI_TEST_INDEXER_DRIVERS=postgresql - py{27,34}-mysql{,-file,-swift,-ceph,-influxdb}: GNOCCHI_TEST_INDEXER_DRIVERS=mysql + py{27,34}-postgresql{,-file,-swift,-ceph}: GNOCCHI_TEST_INDEXER_DRIVERS=postgresql + py{27,34}-mysql{,-file,-swift,-ceph}: GNOCCHI_TEST_INDEXER_DRIVERS=mysql commands = doc8 --ignore-path doc/source/rest.rst doc/source @@ -64,7 +62,7 @@ exclude = .tox,.eggs,doc show-source = true [testenv:genconfig] -deps = .[mysql,postgresql,test,file,influxdb,ceph,swift] +deps = .[mysql,postgresql,test,file,ceph,swift] commands = oslo-config-generator --config-file=gnocchi-config-generator.conf [testenv:docs] -- GitLab From c4d8b88503bfc50751d5dfe66569a3f83e4096a9 Mon Sep 17 00:00:00 2001 From: gordon chung Date: Thu, 24 Mar 2016 14:44:55 -0400 Subject: [PATCH 0139/1483] remove timeserie_filter param this doesn't seem to be used anywhere. 
Change-Id: Ic747a358ca5c3b4f61ff39aa841c154c01e3c0f4 --- gnocchi/carbonara.py | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/gnocchi/carbonara.py b/gnocchi/carbonara.py index 0cac5d95..c6a41bc7 100644 --- a/gnocchi/carbonara.py +++ b/gnocchi/carbonara.py @@ -270,6 +270,7 @@ class BoundTimeSerie(TimeSerie): def _first_block_timestamp(self): rounded = self._round_timestamp(self.ts.index[-1], self.block_size.delta.value) + return rounded - (self.block_size * self.back_window) def _truncate(self): @@ -644,8 +645,7 @@ class TimeSerieArchive(SerializableMixin): for sampling, size in definitions] ) - def fetch(self, from_timestamp=None, to_timestamp=None, - timeserie_filter=None): + def fetch(self, from_timestamp=None, to_timestamp=None): """Fetch aggregated time value. Returns a sorted list of tuples (timestamp, granularity, value). @@ -653,8 +653,6 @@ class TimeSerieArchive(SerializableMixin): result = [] end_timestamp = to_timestamp for ts in reversed(self.agg_timeseries): - if timeserie_filter and not timeserie_filter(ts): - continue points = ts[from_timestamp:to_timestamp] try: # Do not include stop timestamp -- GitLab From 4918bf3984f266b28d08d0676e31c37187efbd23 Mon Sep 17 00:00:00 2001 From: gordon chung Date: Mon, 28 Mar 2016 14:55:26 -0400 Subject: [PATCH 0140/1483] cleanup split function groupby by default sorts the result so we don't need to sort again. 
http://pandas.pydata.org/pandas-docs/stable/generated/pandas.Series.groupby.html Change-Id: I63f6cba7fefbdd93e529486f07d46d7ecb4f99a0 --- gnocchi/carbonara.py | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) diff --git a/gnocchi/carbonara.py b/gnocchi/carbonara.py index c6a41bc7..3bc51996 100644 --- a/gnocchi/carbonara.py +++ b/gnocchi/carbonara.py @@ -341,14 +341,8 @@ class AggregatedTimeSerie(TimeSerie): def split(self): groupby = self.ts.groupby(functools.partial( self.get_split_key_datetime, sampling=self.sampling)) - keys = sorted(groupby.groups.keys()) - for i, ts in enumerate(keys): - if i + 1 == len(keys): - yield self._split_key_to_string(ts), TimeSerie(self.ts[ts:]) - elif i + 1 < len(keys): - t = self.ts[ts:keys[i + 1]] - del t[t.index[-1]] - yield self._split_key_to_string(ts), TimeSerie(t) + for group, ts in groupby: + yield self._split_key_to_string(group), TimeSerie(ts) @classmethod def from_timeseries(cls, timeseries, sampling, aggregation_method, -- GitLab From beeb324a3ed007699a34c9a1e2775a52ffa34574 Mon Sep 17 00:00:00 2001 From: Thomas Goirand Date: Tue, 29 Mar 2016 13:11:13 +0000 Subject: [PATCH 0141/1483] Do not use Keystone admin auth token to register API endpoints. 
--- debian/changelog | 10 ++++- debian/control | 2 +- debian/gnocchi-api.templates | 25 ++++++++++--- debian/po/cs.po | 57 ++++++++++++++++++++--------- debian/po/da.po | 63 +++++++++++++++++++++++--------- debian/po/de.po | 63 +++++++++++++++++++++++--------- debian/po/es.po | 66 ++++++++++++++++++++++++--------- debian/po/fr.po | 71 +++++++++++++++++++++++++----------- debian/po/gl.po | 40 ++++++++++++++------ debian/po/it.po | 63 +++++++++++++++++++++++--------- debian/po/ja.po | 61 +++++++++++++++++++++---------- debian/po/nl.po | 64 ++++++++++++++++++++++---------- debian/po/pl.po | 40 ++++++++++++++------ debian/po/pt.po | 67 ++++++++++++++++++++++++---------- debian/po/pt_BR.po | 63 +++++++++++++++++++++++--------- debian/po/ru.po | 62 +++++++++++++++++++++++-------- debian/po/sv.po | 63 +++++++++++++++++++++++--------- debian/po/templates.pot | 40 ++++++++++++++------ debian/po/zh_CN.po | 40 ++++++++++++++------ 19 files changed, 694 insertions(+), 266 deletions(-) diff --git a/debian/changelog b/debian/changelog index ca0eeb6d..4f78d301 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,8 +1,14 @@ -gnocchi (2.0.2-2) UNRELEASED; urgency=medium +gnocchi (2.0.2-3) experimental; urgency=medium * Added missing (build-)depends: python-lz4. - -- Thomas Goirand Mon, 14 Mar 2016 14:12:42 +0100 + -- Thomas Goirand Tue, 29 Mar 2016 13:29:28 +0000 + +gnocchi (2.0.2-2) experimental; urgency=medium + + * Do not use Keystone admin auth token to register API endpoints. 
+ + -- Thomas Goirand Tue, 29 Mar 2016 13:10:49 +0000 gnocchi (2.0.2-1) experimental; urgency=medium diff --git a/debian/control b/debian/control index 4b6bb430..628c5fd5 100644 --- a/debian/control +++ b/debian/control @@ -5,7 +5,7 @@ Maintainer: PKG OpenStack Uploaders: Thomas Goirand , Build-Depends: debhelper (>= 9), dh-python, - openstack-pkg-tools (>= 37~), + openstack-pkg-tools (>= 40~), python-all, python-pbr, python-setuptools, diff --git a/debian/gnocchi-api.templates b/debian/gnocchi-api.templates index e913e49d..5209747f 100644 --- a/debian/gnocchi-api.templates +++ b/debian/gnocchi-api.templates @@ -16,7 +16,8 @@ _Description: Register Gnocchi in the Keystone endpoint catalog? endpoint-create". This can be done automatically now. . Note that you will need to have an up and running Keystone server on which to - connect using the Keystone authentication token. + connect using a known admin project name, admin username and password. The + admin auth token is not used anymore. Template: gnocchi/keystone-ip Type: string @@ -24,11 +25,25 @@ _Description: Keystone server IP address: Please enter the IP address of the Keystone server, so that gnocchi-api can contact Keystone to do the Gnocchi service and endpoint creation. -Template: gnocchi/keystone-auth-token +Template: gnocchi/keystone-admin-name +Type: string +Default: admin +_Description: Keystone admin name: + To register the service endpoint, this package needs to know the Admin login, + name, project name, and password to the Keystone server. + +Template: gnocchi/keystone-project-name +Type: string +Default: admin +_Description: Keystone admin project name: + To register the service endpoint, this package needs to know the Admin login, + name, project name, and password to the Keystone server. + +Template: gnocchi/keystone-admin-password Type: password -_Description: Keystone authentication token: - To configure its endpoint in Keystone, gnocchi-api needs the Keystone - authentication token. 
+_Description: Keystone admin password: + To register the service endpoint, this package needs to know the Admin login, + name, project name, and password to the Keystone server. Template: gnocchi/endpoint-ip Type: string diff --git a/debian/po/cs.po b/debian/po/cs.po index fce3884b..98bdfd6d 100644 --- a/debian/po/cs.po +++ b/debian/po/cs.po @@ -7,7 +7,7 @@ msgid "" msgstr "" "Project-Id-Version: glance 2013.1.2-4\n" "Report-Msgid-Bugs-To: gnocchi@packages.debian.org\n" -"POT-Creation-Date: 2015-11-25 09:24+0000\n" +"POT-Creation-Date: 2016-03-29 13:10+0000\n" "PO-Revision-Date: 2013-08-25 13:01+0200\n" "Last-Translator: Michal Simunek \n" "Language-Team: Czech \n" @@ -196,7 +196,8 @@ msgstr "" #| "which to connect using the Keystone auth token." msgid "" "Note that you will need to have an up and running Keystone server on which " -"to connect using the Keystone authentication token." +"to connect using a known admin project name, admin username and password. " +"The admin auth token is not used anymore." msgstr "" "Berte na vědomí, že musíte mít běžící server keystone, na který se lze " "připojit pomocí ověřovacího klíče pro Keystone." @@ -219,37 +220,48 @@ msgstr "" "Zadejte IP adresu serveru keystone, aby se mohlo glance-api spojit s " "Keystone a provozovat službu Gnocchi a vytvářet koncové body." -#. Type: password +#. Type: string #. Description #: ../gnocchi-api.templates:4001 #, fuzzy #| msgid "Keystone Auth Token:" -msgid "Keystone authentication token:" +msgid "Keystone admin name:" msgstr "Autentizační klíč pro Keystone:" +#. Type: string +#. Description +#. Type: string +#. Description #. Type: password #. Description -#: ../gnocchi-api.templates:4001 -#, fuzzy -#| msgid "" -#| "To configure its endpoint in Keystone, glance-api needs the Keystone auth " -#| "token." 
+#: ../gnocchi-api.templates:4001 ../gnocchi-api.templates:5001 +#: ../gnocchi-api.templates:6001 msgid "" -"To configure its endpoint in Keystone, gnocchi-api needs the Keystone " -"authentication token." +"To register the service endpoint, this package needs to know the Admin " +"login, name, project name, and password to the Keystone server." msgstr "" -"Aby mohlo glance-api nastavit v Keystone svůj koncový bod, potřebuje " -"autentizační klíč pro Keystone." #. Type: string #. Description #: ../gnocchi-api.templates:5001 +msgid "Keystone admin project name:" +msgstr "" + +#. Type: password +#. Description +#: ../gnocchi-api.templates:6001 +msgid "Keystone admin password:" +msgstr "" + +#. Type: string +#. Description +#: ../gnocchi-api.templates:7001 msgid "Gnocchi endpoint IP address:" msgstr "IP adresa koncového bodu Gnocchi:" #. Type: string #. Description -#: ../gnocchi-api.templates:5001 +#: ../gnocchi-api.templates:7001 msgid "Please enter the IP address that will be used to contact Gnocchi." msgstr "" "Zadejte IP adresu, která se bude používat ke spojení s Gnocchi (např: IP " @@ -257,7 +269,7 @@ msgstr "" #. Type: string #. Description -#: ../gnocchi-api.templates:5001 +#: ../gnocchi-api.templates:7001 msgid "" "This IP address should be accessible from the clients that will use this " "service, so if you are installing a public cloud, this should be a public IP " @@ -269,13 +281,13 @@ msgstr "" #. Type: string #. Description -#: ../gnocchi-api.templates:6001 +#: ../gnocchi-api.templates:8001 msgid "Name of the region to register:" msgstr "Název registrované oblasti:" #. Type: string #. Description -#: ../gnocchi-api.templates:6001 +#: ../gnocchi-api.templates:8001 #, fuzzy #| msgid "" #| "Openstack can be used using availability zones, with each region " @@ -289,3 +301,14 @@ msgstr "" "Openstack lze využívat pomocí oblastí dostupnosti, přičemž každá oblast " "představuje místo. 
Zadejte prosím oblast, kterou chcete použít při " "registraci koncového bodu." + +#, fuzzy +#~| msgid "" +#~| "To configure its endpoint in Keystone, glance-api needs the Keystone " +#~| "auth token." +#~ msgid "" +#~ "To configure its endpoint in Keystone, gnocchi-api needs the Keystone " +#~ "authentication token." +#~ msgstr "" +#~ "Aby mohlo glance-api nastavit v Keystone svůj koncový bod, potřebuje " +#~ "autentizační klíč pro Keystone." diff --git a/debian/po/da.po b/debian/po/da.po index a67a0ab0..81b9b36c 100644 --- a/debian/po/da.po +++ b/debian/po/da.po @@ -7,7 +7,7 @@ msgid "" msgstr "" "Project-Id-Version: glance\n" "Report-Msgid-Bugs-To: gnocchi@packages.debian.org\n" -"POT-Creation-Date: 2015-11-25 09:24+0000\n" +"POT-Creation-Date: 2016-03-29 13:10+0000\n" "PO-Revision-Date: 2014-02-22 12:42+0000\n" "Last-Translator: Joe Hansen \n" "Language-Team: Danish \n" @@ -165,9 +165,14 @@ msgstr "" #. Type: boolean #. Description #: ../gnocchi-api.templates:2001 +#, fuzzy +#| msgid "" +#| "Note that you will need to have an up and running Keystone server on " +#| "which to connect using the Keystone authentication token." msgid "" "Note that you will need to have an up and running Keystone server on which " -"to connect using the Keystone authentication token." +"to connect using a known admin project name, admin username and password. " +"The admin auth token is not used anymore." msgstr "" "Bemærk at du skal have en op og kørende Keystoneserver, som du skal forbinde " "til med Keystones godkendelsessymbol." @@ -188,42 +193,55 @@ msgstr "" "Indtast venligst IP-adressen for Keystoneserveren, så at glance-api kan " "kontakte Keystone for at udføre Gnocchitjenesten og slutpunktsoprettelse." -#. Type: password +#. Type: string #. Description #: ../gnocchi-api.templates:4001 -msgid "Keystone authentication token:" +#, fuzzy +#| msgid "Keystone authentication token:" +msgid "Keystone admin name:" msgstr "Godkendelsessymbol for Keystone:" +#. Type: string +#. 
Description +#. Type: string +#. Description #. Type: password #. Description -#: ../gnocchi-api.templates:4001 -#, fuzzy -#| msgid "" -#| "To configure its endpoint in Keystone, glance-api needs the Keystone " -#| "authentication token." +#: ../gnocchi-api.templates:4001 ../gnocchi-api.templates:5001 +#: ../gnocchi-api.templates:6001 msgid "" -"To configure its endpoint in Keystone, gnocchi-api needs the Keystone " -"authentication token." +"To register the service endpoint, this package needs to know the Admin " +"login, name, project name, and password to the Keystone server." msgstr "" -"For at konfigurere dets slutpunkt i Keystone, kræver glance-api Keystones " -"godkendelsessymbol." #. Type: string #. Description #: ../gnocchi-api.templates:5001 +msgid "Keystone admin project name:" +msgstr "" + +#. Type: password +#. Description +#: ../gnocchi-api.templates:6001 +msgid "Keystone admin password:" +msgstr "" + +#. Type: string +#. Description +#: ../gnocchi-api.templates:7001 msgid "Gnocchi endpoint IP address:" msgstr "IP-adresse for Gnochis slutpunkt:" #. Type: string #. Description -#: ../gnocchi-api.templates:5001 +#: ../gnocchi-api.templates:7001 msgid "Please enter the IP address that will be used to contact Gnocchi." msgstr "" "Indtast venligst IP-adressen som vil blive brugt til at kontakte Gnocchi." #. Type: string #. Description -#: ../gnocchi-api.templates:5001 +#: ../gnocchi-api.templates:7001 msgid "" "This IP address should be accessible from the clients that will use this " "service, so if you are installing a public cloud, this should be a public IP " @@ -235,13 +253,13 @@ msgstr "" #. Type: string #. Description -#: ../gnocchi-api.templates:6001 +#: ../gnocchi-api.templates:8001 msgid "Name of the region to register:" msgstr "Navn på regionen der skal registreres:" #. Type: string #. 
Description -#: ../gnocchi-api.templates:6001 +#: ../gnocchi-api.templates:8001 msgid "" "OpenStack supports using availability zones, with each region representing a " "location. Please enter the zone that you wish to use when registering the " @@ -250,3 +268,14 @@ msgstr "" "OpenStack understøtter brug af tilgængelighedszoner, hvor hver region " "repræsenterer et sted. Indtast venligst zonen du ønsker at bruge, når " "slutpunktet registreres." + +#, fuzzy +#~| msgid "" +#~| "To configure its endpoint in Keystone, glance-api needs the Keystone " +#~| "authentication token." +#~ msgid "" +#~ "To configure its endpoint in Keystone, gnocchi-api needs the Keystone " +#~ "authentication token." +#~ msgstr "" +#~ "For at konfigurere dets slutpunkt i Keystone, kræver glance-api Keystones " +#~ "godkendelsessymbol." diff --git a/debian/po/de.po b/debian/po/de.po index 147bd249..0d4a16fd 100644 --- a/debian/po/de.po +++ b/debian/po/de.po @@ -7,7 +7,7 @@ msgid "" msgstr "" "Project-Id-Version: glance 2013.2.1-1\n" "Report-Msgid-Bugs-To: gnocchi@packages.debian.org\n" -"POT-Creation-Date: 2015-11-25 09:24+0000\n" +"POT-Creation-Date: 2016-03-29 13:10+0000\n" "PO-Revision-Date: 2014-01-09 22:51+0100\n" "Last-Translator: Chris Leick \n" "Language-Team: German \n" @@ -180,9 +180,14 @@ msgstr "" #. Type: boolean #. Description #: ../gnocchi-api.templates:2001 +#, fuzzy +#| msgid "" +#| "Note that you will need to have an up and running Keystone server on " +#| "which to connect using the Keystone authentication token." msgid "" "Note that you will need to have an up and running Keystone server on which " -"to connect using the Keystone authentication token." +"to connect using a known admin project name, admin username and password. " +"The admin auth token is not used anymore." 
msgstr "" "Beachten Sie, dass Sie einen gestarteten und laufenden Keystone-Server haben " "müssen, mit dem Sie sich anhand des Keystone-Authentifizierungs-Tokens " @@ -209,29 +214,42 @@ msgstr "" "Keystone kontaktieren kann, um den Glance-Dienst und den Endpunkt zu " "erstellen." -#. Type: password +#. Type: string #. Description #: ../gnocchi-api.templates:4001 -msgid "Keystone authentication token:" +#, fuzzy +#| msgid "Keystone authentication token:" +msgid "Keystone admin name:" msgstr "Keystone-Authentifizierungs-Token:" +#. Type: string +#. Description +#. Type: string +#. Description #. Type: password #. Description -#: ../gnocchi-api.templates:4001 -#, fuzzy -#| msgid "" -#| "To configure its endpoint in Keystone, glance-api needs the Keystone " -#| "authentication token." +#: ../gnocchi-api.templates:4001 ../gnocchi-api.templates:5001 +#: ../gnocchi-api.templates:6001 msgid "" -"To configure its endpoint in Keystone, gnocchi-api needs the Keystone " -"authentication token." +"To register the service endpoint, this package needs to know the Admin " +"login, name, project name, and password to the Keystone server." msgstr "" -"Glance-API benötigt das Keystone-Authentifizierungs-Token, um seinen " -"Endpunkt in Keystone zu konfigurieren." #. Type: string #. Description #: ../gnocchi-api.templates:5001 +msgid "Keystone admin project name:" +msgstr "" + +#. Type: password +#. Description +#: ../gnocchi-api.templates:6001 +msgid "Keystone admin password:" +msgstr "" + +#. Type: string +#. Description +#: ../gnocchi-api.templates:7001 #, fuzzy #| msgid "Glance endpoint IP address:" msgid "Gnocchi endpoint IP address:" @@ -239,7 +257,7 @@ msgstr "IP-Adresse des Glance-Endpunkts" #. Type: string #. Description -#: ../gnocchi-api.templates:5001 +#: ../gnocchi-api.templates:7001 #, fuzzy #| msgid "Please enter the IP address that will be used to contact Glance." msgid "Please enter the IP address that will be used to contact Gnocchi." @@ -249,7 +267,7 @@ msgstr "" #. 
Type: string #. Description -#: ../gnocchi-api.templates:5001 +#: ../gnocchi-api.templates:7001 msgid "" "This IP address should be accessible from the clients that will use this " "service, so if you are installing a public cloud, this should be a public IP " @@ -261,13 +279,13 @@ msgstr "" #. Type: string #. Description -#: ../gnocchi-api.templates:6001 +#: ../gnocchi-api.templates:8001 msgid "Name of the region to register:" msgstr "Name der Region, die registriert wird:" #. Type: string #. Description -#: ../gnocchi-api.templates:6001 +#: ../gnocchi-api.templates:8001 msgid "" "OpenStack supports using availability zones, with each region representing a " "location. Please enter the zone that you wish to use when registering the " @@ -277,6 +295,17 @@ msgstr "" "Region einen Ort repräsentiert. Bitte geben Sie die Zone, die Sie benutzen " "möchten, bei der Registrierung des Endpunkts an." +#, fuzzy +#~| msgid "" +#~| "To configure its endpoint in Keystone, glance-api needs the Keystone " +#~| "authentication token." +#~ msgid "" +#~ "To configure its endpoint in Keystone, gnocchi-api needs the Keystone " +#~ "authentication token." +#~ msgstr "" +#~ "Glance-API benötigt das Keystone-Authentifizierungs-Token, um seinen " +#~ "Endpunkt in Keystone zu konfigurieren." + #~ msgid "keystone" #~ msgstr "Keystone" diff --git a/debian/po/es.po b/debian/po/es.po index 50ed8925..b99bcbaf 100644 --- a/debian/po/es.po +++ b/debian/po/es.po @@ -31,7 +31,7 @@ msgid "" msgstr "" "Project-Id-Version: glance 2012.1-3\n" "Report-Msgid-Bugs-To: gnocchi@packages.debian.org\n" -"POT-Creation-Date: 2015-11-25 09:24+0000\n" +"POT-Creation-Date: 2016-03-29 13:10+0000\n" "PO-Revision-Date: 2013-10-19 11:01+0200\n" "Last-Translator: Camaleón \n" "Language-Team: Debian Spanish \n" @@ -194,9 +194,14 @@ msgstr "" #. Type: boolean #. 
Description #: ../gnocchi-api.templates:2001 +#, fuzzy +#| msgid "" +#| "Note that you will need to have an up and running Keystone server on " +#| "which to connect using the Keystone authentication token." msgid "" "Note that you will need to have an up and running Keystone server on which " -"to connect using the Keystone authentication token." +"to connect using a known admin project name, admin username and password. " +"The admin auth token is not used anymore." msgstr "" "Tenga en cuenta que necesitará disponer de un servidor Keystone en ejecución " "al que conectarse utilizando el token de autenticación de Keystone." @@ -218,41 +223,55 @@ msgstr "" "contactar con Keystone para realizar el servicio Gnocchi y crear el punto de " "cierre." -#. Type: password +#. Type: string #. Description #: ../gnocchi-api.templates:4001 -msgid "Keystone authentication token:" +#, fuzzy +#| msgid "Keystone authentication token:" +msgid "Keystone admin name:" msgstr "Token de autenticación de Keystone:" +#. Type: string +#. Description +#. Type: string +#. Description #. Type: password #. Description -#: ../gnocchi-api.templates:4001 -#, fuzzy -#| msgid "" -#| "To configure its endpoint in Keystone, glance-api needs the Keystone " -#| "authentication token." +#: ../gnocchi-api.templates:4001 ../gnocchi-api.templates:5001 +#: ../gnocchi-api.templates:6001 msgid "" -"To configure its endpoint in Keystone, gnocchi-api needs the Keystone " -"authentication token." +"To register the service endpoint, this package needs to know the Admin " +"login, name, project name, and password to the Keystone server." msgstr "" -"Para configurar su punto final en Keystone, glance-api necesita el token de " -"autenticación de Keystone." #. Type: string #. Description #: ../gnocchi-api.templates:5001 +msgid "Keystone admin project name:" +msgstr "" + +#. Type: password +#. Description +#: ../gnocchi-api.templates:6001 +msgid "Keystone admin password:" +msgstr "" + +#. Type: string +#. 
Description +#: ../gnocchi-api.templates:7001 msgid "Gnocchi endpoint IP address:" msgstr "Dirección IP del punto de cierre de Gnocchi:" #. Type: string #. Description -#: ../gnocchi-api.templates:5001 +#: ../gnocchi-api.templates:7001 msgid "Please enter the IP address that will be used to contact Gnocchi." -msgstr "Introduzca la dirección IP que se utilizará para contactar con Gnocchi." +msgstr "" +"Introduzca la dirección IP que se utilizará para contactar con Gnocchi." #. Type: string #. Description -#: ../gnocchi-api.templates:5001 +#: ../gnocchi-api.templates:7001 msgid "" "This IP address should be accessible from the clients that will use this " "service, so if you are installing a public cloud, this should be a public IP " @@ -264,13 +283,13 @@ msgstr "" #. Type: string #. Description -#: ../gnocchi-api.templates:6001 +#: ../gnocchi-api.templates:8001 msgid "Name of the region to register:" msgstr "Nombre de la región a registrar:" #. Type: string #. Description -#: ../gnocchi-api.templates:6001 +#: ../gnocchi-api.templates:8001 msgid "" "OpenStack supports using availability zones, with each region representing a " "location. Please enter the zone that you wish to use when registering the " @@ -279,3 +298,14 @@ msgstr "" "OpenStack puede utilizarse con zonas de disponibilidad, donde cada región " "representa una ubicación. Introduzca la zona que desea utilizar cuando \n" "registre un punto de cierre." + +#, fuzzy +#~| msgid "" +#~| "To configure its endpoint in Keystone, glance-api needs the Keystone " +#~| "authentication token." +#~ msgid "" +#~ "To configure its endpoint in Keystone, gnocchi-api needs the Keystone " +#~ "authentication token." +#~ msgstr "" +#~ "Para configurar su punto final en Keystone, glance-api necesita el token " +#~ "de autenticación de Keystone." 
diff --git a/debian/po/fr.po b/debian/po/fr.po index cee8809b..0a5bf470 100644 --- a/debian/po/fr.po +++ b/debian/po/fr.po @@ -7,7 +7,7 @@ msgid "" msgstr "" "Project-Id-Version: glance\n" "Report-Msgid-Bugs-To: gnocchi@packages.debian.org\n" -"POT-Creation-Date: 2015-11-25 09:24+0000\n" +"POT-Creation-Date: 2016-03-29 13:10+0000\n" "PO-Revision-Date: 2013-10-26 18:35+0100\n" "Last-Translator: Julien Patriarca \n" "Language-Team: FRENCH \n" @@ -31,9 +31,9 @@ msgid "" "Typically this is also the hostname of the OpenStack Identity Service " "(Keystone)." msgstr "" -"Veuillez indiquer le nom d'hôte du serveur d'authentification pour " -"Gnocchi. Typiquement c'est également le nom d'hôte du Service " -"d'Identité OpenStack (Keystone)." +"Veuillez indiquer le nom d'hôte du serveur d'authentification pour Gnocchi. " +"Typiquement c'est également le nom d'hôte du Service d'Identité OpenStack " +"(Keystone)." #. Type: string #. Description @@ -103,8 +103,8 @@ msgid "" "No database has been set up for Gnocchi to use. Before continuing, you " "should make sure you have the following information:" msgstr "" -"Aucune base de données n'a été installée pour " -"Gnocchi. Avant de continuer, assurez-vous d'avoir :" +"Aucune base de données n'a été installée pour Gnocchi. Avant de continuer, " +"assurez-vous d'avoir :" #. Type: boolean #. Description @@ -163,9 +163,14 @@ msgstr "" #. Type: boolean #. Description #: ../gnocchi-api.templates:2001 +#, fuzzy +#| msgid "" +#| "Note that you will need to have an up and running Keystone server on " +#| "which to connect using the Keystone authentication token." msgid "" "Note that you will need to have an up and running Keystone server on which " -"to connect using the Keystone authentication token." +"to connect using a known admin project name, admin username and password. " +"The admin auth token is not used anymore." 
msgstr "" "Veuillez noter que vous aurez besoin d'avoir un serveur Keystone fonctionnel " "sur lequel se connecter pour utiliser le jeton d'authentification Keystone." @@ -183,42 +188,59 @@ msgid "" "Please enter the IP address of the Keystone server, so that gnocchi-api can " "contact Keystone to do the Gnocchi service and endpoint creation." msgstr "" -"Veuillez indiquer l'adresse IP du serveur Keystone, pour que l'API de Gnocchi " -"puisse contacter Keystone pour établir le service Glance et créer le point " -"d'accès." +"Veuillez indiquer l'adresse IP du serveur Keystone, pour que l'API de " +"Gnocchi puisse contacter Keystone pour établir le service Glance et créer le " +"point d'accès." -#. Type: password +#. Type: string #. Description #: ../gnocchi-api.templates:4001 -msgid "Keystone authentication token:" +#, fuzzy +#| msgid "Keystone authentication token:" +msgid "Keystone admin name:" msgstr "Jeton d'authentification Keystone : " +#. Type: string +#. Description +#. Type: string +#. Description #. Type: password #. Description -#: ../gnocchi-api.templates:4001 +#: ../gnocchi-api.templates:4001 ../gnocchi-api.templates:5001 +#: ../gnocchi-api.templates:6001 msgid "" -"To configure its endpoint in Keystone, gnocchi-api needs the Keystone " -"authentication token." +"To register the service endpoint, this package needs to know the Admin " +"login, name, project name, and password to the Keystone server." msgstr "" -"Pour configurer son point d'accès dans Keystone, l'API de Gnocchi a besoin du " -"jeton d'authentification Keystone." #. Type: string #. Description #: ../gnocchi-api.templates:5001 +msgid "Keystone admin project name:" +msgstr "" + +#. Type: password +#. Description +#: ../gnocchi-api.templates:6001 +msgid "Keystone admin password:" +msgstr "" + +#. Type: string +#. Description +#: ../gnocchi-api.templates:7001 msgid "Gnocchi endpoint IP address:" msgstr "Adresse IP du point d'accès Gnocchi : " #. Type: string #. 
Description -#: ../gnocchi-api.templates:5001 +#: ../gnocchi-api.templates:7001 msgid "Please enter the IP address that will be used to contact Gnocchi." msgstr "" "Veuillez indiquer l'adresse IP qui sera utilisée pour contacter Gnocchi." #. Type: string #. Description -#: ../gnocchi-api.templates:5001 +#: ../gnocchi-api.templates:7001 msgid "" "This IP address should be accessible from the clients that will use this " "service, so if you are installing a public cloud, this should be a public IP " @@ -230,13 +252,13 @@ msgstr "" #. Type: string #. Description -#: ../gnocchi-api.templates:6001 +#: ../gnocchi-api.templates:8001 msgid "Name of the region to register:" msgstr "Nom de la région à enregistrer : " #. Type: string #. Description -#: ../gnocchi-api.templates:6001 +#: ../gnocchi-api.templates:8001 msgid "" "OpenStack supports using availability zones, with each region representing a " "location. Please enter the zone that you wish to use when registering the " @@ -245,3 +267,10 @@ msgstr "" "OpenStack gère l'utilisation de zones disponibles, avec chaque région " "représentant un lieu. Veuillez entrer une zone que vous souhaitez utiliser " "lors de l'enregistrement d'un point d'accès." + +#~ msgid "" +#~ "To configure its endpoint in Keystone, gnocchi-api needs the Keystone " +#~ "authentication token." +#~ msgstr "" +#~ "Pour configurer son point d'accès dans Keystone, l'API de Gnocchi a " +#~ "besoin du jeton d'authentification Keystone." 
diff --git a/debian/po/gl.po b/debian/po/gl.po index 2d623bf9..9df1a0ad 100644 --- a/debian/po/gl.po +++ b/debian/po/gl.po @@ -7,7 +7,7 @@ msgid "" msgstr "" "Project-Id-Version: glance\n" "Report-Msgid-Bugs-To: gnocchi@packages.debian.org\n" -"POT-Creation-Date: 2015-11-25 09:24+0000\n" +"POT-Creation-Date: 2016-03-29 13:10+0000\n" "PO-Revision-Date: 2012-06-23 12:02+0200\n" "Last-Translator: Jorge Barreiro \n" "Language-Team: Galician \n" @@ -184,7 +184,8 @@ msgstr "" #: ../gnocchi-api.templates:2001 msgid "" "Note that you will need to have an up and running Keystone server on which " -"to connect using the Keystone authentication token." +"to connect using a known admin project name, admin username and password. " +"The admin auth token is not used anymore." msgstr "" #. Type: string @@ -201,35 +202,52 @@ msgid "" "contact Keystone to do the Gnocchi service and endpoint creation." msgstr "" -#. Type: password +#. Type: string #. Description #: ../gnocchi-api.templates:4001 -msgid "Keystone authentication token:" +msgid "Keystone admin name:" msgstr "" +#. Type: string +#. Description +#. Type: string +#. Description #. Type: password #. Description -#: ../gnocchi-api.templates:4001 +#: ../gnocchi-api.templates:4001 ../gnocchi-api.templates:5001 +#: ../gnocchi-api.templates:6001 msgid "" -"To configure its endpoint in Keystone, gnocchi-api needs the Keystone " -"authentication token." +"To register the service endpoint, this package needs to know the Admin " +"login, name, project name, and password to the Keystone server." msgstr "" #. Type: string #. Description #: ../gnocchi-api.templates:5001 +msgid "Keystone admin project name:" +msgstr "" + +#. Type: password +#. Description +#: ../gnocchi-api.templates:6001 +msgid "Keystone admin password:" +msgstr "" + +#. Type: string +#. Description +#: ../gnocchi-api.templates:7001 msgid "Gnocchi endpoint IP address:" msgstr "" #. Type: string #. 
Description -#: ../gnocchi-api.templates:5001 +#: ../gnocchi-api.templates:7001 msgid "Please enter the IP address that will be used to contact Gnocchi." msgstr "" #. Type: string #. Description -#: ../gnocchi-api.templates:5001 +#: ../gnocchi-api.templates:7001 msgid "" "This IP address should be accessible from the clients that will use this " "service, so if you are installing a public cloud, this should be a public IP " @@ -238,13 +256,13 @@ msgstr "" #. Type: string #. Description -#: ../gnocchi-api.templates:6001 +#: ../gnocchi-api.templates:8001 msgid "Name of the region to register:" msgstr "" #. Type: string #. Description -#: ../gnocchi-api.templates:6001 +#: ../gnocchi-api.templates:8001 msgid "" "OpenStack supports using availability zones, with each region representing a " "location. Please enter the zone that you wish to use when registering the " diff --git a/debian/po/it.po b/debian/po/it.po index 0740ac73..06e65766 100644 --- a/debian/po/it.po +++ b/debian/po/it.po @@ -6,7 +6,7 @@ msgid "" msgstr "" "Project-Id-Version: glance\n" "Report-Msgid-Bugs-To: gnocchi@packages.debian.org\n" -"POT-Creation-Date: 2015-11-25 09:24+0000\n" +"POT-Creation-Date: 2016-03-29 13:10+0000\n" "PO-Revision-Date: 2014-04-21 10:03+0200\n" "Last-Translator: Beatrice Torracca \n" "Language-Team: Italian \n" @@ -167,9 +167,14 @@ msgstr "" #. Type: boolean #. Description #: ../gnocchi-api.templates:2001 +#, fuzzy +#| msgid "" +#| "Note that you will need to have an up and running Keystone server on " +#| "which to connect using the Keystone authentication token." msgid "" "Note that you will need to have an up and running Keystone server on which " -"to connect using the Keystone authentication token." +"to connect using a known admin project name, admin username and password. " +"The admin auth token is not used anymore." msgstr "" "Notare che sarà necessario avere un server Keystone in funzione a cui " "connettersi usando il token di autenticazione Keystone." 
@@ -191,41 +196,54 @@ msgstr "" "contattare Keystone per effettuare la creazione del servizio e del punto " "terminale Gnocchi." -#. Type: password +#. Type: string #. Description #: ../gnocchi-api.templates:4001 -msgid "Keystone authentication token:" +#, fuzzy +#| msgid "Keystone authentication token:" +msgid "Keystone admin name:" msgstr "Token di autenticazione Keystone:" +#. Type: string +#. Description +#. Type: string +#. Description #. Type: password #. Description -#: ../gnocchi-api.templates:4001 -#, fuzzy -#| msgid "" -#| "To configure its endpoint in Keystone, glance-api needs the Keystone " -#| "authentication token." +#: ../gnocchi-api.templates:4001 ../gnocchi-api.templates:5001 +#: ../gnocchi-api.templates:6001 msgid "" -"To configure its endpoint in Keystone, gnocchi-api needs the Keystone " -"authentication token." +"To register the service endpoint, this package needs to know the Admin " +"login, name, project name, and password to the Keystone server." msgstr "" -"Per configurare il proprio punto terminale in Keystone, glance-api ha " -"bisogno del token di autenticazione Keystone." #. Type: string #. Description #: ../gnocchi-api.templates:5001 +msgid "Keystone admin project name:" +msgstr "" + +#. Type: password +#. Description +#: ../gnocchi-api.templates:6001 +msgid "Keystone admin password:" +msgstr "" + +#. Type: string +#. Description +#: ../gnocchi-api.templates:7001 msgid "Gnocchi endpoint IP address:" msgstr "Indirizzo IP del punto terminale Gnocchi:" #. Type: string #. Description -#: ../gnocchi-api.templates:5001 +#: ../gnocchi-api.templates:7001 msgid "Please enter the IP address that will be used to contact Gnocchi." msgstr "Inserire l'indirizzo IP che verrà usato per contattare Gnocchi." #. Type: string #. 
Description -#: ../gnocchi-api.templates:5001 +#: ../gnocchi-api.templates:7001 msgid "" "This IP address should be accessible from the clients that will use this " "service, so if you are installing a public cloud, this should be a public IP " @@ -237,13 +255,13 @@ msgstr "" #. Type: string #. Description -#: ../gnocchi-api.templates:6001 +#: ../gnocchi-api.templates:8001 msgid "Name of the region to register:" msgstr "Nome della regione da registrare:" #. Type: string #. Description -#: ../gnocchi-api.templates:6001 +#: ../gnocchi-api.templates:8001 msgid "" "OpenStack supports using availability zones, with each region representing a " "location. Please enter the zone that you wish to use when registering the " @@ -252,3 +270,14 @@ msgstr "" "OpenStack gestisce le zone di disponibilità, con ogni regione che " "rappresenta una posizione. Inserire la zona che si desidera usare durante la " "registrazione del punto terminale." + +#, fuzzy +#~| msgid "" +#~| "To configure its endpoint in Keystone, glance-api needs the Keystone " +#~| "authentication token." +#~ msgid "" +#~ "To configure its endpoint in Keystone, gnocchi-api needs the Keystone " +#~ "authentication token." +#~ msgstr "" +#~ "Per configurare il proprio punto terminale in Keystone, glance-api ha " +#~ "bisogno del token di autenticazione Keystone." diff --git a/debian/po/ja.po b/debian/po/ja.po index db266ee8..a818bd83 100644 --- a/debian/po/ja.po +++ b/debian/po/ja.po @@ -7,7 +7,7 @@ msgid "" msgstr "" "Project-Id-Version: glance\n" "Report-Msgid-Bugs-To: gnocchi@packages.debian.org\n" -"POT-Creation-Date: 2015-11-25 09:24+0000\n" +"POT-Creation-Date: 2016-03-29 13:10+0000\n" "PO-Revision-Date: 2012-11-10 23:28+0900\n" "Last-Translator: victory \n" "Language-Team: Japanese \n" @@ -195,7 +195,8 @@ msgstr "" #| "which to connect using the Keystone auth token." msgid "" "Note that you will need to have an up and running Keystone server on which " -"to connect using the Keystone authentication token." 
+"to connect using a known admin project name, admin username and password. " +"The admin auth token is not used anymore." msgstr "" "Keystone 認証文字列を使って接続する先の Keystone サーバが必要なことに注意して" "ください。" @@ -218,45 +219,56 @@ msgstr "" "Keystone サーバの IP アドレスを入力してください。それにより glance-api は " "Keystone と通信し、Gnocchi サービスや端末の作成ができるようになります。" -#. Type: password +#. Type: string #. Description #: ../gnocchi-api.templates:4001 #, fuzzy #| msgid "Keystone Auth Token:" -msgid "Keystone authentication token:" +msgid "Keystone admin name:" msgstr "Keystone 認証文字列:" +#. Type: string +#. Description +#. Type: string +#. Description #. Type: password #. Description -#: ../gnocchi-api.templates:4001 -#, fuzzy -#| msgid "" -#| "To configure its endpoint in Keystone, glance-api needs the Keystone auth " -#| "token." +#: ../gnocchi-api.templates:4001 ../gnocchi-api.templates:5001 +#: ../gnocchi-api.templates:6001 msgid "" -"To configure its endpoint in Keystone, gnocchi-api needs the Keystone " -"authentication token." +"To register the service endpoint, this package needs to know the Admin " +"login, name, project name, and password to the Keystone server." msgstr "" -"Keystone で端末を設定するには、glance-api は Keystone 認証文字列を必要としま" -"す。" #. Type: string #. Description #: ../gnocchi-api.templates:5001 +msgid "Keystone admin project name:" +msgstr "" + +#. Type: password +#. Description +#: ../gnocchi-api.templates:6001 +msgid "Keystone admin password:" +msgstr "" + +#. Type: string +#. Description +#: ../gnocchi-api.templates:7001 msgid "Gnocchi endpoint IP address:" msgstr "Gnocchi 端末の IP アドレス:" #. Type: string #. Description -#: ../gnocchi-api.templates:5001 +#: ../gnocchi-api.templates:7001 msgid "Please enter the IP address that will be used to contact Gnocchi." msgstr "" -"Gnocchi への通信に利用する IP アドレス (例えば Gnocchi 端末の IP アドレス) を入" -"力してください。" +"Gnocchi への通信に利用する IP アドレス (例えば Gnocchi 端末の IP アドレス) を" +"入力してください。" #. Type: string #. 
Description -#: ../gnocchi-api.templates:5001 +#: ../gnocchi-api.templates:7001 msgid "" "This IP address should be accessible from the clients that will use this " "service, so if you are installing a public cloud, this should be a public IP " @@ -268,13 +280,13 @@ msgstr "" #. Type: string #. Description -#: ../gnocchi-api.templates:6001 +#: ../gnocchi-api.templates:8001 msgid "Name of the region to register:" msgstr "登録する領域の名前:" #. Type: string #. Description -#: ../gnocchi-api.templates:6001 +#: ../gnocchi-api.templates:8001 #, fuzzy #| msgid "" #| "Openstack can be used using availability zones, with each region " @@ -287,3 +299,14 @@ msgid "" msgstr "" "OpenStack は位置を示す各領域による利用可能区分を利用することができます。端末" "の登録時に利用したい区分を入力してください。" + +#, fuzzy +#~| msgid "" +#~| "To configure its endpoint in Keystone, glance-api needs the Keystone " +#~| "auth token." +#~ msgid "" +#~ "To configure its endpoint in Keystone, gnocchi-api needs the Keystone " +#~ "authentication token." +#~ msgstr "" +#~ "Keystone で端末を設定するには、glance-api は Keystone 認証文字列を必要とし" +#~ "ます。" diff --git a/debian/po/nl.po b/debian/po/nl.po index 0efd1f9d..49d9e65a 100644 --- a/debian/po/nl.po +++ b/debian/po/nl.po @@ -8,7 +8,7 @@ msgid "" msgstr "" "Project-Id-Version: gnocchi_1.3.0-4\n" "Report-Msgid-Bugs-To: gnocchi@packages.debian.org\n" -"POT-Creation-Date: 2015-11-25 09:24+0000\n" +"POT-Creation-Date: 2016-03-29 13:10+0000\n" "PO-Revision-Date: 2016-01-12 16:31+0100\n" "Last-Translator: Frans Spiesschaert \n" "Language-Team: Debian Dutch l10n Team \n" @@ -132,9 +132,6 @@ msgstr "" #. Type: boolean #. Description #: ../gnocchi-common.templates:6001 -#| msgid "" -#| "You can change this setting later on by running \"dpkg-reconfigure -plow " -#| "glance-common\"." msgid "" "You can change this setting later on by running \"dpkg-reconfigure -plow " "gnocchi-common\"." @@ -163,9 +160,14 @@ msgstr "" #. Type: boolean #. 
Description #: ../gnocchi-api.templates:2001 +#, fuzzy +#| msgid "" +#| "Note that you will need to have an up and running Keystone server on " +#| "which to connect using the Keystone authentication token." msgid "" "Note that you will need to have an up and running Keystone server on which " -"to connect using the Keystone authentication token." +"to connect using a known admin project name, admin username and password. " +"The admin auth token is not used anymore." msgstr "" "Merk op dat u hiervoor een volledig werkende keystone-server nodig heeft, " "waarmee een verbinding gemaakt wordt met behulp van het authenticatiebewijs " @@ -188,34 +190,48 @@ msgstr "" "met Keystone kan verbinden om de Gnocchi-service en het troegangspunt aan te " "maken." -#. Type: password +#. Type: string #. Description #: ../gnocchi-api.templates:4001 -msgid "Keystone authentication token:" +#, fuzzy +#| msgid "Keystone authentication token:" +msgid "Keystone admin name:" msgstr "Authenticatiebewijs voor Keystone:" +#. Type: string +#. Description +#. Type: string +#. Description #. Type: password #. Description -#: ../gnocchi-api.templates:4001 -#| msgid "" -#| "To configure its endpoint in Keystone, glance-api needs the Keystone " -#| "authentication token." +#: ../gnocchi-api.templates:4001 ../gnocchi-api.templates:5001 +#: ../gnocchi-api.templates:6001 msgid "" -"To configure its endpoint in Keystone, gnocchi-api needs the Keystone " -"authentication token." +"To register the service endpoint, this package needs to know the Admin " +"login, name, project name, and password to the Keystone server." msgstr "" -"Om zijn toegangspunt te kunnen aanmaken in Keystone, heeft gnocchi-api het " -"authenticatiebewijs voor Keystone nodig." #. Type: string #. Description #: ../gnocchi-api.templates:5001 +msgid "Keystone admin project name:" +msgstr "" + +#. Type: password +#. Description +#: ../gnocchi-api.templates:6001 +msgid "Keystone admin password:" +msgstr "" + +#. Type: string +#. 
Description +#: ../gnocchi-api.templates:7001 msgid "Gnocchi endpoint IP address:" msgstr "IP-adres van het toegangspunt voor Gnocchi:" #. Type: string #. Description -#: ../gnocchi-api.templates:5001 +#: ../gnocchi-api.templates:7001 msgid "Please enter the IP address that will be used to contact Gnocchi." msgstr "" "Gelieve het IP-adres in te voeren dat gebruikt zal worden om contact te " @@ -223,7 +239,7 @@ msgstr "" #. Type: string #. Description -#: ../gnocchi-api.templates:5001 +#: ../gnocchi-api.templates:7001 msgid "" "This IP address should be accessible from the clients that will use this " "service, so if you are installing a public cloud, this should be a public IP " @@ -235,13 +251,13 @@ msgstr "" #. Type: string #. Description -#: ../gnocchi-api.templates:6001 +#: ../gnocchi-api.templates:8001 msgid "Name of the region to register:" msgstr "Naam van de te registreren regio:" #. Type: string #. Description -#: ../gnocchi-api.templates:6001 +#: ../gnocchi-api.templates:8001 msgid "" "OpenStack supports using availability zones, with each region representing a " "location. Please enter the zone that you wish to use when registering the " @@ -250,3 +266,13 @@ msgstr "" "Openstack ondersteunt het gebruik van zones van beschikbaarheid, waarbij " "elke regio een locatie vertegenwoordigt. Geef aan welke zone u wenst te " "gebruiken bij het registreren van het toegangspunt." + +#~| msgid "" +#~| "To configure its endpoint in Keystone, glance-api needs the Keystone " +#~| "authentication token." +#~ msgid "" +#~ "To configure its endpoint in Keystone, gnocchi-api needs the Keystone " +#~ "authentication token." +#~ msgstr "" +#~ "Om zijn toegangspunt te kunnen aanmaken in Keystone, heeft gnocchi-api " +#~ "het authenticatiebewijs voor Keystone nodig." 
diff --git a/debian/po/pl.po b/debian/po/pl.po index 2b2e8d8f..22fb705b 100644 --- a/debian/po/pl.po +++ b/debian/po/pl.po @@ -7,7 +7,7 @@ msgid "" msgstr "" "Project-Id-Version: glance\n" "Report-Msgid-Bugs-To: gnocchi@packages.debian.org\n" -"POT-Creation-Date: 2015-11-25 09:24+0000\n" +"POT-Creation-Date: 2016-03-29 13:10+0000\n" "PO-Revision-Date: 2012-06-09 10:11+0200\n" "Last-Translator: Michał Kułach \n" "Language-Team: Polish \n" @@ -185,7 +185,8 @@ msgstr "" #: ../gnocchi-api.templates:2001 msgid "" "Note that you will need to have an up and running Keystone server on which " -"to connect using the Keystone authentication token." +"to connect using a known admin project name, admin username and password. " +"The admin auth token is not used anymore." msgstr "" #. Type: string @@ -202,35 +203,52 @@ msgid "" "contact Keystone to do the Gnocchi service and endpoint creation." msgstr "" -#. Type: password +#. Type: string #. Description #: ../gnocchi-api.templates:4001 -msgid "Keystone authentication token:" +msgid "Keystone admin name:" msgstr "" +#. Type: string +#. Description +#. Type: string +#. Description #. Type: password #. Description -#: ../gnocchi-api.templates:4001 +#: ../gnocchi-api.templates:4001 ../gnocchi-api.templates:5001 +#: ../gnocchi-api.templates:6001 msgid "" -"To configure its endpoint in Keystone, gnocchi-api needs the Keystone " -"authentication token." +"To register the service endpoint, this package needs to know the Admin " +"login, name, project name, and password to the Keystone server." msgstr "" #. Type: string #. Description #: ../gnocchi-api.templates:5001 +msgid "Keystone admin project name:" +msgstr "" + +#. Type: password +#. Description +#: ../gnocchi-api.templates:6001 +msgid "Keystone admin password:" +msgstr "" + +#. Type: string +#. Description +#: ../gnocchi-api.templates:7001 msgid "Gnocchi endpoint IP address:" msgstr "" #. Type: string #. 
Description -#: ../gnocchi-api.templates:5001 +#: ../gnocchi-api.templates:7001 msgid "Please enter the IP address that will be used to contact Gnocchi." msgstr "" #. Type: string #. Description -#: ../gnocchi-api.templates:5001 +#: ../gnocchi-api.templates:7001 msgid "" "This IP address should be accessible from the clients that will use this " "service, so if you are installing a public cloud, this should be a public IP " @@ -239,13 +257,13 @@ msgstr "" #. Type: string #. Description -#: ../gnocchi-api.templates:6001 +#: ../gnocchi-api.templates:8001 msgid "Name of the region to register:" msgstr "" #. Type: string #. Description -#: ../gnocchi-api.templates:6001 +#: ../gnocchi-api.templates:8001 msgid "" "OpenStack supports using availability zones, with each region representing a " "location. Please enter the zone that you wish to use when registering the " diff --git a/debian/po/pt.po b/debian/po/pt.po index 367d0291..2905fde1 100644 --- a/debian/po/pt.po +++ b/debian/po/pt.po @@ -7,7 +7,7 @@ msgid "" msgstr "" "Project-Id-Version: glance\n" "Report-Msgid-Bugs-To: gnocchi@packages.debian.org\n" -"POT-Creation-Date: 2015-11-25 09:24+0000\n" +"POT-Creation-Date: 2016-03-29 13:10+0000\n" "PO-Revision-Date: 2013-10-20 23:43+0100\n" "Last-Translator: Pedro Ribeiro \n" "Language-Team: Potuguese \n" @@ -30,8 +30,8 @@ msgid "" "Typically this is also the hostname of the OpenStack Identity Service " "(Keystone)." msgstr "" -"Indique o nome do seu servidor de autenticação para o Gnocchi. Normalmente, é " -"o nome do seu Serviço de Identidade OpenStack (Keystone)." +"Indique o nome do seu servidor de autenticação para o Gnocchi. Normalmente, " +"é o nome do seu Serviço de Identidade OpenStack (Keystone)." #. Type: string #. Description @@ -166,9 +166,14 @@ msgstr "" #. Type: boolean #. 
Description #: ../gnocchi-api.templates:2001 +#, fuzzy +#| msgid "" +#| "Note that you will need to have an up and running Keystone server on " +#| "which to connect using the Keystone authentication token." msgid "" "Note that you will need to have an up and running Keystone server on which " -"to connect using the Keystone authentication token." +"to connect using a known admin project name, admin username and password. " +"The admin auth token is not used anymore." msgstr "" "Note que irá necessitar de ter um servidor keystone a correr e pronto para " "receber ligações autenticadas com o token de autenticação Keystone." @@ -189,41 +194,54 @@ msgstr "" "Indique o endereço IP do seu servidor keystone, de modo a que o glance-api " "possa contactar o Keystone para criar o serviço e ponto final Gnocchi." -#. Type: password +#. Type: string #. Description #: ../gnocchi-api.templates:4001 -msgid "Keystone authentication token:" +#, fuzzy +#| msgid "Keystone authentication token:" +msgid "Keystone admin name:" msgstr "Token de Autenticação Keystone:" +#. Type: string +#. Description +#. Type: string +#. Description #. Type: password #. Description -#: ../gnocchi-api.templates:4001 -#, fuzzy -#| msgid "" -#| "To configure its endpoint in Keystone, glance-api needs the Keystone " -#| "authentication token." +#: ../gnocchi-api.templates:4001 ../gnocchi-api.templates:5001 +#: ../gnocchi-api.templates:6001 msgid "" -"To configure its endpoint in Keystone, gnocchi-api needs the Keystone " -"authentication token." +"To register the service endpoint, this package needs to know the Admin " +"login, name, project name, and password to the Keystone server." msgstr "" -"Para configurar o seu ponto final no Keystone, o glance-api precisa do token " -"de autenticação do Keystone." #. Type: string #. Description #: ../gnocchi-api.templates:5001 +msgid "Keystone admin project name:" +msgstr "" + +#. Type: password +#. 
Description +#: ../gnocchi-api.templates:6001 +msgid "Keystone admin password:" +msgstr "" + +#. Type: string +#. Description +#: ../gnocchi-api.templates:7001 msgid "Gnocchi endpoint IP address:" msgstr "Endereço IP do ponto final Gnocchi:" #. Type: string #. Description -#: ../gnocchi-api.templates:5001 +#: ../gnocchi-api.templates:7001 msgid "Please enter the IP address that will be used to contact Gnocchi." msgstr "Indique o endereço IP que irá ser usado para contactar o Gnocchi." #. Type: string #. Description -#: ../gnocchi-api.templates:5001 +#: ../gnocchi-api.templates:7001 msgid "" "This IP address should be accessible from the clients that will use this " "service, so if you are installing a public cloud, this should be a public IP " @@ -235,13 +253,13 @@ msgstr "" #. Type: string #. Description -#: ../gnocchi-api.templates:6001 +#: ../gnocchi-api.templates:8001 msgid "Name of the region to register:" msgstr "Nome da região a registar:" #. Type: string #. Description -#: ../gnocchi-api.templates:6001 +#: ../gnocchi-api.templates:8001 msgid "" "OpenStack supports using availability zones, with each region representing a " "location. Please enter the zone that you wish to use when registering the " @@ -250,3 +268,14 @@ msgstr "" "O Openstack pode ser usado com zonas de disponibilidade, com cada região a " "representar uma localização. Por favor, indique a zona que quer usar ao " "registar um ponto final." + +#, fuzzy +#~| msgid "" +#~| "To configure its endpoint in Keystone, glance-api needs the Keystone " +#~| "authentication token." +#~ msgid "" +#~ "To configure its endpoint in Keystone, gnocchi-api needs the Keystone " +#~ "authentication token." +#~ msgstr "" +#~ "Para configurar o seu ponto final no Keystone, o glance-api precisa do " +#~ "token de autenticação do Keystone." 
diff --git a/debian/po/pt_BR.po b/debian/po/pt_BR.po index 465caa75..5ca122cf 100644 --- a/debian/po/pt_BR.po +++ b/debian/po/pt_BR.po @@ -7,7 +7,7 @@ msgid "" msgstr "" "Project-Id-Version: glance 2014.1.2-1\n" "Report-Msgid-Bugs-To: gnocchi@packages.debian.org\n" -"POT-Creation-Date: 2015-11-25 09:24+0000\n" +"POT-Creation-Date: 2016-03-29 13:10+0000\n" "PO-Revision-Date: 2014-09-04 08:49-0300\n" "Last-Translator: Adriano Rafael Gomes \n" "Language-Team: Brazilian Portuguese \n" "Language-Team: Russian \n" @@ -166,9 +166,14 @@ msgstr "" #. Type: boolean #. Description #: ../gnocchi-api.templates:2001 +#, fuzzy +#| msgid "" +#| "Note that you will need to have an up and running Keystone server on " +#| "which to connect using the Keystone authentication token." msgid "" "Note that you will need to have an up and running Keystone server on which " -"to connect using the Keystone authentication token." +"to connect using a known admin project name, admin username and password. " +"The admin auth token is not used anymore." msgstr "" "Заметим, что у вас должен быть работающий сервер Keystone, к которому будет " "произведено подключение с помощью токена аутентификации Keystone." @@ -189,40 +194,54 @@ msgstr "" "Введите IP-адрес сервера Keystone для того, чтобы glance-api могла " "подключиться к Keystone для запуска службы Gnocchi и создания конечной точки." -#. Type: password +#. Type: string #. Description #: ../gnocchi-api.templates:4001 -msgid "Keystone authentication token:" +#, fuzzy +#| msgid "Keystone authentication token:" +msgid "Keystone admin name:" msgstr "Токен аутентификации Keystone:" +#. Type: string +#. Description +#. Type: string +#. Description #. Type: password #. Description -#: ../gnocchi-api.templates:4001 -#, fuzzy -#| msgid "" -#| "To configure its endpoint in Keystone, glance-api needs the Keystone " -#| "authentication token." 
+#: ../gnocchi-api.templates:4001 ../gnocchi-api.templates:5001 +#: ../gnocchi-api.templates:6001 msgid "" -"To configure its endpoint in Keystone, gnocchi-api needs the Keystone " -"authentication token." +"To register the service endpoint, this package needs to know the Admin " +"login, name, project name, and password to the Keystone server." msgstr "" -"Для настройки собственной конечной точки в Keystone glance-api требуется " -"токен аутентификации Keystone." #. Type: string #. Description #: ../gnocchi-api.templates:5001 +msgid "Keystone admin project name:" +msgstr "" + +#. Type: password +#. Description +#: ../gnocchi-api.templates:6001 +msgid "Keystone admin password:" +msgstr "" + +#. Type: string +#. Description +#: ../gnocchi-api.templates:7001 msgid "Gnocchi endpoint IP address:" msgstr "IP-адрес конечной точки Gnocchi:" #. Type: string #. Description +#: ../gnocchi-api.templates:7001 msgid "Please enter the IP address that will be used to contact Gnocchi." msgstr "Введите IP-адрес, который будет использован для подключения к Gnocchi." #. Type: string #. Description -#: ../gnocchi-api.templates:5001 +#: ../gnocchi-api.templates:7001 msgid "" "This IP address should be accessible from the clients that will use this " "service, so if you are installing a public cloud, this should be a public IP " @@ -234,13 +253,13 @@ msgstr "" #. Type: string #. Description -#: ../gnocchi-api.templates:6001 +#: ../gnocchi-api.templates:8001 msgid "Name of the region to register:" msgstr "Название области для регистрации:" #. Type: string #. Description -#: ../gnocchi-api.templates:6001 +#: ../gnocchi-api.templates:8001 msgid "" "OpenStack supports using availability zones, with each region representing a " "location. Please enter the zone that you wish to use when registering the " @@ -249,3 +268,14 @@ msgstr "" "Openstack поддерживает разделение на зоны доступности, где каждая область " "представляет определённое расположение. 
Введите зону, которую вы хотите " "использовать при регистрации конечной точки." + +#, fuzzy +#~| msgid "" +#~| "To configure its endpoint in Keystone, glance-api needs the Keystone " +#~| "authentication token." +#~ msgid "" +#~ "To configure its endpoint in Keystone, gnocchi-api needs the Keystone " +#~ "authentication token." +#~ msgstr "" +#~ "Для настройки собственной конечной точки в Keystone glance-api требуется " +#~ "токен аутентификации Keystone." diff --git a/debian/po/sv.po b/debian/po/sv.po index a48d0eb6..81249530 100644 --- a/debian/po/sv.po +++ b/debian/po/sv.po @@ -7,7 +7,7 @@ msgid "" msgstr "" "Project-Id-Version: glance\n" "Report-Msgid-Bugs-To: gnocchi@packages.debian.org\n" -"POT-Creation-Date: 2015-11-25 09:24+0000\n" +"POT-Creation-Date: 2016-03-29 13:10+0000\n" "PO-Revision-Date: 2014-01-09 10:35+0100\n" "Last-Translator: Martin Bagge / brother \n" "Language-Team: Swedish \n" @@ -169,9 +169,14 @@ msgstr "" #. Type: boolean #. Description #: ../gnocchi-api.templates:2001 +#, fuzzy +#| msgid "" +#| "Note that you will need to have an up and running Keystone server on " +#| "which to connect using the Keystone authentication token." msgid "" "Note that you will need to have an up and running Keystone server on which " -"to connect using the Keystone authentication token." +"to connect using a known admin project name, admin username and password. " +"The admin auth token is not used anymore." msgstr "OBS. Du behöver ha en fungerande keystone-server att ansluta till." #. Type: string @@ -190,41 +195,54 @@ msgstr "" "Ange IP-adressen till din Keystone-server så att glance-api kan kontakta " "Keystone för att lägga till Gnocchi-tjänsten som en ändpunkt." -#. Type: password +#. Type: string #. Description #: ../gnocchi-api.templates:4001 -msgid "Keystone authentication token:" +#, fuzzy +#| msgid "Keystone authentication token:" +msgid "Keystone admin name:" msgstr "Autetiseringsvärde för Keystone:" +#. Type: string +#. Description +#. 
Type: string +#. Description #. Type: password #. Description -#: ../gnocchi-api.templates:4001 -#, fuzzy -#| msgid "" -#| "To configure its endpoint in Keystone, glance-api needs the Keystone " -#| "authentication token." +#: ../gnocchi-api.templates:4001 ../gnocchi-api.templates:5001 +#: ../gnocchi-api.templates:6001 msgid "" -"To configure its endpoint in Keystone, gnocchi-api needs the Keystone " -"authentication token." +"To register the service endpoint, this package needs to know the Admin " +"login, name, project name, and password to the Keystone server." msgstr "" -"För att lägga till ändpunkt i Keystone behöver glance-api ett " -"autentiseringsvärde för Keystone." #. Type: string #. Description #: ../gnocchi-api.templates:5001 +msgid "Keystone admin project name:" +msgstr "" + +#. Type: password +#. Description +#: ../gnocchi-api.templates:6001 +msgid "Keystone admin password:" +msgstr "" + +#. Type: string +#. Description +#: ../gnocchi-api.templates:7001 msgid "Gnocchi endpoint IP address:" msgstr "IP-adress för Gnocchi-ändpunkt:" #. Type: string #. Description -#: ../gnocchi-api.templates:5001 +#: ../gnocchi-api.templates:7001 msgid "Please enter the IP address that will be used to contact Gnocchi." msgstr "Ange den IP-adress som ska användas för att kontakta Gnocchi." #. Type: string #. Description -#: ../gnocchi-api.templates:5001 +#: ../gnocchi-api.templates:7001 msgid "" "This IP address should be accessible from the clients that will use this " "service, so if you are installing a public cloud, this should be a public IP " @@ -236,13 +254,13 @@ msgstr "" #. Type: string #. Description -#: ../gnocchi-api.templates:6001 +#: ../gnocchi-api.templates:8001 msgid "Name of the region to register:" msgstr "Regionnamn:" #. Type: string #. Description -#: ../gnocchi-api.templates:6001 +#: ../gnocchi-api.templates:8001 msgid "" "OpenStack supports using availability zones, with each region representing a " "location. 
Please enter the zone that you wish to use when registering the " @@ -250,3 +268,14 @@ msgid "" msgstr "" "OpenStack kan användas med tillgänglighetszoner. Varje region representerar " "en plats. Ange zonen som ska användas när ändpunkten registreras." + +#, fuzzy +#~| msgid "" +#~| "To configure its endpoint in Keystone, glance-api needs the Keystone " +#~| "authentication token." +#~ msgid "" +#~ "To configure its endpoint in Keystone, gnocchi-api needs the Keystone " +#~ "authentication token." +#~ msgstr "" +#~ "För att lägga till ändpunkt i Keystone behöver glance-api ett " +#~ "autentiseringsvärde för Keystone." diff --git a/debian/po/templates.pot b/debian/po/templates.pot index 916d4478..5e60a281 100644 --- a/debian/po/templates.pot +++ b/debian/po/templates.pot @@ -8,7 +8,7 @@ msgid "" msgstr "" "Project-Id-Version: gnocchi\n" "Report-Msgid-Bugs-To: gnocchi@packages.debian.org\n" -"POT-Creation-Date: 2015-11-25 09:24+0000\n" +"POT-Creation-Date: 2016-03-29 13:10+0000\n" "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" "Last-Translator: FULL NAME \n" "Language-Team: LANGUAGE \n" @@ -143,7 +143,8 @@ msgstr "" #: ../gnocchi-api.templates:2001 msgid "" "Note that you will need to have an up and running Keystone server on which " -"to connect using the Keystone authentication token." +"to connect using a known admin project name, admin username and password. " +"The admin auth token is not used anymore." msgstr "" #. Type: string @@ -160,35 +161,52 @@ msgid "" "contact Keystone to do the Gnocchi service and endpoint creation." msgstr "" -#. Type: password +#. Type: string #. Description #: ../gnocchi-api.templates:4001 -msgid "Keystone authentication token:" +msgid "Keystone admin name:" msgstr "" +#. Type: string +#. Description +#. Type: string +#. Description #. Type: password #. 
Description -#: ../gnocchi-api.templates:4001 +#: ../gnocchi-api.templates:4001 ../gnocchi-api.templates:5001 +#: ../gnocchi-api.templates:6001 msgid "" -"To configure its endpoint in Keystone, gnocchi-api needs the Keystone " -"authentication token." +"To register the service endpoint, this package needs to know the Admin " +"login, name, project name, and password to the Keystone server." msgstr "" #. Type: string #. Description #: ../gnocchi-api.templates:5001 +msgid "Keystone admin project name:" +msgstr "" + +#. Type: password +#. Description +#: ../gnocchi-api.templates:6001 +msgid "Keystone admin password:" +msgstr "" + +#. Type: string +#. Description +#: ../gnocchi-api.templates:7001 msgid "Gnocchi endpoint IP address:" msgstr "" #. Type: string #. Description -#: ../gnocchi-api.templates:5001 +#: ../gnocchi-api.templates:7001 msgid "Please enter the IP address that will be used to contact Gnocchi." msgstr "" #. Type: string #. Description -#: ../gnocchi-api.templates:5001 +#: ../gnocchi-api.templates:7001 msgid "" "This IP address should be accessible from the clients that will use this " "service, so if you are installing a public cloud, this should be a public IP " @@ -197,13 +215,13 @@ msgstr "" #. Type: string #. Description -#: ../gnocchi-api.templates:6001 +#: ../gnocchi-api.templates:8001 msgid "Name of the region to register:" msgstr "" #. Type: string #. Description -#: ../gnocchi-api.templates:6001 +#: ../gnocchi-api.templates:8001 msgid "" "OpenStack supports using availability zones, with each region representing a " "location. 
Please enter the zone that you wish to use when registering the " diff --git a/debian/po/zh_CN.po b/debian/po/zh_CN.po index f5c3d51c..0fd9ec5d 100644 --- a/debian/po/zh_CN.po +++ b/debian/po/zh_CN.po @@ -7,7 +7,7 @@ msgid "" msgstr "" "Project-Id-Version: glance\n" "Report-Msgid-Bugs-To: gnocchi@packages.debian.org\n" -"POT-Creation-Date: 2015-11-25 09:24+0000\n" +"POT-Creation-Date: 2016-03-29 13:10+0000\n" "PO-Revision-Date: 2012-08-27 17:14+0800\n" "Last-Translator: ben \n" "Language-Team: LANGUAGE \n" @@ -177,7 +177,8 @@ msgstr "" #: ../gnocchi-api.templates:2001 msgid "" "Note that you will need to have an up and running Keystone server on which " -"to connect using the Keystone authentication token." +"to connect using a known admin project name, admin username and password. " +"The admin auth token is not used anymore." msgstr "" #. Type: string @@ -194,35 +195,52 @@ msgid "" "contact Keystone to do the Gnocchi service and endpoint creation." msgstr "" -#. Type: password +#. Type: string #. Description #: ../gnocchi-api.templates:4001 -msgid "Keystone authentication token:" +msgid "Keystone admin name:" msgstr "" +#. Type: string +#. Description +#. Type: string +#. Description #. Type: password #. Description -#: ../gnocchi-api.templates:4001 +#: ../gnocchi-api.templates:4001 ../gnocchi-api.templates:5001 +#: ../gnocchi-api.templates:6001 msgid "" -"To configure its endpoint in Keystone, gnocchi-api needs the Keystone " -"authentication token." +"To register the service endpoint, this package needs to know the Admin " +"login, name, project name, and password to the Keystone server." msgstr "" #. Type: string #. Description #: ../gnocchi-api.templates:5001 +msgid "Keystone admin project name:" +msgstr "" + +#. Type: password +#. Description +#: ../gnocchi-api.templates:6001 +msgid "Keystone admin password:" +msgstr "" + +#. Type: string +#. Description +#: ../gnocchi-api.templates:7001 msgid "Gnocchi endpoint IP address:" msgstr "" #. Type: string #. 
Description -#: ../gnocchi-api.templates:5001 +#: ../gnocchi-api.templates:7001 msgid "Please enter the IP address that will be used to contact Gnocchi." msgstr "" #. Type: string #. Description -#: ../gnocchi-api.templates:5001 +#: ../gnocchi-api.templates:7001 msgid "" "This IP address should be accessible from the clients that will use this " "service, so if you are installing a public cloud, this should be a public IP " @@ -231,13 +249,13 @@ msgstr "" #. Type: string #. Description -#: ../gnocchi-api.templates:6001 +#: ../gnocchi-api.templates:8001 msgid "Name of the region to register:" msgstr "" #. Type: string #. Description -#: ../gnocchi-api.templates:6001 +#: ../gnocchi-api.templates:8001 msgid "" "OpenStack supports using availability zones, with each region representing a " "location. Please enter the zone that you wish to use when registering the " -- GitLab From 3bdfaeb67e7041a0ea541143bd3cfc697d8ab5d4 Mon Sep 17 00:00:00 2001 From: gordon chung Date: Wed, 30 Mar 2016 09:34:40 -0400 Subject: [PATCH 0142/1483] resample only data affected by new measures currently, we are passing in the full unaggregated timeserie when we update each of the aggregates for each granularity. this is incorrect as the unaggregated timeserie corresponds to the largest granularity of the metric. this means that for smaller granularities, we are updating and resampling points that are already aggregated and not affected by new incoming measures. 
Change-Id: I687c2a18b332494f5c5cb7fdfe6f2b3d1e8de804 Closes-Bug: #1562820 --- gnocchi/carbonara.py | 14 +++++++------- gnocchi/storage/_carbonara.py | 12 +++++++++++- gnocchi/tests/test_storage.py | 35 ++++++++++++++++++++++++++++------- 3 files changed, 46 insertions(+), 15 deletions(-) diff --git a/gnocchi/carbonara.py b/gnocchi/carbonara.py index 3bc51996..977443fe 100644 --- a/gnocchi/carbonara.py +++ b/gnocchi/carbonara.py @@ -162,7 +162,7 @@ class TimeSerie(SerializableMixin): return value.nanos / 10e8 @staticmethod - def _round_timestamp(ts, freq): + def round_timestamp(ts, freq): return pandas.Timestamp( (pandas.Timestamp(ts).value // freq) * freq) @@ -223,8 +223,8 @@ class BoundTimeSerie(TimeSerie): def set_values(self, values, before_truncate_callback=None, ignore_too_old_timestamps=False): + # NOTE: values must be sorted when passed in. if self.block_size is not None and not self.ts.empty: - values = sorted(values, key=operator.itemgetter(0)) first_block_timestamp = self._first_block_timestamp() if ignore_too_old_timestamps: for index, (timestamp, value) in enumerate(values): @@ -268,8 +268,8 @@ class BoundTimeSerie(TimeSerie): return basic def _first_block_timestamp(self): - rounded = self._round_timestamp(self.ts.index[-1], - self.block_size.delta.value) + rounded = self.round_timestamp(self.ts.index[-1], + self.block_size.delta.value) return rounded - (self.block_size * self.back_window) @@ -323,7 +323,7 @@ class AggregatedTimeSerie(TimeSerie): @classmethod def get_split_key_datetime(cls, timestamp, sampling): - return cls._round_timestamp( + return cls.round_timestamp( timestamp, freq=sampling * cls.POINTS_PER_SPLIT * 10e8) @staticmethod @@ -447,7 +447,7 @@ class AggregatedTimeSerie(TimeSerie): # Group by the sampling, and then apply the aggregation method on # the points after `after' groupedby = self.ts[after:].groupby( - functools.partial(self._round_timestamp, + functools.partial(self.round_timestamp, freq=self.sampling * 10e8)) agg_func = 
getattr(groupedby, self.aggregation_method_func_name) if self.aggregation_method_func_name == 'quantile': @@ -469,7 +469,7 @@ class AggregatedTimeSerie(TimeSerie): if from_timestamp is None: from_ = None else: - from_ = self._round_timestamp(from_timestamp, self.sampling * 10e8) + from_ = self.round_timestamp(from_timestamp, self.sampling * 10e8) points = self[from_:to_timestamp] try: # Do not include stop timestamp diff --git a/gnocchi/storage/_carbonara.py b/gnocchi/storage/_carbonara.py index e1b33851..72b97119 100644 --- a/gnocchi/storage/_carbonara.py +++ b/gnocchi/storage/_carbonara.py @@ -18,6 +18,7 @@ import collections import datetime import logging import multiprocessing +import operator import threading import time import uuid @@ -342,6 +343,7 @@ class CarbonaraBasedStorage(storage.StorageDriver): % metric) continue + measures = sorted(measures, key=operator.itemgetter(0)) try: with timeutils.StopWatch() as sw: raw_measures = ( @@ -380,9 +382,17 @@ class CarbonaraBasedStorage(storage.StorageDriver): back_window=metric.archive_policy.back_window) def _map_add_measures(bound_timeserie): + # NOTE (gordc): bound_timeserie is entire set of + # unaggregated measures matching largest + # granularity. 
the following takes only the points + # affected by new measures for specific granularity + tstamp = max(bound_timeserie.first, measures[0][0]) self._map_in_thread( self._add_measures, - ((aggregation, d, metric, bound_timeserie) + ((aggregation, d, metric, + carbonara.TimeSerie(bound_timeserie.ts[ + carbonara.TimeSerie.round_timestamp( + tstamp, d.granularity * 10e8):])) for aggregation in agg_methods for d in metric.archive_policy.definition)) diff --git a/gnocchi/tests/test_storage.py b/gnocchi/tests/test_storage.py index c7f80eca..ede191d7 100644 --- a/gnocchi/tests/test_storage.py +++ b/gnocchi/tests/test_storage.py @@ -21,6 +21,7 @@ from oslo_utils import timeutils from oslotest import base import six.moves +from gnocchi import carbonara from gnocchi import storage from gnocchi.storage import _carbonara from gnocchi.storage import null @@ -112,27 +113,47 @@ class TestStorageDriver(tests_base.TestCase): self.assertEqual(3661, len(self.storage.get_measures(m))) @mock.patch('gnocchi.carbonara.AggregatedTimeSerie.POINTS_PER_SPLIT', 48) - def test_add_measures_big_update_subset(self): + def test_add_measures_update_subset_split(self): m, m_sql = self._create_metric('medium') measures = [ - storage.Measure(datetime.datetime(2014, 1, i, j, 0, 0), 100) - for i in six.moves.range(1, 6) for j in six.moves.range(0, 24)] - measures.append( - storage.Measure(datetime.datetime(2014, 1, 6, 0, 0, 0), 100)) + storage.Measure(datetime.datetime(2014, 1, 6, i, j, 0), 100) + for i in six.moves.range(2) for j in six.moves.range(0, 60, 2)] self.storage.add_measures(m, measures) self.storage.process_background_tasks(self.index, sync=True) + # add measure to end, in same aggregate time as last point. 
self.storage.add_measures(m, [ - storage.Measure(datetime.datetime(2014, 1, 6, 1, 0, 0), 100)]) + storage.Measure(datetime.datetime(2014, 1, 6, 1, 58, 1), 100)]) with mock.patch.object(self.storage, '_store_metric_measures') as c: + # should only resample last aggregate self.storage.process_background_tasks(self.index, sync=True) count = 0 for call in c.mock_calls: - if mock.call(m_sql, mock.ANY, 'mean', 3600.0, mock.ANY) == call: + # policy is 60 points and split is 48. should only update 2nd half + if mock.call(m_sql, mock.ANY, 'mean', 60.0, mock.ANY) == call: count += 1 self.assertEqual(1, count) + def test_add_measures_update_subset(self): + m, m_sql = self._create_metric('medium') + measures = [ + storage.Measure(datetime.datetime(2014, 1, 6, i, j, 0), 100) + for i in six.moves.range(2) for j in six.moves.range(0, 60, 2)] + self.storage.add_measures(m, measures) + self.storage.process_background_tasks(self.index, sync=True) + + # add measure to end, in same aggregate time as last point. + new_point = datetime.datetime(2014, 1, 6, 1, 58, 1) + self.storage.add_measures(m, [storage.Measure(new_point, 100)]) + + with mock.patch.object(self.storage, '_add_measures') as c: + self.storage.process_background_tasks(self.index, sync=True) + for __, args, __ in c.mock_calls: + self.assertEqual( + args[3].first, carbonara.TimeSerie.round_timestamp( + new_point, args[1].granularity * 10e8)) + def test_delete_old_measures(self): self.storage.add_measures(self.metric, [ storage.Measure(datetime.datetime(2014, 1, 1, 12, 0, 1), 69), -- GitLab From dc36e645e575b640c5b32f230fa67639cb68fbac Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Fri, 1 Apr 2016 16:30:11 +0200 Subject: [PATCH 0143/1483] Fix --version string on all command line tools Currently the --version switch is shown in --help, but nothing is returned. Make it happens! 
Change-Id: I02dc4adcb2599979533bba39fee4b808c958a2e4 --- gnocchi/service.py | 4 +++- requirements.txt | 1 + 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/gnocchi/service.py b/gnocchi/service.py index c33d2378..e480ba75 100644 --- a/gnocchi/service.py +++ b/gnocchi/service.py @@ -22,6 +22,7 @@ from oslo_config import cfg from oslo_db import options as db_options from oslo_log import log from oslo_policy import opts as policy_opts +import pbr.version from six.moves.urllib import parse as urlparse from gnocchi import archive_policy @@ -58,7 +59,8 @@ def prepare_service(args=None, conf=None, conf.set_default("workers", default_workers, group="metricd") conf(args, project='gnocchi', validate_default_values=True, - default_config_files=default_config_files) + default_config_files=default_config_files, + version=pbr.version.VersionInfo('gnocchi').version_string()) # If no coordination URL is provided, default to using the indexer as # coordinator diff --git a/requirements.txt b/requirements.txt index cc9667e3..306fd64c 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,3 +1,4 @@ +pbr numpy oslo.config>=2.6.0 oslo.log>=1.0.0 -- GitLab From 5153164ade5a1197dd77c2cddeea409cf438194b Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Thu, 31 Mar 2016 09:20:09 +0200 Subject: [PATCH 0144/1483] devstack: Allow to use devstack-plugin-ceph Change-Id: I3337ee883d3c38c8d0b42fb950221b36d0a244fa --- devstack/plugin.sh | 17 +++++++++++++++-- 1 file changed, 15 insertions(+), 2 deletions(-) diff --git a/devstack/plugin.sh b/devstack/plugin.sh index fb3b5302..e517ebbb 100644 --- a/devstack/plugin.sh +++ b/devstack/plugin.sh @@ -65,6 +65,19 @@ function is_gnocchi_enabled { return 1 } +# Test if a Ceph services are enabled +# _is_ceph_enabled +function _is_ceph_enabled { + if is_service_enabled ceph; then + # Old ceph setup + return 0 + elif type is_ceph_enabled_for_service >/dev/null 2>&1; then + # New devstack-plugin-ceph + return 0 + fi + return 1 +} + # 
create_gnocchi_accounts() - Set up common required gnocchi accounts # Project User Roles @@ -243,7 +256,7 @@ function configure_gnocchi { fi # Configure the storage driver - if is_service_enabled ceph && [[ "$GNOCCHI_STORAGE_BACKEND" = 'ceph' ]] ; then + if _is_ceph_enabled && [[ "$GNOCCHI_STORAGE_BACKEND" = 'ceph' ]] ; then iniset $GNOCCHI_CONF storage driver ceph iniset $GNOCCHI_CONF storage ceph_username ${GNOCCHI_CEPH_USER} iniset $GNOCCHI_CONF storage ceph_secret $(awk '/key/{print $3}' ${CEPH_CONF_DIR}/ceph.client.${GNOCCHI_CEPH_USER}.keyring) @@ -457,7 +470,7 @@ if is_service_enabled gnocchi-api; then echo_summary "Configuring Gnocchi" configure_gnocchi create_gnocchi_accounts - if is_service_enabled ceph && [[ "$GNOCCHI_STORAGE_BACKEND" = 'ceph' ]] ; then + if _is_ceph_enabled && [[ "$GNOCCHI_STORAGE_BACKEND" = 'ceph' ]] ; then echo_summary "Configuring Gnocchi for Ceph" configure_ceph_gnocchi fi -- GitLab From bcab579ac3c805ec392afd90afc1f0c6b4bcdadc Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Thu, 31 Mar 2016 09:21:03 +0200 Subject: [PATCH 0145/1483] devstack: remove useless ceph permission Change-Id: I4c0619ca7328f38bbec8043fe65551573e0d63f1 --- devstack/plugin.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/devstack/plugin.sh b/devstack/plugin.sh index fb3b5302..c92025e9 100644 --- a/devstack/plugin.sh +++ b/devstack/plugin.sh @@ -318,7 +318,7 @@ function configure_ceph_gnocchi { sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${GNOCCHI_CEPH_POOL} crush_ruleset ${RULE_ID} fi - sudo ceph -c ${CEPH_CONF_FILE} auth get-or-create client.${GNOCCHI_CEPH_USER} mon "allow r" osd "allow class-read object_prefix rbd_children, allow rwx pool=${GNOCCHI_CEPH_POOL}" | sudo tee ${CEPH_CONF_DIR}/ceph.client.${GNOCCHI_CEPH_USER}.keyring + sudo ceph -c ${CEPH_CONF_FILE} auth get-or-create client.${GNOCCHI_CEPH_USER} mon "allow r" osd "allow rwx pool=${GNOCCHI_CEPH_POOL}" | sudo tee ${CEPH_CONF_DIR}/ceph.client.${GNOCCHI_CEPH_USER}.keyring sudo 
chown ${STACK_USER}:$(id -g -n $whoami) ${CEPH_CONF_DIR}/ceph.client.${GNOCCHI_CEPH_USER}.keyring } -- GitLab From 5403db73c42540432fb285da28271bf9dadb74c3 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Fri, 25 Mar 2016 08:44:32 +0100 Subject: [PATCH 0146/1483] ceph: make requirements clearer This improves documentation about ceph requirements. And this creates two new extra requirements to inform packagers about rados libs requirements, even this is not really usefull for our virtualenv setup. * ceph-pre-jewel: For when Ceph version is < jewel (10.1.0) * ceph-jewel-and-later: For when Ceph version is >= jewel Change-Id: I75bb096d8ede2e1e0076d22eaeac5eabc1c03ded --- doc/source/configuration.rst | 13 +++++++------ doc/source/install.rst | 17 +++++++++++++++-- gnocchi/storage/ceph.py | 4 ++-- setup.cfg | 4 ++++ 4 files changed, 28 insertions(+), 10 deletions(-) diff --git a/doc/source/configuration.rst b/doc/source/configuration.rst index a24f141b..456f1810 100644 --- a/doc/source/configuration.rst +++ b/doc/source/configuration.rst @@ -127,8 +127,8 @@ Ceph driver implementation details Each batch of measurements to process is stored into one rados object. These objects are named `measures___` -Also a special empty object called `measures` has the list of measures to -process stored in its xattr attributes. +Also a special empty object called `measure` has the list of measures to +process stored in its omap attributes. Because of the asynchronous nature of how we store measurements in Gnocchi, `gnocchi-metricd` needs to know the list of objects that are waiting to be @@ -139,11 +139,12 @@ processed: - Using a custom format into a rados object, would force us to use a lock each time we would change it. -Instead, the xattrs of one empty rados object are used. No lock is needed to -add/remove a xattr. +Instead, the omaps of one empty rados object are used. No lock is needed to +add/remove a omap attribute. 
-But depending on the filesystem used by ceph OSDs, this xattrs can have a -limitation in terms of numbers and size if Ceph is not correctly configured. +Also xattrs attributes are used to store the list of aggregations used for a +metric. So depending on the filesystem used by ceph OSDs, xattrs can have +a limitation in terms of numbers and size if Ceph is not correctly configured. See `Ceph extended attributes documentation`_ for more details. Then, each Carbonara generated file is stored in *one* rados object. diff --git a/doc/source/install.rst b/doc/source/install.rst index 9806be40..0d5029bb 100644 --- a/doc/source/install.rst +++ b/doc/source/install.rst @@ -46,7 +46,9 @@ The list of variants available is: * mysql - provides MySQL indexer support * postgresql – provides PostgreSQL indexer support * swift – provides OpenStack Swift storage support -* ceph – provides Ceph storage support +* ceph – provides common part of Ceph storage support +* ceph-pre-jewel – provides Ceph (<10.1.0) storage support +* ceph-jewel-and-later – provides Ceph (>=10.1.0) storage support * file – provides file driver support * doc – documentation building support * test – unit and functional tests support @@ -59,7 +61,18 @@ procedure:: Again, depending on the drivers and features you want to use, you need to install extra variants using, for example:: - pip install -e .[postgresql,ceph] + pip install -e .[postgresql,ceph,ceph-pre-jewel] + + +Ceph requirements +----------------- + +Gnocchi leverages omap API of librados, but this is available in python binding +only since python-rados >= 9.1.0. To handle this, Gnocchi uses 'cradox' python +library which has exactly the same API but works with Ceph >= 0.80.0. + +If Ceph and python-rados are >= 9.1.0, cradox python library becomes optional +but is still recommended until 10.1.0. 
Initialization diff --git a/gnocchi/storage/ceph.py b/gnocchi/storage/ceph.py index 560559dd..63ddb17d 100644 --- a/gnocchi/storage/ceph.py +++ b/gnocchi/storage/ceph.py @@ -71,8 +71,8 @@ class CephStorage(_carbonara.CarbonaraBasedStorage): if not hasattr(rados, 'OmapIterator'): raise ImportError("Your rados python module does not support " - "omap feature. Upgrade 'python-rados' or " - "install 'cradox'") + "omap feature. Install 'cradox' (recommended) " + "or upgrade 'python-rados' >= 9.1.0 ") LOG.info("Ceph storage backend use '%s' python library" % RADOS_MODULE_NAME) diff --git a/setup.cfg b/setup.cfg index 22415e74..8cf94719 100644 --- a/setup.cfg +++ b/setup.cfg @@ -42,6 +42,10 @@ ceph = msgpack-python lz4 tooz>=1.30 +ceph-pre-jewel: + cradox>=1.0.9 +ceph-jewel-and-later: + python-rados>=10.1.0 # not available on pypi file = msgpack-python lz4 -- GitLab From c77791ebf3bf963dccd53b8e7fa3facd18e0b270 Mon Sep 17 00:00:00 2001 From: gordon chung Date: Mon, 4 Apr 2016 11:33:27 -0400 Subject: [PATCH 0147/1483] fix resource_type table migration we're incorrectly setting the value of tablename to 'name' rather than the actually name of resource type. this corrects that. 
additional fixes: - fix incorrect setting nullable=False when it's created as True initially - fix incorrect revision string in create_resource_type Change-Id: Id77239c0e22ac069ebb87fc926a8daaf71ad8177 Closes-Bug: #1565781 --- .../versions/0735ed97e5b3_add_tablename_to_resource_type.py | 4 ++-- .../versions/828c16f70cce_create_resource_type_table.py | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/gnocchi/indexer/alembic/versions/0735ed97e5b3_add_tablename_to_resource_type.py b/gnocchi/indexer/alembic/versions/0735ed97e5b3_add_tablename_to_resource_type.py index 5827b4cd..8662b114 100644 --- a/gnocchi/indexer/alembic/versions/0735ed97e5b3_add_tablename_to_resource_type.py +++ b/gnocchi/indexer/alembic/versions/0735ed97e5b3_add_tablename_to_resource_type.py @@ -39,14 +39,14 @@ def upgrade(): resource_type = sa.Table( 'resource_type', sa.MetaData(), sa.Column('name', sa.String(255), nullable=False), - sa.Column('tablename', sa.String(18), nullable=False) + sa.Column('tablename', sa.String(18), nullable=True) ) op.execute(resource_type.update().where( resource_type.c.name == "instance_network_interface" ).values({'tablename': op.inline_literal("'instance_net_int'")})) op.execute(resource_type.update().where( resource_type.c.name != "instance_network_interface" - ).values({'tablename': op.inline_literal('name')})) + ).values({'tablename': resource_type.c.name})) op.alter_column("resource_type", "tablename", type_=sa.String(18), nullable=False) diff --git a/gnocchi/indexer/alembic/versions/828c16f70cce_create_resource_type_table.py b/gnocchi/indexer/alembic/versions/828c16f70cce_create_resource_type_table.py index 3cfd499d..c95d2684 100644 --- a/gnocchi/indexer/alembic/versions/828c16f70cce_create_resource_type_table.py +++ b/gnocchi/indexer/alembic/versions/828c16f70cce_create_resource_type_table.py @@ -16,7 +16,7 @@ """create resource_type table Revision ID: 828c16f70cce -Revises: a54c57ada3f5 +Revises: 9901e5ea4b6e Create Date: 2016-01-19 
12:47:19.384127 """ -- GitLab From a5963db55046cc2c10e0dcc0be9e376d4c1c8cf2 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Mon, 4 Apr 2016 18:05:58 +0200 Subject: [PATCH 0148/1483] devstack: rename UWSGI file I mean, it's in /etc/gnocchi, so we already know for who it is. Change-Id: I704c459cc64e58b3dd02ec089144434de900b452 --- devstack/plugin.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/devstack/plugin.sh b/devstack/plugin.sh index fb3b5302..ed9a3d6d 100644 --- a/devstack/plugin.sh +++ b/devstack/plugin.sh @@ -283,7 +283,7 @@ function configure_gnocchi { _config_gnocchi_apache_wsgi elif [ "$GNOCCHI_DEPLOY" == "uwsgi" ]; then # iniset creates these files when it's called if they don't exist. - GNOCCHI_UWSGI_FILE=$GNOCCHI_CONF_DIR/gnocchi-uwsgi.ini + GNOCCHI_UWSGI_FILE=$GNOCCHI_CONF_DIR/uwsgi.ini rm -f "$GNOCCHI_UWSGI_FILE" -- GitLab From 87fbc13885ae1278dcb5bc0e74cf0b39ea3020b2 Mon Sep 17 00:00:00 2001 From: Thomas Goirand Date: Tue, 5 Apr 2016 11:03:47 +0200 Subject: [PATCH 0149/1483] Updated ja.po debconf translation (Closes: #819135). --- debian/changelog | 7 +++ debian/po/ja.po | 143 +++++++++++++++++------------------------------ 2 files changed, 59 insertions(+), 91 deletions(-) diff --git a/debian/changelog b/debian/changelog index 4f78d301..bbef9ccf 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,10 @@ +gnocchi (2.0.2-4) unstable; urgency=medium + + * Uploading to unstable. + * Updated ja.po debconf translation (closes: #819135). + + -- Thomas Goirand Tue, 05 Apr 2016 11:02:48 +0200 + gnocchi (2.0.2-3) experimental; urgency=medium * Added missing (build-)depends: python-lz4. diff --git a/debian/po/ja.po b/debian/po/ja.po index a818bd83..66c8c099 100644 --- a/debian/po/ja.po +++ b/debian/po/ja.po @@ -1,26 +1,27 @@ -# SOME DESCRIPTIVE TITLE. -# Copyright (C) YEAR THE PACKAGE'S COPYRIGHT HOLDER -# This file is distributed under the same license as the PACKAGE package. -# victory , 2012. -# +# SOME DESCRIPTIVE TITLE. 
+# Copyright (C) YEAR THE PACKAGE'S COPYRIGHT HOLDER +# This file is distributed under the same license as the PACKAGE package. +# victory , 2012. +# Takuma Yamada , 2016. +# msgid "" msgstr "" "Project-Id-Version: glance\n" "Report-Msgid-Bugs-To: gnocchi@packages.debian.org\n" "POT-Creation-Date: 2016-03-29 13:10+0000\n" -"PO-Revision-Date: 2012-11-10 23:28+0900\n" -"Last-Translator: victory \n" +"PO-Revision-Date: 2016-03-17 10:06+0900\n" +"Last-Translator: Takuma Yamada \n" "Language-Team: Japanese \n" "Language: ja\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" +"Plural-Forms: nplurals=1; plural=0;\n" +"X-Generator: Gtranslator 2.91.6\n" #. Type: string #. Description #: ../gnocchi-common.templates:2001 -#, fuzzy -#| msgid "Auth server hostname:" msgid "Authentication server hostname:" msgstr "認証サーバのホスト名:" @@ -32,8 +33,8 @@ msgid "" "Typically this is also the hostname of the OpenStack Identity Service " "(Keystone)." msgstr "" -"Gnocchi 認証サーバの URL を指定してください。これは通常 OpenStack Identity " -"Service (Keystone) の URL にもなります。" +"Gnocchi 用認証サーバのホスト名を指定してください。通常これは OpenStack " +"Identity Service (Keystone) のホスト名と同じです。" #. Type: string #. Description @@ -45,10 +46,8 @@ msgstr "" #. or keep it parenthezised. Example for French: #. locataire ("tenant") #: ../gnocchi-common.templates:3001 -#, fuzzy -#| msgid "Auth server tenant name:" msgid "Authentication server tenant name:" -msgstr "認証サーバの管理用アカウント (tenant) 名" +msgstr "認証サーバのテナント (tenant) 名:" #. Type: string #. Description @@ -61,13 +60,11 @@ msgstr "認証サーバの管理用アカウント (tenant) 名" #. locataire ("tenant") #: ../gnocchi-common.templates:3001 msgid "Please specify the authentication server tenant name." -msgstr "" +msgstr "認証サーバのテナント (tenant) 名を指定してください。" #. Type: string #. Description #: ../gnocchi-common.templates:4001 -#, fuzzy -#| msgid "Auth server username:" msgid "Authentication server username:" msgstr "認証サーバのユーザ名:" @@ -75,13 +72,11 @@ msgstr "認証サーバのユーザ名:" #. 
Description #: ../gnocchi-common.templates:4001 msgid "Please specify the username to use with the authentication server." -msgstr "" +msgstr "認証サーバで使用するユーザ名を指定してください。" #. Type: password #. Description #: ../gnocchi-common.templates:5001 -#, fuzzy -#| msgid "Auth server password:" msgid "Authentication server password:" msgstr "認証サーバのパスワード:" @@ -89,40 +84,27 @@ msgstr "認証サーバのパスワード:" #. Description #: ../gnocchi-common.templates:5001 msgid "Please specify the password to use with the authentication server." -msgstr "" +msgstr "認証サーバで使用するパスワードを指定してください。" #. Type: boolean #. Description #: ../gnocchi-common.templates:6001 -#, fuzzy -#| msgid "Set up a database for glance?" msgid "Set up a database for Gnocchi?" -msgstr "glance 用のデータベースを用意しますか?" +msgstr "Gnocchi 用のデータベースを用意しますか?" #. Type: boolean #. Description #: ../gnocchi-common.templates:6001 -#, fuzzy -#| msgid "" -#| "No database has been set up for glance-registry or glance-api to use. " -#| "Before continuing, you should make sure you have:" msgid "" "No database has been set up for Gnocchi to use. Before continuing, you " "should make sure you have the following information:" msgstr "" -"glance-registry または glance-api で利用するデータベースが用意されていませ" -"ん。続ける前に以下の情報が揃っていることを確認してください:" +"Gnocchi で使用するために設定されたデータベースがありません。続行する前に、以" +"下の情報が揃っていることを確認してください:" #. Type: boolean #. Description #: ../gnocchi-common.templates:6001 -#, fuzzy -#| msgid "" -#| " - the server host name (that server must allow TCP connections from " -#| "this\n" -#| " machine);\n" -#| " - a username and password to access the database.\n" -#| " - A database type that you want to use." msgid "" " * the type of database that you want to use;\n" " * the database server hostname (that server must allow TCP connections from " @@ -130,84 +112,69 @@ msgid "" " machine);\n" " * a username and password to access the database." 
msgstr "" -" - サーバのホスト名 (このサーバはこのマシンからの\n" -" TCP 接続を許可しなければなりません)\n" -" - データベースにアクセスするためのユーザ名とパスワード\n" -" - 使いたいデータベースの種類" +" * 使用するデータベースの種類\n" +" * データベースサーバのホスト名 (そのサーバは、このマシンからの\n" +" TCP 接続を許可する必要があります)\n" +" * データベースにアクセスするためのユーザ名とパスワード" #. Type: boolean #. Description #: ../gnocchi-common.templates:6001 -#, fuzzy -#| msgid "" -#| "If some of these requirements are missing, reject this option and run " -#| "with regular sqlite support." msgid "" "If some of these requirements are missing, do not choose this option and run " "with regular SQLite support." msgstr "" -"必要な情報が欠けている場合このオプションを却下して標準の SQLite を利用しま" -"す。" +"これらの要件が欠落している場合は、このオプションを選択しないでください。そし" +"て、標準 SQLite サポートで実行してください。" #. Type: boolean #. Description #: ../gnocchi-common.templates:6001 -#, fuzzy -#| msgid "" -#| "You can change this setting later on by running 'dpkg-reconfigure -plow " -#| "glance-common'." msgid "" "You can change this setting later on by running \"dpkg-reconfigure -plow " "gnocchi-common\"." msgstr "" -"この設定は後で「dpkg-reconfigure -plow glance-common」を実行することにより変" -"更できます。" +"「dpkg-reconfigure -plow gnocchi-common」の実行により、この設定を後で変更する" +"ことができます。" #. Type: boolean #. Description #: ../gnocchi-api.templates:2001 msgid "Register Gnocchi in the Keystone endpoint catalog?" -msgstr "Gnocchi を Keystone の端末リストに登録しますか?" +msgstr "Gnocchi を Keystone のエンドポイントのカタログに登録しますか?" #. Type: boolean #. Description #: ../gnocchi-api.templates:2001 -#, fuzzy -#| msgid "" -#| "Each Openstack services (each API) should be registered in order to be " -#| "accessible. This is done using \"keystone service-create\" and \"keystone " -#| "endpoint-create\". Select if you want to run these commands now." msgid "" "Each OpenStack service (each API) should be registered in order to be " "accessible. This is done using \"keystone service-create\" and \"keystone " "endpoint-create\". This can be done automatically now." 
msgstr "" -"OpenStack のサービスごと (API ごと) に、アクセスできるようにするため登録すべ" -"きです。「keystone service-create」と「keystone endpoint-create」を使って登録" -"することができます。ここで自動的に行うことができます。" +"各 OpenStack のサービス (各 API ) がアクセスできるようにするために登録する必" +"要があります。これは「keystone service-create」と「keystone endpoint-create」" +"を使用して行われます。これは自動的に行うことができます。" #. Type: boolean #. Description #: ../gnocchi-api.templates:2001 #, fuzzy #| msgid "" -#| "Note that you will need to have an up and running keystone server on " -#| "which to connect using the Keystone auth token." +#| "Note that you will need to have an up and running Keystone server on " +#| "which to connect using the Keystone authentication token." msgid "" "Note that you will need to have an up and running Keystone server on which " "to connect using a known admin project name, admin username and password. " "The admin auth token is not used anymore." msgstr "" -"Keystone 認証文字列を使って接続する先の Keystone サーバが必要なことに注意して" -"ください。" +"Keystone 認証トークンを使って接続するには、Keystone サーバの起動および実行が" +"必要になりますので注意してください。" #. Type: string #. Description #: ../gnocchi-api.templates:3001 -#, fuzzy -#| msgid "Keystone IP address:" msgid "Keystone server IP address:" -msgstr "Keystone の IP アドレス:" +msgstr "Keystone サーバの IP アドレス:" #. Type: string #. Description @@ -216,16 +183,17 @@ msgid "" "Please enter the IP address of the Keystone server, so that gnocchi-api can " "contact Keystone to do the Gnocchi service and endpoint creation." msgstr "" -"Keystone サーバの IP アドレスを入力してください。それにより glance-api は " -"Keystone と通信し、Gnocchi サービスや端末の作成ができるようになります。" +"Keystone サーバの IP アドレスを入力してください。その結果 gnocchi-api は、" +"Gnocchi サービスやエンドポイント作成を行うために Keystone へ通信することがで" +"きます。" #. Type: string #. Description #: ../gnocchi-api.templates:4001 #, fuzzy -#| msgid "Keystone Auth Token:" +#| msgid "Keystone authentication token:" msgid "Keystone admin name:" -msgstr "Keystone 認証文字列:" +msgstr "Keystone 認証トークン:" #. Type: string #. Description @@ -256,15 +224,13 @@ msgstr "" #. 
Description #: ../gnocchi-api.templates:7001 msgid "Gnocchi endpoint IP address:" -msgstr "Gnocchi 端末の IP アドレス:" +msgstr "Gnocchi エンドポイントの IP アドレス:" #. Type: string #. Description #: ../gnocchi-api.templates:7001 msgid "Please enter the IP address that will be used to contact Gnocchi." -msgstr "" -"Gnocchi への通信に利用する IP アドレス (例えば Gnocchi 端末の IP アドレス) を" -"入力してください。" +msgstr "Gnocchi の通信に使用される IP アドレスを入力してください。" #. Type: string #. Description @@ -274,33 +240,28 @@ msgid "" "service, so if you are installing a public cloud, this should be a public IP " "address." msgstr "" -"この IP アドレスはこのサービスを利用するクライアントからアクセスできないとい" -"けないので、パブリッククラウドをインストールしている場合、これは公開 IP アド" -"レスを使うようにしてください。" +"この IP アドレスは、このサービスを利用するクライアントからアクセス可能でなけ" +"ればならないので、パブリッククラウドをインストールしている場合、これはパブ" +"リック IP アドレスでなければなりません。" #. Type: string #. Description #: ../gnocchi-api.templates:8001 msgid "Name of the region to register:" -msgstr "登録する領域の名前:" +msgstr "登録するリージョンの名前:" #. Type: string #. Description #: ../gnocchi-api.templates:8001 -#, fuzzy -#| msgid "" -#| "Openstack can be used using availability zones, with each region " -#| "representing a location. Please enter the zone that you wish to use when " -#| "registering the endpoint." msgid "" "OpenStack supports using availability zones, with each region representing a " "location. Please enter the zone that you wish to use when registering the " "endpoint." msgstr "" -"OpenStack は位置を示す各領域による利用可能区分を利用することができます。端末" -"の登録時に利用したい区分を入力してください。" +"OpenStack は、場所を表すリージョン毎に、アベイラビリティーゾーンの使用をサ" +"ポートします。エンドポイントを登録する際に、使用するゾーンを入力してくださ" +"い。" -#, fuzzy #~| msgid "" #~| "To configure its endpoint in Keystone, glance-api needs the Keystone " #~| "auth token." @@ -308,5 +269,5 @@ msgstr "" #~ "To configure its endpoint in Keystone, gnocchi-api needs the Keystone " #~ "authentication token." 
#~ msgstr "" -#~ "Keystone で端末を設定するには、glance-api は Keystone 認証文字列を必要とし" -#~ "ます。" +#~ "Keystone でエンドポイントを設定するには、glance-api は Keystone 認証トーク" +#~ "ンを必要とします。" -- GitLab From 14e60418caafc22d814a57220552cac95d153f69 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Mon, 4 Apr 2016 20:43:25 +0200 Subject: [PATCH 0150/1483] carbonara: catch LockAcquireFailed exception Starting with tooz 1.21.0, this exception is raised when the lock fail to be acquired. Change-Id: I6c8a85342b4b736e2d10fc1719790fa771c5d6b6 --- gnocchi/storage/_carbonara.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/gnocchi/storage/_carbonara.py b/gnocchi/storage/_carbonara.py index 72b97119..ebd38680 100644 --- a/gnocchi/storage/_carbonara.py +++ b/gnocchi/storage/_carbonara.py @@ -324,8 +324,12 @@ class CarbonaraBasedStorage(storage.StorageDriver): # NOTE(jd): We need to lock the metric otherwise we might delete # measures that another worker might be processing. Deleting # measurement files under its feet is not nice! 
- with self._lock(metric_id)(blocking=sync): - self._delete_unprocessed_measures_for_metric_id(metric_id) + try: + with self._lock(metric_id)(blocking=sync): + self._delete_unprocessed_measures_for_metric_id(metric_id) + except coordination.LockAcquireFailed: + LOG.debug("Cannot acquire lock for metric %s, postponing" + "unprocessed measures deletion" % metric_id) for metric in metrics: lock = self._lock(metric.id) agg_methods = list(metric.archive_policy.aggregation_methods) -- GitLab From 69215f20cf29ad9ac2c86c97efb897b74affaca9 Mon Sep 17 00:00:00 2001 From: Eyal Date: Mon, 4 Apr 2016 14:31:42 +0300 Subject: [PATCH 0151/1483] dict.iteritems() method is not available in py3 Change-Id: Ia46a6d5c9ca38f4497da796274a1dac8f148d044 --- tools/duration_perf_test.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/duration_perf_test.py b/tools/duration_perf_test.py index 871eb24f..275cb05c 100644 --- a/tools/duration_perf_test.py +++ b/tools/duration_perf_test.py @@ -133,7 +133,7 @@ class PerfTools(object): self.dump_logs() def dump_logs(self): - for name, data in self._timers.iteritems(): + for name, data in self._timers.items(): filepath = "%s_%s.csv" % (self.args.result_path, name) dirpath = os.path.dirname(filepath) if dirpath and not os.path.exists(dirpath): -- GitLab From ffc5852958eb73f89cdc8d589ae5bb4eb5d5eaba Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Tue, 5 Apr 2016 14:25:33 +0200 Subject: [PATCH 0152/1483] Use pbr WSGI feature to build gnocchi-api Change-Id: Iad9ed5eb03d5b72c92a32d42c4a6bab24dbc33e9 --- devstack/plugin.sh | 2 +- devstack/settings | 2 +- gnocchi/cli.py | 5 ----- gnocchi/opts.py | 9 --------- gnocchi/rest/app.py | 32 +++----------------------------- gnocchi/service.py | 1 - setup.cfg | 4 +++- 7 files changed, 8 insertions(+), 47 deletions(-) diff --git a/devstack/plugin.sh b/devstack/plugin.sh index c92025e9..0596c682 100644 --- a/devstack/plugin.sh +++ b/devstack/plugin.sh @@ -40,7 +40,7 @@ 
GITREPO["python-gnocchiclient"]=${GNOCCHICLIENT_REPO:-${GIT_BASE}/openstack/pyth if [ -z "$GNOCCHI_DEPLOY" ]; then # Default - GNOCCHI_DEPLOY=werkzeug + GNOCCHI_DEPLOY=simple # Fallback to common wsgi devstack configuration if [ "$ENABLE_HTTPD_MOD_WSGI_SERVICES" == "True" ]; then diff --git a/devstack/settings b/devstack/settings index bdafd158..d8ec0244 100644 --- a/devstack/settings +++ b/devstack/settings @@ -14,7 +14,7 @@ GNOCCHI_DATA_DIR=${GNOCCHI_DATA_DIR:-${DATA_DIR}/gnocchi} # GNOCCHI_DEPLOY defines how Gnocchi is deployed, allowed values: # - mod_wsgi : Run Gnocchi under Apache HTTPd mod_wsgi -# - werkzeug : Run gnocchi-api +# - simple : Run gnocchi-api # - uwsgi : Run Gnocchi under uwsgi # - : Fallback to GNOCCHI_USE_MOD_WSGI or ENABLE_HTTPD_MOD_WSGI_SERVICES GNOCCHI_DEPLOY=${GNOCCHI_DEPLOY} diff --git a/gnocchi/cli.py b/gnocchi/cli.py index 38f7d1b5..96bbab19 100644 --- a/gnocchi/cli.py +++ b/gnocchi/cli.py @@ -26,7 +26,6 @@ import six from gnocchi import archive_policy from gnocchi import indexer -from gnocchi.rest import app from gnocchi import service from gnocchi import statsd as statsd_service from gnocchi import storage @@ -67,10 +66,6 @@ def upgrade(): index.create_archive_policy_rule("default", "*", "low") -def api(): - app.build_server() - - def statsd(): statsd_service.start() diff --git a/gnocchi/opts.py b/gnocchi/opts.py index 08c7bdff..7487cc97 100644 --- a/gnocchi/opts.py +++ b/gnocchi/opts.py @@ -36,18 +36,9 @@ def list_opts(): cfg.StrOpt('paste_config', default='api-paste.ini', help='Path to API Paste configuration.'), - cfg.PortOpt('port', - default=8041, - help='The port for the Gnocchi API server.'), - cfg.StrOpt('host', - default='0.0.0.0', - help='The listen IP for the Gnocchi API server.'), cfg.BoolOpt('pecan_debug', default=False, help='Toggle Pecan Debug Middleware.'), - cfg.IntOpt('workers', min=1, - help='Number of workers for Gnocchi API server. 
' - 'By default the available number of CPU is used.'), cfg.IntOpt('max_limit', default=1000, help=('The maximum number of items returned in a ' diff --git a/gnocchi/rest/app.py b/gnocchi/rest/app.py index 587d245c..c323c984 100644 --- a/gnocchi/rest/app.py +++ b/gnocchi/rest/app.py @@ -22,7 +22,6 @@ from oslo_policy import policy from paste import deploy import pecan import webob.exc -from werkzeug import serving from gnocchi import exceptions from gnocchi import indexer as gnocchi_indexer @@ -112,16 +111,9 @@ def load_app(conf, appname=None, indexer=None, storage=None, def _setup_app(root, conf, indexer, storage, not_implemented_middleware): - # NOTE(sileht): pecan debug won't work in multi-process environment - pecan_debug = conf.api.pecan_debug - if conf.api.workers != 1 and pecan_debug: - pecan_debug = False - LOG.warning('pecan_debug cannot be enabled, if workers is > 1, ' - 'the value is overrided with False') - app = pecan.make_app( root, - debug=pecan_debug, + debug=conf.api.pecan_debug, hooks=(GnocchiHook(storage, indexer, conf),), guess_content_type_from_ext=False, custom_renderers={'json': OsloJSONRenderer}, @@ -133,27 +125,9 @@ def _setup_app(root, conf, indexer, storage, not_implemented_middleware): return app -class WerkzeugApp(object): - # NOTE(sileht): The purpose of this class is only to be used - # with werkzeug to create the app after the werkzeug - # fork gnocchi-api and avoid creation of connection of the - # storage/indexer by the main process. 
- - def __init__(self, conf): - self.app = None - self.conf = conf - - def __call__(self, environ, start_response): - if self.app is None: - self.app = load_app(conf=self.conf) - return self.app(environ, start_response) - - -def build_server(): +def build_wsgi_app(): conf = service.prepare_service() - serving.run_simple(conf.api.host, conf.api.port, - WerkzeugApp(conf), - processes=conf.api.workers) + return load_app(conf=conf) def app_factory(global_config, **local_conf): diff --git a/gnocchi/service.py b/gnocchi/service.py index c33d2378..4f38b0fe 100644 --- a/gnocchi/service.py +++ b/gnocchi/service.py @@ -54,7 +54,6 @@ def prepare_service(args=None, conf=None, except NotImplementedError: default_workers = 1 - conf.set_default("workers", default_workers, group="api") conf.set_default("workers", default_workers, group="metricd") conf(args, project='gnocchi', validate_default_values=True, diff --git a/setup.cfg b/setup.cfg index 8cf94719..eaa46621 100644 --- a/setup.cfg +++ b/setup.cfg @@ -108,8 +108,10 @@ gnocchi.indexer = gnocchi.aggregates = moving-average = gnocchi.aggregates.moving_stats:MovingAverage +wsgi_scripts = + gnocchi-api = gnocchi.rest.app:build_wsgi_app + console_scripts = - gnocchi-api = gnocchi.cli:api gnocchi-upgrade = gnocchi.cli:upgrade gnocchi-statsd = gnocchi.cli:statsd gnocchi-metricd = gnocchi.cli:metricd -- GitLab From 04f4594cb365433f15b1c43c0a22af7068a14a2b Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Wed, 6 Apr 2016 16:14:37 +0200 Subject: [PATCH 0153/1483] Revert "Use pbr WSGI feature to build gnocchi-api" This reverts commit ffc5852958eb73f89cdc8d589ae5bb4eb5d5eaba. This is a very good idea, but we need I37f82e8d78a4288323854282da300c123561218a to be merged first. 
Change-Id: I014c54c0c27f847bd20eb8961194459745c94f6d --- devstack/plugin.sh | 2 +- devstack/settings | 2 +- gnocchi/cli.py | 5 +++++ gnocchi/opts.py | 9 +++++++++ gnocchi/rest/app.py | 32 +++++++++++++++++++++++++++++--- gnocchi/service.py | 1 + setup.cfg | 4 +--- 7 files changed, 47 insertions(+), 8 deletions(-) diff --git a/devstack/plugin.sh b/devstack/plugin.sh index 0596c682..c92025e9 100644 --- a/devstack/plugin.sh +++ b/devstack/plugin.sh @@ -40,7 +40,7 @@ GITREPO["python-gnocchiclient"]=${GNOCCHICLIENT_REPO:-${GIT_BASE}/openstack/pyth if [ -z "$GNOCCHI_DEPLOY" ]; then # Default - GNOCCHI_DEPLOY=simple + GNOCCHI_DEPLOY=werkzeug # Fallback to common wsgi devstack configuration if [ "$ENABLE_HTTPD_MOD_WSGI_SERVICES" == "True" ]; then diff --git a/devstack/settings b/devstack/settings index d8ec0244..bdafd158 100644 --- a/devstack/settings +++ b/devstack/settings @@ -14,7 +14,7 @@ GNOCCHI_DATA_DIR=${GNOCCHI_DATA_DIR:-${DATA_DIR}/gnocchi} # GNOCCHI_DEPLOY defines how Gnocchi is deployed, allowed values: # - mod_wsgi : Run Gnocchi under Apache HTTPd mod_wsgi -# - simple : Run gnocchi-api +# - werkzeug : Run gnocchi-api # - uwsgi : Run Gnocchi under uwsgi # - : Fallback to GNOCCHI_USE_MOD_WSGI or ENABLE_HTTPD_MOD_WSGI_SERVICES GNOCCHI_DEPLOY=${GNOCCHI_DEPLOY} diff --git a/gnocchi/cli.py b/gnocchi/cli.py index 96bbab19..38f7d1b5 100644 --- a/gnocchi/cli.py +++ b/gnocchi/cli.py @@ -26,6 +26,7 @@ import six from gnocchi import archive_policy from gnocchi import indexer +from gnocchi.rest import app from gnocchi import service from gnocchi import statsd as statsd_service from gnocchi import storage @@ -66,6 +67,10 @@ def upgrade(): index.create_archive_policy_rule("default", "*", "low") +def api(): + app.build_server() + + def statsd(): statsd_service.start() diff --git a/gnocchi/opts.py b/gnocchi/opts.py index 7487cc97..08c7bdff 100644 --- a/gnocchi/opts.py +++ b/gnocchi/opts.py @@ -36,9 +36,18 @@ def list_opts(): cfg.StrOpt('paste_config', default='api-paste.ini', 
help='Path to API Paste configuration.'), + cfg.PortOpt('port', + default=8041, + help='The port for the Gnocchi API server.'), + cfg.StrOpt('host', + default='0.0.0.0', + help='The listen IP for the Gnocchi API server.'), cfg.BoolOpt('pecan_debug', default=False, help='Toggle Pecan Debug Middleware.'), + cfg.IntOpt('workers', min=1, + help='Number of workers for Gnocchi API server. ' + 'By default the available number of CPU is used.'), cfg.IntOpt('max_limit', default=1000, help=('The maximum number of items returned in a ' diff --git a/gnocchi/rest/app.py b/gnocchi/rest/app.py index c323c984..587d245c 100644 --- a/gnocchi/rest/app.py +++ b/gnocchi/rest/app.py @@ -22,6 +22,7 @@ from oslo_policy import policy from paste import deploy import pecan import webob.exc +from werkzeug import serving from gnocchi import exceptions from gnocchi import indexer as gnocchi_indexer @@ -111,9 +112,16 @@ def load_app(conf, appname=None, indexer=None, storage=None, def _setup_app(root, conf, indexer, storage, not_implemented_middleware): + # NOTE(sileht): pecan debug won't work in multi-process environment + pecan_debug = conf.api.pecan_debug + if conf.api.workers != 1 and pecan_debug: + pecan_debug = False + LOG.warning('pecan_debug cannot be enabled, if workers is > 1, ' + 'the value is overrided with False') + app = pecan.make_app( root, - debug=conf.api.pecan_debug, + debug=pecan_debug, hooks=(GnocchiHook(storage, indexer, conf),), guess_content_type_from_ext=False, custom_renderers={'json': OsloJSONRenderer}, @@ -125,9 +133,27 @@ def _setup_app(root, conf, indexer, storage, not_implemented_middleware): return app -def build_wsgi_app(): +class WerkzeugApp(object): + # NOTE(sileht): The purpose of this class is only to be used + # with werkzeug to create the app after the werkzeug + # fork gnocchi-api and avoid creation of connection of the + # storage/indexer by the main process. 
+ + def __init__(self, conf): + self.app = None + self.conf = conf + + def __call__(self, environ, start_response): + if self.app is None: + self.app = load_app(conf=self.conf) + return self.app(environ, start_response) + + +def build_server(): conf = service.prepare_service() - return load_app(conf=conf) + serving.run_simple(conf.api.host, conf.api.port, + WerkzeugApp(conf), + processes=conf.api.workers) def app_factory(global_config, **local_conf): diff --git a/gnocchi/service.py b/gnocchi/service.py index 20089b36..e480ba75 100644 --- a/gnocchi/service.py +++ b/gnocchi/service.py @@ -55,6 +55,7 @@ def prepare_service(args=None, conf=None, except NotImplementedError: default_workers = 1 + conf.set_default("workers", default_workers, group="api") conf.set_default("workers", default_workers, group="metricd") conf(args, project='gnocchi', validate_default_values=True, diff --git a/setup.cfg b/setup.cfg index eaa46621..8cf94719 100644 --- a/setup.cfg +++ b/setup.cfg @@ -108,10 +108,8 @@ gnocchi.indexer = gnocchi.aggregates = moving-average = gnocchi.aggregates.moving_stats:MovingAverage -wsgi_scripts = - gnocchi-api = gnocchi.rest.app:build_wsgi_app - console_scripts = + gnocchi-api = gnocchi.cli:api gnocchi-upgrade = gnocchi.cli:upgrade gnocchi-statsd = gnocchi.cli:statsd gnocchi-metricd = gnocchi.cli:metricd -- GitLab From 9b34440950bb4792774b25c9d0e58e7f278a11f8 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Wed, 6 Apr 2016 19:45:39 +0200 Subject: [PATCH 0154/1483] ceph: Don't fetch useless omap attributes Change-Id: I5e42a62d18b54721e65f03c1b374017ed07fd5b0 --- gnocchi/storage/ceph.py | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/gnocchi/storage/ceph.py b/gnocchi/storage/ceph.py index 63ddb17d..90f14235 100644 --- a/gnocchi/storage/ceph.py +++ b/gnocchi/storage/ceph.py @@ -183,17 +183,13 @@ class CephStorage(_carbonara.CarbonaraBasedStorage): def _list_metric_with_measures_to_process(self, block_size, full=False): with 
self._get_ioctx() as ioctx: names = self._list_object_names_to_process(ioctx) - metrics = set() if full: objs_it = names else: objs_it = itertools.islice( - names, block_size * self.partition, None) - for name in objs_it: - metrics.add(name.split("_")[1]) - if full is False and len(metrics) >= block_size: - break - return metrics + names, block_size * self.partition, + block_size * (self.partition + 1)) + return set([name.split("_")[1] for name in objs_it]) def _delete_unprocessed_measures_for_metric_id(self, metric_id): with self._get_ioctx() as ctx: -- GitLab From c52e0d6f1bf6f73f4fd0da9fedf8323f4071a908 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Wed, 6 Apr 2016 17:59:45 +0200 Subject: [PATCH 0155/1483] carbonara: add a processing speed in debug logs Change-Id: Ia6791718985222bb9d9479e476bcf0ec570497fc --- gnocchi/storage/_carbonara.py | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/gnocchi/storage/_carbonara.py b/gnocchi/storage/_carbonara.py index ebd38680..d8d1bbd2 100644 --- a/gnocchi/storage/_carbonara.py +++ b/gnocchi/storage/_carbonara.py @@ -385,12 +385,18 @@ class CarbonaraBasedStorage(storage.StorageDriver): block_size=mbs, back_window=metric.archive_policy.back_window) + # NOTE(jd) This is Python where you need such + # hack to pass a variable around a closure, + # sorry. + computed_points = {"number": 0} + def _map_add_measures(bound_timeserie): # NOTE (gordc): bound_timeserie is entire set of # unaggregated measures matching largest # granularity. 
the following takes only the points # affected by new measures for specific granularity tstamp = max(bound_timeserie.first, measures[0][0]) + computed_points['number'] = len(bound_timeserie) self._map_in_thread( self._add_measures, ((aggregation, d, metric, @@ -405,10 +411,14 @@ class CarbonaraBasedStorage(storage.StorageDriver): measures, before_truncate_callback=_map_add_measures, ignore_too_old_timestamps=True) + elapsed = sw.elapsed() + speed = ((len(agg_methods) + * len(metric.archive_policy.definition) + * computed_points['number']) / elapsed) LOG.debug( "Computed new metric %s with %d new measures " - "in %.2f seconds" - % (metric.id, len(measures), sw.elapsed())) + "in %.2f seconds (%d points/s)" + % (metric.id, len(measures), elapsed, speed)) self._store_unaggregated_timeserie(metric, ts.serialize()) -- GitLab From 1c344f0372e80aa2f23ba0a77686a0161ceab464 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Thu, 24 Mar 2016 12:15:01 +0100 Subject: [PATCH 0156/1483] Use pifpaf rather than overtest Change-Id: If5bef40d663c1a95394e6a0ee6c8c4b7703a0412 --- setup-test-env.sh | 11 ++++++++--- setup.cfg | 2 +- 2 files changed, 9 insertions(+), 4 deletions(-) diff --git a/setup-test-env.sh b/setup-test-env.sh index dbbf8297..7d7789fa 100755 --- a/setup-test-env.sh +++ b/setup-test-env.sh @@ -1,8 +1,13 @@ #!/bin/bash set -e set -x -# Activate overtest for indexer +# Activate pifpaf for indexer GNOCCHI_TEST_INDEXER_DRIVER=${GNOCCHI_TEST_INDEXER_DRIVER:-postgresql} -source $(which overtest) $GNOCCHI_TEST_INDEXER_DRIVER -export GNOCCHI_INDEXER_URL=${OVERTEST_URL/#mysql:/mysql+pymysql:} +eval `pifpaf run $GNOCCHI_TEST_INDEXER_DRIVER` +kill_pifpaf () +{ + test -n "$PIFPAF_PID" && kill "$PIFPAF_PID" +} +trap kill_pifpaf EXIT +export GNOCCHI_INDEXER_URL=${PIFPAF_URL/#mysql:/mysql+pymysql:} $* diff --git a/setup.cfg b/setup.cfg index 8cf94719..88a05cae 100644 --- a/setup.cfg +++ b/setup.cfg @@ -57,7 +57,7 @@ doc = PyYAML Jinja2 test = - overtest>=0.8.0 + pifpaf 
gabbi>=0.101.2 coverage>=3.6 fixtures -- GitLab From 0ab4672413b7ea452b374153823b84b53e12be51 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Fri, 8 Apr 2016 15:00:20 +0200 Subject: [PATCH 0157/1483] indexer: teach SQL query compiler numeric and string types Change-Id: I25dd86c1b46d07c6438bc45dadbbd7fc4734c94c Closes-Bug: #1567686 --- gnocchi/indexer/sqlalchemy.py | 7 +++++++ gnocchi/tests/test_indexer.py | 5 +++++ 2 files changed, 12 insertions(+) diff --git a/gnocchi/indexer/sqlalchemy.py b/gnocchi/indexer/sqlalchemy.py index f8ecaca4..3a8e7266 100644 --- a/gnocchi/indexer/sqlalchemy.py +++ b/gnocchi/indexer/sqlalchemy.py @@ -27,6 +27,7 @@ from oslo_db.sqlalchemy import utils as oslo_db_utils from oslo_log import log import six import sqlalchemy +from sqlalchemy import types import sqlalchemy_utils from gnocchi import exceptions @@ -809,6 +810,12 @@ class QueryTransformer(object): elif (isinstance(attr.type, sqlalchemy_utils.UUIDType) and not isinstance(value, uuid.UUID)): converter = utils.ResourceUUID + elif isinstance(attr.type, types.String): + converter = six.text_type + elif isinstance(attr.type, types.Integer): + converter = int + elif isinstance(attr.type, types.Numeric): + converter = float if converter: try: diff --git a/gnocchi/tests/test_indexer.py b/gnocchi/tests/test_indexer.py index 2f049ded..573b1dc2 100644 --- a/gnocchi/tests/test_indexer.py +++ b/gnocchi/tests/test_indexer.py @@ -713,6 +713,11 @@ class TestIndexerDriver(tests_base.TestCase): 'generic', attribute_filter={"=": {"id": "f00bar" * 50}}) + def test_list_resource_instance_flavor_id_numeric(self): + r = self.index.list_resources( + 'instance', attribute_filter={"=": {"flavor_id": 1.0}}) + self.assertEqual(0, len(r)) + def test_list_resource_weird_date(self): self.assertRaises( indexer.QueryValueError, -- GitLab From 91c022626ade35d7220e1bfbd7d19427258bc565 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ond=C5=99ej=20Nov=C3=BD?= Date: Sat, 9 Apr 2016 19:22:44 +0200 Subject: [PATCH 
0158/1483] Standards-Version is 3.9.8 now (no change) --- debian/changelog | 6 ++++++ debian/control | 2 +- 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/debian/changelog b/debian/changelog index bbef9ccf..076a375c 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +gnocchi (2.0.2-5) UNRELEASED; urgency=medium + + * Standards-Version is 3.9.8 now (no change) + + -- Ondřej Nový Sat, 09 Apr 2016 19:22:44 +0200 + gnocchi (2.0.2-4) unstable; urgency=medium * Uploading to unstable. diff --git a/debian/control b/debian/control index 628c5fd5..33057b13 100644 --- a/debian/control +++ b/debian/control @@ -67,7 +67,7 @@ Build-Depends-Indep: alembic (>= 0.7.6), python-yaml, subunit (>= 0.0.18), testrepository, -Standards-Version: 3.9.7 +Standards-Version: 3.9.8 Vcs-Browser: https://anonscm.debian.org/gitweb/?p=openstack/python-gnocchi.git Vcs-Git: https://anonscm.debian.org/git/openstack/python-gnocchi.git Homepage: https://github.com/openstack/gnocchi -- GitLab From be6703bf42f53b70466d73a87b177fda7dc8da15 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Mon, 11 Apr 2016 11:03:54 +0200 Subject: [PATCH 0159/1483] carbonara: print out the speed of measures/s Change-Id: Ida3f5c8f07e3f8ab283e4f4dca94703f415bd9c1 --- gnocchi/storage/_carbonara.py | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/gnocchi/storage/_carbonara.py b/gnocchi/storage/_carbonara.py index d8d1bbd2..5b5e1a30 100644 --- a/gnocchi/storage/_carbonara.py +++ b/gnocchi/storage/_carbonara.py @@ -412,13 +412,18 @@ class CarbonaraBasedStorage(storage.StorageDriver): before_truncate_callback=_map_add_measures, ignore_too_old_timestamps=True) elapsed = sw.elapsed() - speed = ((len(agg_methods) - * len(metric.archive_policy.definition) - * computed_points['number']) / elapsed) + number_of_operations = ( + len(agg_methods) + * len(metric.archive_policy.definition) + ) + speed = ((number_of_operations + * computed_points['number']) / elapsed) 
LOG.debug( "Computed new metric %s with %d new measures " - "in %.2f seconds (%d points/s)" - % (metric.id, len(measures), elapsed, speed)) + "in %.2f seconds (%d points/s, %d measures/s)" + % (metric.id, len(measures), elapsed, speed, + (number_of_operations * len(measures)) + / elapsed)) self._store_unaggregated_timeserie(metric, ts.serialize()) -- GitLab From f59a1ccbd61d638619deec9e7293cba207aa5d5f Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Mon, 11 Apr 2016 14:55:30 +0200 Subject: [PATCH 0160/1483] doc/ap: document default archive policies and tweak them Medium default archive policy go for one minute resolution over a day. High was way too big, reduce it a bit. Change-Id: I4ac19fdb9c85a3a14232353b4f2535153a6ded74 --- doc/source/architecture.rst | 29 ++++++++++++++++++++-- gnocchi/archive_policy.py | 12 ++++----- gnocchi/tests/gabbi/gabbits-live/live.yaml | 4 +-- 3 files changed, 35 insertions(+), 10 deletions(-) diff --git a/doc/source/architecture.rst b/doc/source/architecture.rst index aa4aee3a..151b0ffa 100644 --- a/doc/source/architecture.rst +++ b/doc/source/architecture.rst @@ -54,7 +54,7 @@ largely more scalable. Ceph also offers better consistency, and hence is the recommended driver. How to plan for Gnocchi’s storage -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +--------------------------------- Gnocchi uses a custom file format based on its library *Carbonara*. In Gnocchi, a time series is a collection of points, where a point is a given measure, or @@ -84,7 +84,7 @@ the 8 default aggregation methods (mean, min, max, sum, std, median, count, used will go up to a maximum of 8 × 4.5 MiB = 36 MiB. How to set the archive policy and granularity -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +--------------------------------------------- In Gnocchi, the archive policy is expressed in number of points. If your archive policy defines a policy of 10 points with a granularity of 1 second, @@ -109,3 +109,28 @@ policies. 
A typical low grained use case could be:: This would represent 7205 points × 17.92 = 126 KiB per aggregation method. If you use the 8 standard aggregation method, your metric will take up to 8 × 126 KiB = 0.98 MiB of disk space. + +Default archive policies +------------------------ + +By default, 3 archive policies are created using the default archive policy +list (listed in `default_aggregation_methods`, i.e. mean, min, max, sum, std, +median, count, 95pct): + +- low (maximum estimated size per metric: 5 KiB) + + * 5 minutes granularity over 1 hour + * 1 hour granularity over 1 day + * 1 day granularity over 1 month + +- medium (maximum estimated size per metric: 139 KiB) + + * 1 minute granularity over 1 day + * 1 hour granularity over 1 week + * 1 day granularity over 1 year + +- high (maximum estimated size per metric: 1 578 KiB) + + * 1 second granularity over 1 hour + * 1 minute granularity over 1 week + * 1 hour granularity over 1 year diff --git a/gnocchi/archive_policy.py b/gnocchi/archive_policy.py index e9cb723c..79217391 100644 --- a/gnocchi/archive_policy.py +++ b/gnocchi/archive_policy.py @@ -222,8 +222,8 @@ DEFAULT_ARCHIVE_POLICIES = { ), 'medium': ArchivePolicy( "medium", 0, [ - # 1 minute resolution for an hour - ArchivePolicyItem(granularity=60, points=60), + # 1 minute resolution for an day + ArchivePolicyItem(granularity=60, points=60 * 24), # 1 hour resolution for a week ArchivePolicyItem(granularity=3600, points=7 * 24), # 1 day resolution for a year @@ -232,10 +232,10 @@ DEFAULT_ARCHIVE_POLICIES = { ), 'high': ArchivePolicy( "high", 0, [ - # 1 second resolution for a day - ArchivePolicyItem(granularity=1, points=3600 * 24), - # 1 minute resolution for a month - ArchivePolicyItem(granularity=60, points=60 * 24 * 30), + # 1 second resolution for an hour + ArchivePolicyItem(granularity=1, points=3600), + # 1 minute resolution for a week + ArchivePolicyItem(granularity=60, points=60 * 24 * 7), # 1 hour resolution for a year 
ArchivePolicyItem(granularity=3600, points=365 * 24), ], diff --git a/gnocchi/tests/gabbi/gabbits-live/live.yaml b/gnocchi/tests/gabbi/gabbits-live/live.yaml index bca36cdf..3f4ef010 100644 --- a/gnocchi/tests/gabbi/gabbits-live/live.yaml +++ b/gnocchi/tests/gabbi/gabbits-live/live.yaml @@ -17,9 +17,9 @@ tests: response_headers: content-type: /application/json/ response_strings: - - '{"definition": [{"points": 86400, "timespan": "1 day, 0:00:00", "granularity": "0:00:01"}, {"points": 43200, "timespan": "30 days, 0:00:00", "granularity": "0:01:00"}, {"points": 8760, "timespan": "365 days, 0:00:00", "granularity": "1:00:00"}], "back_window": 0, "name": "high", "aggregation_methods": ["std", "count", "95pct", "min", "max", "sum", "median", "mean"]}' + - '{"definition": [{"points": 3600, "timespan": "1:00:00", "granularity": "0:00:01"}, {"points": 10080, "timespan": "7 days, 0:00:00", "granularity": "0:01:00"}, {"points": 8760, "timespan": "365 days, 0:00:00", "granularity": "1:00:00"}], "back_window": 0, "name": "high", "aggregation_methods": ["std", "count", "95pct", "min", "max", "sum", "median", "mean"]}' - '{"definition": [{"points": 12, "timespan": "1:00:00", "granularity": "0:05:00"}, {"points": 24, "timespan": "1 day, 0:00:00", "granularity": "1:00:00"}, {"points": 30, "timespan": "30 days, 0:00:00", "granularity": "1 day, 0:00:00"}], "back_window": 0, "name": "low", "aggregation_methods": ["std", "count", "95pct", "min", "max", "sum", "median", "mean"]}' - - '{"definition": [{"points": 60, "timespan": "1:00:00", "granularity": "0:01:00"}, {"points": 168, "timespan": "7 days, 0:00:00", "granularity": "1:00:00"}, {"points": 365, "timespan": "365 days, 0:00:00", "granularity": "1 day, 0:00:00"}], "back_window": 0, "name": "medium", "aggregation_methods": ["std", "count", "95pct", "min", "max", "sum", "median", "mean"]}' + - '{"definition": [{"points": 1440, "timespan": "1 day, 0:00:00", "granularity": "0:01:00"}, {"points": 168, "timespan": "7 days, 0:00:00", 
"granularity": "1:00:00"}, {"points": 365, "timespan": "365 days, 0:00:00", "granularity": "1 day, 0:00:00"}], "back_window": 0, "name": "medium", "aggregation_methods": ["std", "count", "95pct", "min", "max", "sum", "median", "mean"]}' - name: check generic resources with the default one for statsd url: /v1/resource/generic -- GitLab From e1ff53d2df7b8642df395dd42b7f8da58cbeb67b Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Tue, 5 Apr 2016 09:59:20 +0200 Subject: [PATCH 0161/1483] tests: tempest plugin This change allows to run gabbits-live scenarios with tempest. Change-Id: I518b240c8f589b4b5440a148766cb79bdb216a77 --- devstack/gate/gate_hook.sh | 5 +- devstack/gate/post_test_hook.sh | 19 +++++- gnocchi/tempest/__init__.py | 0 gnocchi/tempest/config.py | 38 ++++++++++++ gnocchi/tempest/plugin.py | 44 ++++++++++++++ gnocchi/tempest/scenario/__init__.py | 87 ++++++++++++++++++++++++++++ setup.cfg | 3 + 7 files changed, 193 insertions(+), 3 deletions(-) create mode 100644 gnocchi/tempest/__init__.py create mode 100644 gnocchi/tempest/config.py create mode 100644 gnocchi/tempest/plugin.py create mode 100644 gnocchi/tempest/scenario/__init__.py diff --git a/devstack/gate/gate_hook.sh b/devstack/gate/gate_hook.sh index 93200920..1d26ca44 100755 --- a/devstack/gate/gate_hook.sh +++ b/devstack/gate/gate_hook.sh @@ -17,7 +17,7 @@ STORAGE_DRIVER="$1" SQL_DRIVER="$2" -ENABLED_SERVICES="key,gnocchi-api,gnocchi-metricd," +ENABLED_SERVICES="key,gnocchi-api,gnocchi-metricd,tempest," # Use efficient wsgi web server DEVSTACK_LOCAL_CONFIG+=$'\nexport GNOCCHI_DEPLOY=uwsgi' @@ -25,7 +25,8 @@ DEVSTACK_LOCAL_CONFIG+=$'\nexport KEYSTONE_DEPLOY=uwsgi' export DEVSTACK_GATE_INSTALL_TESTONLY=1 export DEVSTACK_GATE_NO_SERVICES=1 -export DEVSTACK_GATE_TEMPEST=0 +export DEVSTACK_GATE_TEMPEST=1 +export DEVSTACK_GATE_TEMPEST_NOTESTS=1 export DEVSTACK_GATE_EXERCISES=0 export KEEP_LOCALRC=1 diff --git a/devstack/gate/post_test_hook.sh b/devstack/gate/post_test_hook.sh index 
6e4ab014..6fe8e139 100755 --- a/devstack/gate/post_test_hook.sh +++ b/devstack/gate/post_test_hook.sh @@ -50,7 +50,24 @@ sudo gnocchi-upgrade --create-legacy-resource-types gnocchi metric create sudo -E -H -u stack $GNOCCHI_DIR/tools/measures_injector.py --metrics 1 --batch-of-measures 2 --measures-per-batch 2 -# Run tests +# NOTE(sileht): on swift job permissions are wrong, I don't known why +sudo chown -R tempest:stack $BASE/new/tempest +sudo chown -R tempest:stack $BASE/data/tempest + +# Run tests with tempst +cd $BASE/new/tempest +set +e +sudo -H -u tempest OS_TEST_TIMEOUT=$TEMPEST_OS_TEST_TIMEOUT tox -eall-plugin -- --concurrency=$TEMPEST_CONCURRENCY gnocchi +TEMPEST_EXIT_CODE=$? +set -e +if [[ $TEMPEST_EXIT_CODE != 0 ]]; then + # Collect and parse result + generate_testr_results + exit $TEMPEST_EXIT_CODE +fi + +# Run tests with tox +cd $GNOCCHI_DIR echo "Running gnocchi functional test suite" set +e sudo -E -H -u stack tox -epy27-gate diff --git a/gnocchi/tempest/__init__.py b/gnocchi/tempest/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/gnocchi/tempest/config.py b/gnocchi/tempest/config.py new file mode 100644 index 00000000..54bf8ff9 --- /dev/null +++ b/gnocchi/tempest/config.py @@ -0,0 +1,38 @@ +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from oslo_config import cfg + + +service_available_group = cfg.OptGroup(name="service_available", + title="Available OpenStack Services") + +service_available_opts = [ + cfg.BoolOpt("gnocchi", + default=True, + help="Whether or not Gnocchi is expected to be available"), +] + +metric_group = cfg.OptGroup(name='metric', + title='Metric Service Options') + +metric_opts = [ + cfg.StrOpt('catalog_type', + default='metric', + help="Catalog type of the Metric service."), + cfg.StrOpt('endpoint_type', + default='publicURL', + choices=['public', 'admin', 'internal', + 'publicURL', 'adminURL', 'internalURL'], + help="The endpoint type to use for the metric service."), +] diff --git a/gnocchi/tempest/plugin.py b/gnocchi/tempest/plugin.py new file mode 100644 index 00000000..b0a9fd82 --- /dev/null +++ b/gnocchi/tempest/plugin.py @@ -0,0 +1,44 @@ +# -*- encoding: utf-8 -*- +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from __future__ import absolute_import + +import os + +from tempest import config +from tempest.test_discover import plugins + +import gnocchi +from gnocchi.tempest import config as tempest_config + + +class GnocchiTempestPlugin(plugins.TempestPlugin): + def load_tests(self): + base_path = os.path.split(os.path.dirname( + os.path.abspath(gnocchi.__file__)))[0] + test_dir = "gnocchi/tempest" + full_test_dir = os.path.join(base_path, test_dir) + return full_test_dir, base_path + + def register_opts(self, conf): + config.register_opt_group(conf, + tempest_config.service_available_group, + tempest_config.service_available_opts) + config.register_opt_group(conf, + tempest_config.metric_group, + tempest_config.metric_opts) + + def get_opt_lists(self): + return [(tempest_config.metric_group.name, + tempest_config.metric_opts)] diff --git a/gnocchi/tempest/scenario/__init__.py b/gnocchi/tempest/scenario/__init__.py new file mode 100644 index 00000000..7019963e --- /dev/null +++ b/gnocchi/tempest/scenario/__init__.py @@ -0,0 +1,87 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from __future__ import absolute_import + +import os +import unittest + +from gabbi import driver +import six.moves.urllib.parse as urlparse +from tempest import config +import tempest.test + +CONF = config.CONF + + +class GnocchiGabbiTest(tempest.test.BaseTestCase): + credentials = ['admin'] + + @classmethod + def skip_checks(cls): + super(GnocchiGabbiTest, cls).skip_checks() + if not CONF.service_available.gnocchi: + raise cls.skipException("Gnocchi support is required") + + @classmethod + def resource_setup(cls): + super(GnocchiGabbiTest, cls).resource_setup() + url, token = cls._get_gnocchi_auth() + parsed_url = urlparse.urlsplit(url) + prefix = parsed_url.path.rstrip('/') # turn it into a prefix + port = 443 if parsed_url.scheme == 'https' else 80 + host = parsed_url.hostname + if parsed_url.port: + port = parsed_url.port + + test_dir = os.path.join(os.path.dirname(__file__), '..', '..', + 'tests', 'gabbi', 'gabbits-live') + cls.tests = driver.build_tests( + test_dir, unittest.TestLoader(), + host=host, port=port, prefix=prefix, + test_loader_name='tempest.scenario.gnocchi.test') + + os.environ["GNOCCHI_SERVICE_TOKEN"] = token + + @classmethod + def clear_credentials(cls): + # FIXME(sileht): We don't want the token to be invalided, but + # for some obcurs reason, clear_credentials is called before/during run + # So, make the one used by tearDropClass a dump, and call it manually + # in run() + pass + + def run(self, result=None): + self.setUp() + try: + self.tests.run(result) + finally: + super(GnocchiGabbiTest, self).clear_credentials() + self.tearDown() + + @classmethod + def _get_gnocchi_auth(cls): + endpoint_type = CONF.metric.endpoint_type + if not endpoint_type.endswith("URL"): + endpoint_type += "URL" + + auth = cls.os_admin.auth_provider.get_auth() + endpoints = [e for e in auth[1]['serviceCatalog'] + if e['type'] == CONF.metric.catalog_type] + if not endpoints: + raise Exception("%s endpoint not found" % CONF.metric.catalog_type) + return 
endpoints[0]['endpoints'][0][endpoint_type], auth[0] + + def test_fake(self): + # NOTE(sileht): A fake test is needed to have the class loaded + # by the test runner + pass diff --git a/setup.cfg b/setup.cfg index 88a05cae..02146157 100644 --- a/setup.cfg +++ b/setup.cfg @@ -118,6 +118,9 @@ console_scripts = oslo.config.opts = gnocchi = gnocchi.opts:list_opts +tempest.test_plugins = + gnocchi_tests = gnocchi.tempest.plugin:GnocchiTempestPlugin + [build_sphinx] all_files = 1 build-dir = doc/build -- GitLab From 2d3a357beda3060690ae47aa076a47d3b3c0abb7 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Tue, 12 Apr 2016 15:13:46 +0200 Subject: [PATCH 0162/1483] Fix dependency from tempest-lib to os-testr We only need os-testr for subunit-trace Change-Id: Id99348d5ce14d59b6693829e9c0c9dcaba70d18a --- setup.cfg | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.cfg b/setup.cfg index 88a05cae..c3421c64 100644 --- a/setup.cfg +++ b/setup.cfg @@ -64,7 +64,7 @@ test = mock oslotest python-subunit>=0.0.18 - tempest-lib>=0.2.0 + os-testr testrepository testscenarios testtools>=0.9.38 -- GitLab From 314b110d93f613e4491b4d5aefc090d3e3fa02fd Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Tue, 12 Apr 2016 18:17:12 +0200 Subject: [PATCH 0163/1483] rest: fix typo in metric list filtering for non-admin Thanks Sylvain Afchain! 
Change-Id: Icda5e64e5937aae95e822c4b3dfbf54228a1df6a --- gnocchi/rest/__init__.py | 2 +- gnocchi/tests/gabbi/gabbits/metric.yaml | 6 ++++++ 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/gnocchi/rest/__init__.py b/gnocchi/rest/__init__.py index d9163ffe..4c140e44 100644 --- a/gnocchi/rest/__init__.py +++ b/gnocchi/rest/__init__.py @@ -660,7 +660,7 @@ class MetricsController(rest.RestController): project_id = kwargs.get('project_id') attr_filter = {} if user_id is not None: - attr_filter['creater_by_user_id'] = user_id + attr_filter['created_by_user_id'] = user_id if project_id is not None: attr_filter['created_by_project_id'] = project_id return pecan.request.indexer.list_metrics(**attr_filter) diff --git a/gnocchi/tests/gabbi/gabbits/metric.yaml b/gnocchi/tests/gabbi/gabbits/metric.yaml index cdfc6fce..8ee0b490 100644 --- a/gnocchi/tests/gabbi/gabbits/metric.yaml +++ b/gnocchi/tests/gabbi/gabbits/metric.yaml @@ -203,6 +203,12 @@ tests: value: 43.1 status: 404 + - name: get metric list for authenticated user + request_headers: + x-user-id: foo + x-project-id: bar + url: /v1/metric + - name: get metric list url: /v1/metric status: 200 -- GitLab From d784fa37ddbd3b3bbc8d867190eea1a0c8d025d1 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Wed, 13 Apr 2016 10:29:15 +0200 Subject: [PATCH 0164/1483] Remove unused requirement PrettyTable Change-Id: Iad3f0d72017c6aa2df4336998803439c2a2c47d5 Signed-off-by: Julien Danjou --- requirements.txt | 1 - 1 file changed, 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 306fd64c..27e85c2a 100644 --- a/requirements.txt +++ b/requirements.txt @@ -20,4 +20,3 @@ retrying WebOb>=1.4.1 Paste PasteDeploy -prettytable -- GitLab From 69f7819f1f9fb14abda94204cd97096ac66d8df6 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Wed, 13 Apr 2016 10:57:33 +0200 Subject: [PATCH 0165/1483] Don't encode exception message in ascii Closes-bug: #1569749 Change-Id: I7698e4d73a41325fb34998166317b250afd7afd3 --- 
gnocchi/indexer/__init__.py | 13 ++++++------- gnocchi/tests/gabbi/gabbits-live/live.yaml | 5 +++++ gnocchi/tests/gabbi/gabbits/archive.yaml | 16 ++++++++++++++++ gnocchi/tests/gabbi/gabbits/archive_rule.yaml | 10 ++++++++++ gnocchi/tests/gabbi/gabbits/resource_type.yaml | 9 +++++++++ 5 files changed, 46 insertions(+), 7 deletions(-) diff --git a/gnocchi/indexer/__init__.py b/gnocchi/indexer/__init__.py index 761794a4..97cc9eca 100644 --- a/gnocchi/indexer/__init__.py +++ b/gnocchi/indexer/__init__.py @@ -87,7 +87,7 @@ class NoSuchResourceType(IndexerException): """Error raised when the resource type is unknown.""" def __init__(self, type): super(NoSuchResourceType, self).__init__( - "Resource type %s does not exist" % str(type)) + "Resource type %s does not exist" % type) self.type = type @@ -95,7 +95,7 @@ class NoSuchMetric(IndexerException): """Error raised when a metric does not exist.""" def __init__(self, metric): super(NoSuchMetric, self).__init__("Metric %s does not exist" % - str(metric)) + metric) self.metric = metric @@ -103,7 +103,7 @@ class NoSuchResource(IndexerException): """Error raised when a resource does not exist.""" def __init__(self, resource): super(NoSuchResource, self).__init__("Resource %s does not exist" % - str(resource)) + resource) self.resource = resource @@ -111,8 +111,7 @@ class NoSuchArchivePolicy(IndexerException): """Error raised when an archive policy does not exist.""" def __init__(self, archive_policy): super(NoSuchArchivePolicy, self).__init__( - "Archive policy %s does not exist" % - str(archive_policy)) + "Archive policy %s does not exist" % archive_policy) self.archive_policy = archive_policy @@ -137,7 +136,7 @@ class NoSuchArchivePolicyRule(IndexerException): def __init__(self, archive_policy_rule): super(NoSuchArchivePolicyRule, self).__init__( "Archive policy rule %s does not exist" % - str(archive_policy_rule)) + archive_policy_rule) self.archive_policy_rule = archive_policy_rule @@ -146,7 +145,7 @@ class 
NoArchivePolicyRuleMatch(IndexerException): def __init__(self, metric_name): super(NoArchivePolicyRuleMatch, self).__init__( "No Archive policy rule found for metric %s" % - str(metric_name)) + metric_name) self.metric_name = metric_name diff --git a/gnocchi/tests/gabbi/gabbits-live/live.yaml b/gnocchi/tests/gabbi/gabbits-live/live.yaml index 3f4ef010..9c77e452 100644 --- a/gnocchi/tests/gabbi/gabbits-live/live.yaml +++ b/gnocchi/tests/gabbi/gabbits-live/live.yaml @@ -56,6 +56,11 @@ tests: method: DELETE status: 204 + - name: delete again unicode archive policy + url: /v1/archive_policy/%E2%9C%94%C3%A9%C3%B1%E2%98%83 + method: DELETE + status: 404 + - name: post instance resource url: /v1/resource/instance method: post diff --git a/gnocchi/tests/gabbi/gabbits/archive.yaml b/gnocchi/tests/gabbi/gabbits/archive.yaml index 36e22b1d..e25bf865 100644 --- a/gnocchi/tests/gabbi/gabbits/archive.yaml +++ b/gnocchi/tests/gabbi/gabbits/archive.yaml @@ -319,6 +319,22 @@ tests: response_strings: - Archive policy grandiose does not exist + - name: delete archive utf8 + url: /v1/archive_policy/%E2%9C%94%C3%A9%C3%B1%E2%98%83 + method: DELETE + request_headers: + x-roles: admin + status: 204 + + - name: delete missing archive utf8 again + url: /v1/archive_policy/%E2%9C%94%C3%A9%C3%B1%E2%98%83 + method: DELETE + request_headers: + x-roles: admin + status: 404 + response_strings: + - Archive policy ✔éñ☃ does not exist + # Add metric using the policy and then be unable to delete policy - name: create metric diff --git a/gnocchi/tests/gabbi/gabbits/archive_rule.yaml b/gnocchi/tests/gabbi/gabbits/archive_rule.yaml index 30323e3a..a0eebdd3 100644 --- a/gnocchi/tests/gabbi/gabbits/archive_rule.yaml +++ b/gnocchi/tests/gabbi/gabbits/archive_rule.yaml @@ -196,3 +196,13 @@ tests: request_headers: x-roles: admin status: 404 + + - name: delete missing archive policy rule utf8 + url: /v1/archive_policy_rule/%E2%9C%94%C3%A9%C3%B1%E2%98%83 + method: DELETE + request_headers: + x-roles: admin + 
status: 404 + response_strings: + - Archive policy rule ✔éñ☃ does not exist + diff --git a/gnocchi/tests/gabbi/gabbits/resource_type.yaml b/gnocchi/tests/gabbi/gabbits/resource_type.yaml index 66d2729f..7a1bdeb5 100644 --- a/gnocchi/tests/gabbi/gabbits/resource_type.yaml +++ b/gnocchi/tests/gabbi/gabbits/resource_type.yaml @@ -324,6 +324,15 @@ tests: url: /v1/resource_type/my_custom_resource status: 404 + - name: delete missing custom resource type utf8 + url: /v1/resource_type/%E2%9C%94%C3%A9%C3%B1%E2%98%83 + method: DELETE + request_headers: + x-roles: admin + status: 404 + response_strings: + - Resource type ✔éñ☃ does not exist + # Can we readd and delete the same resource type again - name: post resource type again -- GitLab From 56d3e670d0c5500549edcb1b8fadb9d4b4273bc5 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Wed, 13 Apr 2016 16:41:43 +0200 Subject: [PATCH 0166/1483] doc: rely on oslo-config-generator to generate the config file It should be simpler to ship it in etc and use directly. Change-Id: Ia1c9335d65053f5a8f576e433b0caa07ad9690b6 --- doc/source/configuration.rst | 14 +------------- .../gnocchi/gnocchi-config-generator.conf | 0 gnocchi/genconfig.py | 2 +- tox.ini | 4 ++-- 4 files changed, 4 insertions(+), 16 deletions(-) rename gnocchi-config-generator.conf => etc/gnocchi/gnocchi-config-generator.conf (100%) diff --git a/doc/source/configuration.rst b/doc/source/configuration.rst index 456f1810..2730f713 100644 --- a/doc/source/configuration.rst +++ b/doc/source/configuration.rst @@ -10,19 +10,7 @@ easily created by running: :: - tox -e genconfig - -This command will create an `etc/gnocchi/gnocchi.conf` file which can be used -as a base for the default configuration file at `/etc/gnocchi/gnocchi.conf`. If -you're using _devstack_, this file is already generated and put in place. 
- -If you installed Gnocchi using pip, you can create a sample `gnocchi.conf` file -using the following commands: - -:: - - curl -O "https://raw.githubusercontent.com/openstack/gnocchi/master/gnocchi-config-generator.conf" - oslo-config-generator --config-file=gnocchi-config-generator.conf --output-file=gnocchi.conf + oslo-config-generator --config-file=/etc/gnocchi/gnocchi-config-generator.conf --output-file=/etc/gnocchi/gnocchi.conf The configuration file should be pretty explicit, but here are some of the base options you want to change and configure: diff --git a/gnocchi-config-generator.conf b/etc/gnocchi/gnocchi-config-generator.conf similarity index 100% rename from gnocchi-config-generator.conf rename to etc/gnocchi/gnocchi-config-generator.conf diff --git a/gnocchi/genconfig.py b/gnocchi/genconfig.py index ec4491e9..84a2feb9 100644 --- a/gnocchi/genconfig.py +++ b/gnocchi/genconfig.py @@ -19,6 +19,6 @@ def prehook(cmd): try: from oslo_config import generator generator.main(['--config-file', - 'gnocchi-config-generator.conf']) + 'etc/gnocchi/gnocchi-config-generator.conf']) except Exception as e: print("Unable to build sample configuration file: %s" % e) diff --git a/tox.ini b/tox.ini index cb85323f..a5153c7f 100644 --- a/tox.ini +++ b/tox.ini @@ -22,7 +22,7 @@ setenv = commands = doc8 --ignore-path doc/source/rest.rst doc/source - oslo-config-generator --config-file=gnocchi-config-generator.conf + oslo-config-generator --config-file=etc/gnocchi/gnocchi-config-generator.conf {toxinidir}/run-tests.sh {posargs} [testenv:bashate] @@ -63,7 +63,7 @@ show-source = true [testenv:genconfig] deps = .[mysql,postgresql,test,file,ceph,swift] -commands = oslo-config-generator --config-file=gnocchi-config-generator.conf +commands = oslo-config-generator --config-file=etc/gnocchi/gnocchi-config-generator.conf [testenv:docs] # This does not work, see: https://bitbucket.org/hpk42/tox/issues/302 -- GitLab From c4d7924293628d0814068589c38573af05ac342d Mon Sep 17 00:00:00 2001 
From: Julien Danjou Date: Wed, 13 Apr 2016 16:38:08 +0200 Subject: [PATCH 0167/1483] tests: fix a fuzzy test for archive policy rules The test does not run in isolation of other tests, so there's a possibility that there's more than 3 rules. Change-Id: I028fbec01b6a1c33db9fc2fc48ef6ab76c269e2a --- gnocchi/tests/test_indexer.py | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) diff --git a/gnocchi/tests/test_indexer.py b/gnocchi/tests/test_indexer.py index 573b1dc2..d58a9e1b 100644 --- a/gnocchi/tests/test_indexer.py +++ b/gnocchi/tests/test_indexer.py @@ -77,10 +77,17 @@ class TestIndexerDriver(tests_base.TestCase): self.index.create_archive_policy_rule('rule2', 'abc.xyz.*', name) self.index.create_archive_policy_rule('rule3', 'abc.xyz', name) rules = self.index.list_archive_policy_rules() - self.assertEqual(3, len(rules)) - self.assertEqual('abc.xyz.*', rules[0]['metric_pattern']) - self.assertEqual('abc.xyz', rules[1]['metric_pattern']) - self.assertEqual('abc.*', rules[2]['metric_pattern']) + # NOTE(jd) The test is not isolated, there might be more than 3 rules + found = 0 + for r in rules: + if r['metric_pattern'] == 'abc.xyz.*': + found = 1 + if found == 1 and r['metric_pattern'] == 'abc.xyz': + found = 2 + if found == 2 and r['metric_pattern'] == 'abc.*': + break + else: + self.fail("Metric patterns are not ordered") def test_create_metric(self): r1 = uuid.uuid4() -- GitLab From 6a5c2714a6d120c75ed65ae8d8d686ae692f7cb7 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Thu, 14 Apr 2016 18:00:59 +0200 Subject: [PATCH 0168/1483] Fix broken ceilometer resources migration script Migration script 828c16f70cce have a bug, the following command doesn't insert anything: op.execute(resource_type.insert().from_select( ['name'], sa.select([resource.c.type]).distinct())) So all migration scripts that manipulates resource_type next, do not update upgrade the resource_type for legacy ceilometer resources. This change fixes that. 
Change-Id: I5f8c47721c313eea3936132f3140793ec80ef3b1 --- ...beec0b0_migrate_legacy_resources_to_db2.py | 59 +++++++++++++++++++ 1 file changed, 59 insertions(+) create mode 100644 gnocchi/indexer/alembic/versions/ffc7bbeec0b0_migrate_legacy_resources_to_db2.py diff --git a/gnocchi/indexer/alembic/versions/ffc7bbeec0b0_migrate_legacy_resources_to_db2.py b/gnocchi/indexer/alembic/versions/ffc7bbeec0b0_migrate_legacy_resources_to_db2.py new file mode 100644 index 00000000..c6493600 --- /dev/null +++ b/gnocchi/indexer/alembic/versions/ffc7bbeec0b0_migrate_legacy_resources_to_db2.py @@ -0,0 +1,59 @@ +# Copyright 2016 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""migrate_legacy_resources_to_db2 + +Revision ID: ffc7bbeec0b0 +Revises: 8f376189b9eb +Create Date: 2016-04-14 15:57:13.072128 + +""" +import json + +from alembic import op +import sqlalchemy as sa + +from gnocchi.indexer import sqlalchemy_legacy_resources as legacy + +# revision identifiers, used by Alembic. 
+revision = 'ffc7bbeec0b0' +down_revision = '8f376189b9eb' +branch_labels = None +depends_on = None + + +def upgrade(): + bind = op.get_bind() + + resource_type = sa.Table( + 'resource_type', sa.MetaData(), + sa.Column('name', sa.String(255), nullable=False), + sa.Column('tablename', sa.String(18), nullable=False), + sa.Column('attributes', sa.Text, nullable=False) + ) + + resource_type_names = [rt.name for rt in + list(bind.execute(resource_type.select()))] + + for name, attributes in legacy.ceilometer_resources.items(): + if name in resource_type_names: + continue + tablename = legacy.ceilometer_tablenames.get(name, name) + text_attributes = json.dumps(attributes) + op.execute(resource_type.insert().values({ + resource_type.c.attributes: text_attributes, + resource_type.c.name: name, + resource_type.c.tablename: tablename, + })) -- GitLab From 7adc938b64124a869be65a97ae2437a4c42d7dc4 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Fri, 15 Apr 2016 16:41:52 +0200 Subject: [PATCH 0169/1483] doc: remove legacy resource types listing There's no need to document that as we deprecated them and they are anyway disoverable via the new API. Change-Id: Id2fc023e0c2d06302d9861fa16673798debd9afa --- doc/source/index.rst | 1 - doc/source/resource_types.rst | 142 ---------------------------------- 2 files changed, 143 deletions(-) delete mode 100644 doc/source/resource_types.rst diff --git a/doc/source/index.rst b/doc/source/index.rst index 8d288dc6..b4b0549a 100644 --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -66,6 +66,5 @@ Documentation rest statsd grafana - resource_types .. _`OpenStack`: http://openstack.org diff --git a/doc/source/resource_types.rst b/doc/source/resource_types.rst deleted file mode 100644 index 17073bf0..00000000 --- a/doc/source/resource_types.rst +++ /dev/null @@ -1,142 +0,0 @@ -================ - Resource Types -================ - -Gnocchi offers different resource types to manage your resources. 
Each resource -type has strongly typed attributes. All resource types are subtypes of the -`generic` type. - -Immutable attributes are attributes that cannot be modified after the resource -has been created. - - -generic -======= - -+------------+----------------+-----------+ -| Attribute | Type | Immutable | -+============+================+===========+ -| user_id | UUID | Yes | -+------------+----------------+-----------+ -| project_id | UUID | Yes | -+------------+----------------+-----------+ -| started_at | Timestamp | Yes | -+------------+----------------+-----------+ -| ended_at | Timestamp | No | -+------------+----------------+-----------+ -| type | String | Yes | -+------------+----------------+-----------+ -| metrics | {String: UUID} | No | -+------------+----------------+-----------+ - - -ceph_account -============ - -No specific attributes. - - -identity -======== - -No specific attributes. - - -image -===== - -+------------------+---------+-----------+ -| Attribute | Type | Immutable | -+==================+=========+===========+ -| name | String | No | -+------------------+---------+-----------+ -| container_format | String | No | -+------------------+---------+-----------+ -| disk_format | String | No | -+------------------+---------+-----------+ - - -instance -======== - -+--------------+---------+-----------+ -| Attribute | Type | Immutable | -+==============+=========+===========+ -| flavor_id | String | No | -+--------------+---------+-----------+ -| image_ref | String | No | -+--------------+---------+-----------+ -| host | String | No | -+--------------+---------+-----------+ -| display_name | String | No | -+--------------+---------+-----------+ -| server_group | String | No | -+--------------+---------+-----------+ - - -ipmi -==== - -No specific attributes. - - -network -======= - -No specific attributes. - - -stack -===== - -No specific attributes. - - -swift_account -============= - -No specific attributes. 
- - -volume -====== - -+--------------+---------+-----------+ -| Attribute | Type | Immutable | -+==============+=========+===========+ -| display_name | String | No | -+--------------+---------+-----------+ - - -host -==== - -+--------------+---------+-----------+ -| Attribute | Type | Immutable | -+==============+=========+===========+ -| host_name | String | No | -+--------------+---------+-----------+ - - -host_disk -========= - -+--------------+---------+-----------+ -| Attribute | Type | Immutable | -+==============+=========+===========+ -| host_name | String | No | -+--------------+---------+-----------+ -| device_name | String | No | -+------------------------------------+ - - -host_network_interface -====================== - -+--------------+---------+-----------+ -| Attribute | Type | Immutable | -+==============+=========+===========+ -| host_name | String | No | -+--------------+---------+-----------+ -| device_name | String | No | -+------------------------------------+ -- GitLab From fe3c93f91ccf30bb43bfd346cd78a98f88605ecb Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Fri, 15 Apr 2016 16:42:56 +0200 Subject: [PATCH 0170/1483] doc: add resource history in features Change-Id: I270a97ca39f09bbc3287617b0d84147294230054 Signed-off-by: Julien Danjou --- doc/source/index.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/doc/source/index.rst b/doc/source/index.rst index 8d288dc6..ab78e766 100644 --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -46,6 +46,7 @@ Key Features - Archiving policy - Metric value search - Structured resources +- Resource history - Queryable resource indexer - Multi-tenant - Grafana support -- GitLab From e7340f9ee4813c82f93ce0f0a076725f9e791529 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Fri, 15 Apr 2016 08:43:39 +0200 Subject: [PATCH 0171/1483] Fix foreignkey names of host/host_history table The migration a54c57ada3f5_removes_useless_indexes.py doesn't set the foreign key names like sqlalchemy_base.py. 
This change fixes that. Change-Id: I4d4c71e3462ee5581aab703592c225498102d6cd --- .../ed9c6ddc5c35_fix_host_foreign_key.py | 53 +++++++++++++++++++ .../indexer/sqlalchemy/test_migrations.py | 16 ------ 2 files changed, 53 insertions(+), 16 deletions(-) create mode 100644 gnocchi/indexer/alembic/versions/ed9c6ddc5c35_fix_host_foreign_key.py diff --git a/gnocchi/indexer/alembic/versions/ed9c6ddc5c35_fix_host_foreign_key.py b/gnocchi/indexer/alembic/versions/ed9c6ddc5c35_fix_host_foreign_key.py new file mode 100644 index 00000000..e5cfdd02 --- /dev/null +++ b/gnocchi/indexer/alembic/versions/ed9c6ddc5c35_fix_host_foreign_key.py @@ -0,0 +1,53 @@ +# Copyright 2016 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""fix_host_foreign_key + +Revision ID: ed9c6ddc5c35 +Revises: ffc7bbeec0b0 +Create Date: 2016-04-15 06:25:34.649934 + +""" + +from alembic import op +from sqlalchemy import inspect + +# revision identifiers, used by Alembic. 
+revision = 'ed9c6ddc5c35' +down_revision = 'ffc7bbeec0b0' +branch_labels = None +depends_on = None + + +def upgrade(): + conn = op.get_bind() + + insp = inspect(conn) + fk_names = [fk['name'] for fk in insp.get_foreign_keys('host')] + if ("fk_hypervisor_id_resource_id" not in fk_names and + "fk_host_id_resource_id" in fk_names): + # NOTE(sileht): we are already good, the BD have been created from + # scratch after "a54c57ada3f5" + return + + op.drop_constraint("fk_hypervisor_id_resource_id", "host", + type_="foreignkey") + op.drop_constraint("fk_hypervisor_history_resource_history_revision", + "host_history", type_="foreignkey") + op.create_foreign_key("fk_host_id_resource_id", "host", "resource", + ["id"], ["id"], ondelete="CASCADE") + op.create_foreign_key("fk_host_history_resource_history_revision", + "host_history", "resource_history", + ["revision"], ["revision"], ondelete="CASCADE") diff --git a/gnocchi/tests/indexer/sqlalchemy/test_migrations.py b/gnocchi/tests/indexer/sqlalchemy/test_migrations.py index f456394e..63f22f47 100644 --- a/gnocchi/tests/indexer/sqlalchemy/test_migrations.py +++ b/gnocchi/tests/indexer/sqlalchemy/test_migrations.py @@ -17,7 +17,6 @@ import abc import mock from oslo_db.sqlalchemy import test_migrations import six -import sqlalchemy from gnocchi.indexer import sqlalchemy_base from gnocchi.tests import base @@ -48,18 +47,3 @@ class ModelsMigrationsSync( # NOTE(jd) Nothing to do here as setUp() in the base class is already # creating table using upgrade pass - - @staticmethod - def filter_metadata_diff(diff): - new_diff = [] - for line in diff: - item = line[1] - # NOTE(sileht): skip resource types created dynamically - if (isinstance(item, sqlalchemy.Table) - and item.name.startswith("rt_")): - continue - elif (isinstance(item, sqlalchemy.Index) - and item.name.startswith("ix_rt_")): - continue - new_diff.append(line) - return new_diff -- GitLab From 3f71414cbfffd5f14b5a1306234b59d77ac9f9e2 Mon Sep 17 00:00:00 2001 From: Mehdi 
Abaakouk Date: Wed, 13 Apr 2016 11:23:43 +0200 Subject: [PATCH 0172/1483] Reduce length of some foreign keys Mysql have many stuffs limited in size, like tablename, foreign key name, error message. In Gnocchi we use very explicit foreign key name, and we use error message detect foreign key violation (with oslo.db helper). But mysql error message length are limited and message got truncated. Then oslo.db can't translated the error into DBReferenceError exception because the regex don't match. Example of message truncated with old previous foreign key naming: 'Cannot delete or update a parent row: a foreign key constraint fails (`test92ded636cd20454db48bd5ea2634cc3e`.`archive_policy_rule`, CONSTRAINT `fk_archive_policy_rule_archive_policy_name_archive_policy_name` FOREIGN KEY (`archive_policy_name`) REFERENCES `archiv)' Note REFERENCES field is truncated to "`archiv" So this change renames some foreign key name to make them shorter and got more chance to not get the error message truncated with mysql. This also removes the previous limitation of resource_type.tablename col. The tablename can now have the full uuid inside. Change-Id: Ib40918ab325bc5315c762a0fe145553abd14550c --- .../34c517bcc2dd_shorter_foreign_key.py | 91 +++++++++++++++++++ gnocchi/indexer/sqlalchemy.py | 15 ++- gnocchi/indexer/sqlalchemy_base.py | 21 +++-- 3 files changed, 111 insertions(+), 16 deletions(-) create mode 100644 gnocchi/indexer/alembic/versions/34c517bcc2dd_shorter_foreign_key.py diff --git a/gnocchi/indexer/alembic/versions/34c517bcc2dd_shorter_foreign_key.py b/gnocchi/indexer/alembic/versions/34c517bcc2dd_shorter_foreign_key.py new file mode 100644 index 00000000..f7a4a61a --- /dev/null +++ b/gnocchi/indexer/alembic/versions/34c517bcc2dd_shorter_foreign_key.py @@ -0,0 +1,91 @@ +# Copyright 2016 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""shorter_foreign_key + +Revision ID: 34c517bcc2dd +Revises: ed9c6ddc5c35 +Create Date: 2016-04-13 16:58:42.536431 + +""" + +from alembic import op +import sqlalchemy + +# revision identifiers, used by Alembic. +revision = '34c517bcc2dd' +down_revision = 'ed9c6ddc5c35' +branch_labels = None +depends_on = None + + +resource_type_helper = sqlalchemy.Table( + 'resource_type', + sqlalchemy.MetaData(), + sqlalchemy.Column('tablename', sqlalchemy.String(18), nullable=False) +) + +to_rename = [ + ('fk_metric_archive_policy_name_archive_policy_name', + 'fk_metric_ap_name_ap_name', + 'archive_policy', 'name', + 'metric', 'archive_policy_name', + "RESTRICT"), + ('fk_resource_history_resource_type_name', + 'fk_rh_resource_type_name', + 'resource_type', 'name', 'resource_history', 'type', + "RESTRICT"), + ('fk_resource_history_id_resource_id', + 'fk_rh_id_resource_id', + 'resource', 'id', 'resource_history', 'id', + "CASCADE"), + ('fk_archive_policy_rule_archive_policy_name_archive_policy_name', + 'fk_apr_ap_name_ap_name', + 'archive_policy', 'name', 'archive_policy_rule', 'archive_policy_name', + "RESTRICT") +] + + +def upgrade(): + connection = op.get_bind() + + insp = sqlalchemy.inspect(connection) + + op.alter_column("resource_type", "tablename", + type_=sqlalchemy.String(35), + existing_type=sqlalchemy.String(18), nullable=False) + + for rt in connection.execute(resource_type_helper.select()): + if rt.tablename == "generic": + continue + + fk_names = [fk['name'] for fk in insp.get_foreign_keys("%s_history" % + rt.tablename)] + fk_old = 
("fk_%s_history_resource_history_revision" % + rt.tablename) + if fk_old not in fk_names: + # The table have been created from scratch recently + fk_old = ("fk_%s_history_revision_resource_history_revision" % + rt.tablename) + + fk_new = "fk_%s_h_revision_rh_revision" % rt.tablename + to_rename.append((fk_old, fk_new, 'resource_history', 'revision', + "%s_history" % rt.tablename, 'revision', 'CASCADE')) + + for (fk_old, fk_new, src_table, src_col, dst_table, dst_col, ondelete + ) in to_rename: + op.drop_constraint(fk_old, dst_table, type_="foreignkey") + op.create_foreign_key(fk_new, dst_table, src_table, + [dst_col], [src_col], ondelete=ondelete) diff --git a/gnocchi/indexer/sqlalchemy.py b/gnocchi/indexer/sqlalchemy.py index 3a8e7266..76f4e9dc 100644 --- a/gnocchi/indexer/sqlalchemy.py +++ b/gnocchi/indexer/sqlalchemy.py @@ -235,9 +235,9 @@ class SQLAlchemyIndexer(indexer.IndexerDriver): # NOTE(sileht): mysql have a stupid and small length limitation on the # foreign key and index name, so we can't use the resource type name as # tablename, the limit is 64. The longest name we have is - # fk__history_revision_resource_history_revision, - # so 64 - 46 = 18 - tablename = "rt_%s" % uuid.uuid4().hex[:15] + # fk__h_revision_rh_revision, + # so 64 - 26 = 38 and 3 chars for rt_, 35 chars, uuid is 32, it's cool. 
+ tablename = "rt_%s" % uuid.uuid4().hex resource_type = ResourceType(name=resource_type.name, tablename=tablename, attributes=resource_type.attributes) @@ -291,7 +291,7 @@ class SQLAlchemyIndexer(indexer.IndexerDriver): except exception.DBReferenceError as e: if (e.constraint in [ 'fk_resource_resource_type_name', - 'fk_resource_history_resource_type_name']): + 'fk_rh_resource_type_name']): raise indexer.ResourceTypeInUse(name) raise @@ -318,8 +318,7 @@ class SQLAlchemyIndexer(indexer.IndexerDriver): ArchivePolicy.name == name).delete() == 0: raise indexer.NoSuchArchivePolicy(name) except exception.DBReferenceError as e: - if (e.constraint == - 'fk_metric_archive_policy_name_archive_policy_name'): + if (e.constraint == "fk_metric_ap_name_ap_name"): raise indexer.ArchivePolicyInUse(name) raise @@ -380,7 +379,7 @@ class SQLAlchemyIndexer(indexer.IndexerDriver): session.add(m) except exception.DBReferenceError as e: if (e.constraint == - 'fk_metric_archive_policy_name_archive_policy_name'): + 'fk_metric_ap_name_ap_name'): raise indexer.NoSuchArchivePolicy(archive_policy_name) raise return m @@ -547,7 +546,7 @@ class SQLAlchemyIndexer(indexer.IndexerDriver): raise indexer.NamedMetricAlreadyExists(name) except exception.DBReferenceError as e: if (e.constraint == - 'fk_metric_archive_policy_name_archive_policy_name'): + 'fk_metric_ap_name_ap_name'): raise indexer.NoSuchArchivePolicy(ap_name) raise diff --git a/gnocchi/indexer/sqlalchemy_base.py b/gnocchi/indexer/sqlalchemy_base.py index f59122b1..25115ec4 100644 --- a/gnocchi/indexer/sqlalchemy_base.py +++ b/gnocchi/indexer/sqlalchemy_base.py @@ -148,7 +148,7 @@ class Metric(Base, GnocchiBase, storage.Metric): sqlalchemy.ForeignKey( 'archive_policy.name', ondelete="RESTRICT", - name="fk_metric_archive_policy_name_archive_policy_name"), + name="fk_metric_ap_name_ap_name"), nullable=False) archive_policy = sqlalchemy.orm.relationship(ArchivePolicy, lazy="joined") created_by_user_id = sqlalchemy.Column( @@ -238,7 +238,7 @@ 
class ResourceType(Base, GnocchiBase, resource_type.ResourceType): name = sqlalchemy.Column(sqlalchemy.String(255), primary_key=True, nullable=False) - tablename = sqlalchemy.Column(sqlalchemy.String(18), nullable=False) + tablename = sqlalchemy.Column(sqlalchemy.String(35), nullable=False) attributes = sqlalchemy.Column(ResourceTypeAttributes) def to_baseclass(self): @@ -327,7 +327,7 @@ class ResourceHistory(ResourceMixin, Base, GnocchiBase): sqlalchemy.ForeignKey( 'resource.id', ondelete="CASCADE", - name="fk_resource_history_id_resource_id"), + name="fk_rh_id_resource_id"), nullable=False) revision_end = sqlalchemy.Column(PreciseTimestamp, nullable=False, default=lambda: utils.utcnow()) @@ -350,12 +350,15 @@ class ResourceExtMixin(object): @declarative.declared_attr def id(cls): + tablename_compact = cls.__tablename__ + if tablename_compact.endswith("_history"): + tablename_compact = tablename_compact[:-6] return sqlalchemy.Column( sqlalchemy_utils.UUIDType(), sqlalchemy.ForeignKey( 'resource.id', ondelete="CASCADE", - name="fk_%s_id_resource_id" % cls.__tablename__, + name="fk_%s_id_resource_id" % tablename_compact, # NOTE(sileht): We use to ensure that postgresql # does not use AccessExclusiveLock on destination table use_alter=True), @@ -370,13 +373,16 @@ class ResourceHistoryExtMixin(object): @declarative.declared_attr def revision(cls): + tablename_compact = cls.__tablename__ + if tablename_compact.endswith("_history"): + tablename_compact = tablename_compact[:-6] return sqlalchemy.Column( sqlalchemy.Integer, sqlalchemy.ForeignKey( 'resource_history.revision', ondelete="CASCADE", - name="fk_%s_revision_resource_history_revision" - % cls.__tablename__, + name="fk_%s_revision_rh_revision" + % tablename_compact, # NOTE(sileht): We use to ensure that postgresql # does not use AccessExclusiveLock on destination table use_alter=True), @@ -403,7 +409,6 @@ class ArchivePolicyRule(Base, GnocchiBase): sqlalchemy.ForeignKey( 'archive_policy.name', ondelete="RESTRICT", 
- name="fk_archive_policy_rule_" - "archive_policy_name_archive_policy_name"), + name="fk_apr_ap_name_ap_name"), nullable=False) metric_pattern = sqlalchemy.Column(sqlalchemy.String(255), nullable=False) -- GitLab From c23835f5370c3da26c910f162507b7822b5bf8fb Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Wed, 13 Apr 2016 20:43:03 +0200 Subject: [PATCH 0173/1483] Don't delete archive policy used by ap rule Closes-bug: #1569781 Change-Id: Id407eb4c9ef9fe6c1f18e832d0a314ba2448ffbb --- gnocchi/indexer/sqlalchemy.py | 5 ++++- gnocchi/tests/gabbi/gabbits/archive_rule.yaml | 7 +++++++ gnocchi/tests/test_indexer.py | 4 ++++ 3 files changed, 15 insertions(+), 1 deletion(-) diff --git a/gnocchi/indexer/sqlalchemy.py b/gnocchi/indexer/sqlalchemy.py index 76f4e9dc..f1de9dac 100644 --- a/gnocchi/indexer/sqlalchemy.py +++ b/gnocchi/indexer/sqlalchemy.py @@ -312,13 +312,16 @@ class SQLAlchemyIndexer(indexer.IndexerDriver): return session.query(ArchivePolicy).get(name) def delete_archive_policy(self, name): + constraints = [ + "fk_metric_ap_name_ap_name", + "fk_apr_ap_name_ap_name"] with self.facade.writer() as session: try: if session.query(ArchivePolicy).filter( ArchivePolicy.name == name).delete() == 0: raise indexer.NoSuchArchivePolicy(name) except exception.DBReferenceError as e: - if (e.constraint == "fk_metric_ap_name_ap_name"): + if e.constraint in constraints: raise indexer.ArchivePolicyInUse(name) raise diff --git a/gnocchi/tests/gabbi/gabbits/archive_rule.yaml b/gnocchi/tests/gabbi/gabbits/archive_rule.yaml index a0eebdd3..ea5e2b33 100644 --- a/gnocchi/tests/gabbi/gabbits/archive_rule.yaml +++ b/gnocchi/tests/gabbi/gabbits/archive_rule.yaml @@ -157,6 +157,13 @@ tests: url: /v1/archive_policy_rule/foo status: 404 + - name: delete used archive policy + url: /v1/archive_policy/low + request_headers: + x-roles: admin + method: DELETE + status: 400 + # delete rule as non admin - name: delete archive policy rule non admin diff --git a/gnocchi/tests/test_indexer.py 
b/gnocchi/tests/test_indexer.py index 573b1dc2..1aa1b56a 100644 --- a/gnocchi/tests/test_indexer.py +++ b/gnocchi/tests/test_indexer.py @@ -82,6 +82,10 @@ class TestIndexerDriver(tests_base.TestCase): self.assertEqual('abc.xyz', rules[1]['metric_pattern']) self.assertEqual('abc.*', rules[2]['metric_pattern']) + # Ensure we can't delete the archive policy + self.assertRaises(indexer.ArchivePolicyInUse, + self.index.delete_archive_policy, name) + def test_create_metric(self): r1 = uuid.uuid4() user = str(uuid.uuid4()) -- GitLab From 24786066675392120077dd0e39a723792b4327a2 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Fri, 15 Apr 2016 09:31:17 +0200 Subject: [PATCH 0174/1483] Drop useless enum Change-Id: Ic20927210323b1272ab9618d3bec0a7f5808382f --- .../2e0b912062d1_drop_useless_enum.py | 39 +++++++++++++++++++ 1 file changed, 39 insertions(+) create mode 100644 gnocchi/indexer/alembic/versions/2e0b912062d1_drop_useless_enum.py diff --git a/gnocchi/indexer/alembic/versions/2e0b912062d1_drop_useless_enum.py b/gnocchi/indexer/alembic/versions/2e0b912062d1_drop_useless_enum.py new file mode 100644 index 00000000..5215da09 --- /dev/null +++ b/gnocchi/indexer/alembic/versions/2e0b912062d1_drop_useless_enum.py @@ -0,0 +1,39 @@ +# Copyright 2016 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + +"""drop_useless_enum + +Revision ID: 2e0b912062d1 +Revises: 34c517bcc2dd +Create Date: 2016-04-15 07:29:38.492237 + +""" + +from alembic import op + + +# revision identifiers, used by Alembic. +revision = '2e0b912062d1' +down_revision = '34c517bcc2dd' +branch_labels = None +depends_on = None + + +def upgrade(): + bind = op.get_bind() + if bind and bind.engine.name == "postgresql": + # NOTE(sileht): we use IF exists because if the database have + # been created from scratch with 2.1 the enum doesn't exists + op.execute("DROP TYPE IF EXISTS resource_type_enum") -- GitLab From 441d68f14dbfcf6553f99c4c64de6a4abc73c82d Mon Sep 17 00:00:00 2001 From: Yurii Prokulevych Date: Wed, 13 Apr 2016 16:13:06 +0200 Subject: [PATCH 0175/1483] tests: Add more integration tests coverage Depends-On: Id407eb4c9ef9fe6c1f18e832d0a314ba2448ffbb Change-Id: I26eee16527043e5b90d70d48fb83eb982b7a008c --- gnocchi/tests/gabbi/gabbits-live/live.yaml | 657 ++++++++++++++++++++- 1 file changed, 631 insertions(+), 26 deletions(-) diff --git a/gnocchi/tests/gabbi/gabbits-live/live.yaml b/gnocchi/tests/gabbi/gabbits-live/live.yaml index 9c77e452..14a74fd1 100644 --- a/gnocchi/tests/gabbi/gabbits-live/live.yaml +++ b/gnocchi/tests/gabbi/gabbits-live/live.yaml @@ -8,33 +8,200 @@ defaults: x-auth-token: $ENVIRON['GNOCCHI_SERVICE_TOKEN'] tests: - - name: check / url: / - - name: check archive policies + # Fail to create archive policy + - name: wrong archive policy content type + desc: attempt to create archive policy with invalid content-type url: /v1/archive_policy - response_headers: - content-type: /application/json/ + method: POST + request_headers: + content-type: text/plain + status: 415 response_strings: - - '{"definition": [{"points": 3600, "timespan": "1:00:00", "granularity": "0:00:01"}, {"points": 10080, "timespan": "7 days, 0:00:00", "granularity": "0:01:00"}, {"points": 8760, "timespan": "365 days, 0:00:00", "granularity": "1:00:00"}], "back_window": 0, "name": "high", 
"aggregation_methods": ["std", "count", "95pct", "min", "max", "sum", "median", "mean"]}' - - '{"definition": [{"points": 12, "timespan": "1:00:00", "granularity": "0:05:00"}, {"points": 24, "timespan": "1 day, 0:00:00", "granularity": "1:00:00"}, {"points": 30, "timespan": "30 days, 0:00:00", "granularity": "1 day, 0:00:00"}], "back_window": 0, "name": "low", "aggregation_methods": ["std", "count", "95pct", "min", "max", "sum", "median", "mean"]}' - - '{"definition": [{"points": 1440, "timespan": "1 day, 0:00:00", "granularity": "0:01:00"}, {"points": 168, "timespan": "7 days, 0:00:00", "granularity": "1:00:00"}, {"points": 365, "timespan": "365 days, 0:00:00", "granularity": "1 day, 0:00:00"}], "back_window": 0, "name": "medium", "aggregation_methods": ["std", "count", "95pct", "min", "max", "sum", "median", "mean"]}' + - Unsupported Media Type - - name: check generic resources with the default one for statsd - url: /v1/resource/generic + - name: wrong method + desc: attempt to create archive policy with 'PUT' method + url: /v1/archive_policy + method: PUT + request_headers: + content-type: application/json + status: 405 + + - name: invalid authZ + desc: x-auth-token is invalid + url: /v1/archive_policy + method: POST + request_headers: + content-type: application/json + x-auth-token: 'hello' + data: + name: medium + definition: + - granularity: 1 second + status: 401 + + - name: bad archive policy body + desc: archive policy contains invalid key 'cowsay' + url: /v1/archive_policy + method: POST + request_headers: + content-type: application/json + data: + cowsay: moo + status: 400 + response_strings: + - "Invalid input: extra keys not allowed" + + - name: missing definition + desc: archive policy is missing 'definition' keyword + url: /v1/archive_policy + method: POST + request_headers: + content-type: application/json + data: + name: medium + status: 400 + response_strings: + - "Invalid input: required key not provided" + + - name: empty definition + desc: 
empty definition for archive policy + url: /v1/archive_policy + method: POST + request_headers: + content-type: application/json + data: + name: medium + definition: [] + status: 400 + response_strings: + - "Invalid input: length of value must be at least 1" + + - name: wrong value definition + desc: invalid type of 'definition' key + url: /v1/archive_policy + method: POST + request_headers: + content-type: application/json + data: + name: somename + definition: foobar + status: 400 + response_strings: + - "Invalid input: expected a list" + + - name: useless definition + desc: invalid archive policy definition + url: /v1/archive_policy + method: POST + request_headers: + content-type: application/json + data: + name: medium + definition: + - cowsay: moo + status: 400 + response_strings: + - "Invalid input: extra keys not allowed" + + # + # Create archive policy + # + + - name: create archive policy + desc: create archve policy 'gabbilive' for live tests + url: /v1/archive_policy + method: POST + request_headers: + content-type: application/json + data: + name: gabbilive + back_window: 0 + definition: + - granularity: 1 second + points: 60 + - granularity: 2 second + timespan: 1 minute + - points: 5 + timespan: 5 minute + aggregation_methods: + - mean + - min + - max + response_headers: + location: $SCHEME://$NETLOC/v1/archive_policy/gabbilive + status: 201 + + # Retrieve it correctly and then poorly + + - name: get archive policy + desc: retrieve archive policy 'gabbilive' and asster its values + url: $LOCATION response_headers: content-type: /application/json/ response_json_paths: - $[0].type: generic - $.`len`: 1 + $.name: gabbilive + $.back_window: 0 + $.definition[0].granularity: "0:00:01" + $.definition[0].points: 60 + $.definition[0].timespan: "0:01:00" + $.definition[1].granularity: "0:00:02" + $.definition[1].points: 30 + $.definition[1].timespan: "0:01:00" + $.definition[2].granularity: "0:01:00" + $.definition[2].points: 5 + $.definition[2].timespan: 
"0:05:00" + response_strings: + '"aggregation_methods": ["max", "min", "mean"]' + + - name: get wrong accept + desc: invalid 'accept' header + url: /v1/archive_policy/medium + request_headers: + accept: text/plain + status: 406 + + # Unexpected methods - - name: post unicode archive policy + - name: post single archive + desc: unexpected 'POST' request to archive policy + url: /v1/archive_policy/gabbilive + method: POST + status: 405 + + - name: put single archive + desc: unexpected 'PUT' request to archive policy + url: /v1/archive_policy/gabbilive + method: PUT + status: 405 + + # Duplicated archive policy names ain't allowed + + - name: create duplicate archive policy + desc: create archve policy 'gabbilive' for live tests + url: /v1/archive_policy + method: POST + request_headers: + content-type: application/json + data: + name: gabbilive + definition: + - granularity: 30 second + points: 60 + status: 409 + response_strings: + - Archive policy gabbilive already exists + + # Create a unicode named policy + + - name: post unicode policy name url: /v1/archive_policy method: POST request_headers: content-type: application/json - x-roles: admin data: name: ✔éñ☃ definition: @@ -46,41 +213,428 @@ tests: response_json_paths: name: ✔éñ☃ - - name: get unicode archive policy - url: $LOCATION + - name: retrieve unicode policy name + url: $LOCATION response_json_paths: - $.name: ✔éñ☃ + name: ✔éñ☃ - name: delete unicode archive policy url: /v1/archive_policy/%E2%9C%94%C3%A9%C3%B1%E2%98%83 method: DELETE status: 204 - - name: delete again unicode archive policy + # It really is gone + + - name: confirm delete + desc: assert deleted unicode policy is not available + method: GET url: /v1/archive_policy/%E2%9C%94%C3%A9%C3%B1%E2%98%83 + status: 404 + + # Fail to delete one that does not exist + + - name: delete missing archive + desc: delete non-existint archive policy + url: /v1/archive_policy/grandiose + method: DELETE + status: 404 + response_strings: + - Archive policy 
grandiose does not exist + + # Attempt to create illogical policies + + - name: create illogical policy + url: /v1/archive_policy + method: POST + request_headers: + content-type: application/json + data: + name: complex + definition: + - granularity: 1 second + points: 60 + timespan: "0:01:01" + status: 400 + response_strings: + - timespan ≠ granularity × points + + - name: create identical granularities policy + url: /v1/archive_policy + method: POST + request_headers: + content-type: application/json + data: + name: complex + definition: + - granularity: 1 second + points: 60 + - granularity: 1 second + points: 120 + status: 400 + response_strings: + - "More than one archive policy uses granularity `1.0'" + + - name: policy invalid unit + desc: invalid unit for archive policy 'timespan' key + url: /v1/archive_policy + method: POST + request_headers: + content-type: application/json + data: + name: 227d0e1f-4295-4e4b-8515-c296c47d71d3 + definition: + - granularity: 1 second + timespan: "1 shenanigan" + status: 400 + + # + # Archive policy rules + # + + - name: create archive policy rule1 + url: /v1/archive_policy_rule + method: POST + request_headers: + content-type: application/json + data: + name: gabbilive_rule + metric_pattern: "live.*" + archive_policy_name: gabbilive + status: 201 + response_json_paths: + $.metric_pattern: "live.*" + $.archive_policy_name: gabbilive + $.name: gabbilive_rule + + - name: create invalid archive policy rule + url: /v1/archive_policy_rule + method: POST + request_headers: + content-type: application/json + data: + name: test_rule + metric_pattern: "disk.foo.*" + status: 400 + + - name: missing auth archive policy rule + url: /v1/archive_policy_rule + method: POST + request_headers: + content-type: application/json + x-auth-token: 'hello' + data: + name: test_rule + metric_pattern: "disk.foo.*" + archive_policy_name: low + status: 401 + + - name: wrong archive policy rule content type + url: /v1/archive_policy_rule + method: POST 
+ request_headers: + content-type: text/plain + status: 415 + response_strings: + - Unsupported Media Type + + - name: bad archive policy rule body + url: /v1/archive_policy_rule + method: POST + request_headers: + content-type: application/json + data: + whaa: foobar + status: 400 + response_strings: + - "Invalid input: extra keys not allowed" + + # get an archive policy rules + + - name: get all archive policy rules + url: /v1/archive_policy_rule + status: 200 + response_strings: + '"metric_pattern": "live.*", "archive_policy_name": "gabbilive", "name": "gabbilive_rule"' + + - name: get unknown archive policy rule + url: /v1/archive_policy_rule/foo + status: 404 + + + - name: get archive policy rule + url: /v1/archive_policy_rule/gabbilive_rule + status: 200 + response_json_paths: + $.metric_pattern: "live.*" + $.archive_policy_name: "gabbilive" + $.name: "gabbilive_rule" + + - name: delete archive policy in use + desc: fails due to https://bugs.launchpad.net/gnocchi/+bug/1569781 + url: /v1/archive_policy/gabbilive + method: DELETE + status: 400 + + # + # Metrics + # + + + - name: get all metrics + url: /v1/metric + status: 200 + + - name: create metric with name and rule + url: /v1/metric + request_headers: + content-type: application/json + method: post + data: + name: "live.io.rate" + status: 201 + response_json_paths: + $.archive_policy_name: gabbilive + $.name: live.io.rate + + - name: delete metric + url: $LOCATION + method: DELETE + status: 204 + + - name: create metric with name and policy + url: /v1/metric + request_headers: + content-type: application/json + method: post + data: + name: "aagabbi.live.metric" + archive_policy_name: "gabbilive" + status: 201 + response_json_paths: + $.archive_policy_name: gabbilive + $.name: "aagabbi.live.metric" + + - name: get valid metric id + url: $LOCATION + status: 200 + response_json_paths: + $.archive_policy.name: gabbilive + + - name: delete the metric + url: /v1/metric/$RESPONSE['$.id'] + method: DELETE + 
status: 204 + + - name: create metric bad archive policy + url: /v1/metric + method: POST + request_headers: + content-type: application/json + data: + archive_policy_name: 2e2675aa-105e-4664-a30d-c407e6a0ea7f + status: 400 + response_strings: + - Archive policy 2e2675aa-105e-4664-a30d-c407e6a0ea7f does not exist + + - name: create metric bad content-type + url: /v1/metric + method: POST + request_headers: + content-type: plain/text + data: + archive_policy_name: cookies + status: 415 + + + # + # Cleanup + # + + - name: delete archive policy rule + url: /v1/archive_policy_rule/gabbilive_rule + method: DELETE + status: 204 + + - name: confirm delete archive policy rule + url: /v1/archive_policy_rule/gabbilive_rule method: DELETE status: 404 + + # + # Resources section + # + + - name: root of resource + url: /v1/resource + response_json_paths: + $.volume: $SCHEME://$NETLOC/v1/resource/volume + + - name: typo of resource + url: /v1/resoue + status: 404 + + - name: typo of resource extra + url: /v1/resource/foobar + status: 404 + + - name: identity resource + desc: maybe theres are no identity resources yet + url: /v1/resource/identity + status: 200 + + - name: ceph_account resource + desc: maybe theres are no ceph_account resources yet + url: /v1/resource/ceph_account + status: 200 + + - name: instance resource + desc: maybe there are no instance resources yet + url: /v1/resource/instance + status: 200 + + - name: instance_network_interface resource + desc: maybe theres are no instance_network_interface resources yet + url: /v1/resource/instance_network_interface + status: 200 + + - name: instance_disk resource + desc: maybe theres are no instance_disk resources yet + url: /v1/resource/instance_disk + status: 200 + + - name: image resource + desc: maybe theres are no image resources yet + url: /v1/resource/image + status: 200 + + - name: ipmi resource + desc: maybe theres are no ipmi resources yet + url: /v1/resource/ipmi + status: 200 + + - name: network resource + 
desc: maybe theres are no network resources yet + url: /v1/resource/network + status: 200 + + - name: orchestration resource + desc: maybe theres are no orchestration resources yet + #url: /v1/resource/orchestration + url: /v1/resource/stack + status: 200 + + - name: swift_account resource + desc: maybe theres are no swift_account resources yet + url: /v1/resource/swift_account + status: 200 + + - name: volume resource + desc: maybe theres are no volume resources yet + url: /v1/resource/volume + status: 200 + + - name: instance resource bad accept + desc: Expect 406 on bad accept type + request_headers: + accept: text/plain + url: /v1/resource/instance + status: 406 + response_strings: + - 406 Not Acceptable + + - name: instance resource complex accept + desc: failover accept media type appropriately + request_headers: + accept: text/plain, application/json; q=0.8 + url: /v1/resource/instance + status: 200 + - name: post instance resource url: /v1/resource/instance method: post request_headers: content-type: application/json data: - id: 75C44741-CC60-4033-804E-2D3098C7D2E9 - user_id: 0fbb2314-8461-4b1a-8013-1fc22f6afc9c - project_id: f3d41b77-0cc1-4f0b-b94a-1d5be9c0e3ea + id: 2ae35573-7f9f-4bb1-aae8-dad8dff5706e + user_id: 126204ef-989a-46fd-999b-ee45c8108f31 + project_id: 98e785d7-9487-4159-8ab8-8230ec37537a flavor_id: "2" image_ref: http://image host: compute1 display_name: myvm metrics: vcpus: - archive_policy_name: medium + archive_policy_name: gabbilive status: 201 + response_json_paths: + $.id: 2ae35573-7f9f-4bb1-aae8-dad8dff5706e + $.user_id: 126204ef-989a-46fd-999b-ee45c8108f31 + $.project_id: 98e785d7-9487-4159-8ab8-8230ec37537a + $.flavor_id: "2" + + - name: get instance resource + url: $LOCATION + status: 200 + response_json_paths: + $.id: 2ae35573-7f9f-4bb1-aae8-dad8dff5706e + $.user_id: 126204ef-989a-46fd-999b-ee45c8108f31 + $.project_id: 98e785d7-9487-4159-8ab8-8230ec37537a + $.flavor_id: "2" + + - name: search for instance resource via user_id + 
#url: /v1/search/resource/generic + url: /v1/search/resource/instance + method: POST + request_headers: + content-type: application/json + data: + =: + user_id: "126204ef-989a-46fd-999b-ee45c8108f31" + response_json_paths: + $..id: 2ae35573-7f9f-4bb1-aae8-dad8dff5706e + $..user_id: 126204ef-989a-46fd-999b-ee45c8108f31 + $..project_id: 98e785d7-9487-4159-8ab8-8230ec37537a + $..display_name: myvm + + - name: search for instance resource via user_id and 'generic' type + url: /v1/search/resource/generic + method: POST + request_headers: + content-type: application/json + data: + =: + id: "2ae35573-7f9f-4bb1-aae8-dad8dff5706e" + response_strings: + '"user_id": "126204ef-989a-46fd-999b-ee45c8108f31"' + + - name: search for instance resource via user_id and project_id + url: /v1/search/resource/generic + method: POST + request_headers: + content-type: application/json + data: + and: + - =: + user_id: "126204ef-989a-46fd-999b-ee45c8108f31" + - =: + project_id: "98e785d7-9487-4159-8ab8-8230ec37537a" + response_strings: + '"id": "2ae35573-7f9f-4bb1-aae8-dad8dff5706e"' + + - name: patch instance resource + url: /v1/resource/instance/2ae35573-7f9f-4bb1-aae8-dad8dff5706e + method: patch + request_headers: + content-type: application/json + data: + host: compute2 + status: 200 + response_json_paths: + host: compute2 - name: post some measures to the metric on instance - url: /v1/resource/instance/75C44741-CC60-4033-804E-2D3098C7D2E9/metric/vcpus/measures + url: /v1/resource/instance/2ae35573-7f9f-4bb1-aae8-dad8dff5706e/metric/vcpus/measures request_headers: content-type: application/json method: POST @@ -92,8 +646,8 @@ tests: status: 202 - name: get instance measures with poll - url: /v1/resource/instance/75C44741-CC60-4033-804E-2D3098C7D2E9/metric/vcpus/measures - # wait up to 60 seconds + url: /v1/resource/instance/2ae35573-7f9f-4bb1-aae8-dad8dff5706e/metric/vcpus/measures + # wait up to 60 seconds before policy is deleted poll: count: 60 delay: 1 @@ -101,7 +655,58 @@ tests: 
$[0][2]: 2 $[1][2]: 2 - - name: delete the instance resource - url: /v1/resource/instance/75C44741-CC60-4033-804E-2D3098C7D2E9 + + # + # Search for resources + # + + - name: typo of search + url: /v1/search/notexists + status: 404 + + - name: typo of search in resource + url: /v1/search/resource/foobar + status: 404 + + - name: search with invalid uuid + url: /v1/search/resource/generic + method: POST + request_headers: + content-type: application/json + data: + =: + id: "cd9eef" + + + - name: delete instance resource + url: /v1/resource/generic/2ae35573-7f9f-4bb1-aae8-dad8dff5706e method: DELETE status: 204 + + # assert resource is really deleted + - name: assert resource resource is deleted + url: /v1/resource/generic/2ae35573-7f9f-4bb1-aae8-dad8dff5706e + method: GET + status: 404 + + - name: post instance resource no data + url: /v1/resource/generic + method: post + request_headers: + content-type: application/json + status: 400 + + - name: delete single archive policy cleanup. + url: /v1/archive_policy/gabbilive + method: DELETE + poll: + count: 60 + delay: 1 + status: 204 + + # It really is gone + + - name: confirm delete of cleanup + url: /v1/archive_policy/gabbilive + status: 404 + -- GitLab From 24ca86b0e20dd0c067fab01729bd40be34523956 Mon Sep 17 00:00:00 2001 From: gordon chung Date: Tue, 19 Apr 2016 08:53:20 -0400 Subject: [PATCH 0176/1483] fix resource_type tablename for instance_net_int migration 0735ed97e5b3_add_tablename_to_resource_type.py is incorrectly setting tablename as 'instance_net_int' (with quotes). 
this causes error in future migrations specficially 34c517bcc2dd_shorter_foreign_key.py where insp.get_foreign_keys("%s_history" % rt.tablename) cannot find tablename 'instance_net_int'_history Change-Id: Idf51ef40b1d4fe22d52849ab1402b6c19de5210c --- .../ffc7bbeec0b0_migrate_legacy_resources_to_db2.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/gnocchi/indexer/alembic/versions/ffc7bbeec0b0_migrate_legacy_resources_to_db2.py b/gnocchi/indexer/alembic/versions/ffc7bbeec0b0_migrate_legacy_resources_to_db2.py index c6493600..0fd3e5c6 100644 --- a/gnocchi/indexer/alembic/versions/ffc7bbeec0b0_migrate_legacy_resources_to_db2.py +++ b/gnocchi/indexer/alembic/versions/ffc7bbeec0b0_migrate_legacy_resources_to_db2.py @@ -44,6 +44,12 @@ def upgrade(): sa.Column('attributes', sa.Text, nullable=False) ) + # NOTE(gordc): fix for incorrect migration: + # 0735ed97e5b3_add_tablename_to_resource_type.py#L46 + op.execute(resource_type.update().where( + resource_type.c.name == "instance_network_interface" + ).values({'tablename': 'instance_net_int'})) + resource_type_names = [rt.name for rt in list(bind.execute(resource_type.select()))] -- GitLab From 8da63a2ff34c83a29acb07a7c2d8bce900dc8254 Mon Sep 17 00:00:00 2001 From: ZhiQiang Fan Date: Thu, 21 Apr 2016 12:19:30 +0800 Subject: [PATCH 0177/1483] use thread safe fnmatch fnmatch is not thread safe in python <2.7.10, let's use the safe one in oslo.utils Change-Id: I332c63d4b1253ae3c520689a3d8cb5c479267370 ref: https://hg.python.org/cpython/rev/fe12c34c39eb --- gnocchi/indexer/__init__.py | 2 +- requirements.txt | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/gnocchi/indexer/__init__.py b/gnocchi/indexer/__init__.py index 97cc9eca..4ad7bf5a 100644 --- a/gnocchi/indexer/__init__.py +++ b/gnocchi/indexer/__init__.py @@ -13,12 +13,12 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
-import fnmatch import hashlib import os import iso8601 from oslo_config import cfg +from oslo_utils import fnmatch from oslo_utils import netutils import six from stevedore import driver diff --git a/requirements.txt b/requirements.txt index 27e85c2a..7b39a8ac 100644 --- a/requirements.txt +++ b/requirements.txt @@ -4,7 +4,7 @@ oslo.config>=2.6.0 oslo.log>=1.0.0 oslo.policy>=0.3.0 oslo.serialization>=1.4.0 -oslo.utils>=1.6.0 +oslo.utils>=3.3.0 oslo.middleware pandas>=0.17.0 pecan>=0.9 -- GitLab From b99c85b6d31106969e1f448ed5b1a96fd1b742c9 Mon Sep 17 00:00:00 2001 From: Thomas Goirand Date: Sat, 23 Apr 2016 18:54:59 +0200 Subject: [PATCH 0178/1483] Added Japanese debconf templates translation update (Closes: #820769). --- debian/changelog | 8 ++++++-- debian/po/ja.po | 44 +++++++++++++++++++++++--------------------- 2 files changed, 29 insertions(+), 23 deletions(-) diff --git a/debian/changelog b/debian/changelog index 076a375c..f5c97254 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,8 +1,12 @@ -gnocchi (2.0.2-5) UNRELEASED; urgency=medium +gnocchi (2.0.2-6) UNRELEASED; urgency=medium + [ Ondřej Nový ] * Standards-Version is 3.9.8 now (no change) - -- Ondřej Nový Sat, 09 Apr 2016 19:22:44 +0200 + [ Thomas Goirand ] + * Added Japanese debconf templates translation update (Closes: #820769). + + -- Thomas Goirand Sat, 23 Apr 2016 18:54:09 +0200 gnocchi (2.0.2-4) unstable; urgency=medium diff --git a/debian/po/ja.po b/debian/po/ja.po index 66c8c099..09fafba0 100644 --- a/debian/po/ja.po +++ b/debian/po/ja.po @@ -9,7 +9,7 @@ msgstr "" "Project-Id-Version: glance\n" "Report-Msgid-Bugs-To: gnocchi@packages.debian.org\n" "POT-Creation-Date: 2016-03-29 13:10+0000\n" -"PO-Revision-Date: 2016-03-17 10:06+0900\n" +"PO-Revision-Date: 2016-04-07 13:15+0900\n" "Last-Translator: Takuma Yamada \n" "Language-Team: Japanese \n" "Language: ja\n" @@ -113,8 +113,8 @@ msgid "" " * a username and password to access the database." 
msgstr "" " * 使用するデータベースの種類\n" -" * データベースサーバのホスト名 (そのサーバは、このマシンからの\n" -" TCP 接続を許可する必要があります)\n" +" * データベースサーバのホスト名 (そのサーバは、このマシンからのTCP 接続を\n" +" 許可する必要があります)\n" " * データベースにアクセスするためのユーザ名とパスワード" #. Type: boolean @@ -134,14 +134,14 @@ msgid "" "You can change this setting later on by running \"dpkg-reconfigure -plow " "gnocchi-common\"." msgstr "" -"「dpkg-reconfigure -plow gnocchi-common」の実行により、この設定を後で変更する" -"ことができます。" +"この設定は、後で \"dpkg-reconfigure -plow gnocchi-common\" を実行することで変" +"更できます。" #. Type: boolean #. Description #: ../gnocchi-api.templates:2001 msgid "Register Gnocchi in the Keystone endpoint catalog?" -msgstr "Gnocchi を Keystone のエンドポイントのカタログに登録しますか?" +msgstr "Gnocchi を Keystone のエンドポイントカタログに登録しますか?" #. Type: boolean #. Description @@ -151,14 +151,13 @@ msgid "" "accessible. This is done using \"keystone service-create\" and \"keystone " "endpoint-create\". This can be done automatically now." msgstr "" -"各 OpenStack のサービス (各 API ) がアクセスできるようにするために登録する必" -"要があります。これは「keystone service-create」と「keystone endpoint-create」" -"を使用して行われます。これは自動的に行うことができます。" +"各 OpenStack サービス (各 API) は、アクセス可能にするために登録する必要があり" +"ます。\"keystone service-create\" と \"keystone endpoint-create\" を使って登" +"録することができます。ここで自動的に行うことができます。" #. Type: boolean #. Description #: ../gnocchi-api.templates:2001 -#, fuzzy #| msgid "" #| "Note that you will need to have an up and running Keystone server on " #| "which to connect using the Keystone authentication token." @@ -167,8 +166,9 @@ msgid "" "to connect using a known admin project name, admin username and password. " "The admin auth token is not used anymore." msgstr "" -"Keystone 認証トークンを使って接続するには、Keystone サーバの起動および実行が" -"必要になりますので注意してください。" +"既知の管理プロジェクト名、管理者のユーザ名とパスワードを使用して接続するに" +"は、Keystone サーバの起動および実行が必要になりますので注意してください。管理" +"者認証トークンはもう使用されていません。" #. Type: string #. 
Description @@ -183,17 +183,16 @@ msgid "" "Please enter the IP address of the Keystone server, so that gnocchi-api can " "contact Keystone to do the Gnocchi service and endpoint creation." msgstr "" -"Keystone サーバの IP アドレスを入力してください。その結果 gnocchi-api は、" -"Gnocchi サービスやエンドポイント作成を行うために Keystone へ通信することがで" -"きます。" +"Keystone サーバの IP アドレスを入力してください。それにより gnocchi-api は " +"Keystone と通信し、Gnocchi サービスやエンドポイントの作成ができるようになりま" +"す。" #. Type: string #. Description #: ../gnocchi-api.templates:4001 -#, fuzzy #| msgid "Keystone authentication token:" msgid "Keystone admin name:" -msgstr "Keystone 認証トークン:" +msgstr "Keystone 管理者名:" #. Type: string #. Description @@ -207,18 +206,21 @@ msgid "" "To register the service endpoint, this package needs to know the Admin " "login, name, project name, and password to the Keystone server." msgstr "" +"サービスのエンドポイントを登録するには、このパッケージが Keystone サーバへの" +"管理者ログイン、名前、プロジェクト名、およびパスワードを知っている必要があり" +"ます。" #. Type: string #. Description #: ../gnocchi-api.templates:5001 msgid "Keystone admin project name:" -msgstr "" +msgstr "Keystone 管理プロジェクト名:" #. Type: password #. Description #: ../gnocchi-api.templates:6001 msgid "Keystone admin password:" -msgstr "" +msgstr "Keystone 管理者パスワード:" #. Type: string #. Description @@ -241,8 +243,8 @@ msgid "" "address." msgstr "" "この IP アドレスは、このサービスを利用するクライアントからアクセス可能でなけ" -"ればならないので、パブリッククラウドをインストールしている場合、これはパブ" -"リック IP アドレスでなければなりません。" +"ればなりません。パブリッククラウドをインストールしている場合は、パブリック " +"IP アドレスにする必要があります。" #. Type: string #. Description -- GitLab From 28650af52a6b463f18f967093605e56c5a855530 Mon Sep 17 00:00:00 2001 From: liusheng Date: Mon, 25 Apr 2016 11:46:57 +0800 Subject: [PATCH 0179/1483] Replace logging with oslo_log If we use logging lib to initialize a logger, the logger won't be applied the oslo_log config options. 
Change-Id: Id31245873cc01faf3dfa39af4b95680e43afeeea --- gnocchi/carbonara.py | 4 ++-- gnocchi/cli.py | 4 ++-- gnocchi/service.py | 3 +-- gnocchi/storage/__init__.py | 4 ++-- gnocchi/storage/_carbonara.py | 4 ++-- gnocchi/storage/ceph.py | 4 ++-- gnocchi/storage/swift.py | 4 ++-- 7 files changed, 13 insertions(+), 14 deletions(-) diff --git a/gnocchi/carbonara.py b/gnocchi/carbonara.py index 977443fe..944b1ffc 100644 --- a/gnocchi/carbonara.py +++ b/gnocchi/carbonara.py @@ -18,7 +18,6 @@ import datetime import functools -import logging import numbers import operator import re @@ -27,6 +26,7 @@ import time import iso8601 import lz4 import msgpack +from oslo_log import log import pandas import six @@ -38,7 +38,7 @@ from gnocchi import utils # to ensure the module is correctly loaded before we use really it. time.strptime("2016-02-19", "%Y-%m-%d") -LOG = logging.getLogger(__name__) +LOG = log.getLogger(__name__) class NoDeloreanAvailable(Exception): diff --git a/gnocchi/cli.py b/gnocchi/cli.py index 38f7d1b5..3329f12b 100644 --- a/gnocchi/cli.py +++ b/gnocchi/cli.py @@ -13,13 +13,13 @@ # implied. # See the License for the specific language governing permissions and # limitations under the License. -import logging import multiprocessing import signal import sys import time from oslo_config import cfg +from oslo_log import log from oslo_utils import timeutils import retrying import six @@ -32,7 +32,7 @@ from gnocchi import statsd as statsd_service from gnocchi import storage -LOG = logging.getLogger(__name__) +LOG = log.getLogger(__name__) def upgrade(): diff --git a/gnocchi/service.py b/gnocchi/service.py index e480ba75..1136dc65 100644 --- a/gnocchi/service.py +++ b/gnocchi/service.py @@ -15,7 +15,6 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import logging import multiprocessing from oslo_config import cfg @@ -75,6 +74,6 @@ def prepare_service(args=None, conf=None, "storage") log.setup(conf, 'gnocchi') - conf.log_opt_values(LOG, logging.DEBUG) + conf.log_opt_values(LOG, log.DEBUG) return conf diff --git a/gnocchi/storage/__init__.py b/gnocchi/storage/__init__.py index 070688b4..4354f307 100644 --- a/gnocchi/storage/__init__.py +++ b/gnocchi/storage/__init__.py @@ -13,9 +13,9 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. -import logging import operator from oslo_config import cfg +from oslo_log import log from oslo_utils import timeutils from stevedore import driver @@ -37,7 +37,7 @@ OPTS = [ "metric ingestion reporting"), ] -LOG = logging.getLogger(__name__) +LOG = log.getLogger(__name__) class Measure(object): diff --git a/gnocchi/storage/_carbonara.py b/gnocchi/storage/_carbonara.py index 5b5e1a30..1cb5b837 100644 --- a/gnocchi/storage/_carbonara.py +++ b/gnocchi/storage/_carbonara.py @@ -16,7 +16,6 @@ # under the License. 
import collections import datetime -import logging import multiprocessing import operator import threading @@ -26,6 +25,7 @@ import uuid from concurrent import futures import iso8601 from oslo_config import cfg +from oslo_log import log from oslo_serialization import msgpackutils from oslo_utils import timeutils import six @@ -44,7 +44,7 @@ OPTS = [ ] -LOG = logging.getLogger(__name__) +LOG = log.getLogger(__name__) class CarbonaraBasedStorage(storage.StorageDriver): diff --git a/gnocchi/storage/ceph.py b/gnocchi/storage/ceph.py index 90f14235..2fd4a445 100644 --- a/gnocchi/storage/ceph.py +++ b/gnocchi/storage/ceph.py @@ -18,17 +18,17 @@ import contextlib import datetime import errno import itertools -import logging import uuid from oslo_config import cfg +from oslo_log import log from oslo_utils import importutils from gnocchi import storage from gnocchi.storage import _carbonara -LOG = logging.getLogger(__name__) +LOG = log.getLogger(__name__) for RADOS_MODULE_NAME in ('cradox', 'rados'): rados = importutils.try_import(RADOS_MODULE_NAME) diff --git a/gnocchi/storage/swift.py b/gnocchi/storage/swift.py index 282fb137..1ba2329a 100644 --- a/gnocchi/storage/swift.py +++ b/gnocchi/storage/swift.py @@ -16,10 +16,10 @@ from collections import defaultdict import contextlib import datetime -import logging import uuid from oslo_config import cfg +from oslo_log import log import retrying import six from six.moves.urllib.parse import quote @@ -32,7 +32,7 @@ except ImportError: from gnocchi import storage from gnocchi.storage import _carbonara -LOG = logging.getLogger(__name__) +LOG = log.getLogger(__name__) OPTS = [ cfg.StrOpt('swift_auth_version', -- GitLab From 39b2bf7a40e79d60a04fc4fd6773a9ddc151068a Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Thu, 21 Apr 2016 11:53:34 +0200 Subject: [PATCH 0180/1483] Remove annoying debug log Change-Id: If169881f833dc50b1ffbb3cff8c6fb314eb491ba --- gnocchi/service.py | 2 ++ 1 file changed, 2 insertions(+) diff --git 
a/gnocchi/service.py b/gnocchi/service.py index 1136dc65..c13d8b91 100644 --- a/gnocchi/service.py +++ b/gnocchi/service.py @@ -73,6 +73,8 @@ def prepare_service(args=None, conf=None, urlparse.urlunparse(parsed), "storage") + log.set_defaults(default_log_levels=log.get_default_log_levels() + + ["passlib.utils.compat=INFO"]) log.setup(conf, 'gnocchi') conf.log_opt_values(LOG, log.DEBUG) -- GitLab From 791ca7748f92dd9bf0cbaba8584f1a119c2b72d0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?St=C3=A9phane=20Albert?= Date: Fri, 29 Apr 2016 11:48:40 -0500 Subject: [PATCH 0181/1483] Fix uuidgen not installed in some ubuntu installs On some basic ubuntu install the uuidgen commands is not there causing gnocchi installation to fail. Add a new pre-install commands to install the package on ubuntu systems. Change-Id: I456b2421905c383c5df39deb7f1a1819ad1c990e --- devstack/plugin.sh | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/devstack/plugin.sh b/devstack/plugin.sh index dddbb23d..b3ef7930 100644 --- a/devstack/plugin.sh +++ b/devstack/plugin.sh @@ -354,9 +354,10 @@ function init_gnocchi { } function preinstall_gnocchi { - # Needed to build psycopg2 if is_ubuntu; then - install_package libpq-dev + # libpq-dev is needed to build psycopg2 + # uuid-runtime is needed to use the uuidgen command + install_package libpq-dev uuid-runtime else install_package postgresql-devel fi -- GitLab From a0d1e9ad9779f8c4fb69236f8d31e9e85c5c888f Mon Sep 17 00:00:00 2001 From: zhangguoqing Date: Thu, 5 May 2016 09:50:10 +0000 Subject: [PATCH 0182/1483] [alembic] delete a blank line from script.py.mako A new db version file always pep8 error which E303 too many blank lines. Beacause the line ${imports if imports else ""} often left a blank line in script.py.mako, this patch delete the below one line to avoid pep8 error. 
reference: https://github.com/zzzeek/alembic/blob/master/alembic/templates/pylons/script.py.mako Change-Id: I6c28493f7ede4a9dc7390ad431fe05d1d1dc873d --- gnocchi/indexer/alembic/script.py.mako | 1 - 1 file changed, 1 deletion(-) diff --git a/gnocchi/indexer/alembic/script.py.mako b/gnocchi/indexer/alembic/script.py.mako index 60a8ed07..8f4e92ea 100644 --- a/gnocchi/indexer/alembic/script.py.mako +++ b/gnocchi/indexer/alembic/script.py.mako @@ -25,7 +25,6 @@ from alembic import op import sqlalchemy as sa ${imports if imports else ""} - # revision identifiers, used by Alembic. revision = ${repr(up_revision)} down_revision = ${repr(down_revision)} -- GitLab From c2d722b185dde43f1505812fe888c3a65bb59549 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Tue, 10 May 2016 08:43:58 +0200 Subject: [PATCH 0183/1483] gate: work with old and new devstack ceph plugin Change-Id: Ie27a2d9a77f632f027773f84fe23af5c11252aa7 --- devstack/gate/gate_hook.sh | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/devstack/gate/gate_hook.sh b/devstack/gate/gate_hook.sh index 1d26ca44..e9ba013d 100755 --- a/devstack/gate/gate_hook.sh +++ b/devstack/gate/gate_hook.sh @@ -41,7 +41,10 @@ case $STORAGE_DRIVER in DEVSTACK_GATE_TEMPEST+=$'\nexport SWIFT_USE_MOD_WSGI=True' ;; ceph) - ENABLED_SERVICES+="ceph" + if [ "${PROJECTS//devstack-plugin-ceph/}" == "$PROJECTS" ]; then + # Old fashion ceph plugin + ENABLED_SERVICES+="ceph" + fi DEVSTACK_LOCAL_CONFIG+=$'\nexport GNOCCHI_STORAGE_BACKEND=ceph' ;; esac -- GitLab From 2343d101dc038e91d6e00943a96f9dccac00111d Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Tue, 10 May 2016 10:06:38 +0200 Subject: [PATCH 0184/1483] doc: fix the number of storage drivers Change-Id: Ie8cff2d33e997c364e297718b53b54d3a9bb1093 Closes-Bug: #1580059 --- doc/source/architecture.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/source/architecture.rst b/doc/source/architecture.rst index 151b0ffa..313d10fe 100644 --- 
a/doc/source/architecture.rst +++ b/doc/source/architecture.rst @@ -32,7 +32,7 @@ responsible for linking resources with metrics. How to choose back-ends ~~~~~~~~~~~~~~~~~~~~~~~ -Gnocchi currently offers 4 storage drivers: +Gnocchi currently offers different storage drivers: * File * Swift -- GitLab From a1d02ee90979fdc0d060e2f14e663f149859c3f0 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Tue, 10 May 2016 17:09:20 +0200 Subject: [PATCH 0185/1483] Update hacking to 0.11 Change-Id: I4b79b56317e333cf12a94fae9dd8ec3bbfa020b5 --- gnocchi/aggregates/__init__.py | 2 -- gnocchi/exceptions.py | 2 -- gnocchi/indexer/null.py | 2 -- gnocchi/indexer/sqlalchemy_base.py | 2 -- gnocchi/rest/app.wsgi | 2 -- gnocchi/storage/null.py | 2 -- tox.ini | 2 +- 7 files changed, 1 insertion(+), 13 deletions(-) diff --git a/gnocchi/aggregates/__init__.py b/gnocchi/aggregates/__init__.py index 93cac35a..4d54f470 100644 --- a/gnocchi/aggregates/__init__.py +++ b/gnocchi/aggregates/__init__.py @@ -2,8 +2,6 @@ # # Copyright 2014 OpenStack Foundation # -# Authors: Ana Malagon -# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at diff --git a/gnocchi/exceptions.py b/gnocchi/exceptions.py index d016714c..81b484bf 100644 --- a/gnocchi/exceptions.py +++ b/gnocchi/exceptions.py @@ -2,8 +2,6 @@ # # Copyright © 2014 eNovance # -# Authors: Julien Danjou -# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at diff --git a/gnocchi/indexer/null.py b/gnocchi/indexer/null.py index d6950d7b..850e2aeb 100644 --- a/gnocchi/indexer/null.py +++ b/gnocchi/indexer/null.py @@ -2,8 +2,6 @@ # # Copyright © 2014 eNovance # -# Authors: Julien Danjou -# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at diff --git a/gnocchi/indexer/sqlalchemy_base.py b/gnocchi/indexer/sqlalchemy_base.py index 25115ec4..12aa309a 100644 --- a/gnocchi/indexer/sqlalchemy_base.py +++ b/gnocchi/indexer/sqlalchemy_base.py @@ -2,8 +2,6 @@ # # Copyright © 2014-2015 eNovance # -# Authors: Julien Danjou -# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at diff --git a/gnocchi/rest/app.wsgi b/gnocchi/rest/app.wsgi index f3529c35..3fbb9c9d 100644 --- a/gnocchi/rest/app.wsgi +++ b/gnocchi/rest/app.wsgi @@ -1,8 +1,6 @@ # # Copyright 2014 eNovance # -# Authors: Mehdi Abaakouk -# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at diff --git a/gnocchi/storage/null.py b/gnocchi/storage/null.py index 90927ccf..21eed341 100644 --- a/gnocchi/storage/null.py +++ b/gnocchi/storage/null.py @@ -2,8 +2,6 @@ # # Copyright © 2014-2015 eNovance # -# Authors: Julien Danjou -# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at diff --git a/tox.ini b/tox.ini index a5153c7f..702871be 100644 --- a/tox.ini +++ b/tox.ini @@ -31,7 +31,7 @@ commands = bashate -v devstack/plugin.sh devstack/gate/gate_hook.sh devstack/gat whitelist_externals = bash [testenv:pep8] -deps = hacking>=0.10,<0.11 +deps = hacking>=0.11,<0.12 commands = flake8 [testenv:py27-gate] -- GitLab From 32ddf6db3cc0d8e77c23a4c74f162d9be0459743 Mon Sep 17 00:00:00 2001 From: Yatin Kumbhare Date: Thu, 12 May 2016 19:53:54 +0530 Subject: [PATCH 0186/1483] remove verbose option in devstack plugin This option is deprecated in Mitaka and has been removed in Newton, and its default value is True already. Let's remove it. ref: https://review.openstack.org/#/c/314573/ Change-Id: Ifa8e6b004d7ecf03cd4fdc16d61e09d371ef1098 --- devstack/plugin.sh | 1 - 1 file changed, 1 deletion(-) diff --git a/devstack/plugin.sh b/devstack/plugin.sh index b3ef7930..ed3b5904 100644 --- a/devstack/plugin.sh +++ b/devstack/plugin.sh @@ -225,7 +225,6 @@ function configure_gnocchi { sudo chown $STACK_USER $GNOCCHI_DATA_DIR # Configure logging - iniset $GNOCCHI_CONF DEFAULT verbose True iniset $GNOCCHI_CONF DEFAULT debug "$ENABLE_DEBUG_LOG_LEVEL" # Install the configuration files -- GitLab From a725a3f5a1be9873b968c23114c6f6e066f7f37a Mon Sep 17 00:00:00 2001 From: Julian Pistorius Date: Thu, 12 May 2016 17:54:31 -0700 Subject: [PATCH 0187/1483] Fixed typo in documentation Change-Id: Id0377c3b8efeb23744f89596e1e3c951c4fbdd2d --- doc/source/rest.j2 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/source/rest.j2 b/doc/source/rest.j2 index fc43475b..1a4b0096 100644 --- a/doc/source/rest.j2 +++ b/doc/source/rest.j2 @@ -16,7 +16,7 @@ specified in `policy.json`. If you enable the OpenStack Keystone middleware, you only need to authenticate against Keystone and provide `X-Auth-Token` header with a valid token for each -request sent to Gnocchi. 
The headers mentionned above will be filled +request sent to Gnocchi. The headers mentioned above will be filled automatically based on your Keystone authorizations. Metrics -- GitLab From 20c55fe71384ada9bd00e19e836f174231cbd8f7 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Mon, 16 May 2016 15:42:22 +0200 Subject: [PATCH 0188/1483] rest: return a better error message when history is not found Foo is not really explicit. Change-Id: Id14bf657d957f6494669b2e96fa6145240e12854 --- gnocchi/rest/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/gnocchi/rest/__init__.py b/gnocchi/rest/__init__.py index 4c140e44..9fde91fc 100644 --- a/gnocchi/rest/__init__.py +++ b/gnocchi/rest/__init__.py @@ -751,7 +751,7 @@ class ResourceHistoryController(rest.RestController): resource = pecan.request.indexer.get_resource( self.resource_type, self.resource_id) if not resource: - abort(404, "foo") + abort(404, indexer.NoSuchResource(self.resource_id)) enforce("get resource", resource) -- GitLab From 19e9c59f6364e62efa016c9e33a6e098c653343f Mon Sep 17 00:00:00 2001 From: dsxyy Date: Mon, 16 May 2016 11:46:45 +0800 Subject: [PATCH 0189/1483] doc: fix the number of points for 30 days Change-Id: I10a3852acefe8a35bb12559caa9528674786fa27 Closes-Bug: #1582057 --- doc/source/architecture.rst | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) mode change 100644 => 100755 doc/source/architecture.rst diff --git a/doc/source/architecture.rst b/doc/source/architecture.rst old mode 100644 new mode 100755 index 313d10fe..ee63edfb --- a/doc/source/architecture.rst +++ b/doc/source/architecture.rst @@ -103,12 +103,12 @@ policies. 
A typical low grained use case could be:: 3600 points with a granularity of 1 second = 1 hour 1440 points with a granularity of 1 minute = 24 hours - 1800 points with a granularity of 1 hour = 30 days + 720 points with a granularity of 1 hour = 30 days 365 points with a granularity of 1 day = 1 year -This would represent 7205 points × 17.92 = 126 KiB per aggregation method. If -you use the 8 standard aggregation method, your metric will take up to 8 × 126 -KiB = 0.98 MiB of disk space. +This would represent 6125 points × 9 = 54 KiB per aggregation method. If +you use the 8 standard aggregation method, your metric will take up to 8 × 54 +KiB = 432 KiB of disk space. Default archive policies ------------------------ -- GitLab From 95e702dbfc611c85bd08ed00a186bced4290f124 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Tue, 17 May 2016 15:09:19 +0200 Subject: [PATCH 0190/1483] rest: catch InvalidResourceAttributeName when creating resource type Currently, if a user tries to use an invalid name for an attribute, the server returns an error 500 because the exception is not caught. Transform that to a 400 error. 
Change-Id: I4cac55fe6b1c2c7c5004ab6002e61b04487facd4 --- gnocchi/rest/__init__.py | 5 ++++- gnocchi/tests/gabbi/gabbits/resource_type.yaml | 13 +++++++++++++ 2 files changed, 17 insertions(+), 1 deletion(-) diff --git a/gnocchi/rest/__init__.py b/gnocchi/rest/__init__.py index 9fde91fc..2844455b 100644 --- a/gnocchi/rest/__init__.py +++ b/gnocchi/rest/__init__.py @@ -837,7 +837,10 @@ class ResourceTypesController(rest.RestController): def post(self): schema = pecan.request.indexer.get_resource_type_schema() body = deserialize_and_validate(schema) - rt = schema.resource_type_from_dict(**body) + try: + rt = schema.resource_type_from_dict(**body) + except resource_type.InvalidResourceAttributeName as e: + abort(400, e) enforce("create resource type", body) try: rt = pecan.request.indexer.create_resource_type(rt) diff --git a/gnocchi/tests/gabbi/gabbits/resource_type.yaml b/gnocchi/tests/gabbi/gabbits/resource_type.yaml index 7a1bdeb5..3d710e16 100644 --- a/gnocchi/tests/gabbi/gabbits/resource_type.yaml +++ b/gnocchi/tests/gabbi/gabbits/resource_type.yaml @@ -25,6 +25,19 @@ tests: content-type: application/json status: 403 + - name: post resource type with existing name + url: /v1/resource_type + method: post + request_headers: + x-roles: admin + content-type: application/json + data: + name: my_custom_resource + attributes: + project_id: + type: string + status: 400 + - name: post resource type bad string url: /v1/resource_type method: post -- GitLab From 702672c00c7d2c9896117d200e967dd90dd17232 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Wed, 11 May 2016 16:59:13 +0200 Subject: [PATCH 0191/1483] doc: add a glossary Change-Id: I3bf4d6be8996c3ede0ae58dd14c054bd53982659 --- doc/source/glossary.rst | 33 +++++++++++++++++++++++++++++++++ doc/source/index.rst | 1 + 2 files changed, 34 insertions(+) create mode 100644 doc/source/glossary.rst diff --git a/doc/source/glossary.rst b/doc/source/glossary.rst new file mode 100644 index 00000000..4dcb0b45 --- /dev/null +++ 
b/doc/source/glossary.rst @@ -0,0 +1,33 @@ +======== +Glossary +======== + +.. glossary:: + + Resource + An entity representing anything in your infrastructure that you will + associate metric(s) with. It is identified by a unique ID and can contain + attributes. + + Metric + An entity storing measures identified by an UUID. It can be attached to a + resource using a name. How a metric stores its measure is defined by the + archive policy it is associated to. + + Measure + A datapoint tuple composed of timestamp and a value. + + Archive policy + A measure storage policy attached to a metric. It determines how long + measures will be kept in a metric and how they will be aggregated. + + Granularity + The time between two measures in an aggregated timeseries of a metric. + + Timeseries + A list of measures. + + Aggregation method + Function used to aggregate multiple measures in one. For example, the + `min` aggregation method will aggregate the values of different measures + to the minimum value of all the measures in time range. diff --git a/doc/source/index.rst b/doc/source/index.rst index e16e5e11..a23ef034 100644 --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -67,5 +67,6 @@ Documentation rest statsd grafana + glossary .. _`OpenStack`: http://openstack.org -- GitLab From 1d3c67574a4b61d60eff68507eec8edd416e9bd4 Mon Sep 17 00:00:00 2001 From: gordon chung Date: Mon, 11 Apr 2016 18:40:38 -0400 Subject: [PATCH 0192/1483] support shrinking/extending policy timespan add flexibility to allow users to shrink or expand the coverage of a granularity. this does not support changing the granularity, just the duration of the granularity (if it exists). 
Change-Id: I71715ea75a8d80807616830fd64f1844516d82bc --- doc/source/rest.j2 | 11 ++++ doc/source/rest.yaml | 18 ++++++ etc/gnocchi/policy.json | 1 + gnocchi/indexer/__init__.py | 14 +++++ gnocchi/indexer/sqlalchemy.py | 20 +++++++ gnocchi/rest/__init__.py | 27 +++++++++ gnocchi/tests/gabbi/gabbits/archive.yaml | 75 ++++++++++++++++++++++++ gnocchi/tests/test_indexer.py | 46 +++++++++++++++ gnocchi/tests/test_storage.py | 53 +++++++++++++++++ 9 files changed, 265 insertions(+) diff --git a/doc/source/rest.j2 b/doc/source/rest.j2 index fc43475b..de17eccf 100644 --- a/doc/source/rest.j2 +++ b/doc/source/rest.j2 @@ -159,6 +159,17 @@ It is also possible to list archive policies: {{ scenarios['list-archive-policy']['doc'] }} +Existing archive policies can be modified to retain more or less data depending +on requirements. If the policy coverage is expanded, measures are not +retroactively calculated as backfill to accommodate the new timespan: + +{{ scenarios['update-archive-policy']['doc'] }} + +.. note:: + + Granularities cannot be changed to a different rate. Also, granularities + cannot be added or dropped from a policy. 
+ It is possible to delete an archive policy if it is not used by any metric: {{ scenarios['delete-archive-policy']['doc'] }} diff --git a/doc/source/rest.yaml b/doc/source/rest.yaml index abe62166..782555b8 100644 --- a/doc/source/rest.yaml +++ b/doc/source/rest.yaml @@ -45,6 +45,24 @@ - name: list-archive-policy request: GET /v1/archive_policy HTTP/1.1 +- name: update-archive-policy + request: | + PATCH /v1/archive_policy/{{ scenarios['create-archive-policy']['response'].json['name'] }} HTTP/1.1 + Content-Type: application/json + + { + "definition": [ + { + "granularity": "1s", + "timespan": "1 hour" + }, + { + "points": 48, + "timespan": "1 day" + } + ] + } + - name: create-archive-policy-to-delete request: | POST /v1/archive_policy HTTP/1.1 diff --git a/etc/gnocchi/policy.json b/etc/gnocchi/policy.json index 7987664e..78b0a23a 100644 --- a/etc/gnocchi/policy.json +++ b/etc/gnocchi/policy.json @@ -20,6 +20,7 @@ "get archive policy": "", "list archive policy": "", "create archive policy": "role:admin", + "update archive policy": "role:admin", "delete archive policy": "role:admin", "create archive policy rule": "role:admin", diff --git a/gnocchi/indexer/__init__.py b/gnocchi/indexer/__init__.py index 4ad7bf5a..c62c84fb 100644 --- a/gnocchi/indexer/__init__.py +++ b/gnocchi/indexer/__init__.py @@ -115,6 +115,16 @@ class NoSuchArchivePolicy(IndexerException): self.archive_policy = archive_policy +class UnsupportedArchivePolicyChange(IndexerException): + """Error raised when modifying archive policy if not supported.""" + def __init__(self, archive_policy, message): + super(UnsupportedArchivePolicyChange, self).__init__( + "Archive policy %s does not support change: %s" % + (archive_policy, message)) + self.archive_policy = archive_policy + self.message = message + + class ArchivePolicyInUse(IndexerException): """Error raised when an archive policy is still being used.""" def __init__(self, archive_policy): @@ -284,6 +294,10 @@ class IndexerDriver(object): def 
get_archive_policy(name): raise exceptions.NotImplementedError + @staticmethod + def update_archive_policy(name, ap_items): + raise exceptions.NotImplementedError + @staticmethod def delete_archive_policy(name): raise exceptions.NotImplementedError diff --git a/gnocchi/indexer/sqlalchemy.py b/gnocchi/indexer/sqlalchemy.py index f1de9dac..09fb29a0 100644 --- a/gnocchi/indexer/sqlalchemy.py +++ b/gnocchi/indexer/sqlalchemy.py @@ -311,6 +311,26 @@ class SQLAlchemyIndexer(indexer.IndexerDriver): with self.facade.independent_reader() as session: return session.query(ArchivePolicy).get(name) + def update_archive_policy(self, name, ap_items): + with self.facade.independent_writer() as session: + ap = session.query(ArchivePolicy).get(name) + if not ap: + raise indexer.NoSuchArchivePolicy(name) + current = sorted(ap.definition, + key=operator.attrgetter('granularity')) + new = sorted(ap_items, key=operator.attrgetter('granularity')) + if len(current) != len(new): + raise indexer.UnsupportedArchivePolicyChange( + name, 'Cannot add or drop granularities') + for c, n in zip(current, new): + if c.granularity != n.granularity: + raise indexer.UnsupportedArchivePolicyChange( + name, '%s granularity interval was changed' + % c.granularity) + # NOTE(gordc): ORM doesn't update JSON column unless new + ap.definition = ap_items + return ap + def delete_archive_policy(self, name): constraints = [ "fk_metric_ap_name_ap_name", diff --git a/gnocchi/rest/__init__.py b/gnocchi/rest/__init__.py index 9fde91fc..ff427d6c 100644 --- a/gnocchi/rest/__init__.py +++ b/gnocchi/rest/__init__.py @@ -257,6 +257,33 @@ class ArchivePolicyController(rest.RestController): return ap abort(404, indexer.NoSuchArchivePolicy(self.archive_policy)) + @pecan.expose('json') + def patch(self): + ap = pecan.request.indexer.get_archive_policy(self.archive_policy) + if not ap: + abort(404, indexer.NoSuchArchivePolicy(self.archive_policy)) + enforce("update archive policy", ap) + + body = 
deserialize_and_validate(voluptuous.Schema({ + voluptuous.Required("definition"): + voluptuous.All([{ + "granularity": Timespan, + "points": PositiveNotNullInt, + "timespan": Timespan}], voluptuous.Length(min=1)), + })) + # Validate the data + try: + ap_items = [archive_policy.ArchivePolicyItem(**item) for item in + body['definition']] + except ValueError as e: + abort(400, e) + + try: + return pecan.request.indexer.update_archive_policy( + self.archive_policy, ap_items) + except indexer.UnsupportedArchivePolicyChange as e: + abort(400, e) + @pecan.expose() def delete(self): # NOTE(jd) I don't think there's any point in fetching and passing the diff --git a/gnocchi/tests/gabbi/gabbits/archive.yaml b/gnocchi/tests/gabbi/gabbits/archive.yaml index e25bf865..ba25af7c 100644 --- a/gnocchi/tests/gabbi/gabbits/archive.yaml +++ b/gnocchi/tests/gabbi/gabbits/archive.yaml @@ -189,6 +189,81 @@ tests: accept: text/plain status: 406 +# Update archive policy + + - name: patch archive policy with bad definition + url: /v1/archive_policy/medium + method: PATCH + request_headers: + content-type: application/json + x-roles: admin + data: + definition: + - granularity: 1 second + points: 50 + timespan: 1 hour + - granularity: 2 second + status: 400 + response_strings: + - timespan ≠ granularity × points + + - name: patch archive policy with missing granularity + url: /v1/archive_policy/medium + method: PATCH + request_headers: + content-type: application/json + x-roles: admin + data: + definition: + - granularity: 1 second + points: 50 + status: 400 + response_strings: + - "Archive policy medium does not support change: Cannot add or drop granularities" + + - name: patch archive policy with non-matching granularity + url: /v1/archive_policy/medium + method: PATCH + request_headers: + content-type: application/json + x-roles: admin + data: + definition: + - granularity: 5 second + points: 20 + - granularity: 2 second + status: 400 + response_strings: + - "Archive policy medium does 
not support change: 1.0 granularity interval was changed" + + - name: patch archive policy + url: /v1/archive_policy/medium + method: PATCH + request_headers: + content-type: application/json + x-roles: admin + data: + definition: + - granularity: 1 second + points: 50 + - granularity: 2 second + status: 200 + response_json_paths: + $.name: medium + $.definition[0].granularity: "0:00:01" + $.definition[0].points: 50 + $.definition[0].timespan: "0:00:50" + + - name: get patched archive policy + url: /v1/archive_policy/medium + response_headers: + content-type: /application/json/ + response_json_paths: + $.name: medium + $.definition[0].granularity: "0:00:01" + $.definition[0].points: 50 + $.definition[0].timespan: "0:00:50" + # Unexpected methods - name: post single archive diff --git a/gnocchi/tests/test_indexer.py b/gnocchi/tests/test_indexer.py index b4e5a98a..a800b995 100644 --- a/gnocchi/tests/test_indexer.py +++ b/gnocchi/tests/test_indexer.py @@ -50,6 +50,52 @@ class TestIndexerDriver(tests_base.TestCase): {u'granularity': 86400, u'points': 30, u'timespan': 2592000}], 'name': u'low'}, dict(ap)) + def test_update_archive_policy(self): + self.assertRaises(indexer.UnsupportedArchivePolicyChange, + self.index.update_archive_policy, "low", + [archive_policy.ArchivePolicyItem(granularity=300, + points=10)]) + self.assertRaises(indexer.UnsupportedArchivePolicyChange, + self.index.update_archive_policy, "low", + [archive_policy.ArchivePolicyItem(granularity=300, + points=12), + archive_policy.ArchivePolicyItem(granularity=3600, + points=12), + archive_policy.ArchivePolicyItem(granularity=5, + points=6)]) + ap = self.index.update_archive_policy( + "low", [archive_policy.ArchivePolicyItem(granularity=300, + points=6), + archive_policy.ArchivePolicyItem(granularity=3600, + points=24), + archive_policy.ArchivePolicyItem(granularity=86400, + points=30)]) + self.assertEqual({ + 'back_window': 0, + 'aggregation_methods': + 
set(self.conf.archive_policy.default_aggregation_methods), + 'definition': [ + {u'granularity': 300, u'points': 6, u'timespan': 1800}, + {u'granularity': 3600, u'points': 24, u'timespan': 86400}, + {u'granularity': 86400, u'points': 30, u'timespan': 2592000}], + 'name': u'low'}, dict(ap)) + ap = self.index.update_archive_policy( + "low", [archive_policy.ArchivePolicyItem(granularity=300, + points=12), + archive_policy.ArchivePolicyItem(granularity=3600, + points=24), + archive_policy.ArchivePolicyItem(granularity=86400, + points=30)]) + self.assertEqual({ + 'back_window': 0, + 'aggregation_methods': + set(self.conf.archive_policy.default_aggregation_methods), + 'definition': [ + {u'granularity': 300, u'points': 12, u'timespan': 3600}, + {u'granularity': 3600, u'points': 24, u'timespan': 86400}, + {u'granularity': 86400, u'points': 30, u'timespan': 2592000}], + 'name': u'low'}, dict(ap)) + def test_delete_archive_policy(self): name = str(uuid.uuid4()) self.index.create_archive_policy( diff --git a/gnocchi/tests/test_storage.py b/gnocchi/tests/test_storage.py index ede191d7..c36a16c9 100644 --- a/gnocchi/tests/test_storage.py +++ b/gnocchi/tests/test_storage.py @@ -21,6 +21,7 @@ from oslo_utils import timeutils from oslotest import base import six.moves +from gnocchi import archive_policy from gnocchi import carbonara from gnocchi import storage from gnocchi.storage import _carbonara @@ -520,6 +521,58 @@ class TestStorageDriver(tests_base.TestCase): {u"eq": 100}, {u"≠": 50}]})) + def test_resize_policy(self): + name = str(uuid.uuid4()) + ap = archive_policy.ArchivePolicy(name, 0, [(3, 5)]) + self.index.create_archive_policy(ap) + m = storage.Metric(uuid.uuid4(), ap) + self.index.create_metric(m.id, str(uuid.uuid4()), + str(uuid.uuid4()), name) + self.storage.add_measures(m, [ + storage.Measure(datetime.datetime(2014, 1, 1, 12, 0, 0), 1), + storage.Measure(datetime.datetime(2014, 1, 1, 12, 0, 5), 1), + storage.Measure(datetime.datetime(2014, 1, 1, 12, 0, 10), 1), + ]) 
+ self.storage.process_background_tasks(self.index, sync=True) + self.assertEqual([ + (utils.datetime_utc(2014, 1, 1, 12, 0, 0), 5.0, 1.0), + (utils.datetime_utc(2014, 1, 1, 12, 0, 5), 5.0, 1.0), + (utils.datetime_utc(2014, 1, 1, 12, 0, 10), 5.0, 1.0), + ], self.storage.get_measures(m)) + # expand to more points + self.index.update_archive_policy( + name, [archive_policy.ArchivePolicyItem(granularity=5, points=6)]) + self.storage.add_measures(m, [ + storage.Measure(datetime.datetime(2014, 1, 1, 12, 0, 15), 1), + ]) + self.storage.process_background_tasks(self.index, sync=True) + self.assertEqual([ + (utils.datetime_utc(2014, 1, 1, 12, 0, 0), 5.0, 1.0), + (utils.datetime_utc(2014, 1, 1, 12, 0, 5), 5.0, 1.0), + (utils.datetime_utc(2014, 1, 1, 12, 0, 10), 5.0, 1.0), + (utils.datetime_utc(2014, 1, 1, 12, 0, 15), 5.0, 1.0), + ], self.storage.get_measures(m)) + # shrink timespan + self.index.update_archive_policy( + name, [archive_policy.ArchivePolicyItem(granularity=5, points=2)]) + # unchanged after update if no samples + self.storage.process_background_tasks(self.index, sync=True) + self.assertEqual([ + (utils.datetime_utc(2014, 1, 1, 12, 0, 0), 5.0, 1.0), + (utils.datetime_utc(2014, 1, 1, 12, 0, 5), 5.0, 1.0), + (utils.datetime_utc(2014, 1, 1, 12, 0, 10), 5.0, 1.0), + (utils.datetime_utc(2014, 1, 1, 12, 0, 15), 5.0, 1.0), + ], self.storage.get_measures(m)) + # drop points + self.storage.add_measures(m, [ + storage.Measure(datetime.datetime(2014, 1, 1, 12, 0, 20), 1), + ]) + self.storage.process_background_tasks(self.index, sync=True) + self.assertEqual([ + (utils.datetime_utc(2014, 1, 1, 12, 0, 15), 5.0, 1.0), + (utils.datetime_utc(2014, 1, 1, 12, 0, 20), 5.0, 1.0), + ], self.storage.get_measures(m)) + class TestMeasureQuery(base.BaseTestCase): def test_equal(self): -- GitLab From 99d91a0b0b12df51af4327d03092213f2c929d1c Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Fri, 13 May 2016 13:29:07 +0200 Subject: [PATCH 0193/1483] sql: default to pymysql It's utopic to 
think the user is going to write a mysql+pymysql:// URL by default. Let's do that for them. Change-Id: I00f050423fd86ffd51d7ef2bb7f5e5928620f412 --- gnocchi/indexer/sqlalchemy.py | 14 +++++++++++++- gnocchi/tests/gabbi/fixtures.py | 5 ++++- run-tests.sh | 5 ++--- setup-test-env.sh | 13 ------------- setup.cfg | 2 +- tox.ini | 10 +++++----- 6 files changed, 25 insertions(+), 24 deletions(-) delete mode 100755 setup-test-env.sh diff --git a/gnocchi/indexer/sqlalchemy.py b/gnocchi/indexer/sqlalchemy.py index f1de9dac..a68225ca 100644 --- a/gnocchi/indexer/sqlalchemy.py +++ b/gnocchi/indexer/sqlalchemy.py @@ -27,6 +27,7 @@ from oslo_db.sqlalchemy import utils as oslo_db_utils from oslo_log import log import six import sqlalchemy +from sqlalchemy.engine import url as sqlalchemy_url from sqlalchemy import types import sqlalchemy_utils @@ -178,8 +179,19 @@ class ResourceClassMapper(object): class SQLAlchemyIndexer(indexer.IndexerDriver): _RESOURCE_TYPE_MANAGER = ResourceClassMapper() + @staticmethod + def dress_url(url): + # If no explicit driver has been set, we default to pymysql + if url.startswith("mysql://"): + url = sqlalchemy_url.make_url(url) + url.drivername = "mysql+pymysql" + return str(url) + return url + def __init__(self, conf): - conf.set_override("connection", conf.indexer.url, "database") + conf.set_override("connection", + self.dress_url(conf.indexer.url), + "database") self.conf = conf self.facade = PerInstanceFacade(conf) diff --git a/gnocchi/tests/gabbi/fixtures.py b/gnocchi/tests/gabbi/fixtures.py index b6ee59c5..bbf460f2 100644 --- a/gnocchi/tests/gabbi/fixtures.py +++ b/gnocchi/tests/gabbi/fixtures.py @@ -28,6 +28,7 @@ import sqlalchemy.engine.url as sqlalchemy_url import sqlalchemy_utils from gnocchi import indexer +from gnocchi.indexer import sqlalchemy from gnocchi.rest import app from gnocchi import service from gnocchi import storage @@ -102,7 +103,9 @@ class ConfigFixture(fixture.GabbiFixture): # NOTE(jd) All of that is still very SQL centric 
but we only support # SQL for now so let's say it's good enough. - url = sqlalchemy_url.make_url(conf.indexer.url) + url = sqlalchemy_url.make_url( + sqlalchemy.SQLAlchemyIndexer.dress_url( + conf.indexer.url)) url.database = url.database + str(uuid.uuid4()).replace('-', '') db_url = str(url) diff --git a/run-tests.sh b/run-tests.sh index 492725da..06c35801 100755 --- a/run-tests.sh +++ b/run-tests.sh @@ -4,10 +4,9 @@ GNOCCHI_TEST_STORAGE_DRIVERS=${GNOCCHI_TEST_STORAGE_DRIVERS:-file} GNOCCHI_TEST_INDEXER_DRIVERS=${GNOCCHI_TEST_INDEXER_DRIVERS:-postgresql} for storage in ${GNOCCHI_TEST_STORAGE_DRIVERS} do + export GNOCCHI_TEST_STORAGE_DRIVER=$storage for indexer in ${GNOCCHI_TEST_INDEXER_DRIVERS} do - export GNOCCHI_TEST_INDEXER_DRIVER=$indexer - export GNOCCHI_TEST_STORAGE_DRIVER=$storage - ./setup-test-env.sh ./tools/pretty_tox.sh $* + pifpaf -g GNOCCHI_INDEXER_URL run $indexer -- ./tools/pretty_tox.sh $* done done diff --git a/setup-test-env.sh b/setup-test-env.sh deleted file mode 100755 index 7d7789fa..00000000 --- a/setup-test-env.sh +++ /dev/null @@ -1,13 +0,0 @@ -#!/bin/bash -set -e -set -x -# Activate pifpaf for indexer -GNOCCHI_TEST_INDEXER_DRIVER=${GNOCCHI_TEST_INDEXER_DRIVER:-postgresql} -eval `pifpaf run $GNOCCHI_TEST_INDEXER_DRIVER` -kill_pifpaf () -{ - test -n "$PIFPAF_PID" && kill "$PIFPAF_PID" -} -trap kill_pifpaf EXIT -export GNOCCHI_INDEXER_URL=${PIFPAF_URL/#mysql:/mysql+pymysql:} -$* diff --git a/setup.cfg b/setup.cfg index 0ecc34b8..fa9a61ea 100644 --- a/setup.cfg +++ b/setup.cfg @@ -57,7 +57,7 @@ doc = PyYAML Jinja2 test = - pifpaf + pifpaf>=0.1.0 gabbi>=0.101.2 coverage>=3.6 fixtures diff --git a/tox.ini b/tox.ini index 702871be..d446a621 100644 --- a/tox.ini +++ b/tox.ini @@ -47,15 +47,15 @@ commands = {toxinidir}/tools/pretty_tox.sh '{posargs}' deps = .[test,postgresql,file] setenv = OS_TEST_PATH=gnocchi/tests/gabbi basepython = python2.7 -commands = {toxinidir}/setup-test-env.sh {toxinidir}/tools/pretty_tox.sh '{posargs}' +commands = pifpaf 
-g GNOCCHI_INDEXER_URL run postgresql -- {toxinidir}/tools/pretty_tox.sh '{posargs}' [testenv:py27-cover] -commands = {toxinidir}/setup-test-env.sh python setup.py testr --coverage --testr-args="{posargs}" +commands = pifpaf -g GNOCCHI_INDEXER_URL run postgresql -- python setup.py testr --coverage --testr-args="{posargs}" [testenv:venv] # This is used by the doc job on the gate deps = {[testenv:docs]deps} -commands = {toxinidir}/setup-test-env.sh {posargs} +commands = pifpaf -g GNOCCHI_INDEXER_URL run postgresql -- {posargs} [flake8] exclude = .tox,.eggs,doc @@ -73,9 +73,9 @@ deps = .[test,postgresql,file,doc] setenv = GNOCCHI_TEST_STORAGE_DRIVER=file GNOCCHI_TEST_INDEXER_DRIVER=postgresql commands = doc8 --ignore-path doc/source/rest.rst doc/source - {toxinidir}/setup-test-env.sh python setup.py build_sphinx + pifpaf -g GNOCCHI_INDEXER_URL run postgresql -- python setup.py build_sphinx [testenv:docs-gnocchi.xyz] deps = .[file,postgresql,test,doc] sphinx_rtd_theme -commands = {toxinidir}/setup-test-env.sh sphinx-build -D html_theme=sphinx_rtd_theme doc/source doc/build/html +commands = pifpaf -g GNOCCHI_INDEXER_URL run postgresql -- sphinx-build -D html_theme=sphinx_rtd_theme doc/source doc/build/html -- GitLab From 6319a1243d4ea32955e3b98dd3477fb64b46b7ff Mon Sep 17 00:00:00 2001 From: MikeG451 Date: Wed, 18 May 2016 12:18:01 -0700 Subject: [PATCH 0194/1483] Backlog status not accurate when batching Change-Id: I995997168f547a5a9d372250951e82c20438ae79 Closes-bug: #1582976 --- doc/source/running.rst | 3 +++ 1 file changed, 3 insertions(+) diff --git a/doc/source/running.rst b/doc/source/running.rst index 253b6f9e..3e83dfbf 100644 --- a/doc/source/running.rst +++ b/doc/source/running.rst @@ -60,6 +60,9 @@ monitor (see `How many metricd workers do we need to run`_). Making sure that the HTTP server and `gnocchi-metricd` daemon are running and are not writing anything alarming in their logs is a sign of good health of the overall system. 
+Total measures for backlog status may not accurately reflect the number of +points to be processed when measures are submitted via batch. + How to backup and restore Gnocchi ================================= -- GitLab From b502fa1b4a58b591a64d3e4cfe68c452f3aa39d5 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Fri, 13 May 2016 11:52:20 +0200 Subject: [PATCH 0195/1483] devstack: remove support for old devstack ceph plugin Change-Id: Ie71d2e078e135d2f247b0adc97c1c1c2c43a89f0 --- devstack/plugin.sh | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/devstack/plugin.sh b/devstack/plugin.sh index ed3b5904..612336b0 100644 --- a/devstack/plugin.sh +++ b/devstack/plugin.sh @@ -68,13 +68,7 @@ function is_gnocchi_enabled { # Test if a Ceph services are enabled # _is_ceph_enabled function _is_ceph_enabled { - if is_service_enabled ceph; then - # Old ceph setup - return 0 - elif type is_ceph_enabled_for_service >/dev/null 2>&1; then - # New devstack-plugin-ceph - return 0 - fi + type is_ceph_enabled_for_service >/dev/null 2>&1 && return 0 return 1 } -- GitLab From 2a4b0b5c6c3ef63503060b4f8a90c39a6f24830d Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Fri, 13 May 2016 08:41:58 +0200 Subject: [PATCH 0196/1483] gate: remove old job conf Change-Id: I4d3e18fa0d0f770daa194ac5d472141d5da4b4b7 --- devstack/gate/gate_hook.sh | 4 ---- 1 file changed, 4 deletions(-) diff --git a/devstack/gate/gate_hook.sh b/devstack/gate/gate_hook.sh index e9ba013d..c01d37a0 100755 --- a/devstack/gate/gate_hook.sh +++ b/devstack/gate/gate_hook.sh @@ -41,10 +41,6 @@ case $STORAGE_DRIVER in DEVSTACK_GATE_TEMPEST+=$'\nexport SWIFT_USE_MOD_WSGI=True' ;; ceph) - if [ "${PROJECTS//devstack-plugin-ceph/}" == "$PROJECTS" ]; then - # Old fashion ceph plugin - ENABLED_SERVICES+="ceph" - fi DEVSTACK_LOCAL_CONFIG+=$'\nexport GNOCCHI_STORAGE_BACKEND=ceph' ;; esac -- GitLab From 52846b408ab51840e592cfec3e177d0da97a60fb Mon Sep 17 00:00:00 2001 From: Thomas Goirand Date: Thu, 19 May 2016 
11:31:52 +0200 Subject: [PATCH 0197/1483] * Updated Dutch debconf templates (Closes: #823439). * Updated Brazilian Portuguese debconf templates (Closes: #824292). --- debian/changelog | 2 ++ debian/po/nl.po | 88 ++++++++++++++++++++++++---------------------- debian/po/pt_BR.po | 49 ++++++++++---------------- 3 files changed, 65 insertions(+), 74 deletions(-) diff --git a/debian/changelog b/debian/changelog index f5c97254..238930e8 100644 --- a/debian/changelog +++ b/debian/changelog @@ -5,6 +5,8 @@ gnocchi (2.0.2-6) UNRELEASED; urgency=medium [ Thomas Goirand ] * Added Japanese debconf templates translation update (Closes: #820769). + * Updated Dutch debconf templates (Closes: #823439). + * Updated Brazilian Portuguese debconf templates (Closes: #824292). -- Thomas Goirand Sat, 23 Apr 2016 18:54:09 +0200 diff --git a/debian/po/nl.po b/debian/po/nl.po index 49d9e65a..7d0b79cb 100644 --- a/debian/po/nl.po +++ b/debian/po/nl.po @@ -6,10 +6,10 @@ # msgid "" msgstr "" -"Project-Id-Version: gnocchi_1.3.0-4\n" +"Project-Id-Version: gnocchi_2.0.2-4\n" "Report-Msgid-Bugs-To: gnocchi@packages.debian.org\n" "POT-Creation-Date: 2016-03-29 13:10+0000\n" -"PO-Revision-Date: 2016-01-12 16:31+0100\n" +"PO-Revision-Date: 2016-04-24 12:07+0200\n" "Last-Translator: Frans Spiesschaert \n" "Language-Team: Debian Dutch l10n Team \n" "Language: nl\n" @@ -33,8 +33,8 @@ msgid "" "Typically this is also the hostname of the OpenStack Identity Service " "(Keystone)." msgstr "" -"Geef de computernaam van de authenticatieserver voor Gnocchi. Meestal is dit " -"ook de computernaam van de OpenStack identiteitsserver (Keystone)." +"Geef de computernaam van de authenticatieserver voor Gnocchi op. Meestal is " +"dit ook de computernaam van de OpenStack Identiteitsdienst (Keystone)." #. Type: string #. Description @@ -47,7 +47,7 @@ msgstr "" #. 
locataire ("tenant") #: ../gnocchi-common.templates:3001 msgid "Authentication server tenant name:" -msgstr "Naam van de clientruimte (tenant) op de authenticatieserver:" +msgstr "Naam van de cliëntruimte (tenant) op de authenticatieserver:" #. Type: string #. Description @@ -61,38 +61,39 @@ msgstr "Naam van de clientruimte (tenant) op de authenticatieserver:" #: ../gnocchi-common.templates:3001 msgid "Please specify the authentication server tenant name." msgstr "" -"Gelieve de naam te vermelden van de clientruimte (tenant) op de " -"authenticatieserver" +"Geef de naam op van de cliëntruimte (tenant) op de authenticatieserver." #. Type: string #. Description #: ../gnocchi-common.templates:4001 msgid "Authentication server username:" -msgstr "Gebruikersnaam voor de authenticatieserver:" +msgstr "Gebruikersnaam op de authenticatieserver:" #. Type: string #. Description #: ../gnocchi-common.templates:4001 msgid "Please specify the username to use with the authentication server." -msgstr "Geef de gebruikersnaam op voor de authenticatieserver." +msgstr "" +"Geef de gebruikersnaam op die op de authenticatieserver gebruikt moet worden." #. Type: password #. Description #: ../gnocchi-common.templates:5001 msgid "Authentication server password:" -msgstr "Wachtwoord voor de authenticatieserver:" +msgstr "Wachtwoord op de authenticatieserver:" #. Type: password #. Description #: ../gnocchi-common.templates:5001 msgid "Please specify the password to use with the authentication server." -msgstr "Geef het wachtwoord op voor de authenticatieserver." +msgstr "" +"Geef het wachtwoord op dat op de authenticatieserver gebruikt moet worden." #. Type: boolean #. Description #: ../gnocchi-common.templates:6001 msgid "Set up a database for Gnocchi?" -msgstr "Een database voor Gnocchi opzetten?" +msgstr "Een database opzetten voor Gnocchi?" #. Type: boolean #. Description @@ -101,8 +102,8 @@ msgid "" "No database has been set up for Gnocchi to use. 
Before continuing, you " "should make sure you have the following information:" msgstr "" -"Er werd geen database opgezet voor het register van Gnocchi noch voor de API " -"van Gnocchi. Voor u doorgaat moet u beschikken over de volgende informatie:" +"Er werd geen database opgezet om door Gnocchi gebruikt te worden. Voor u " +"doorgaat moet u beschikken over de volgende informatie:" #. Type: boolean #. Description @@ -114,10 +115,11 @@ msgid "" " machine);\n" " * a username and password to access the database." msgstr "" -" * het type database dat u wenst te gebruiken;\n" -" * de computernaam van de databeseserver (die server moet\n" -" TCP-verbindingen vanaf deze computer accepteren);\n" -" * een gebruikersnaam en wachtwoord voor toegang tot de database." +" * het soort database dat u wilt gebruiken;\n" +" * de computernaam van de databaseserver (die server moet\n" +" TCP-verbindingen vanaf deze computer toestaan);\n" +" * een gebruikersnaam en een wachtwoord om toegang te krijgen tot de " +"database." #. Type: boolean #. Description @@ -136,14 +138,14 @@ msgid "" "You can change this setting later on by running \"dpkg-reconfigure -plow " "gnocchi-common\"." msgstr "" -"U kunt deze instelling later wijzigen met het commando \"dpkg-reconfigure -" -"plow gnocchi-common\"." +"U kunt deze instelling later wijzigen door het uitvoeren van \"dpkg-" +"reconfigure -plow gnocchi-common\"." #. Type: boolean #. Description #: ../gnocchi-api.templates:2001 msgid "Register Gnocchi in the Keystone endpoint catalog?" -msgstr "Gnocchi registreren in de catalogus van toegangspunten van keystone?" +msgstr "Gnocchi opnemen in de catalogus van Keystone-toegangspunten?" #. Type: boolean #. Description @@ -153,14 +155,13 @@ msgid "" "accessible. This is done using \"keystone service-create\" and \"keystone " "endpoint-create\". This can be done automatically now." msgstr "" -"Elke OpenStackdienst (elke API) moet geregistreerd worden om toegankelijk te " -"zijn. 
Dit gebeurt aan de hand van \"keystone service-create\" en \"keystone " -"endpoint-create\". Dit kan nu automatisch gedaan worden." +"Elke dienst van OpenStack (elke API) moet geregistreerd staan om " +"toegankelijk te zijn. Dit gebeurt met de opdrachten \"keystone service-create" +"\" en \"keystone endpoint-create\". Dit kan nu automatisch uitgevoerd worden." #. Type: boolean #. Description #: ../gnocchi-api.templates:2001 -#, fuzzy #| msgid "" #| "Note that you will need to have an up and running Keystone server on " #| "which to connect using the Keystone authentication token." @@ -169,9 +170,10 @@ msgid "" "to connect using a known admin project name, admin username and password. " "The admin auth token is not used anymore." msgstr "" -"Merk op dat u hiervoor een volledig werkende keystone-server nodig heeft, " -"waarmee een verbinding gemaakt wordt met behulp van het authenticatiebewijs " -"voor Keystone." +"Merk op dat u een functionerende Keystone-server moet hebben om er een " +"verbinding mee te maken met behulp van een gekende beheerdersprojectnaam, " +"beheerdersgebruikersnaam en wachtwoord. Het beheerderslegitimatiebewijs " +"wordt niet langer gebruikt." #. Type: string #. Description @@ -186,17 +188,15 @@ msgid "" "Please enter the IP address of the Keystone server, so that gnocchi-api can " "contact Keystone to do the Gnocchi service and endpoint creation." msgstr "" -"Gelieve het IP-adres van de Keystone-server op te geven, zodat glance-api " -"met Keystone kan verbinden om de Gnocchi-service en het troegangspunt aan te " -"maken." +"Geef het IP-adres van de Keystone-server op, zodat glance-api Keystone kan " +"contacteren om de Gnocchidienst en het toegangspunt aan te maken." #. Type: string #. Description #: ../gnocchi-api.templates:4001 -#, fuzzy #| msgid "Keystone authentication token:" msgid "Keystone admin name:" -msgstr "Authenticatiebewijs voor Keystone:" +msgstr "Naam van de beheerder voor Keystone:" #. Type: string #. 
Description @@ -210,32 +210,34 @@ msgid "" "To register the service endpoint, this package needs to know the Admin " "login, name, project name, and password to the Keystone server." msgstr "" +"Om het toegangspunt van de dienst te registreren moet dit pakket de " +"inloggegevens voor de Keystone-server van de beheerder kennen, naam, " +"projectnaam en wachtwoord." #. Type: string #. Description #: ../gnocchi-api.templates:5001 msgid "Keystone admin project name:" -msgstr "" +msgstr "Naam van het project van de beheerder voor Keystone:" #. Type: password #. Description #: ../gnocchi-api.templates:6001 msgid "Keystone admin password:" -msgstr "" +msgstr "Wachtwoord van de beheerder voor Keystone:" #. Type: string #. Description #: ../gnocchi-api.templates:7001 msgid "Gnocchi endpoint IP address:" -msgstr "IP-adres van het toegangspunt voor Gnocchi:" +msgstr "IP-adres van het toegangspunt van Gnocchi:" #. Type: string #. Description #: ../gnocchi-api.templates:7001 msgid "Please enter the IP address that will be used to contact Gnocchi." msgstr "" -"Gelieve het IP-adres in te voeren dat gebruikt zal worden om contact te " -"maken met Gnocchi." +"Geef het IP-adres op dat gebruikt zal worden voor het contact met Gnocchi." #. Type: string #. Description @@ -245,15 +247,15 @@ msgid "" "service, so if you are installing a public cloud, this should be a public IP " "address." msgstr "" -"Dit IP-adres moet bereikbaar zijn voor de clients die van deze service " -"gebruik zullen maken. Indien u een openbare cloud installeert, moet dit dus " -"een algemeen bereikbaar IP-adres zijn." +"Dit IP-adres moet bereikbaar zijn voor de clients die deze dienst zullen " +"gebruiken. Indien u een openbare cloud installeert, moet dit dus een " +"algemeen bereikbaar IP-adres zijn." #. Type: string #. Description #: ../gnocchi-api.templates:8001 msgid "Name of the region to register:" -msgstr "Naam van de te registreren regio:" +msgstr "Naam van de registratieregio:" #. Type: string #. 
Description @@ -263,7 +265,7 @@ msgid "" "location. Please enter the zone that you wish to use when registering the " "endpoint." msgstr "" -"Openstack ondersteunt het gebruik van zones van beschikbaarheid, waarbij " +"OpenStack ondersteunt het gebruik van zones van beschikbaarheid, waarbij " "elke regio een locatie vertegenwoordigt. Geef aan welke zone u wenst te " "gebruiken bij het registreren van het toegangspunt." diff --git a/debian/po/pt_BR.po b/debian/po/pt_BR.po index 5ca122cf..5725212e 100644 --- a/debian/po/pt_BR.po +++ b/debian/po/pt_BR.po @@ -1,14 +1,14 @@ -# Debconf translations for glance. -# Copyright (C) 2012 THE glance'S COPYRIGHT HOLDER -# This file is distributed under the same license as the glance package. -# Adriano Rafael Gomes , 2012-2014. +# Debconf translations for gnocchi. +# Copyright (C) 2012 THE gnocchi'S COPYRIGHT HOLDER +# This file is distributed under the same license as the gnocchi package. +# Adriano Rafael Gomes , 2012-2016. # msgid "" msgstr "" -"Project-Id-Version: glance 2014.1.2-1\n" +"Project-Id-Version: gnocchi\n" "Report-Msgid-Bugs-To: gnocchi@packages.debian.org\n" "POT-Creation-Date: 2016-03-29 13:10+0000\n" -"PO-Revision-Date: 2014-09-04 08:49-0300\n" +"PO-Revision-Date: 2016-04-30 16:34-0300\n" "Last-Translator: Adriano Rafael Gomes \n" "Language-Team: Brazilian Portuguese \n" @@ -99,18 +99,12 @@ msgstr "Configurar um banco de dados para o Gnocchi?" #. Type: boolean #. Description #: ../gnocchi-common.templates:6001 -#, fuzzy -#| msgid "" -#| "No database has been set up for glance-registry or glance-api to use. " -#| "Before continuing, you should make sure you have the following " -#| "information:" msgid "" "No database has been set up for Gnocchi to use. Before continuing, you " "should make sure you have the following information:" msgstr "" -"Nenhum banco de dados foi configurado para o glance-registry ou para o " -"glance-api utilizar. 
Antes de continuar, você deve se certificar que você " -"tem as seguintes informações:" +"Nenhum banco de dados foi configurado para o Gnocchi utilizar. Antes de " +"continuar, você deve se certificar que você tem as seguintes informações:" #. Type: boolean #. Description @@ -140,16 +134,12 @@ msgstr "" #. Type: boolean #. Description #: ../gnocchi-common.templates:6001 -#, fuzzy -#| msgid "" -#| "You can change this setting later on by running \"dpkg-reconfigure -plow " -#| "glance-common\"." msgid "" "You can change this setting later on by running \"dpkg-reconfigure -plow " "gnocchi-common\"." msgstr "" "Você pode mudar essa configuração depois, executando \"dpkg-reconfigure -" -"plow glance-common\"." +"plow gnocchi-common\"." #. Type: boolean #. Description @@ -172,17 +162,15 @@ msgstr "" #. Type: boolean #. Description #: ../gnocchi-api.templates:2001 -#, fuzzy -#| msgid "" -#| "Note that you will need to have an up and running Keystone server on " -#| "which to connect using the Keystone authentication token." msgid "" "Note that you will need to have an up and running Keystone server on which " "to connect using a known admin project name, admin username and password. " "The admin auth token is not used anymore." msgstr "" "Note que você precisará ter um servidor Keystone configurado e em execução " -"no qual conectar usando o \"token\" de autenticação do Keystone." +"no qual conectar usando um nome de projeto de admin, nome de usuário de " +"admin e senha conhecidos. O \"token\" de autenticação do admin não é mais " +"usado." #. Type: string #. Description @@ -204,10 +192,8 @@ msgstr "" #. Type: string #. Description #: ../gnocchi-api.templates:4001 -#, fuzzy -#| msgid "Keystone authentication token:" msgid "Keystone admin name:" -msgstr "\"Token\" de autenticação Keystone:" +msgstr "Nome de admin do Keystone:" #. Type: string #. 
Description @@ -221,18 +207,20 @@ msgid "" "To register the service endpoint, this package needs to know the Admin " "login, name, project name, and password to the Keystone server." msgstr "" +"Para registrar o \"endpoint\" do serviço, esse pacote precisa saber o login, " +"nome, nome do projeto e senha do Admin no servidor Keystone." #. Type: string #. Description #: ../gnocchi-api.templates:5001 msgid "Keystone admin project name:" -msgstr "" +msgstr "Nome do projeto admin no Keystone:" #. Type: password #. Description #: ../gnocchi-api.templates:6001 msgid "Keystone admin password:" -msgstr "" +msgstr "Senha do admin no Keystone:" #. Type: string #. Description @@ -277,7 +265,6 @@ msgstr "" "representando uma localidade. Por favor, informe a zona que você deseja usar " "ao registrar o \"endpoint\"." -#, fuzzy #~| msgid "" #~| "To configure its endpoint in Keystone, glance-api needs the Keystone " #~| "authentication token." @@ -285,5 +272,5 @@ msgstr "" #~ "To configure its endpoint in Keystone, gnocchi-api needs the Keystone " #~ "authentication token." #~ msgstr "" -#~ "Para configurar o seu \"endpoint\" no Keystone, o glance-api precisa do " +#~ "Para configurar o seu \"endpoint\" no Keystone, o gnocchi-api precisa do " #~ "\"token\" de autenticação do Keystone." -- GitLab From 44ba45297f2170deef3fa45107e5f85724615d77 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ond=C5=99ej=20Nov=C3=BD?= Date: Thu, 19 May 2016 20:55:53 +0200 Subject: [PATCH 0198/1483] d/rules: Removed UPSTREAM_GIT with default value --- debian/changelog | 3 +++ debian/rules | 1 - 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/debian/changelog b/debian/changelog index 238930e8..92c212ad 100644 --- a/debian/changelog +++ b/debian/changelog @@ -8,6 +8,9 @@ gnocchi (2.0.2-6) UNRELEASED; urgency=medium * Updated Dutch debconf templates (Closes: #823439). * Updated Brazilian Portuguese debconf templates (Closes: #824292). 
+ [ Ondřej Nový ] + * d/rules: Removed UPSTREAM_GIT with default value + -- Thomas Goirand Sat, 23 Apr 2016 18:54:09 +0200 gnocchi (2.0.2-4) unstable; urgency=medium diff --git a/debian/rules b/debian/rules index 1b9f1daa..082d2b44 100755 --- a/debian/rules +++ b/debian/rules @@ -3,7 +3,6 @@ PYTHONS:=$(shell pyversions -vr) #PYTHON3S:=$(shell py3versions -vr) -UPSTREAM_GIT = git://github.com/openstack/gnocchi.git include /usr/share/openstack-pkg-tools/pkgos.make export OSLO_PACKAGE_VERSION=$(VERSION) -- GitLab From 6f7efa475e31389985568e2f82224ddf1037aafc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ond=C5=99ej=20Nov=C3=BD?= Date: Thu, 19 May 2016 20:56:00 +0200 Subject: [PATCH 0199/1483] d/copyright: Changed source URL to https protocol --- debian/changelog | 1 + debian/copyright | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/debian/changelog b/debian/changelog index 92c212ad..84c8f346 100644 --- a/debian/changelog +++ b/debian/changelog @@ -10,6 +10,7 @@ gnocchi (2.0.2-6) UNRELEASED; urgency=medium [ Ondřej Nový ] * d/rules: Removed UPSTREAM_GIT with default value + * d/copyright: Changed source URL to https protocol -- Thomas Goirand Sat, 23 Apr 2016 18:54:09 +0200 diff --git a/debian/copyright b/debian/copyright index 91dd8ca1..5d11d0d6 100644 --- a/debian/copyright +++ b/debian/copyright @@ -1,7 +1,7 @@ Format: http://www.debian.org/doc/packaging-manuals/copyright-format/1.0/ Upstream-Name: gnocchi Upstream-Contact: Julien Danjou -Source: git://github.com/openstack/gnocchi.git +Source: https://github.com/openstack/gnocchi Files: * Copyright: (c) 2014-2015, Julien Danjou -- GitLab From 1baa714e6860818949f84d5b8f505eaccc03e91a Mon Sep 17 00:00:00 2001 From: gordon chung Date: Thu, 19 May 2016 19:30:34 -0400 Subject: [PATCH 0200/1483] fix details filter for measures report details param is always a string when provided as a param. we can not turn off detailed filtering. 
this ensures it's handled as a boolean Change-Id: Ie50db5fce99dda2fe2f81beab4f4b7bbbeae5a40 --- gnocchi/rest/__init__.py | 3 ++- gnocchi/tests/gabbi/gabbits/base.yaml | 20 ++++++++++++++++++++ 2 files changed, 22 insertions(+), 1 deletion(-) diff --git a/gnocchi/rest/__init__.py b/gnocchi/rest/__init__.py index ff427d6c..9d2ea947 100644 --- a/gnocchi/rest/__init__.py +++ b/gnocchi/rest/__init__.py @@ -1411,7 +1411,8 @@ class StatusController(rest.RestController): @pecan.expose('json') def get(details=True): enforce("get status", {}) - report = pecan.request.storage.measures_report(details) + report = pecan.request.storage.measures_report( + strutils.bool_from_string(details)) report_dict = {"storage": {"summary": report['summary']}} if 'details' in report: report_dict["storage"]["measures_to_process"] = report['details'] diff --git a/gnocchi/tests/gabbi/gabbits/base.yaml b/gnocchi/tests/gabbi/gabbits/base.yaml index fb5a88b8..31971bbd 100644 --- a/gnocchi/tests/gabbi/gabbits/base.yaml +++ b/gnocchi/tests/gabbi/gabbits/base.yaml @@ -147,3 +147,23 @@ tests: - name: get status denied url: /v1/status status: 403 + +- name: get status + url: /v1/status + request_headers: + content-type: application/json + x-user-id: 93180da9-7c15-40d3-a050-a374551e52ee + x-project-id: 99d13f22-3618-4288-82b8-6512ded77e4f + x-roles: admin + response_json_paths: + $.storage.`len`: 2 + +- name: get status, no details + url: /v1/status?details=False + request_headers: + content-type: application/json + x-user-id: 93180da9-7c15-40d3-a050-a374551e52ee + x-project-id: 99d13f22-3618-4288-82b8-6512ded77e4f + x-roles: admin + response_json_paths: + $.storage.`len`: 1 -- GitLab From 7dd80467d374fbfbf1a9880b072a9e84430996fd Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Fri, 20 May 2016 09:35:49 +0200 Subject: [PATCH 0201/1483] tests: protect database upgrade for gabbi tests Indexer upgrade can run twice in parallel. This avoid that. 
Change-Id: I55d45053c8037e5421e1031ae5296be1d6308780 --- gnocchi/tests/gabbi/fixtures.py | 16 +++++++++++++++- 1 file changed, 15 insertions(+), 1 deletion(-) diff --git a/gnocchi/tests/gabbi/fixtures.py b/gnocchi/tests/gabbi/fixtures.py index bbf460f2..5e2ff3b0 100644 --- a/gnocchi/tests/gabbi/fixtures.py +++ b/gnocchi/tests/gabbi/fixtures.py @@ -26,6 +26,7 @@ import warnings from gabbi import fixture import sqlalchemy.engine.url as sqlalchemy_url import sqlalchemy_utils +from tooz import coordination from gnocchi import indexer from gnocchi.indexer import sqlalchemy @@ -114,7 +115,20 @@ class ConfigFixture(fixture.GabbiFixture): index = indexer.get_driver(conf) index.connect() - index.upgrade(create_legacy_resource_types=True) + + coord = coordination.get_coordinator( + conf.storage.coordination_url, + str(uuid.uuid4()).encode('ascii')) + coord.start() + with coord.get_lock(b"gnocchi-tests-db-lock"): + # Force upgrading using Alembic rather than creating the + # database from scratch so we are sure we don't miss anything + # in the Alembic upgrades. We have a test to check that + # upgrades == create but it misses things such as custom CHECK + # constraints. 
+ index.upgrade(nocreate=True, + create_legacy_resource_types=True) + coord.stop() conf.set_override('pecan_debug', False, 'api') -- GitLab From e8965b11667d0c17ad6566c70db8edc51d4a31e2 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Fri, 13 May 2016 09:46:31 +0200 Subject: [PATCH 0202/1483] tests: move custom agg setup code in the tests using it Change-Id: I832b7a9b5f2eb649f01af0e536b40adc86ebfb8f --- gnocchi/tests/base.py | 5 ----- gnocchi/tests/test_aggregates.py | 7 +++++++ 2 files changed, 7 insertions(+), 5 deletions(-) diff --git a/gnocchi/tests/base.py b/gnocchi/tests/base.py index 55cfd0b7..0983345b 100644 --- a/gnocchi/tests/base.py +++ b/gnocchi/tests/base.py @@ -24,7 +24,6 @@ from oslotest import base from oslotest import mockpatch import six from six.moves.urllib.parse import unquote -from stevedore import extension try: from swiftclient import exceptions as swexc except ImportError: @@ -446,10 +445,6 @@ class TestCase(base.BaseTestCase): # life. # self.storage.upgrade(self.index) - self.mgr = extension.ExtensionManager('gnocchi.aggregates', - invoke_on_load=True) - self.custom_agg = dict((x.name, x.obj) for x in self.mgr) - def tearDown(self): self.index.disconnect() self.storage.stop() diff --git a/gnocchi/tests/test_aggregates.py b/gnocchi/tests/test_aggregates.py index 8dbd1bb8..266e9298 100644 --- a/gnocchi/tests/test_aggregates.py +++ b/gnocchi/tests/test_aggregates.py @@ -17,6 +17,7 @@ import datetime import uuid import pandas +from stevedore import extension from gnocchi import aggregates from gnocchi.aggregates import moving_stats @@ -27,6 +28,12 @@ from gnocchi import utils class TestAggregates(tests_base.TestCase): + def setUp(self): + super(TestAggregates, self).setUp() + mgr = extension.ExtensionManager('gnocchi.aggregates', + invoke_on_load=True) + self.custom_agg = dict((x.name, x.obj) for x in mgr) + def test_extension_dict(self): self.assertIsInstance(self.custom_agg['moving-average'], moving_stats.MovingAverage) -- GitLab From 
1e1ee3c6a0ebac161f000ba5c9dcc7082ab7404c Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Fri, 13 May 2016 09:36:06 +0200 Subject: [PATCH 0203/1483] sqlalchemy: set max_retries & all when retrying It seems that it's 0 by default, so it does not really retry. Change-Id: I036f0775ec6d64328b80bf0186879fcbba312912 --- gnocchi/indexer/sqlalchemy.py | 19 +++++++++++++++---- 1 file changed, 15 insertions(+), 4 deletions(-) diff --git a/gnocchi/indexer/sqlalchemy.py b/gnocchi/indexer/sqlalchemy.py index fa24e205..442e71cd 100644 --- a/gnocchi/indexer/sqlalchemy.py +++ b/gnocchi/indexer/sqlalchemy.py @@ -49,6 +49,17 @@ _marker = indexer._marker LOG = log.getLogger(__name__) +def retry_on_deadlock(f): + # FIXME(jd) The default values in oslo.db are useless, we need to fix that. + # Once it's done, let's remove that wrapper of wrapper. + return oslo_db.api.wrap_db_retry(retry_on_deadlock=True, + retry_on_request=True, + max_retries=10, + retry_interval=0.1, + inc_retry_interval=True, + max_retry_interval=2)(f) + + class PerInstanceFacade(object): def __init__(self, conf): self.trans = enginefacade.transaction_context() @@ -125,7 +136,7 @@ class ResourceClassMapper(object): self._cache[resource_type.tablename] = mapper return mapper - @oslo_db.api.wrap_db_retry(retry_on_deadlock=True) + @retry_on_deadlock def map_and_create_tables(self, resource_type, connection): with self._lock: # NOTE(sileht): map this resource_type to have @@ -167,7 +178,7 @@ class ResourceClassMapper(object): Base.metadata.remove(table) del self._cache[resource_type.tablename] - @oslo_db.api.wrap_db_retry(retry_on_deadlock=True) + @retry_on_deadlock def _safe_execute(self, connection, works): # NOTE(sileht): we create a transaction to ensure mysql # create locks on other transaction... 
@@ -477,7 +488,7 @@ class SQLAlchemyIndexer(indexer.IndexerDriver): return r - @oslo_db.api.wrap_db_retry(retry_on_deadlock=True) + @retry_on_deadlock def update_resource(self, resource_type, resource_id, ended_at=_marker, metrics=_marker, append_metrics=False, @@ -650,7 +661,7 @@ class SQLAlchemyIndexer(indexer.IndexerDriver): return Result - @oslo_db.api.wrap_db_retry(retry_on_deadlock=True) + @retry_on_deadlock def list_resources(self, resource_type='generic', attribute_filter=None, details=False, -- GitLab From 2e1d8f63108396e5ae9366b837f8f1a9169290b6 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Fri, 13 May 2016 10:03:27 +0200 Subject: [PATCH 0204/1483] sqlalchemy: add retry on deadlock for delete_resource() Deleting a resource can trigger a deadlock using PostgreSQL. oslo_db.exception.DBDeadlock: (psycopg2.extensions.TransactionRollbackError) deadlock detected DETAIL: Process 91956 waits for RowExclusiveLock on relation 21454 of database 12641; blocked by process 91968. Process 91968 waits for ShareRowExclusiveLock on relation 21416 of database 12641; blocked by process 91956. HINT: See server log for query details. 
[SQL: 'DELETE FROM resource WHERE resource.id = %(id_1)s'] [parameters: {'id_1': UUID('4b935d09-6aa6-4403-8f8c-a76628bfad60')}] Change-Id: Ife5c3ae5fdf921f27a6ca964d82e99a4ae45aa0a --- gnocchi/indexer/sqlalchemy.py | 1 + 1 file changed, 1 insertion(+) diff --git a/gnocchi/indexer/sqlalchemy.py b/gnocchi/indexer/sqlalchemy.py index 442e71cd..7ba32c85 100644 --- a/gnocchi/indexer/sqlalchemy.py +++ b/gnocchi/indexer/sqlalchemy.py @@ -598,6 +598,7 @@ class SQLAlchemyIndexer(indexer.IndexerDriver): session.expire(r, ['metrics']) + @retry_on_deadlock def delete_resource(self, resource_id): with self.facade.writer() as session: # We are going to delete the resource; the on delete will set the -- GitLab From 78503d99597e39216db86caa7d43c59dfb3e615c Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Sat, 14 May 2016 12:47:07 +0200 Subject: [PATCH 0205/1483] sqlalchemy: retry on deadlock for create_resource() This happens sometimes too: oslo_db.exception.DBDeadlock: (psycopg2.extensions.TransactionRollbackError) deadlock detected LINE 1: INSERT INTO resource (created_by_user_id, created_by_project... ^' DETAIL: Process 11422 waits for RowExclusiveLock on relation 16412 of database 12066; blocked by process 11544. Process 11544 waits for AccessExclusiveLock on relation 16442 of database 12066; blocked by process 11550. Process 11550 waits for AccessExclusiveLock on relation 16412 of database 12066; blocked by process 11422. HINT: See server log for query details. 
Change-Id: I3cbe55c0af7cd29838c7892300a8a3036c1e07f7 --- gnocchi/indexer/sqlalchemy.py | 1 + 1 file changed, 1 insertion(+) diff --git a/gnocchi/indexer/sqlalchemy.py b/gnocchi/indexer/sqlalchemy.py index 7ba32c85..72e9eeae 100644 --- a/gnocchi/indexer/sqlalchemy.py +++ b/gnocchi/indexer/sqlalchemy.py @@ -448,6 +448,7 @@ class SQLAlchemyIndexer(indexer.IndexerDriver): return list(q.all()) + @retry_on_deadlock def create_resource(self, resource_type, id, created_by_user_id, created_by_project_id, user_id=None, project_id=None, -- GitLab From d80d93c85ed99a1a85d3fb30e45eafdf5bfe79af Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Tue, 17 May 2016 16:43:09 +0200 Subject: [PATCH 0206/1483] sqlalchemy: retry on deadlock for create_metric() oslo_db.exception.DBDeadlock: (psycopg2.extensions.TransactionRollbackError) deadlock detected DETAIL: Process 16288 waits for RowShareLock on relation 21012 of database 12066; blocked by process 16284. Process 16284 waits for AccessExclusiveLock on relation 21050 of database 12066; blocked by process 16295. Process 16295 waits for AccessExclusiveLock on relation 21012 of database 12066; blocked by process 16288. HINT: See server log for query details. 
[SQL: 'INSERT INTO metric (id, archive_policy_name, created_by_user_id, created_by_project_id, resource_id, name) VALUES (%(id)s, %(archive_policy_name)s, %(created_by_user_id)s, %(created_by_project_id)s, %(resource_id)s, %(name)s)'] [parameters: {'resource_id': None, 'id': UUID('467254d2-fb1d-408d-981c-6bb7a9b998b4'), 'name': None, 'archive_policy_name': 'low', 'created_by_user_id': '9f21d34f-d7dc-4613-8e84-d460be32ae68', 'created_by_project_id': '8279bb8f-12fa-4fff-b85a-9fd0773c2545'}] Change-Id: I22534dd8484caeadef98d45d3c297c01a98d2db3 --- gnocchi/indexer/sqlalchemy.py | 1 + 1 file changed, 1 insertion(+) diff --git a/gnocchi/indexer/sqlalchemy.py b/gnocchi/indexer/sqlalchemy.py index 72e9eeae..c9b34516 100644 --- a/gnocchi/indexer/sqlalchemy.py +++ b/gnocchi/indexer/sqlalchemy.py @@ -411,6 +411,7 @@ class SQLAlchemyIndexer(indexer.IndexerDriver): raise indexer.ArchivePolicyRuleAlreadyExists(name) return apr + @retry_on_deadlock def create_metric(self, id, created_by_user_id, created_by_project_id, archive_policy_name, name=None, resource_id=None): -- GitLab From 3df9cf774ed1e152eb33800156ebd00f7d3ad0a1 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Wed, 18 May 2016 10:17:52 +0200 Subject: [PATCH 0207/1483] sqlalchemy: avoid deadlock on list_metrics() oslo_db.exception.DBDeadlock: (psycopg2.extensions.TransactionRollbackError) deadlock detected LINE 2: ...name = metric.archive_policy_name LEFT OUTER JOIN resource A... ^ DETAIL: Process 19529 waits for AccessShareLock on relation 16425 of database 12066; blocked by process 19569. Process 19569 waits for AccessExclusiveLock on relation 16441 of database 12066; blocked by process 19572. Process 19572 waits for AccessExclusiveLock on relation 16425 of database 12066; blocked by process 19529. HINT: See server log for query details. 
[SQL: 'SELECT metric.id AS metric_id, metric.archive_policy_name AS metric_archive_policy_name, metric.created_by_user_id AS metric_created_by_user_id, metric.created_by_project_id AS metric_created_by_project_id, metric.resource_id AS metric_resource_id, metric.name AS metric_name, metric.status AS metric_status, archive_policy_1.name AS archive_policy_1_name, archive_policy_1.back_window AS archive_policy_1_back_window, archive_policy_1.definition AS archive_policy_1_definition, archive_policy_1.aggregation_methods AS archive_policy_1_aggregation_methods, resource_1.created_by_user_id AS resource_1_created_by_user_id, resource_1.created_by_project_id AS resource_1_created_by_project_id, resource_1.started_at AS resource_1_started_at, resource_1.revision_start AS resource_1_revision_start, resource_1.ended_at AS resource_1_ended_at, resource_1.user_id AS resource_1_user_id, resource_1.project_id AS resource_1_project_id, resource_1.original_resource_id AS resource_1_original_resource_id, resource_1.id AS resource_1_id, resource_1.type AS resource_1_type \nFROM metric LEFT OUTER JOIN archive_policy AS archive_policy_1 ON archive_policy_1.name = metric.archive_policy_name LEFT OUTER JOIN resource AS resource_1 ON resource_1.id = metric.resource_id AND metric.status = %(status_1)s \nWHERE metric.status = %(status_2)s AND metric.id = %(id_1)s ORDER BY metric.id'] [parameters: {'status_2': 'active', 'id_1': UUID('5c6b89cc-81a9-4dab-adc1-f015903531a2'), 'status_1': 'active'}] Change-Id: I6531af9fbe0849b4e1b2721b5f4c47a7ac0e7e6e --- gnocchi/indexer/sqlalchemy.py | 1 + 1 file changed, 1 insertion(+) diff --git a/gnocchi/indexer/sqlalchemy.py b/gnocchi/indexer/sqlalchemy.py index c9b34516..e682c223 100644 --- a/gnocchi/indexer/sqlalchemy.py +++ b/gnocchi/indexer/sqlalchemy.py @@ -431,6 +431,7 @@ class SQLAlchemyIndexer(indexer.IndexerDriver): raise return m + @retry_on_deadlock def list_metrics(self, names=None, ids=None, details=False, status='active', **kwargs): if ids 
is not None and not ids: -- GitLab From 6ba55111515135a59c63b232f566362198359d9e Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Wed, 18 May 2016 10:19:15 +0200 Subject: [PATCH 0208/1483] sqlalchemy: retry on deadlocks in get_resource() oslo_db.exception.DBDeadlock: (psycopg2.extensions.TransactionRollbackError) deadlock detected LINE 3: FROM resource JOIN rt_38f561d4f58747a3a256b2aad624e2b7 ON re... ^ DETAIL: Process 19631 waits for AccessShareLock on relation 16425 of database 12066; blocked by process 19645. Process 19645 waits for AccessExclusiveLock on relation 16441 of database 12066; blocked by process 19649. Process 19649 waits for AccessExclusiveLock on relation 16425 of database 12066; blocked by process 19631. HINT: See server log for query details. [SQL: 'SELECT anon_1.resource_created_by_user_id AS anon_1_resource_created_by_user_id, anon_1.resource_created_by_project_id AS anon_1_resource_created_by_project_id, anon_1.resource_started_at AS anon_1_resource_started_at, anon_1.resource_revision_start AS anon_1_resource_revision_start, anon_1.resource_ended_at AS anon_1_resource_ended_at, anon_1.resource_user_id AS anon_1_resource_user_id, anon_1.resource_project_id AS anon_1_resource_project_id, anon_1.resource_original_resource_id AS anon_1_resource_original_resource_id, anon_1.rt_38f561d4f58747a3a256b2aad624e2b7_id AS anon_1_rt_38f561d4f58747a3a256b2aad624e2b7_id, anon_1.resource_id AS anon_1_resource_id, anon_1.resource_type AS anon_1_resource_type, anon_1.rt_38f561d4f58747a3a256b2aad624e2b7_name AS anon_1_rt_38f561d4f58747a3a256b2aad624e2b7_name, archive_policy_1.name AS archive_policy_1_name, archive_policy_1.back_window AS archive_policy_1_back_window, archive_policy_1.definition AS archive_policy_1_definition, archive_policy_1.aggregation_methods AS archive_policy_1_aggregation_methods, metric_1.id AS metric_1_id, metric_1.archive_policy_name AS metric_1_archive_policy_name, metric_1.created_by_user_id AS metric_1_created_by_user_id, 
metric_1.created_by_project_id AS metric_1_created_by_project_id, metric_1.resource_id AS metric_1_resource_id, metric_1.name AS metric_1_name, metric_1.status AS metric_1_status \nFROM (SELECT resource.created_by_user_id AS resource_created_by_user_id, resource.created_by_project_id AS resource_created_by_project_id, resource.started_at AS resource_started_at, resource.revision_start AS resource_revision_start, resource.ended_at AS resource_ended_at, resource.user_id AS resource_user_id, resource.project_id AS resource_project_id, resource.original_resource_id AS resource_original_resource_id, rt_38f561d4f58747a3a256b2aad624e2b7.id AS rt_38f561d4f58747a3a256b2aad624e2b7_id, resource.id AS resource_id, resource.type AS resource_type, rt_38f561d4f58747a3a256b2aad624e2b7.name AS rt_38f561d4f58747a3a256b2aad624e2b7_name \nFROM resource JOIN rt_38f561d4f58747a3a256b2aad624e2b7 ON resource.id = rt_38f561d4f58747a3a256b2aad624e2b7.id \nWHERE rt_38f561d4f58747a3a256b2aad624e2b7.id = %(id_1)s \n LIMIT %(param_1)s) AS anon_1 LEFT OUTER JOIN metric AS metric_1 ON anon_1.resource_id = metric_1.resource_id AND metric_1.status = %(status_1)s LEFT OUTER JOIN archive_policy AS archive_policy_1 ON archive_policy_1.name = metric_1.archive_policy_name'] [parameters: {'id_1': UUID('c1018d5d-7be6-49c5-aeab-b87ce15d4af6'), 'status_1': 'active', 'param_1': 1}] 2016-05-17 18:19:51.970 | Change-Id: Ied1a399329a4ca59482d49727d1be952a7a3e77a --- gnocchi/indexer/sqlalchemy.py | 1 + 1 file changed, 1 insertion(+) diff --git a/gnocchi/indexer/sqlalchemy.py b/gnocchi/indexer/sqlalchemy.py index e682c223..0bef7800 100644 --- a/gnocchi/indexer/sqlalchemy.py +++ b/gnocchi/indexer/sqlalchemy.py @@ -614,6 +614,7 @@ class SQLAlchemyIndexer(indexer.IndexerDriver): Resource.id == resource_id).delete() == 0: raise indexer.NoSuchResource(resource_id) + @retry_on_deadlock def get_resource(self, resource_type, resource_id, with_metrics=False): with self.facade.independent_reader() as session: resource_cls 
= self._resource_type_to_classes( -- GitLab From fb972acf990ce1289ae7cb08abdc475b73df2158 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Wed, 18 May 2016 15:06:21 +0200 Subject: [PATCH 0209/1483] sqlalchemy: retry on deadlock for create_resource_type() sqlalchemy.exc.InternalError: (psycopg2.InternalError) current transaction is aborted, commands ignored until end of transaction block [SQL: 'select relname from pg_class c join pg_namespace n on n.oid=c.relnamespace where pg_catalog.pg_table_is_visible(c.oid) and relname=%(name)s'] [parameters: {'name': u'rt_c5a9affa3ef94951823f3d81d50932de'}] Change-Id: Id77565677fddfc1d9b9edc074b8b808cfa23a1bd --- gnocchi/indexer/sqlalchemy.py | 20 +++++++++++++++----- 1 file changed, 15 insertions(+), 5 deletions(-) diff --git a/gnocchi/indexer/sqlalchemy.py b/gnocchi/indexer/sqlalchemy.py index 0bef7800..2968c643 100644 --- a/gnocchi/indexer/sqlalchemy.py +++ b/gnocchi/indexer/sqlalchemy.py @@ -254,6 +254,20 @@ class SQLAlchemyIndexer(indexer.IndexerDriver): self._RESOURCE_TYPE_MANAGER.map_and_create_tables( rt, connection) + # NOTE(jd) We can have deadlock errors either here or later in + # map_and_create_tables(). We can't decorate create_resource_type() + # directly or each part might retry later on its own and cause a + # duplicate. 
And it seems there's no way to use the same session for + # both adding the resource_type in our table and calling + # map_and_create_tables() :-( + @retry_on_deadlock + def _add_resource_type(self, resource_type): + try: + with self.facade.writer() as session: + session.add(resource_type) + except exception.DBDuplicateEntry: + raise indexer.ResourceTypeAlreadyExists(resource_type.name) + def create_resource_type(self, resource_type): # NOTE(sileht): mysql have a stupid and small length limitation on the # foreign key and index name, so we can't use the resource type name as @@ -269,11 +283,7 @@ class SQLAlchemyIndexer(indexer.IndexerDriver): # resource_type resource_type.to_baseclass() - try: - with self.facade.writer() as session: - session.add(resource_type) - except exception.DBDuplicateEntry: - raise indexer.ResourceTypeAlreadyExists(resource_type.name) + self._add_resource_type(resource_type) with self.facade.writer_connection() as connection: self._RESOURCE_TYPE_MANAGER.map_and_create_tables(resource_type, -- GitLab From 6275d174d5203cde13b8e26066372ca548a86e10 Mon Sep 17 00:00:00 2001 From: Thomas Goirand Date: Fri, 20 May 2016 13:17:41 +0000 Subject: [PATCH 0210/1483] Releasing to unstable. --- debian/changelog | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/debian/changelog b/debian/changelog index 84c8f346..4cf7fb90 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,17 +1,15 @@ -gnocchi (2.0.2-6) UNRELEASED; urgency=medium +gnocchi (2.0.2-5) unstable; urgency=medium [ Ondřej Nový ] * Standards-Version is 3.9.8 now (no change) + * d/rules: Removed UPSTREAM_GIT with default value + * d/copyright: Changed source URL to https protocol [ Thomas Goirand ] * Added Japanese debconf templates translation update (Closes: #820769). * Updated Dutch debconf templates (Closes: #823439). * Updated Brazilian Portuguese debconf templates (Closes: #824292). 
- [ Ondřej Nový ] - * d/rules: Removed UPSTREAM_GIT with default value - * d/copyright: Changed source URL to https protocol - -- Thomas Goirand Sat, 23 Apr 2016 18:54:09 +0200 gnocchi (2.0.2-4) unstable; urgency=medium -- GitLab From 831ed2f2f3315d974f4cc847142906b7821af61a Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Fri, 13 May 2016 11:39:11 +0200 Subject: [PATCH 0211/1483] Make tempest tests compatible with keystone v3 Change-Id: I1fcc520fbc3873f435c1ebe0d3af78b30985a1ad --- gnocchi/tempest/scenario/__init__.py | 20 ++++++-------------- 1 file changed, 6 insertions(+), 14 deletions(-) diff --git a/gnocchi/tempest/scenario/__init__.py b/gnocchi/tempest/scenario/__init__.py index 7019963e..8760af64 100644 --- a/gnocchi/tempest/scenario/__init__.py +++ b/gnocchi/tempest/scenario/__init__.py @@ -35,7 +35,12 @@ class GnocchiGabbiTest(tempest.test.BaseTestCase): @classmethod def resource_setup(cls): super(GnocchiGabbiTest, cls).resource_setup() - url, token = cls._get_gnocchi_auth() + + url = cls.os_admin.auth_provider.base_url( + {'service': CONF.metric.catalog_type, + 'endpoint_type': CONF.metric.endpoint_type}) + token = cls.os_admin.auth_provider.get_token() + parsed_url = urlparse.urlsplit(url) prefix = parsed_url.path.rstrip('/') # turn it into a prefix port = 443 if parsed_url.scheme == 'https' else 80 @@ -68,19 +73,6 @@ class GnocchiGabbiTest(tempest.test.BaseTestCase): super(GnocchiGabbiTest, self).clear_credentials() self.tearDown() - @classmethod - def _get_gnocchi_auth(cls): - endpoint_type = CONF.metric.endpoint_type - if not endpoint_type.endswith("URL"): - endpoint_type += "URL" - - auth = cls.os_admin.auth_provider.get_auth() - endpoints = [e for e in auth[1]['serviceCatalog'] - if e['type'] == CONF.metric.catalog_type] - if not endpoints: - raise Exception("%s endpoint not found" % CONF.metric.catalog_type) - return endpoints[0]['endpoints'][0][endpoint_type], auth[0] - def test_fake(self): # NOTE(sileht): A fake test is needed to have the 
class loaded # by the test runner -- GitLab From 1b7f01dfd6555c9cb37df76fe338a7a2a30e890e Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Tue, 24 May 2016 09:47:54 +0200 Subject: [PATCH 0212/1483] Revert "tests: protect database upgrade for gabbi tests" This reverts commit 7dd80467d374fbfbf1a9880b072a9e84430996fd. This change was not really fix something because each gabbi scenario run on its own database. This change removes it to not lock too much between tests. Change-Id: I16166f3282034ab9d307f0c07c94508b4de4cfa1 --- gnocchi/tests/gabbi/fixtures.py | 16 +--------------- 1 file changed, 1 insertion(+), 15 deletions(-) diff --git a/gnocchi/tests/gabbi/fixtures.py b/gnocchi/tests/gabbi/fixtures.py index 5e2ff3b0..bbf460f2 100644 --- a/gnocchi/tests/gabbi/fixtures.py +++ b/gnocchi/tests/gabbi/fixtures.py @@ -26,7 +26,6 @@ import warnings from gabbi import fixture import sqlalchemy.engine.url as sqlalchemy_url import sqlalchemy_utils -from tooz import coordination from gnocchi import indexer from gnocchi.indexer import sqlalchemy @@ -115,20 +114,7 @@ class ConfigFixture(fixture.GabbiFixture): index = indexer.get_driver(conf) index.connect() - - coord = coordination.get_coordinator( - conf.storage.coordination_url, - str(uuid.uuid4()).encode('ascii')) - coord.start() - with coord.get_lock(b"gnocchi-tests-db-lock"): - # Force upgrading using Alembic rather than creating the - # database from scratch so we are sure we don't miss anything - # in the Alembic upgrades. We have a test to check that - # upgrades == create but it misses things such as custom CHECK - # constraints. 
- index.upgrade(nocreate=True, - create_legacy_resource_types=True) - coord.stop() + index.upgrade(create_legacy_resource_types=True) conf.set_override('pecan_debug', False, 'api') -- GitLab From 3cfcd074b4f6db0e8caff7dd502d6347fdec1559 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Tue, 24 May 2016 10:26:27 +0200 Subject: [PATCH 0213/1483] devstack: ensure grafana plugin for 2.6 is installed Change-Id: I822f82ff302fc17539e8c171a43ec5735a09e1a4 --- devstack/plugin.sh | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/devstack/plugin.sh b/devstack/plugin.sh index 612336b0..8b2d5566 100644 --- a/devstack/plugin.sh +++ b/devstack/plugin.sh @@ -153,7 +153,10 @@ function _gnocchi_install_grafana { sudo yum install "$GRAFANA_RPM_PKG" fi - git_clone ${GRAFANA_PLUGINS_REPO} ${GRAFANA_PLUGINS_DIR} + # NOTE(sileht): We current support only 2.6, when + # plugin for 3.0 will be ready we will switch to the grafana + # plugin tool to install it + git_clone ${GRAFANA_PLUGINS_REPO} ${GRAFANA_PLUGINS_DIR} 2.6 # Grafana-server does not handle symlink :( sudo mkdir -p /usr/share/grafana/public/app/plugins/datasource/gnocchi sudo mount -o bind ${GRAFANA_PLUGINS_DIR}/datasources/gnocchi /usr/share/grafana/public/app/plugins/datasource/gnocchi -- GitLab From d74ea92ebe40e59b251b5c543cc7a2a73a8bb2ef Mon Sep 17 00:00:00 2001 From: zhangguoqing Date: Wed, 4 May 2016 12:26:20 +0000 Subject: [PATCH 0214/1483] add unit column for metric Change-Id: I9cdf83f1ec51670e74a11125ac9f048bf9d70d84 Closes-Bug: #1530967 --- gnocchi/indexer/__init__.py | 3 +- ...c62df18bf4ee_add_unit_column_for_metric.py | 38 +++++++++++++++++++ gnocchi/indexer/sqlalchemy.py | 5 ++- gnocchi/indexer/sqlalchemy_base.py | 3 ++ gnocchi/rest/__init__.py | 4 ++ gnocchi/tests/gabbi/gabbits/metric.yaml | 18 ++++++++- gnocchi/tests/test_indexer.py | 1 + 7 files changed, 69 insertions(+), 3 deletions(-) create mode 100644 gnocchi/indexer/alembic/versions/c62df18bf4ee_add_unit_column_for_metric.py diff --git 
a/gnocchi/indexer/__init__.py b/gnocchi/indexer/__init__.py index 4ad7bf5a..47dc1148 100644 --- a/gnocchi/indexer/__init__.py +++ b/gnocchi/indexer/__init__.py @@ -306,7 +306,8 @@ class IndexerDriver(object): @staticmethod def create_metric(id, created_by_user_id, created_by_project_id, - archive_policy_name, name=None, resource_id=None): + archive_policy_name, name=None, unit=None, + resource_id=None): raise exceptions.NotImplementedError @staticmethod diff --git a/gnocchi/indexer/alembic/versions/c62df18bf4ee_add_unit_column_for_metric.py b/gnocchi/indexer/alembic/versions/c62df18bf4ee_add_unit_column_for_metric.py new file mode 100644 index 00000000..7d4deef5 --- /dev/null +++ b/gnocchi/indexer/alembic/versions/c62df18bf4ee_add_unit_column_for_metric.py @@ -0,0 +1,38 @@ +# Copyright 2016 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""add unit column for metric + +Revision ID: c62df18bf4ee +Revises: 2e0b912062d1 +Create Date: 2016-05-04 12:31:25.350190 + +""" + +from alembic import op +import sqlalchemy as sa + + +# revision identifiers, used by Alembic. 
+revision = 'c62df18bf4ee' +down_revision = '2e0b912062d1' +branch_labels = None +depends_on = None + + +def upgrade(): + op.add_column('metric', sa.Column('unit', + sa.String(length=31), + nullable=True)) diff --git a/gnocchi/indexer/sqlalchemy.py b/gnocchi/indexer/sqlalchemy.py index f1de9dac..c7593a12 100644 --- a/gnocchi/indexer/sqlalchemy.py +++ b/gnocchi/indexer/sqlalchemy.py @@ -370,12 +370,13 @@ class SQLAlchemyIndexer(indexer.IndexerDriver): def create_metric(self, id, created_by_user_id, created_by_project_id, archive_policy_name, - name=None, resource_id=None): + name=None, unit=None, resource_id=None): m = Metric(id=id, created_by_user_id=created_by_user_id, created_by_project_id=created_by_project_id, archive_policy_name=archive_policy_name, name=name, + unit=unit, resource_id=resource_id) try: with self.facade.writer() as session: @@ -535,12 +536,14 @@ class SQLAlchemyIndexer(indexer.IndexerDriver): if update == 0: raise indexer.NoSuchMetric(value) else: + unit = value.get('unit') ap_name = value['archive_policy_name'] m = Metric(id=uuid.uuid4(), created_by_user_id=r.created_by_user_id, created_by_project_id=r.created_by_project_id, archive_policy_name=ap_name, name=name, + unit=unit, resource_id=r.id) session.add(m) try: diff --git a/gnocchi/indexer/sqlalchemy_base.py b/gnocchi/indexer/sqlalchemy_base.py index 25115ec4..5b2322bd 100644 --- a/gnocchi/indexer/sqlalchemy_base.py +++ b/gnocchi/indexer/sqlalchemy_base.py @@ -161,6 +161,7 @@ class Metric(Base, GnocchiBase, storage.Metric): ondelete="SET NULL", name="fk_metric_resource_id_resource_id")) name = sqlalchemy.Column(sqlalchemy.String(255)) + unit = sqlalchemy.Column(sqlalchemy.String(31)) status = sqlalchemy.Column(sqlalchemy.Enum('active', 'delete', name="metric_status_enum"), nullable=False, @@ -172,6 +173,7 @@ class Metric(Base, GnocchiBase, storage.Metric): "created_by_user_id": self.created_by_user_id, "created_by_project_id": self.created_by_project_id, "name": self.name, + "unit": 
self.unit, } unloaded = sqlalchemy.inspect(self).unloaded if 'resource' in unloaded: @@ -195,6 +197,7 @@ class Metric(Base, GnocchiBase, storage.Metric): and self.created_by_user_id == other.created_by_user_id and self.created_by_project_id == other.created_by_project_id and self.name == other.name + and self.unit == other.unit and self.resource_id == other.resource_id) or (storage.Metric.__eq__(self, other))) diff --git a/gnocchi/rest/__init__.py b/gnocchi/rest/__init__.py index 4c140e44..70769c61 100644 --- a/gnocchi/rest/__init__.py +++ b/gnocchi/rest/__init__.py @@ -586,6 +586,8 @@ class MetricsController(rest.RestController): "project_id": six.text_type, "archive_policy_name": six.text_type, "name": six.text_type, + voluptuous.Optional("unit"): + voluptuous.All(six.text_type, voluptuous.Length(max=31)), }) # NOTE(jd) Define this method as it was a voluptuous schema – it's just a @@ -621,6 +623,7 @@ class MetricsController(rest.RestController): "project_id": definition.get('project_id'), "archive_policy_name": archive_policy_name, "name": name, + "unit": definition.get('unit'), }) return definition @@ -634,6 +637,7 @@ class MetricsController(rest.RestController): uuid.uuid4(), user, project, name=body.get('name'), + unit=body.get('unit'), archive_policy_name=body['archive_policy_name']) except indexer.NoSuchArchivePolicy as e: abort(400, e) diff --git a/gnocchi/tests/gabbi/gabbits/metric.yaml b/gnocchi/tests/gabbi/gabbits/metric.yaml index 8ee0b490..735dd7c7 100644 --- a/gnocchi/tests/gabbi/gabbits/metric.yaml +++ b/gnocchi/tests/gabbi/gabbits/metric.yaml @@ -63,17 +63,33 @@ tests: response_strings: - "[]" - - name: create metric with name + - name: create metric with name and unit url: /v1/metric request_headers: content-type: application/json method: post data: name: "disk.io.rate" + unit: "B/s" status: 201 response_json_paths: $.archive_policy_name: cookies $.name: disk.io.rate + $.unit: B/s + + - name: create metric with name and over length unit + url: 
/v1/metric + request_headers: + content-type: application/json + method: post + data: + name: "disk.io.rate" + unit: "over_length_unit_over_length_unit" + status: 400 + response_strings: + # split to not match the u' in py2 + - "Invalid input: length of value must be at most 31 for dictionary value @ data[" + - "'unit']" - name: create metric with name no rule url: /v1/metric diff --git a/gnocchi/tests/test_indexer.py b/gnocchi/tests/test_indexer.py index b4e5a98a..e081621d 100644 --- a/gnocchi/tests/test_indexer.py +++ b/gnocchi/tests/test_indexer.py @@ -102,6 +102,7 @@ class TestIndexerDriver(tests_base.TestCase): self.assertEqual(m.created_by_user_id, user) self.assertEqual(m.created_by_project_id, project) self.assertIsNone(m.name) + self.assertIsNone(m.unit) self.assertIsNone(m.resource_id) m2 = self.index.list_metrics(id=r1) self.assertEqual([m], m2) -- GitLab From 10ce63731734bd7dfb278ba2cba5f08a8896c274 Mon Sep 17 00:00:00 2001 From: ZhiQiang Fan Date: Wed, 25 May 2016 03:31:07 +0800 Subject: [PATCH 0215/1483] fix some typos in doc, comment & code Change-Id: Idd3051e426cbe87f178ae7e0a8797b3efd6f13c4 --- doc/source/configuration.rst | 6 ++--- gnocchi/indexer/sqlalchemy.py | 2 +- gnocchi/storage/_carbonara.py | 4 ++-- gnocchi/tests/gabbi/gabbits-live/live.yaml | 22 +++++++++---------- .../tests/gabbi/gabbits/transformedids.yaml | 4 ++-- 5 files changed, 19 insertions(+), 19 deletions(-) diff --git a/doc/source/configuration.rst b/doc/source/configuration.rst index 2730f713..e6e17b9f 100644 --- a/doc/source/configuration.rst +++ b/doc/source/configuration.rst @@ -97,7 +97,7 @@ For a more robust multi-nodes deployment, the coordinator may be changed via the `storage.coordination_url` configuration option to one of the other `tooz backends`_. -For example to use Redis backend:: +For example, to use Redis backend:: coordination_url = redis://?sentinel= @@ -128,7 +128,7 @@ processed: each time we would change it. 
Instead, the omaps of one empty rados object are used. No lock is needed to -add/remove a omap attribute. +add/remove an omap attribute. Also xattrs attributes are used to store the list of aggregations used for a metric. So depending on the filesystem used by ceph OSDs, xattrs can have @@ -155,7 +155,7 @@ So, in realistic scenarios, the direct relation between the archive policy and the size of the rados objects created by Gnocchi is not a problem. -Also Gnocchi can use `cradox`_ Python libary if installed. This library is a +Also Gnocchi can use `cradox`_ Python library if installed. This library is a Python binding to librados written with `Cython`_, aiming to replace the one written with `ctypes`_ provided by Ceph. This new library will be part of next Ceph release (10.0.4). diff --git a/gnocchi/indexer/sqlalchemy.py b/gnocchi/indexer/sqlalchemy.py index 72e9eeae..2613e42f 100644 --- a/gnocchi/indexer/sqlalchemy.py +++ b/gnocchi/indexer/sqlalchemy.py @@ -160,7 +160,7 @@ class ResourceClassMapper(object): # We drop foreign keys manually to not lock the destination # table for too long during drop table. 
# It's safe to not use a transaction since - # the resource_type table is already cleaned and commited + # the resource_type table is already cleaned and committed # so this code cannot be triggerred anymore for this # resource_type for table in tables: diff --git a/gnocchi/storage/_carbonara.py b/gnocchi/storage/_carbonara.py index 1cb5b837..cc7d2206 100644 --- a/gnocchi/storage/_carbonara.py +++ b/gnocchi/storage/_carbonara.py @@ -450,9 +450,9 @@ class CarbonaraBasedStorage(storage.StorageDriver): ) granularities_in_common = [ g - for g, occurence in six.iteritems( + for g, occurrence in six.iteritems( collections.Counter(granularities)) - if occurence == len(metrics) + if occurrence == len(metrics) ] if not granularities_in_common: diff --git a/gnocchi/tests/gabbi/gabbits-live/live.yaml b/gnocchi/tests/gabbi/gabbits-live/live.yaml index 14a74fd1..5aa2b246 100644 --- a/gnocchi/tests/gabbi/gabbits-live/live.yaml +++ b/gnocchi/tests/gabbi/gabbits-live/live.yaml @@ -234,7 +234,7 @@ tests: # Fail to delete one that does not exist - name: delete missing archive - desc: delete non-existint archive policy + desc: delete non-existent archive policy url: /v1/archive_policy/grandiose method: DELETE status: 404 @@ -479,12 +479,12 @@ tests: status: 404 - name: identity resource - desc: maybe theres are no identity resources yet + desc: maybe there's are no identity resources yet url: /v1/resource/identity status: 200 - name: ceph_account resource - desc: maybe theres are no ceph_account resources yet + desc: maybe there's are no ceph_account resources yet url: /v1/resource/ceph_account status: 200 @@ -494,43 +494,43 @@ tests: status: 200 - name: instance_network_interface resource - desc: maybe theres are no instance_network_interface resources yet + desc: maybe there's are no instance_network_interface resources yet url: /v1/resource/instance_network_interface status: 200 - name: instance_disk resource - desc: maybe theres are no instance_disk resources yet + desc: maybe 
there's are no instance_disk resources yet url: /v1/resource/instance_disk status: 200 - name: image resource - desc: maybe theres are no image resources yet + desc: maybe there's are no image resources yet url: /v1/resource/image status: 200 - name: ipmi resource - desc: maybe theres are no ipmi resources yet + desc: maybe there's are no ipmi resources yet url: /v1/resource/ipmi status: 200 - name: network resource - desc: maybe theres are no network resources yet + desc: maybe there's are no network resources yet url: /v1/resource/network status: 200 - name: orchestration resource - desc: maybe theres are no orchestration resources yet + desc: maybe there's are no orchestration resources yet #url: /v1/resource/orchestration url: /v1/resource/stack status: 200 - name: swift_account resource - desc: maybe theres are no swift_account resources yet + desc: maybe there's are no swift_account resources yet url: /v1/resource/swift_account status: 200 - name: volume resource - desc: maybe theres are no volume resources yet + desc: maybe there's are no volume resources yet url: /v1/resource/volume status: 200 diff --git a/gnocchi/tests/gabbi/gabbits/transformedids.yaml b/gnocchi/tests/gabbi/gabbits/transformedids.yaml index 0957fb7d..76dd5e2a 100644 --- a/gnocchi/tests/gabbi/gabbits/transformedids.yaml +++ b/gnocchi/tests/gabbi/gabbits/transformedids.yaml @@ -110,7 +110,7 @@ tests: url: /v1/resource/generic method: post data: - id: four score and seven years ago we the people of the united states of america i have a dream it is the courage to continue that counts four score and seven years ago we the people of the united states of america i have a dream it is the courage to continue that counts four score and seven years ago we the people of the united states of america i have a dream it is the courage to continue that counts + id: four score and seven years ago we the people of the United States of America i have a dream it is the courage to continue that counts four 
score and seven years ago we the people of the United States of America i have a dream it is the courage to continue that counts four score and seven years ago we the people of the United States of America i have a dream it is the courage to continue that counts user_id: 0fbb231484614b1a80131fc22f6afc9c project_id: f3d41b770cc14f0bb94a1d5be9c0e3ea metrics: @@ -125,7 +125,7 @@ tests: method: post data: # 255 char string - id: four score and seven years ago we the people of the united states of america i have a dream it is the courage to continue that counts four score and seven years ago we the people of the united states of america i have a dream it is the courage to continue + id: four score and seven years ago we the people of the United States of America i have a dream it is the courage to continue that counts four score and seven years ago we the people of the United States of America i have a dream it is the courage to continue user_id: 0fbb231484614b1a80131fc22f6afc9c project_id: f3d41b770cc14f0bb94a1d5be9c0e3ea metrics: -- GitLab From e91d7e668071a385514804d6eb9f17dbcc850cfe Mon Sep 17 00:00:00 2001 From: Chris Dent Date: Wed, 25 May 2016 17:08:57 +0000 Subject: [PATCH 0216/1483] Tuneup gabbi aggregation.yaml file to modern standards Make use of METHOD: /url shorthand to make the request method more visible and more tightly associated with the url for easier reading. 
Change-Id: Ic8e5e2934fda87c74fae29d4031ff30c0bce5e24 --- gnocchi/tests/gabbi/gabbits/aggregation.yaml | 64 ++++++++------------ 1 file changed, 24 insertions(+), 40 deletions(-) diff --git a/gnocchi/tests/gabbi/gabbits/aggregation.yaml b/gnocchi/tests/gabbi/gabbits/aggregation.yaml index 7c91cc3d..c1e883a9 100644 --- a/gnocchi/tests/gabbi/gabbits/aggregation.yaml +++ b/gnocchi/tests/gabbi/gabbits/aggregation.yaml @@ -4,8 +4,7 @@ fixtures: tests: - name: create archive policy desc: for later use - url: /v1/archive_policy - method: POST + POST: /v1/archive_policy request_headers: content-type: application/json x-roles: admin @@ -19,19 +18,17 @@ tests: # Aggregation by metric ids - name: create metric 1 - url: /v1/metric + POST: /v1/metric request_headers: content-type: application/json - method: post data: archive_policy_name: low status: 201 - name: create metric 2 - url: /v1/metric + POST: /v1/metric request_headers: content-type: application/json - method: post data: archive_policy_name: low status: 201 @@ -40,10 +37,9 @@ tests: url: /v1/metric - name: push measurements to metric 1 - url: /v1/metric/$RESPONSE['$[0].id']/measures + POST: /v1/metric/$RESPONSE['$[0].id']/measures request_headers: content-type: application/json - method: post data: - timestamp: "2015-03-06T14:33:57" value: 43.1 @@ -52,13 +48,12 @@ tests: status: 202 - name: get metric list to push metric 2 - url: /v1/metric + GET: /v1/metric - name: push measurements to metric 2 - url: /v1/metric/$RESPONSE['$[1].id']/measures + POST: /v1/metric/$RESPONSE['$[1].id']/measures request_headers: content-type: application/json - method: post data: - timestamp: "2015-03-06T14:33:57" value: 3.1 @@ -67,17 +62,17 @@ tests: status: 202 - name: get metric list to get aggregates - url: /v1/metric + GET: /v1/metric - name: get measure aggregates by granularity not float - url: /v1/aggregation/metric?metric=$RESPONSE['$[0].id']&metric=$RESPONSE['$[1].id']&granularity=foobar + GET: 
/v1/aggregation/metric?metric=$RESPONSE['$[0].id']&metric=$RESPONSE['$[1].id']&granularity=foobar status: 400 - name: get metric list to get aggregates 2 - url: /v1/metric + GET: /v1/metric - name: get measure aggregates by granularity - url: /v1/aggregation/metric?metric=$RESPONSE['$[0].id']&metric=$RESPONSE['$[1].id']&granularity=1 + GET: /v1/aggregation/metric?metric=$RESPONSE['$[0].id']&metric=$RESPONSE['$[1].id']&granularity=1 poll: count: 10 delay: 1 @@ -87,10 +82,10 @@ tests: - ['2015-03-06T14:34:12+00:00', 1.0, 7.0] - name: get metric list to push metric 3 - url: /v1/metric + GET: /v1/metric - name: get measure aggregates by granularity with timestamps - url: /v1/aggregation/metric?metric=$RESPONSE['$[0].id']&metric=$RESPONSE['$[1].id']&start=2015-03-06T15:33:57%2B01:00&stop=2015-03-06T15:34:00%2B01:00 + GET: /v1/aggregation/metric?metric=$RESPONSE['$[0].id']&metric=$RESPONSE['$[1].id']&start=2015-03-06T15:33:57%2B01:00&stop=2015-03-06T15:34:00%2B01:00 poll: count: 10 delay: 1 @@ -102,8 +97,7 @@ tests: # Aggregation by resource and metric_name - name: post a resource - url: /v1/resource/generic - method: post + POST: /v1/resource/generic request_headers: x-user-id: 0fbb231484614b1a80131fc22f6afc9c x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea @@ -116,8 +110,7 @@ tests: status: 201 - name: post another resource - url: /v1/resource/generic - method: post + POST: /v1/resource/generic request_headers: x-user-id: 0fbb231484614b1a80131fc22f6afc9c x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea @@ -130,12 +123,11 @@ tests: status: 201 - name: push measurements to resource 1 - url: /v1/resource/generic/bcd3441c-b5aa-4d1b-af9a-5a72322bb269/metric/agg_meter/measures + POST: /v1/resource/generic/bcd3441c-b5aa-4d1b-af9a-5a72322bb269/metric/agg_meter/measures request_headers: x-user-id: 0fbb231484614b1a80131fc22f6afc9c x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea content-type: application/json - method: post data: - timestamp: "2015-03-06T14:33:57" value: 43.1 @@ 
-144,12 +136,11 @@ tests: status: 202 - name: push measurements to resource 2 - url: /v1/resource/generic/1b0a8345-b279-4cb8-bd7a-2cb83193624f/metric/agg_meter/measures + POST: /v1/resource/generic/1b0a8345-b279-4cb8-bd7a-2cb83193624f/metric/agg_meter/measures request_headers: x-user-id: 0fbb231484614b1a80131fc22f6afc9c x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea content-type: application/json - method: post data: - timestamp: "2015-03-06T14:33:57" value: 3.1 @@ -158,8 +149,7 @@ tests: status: 202 - name: get measure aggregates by granularity from resources - method: POST - url: /v1/aggregation/resource/generic/metric/agg_meter?granularity=1 + POST: /v1/aggregation/resource/generic/metric/agg_meter?granularity=1 request_headers: x-user-id: 0fbb231484614b1a80131fc22f6afc9c x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea @@ -173,8 +163,7 @@ tests: - ['2015-03-06T14:34:12+00:00', 1.0, 7.0] - name: get measure aggregates by granularity with timestamps from resources - method: POST - url: /v1/aggregation/resource/generic/metric/agg_meter?start=2015-03-06T15:33:57%2B01:00&stop=2015-03-06T15:34:00%2B01:00 + POST: /v1/aggregation/resource/generic/metric/agg_meter?start=2015-03-06T15:33:57%2B01:00&stop=2015-03-06T15:34:00%2B01:00 request_headers: x-user-id: 0fbb231484614b1a80131fc22f6afc9c x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea @@ -190,12 +179,11 @@ tests: # Some negative tests - name: get measure aggregates with wrong GET - url: /v1/aggregation/resource/generic/metric/agg_meter + GET: /v1/aggregation/resource/generic/metric/agg_meter status: 405 - name: get measure aggregates with wrong metric_name - method: POST - url: /v1/aggregation/resource/generic/metric/notexists + POST: /v1/aggregation/resource/generic/metric/notexists request_headers: x-user-id: 0fbb231484614b1a80131fc22f6afc9c x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea @@ -205,8 +193,7 @@ tests: $.`len`: 0 - name: get measure aggregates with wrong resource - method: POST - url: 
/v1/aggregation/resource/notexits/metric/agg_meter + POST: /v1/aggregation/resource/notexits/metric/agg_meter request_headers: x-user-id: 0fbb231484614b1a80131fc22f6afc9c x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea @@ -216,8 +203,7 @@ tests: - Resource type notexits does not exist - name: get measure aggregates with wrong path - method: POST - url: /v1/aggregation/re/generic/metric/agg_meter + POST: /v1/aggregation/re/generic/metric/agg_meter request_headers: x-user-id: 0fbb231484614b1a80131fc22f6afc9c x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea @@ -225,8 +211,7 @@ tests: status: 404 - name: get measure aggregates with wrong path 2 - method: POST - url: /v1/aggregation/resource/generic/notexists/agg_meter + POST: /v1/aggregation/resource/generic/notexists/agg_meter request_headers: x-user-id: 0fbb231484614b1a80131fc22f6afc9c x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea @@ -234,8 +219,7 @@ tests: status: 404 - name: get measure aggregates with no resource name - method: POST - url: /v1/aggregation/resource/generic/metric + POST: /v1/aggregation/resource/generic/metric request_headers: x-user-id: 0fbb231484614b1a80131fc22f6afc9c x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea -- GitLab From 505a6a200c9d569abb802b9761840dc43afc2359 Mon Sep 17 00:00:00 2001 From: Chris Dent Date: Wed, 25 May 2016 17:48:24 +0000 Subject: [PATCH 0217/1483] Tuneup gabbi archive_rule.yaml file to modern standards Make use of METHOD: /url shorthand to make the request method more visible and more tightly associated with the url for easier reading. In one place a status was move above a response_headers so that the test more clearly aligns with the order of an HTTP request. This isn't required by gabbi, but makes reading nice. 
Change-Id: I37afcb780db8e9491fab5b03165ab1d8adccd0d2 --- gnocchi/tests/gabbi/gabbits/archive_rule.yaml | 56 +++++++------------ 1 file changed, 20 insertions(+), 36 deletions(-) diff --git a/gnocchi/tests/gabbi/gabbits/archive_rule.yaml b/gnocchi/tests/gabbi/gabbits/archive_rule.yaml index ea5e2b33..10d0c7e4 100644 --- a/gnocchi/tests/gabbi/gabbits/archive_rule.yaml +++ b/gnocchi/tests/gabbi/gabbits/archive_rule.yaml @@ -10,8 +10,7 @@ tests: # create dependent policy - name: create archive policy - url: /v1/archive_policy - method: POST + POST: /v1/archive_policy request_headers: content-type: application/json x-roles: admin @@ -19,14 +18,14 @@ tests: name: low definition: - granularity: 1 hour + status: 201 response_headers: location: $SCHEME://$NETLOC/v1/archive_policy/low - status: 201 # Attempt to create an archive policy rule - name: create archive policy rule1 - url: /v1/archive_policy_rule + POST: /v1/archive_policy_rule method: POST request_headers: content-type: application/json @@ -42,7 +41,7 @@ tests: $.name: test_rule1 - name: create archive policy rule 2 - url: /v1/archive_policy_rule + POST: /v1/archive_policy_rule method: POST request_headers: content-type: application/json @@ -58,8 +57,7 @@ tests: $.name: test_rule2 - name: create archive policy rule 3 - url: /v1/archive_policy_rule - method: POST + POST: /v1/archive_policy_rule request_headers: content-type: application/json x-roles: admin @@ -77,8 +75,7 @@ tests: # Attempt to create an invalid policy rule - name: create invalid archive policy rule - url: /v1/archive_policy_rule - method: POST + POST: /v1/archive_policy_rule request_headers: content-type: application/json x-roles: admin @@ -88,8 +85,7 @@ tests: status: 400 - name: missing auth archive policy rule - url: /v1/archive_policy_rule - method: POST + POST: /v1/archive_policy_rule request_headers: content-type: application/json data: @@ -99,8 +95,7 @@ tests: status: 403 - name: wrong content type - url: /v1/archive_policy_rule - method: 
POST + POST: /v1/archive_policy_rule request_headers: content-type: text/plain x-roles: admin @@ -109,8 +104,7 @@ tests: - Unsupported Media Type - name: wrong auth create rule - url: /v1/archive_policy_rule - method: POST + POST: /v1/archive_policy_rule request_headers: content-type: application/json x-roles: foo @@ -121,8 +115,7 @@ tests: status: 403 - name: missing auth createrule - url: /v1/archive_policy_rule - method: POST + POST: /v1/archive_policy_rule request_headers: content-type: application/json data: @@ -132,8 +125,7 @@ tests: status: 403 - name: bad request body - url: /v1/archive_policy_rule - method: POST + POST: /v1/archive_policy_rule request_headers: content-type: application/json x-roles: admin @@ -146,7 +138,7 @@ tests: # get an archive policy rules - name: get archive policy rule - url: /v1/archive_policy_rule + GET: /v1/archive_policy_rule status: 200 response_json_paths: $.[0].metric_pattern: disk.foo.* @@ -154,43 +146,38 @@ tests: $.[2].metric_pattern: "*" - name: get unknown archive policy rule - url: /v1/archive_policy_rule/foo + GET: /v1/archive_policy_rule/foo status: 404 - name: delete used archive policy - url: /v1/archive_policy/low + DELETE: /v1/archive_policy/low request_headers: x-roles: admin - method: DELETE status: 400 # delete rule as non admin - name: delete archive policy rule non admin - url: /v1/archive_policy_rule/test_rule1 - method: DELETE + DELETE: /v1/archive_policy_rule/test_rule1 status: 403 # delete rule - name: delete archive policy rule1 - url: /v1/archive_policy_rule/test_rule1 - method: DELETE + DELETE: /v1/archive_policy_rule/test_rule1 request_headers: x-roles: admin status: 204 - name: delete archive policy rule2 - url: /v1/archive_policy_rule/test_rule2 - method: DELETE + DELETE: /v1/archive_policy_rule/test_rule2 request_headers: x-roles: admin status: 204 - name: delete archive policy rule3 - url: /v1/archive_policy_rule/test_rule3 - method: DELETE + DELETE: /v1/archive_policy_rule/test_rule3 
request_headers: x-roles: admin status: 204 @@ -198,18 +185,15 @@ tests: # delete again - name: confirm delete archive policy rule - url: /v1/archive_policy_rule/test_rule1 - method: DELETE + DELETE: /v1/archive_policy_rule/test_rule1 request_headers: x-roles: admin status: 404 - name: delete missing archive policy rule utf8 - url: /v1/archive_policy_rule/%E2%9C%94%C3%A9%C3%B1%E2%98%83 - method: DELETE + DELETE: /v1/archive_policy_rule/%E2%9C%94%C3%A9%C3%B1%E2%98%83 request_headers: x-roles: admin status: 404 response_strings: - Archive policy rule ✔éñ☃ does not exist - -- GitLab From 18b8baa0204948ee429cfb7b3b3e204a81f1efb4 Mon Sep 17 00:00:00 2001 From: Chris Dent Date: Wed, 25 May 2016 18:54:11 +0000 Subject: [PATCH 0218/1483] Tuneup gabbi archive.yaml file to modern standards Make use of METHOD: /url shorthand to make the request method more visible and more tightly associated with the url for easier reading. Use $LAST_URL to refer to the URL of the previous request, but only when the sameness of the URL is a relevant part of the expression of the tests-in-sequence. (That is, don't just use LAST_URL because the URL is the same.) In one test use more complex json paths to confirm the details of the list of archive policies. Change-Id: Ic0b02b19bea121af9c21157443d0f1233b5f4e0c --- gnocchi/tests/gabbi/gabbits/archive.yaml | 135 +++++++++-------------- 1 file changed, 50 insertions(+), 85 deletions(-) diff --git a/gnocchi/tests/gabbi/gabbits/archive.yaml b/gnocchi/tests/gabbi/gabbits/archive.yaml index ba25af7c..5519d63a 100644 --- a/gnocchi/tests/gabbi/gabbits/archive.yaml +++ b/gnocchi/tests/gabbi/gabbits/archive.yaml @@ -25,20 +25,20 @@ tests: # Do we care? 
- name: empty archive policy list - url: /v1/archive_policy + GET: /v1/archive_policy response_headers: content-type: /application/json/ response_strings: - "[]" - name: empty list text - url: /v1/archive_policy + GET: /v1/archive_policy request_headers: accept: text/plain status: 406 - name: empty list html - url: /v1/archive_policy + GET: /v1/archive_policy request_headers: accept: text/html status: 406 @@ -46,8 +46,7 @@ tests: # Fail to create an archive policy for various reasons. - name: wrong content type - url: /v1/archive_policy - method: POST + POST: /v1/archive_policy request_headers: content-type: text/plain x-roles: admin @@ -56,16 +55,14 @@ tests: - Unsupported Media Type - name: wrong method - url: /v1/archive_policy - method: PUT + PUT: /v1/archive_policy request_headers: content-type: application/json x-roles: admin status: 405 - name: wrong authZ - url: /v1/archive_policy - method: POST + POST: /v1/archive_policy request_headers: content-type: application/json x-roles: clancy @@ -76,8 +73,7 @@ tests: status: 403 - name: missing authZ - url: /v1/archive_policy - method: POST + POST: /v1/archive_policy request_headers: content-type: application/json data: @@ -87,8 +83,7 @@ tests: status: 403 - name: bad request body - url: /v1/archive_policy - method: POST + POST: /v1/archive_policy request_headers: content-type: application/json x-roles: admin @@ -99,8 +94,7 @@ tests: - "Invalid input: extra keys not allowed" - name: missing definition - url: /v1/archive_policy - method: POST + POST: /v1/archive_policy request_headers: content-type: application/json x-roles: admin @@ -111,8 +105,7 @@ tests: - "Invalid input: required key not provided" - name: empty definition - url: /v1/archive_policy - method: POST + POST: /v1/archive_policy request_headers: content-type: application/json x-roles: admin @@ -124,8 +117,7 @@ tests: - "Invalid input: length of value must be at least 1" - name: wrong value definition - url: /v1/archive_policy - method: POST + POST: 
/v1/archive_policy request_headers: content-type: application/json x-roles: admin @@ -137,8 +129,7 @@ tests: - "Invalid input: expected a list" - name: useless definition - url: /v1/archive_policy - method: POST + POST: /v1/archive_policy request_headers: content-type: application/json x-roles: admin @@ -153,8 +144,7 @@ tests: # Create a valid archive policy. - name: create archive policy - url: /v1/archive_policy - method: POST + POST: /v1/archive_policy request_headers: content-type: application/json x-roles: admin @@ -171,7 +161,7 @@ tests: # Retrieve it correctly and then poorly - name: get archive policy - url: $LOCATION + GET: $LOCATION response_headers: content-type: /application/json/ response_json_paths: @@ -184,7 +174,7 @@ tests: $.definition[1].timespan: null - name: get wrong accept - url: /v1/archive_policy/medium + GET: $LAST_URL request_headers: accept: text/plain status: 406 @@ -192,8 +182,7 @@ tests: # Update archive policy - name: patch archive policy with bad definition - url: /v1/archive_policy/medium - method: PATCH + PATCH: $LAST_URL request_headers: content-type: application/json x-roles: admin @@ -208,8 +197,7 @@ tests: - timespan ≠ granularity × points - name: patch archive policy with missing granularity - url: /v1/archive_policy/medium - method: PATCH + PATCH: $LAST_URL request_headers: content-type: application/json x-roles: admin @@ -222,8 +210,7 @@ tests: - "Archive policy medium does not support change: Cannot add or drop granularities" - name: patch archive policy with non-matching granularity - url: /v1/archive_policy/medium - method: PATCH + PATCH: $LAST_URL request_headers: content-type: application/json x-roles: admin @@ -237,8 +224,7 @@ tests: - "Archive policy medium does not support change: 1.0 granularity interval was changed" - name: patch archive policy - url: /v1/archive_policy/medium - method: PATCH + PATCH: $LAST_URL request_headers: content-type: application/json x-roles: admin @@ -255,7 +241,7 @@ tests: 
$.definition[0].timespan: "0:00:50" - name: get patched archive policy - url: /v1/archive_policy/medium + GET: $LAST_URL response_headers: content-type: /application/json/ response_json_paths: @@ -267,20 +253,17 @@ tests: # Unexpected methods - name: post single archive - url: /v1/archive_policy/medium - method: POST + POST: $LAST_URL status: 405 - name: put single archive - url: /v1/archive_policy/medium - method: PUT + PUT: $LAST_URL status: 405 # Create another one and then test duplication - name: create second policy - url: /v1/archive_policy - method: POST + POST: /v1/archive_policy request_headers: content-type: application/json x-roles: admin @@ -293,8 +276,7 @@ tests: status: 201 - name: create duplicate policy - url: /v1/archive_policy - method: POST + POST: /v1/archive_policy request_headers: content-type: application/json x-roles: admin @@ -309,8 +291,7 @@ tests: # Create a unicode named policy - name: post unicode policy name - url: /v1/archive_policy - method: POST + POST: /v1/archive_policy request_headers: content-type: application/json x-roles: admin @@ -326,13 +307,12 @@ tests: name: ✔éñ☃ - name: retrieve unicode policy name - url: $LOCATION + GET: $LOCATION response_json_paths: name: ✔éñ☃ - name: post small unicode policy name - url: /v1/archive_policy - method: POST + POST: /v1/archive_policy request_headers: content-type: application/json x-roles: admin @@ -348,31 +328,31 @@ tests: name: æ - name: retrieve small unicode policy name - url: $LOCATION + GET: $LOCATION response_json_paths: name: æ # List the collection - name: get archive policy list - url: /v1/archive_policy - # TODO(chdent): We do not know the order, should we? 
+ GET: /v1/archive_policy response_strings: - '"name": "medium"' - '"name": "large"' + response_json_paths: + $[?name = "large"].definition[?granularity = "1:00:00"].points: null + $[?name = "medium"].definition[?granularity = "0:00:02"].points: null # Delete one as non-admin - name: delete single archive non admin - url: /v1/archive_policy/medium - method: DELETE + DELETE: /v1/archive_policy/medium status: 403 # Delete one - name: delete single archive - url: /v1/archive_policy/medium - method: DELETE + DELETE: /v1/archive_policy/medium request_headers: x-roles: admin status: 204 @@ -380,14 +360,13 @@ tests: # It really is gone - name: confirm delete - url: /v1/archive_policy/medium + GET: $LAST_URL status: 404 # Fail to delete one that does not exist - name: delete missing archive - url: /v1/archive_policy/grandiose - method: DELETE + DELETE: /v1/archive_policy/grandiose request_headers: x-roles: admin status: 404 @@ -395,15 +374,13 @@ tests: - Archive policy grandiose does not exist - name: delete archive utf8 - url: /v1/archive_policy/%E2%9C%94%C3%A9%C3%B1%E2%98%83 - method: DELETE + DELETE: /v1/archive_policy/%E2%9C%94%C3%A9%C3%B1%E2%98%83 request_headers: x-roles: admin status: 204 - name: delete missing archive utf8 again - url: /v1/archive_policy/%E2%9C%94%C3%A9%C3%B1%E2%98%83 - method: DELETE + DELETE: /v1/archive_policy/%E2%9C%94%C3%A9%C3%B1%E2%98%83 request_headers: x-roles: admin status: 404 @@ -413,8 +390,7 @@ tests: # Add metric using the policy and then be unable to delete policy - name: create metric - url: /v1/metric - method: POST + POST: /v1/metric request_headers: content-type: application/json x-user-id: 93180da9-7c15-40d3-a050-a374551e52ee @@ -424,8 +400,7 @@ tests: status: 201 - name: delete in use policy - url: /v1/archive_policy/large - method: DELETE + DELETE: /v1/archive_policy/large request_headers: x-roles: admin status: 400 @@ -435,8 +410,7 @@ tests: # Attempt to create illogical policies - name: create illogical policy - url: 
/v1/archive_policy - method: POST + POST: /v1/archive_policy request_headers: content-type: application/json x-roles: admin @@ -451,8 +425,7 @@ tests: - timespan ≠ granularity × points - name: create invalid points policy - url: /v1/archive_policy - method: POST + POST: /v1/archive_policy request_headers: content-type: application/json x-roles: admin @@ -466,8 +439,7 @@ tests: - "Invalid input: not a valid value for dictionary value" - name: create invalid granularity policy - url: /v1/archive_policy - method: POST + POST: /v1/archive_policy request_headers: content-type: application/json x-roles: admin @@ -481,8 +453,7 @@ tests: - "Invalid input: not a valid value for dictionary value" - name: create identical granularities policy - url: /v1/archive_policy - method: POST + POST: /v1/archive_policy request_headers: content-type: application/json x-roles: admin @@ -498,8 +469,7 @@ tests: - "More than one archive policy uses granularity `1.0'" - name: policy invalid unit - url: /v1/archive_policy - method: POST + POST: /v1/archive_policy request_headers: content-type: application/json x-roles: admin @@ -513,8 +483,7 @@ tests: # Non admin user attempt - name: fail to create policy non-admin - url: /v1/archive_policy - method: POST + POST: /v1/archive_policy request_headers: x-user-id: b45187c5-150b-4730-bcb2-b5e04e234220 x-project-id: 16764ee0-bffe-4843-aa36-04b002cdbc7c @@ -530,8 +499,7 @@ tests: # Back windows - name: policy with back window - url: /v1/archive_policy - method: POST + POST: /v1/archive_policy request_headers: content-type: application/json x-roles: admin @@ -548,8 +516,7 @@ tests: - name: policy no back window desc: and default seconds on int granularity - url: /v1/archive_policy - method: POST + POST: /v1/archive_policy request_headers: content-type: application/json x-roles: admin @@ -568,8 +535,7 @@ tests: # Timespan, points, granularity input tests - name: policy float granularity - url: /v1/archive_policy - method: POST + POST: 
/v1/archive_policy request_headers: content-type: application/json x-roles: admin @@ -585,8 +551,7 @@ tests: $.definition[0].timespan: "1:06:40" - name: policy float timespan - url: /v1/archive_policy - method: POST + POST: /v1/archive_policy request_headers: content-type: application/json x-roles: admin -- GitLab From 7224d9e858f6b2e2b94ca2cf6267ee060e84f0e2 Mon Sep 17 00:00:00 2001 From: Chris Dent Date: Wed, 25 May 2016 19:32:44 +0000 Subject: [PATCH 0219/1483] Tuneup gabbi async.yaml file to modern standards Make use of METHOD: /url shorthand to make the request method more visible and more tightly associated with the url for easier reading. Change-Id: Ibfa5ef9e223929ad5a292d84ca01fbe15158ed84 --- gnocchi/tests/gabbi/gabbits/async.yaml | 13 +++++-------- 1 file changed, 5 insertions(+), 8 deletions(-) diff --git a/gnocchi/tests/gabbi/gabbits/async.yaml b/gnocchi/tests/gabbi/gabbits/async.yaml index 00ae0d82..d31e4692 100644 --- a/gnocchi/tests/gabbi/gabbits/async.yaml +++ b/gnocchi/tests/gabbi/gabbits/async.yaml @@ -8,8 +8,7 @@ fixtures: tests: - name: create archive policy - url: /v1/archive_policy - method: POST + POST: /v1/archive_policy request_headers: content-type: application/json x-roles: admin @@ -20,8 +19,7 @@ tests: status: 201 - name: make a generic resource - url: /v1/resource/generic - method: POST + POST: /v1/resource/generic request_headers: x-user-id: edca16f4-684e-4a91-85e9-0c1ceecdd147 x-project-id: e8459971-fae4-4670-8ed3-55dd9139d26d @@ -35,17 +33,16 @@ tests: status: 201 - name: confirm no metrics yet + GET: /v1/resource/generic/41937416-1644-497d-a0ed-b43d55a2b0ea/metric/some.counter/measures request_headers: x-user-id: edca16f4-684e-4a91-85e9-0c1ceecdd147 x-project-id: e8459971-fae4-4670-8ed3-55dd9139d26d content-type: application/json - url: /v1/resource/generic/41937416-1644-497d-a0ed-b43d55a2b0ea/metric/some.counter/measures response_json_paths: $: [] - name: post some measures - url: 
/v1/resource/generic/41937416-1644-497d-a0ed-b43d55a2b0ea/metric/some.counter/measures - method: post + POST: /v1/resource/generic/41937416-1644-497d-a0ed-b43d55a2b0ea/metric/some.counter/measures request_headers: x-user-id: edca16f4-684e-4a91-85e9-0c1ceecdd147 x-project-id: e8459971-fae4-4670-8ed3-55dd9139d26d @@ -61,7 +58,7 @@ tests: # aggregated. - name: get some measures - url: /v1/resource/generic/41937416-1644-497d-a0ed-b43d55a2b0ea/metric/some.counter/measures + GET: /v1/resource/generic/41937416-1644-497d-a0ed-b43d55a2b0ea/metric/some.counter/measures poll: count: 50 delay: .1 -- GitLab From d1cecc58b851b74f2ca4217327613479d455dbf7 Mon Sep 17 00:00:00 2001 From: Chris Dent Date: Wed, 25 May 2016 19:38:08 +0000 Subject: [PATCH 0220/1483] Tuneup gabbi base.yaml file to modern standards Make use of METHOD: /url shorthand to make the request method more visible and more tightly associated with the url for easier reading. Change-Id: I9c819d7bc001c6c05ef6dbb761fb34cc149eca37 --- gnocchi/tests/gabbi/gabbits/base.yaml | 32 +++++++++++---------------- 1 file changed, 13 insertions(+), 19 deletions(-) diff --git a/gnocchi/tests/gabbi/gabbits/base.yaml b/gnocchi/tests/gabbi/gabbits/base.yaml index 31971bbd..675407c7 100644 --- a/gnocchi/tests/gabbi/gabbits/base.yaml +++ b/gnocchi/tests/gabbi/gabbits/base.yaml @@ -5,7 +5,7 @@ tests: - name: get information on APIs desc: Root URL must return information about API versions - url: / + GET: / response_headers: content-type: application/json; charset=UTF-8 response_json_paths: @@ -13,8 +13,7 @@ tests: $.versions.[0].status: "CURRENT" - name: archive policy post success - url: /v1/archive_policy - method: POST + POST: /v1/archive_policy request_headers: content-type: application/json x-roles: admin @@ -35,8 +34,7 @@ tests: - name: post archive policy no auth desc: this confirms that auth handling comes before data validation - url: /v1/archive_policy - method: POST + POST: /v1/archive_policy request_headers: content-type: 
application/json data: @@ -46,13 +44,12 @@ tests: status: 403 - name: post metric with archive policy - url: /v1/metric + POST: /v1/metric request_headers: content-type: application/json x-roles: admin x-user-id: 93180da9-7c15-40d3-a050-a374551e52ee x-project-id: 99d13f22-3618-4288-82b8-6512ded77e4f - method: POST data: archive_policy_name: test1 status: 201 @@ -62,7 +59,7 @@ tests: $.archive_policy_name: test1 - name: retrieve metric info - url: $LOCATION + GET: $LOCATION status: 200 request_headers: content_type: /application\/json/ @@ -73,15 +70,14 @@ tests: $.created_by_project_id: 99d13f22-3618-4288-82b8-6512ded77e4f - name: list the one metric - url: /v1/metric + GET: /v1/metric status: 200 response_json_paths: $[0].archive_policy.name: test1 - name: post a single measure desc: post one measure - url: /v1/metric/$RESPONSE['$[0].id']/measures - method: POST + POST: /v1/metric/$RESPONSE['$[0].id']/measures request_headers: content-type: application/json x-user-id: 93180da9-7c15-40d3-a050-a374551e52ee @@ -93,7 +89,7 @@ tests: - name: Get list of resource type and URL desc: Resources index page should return list of type associated with a URL - url: /v1/resource/ + GET: /v1/resource/ response_headers: content-type: application/json; charset=UTF-8 status: 200 @@ -101,8 +97,7 @@ tests: $.generic: $SCHEME://$NETLOC/v1/resource/generic - name: post generic resource - url: /v1/resource/generic - method: post + POST: /v1/resource/generic request_headers: content-type: application/json x-user-id: 93180da9-7c15-40d3-a050-a374551e52ee @@ -122,8 +117,7 @@ tests: created_by_project_id: 99d13f22-3618-4288-82b8-6512ded77e4f - name: post generic resource bad id - url: /v1/resource/generic - method: post + POST: /v1/resource/generic request_headers: content-type: application/json x-user-id: 93180da9-7c15-40d3-a050-a374551e52ee @@ -145,11 +139,11 @@ tests: original_resource_id: 1.2.3.4 - name: get status denied - url: /v1/status + GET: /v1/status status: 403 - name: get status - 
url: /v1/status + GET: /v1/status request_headers: content-type: application/json x-user-id: 93180da9-7c15-40d3-a050-a374551e52ee @@ -159,7 +153,7 @@ tests: $.storage.`len`: 2 - name: get status, no details - url: /v1/status?details=False + GET: /v1/status?details=False request_headers: content-type: application/json x-user-id: 93180da9-7c15-40d3-a050-a374551e52ee -- GitLab From fc7877c9d35dbc99fc4628cd9f5fc696e49d7d78 Mon Sep 17 00:00:00 2001 From: Chris Dent Date: Wed, 25 May 2016 20:00:14 +0000 Subject: [PATCH 0221/1483] Tuneup gabbi batch_measures.yaml file to modern standards Make use of METHOD: /url shorthand to make the request method more visible and more tightly associated with the url for easier reading. Change-Id: Ib4073509cc263862f32f24a464c3ec3cf085710b --- .../tests/gabbi/gabbits/batch_measures.yaml | 33 +++++++------------ 1 file changed, 11 insertions(+), 22 deletions(-) diff --git a/gnocchi/tests/gabbi/gabbits/batch_measures.yaml b/gnocchi/tests/gabbi/gabbits/batch_measures.yaml index dda6cc32..e5b748dd 100644 --- a/gnocchi/tests/gabbi/gabbits/batch_measures.yaml +++ b/gnocchi/tests/gabbi/gabbits/batch_measures.yaml @@ -4,8 +4,7 @@ fixtures: tests: - name: create archive policy desc: for later use - url: /v1/archive_policy - method: POST + POST: /v1/archive_policy request_headers: content-type: application/json x-roles: admin @@ -16,19 +15,17 @@ tests: status: 201 - name: create metric - url: /v1/metric + POST: /v1/metric request_headers: content-type: application/json - method: post data: archive_policy_name: simple status: 201 - name: push measurements to metric - url: /v1/batch/metrics/measures + POST: /v1/batch/metrics/measures request_headers: content-type: application/json - method: post data: $RESPONSE['$.id']: - timestamp: "2015-03-06T14:33:57" @@ -38,10 +35,9 @@ tests: status: 202 - name: push measurements to unknown metrics - url: /v1/batch/metrics/measures + POST: /v1/batch/metrics/measures request_headers: content-type: application/json 
- method: post data: 37AEC8B7-C0D9-445B-8AB9-D3C6312DCF5C: - timestamp: "2015-03-06T14:33:57" @@ -58,10 +54,9 @@ tests: - "Unknown metrics: 37aec8b7-c0d9-445b-8ab9-d3c6312dcf5c, 37aec8b7-c0d9-445b-8ab9-d3c6312dcf5d" - name: push measurements to unknown named metrics - url: /v1/batch/resources/metrics/measures + POST: /v1/batch/resources/metrics/measures request_headers: content-type: application/json - method: post data: 37AEC8B7-C0D9-445B-8AB9-D3C6312DCF5D: cpu_util: @@ -80,17 +75,15 @@ tests: - "Unknown metrics: 37aec8b7-c0d9-445b-8ab9-d3c6312dcf5d/cpu_util, 46c9418d-d63b-4cdd-be89-8f57ffc5952e/disk.iops" - name: create second metric - url: /v1/metric + POST: /v1/metric request_headers: content-type: application/json - method: post data: archive_policy_name: simple status: 201 - name: post a resource - url: /v1/resource/generic - method: post + POST: /v1/resource/generic request_headers: content-type: application/json data: @@ -105,8 +98,7 @@ tests: status: 201 - name: post a second resource - url: /v1/resource/generic - method: post + POST: /v1/resource/generic request_headers: content-type: application/json data: @@ -121,13 +113,12 @@ tests: status: 201 - name: list metrics - url: /v1/metric + GET: /v1/metric - name: push measurements to two metrics - url: /v1/batch/metrics/measures + POST: /v1/batch/metrics/measures request_headers: content-type: application/json - method: post data: $RESPONSE['$[0].id']: - timestamp: "2015-03-06T14:33:57" @@ -142,10 +133,9 @@ tests: status: 202 - name: push measurements to two named metrics - url: /v1/batch/resources/metrics/measures + POST: /v1/batch/resources/metrics/measures request_headers: content-type: application/json - method: post data: 46c9418d-d63b-4cdd-be89-8f57ffc5952e: disk.iops: @@ -169,5 +159,4 @@ tests: value: 43.1 - timestamp: "2015-03-06T14:34:12" value: 12 - status: 202 -- GitLab From 24073a37e4bd702b704b438ed1bc9d9418d41068 Mon Sep 17 00:00:00 2001 From: Chris Dent Date: Wed, 25 May 2016 20:15:16 +0000 
Subject: [PATCH 0222/1483] Tuneup gabbi history.yaml file to modern standards Make use of METHOD: /url shorthand to make the request method more visible and more tightly associated with the url for easier reading. Use $LAST_URL to refer to the URL of the previous request, but only when the sameness of the URL is a relevant part of the expression of the tests-in-sequence. (That is, don't just use LAST_URL because the URL is the same.) Change-Id: I17d980830c114db14cc0881798a35f0c46bce3fa --- gnocchi/tests/gabbi/gabbits/history.yaml | 27 ++++++++++-------------- 1 file changed, 11 insertions(+), 16 deletions(-) diff --git a/gnocchi/tests/gabbi/gabbits/history.yaml b/gnocchi/tests/gabbi/gabbits/history.yaml index b7c28bc4..11b5983f 100644 --- a/gnocchi/tests/gabbi/gabbits/history.yaml +++ b/gnocchi/tests/gabbi/gabbits/history.yaml @@ -7,8 +7,7 @@ fixtures: tests: - name: create archive policy - url: /v1/archive_policy - method: POST + POST: /v1/archive_policy request_headers: content-type: application/json x-roles: admin @@ -16,15 +15,14 @@ tests: name: low definition: - granularity: 1 hour + status: 201 response_headers: location: $SCHEME://$NETLOC/v1/archive_policy/low - status: 201 # Try creating a new generic resource - name: post generic resource - url: /v1/resource/generic - method: post + POST: /v1/resource/generic request_headers: x-user-id: 0fbb231484614b1a80131fc22f6afc9c x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea @@ -45,8 +43,7 @@ tests: # Update it twice - name: patch resource user_id - url: /v1/resource/generic/f93450f2-d8a5-4d67-9985-02511241e7d1 - method: patch + PATCH: /v1/resource/generic/f93450f2-d8a5-4d67-9985-02511241e7d1 request_headers: x-user-id: 0fbb231484614b1a80131fc22f6afc9c x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea @@ -59,7 +56,7 @@ tests: project_id: f3d41b770cc14f0bb94a1d5be9c0e3ea - name: patch resource project_id - url: /v1/resource/generic/f93450f2-d8a5-4d67-9985-02511241e7d1 + PATCH: $LAST_URL method: patch request_headers: 
x-user-id: 0fbb231484614b1a80131fc22f6afc9c @@ -78,7 +75,7 @@ tests: # List resources - name: list all resources without history - url: /v1/resource/generic + GET: /v1/resource/generic request_headers: x-user-id: 0fbb231484614b1a80131fc22f6afc9c x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea @@ -87,7 +84,7 @@ tests: $[0].project_id: fe20a931-1012-4cc6-addc-39556ec60907 - name: list all resources with history - url: /v1/resource/generic + GET: $LAST_URL request_headers: accept: application/json; details=True; history=True x-user-id: 0fbb231484614b1a80131fc22f6afc9c @@ -105,8 +102,7 @@ tests: $[2].project_id: fe20a931-1012-4cc6-addc-39556ec60907 - name: patch resource metrics - url: /v1/resource/generic/f93450f2-d8a5-4d67-9985-02511241e7d1 - method: patch + PATCH: /v1/resource/generic/f93450f2-d8a5-4d67-9985-02511241e7d1 request_headers: x-user-id: 0fbb231484614b1a80131fc22f6afc9c x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea @@ -118,7 +114,7 @@ tests: status: 200 - name: list all resources with history no change after metrics update - url: /v1/resource/generic + GET: /v1/resource/generic request_headers: accept: application/json; details=True; history=True x-user-id: 0fbb231484614b1a80131fc22f6afc9c @@ -136,8 +132,7 @@ tests: $[2].project_id: fe20a931-1012-4cc6-addc-39556ec60907 - name: create new metrics - url: /v1/resource/generic/f93450f2-d8a5-4d67-9985-02511241e7d1/metric - method: post + POST: /v1/resource/generic/f93450f2-d8a5-4d67-9985-02511241e7d1/metric request_headers: x-user-id: 0fbb231484614b1a80131fc22f6afc9c x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea @@ -148,7 +143,7 @@ tests: status: 204 - name: list all resources with history no change after metrics creation - url: /v1/resource/generic + GET: /v1/resource/generic request_headers: accept: application/json; details=True; history=True x-user-id: 0fbb231484614b1a80131fc22f6afc9c -- GitLab From 6aae4578441b29bd78a3f57f8db48de0dfb9970c Mon Sep 17 00:00:00 2001 From: gord chung Date: Tue, 24 May 2016 
12:09:05 -0400 Subject: [PATCH 0223/1483] raise NoSuchMetric when deleting metric already marked deleted we should hide the fact the metric is marked for deletion but not yet deleted. Change-Id: Ib4a5d581208abed7978c53c169ce3ae0cc5d4ece Closes-Bug: #1584139 --- gnocchi/indexer/sqlalchemy.py | 3 ++- gnocchi/tests/gabbi/gabbits/metric.yaml | 4 ++++ gnocchi/tests/test_indexer.py | 8 +------- setup.cfg | 2 +- 4 files changed, 8 insertions(+), 9 deletions(-) diff --git a/gnocchi/indexer/sqlalchemy.py b/gnocchi/indexer/sqlalchemy.py index 05f74da5..de5953b9 100644 --- a/gnocchi/indexer/sqlalchemy.py +++ b/gnocchi/indexer/sqlalchemy.py @@ -773,7 +773,8 @@ class SQLAlchemyIndexer(indexer.IndexerDriver): def delete_metric(self, id): with self.facade.writer() as session: if session.query(Metric).filter( - Metric.id == id).update({"status": "delete"}) == 0: + Metric.id == id, Metric.status == 'active').update( + {"status": "delete"}) == 0: raise indexer.NoSuchMetric(id) diff --git a/gnocchi/tests/gabbi/gabbits/metric.yaml b/gnocchi/tests/gabbi/gabbits/metric.yaml index 735dd7c7..e9ef78bc 100644 --- a/gnocchi/tests/gabbi/gabbits/metric.yaml +++ b/gnocchi/tests/gabbi/gabbits/metric.yaml @@ -278,6 +278,10 @@ tests: method: DELETE status: 204 + - name: delete metric again + DELETE: $LAST_URL + status: 404 + - name: delete non existent metric url: /v1/metric/foo method: DELETE diff --git a/gnocchi/tests/test_indexer.py b/gnocchi/tests/test_indexer.py index ec4b6ea6..7cfdb3e4 100644 --- a/gnocchi/tests/test_indexer.py +++ b/gnocchi/tests/test_indexer.py @@ -584,13 +584,7 @@ class TestIndexerDriver(tests_base.TestCase): rc = self.index.create_resource('generic', r1, user, project, metrics={'foo': e1, 'bar': e2}) self.index.delete_metric(e1) - # It can be called twice - try: - self.index.delete_metric(e1) - except indexer.NoSuchMetric: - # It's possible that the metric has been expunged by another - # parallel test. No worry. 
- pass + self.assertRaises(indexer.NoSuchMetric, self.index.delete_metric, e1) r = self.index.get_resource('generic', r1, with_metrics=True) self.assertIsNotNone(r.started_at) self.assertIsNotNone(r.revision_start) diff --git a/setup.cfg b/setup.cfg index fa9a61ea..2aba8a3e 100644 --- a/setup.cfg +++ b/setup.cfg @@ -58,7 +58,7 @@ doc = Jinja2 test = pifpaf>=0.1.0 - gabbi>=0.101.2 + gabbi>=1.19.0 coverage>=3.6 fixtures mock -- GitLab From 6dea9dee20e4a129f4f2e4c68f0da5ac4b9b2108 Mon Sep 17 00:00:00 2001 From: zhangguoqing Date: Thu, 26 May 2016 06:07:27 +0000 Subject: [PATCH 0224/1483] Tuneup gabbi metric_granularity.yaml file to modern standards Make use of METHOD: /url shorthand to make the request method more visible and more tightly associated with the url for easier reading. Change-Id: Ic1a0a1b5d1dc98639fb2c9b11d5224a67fdac87a --- .../gabbi/gabbits/metric_granularity.yaml | 19 ++++++++----------- 1 file changed, 8 insertions(+), 11 deletions(-) diff --git a/gnocchi/tests/gabbi/gabbits/metric_granularity.yaml b/gnocchi/tests/gabbi/gabbits/metric_granularity.yaml index e132190c..c6de61d3 100644 --- a/gnocchi/tests/gabbi/gabbits/metric_granularity.yaml +++ b/gnocchi/tests/gabbi/gabbits/metric_granularity.yaml @@ -4,8 +4,7 @@ fixtures: tests: - name: create archive policy desc: for later use - url: /v1/archive_policy - method: POST + POST: /v1/archive_policy request_headers: content-type: application/json x-roles: admin @@ -16,8 +15,7 @@ tests: status: 201 - name: create valid metric - url: /v1/metric - method: POST + POST: /v1/metric request_headers: content-type: application/json data: @@ -25,10 +23,9 @@ tests: status: 201 - name: push measurements to metric - url: /v1/metric/$RESPONSE['$.id']/measures + POST: /v1/metric/$RESPONSE['$.id']/measures request_headers: content-type: application/json - method: post data: - timestamp: "2015-03-06T14:33:57" value: 43.1 @@ -37,21 +34,21 @@ tests: status: 202 - name: get metric list for invalid granularity - url: 
/v1/metric + GET: /v1/metric status: 200 - name: get measurements invalid granularity - url: /v1/metric/$RESPONSE['$[0].id']/measures?granularity=42 + GET: /v1/metric/$RESPONSE['$[0].id']/measures?granularity=42 status: 404 response_strings: - Granularity '42.0' for metric $RESPONSE['$[0].id'] does not exist - name: get metric list for granularity - url: /v1/metric + GET: /v1/metric status: 200 - name: get measurements granularity - url: /v1/metric/$RESPONSE['$[0].id']/measures?granularity=1 + GET: /v1/metric/$RESPONSE['$[0].id']/measures?granularity=1 status: 200 poll: count: 50 @@ -59,4 +56,4 @@ tests: response_json_paths: $: - ["2015-03-06T14:33:57+00:00", 1.0, 43.1] - - ["2015-03-06T14:34:12+00:00", 1.0, 12.0] \ No newline at end of file + - ["2015-03-06T14:34:12+00:00", 1.0, 12.0] -- GitLab From 5a35b7161bf1d7e9cc45e91439cdbe6274179771 Mon Sep 17 00:00:00 2001 From: zhangguoqing Date: Thu, 26 May 2016 06:27:33 +0000 Subject: [PATCH 0225/1483] Tuneup gabbi pagination.yaml file to modern standards Make use of METHOD: /url shorthand to make the request method more visible and more tightly associated with the url for easier reading. Use $LAST_URL to refer to the URL of the previous request, but only when the sameness of the URL is a relevant part of the expression of the tests-in-sequence. (That is, don't just use LAST_URL because the URL is the same.) 
Change-Id: I9e6f677fbf75ab306ede0db78d8970cc8a692df7 --- gnocchi/tests/gabbi/gabbits/pagination.yaml | 69 +++++++-------------- 1 file changed, 23 insertions(+), 46 deletions(-) diff --git a/gnocchi/tests/gabbi/gabbits/pagination.yaml b/gnocchi/tests/gabbi/gabbits/pagination.yaml index cd8f25b4..68826e7e 100644 --- a/gnocchi/tests/gabbi/gabbits/pagination.yaml +++ b/gnocchi/tests/gabbi/gabbits/pagination.yaml @@ -11,8 +11,7 @@ tests: # Creation resources for this scenarion # - name: post resource 1 - url: /v1/resource/generic - method: post + POST: /v1/resource/generic request_headers: x-user-id: 0fbb231484614b1a80131fc22f6afc9c x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea @@ -25,8 +24,7 @@ tests: status: 201 - name: post resource 2 - url: /v1/resource/generic - method: post + POST: $LAST_URL request_headers: x-user-id: 0fbb231484614b1a80131fc22f6afc9c x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea @@ -39,8 +37,7 @@ tests: status: 201 - name: post resource 3 - url: /v1/resource/generic - method: post + POST: $LAST_URL request_headers: x-user-id: 0fbb231484614b1a80131fc22f6afc9c x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea @@ -53,8 +50,7 @@ tests: status: 201 - name: post resource 4 - url: /v1/resource/generic - method: post + POST: $LAST_URL request_headers: x-user-id: 0fbb231484614b1a80131fc22f6afc9c x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea @@ -67,8 +63,7 @@ tests: status: 201 - name: post resource 5 - url: /v1/resource/generic - method: post + POST: $LAST_URL request_headers: x-user-id: 0fbb231484614b1a80131fc22f6afc9c x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea @@ -84,8 +79,7 @@ tests: # Basic resource limit/ordering tests # - name: list first two items default order - url: /v1/resource/generic?limit=2 - method: get + GET: /v1/resource/generic?limit=2 request_headers: x-user-id: 0fbb231484614b1a80131fc22f6afc9c x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea @@ -96,8 +90,7 @@ tests: $[1].id: 4facbf7e-a900-406d-a828-82393f7006b3 - name: list next 
third items default order - url: /v1/resource/generic?limit=4&marker=4facbf7e-a900-406d-a828-82393f7006b3 - method: get + GET: /v1/resource/generic?limit=4&marker=4facbf7e-a900-406d-a828-82393f7006b3 request_headers: x-user-id: 0fbb231484614b1a80131fc22f6afc9c x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea @@ -109,8 +102,7 @@ tests: $[2].id: 1e3d5702-2cbf-46e0-ba13-0ddaa3c71150 - name: list first two items order by id witouth direction - url: /v1/resource/generic?limit=2&sort=id - method: get + GET: /v1/resource/generic?limit=2&sort=id request_headers: x-user-id: 0fbb231484614b1a80131fc22f6afc9c x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea @@ -122,8 +114,7 @@ tests: $[1].id: 28593168-52bb-43b5-a6db-fc2343aac02a - name: list first two items order by id - url: /v1/resource/generic?limit=2&sort=id:asc - method: get + GET: /v1/resource/generic?limit=2&sort=id:asc request_headers: x-user-id: 0fbb231484614b1a80131fc22f6afc9c x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea @@ -134,8 +125,7 @@ tests: $[1].id: 28593168-52bb-43b5-a6db-fc2343aac02a - name: list next third items order by id - url: /v1/resource/generic?limit=4&sort=id:asc&marker=28593168-52bb-43b5-a6db-fc2343aac02a - method: get + GET: /v1/resource/generic?limit=4&sort=id:asc&marker=28593168-52bb-43b5-a6db-fc2343aac02a request_headers: x-user-id: 0fbb231484614b1a80131fc22f6afc9c x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea @@ -147,8 +137,7 @@ tests: $[2].id: 57a9e836-87b8-4a21-9e30-18a474b98fef - name: search for some resources with limit, order and marker - url: /v1/search/resource/generic?limit=2&sort=id:asc&marker=36775172-ebc9-4060-9870-a649361bc3ab - method: POST + POST: /v1/search/resource/generic?limit=2&sort=id:asc&marker=36775172-ebc9-4060-9870-a649361bc3ab request_headers: x-user-id: 0fbb231484614b1a80131fc22f6afc9c x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea @@ -168,8 +157,7 @@ tests: # Invalid limit/ordering # - name: invalid sort_key - url: /v1/resource/generic?sort=invalid:asc - method: get 
+ GET: /v1/resource/generic?sort=invalid:asc request_headers: x-user-id: 0fbb231484614b1a80131fc22f6afc9c x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea @@ -177,8 +165,7 @@ tests: status: 400 - name: invalid sort_dir - url: /v1/resource/generic?sort=id:invalid - method: get + GET: /v1/resource/generic?sort=id:invalid request_headers: x-user-id: 0fbb231484614b1a80131fc22f6afc9c x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea @@ -186,8 +173,7 @@ tests: status: 400 - name: invalid marker - url: /v1/resource/generic?marker=d44b3f4c-27bc-4ace-b81c-2a8e60026874 - method: get + GET: /v1/resource/generic?marker=d44b3f4c-27bc-4ace-b81c-2a8e60026874 request_headers: x-user-id: 0fbb231484614b1a80131fc22f6afc9c x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea @@ -195,8 +181,7 @@ tests: status: 400 - name: invalid negative limit - url: /v1/resource/generic?limit=-2 - method: get + GET: /v1/resource/generic?limit=-2 request_headers: x-user-id: 0fbb231484614b1a80131fc22f6afc9c x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea @@ -204,8 +189,7 @@ tests: status: 400 - name: invalid limit - url: /v1/resource/generic?limit=invalid - method: get + GET: /v1/resource/generic?limit=invalid request_headers: x-user-id: 0fbb231484614b1a80131fc22f6afc9c x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea @@ -217,8 +201,7 @@ tests: # - name: post resource 6 - url: /v1/resource/generic - method: post + POST: /v1/resource/generic request_headers: x-user-id: 0fbb231484614b1a80131fc22f6afc9c x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea @@ -231,8 +214,7 @@ tests: status: 201 - name: post resource 7 - url: /v1/resource/generic - method: post + POST: $LAST_URL request_headers: x-user-id: 0fbb231484614b1a80131fc22f6afc9c x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea @@ -245,8 +227,7 @@ tests: status: 201 - name: post resource 8 - url: /v1/resource/generic - method: post + POST: $LAST_URL request_headers: x-user-id: 0fbb231484614b1a80131fc22f6afc9c x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea @@ -259,8 
+240,7 @@ tests: status: 201 - name: default limit - url: /v1/resource/generic - method: get + GET: $LAST_URL request_headers: x-user-id: 0fbb231484614b1a80131fc22f6afc9c x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea @@ -271,8 +251,7 @@ tests: - name: update resource 5 - url: /v1/resource/generic/1e3d5702-2cbf-46e0-ba13-0ddaa3c71150 - method: patch + PATCH: /v1/resource/generic/1e3d5702-2cbf-46e0-ba13-0ddaa3c71150 request_headers: x-user-id: 0fbb231484614b1a80131fc22f6afc9c x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea @@ -281,8 +260,7 @@ tests: ended_at: "2014-01-30T02:02:02.000000" - name: update resource 5 bis - url: /v1/resource/generic/1e3d5702-2cbf-46e0-ba13-0ddaa3c71150 - method: patch + PATCH: $LAST_URL request_headers: x-user-id: 0fbb231484614b1a80131fc22f6afc9c x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea @@ -291,8 +269,7 @@ tests: ended_at: "2014-01-31T02:02:02.000000" - name: default limit with history and multiple sort key - url: /v1/resource/generic?history=true&sort=id:asc&sort=ended_at:desc-nullslast - method: get + GET: /v1/resource/generic?history=true&sort=id:asc&sort=ended_at:desc-nullslast request_headers: x-user-id: 0fbb231484614b1a80131fc22f6afc9c x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea -- GitLab From cc06c733d737eca18c53df3f4d9348610941783f Mon Sep 17 00:00:00 2001 From: zhangguoqing Date: Thu, 26 May 2016 07:17:53 +0000 Subject: [PATCH 0226/1483] Tuneup gabbi search.yaml file to modern standards Make use of METHOD: /url shorthand to make the request method more visible and more tightly associated with the url for easier reading. 
Change-Id: Iec90797f1a6dbe427f534808378d86a1f185051f --- gnocchi/tests/gabbi/gabbits/search.yaml | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/gnocchi/tests/gabbi/gabbits/search.yaml b/gnocchi/tests/gabbi/gabbits/search.yaml index 2cd2deb6..f0d7abd7 100644 --- a/gnocchi/tests/gabbi/gabbits/search.yaml +++ b/gnocchi/tests/gabbi/gabbits/search.yaml @@ -8,18 +8,17 @@ fixtures: tests: - name: typo of search - url: /v1/search/notexists + GET: /v1/search/notexists status: 404 - name: typo of search in resource - url: /v1/search/resource/foobar + GET: /v1/search/resource/foobar status: 404 - name: search with invalid uuid - url: /v1/search/resource/generic - method: POST + POST: /v1/search/resource/generic request_headers: content-type: application/json data: =: - id: "cd9eef" \ No newline at end of file + id: "cd9eef" -- GitLab From 72c8b5f4a705abe3293a2c323777344b53ebe4f4 Mon Sep 17 00:00:00 2001 From: zhangguoqing Date: Thu, 26 May 2016 07:23:02 +0000 Subject: [PATCH 0227/1483] Tuneup gabbi transformedids.yaml file to modern standards Make use of METHOD: /url shorthand to make the request method more visible and more tightly associated with the url for easier reading. Use $LAST_URL to refer to the URL of the previous request, but only when the sameness of the URL is a relevant part of the expression of the tests-in-sequence. (That is, don't just use LAST_URL because the URL is the same.) 
Change-Id: I1a86fcf07faa65f2e554c7278841abcd8870f2c5 --- .../tests/gabbi/gabbits/transformedids.yaml | 31 +++++++------------ 1 file changed, 12 insertions(+), 19 deletions(-) diff --git a/gnocchi/tests/gabbi/gabbits/transformedids.yaml b/gnocchi/tests/gabbi/gabbits/transformedids.yaml index 76dd5e2a..fdf1d974 100644 --- a/gnocchi/tests/gabbi/gabbits/transformedids.yaml +++ b/gnocchi/tests/gabbi/gabbits/transformedids.yaml @@ -20,8 +20,7 @@ tests: - name: create archive policy desc: for later use - url: /v1/archive_policy - method: POST + POST: /v1/archive_policy request_headers: x-roles: admin data: @@ -32,8 +31,7 @@ tests: # Check transformed uuids across the URL hierarchy - name: post new resource non uuid - url: /v1/resource/generic - method: post + POST: /v1/resource/generic data: id: generic one user_id: 0fbb231484614b1a80131fc22f6afc9c @@ -50,18 +48,17 @@ tests: location: /v1/resource/generic/[a-f0-9-]{36}/ - name: get new non uuid resource by external id - url: /v1/resource/generic/generic%20one + GET: /v1/resource/generic/generic%20one response_json_paths: $.id: $RESPONSE['$.id'] - name: get new non uuid resource by internal id - url: /v1/resource/generic/$RESPONSE['$.id'] + GET: /v1/resource/generic/$RESPONSE['$.id'] response_json_paths: $.id: $RESPONSE['$.id'] - name: patch by external id - url: /v1/resource/generic/generic%20one - method: PATCH + PATCH: /v1/resource/generic/generic%20one data: metrics: cattle: @@ -71,18 +68,17 @@ tests: - '"cattle"' - name: list metric by external resource id - url: /v1/resource/generic/generic%20one/metric + GET: /v1/resource/generic/generic%20one/metric response_json_paths: $[0].name: cattle - name: list empty measures by external resource id - url: /v1/resource/generic/generic%20one/metric/cattle/measures + GET: /v1/resource/generic/generic%20one/metric/cattle/measures response_json_paths: $: [] - name: post measures by external resource id - url: /v1/resource/generic/generic%20one/metric/cattle/measures - method: 
POST + POST: /v1/resource/generic/generic%20one/metric/cattle/measures data: - timestamp: "2015-03-06T14:33:57" value: 43.1 @@ -91,7 +87,7 @@ tests: status: 202 - name: list two measures by external resource id - url: /v1/resource/generic/generic%20one/metric/cattle/measures + GET: $LAST_URL poll: count: 10 delay: 1 @@ -100,15 +96,13 @@ tests: $[1][2]: 12 - name: delete the resource by external id - url: /v1/resource/generic/generic%20one - method: DELETE + DELETE: /v1/resource/generic/generic%20one status: 204 # Check length handling - name: fail to post too long non uuid resource id - url: /v1/resource/generic - method: post + POST: /v1/resource/generic data: id: four score and seven years ago we the people of the United States of America i have a dream it is the courage to continue that counts four score and seven years ago we the people of the United States of America i have a dream it is the courage to continue that counts four score and seven years ago we the people of the United States of America i have a dream it is the courage to continue that counts user_id: 0fbb231484614b1a80131fc22f6afc9c @@ -121,8 +115,7 @@ tests: - not a valid value for - name: post long non uuid resource id - url: /v1/resource/generic - method: post + POST: $LAST_URL data: # 255 char string id: four score and seven years ago we the people of the United States of America i have a dream it is the courage to continue that counts four score and seven years ago we the people of the United States of America i have a dream it is the courage to continue -- GitLab From aceb647656be219d2edfb57bcefdc74f80e2c19e Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Tue, 17 May 2016 07:03:43 +0200 Subject: [PATCH 0228/1483] Enable releasenotes documentation Change-Id: I135989dd8359da989ac215d21cf0e5ec8c622c90 --- .gitignore | 1 + releasenotes/notes/.placeholder | 0 releasenotes/source/2.1.rst | 6 + releasenotes/source/_static/.placeholder | 0 releasenotes/source/_templates/.placeholder | 0 
releasenotes/source/conf.py | 274 ++++++++++++++++++++ releasenotes/source/index.rst | 18 ++ releasenotes/source/unreleased.rst | 5 + setup.cfg | 1 + tox.ini | 4 + 10 files changed, 309 insertions(+) create mode 100644 releasenotes/notes/.placeholder create mode 100644 releasenotes/source/2.1.rst create mode 100644 releasenotes/source/_static/.placeholder create mode 100644 releasenotes/source/_templates/.placeholder create mode 100644 releasenotes/source/conf.py create mode 100644 releasenotes/source/index.rst create mode 100644 releasenotes/source/unreleased.rst diff --git a/.gitignore b/.gitignore index 5599e5cd..0562170d 100644 --- a/.gitignore +++ b/.gitignore @@ -7,6 +7,7 @@ ChangeLog etc/gnocchi/gnocchi.conf doc/build doc/source/rest.rst +releasenotes/build cover .coverage dist diff --git a/releasenotes/notes/.placeholder b/releasenotes/notes/.placeholder new file mode 100644 index 00000000..e69de29b diff --git a/releasenotes/source/2.1.rst b/releasenotes/source/2.1.rst new file mode 100644 index 00000000..75b12881 --- /dev/null +++ b/releasenotes/source/2.1.rst @@ -0,0 +1,6 @@ +=================================== + 2.1 Series Release Notes +=================================== + +.. release-notes:: + :branch: origin/stable/2.1 diff --git a/releasenotes/source/_static/.placeholder b/releasenotes/source/_static/.placeholder new file mode 100644 index 00000000..e69de29b diff --git a/releasenotes/source/_templates/.placeholder b/releasenotes/source/_templates/.placeholder new file mode 100644 index 00000000..e69de29b diff --git a/releasenotes/source/conf.py b/releasenotes/source/conf.py new file mode 100644 index 00000000..9e1ccdb2 --- /dev/null +++ b/releasenotes/source/conf.py @@ -0,0 +1,274 @@ +# -*- coding: utf-8 -*- +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Gnocchi Release Notes documentation build configuration file, created by +# sphinx-quickstart on Mon Nov 23 20:38:38 2015. +# +# This file is execfile()d with the current directory set to its +# containing dir. +# +# Note that not all possible configuration values are present in this +# autogenerated file. +# +# All configuration values have a default; values that are commented out +# serve to show the default. + +# If extensions (or modules to document with autodoc) are in another directory, +# add these directories to sys.path here. If the directory is relative to the +# documentation root, use os.path.abspath to make it absolute, like shown here. +# sys.path.insert(0, os.path.abspath('.')) + +# -- General configuration ------------------------------------------------ + +# If your documentation needs a minimal Sphinx version, state it here. +# needs_sphinx = '1.0' + +# Add any Sphinx extension module names here, as strings. They can be +# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom +# ones. +extensions = [ + 'oslosphinx', + 'reno.sphinxext', +] + +# Add any paths that contain templates here, relative to this directory. +templates_path = ['_templates'] + +# The suffix of source filenames. +source_suffix = '.rst' + +# The encoding of source files. +# source_encoding = 'utf-8-sig' + +# The master toctree document. +master_doc = 'index' + +# General information about the project. 
+project = u'Gnocchi Release Notes' +copyright = u'2015-present, Gnocchi developers' + +# The version info for the project you're documenting, acts as replacement for +# |version| and |release|, also used in various other places throughout the +# built documents. +# +import pbr.version +gnocchi_version = pbr.version.VersionInfo('gnocchi') +# The short X.Y version. +version = gnocchi_version.canonical_version_string() +# The full version, including alpha/beta/rc tags. +release = gnocchi_version.version_string_with_vcs() + +# The language for content autogenerated by Sphinx. Refer to documentation +# for a list of supported languages. +# language = None + +# There are two options for replacing |today|: either, you set today to some +# non-false value, then it is used: +# today = '' +# Else, today_fmt is used as the format for a strftime call. +# today_fmt = '%B %d, %Y' + +# List of patterns, relative to source directory, that match files and +# directories to ignore when looking for source files. +exclude_patterns = [] + +# The reST default role (used for this markup: `text`) to use for all +# documents. +# default_role = None + +# If true, '()' will be appended to :func: etc. cross-reference text. +# add_function_parentheses = True + +# If true, the current module name will be prepended to all description +# unit titles (such as .. function::). +# add_module_names = True + +# If true, sectionauthor and moduleauthor directives will be shown in the +# output. They are ignored by default. +# show_authors = False + +# The name of the Pygments (syntax highlighting) style to use. +pygments_style = 'sphinx' + +# A list of ignored prefixes for module index sorting. +# modindex_common_prefix = [] + +# If true, keep warnings as "system message" paragraphs in the built documents. +# keep_warnings = False + + +# -- Options for HTML output ---------------------------------------------- + +# The theme to use for HTML and HTML Help pages. 
See the documentation for +# a list of builtin themes. +html_theme = 'default' + +# Theme options are theme-specific and customize the look and feel of a theme +# further. For a list of options available for each theme, see the +# documentation. +# html_theme_options = {} + +# Add any paths that contain custom themes here, relative to this directory. +# html_theme_path = [] + +# The name for this set of Sphinx documents. If None, it defaults to +# " v documentation". +# html_title = None + +# A shorter title for the navigation bar. Default is the same as html_title. +# html_short_title = None + +# The name of an image file (relative to this directory) to place at the top +# of the sidebar. +# html_logo = None + +# The name of an image file (within the static path) to use as favicon of the +# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 +# pixels large. +# html_favicon = None + +# Add any paths that contain custom static files (such as style sheets) here, +# relative to this directory. They are copied after the builtin static files, +# so a file named "default.css" will overwrite the builtin "default.css". +html_static_path = ['_static'] + +# Add any extra paths that contain custom files (such as robots.txt or +# .htaccess) here, relative to this directory. These files are copied +# directly to the root of the documentation. +# html_extra_path = [] + +# If not '', a 'Last updated on:' timestamp is inserted at every page bottom, +# using the given strftime format. +# html_last_updated_fmt = '%b %d, %Y' + +# If true, SmartyPants will be used to convert quotes and dashes to +# typographically correct entities. +# html_use_smartypants = True + +# Custom sidebar templates, maps document names to template names. +# html_sidebars = {} + +# Additional templates that should be rendered to pages, maps page names to +# template names. +# html_additional_pages = {} + +# If false, no module index is generated. 
+# html_domain_indices = True + +# If false, no index is generated. +# html_use_index = True + +# If true, the index is split into individual pages for each letter. +# html_split_index = False + +# If true, links to the reST sources are added to the pages. +# html_show_sourcelink = True + +# If true, "Created using Sphinx" is shown in the HTML footer. Default is True. +# html_show_sphinx = True + +# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. +# html_show_copyright = True + +# If true, an OpenSearch description file will be output, and all pages will +# contain a tag referring to it. The value of this option must be the +# base URL from which the finished HTML is served. +# html_use_opensearch = '' + +# This is the file name suffix for HTML files (e.g. ".xhtml"). +# html_file_suffix = None + +# Output file base name for HTML help builder. +htmlhelp_basename = 'GnocchiReleaseNotestdoc' + + +# -- Options for LaTeX output --------------------------------------------- + +latex_elements = { + # The paper size ('letterpaper' or 'a4paper'). + # 'papersize': 'letterpaper', + + # The font size ('10pt', '11pt' or '12pt'). + # 'pointsize': '10pt', + + # Additional stuff for the LaTeX preamble. + # 'preamble': '', +} + +# Grouping the document tree into LaTeX files. List of tuples +# (source start file, target name, title, +# author, documentclass [howto, manual, or own class]). +latex_documents = [ + ('index', 'Gnocchi.tex', + u'Gnocchi Release Notes Documentation', + u'Gnocchi developers', 'manual'), +] + +# The name of an image file (relative to this directory) to place at the top of +# the title page. +# latex_logo = None + +# For "manual" documents, if this is true, then toplevel headings are parts, +# not chapters. +# latex_use_parts = False + +# If true, show page references after internal links. +# latex_show_pagerefs = False + +# If true, show URL addresses after external links. 
+# latex_show_urls = False + +# Documents to append as an appendix to all manuals. +# latex_appendices = [] + +# If false, no module index is generated. +# latex_domain_indices = True + + +# -- Options for manual page output --------------------------------------- + +# One entry per manual page. List of tuples +# (source start file, name, description, authors, manual section). +man_pages = [ + ('index', 'gnocchi', u'Gnocchi Release Notes Documentation', + [u'Gnocchi developers'], 1) +] + +# If true, show URL addresses after external links. +# man_show_urls = False + + +# -- Options for Texinfo output ------------------------------------------- + +# Grouping the document tree into Texinfo files. List of tuples +# (source start file, target name, title, author, +# dir menu entry, description, category) +texinfo_documents = [ + ('index', 'Gnocchi', u'Gnocchi Release Notes Documentation', + u'Gnocchi developers', 'Gnocchi', + 'Gnocchi is a multi-tenant timeseries, metrics and resources database.', + 'Miscellaneous'), +] + +# Documents to append as an appendix to all manuals. +# texinfo_appendices = [] + +# If false, no module index is generated. +# texinfo_domain_indices = True + +# How to display URL addresses: 'footnote', 'no', or 'inline'. +# texinfo_show_urls = 'footnote' + +# If true, do not generate a @detailmenu in the "Top" node's menu. +# texinfo_no_detailmenu = False diff --git a/releasenotes/source/index.rst b/releasenotes/source/index.rst new file mode 100644 index 00000000..3377f194 --- /dev/null +++ b/releasenotes/source/index.rst @@ -0,0 +1,18 @@ +Welcome to Gnocchi Release Notes documentation! +=================================================== + +Contents +======== + +.. 
toctree:: + :maxdepth: 2 + + 2.1 + unreleased + + +Indices and tables +================== + +* :ref:`genindex` +* :ref:`search` diff --git a/releasenotes/source/unreleased.rst b/releasenotes/source/unreleased.rst new file mode 100644 index 00000000..875030f9 --- /dev/null +++ b/releasenotes/source/unreleased.rst @@ -0,0 +1,5 @@ +============================ +Current Series Release Notes +============================ + +.. release-notes:: diff --git a/setup.cfg b/setup.cfg index 0ecc34b8..584eaae8 100644 --- a/setup.cfg +++ b/setup.cfg @@ -56,6 +56,7 @@ doc = sphinxcontrib-httpdomain PyYAML Jinja2 + reno>=1.6.2 test = pifpaf gabbi>=0.101.2 diff --git a/tox.ini b/tox.ini index a5153c7f..0dc67d42 100644 --- a/tox.ini +++ b/tox.ini @@ -65,6 +65,10 @@ show-source = true deps = .[mysql,postgresql,test,file,ceph,swift] commands = oslo-config-generator --config-file=etc/gnocchi/gnocchi-config-generator.conf +[testenv:releasenotes] +deps = .[test,doc] +commands = sphinx-build -a -E -W -d releasenotes/build/doctrees -b html releasenotes/source releasenotes/build/html + [testenv:docs] # This does not work, see: https://bitbucket.org/hpk42/tox/issues/302 # deps = {[testenv]deps} -- GitLab From b1788f2a2aa9b5cf0875ea55f8ea389a0e89a051 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Fri, 20 May 2016 12:06:40 +0200 Subject: [PATCH 0229/1483] sqlalchemy: retry on deadlock in delete_resource_type() oslo_db.exception.DBDeadlock: (psycopg2.extensions.TransactionRollbackError) deadlock detected DETAIL: Process 9924 waits for RowShareLock on relation 16441 of database 12066; blocked by process 9935. Process 9935 waits for AccessExclusiveLock on relation 16425 of database 12066; blocked by process 9924. HINT: See server log for query details. 
[SQL: 'DELETE FROM resource_type WHERE resource_type.name = %(name)s'] [parameters: {'name': u'indexer_test'}] Change-Id: I8416a693878417f8911a92bb6cef467cbc238936 --- gnocchi/indexer/sqlalchemy.py | 19 +++++++++++++++---- 1 file changed, 15 insertions(+), 4 deletions(-) diff --git a/gnocchi/indexer/sqlalchemy.py b/gnocchi/indexer/sqlalchemy.py index 1cb8d688..cd363403 100644 --- a/gnocchi/indexer/sqlalchemy.py +++ b/gnocchi/indexer/sqlalchemy.py @@ -313,10 +313,14 @@ class SQLAlchemyIndexer(indexer.IndexerDriver): return list(session.query(ResourceType).order_by( ResourceType.name.asc()).all()) - def delete_resource_type(self, name): - if name == "generic": - raise indexer.ResourceTypeInUse(name) - + # NOTE(jd) We can have deadlock errors either here or later in + # map_and_create_tables(). We can't decorate delete_resource_type() + # directly or each part might retry later on its own and cause a + # duplicate. And it seems there's no way to use the same session for + # both adding the resource_type in our table and calling + # map_and_create_tables() :-( + @retry_on_deadlock + def _delete_resource_type(self, name): try: with self.facade.writer() as session: resource_type = self._get_resource_type(session, name) @@ -327,6 +331,13 @@ class SQLAlchemyIndexer(indexer.IndexerDriver): 'fk_rh_resource_type_name']): raise indexer.ResourceTypeInUse(name) raise + return resource_type + + def delete_resource_type(self, name): + if name == "generic": + raise indexer.ResourceTypeInUse(name) + + resource_type = self._delete_resource_type(name) with self.facade.writer_connection() as connection: self._RESOURCE_TYPE_MANAGER.unmap_and_delete_tables(resource_type, -- GitLab From 7ce5051b00f681a864d3e2a0edf6020ae37e1bef Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Mon, 16 May 2016 17:12:08 +0200 Subject: [PATCH 0230/1483] sqlalchemy: retry on PostgreSQL catalog errors too This avoids this kind of errors: oslo_db.exception.DBError: (psycopg2.InternalError) current transaction 
is aborted, commands ignored until end of transaction block [SQL: 'select relname from pg_class c join pg_namespace n on n.oid=c.relnamespace where pg_catalog.pg_table_is_visible(c.oid) and relname=%(name)s'] [parameters: {'name': u'rt_9e4a0e8695ef42ddae0f4ac73608b643'}] Change-Id: I947fc74543c016cfcdf017f69b260f695702dc0a --- gnocchi/indexer/sqlalchemy.py | 37 +++++++++++++++++++++++++++-------- 1 file changed, 29 insertions(+), 8 deletions(-) diff --git a/gnocchi/indexer/sqlalchemy.py b/gnocchi/indexer/sqlalchemy.py index cd363403..6d5a0de9 100644 --- a/gnocchi/indexer/sqlalchemy.py +++ b/gnocchi/indexer/sqlalchemy.py @@ -25,9 +25,14 @@ from oslo_db import exception from oslo_db.sqlalchemy import enginefacade from oslo_db.sqlalchemy import utils as oslo_db_utils from oslo_log import log +try: + import psycopg2 +except ImportError: + psycopg2 = None import six import sqlalchemy from sqlalchemy.engine import url as sqlalchemy_url +import sqlalchemy.exc from sqlalchemy import types import sqlalchemy_utils @@ -137,14 +142,32 @@ class ResourceClassMapper(object): return mapper @retry_on_deadlock - def map_and_create_tables(self, resource_type, connection): + def map_and_create_tables(self, resource_type, facade): with self._lock: # NOTE(sileht): map this resource_type to have # Base.metadata filled with sa.Table objects mappers = self.get_classes(resource_type) tables = [Base.metadata.tables[klass.__tablename__] for klass in mappers.values()] - Base.metadata.create_all(connection, tables=tables) + try: + with facade.writer_connection() as connection: + Base.metadata.create_all(connection, tables=tables) + except exception.DBError as e: + # HACK(jd) Sometimes, PostgreSQL raises an error such as + # "current transaction is aborted, commands ignored until end + # of transaction block" on its own catalog, so we need to + # retry, but this is not caught by oslo.db as a deadlock. 
This + # is likely because when we use Base.metadata.create_all(), + # sqlalchemy itself gets an error it does not catch or + # something. So this is paperover I guess. + inn_e = e.inner_exception + if (psycopg2 + and isinstance(inn_e, sqlalchemy.exc.InternalError) + and isinstance(inn_e.orig, psycopg2.InternalError) + # current transaction is aborted + and inn_e.orig.pgcode == '25P02'): + raise exception.RetryRequest(e) + raise def unmap_and_delete_tables(self, resource_type, connection): with self._lock: @@ -250,9 +273,7 @@ class SQLAlchemyIndexer(indexer.IndexerDriver): session.add(rt) except exception.DBDuplicateEntry: pass - with self.facade.writer_connection() as connection: - self._RESOURCE_TYPE_MANAGER.map_and_create_tables( - rt, connection) + self._RESOURCE_TYPE_MANAGER.map_and_create_tables(rt, self.facade) # NOTE(jd) We can have deadlock errors either here or later in # map_and_create_tables(). We can't decorate create_resource_type() @@ -285,9 +306,9 @@ class SQLAlchemyIndexer(indexer.IndexerDriver): self._add_resource_type(resource_type) - with self.facade.writer_connection() as connection: - self._RESOURCE_TYPE_MANAGER.map_and_create_tables(resource_type, - connection) + self._RESOURCE_TYPE_MANAGER.map_and_create_tables(resource_type, + self.facade) + return resource_type def get_resource_type(self, name): -- GitLab From 34674863cc6ed75c40d61945b5518f2bed348dbe Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Thu, 19 May 2016 14:42:28 +0200 Subject: [PATCH 0231/1483] sqlalchemy: no fail if resources and type are deleted under our feet This is a rare case, but it can happen and did in one of our unit tests run. 
When listing generic resources with details, the tables of some resource types might disappear under our feet, at least with MySQL, and that triggers an error: sqlalchemy.exc.ProgrammingError: (pymysql.err.ProgrammingError) (1146, "Table \'test.rt_cd01a3f48e0345ebb55ac208de7b600f\' doesn\'t exist") Change-Id: Ia7950052e06b1e2e9cc2651b04decc8d864972a7 --- gnocchi/indexer/sqlalchemy.py | 26 +++++++++++++++++++++++++- 1 file changed, 25 insertions(+), 1 deletion(-) diff --git a/gnocchi/indexer/sqlalchemy.py b/gnocchi/indexer/sqlalchemy.py index 6d5a0de9..30c4be62 100644 --- a/gnocchi/indexer/sqlalchemy.py +++ b/gnocchi/indexer/sqlalchemy.py @@ -29,6 +29,11 @@ try: import psycopg2 except ImportError: psycopg2 = None +try: + import pymysql.constants.ER + import pymysql.err +except ImportError: + pymysql = None import six import sqlalchemy from sqlalchemy.engine import url as sqlalchemy_url @@ -804,7 +809,26 @@ class SQLAlchemyIndexer(indexer.IndexerDriver): q = session.query(target_cls).filter(f) # Always include metrics q = q.options(sqlalchemy.orm.joinedload('metrics')) - all_resources.extend(q.all()) + try: + all_resources.extend(q.all()) + except sqlalchemy.exc.ProgrammingError as e: + # NOTE(jd) This exception can happen when the + # resources and their resource type have been + # deleted in the meantime: + # sqlalchemy.exc.ProgrammingError: + # (pymysql.err.ProgrammingError) + # (1146, "Table \'test.rt_f00\' doesn\'t exist") + # In that case, just ignore those resources. 
+ inn_e = e.inner_exception + if (not pymysql + or not isinstance( + inn_e, sqlalchemy.exc.ProgrammingError) + or not isinstance( + inn_e.orig, pymysql.err.ProgrammingError) + or (inn_e.orig.args[0] + != pymysql.constants.ER.NO_SUCH_TABLE)): + raise + return all_resources def expunge_metric(self, id): -- GitLab From 09377bfc8ab0e8c673a283bb46f638fd88a90e66 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Thu, 19 May 2016 14:56:55 +0200 Subject: [PATCH 0232/1483] sqlalchemy: add missing constraint delete_resource_type() This constraint can also be violated in certain circumstance: oslo_db.exception.DBReferenceError: (pymysql.err.IntegrityError) (1451, u'Cannot delete or update a parent row: a foreign key constraint fails (`test1070afa755f149838f0a6ddd523e68e2`.`resource_history`, CONSTRAINT `fk_resource_history_resource_type_name` FOREIGN KEY (`type`) REFERENCES `resource_type` (`name`))') [SQL: u'DELETE FROM resource_type WHERE resource_type.name = %(name)s'] [parameters: {'name': u'my_custom_resource'}] Change-Id: Ic3eb36fcbbcdf3353e13745ba560d9c8a7b0a760 --- gnocchi/indexer/sqlalchemy.py | 1 + 1 file changed, 1 insertion(+) diff --git a/gnocchi/indexer/sqlalchemy.py b/gnocchi/indexer/sqlalchemy.py index 30c4be62..e46b5383 100644 --- a/gnocchi/indexer/sqlalchemy.py +++ b/gnocchi/indexer/sqlalchemy.py @@ -354,6 +354,7 @@ class SQLAlchemyIndexer(indexer.IndexerDriver): except exception.DBReferenceError as e: if (e.constraint in [ 'fk_resource_resource_type_name', + 'fk_resource_history_resource_type_name', 'fk_rh_resource_type_name']): raise indexer.ResourceTypeInUse(name) raise -- GitLab From 955591a042135dce4ef3a4afd1192a719c96ad2e Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Wed, 13 Apr 2016 16:21:20 +0200 Subject: [PATCH 0233/1483] tests: do not create legacy resources This use custom created resource types instead of the Ceilometer legacy resource types, and kill the individual testing of those resources. 
Change-Id: I08eb305f426d5a1565c089e91bf5fe63faa14b14 --- doc/source/rest.yaml | 16 + gnocchi/indexer/sqlalchemy.py | 11 + gnocchi/tests/base.py | 8 +- gnocchi/tests/gabbi/fixtures.py | 15 +- .../indexer/sqlalchemy/test_migrations.py | 17 +- gnocchi/tests/test_indexer.py | 208 ++++----- gnocchi/tests/test_rest.py | 430 ++++-------------- setup.cfg | 2 +- 8 files changed, 243 insertions(+), 464 deletions(-) diff --git a/doc/source/rest.yaml b/doc/source/rest.yaml index 782555b8..98793a85 100644 --- a/doc/source/rest.yaml +++ b/doc/source/rest.yaml @@ -234,6 +234,22 @@ "metrics": {"temperature": {"archive_policy_name": "low"}} } +- name: create-resource-type-instance + request: | + POST /v1/resource_type HTTP/1.1 + Content-Type: application/json + + { + "name": "instance", + "attributes": { + "display_name": {"type": "string", "required": true}, + "flavor_id": {"type": "string", "required": true}, + "image_ref": {"type": "string", "required": true}, + "host": {"type": "string", "required": true}, + "server_group": {"type": "string", "required": false} + } + } + - name: create-resource-instance request: | POST /v1/resource/instance HTTP/1.1 diff --git a/gnocchi/indexer/sqlalchemy.py b/gnocchi/indexer/sqlalchemy.py index e46b5383..b366baae 100644 --- a/gnocchi/indexer/sqlalchemy.py +++ b/gnocchi/indexer/sqlalchemy.py @@ -218,6 +218,17 @@ class ResourceClassMapper(object): class SQLAlchemyIndexer(indexer.IndexerDriver): _RESOURCE_TYPE_MANAGER = ResourceClassMapper() + @classmethod + def _create_new_database(cls, url): + """Used by testing to create a new database.""" + purl = sqlalchemy_url.make_url( + cls.dress_url( + url)) + purl.database = purl.database + str(uuid.uuid4()).replace('-', '') + new_url = str(purl) + sqlalchemy_utils.create_database(new_url) + return new_url + @staticmethod def dress_url(url): # If no explicit driver has been set, we default to pymysql diff --git a/gnocchi/tests/base.py b/gnocchi/tests/base.py index 0983345b..472afeb0 100644 --- 
a/gnocchi/tests/base.py +++ b/gnocchi/tests/base.py @@ -397,13 +397,7 @@ class TestCase(base.BaseTestCase): self.coord.start() with self.coord.get_lock(b"gnocchi-tests-db-lock"): - # Force upgrading using Alembic rather than creating the - # database from scratch so we are sure we don't miss anything - # in the Alembic upgrades. We have a test to check that - # upgrades == create but it misses things such as custom CHECK - # constraints. - self.index.upgrade(nocreate=True, - create_legacy_resource_types=True) + self.index.upgrade() self.coord.stop() diff --git a/gnocchi/tests/gabbi/fixtures.py b/gnocchi/tests/gabbi/fixtures.py index bbf460f2..c935a494 100644 --- a/gnocchi/tests/gabbi/fixtures.py +++ b/gnocchi/tests/gabbi/fixtures.py @@ -20,11 +20,9 @@ import tempfile import threading import time from unittest import case -import uuid import warnings from gabbi import fixture -import sqlalchemy.engine.url as sqlalchemy_url import sqlalchemy_utils from gnocchi import indexer @@ -103,14 +101,11 @@ class ConfigFixture(fixture.GabbiFixture): # NOTE(jd) All of that is still very SQL centric but we only support # SQL for now so let's say it's good enough. 
- url = sqlalchemy_url.make_url( - sqlalchemy.SQLAlchemyIndexer.dress_url( - conf.indexer.url)) - - url.database = url.database + str(uuid.uuid4()).replace('-', '') - db_url = str(url) - conf.set_override('url', db_url, 'indexer') - sqlalchemy_utils.create_database(db_url) + conf.set_override( + 'url', + sqlalchemy.SQLAlchemyIndexer._create_new_database( + conf.indexer.url), + 'indexer') index = indexer.get_driver(conf) index.connect() diff --git a/gnocchi/tests/indexer/sqlalchemy/test_migrations.py b/gnocchi/tests/indexer/sqlalchemy/test_migrations.py index 63f22f47..df6adfcb 100644 --- a/gnocchi/tests/indexer/sqlalchemy/test_migrations.py +++ b/gnocchi/tests/indexer/sqlalchemy/test_migrations.py @@ -17,7 +17,10 @@ import abc import mock from oslo_db.sqlalchemy import test_migrations import six +import sqlalchemy_utils +from gnocchi import indexer +from gnocchi.indexer import sqlalchemy from gnocchi.indexer import sqlalchemy_base from gnocchi.tests import base @@ -34,6 +37,14 @@ class ModelsMigrationsSync( def setUp(self): super(ModelsMigrationsSync, self).setUp() self.db = mock.Mock() + self.conf.set_override( + 'url', + sqlalchemy.SQLAlchemyIndexer._create_new_database( + self.conf.indexer.url), + 'indexer') + self.index = indexer.get_driver(self.conf) + self.index.connect() + self.index.upgrade(nocreate=True, create_legacy_resource_types=True) @staticmethod def get_metadata(): @@ -44,6 +55,8 @@ class ModelsMigrationsSync( @staticmethod def db_sync(engine): - # NOTE(jd) Nothing to do here as setUp() in the base class is already - # creating table using upgrade pass + + def tearDown(self): + sqlalchemy_utils.drop_database(self.conf.indexer.url) + super(ModelsMigrationsSync, self).tearDown() diff --git a/gnocchi/tests/test_indexer.py b/gnocchi/tests/test_indexer.py index 7cfdb3e4..566d9382 100644 --- a/gnocchi/tests/test_indexer.py +++ b/gnocchi/tests/test_indexer.py @@ -228,52 +228,6 @@ class TestIndexerDriver(tests_base.TestCase): m = 
self.index.list_metrics(id=rc.metrics[0].id) self.assertEqual(m[0], rc.metrics[0]) - def _do_test_create_instance(self, server_group=None, image_ref=None): - r1 = uuid.uuid4() - user = str(uuid.uuid4()) - project = str(uuid.uuid4()) - kwargs = {'server_group': server_group} if server_group else {} - - rc = self.index.create_resource('instance', r1, user, project, - flavor_id="1", - image_ref=image_ref, - host="foo", - display_name="lol", **kwargs) - self.assertIsNotNone(rc.started_at) - self.assertIsNotNone(rc.revision_start) - self.assertEqual({"id": r1, - "revision_start": rc.revision_start, - "revision_end": None, - "type": "instance", - "created_by_user_id": user, - "created_by_project_id": project, - "user_id": None, - "project_id": None, - "started_at": rc.started_at, - "ended_at": None, - "display_name": "lol", - "server_group": server_group, - "host": "foo", - "image_ref": image_ref, - "flavor_id": "1", - "original_resource_id": None, - "metrics": {}}, - rc.jsonify()) - rg = self.index.get_resource('generic', r1, with_metrics=True) - self.assertEqual(rc.id, rg.id) - self.assertEqual(rc.revision_start, rg.revision_start) - self.assertEqual(rc.metrics, rg.metrics) - - def test_create_instance(self): - self._do_test_create_instance(image_ref='http://foo/bar') - - def test_create_instance_with_server_group(self): - self._do_test_create_instance('my_autoscaling_group', - image_ref='http://foo/bar') - - def test_create_instance_without_image_ref(self): - self._do_test_create_instance(image_ref=None) - def test_delete_resource(self): r1 = uuid.uuid4() self.index.create_resource('generic', r1, str(uuid.uuid4()), @@ -484,30 +438,40 @@ class TestIndexerDriver(tests_base.TestCase): self.assertEqual(e1, r.metrics[0].id) def test_update_resource_attribute(self): + mgr = self.index.get_resource_type_schema() + resource_type = str(uuid.uuid4()) + rtype = mgr.resource_type_from_dict(resource_type, { + "col1": {"type": "string", "required": True, + "min_length": 2, 
"max_length": 15} + }) r1 = uuid.uuid4() user = str(uuid.uuid4()) project = str(uuid.uuid4()) - rc = self.index.create_resource('instance', r1, user, project, - flavor_id="1", - image_ref="http://foo/bar", - host="foo", - display_name="lol") - rc = self.index.update_resource('instance', r1, host="bar") - r = self.index.get_resource('instance', r1, with_metrics=True) + # Create + self.index.create_resource_type(rtype) + + rc = self.index.create_resource(resource_type, r1, user, project, + col1="foo") + rc = self.index.update_resource(resource_type, r1, col1="foo") + r = self.index.get_resource(resource_type, r1, with_metrics=True) self.assertEqual(rc, r) def test_update_resource_no_change(self): + mgr = self.index.get_resource_type_schema() + resource_type = str(uuid.uuid4()) + rtype = mgr.resource_type_from_dict(resource_type, { + "col1": {"type": "string", "required": True, + "min_length": 2, "max_length": 15} + }) + self.index.create_resource_type(rtype) r1 = uuid.uuid4() user = str(uuid.uuid4()) project = str(uuid.uuid4()) - rc = self.index.create_resource('instance', r1, user, project, - flavor_id="1", - image_ref="http://foo/bar", - host="foo", - display_name="lol") - updated = self.index.update_resource('instance', r1, host="foo", + rc = self.index.create_resource(resource_type, r1, user, project, + col1="foo") + updated = self.index.update_resource(resource_type, r1, col1="foo", create_revision=False) - r = self.index.list_resources('instance', + r = self.index.list_resources(resource_type, {"=": {"id": r1}}, history=True) self.assertEqual(1, len(r)) @@ -518,28 +482,27 @@ class TestIndexerDriver(tests_base.TestCase): r1 = uuid.uuid4() user = str(uuid.uuid4()) project = str(uuid.uuid4()) - self.index.create_resource('instance', r1, user, project, - flavor_id="1", - image_ref="http://foo/bar", - host="foo", - display_name="lol") + self.index.create_resource('generic', r1, user, project) self.assertRaises( indexer.ResourceValueError, self.index.update_resource, 
- 'instance', r1, + 'generic', r1, ended_at=utils.datetime_utc(2010, 1, 1, 1, 1, 1)) def test_update_resource_unknown_attribute(self): + mgr = self.index.get_resource_type_schema() + resource_type = str(uuid.uuid4()) + rtype = mgr.resource_type_from_dict(resource_type, { + "col1": {"type": "string", "required": False, + "min_length": 1, "max_length": 2}, + }) + self.index.create_resource_type(rtype) r1 = uuid.uuid4() - self.index.create_resource('instance', r1, str(uuid.uuid4()), - str(uuid.uuid4()), - flavor_id="1", - image_ref="http://foo/bar", - host="foo", - display_name="lol") + self.index.create_resource(resource_type, r1, + str(uuid.uuid4()), str(uuid.uuid4())) self.assertRaises(indexer.ResourceAttributeError, self.index.update_resource, - 'instance', + resource_type, r1, foo="bar") def test_update_non_existent_metric(self): @@ -601,19 +564,25 @@ class TestIndexerDriver(tests_base.TestCase): "type": "generic", "metrics": {'bar': str(e2)}}, r.jsonify()) - def test_delete_instance(self): + def test_delete_resource_custom(self): + mgr = self.index.get_resource_type_schema() + resource_type = str(uuid.uuid4()) + self.index.create_resource_type( + mgr.resource_type_from_dict(resource_type, { + "flavor_id": {"type": "string", + "min_length": 1, + "max_length": 20, + "required": True} + })) r1 = uuid.uuid4() - created = self.index.create_resource('instance', r1, + created = self.index.create_resource(resource_type, r1, str(uuid.uuid4()), str(uuid.uuid4()), - flavor_id="123", - image_ref="foo", - host="dwq", - display_name="foobar") - got = self.index.get_resource('instance', r1, with_metrics=True) + flavor_id="foo") + got = self.index.get_resource(resource_type, r1, with_metrics=True) self.assertEqual(created, got) self.index.delete_resource(r1) - got = self.index.get_resource('instance', r1) + got = self.index.get_resource(resource_type, r1) self.assertIsNone(got) def test_list_resources_by_unknown_field(self): @@ -659,14 +628,14 @@ class 
TestIndexerDriver(tests_base.TestCase): project = str(uuid.uuid4()) g = self.index.create_resource('generic', r1, user, project, user, project) + mgr = self.index.get_resource_type_schema() + resource_type = str(uuid.uuid4()) + self.index.create_resource_type( + mgr.resource_type_from_dict(resource_type, {})) r2 = uuid.uuid4() - i = self.index.create_resource('instance', r2, - user, project, + i = self.index.create_resource(resource_type, r2, user, project, - flavor_id="123", - image_ref="foo", - host="dwq", - display_name="foobar") + user, project) resources = self.index.list_resources( 'generic', attribute_filter={"=": {"user_id": user}}, @@ -724,13 +693,13 @@ class TestIndexerDriver(tests_base.TestCase): r1 = uuid.uuid4() g = self.index.create_resource('generic', r1, str(uuid.uuid4()), str(uuid.uuid4())) + mgr = self.index.get_resource_type_schema() + resource_type = str(uuid.uuid4()) + self.index.create_resource_type( + mgr.resource_type_from_dict(resource_type, {})) r2 = uuid.uuid4() - i = self.index.create_resource('instance', r2, - str(uuid.uuid4()), str(uuid.uuid4()), - flavor_id="123", - image_ref="foo", - host="dwq", - display_name="foobar") + i = self.index.create_resource(resource_type, r2, + str(uuid.uuid4()), str(uuid.uuid4())) resources = self.index.list_resources('generic') self.assertGreaterEqual(len(resources), 2) g_found = False @@ -746,7 +715,7 @@ class TestIndexerDriver(tests_base.TestCase): else: self.fail("Some resources were not found") - resources = self.index.list_resources('instance') + resources = self.index.list_resources(resource_type) self.assertGreaterEqual(len(resources), 1) for r in resources: if r.id == r2: @@ -765,9 +734,19 @@ class TestIndexerDriver(tests_base.TestCase): 'generic', attribute_filter={"=": {"id": "f00bar" * 50}}) - def test_list_resource_instance_flavor_id_numeric(self): + def test_list_resource_attribute_type_numeric(self): + """Test that we can pass an integer to filter on a string type.""" + mgr = 
self.index.get_resource_type_schema() + resource_type = str(uuid.uuid4()) + self.index.create_resource_type( + mgr.resource_type_from_dict(resource_type, { + "flavor_id": {"type": "string", + "min_length": 1, + "max_length": 20, + "required": False}, + })) r = self.index.list_resources( - 'instance', attribute_filter={"=": {"flavor_id": 1.0}}) + resource_type, attribute_filter={"=": {"flavor_id": 1.0}}) self.assertEqual(0, len(r)) def test_list_resource_weird_date(self): @@ -845,7 +824,7 @@ class TestIndexerDriver(tests_base.TestCase): key=operator.itemgetter("revision_start")) self.assertEqual([r1, r2], resources) - def test_list_resources_instance_with_history(self): + def test_list_resources_custom_with_history(self): e1 = uuid.uuid4() e2 = uuid.uuid4() rid = uuid.uuid4() @@ -854,6 +833,14 @@ class TestIndexerDriver(tests_base.TestCase): new_user = str(uuid.uuid4()) new_project = str(uuid.uuid4()) + mgr = self.index.get_resource_type_schema() + resource_type = str(uuid.uuid4()) + self.index.create_resource_type( + mgr.resource_type_from_dict(resource_type, { + "col1": {"type": "string", "required": True, + "min_length": 2, "max_length": 15} + })) + self.index.create_metric(e1, user, project, archive_policy_name="low") self.index.create_metric(e2, user, project, @@ -861,17 +848,14 @@ class TestIndexerDriver(tests_base.TestCase): self.index.create_metric(uuid.uuid4(), user, project, archive_policy_name="low") - r1 = self.index.create_resource('instance', rid, user, project, + r1 = self.index.create_resource(resource_type, rid, user, project, user, project, - flavor_id="123", - image_ref="foo", - host="dwq", - display_name="foobar_history", + col1="foo", metrics={'foo': e1, 'bar': e2} ).jsonify() - r2 = self.index.update_resource('instance', rid, user_id=new_user, + r2 = self.index.update_resource(resource_type, rid, user_id=new_user, project_id=new_project, - host="other", + col1="bar", append_metrics=True).jsonify() r1['revision_end'] = r2['revision_start'] @@ 
-880,8 +864,8 @@ class TestIndexerDriver(tests_base.TestCase): 'bar': str(e2)}, r2['metrics']) self.assertEqual(new_user, r2['user_id']) self.assertEqual(new_project, r2['project_id']) - self.assertEqual('other', r2['host']) - resources = self.index.list_resources('instance', history=True, + self.assertEqual('bar', r2['col1']) + resources = self.index.list_resources(resource_type, history=True, details=False, attribute_filter={ "=": {"id": rid}}) @@ -903,12 +887,12 @@ class TestIndexerDriver(tests_base.TestCase): started_at=utils.datetime_utc(2000, 1, 1, 23, 23, 23), ended_at=utils.datetime_utc(2000, 1, 3, 23, 23, 23)) r2 = uuid.uuid4() + mgr = self.index.get_resource_type_schema() + resource_type = str(uuid.uuid4()) + self.index.create_resource_type( + mgr.resource_type_from_dict(resource_type, {})) i = self.index.create_resource( - 'instance', r2, user, project, - flavor_id="123", - image_ref="foo", - host="dwq", - display_name="foobar", + resource_type, r2, user, project, started_at=utils.datetime_utc(2000, 1, 1, 23, 23, 23), ended_at=utils.datetime_utc(2000, 1, 4, 23, 23, 23)) resources = self.index.list_resources( @@ -934,7 +918,7 @@ class TestIndexerDriver(tests_base.TestCase): self.fail("Some resources were not found") resources = self.index.list_resources( - 'instance', + resource_type, attribute_filter={ ">=": { "started_at": datetime.datetime(2000, 1, 1, 23, 23, 23) diff --git a/gnocchi/tests/test_rest.py b/gnocchi/tests/test_rest.py index d1466cad..a894fb02 100644 --- a/gnocchi/tests/test_rest.py +++ b/gnocchi/tests/test_rest.py @@ -499,12 +499,17 @@ class MetricTest(RestTest): self.assertIn('Invalid value for window', ret.text) def test_get_resource_missing_named_metric_measure_aggregation(self): + mgr = self.index.get_resource_type_schema() + resource_type = str(uuid.uuid4()) + self.index.create_resource_type( + mgr.resource_type_from_dict(resource_type, { + "server_group": {"type": "string", + "min_length": 1, + "max_length": 40, + "required": True} + 
})) + attributes = { - "started_at": "2014-01-03T02:02:02.000000", - "host": "foo", - "image_ref": "imageref!", - "flavor_id": "123", - "display_name": "myinstance", "server_group": str(uuid.uuid4()), } result = self.app.post_json("/v1/metric", @@ -527,16 +532,17 @@ class MetricTest(RestTest): attributes['id'] = str(uuid.uuid4()) attributes['metrics'] = {'foo': metric1['id']} - self.app.post_json("/v1/resource/instance", + self.app.post_json("/v1/resource/" + resource_type, params=attributes) attributes['id'] = str(uuid.uuid4()) attributes['metrics'] = {'bar': metric2['id']} - self.app.post_json("/v1/resource/instance", + self.app.post_json("/v1/resource/" + resource_type, params=attributes) result = self.app.post_json( - "/v1/aggregation/resource/instance/metric/foo?aggregation=max", + "/v1/aggregation/resource/%s/metric/foo?aggregation=max" + % resource_type, params={"=": {"server_group": attributes['server_group']}}) measures = json.loads(result.text) @@ -578,227 +584,19 @@ class MetricTest(RestTest): class ResourceTest(RestTest): - - resource_scenarios = [ - ('generic', dict( - attributes={ - "started_at": "2014-01-03T02:02:02+00:00", - "user_id": str(uuid.uuid4()), - "project_id": str(uuid.uuid4()), - }, - patchable_attributes={ - "ended_at": "2014-01-03T02:02:02+00:00", - }, - resource_type='generic')), - ('instance_disk', dict( - attributes={ - "started_at": "2014-01-03T02:02:02+00:00", - "user_id": str(uuid.uuid4()), - "project_id": str(uuid.uuid4()), - "name": "disk-name", - "instance_id": str(uuid.uuid4()), - }, - patchable_attributes={ - "ended_at": "2014-01-03T02:02:02+00:00", - "name": "new-disk-name", - }, - resource_type='instance_disk')), - ('instance_network_interface', dict( - attributes={ - "started_at": "2014-01-03T02:02:02+00:00", - "user_id": str(uuid.uuid4()), - "project_id": str(uuid.uuid4()), - "name": "nic-name", - "instance_id": str(uuid.uuid4()), - }, - patchable_attributes={ - "ended_at": "2014-01-03T02:02:02+00:00", - "name": 
"new-nic-name", - }, - resource_type='instance_network_interface')), - ('instance', dict( - attributes={ - "started_at": "2014-01-03T02:02:02+00:00", - # NOTE(jd) We test this one without user_id/project_id! - # Just to test that use case. :) - "host": "foo", - "image_ref": "imageref!", - "flavor_id": "123", - "display_name": "myinstance", - "server_group": "as_group", - }, - patchable_attributes={ - "ended_at": "2014-01-03T02:02:02+00:00", - "host": "fooz", - "image_ref": "imageref!z", - "flavor_id": "1234", - "display_name": "myinstancez", - "server_group": "new_as_group", - }, - resource_type='instance')), - # swift notifications contain UUID user_id - ('swift_account', dict( - attributes={ - "started_at": "2014-01-03T02:02:02+00:00", - "user_id": str(uuid.uuid4()), - "project_id": str(uuid.uuid4()), - }, - patchable_attributes={ - "ended_at": "2014-01-03T02:02:02+00:00", - }, - resource_type='swift_account')), - # swift pollsters contain None user_id - ('swift_account_none_user', dict( - attributes={ - "started_at": "2014-01-03T02:02:02+00:00", - "user_id": None, - "project_id": str(uuid.uuid4()), - }, - patchable_attributes={ - "ended_at": "2014-01-03T02:02:02+00:00", - }, - resource_type='swift_account')), - # TODO(dbelova): add tests with None project ID when we'll add kwapi, - # ipmi, hardware, etc. 
resources that are passed without project ID - ('volume', dict( - attributes={ - "started_at": "2014-01-03T02:02:02+00:00", - "user_id": str(uuid.uuid4()), - "project_id": str(uuid.uuid4()), - "display_name": "test_volume", - }, - patchable_attributes={ - "ended_at": "2014-01-03T02:02:02+00:00", - "display_name": "myvolume", - }, - resource_type='volume')), - ('ceph_account', dict( - attributes={ - "started_at": "2014-01-03T02:02:02+00:00", - "user_id": str(uuid.uuid4()), - "project_id": str(uuid.uuid4()), - }, - patchable_attributes={ - "ended_at": "2014-01-03T02:02:02+00:00", - }, - resource_type='ceph_account')), - ('network', dict( - attributes={ - "started_at": "2014-01-03T02:02:02+00:00", - "user_id": str(uuid.uuid4()), - "project_id": str(uuid.uuid4()), - }, - patchable_attributes={ - "ended_at": "2014-01-03T02:02:02+00:00", - }, - resource_type='network')), - ('identity', dict( - attributes={ - "started_at": "2014-01-03T02:02:02+00:00", - "user_id": str(uuid.uuid4()), - "project_id": str(uuid.uuid4()), - }, - patchable_attributes={ - "ended_at": "2014-01-03T02:02:02+00:00", - }, - resource_type='identity')), - ('ipmi', dict( - attributes={ - "started_at": "2014-01-03T02:02:02+00:00", - "user_id": str(uuid.uuid4()), - "project_id": str(uuid.uuid4()), - }, - patchable_attributes={ - "ended_at": "2014-01-03T02:02:02+00:00", - }, - resource_type='ipmi')), - ('stack', dict( - attributes={ - "started_at": "2014-01-03T02:02:02+00:00", - "user_id": str(uuid.uuid4()), - "project_id": str(uuid.uuid4()), - }, - patchable_attributes={ - "ended_at": "2014-01-03T02:02:02+00:00", - }, - resource_type='stack')), - # image pollsters contain UUID user_id - ('image', dict( - attributes={ - "started_at": "2014-01-03T02:02:02+00:00", - "user_id": str(uuid.uuid4()), - "project_id": str(uuid.uuid4()), - "name": "test-image", - "container_format": "aki", - "disk_format": "aki", - }, - patchable_attributes={ - "ended_at": "2014-01-03T02:02:02+00:00", - }, - resource_type='image')), 
- # image pollsters contain None user_id - ('image_none_user', dict( - attributes={ - "started_at": "2014-01-03T02:02:02+00:00", - "user_id": None, - "project_id": str(uuid.uuid4()), - "name": "test-image2", - "container_format": "aki", - "disk_format": "aki", - }, - patchable_attributes={ - "ended_at": "2014-01-03T02:02:02+00:00", - }, - resource_type='image')), - ('host', dict( - attributes={ - "started_at": "2014-01-03T02:02:02+00:00", - "user_id": str(uuid.uuid4()), - "project_id": str(uuid.uuid4()), - "host_name": "test-host", - }, - patchable_attributes={ - "ended_at": "2014-01-03T02:02:02+00:00", - }, - resource_type='host')), - ('host_disk', dict( - attributes={ - "started_at": "2014-01-03T02:02:02+00:00", - "user_id": str(uuid.uuid4()), - "project_id": str(uuid.uuid4()), - "host_name": "test-host", - "device_name": "test-device" - }, - patchable_attributes={ - "ended_at": "2014-01-03T02:02:02+00:00", - }, - resource_type='host_disk')), - ('host_network_interface', dict( - attributes={ - "started_at": "2014-01-03T02:02:02+00:00", - "user_id": str(uuid.uuid4()), - "project_id": str(uuid.uuid4()), - "host_name": "test-host", - "device_name": "test-device" - }, - patchable_attributes={ - "ended_at": "2014-01-03T02:02:02+00:00", - }, - resource_type='host_network_interface')), - ] - - @classmethod - def generate_scenarios(cls): - cls.scenarios = testscenarios.multiply_scenarios( - cls.scenarios, - cls.resource_scenarios) - def setUp(self): super(ResourceTest, self).setUp() - # Copy attributes so we can modify them in each test :) - self.attributes = self.attributes.copy() - # Set an id in the attribute - self.attributes['id'] = str(uuid.uuid4()) + self.attributes = { + "id": str(uuid.uuid4()), + "started_at": "2014-01-03T02:02:02+00:00", + "user_id": str(uuid.uuid4()), + "project_id": str(uuid.uuid4()), + "name": "my-name", + } + self.patchable_attributes = { + "ended_at": "2014-01-03T02:02:02+00:00", + "name": "new-name", + } self.resource = 
self.attributes.copy() # Set original_resource_id self.resource['original_resource_id'] = self.resource['id'] @@ -808,7 +606,6 @@ class ResourceTest(RestTest): else: self.resource['created_by_user_id'] = None self.resource['created_by_project_id'] = None - self.resource['type'] = self.resource_type self.resource['ended_at'] = None self.resource['metrics'] = {} if 'user_id' not in self.resource: @@ -816,6 +613,17 @@ class ResourceTest(RestTest): if 'project_id' not in self.resource: self.resource['project_id'] = None + mgr = self.index.get_resource_type_schema() + self.resource_type = str(uuid.uuid4()) + self.index.create_resource_type( + mgr.resource_type_from_dict(self.resource_type, { + "name": {"type": "string", + "min_length": 1, + "max_length": 40, + "required": True} + })) + self.resource['type'] = self.resource_type + @mock.patch.object(utils, 'utcnow') def test_post_resource(self, utcnow): utcnow.return_value = utils.datetime_utc(2014, 1, 1, 10, 23) @@ -1752,10 +1560,9 @@ class ResourceTest(RestTest): # NOTE(sileht): because the database is never cleaned between each test # we must ensure that the query will not match resources from an other - # test, to achieve this we set a different server_group on each test. - server_group = str(uuid.uuid4()) - if self.resource_type == 'instance': - self.attributes['server_group'] = server_group + # test, to achieve this we set a different name on each test. 
+ name = str(uuid.uuid4()) + self.attributes['name'] = name self.attributes['metrics'] = {'foo': metric1['id']} self.app.post_json("/v1/resource/" + self.resource_type, @@ -1769,14 +1576,11 @@ class ResourceTest(RestTest): result = self.app.post_json( "/v1/aggregation/resource/" + self.resource_type + "/metric/foo?aggregation=max", - params={"and": - [{"=": {"server_group": server_group}}, - {"=": {"display_name": "myinstance"}}]}, + params={"=": {"name": name}}, status=400) - if self.resource_type == 'instance': - self.assertIn(b"One of the metrics being aggregated doesn't have " - b"matching granularity", - result.body) + self.assertIn(b"One of the metrics being aggregated doesn't have " + b"matching granularity", + result.body) def test_get_res_named_metric_measure_aggregation_nooverlap(self): result = self.app.post_json("/v1/metric", @@ -1794,10 +1598,9 @@ class ResourceTest(RestTest): # NOTE(sileht): because the database is never cleaned between each test # we must ensure that the query will not match resources from an other - # test, to achieve this we set a different server_group on each test. - server_group = str(uuid.uuid4()) - if self.resource_type == 'instance': - self.attributes['server_group'] = server_group + # test, to achieve this we set a different name on each test. 
+ name = str(uuid.uuid4()) + self.attributes['name'] = name self.attributes['metrics'] = {'foo': metric1['id']} self.app.post_json("/v1/resource/" + self.resource_type, @@ -1811,35 +1614,25 @@ class ResourceTest(RestTest): result = self.app.post_json( "/v1/aggregation/resource/" + self.resource_type + "/metric/foo?aggregation=max", - params={"and": - [{"=": {"server_group": server_group}}, - {"=": {"display_name": "myinstance"}}]}, + params={"=": {"name": name}}, expect_errors=True) - if self.resource_type == 'instance': - self.assertEqual(400, result.status_code, result.text) - self.assertIn("No overlap", result.text) - else: - self.assertEqual(400, result.status_code) + self.assertEqual(400, result.status_code, result.text) + self.assertIn("No overlap", result.text) result = self.app.post_json( "/v1/aggregation/resource/" + self.resource_type + "/metric/foo?aggregation=min" + "&needed_overlap=0", - params={"and": - [{"=": {"server_group": server_group}}, - {"=": {"display_name": "myinstance"}}]}, + params={"=": {"name": name}}, expect_errors=True) - if self.resource_type == 'instance': - self.assertEqual(200, result.status_code, result.text) - measures = json.loads(result.text) - self.assertEqual([['2013-01-01T00:00:00+00:00', 86400.0, 8.0], - ['2013-01-01T12:00:00+00:00', 3600.0, 8.0], - ['2013-01-01T12:00:00+00:00', 60.0, 8.0]], - measures) - else: - self.assertEqual(400, result.status_code) + self.assertEqual(200, result.status_code, result.text) + measures = json.loads(result.text) + self.assertEqual([['2013-01-01T00:00:00+00:00', 86400.0, 8.0], + ['2013-01-01T12:00:00+00:00', 3600.0, 8.0], + ['2013-01-01T12:00:00+00:00', 60.0, 8.0]], + measures) def test_get_res_named_metric_measure_aggregation_nominal(self): result = self.app.post_json("/v1/metric", @@ -1862,10 +1655,9 @@ class ResourceTest(RestTest): # NOTE(sileht): because the database is never cleaned between each test # we must ensure that the query will not match resources from an other - # test, to 
achieve this we set a different server_group on each test. - server_group = str(uuid.uuid4()) - if self.resource_type == 'instance': - self.attributes['server_group'] = server_group + # test, to achieve this we set a different name on each test. + name = str(uuid.uuid4()) + self.attributes['name'] = name self.attributes['metrics'] = {'foo': metric1['id']} self.app.post_json("/v1/resource/" + self.resource_type, @@ -1879,54 +1671,39 @@ class ResourceTest(RestTest): result = self.app.post_json( "/v1/aggregation/resource/" + self.resource_type + "/metric/foo?aggregation=max", - params={"and": - [{"=": {"server_group": server_group}}, - {"=": {"display_name": "myinstance"}}]}, + params={"=": {"name": name}}, expect_errors=True) - if self.resource_type == 'instance': - self.assertEqual(200, result.status_code, result.text) - measures = json.loads(result.text) - self.assertEqual([[u'2013-01-01T00:00:00+00:00', 86400.0, 16.0], - [u'2013-01-01T12:00:00+00:00', 3600.0, 16.0], - [u'2013-01-01T12:00:00+00:00', 60.0, 16.0]], - measures) - else: - self.assertEqual(400, result.status_code) + self.assertEqual(200, result.status_code, result.text) + measures = json.loads(result.text) + self.assertEqual([[u'2013-01-01T00:00:00+00:00', 86400.0, 16.0], + [u'2013-01-01T12:00:00+00:00', 3600.0, 16.0], + [u'2013-01-01T12:00:00+00:00', 60.0, 16.0]], + measures) result = self.app.post_json( "/v1/aggregation/resource/" + self.resource_type + "/metric/foo?aggregation=min", - params={"and": - [{"=": {"server_group": server_group}}, - {"=": {"display_name": "myinstance"}}]}, + params={"=": {"name": name}}, expect_errors=True) - if self.resource_type == 'instance': - self.assertEqual(200, result.status_code) - measures = json.loads(result.text) - self.assertEqual([['2013-01-01T00:00:00+00:00', 86400.0, 0], - ['2013-01-01T12:00:00+00:00', 3600.0, 0], - ['2013-01-01T12:00:00+00:00', 60.0, 0]], - measures) - else: - self.assertEqual(400, result.status_code) + self.assertEqual(200, 
result.status_code) + measures = json.loads(result.text) + self.assertEqual([['2013-01-01T00:00:00+00:00', 86400.0, 0], + ['2013-01-01T12:00:00+00:00', 3600.0, 0], + ['2013-01-01T12:00:00+00:00', 60.0, 0]], + measures) def test_get_aggregated_measures_across_entities_no_match(self): result = self.app.post_json( "/v1/aggregation/resource/" + self.resource_type + "/metric/foo?aggregation=min", - params={"and": - [{"=": {"server_group": "notexistentyet"}}, - {"=": {"display_name": "myinstance"}}]}, + params={"=": {"name": "none!"}}, expect_errors=True) - if self.resource_type == 'instance': - self.assertEqual(200, result.status_code) - measures = json.loads(result.text) - self.assertEqual([], measures) - else: - self.assertEqual(400, result.status_code) + self.assertEqual(200, result.status_code) + measures = json.loads(result.text) + self.assertEqual([], measures) def test_get_aggregated_measures_across_entities(self): result = self.app.post_json("/v1/metric", @@ -1965,6 +1742,27 @@ class ResourceTest(RestTest): [u'2013-01-01T12:00:00+00:00', 60.0, 7.0]], measures) + def test_search_resources_with_like(self): + result = self.app.post_json( + "/v1/resource/" + self.resource_type, + params=self.attributes) + created_resource = json.loads(result.text) + + result = self.app.post_json( + "/v1/search/resource/" + self.resource_type, + params={"like": {"name": "my%"}}, + status=200) + + resources = json.loads(result.text) + self.assertIn(created_resource, resources) + + result = self.app.post_json( + "/v1/search/resource/" + self.resource_type, + params={"like": {"name": str(uuid.uuid4())}}, + status=200) + resources = json.loads(result.text) + self.assertEqual([], resources) + class GenericResourceTest(RestTest): def test_list_resources_tied_to_user(self): @@ -2015,35 +1813,3 @@ class GenericResourceTest(RestTest): "Invalid input: extra keys not allowed @ data[" + repr(u'wrongoperator') + "]", result.text) - - def test_search_resources_with_like(self): - attributes = { - 
"id": str(uuid.uuid4()), - "started_at": "2014-01-03T02:02:02.000000", - "host": "computenode42", - "image_ref": "imageref!", - "flavor_id": "123", - "display_name": "myinstance", - } - result = self.app.post_json( - "/v1/resource/instance", - params=attributes) - created_resource = json.loads(result.text) - - result = self.app.post_json( - "/v1/search/resource/instance", - params={"like": {"host": "computenode%"}}, - status=200) - - resources = json.loads(result.text) - self.assertIn(created_resource, resources) - - result = self.app.post_json( - "/v1/search/resource/instance", - params={"like": {"host": str(uuid.uuid4())}}, - status=200) - resources = json.loads(result.text) - self.assertEqual([], resources) - - -ResourceTest.generate_scenarios() diff --git a/setup.cfg b/setup.cfg index 2aba8a3e..cf8163d9 100644 --- a/setup.cfg +++ b/setup.cfg @@ -57,7 +57,7 @@ doc = PyYAML Jinja2 test = - pifpaf>=0.1.0 + pifpaf>=0.2.0 gabbi>=1.19.0 coverage>=3.6 fixtures -- GitLab From ee2eb67b99b9b3a45d39a3641a0591151461a0f9 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Fri, 13 May 2016 10:14:50 +0200 Subject: [PATCH 0234/1483] tests: remove skip_archive_policies_creation This is not a problem for gnocchi.gendoc anymore, we trying to create existing archive policies. This also shows the default existing archive policies in the doc. 
Change-Id: I462fff88a060f88d0353a902996624ab2884ebed --- doc/source/rest.yaml | 8 ++++---- gnocchi/gendoc.py | 1 - gnocchi/tests/base.py | 14 ++++++-------- 3 files changed, 10 insertions(+), 13 deletions(-) diff --git a/doc/source/rest.yaml b/doc/source/rest.yaml index 98793a85..9db1d8ba 100644 --- a/doc/source/rest.yaml +++ b/doc/source/rest.yaml @@ -4,7 +4,7 @@ Content-Type: application/json { - "name": "low", + "name": "short", "back_window": 0, "definition": [ { @@ -24,7 +24,7 @@ Content-Type: application/json { - "name": "low-without-max", + "name": "short-without-max", "aggregation_methods": ["-max", "-min"], "back_window": 0, "definition": [ @@ -69,7 +69,7 @@ Content-Type: application/json { - "name": "medium", + "name": "some-archive-policy", "back_window": 0, "definition": [ { @@ -92,7 +92,7 @@ Content-Type: application/json { - "archive_policy_name": "low" + "archive_policy_name": "high" } - name: create-metric-2 diff --git a/gnocchi/gendoc.py b/gnocchi/gendoc.py index 88080654..63a4e9b1 100644 --- a/gnocchi/gendoc.py +++ b/gnocchi/gendoc.py @@ -31,7 +31,6 @@ _RUN = False def _setup_test_app(): t = test_rest.RestTest() - t.skip_archive_policies_creation = True t.auth = True t.setUp() return t.app diff --git a/gnocchi/tests/base.py b/gnocchi/tests/base.py index 472afeb0..283e18d3 100644 --- a/gnocchi/tests/base.py +++ b/gnocchi/tests/base.py @@ -403,14 +403,12 @@ class TestCase(base.BaseTestCase): self.archive_policies = self.ARCHIVE_POLICIES.copy() self.archive_policies.update(archive_policy.DEFAULT_ARCHIVE_POLICIES) - # Used in gnocchi.gendoc - if not getattr(self, "skip_archive_policies_creation", False): - for name, ap in six.iteritems(self.archive_policies): - # Create basic archive policies - try: - self.index.create_archive_policy(ap) - except indexer.ArchivePolicyAlreadyExists: - pass + for name, ap in six.iteritems(self.archive_policies): + # Create basic archive policies + try: + self.index.create_archive_policy(ap) + except 
indexer.ArchivePolicyAlreadyExists: + pass if swexc: self.useFixture(mockpatch.Patch( -- GitLab From f181d4b2a6ed224234541940f88e4259e39bca11 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Fri, 13 May 2016 10:10:06 +0200 Subject: [PATCH 0235/1483] tests: create common resources at class init time This should save a bit of time. Change-Id: Ia43ee874f5d55e93fe96f2c5a352a4bd01912407 --- gnocchi/gendoc.py | 1 + gnocchi/tests/base.py | 17 ++++++++++------- 2 files changed, 11 insertions(+), 7 deletions(-) diff --git a/gnocchi/gendoc.py b/gnocchi/gendoc.py index 63a4e9b1..b3db9e5e 100644 --- a/gnocchi/gendoc.py +++ b/gnocchi/gendoc.py @@ -32,6 +32,7 @@ _RUN = False def _setup_test_app(): t = test_rest.RestTest() t.auth = True + t.setUpClass() t.setUp() return t.app diff --git a/gnocchi/tests/base.py b/gnocchi/tests/base.py index 283e18d3..30704f71 100644 --- a/gnocchi/tests/base.py +++ b/gnocchi/tests/base.py @@ -374,8 +374,9 @@ class TestCase(base.BaseTestCase): return os.path.join(root, project_file) return root - def setUp(self): - super(TestCase, self).setUp() + @classmethod + def setUpClass(self): + super(TestCase, self).setUpClass() self.conf = service.prepare_service([], default_config_files=[]) self.conf.set_override('policy_file', @@ -410,6 +411,13 @@ class TestCase(base.BaseTestCase): except indexer.ArchivePolicyAlreadyExists: pass + self.conf.set_override( + 'driver', + os.getenv("GNOCCHI_TEST_STORAGE_DRIVER", "null"), + 'storage') + + def setUp(self): + super(TestCase, self).setUp() if swexc: self.useFixture(mockpatch.Patch( 'swiftclient.client.Connection', @@ -418,11 +426,6 @@ class TestCase(base.BaseTestCase): self.useFixture(mockpatch.Patch('gnocchi.storage.ceph.rados', FakeRadosModule())) - self.conf.set_override( - 'driver', - os.getenv("GNOCCHI_TEST_STORAGE_DRIVER", "null"), - 'storage') - if self.conf.storage.driver == 'file': tempdir = self.useFixture(fixtures.TempDir()) self.conf.set_override('file_basepath', -- GitLab From 
4934c2b8c1b9cb4f8fb4fd48f0cca61c95318734 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Mon, 30 May 2016 11:16:58 +0200 Subject: [PATCH 0236/1483] _carbonara: stop heartbeat thread on stop() Change-Id: Ie79f6dd5ff7a11e964fdf049ccfdc642cef89e5a Closes-Bug: #1586443 --- gnocchi/storage/_carbonara.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/gnocchi/storage/_carbonara.py b/gnocchi/storage/_carbonara.py index cc7d2206..7a303e6f 100644 --- a/gnocchi/storage/_carbonara.py +++ b/gnocchi/storage/_carbonara.py @@ -68,9 +68,10 @@ class CarbonaraBasedStorage(storage.StorageDriver): name='heartbeat') self.heartbeater.setDaemon(True) self.heartbeater.start() + self._stop_heartbeat = threading.Event() def _heartbeat(self): - while True: + while not self._stop_heartbeat.is_set(): # FIXME(jd) Why 10? Why not. We should have a way to find out # what's the best value here, but it depends on the timeout used by # the driver; tooz should help us here! @@ -78,6 +79,7 @@ class CarbonaraBasedStorage(storage.StorageDriver): self.coord.heartbeat() def stop(self): + self._stop_heartbeat.set() self.coord.stop() def _lock(self, metric_id): -- GitLab From 227d5c6a7edb6a14b0412c4891ddaf89489f0898 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Mon, 30 May 2016 11:46:49 +0200 Subject: [PATCH 0237/1483] rest: report dynamic aggregation methods in capabilities in a different field Change-Id: Id2747db4cb3f6238b0083f2c49a3f8ca6b034c21 Closes-Bug: #1501322 --- gnocchi/rest/__init__.py | 9 +++++---- gnocchi/tests/test_rest.py | 12 +++++++----- 2 files changed, 12 insertions(+), 9 deletions(-) diff --git a/gnocchi/rest/__init__.py b/gnocchi/rest/__init__.py index c8f531c0..1a22fe88 100644 --- a/gnocchi/rest/__init__.py +++ b/gnocchi/rest/__init__.py @@ -1404,10 +1404,11 @@ class CapabilityController(rest.RestController): def get(): aggregation_methods = set( archive_policy.ArchivePolicy.VALID_AGGREGATION_METHODS) - aggregation_methods.update( - ext.name for ext in 
extension.ExtensionManager( - namespace='gnocchi.aggregates')) - return dict(aggregation_methods=aggregation_methods) + return dict(aggregation_methods=aggregation_methods, + dynamic_aggregation_methods=[ + ext.name for ext in extension.ExtensionManager( + namespace='gnocchi.aggregates') + ]) class StatusController(rest.RestController): diff --git a/gnocchi/tests/test_rest.py b/gnocchi/tests/test_rest.py index a894fb02..27d0be3b 100644 --- a/gnocchi/tests/test_rest.py +++ b/gnocchi/tests/test_rest.py @@ -147,18 +147,20 @@ class RestTest(tests_base.TestCase, testscenarios.TestWithScenarios): def test_capabilities(self): custom_agg = extension.Extension('test_aggregation', None, None, None) - aggregation_methods = set( - archive_policy.ArchivePolicy.VALID_AGGREGATION_METHODS) - aggregation_methods.add('test_aggregation') mgr = extension.ExtensionManager.make_test_instance( [custom_agg], 'gnocchi.aggregates') + aggregation_methods = set( + archive_policy.ArchivePolicy.VALID_AGGREGATION_METHODS) with mock.patch.object(extension, 'ExtensionManager', return_value=mgr): - result = self.app.get("/v1/capabilities") + result = self.app.get("/v1/capabilities").json self.assertEqual( sorted(aggregation_methods), - sorted(json.loads(result.text)['aggregation_methods'])) + sorted(result['aggregation_methods'])) + self.assertEqual( + ['test_aggregation'], + result['dynamic_aggregation_methods']) def test_status(self): with self.app.use_admin_user(): -- GitLab From 10975c55ff3cb3dedc28ec1b3aadfb2686706194 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Mon, 30 May 2016 11:49:10 +0200 Subject: [PATCH 0238/1483] test: move root tests to their own class There are a few test classes that inherits from RestTest, so those tests where executed several times for no good reason. Move them to their own class so they are only executed once. 
Change-Id: I68aca7822c781d915d167b152aab4d7deec07200 --- gnocchi/tests/test_rest.py | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/gnocchi/tests/test_rest.py b/gnocchi/tests/test_rest.py index 27d0be3b..f9888ec3 100644 --- a/gnocchi/tests/test_rest.py +++ b/gnocchi/tests/test_rest.py @@ -138,6 +138,13 @@ class RestTest(tests_base.TestCase, testscenarios.TestWithScenarios): indexer=self.index, auth=self.auth) + # NOTE(jd) Used at least by docs + @staticmethod + def runTest(): + pass + + +class RootTest(RestTest): def test_deserialize_force_json(self): with self.app.use_admin_user(): self.app.post( @@ -172,10 +179,6 @@ class RestTest(tests_base.TestCase, testscenarios.TestWithScenarios): self.assertIs(type(status['storage']['summary']['metrics']), int) self.assertIs(type(status['storage']['summary']['measures']), int) - @staticmethod - def runTest(): - pass - class ArchivePolicyTest(RestTest): """Test the ArchivePolicies REST API. -- GitLab From 8da588dd19cfec5b9a3bb14c5c3e5196b26e692c Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Mon, 30 May 2016 15:41:34 +0200 Subject: [PATCH 0239/1483] metricd: no max wait, fix comment Change-Id: I3aad55628652e231c984d1d3fd34d7432397a626 --- gnocchi/cli.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/gnocchi/cli.py b/gnocchi/cli.py index 3329f12b..9675f03a 100644 --- a/gnocchi/cli.py +++ b/gnocchi/cli.py @@ -83,10 +83,9 @@ class MetricProcessBase(multiprocessing.Process): self.startup_delay = worker_id self.interval_delay = interval_delay - # Retry with exponential backoff for up to 5 minutes + # Retry with exponential backoff for up to 1 minute @retrying.retry(wait_exponential_multiplier=500, - wait_exponential_max=60000, - stop_max_delay=300000) + wait_exponential_max=60000) def _configure(self): self.store = storage.get_driver(self.conf) self.store.partition = self.worker_id -- GitLab From 8932aad1c3fa95c9ee901aab2b5c2f089a020312 Mon Sep 17 00:00:00 2001 From: Julien 
Danjou Date: Mon, 30 May 2016 15:53:18 +0200 Subject: [PATCH 0240/1483] metricd: only retry on attended errors and print error when coordinator fails Currently, `retrying' retry on every error raised by either the storage or indexer. Explicitely retry on what is expected to be failing, and not on random errors. Also print a log indicating something is wrong. Change-Id: I7d3e94cf94a22dcc08fb529537f10758db7d58ca Closes-Bug: #1584083 --- gnocchi/cli.py | 27 ++++++++++++++++++++++----- gnocchi/storage/_carbonara.py | 11 +++++++---- gnocchi/utils.py | 8 -------- 3 files changed, 29 insertions(+), 17 deletions(-) diff --git a/gnocchi/cli.py b/gnocchi/cli.py index 9675f03a..1fc692c0 100644 --- a/gnocchi/cli.py +++ b/gnocchi/cli.py @@ -75,6 +75,14 @@ def statsd(): statsd_service.start() +class Retry(Exception): + pass + + +def retry_if_retry_is_raised(exception): + return isinstance(exception, Retry) + + class MetricProcessBase(multiprocessing.Process): def __init__(self, conf, worker_id=0, interval_delay=0): super(MetricProcessBase, self).__init__() @@ -85,12 +93,21 @@ class MetricProcessBase(multiprocessing.Process): # Retry with exponential backoff for up to 1 minute @retrying.retry(wait_exponential_multiplier=500, - wait_exponential_max=60000) + wait_exponential_max=60000, + retry_on_exception=retry_if_retry_is_raised) def _configure(self): - self.store = storage.get_driver(self.conf) - self.store.partition = self.worker_id - self.index = indexer.get_driver(self.conf) - self.index.connect() + try: + self.store = storage.get_driver(self.conf) + self.store.partition = self.worker_id + except storage.StorageError as e: + LOG.error("Unable to initialize storage: %s" % e) + raise Retry(e) + try: + self.index = indexer.get_driver(self.conf) + self.index.connect() + except indexer.IndexerException as e: + LOG.error("Unable to initialize indexer: %s" % e) + raise Retry(e) def run(self): self._configure() diff --git a/gnocchi/storage/_carbonara.py 
b/gnocchi/storage/_carbonara.py index cc7d2206..8d00b3f7 100644 --- a/gnocchi/storage/_carbonara.py +++ b/gnocchi/storage/_carbonara.py @@ -52,10 +52,13 @@ class CarbonaraBasedStorage(storage.StorageDriver): def __init__(self, conf): super(CarbonaraBasedStorage, self).__init__(conf) - self.coord = coordination.get_coordinator( - conf.coordination_url, - str(uuid.uuid4()).encode('ascii')) - self.coord.start() + try: + self.coord = coordination.get_coordinator( + conf.coordination_url, + str(uuid.uuid4()).encode('ascii')) + self.coord.start() + except Exception as e: + raise storage.StorageError("Unable to start coordinator: %s" % e) if conf.aggregation_workers_number is None: try: self.aggregation_workers_number = multiprocessing.cpu_count() diff --git a/gnocchi/utils.py b/gnocchi/utils.py index 63646717..aacf99ff 100644 --- a/gnocchi/utils.py +++ b/gnocchi/utils.py @@ -85,14 +85,6 @@ def to_timespan(value): return datetime.timedelta(seconds=seconds) -class Retry(Exception): - pass - - -def retry_if_retry_raised(exception): - return isinstance(exception, Retry) - - def utcnow(): """Better version of utcnow() that returns utcnow with a correct TZ.""" return timeutils.utcnow(True) -- GitLab From dbfe0509bc15af73cffd1eaac4110393019819bd Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Mon, 30 May 2016 16:09:07 +0200 Subject: [PATCH 0241/1483] doc: include an example with the `like' operator Change-Id: I40d538b08d7dfa9305d576d4de68a94356b3d103 Closes-Bug: #1576835 --- doc/source/rest.j2 | 4 ++++ doc/source/rest.yaml | 7 +++++++ 2 files changed, 11 insertions(+) diff --git a/doc/source/rest.j2 b/doc/source/rest.j2 index de17eccf..7580ca1e 100644 --- a/doc/source/rest.j2 +++ b/doc/source/rest.j2 @@ -346,6 +346,10 @@ values: {{ scenarios['search-resource-for-user']['doc'] }} +Or even: + +{{ scenarios['search-resource-for-host-like']['doc'] }} + Complex operators such as `and` and `or` are also available: {{ scenarios['search-resource-for-user-after-timestamp']['doc'] }} 
diff --git a/doc/source/rest.yaml b/doc/source/rest.yaml index 9db1d8ba..2f5bce6a 100644 --- a/doc/source/rest.yaml +++ b/doc/source/rest.yaml @@ -284,6 +284,13 @@ {"=": {"user_id": "{{ scenarios['create-resource-instance']['response'].json['user_id'] }}"}} +- name: search-resource-for-host-like + request: | + POST /v1/search/resource/instance HTTP/1.1 + Content-Type: application/json + + {"like": {"host": "compute%"}} + - name: search-resource-for-user-details request: | POST /v1/search/resource/generic?details=true HTTP/1.1 -- GitLab From 37e17ce50ccdce48aaa97d2c9f09fb39fe31b854 Mon Sep 17 00:00:00 2001 From: gord chung Date: Wed, 18 May 2016 22:10:22 -0400 Subject: [PATCH 0242/1483] enable pagination when querying metrics this patch enables pagination when querying metrics api. Change-Id: I6a4775bd23d41ca412130bb990b754cdb1ed8e92 Closes-Bug: #1552751 --- doc/source/rest.j2 | 13 + doc/source/rest.yaml | 6 + gnocchi/indexer/__init__.py | 2 +- gnocchi/indexer/sqlalchemy.py | 60 +++-- gnocchi/rest/__init__.py | 9 +- gnocchi/tests/gabbi/gabbits/pagination.yaml | 255 +++++++++++++++++++- 6 files changed, 327 insertions(+), 18 deletions(-) diff --git a/doc/source/rest.j2 b/doc/source/rest.j2 index 7580ca1e..07f13179 100644 --- a/doc/source/rest.j2 +++ b/doc/source/rest.j2 @@ -41,6 +41,13 @@ To retrieve the list of all the metrics created, use the following request: {{ scenarios['list-metric']['doc'] }} +Considering the large volume of metrics Gnocchi will store, query results are +limited to `max_limit` value set in the configuration file. Returned results +are ordered by metrics' id values. 
Default ordering and limits as well as page +start can be modified using query parameters: + +{{ scenarios['list-metric-pagination']['doc'] }} + It is possible to send measures to the metric: {{ scenarios['post-measures']['doc'] }} @@ -292,6 +299,12 @@ or using `details=true` in the query parameter: {{ scenarios['list-resource-generic-details']['doc'] }} +Similar to metric list, query results are limited to `max_limit` value set in +the configuration file. Returned results are ordered by resouces' +revision_start time and started_at values: + +{{ scenarios['list-resource-generic-pagination']['doc'] }} + Each resource can be linked to any number of metrics. The `metrics` attributes is a key/value field where the key is the name of the relationship and the value is a metric: diff --git a/doc/source/rest.yaml b/doc/source/rest.yaml index 2f5bce6a..764ccafb 100644 --- a/doc/source/rest.yaml +++ b/doc/source/rest.yaml @@ -142,6 +142,9 @@ - name: list-metric request: GET /v1/metric HTTP/1.1 +- name: list-metric-pagination + request: GET /v1/metric?limit=100&sort=name:asc HTTP/1.1 + - name: post-measures request: | POST /v1/metric/{{ scenarios['create-metric']['response'].json['id'] }}/measures HTTP/1.1 @@ -277,6 +280,9 @@ - name: list-resource-generic-details request: GET /v1/resource/generic?details=true HTTP/1.1 +- name: list-resource-generic-pagination + request: GET /v1/resource/generic?limit=2&sort=id:asc HTTP/1.1 + - name: search-resource-for-user request: | POST /v1/search/resource/instance HTTP/1.1 diff --git a/gnocchi/indexer/__init__.py b/gnocchi/indexer/__init__.py index 5836e3b5..151ee50d 100644 --- a/gnocchi/indexer/__init__.py +++ b/gnocchi/indexer/__init__.py @@ -326,7 +326,7 @@ class IndexerDriver(object): @staticmethod def list_metrics(names=None, ids=None, details=False, status='active', - **kwargs): + limit=None, marker=None, sorts=None, **kwargs): raise exceptions.NotImplementedError @staticmethod diff --git a/gnocchi/indexer/sqlalchemy.py 
b/gnocchi/indexer/sqlalchemy.py index b366baae..61faa9c1 100644 --- a/gnocchi/indexer/sqlalchemy.py +++ b/gnocchi/indexer/sqlalchemy.py @@ -493,12 +493,14 @@ class SQLAlchemyIndexer(indexer.IndexerDriver): @retry_on_deadlock def list_metrics(self, names=None, ids=None, details=False, - status='active', **kwargs): + status='active', limit=None, marker=None, sorts=None, + **kwargs): + sorts = sorts or [] if ids is not None and not ids: return [] with self.facade.independent_reader() as session: q = session.query(Metric).filter( - Metric.status == status).order_by(Metric.id) + Metric.status == status) if names is not None: q = q.filter(Metric.name.in_(names)) if ids is not None: @@ -508,6 +510,30 @@ class SQLAlchemyIndexer(indexer.IndexerDriver): if details: q = q.options(sqlalchemy.orm.joinedload('resource')) + sort_keys, sort_dirs = self._build_sort_keys(sorts) + + if marker: + metric_marker = self.list_metrics(ids=[marker]) + if metric_marker: + metric_marker = metric_marker[0] + else: + raise indexer.InvalidPagination( + "Invalid marker: `%s'" % marker) + else: + metric_marker = None + + try: + q = oslo_db_utils.paginate_query(q, Metric, limit=limit, + sort_keys=sort_keys, + marker=metric_marker, + sort_dirs=sort_dirs) + except ValueError as e: + raise indexer.InvalidPagination(e) + except exception.InvalidSortKey as e: + # FIXME(jd) Wait for https://review.openstack.org/274868 to be + # released so we can return which key + raise indexer.InvalidPagination("Invalid sort keys") + return list(q.all()) @retry_on_deadlock @@ -762,18 +788,7 @@ class SQLAlchemyIndexer(indexer.IndexerDriver): q = q.filter(f) - # transform the api-wg representation to the oslo.db one - sort_keys = [] - sort_dirs = [] - for sort in sorts: - sort_key, __, sort_dir = sort.partition(":") - sort_keys.append(sort_key.strip()) - sort_dirs.append(sort_dir or 'asc') - - # paginate_query require at list one uniq column - if 'id' not in sort_keys: - sort_keys.append('id') - sort_dirs.append('asc') + 
sort_keys, sort_dirs = self._build_sort_keys(sorts) if marker: resource_marker = self.get_resource(resource_type, marker) @@ -855,6 +870,23 @@ class SQLAlchemyIndexer(indexer.IndexerDriver): {"status": "delete"}) == 0: raise indexer.NoSuchMetric(id) + @staticmethod + def _build_sort_keys(sorts): + # transform the api-wg representation to the oslo.db one + sort_keys = [] + sort_dirs = [] + for sort in sorts: + sort_key, __, sort_dir = sort.partition(":") + sort_keys.append(sort_key.strip()) + sort_dirs.append(sort_dir or 'asc') + + # paginate_query require at list one uniq column + if 'id' not in sort_keys: + sort_keys.append('id') + sort_dirs.append('asc') + + return sort_keys, sort_dirs + class QueryTransformer(object): unary_operators = { diff --git a/gnocchi/rest/__init__.py b/gnocchi/rest/__init__.py index df009b28..85de62fc 100644 --- a/gnocchi/rest/__init__.py +++ b/gnocchi/rest/__init__.py @@ -217,6 +217,8 @@ def get_details(params): RESOURCE_DEFAULT_PAGINATION = ['revision_start:asc', 'started_at:asc'] +METRIC_DEFAULT_PAGINATION = ['id:asc'] + def get_pagination_options(params, default): max_limit = pecan.request.conf.api.max_limit @@ -694,7 +696,12 @@ class MetricsController(rest.RestController): attr_filter['created_by_user_id'] = user_id if project_id is not None: attr_filter['created_by_project_id'] = project_id - return pecan.request.indexer.list_metrics(**attr_filter) + attr_filter.update(get_pagination_options( + kwargs, METRIC_DEFAULT_PAGINATION)) + try: + return pecan.request.indexer.list_metrics(**attr_filter) + except indexer.IndexerException as e: + abort(400, e) _MetricsSchema = voluptuous.Schema({ diff --git a/gnocchi/tests/gabbi/gabbits/pagination.yaml b/gnocchi/tests/gabbi/gabbits/pagination.yaml index 68826e7e..4967cad1 100644 --- a/gnocchi/tests/gabbi/gabbits/pagination.yaml +++ b/gnocchi/tests/gabbi/gabbits/pagination.yaml @@ -154,7 +154,7 @@ tests: $[1].id: 57a9e836-87b8-4a21-9e30-18a474b98fef # -# Invalid limit/ordering +# Invalid 
resource limit/ordering # - name: invalid sort_key GET: /v1/resource/generic?sort=invalid:asc @@ -259,7 +259,7 @@ tests: data: ended_at: "2014-01-30T02:02:02.000000" - - name: update resource 5 bis + - name: update resource 5 again PATCH: $LAST_URL request_headers: x-user-id: 0fbb231484614b1a80131fc22f6afc9c @@ -282,3 +282,254 @@ tests: $[1].ended_at: "2014-01-30T02:02:02+00:00" $[2].id: 1e3d5702-2cbf-46e0-ba13-0ddaa3c71150 $[2].ended_at: null + +# +# Create metrics +# + - name: create archive policy + desc: for later use + url: /v1/archive_policy + method: POST + request_headers: + content-type: application/json + x-roles: admin + data: + name: dummy_policy + definition: + - granularity: 1 second + status: 201 + + - name: create metric with name1 + url: /v1/metric + request_headers: + x-user-id: 0fbb231484614b1a80131fc22f6afc9c + x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea + content-type: application/json + method: post + data: + name: "dummy1" + archive_policy_name: dummy_policy + status: 201 + + - name: create metric with name2 + url: /v1/metric + request_headers: + x-user-id: 0fbb231484614b1a80131fc22f6afc9c + x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea + content-type: application/json + method: post + data: + name: "dummy2" + archive_policy_name: dummy_policy + status: 201 + + - name: create metric with name3 + url: /v1/metric + request_headers: + x-user-id: 0fbb231484614b1a80131fc22f6afc9c + x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea + content-type: application/json + method: post + data: + name: "dummy3" + archive_policy_name: dummy_policy + status: 201 + + - name: create metric with name4 + url: /v1/metric + request_headers: + x-user-id: 0fbb231484614b1a80131fc22f6afc9c + x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea + content-type: application/json + method: post + data: + name: "dummy4" + archive_policy_name: dummy_policy + status: 201 + + - name: create metric with name5 + url: /v1/metric + request_headers: + x-user-id: 
0fbb231484614b1a80131fc22f6afc9c + x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea + content-type: application/json + method: post + data: + name: "dummy5" + archive_policy_name: dummy_policy + status: 201 + + - name: list all default order + url: /v1/metric + method: get + request_headers: + x-user-id: 0fbb231484614b1a80131fc22f6afc9c + x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea + content-type: application/json + + - name: list first two metrics default order + url: /v1/metric?limit=2 + method: get + request_headers: + x-user-id: 0fbb231484614b1a80131fc22f6afc9c + x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea + content-type: application/json + response_json_paths: + $.`len`: 2 + $[0].name: $RESPONSE['$[0].name'] + $[1].name: $RESPONSE['$[1].name'] + + - name: list all default order again + url: /v1/metric + method: get + request_headers: + x-user-id: 0fbb231484614b1a80131fc22f6afc9c + x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea + content-type: application/json + + - name: list next three metrics default order + url: /v1/metric?limit=4&marker=$RESPONSE['$[1].id'] + method: get + request_headers: + x-user-id: 0fbb231484614b1a80131fc22f6afc9c + x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea + content-type: application/json + response_json_paths: + $.`len`: 3 + $[0].name: $RESPONSE['$[2].name'] + $[1].name: $RESPONSE['$[3].name'] + $[2].name: $RESPONSE['$[4].name'] + + - name: list first two metrics order by user without direction + url: /v1/metric?limit=2&sort=name + method: get + request_headers: + x-user-id: 0fbb231484614b1a80131fc22f6afc9c + x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea + content-type: application/json + status: 200 + response_json_paths: + $.`len`: 2 + $[0].name: dummy1 + $[1].name: dummy2 + + - name: list first two metrics order by user + url: /v1/metric?limit=2&sort=name:asc + method: get + request_headers: + x-user-id: 0fbb231484614b1a80131fc22f6afc9c + x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea + content-type: application/json + 
response_json_paths: + $.`len`: 2 + $[0].name: dummy1 + $[1].name: dummy2 + + - name: list next third metrics order by user + url: /v1/metric?limit=4&sort=name:asc&marker=$RESPONSE['$[1].id'] + method: get + request_headers: + x-user-id: 0fbb231484614b1a80131fc22f6afc9c + x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea + content-type: application/json + response_json_paths: + $.`len`: 3 + $[0].name: dummy3 + $[1].name: dummy4 + $[2].name: dummy5 + +# +# Default metric limit +# + + - name: create metric with name6 + url: /v1/metric + request_headers: + x-user-id: 0fbb231484614b1a80131fc22f6afc9c + x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea + content-type: application/json + method: post + data: + archive_policy_name: dummy_policy + status: 201 + + - name: create metric with name7 + url: /v1/metric + request_headers: + x-user-id: 0fbb231484614b1a80131fc22f6afc9c + x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea + content-type: application/json + method: post + data: + archive_policy_name: dummy_policy + status: 201 + + - name: create metric with name8 + url: /v1/metric + request_headers: + x-user-id: 0fbb231484614b1a80131fc22f6afc9c + x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea + content-type: application/json + method: post + data: + archive_policy_name: dummy_policy + status: 201 + + - name: default metric limit + url: /v1/metric + method: get + request_headers: + x-user-id: 0fbb231484614b1a80131fc22f6afc9c + x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea + content-type: application/json + response_json_paths: + $.`len`: 7 + +# +# Invalid metrics limit/ordering +# + + - name: metric invalid sort_key + url: /v1/metric?sort=invalid:asc + method: get + request_headers: + x-user-id: 0fbb231484614b1a80131fc22f6afc9c + x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea + content-type: application/json + status: 400 + + - name: metric invalid sort_dir + url: /v1/metric?sort=id:invalid + method: get + request_headers: + x-user-id: 0fbb231484614b1a80131fc22f6afc9c + 
x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea + content-type: application/json + status: 400 + + - name: metric invalid marker + url: /v1/metric?marker=d44b3f4c-27bc-4ace-b81c-2a8e60026874 + method: get + request_headers: + x-user-id: 0fbb231484614b1a80131fc22f6afc9c + x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea + content-type: application/json + status: 400 + + - name: metric invalid negative limit + url: /v1/metric?limit=-2 + method: get + request_headers: + x-user-id: 0fbb231484614b1a80131fc22f6afc9c + x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea + content-type: application/json + status: 400 + + - name: metric invalid limit + url: /v1/metric?limit=invalid + method: get + request_headers: + x-user-id: 0fbb231484614b1a80131fc22f6afc9c + x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea + content-type: application/json + status: 400 -- GitLab From 3499b64cd168bbf598cca5376faace118c0d6a67 Mon Sep 17 00:00:00 2001 From: Thomas Goirand Date: Thu, 2 Jun 2016 07:17:14 +0000 Subject: [PATCH 0243/1483] Fixed section to be net instead of python (Closes: #825364). --- debian/changelog | 6 ++++++ debian/control | 3 ++- 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/debian/changelog b/debian/changelog index 4cf7fb90..54d3a525 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +gnocchi (2.0.2-6) unstable; urgency=medium + + * Fixed section to be net instead of python (Closes: #825364). 
+ + -- Thomas Goirand Thu, 02 Jun 2016 07:16:10 +0000 + gnocchi (2.0.2-5) unstable; urgency=medium [ Ondřej Nový ] diff --git a/debian/control b/debian/control index 33057b13..4096c187 100644 --- a/debian/control +++ b/debian/control @@ -1,5 +1,5 @@ Source: gnocchi -Section: python +Section: net Priority: optional Maintainer: PKG OpenStack Uploaders: Thomas Goirand , @@ -73,6 +73,7 @@ Vcs-Git: https://anonscm.debian.org/git/openstack/python-gnocchi.git Homepage: https://github.com/openstack/gnocchi Package: python-gnocchi +Section: python Architecture: all Depends: alembic (>= 0.7.6), python-concurrent.futures (>= 2.1.6), -- GitLab From 18a260f26836b28aec5eb21e40bd6d44b761af44 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Thu, 2 Jun 2016 11:08:12 +0200 Subject: [PATCH 0244/1483] _carbonara: fix race condition in heartbeat stop condition The _stop_heartbeat attribute is used by our thread, so we better create it before it starts running. Change-Id: I4ed8b4db4056fb417ed095994f3c76e9304387e9 --- gnocchi/storage/_carbonara.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/gnocchi/storage/_carbonara.py b/gnocchi/storage/_carbonara.py index 89c88d4a..c6499298 100644 --- a/gnocchi/storage/_carbonara.py +++ b/gnocchi/storage/_carbonara.py @@ -67,11 +67,11 @@ class CarbonaraBasedStorage(storage.StorageDriver): else: self.aggregation_workers_number = conf.aggregation_workers_number self.partition = 0 + self._stop_heartbeat = threading.Event() self.heartbeater = threading.Thread(target=self._heartbeat, name='heartbeat') self.heartbeater.setDaemon(True) self.heartbeater.start() - self._stop_heartbeat = threading.Event() def _heartbeat(self): while not self._stop_heartbeat.is_set(): -- GitLab From 93d83cd3ef54ac281018b9f9057dcc66fa33fb38 Mon Sep 17 00:00:00 2001 From: Chris Dent Date: Thu, 2 Jun 2016 10:56:47 +0100 Subject: [PATCH 0245/1483] Fix tempest tests that use SSL A new version of gabbi, 1.21.0, is required for gnocchi to work properly with live 
SSL tests. That version adds a require_ssl arg to build_tests. These changes determine the correct value for that, and set it. Change-Id: If8e1b0ae7de5d01c3e29de639474bc893fd7eb37 --- gnocchi/tempest/scenario/__init__.py | 10 ++++++++-- setup.cfg | 2 +- 2 files changed, 9 insertions(+), 3 deletions(-) diff --git a/gnocchi/tempest/scenario/__init__.py b/gnocchi/tempest/scenario/__init__.py index 8760af64..835aec82 100644 --- a/gnocchi/tempest/scenario/__init__.py +++ b/gnocchi/tempest/scenario/__init__.py @@ -43,7 +43,12 @@ class GnocchiGabbiTest(tempest.test.BaseTestCase): parsed_url = urlparse.urlsplit(url) prefix = parsed_url.path.rstrip('/') # turn it into a prefix - port = 443 if parsed_url.scheme == 'https' else 80 + if parsed_url.scheme == 'https': + port = 443 + require_ssl = True + else: + port = 80 + require_ssl = False host = parsed_url.hostname if parsed_url.port: port = parsed_url.port @@ -53,7 +58,8 @@ class GnocchiGabbiTest(tempest.test.BaseTestCase): cls.tests = driver.build_tests( test_dir, unittest.TestLoader(), host=host, port=port, prefix=prefix, - test_loader_name='tempest.scenario.gnocchi.test') + test_loader_name='tempest.scenario.gnocchi.test', + require_ssl=require_ssl) os.environ["GNOCCHI_SERVICE_TOKEN"] = token diff --git a/setup.cfg b/setup.cfg index 4c1ffa18..9bc8e874 100644 --- a/setup.cfg +++ b/setup.cfg @@ -59,7 +59,7 @@ doc = reno>=1.6.2 test = pifpaf>=0.2.0 - gabbi>=1.19.0 + gabbi>=1.21.0 coverage>=3.6 fixtures mock -- GitLab From 8a4ddb3786ad42299dc8a027a964e73f0d28a928 Mon Sep 17 00:00:00 2001 From: gord chung Date: Fri, 3 Jun 2016 12:30:10 +0000 Subject: [PATCH 0246/1483] use async delete when remove measures we can async delete when remove metric measures because we remove the xattr reference before so it shouldn't be queryable afterwards. 
Change-Id: I9bfd45bb18af01c7f0579e832771a5a67d15ff67 --- gnocchi/storage/ceph.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/gnocchi/storage/ceph.py b/gnocchi/storage/ceph.py index 2fd4a445..5a4f7dac 100644 --- a/gnocchi/storage/ceph.py +++ b/gnocchi/storage/ceph.py @@ -270,7 +270,7 @@ class CephStorage(_carbonara.CarbonaraBasedStorage): aggregation, granularity) with self._get_ioctx() as ioctx: ioctx.rm_xattr("gnocchi_%s_container" % metric.id, name) - ioctx.remove_object(name) + ioctx.aio_remove(name) def _delete_metric(self, metric): with self._get_ioctx() as ioctx: -- GitLab From 4f2102d51786c61b8479d6f47a76f91af968729e Mon Sep 17 00:00:00 2001 From: Victor Hugo Date: Thu, 2 Jun 2016 11:53:03 -0300 Subject: [PATCH 0247/1483] Added endpoint type on swift configuration. The swiftclient uses publicURL as default to connect on swift, but has the option to choose the endpoint type the user wants to use, for cases where you rather connect to adminURL or internalURL. Signed-off-by: Victor Hugo de Araujo Monteiro Change-Id: I63d9a74ea590241868053c3c82a02003dd97f99b --- gnocchi/storage/swift.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/gnocchi/storage/swift.py b/gnocchi/storage/swift.py index 1ba2329a..dcab3890 100644 --- a/gnocchi/storage/swift.py +++ b/gnocchi/storage/swift.py @@ -58,6 +58,9 @@ OPTS = [ cfg.StrOpt('swift_container_prefix', default='gnocchi', help='Prefix to namespace metric containers.'), + cfg.StrOpt('swift_endpoint_type', + default='publicURL', + help='Endpoint type to connect to Swift',), cfg.IntOpt('swift_timeout', min=0, default=300, @@ -85,6 +88,7 @@ class SwiftStorage(_carbonara.CarbonaraBasedStorage): key=conf.swift_key, tenant_name=conf.swift_tenant_name, timeout=conf.swift_timeout, + os_options={'endpoint_type': conf.swift_endpoint_type}, retries=0) self._container_prefix = conf.swift_container_prefix self.swift.put_container(self.MEASURE_PREFIX) -- GitLab From 3af0fcbc7e53ceee6de7ba9a4bdcee44516fc7e5 Mon 
Sep 17 00:00:00 2001 From: Julien Danjou Date: Tue, 7 Jun 2016 17:40:57 +0200 Subject: [PATCH 0248/1483] swift: raise an explicit error if bulk-delete is unavailable Change-Id: I6ee333bb602d984c8737301dffa109eae293d8f7 --- gnocchi/storage/swift.py | 7 ++++++- gnocchi/tests/base.py | 39 ++++++++++++++++++++++++--------------- 2 files changed, 30 insertions(+), 16 deletions(-) diff --git a/gnocchi/storage/swift.py b/gnocchi/storage/swift.py index 1ba2329a..67932fde 100644 --- a/gnocchi/storage/swift.py +++ b/gnocchi/storage/swift.py @@ -152,9 +152,14 @@ class SwiftStorage(_carbonara.CarbonaraBasedStorage): def _bulk_delete(self, container, objects): objects = [quote(('/%s/%s' % (container, obj['name'])).encode('utf-8')) for obj in objects] + resp = {} headers, body = self.swift.post_account( headers=self.POST_HEADERS, query_string='bulk-delete', - data=b''.join(obj.encode('utf-8') + b'\n' for obj in objects)) + data=b''.join(obj.encode('utf-8') + b'\n' for obj in objects), + response_dict=resp) + if resp['status'] != 200: + raise storage.StorageError( + "Unable to bulk-delete, is bulk-delete enabled in Swift?") resp = swift_utils.parse_api_response(headers, body) LOG.debug('# of objects deleted: %s, # of objects skipped: %s', resp['Number Deleted'], resp['Number Not Found']) diff --git a/gnocchi/tests/base.py b/gnocchi/tests/base.py index 30704f71..c7d4a4eb 100644 --- a/gnocchi/tests/base.py +++ b/gnocchi/tests/base.py @@ -331,21 +331,30 @@ class FakeSwiftClient(object): raise swexc.ClientException("No such container", http_status=404) - def post_account(self, headers, query_string=None, data=None): - resp = {'Response Status': '200 OK', - 'Response Body': '', - 'Number Deleted': 0, - 'Number Not Found': 0} - if query_string == 'bulk-delete' and data: - for path in data.splitlines(): - try: - __, container, obj = (unquote(path.decode('utf8')) - .split('/', 2)) - del self.kvs[container][obj] - resp['Number Deleted'] += 1 - except KeyError: - resp['Number Not Found'] 
+= 1 - return {}, json.dumps(resp).encode('utf-8') + def post_account(self, headers, query_string=None, data=None, + response_dict=None): + if query_string == 'bulk-delete': + resp = {'Response Status': '200 OK', + 'Response Body': '', + 'Number Deleted': 0, + 'Number Not Found': 0} + if response_dict is not None: + response_dict['status'] = 200 + if data: + for path in data.splitlines(): + try: + __, container, obj = (unquote(path.decode('utf8')) + .split('/', 2)) + del self.kvs[container][obj] + resp['Number Deleted'] += 1 + except KeyError: + resp['Number Not Found'] += 1 + return {}, json.dumps(resp).encode('utf-8') + + if response_dict is not None: + response_dict['status'] = 204 + + return {}, None @six.add_metaclass(SkipNotImplementedMeta) -- GitLab From 0bbf0799768b28df156f7871125d1f8120723132 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Tue, 7 Jun 2016 15:11:10 +0200 Subject: [PATCH 0249/1483] swift: force retry to 1 Change I4320fb3e8bb30603bd70f8159fbcf855fc4a2880 set the retries to 0 to minimize the delay. Unfortunately, swiftclient makes no difference between the errors, so it won't renew the token on 401 and retry. Set retry to 1 so we have a chance to renew the token. 
Closes-Bug: #1589926 Change-Id: I44428dd4b23d2c8c12852ebf87bd7b60e2f85e3b --- gnocchi/storage/swift.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/gnocchi/storage/swift.py b/gnocchi/storage/swift.py index dcab3890..dcca555a 100644 --- a/gnocchi/storage/swift.py +++ b/gnocchi/storage/swift.py @@ -89,7 +89,7 @@ class SwiftStorage(_carbonara.CarbonaraBasedStorage): tenant_name=conf.swift_tenant_name, timeout=conf.swift_timeout, os_options={'endpoint_type': conf.swift_endpoint_type}, - retries=0) + retries=1) self._container_prefix = conf.swift_container_prefix self.swift.put_container(self.MEASURE_PREFIX) -- GitLab From 1da7668db4fcc1b3f2ecf5e73c951e5deeebb2d9 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Thu, 9 Jun 2016 16:39:13 +0200 Subject: [PATCH 0250/1483] tests: fix Gabbi live test to not rely on legacy resource types Change-Id: I3356579524c23f771fbd5ae74e4fa9d3e0dd8d09 --- gnocchi/tests/gabbi/gabbits-live/live.yaml | 127 ++++++++------------- 1 file changed, 46 insertions(+), 81 deletions(-) diff --git a/gnocchi/tests/gabbi/gabbits-live/live.yaml b/gnocchi/tests/gabbi/gabbits-live/live.yaml index 5aa2b246..bbc924fb 100644 --- a/gnocchi/tests/gabbi/gabbits-live/live.yaml +++ b/gnocchi/tests/gabbi/gabbits-live/live.yaml @@ -468,7 +468,7 @@ tests: - name: root of resource url: /v1/resource response_json_paths: - $.volume: $SCHEME://$NETLOC/v1/resource/volume + $.generic: $SCHEME://$NETLOC/v1/resource/generic - name: typo of resource url: /v1/resoue @@ -478,80 +478,45 @@ tests: url: /v1/resource/foobar status: 404 - - name: identity resource - desc: maybe there's are no identity resources yet - url: /v1/resource/identity - status: 200 - - - name: ceph_account resource - desc: maybe there's are no ceph_account resources yet - url: /v1/resource/ceph_account - status: 200 - - - name: instance resource - desc: maybe there are no instance resources yet - url: /v1/resource/instance - status: 200 - - - name: instance_network_interface resource 
- desc: maybe there's are no instance_network_interface resources yet - url: /v1/resource/instance_network_interface - status: 200 - - - name: instance_disk resource - desc: maybe there's are no instance_disk resources yet - url: /v1/resource/instance_disk - status: 200 - - - name: image resource - desc: maybe there's are no image resources yet - url: /v1/resource/image - status: 200 - - - name: ipmi resource - desc: maybe there's are no ipmi resources yet - url: /v1/resource/ipmi - status: 200 - - - name: network resource - desc: maybe there's are no network resources yet - url: /v1/resource/network - status: 200 - - - name: orchestration resource - desc: maybe there's are no orchestration resources yet - #url: /v1/resource/orchestration - url: /v1/resource/stack - status: 200 - - - name: swift_account resource - desc: maybe there's are no swift_account resources yet - url: /v1/resource/swift_account + - name: generic resource + url: /v1/resource/generic status: 200 - - name: volume resource - desc: maybe there's are no volume resources yet - url: /v1/resource/volume - status: 200 + - name: post resource type + url: /v1/resource_type + method: post + request_headers: + content-type: application/json + data: + name: myresource + attributes: + display_name: + type: string + required: true + max_length: 5 + min_length: 2 + status: 201 + response_headers: + location: $SCHEME://$NETLOC/v1/resource_type/myresource - - name: instance resource bad accept + - name: myresource resource bad accept desc: Expect 406 on bad accept type request_headers: accept: text/plain - url: /v1/resource/instance + url: /v1/resource/myresource status: 406 response_strings: - 406 Not Acceptable - - name: instance resource complex accept + - name: myresource resource complex accept desc: failover accept media type appropriately request_headers: accept: text/plain, application/json; q=0.8 - url: /v1/resource/instance + url: /v1/resource/myresource status: 200 - - name: post instance resource - 
url: /v1/resource/instance + - name: post myresource resource + url: /v1/resource/myresource method: post request_headers: content-type: application/json @@ -559,9 +524,6 @@ tests: id: 2ae35573-7f9f-4bb1-aae8-dad8dff5706e user_id: 126204ef-989a-46fd-999b-ee45c8108f31 project_id: 98e785d7-9487-4159-8ab8-8230ec37537a - flavor_id: "2" - image_ref: http://image - host: compute1 display_name: myvm metrics: vcpus: @@ -571,20 +533,20 @@ tests: $.id: 2ae35573-7f9f-4bb1-aae8-dad8dff5706e $.user_id: 126204ef-989a-46fd-999b-ee45c8108f31 $.project_id: 98e785d7-9487-4159-8ab8-8230ec37537a - $.flavor_id: "2" + $.display_name: "myvm" - - name: get instance resource + - name: get myresource resource url: $LOCATION status: 200 response_json_paths: $.id: 2ae35573-7f9f-4bb1-aae8-dad8dff5706e $.user_id: 126204ef-989a-46fd-999b-ee45c8108f31 $.project_id: 98e785d7-9487-4159-8ab8-8230ec37537a - $.flavor_id: "2" + $.display_name: "myvm" - - name: search for instance resource via user_id + - name: search for myresource resource via user_id #url: /v1/search/resource/generic - url: /v1/search/resource/instance + url: /v1/search/resource/myresource method: POST request_headers: content-type: application/json @@ -597,7 +559,7 @@ tests: $..project_id: 98e785d7-9487-4159-8ab8-8230ec37537a $..display_name: myvm - - name: search for instance resource via user_id and 'generic' type + - name: search for myresource resource via user_id and 'generic' type url: /v1/search/resource/generic method: POST request_headers: @@ -608,7 +570,7 @@ tests: response_strings: '"user_id": "126204ef-989a-46fd-999b-ee45c8108f31"' - - name: search for instance resource via user_id and project_id + - name: search for myresource resource via user_id and project_id url: /v1/search/resource/generic method: POST request_headers: @@ -622,19 +584,19 @@ tests: response_strings: '"id": "2ae35573-7f9f-4bb1-aae8-dad8dff5706e"' - - name: patch instance resource - url: /v1/resource/instance/2ae35573-7f9f-4bb1-aae8-dad8dff5706e + - 
name: patch myresource resource + url: /v1/resource/myresource/2ae35573-7f9f-4bb1-aae8-dad8dff5706e method: patch request_headers: content-type: application/json data: - host: compute2 + display_name: myvm2 status: 200 response_json_paths: - host: compute2 + display_name: myvm2 - - name: post some measures to the metric on instance - url: /v1/resource/instance/2ae35573-7f9f-4bb1-aae8-dad8dff5706e/metric/vcpus/measures + - name: post some measures to the metric on myresource + url: /v1/resource/myresource/2ae35573-7f9f-4bb1-aae8-dad8dff5706e/metric/vcpus/measures request_headers: content-type: application/json method: POST @@ -645,8 +607,8 @@ tests: value: 2 status: 202 - - name: get instance measures with poll - url: /v1/resource/instance/2ae35573-7f9f-4bb1-aae8-dad8dff5706e/metric/vcpus/measures + - name: get myresource measures with poll + url: /v1/resource/myresource/2ae35573-7f9f-4bb1-aae8-dad8dff5706e/metric/vcpus/measures # wait up to 60 seconds before policy is deleted poll: count: 60 @@ -678,7 +640,7 @@ tests: id: "cd9eef" - - name: delete instance resource + - name: delete myresource resource url: /v1/resource/generic/2ae35573-7f9f-4bb1-aae8-dad8dff5706e method: DELETE status: 204 @@ -689,7 +651,7 @@ tests: method: GET status: 404 - - name: post instance resource no data + - name: post myresource resource no data url: /v1/resource/generic method: post request_headers: @@ -706,7 +668,10 @@ tests: # It really is gone + - name: delete our resource type + DELETE: /v1/resource_type/myresource + status: 204 + - name: confirm delete of cleanup url: /v1/archive_policy/gabbilive status: 404 - -- GitLab From 2d7151e14b4369e1bace64292851145a03a124f3 Mon Sep 17 00:00:00 2001 From: Chris Dent Date: Thu, 16 Jun 2016 12:48:59 +0100 Subject: [PATCH 0251/1483] Correct concurrency of gabbi tests for gabbi 1.22.0 When running gabbi under testr, concurrency grouping is controlled by a regex in .testr.conf. A module name change in gabbi 1.22.0 inadvertently broke the grouping. 
This change should work for old and new versions. Change-Id: I0d963da7e0a5d3edd046a885312e2fd9cd02a974 --- .testr.conf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.testr.conf b/.testr.conf index 89993306..cafb2f6e 100644 --- a/.testr.conf +++ b/.testr.conf @@ -2,4 +2,4 @@ test_command=OS_STDOUT_CAPTURE=${OS_STDOUT_CAPTURE:-1} OS_STDERR_CAPTURE=${OS_STDERR_CAPTURE:-1} OS_TEST_TIMEOUT=${OS_TEST_TIMEOUT:-60} ${PYTHON:-python} -m subunit.run discover -t . ${OS_TEST_PATH:-gnocchi/tests} $LISTOPT $IDOPTION test_id_option=--load-list $IDFILE test_list_option=--list -group_regex=(gabbi\.driver.test_gabbi_[^_]+)_ +group_regex=(gabbi\.(suitemaker|driver)\.test_gabbi_([^_]+))_ -- GitLab From 6980d7acf44420f5a802d7cd8bb71b7eabc46da4 Mon Sep 17 00:00:00 2001 From: Chris Dent Date: Thu, 16 Jun 2016 13:46:53 +0100 Subject: [PATCH 0252/1483] Rename gabbits with _ to have - instead A forthcoming version of gabbi will warn on the presence of _ in yaml filenames as it can cause issues with test grouping in concurent situations. 
Change-Id: Ida8ce49582d5b8d927858982fae9fdfc267e4be4 --- .../tests/gabbi/gabbits/{archive_rule.yaml => archive-rule.yaml} | 0 .../gabbi/gabbits/{batch_measures.yaml => batch-measures.yaml} | 0 .../gabbits/{metric_granularity.yaml => metric-granularity.yaml} | 0 .../{resource_aggregation.yaml => resource-aggregation.yaml} | 0 .../gabbi/gabbits/{resource_type.yaml => resource-type.yaml} | 0 .../gabbi/gabbits/{search_metric.yaml => search-metric.yaml} | 0 6 files changed, 0 insertions(+), 0 deletions(-) rename gnocchi/tests/gabbi/gabbits/{archive_rule.yaml => archive-rule.yaml} (100%) rename gnocchi/tests/gabbi/gabbits/{batch_measures.yaml => batch-measures.yaml} (100%) rename gnocchi/tests/gabbi/gabbits/{metric_granularity.yaml => metric-granularity.yaml} (100%) rename gnocchi/tests/gabbi/gabbits/{resource_aggregation.yaml => resource-aggregation.yaml} (100%) rename gnocchi/tests/gabbi/gabbits/{resource_type.yaml => resource-type.yaml} (100%) rename gnocchi/tests/gabbi/gabbits/{search_metric.yaml => search-metric.yaml} (100%) diff --git a/gnocchi/tests/gabbi/gabbits/archive_rule.yaml b/gnocchi/tests/gabbi/gabbits/archive-rule.yaml similarity index 100% rename from gnocchi/tests/gabbi/gabbits/archive_rule.yaml rename to gnocchi/tests/gabbi/gabbits/archive-rule.yaml diff --git a/gnocchi/tests/gabbi/gabbits/batch_measures.yaml b/gnocchi/tests/gabbi/gabbits/batch-measures.yaml similarity index 100% rename from gnocchi/tests/gabbi/gabbits/batch_measures.yaml rename to gnocchi/tests/gabbi/gabbits/batch-measures.yaml diff --git a/gnocchi/tests/gabbi/gabbits/metric_granularity.yaml b/gnocchi/tests/gabbi/gabbits/metric-granularity.yaml similarity index 100% rename from gnocchi/tests/gabbi/gabbits/metric_granularity.yaml rename to gnocchi/tests/gabbi/gabbits/metric-granularity.yaml diff --git a/gnocchi/tests/gabbi/gabbits/resource_aggregation.yaml b/gnocchi/tests/gabbi/gabbits/resource-aggregation.yaml similarity index 100% rename from 
gnocchi/tests/gabbi/gabbits/resource_aggregation.yaml rename to gnocchi/tests/gabbi/gabbits/resource-aggregation.yaml diff --git a/gnocchi/tests/gabbi/gabbits/resource_type.yaml b/gnocchi/tests/gabbi/gabbits/resource-type.yaml similarity index 100% rename from gnocchi/tests/gabbi/gabbits/resource_type.yaml rename to gnocchi/tests/gabbi/gabbits/resource-type.yaml diff --git a/gnocchi/tests/gabbi/gabbits/search_metric.yaml b/gnocchi/tests/gabbi/gabbits/search-metric.yaml similarity index 100% rename from gnocchi/tests/gabbi/gabbits/search_metric.yaml rename to gnocchi/tests/gabbi/gabbits/search-metric.yaml -- GitLab From e275febca9b6bcee9ce3464bce293a80c14daef5 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Wed, 9 Mar 2016 12:48:24 +0100 Subject: [PATCH 0253/1483] Enable CORS by default This simplify the deployment and configuration of Gnocchi. Depends-On: I18eb78d4206c20efc934fe8709881c2bd6972983 Change-Id: I5fc6fca8f54e34f6c932f0e32878b17ab607780b --- devstack/plugin.sh | 5 +---- doc/source/configuration.rst | 8 -------- doc/source/grafana.rst | 16 +++------------- etc/gnocchi/api-paste.ini | 7 +++++-- requirements.txt | 2 +- 5 files changed, 10 insertions(+), 28 deletions(-) diff --git a/devstack/plugin.sh b/devstack/plugin.sh index 8b2d5566..3e409144 100644 --- a/devstack/plugin.sh +++ b/devstack/plugin.sh @@ -272,14 +272,11 @@ function configure_gnocchi { fi if [ "$GNOCCHI_USE_KEYSTONE" == "True" ] ; then + iniset $GNOCCHI_PASTE_CONF pipeline:main pipeline gnocchi+auth if is_service_enabled gnocchi-grafana; then - iniset $GNOCCHI_PASTE_CONF pipeline:main pipeline "cors gnocchi+auth" iniset $KEYSTONE_CONF cors allowed_origin ${GRAFANA_URL} iniset $GNOCCHI_CONF cors allowed_origin ${GRAFANA_URL} - iniset $GNOCCHI_CONF cors allow_methods GET,POST,PUT,DELETE,OPTIONS,HEAD,PATCH iniset $GNOCCHI_CONF cors allow_headers Content-Type,Cache-Control,Content-Language,Expires,Last-Modified,Pragma,X-Auth-Token,X-Subject-Token - else - iniset $GNOCCHI_PASTE_CONF 
pipeline:main pipeline gnocchi+auth fi else iniset $GNOCCHI_PASTE_CONF pipeline:main pipeline gnocchi+noauth diff --git a/doc/source/configuration.rst b/doc/source/configuration.rst index e6e17b9f..2480c1a3 100644 --- a/doc/source/configuration.rst +++ b/doc/source/configuration.rst @@ -65,14 +65,6 @@ edit the `api-paste.ini` file to add the Keystone authentication middleware:: [pipeline:main] pipeline = gnocchi+auth -Also, if you're planning on using `CORS`_ (e.g. to use `Grafana`_), you an also -add the CORS middleware in the server pipeline:: - - [pipeline:gnocchiv1+auth] - pipeline = keystone_authtoken cors gnocchiv1 - -With or without Keystone support. - .. _`Paste Deployment`: http://pythonpaste.org/deploy/ .. _`OpenStack Keystone`: http://launchpad.net/keystone .. _`CORS`: https://en.wikipedia.org/wiki/Cross-origin_resource_sharing diff --git a/doc/source/grafana.rst b/doc/source/grafana.rst index dee0a71c..0d876544 100644 --- a/doc/source/grafana.rst +++ b/doc/source/grafana.rst @@ -29,26 +29,16 @@ In order to use Gnocchi with Grafana in proxy mode, you just need to: In order to use Gnocchi with Grafana in direct mode, you need to do a few more steps: -1. Enable the `CORS`_ middleware. This can be done easily by modifying the - Gnocchi `api-paste.ini` configuration file and adding `cors` into the main - pipeline:: - - [pieline:main] - pipeline = cors keystone_authtoken gnocchi - - This will authorize your browser to make requests to Gnocchi on behalf of - Grafana. - -2. Configure the CORS middleware in `gnocchi.conf` to allow request from +1. Configure the CORS middleware in `gnocchi.conf` to allow request from Grafana:: [cors] allowed_origin = http://example.com/grafana allow_headers = Content-Type,Cache-Control,Content-Language,Expires,Last-Modified,Pragma,X-Auth-Token -3. Configure the CORS middleware in Keystone in the same fashion. +2. Configure the CORS middleware in Keystone in the same fashion. -4. 
Configure a new datasource in Grafana with the Keystone URL, a user, a +3. Configure a new datasource in Grafana with the Keystone URL, a user, a project and a password. Your browser will query Keystone for a token, and then query Gnocchi based on what Grafana needs. diff --git a/etc/gnocchi/api-paste.ini b/etc/gnocchi/api-paste.ini index 94c5c337..9d1b54ba 100644 --- a/etc/gnocchi/api-paste.ini +++ b/etc/gnocchi/api-paste.ini @@ -5,15 +5,18 @@ pipeline = gnocchi+noauth [composite:gnocchi+noauth] use = egg:Paste#urlmap / = gnocchiversions -/v1 = gnocchiv1 +/v1 = gnocchiv1+noauth [composite:gnocchi+auth] use = egg:Paste#urlmap / = gnocchiversions /v1 = gnocchiv1+auth +[pipeline:gnocchiv1+noauth] +pipeline = cors gnocchiv1 + [pipeline:gnocchiv1+auth] -pipeline = keystone_authtoken gnocchiv1 +pipeline = keystone_authtoken cors gnocchiv1 [app:gnocchiversions] paste.app_factory = gnocchi.rest.app:app_factory diff --git a/requirements.txt b/requirements.txt index 7b39a8ac..0127b70b 100644 --- a/requirements.txt +++ b/requirements.txt @@ -5,7 +5,7 @@ oslo.log>=1.0.0 oslo.policy>=0.3.0 oslo.serialization>=1.4.0 oslo.utils>=3.3.0 -oslo.middleware +oslo.middleware>=3.11.0 pandas>=0.17.0 pecan>=0.9 pytimeparse>=1.1.5 -- GitLab From 3156b9d383762a2b4297f3416ccd5205e22c16ef Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Fri, 17 Jun 2016 11:20:08 +0200 Subject: [PATCH 0254/1483] _carbonara: set default aggregation_workers_number to 1 Change-Id: Iaa2ca23611d37cf96262104829dc96cb9ef6b0bd Closes-Bug: #1558791 --- gnocchi/storage/_carbonara.py | 13 ++++--------- 1 file changed, 4 insertions(+), 9 deletions(-) diff --git a/gnocchi/storage/_carbonara.py b/gnocchi/storage/_carbonara.py index c6499298..b172e684 100644 --- a/gnocchi/storage/_carbonara.py +++ b/gnocchi/storage/_carbonara.py @@ -16,7 +16,6 @@ # under the License. 
import collections import datetime -import multiprocessing import operator import threading import time @@ -36,8 +35,10 @@ from gnocchi import storage OPTS = [ cfg.IntOpt('aggregation_workers_number', + default=1, min=1, help='Number of workers to run during adding new measures for ' - 'pre-aggregation needs.'), + 'pre-aggregation needs. Due to the Python GIL, ' + '1 is usually faster, unless you have high latency I/O'), cfg.StrOpt('coordination_url', secret=True, help='Coordination driver URL'), @@ -59,13 +60,7 @@ class CarbonaraBasedStorage(storage.StorageDriver): self.coord.start() except Exception as e: raise storage.StorageError("Unable to start coordinator: %s" % e) - if conf.aggregation_workers_number is None: - try: - self.aggregation_workers_number = multiprocessing.cpu_count() - except NotImplementedError: - self.aggregation_workers_number = 2 - else: - self.aggregation_workers_number = conf.aggregation_workers_number + self.aggregation_workers_number = conf.aggregation_workers_number self.partition = 0 self._stop_heartbeat = threading.Event() self.heartbeater = threading.Thread(target=self._heartbeat, -- GitLab From c64132a3c43eeee314a4cb096d99df682b15df48 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Fri, 17 Jun 2016 11:23:57 +0200 Subject: [PATCH 0255/1483] _carbonara: use tooz heartbeat management Closes-Bug: #1557593# Change-Id: I238414edafe58fe087d27cfb60bd8e7003077cd4 --- gnocchi/storage/_carbonara.py | 18 +----------------- gnocchi/tests/base.py | 2 +- setup.cfg | 8 ++++---- 3 files changed, 6 insertions(+), 22 deletions(-) diff --git a/gnocchi/storage/_carbonara.py b/gnocchi/storage/_carbonara.py index c6499298..fe7e1664 100644 --- a/gnocchi/storage/_carbonara.py +++ b/gnocchi/storage/_carbonara.py @@ -18,8 +18,6 @@ import collections import datetime import multiprocessing import operator -import threading -import time import uuid from concurrent import futures @@ -56,7 +54,7 @@ class CarbonaraBasedStorage(storage.StorageDriver): self.coord 
= coordination.get_coordinator( conf.coordination_url, str(uuid.uuid4()).encode('ascii')) - self.coord.start() + self.coord.start(start_heart=True) except Exception as e: raise storage.StorageError("Unable to start coordinator: %s" % e) if conf.aggregation_workers_number is None: @@ -67,22 +65,8 @@ class CarbonaraBasedStorage(storage.StorageDriver): else: self.aggregation_workers_number = conf.aggregation_workers_number self.partition = 0 - self._stop_heartbeat = threading.Event() - self.heartbeater = threading.Thread(target=self._heartbeat, - name='heartbeat') - self.heartbeater.setDaemon(True) - self.heartbeater.start() - - def _heartbeat(self): - while not self._stop_heartbeat.is_set(): - # FIXME(jd) Why 10? Why not. We should have a way to find out - # what's the best value here, but it depends on the timeout used by - # the driver; tooz should help us here! - time.sleep(10) - self.coord.heartbeat() def stop(self): - self._stop_heartbeat.set() self.coord.stop() def _lock(self, metric_id): diff --git a/gnocchi/tests/base.py b/gnocchi/tests/base.py index 30704f71..ab9e90ac 100644 --- a/gnocchi/tests/base.py +++ b/gnocchi/tests/base.py @@ -395,7 +395,7 @@ class TestCase(base.BaseTestCase): self.conf.storage.coordination_url, str(uuid.uuid4()).encode('ascii')) - self.coord.start() + self.coord.start(start_heart=True) with self.coord.get_lock(b"gnocchi-tests-db-lock"): self.index.upgrade() diff --git a/setup.cfg b/setup.cfg index 9bc8e874..a3e28f8e 100644 --- a/setup.cfg +++ b/setup.cfg @@ -37,11 +37,11 @@ swift = python-swiftclient>=3.0.0 msgpack-python lz4 - tooz>=1.30 + tooz>=1.35 ceph = msgpack-python lz4 - tooz>=1.30 + tooz>=1.35 ceph-pre-jewel: cradox>=1.0.9 ceph-jewel-and-later: @@ -49,7 +49,7 @@ ceph-jewel-and-later: file = msgpack-python lz4 - tooz>=1.30 + tooz>=1.35 doc = oslosphinx>=2.2.0 sphinx @@ -71,7 +71,7 @@ test = testtools>=0.9.38 WebTest>=2.0.16 doc8 - tooz>=1.30 + tooz>=1.35 keystonemiddleware>=4.0.0 [global] -- GitLab From 
f6a7f4ed03c743a738f9adffe2cbc22c3920e41f Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Fri, 17 Jun 2016 12:33:07 +0200 Subject: [PATCH 0256/1483] sqlalchemy: fix MySQL error handling in list_resources This code fails for a good reason: AttributeError: 'ProgrammingError' object has no attribute 'inner_exception' Change-Id: Id148a5f04bc84a9b39de3c4c993d84783e2289f4 --- gnocchi/indexer/sqlalchemy.py | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/gnocchi/indexer/sqlalchemy.py b/gnocchi/indexer/sqlalchemy.py index 61faa9c1..8e10250e 100644 --- a/gnocchi/indexer/sqlalchemy.py +++ b/gnocchi/indexer/sqlalchemy.py @@ -846,13 +846,12 @@ class SQLAlchemyIndexer(indexer.IndexerDriver): # (pymysql.err.ProgrammingError) # (1146, "Table \'test.rt_f00\' doesn\'t exist") # In that case, just ignore those resources. - inn_e = e.inner_exception if (not pymysql or not isinstance( - inn_e, sqlalchemy.exc.ProgrammingError) + e, sqlalchemy.exc.ProgrammingError) or not isinstance( - inn_e.orig, pymysql.err.ProgrammingError) - or (inn_e.orig.args[0] + e.orig, pymysql.err.ProgrammingError) + or (e.orig.args[0] != pymysql.constants.ER.NO_SUCH_TABLE)): raise -- GitLab From ebb1e3328176fb205710bb039a64cf719e60ce05 Mon Sep 17 00:00:00 2001 From: zhangguoqing Date: Thu, 26 May 2016 07:10:10 +0000 Subject: [PATCH 0257/1483] Tuneup gabbi resource.yaml file to modern standards Make use of METHOD: /url shorthand to make the request method more visible and more tightly associated with the url for easier reading. Use $LAST_URL to refer to the URL of the previous request, but only when the sameness of the URL is a relevant part of the expression of the tests-in-sequence. (That is, don't just use LAST_URL because the URL is the same.) 
Change-Id: Ibaa80ec7509240057ddc7df79f201c65e25a0475 --- gnocchi/tests/gabbi/gabbits/resource.yaml | 177 ++++++++-------------- 1 file changed, 67 insertions(+), 110 deletions(-) diff --git a/gnocchi/tests/gabbi/gabbits/resource.yaml b/gnocchi/tests/gabbi/gabbits/resource.yaml index c7882494..217a2f4f 100644 --- a/gnocchi/tests/gabbi/gabbits/resource.yaml +++ b/gnocchi/tests/gabbi/gabbits/resource.yaml @@ -14,8 +14,7 @@ tests: - name: create archive policy desc: for later use - url: /v1/archive_policy - method: POST + POST: /v1/archive_policy request_headers: content-type: application/json x-roles: admin @@ -26,8 +25,7 @@ tests: status: 201 - name: create archive policy rule - url: /v1/archive_policy_rule - method: POST + POST: /v1/archive_policy_rule request_headers: content-type: application/json x-roles: admin @@ -43,14 +41,14 @@ tests: # xfails. - name: root of all - url: / + GET: / response_headers: content-type: /application/json/ response_json_paths: $.versions[0].links[0].href: $SCHEME://$NETLOC/v1/ - name: root of v1 - url: /v1 + GET: /v1 redirects: true response_json_paths: $.version: "1.0" @@ -59,16 +57,16 @@ tests: $.links[7].href: $SCHEME://$NETLOC/v1/resource - name: root of resource - url: /v1/resource + GET: /v1/resource response_json_paths: $.volume: $SCHEME://$NETLOC/v1/resource/volume - name: typo of resource - url: /v1/resoue + GET: /v1/resoue status: 404 - name: typo of resource extra - url: /v1/resource/foobar + GET: /v1/resource/foobar status: 404 # Explore that GETting a list of resources demonstrates the expected @@ -76,24 +74,24 @@ tests: - name: instance resource desc: there are no instance resources yet - url: /v1/resource/instance + GET: /v1/resource/instance response_strings: - "[]" - name: instance resource bad accept desc: Expect 406 on bad accept type + GET: $LAST_URL request_headers: accept: text/plain - url: /v1/resource/instance status: 406 response_strings: - 406 Not Acceptable - name: instance resource complex accept desc: 
failover accept media type appropriately + GET: $LAST_URL request_headers: accept: text/plain, application/json; q=0.8 - url: /v1/resource/instance response_strings: - "[]" @@ -101,14 +99,13 @@ tests: - name: generic resource desc: there are no generic resources yet - url: /v1/resource/generic + GET: /v1/resource/generic response_strings: - "[]" - name: post resource no user-id desc: https://bugs.launchpad.net/gnocchi/+bug/1424005 - url: /v1/resource/generic - method: post + POST: $LAST_URL request_headers: # Only provide one of these auth headers x-user-id: 0fbb231484614b1a80131fc22f6afc9c @@ -121,8 +118,7 @@ tests: status: 201 - name: post generic resource - url: /v1/resource/generic - method: post + POST: $LAST_URL request_headers: x-user-id: 0fbb231484614b1a80131fc22f6afc9c x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea @@ -143,8 +139,7 @@ tests: - name: post same resource refuse desc: We can only post one identified resource once - url: /v1/resource/generic - method: post + POST: $LAST_URL request_headers: x-user-id: 0fbb231484614b1a80131fc22f6afc9c x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea @@ -157,8 +152,7 @@ tests: status: 409 - name: post generic resource bad content type - url: /v1/resource/generic - method: post + POST: $LAST_URL request_headers: x-user-id: 0fbb231484614b1a80131fc22f6afc9c x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea @@ -174,8 +168,7 @@ tests: # gets a useful 400 response. 
- name: post instance resource no data - url: /v1/resource/instance - method: post + POST: /v1/resource/instance request_headers: x-user-id: 0fbb231484614b1a80131fc22f6afc9c x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea @@ -183,8 +176,7 @@ tests: status: 400 - name: post instance resource with missing data - url: /v1/resource/instance - method: post + POST: $LAST_URL request_headers: x-user-id: 0fbb231484614b1a80131fc22f6afc9c x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea @@ -202,8 +194,7 @@ tests: - "'display_name']" - name: post instance resource - url: /v1/resource/instance - method: post + POST: $LAST_URL request_headers: x-user-id: 0fbb231484614b1a80131fc22f6afc9c x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea @@ -224,8 +215,7 @@ tests: # associate metrics. If a metric does not exist there should be a # graceful failure. - name: patch instance resource - url: $LOCATION - method: patch + PATCH: $LOCATION request_headers: x-user-id: 0fbb231484614b1a80131fc22f6afc9c x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea @@ -238,8 +228,7 @@ tests: - name: patch instance resource with same data desc: Ensure no useless revision have been created - url: /v1/resource/instance/75C44741-CC60-4033-804E-2D3098C7D2E9 - method: patch + PATCH: /v1/resource/instance/75C44741-CC60-4033-804E-2D3098C7D2E9 request_headers: x-user-id: 0fbb231484614b1a80131fc22f6afc9c x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea @@ -252,8 +241,7 @@ tests: revision_start: $RESPONSE['$.revision_start'] - name: patch instance resource with id - url: /v1/resource/instance/75C44741-CC60-4033-804E-2D3098C7D2E9 - method: patch + PATCH: $LAST_URL request_headers: x-user-id: 0fbb231484614b1a80131fc22f6afc9c x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea @@ -266,8 +254,7 @@ tests: - "'id']" - name: patch instance with metrics - url: /v1/resource/instance/75C44741-CC60-4033-804E-2D3098C7D2E9 - method: patch + PATCH: $LAST_URL request_headers: x-user-id: 0fbb231484614b1a80131fc22f6afc9c x-project-id: 
f3d41b770cc14f0bb94a1d5be9c0e3ea @@ -282,7 +269,7 @@ tests: - name: get instance history desc: Ensure we can get the history - url: /v1/resource/instance/75C44741-CC60-4033-804E-2D3098C7D2E9/history?sort=revision_end:asc-nullslast + GET: /v1/resource/instance/75C44741-CC60-4033-804E-2D3098C7D2E9/history?sort=revision_end:asc-nullslast request_headers: request_headers: x-user-id: 0fbb231484614b1a80131fc22f6afc9c @@ -296,8 +283,7 @@ tests: $[1].metrics.'disk.iops': $RESPONSE["metrics.'disk.iops'"] - name: patch instance bad metric association - url: /v1/resource/instance/75C44741-CC60-4033-804E-2D3098C7D2E9 - method: patch + PATCH: /v1/resource/instance/75C44741-CC60-4033-804E-2D3098C7D2E9 request_headers: x-user-id: 0fbb231484614b1a80131fc22f6afc9c x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea @@ -310,8 +296,7 @@ tests: - Metric f3d41b77-0cc1-4f0b-b94a-1d5be9c0e3ea does not exist - name: patch instance with bad archive policy - url: /v1/resource/instance/75C44741-CC60-4033-804E-2D3098C7D2E9 - method: patch + PATCH: $LAST_URL request_headers: x-user-id: 0fbb231484614b1a80131fc22f6afc9c x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea @@ -325,8 +310,7 @@ tests: - Archive policy noexist does not exist - name: patch instance with no archive policy rule - url: /v1/resource/instance/75C44741-CC60-4033-804E-2D3098C7D2E9 - method: patch + PATCH: $LAST_URL request_headers: x-user-id: 0fbb231484614b1a80131fc22f6afc9c x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea @@ -339,8 +323,7 @@ tests: - No archive policy name specified and no archive policy rule found matching the metric name disk.iops - name: patch instance with archive policy rule - url: /v1/resource/instance/75C44741-CC60-4033-804E-2D3098C7D2E9 - method: patch + PATCH: $LAST_URL request_headers: x-user-id: 0fbb231484614b1a80131fc22f6afc9c x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea @@ -352,7 +335,7 @@ tests: - name: get patched resource desc: confirm the patched resource is properly patched - url: 
/v1/resource/instance/75C44741-CC60-4033-804E-2D3098C7D2E9 + GET: $LAST_URL request_headers: x-user-id: 0fbb231484614b1a80131fc22f6afc9c x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea @@ -361,8 +344,7 @@ tests: - name: patch resource empty dict desc: an empty dict in patch is an existence check - url: /v1/resource/instance/75C44741-CC60-4033-804E-2D3098C7D2E9 - method: PATCH + PATCH: $LAST_URL request_headers: x-user-id: 0fbb231484614b1a80131fc22f6afc9c x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea @@ -374,8 +356,7 @@ tests: - name: patch resource without change with metrics in response desc: an empty dict in patch is an existence check - url: /v1/resource/instance/75C44741-CC60-4033-804E-2D3098C7D2E9 - method: PATCH + PATCH: $LAST_URL request_headers: x-user-id: 0fbb231484614b1a80131fc22f6afc9c x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea @@ -389,8 +370,7 @@ tests: - name: post instance history desc: should don't work - url: /v1/resource/instance/75C44741-CC60-4033-804E-2D3098C7D2E9/history - method: POST + POST: /v1/resource/instance/75C44741-CC60-4033-804E-2D3098C7D2E9/history request_headers: x-user-id: 0fbb231484614b1a80131fc22f6afc9c x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea @@ -399,8 +379,7 @@ tests: - name: delete instance history desc: should don't work - url: /v1/resource/instance/75C44741-CC60-4033-804E-2D3098C7D2E9/history - method: DELETE + DELETE: $LAST_URL request_headers: x-user-id: 0fbb231484614b1a80131fc22f6afc9c x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea @@ -411,8 +390,7 @@ tests: - name: patch resource no data desc: providing no data is an error - url: /v1/resource/instance/75C44741-CC60-4033-804E-2D3098C7D2E9 - method: PATCH + PATCH: /v1/resource/instance/75C44741-CC60-4033-804E-2D3098C7D2E9 request_headers: x-user-id: 0fbb231484614b1a80131fc22f6afc9c x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea @@ -423,8 +401,7 @@ tests: - name: patch resource bad data desc: providing data that is not a dict is an error - url: 
/v1/resource/instance/75C44741-CC60-4033-804E-2D3098C7D2E9 - method: PATCH + PATCH: $LAST_URL request_headers: x-user-id: 0fbb231484614b1a80131fc22f6afc9c x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea @@ -437,8 +414,7 @@ tests: - name: patch noexit resource desc: "patching something that doesn't exist is a 404" - url: /v1/resource/instance/77777777-CC60-4033-804E-2D3098C7D2E9 - method: patch + PATCH: /v1/resource/instance/77777777-CC60-4033-804E-2D3098C7D2E9 request_headers: x-user-id: 0fbb231484614b1a80131fc22f6afc9c x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea @@ -449,7 +425,7 @@ tests: - name: get noexist resource desc: if a resource does not exist 404 - url: /v1/resource/instance/77777777-CC60-4033-804E-2D3098C7D2E9 + GET: $LAST_URL request_headers: x-user-id: 0fbb231484614b1a80131fc22f6afc9c x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea @@ -459,7 +435,7 @@ tests: - name: get bad resource id desc: https://bugs.launchpad.net/gnocchi/+bug/1425588 - url: /v1/resource/instance/noexist + GET: /v1/resource/instance/noexist request_headers: x-user-id: 0fbb231484614b1a80131fc22f6afc9c x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea @@ -468,7 +444,7 @@ tests: - The resource could not be found. 
- name: get metrics for this not-existing resource - url: /v1/resource/instance/77777777-CC60-4033-804E-2D3098C7D2E9/metric/cpu.util + GET: /v1/resource/instance/77777777-CC60-4033-804E-2D3098C7D2E9/metric/cpu.util request_headers: x-user-id: 0fbb231484614b1a80131fc22f6afc9c x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea @@ -478,12 +454,12 @@ tests: # List resources - name: list instance resources no auth - url: /v1/resource/instance + GET: /v1/resource/instance response_strings: - "[]" - name: list instance resources - url: /v1/resource/instance + GET: $LAST_URL request_headers: x-user-id: 0fbb231484614b1a80131fc22f6afc9c x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea @@ -492,7 +468,7 @@ tests: $[-1].host: compute2 - name: list all resources - url: /v1/resource/generic + GET: /v1/resource/generic request_headers: x-user-id: 0fbb231484614b1a80131fc22f6afc9c x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea @@ -502,8 +478,7 @@ tests: # Metric handling when POSTing resources. - name: post new instance with non-existent metrics - url: /v1/resource/instance - method: post + POST: /v1/resource/instance request_headers: x-user-id: 0fbb231484614b1a80131fc22f6afc9c x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea @@ -521,8 +496,7 @@ tests: status: 400 - name: post new instance with metrics bad policy - url: /v1/resource/instance - method: post + POST: $LAST_URL request_headers: x-user-id: 0fbb231484614b1a80131fc22f6afc9c x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea @@ -541,8 +515,7 @@ tests: status: 400 - name: post new instance with metrics no policy rule - url: /v1/resource/instance - method: post + POST: $LAST_URL request_headers: x-user-id: 0fbb231484614b1a80131fc22f6afc9c x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea @@ -562,8 +535,7 @@ tests: - No archive policy name specified and no archive policy rule found matching the metric name cpu.util - name: post new instance with metrics using policy rule - url: /v1/resource/instance - method: post + POST: $LAST_URL 
request_headers: x-user-id: 0fbb231484614b1a80131fc22f6afc9c x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea @@ -581,8 +553,7 @@ tests: status: 201 - name: post new instance with metrics - url: /v1/resource/instance - method: post + POST: $LAST_URL request_headers: x-user-id: 0fbb231484614b1a80131fc22f6afc9c x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea @@ -604,8 +575,7 @@ tests: created_by_project_id: f3d41b770cc14f0bb94a1d5be9c0e3ea - name: post new instance with metrics and un-normalized user/project id from keystone middleware - url: /v1/resource/instance - method: post + POST: $LAST_URL request_headers: x-user-id: 0fbb231484614b1a80131fc22f6afc9c x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea @@ -627,7 +597,7 @@ tests: - name: get metrics for this resource desc: with async measure handling this is a null test - url: /v1/resource/instance/$RESPONSE['$.id']/metric/cpu.util/measures + GET: /v1/resource/instance/$RESPONSE['$.id']/metric/cpu.util/measures request_headers: x-user-id: 0fbb231484614b1a80131fc22f6afc9c x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea @@ -638,13 +608,13 @@ tests: # Interrogate the NamedMetricController - name: list the instances - url: /v1/resource/instance + GET: /v1/resource/instance request_headers: x-user-id: 0fbb231484614b1a80131fc22f6afc9c x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - name: request metrics from one of the instances - url: /v1/resource/instance/$RESPONSE['$[-1].id']/metric + GET: /v1/resource/instance/$RESPONSE['$[-1].id']/metric request_headers: x-user-id: 0fbb231484614b1a80131fc22f6afc9c x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea @@ -655,7 +625,7 @@ tests: - name: request metrics from non uuid metrics desc: 404 from GenericResourceController - url: /v1/resource/instance/not.a.uuid/metric + GET: /v1/resource/instance/not.a.uuid/metric request_headers: x-user-id: 0fbb231484614b1a80131fc22f6afc9c x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea @@ -663,7 +633,7 @@ tests: status: 404 - name: request cpuutil metric 
from instance - url: /v1/resource/instance/85C44741-CC60-4033-804E-2D3098C7D2E9/metric/cpu.util + GET: /v1/resource/instance/85C44741-CC60-4033-804E-2D3098C7D2E9/metric/cpu.util request_headers: x-user-id: 0fbb231484614b1a80131fc22f6afc9c x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea @@ -672,8 +642,7 @@ tests: $.archive_policy.name: medium - name: try post cpuutil metric to instance - url: /v1/resource/instance/85C44741-CC60-4033-804E-2D3098C7D2E9/metric/cpu.util - method: post + POST: $LAST_URL request_headers: x-user-id: 0fbb231484614b1a80131fc22f6afc9c x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea @@ -682,7 +651,7 @@ tests: - name: request cpuutil measures from instance desc: with async measure handling this is a null test - url: /v1/resource/instance/85C44741-CC60-4033-804E-2D3098C7D2E9/metric/cpu.util/measures + GET: /v1/resource/instance/85C44741-CC60-4033-804E-2D3098C7D2E9/metric/cpu.util/measures request_headers: x-user-id: 0fbb231484614b1a80131fc22f6afc9c x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea @@ -690,8 +659,7 @@ tests: - "[]" - name: post cpuutil measures - url: /v1/resource/instance/85C44741-CC60-4033-804E-2D3098C7D2E9/metric/cpu.util/measures - method: post + POST: $LAST_URL request_headers: x-user-id: 0fbb231484614b1a80131fc22f6afc9c x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea @@ -704,7 +672,7 @@ tests: status: 202 - name: request cpuutil measures again - url: /v1/resource/instance/85C44741-CC60-4033-804E-2D3098C7D2E9/metric/cpu.util/measures + GET: $LAST_URL request_headers: x-user-id: 0fbb231484614b1a80131fc22f6afc9c x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea @@ -717,8 +685,7 @@ tests: $[0][2]: 43.100000000000001 - name: post metric at instance - url: /v1/resource/instance/85C44741-CC60-4033-804E-2D3098C7D2E9/metric - method: post + POST: /v1/resource/instance/85C44741-CC60-4033-804E-2D3098C7D2E9/metric request_headers: x-user-id: 0fbb231484614b1a80131fc22f6afc9c x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea @@ -730,8 +697,7 @@ 
tests: response_headers: - name: post metric at instance with empty definition - url: /v1/resource/instance/85C44741-CC60-4033-804E-2D3098C7D2E9/metric - method: post + POST: $LAST_URL request_headers: x-user-id: 0fbb231484614b1a80131fc22f6afc9c x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea @@ -743,8 +709,7 @@ tests: - No archive policy name specified and no archive policy rule found matching the metric name foo.bar - name: post metric at instance using archive policy rule - url: /v1/resource/instance/85C44741-CC60-4033-804E-2D3098C7D2E9/metric - method: post + POST: $LAST_URL request_headers: x-user-id: 0fbb231484614b1a80131fc22f6afc9c x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea @@ -754,8 +719,7 @@ tests: disk.io.rate: {} - name: duplicate metrics at instance - url: /v1/resource/instance/85C44741-CC60-4033-804E-2D3098C7D2E9/metric - method: post + POST: $LAST_URL request_headers: x-user-id: 0fbb231484614b1a80131fc22f6afc9c x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea @@ -768,8 +732,7 @@ tests: - Named metric electron.spin already exists - name: post metrics at instance bad policy - url: /v1/resource/instance/85C44741-CC60-4033-804E-2D3098C7D2E9/metric - method: post + POST: $LAST_URL request_headers: x-user-id: 0fbb231484614b1a80131fc22f6afc9c x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea @@ -784,8 +747,7 @@ tests: # Check bad timestamps - name: post new instance with bad timestamp - url: /v1/resource/instance - method: post + POST: /v1/resource/instance request_headers: x-user-id: 0fbb231484614b1a80131fc22f6afc9c x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea @@ -811,8 +773,7 @@ tests: - name: post to non uuid metrics desc: 404 from GenericResourceController - url: /v1/resource/instance/not.a.uuid/metric - method: post + POST: /v1/resource/instance/not.a.uuid/metric request_headers: x-user-id: 0fbb231484614b1a80131fc22f6afc9c x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea @@ -824,8 +785,7 @@ tests: - name: post to missing uuid metrics desc: 404 from 
NamedMetricController - url: /v1/resource/instance/d5a5994e-ee90-11e4-88cf-685b35afa334/metric - method: post + POST: /v1/resource/instance/d5a5994e-ee90-11e4-88cf-685b35afa334/metric request_headers: x-user-id: 0fbb231484614b1a80131fc22f6afc9c x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea @@ -839,8 +799,7 @@ tests: - name: post measure on unknown metric desc: 404 from NamedMetricController with metric error - url: /v1/resource/instance/85C44741-CC60-4033-804E-2D3098C7D2E9/metric/unknown/measures - method: post + POST: /v1/resource/instance/85C44741-CC60-4033-804E-2D3098C7D2E9/metric/unknown/measures request_headers: x-user-id: 0fbb231484614b1a80131fc22f6afc9c x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea @@ -855,17 +814,15 @@ tests: # DELETE-ing instances - name: delete instance - url: /v1/resource/instance/75C44741-CC60-4033-804E-2D3098C7D2E9 + DELETE: /v1/resource/instance/75C44741-CC60-4033-804E-2D3098C7D2E9 request_headers: x-user-id: 0fbb231484614b1a80131fc22f6afc9c x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - method: DELETE status: 204 - name: delete noexist instance - url: /v1/resource/instance/77777777-CC60-4033-804E-2D3098C7D2E9 + DELETE: /v1/resource/instance/77777777-CC60-4033-804E-2D3098C7D2E9 request_headers: x-user-id: 0fbb231484614b1a80131fc22f6afc9c x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - method: DELETE status: 404 -- GitLab From 9ac00bd8c3a22cea6326fc3e682dde5a7df8b749 Mon Sep 17 00:00:00 2001 From: zhangguoqing Date: Thu, 26 May 2016 06:34:22 +0000 Subject: [PATCH 0258/1483] Tuneup gabbi resource_aggregation.yaml file to modern standards Make use of METHOD: /url shorthand to make the request method more visible and more tightly associated with the url for easier reading. 
Change-Id: Icf6717f69b2a5cab5d26fb5817b80e757270de43 --- .../gabbi/gabbits/resource-aggregation.yaml | 30 +++++++------------ 1 file changed, 10 insertions(+), 20 deletions(-) diff --git a/gnocchi/tests/gabbi/gabbits/resource-aggregation.yaml b/gnocchi/tests/gabbi/gabbits/resource-aggregation.yaml index 443b8652..c0338476 100644 --- a/gnocchi/tests/gabbi/gabbits/resource-aggregation.yaml +++ b/gnocchi/tests/gabbi/gabbits/resource-aggregation.yaml @@ -4,8 +4,7 @@ fixtures: tests: - name: create archive policy desc: for later use - url: /v1/archive_policy - method: POST + POST: /v1/archive_policy request_headers: content-type: application/json x-roles: admin @@ -17,8 +16,7 @@ tests: status: 201 - name: create resource 1 - url: /v1/resource/generic - method: post + POST: /v1/resource/generic request_headers: x-user-id: 6c865dd0-7945-4e08-8b27-d0d7f1c2b667 x-project-id: c7f32f1f-c5ef-427a-8ecd-915b219c66e8 @@ -33,8 +31,7 @@ tests: status: 201 - name: post cpuutil measures 1 - url: /v1/resource/generic/4ed9c196-4c9f-4ba8-a5be-c9a71a82aac4/metric/cpu.util/measures - method: post + POST: /v1/resource/generic/4ed9c196-4c9f-4ba8-a5be-c9a71a82aac4/metric/cpu.util/measures request_headers: x-user-id: 6c865dd0-7945-4e08-8b27-d0d7f1c2b667 x-project-id: c7f32f1f-c5ef-427a-8ecd-915b219c66e8 @@ -47,8 +44,7 @@ tests: status: 202 - name: create resource 2 - url: /v1/resource/generic - method: post + POST: /v1/resource/generic request_headers: x-user-id: 6c865dd0-7945-4e08-8b27-d0d7f1c2b667 x-project-id: c7f32f1f-c5ef-427a-8ecd-915b219c66e8 @@ -63,8 +59,7 @@ tests: status: 201 - name: post cpuutil measures 2 - url: /v1/resource/generic/1447CD7E-48A6-4C50-A991-6677CC0D00E6/metric/cpu.util/measures - method: post + POST: /v1/resource/generic/1447CD7E-48A6-4C50-A991-6677CC0D00E6/metric/cpu.util/measures request_headers: x-user-id: 6c865dd0-7945-4e08-8b27-d0d7f1c2b667 x-project-id: c7f32f1f-c5ef-427a-8ecd-915b219c66e8 @@ -77,8 +72,7 @@ tests: status: 202 - name: create resource 3 - url: 
/v1/resource/generic - method: post + POST: /v1/resource/generic request_headers: x-user-id: 6c865dd0-7945-4e08-8b27-d0d7f1c2b667 x-project-id: c7f32f1f-c5ef-427a-8ecd-915b219c66e8 @@ -93,8 +87,7 @@ tests: status: 201 - name: post cpuutil measures 3 - url: /v1/resource/generic/33333BC5-5948-4F29-B7DF-7DE607660452/metric/cpu.util/measures - method: post + POST: /v1/resource/generic/33333BC5-5948-4F29-B7DF-7DE607660452/metric/cpu.util/measures request_headers: x-user-id: 6c865dd0-7945-4e08-8b27-d0d7f1c2b667 x-project-id: c7f32f1f-c5ef-427a-8ecd-915b219c66e8 @@ -107,8 +100,7 @@ tests: status: 202 - name: aggregate metric with groupby on project_id - url: /v1/aggregation/resource/generic/metric/cpu.util?groupby=project_id - method: post + POST: /v1/aggregation/resource/generic/metric/cpu.util?groupby=project_id request_headers: x-user-id: 6c865dd0-7945-4e08-8b27-d0d7f1c2b667 x-project-id: c7f32f1f-c5ef-427a-8ecd-915b219c66e8 @@ -135,8 +127,7 @@ tests: project_id: ee4cfc41-1cdc-4d2f-9a08-f94111d80171 - name: aggregate metric with groupby on project_id and invalid group - url: /v1/aggregation/resource/generic/metric/cpu.util?groupby=project_id&groupby=thisisdumb - method: post + POST: /v1/aggregation/resource/generic/metric/cpu.util?groupby=project_id&groupby=thisisdumb request_headers: x-user-id: 6c865dd0-7945-4e08-8b27-d0d7f1c2b667 x-project-id: c7f32f1f-c5ef-427a-8ecd-915b219c66e8 @@ -149,8 +140,7 @@ tests: - Invalid groupby attribute - name: aggregate metric with groupby on project_id and user_id - url: /v1/aggregation/resource/generic/metric/cpu.util?groupby=project_id&groupby=user_id - method: post + POST: /v1/aggregation/resource/generic/metric/cpu.util?groupby=project_id&groupby=user_id request_headers: x-user-id: 6c865dd0-7945-4e08-8b27-d0d7f1c2b667 x-project-id: c7f32f1f-c5ef-427a-8ecd-915b219c66e8 -- GitLab From 4576f1b2d3bfc17d3f8d199d37dd57698699b1b8 Mon Sep 17 00:00:00 2001 From: zhangguoqing Date: Thu, 26 May 2016 07:13:22 +0000 Subject: [PATCH 0259/1483] 
Tuneup gabbi search_metric.yaml file to modern standards Make use of METHOD: /url shorthand to make the request method more visible and more tightly associated with the url for easier reading. Change-Id: I3bef016a5be090fdf59aa8cb4c62384bd146b7ff --- gnocchi/tests/gabbi/gabbits/search-metric.yaml | 17 ++++++----------- 1 file changed, 6 insertions(+), 11 deletions(-) diff --git a/gnocchi/tests/gabbi/gabbits/search-metric.yaml b/gnocchi/tests/gabbi/gabbits/search-metric.yaml index 8b70ef83..95c31a37 100644 --- a/gnocchi/tests/gabbi/gabbits/search-metric.yaml +++ b/gnocchi/tests/gabbi/gabbits/search-metric.yaml @@ -9,8 +9,7 @@ fixtures: tests: - name: create archive policy desc: for later use - url: /v1/archive_policy - method: POST + POST: /v1/archive_policy request_headers: content-type: application/json x-roles: admin @@ -21,17 +20,15 @@ tests: status: 201 - name: create metric - url: /v1/metric + POST: /v1/metric request_headers: content-type: application/json - method: post data: archive_policy_name: "high" status: 201 - name: search measure with wrong start - url: /v1/search/metric?metric_id=$RESPONSE['$.id']&start=foobar - method: post + POST: /v1/search/metric?metric_id=$RESPONSE['$.id']&start=foobar request_headers: content-type: application/json data: @@ -42,17 +39,15 @@ tests: - Invalid value for start - name: create metric 2 - url: /v1/metric + POST: /v1/metric request_headers: content-type: application/json - method: post data: archive_policy_name: "high" status: 201 - name: search measure with wrong stop - url: /v1/search/metric?metric_id=$RESPONSE['$.id']&stop=foobar - method: post + POST: /v1/search/metric?metric_id=$RESPONSE['$.id']&stop=foobar request_headers: content-type: application/json data: @@ -60,4 +55,4 @@ tests: - ≥: 1000 status: 400 response_strings: - - Invalid value for stop \ No newline at end of file + - Invalid value for stop -- GitLab From da74bc25fe29045bc438bb551e6defc2eb67fda4 Mon Sep 17 00:00:00 2001 From: zhangguoqing Date: Thu, 
26 May 2016 06:45:52 +0000 Subject: [PATCH 0260/1483] Tuneup gabbi resource_type.yaml file to modern standards Make use of METHOD: /url shorthand to make the request method more visible and more tightly associated with the url for easier reading. Use $LAST_URL to refer to the URL of the previous request, but only when the sameness of the URL is a relevant part of the expression of the tests-in-sequence. (That is, don't just use LAST_URL because the URL is the same.) Change-Id: Id07781b370d269a6c88958f8f6611e642ea357fc --- .../tests/gabbi/gabbits/resource-type.yaml | 61 +++++++------------ 1 file changed, 22 insertions(+), 39 deletions(-) diff --git a/gnocchi/tests/gabbi/gabbits/resource-type.yaml b/gnocchi/tests/gabbi/gabbits/resource-type.yaml index 3d710e16..664d64e8 100644 --- a/gnocchi/tests/gabbi/gabbits/resource-type.yaml +++ b/gnocchi/tests/gabbi/gabbits/resource-type.yaml @@ -10,15 +10,14 @@ tests: - name: list resource type desc: only legacy resource types are present - url: /v1/resource_type + GET: /v1/resource_type response_json_paths: $.`len`: 15 # Some bad cases - name: post resource type as non-admin - url: /v1/resource_type - method: post + POST: $LAST_URL data: name: my_custom_resource request_headers: @@ -39,8 +38,7 @@ tests: status: 400 - name: post resource type bad string - url: /v1/resource_type - method: post + POST: $LAST_URL request_headers: x-roles: admin content-type: application/json @@ -63,8 +61,7 @@ tests: # Create a type - name: post resource type - url: /v1/resource_type - method: post + POST: $LAST_URL request_headers: x-roles: admin content-type: application/json @@ -131,13 +128,13 @@ tests: - name: relist resource types desc: we have a resource type now - url: /v1/resource_type + GET: $LAST_URL response_json_paths: $.`len`: 16 $.[11].name: my_custom_resource - name: get the custom resource type - url: /v1/resource_type/my_custom_resource + GET: /v1/resource_type/my_custom_resource response_json_paths: $.name: my_custom_resource 
$.attributes: @@ -171,15 +168,13 @@ tests: # Some bad case case on the type - name: delete as non-admin - url: /v1/resource_type/my_custom_resource - method: DELETE + DELETE: $LAST_URL status: 403 # Bad resources for this type - name: post invalid resource - url: /v1/resource/my_custom_resource - method: post + POST: /v1/resource/my_custom_resource request_headers: x-user-id: 0fbb2314-8461-4b1a-8013-1fc22f6afc9c x-project-id: f3d41b77-0cc1-4f0b-b94a-1d5be9c0e3ea @@ -196,8 +191,7 @@ tests: - "'name']" - name: post invalid resource uuid - url: /v1/resource/my_custom_resource - method: post + POST: $LAST_URL request_headers: x-user-id: 0fbb2314-8461-4b1a-8013-1fc22f6afc9c x-project-id: f3d41b77-0cc1-4f0b-b94a-1d5be9c0e3ea @@ -216,8 +210,7 @@ tests: # Good resources for this type - name: post custom resource - url: /v1/resource/my_custom_resource - method: post + POST: $LAST_URL request_headers: x-user-id: 0fbb2314-8461-4b1a-8013-1fc22f6afc9c x-project-id: f3d41b77-0cc1-4f0b-b94a-1d5be9c0e3ea @@ -235,8 +228,7 @@ tests: $.foobar: what - name: patch custom resource - url: /v1/resource/my_custom_resource/d11edfca-4393-4fda-b94d-b05a3a1b3747 - method: patch + PATCH: /v1/resource/my_custom_resource/d11edfca-4393-4fda-b94d-b05a3a1b3747 request_headers: x-user-id: 0fbb2314-8461-4b1a-8013-1fc22f6afc9c x-project-id: f3d41b77-0cc1-4f0b-b94a-1d5be9c0e3ea @@ -252,7 +244,7 @@ tests: $.int: 1 - name: get resource - url: /v1/resource/my_custom_resource/d11edfca-4393-4fda-b94d-b05a3a1b3747 + GET: $LAST_URL request_headers: content-type: application/json response_json_paths: @@ -263,8 +255,7 @@ tests: $.int: 1 - name: post resource with default - url: /v1/resource/my_custom_resource - method: post + POST: /v1/resource/my_custom_resource request_headers: x-user-id: 0fbb2314-8461-4b1a-8013-1fc22f6afc9c x-project-id: f3d41b77-0cc1-4f0b-b94a-1d5be9c0e3ea @@ -284,7 +275,7 @@ tests: # Ensure we can't delete the type - name: list resource history - url: 
/v1/resource/my_custom_resource/d11edfca-4393-4fda-b94d-b05a3a1b3747/history?sort=revision_end:asc-nullslast + GET: /v1/resource/my_custom_resource/d11edfca-4393-4fda-b94d-b05a3a1b3747/history?sort=revision_end:asc-nullslast request_headers: content-type: application/json response_json_paths: @@ -297,8 +288,7 @@ tests: $[1].foobar: what - name: delete in use resource_type - url: /v1/resource_type/my_custom_resource - method: delete + DELETE: /v1/resource_type/my_custom_resource request_headers: x-roles: admin status: 400 @@ -308,38 +298,33 @@ tests: # Delete associated resources - name: delete the resource - url: /v1/resource/my_custom_resource/d11edfca-4393-4fda-b94d-b05a3a1b3747 + DELETE: /v1/resource/my_custom_resource/d11edfca-4393-4fda-b94d-b05a3a1b3747 request_headers: x-roles: admin - method: DELETE status: 204 - name: delete the second resource - url: /v1/resource/my_custom_resource/c4110aec-6e5c-43fa-b8c5-ffdfbca3ce59 + DELETE: /v1/resource/my_custom_resource/c4110aec-6e5c-43fa-b8c5-ffdfbca3ce59 request_headers: x-roles: admin - method: DELETE status: 204 # Now we can deleted the type - name: delete the custom resource type - method: delete + DELETE: /v1/resource_type/my_custom_resource request_headers: x-roles: admin - url: /v1/resource_type/my_custom_resource status: 204 - name: delete non-existing custom resource type - method: delete + DELETE: $LAST_URL request_headers: x-roles: admin - url: /v1/resource_type/my_custom_resource status: 404 - name: delete missing custom resource type utf8 - url: /v1/resource_type/%E2%9C%94%C3%A9%C3%B1%E2%98%83 - method: DELETE + DELETE: /v1/resource_type/%E2%9C%94%C3%A9%C3%B1%E2%98%83 request_headers: x-roles: admin status: 404 @@ -349,8 +334,7 @@ tests: # Can we readd and delete the same resource type again - name: post resource type again - url: /v1/resource_type - method: post + POST: /v1/resource_type request_headers: x-roles: admin content-type: application/json @@ -359,8 +343,7 @@ tests: status: 201 - name: delete 
the custom resource type again - method: delete + DELETE: /v1/resource_type/my_custom_resource request_headers: x-roles: admin - url: /v1/resource_type/my_custom_resource status: 204 -- GitLab From f52e62a309a863c057eff2d15989a14d5e96857f Mon Sep 17 00:00:00 2001 From: zhangguoqing Date: Thu, 26 May 2016 06:01:41 +0000 Subject: [PATCH 0261/1483] Tuneup gabbi metric.yaml file to modern standards Make use of METHOD: /url shorthand to make the request method more visible and more tightly associated with the url for easier reading. Change-Id: I453d86438df3ff64f9b99d0bd4dd93891dd7d5b9 --- gnocchi/tests/gabbi/gabbits/metric.yaml | 85 ++++++++++--------------- 1 file changed, 33 insertions(+), 52 deletions(-) diff --git a/gnocchi/tests/gabbi/gabbits/metric.yaml b/gnocchi/tests/gabbi/gabbits/metric.yaml index e9ef78bc..98004df4 100644 --- a/gnocchi/tests/gabbi/gabbits/metric.yaml +++ b/gnocchi/tests/gabbi/gabbits/metric.yaml @@ -4,13 +4,12 @@ fixtures: tests: - name: wrong metric desc: https://bugs.launchpad.net/gnocchi/+bug/1429949 - url: /v1/metric/foobar + GET: /v1/metric/foobar status: 404 - name: create archive policy desc: for later use - url: /v1/archive_policy - method: POST + POST: /v1/archive_policy request_headers: content-type: application/json x-roles: admin @@ -21,8 +20,7 @@ tests: status: 201 - name: create archive policy rule - url: /v1/archive_policy_rule - method: POST + POST: /v1/archive_policy_rule request_headers: content-type: application/json x-roles: admin @@ -33,8 +31,7 @@ tests: status: 201 - name: create alt archive policy - url: /v1/archive_policy - method: POST + POST: /v1/archive_policy request_headers: content-type: application/json x-roles: admin @@ -46,8 +43,7 @@ tests: - name: create alt archive policy rule desc: extra rule that won't be matched - url: /v1/archive_policy_rule - method: POST + POST: /v1/archive_policy_rule request_headers: content-type: application/json x-roles: admin @@ -58,16 +54,15 @@ tests: status: 201 - name: get 
metric empty - url: /v1/metric + GET: /v1/metric status: 200 response_strings: - "[]" - name: create metric with name and unit - url: /v1/metric + POST: /v1/metric request_headers: content-type: application/json - method: post data: name: "disk.io.rate" unit: "B/s" @@ -78,10 +73,9 @@ tests: $.unit: B/s - name: create metric with name and over length unit - url: /v1/metric + POST: /v1/metric request_headers: content-type: application/json - method: post data: name: "disk.io.rate" unit: "over_length_unit_over_length_unit" @@ -92,10 +86,9 @@ tests: - "'unit']" - name: create metric with name no rule - url: /v1/metric + POST: /v1/metric request_headers: content-type: application/json - method: post data: name: "volume.io.rate" status: 400 @@ -103,8 +96,7 @@ tests: - No archive policy name specified and no archive policy rule found matching the metric name volume.io.rate - name: create metric bad archive policy - url: /v1/metric - method: POST + POST: /v1/metric request_headers: content-type: application/json data: @@ -114,8 +106,7 @@ tests: - Archive policy bad-cookie does not exist - name: create metric bad content-type - url: /v1/metric - method: POST + POST: /v1/metric request_headers: content-type: plain/text data: @@ -123,8 +114,7 @@ tests: status: 415 - name: create valid metric - url: /v1/metric - method: POST + POST: /v1/metric request_headers: content-type: application/json data: @@ -134,16 +124,15 @@ tests: $.archive_policy_name: cookies - name: get valid metric id - url: /v1/metric/$RESPONSE['$.id'] + GET: /v1/metric/$RESPONSE['$.id'] status: 200 response_json_paths: $.archive_policy.name: cookies - name: push measurements to metric before epoch - url: /v1/metric/$RESPONSE['$.id']/measures + POST: /v1/metric/$RESPONSE['$.id']/measures request_headers: content-type: application/json - method: post data: - timestamp: "1915-03-06T14:33:57" value: 43.1 @@ -152,14 +141,13 @@ tests: - Timestamp must be after Epoch - name: get valid metric id again - url: 
/v1/metric + GET: /v1/metric status: 200 - name: push measurements to metric - url: /v1/metric/$RESPONSE['$[0].id']/measures + POST: /v1/metric/$RESPONSE['$[0].id']/measures request_headers: content-type: application/json - method: post data: - timestamp: "2015-03-06T14:33:57" value: 43.1 @@ -168,8 +156,7 @@ tests: status: 202 - name: create valid metric two - url: /v1/metric - method: POST + POST: /v1/metric request_headers: content-type: application/json data: @@ -179,10 +166,9 @@ tests: $.archive_policy_name: cookies - name: push invalid measurements to metric - url: /v1/metric/$RESPONSE['$.id']/measures + POST: /v1/metric/$RESPONSE['$.id']/measures request_headers: content-type: application/json - method: post data: - timestamp: "2015-03-06T14:33:57" value: 12 @@ -191,8 +177,7 @@ tests: status: 400 - name: create valid metric three - url: /v1/metric - method: POST + POST: /v1/metric request_headers: content-type: application/json data: @@ -202,18 +187,16 @@ tests: $.archive_policy_name: cookies - name: push invalid measurements to metric bis - url: /v1/metric/$RESPONSE['$.id']/measures + POST: /v1/metric/$RESPONSE['$.id']/measures request_headers: content-type: application/json - method: post data: 1 status: 400 - name: add measure unknown metric - url: /v1/metric/fake/measures + POST: /v1/metric/fake/measures request_headers: content-type: application/json - method: post data: - timestamp: "2015-03-06T14:33:57" value: 43.1 @@ -223,59 +206,58 @@ tests: request_headers: x-user-id: foo x-project-id: bar - url: /v1/metric + GET: /v1/metric - name: get metric list - url: /v1/metric + GET: /v1/metric status: 200 response_json_paths: $[0].archive_policy.name: cookies - name: get measurements from metric - url: /v1/metric/$RESPONSE['$[0].id']/measures + GET: /v1/metric/$RESPONSE['$[0].id']/measures status: 200 - name: get metric list for start test - url: /v1/metric + GET: /v1/metric status: 200 response_json_paths: $[0].archive_policy.name: cookies - name: get 
measurements by start - url: /v1/metric/$RESPONSE['$[0].id']/measures?start=2015-03-06T14:33:57 + GET: /v1/metric/$RESPONSE['$[0].id']/measures?start=2015-03-06T14:33:57 status: 200 - name: get measures unknown metric - url: /v1/metric/fake/measures + GET: /v1/metric/fake/measures status: 404 - name: get metric list for aggregates - url: /v1/metric + GET: /v1/metric status: 200 response_json_paths: $[0].archive_policy.name: cookies - name: get measure unknown aggregates - url: /v1/aggregation/metric?metric=$RESPONSE['$[0].id']&aggregation=last + GET: /v1/aggregation/metric?metric=$RESPONSE['$[0].id']&aggregation=last status: 404 response_strings: - Aggregation method 'last' for metric $RESPONSE['$[0].id'] does not exist - name: aggregate measure unknown metric - url: /v1/aggregation/metric?metric=cee6ef1f-52cc-4a16-bbb5-648aedfd1c37 + GET: /v1/aggregation/metric?metric=cee6ef1f-52cc-4a16-bbb5-648aedfd1c37 status: 404 response_strings: - Metric cee6ef1f-52cc-4a16-bbb5-648aedfd1c37 does not exist - name: get metric list for delete - url: /v1/metric + GET: /v1/metric status: 200 response_json_paths: $[0].archive_policy.name: cookies - name: delete metric - url: /v1/metric/$RESPONSE['$[0].id'] - method: DELETE + DELETE: /v1/metric/$RESPONSE['$[0].id'] status: 204 - name: delete metric again @@ -283,6 +265,5 @@ tests: status: 404 - name: delete non existent metric - url: /v1/metric/foo - method: DELETE + DELETE: /v1/metric/foo status: 404 -- GitLab From c736838630be4f0a337c4720557609553bf290e8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ond=C5=99ej=20Nov=C3=BD?= Date: Tue, 21 Jun 2016 13:01:42 +0200 Subject: [PATCH 0262/1483] d/watch: Fixed upstream URL --- debian/changelog | 6 ++++++ debian/watch | 2 +- 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/debian/changelog b/debian/changelog index 54d3a525..3558c026 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +gnocchi (2.0.2-7) UNRELEASED; urgency=medium + + * d/watch: Fixed upstream URL + 
+ -- Ondřej Nový Tue, 21 Jun 2016 13:00:49 +0200 + gnocchi (2.0.2-6) unstable; urgency=medium * Fixed section to be net instead of python (Closes: #825364). diff --git a/debian/watch b/debian/watch index 62628b6a..3f8c9400 100644 --- a/debian/watch +++ b/debian/watch @@ -1,4 +1,4 @@ version=3 opts="uversionmangle=s/\.(b|rc)/~$1/" \ -https://github.com/openstack/nova/gnocchi .*/(\d[\d\.]+)\.tar\.gz +https://github.com/openstack/gnocchi/tags .*/(\d[\d\.]+)\.tar\.gz -- GitLab From 24d2854f604a4b4f1178aa90b677ae3256a2da9c Mon Sep 17 00:00:00 2001 From: gord chung Date: Fri, 20 May 2016 13:26:11 -0400 Subject: [PATCH 0263/1483] separate cleanup into own worker Change-Id: I5938cbb641cc746ac15ff5b864fa7e7657863676 Closes-Bug: #1583820 --- gnocchi/cli.py | 14 ++++++++++++++ gnocchi/storage/__init__.py | 25 ++++++++++++++----------- gnocchi/tests/test_storage.py | 11 +++++++++++ 3 files changed, 39 insertions(+), 11 deletions(-) diff --git a/gnocchi/cli.py b/gnocchi/cli.py index 1fc692c0..29e2e8ec 100644 --- a/gnocchi/cli.py +++ b/gnocchi/cli.py @@ -151,6 +151,15 @@ class MetricReporting(MetricProcessBase): exc_info=True) +class MetricJanitor(MetricProcessBase): + def _run_job(self): + try: + self.store.expunge_metrics(self.index) + LOG.debug("Metrics marked for deletion removed from backend") + except Exception: + LOG.error("Unexpected error during metric cleanup", exc_info=True) + + class MetricProcessor(MetricProcessBase): def __init__(self, conf, worker_id=0, interval_delay=0, queue=None): super(MetricProcessor, self).__init__(conf, worker_id, interval_delay) @@ -195,6 +204,11 @@ def metricd(): metric_report.start() workers.append(metric_report) + metric_janitor = MetricJanitor( + conf, interval_delay=conf.storage.metric_cleanup_delay) + metric_janitor.start() + workers.append(metric_janitor) + for worker in workers: worker.join() except KeyboardInterrupt: diff --git a/gnocchi/storage/__init__.py b/gnocchi/storage/__init__.py index 4354f307..4fa03431 100644 --- 
a/gnocchi/storage/__init__.py +++ b/gnocchi/storage/__init__.py @@ -35,6 +35,10 @@ OPTS = [ default=60, help="How many seconds to wait between " "metric ingestion reporting"), + cfg.IntOpt('metric_cleanup_delay', + default=60, + help="How many seconds to wait between " + "cleaning of expired data"), ] LOG = log.getLogger(__name__) @@ -171,8 +175,7 @@ class StorageDriver(object): def process_background_tasks(self, index, block_size=128, sync=False): """Process background tasks for this storage. - This calls :func:`process_measures` to process new measures and - :func:`expunge_metrics` to expunge deleted metrics. + This calls :func:`process_measures` to process new measures :param index: An indexer to be used for querying metrics :param block_size: number of metrics to process @@ -180,7 +183,7 @@ class StorageDriver(object): on error :type sync: bool """ - LOG.debug("Processing new and to delete measures") + LOG.debug("Processing new measures") try: self.process_measures(index, block_size, sync) except Exception: @@ -188,16 +191,16 @@ class StorageDriver(object): raise LOG.error("Unexpected error during measures processing", exc_info=True) - LOG.debug("Expunging deleted metrics") - try: - self.expunge_metrics(index, sync) - except Exception: - if sync: - raise - LOG.error("Unexpected error during deleting metrics", - exc_info=True) def expunge_metrics(self, index, sync=False): + """Remove deleted metrics + + :param index: An indexer to be used for querying metrics + :param sync: If True, then delete everything synchronously and raise + on error + :type sync: bool + """ + metrics_to_expunge = index.list_metrics(status='delete') for m in metrics_to_expunge: try: diff --git a/gnocchi/tests/test_storage.py b/gnocchi/tests/test_storage.py index c36a16c9..5b4737c4 100644 --- a/gnocchi/tests/test_storage.py +++ b/gnocchi/tests/test_storage.py @@ -23,6 +23,7 @@ import six.moves from gnocchi import archive_policy from gnocchi import carbonara +from gnocchi import indexer from 
gnocchi import storage from gnocchi.storage import _carbonara from gnocchi.storage import null @@ -89,6 +90,16 @@ class TestStorageDriver(tests_base.TestCase): self.storage.delete_metric(self.metric) self.storage.process_background_tasks(self.index, sync=True) + def test_delete_expunge_metric(self): + self.storage.add_measures(self.metric, [ + storage.Measure(datetime.datetime(2014, 1, 1, 12, 0, 1), 69), + ]) + self.storage.process_background_tasks(self.index, sync=True) + self.index.delete_metric(self.metric.id) + self.storage.expunge_metrics(self.index, sync=True) + self.assertRaises(indexer.NoSuchMetric, self.index.delete_metric, + self.metric.id) + def test_measures_reporting(self): report = self.storage.measures_report(True) self.assertIsInstance(report, dict) -- GitLab From a7713222e8c7fd115295697ff2fa461fa8d27f81 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Wed, 22 Jun 2016 18:16:18 +0200 Subject: [PATCH 0264/1483] carbonara: compress all TimeSerie classes using LZ4 This fixes the compression not being used by previous version of Gnocchi due to a misimplementation of the serializing code. 
Change-Id: I6d0c45fc4987c5b84dd5a4664b8065498513d080 --- gnocchi/carbonara.py | 15 ++++++--------- 1 file changed, 6 insertions(+), 9 deletions(-) diff --git a/gnocchi/carbonara.py b/gnocchi/carbonara.py index 944b1ffc..affef246 100644 --- a/gnocchi/carbonara.py +++ b/gnocchi/carbonara.py @@ -79,10 +79,14 @@ class SerializableMixin(object): @classmethod def unserialize(cls, data): - return cls.from_dict(msgpack.loads(data, encoding='utf-8')) + try: + uncompressed = lz4.loads(data) + except ValueError: + uncompressed = data + return cls.from_dict(msgpack.loads(uncompressed, encoding='utf-8')) def serialize(self): - return msgpack.dumps(self.to_dict()) + return lz4.dumps(msgpack.dumps(self.to_dict())) class TimeSerie(SerializableMixin): @@ -430,13 +434,6 @@ class AggregatedTimeSerie(TimeSerie): 'values': values, } - @classmethod - def unserialize(cls, data): - return cls.from_dict(msgpack.loads(lz4.loads(data), encoding='utf-8')) - - def serialize(self): - return lz4.dumps(msgpack.dumps(self.to_dict())) - def _truncate(self): """Truncate the timeserie.""" if self.max_size is not None: -- GitLab From 778affc1edbaebf8faddca2fcaac3aeca19f02d2 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Thu, 19 May 2016 17:05:19 +0200 Subject: [PATCH 0265/1483] track resource_type creation/deletion state The creation and deletion of a resource_type cannot be done in atomic manner (within a transaction). We cannot use Sqlalchemy session and metadata.create_all(). And even we can, mysql doesn't support transactional DDL operation. To workaround this, the change tracks the state of the resource_type tables creation/deletion, with a state column. This allows to release the ResourceClassMapper lock during creation and deletion of a resource type, and reduce the occurrence of deadlock when too many threads compete to get the sqlalchemy models of a resource type. 
Change-Id: I0ce117b22b50c57bbf5713649eb2af51228e156c --- gnocchi/indexer/__init__.py | 11 + ...7e6f9d542f8b_resource_type_state_column.py | 43 +++ gnocchi/indexer/sqlalchemy.py | 247 ++++++++++++------ gnocchi/indexer/sqlalchemy_base.py | 7 + .../indexer/sqlalchemy/test_migrations.py | 33 ++- gnocchi/tests/test_indexer.py | 63 +++++ 6 files changed, 319 insertions(+), 85 deletions(-) create mode 100644 gnocchi/indexer/alembic/versions/7e6f9d542f8b_resource_type_state_column.py diff --git a/gnocchi/indexer/__init__.py b/gnocchi/indexer/__init__.py index 151ee50d..89c3d5c8 100644 --- a/gnocchi/indexer/__init__.py +++ b/gnocchi/indexer/__init__.py @@ -141,6 +141,17 @@ class ResourceTypeInUse(IndexerException): self.resource_type = resource_type +class UnexpectedResourceTypeState(IndexerException): + """Error raised when an resource type state is not expected.""" + def __init__(self, resource_type, expected_state, state): + super(UnexpectedResourceTypeState, self).__init__( + "Resource type %s state is %s (expected: %s)" % ( + resource_type, state, expected_state)) + self.resource_type = resource_type + self.expected_state = expected_state + self.state = state + + class NoSuchArchivePolicyRule(IndexerException): """Error raised when an archive policy rule does not exist.""" def __init__(self, archive_policy_rule): diff --git a/gnocchi/indexer/alembic/versions/7e6f9d542f8b_resource_type_state_column.py b/gnocchi/indexer/alembic/versions/7e6f9d542f8b_resource_type_state_column.py new file mode 100644 index 00000000..9b3a88ff --- /dev/null +++ b/gnocchi/indexer/alembic/versions/7e6f9d542f8b_resource_type_state_column.py @@ -0,0 +1,43 @@ +# Copyright 2016 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""resource_type state column + +Revision ID: 7e6f9d542f8b +Revises: c62df18bf4ee +Create Date: 2016-05-19 16:52:58.939088 + +""" + +from alembic import op +import sqlalchemy as sa + +# revision identifiers, used by Alembic. +revision = '7e6f9d542f8b' +down_revision = 'c62df18bf4ee' +branch_labels = None +depends_on = None + + +def upgrade(): + states = ("active", "creating", "creation_error", "deleting", + "deletion_error") + enum = sa.Enum(*states, name="resource_type_state_enum") + enum.create(op.get_bind(), checkfirst=False) + op.add_column("resource_type", + sa.Column('state', enum, nullable=False, + server_default="creating")) + rt = sa.sql.table('resource_type', sa.sql.column('state', enum)) + op.execute(rt.update().values(state="active")) diff --git a/gnocchi/indexer/sqlalchemy.py b/gnocchi/indexer/sqlalchemy.py index 61faa9c1..ebcf9581 100644 --- a/gnocchi/indexer/sqlalchemy.py +++ b/gnocchi/indexer/sqlalchemy.py @@ -112,7 +112,6 @@ class ResourceClassMapper(object): def __init__(self): self._cache = {'generic': {'resource': base.Resource, 'history': base.ResourceHistory}} - self._lock = threading.RLock() @staticmethod def _build_class_mappers(resource_type, baseclass=None): @@ -123,88 +122,103 @@ class ResourceClassMapper(object): resource_ext = type( str("%s_resource" % tablename), (baseclass, base.ResourceExtMixin, base.Resource), - {"__tablename__": tablename}) + {"__tablename__": tablename, "extend_existing": True}) resource_history_ext = type( str("%s_history" % tablename), (baseclass, base.ResourceHistoryExtMixin, 
base.ResourceHistory), - {"__tablename__": ("%s_history" % tablename)}) + {"__tablename__": ("%s_history" % tablename), + "extend_existing": True}) return {'resource': resource_ext, 'history': resource_history_ext} def get_classes(self, resource_type): - # NOTE(sileht): Most of the times we can bypass the lock so do it + # NOTE(sileht): We don't care about concurrency here because we allow + # sqlalchemy to override its global object with extend_existing=True + # this is safe because classname and tablename are uuid. try: return self._cache[resource_type.tablename] except KeyError: - pass - # TODO(sileht): if the table doesn't exis - with self._lock: - try: - return self._cache[resource_type.tablename] - except KeyError: - mapper = self._build_class_mappers(resource_type) - self._cache[resource_type.tablename] = mapper - return mapper + mapper = self._build_class_mappers(resource_type) + self._cache[resource_type.tablename] = mapper + return mapper @retry_on_deadlock def map_and_create_tables(self, resource_type, facade): - with self._lock: - # NOTE(sileht): map this resource_type to have - # Base.metadata filled with sa.Table objects - mappers = self.get_classes(resource_type) - tables = [Base.metadata.tables[klass.__tablename__] - for klass in mappers.values()] - try: - with facade.writer_connection() as connection: - Base.metadata.create_all(connection, tables=tables) - except exception.DBError as e: - # HACK(jd) Sometimes, PostgreSQL raises an error such as - # "current transaction is aborted, commands ignored until end - # of transaction block" on its own catalog, so we need to - # retry, but this is not caught by oslo.db as a deadlock. This - # is likely because when we use Base.metadata.create_all(), - # sqlalchemy itself gets an error it does not catch or - # something. So this is paperover I guess. 
- inn_e = e.inner_exception - if (psycopg2 - and isinstance(inn_e, sqlalchemy.exc.InternalError) - and isinstance(inn_e.orig, psycopg2.InternalError) + if resource_type.state != "creating": + raise RuntimeError("map_and_create_tables must be called in state " + "creating") + + mappers = self.get_classes(resource_type) + tables = [Base.metadata.tables[klass.__tablename__] + for klass in mappers.values()] + + try: + with facade.writer_connection() as connection: + Base.metadata.create_all(connection, tables=tables) + except exception.DBError as e: + # HACK(jd) Sometimes, PostgreSQL raises an error such as + # "current transaction is aborted, commands ignored until end + # of transaction block" on its own catalog, so we need to + # retry, but this is not caught by oslo.db as a deadlock. This + # is likely because when we use Base.metadata.create_all(), + # sqlalchemy itself gets an error it does not catch or + # something. So this is paperover I guess. + inn_e = e.inner_exception + if (psycopg2 + and isinstance(inn_e, sqlalchemy.exc.InternalError) + and isinstance(inn_e.orig, psycopg2.InternalError) # current transaction is aborted - and inn_e.orig.pgcode == '25P02'): - raise exception.RetryRequest(e) - raise + and inn_e.orig.pgcode == '25P02'): + raise exception.RetryRequest(e) + raise + + # NOTE(sileht): no need to protect the _cache with a lock + # get_classes cannot be called in state creating + self._cache[resource_type.tablename] = mappers def unmap_and_delete_tables(self, resource_type, connection): - with self._lock: - # NOTE(sileht): map this resource_type to have - # Base.metadata filled with sa.Table objects - mappers = self.get_classes(resource_type) - tables = [Base.metadata.tables[klass.__tablename__] - for klass in mappers.values()] - - if connection is not None: - # NOTE(sileht): Base.metadata.drop_all doesn't - # issue CASCADE stuffs correctly at least on postgresql - # We drop foreign keys manually to not lock the destination - # table for too long 
during drop table. - # It's safe to not use a transaction since - # the resource_type table is already cleaned and committed - # so this code cannot be triggerred anymore for this - # resource_type - for table in tables: - for fk in table.foreign_key_constraints: - self._safe_execute( - connection, - sqlalchemy.schema.DropConstraint(fk)) - for table in tables: - self._safe_execute(connection, - sqlalchemy.schema.DropTable(table)) - - # TODO(sileht): Remove this resource on other workers - # by using expiration on cache ? + if resource_type.state != "deleting": + raise RuntimeError("unmap_and_delete_tables must be called in " + "state deleting") + + mappers = self.get_classes(resource_type) + del self._cache[resource_type.tablename] + + tables = [Base.metadata.tables[klass.__tablename__] + for klass in mappers.values()] + + if connection is not None: + # NOTE(sileht): Base.metadata.drop_all doesn't + # issue CASCADE stuffs correctly at least on postgresql + # We drop foreign keys manually to not lock the destination + # table for too long during drop table. + # It's safe to not use a transaction since + # the resource_type table is already cleaned and commited + # so this code cannot be triggerred anymore for this + # resource_type for table in tables: - Base.metadata.remove(table) - del self._cache[resource_type.tablename] + for fk in table.foreign_key_constraints: + self._safe_execute( + connection, + sqlalchemy.schema.DropConstraint(fk)) + for table in tables: + self._safe_execute(connection, + sqlalchemy.schema.DropTable(table)) + + # NOTE(sileht): If something goes wrong here, we are currently + # fucked, that why we expose the state to the superuser. + # TODO(sileht): The idea is to make the delete resource_type more + # like a cleanup method, I mean we should don't fail if the + # constraint have already been dropped or the table have already + # been deleted. 
So, when the superuser have fixed it's backend + # issue, it can rerun 'DELETE ../resource_type/foobar' even the + # state is already error and if we are sure all underlying + # resources have been cleaned we really deleted the resource_type. + + # TODO(sileht): Remove this resource on other workers + # by using expiration on cache ? + for table in tables: + Base.metadata.remove(table) @retry_on_deadlock def _safe_execute(self, connection, works): @@ -284,12 +298,24 @@ class SQLAlchemyIndexer(indexer.IndexerDriver): for rt in base.get_legacy_resource_types(): if not (rt.name == "generic" or create_legacy_resource_types): continue + try: with self.facade.writer() as session: session.add(rt) except exception.DBDuplicateEntry: - pass - self._RESOURCE_TYPE_MANAGER.map_and_create_tables(rt, self.facade) + continue + + if rt.name != "generic": + try: + self._RESOURCE_TYPE_MANAGER.map_and_create_tables( + rt, self.facade) + except Exception: + self._set_resource_type_state(rt.name, "creation_error") + LOG.exception('Fail to create tables for ' + 'resource_type "%s"', rt.name) + continue + + self._set_resource_type_state(rt.name, "active") # NOTE(jd) We can have deadlock errors either here or later in # map_and_create_tables(). 
We can't decorate create_resource_type() @@ -314,7 +340,8 @@ class SQLAlchemyIndexer(indexer.IndexerDriver): tablename = "rt_%s" % uuid.uuid4().hex resource_type = ResourceType(name=resource_type.name, tablename=tablename, - attributes=resource_type.attributes) + attributes=resource_type.attributes, + state="creating") # NOTE(sileht): ensure the driver is able to store the request # resource_type @@ -322,9 +349,17 @@ class SQLAlchemyIndexer(indexer.IndexerDriver): self._add_resource_type(resource_type) - self._RESOURCE_TYPE_MANAGER.map_and_create_tables(resource_type, - self.facade) + try: + self._RESOURCE_TYPE_MANAGER.map_and_create_tables(resource_type, + self.facade) + except Exception: + # NOTE(sileht): We fail the DDL, we have no way to automatically + # recover, just set a particular state + self._set_resource_type_state(resource_type.name, "creation_error") + raise + self._set_resource_type_state(resource_type.name, "active") + resource_type.state = "active" return resource_type def get_resource_type(self, name): @@ -337,6 +372,17 @@ class SQLAlchemyIndexer(indexer.IndexerDriver): raise indexer.NoSuchResourceType(name) return resource_type + @retry_on_deadlock + def _set_resource_type_state(self, name, state): + with self.facade.writer() as session: + q = session.query(ResourceType) + q = q.filter(ResourceType.name == name) + update = q.update({'state': state}) + if update == 0: + raise indexer.IndexerException( + "Fail to set resource type state of %s to %s" % + (name, state)) + @staticmethod def get_resource_type_schema(): return base.RESOURCE_TYPE_SCHEMA_MANAGER @@ -357,11 +403,28 @@ class SQLAlchemyIndexer(indexer.IndexerDriver): # both adding the resource_type in our table and calling # map_and_create_tables() :-( @retry_on_deadlock - def _delete_resource_type(self, name): + def _mark_as_deleting_resource_type(self, name): try: with self.facade.writer() as session: - resource_type = self._get_resource_type(session, name) - session.delete(resource_type) 
+ rt = self._get_resource_type(session, name) + if rt.state != "active": + raise indexer.UnexpectedResourceTypeState( + name, "active", rt.state) + session.delete(rt) + + # FIXME(sileht): Why do I need to flush here !!! + # I want remove/add in the same transaction !!! + session.flush() + + # NOTE(sileht): delete and recreate to: + # * raise duplicate constraints + # * ensure we do not create a new resource type + # with the same name while we destroy the tables next + rt = ResourceType(name=rt.name, + tablename=rt.tablename, + state="deleting", + attributes=rt.attributes) + session.add(rt) except exception.DBReferenceError as e: if (e.constraint in [ 'fk_resource_resource_type_name', @@ -369,20 +432,40 @@ class SQLAlchemyIndexer(indexer.IndexerDriver): 'fk_rh_resource_type_name']): raise indexer.ResourceTypeInUse(name) raise - return resource_type + return rt + + @retry_on_deadlock + def _delete_resource_type(self, name): + # Really delete the resource type, no resource can be linked to it + # Because we cannot add a resource to a resource_type not in 'active' + # state + with self.facade.writer() as session: + resource_type = self._get_resource_type(session, name) + session.delete(resource_type) def delete_resource_type(self, name): if name == "generic": raise indexer.ResourceTypeInUse(name) - resource_type = self._delete_resource_type(name) + rt = self._mark_as_deleting_resource_type(name) + + try: + with self.facade.writer_connection() as connection: + self._RESOURCE_TYPE_MANAGER.unmap_and_delete_tables( + rt, connection) + except Exception: + # NOTE(sileht): We fail the DDL, we have no way to automatically + # recover, just set a particular state + self._set_resource_type_state(rt.name, "deletion_error") + raise - with self.facade.writer_connection() as connection: - self._RESOURCE_TYPE_MANAGER.unmap_and_delete_tables(resource_type, - connection) + self._delete_resource_type(name) def _resource_type_to_classes(self, session, name): resource_type = 
self._get_resource_type(session, name) + if resource_type.state != "active": + raise indexer.UnexpectedResourceTypeState( + name, "active", resource_type.state) return self._RESOURCE_TYPE_MANAGER.get_classes(resource_type) def list_archive_policies(self): @@ -823,6 +906,12 @@ class SQLAlchemyIndexer(indexer.IndexerDriver): # No need for a second query all_resources.extend(resources) else: + # TODO(sileht): _resource_type_to_classes can raise + # UnexpectedResourceTypeState or NoSuchResourceType if + # all resources of 'type' and the resource_type 'type' + # is deleted between q.all() and here. This don't + # have many change to occurs. An enhancement can be to + # drop from all_resources the delete resource types. if is_history: target_cls = self._resource_type_to_classes( session, type)['history'] diff --git a/gnocchi/indexer/sqlalchemy_base.py b/gnocchi/indexer/sqlalchemy_base.py index 0646ca7c..09788214 100644 --- a/gnocchi/indexer/sqlalchemy_base.py +++ b/gnocchi/indexer/sqlalchemy_base.py @@ -214,6 +214,7 @@ def get_legacy_resource_types(): attributes) resource_types.append(ResourceType(name=name, tablename=tablename, + state="creating", attributes=attrs)) return resource_types @@ -241,6 +242,12 @@ class ResourceType(Base, GnocchiBase, resource_type.ResourceType): nullable=False) tablename = sqlalchemy.Column(sqlalchemy.String(35), nullable=False) attributes = sqlalchemy.Column(ResourceTypeAttributes) + state = sqlalchemy.Column(sqlalchemy.Enum("active", "creating", + "creation_error", "deleting", + "deletion_error", + name="resource_type_state_enum"), + nullable=False, + server_default="creating") def to_baseclass(self): cols = {} diff --git a/gnocchi/tests/indexer/sqlalchemy/test_migrations.py b/gnocchi/tests/indexer/sqlalchemy/test_migrations.py index df6adfcb..1e7e6bd6 100644 --- a/gnocchi/tests/indexer/sqlalchemy/test_migrations.py +++ b/gnocchi/tests/indexer/sqlalchemy/test_migrations.py @@ -17,6 +17,7 @@ import abc import mock from oslo_db.sqlalchemy 
import test_migrations import six +import sqlalchemy as sa import sqlalchemy_utils from gnocchi import indexer @@ -46,6 +47,10 @@ class ModelsMigrationsSync( self.index.connect() self.index.upgrade(nocreate=True, create_legacy_resource_types=True) + def tearDown(self): + sqlalchemy_utils.drop_database(self.conf.indexer.url) + super(ModelsMigrationsSync, self).tearDown() + @staticmethod def get_metadata(): return sqlalchemy_base.Base.metadata @@ -53,10 +58,26 @@ class ModelsMigrationsSync( def get_engine(self): return self.index.get_engine() - @staticmethod - def db_sync(engine): - pass + def db_sync(self, engine): + # NOTE(sileht): We ensure all resource type sqlalchemy model are loaded + # in this process + for rt in self.index.list_resource_types(): + if rt.state == "active": + self.index._RESOURCE_TYPE_MANAGER.get_classes(rt) - def tearDown(self): - sqlalchemy_utils.drop_database(self.conf.indexer.url) - super(ModelsMigrationsSync, self).tearDown() + def filter_metadata_diff(self, diff): + tables_to_keep = [] + for rt in self.index.list_resource_types(): + if rt.name.startswith("indexer_test"): + tables_to_keep.extend([rt.tablename, + "%s_history" % rt.tablename]) + new_diff = [] + for line in diff: + if len(line) >= 2: + item = line[1] + # NOTE(sileht): skip resource types created for tests + if (isinstance(item, sa.Table) + and item.name in tables_to_keep): + continue + new_diff.append(line) + return new_diff diff --git a/gnocchi/tests/test_indexer.py b/gnocchi/tests/test_indexer.py index 566d9382..8d6dc357 100644 --- a/gnocchi/tests/test_indexer.py +++ b/gnocchi/tests/test_indexer.py @@ -17,12 +17,18 @@ import datetime import operator import uuid +import mock + from gnocchi import archive_policy from gnocchi import indexer from gnocchi.tests import base as tests_base from gnocchi import utils +class MockException(Exception): + pass + + class TestIndexer(tests_base.TestCase): def test_get_driver(self): driver = indexer.get_driver(self.conf) @@ -1035,6 +1041,7 
@@ class TestIndexerDriver(tests_base.TestCase): self.assertEqual("string", rtype.attributes[0].typename) self.assertEqual(15, rtype.attributes[0].max_length) self.assertEqual(2, rtype.attributes[0].min_length) + self.assertEqual("active", rtype.state) # List rtypes = self.index.list_resource_types() @@ -1069,3 +1076,59 @@ class TestIndexerDriver(tests_base.TestCase): self.assertRaises(indexer.NoSuchResourceType, self.index.delete_resource_type, "indexer_test") + + def _get_rt_state(self, name): + return self.index.get_resource_type(name).state + + def test_resource_type_unexpected_creation_error(self): + mgr = self.index.get_resource_type_schema() + rtype = mgr.resource_type_from_dict("indexer_test_fail", { + "col1": {"type": "string", "required": True, + "min_length": 2, "max_length": 15} + }) + + states = {'before': None, + 'after': None} + + def map_and_create_mock(rt, conn): + states['before'] = self._get_rt_state("indexer_test_fail") + raise MockException("boom!") + + with mock.patch.object(self.index._RESOURCE_TYPE_MANAGER, + "map_and_create_tables", + side_effect=map_and_create_mock): + self.assertRaises(MockException, + self.index.create_resource_type, + rtype) + states['after'] = self._get_rt_state('indexer_test_fail') + + self.assertEqual([('after', 'creation_error'), + ('before', 'creating')], + sorted(states.items())) + + def test_resource_type_unexpected_deleting_error(self): + mgr = self.index.get_resource_type_schema() + rtype = mgr.resource_type_from_dict("indexer_test_fail2", { + "col1": {"type": "string", "required": True, + "min_length": 2, "max_length": 15} + }) + self.index.create_resource_type(rtype) + + states = {'before': None, + 'after': None} + + def map_and_create_mock(rt, conn): + states['before'] = self._get_rt_state("indexer_test_fail2") + raise MockException("boom!") + + with mock.patch.object(self.index._RESOURCE_TYPE_MANAGER, + "unmap_and_delete_tables", + side_effect=map_and_create_mock): + self.assertRaises(MockException, + 
self.index.delete_resource_type, + rtype.name) + states['after'] = self._get_rt_state('indexer_test_fail2') + + self.assertEqual([('after', 'deletion_error'), + ('before', 'deleting')], + sorted(states.items())) -- GitLab From 439d5a30c79315bd37c9a19c267231c35a303603 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Fri, 20 May 2016 13:40:48 +0200 Subject: [PATCH 0266/1483] Expose resource type state to the API Change-Id: Ic382a430a300190a3c343528b63192db270b4659 --- doc/source/rest.j2 | 11 +++++++++ gnocchi/resource_type.py | 10 ++++---- gnocchi/rest/__init__.py | 3 +++ .../tests/gabbi/gabbits/resource-type.yaml | 3 +++ gnocchi/tests/test_indexer.py | 24 +++++++++---------- gnocchi/tests/test_rest.py | 4 ++-- 6 files changed, 37 insertions(+), 18 deletions(-) diff --git a/doc/source/rest.j2 b/doc/source/rest.j2 index 9eea40c6..681c7d9a 100644 --- a/doc/source/rest.j2 +++ b/doc/source/rest.j2 @@ -347,6 +347,17 @@ It can also be deleted if no more resources are associated to it: {{ scenarios['delete-resource-type']['doc'] }} +Creating resource type means creation of new tables on the indexer backend. +This is heavy operation that will lock some tables for a short amount of times. +When the resource type is created, its initial `state` is `creating`. When the +new tables have been created, the state switches to `active` and the new +resource type is ready to be used. If something unexpected occurs during this +step, the state switches to `creation_error`. + +The same behavior occurs when the resource type is deleted. The state starts to +switch to `deleting`, the resource type is no more usable. Then the tables are +removed and the finally the resource_type is really deleted from the database. +If some unexpected occurs the state switches to `deletion_error`. 
Searching for resources ======================= diff --git a/gnocchi/resource_type.py b/gnocchi/resource_type.py index 093f3acf..367acb88 100644 --- a/gnocchi/resource_type.py +++ b/gnocchi/resource_type.py @@ -171,14 +171,15 @@ class ResourceTypeSchemaManager(stevedore.ExtensionManager): self[attr["type"]].plugin(name=name, **attr) for name, attr in attributes.items()) - def resource_type_from_dict(self, name, attributes): - return ResourceType(name, self.attributes_from_dict(attributes)) + def resource_type_from_dict(self, name, attributes, state): + return ResourceType(name, self.attributes_from_dict(attributes), state) class ResourceType(object): - def __init__(self, name, attributes): + def __init__(self, name, attributes, state): self.name = name self.attributes = attributes + self.state = state @property def schema(self): @@ -192,4 +193,5 @@ class ResourceType(object): def jsonify(self): return {"name": self.name, - "attributes": self.attributes.jsonify()} + "attributes": self.attributes.jsonify(), + "state": self.state} diff --git a/gnocchi/rest/__init__.py b/gnocchi/rest/__init__.py index 03ece176..80f2ff42 100644 --- a/gnocchi/rest/__init__.py +++ b/gnocchi/rest/__init__.py @@ -875,10 +875,13 @@ class ResourceTypesController(rest.RestController): def post(self): schema = pecan.request.indexer.get_resource_type_schema() body = deserialize_and_validate(schema) + body["state"] = "creating" + try: rt = schema.resource_type_from_dict(**body) except resource_type.InvalidResourceAttributeName as e: abort(400, e) + enforce("create resource type", body) try: rt = pecan.request.indexer.create_resource_type(rt) diff --git a/gnocchi/tests/gabbi/gabbits/resource-type.yaml b/gnocchi/tests/gabbi/gabbits/resource-type.yaml index 664d64e8..2204bbaa 100644 --- a/gnocchi/tests/gabbi/gabbits/resource-type.yaml +++ b/gnocchi/tests/gabbi/gabbits/resource-type.yaml @@ -93,6 +93,7 @@ tests: status: 201 response_json_paths: $.name: my_custom_resource + $.state: active 
$.attributes: name: type: string @@ -132,11 +133,13 @@ tests: response_json_paths: $.`len`: 16 $.[11].name: my_custom_resource + $.[11].state: active - name: get the custom resource type GET: /v1/resource_type/my_custom_resource response_json_paths: $.name: my_custom_resource + $.state: active $.attributes: name: type: string diff --git a/gnocchi/tests/test_indexer.py b/gnocchi/tests/test_indexer.py index 8d6dc357..b5830c3c 100644 --- a/gnocchi/tests/test_indexer.py +++ b/gnocchi/tests/test_indexer.py @@ -449,7 +449,7 @@ class TestIndexerDriver(tests_base.TestCase): rtype = mgr.resource_type_from_dict(resource_type, { "col1": {"type": "string", "required": True, "min_length": 2, "max_length": 15} - }) + }, 'creating') r1 = uuid.uuid4() user = str(uuid.uuid4()) project = str(uuid.uuid4()) @@ -468,7 +468,7 @@ class TestIndexerDriver(tests_base.TestCase): rtype = mgr.resource_type_from_dict(resource_type, { "col1": {"type": "string", "required": True, "min_length": 2, "max_length": 15} - }) + }, 'creating') self.index.create_resource_type(rtype) r1 = uuid.uuid4() user = str(uuid.uuid4()) @@ -501,7 +501,7 @@ class TestIndexerDriver(tests_base.TestCase): rtype = mgr.resource_type_from_dict(resource_type, { "col1": {"type": "string", "required": False, "min_length": 1, "max_length": 2}, - }) + }, 'creating') self.index.create_resource_type(rtype) r1 = uuid.uuid4() self.index.create_resource(resource_type, r1, @@ -579,7 +579,7 @@ class TestIndexerDriver(tests_base.TestCase): "min_length": 1, "max_length": 20, "required": True} - })) + }, 'creating')) r1 = uuid.uuid4() created = self.index.create_resource(resource_type, r1, str(uuid.uuid4()), @@ -637,7 +637,7 @@ class TestIndexerDriver(tests_base.TestCase): mgr = self.index.get_resource_type_schema() resource_type = str(uuid.uuid4()) self.index.create_resource_type( - mgr.resource_type_from_dict(resource_type, {})) + mgr.resource_type_from_dict(resource_type, {}, 'creating')) r2 = uuid.uuid4() i = 
self.index.create_resource(resource_type, r2, user, project, @@ -702,7 +702,7 @@ class TestIndexerDriver(tests_base.TestCase): mgr = self.index.get_resource_type_schema() resource_type = str(uuid.uuid4()) self.index.create_resource_type( - mgr.resource_type_from_dict(resource_type, {})) + mgr.resource_type_from_dict(resource_type, {}, 'creating')) r2 = uuid.uuid4() i = self.index.create_resource(resource_type, r2, str(uuid.uuid4()), str(uuid.uuid4())) @@ -750,7 +750,7 @@ class TestIndexerDriver(tests_base.TestCase): "min_length": 1, "max_length": 20, "required": False}, - })) + }, 'creating')) r = self.index.list_resources( resource_type, attribute_filter={"=": {"flavor_id": 1.0}}) self.assertEqual(0, len(r)) @@ -845,7 +845,7 @@ class TestIndexerDriver(tests_base.TestCase): mgr.resource_type_from_dict(resource_type, { "col1": {"type": "string", "required": True, "min_length": 2, "max_length": 15} - })) + }, 'creating')) self.index.create_metric(e1, user, project, archive_policy_name="low") @@ -896,7 +896,7 @@ class TestIndexerDriver(tests_base.TestCase): mgr = self.index.get_resource_type_schema() resource_type = str(uuid.uuid4()) self.index.create_resource_type( - mgr.resource_type_from_dict(resource_type, {})) + mgr.resource_type_from_dict(resource_type, {}, 'creating')) i = self.index.create_resource( resource_type, r2, user, project, started_at=utils.datetime_utc(2000, 1, 1, 23, 23, 23), @@ -1025,7 +1025,7 @@ class TestIndexerDriver(tests_base.TestCase): rtype = mgr.resource_type_from_dict("indexer_test", { "col1": {"type": "string", "required": True, "min_length": 2, "max_length": 15} - }) + }, "creating") # Create self.index.create_resource_type(rtype) @@ -1085,7 +1085,7 @@ class TestIndexerDriver(tests_base.TestCase): rtype = mgr.resource_type_from_dict("indexer_test_fail", { "col1": {"type": "string", "required": True, "min_length": 2, "max_length": 15} - }) + }, "creating") states = {'before': None, 'after': None} @@ -1111,7 +1111,7 @@ class 
TestIndexerDriver(tests_base.TestCase): rtype = mgr.resource_type_from_dict("indexer_test_fail2", { "col1": {"type": "string", "required": True, "min_length": 2, "max_length": 15} - }) + }, "creating") self.index.create_resource_type(rtype) states = {'before': None, diff --git a/gnocchi/tests/test_rest.py b/gnocchi/tests/test_rest.py index f9888ec3..c17ea170 100644 --- a/gnocchi/tests/test_rest.py +++ b/gnocchi/tests/test_rest.py @@ -512,7 +512,7 @@ class MetricTest(RestTest): "min_length": 1, "max_length": 40, "required": True} - })) + }, 'creating')) attributes = { "server_group": str(uuid.uuid4()), @@ -626,7 +626,7 @@ class ResourceTest(RestTest): "min_length": 1, "max_length": 40, "required": True} - })) + }, "creating")) self.resource['type'] = self.resource_type @mock.patch.object(utils, 'utcnow') -- GitLab From c2ef2b0407953af353216f62b2662323d7aa897a Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Mon, 27 Jun 2016 10:04:02 +0200 Subject: [PATCH 0267/1483] ceph: change make method names for new measures Change-Id: Idf1776b4a849bdf693cff35c11d3981026c869a3 --- gnocchi/storage/__init__.py | 6 +++--- gnocchi/storage/_carbonara.py | 6 +++--- gnocchi/storage/ceph.py | 2 +- gnocchi/storage/file.py | 2 +- gnocchi/storage/swift.py | 2 +- 5 files changed, 9 insertions(+), 9 deletions(-) diff --git a/gnocchi/storage/__init__.py b/gnocchi/storage/__init__.py index 4fa03431..b438e916 100644 --- a/gnocchi/storage/__init__.py +++ b/gnocchi/storage/__init__.py @@ -175,7 +175,7 @@ class StorageDriver(object): def process_background_tasks(self, index, block_size=128, sync=False): """Process background tasks for this storage. 
- This calls :func:`process_measures` to process new measures + This calls :func:`process_new_measures` to process new measures :param index: An indexer to be used for querying metrics :param block_size: number of metrics to process @@ -185,7 +185,7 @@ class StorageDriver(object): """ LOG.debug("Processing new measures") try: - self.process_measures(index, block_size, sync) + self.process_new_measures(index, block_size, sync) except Exception: if sync: raise @@ -228,7 +228,7 @@ class StorageDriver(object): raise exceptions.NotImplementedError @staticmethod - def process_measures(indexer=None, block_size=None, sync=False): + def process_new_measures(indexer=None, block_size=None, sync=False): """Process added measures in background. Some drivers might need to have a background task running that process diff --git a/gnocchi/storage/_carbonara.py b/gnocchi/storage/_carbonara.py index c6499298..d136593f 100644 --- a/gnocchi/storage/_carbonara.py +++ b/gnocchi/storage/_carbonara.py @@ -224,11 +224,11 @@ class CarbonaraBasedStorage(storage.StorageDriver): oldest_point_to_keep) def add_measures(self, metric, measures): - self._store_measures(metric, msgpackutils.dumps( + self._store_new_measures(metric, msgpackutils.dumps( list(map(tuple, measures)))) @staticmethod - def _store_measures(metric, data): + def _store_new_measures(metric, data): raise NotImplementedError @staticmethod @@ -317,7 +317,7 @@ class CarbonaraBasedStorage(storage.StorageDriver): self._check_for_metric_upgrade, ((metric,) for metric in index.list_metrics())) - def process_measures(self, indexer, block_size, sync=False): + def process_new_measures(self, indexer, block_size, sync=False): metrics_to_process = self._list_metric_with_measures_to_process( block_size, full=sync) metrics = indexer.list_metrics(ids=metrics_to_process) diff --git a/gnocchi/storage/ceph.py b/gnocchi/storage/ceph.py index 5a4f7dac..0c63e5ea 100644 --- a/gnocchi/storage/ceph.py +++ b/gnocchi/storage/ceph.py @@ -125,7 +125,7 @@ 
class CephStorage(_carbonara.CarbonaraBasedStorage): for xattr in xattrs: ioctx.rm_xattr(self.MEASURE_PREFIX, xattr) - def _store_measures(self, metric, data): + def _store_new_measures(self, metric, data): # NOTE(sileht): list all objects in a pool is too slow with # many objects (2min for 20000 objects in 50osds cluster), # and enforce us to iterrate over all objects diff --git a/gnocchi/storage/file.py b/gnocchi/storage/file.py index 6ea11326..9da136ff 100644 --- a/gnocchi/storage/file.py +++ b/gnocchi/storage/file.py @@ -111,7 +111,7 @@ class FileStorage(_carbonara.CarbonaraBasedStorage): if e.errno != errno.EEXIST: raise - def _store_measures(self, metric, data): + def _store_new_measures(self, metric, data): tmpfile = self._get_tempfile() tmpfile.write(data) tmpfile.close() diff --git a/gnocchi/storage/swift.py b/gnocchi/storage/swift.py index 1f133adc..f9905b33 100644 --- a/gnocchi/storage/swift.py +++ b/gnocchi/storage/swift.py @@ -110,7 +110,7 @@ class SwiftStorage(_carbonara.CarbonaraBasedStorage): if resp['status'] == 204: raise storage.MetricAlreadyExists(metric) - def _store_measures(self, metric, data): + def _store_new_measures(self, metric, data): now = datetime.datetime.utcnow().strftime("_%Y%m%d_%H:%M:%S") self.swift.put_object( self.MEASURE_PREFIX, -- GitLab From 78a37f04285e6e7d1f7127e10205b64877e4a87c Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Tue, 24 May 2016 17:16:50 +0200 Subject: [PATCH 0268/1483] Use pbr WSGI script to build gnocchi-api Change-Id: I2298f9cb94a684747f4b4dbc262cdcab7de49175 --- gnocchi/cli.py | 5 ----- gnocchi/opts.py | 9 --------- gnocchi/rest/app.py | 37 +++++-------------------------------- gnocchi/rest/app.wsgi | 7 +------ gnocchi/service.py | 1 - setup.cfg | 4 +++- 6 files changed, 9 insertions(+), 54 deletions(-) diff --git a/gnocchi/cli.py b/gnocchi/cli.py index 29e2e8ec..95628006 100644 --- a/gnocchi/cli.py +++ b/gnocchi/cli.py @@ -26,7 +26,6 @@ import six from gnocchi import archive_policy from gnocchi 
import indexer -from gnocchi.rest import app from gnocchi import service from gnocchi import statsd as statsd_service from gnocchi import storage @@ -67,10 +66,6 @@ def upgrade(): index.create_archive_policy_rule("default", "*", "low") -def api(): - app.build_server() - - def statsd(): statsd_service.start() diff --git a/gnocchi/opts.py b/gnocchi/opts.py index 08c7bdff..7487cc97 100644 --- a/gnocchi/opts.py +++ b/gnocchi/opts.py @@ -36,18 +36,9 @@ def list_opts(): cfg.StrOpt('paste_config', default='api-paste.ini', help='Path to API Paste configuration.'), - cfg.PortOpt('port', - default=8041, - help='The port for the Gnocchi API server.'), - cfg.StrOpt('host', - default='0.0.0.0', - help='The listen IP for the Gnocchi API server.'), cfg.BoolOpt('pecan_debug', default=False, help='Toggle Pecan Debug Middleware.'), - cfg.IntOpt('workers', min=1, - help='Number of workers for Gnocchi API server. ' - 'By default the available number of CPU is used.'), cfg.IntOpt('max_limit', default=1000, help=('The maximum number of items returned in a ' diff --git a/gnocchi/rest/app.py b/gnocchi/rest/app.py index 587d245c..2417166e 100644 --- a/gnocchi/rest/app.py +++ b/gnocchi/rest/app.py @@ -22,7 +22,6 @@ from oslo_policy import policy from paste import deploy import pecan import webob.exc -from werkzeug import serving from gnocchi import exceptions from gnocchi import indexer as gnocchi_indexer @@ -112,16 +111,9 @@ def load_app(conf, appname=None, indexer=None, storage=None, def _setup_app(root, conf, indexer, storage, not_implemented_middleware): - # NOTE(sileht): pecan debug won't work in multi-process environment - pecan_debug = conf.api.pecan_debug - if conf.api.workers != 1 and pecan_debug: - pecan_debug = False - LOG.warning('pecan_debug cannot be enabled, if workers is > 1, ' - 'the value is overrided with False') - app = pecan.make_app( root, - debug=pecan_debug, + debug=conf.api.pecan_debug, hooks=(GnocchiHook(storage, indexer, conf),), guess_content_type_from_ext=False, 
custom_renderers={'json': OsloJSONRenderer}, @@ -133,30 +125,11 @@ def _setup_app(root, conf, indexer, storage, not_implemented_middleware): return app -class WerkzeugApp(object): - # NOTE(sileht): The purpose of this class is only to be used - # with werkzeug to create the app after the werkzeug - # fork gnocchi-api and avoid creation of connection of the - # storage/indexer by the main process. - - def __init__(self, conf): - self.app = None - self.conf = conf - - def __call__(self, environ, start_response): - if self.app is None: - self.app = load_app(conf=self.conf) - return self.app(environ, start_response) - - -def build_server(): - conf = service.prepare_service() - serving.run_simple(conf.api.host, conf.api.port, - WerkzeugApp(conf), - processes=conf.api.workers) - - def app_factory(global_config, **local_conf): global APPCONFIGS appconfig = APPCONFIGS.get(global_config.get('configkey')) return _setup_app(root=local_conf.get('root'), **appconfig) + + +def build_wsgi_app(): + return load_app(service.prepare_service()) diff --git a/gnocchi/rest/app.wsgi b/gnocchi/rest/app.wsgi index 3fbb9c9d..b7fefed1 100644 --- a/gnocchi/rest/app.wsgi +++ b/gnocchi/rest/app.wsgi @@ -17,11 +17,6 @@ See http://pecan.readthedocs.org/en/latest/deployment.html for details. 
""" -from gnocchi import service from gnocchi.rest import app -# Initialize the oslo configuration library and logging -conf = service.prepare_service() -# The pecan debugger cannot be used in wsgi mode -conf.set_default('pecan_debug', False, group='api') -application = app.load_app(conf) +application = app.build_wsgi_app() diff --git a/gnocchi/service.py b/gnocchi/service.py index c13d8b91..9f500caa 100644 --- a/gnocchi/service.py +++ b/gnocchi/service.py @@ -54,7 +54,6 @@ def prepare_service(args=None, conf=None, except NotImplementedError: default_workers = 1 - conf.set_default("workers", default_workers, group="api") conf.set_default("workers", default_workers, group="metricd") conf(args, project='gnocchi', validate_default_values=True, diff --git a/setup.cfg b/setup.cfg index 9bc8e874..82df67c0 100644 --- a/setup.cfg +++ b/setup.cfg @@ -110,12 +110,14 @@ gnocchi.aggregates = moving-average = gnocchi.aggregates.moving_stats:MovingAverage console_scripts = - gnocchi-api = gnocchi.cli:api gnocchi-upgrade = gnocchi.cli:upgrade gnocchi-statsd = gnocchi.cli:statsd gnocchi-metricd = gnocchi.cli:metricd carbonara-dump = gnocchi.carbonara:dump_archive_file +wsgi_scripts = + gnocchi-api = gnocchi.rest.app:build_wsgi_app + oslo.config.opts = gnocchi = gnocchi.opts:list_opts -- GitLab From 932741bd497487c7dabfd9f09cbf17a98b21616f Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Tue, 28 Jun 2016 14:58:06 +0200 Subject: [PATCH 0269/1483] devstack: Fix requirement typo Change-Id: Idc02b2fc75e26880544207563825f9f5ccbaa77b --- devstack/plugin.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/devstack/plugin.sh b/devstack/plugin.sh index 3e409144..6baffa2c 100644 --- a/devstack/plugin.sh +++ b/devstack/plugin.sh @@ -377,7 +377,7 @@ function install_gnocchi { install_gnocchiclient - [ "$GNOCCHI_USE_KEYSTONE" == "True" ] && EXTRA_FLAVOR=,keystonmiddleware + [ "$GNOCCHI_USE_KEYSTONE" == "True" ] && EXTRA_FLAVOR=,keystonemiddleware # We don't use setup_package 
because we don't follow openstack/requirements sudo -H pip install -e "$GNOCCHI_DIR"[test,$GNOCCHI_STORAGE_BACKEND,${DATABASE_TYPE}${EXTRA_FLAVOR}] -- GitLab From d2044e3ce3224ffc64cec60ad2f8096c6bffb1bb Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Tue, 28 Jun 2016 17:32:35 +0200 Subject: [PATCH 0270/1483] Revert "carbonara: compress all TimeSerie classes using LZ4" This reverts commit a7713222e8c7fd115295697ff2fa461fa8d27f81. Let's revert that for now, and implement something better in the future anyway. Change-Id: I57874e3fe0a9bdc2aacffa4c686167b70239f29d --- gnocchi/carbonara.py | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/gnocchi/carbonara.py b/gnocchi/carbonara.py index affef246..944b1ffc 100644 --- a/gnocchi/carbonara.py +++ b/gnocchi/carbonara.py @@ -79,14 +79,10 @@ class SerializableMixin(object): @classmethod def unserialize(cls, data): - try: - uncompressed = lz4.loads(data) - except ValueError: - uncompressed = data - return cls.from_dict(msgpack.loads(uncompressed, encoding='utf-8')) + return cls.from_dict(msgpack.loads(data, encoding='utf-8')) def serialize(self): - return lz4.dumps(msgpack.dumps(self.to_dict())) + return msgpack.dumps(self.to_dict()) class TimeSerie(SerializableMixin): @@ -434,6 +430,13 @@ class AggregatedTimeSerie(TimeSerie): 'values': values, } + @classmethod + def unserialize(cls, data): + return cls.from_dict(msgpack.loads(lz4.loads(data), encoding='utf-8')) + + def serialize(self): + return lz4.dumps(msgpack.dumps(self.to_dict())) + def _truncate(self): """Truncate the timeserie.""" if self.max_size is not None: -- GitLab From fe711a1f528023fafcae407ec93209f11885f509 Mon Sep 17 00:00:00 2001 From: gord chung Date: Tue, 28 Jun 2016 22:54:48 +0000 Subject: [PATCH 0271/1483] simplify model loading pandas.Timestamp is not great from performance pov and when called enough times (possibly thousands), the overhead becomes quite noticeable. 
we should just use to_datetime to build all our timestamps and avoid strange tuple/zip/dict combination. this is about 10x faster regardless of size Change-Id: I2725e25174f13fb18f255777d2bb3ec3e893c0d2 --- gnocchi/carbonara.py | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/gnocchi/carbonara.py b/gnocchi/carbonara.py index 944b1ffc..3227fe85 100644 --- a/gnocchi/carbonara.py +++ b/gnocchi/carbonara.py @@ -129,12 +129,10 @@ class TimeSerie(SerializableMixin): @staticmethod def _timestamps_and_values_from_dict(values): - v = tuple( - zip(*dict( - (pandas.Timestamp(k), v) - for k, v in six.iteritems(values)).items())) + timestamps = pandas.to_datetime(list(values.keys()), unit='ns') + v = list(values.values()) if v: - return v + return timestamps, v return (), () @classmethod -- GitLab From 62913af973563417cb07f5a40af3cdf0d1de7fca Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Fri, 24 Jun 2016 17:13:12 +0200 Subject: [PATCH 0272/1483] ceph: uses only one ioctx Change-Id: Ia3c91d0c85e72a91622a18f85ac860da0b59b635 --- gnocchi/storage/ceph.py | 238 ++++++++++++++++++---------------------- gnocchi/tests/base.py | 4 + 2 files changed, 113 insertions(+), 129 deletions(-) diff --git a/gnocchi/storage/ceph.py b/gnocchi/storage/ceph.py index 0c63e5ea..0237b034 100644 --- a/gnocchi/storage/ceph.py +++ b/gnocchi/storage/ceph.py @@ -85,6 +85,7 @@ class CephStorage(_carbonara.CarbonaraBasedStorage): rados_id=conf.ceph_username, conf=options) self.rados.connect() + self.ioctx = self.rados.open_ioctx(self.pool) # NOTE(sileht): constants can't be class attributes because # they rely on presence of rados module @@ -107,23 +108,28 @@ class CephStorage(_carbonara.CarbonaraBasedStorage): # we are safe and good. 
self.OMAP_WRITE_FLAGS = rados.LIBRADOS_OPERATION_SKIPRWLOCKS + def stop(self): + self.ioctx.aio_flush() + self.ioctx.close() + self.rados.shutdown() + super(CephStorage, self).stop() + def upgrade(self, index): super(CephStorage, self).upgrade(index) # Move names stored in xattrs to omap - with self._get_ioctx() as ioctx: - try: - xattrs = tuple(k for k, v in - ioctx.get_xattrs(self.MEASURE_PREFIX)) - except rados.ObjectNotFound: - return - with rados.WriteOpCtx() as op: - ioctx.set_omap(op, xattrs, xattrs) - ioctx.operate_write_op(op, self.MEASURE_PREFIX, - flags=self.OMAP_WRITE_FLAGS) + try: + xattrs = tuple(k for k, v in + self.ioctx.get_xattrs(self.MEASURE_PREFIX)) + except rados.ObjectNotFound: + return + with rados.WriteOpCtx() as op: + self.ioctx.set_omap(op, xattrs, xattrs) + self.ioctx.operate_write_op(op, self.MEASURE_PREFIX, + flags=self.OMAP_WRITE_FLAGS) - for xattr in xattrs: - ioctx.rm_xattr(self.MEASURE_PREFIX, xattr) + for xattr in xattrs: + self.ioctx.rm_xattr(self.MEASURE_PREFIX, xattr) def _store_new_measures(self, metric, data): # NOTE(sileht): list all objects in a pool is too slow with @@ -137,17 +143,16 @@ class CephStorage(_carbonara.CarbonaraBasedStorage): str(metric.id), str(uuid.uuid4()), datetime.datetime.utcnow().strftime("%Y%m%d_%H:%M:%S"))) - with self._get_ioctx() as ioctx: - ioctx.write_full(name, data) - with rados.WriteOpCtx() as op: - ioctx.set_omap(op, (name,), ("",)) - ioctx.operate_write_op(op, self.MEASURE_PREFIX, - flags=self.OMAP_WRITE_FLAGS) + self.ioctx.write_full(name, data) + + with rados.WriteOpCtx() as op: + self.ioctx.set_omap(op, (name,), ("",)) + self.ioctx.operate_write_op(op, self.MEASURE_PREFIX, + flags=self.OMAP_WRITE_FLAGS) def _build_report(self, details): - with self._get_ioctx() as ioctx: - names = self._list_object_names_to_process(ioctx) + names = self._list_object_names_to_process() metrics = set() count = 0 metric_details = defaultdict(int) @@ -159,10 +164,10 @@ class 
CephStorage(_carbonara.CarbonaraBasedStorage): metric_details[metric] += 1 return len(metrics), count, metric_details if details else None - def _list_object_names_to_process(self, ioctx, prefix=""): + def _list_object_names_to_process(self, prefix=""): with rados.ReadOpCtx() as op: - omaps, ret = ioctx.get_omap_vals(op, "", prefix, -1) - ioctx.operate_read_op( + omaps, ret = self.ioctx.get_omap_vals(op, "", prefix, -1) + self.ioctx.operate_read_op( op, self.MEASURE_PREFIX, flag=self.OMAP_READ_FLAGS) # NOTE(sileht): after reading the libradospy, I'm # not sure that ret will have the correct value @@ -175,14 +180,11 @@ class CephStorage(_carbonara.CarbonaraBasedStorage): return (k for k, v in omaps) def _pending_measures_to_process_count(self, metric_id): - with self._get_ioctx() as ioctx: - object_prefix = self.MEASURE_PREFIX + "_" + str(metric_id) - return len(list(self._list_object_names_to_process(ioctx, - object_prefix))) + object_prefix = self.MEASURE_PREFIX + "_" + str(metric_id) + return len(list(self._list_object_names_to_process(object_prefix))) def _list_metric_with_measures_to_process(self, block_size, full=False): - with self._get_ioctx() as ioctx: - names = self._list_object_names_to_process(ioctx) + names = self._list_object_names_to_process() if full: objs_it = names else: @@ -192,144 +194,125 @@ class CephStorage(_carbonara.CarbonaraBasedStorage): return set([name.split("_")[1] for name in objs_it]) def _delete_unprocessed_measures_for_metric_id(self, metric_id): - with self._get_ioctx() as ctx: - object_prefix = self.MEASURE_PREFIX + "_" + str(metric_id) - object_names = self._list_object_names_to_process(ctx, - object_prefix) - # Now clean objects and xattrs - with rados.WriteOpCtx() as op: - # NOTE(sileht): come on Ceph, no return code - # for this operation ?!! 
- ctx.remove_omap_keys(op, tuple(object_names)) - ctx.operate_write_op(op, self.MEASURE_PREFIX, - flags=self.OMAP_WRITE_FLAGS) - - for n in object_names: - ctx.aio_remove(n) + object_prefix = self.MEASURE_PREFIX + "_" + str(metric_id) + object_names = self._list_object_names_to_process(object_prefix) + # Now clean objects and xattrs + with rados.WriteOpCtx() as op: + # NOTE(sileht): come on Ceph, no return code + # for this operation ?!! + self.ioctx.remove_omap_keys(op, tuple(object_names)) + self.ioctx.operate_write_op(op, self.MEASURE_PREFIX, + flags=self.OMAP_WRITE_FLAGS) + + for n in object_names: + self.ioctx.aio_remove(n) @contextlib.contextmanager def _process_measure_for_metric(self, metric): - with self._get_ioctx() as ctx: - object_prefix = self.MEASURE_PREFIX + "_" + str(metric.id) - object_names = list(self._list_object_names_to_process( - ctx, object_prefix)) + object_prefix = self.MEASURE_PREFIX + "_" + str(metric.id) + object_names = list(self._list_object_names_to_process(object_prefix)) - measures = [] - for n in object_names: - data = self._get_object_content(ctx, n) - measures.extend(self._unserialize_measures(data)) + measures = [] + for n in object_names: + data = self._get_object_content(n) + measures.extend(self._unserialize_measures(data)) - yield measures + yield measures - # Now clean objects and xattrs - with rados.WriteOpCtx() as op: - # NOTE(sileht): come on Ceph, no return code - # for this operation ?!! - ctx.remove_omap_keys(op, tuple(object_names)) - ctx.operate_write_op(op, self.MEASURE_PREFIX, - flags=self.OMAP_WRITE_FLAGS) + # Now clean objects and xattrs + with rados.WriteOpCtx() as op: + # NOTE(sileht): come on Ceph, no return code + # for this operation ?!! 
+ self.ioctx.remove_omap_keys(op, tuple(object_names)) + self.ioctx.operate_write_op(op, self.MEASURE_PREFIX, + flags=self.OMAP_WRITE_FLAGS) - for n in object_names: - ctx.aio_remove(n) - - def _get_ioctx(self): - return self.rados.open_ioctx(self.pool) + for n in object_names: + self.ioctx.aio_remove(n) @staticmethod def _get_object_name(metric, timestamp_key, aggregation, granularity): return str("gnocchi_%s_%s_%s_%s" % ( metric.id, timestamp_key, aggregation, granularity)) - @staticmethod - def _object_exists(ioctx, name): + def _object_exists(self, name): try: - ioctx.stat(name) + self.ioctx.stat(name) return True except rados.ObjectNotFound: return False def _create_metric(self, metric): name = "gnocchi_%s_container" % metric.id - with self._get_ioctx() as ioctx: - if self._object_exists(ioctx, name): - raise storage.MetricAlreadyExists(metric) - else: - ioctx.write_full(name, "metric created") + if self._object_exists(name): + raise storage.MetricAlreadyExists(metric) + else: + self.ioctx.write_full(name, "metric created") def _store_metric_measures(self, metric, timestamp_key, aggregation, granularity, data): name = self._get_object_name(metric, timestamp_key, aggregation, granularity) - with self._get_ioctx() as ioctx: - ioctx.write_full(name, data) - ioctx.set_xattr("gnocchi_%s_container" % metric.id, name, "") + self.ioctx.write_full(name, data) + self.ioctx.set_xattr("gnocchi_%s_container" % metric.id, name, "") def _delete_metric_measures(self, metric, timestamp_key, aggregation, granularity): name = self._get_object_name(metric, timestamp_key, aggregation, granularity) - with self._get_ioctx() as ioctx: - ioctx.rm_xattr("gnocchi_%s_container" % metric.id, name) - ioctx.aio_remove(name) + self.ioctx.rm_xattr("gnocchi_%s_container" % metric.id, name) + self.ioctx.aio_remove(name) def _delete_metric(self, metric): - with self._get_ioctx() as ioctx: - try: - xattrs = ioctx.get_xattrs("gnocchi_%s_container" % metric.id) - except rados.ObjectNotFound: - pass 
- else: - for xattr, _ in xattrs: - ioctx.aio_remove(xattr) - for name in ('container', 'none'): - ioctx.aio_remove("gnocchi_%s_%s" % (metric.id, name)) + try: + xattrs = self.ioctx.get_xattrs("gnocchi_%s_container" % metric.id) + except rados.ObjectNotFound: + pass + else: + for xattr, _ in xattrs: + self.ioctx.aio_remove(xattr) + for name in ('container', 'none'): + self.ioctx.aio_remove("gnocchi_%s_%s" % (metric.id, name)) def _get_measures(self, metric, timestamp_key, aggregation, granularity): try: - with self._get_ioctx() as ioctx: - name = self._get_object_name(metric, timestamp_key, - aggregation, granularity) - return self._get_object_content(ioctx, name) + name = self._get_object_name(metric, timestamp_key, + aggregation, granularity) + return self._get_object_content(name) except rados.ObjectNotFound: - with self._get_ioctx() as ioctx: - if self._object_exists( - ioctx, "gnocchi_%s_container" % metric.id): - raise storage.AggregationDoesNotExist(metric, aggregation) - else: - raise storage.MetricDoesNotExist(metric) + if self._object_exists( + self.ioctx, "gnocchi_%s_container" % metric.id): + raise storage.AggregationDoesNotExist(metric, aggregation) + else: + raise storage.MetricDoesNotExist(metric) def _list_split_keys_for_metric(self, metric, aggregation, granularity): - with self._get_ioctx() as ioctx: - try: - xattrs = ioctx.get_xattrs("gnocchi_%s_container" % metric.id) - except rados.ObjectNotFound: - raise storage.MetricDoesNotExist(metric) - keys = [] - for xattr, value in xattrs: - _, metric_id, key, agg, g = xattr.split('_', 4) - if aggregation == agg and granularity == float(g): - keys.append(key) + try: + xattrs = self.ioctx.get_xattrs("gnocchi_%s_container" % metric.id) + except rados.ObjectNotFound: + raise storage.MetricDoesNotExist(metric) + keys = [] + for xattr, value in xattrs: + _, metric_id, key, agg, g = xattr.split('_', 4) + if aggregation == agg and granularity == float(g): + keys.append(key) return keys def 
_get_unaggregated_timeserie(self, metric): try: - with self._get_ioctx() as ioctx: - return self._get_object_content( - ioctx, "gnocchi_%s_none" % metric.id) + return self._get_object_content("gnocchi_%s_none" % metric.id) except rados.ObjectNotFound: raise storage.MetricDoesNotExist(metric) def _store_unaggregated_timeserie(self, metric, data): - with self._get_ioctx() as ioctx: - ioctx.write_full("gnocchi_%s_none" % metric.id, data) + self.ioctx.write_full("gnocchi_%s_none" % metric.id, data) - @staticmethod - def _get_object_content(ioctx, name): + def _get_object_content(self, name): offset = 0 content = b'' while True: - data = ioctx.read(name, offset=offset) + data = self.ioctx.read(name, offset=offset) if not data: break content += data @@ -340,23 +323,20 @@ class CephStorage(_carbonara.CarbonaraBasedStorage): def _get_metric_archive(self, metric, aggregation): """Retrieve data in the place we used to store TimeSerieArchive.""" try: - with self._get_ioctx() as ioctx: - return self._get_object_content( - ioctx, str("gnocchi_%s_%s" % (metric.id, aggregation))) + return self._get_object_content( + str("gnocchi_%s_%s" % (metric.id, aggregation))) except rados.ObjectNotFound: raise storage.AggregationDoesNotExist(metric, aggregation) def _store_metric_archive(self, metric, aggregation, data): """Stores data in the place we used to store TimeSerieArchive.""" - with self._get_ioctx() as ioctx: - ioctx.write_full( - str("gnocchi_%s_%s" % (metric.id, aggregation)), data) + self.ioctx.write_full( + str("gnocchi_%s_%s" % (metric.id, aggregation)), data) def _delete_metric_archives(self, metric): - with self._get_ioctx() as ioctx: - for aggregation in metric.archive_policy.aggregation_methods: - try: - ioctx.remove_object( - str("gnocchi_%s_%s" % (metric.id, aggregation))) - except rados.ObjectNotFound: - pass + for aggregation in metric.archive_policy.aggregation_methods: + try: + self.ioctx.remove_object( + str("gnocchi_%s_%s" % (metric.id, aggregation))) + except 
rados.ObjectNotFound: + pass diff --git a/gnocchi/tests/base.py b/gnocchi/tests/base.py index c7d4a4eb..ab16b111 100644 --- a/gnocchi/tests/base.py +++ b/gnocchi/tests/base.py @@ -206,6 +206,10 @@ class FakeRadosModule(object): self.kvs_xattrs.pop(key, None) self.kvs_omaps.pop(key, None) + @staticmethod + def aio_flush(): + pass + class FakeRados(object): def __init__(self, kvs, kvs_xattrs, kvs_omaps): self.kvs = kvs -- GitLab From e39b26571a87149451a69b5cb4c3cfda41d9557a Mon Sep 17 00:00:00 2001 From: gord chung Date: Thu, 30 Jun 2016 00:57:03 +0000 Subject: [PATCH 0273/1483] fix tooz requirement start_heart param was added in 1.38, not 1.35. https://github.com/openstack/tooz/commit/10b971150088e3e6830571c08d7928e5855c21ff Change-Id: Ic59674eb46e1997dce3c34d9d5319df2077e7c75 --- setup.cfg | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/setup.cfg b/setup.cfg index bc769555..313dc4a1 100644 --- a/setup.cfg +++ b/setup.cfg @@ -37,11 +37,11 @@ swift = python-swiftclient>=3.0.0 msgpack-python lz4 - tooz>=1.35 + tooz>=1.38 ceph = msgpack-python lz4 - tooz>=1.35 + tooz>=1.38 ceph-pre-jewel: cradox>=1.0.9 ceph-jewel-and-later: @@ -49,7 +49,7 @@ ceph-jewel-and-later: file = msgpack-python lz4 - tooz>=1.35 + tooz>=1.38 doc = oslosphinx>=2.2.0 sphinx @@ -71,7 +71,7 @@ test = testtools>=0.9.38 WebTest>=2.0.16 doc8 - tooz>=1.35 + tooz>=1.38 keystonemiddleware>=4.0.0 [global] -- GitLab From b9a07079df224fc8fee34588ce01ee9e59f7819e Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Fri, 1 Jul 2016 12:04:12 +0000 Subject: [PATCH 0274/1483] Fix CORS middleware setup The cors middleware needs to be in front of the pipeline to work correctly. 
Change-Id: I821d986b7613f6651a8cad7249ae8e36483c8f0c --- etc/gnocchi/api-paste.ini | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/etc/gnocchi/api-paste.ini b/etc/gnocchi/api-paste.ini index 9d1b54ba..451243b9 100644 --- a/etc/gnocchi/api-paste.ini +++ b/etc/gnocchi/api-paste.ini @@ -16,7 +16,7 @@ use = egg:Paste#urlmap pipeline = cors gnocchiv1 [pipeline:gnocchiv1+auth] -pipeline = keystone_authtoken cors gnocchiv1 +pipeline = cors keystone_authtoken gnocchiv1 [app:gnocchiversions] paste.app_factory = gnocchi.rest.app:app_factory -- GitLab From b2742c004c7dd35c8f13b5027eb3ccc0d599e9bd Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Mon, 4 Jul 2016 12:02:35 +0200 Subject: [PATCH 0275/1483] Add support for Python 3.5 Change-Id: I3ec0aea8b7c6dfc64becbae72b67907ae1d4ce7c --- setup.cfg | 1 + tox.ini | 16 ++++++++-------- 2 files changed, 9 insertions(+), 8 deletions(-) diff --git a/setup.cfg b/setup.cfg index 313dc4a1..630fc4b9 100644 --- a/setup.cfg +++ b/setup.cfg @@ -16,6 +16,7 @@ classifier = Programming Language :: Python :: 2 Programming Language :: Python :: 2.7 Programming Language :: Python :: 3.4 + Programming Language :: Python :: 3.5 Topic :: System :: Monitoring [extras] diff --git a/tox.ini b/tox.ini index 5cba5ba8..3a3f41ff 100644 --- a/tox.ini +++ b/tox.ini @@ -1,24 +1,24 @@ [tox] minversion = 1.8 -envlist = py{27,34},py{27,34}-{postgresql,mysql}{,-file,-swift,-ceph},pep8,bashate +envlist = py{27,34,35},py{27,34,35}-{postgresql,mysql}{,-file,-swift,-ceph},pep8,bashate [testenv] usedevelop = True sitepackages = False passenv = LANG OS_TEST_TIMEOUT OS_STDOUT_CAPTURE OS_STDERR_CAPTURE OS_LOG_CAPTURE GNOCCHI_TEST_* deps = .[test] - py{27,34}-postgresql: .[postgresql,swift,ceph,file] - py{27,34}-mysql: .[mysql,swift,ceph,file] + py{27,34,35}-postgresql: .[postgresql,swift,ceph,file] + py{27,34,35}-mysql: .[mysql,swift,ceph,file] setenv = GNOCCHI_TEST_STORAGE_DRIVER=file GNOCCHI_TEST_INDEXER_DRIVER=postgresql 
GNOCCHI_TEST_STORAGE_DRIVERS=file swift ceph GNOCCHI_TEST_INDEXER_DRIVERS=postgresql mysql - py{27,34}-{postgresql,mysql}-file: GNOCCHI_TEST_STORAGE_DRIVERS=file - py{27,34}-{postgresql,mysql}-swift: GNOCCHI_TEST_STORAGE_DRIVERS=swift - py{27,34}-{postgresql,mysql}-ceph: GNOCCHI_TEST_STORAGE_DRIVERS=ceph - py{27,34}-postgresql{,-file,-swift,-ceph}: GNOCCHI_TEST_INDEXER_DRIVERS=postgresql - py{27,34}-mysql{,-file,-swift,-ceph}: GNOCCHI_TEST_INDEXER_DRIVERS=mysql + py{27,34,35}-{postgresql,mysql}-file: GNOCCHI_TEST_STORAGE_DRIVERS=file + py{27,34,35}-{postgresql,mysql}-swift: GNOCCHI_TEST_STORAGE_DRIVERS=swift + py{27,34,35}-{postgresql,mysql}-ceph: GNOCCHI_TEST_STORAGE_DRIVERS=ceph + py{27,34,35}-postgresql{,-file,-swift,-ceph}: GNOCCHI_TEST_INDEXER_DRIVERS=postgresql + py{27,34,35}-mysql{,-file,-swift,-ceph}: GNOCCHI_TEST_INDEXER_DRIVERS=mysql commands = doc8 --ignore-path doc/source/rest.rst doc/source -- GitLab From e7a9a578fb2777c1965fbfbaf478f1cde892f30c Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Wed, 6 Jul 2016 14:13:04 +0200 Subject: [PATCH 0276/1483] rest: allow to use X-Domain-Id in policy rules Change-Id: Idfdcd1c46e7d0207d3959dd67d1cadb6a249068b Closes-Bug: #1576804 --- doc/source/rest.j2 | 2 +- gnocchi/rest/__init__.py | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/doc/source/rest.j2 b/doc/source/rest.j2 index 9eea40c6..41bea929 100644 --- a/doc/source/rest.j2 +++ b/doc/source/rest.j2 @@ -12,7 +12,7 @@ these headers in your HTTP requests: * X-Project-Id The `X-Roles` header can also be provided in order to match role based ACL -specified in `policy.json`. +specified in `policy.json`, as `X-Domain-Id` to match domain based ACL. 
If you enable the OpenStack Keystone middleware, you only need to authenticate against Keystone and provide `X-Auth-Token` header with a valid token for each diff --git a/gnocchi/rest/__init__.py b/gnocchi/rest/__init__.py index 03ece176..ab4d99eb 100644 --- a/gnocchi/rest/__init__.py +++ b/gnocchi/rest/__init__.py @@ -82,7 +82,8 @@ def enforce(rule, target): creds = { 'roles': headers.get("X-Roles", "").split(","), 'user_id': user_id, - 'project_id': project_id + 'project_id': project_id, + 'domain_id': headers.get("X-Domain-Id", ""), } if not isinstance(target, dict): -- GitLab From 95e47fc34cc83ca2e6fdc3e1eeb94598c87a0384 Mon Sep 17 00:00:00 2001 From: gord chung Date: Thu, 7 Jul 2016 17:13:37 +0000 Subject: [PATCH 0277/1483] add missing key param to method definition Change-Id: Ie1f0ab9496ae68f12bf8ff2f62465355c91d0f83 --- gnocchi/storage/_carbonara.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/gnocchi/storage/_carbonara.py b/gnocchi/storage/_carbonara.py index e9070c5e..d3af3fa7 100644 --- a/gnocchi/storage/_carbonara.py +++ b/gnocchi/storage/_carbonara.py @@ -81,7 +81,8 @@ class CarbonaraBasedStorage(storage.StorageDriver): raise NotImplementedError @staticmethod - def _store_metric_measures(metric, aggregation, granularity, data): + def _store_metric_measures(metric, timestamp_key, + aggregation, granularity, data): raise NotImplementedError @staticmethod -- GitLab From db1aeabc77a90bfbc2e1eb593e7cf63b8ecc28d9 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Wed, 25 May 2016 08:51:26 +0200 Subject: [PATCH 0278/1483] devstack: Move to grafana 3.x Change-Id: Ib541b7da79a21a2e4ccd9343cb635b523ce67e7e --- devstack/plugin.sh | 39 +++++++++++++++++++++++++-------------- devstack/settings | 9 +++++---- 2 files changed, 30 insertions(+), 18 deletions(-) diff --git a/devstack/plugin.sh b/devstack/plugin.sh index 6baffa2c..26cd70ee 100644 --- a/devstack/plugin.sh +++ b/devstack/plugin.sh @@ -152,15 +152,19 @@ function 
_gnocchi_install_grafana { elif is_fedora; then sudo yum install "$GRAFANA_RPM_PKG" fi - - # NOTE(sileht): We current support only 2.6, when - # plugin for 3.0 will be ready we will switch to the grafana - # plugin tool to install it - git_clone ${GRAFANA_PLUGINS_REPO} ${GRAFANA_PLUGINS_DIR} 2.6 - # Grafana-server does not handle symlink :( - sudo mkdir -p /usr/share/grafana/public/app/plugins/datasource/gnocchi - sudo mount -o bind ${GRAFANA_PLUGINS_DIR}/datasources/gnocchi /usr/share/grafana/public/app/plugins/datasource/gnocchi - + if [ ! "$GRAFANA_PLUGIN_VERSION" ]; then + sudo grafana-cli plugins install sileht-gnocchi-datasource + elif [ "$GRAFANA_PLUGIN_VERSION" != "git" ]; then + tmpfile=/tmp/sileht-gnocchi-datasource-${GRAFANA_PLUGIN_VERSION}.tar.gz + wget https://github.com/sileht/grafana-gnocchi-datasource/releases/download/${GRAFANA_PLUGIN_VERSION}/sileht-gnocchi-datasource-${GRAFANA_PLUGIN_VERSION}.tar.gz -O $tmpfile + sudo -u grafana tar -xzf $tmpfile -C /var/lib/grafana/plugins + rm -f $file + else + git_clone ${GRAFANA_PLUGINS_REPO} ${GRAFANA_PLUGINS_DIR} + sudo ln -sf ${GRAFANA_PLUGINS_DIR}/dist /var/lib/grafana/plugins/grafana-gnocchi-datasource + # NOTE(sileht): This is long and have chance to fail, thx nodejs/npm + (cd /var/lib/grafana/plugins/grafana-gnocchi-datasource && npm install && ./run-tests.sh) || true + fi sudo service grafana-server restart } @@ -274,7 +278,6 @@ function configure_gnocchi { if [ "$GNOCCHI_USE_KEYSTONE" == "True" ] ; then iniset $GNOCCHI_PASTE_CONF pipeline:main pipeline gnocchi+auth if is_service_enabled gnocchi-grafana; then - iniset $KEYSTONE_CONF cors allowed_origin ${GRAFANA_URL} iniset $GNOCCHI_CONF cors allowed_origin ${GRAFANA_URL} iniset $GNOCCHI_CONF cors allow_headers Content-Type,Cache-Control,Content-Language,Expires,Last-Modified,Pragma,X-Auth-Token,X-Subject-Token fi @@ -315,6 +318,17 @@ function configure_gnocchi { fi } +# configure_keystone_for_gnocchi() - Configure Keystone needs for Gnocchi +function 
configure_keystone_for_gnocchi { + if [ "$GNOCCHI_USE_KEYSTONE" == "True" ] ; then + if is_service_enabled gnocchi-grafana; then + # NOTE(sileht): keystone configuration have to be set before uwsgi + # is started + iniset $KEYSTONE_CONF cors allowed_origin ${GRAFANA_URL} + fi + fi +} + # configure_ceph_gnocchi() - gnocchi config needs to come after gnocchi is set up function configure_ceph_gnocchi { # Configure gnocchi service options, ceph pool, ceph user and ceph key @@ -447,10 +461,6 @@ function stop_gnocchi { for serv in gnocchi-api; do stop_process $serv done - - if is_service_enabled gnocchi-grafana; then - sudo umount /usr/share/grafana/public/app/plugins/datasource/gnocchi - fi } if is_service_enabled gnocchi-api; then @@ -460,6 +470,7 @@ if is_service_enabled gnocchi-api; then elif [[ "$1" == "stack" && "$2" == "install" ]]; then echo_summary "Installing Gnocchi" stack_install_service gnocchi + configure_keystone_for_gnocchi elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then echo_summary "Configuring Gnocchi" configure_gnocchi diff --git a/devstack/settings b/devstack/settings index bdafd158..a47a8609 100644 --- a/devstack/settings +++ b/devstack/settings @@ -53,8 +53,9 @@ GNOCCHI_CEPH_POOL_PGP=${GNOCCHI_CEPH_POOL_PGP:-8} GNOCCHI_STORAGE_BACKEND=${GNOCCHI_STORAGE_BACKEND:-file} # Grafana settings -GRAFANA_RPM_PKG=${GRAFANA_RPM_PKG:-https://grafanarel.s3.amazonaws.com/builds/grafana-2.6.0-1.x86_64.rpm} -GRAFANA_DEB_PKG=${GRAFANA_DEB_PKG:-https://grafanarel.s3.amazonaws.com/builds/grafana_2.6.0_amd64.deb} -GRAFANA_PLUGINS_DIR=${GRAFANA_PLUGINS_DIR:-$DEST/grafana-plugins} -GRAFANA_PLUGINS_REPO=${GRAFANA_PLUGINS_REPO:-http://github.com/sileht/grafana-plugins-gnocchi.git} +GRAFANA_RPM_PKG=${GRAFANA_RPM_PKG:-https://grafanarel.s3.amazonaws.com/builds/grafana-3.0.4-1464167696.x86_64.rpm} +GRAFANA_DEB_PKG=${GRAFANA_DEB_PKG:-https://grafanarel.s3.amazonaws.com/builds/grafana_3.0.4-1464167696_amd64.deb} +GRAFANA_PLUGIN_VERSION=${GRAFANA_PLUGIN_VERSION} 
+GRAFANA_PLUGINS_DIR=${GRAFANA_PLUGINS_DIR:-$DEST/grafana-gnocchi-datasource} +GRAFANA_PLUGINS_REPO=${GRAFANA_PLUGINS_REPO:-http://github.com/sileht/grafana-gnocchi-datasource.git} GRAFANA_URL=${GRAFANA_URL:-http://$HOST_IP:3000} -- GitLab From fcd190e36618cb6570bc226d8004f35906759a65 Mon Sep 17 00:00:00 2001 From: Thomas Goirand Date: Mon, 11 Jul 2016 14:36:47 +0200 Subject: [PATCH 0279/1483] Updated Danish translation of the debconf templates (Closes: #830650). --- debian/changelog | 8 +++++-- debian/po/da.po | 54 ++++++++++++++---------------------------------- 2 files changed, 22 insertions(+), 40 deletions(-) diff --git a/debian/changelog b/debian/changelog index 3558c026..e74774ef 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,8 +1,12 @@ -gnocchi (2.0.2-7) UNRELEASED; urgency=medium +gnocchi (2.0.2-8) UNRELEASED; urgency=medium + [ Ondřej Nový ] * d/watch: Fixed upstream URL - -- Ondřej Nový Tue, 21 Jun 2016 13:00:49 +0200 + [ Thomas Goirand ] + * Updated Danish translation of the debconf templates (Closes: #830650). + + -- Thomas Goirand Mon, 11 Jul 2016 14:36:26 +0200 gnocchi (2.0.2-6) unstable; urgency=medium diff --git a/debian/po/da.po b/debian/po/da.po index 81b9b36c..3aa6861e 100644 --- a/debian/po/da.po +++ b/debian/po/da.po @@ -1,11 +1,11 @@ -# Danish translation glance. -# Copyright (C) 2014 glance & nedenstående oversættere. -# This file is distributed under the same license as the glance package. -# Joe Hansen (joedalton2@yahoo.dk), 2012, 2013, 2014. +# Danish translation gnocchi. +# Copyright (C) 2016 gnocchi & nedenstående oversættere. +# This file is distributed under the same license as the gnocchi package. +# Joe Hansen (joedalton2@yahoo.dk), 2012, 2013, 2014, 2016. 
# msgid "" msgstr "" -"Project-Id-Version: glance\n" +"Project-Id-Version: gnocchi\n" "Report-Msgid-Bugs-To: gnocchi@packages.debian.org\n" "POT-Creation-Date: 2016-03-29 13:10+0000\n" "PO-Revision-Date: 2014-02-22 12:42+0000\n" @@ -92,18 +92,12 @@ msgstr "Opsæt en database for Gnocchi?" #. Type: boolean #. Description #: ../gnocchi-common.templates:6001 -#, fuzzy -#| msgid "" -#| "No database has been set up for glance-registry or glance-api to use. " -#| "Before continuing, you should make sure you have the following " -#| "information:" msgid "" "No database has been set up for Gnocchi to use. Before continuing, you " "should make sure you have the following information:" msgstr "" -"Ingen database er blevet opsat som glance-registry eller glance-api kan " -"bruge. Før du fortsætter, skal du sikre dig, at du har den følgende " -"information:" +"Ingen database er blevet opsat som Gnocchi kan bruge. Før du fortsætter, " +"skal du sikre dig, at du har den følgende information:" #. Type: boolean #. Description @@ -133,16 +127,12 @@ msgstr "" #. Type: boolean #. Description #: ../gnocchi-common.templates:6001 -#, fuzzy -#| msgid "" -#| "You can change this setting later on by running \"dpkg-reconfigure -plow " -#| "glance-common\"." msgid "" "You can change this setting later on by running \"dpkg-reconfigure -plow " "gnocchi-common\"." msgstr "" "Du kan ændre denne indstilling senere ved at køre »dpkg-reconfigure -plow " -"glance-common«." +"gnocchi-common«." #. Type: boolean #. Description @@ -165,17 +155,14 @@ msgstr "" #. Type: boolean #. Description #: ../gnocchi-api.templates:2001 -#, fuzzy -#| msgid "" -#| "Note that you will need to have an up and running Keystone server on " -#| "which to connect using the Keystone authentication token." msgid "" "Note that you will need to have an up and running Keystone server on which " "to connect using a known admin project name, admin username and password. " "The admin auth token is not used anymore." 
msgstr "" "Bemærk at du skal have en op og kørende Keystoneserver, som du skal forbinde " -"til med Keystones godkendelsessymbol." +"til via et kendt administratorprojektnavn, administratorbrugernavn og " +"adgangskode. Administratorens godkendelsessymbol bruges ikke længere." #. Type: string #. Description @@ -196,10 +183,8 @@ msgstr "" #. Type: string #. Description #: ../gnocchi-api.templates:4001 -#, fuzzy -#| msgid "Keystone authentication token:" msgid "Keystone admin name:" -msgstr "Godkendelsessymbol for Keystone:" +msgstr "Administratornavn for Keystone:" #. Type: string #. Description @@ -213,18 +198,21 @@ msgid "" "To register the service endpoint, this package needs to know the Admin " "login, name, project name, and password to the Keystone server." msgstr "" +"For at registrere tjenesteslutpunkt skal denne pakke kende til " +"administratorlogind'et, navn, projektnavn og adgangskode for " +"Keystoneserveren." #. Type: string #. Description #: ../gnocchi-api.templates:5001 msgid "Keystone admin project name:" -msgstr "" +msgstr "Projektnavn for Keystoneadministratoren:" #. Type: password #. Description #: ../gnocchi-api.templates:6001 msgid "Keystone admin password:" -msgstr "" +msgstr "Adgangskode for Keystoneadministratoren:" #. Type: string #. Description @@ -269,13 +257,3 @@ msgstr "" "repræsenterer et sted. Indtast venligst zonen du ønsker at bruge, når " "slutpunktet registreres." -#, fuzzy -#~| msgid "" -#~| "To configure its endpoint in Keystone, glance-api needs the Keystone " -#~| "authentication token." -#~ msgid "" -#~ "To configure its endpoint in Keystone, gnocchi-api needs the Keystone " -#~ "authentication token." -#~ msgstr "" -#~ "For at konfigurere dets slutpunkt i Keystone, kræver glance-api Keystones " -#~ "godkendelsessymbol." 
-- GitLab From a65f0d9560cc94e6100747a2f12dc1bd7d4f19a0 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Thu, 21 Apr 2016 11:53:39 +0200 Subject: [PATCH 0280/1483] metricd: use Cotyledon lib Instead of managing multiprocessing.Process manually. This change uses 'Cotyledon': * to have a complete support of processes lifetime. * to restart unexpected ended child process. * better sub process title naming. * to ensure graceful child process exit. Change-Id: Ib1fee98cb657888a903a075e7f38f59281de2548 --- gnocchi/cli.py | 146 +++++++++++++++++++++++------------------------ requirements.txt | 1 + 2 files changed, 72 insertions(+), 75 deletions(-) diff --git a/gnocchi/cli.py b/gnocchi/cli.py index 95628006..ca1c7e86 100644 --- a/gnocchi/cli.py +++ b/gnocchi/cli.py @@ -14,10 +14,11 @@ # See the License for the specific language governing permissions and # limitations under the License. import multiprocessing -import signal import sys +import threading import time +import cotyledon from oslo_config import cfg from oslo_log import log from oslo_utils import timeutils @@ -78,13 +79,14 @@ def retry_if_retry_is_raised(exception): return isinstance(exception, Retry) -class MetricProcessBase(multiprocessing.Process): - def __init__(self, conf, worker_id=0, interval_delay=0): - super(MetricProcessBase, self).__init__() +class MetricProcessBase(cotyledon.Service): + def __init__(self, worker_id, conf, interval_delay=0): + super(MetricProcessBase, self).__init__(worker_id) self.conf = conf - self.worker_id = worker_id self.startup_delay = worker_id self.interval_delay = interval_delay + self._shutdown = threading.Event() + self._shutdown_done = threading.Event() # Retry with exponential backoff for up to 1 minute @retrying.retry(wait_exponential_multiplier=500, @@ -109,15 +111,22 @@ class MetricProcessBase(multiprocessing.Process): # Delay startup so workers are jittered. 
time.sleep(self.startup_delay) - while True: - try: - with timeutils.StopWatch() as timer: - self._run_job() - time.sleep(max(0, self.interval_delay - timer.elapsed())) - except KeyboardInterrupt: - # Ignore KeyboardInterrupt so parent handler can kill - # all children. - pass + while not self._shutdown.is_set(): + with timeutils.StopWatch() as timer: + self._run_job() + self._shutdown.wait(max(0, self.interval_delay - + timer.elapsed())) + self._shutdown_done.set() + + def terminate(self): + self._shutdown.set() + self.close_queues() + LOG.info("Waiting ongoing metric processing to finish") + self._shutdown_done.wait() + + @staticmethod + def close_queues(): + raise NotImplementedError @staticmethod def _run_job(): @@ -125,18 +134,20 @@ class MetricProcessBase(multiprocessing.Process): class MetricReporting(MetricProcessBase): - def __init__(self, conf, worker_id=0, interval_delay=0, queues=None): - super(MetricReporting, self).__init__(conf, worker_id, interval_delay) + name = "reporting" + + def __init__(self, worker_id, conf, queues): + super(MetricReporting, self).__init__( + worker_id, conf, conf.storage.metric_reporting_delay) self.queues = queues def _run_job(self): try: report = self.store.measures_report(details=False) - if self.queues: - block_size = max(16, min( - 256, report['summary']['metrics'] // len(self.queues))) - for queue in self.queues: - queue.put(block_size) + block_size = max(16, min( + 256, report['summary']['metrics'] // len(self.queues))) + for queue in self.queues: + queue.put(block_size) LOG.info("Metricd reporting: %d measurements bundles across %d " "metrics wait to be processed.", report['summary']['measures'], @@ -145,8 +156,18 @@ class MetricReporting(MetricProcessBase): LOG.error("Unexpected error during pending measures reporting", exc_info=True) + def close_queues(self): + for queue in self.queues: + queue.close() + class MetricJanitor(MetricProcessBase): + name = "janitor" + + def __init__(self, worker_id, conf): + 
super(MetricJanitor, self).__init__( + worker_id, conf, conf.storage.metric_cleanup_delay) + def _run_job(self): try: self.store.expunge_metrics(self.index) @@ -156,23 +177,44 @@ class MetricJanitor(MetricProcessBase): class MetricProcessor(MetricProcessBase): - def __init__(self, conf, worker_id=0, interval_delay=0, queue=None): - super(MetricProcessor, self).__init__(conf, worker_id, interval_delay) + name = "processing" + + def __init__(self, worker_id, conf, queue): + super(MetricProcessor, self).__init__( + worker_id, conf, conf.storage.metric_processing_delay) self.queue = queue self.block_size = 128 def _run_job(self): try: - if self.queue: - while not self.queue.empty(): - self.block_size = self.queue.get() - LOG.debug("Re-configuring worker to handle up to %s " - "metrics", self.block_size) + while not self.queue.empty(): + self.block_size = self.queue.get() + LOG.debug("Re-configuring worker to handle up to %s " + "metrics", self.block_size) self.store.process_background_tasks(self.index, self.block_size) except Exception: LOG.error("Unexpected error during measures processing", exc_info=True) + def close_queues(self): + self.queue.close() + + +class MetricdServiceManager(cotyledon.ServiceManager): + def __init__(self, conf): + super(MetricdServiceManager, self).__init__() + self.conf = conf + self.queues = [multiprocessing.Queue() + for i in range(conf.metricd.workers)] + + self.add(self.create_processor, workers=conf.metricd.workers) + self.add(MetricReporting, args=(self.conf, self.queues)) + self.add(MetricJanitor, args=(self.conf,)) + + def create_processor(self, worker_id): + queue = self.queues[worker_id - 1] + return MetricProcessor(worker_id, self.conf, queue) + def metricd(): conf = service.prepare_service() @@ -180,50 +222,4 @@ def metricd(): conf.storage.metric_processing_delay): LOG.error("Metric reporting must run less frequently then processing") sys.exit(0) - - signal.signal(signal.SIGTERM, _metricd_terminate) - - try: - queues = [] - 
workers = [] - for worker in range(conf.metricd.workers): - queue = multiprocessing.Queue() - metric_worker = MetricProcessor( - conf, worker, conf.storage.metric_processing_delay, queue) - metric_worker.start() - queues.append(queue) - workers.append(metric_worker) - - metric_report = MetricReporting( - conf, 0, conf.storage.metric_reporting_delay, queues) - metric_report.start() - workers.append(metric_report) - - metric_janitor = MetricJanitor( - conf, interval_delay=conf.storage.metric_cleanup_delay) - metric_janitor.start() - workers.append(metric_janitor) - - for worker in workers: - worker.join() - except KeyboardInterrupt: - _metricd_cleanup(workers) - sys.exit(0) - except Exception: - LOG.warning("exiting", exc_info=True) - _metricd_cleanup(workers) - sys.exit(1) - - -def _metricd_cleanup(workers): - for worker in workers: - if hasattr(worker, 'queue'): - worker.queue.close() - worker.terminate() - for worker in workers: - worker.join() - - -def _metricd_terminate(signum, frame): - _metricd_cleanup(multiprocessing.active_children()) - sys.exit(0) + MetricdServiceManager(conf).run() diff --git a/requirements.txt b/requirements.txt index 0127b70b..340ad891 100644 --- a/requirements.txt +++ b/requirements.txt @@ -10,6 +10,7 @@ pandas>=0.17.0 pecan>=0.9 pytimeparse>=1.1.5 futures +cotyledon>=1.2.2 requests six stevedore -- GitLab From ac86707a70e40ec39cd54f4494d3df7686d8918b Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Tue, 12 Jul 2016 11:14:30 +0200 Subject: [PATCH 0281/1483] doc: Update grafana plugin documentation Change-Id: I46cbd7e586a72ada3ae7e2dafe4232cfc39d9593 --- devstack/plugin.sh | 2 +- doc/source/grafana.rst | 24 ++++++++++++------------ 2 files changed, 13 insertions(+), 13 deletions(-) diff --git a/devstack/plugin.sh b/devstack/plugin.sh index 26cd70ee..02220866 100644 --- a/devstack/plugin.sh +++ b/devstack/plugin.sh @@ -279,7 +279,7 @@ function configure_gnocchi { iniset $GNOCCHI_PASTE_CONF pipeline:main pipeline gnocchi+auth if 
is_service_enabled gnocchi-grafana; then iniset $GNOCCHI_CONF cors allowed_origin ${GRAFANA_URL} - iniset $GNOCCHI_CONF cors allow_headers Content-Type,Cache-Control,Content-Language,Expires,Last-Modified,Pragma,X-Auth-Token,X-Subject-Token + iniset $GNOCCHI_CONF cors allow_headers Content-Type,Cache-Control,Content-Language,Expires,Last-Modified,Pragma,X-Auth-Token,X-Subject-Token,X-User-Id,X-Domain-Id,X-Project-Id,X-Roles fi else iniset $GNOCCHI_PASTE_CONF pipeline:main pipeline gnocchi+noauth diff --git a/doc/source/grafana.rst b/doc/source/grafana.rst index 0d876544..ed8dcd5e 100644 --- a/doc/source/grafana.rst +++ b/doc/source/grafana.rst @@ -2,14 +2,12 @@ Grafana support ================= -`Grafana`_ has support for Gnocchi through a plugin. The repository named -`grafana-plugins`_ contains this plugin. You can enable the plugin by following -the instructions in the `Grafana documentation`_. +`Grafana`_ has support for Gnocchi through a plugin. It can be installed with +grafana-cli:: -.. note:: - A `pull request`_ has been made to merge this plugin directly into Grafana - main tree, but it has unfortunately being denied for the time being. Feel - free to post a comment there requesting its reopening. + sudo grafana-cli plugins install sileht-gnocchi-datasource + +`Source`_ and `Documentation`_ are also available. Grafana has 2 modes of operation: proxy or direct mode. In proxy mode, your browser only communicates with Grafana, and Grafana communicates with Gnocchi. @@ -34,9 +32,12 @@ steps: [cors] allowed_origin = http://example.com/grafana - allow_headers = Content-Type,Cache-Control,Content-Language,Expires,Last-Modified,Pragma,X-Auth-Token + allow_headers = Content-Type,Cache-Control,Content-Language,Expires,Last-Modified,Pragma,X-Auth-Token,X-Subject-Token,X-User-Id,X-Domain-Id,X-Project-Id,X-Roles + +2. Configure the CORS middleware in Keystone to allow request from Grafana too: -2. Configure the CORS middleware in Keystone in the same fashion. 
+ [cors] + allowed_origin = http://example.com/grafana 3. Configure a new datasource in Grafana with the Keystone URL, a user, a project and a password. Your browser will query Keystone for a token, and @@ -47,7 +48,6 @@ steps: :alt: Grafana screenshot .. _`Grafana`: http://grafana.org -.. _`grafana-plugins`: https://github.com/grafana/grafana-plugins -.. _`pull request`: https://github.com/grafana/grafana/pull/2716 -.. _`Grafana documentation`: http://docs.grafana.org/ +.. _`Documentation`: https://grafana.net/plugins/sileht-gnocchi-datasource +.. _`Source`: https://github.com/sileht/grafana-gnocchi-datasource .. _`CORS`: https://en.wikipedia.org/wiki/Cross-origin_resource_sharing -- GitLab From a2a7fe50f3b13c15ad2e6fdae365b41a4b64f4fd Mon Sep 17 00:00:00 2001 From: gord chung Date: Thu, 14 Jul 2016 10:25:58 -0400 Subject: [PATCH 0283/1483] fix object_exists reference we use a global context now. Change-Id: I2ea2aba4aa7a0364f48ec59bb7b1a38e22d3028e --- gnocchi/storage/ceph.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/gnocchi/storage/ceph.py b/gnocchi/storage/ceph.py index 0237b034..c4170c2a 100644 --- a/gnocchi/storage/ceph.py +++ b/gnocchi/storage/ceph.py @@ -280,8 +280,7 @@ class CephStorage(_carbonara.CarbonaraBasedStorage): aggregation, granularity) return self._get_object_content(name) except rados.ObjectNotFound: - if self._object_exists( - self.ioctx, "gnocchi_%s_container" % metric.id): + if self._object_exists("gnocchi_%s_container" % metric.id): raise storage.AggregationDoesNotExist(metric, aggregation) else: raise storage.MetricDoesNotExist(metric) -- GitLab From e57eed5fef0e433aefab2db2feafc9fcc14609a8 Mon Sep 17 00:00:00 2001 From: zhangguoqing Date: Wed, 13 Jul 2016 02:59:21 +0000 Subject: [PATCH 0284/1483] return explicitly InvalidPagination sort key Since the patch[1] has been Merged, we should return explicitly InvalidPagination sort key to help user orientation error. 
[1] https://review.openstack.org/#/c/274868/ Change-Id: Ic70f2274dcddf28be9d58e6f97977d8ac7ca5256 --- gnocchi/indexer/sqlalchemy.py | 8 ++------ gnocchi/tests/gabbi/gabbits/metric.yaml | 6 ++++++ setup.cfg | 4 ++-- 3 files changed, 10 insertions(+), 8 deletions(-) diff --git a/gnocchi/indexer/sqlalchemy.py b/gnocchi/indexer/sqlalchemy.py index 8e10250e..c9b5ddc2 100644 --- a/gnocchi/indexer/sqlalchemy.py +++ b/gnocchi/indexer/sqlalchemy.py @@ -530,9 +530,7 @@ class SQLAlchemyIndexer(indexer.IndexerDriver): except ValueError as e: raise indexer.InvalidPagination(e) except exception.InvalidSortKey as e: - # FIXME(jd) Wait for https://review.openstack.org/274868 to be - # released so we can return which key - raise indexer.InvalidPagination("Invalid sort keys") + raise indexer.InvalidPagination(e) return list(q.all()) @@ -806,9 +804,7 @@ class SQLAlchemyIndexer(indexer.IndexerDriver): except ValueError as e: raise indexer.InvalidPagination(e) except exception.InvalidSortKey as e: - # FIXME(jd) Wait for https://review.openstack.org/274868 to be - # released so we can return which key - raise indexer.InvalidPagination("Invalid sort keys") + raise indexer.InvalidPagination(e) # Always include metrics q = q.options(sqlalchemy.orm.joinedload("metrics")) diff --git a/gnocchi/tests/gabbi/gabbits/metric.yaml b/gnocchi/tests/gabbi/gabbits/metric.yaml index 98004df4..b31b3d00 100644 --- a/gnocchi/tests/gabbi/gabbits/metric.yaml +++ b/gnocchi/tests/gabbi/gabbits/metric.yaml @@ -59,6 +59,12 @@ tests: response_strings: - "[]" + - name: get metric list with nonexistent sort key + GET: /v1/metric?sort=nonexistent_key:asc + status: 400 + response_strings: + - "Sort key supplied is invalid: nonexistent_key" + - name: create metric with name and unit POST: /v1/metric request_headers: diff --git a/setup.cfg b/setup.cfg index 630fc4b9..4f0f18b1 100644 --- a/setup.cfg +++ b/setup.cfg @@ -24,13 +24,13 @@ keystone = keystonemiddleware>=4.0.0 mysql = pymysql - oslo.db>=1.8.0 + oslo.db>=4.1.0 
sqlalchemy sqlalchemy-utils alembic>=0.7.6,!=0.8.1 postgresql = psycopg2 - oslo.db>=1.8.0 + oslo.db>=4.1.0 sqlalchemy sqlalchemy-utils alembic>=0.7.6,!=0.8.1 -- GitLab From 880b3b81cd6ce786c1b944858ab9c55a2f184294 Mon Sep 17 00:00:00 2001 From: gord chung Date: Tue, 12 Jul 2016 21:31:36 +0000 Subject: [PATCH 0285/1483] truncate AggregatedTimeSerie on init we should truncate AggregatedTimeSerie on creation similar to BoundedTimeSerie so we don't work on and carry points that are beyond the max size. we should skip dropna on init as null values only exist after aggregation and needlessly computing it has an overhead to it. Change-Id: I3ebcd1763d672780971435447aeb12c7a040757f --- gnocchi/carbonara.py | 6 ++++-- gnocchi/tests/test_storage.py | 21 +++++---------------- 2 files changed, 9 insertions(+), 18 deletions(-) diff --git a/gnocchi/carbonara.py b/gnocchi/carbonara.py index 3227fe85..5c9fb5c7 100644 --- a/gnocchi/carbonara.py +++ b/gnocchi/carbonara.py @@ -310,6 +310,7 @@ class AggregatedTimeSerie(TimeSerie): self.sampling = self._to_offset(sampling).nanos / 10e8 self.max_size = max_size self.aggregation_method = aggregation_method + self._truncate(quick=True) @classmethod def from_data(cls, sampling, aggregation_method, timestamps=None, @@ -435,11 +436,12 @@ class AggregatedTimeSerie(TimeSerie): def serialize(self): return lz4.dumps(msgpack.dumps(self.to_dict())) - def _truncate(self): + def _truncate(self, quick=False): """Truncate the timeserie.""" if self.max_size is not None: # Remove empty points if any that could be added by aggregation - self.ts = self.ts.dropna()[-self.max_size:] + self.ts = (self.ts[-self.max_size:] if quick + else self.ts.dropna()[-self.max_size:]) def _resample(self, after): # Group by the sampling, and then apply the aggregation method on diff --git a/gnocchi/tests/test_storage.py b/gnocchi/tests/test_storage.py index 5b4737c4..e0be09b6 100644 --- a/gnocchi/tests/test_storage.py +++ b/gnocchi/tests/test_storage.py @@ -536,9 +536,9 @@ 
class TestStorageDriver(tests_base.TestCase): name = str(uuid.uuid4()) ap = archive_policy.ArchivePolicy(name, 0, [(3, 5)]) self.index.create_archive_policy(ap) - m = storage.Metric(uuid.uuid4(), ap) - self.index.create_metric(m.id, str(uuid.uuid4()), - str(uuid.uuid4()), name) + m = self.index.create_metric(uuid.uuid4(), str(uuid.uuid4()), + str(uuid.uuid4()), name) + m = self.index.list_metrics(ids=[m.id])[0] self.storage.add_measures(m, [ storage.Measure(datetime.datetime(2014, 1, 1, 12, 0, 0), 1), storage.Measure(datetime.datetime(2014, 1, 1, 12, 0, 5), 1), @@ -553,6 +553,7 @@ class TestStorageDriver(tests_base.TestCase): # expand to more points self.index.update_archive_policy( name, [archive_policy.ArchivePolicyItem(granularity=5, points=6)]) + m = self.index.list_metrics(ids=[m.id])[0] self.storage.add_measures(m, [ storage.Measure(datetime.datetime(2014, 1, 1, 12, 0, 15), 1), ]) @@ -566,23 +567,11 @@ class TestStorageDriver(tests_base.TestCase): # shrink timespan self.index.update_archive_policy( name, [archive_policy.ArchivePolicyItem(granularity=5, points=2)]) - # unchanged after update if no samples - self.storage.process_background_tasks(self.index, sync=True) + m = self.index.list_metrics(ids=[m.id])[0] self.assertEqual([ - (utils.datetime_utc(2014, 1, 1, 12, 0, 0), 5.0, 1.0), - (utils.datetime_utc(2014, 1, 1, 12, 0, 5), 5.0, 1.0), (utils.datetime_utc(2014, 1, 1, 12, 0, 10), 5.0, 1.0), (utils.datetime_utc(2014, 1, 1, 12, 0, 15), 5.0, 1.0), ], self.storage.get_measures(m)) - # drop points - self.storage.add_measures(m, [ - storage.Measure(datetime.datetime(2014, 1, 1, 12, 0, 20), 1), - ]) - self.storage.process_background_tasks(self.index, sync=True) - self.assertEqual([ - (utils.datetime_utc(2014, 1, 1, 12, 0, 15), 5.0, 1.0), - (utils.datetime_utc(2014, 1, 1, 12, 0, 20), 5.0, 1.0), - ], self.storage.get_measures(m)) class TestMeasureQuery(base.BaseTestCase): -- GitLab From db0eb3fdc14d9cd5f6022d49933d03a4d4ccef5e Mon Sep 17 00:00:00 2001 From: Julien 
Danjou Date: Mon, 18 Jul 2016 17:11:22 +0200 Subject: [PATCH 0286/1483] rest: enable CORS middleware without Paste It's impossible to pass a ConfigObj via Paste, so let's just embed it in our application directly. Change-Id: Iac4bc42102b02e996159e65df223ae09a2ca10d5 --- etc/gnocchi/api-paste.ini | 8 ++------ gnocchi/rest/app.py | 4 +++- 2 files changed, 5 insertions(+), 7 deletions(-) diff --git a/etc/gnocchi/api-paste.ini b/etc/gnocchi/api-paste.ini index 451243b9..cb7becb0 100644 --- a/etc/gnocchi/api-paste.ini +++ b/etc/gnocchi/api-paste.ini @@ -13,10 +13,10 @@ use = egg:Paste#urlmap /v1 = gnocchiv1+auth [pipeline:gnocchiv1+noauth] -pipeline = cors gnocchiv1 +pipeline = gnocchiv1 [pipeline:gnocchiv1+auth] -pipeline = cors keystone_authtoken gnocchiv1 +pipeline = keystone_authtoken gnocchiv1 [app:gnocchiversions] paste.app_factory = gnocchi.rest.app:app_factory @@ -29,7 +29,3 @@ root = gnocchi.rest.V1Controller [filter:keystone_authtoken] paste.filter_factory = keystonemiddleware.auth_token:filter_factory oslo_config_project = gnocchi - -[filter:cors] -paste.filter_factory = oslo_middleware.cors:filter_factory -oslo_config_project = gnocchi diff --git a/gnocchi/rest/app.py b/gnocchi/rest/app.py index 2417166e..5b1c13ed 100644 --- a/gnocchi/rest/app.py +++ b/gnocchi/rest/app.py @@ -18,6 +18,7 @@ import uuid from oslo_config import cfg from oslo_log import log +from oslo_middleware import cors from oslo_policy import policy from paste import deploy import pecan @@ -128,7 +129,8 @@ def _setup_app(root, conf, indexer, storage, not_implemented_middleware): def app_factory(global_config, **local_conf): global APPCONFIGS appconfig = APPCONFIGS.get(global_config.get('configkey')) - return _setup_app(root=local_conf.get('root'), **appconfig) + app = _setup_app(root=local_conf.get('root'), **appconfig) + return cors.CORS(app, conf=appconfig['conf']) def build_wsgi_app(): -- GitLab From d46fa37277b9a7fcc60f13b879dad7cf36c7ef49 Mon Sep 17 00:00:00 2001 From: Julien Danjou 
Date: Wed, 13 Jul 2016 16:10:00 +0200 Subject: [PATCH 0287/1483] rest: set useful default values for CORS middleware This makes Grafana easier to setup. Change-Id: Ib53e1a3c8c9e986af5c58697476dd2922f6526fd --- devstack/plugin.sh | 1 - doc/source/grafana.rst | 1 - gnocchi/opts.py | 12 ++++++++++++ gnocchi/service.py | 1 + setup.cfg | 3 +++ 5 files changed, 16 insertions(+), 2 deletions(-) diff --git a/devstack/plugin.sh b/devstack/plugin.sh index 02220866..a7f0cc65 100644 --- a/devstack/plugin.sh +++ b/devstack/plugin.sh @@ -279,7 +279,6 @@ function configure_gnocchi { iniset $GNOCCHI_PASTE_CONF pipeline:main pipeline gnocchi+auth if is_service_enabled gnocchi-grafana; then iniset $GNOCCHI_CONF cors allowed_origin ${GRAFANA_URL} - iniset $GNOCCHI_CONF cors allow_headers Content-Type,Cache-Control,Content-Language,Expires,Last-Modified,Pragma,X-Auth-Token,X-Subject-Token,X-User-Id,X-Domain-Id,X-Project-Id,X-Roles fi else iniset $GNOCCHI_PASTE_CONF pipeline:main pipeline gnocchi+noauth diff --git a/doc/source/grafana.rst b/doc/source/grafana.rst index ed8dcd5e..ab47dea2 100644 --- a/doc/source/grafana.rst +++ b/doc/source/grafana.rst @@ -32,7 +32,6 @@ steps: [cors] allowed_origin = http://example.com/grafana - allow_headers = Content-Type,Cache-Control,Content-Language,Expires,Last-Modified,Pragma,X-Auth-Token,X-Subject-Token,X-User-Id,X-Domain-Id,X-Project-Id,X-Roles 2. 
Configure the CORS middleware in Keystone to allow request from Grafana too: diff --git a/gnocchi/opts.py b/gnocchi/opts.py index 7487cc97..453510d2 100644 --- a/gnocchi/opts.py +++ b/gnocchi/opts.py @@ -14,6 +14,7 @@ import itertools from oslo_config import cfg +from oslo_middleware import cors import uuid import gnocchi.archive_policy @@ -76,3 +77,14 @@ def list_opts(): )), ("archive_policy", gnocchi.archive_policy.OPTS), ] + + +def set_defaults(): + cfg.set_defaults(cors.CORS_OPTS, + allow_headers=[ + 'X-Auth-Token', + 'X-Subject-Token', + 'X-User-Id', + 'X-Domain-Id', + 'X-Project-Id', + 'X-Roles']) diff --git a/gnocchi/service.py b/gnocchi/service.py index 9f500caa..cada5554 100644 --- a/gnocchi/service.py +++ b/gnocchi/service.py @@ -34,6 +34,7 @@ def prepare_service(args=None, conf=None, default_config_files=None): if conf is None: conf = cfg.ConfigOpts() + opts.set_defaults() # FIXME(jd) Use the pkg_entry info to register the options of these libs log.register_options(conf) db_options.set_defaults(conf) diff --git a/setup.cfg b/setup.cfg index 4f0f18b1..8d52202b 100644 --- a/setup.cfg +++ b/setup.cfg @@ -122,6 +122,9 @@ wsgi_scripts = oslo.config.opts = gnocchi = gnocchi.opts:list_opts +oslo.config.opts.defaults = + gnocchi = gnocchi.opts:set_defaults + tempest.test_plugins = gnocchi_tests = gnocchi.tempest.plugin:GnocchiTempestPlugin -- GitLab From d272d072909b73597405ff6006d6cd60e1b3e57e Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Mon, 18 Jul 2016 15:23:09 +0200 Subject: [PATCH 0288/1483] Fix list resource race gnocchi.tests.test_indexer.TestIndexerDriver.test_list_resources_* tests can raise: gnocchi.indexer.UnexpectedResourceTypeState: Resource type indexer_test state is deleting (expected: active) This change fixes the TODO that describes this race. 
Change-Id: Ic45db9f4d42769e6c779fa5362e3c9257aee5957 --- gnocchi/indexer/sqlalchemy.py | 23 +++++++++++------------ 1 file changed, 11 insertions(+), 12 deletions(-) diff --git a/gnocchi/indexer/sqlalchemy.py b/gnocchi/indexer/sqlalchemy.py index d081ba94..31c674ca 100644 --- a/gnocchi/indexer/sqlalchemy.py +++ b/gnocchi/indexer/sqlalchemy.py @@ -902,20 +902,19 @@ class SQLAlchemyIndexer(indexer.IndexerDriver): # No need for a second query all_resources.extend(resources) else: - # TODO(sileht): _resource_type_to_classes can raise - # UnexpectedResourceTypeState or NoSuchResourceType if - # all resources of 'type' and the resource_type 'type' - # is deleted between q.all() and here. This don't - # have many change to occurs. An enhancement can be to - # drop from all_resources the delete resource types. - if is_history: + try: target_cls = self._resource_type_to_classes( - session, type)['history'] - f = target_cls.revision.in_( - [r.revision for r in resources]) + session, type)['history' if is_history else + 'resource'] + except (indexer.UnexpectedResourceTypeState, + indexer.NoSuchResourceType): + # NOTE(sileht): This resource_type have been + # removed in the meantime. 
+ continue + if is_history: + f = target_cls.revision.in_([r.revision + for r in resources]) else: - target_cls = self._resource_type_to_classes( - session, type)["resource"] f = target_cls.id.in_([r.id for r in resources]) q = session.query(target_cls).filter(f) -- GitLab From 5551a27691603087d74020a67fe9b6a80b529587 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Wed, 8 Jun 2016 11:12:33 +0200 Subject: [PATCH 0289/1483] sqlalchemy: fix PostgreSQL transaction aborted in unmap_and_delete_tables This can happen: oslo_db.exception.DBError: (psycopg2.InternalError) current transaction is aborted, commands ignored until end of transaction block [SQL: 'ALTER TABLE rt_2864cce8c5904487b8c7dc48b0c80619 DROP CONSTRAINT fk_rt_2864cce8c5904487b8c7dc48b0c80619_id_resource_id'] Change-Id: I98353265f6a84d8f8177c095b491510f4c9ed199 --- gnocchi/indexer/sqlalchemy.py | 58 ++++++++++++++++++++++------------- setup.cfg | 4 +-- 2 files changed, 39 insertions(+), 23 deletions(-) diff --git a/gnocchi/indexer/sqlalchemy.py b/gnocchi/indexer/sqlalchemy.py index 31c674ca..9495d283 100644 --- a/gnocchi/indexer/sqlalchemy.py +++ b/gnocchi/indexer/sqlalchemy.py @@ -156,19 +156,7 @@ class ResourceClassMapper(object): with facade.writer_connection() as connection: Base.metadata.create_all(connection, tables=tables) except exception.DBError as e: - # HACK(jd) Sometimes, PostgreSQL raises an error such as - # "current transaction is aborted, commands ignored until end - # of transaction block" on its own catalog, so we need to - # retry, but this is not caught by oslo.db as a deadlock. This - # is likely because when we use Base.metadata.create_all(), - # sqlalchemy itself gets an error it does not catch or - # something. So this is paperover I guess. 
- inn_e = e.inner_exception - if (psycopg2 - and isinstance(inn_e, sqlalchemy.exc.InternalError) - and isinstance(inn_e.orig, psycopg2.InternalError) - # current transaction is aborted - and inn_e.orig.pgcode == '25P02'): + if self._is_current_transaction_aborted(e): raise exception.RetryRequest(e) raise @@ -176,6 +164,23 @@ class ResourceClassMapper(object): # get_classes cannot be called in state creating self._cache[resource_type.tablename] = mappers + @staticmethod + def _is_current_transaction_aborted(exception): + # HACK(jd) Sometimes, PostgreSQL raises an error such as "current + # transaction is aborted, commands ignored until end of transaction + # block" on its own catalog, so we need to retry, but this is not + # caught by oslo.db as a deadlock. This is likely because when we use + # Base.metadata.create_all(), sqlalchemy itself gets an error it does + # not catch or something. So this is why this function exists. To + # paperover I guess. + inn_e = exception.inner_exception + return (psycopg2 + and isinstance(inn_e, sqlalchemy.exc.InternalError) + and isinstance(inn_e.orig, psycopg2.InternalError) + # current transaction is aborted + and inn_e.orig.pgcode == '25P02') + + @retry_on_deadlock def unmap_and_delete_tables(self, resource_type, connection): if resource_type.state != "deleting": raise RuntimeError("unmap_and_delete_tables must be called in " @@ -196,14 +201,25 @@ class ResourceClassMapper(object): # the resource_type table is already cleaned and commited # so this code cannot be triggerred anymore for this # resource_type - for table in tables: - for fk in table.foreign_key_constraints: - self._safe_execute( - connection, - sqlalchemy.schema.DropConstraint(fk)) - for table in tables: - self._safe_execute(connection, - sqlalchemy.schema.DropTable(table)) + try: + for table in tables: + for fk in table.foreign_key_constraints: + try: + self._safe_execute( + connection, + sqlalchemy.schema.DropConstraint(fk)) + except 
exception.DBNonExistentConstraint: + pass + for table in tables: + try: + self._safe_execute(connection, + sqlalchemy.schema.DropTable(table)) + except exception.DBNonExistentTable: + pass + except exception.DBError as e: + if self._is_current_transaction_aborted(e): + raise exception.RetryRequest(e) + raise # NOTE(sileht): If something goes wrong here, we are currently # fucked, that why we expose the state to the superuser. diff --git a/setup.cfg b/setup.cfg index 4f0f18b1..78839e5c 100644 --- a/setup.cfg +++ b/setup.cfg @@ -24,13 +24,13 @@ keystone = keystonemiddleware>=4.0.0 mysql = pymysql - oslo.db>=4.1.0 + oslo.db>=4.8.0 sqlalchemy sqlalchemy-utils alembic>=0.7.6,!=0.8.1 postgresql = psycopg2 - oslo.db>=4.1.0 + oslo.db>=4.8.0 sqlalchemy sqlalchemy-utils alembic>=0.7.6,!=0.8.1 -- GitLab From 95aae750186e1926ddc802f4b2fd3522c90f6924 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Wed, 13 Jul 2016 17:15:07 +0200 Subject: [PATCH 0290/1483] sqlalchemy: remove deprecated kwargs retry_on_request Change-Id: I07a66fffb228fa602e0ed69a363997b44fb2478b --- gnocchi/indexer/sqlalchemy.py | 1 - 1 file changed, 1 deletion(-) diff --git a/gnocchi/indexer/sqlalchemy.py b/gnocchi/indexer/sqlalchemy.py index 9495d283..cb8b549d 100644 --- a/gnocchi/indexer/sqlalchemy.py +++ b/gnocchi/indexer/sqlalchemy.py @@ -63,7 +63,6 @@ def retry_on_deadlock(f): # FIXME(jd) The default values in oslo.db are useless, we need to fix that. # Once it's done, let's remove that wrapper of wrapper. 
return oslo_db.api.wrap_db_retry(retry_on_deadlock=True, - retry_on_request=True, max_retries=10, retry_interval=0.1, inc_retry_interval=True, -- GitLab From 4896b9c6f3088eb676128ce36d55ea94957bf204 Mon Sep 17 00:00:00 2001 From: zhangyanxian Date: Fri, 15 Jul 2016 03:36:30 +0000 Subject: [PATCH 0291/1483] metricd: cleanup logging message for progress Change-Id: I0d5556db381fed09f9aa6af3c298a5fb42803bf0 --- gnocchi/cli.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/gnocchi/cli.py b/gnocchi/cli.py index ca1c7e86..46e65fcd 100644 --- a/gnocchi/cli.py +++ b/gnocchi/cli.py @@ -148,7 +148,7 @@ class MetricReporting(MetricProcessBase): 256, report['summary']['metrics'] // len(self.queues))) for queue in self.queues: queue.put(block_size) - LOG.info("Metricd reporting: %d measurements bundles across %d " + LOG.info("%d measurements bundles across %d " "metrics wait to be processed.", report['summary']['measures'], report['summary']['metrics']) -- GitLab From dbc3b9765a9a92a82f0050be60f0e986b99bd72f Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Thu, 21 Jul 2016 16:03:49 +0200 Subject: [PATCH 0292/1483] Add iso8601 to requirements We actually import it directly in some cases, so we need to directly depend on it. Change-Id: Ief58722ca562ef464ebe3d1e2fa2294016c5d9a6 --- requirements.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/requirements.txt b/requirements.txt index 340ad891..88ef618f 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,5 +1,6 @@ pbr numpy +iso8601 oslo.config>=2.6.0 oslo.log>=1.0.0 oslo.policy>=0.3.0 -- GitLab From 5baba422ec391fc2f6ca84ba9642fdd8a7aa4b55 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Wed, 13 Jul 2016 17:16:18 +0200 Subject: [PATCH 0293/1483] sqlalchemy: simplify kwarg of retry Use the default value of inc_retry_interval=True and remove the comment that is now partially fixed, at least. 
Change-Id: I6e5047137ca9ab5a712bbf7a75fae5bea548c889 --- gnocchi/indexer/sqlalchemy.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/gnocchi/indexer/sqlalchemy.py b/gnocchi/indexer/sqlalchemy.py index cb8b549d..c816b36f 100644 --- a/gnocchi/indexer/sqlalchemy.py +++ b/gnocchi/indexer/sqlalchemy.py @@ -60,12 +60,9 @@ LOG = log.getLogger(__name__) def retry_on_deadlock(f): - # FIXME(jd) The default values in oslo.db are useless, we need to fix that. - # Once it's done, let's remove that wrapper of wrapper. return oslo_db.api.wrap_db_retry(retry_on_deadlock=True, max_retries=10, retry_interval=0.1, - inc_retry_interval=True, max_retry_interval=2)(f) -- GitLab From 0ddbe2fba4b79851e7855fe91f8751c563899bea Mon Sep 17 00:00:00 2001 From: gord chung Date: Tue, 19 Jul 2016 15:38:51 +0000 Subject: [PATCH 0294/1483] improve task distribution let a single process gather metrics to process. it will then pipe the metrics to a queue and all the workers will grab a chunk. this better distributes tasks to avoid overlap between workers and also minimises the load on backend where previously all the workers queried backend for jobs. Change-Id: I75c9091f09272bccbd859996e4d1745eadf8c329 --- gnocchi/cli.py | 84 ++++++++++++++++++++------------ gnocchi/storage/__init__.py | 10 ++-- gnocchi/storage/_carbonara.py | 7 +-- gnocchi/storage/ceph.py | 6 +-- gnocchi/storage/file.py | 5 +- gnocchi/storage/swift.py | 6 +-- gnocchi/tests/gabbi/fixtures.py | 4 +- gnocchi/tests/test_aggregates.py | 4 +- gnocchi/tests/test_rest.py | 4 +- gnocchi/tests/test_statsd.py | 16 ++++-- gnocchi/tests/test_storage.py | 48 ++++++++++-------- 11 files changed, 116 insertions(+), 78 deletions(-) diff --git a/gnocchi/cli.py b/gnocchi/cli.py index 46e65fcd..08bfd27c 100644 --- a/gnocchi/cli.py +++ b/gnocchi/cli.py @@ -14,7 +14,6 @@ # See the License for the specific language governing permissions and # limitations under the License. 
import multiprocessing -import sys import threading import time @@ -95,7 +94,6 @@ class MetricProcessBase(cotyledon.Service): def _configure(self): try: self.store = storage.get_driver(self.conf) - self.store.partition = self.worker_id except storage.StorageError as e: LOG.error("Unable to initialize storage: %s" % e) raise Retry(e) @@ -136,18 +134,13 @@ class MetricProcessBase(cotyledon.Service): class MetricReporting(MetricProcessBase): name = "reporting" - def __init__(self, worker_id, conf, queues): + def __init__(self, worker_id, conf): super(MetricReporting, self).__init__( worker_id, conf, conf.storage.metric_reporting_delay) - self.queues = queues def _run_job(self): try: report = self.store.measures_report(details=False) - block_size = max(16, min( - 256, report['summary']['metrics'] // len(self.queues))) - for queue in self.queues: - queue.put(block_size) LOG.info("%d measurements bundles across %d " "metrics wait to be processed.", report['summary']['measures'], @@ -156,9 +149,44 @@ class MetricReporting(MetricProcessBase): LOG.error("Unexpected error during pending measures reporting", exc_info=True) + +class MetricScheduler(MetricProcessBase): + name = "scheduler" + BLOCK_SIZE = 500 + MAX_OVERLAP = 0.3 + + def __init__(self, worker_id, conf, queue): + super(MetricScheduler, self).__init__( + worker_id, conf, conf.storage.metric_processing_delay) + self.queue = queue + self.previously_scheduled_metrics = set() + + def _run_job(self): + try: + # TODO(gordc): add support to detect other agents to enable + # partitioning + metrics = set(self.store.list_metric_with_measures_to_process( + self.BLOCK_SIZE, 0)) + if metrics and not self.queue.empty(): + # NOTE(gordc): drop metrics we previously process to avoid + # handling twice + number_of_scheduled_metrics = len(metrics) + metrics = metrics - self.previously_scheduled_metrics + if (float(number_of_scheduled_metrics - len(metrics)) / + self.BLOCK_SIZE > self.MAX_OVERLAP): + LOG.warning('Metric processing 
lagging scheduling rate. ' + 'It is recommended to increase the number of ' + 'workers or to lengthen processing interval.') + for m_id in metrics: + self.queue.put(m_id) + self.previously_scheduled_metrics = metrics + LOG.debug("%d metrics scheduled for processing.", len(metrics)) + except Exception: + LOG.error("Unexpected error scheduling metrics for processing", + exc_info=True) + def close_queues(self): - for queue in self.queues: - queue.close() + self.queue.close() class MetricJanitor(MetricProcessBase): @@ -178,20 +206,23 @@ class MetricJanitor(MetricProcessBase): class MetricProcessor(MetricProcessBase): name = "processing" + BLOCK_SIZE = 4 def __init__(self, worker_id, conf, queue): - super(MetricProcessor, self).__init__( - worker_id, conf, conf.storage.metric_processing_delay) + super(MetricProcessor, self).__init__(worker_id, conf, 1) self.queue = queue - self.block_size = 128 def _run_job(self): try: - while not self.queue.empty(): - self.block_size = self.queue.get() - LOG.debug("Re-configuring worker to handle up to %s " - "metrics", self.block_size) - self.store.process_background_tasks(self.index, self.block_size) + metrics = [] + while len(metrics) < self.BLOCK_SIZE: + try: + metrics.append(self.queue.get(block=False)) + except six.moves.queue.Empty: + # queue might be emptied by other workers, continue on. 
+ break + if metrics: + self.store.process_background_tasks(self.index, metrics) except Exception: LOG.error("Unexpected error during measures processing", exc_info=True) @@ -204,22 +235,15 @@ class MetricdServiceManager(cotyledon.ServiceManager): def __init__(self, conf): super(MetricdServiceManager, self).__init__() self.conf = conf - self.queues = [multiprocessing.Queue() - for i in range(conf.metricd.workers)] + self.queue = multiprocessing.Manager().Queue() - self.add(self.create_processor, workers=conf.metricd.workers) - self.add(MetricReporting, args=(self.conf, self.queues)) + self.add(MetricScheduler, args=(self.conf, self.queue)) + self.add(MetricProcessor, args=(self.conf, self.queue), + workers=conf.metricd.workers) + self.add(MetricReporting, args=(self.conf,)) self.add(MetricJanitor, args=(self.conf,)) - def create_processor(self, worker_id): - queue = self.queues[worker_id - 1] - return MetricProcessor(worker_id, self.conf, queue) - def metricd(): conf = service.prepare_service() - if (conf.storage.metric_reporting_delay < - conf.storage.metric_processing_delay): - LOG.error("Metric reporting must run less frequently then processing") - sys.exit(0) MetricdServiceManager(conf).run() diff --git a/gnocchi/storage/__init__.py b/gnocchi/storage/__init__.py index b438e916..2a568803 100644 --- a/gnocchi/storage/__init__.py +++ b/gnocchi/storage/__init__.py @@ -28,9 +28,9 @@ OPTS = [ default='file', help='Storage driver to use'), cfg.IntOpt('metric_processing_delay', - default=5, + default=10, help="How many seconds to wait between " - "new metric measure processing"), + "scheduling new metrics to process"), cfg.IntOpt('metric_reporting_delay', default=60, help="How many seconds to wait between " @@ -172,7 +172,7 @@ class StorageDriver(object): def upgrade(index): pass - def process_background_tasks(self, index, block_size=128, sync=False): + def process_background_tasks(self, index, metrics, sync=False): """Process background tasks for this storage. 
This calls :func:`process_new_measures` to process new measures @@ -185,7 +185,7 @@ class StorageDriver(object): """ LOG.debug("Processing new measures") try: - self.process_new_measures(index, block_size, sync) + self.process_new_measures(index, metrics, sync) except Exception: if sync: raise @@ -228,7 +228,7 @@ class StorageDriver(object): raise exceptions.NotImplementedError @staticmethod - def process_new_measures(indexer=None, block_size=None, sync=False): + def process_new_measures(indexer, metrics, sync=False): """Process added measures in background. Some drivers might need to have a background task running that process diff --git a/gnocchi/storage/_carbonara.py b/gnocchi/storage/_carbonara.py index d3af3fa7..17434acf 100644 --- a/gnocchi/storage/_carbonara.py +++ b/gnocchi/storage/_carbonara.py @@ -59,7 +59,6 @@ class CarbonaraBasedStorage(storage.StorageDriver): except Exception as e: raise storage.StorageError("Unable to start coordinator: %s" % e) self.aggregation_workers_number = conf.aggregation_workers_number - self.partition = 0 def stop(self): self.coord.stop() @@ -216,7 +215,7 @@ class CarbonaraBasedStorage(storage.StorageDriver): raise NotImplementedError @staticmethod - def _list_metric_with_measures_to_process(full=False): + def list_metric_with_measures_to_process(size, part, full=False): raise NotImplementedError @staticmethod @@ -297,9 +296,7 @@ class CarbonaraBasedStorage(storage.StorageDriver): self._check_for_metric_upgrade, ((metric,) for metric in index.list_metrics())) - def process_new_measures(self, indexer, block_size, sync=False): - metrics_to_process = self._list_metric_with_measures_to_process( - block_size, full=sync) + def process_new_measures(self, indexer, metrics_to_process, sync=False): metrics = indexer.list_metrics(ids=metrics_to_process) # This build the list of deleted metrics, i.e. the metrics we have # measures to process for but that are not in the indexer anymore. 
diff --git a/gnocchi/storage/ceph.py b/gnocchi/storage/ceph.py index c4170c2a..15e1dadc 100644 --- a/gnocchi/storage/ceph.py +++ b/gnocchi/storage/ceph.py @@ -183,14 +183,12 @@ class CephStorage(_carbonara.CarbonaraBasedStorage): object_prefix = self.MEASURE_PREFIX + "_" + str(metric_id) return len(list(self._list_object_names_to_process(object_prefix))) - def _list_metric_with_measures_to_process(self, block_size, full=False): + def list_metric_with_measures_to_process(self, size, part, full=False): names = self._list_object_names_to_process() if full: objs_it = names else: - objs_it = itertools.islice( - names, block_size * self.partition, - block_size * (self.partition + 1)) + objs_it = itertools.islice(names, size * part, size * (part + 1)) return set([name.split("_")[1] for name in objs_it]) def _delete_unprocessed_measures_for_metric_id(self, metric_id): diff --git a/gnocchi/storage/file.py b/gnocchi/storage/file.py index 9da136ff..cd8a150f 100644 --- a/gnocchi/storage/file.py +++ b/gnocchi/storage/file.py @@ -140,11 +140,10 @@ class FileStorage(_carbonara.CarbonaraBasedStorage): return (len(metric_details.keys()), sum(metric_details.values()), metric_details if details else None) - def _list_metric_with_measures_to_process(self, block_size, full=False): + def list_metric_with_measures_to_process(self, size, part, full=False): if full: return os.listdir(self.measure_path) - return os.listdir(self.measure_path)[ - block_size * self.partition:block_size * (self.partition + 1)] + return os.listdir(self.measure_path)[size * part:size * (part + 1)] def _list_measures_container_for_metric_id(self, metric_id): try: diff --git a/gnocchi/storage/swift.py b/gnocchi/storage/swift.py index f9905b33..cacd8797 100644 --- a/gnocchi/storage/swift.py +++ b/gnocchi/storage/swift.py @@ -132,16 +132,16 @@ class SwiftStorage(_carbonara.CarbonaraBasedStorage): metric_details[metric] += 1 return metrics, measures, metric_details if details else None - def 
_list_metric_with_measures_to_process(self, block_size, full=False): + def list_metric_with_measures_to_process(self, size, part, full=False): limit = None if not full: - limit = block_size * (self.partition + 1) + limit = size * (part + 1) headers, files = self.swift.get_container(self.MEASURE_PREFIX, delimiter='/', full_listing=full, limit=limit) if not full: - files = files[block_size * self.partition:] + files = files[size * part:] return set(f['subdir'][:-1] for f in files if 'subdir' in f) def _list_measure_files_for_metric_id(self, metric_id): diff --git a/gnocchi/tests/gabbi/fixtures.py b/gnocchi/tests/gabbi/fixtures.py index c935a494..1d94bec4 100644 --- a/gnocchi/tests/gabbi/fixtures.py +++ b/gnocchi/tests/gabbi/fixtures.py @@ -165,7 +165,9 @@ class MetricdThread(threading.Thread): def run(self): while self.flag: - self.storage.process_background_tasks(self.index) + metrics = self.storage.list_metric_with_measures_to_process( + None, None, full=True) + self.storage.process_background_tasks(self.index, metrics) time.sleep(0.1) def stop(self): diff --git a/gnocchi/tests/test_aggregates.py b/gnocchi/tests/test_aggregates.py index 266e9298..c4c79015 100644 --- a/gnocchi/tests/test_aggregates.py +++ b/gnocchi/tests/test_aggregates.py @@ -66,7 +66,9 @@ class TestAggregates(tests_base.TestCase): str(uuid.uuid4()), str(uuid.uuid4()), 'medium') self.storage.add_measures(metric, measures) - self.storage.process_background_tasks(self.index, sync=True) + metrics = self.storage.list_metric_with_measures_to_process( + None, None, full=True) + self.storage.process_background_tasks(self.index, metrics, sync=True) return metric diff --git a/gnocchi/tests/test_rest.py b/gnocchi/tests/test_rest.py index c17ea170..fe18f3b6 100644 --- a/gnocchi/tests/test_rest.py +++ b/gnocchi/tests/test_rest.py @@ -87,7 +87,9 @@ class TestingApp(webtest.TestApp): if self.auth: req.headers['X-Auth-Token'] = self.token response = super(TestingApp, self).do_request(req, *args, **kwargs) - 
self.storage.process_background_tasks(self.indexer, sync=True) + metrics = self.storage.list_metric_with_measures_to_process( + None, None, full=True) + self.storage.process_background_tasks(self.indexer, metrics, sync=True) return response diff --git a/gnocchi/tests/test_statsd.py b/gnocchi/tests/test_statsd.py index ec8b2e23..912f13dc 100644 --- a/gnocchi/tests/test_statsd.py +++ b/gnocchi/tests/test_statsd.py @@ -68,8 +68,10 @@ class TestStatsd(tests_base.TestCase): metric = r.get_metric(metric_key) + metrics = self.stats.storage.list_metric_with_measures_to_process( + None, None, full=True) self.stats.storage.process_background_tasks( - self.stats.indexer, sync=True) + self.stats.indexer, metrics, sync=True) measures = self.stats.storage.get_measures(metric) self.assertEqual([ @@ -88,8 +90,10 @@ class TestStatsd(tests_base.TestCase): ("127.0.0.1", 12345)) self.stats.flush() + metrics = self.stats.storage.list_metric_with_measures_to_process( + None, None, full=True) self.stats.storage.process_background_tasks( - self.stats.indexer, sync=True) + self.stats.indexer, metrics, sync=True) measures = self.stats.storage.get_measures(metric) self.assertEqual([ @@ -121,8 +125,10 @@ class TestStatsd(tests_base.TestCase): metric = r.get_metric(metric_key) self.assertIsNotNone(metric) + metrics = self.stats.storage.list_metric_with_measures_to_process( + None, None, full=True) self.stats.storage.process_background_tasks( - self.stats.indexer, sync=True) + self.stats.indexer, metrics, sync=True) measures = self.stats.storage.get_measures(metric) self.assertEqual([ @@ -139,8 +145,10 @@ class TestStatsd(tests_base.TestCase): ("127.0.0.1", 12345)) self.stats.flush() + metrics = self.stats.storage.list_metric_with_measures_to_process( + None, None, full=True) self.stats.storage.process_background_tasks( - self.stats.indexer, sync=True) + self.stats.indexer, metrics, sync=True) measures = self.stats.storage.get_measures(metric) self.assertEqual([ diff --git 
a/gnocchi/tests/test_storage.py b/gnocchi/tests/test_storage.py index e0be09b6..58936c7d 100644 --- a/gnocchi/tests/test_storage.py +++ b/gnocchi/tests/test_storage.py @@ -45,6 +45,12 @@ class TestStorageDriver(tests_base.TestCase): archive_policy_name) return m, m_sql + @staticmethod + def trigger_processing(storage, index): + metrics = storage.list_metric_with_measures_to_process( + None, None, full=True) + storage.process_background_tasks(index, metrics, sync=True) + def test_get_driver(self): self.conf.set_override('driver', 'null', 'storage') driver = storage.get_driver(self.conf) @@ -58,7 +64,7 @@ class TestStorageDriver(tests_base.TestCase): self.storage.add_measures(self.metric, [ storage.Measure(datetime.datetime(2014, 1, 1, 12, 0, 1), 69), ]) - self.storage.process_background_tasks(self.index, sync=True) + self.trigger_processing(self.storage, self.index) self.storage.add_measures(self.metric, [ storage.Measure(datetime.datetime(2014, 1, 1, 13, 0, 1), 1), @@ -67,7 +73,7 @@ class TestStorageDriver(tests_base.TestCase): side_effect=ValueError("boom!")): with mock.patch('gnocchi.carbonara.msgpack.loads', side_effect=ValueError("boom!")): - self.storage.process_background_tasks(self.index, sync=True) + self.trigger_processing(self.storage, self.index) self.assertEqual([ (utils.datetime_utc(2014, 1, 1), 86400.0, 1), @@ -79,22 +85,22 @@ class TestStorageDriver(tests_base.TestCase): self.storage.add_measures(self.metric, [ storage.Measure(datetime.datetime(2014, 1, 1, 12, 0, 1), 69), ]) - self.storage.process_background_tasks(self.index, sync=True) + self.trigger_processing(self.storage, self.index) self.storage.delete_metric(self.metric) - self.storage.process_background_tasks(self.index, sync=True) + self.trigger_processing(self.storage, self.index) def test_delete_nonempty_metric_unprocessed(self): self.storage.add_measures(self.metric, [ storage.Measure(datetime.datetime(2014, 1, 1, 12, 0, 1), 69), ]) self.storage.delete_metric(self.metric) - 
self.storage.process_background_tasks(self.index, sync=True) + self.trigger_processing(self.storage, self.index) def test_delete_expunge_metric(self): self.storage.add_measures(self.metric, [ storage.Measure(datetime.datetime(2014, 1, 1, 12, 0, 1), 69), ]) - self.storage.process_background_tasks(self.index, sync=True) + self.trigger_processing(self.storage, self.index) self.index.delete_metric(self.metric.id) self.storage.expunge_metrics(self.index, sync=True) self.assertRaises(indexer.NoSuchMetric, self.index.delete_metric, @@ -120,7 +126,7 @@ class TestStorageDriver(tests_base.TestCase): self.storage.add_measures(m, [ storage.Measure(datetime.datetime(2014, 1, 1, 12, i, j), 100) for i in six.moves.range(0, 60) for j in six.moves.range(0, 60)]) - self.storage.process_background_tasks(self.index, sync=True) + self.trigger_processing(self.storage, self.index) self.assertEqual(3661, len(self.storage.get_measures(m))) @@ -131,7 +137,7 @@ class TestStorageDriver(tests_base.TestCase): storage.Measure(datetime.datetime(2014, 1, 6, i, j, 0), 100) for i in six.moves.range(2) for j in six.moves.range(0, 60, 2)] self.storage.add_measures(m, measures) - self.storage.process_background_tasks(self.index, sync=True) + self.trigger_processing(self.storage, self.index) # add measure to end, in same aggregate time as last point. self.storage.add_measures(m, [ @@ -139,7 +145,7 @@ class TestStorageDriver(tests_base.TestCase): with mock.patch.object(self.storage, '_store_metric_measures') as c: # should only resample last aggregate - self.storage.process_background_tasks(self.index, sync=True) + self.trigger_processing(self.storage, self.index) count = 0 for call in c.mock_calls: # policy is 60 points and split is 48. 
should only update 2nd half @@ -153,14 +159,14 @@ class TestStorageDriver(tests_base.TestCase): storage.Measure(datetime.datetime(2014, 1, 6, i, j, 0), 100) for i in six.moves.range(2) for j in six.moves.range(0, 60, 2)] self.storage.add_measures(m, measures) - self.storage.process_background_tasks(self.index, sync=True) + self.trigger_processing(self.storage, self.index) # add measure to end, in same aggregate time as last point. new_point = datetime.datetime(2014, 1, 6, 1, 58, 1) self.storage.add_measures(m, [storage.Measure(new_point, 100)]) with mock.patch.object(self.storage, '_add_measures') as c: - self.storage.process_background_tasks(self.index, sync=True) + self.trigger_processing(self.storage, self.index) for __, args, __ in c.mock_calls: self.assertEqual( args[3].first, carbonara.TimeSerie.round_timestamp( @@ -173,7 +179,7 @@ class TestStorageDriver(tests_base.TestCase): storage.Measure(datetime.datetime(2014, 1, 1, 12, 9, 31), 4), storage.Measure(datetime.datetime(2014, 1, 1, 12, 12, 45), 44), ]) - self.storage.process_background_tasks(self.index, sync=True) + self.trigger_processing(self.storage, self.index) self.assertEqual([ (utils.datetime_utc(2014, 1, 1), 86400.0, 39.75), @@ -187,7 +193,7 @@ class TestStorageDriver(tests_base.TestCase): self.storage.add_measures(self.metric, [ storage.Measure(datetime.datetime(2015, 1, 1, 12, 0, 1), 69), ]) - self.storage.process_background_tasks(self.index, sync=True) + self.trigger_processing(self.storage, self.index) self.assertEqual([ (utils.datetime_utc(2014, 1, 1), 86400.0, 39.75), @@ -201,13 +207,13 @@ class TestStorageDriver(tests_base.TestCase): storage.Measure(datetime.datetime(2014, 1, 1, 12, 0, 1), 69), storage.Measure(datetime.datetime(2014, 1, 1, 12, 7, 31), 42), ]) - self.storage.process_background_tasks(self.index, sync=True) + self.trigger_processing(self.storage, self.index) self.storage.add_measures(self.metric, [ storage.Measure(datetime.datetime(2014, 1, 1, 12, 9, 31), 4), 
storage.Measure(datetime.datetime(2014, 1, 1, 12, 12, 45), 44), ]) - self.storage.process_background_tasks(self.index, sync=True) + self.trigger_processing(self.storage, self.index) self.assertEqual([ (utils.datetime_utc(2014, 1, 1), 86400.0, 39.75), @@ -240,7 +246,7 @@ class TestStorageDriver(tests_base.TestCase): storage.Measure(datetime.datetime(2014, 1, 1, 12, 9, 31), 4), storage.Measure(datetime.datetime(2014, 1, 1, 12, 12, 45), 44), ]) - self.storage.process_background_tasks(self.index, sync=True) + self.trigger_processing(self.storage, self.index) self.assertEqual([ (utils.datetime_utc(2014, 1, 1), 86400.0, 39.75), @@ -408,7 +414,7 @@ class TestStorageDriver(tests_base.TestCase): storage.Measure(datetime.datetime(2014, 1, 1, 12, 10, 31), 4), storage.Measure(datetime.datetime(2014, 1, 1, 12, 13, 10), 4), ]) - self.storage.process_background_tasks(self.index, sync=True) + self.trigger_processing(self.storage, self.index) values = self.storage.get_cross_metric_measures([self.metric, metric2]) self.assertEqual([ @@ -484,7 +490,7 @@ class TestStorageDriver(tests_base.TestCase): storage.Measure(datetime.datetime(2014, 1, 1, 12, 9, 31), 6), storage.Measure(datetime.datetime(2014, 1, 1, 12, 13, 10), 2), ]) - self.storage.process_background_tasks(self.index, sync=True) + self.trigger_processing(self.storage, self.index) values = self.storage.get_cross_metric_measures([self.metric, metric2]) self.assertEqual([ @@ -511,7 +517,7 @@ class TestStorageDriver(tests_base.TestCase): storage.Measure(datetime.datetime(2014, 1, 1, 12, 9, 31), 6), storage.Measure(datetime.datetime(2014, 1, 1, 12, 13, 10), 2), ]) - self.storage.process_background_tasks(self.index, sync=True) + self.trigger_processing(self.storage, self.index) self.assertEqual( {metric2: [], @@ -544,7 +550,7 @@ class TestStorageDriver(tests_base.TestCase): storage.Measure(datetime.datetime(2014, 1, 1, 12, 0, 5), 1), storage.Measure(datetime.datetime(2014, 1, 1, 12, 0, 10), 1), ]) - 
self.storage.process_background_tasks(self.index, sync=True) + self.trigger_processing(self.storage, self.index) self.assertEqual([ (utils.datetime_utc(2014, 1, 1, 12, 0, 0), 5.0, 1.0), (utils.datetime_utc(2014, 1, 1, 12, 0, 5), 5.0, 1.0), @@ -557,7 +563,7 @@ class TestStorageDriver(tests_base.TestCase): self.storage.add_measures(m, [ storage.Measure(datetime.datetime(2014, 1, 1, 12, 0, 15), 1), ]) - self.storage.process_background_tasks(self.index, sync=True) + self.trigger_processing(self.storage, self.index) self.assertEqual([ (utils.datetime_utc(2014, 1, 1, 12, 0, 0), 5.0, 1.0), (utils.datetime_utc(2014, 1, 1, 12, 0, 5), 5.0, 1.0), -- GitLab From b580c09091d45ca04ce0728b9968eedbeacefbc3 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Tue, 26 Jul 2016 15:46:43 +0200 Subject: [PATCH 0295/1483] indexer: put extend_existing in __tables_args__ extend_existing sqlalchemy table argument that allows to redefine an already existing Table() object wasn't set at the right place. This change fixes that. 
Change-Id: I9687a07de94186168eb443cf5016dcb28285175e Closes-bug: #1606547 --- gnocchi/indexer/sqlalchemy.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/gnocchi/indexer/sqlalchemy.py b/gnocchi/indexer/sqlalchemy.py index 31c674ca..dda4c4b4 100644 --- a/gnocchi/indexer/sqlalchemy.py +++ b/gnocchi/indexer/sqlalchemy.py @@ -116,18 +116,20 @@ class ResourceClassMapper(object): @staticmethod def _build_class_mappers(resource_type, baseclass=None): tablename = resource_type.tablename + tables_args = {"extend_existing": True} + tables_args.update(base.COMMON_TABLES_ARGS) # TODO(sileht): Add columns if not baseclass: baseclass = resource_type.to_baseclass() resource_ext = type( str("%s_resource" % tablename), (baseclass, base.ResourceExtMixin, base.Resource), - {"__tablename__": tablename, "extend_existing": True}) + {"__tablename__": tablename, "__table_args__": tables_args}) resource_history_ext = type( str("%s_history" % tablename), (baseclass, base.ResourceHistoryExtMixin, base.ResourceHistory), {"__tablename__": ("%s_history" % tablename), - "extend_existing": True}) + "__table_args__": tables_args}) return {'resource': resource_ext, 'history': resource_history_ext} -- GitLab From bfefeb20125abbb3e592cf2df32a9846a1bd2e24 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Tue, 26 Jul 2016 17:25:28 +0200 Subject: [PATCH 0296/1483] carbonara: do not use oslo_log Change-Id: I46483a43a07b13f47bd261085bb92027bf93959d --- gnocchi/carbonara.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/gnocchi/carbonara.py b/gnocchi/carbonara.py index 5c9fb5c7..f7dced7e 100644 --- a/gnocchi/carbonara.py +++ b/gnocchi/carbonara.py @@ -18,6 +18,7 @@ import datetime import functools +import logging import numbers import operator import re @@ -26,7 +27,6 @@ import time import iso8601 import lz4 import msgpack -from oslo_log import log import pandas import six @@ -38,7 +38,7 @@ from gnocchi import utils # to ensure the module is correctly 
loaded before we use really it. time.strptime("2016-02-19", "%Y-%m-%d") -LOG = log.getLogger(__name__) +LOG = logging.getLogger(__name__) class NoDeloreanAvailable(Exception): -- GitLab From defda7329ffeb858b27ddc234ea5443f4af6fdab Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Tue, 26 Jul 2016 17:24:55 +0200 Subject: [PATCH 0297/1483] carbonara: embed a benchmark tool Change-Id: I6f8c6a3f78f484203a6f983e883f38d8fdbaf665 --- gnocchi/carbonara.py | 49 ++++++++++++++++++++++++++++++++- gnocchi/tests/test_carbonara.py | 3 ++ 2 files changed, 51 insertions(+), 1 deletion(-) diff --git a/gnocchi/carbonara.py b/gnocchi/carbonara.py index f7dced7e..0a58ef0f 100644 --- a/gnocchi/carbonara.py +++ b/gnocchi/carbonara.py @@ -15,12 +15,13 @@ # License for the specific language governing permissions and limitations # under the License. """Time series data manipulation, better with pancetta.""" - import datetime import functools import logging +import math import numbers import operator +import random import re import time @@ -510,6 +511,48 @@ class AggregatedTimeSerie(TimeSerie): self._resample(first_timestamp) self._truncate() + @classmethod + def benchmark(cls): + """Run a speed benchmark!""" + points = cls.POINTS_PER_SPLIT + sampling = 5 + compress_times = 50 + + now = datetime.datetime(2015, 4, 3, 23, 11) + + for title, values in [ + ("Simple continuous range", six.moves.range(points)), + ("All 0", [float(0)] * points), + ("All 1", [float(1)] * points), + ("0 and 1", [0, 1] * (points // 2)), + ("1 and 0 random", + [random.randint(0, 1) + for x in six.moves.range(points)]), + ("Small number random pos/neg", + [random.randint(-100000, 10000) + for x in six.moves.range(points)]), + ("Small number random pos", + [random.randint(0, 20000) for x in six.moves.range(points)]), + ("Small number random neg", + [random.randint(-20000, 0) for x in six.moves.range(points)]), + ("Sin(x)", map(math.sin, six.moves.range(points))), + ("random ", [random.random() + for x in 
six.moves.range(points)]), + ]: + pts = pandas.Series(values, + [now + datetime.timedelta(seconds=i*sampling) + for i in six.moves.range(points)]) + ts = cls(ts=pts, sampling=sampling, aggregation_method='mean') + t0 = time.time() + for i in six.moves.range(compress_times): + s = ts.serialize() + t1 = time.time() + print(title) + print(" Bytes per point: %.2f" % (len(s) / float(points))) + print(" Compression speed: %.2f MB/s" + % ((len(msgpack.dumps(ts.to_dict())) + / ((t1 - t0) / compress_times)) / (1024.0 * 1024.0))) + @staticmethod def aggregated(timeseries, aggregation, from_timestamp=None, to_timestamp=None, needed_percent_of_overlap=100.0): @@ -674,3 +717,7 @@ class TimeSerieArchive(SerializableMixin): @classmethod def from_dict(cls, d): return cls([AggregatedTimeSerie.from_dict(a) for a in d['archives']]) + + +if __name__ == '__main__': + AggregatedTimeSerie.benchmark() diff --git a/gnocchi/tests/test_carbonara.py b/gnocchi/tests/test_carbonara.py index e906343b..6a48f7f9 100644 --- a/gnocchi/tests/test_carbonara.py +++ b/gnocchi/tests/test_carbonara.py @@ -26,6 +26,9 @@ from gnocchi import carbonara class TestBoundTimeSerie(base.BaseTestCase): + def test_benchmark(self): + carbonara.AggregatedTimeSerie.benchmark() + @staticmethod def test_base(): carbonara.BoundTimeSerie.from_data( -- GitLab From 4022bdbbcfc48e60295ce473f17e32592d869f6a Mon Sep 17 00:00:00 2001 From: root Date: Wed, 27 Jul 2016 14:55:16 +0800 Subject: [PATCH 0298/1483] Add home-page in setup.cfg Change-Id: I2bf6be21fa49455b757c09dac811cc096a2295d9 --- setup.cfg | 1 + 1 file changed, 1 insertion(+) diff --git a/setup.cfg b/setup.cfg index 78839e5c..05ae9006 100644 --- a/setup.cfg +++ b/setup.cfg @@ -6,6 +6,7 @@ description-file = README.rst author = OpenStack author-email = openstack-dev@lists.openstack.org +home-page = http://gnocchi.xyz classifier = Environment :: OpenStack Intended Audience :: Information Technology -- GitLab From fba36a11c43ad04c48af9aa7f540e1fb389939d1 Mon Sep 17 
00:00:00 2001 From: Julien Danjou Date: Thu, 28 Jul 2016 13:18:51 +0200 Subject: [PATCH 0299/1483] tests: extend the test timeout to 120s for migration sync testing MySQL is long to inspect, and usually take up to more than 60s on slow system, which is the default value for OS_TEST_TIMEOUT. This extends it to 120s so we avoid random test failure. Change-Id: I1b207b3ec2f04bba6ef8b397c3822e462fde4332 --- gnocchi/tests/indexer/sqlalchemy/test_migrations.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/gnocchi/tests/indexer/sqlalchemy/test_migrations.py b/gnocchi/tests/indexer/sqlalchemy/test_migrations.py index 1e7e6bd6..62445b0b 100644 --- a/gnocchi/tests/indexer/sqlalchemy/test_migrations.py +++ b/gnocchi/tests/indexer/sqlalchemy/test_migrations.py @@ -14,6 +14,7 @@ # under the License. import abc +import fixtures import mock from oslo_db.sqlalchemy import test_migrations import six @@ -35,6 +36,9 @@ class ModelsMigrationsSync( base.TestCase, test_migrations.ModelsMigrationsSync)): + def _set_timeout(self): + self.useFixture(fixtures.Timeout(120, gentle=True)) + def setUp(self): super(ModelsMigrationsSync, self).setUp() self.db = mock.Mock() -- GitLab From 5e71480f87e97b5a37e58d3d771d92fcdabb7f68 Mon Sep 17 00:00:00 2001 From: gord chung Date: Tue, 19 Jul 2016 22:08:50 +0000 Subject: [PATCH 0300/1483] add support for coordination use tooz to track all metricd agents. grab a block based on number of agents active. block size is based on the largest agent with the largest number of workers if coordination fails or is not supported, by default, all agents work against same block with block size based on workers of each agent. 
Change-Id: I291b30f896f5e538040bf8f64d879eb1ad949395 --- devstack/settings | 1 + gnocchi/cli.py | 101 +++++++++++++++++++++++++++++++++++++++++----- 2 files changed, 93 insertions(+), 9 deletions(-) diff --git a/devstack/settings b/devstack/settings index a47a8609..0d6b3d14 100644 --- a/devstack/settings +++ b/devstack/settings @@ -11,6 +11,7 @@ GNOCCHI_LOG_DIR=/var/log/gnocchi GNOCCHI_AUTH_CACHE_DIR=${GNOCCHI_AUTH_CACHE_DIR:-/var/cache/gnocchi} GNOCCHI_WSGI_DIR=${GNOCCHI_WSGI_DIR:-/var/www/gnocchi} GNOCCHI_DATA_DIR=${GNOCCHI_DATA_DIR:-${DATA_DIR}/gnocchi} +GNOCCHI_COORDINATOR_URL=${GNOCCHI_COORDINATOR_URL:-redis://localhost:6379} # GNOCCHI_DEPLOY defines how Gnocchi is deployed, allowed values: # - mod_wsgi : Run Gnocchi under Apache HTTPd mod_wsgi diff --git a/gnocchi/cli.py b/gnocchi/cli.py index 08bfd27c..f782eacb 100644 --- a/gnocchi/cli.py +++ b/gnocchi/cli.py @@ -16,13 +16,19 @@ import multiprocessing import threading import time +import uuid +from concurrent import futures import cotyledon +from futurist import periodics +import msgpack from oslo_config import cfg from oslo_log import log from oslo_utils import timeutils import retrying import six +import tooz +from tooz import coordination from gnocchi import archive_policy from gnocchi import indexer @@ -118,12 +124,12 @@ class MetricProcessBase(cotyledon.Service): def terminate(self): self._shutdown.set() - self.close_queues() + self.close_services() LOG.info("Waiting ongoing metric processing to finish") self._shutdown_done.wait() @staticmethod - def close_queues(): + def close_services(): raise NotImplementedError @staticmethod @@ -152,28 +158,100 @@ class MetricReporting(MetricProcessBase): class MetricScheduler(MetricProcessBase): name = "scheduler" - BLOCK_SIZE = 500 MAX_OVERLAP = 0.3 + GROUP_ID = "gnocchi-scheduler" + SYNC_RATE = 30 + TASKS_PER_WORKER = 16 + + def _enable_coordination(self, conf): + self._coord = coordination.get_coordinator( + conf.storage.coordination_url, self._my_id) + 
self._coord.start(start_heart=True) def __init__(self, worker_id, conf, queue): super(MetricScheduler, self).__init__( worker_id, conf, conf.storage.metric_processing_delay) + self._my_id = str(uuid.uuid4()) + self._enable_coordination(conf) self.queue = queue self.previously_scheduled_metrics = set() + self.workers = conf.metricd.workers + self.block_index = 0 + self.block_size_default = self.workers * self.TASKS_PER_WORKER + self.block_size = self.block_size_default + self.periodic = None + + def set_block(self, event): + get_members_req = self._coord.get_members(self.GROUP_ID) + try: + members = sorted(get_members_req.get()) + self.block_index = members.index(self._my_id) + reqs = list(self._coord.get_member_capabilities(self.GROUP_ID, m) + for m in members) + for req in reqs: + cap = msgpack.loads(req.get(), encoding='utf-8') + max_workers = max(cap['workers'], self.workers) + self.block_size = max_workers * self.TASKS_PER_WORKER + LOG.info('New set of agents detected. Now working on block: %s, ' + 'with up to %s metrics', self.block_index, + self.block_size) + except Exception: + LOG.warning('Error getting block to work on, defaulting to first') + self.block_index = 0 + self.block_size = self.block_size_default + + # Retry with exponential backoff for up to 1 minute + @retrying.retry(wait_exponential_multiplier=500, + wait_exponential_max=60000, + retry_on_exception=retry_if_retry_is_raised) + def _configure(self): + super(MetricScheduler, self)._configure() + try: + cap = msgpack.dumps({'workers': self.workers}) + join_req = self._coord.join_group(self.GROUP_ID, cap) + join_req.get() + LOG.info('Joined coordination group: %s', self.GROUP_ID) + self.set_block(None) + + @periodics.periodic(spacing=self.SYNC_RATE, run_immediately=True) + def run_watchers(): + self._coord.run_watchers() + + self.periodic = periodics.PeriodicWorker.create( + [], executor_factory=lambda: + futures.ThreadPoolExecutor(max_workers=10)) + self.periodic.add(run_watchers) + t = 
threading.Thread(target=self.periodic.start) + t.daemon = True + t.start() + + self._coord.watch_join_group(self.GROUP_ID, self.set_block) + self._coord.watch_leave_group(self.GROUP_ID, self.set_block) + except coordination.GroupNotCreated as e: + create_group_req = self._coord.create_group(self.GROUP_ID) + try: + create_group_req.get() + except coordination.GroupAlreadyExist: + pass + raise Retry(e) + except tooz.NotImplemented: + LOG.warning('Configured coordination driver does not support ' + 'required functionality. Coordination is disabled.') + except Exception as e: + LOG.error('Failed to configure coordination. Coordination is ' + 'disabled: %s', e) def _run_job(self): try: - # TODO(gordc): add support to detect other agents to enable - # partitioning metrics = set(self.store.list_metric_with_measures_to_process( - self.BLOCK_SIZE, 0)) + self.block_size, self.block_index)) if metrics and not self.queue.empty(): # NOTE(gordc): drop metrics we previously process to avoid # handling twice number_of_scheduled_metrics = len(metrics) metrics = metrics - self.previously_scheduled_metrics if (float(number_of_scheduled_metrics - len(metrics)) / - self.BLOCK_SIZE > self.MAX_OVERLAP): + self.block_size > self.MAX_OVERLAP): LOG.warning('Metric processing lagging scheduling rate. 
' 'It is recommended to increase the number of ' 'workers or to lengthen processing interval.') @@ -185,7 +263,12 @@ class MetricScheduler(MetricProcessBase): LOG.error("Unexpected error scheduling metrics for processing", exc_info=True) - def close_queues(self): + def close_services(self): + if self.periodic: + self.periodic.stop() + self.periodic.wait() + self._coord.leave_group(self.GROUP_ID) + self._coord.stop() self.queue.close() @@ -227,7 +310,7 @@ class MetricProcessor(MetricProcessBase): LOG.error("Unexpected error during measures processing", exc_info=True) - def close_queues(self): + def close_services(self): self.queue.close() -- GitLab From 9170d7b2c1f07d49c49c5b696b38b0dc9991975a Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Thu, 28 Jul 2016 14:48:44 +0200 Subject: [PATCH 0301/1483] test: fix race condition in update testing The archive policy used to test the update mechanism was the "low" one, which is used by a lot of different test in parallel. So modifying it might make these tests fail. Create a temporary archive policy for this test. 
Change-Id: I28bb882855a04526faab1710519f613c5f194de3 --- gnocchi/tests/test_indexer.py | 31 +++++++++++++++++-------------- 1 file changed, 17 insertions(+), 14 deletions(-) diff --git a/gnocchi/tests/test_indexer.py b/gnocchi/tests/test_indexer.py index b5830c3c..609c71ab 100644 --- a/gnocchi/tests/test_indexer.py +++ b/gnocchi/tests/test_indexer.py @@ -69,13 +69,16 @@ class TestIndexerDriver(tests_base.TestCase): points=12), archive_policy.ArchivePolicyItem(granularity=5, points=6)]) + apname = str(uuid.uuid4()) + self.index.create_archive_policy(archive_policy.ArchivePolicy( + apname, 0, [(12, 300), (24, 3600), (30, 86400)])) ap = self.index.update_archive_policy( - "low", [archive_policy.ArchivePolicyItem(granularity=300, - points=6), - archive_policy.ArchivePolicyItem(granularity=3600, - points=24), - archive_policy.ArchivePolicyItem(granularity=86400, - points=30)]) + apname, [archive_policy.ArchivePolicyItem(granularity=300, + points=6), + archive_policy.ArchivePolicyItem(granularity=3600, + points=24), + archive_policy.ArchivePolicyItem(granularity=86400, + points=30)]) self.assertEqual({ 'back_window': 0, 'aggregation_methods': @@ -84,14 +87,14 @@ class TestIndexerDriver(tests_base.TestCase): {u'granularity': 300, u'points': 6, u'timespan': 1800}, {u'granularity': 3600, u'points': 24, u'timespan': 86400}, {u'granularity': 86400, u'points': 30, u'timespan': 2592000}], - 'name': u'low'}, dict(ap)) + 'name': apname}, dict(ap)) ap = self.index.update_archive_policy( - "low", [archive_policy.ArchivePolicyItem(granularity=300, - points=12), - archive_policy.ArchivePolicyItem(granularity=3600, - points=24), - archive_policy.ArchivePolicyItem(granularity=86400, - points=30)]) + apname, [archive_policy.ArchivePolicyItem(granularity=300, + points=12), + archive_policy.ArchivePolicyItem(granularity=3600, + points=24), + archive_policy.ArchivePolicyItem(granularity=86400, + points=30)]) self.assertEqual({ 'back_window': 0, 'aggregation_methods': @@ -100,7 +103,7 @@ 
class TestIndexerDriver(tests_base.TestCase): {u'granularity': 300, u'points': 12, u'timespan': 3600}, {u'granularity': 3600, u'points': 24, u'timespan': 86400}, {u'granularity': 86400, u'points': 30, u'timespan': 2592000}], - 'name': u'low'}, dict(ap)) + 'name': apname}, dict(ap)) def test_delete_archive_policy(self): name = str(uuid.uuid4()) -- GitLab From fb3e98e5d1eab1ae1101cb7407210955caf65bde Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Thu, 28 Jul 2016 15:03:17 +0200 Subject: [PATCH 0302/1483] sqlalchemy: increase the number of max_retries According to test, 10 is not always enough. Change-Id: I6f7f3513ad3e4276bf9a325e254750929f099372 --- gnocchi/indexer/sqlalchemy.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/gnocchi/indexer/sqlalchemy.py b/gnocchi/indexer/sqlalchemy.py index 6f7295ff..17a0e339 100644 --- a/gnocchi/indexer/sqlalchemy.py +++ b/gnocchi/indexer/sqlalchemy.py @@ -61,7 +61,7 @@ LOG = log.getLogger(__name__) def retry_on_deadlock(f): return oslo_db.api.wrap_db_retry(retry_on_deadlock=True, - max_retries=10, + max_retries=20, retry_interval=0.1, max_retry_interval=2)(f) -- GitLab From 2bf39a5c96e46b53880af00ab0c161bdeb86b8b0 Mon Sep 17 00:00:00 2001 From: gord chung Date: Thu, 21 Jul 2016 15:03:16 +0000 Subject: [PATCH 0303/1483] drop v1.3 to v2.x migration, drop TimeSerieArchive drop migration support as step0 towards new v3 format. 
also, start removing TimeSerieArchive as we don't support < v2.x Sem-Ver: apibreak Change-Id: I8de662bd3f561b7055fa5a8173aa432c3a81750e --- gnocchi/carbonara.py | 68 +---------------------- gnocchi/storage/_carbonara.py | 64 ++++------------------ gnocchi/storage/ceph.py | 22 -------- gnocchi/storage/file.py | 33 ------------ gnocchi/storage/swift.py | 25 --------- gnocchi/tests/storage/test_carbonara.py | 72 ++++++++----------------- gnocchi/tests/test_carbonara.py | 10 ++-- 7 files changed, 36 insertions(+), 258 deletions(-) diff --git a/gnocchi/carbonara.py b/gnocchi/carbonara.py index 0a58ef0f..d2652c6a 100644 --- a/gnocchi/carbonara.py +++ b/gnocchi/carbonara.py @@ -15,12 +15,12 @@ # License for the specific language governing permissions and limitations # under the License. """Time series data manipulation, better with pancetta.""" + import datetime import functools import logging import math import numbers -import operator import random import re import time @@ -653,71 +653,5 @@ class AggregatedTimeSerie(TimeSerie): for __, timestamp, granularity, value in points] -class TimeSerieArchive(SerializableMixin): - - def __init__(self, agg_timeseries): - """A raw data buffer and a collection of downsampled timeseries. - - Used to represent the set of AggregatedTimeSeries for the range of - granularities supported for a metric (for a particular aggregation - function). - - """ - self.agg_timeseries = sorted(agg_timeseries, - key=operator.attrgetter("sampling")) - - @classmethod - def from_definitions(cls, definitions, aggregation_method='mean'): - """Create a new collection of archived time series. - - :param definition: A list of tuple (sampling, max_size) - :param aggregation_method: Aggregation function to use. 
- """ - # Limit the main timeserie to a timespan mapping - return cls( - [AggregatedTimeSerie( - sampling=sampling, - aggregation_method=aggregation_method, - max_size=size) - for sampling, size in definitions] - ) - - def fetch(self, from_timestamp=None, to_timestamp=None): - """Fetch aggregated time value. - - Returns a sorted list of tuples (timestamp, granularity, value). - """ - result = [] - end_timestamp = to_timestamp - for ts in reversed(self.agg_timeseries): - points = ts[from_timestamp:to_timestamp] - try: - # Do not include stop timestamp - del points[end_timestamp] - except KeyError: - pass - result.extend([(timestamp, ts.sampling, value) - for timestamp, value - in six.iteritems(points)]) - return result - - def update(self, timeserie): - for agg in self.agg_timeseries: - agg.update(timeserie) - - def to_dict(self): - return { - "archives": [ts.to_dict() for ts in self.agg_timeseries], - } - - def __eq__(self, other): - return (isinstance(other, TimeSerieArchive) - and self.agg_timeseries == other.agg_timeseries) - - @classmethod - def from_dict(cls, d): - return cls([AggregatedTimeSerie.from_dict(a) for a in d['archives']]) - - if __name__ == '__main__': AggregatedTimeSerie.benchmark() diff --git a/gnocchi/storage/_carbonara.py b/gnocchi/storage/_carbonara.py index d3af3fa7..df92bf37 100644 --- a/gnocchi/storage/_carbonara.py +++ b/gnocchi/storage/_carbonara.py @@ -135,31 +135,13 @@ class CarbonaraBasedStorage(storage.StorageDriver): all_keys = self._list_split_keys_for_metric( metric, aggregation, granularity) except storage.MetricDoesNotExist: - # This can happen if it's an old metric with a TimeSerieArchive - all_keys = None - - if not all_keys: - # It does not mean we have no data: it can be an old metric with a - # TimeSerieArchive. 
- try: - data = self._get_metric_archive(metric, aggregation) - except (storage.MetricDoesNotExist, - storage.AggregationDoesNotExist): - # It really does not exist - for d in metric.archive_policy.definition: - if d.granularity == granularity: - return carbonara.AggregatedTimeSerie( - sampling=granularity, - aggregation_method=aggregation, - max_size=d.points) - raise storage.GranularityDoesNotExist(metric, granularity) - else: - archive = carbonara.TimeSerieArchive.unserialize(data) - # It's an old metric with an TimeSerieArchive! - for ts in archive.agg_timeseries: - if ts.sampling == granularity: - return ts - raise storage.GranularityDoesNotExist(metric, granularity) + for d in metric.archive_policy.definition: + if d.granularity == granularity: + return carbonara.AggregatedTimeSerie( + sampling=granularity, + aggregation_method=aggregation, + max_size=d.points) + raise storage.GranularityDoesNotExist(metric, granularity) if from_timestamp: from_timestamp = carbonara.AggregatedTimeSerie.get_split_key( @@ -227,7 +209,6 @@ class CarbonaraBasedStorage(storage.StorageDriver): with self._lock(metric.id)(blocking=sync): # If the metric has never been upgraded, we need to delete this # here too - self._delete_metric_archives(metric) self._delete_metric(metric) def _delete_metric_measures_before(self, metric, aggregation_method, @@ -262,35 +243,8 @@ class CarbonaraBasedStorage(storage.StorageDriver): return report def _check_for_metric_upgrade(self, metric): - lock = self._lock(metric.id) - with lock: - for agg_method in metric.archive_policy.aggregation_methods: - LOG.debug( - "Checking if the metric %s needs migration for %s" - % (metric, agg_method)) - try: - data = self._get_metric_archive(metric, agg_method) - except storage.MetricDoesNotExist: - # Just try the next metric, this one has no measures - break - except storage.AggregationDoesNotExist: - # This should not happen, but you never know. 
- LOG.warning( - "Metric %s does not have an archive " - "for aggregation %s, " - "no migration can be done" % (metric, agg_method)) - else: - LOG.info("Migrating metric %s to new format" % metric) - archive = carbonara.TimeSerieArchive.unserialize(data) - for ts in archive.agg_timeseries: - # Store each AggregatedTimeSerie independently - for key, split in ts.split(): - self._store_metric_measures(metric, key, - ts.aggregation_method, - ts.sampling, - split.serialize()) - self._delete_metric_archives(metric) - LOG.info("Migrated metric %s to new format" % metric) + # TODO(gordc): add upgrade for v3 + pass def upgrade(self, index): self._map_in_thread( diff --git a/gnocchi/storage/ceph.py b/gnocchi/storage/ceph.py index c4170c2a..aba43c2e 100644 --- a/gnocchi/storage/ceph.py +++ b/gnocchi/storage/ceph.py @@ -317,25 +317,3 @@ class CephStorage(_carbonara.CarbonaraBasedStorage): content += data offset += len(data) return content - - # The following methods deal with Gnocchi <= 1.3 archives - def _get_metric_archive(self, metric, aggregation): - """Retrieve data in the place we used to store TimeSerieArchive.""" - try: - return self._get_object_content( - str("gnocchi_%s_%s" % (metric.id, aggregation))) - except rados.ObjectNotFound: - raise storage.AggregationDoesNotExist(metric, aggregation) - - def _store_metric_archive(self, metric, aggregation, data): - """Stores data in the place we used to store TimeSerieArchive.""" - self.ioctx.write_full( - str("gnocchi_%s_%s" % (metric.id, aggregation)), data) - - def _delete_metric_archives(self, metric): - for aggregation in metric.archive_policy.aggregation_methods: - try: - self.ioctx.remove_object( - str("gnocchi_%s_%s" % (metric.id, aggregation))) - except rados.ObjectNotFound: - pass diff --git a/gnocchi/storage/file.py b/gnocchi/storage/file.py index 9da136ff..b9199bac 100644 --- a/gnocchi/storage/file.py +++ b/gnocchi/storage/file.py @@ -256,36 +256,3 @@ class FileStorage(_carbonara.CarbonaraBasedStorage): raise 
storage.AggregationDoesNotExist(metric, aggregation) raise storage.MetricDoesNotExist(metric) raise - - # The following methods deal with Gnocchi <= 1.3 archives - def _build_metric_archive_path(self, metric, aggregation): - return os.path.join(self._build_metric_dir(metric), aggregation) - - def _get_metric_archive(self, metric, aggregation): - """Retrieve data in the place we used to store TimeSerieArchive.""" - path = self._build_metric_archive_path(metric, aggregation) - try: - with open(path, 'rb') as aggregation_file: - return aggregation_file.read() - except IOError as e: - if e.errno == errno.ENOENT: - if os.path.exists(self._build_metric_dir(metric)): - raise storage.AggregationDoesNotExist(metric, aggregation) - raise storage.MetricDoesNotExist(metric) - raise - - def _store_metric_archive(self, metric, aggregation, data): - """Stores data in the place we used to store TimeSerieArchive.""" - self._atomic_file_store( - self._build_metric_archive_path(metric, aggregation), - data) - - def _delete_metric_archives(self, metric): - for agg in metric.archive_policy.aggregation_methods: - try: - os.unlink(self._build_metric_archive_path(metric, agg)) - except OSError as e: - if e.errno != errno.ENOENT: - # NOTE(jd) Maybe the metric has never been created (no - # measures) - raise diff --git a/gnocchi/storage/swift.py b/gnocchi/storage/swift.py index f9905b33..035f3d63 100644 --- a/gnocchi/storage/swift.py +++ b/gnocchi/storage/swift.py @@ -281,28 +281,3 @@ class SwiftStorage(_carbonara.CarbonaraBasedStorage): except swclient.ClientException as e: if e.http_status != 404: raise - - # The following methods deal with Gnocchi <= 1.3 archives - def _get_metric_archive(self, metric, aggregation): - """Retrieve data in the place we used to store TimeSerieArchive.""" - try: - headers, contents = self.swift.get_object( - self._container_name(metric), aggregation) - except swclient.ClientException as e: - if e.http_status == 404: - raise 
storage.AggregationDoesNotExist(metric, aggregation) - raise - return contents - - def _store_metric_archive(self, metric, aggregation, data): - """Stores data in the place we used to store TimeSerieArchive.""" - self.swift.put_object(self._container_name(metric), aggregation, data) - - def _delete_metric_archives(self, metric): - for aggregation in metric.archive_policy.aggregation_methods: - try: - self.swift.delete_object(self._container_name(metric), - aggregation) - except swclient.ClientException as e: - if e.http_status != 404: - raise diff --git a/gnocchi/tests/storage/test_carbonara.py b/gnocchi/tests/storage/test_carbonara.py index 63a8bd33..bad6c754 100644 --- a/gnocchi/tests/storage/test_carbonara.py +++ b/gnocchi/tests/storage/test_carbonara.py @@ -14,11 +14,10 @@ # License for the specific language governing permissions and limitations # under the License. import datetime +import itertools import uuid import mock -import pandas -import six from gnocchi import carbonara from gnocchi import storage @@ -27,18 +26,6 @@ from gnocchi.tests import base as tests_base from gnocchi import utils -def _to_dict_v1_3(self): - d = {'values': dict((timestamp.value, float(v)) - for timestamp, v - in six.iteritems(self.ts.dropna()))} - sampling = pandas.tseries.offsets.Nano(self.sampling * 10e8) - d.update({ - 'aggregation_method': self.aggregation_method, - 'max_size': self.max_size, - 'sampling': six.text_type(sampling.n) + sampling.rule_code}) - return d - - class TestCarbonaraMigration(tests_base.TestCase): def setUp(self): super(TestCarbonaraMigration, self).setUp() @@ -48,40 +35,24 @@ class TestCarbonaraMigration(tests_base.TestCase): self.metric = storage.Metric(uuid.uuid4(), self.archive_policies['low']) - archive = carbonara.TimeSerieArchive.from_definitions( - [(v.granularity, v.points) - for v in self.metric.archive_policy.definition] - ) + self.storage._create_metric(self.metric) - archive_max = carbonara.TimeSerieArchive.from_definitions( - 
[(v.granularity, v.points) - for v in self.metric.archive_policy.definition], - aggregation_method='max', - ) + for d, agg in itertools.product( + self.metric.archive_policy.definition, ['mean', 'max']): + ts = carbonara.AggregatedTimeSerie( + sampling=d.granularity, aggregation_method=agg, + max_size=d.points) - for a in (archive, archive_max): - a.update(carbonara.TimeSerie.from_data( + ts.update(carbonara.TimeSerie.from_data( [datetime.datetime(2014, 1, 1, 12, 0, 0), datetime.datetime(2014, 1, 1, 12, 0, 4), datetime.datetime(2014, 1, 1, 12, 0, 9)], [4, 5, 6])) - self.storage._create_metric(self.metric) - - # serialise in old format - with mock.patch('gnocchi.carbonara.AggregatedTimeSerie.to_dict', - autospec=True) as f: - f.side_effect = _to_dict_v1_3 - - self.storage._store_metric_archive( - self.metric, - archive.agg_timeseries[0].aggregation_method, - archive.serialize()) - - self.storage._store_metric_archive( - self.metric, - archive_max.agg_timeseries[0].aggregation_method, - archive_max.serialize()) + for key, split in ts.split(): + self.storage._store_metric_measures( + self.metric, key, agg, d.granularity, + split.serialize()) def upgrade(self): with mock.patch.object(self.index, 'list_metrics') as f: @@ -89,9 +60,17 @@ class TestCarbonaraMigration(tests_base.TestCase): self.storage.upgrade(self.index) def test_get_measures(self): - # This is to make gordc safer - self.assertIsNotNone(self.storage._get_metric_archive( - self.metric, "mean")) + self.assertEqual([ + (utils.datetime_utc(2014, 1, 1), 86400, 5), + (utils.datetime_utc(2014, 1, 1, 12), 3600, 5), + (utils.datetime_utc(2014, 1, 1, 12), 300, 5) + ], self.storage.get_measures(self.metric)) + + self.assertEqual([ + (utils.datetime_utc(2014, 1, 1), 86400, 6), + (utils.datetime_utc(2014, 1, 1, 12), 3600, 6), + (utils.datetime_utc(2014, 1, 1, 12), 300, 6) + ], self.storage.get_measures(self.metric, aggregation='max')) self.upgrade() @@ -107,11 +86,6 @@ class 
TestCarbonaraMigration(tests_base.TestCase): (utils.datetime_utc(2014, 1, 1, 12), 300, 6) ], self.storage.get_measures(self.metric, aggregation='max')) - self.assertRaises( - storage.AggregationDoesNotExist, - self.storage._get_metric_archive, - self.metric, "mean") - def test_delete_metric_not_upgraded(self): # Make sure that we delete everything (e.g. objects + container) # correctly even if the metric has not been upgraded. diff --git a/gnocchi/tests/test_carbonara.py b/gnocchi/tests/test_carbonara.py index 6a48f7f9..e69a07c8 100644 --- a/gnocchi/tests/test_carbonara.py +++ b/gnocchi/tests/test_carbonara.py @@ -257,17 +257,13 @@ class TestAggregatedTimeSerie(base.BaseTestCase): self.assertEqual(1, ts[datetime.datetime(2014, 1, 1, 12, 2, 0)]) def test_to_dict_from_dict(self): - ts = carbonara.AggregatedTimeSerie( - sampling='1Min', - max_size=2, - aggregation_method='max') - ts.update(carbonara.TimeSerie.from_data( + ts = carbonara.TimeSerie.from_data( [datetime.datetime(2014, 1, 1, 12, 0, 0), datetime.datetime(2014, 1, 1, 12, 1, 4), datetime.datetime(2014, 1, 1, 12, 1, 9), datetime.datetime(2014, 1, 1, 12, 2, 12)], - [3, 5, 7, 1])) - ts2 = carbonara.AggregatedTimeSerie.from_dict(ts.to_dict()) + [3, 5, 7, 1]) + ts2 = carbonara.TimeSerie.from_dict(ts.to_dict()) self.assertEqual(ts, ts2) def test_aggregated_different_archive_no_overlap(self): -- GitLab From a59c759a5473b781ea6645a7c722108eff986e1d Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Tue, 2 Aug 2016 14:21:37 +0200 Subject: [PATCH 0304/1483] statsd+metric: make retry code common This makes sure the retry code to retry the connection to the coordinator is shared and work in both cases. Until now, it only worked for metricd. We don't care about indexer right now, as oslo.db is in charge of retrying. 
Change-Id: I9323e66d72e325c071788caaa90fb14ba93ade51 --- gnocchi/cli.py | 37 +++++++---------------------------- gnocchi/storage/_carbonara.py | 17 +++++++++++----- gnocchi/utils.py | 18 +++++++++++++++-- 3 files changed, 35 insertions(+), 37 deletions(-) diff --git a/gnocchi/cli.py b/gnocchi/cli.py index f782eacb..d2c214e0 100644 --- a/gnocchi/cli.py +++ b/gnocchi/cli.py @@ -1,5 +1,5 @@ # Copyright (c) 2013 Mirantis Inc. -# Copyright (c) 2015 Red Hat +# Copyright (c) 2015-2016 Red Hat # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -25,7 +25,6 @@ import msgpack from oslo_config import cfg from oslo_log import log from oslo_utils import timeutils -import retrying import six import tooz from tooz import coordination @@ -35,6 +34,7 @@ from gnocchi import indexer from gnocchi import service from gnocchi import statsd as statsd_service from gnocchi import storage +from gnocchi import utils LOG = log.getLogger(__name__) @@ -76,14 +76,6 @@ def statsd(): statsd_service.start() -class Retry(Exception): - pass - - -def retry_if_retry_is_raised(exception): - return isinstance(exception, Retry) - - class MetricProcessBase(cotyledon.Service): def __init__(self, worker_id, conf, interval_delay=0): super(MetricProcessBase, self).__init__(worker_id) @@ -93,22 +85,10 @@ class MetricProcessBase(cotyledon.Service): self._shutdown = threading.Event() self._shutdown_done = threading.Event() - # Retry with exponential backoff for up to 1 minute - @retrying.retry(wait_exponential_multiplier=500, - wait_exponential_max=60000, - retry_on_exception=retry_if_retry_is_raised) def _configure(self): - try: - self.store = storage.get_driver(self.conf) - except storage.StorageError as e: - LOG.error("Unable to initialize storage: %s" % e) - raise Retry(e) - try: - self.index = indexer.get_driver(self.conf) - self.index.connect() - except indexer.IndexerException as e: - LOG.error("Unable to initialize 
indexer: %s" % e) - raise Retry(e) + self.store = storage.get_driver(self.conf) + self.index = indexer.get_driver(self.conf) + self.index.connect() def run(self): self._configure() @@ -200,10 +180,7 @@ class MetricScheduler(MetricProcessBase): self.block_index = 0 self.block_size = self.block_size_default - # Retry with exponential backoff for up to 1 minute - @retrying.retry(wait_exponential_multiplier=500, - wait_exponential_max=60000, - retry_on_exception=retry_if_retry_is_raised) + @utils.retry def _configure(self): super(MetricScheduler, self)._configure() try: @@ -233,7 +210,7 @@ class MetricScheduler(MetricProcessBase): create_group_req.get() except coordination.GroupAlreadyExist: pass - raise Retry(e) + raise utils.Retry(e) except tooz.NotImplemented: LOG.warning('Configured coordination driver does not support ' 'required functionality. Coordination is disabled.') diff --git a/gnocchi/storage/_carbonara.py b/gnocchi/storage/_carbonara.py index 17434acf..7b2bdc72 100644 --- a/gnocchi/storage/_carbonara.py +++ b/gnocchi/storage/_carbonara.py @@ -30,6 +30,8 @@ from tooz import coordination from gnocchi import carbonara from gnocchi import storage +from gnocchi import utils + OPTS = [ cfg.IntOpt('aggregation_workers_number', @@ -51,14 +53,19 @@ class CarbonaraBasedStorage(storage.StorageDriver): def __init__(self, conf): super(CarbonaraBasedStorage, self).__init__(conf) + self.coord = coordination.get_coordinator( + conf.coordination_url, + str(uuid.uuid4()).encode('ascii')) + self.aggregation_workers_number = conf.aggregation_workers_number + self.start() + + @utils.retry + def start(self): try: - self.coord = coordination.get_coordinator( - conf.coordination_url, - str(uuid.uuid4()).encode('ascii')) self.coord.start(start_heart=True) except Exception as e: - raise storage.StorageError("Unable to start coordinator: %s" % e) - self.aggregation_workers_number = conf.aggregation_workers_number + LOG.error("Unable to start coordinator: %s" % e) + raise 
utils.Retry(e) def stop(self): self.coord.stop() diff --git a/gnocchi/utils.py b/gnocchi/utils.py index aacf99ff..a49b161d 100644 --- a/gnocchi/utils.py +++ b/gnocchi/utils.py @@ -1,6 +1,6 @@ # -*- encoding: utf-8 -*- # -# Copyright © 2015 eNovance +# Copyright © 2015-2016 eNovance # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain @@ -18,10 +18,10 @@ import datetime import iso8601 from oslo_utils import timeutils from pytimeparse import timeparse +import retrying import six import uuid - # uuid5 namespace for id transformation. # NOTE(chdent): This UUID must stay the same, forever, across all # of gnocchi to preserve its value as a URN namespace. @@ -50,6 +50,20 @@ def UUID(value): raise ValueError(e) +class Retry(Exception): + pass + + +def retry_if_retry_is_raised(exception): + return isinstance(exception, Retry) + + +# Retry with exponential backoff for up to 1 minute +retry = retrying.retry(wait_exponential_multiplier=500, + wait_exponential_max=60000, + retry_on_exception=retry_if_retry_is_raised) + + def to_timestamp(v): if isinstance(v, datetime.datetime): return v -- GitLab From 1c64f5ef2d8ea861a5d268f0a5068617eb0ca1f1 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Tue, 2 Aug 2016 14:25:33 +0200 Subject: [PATCH 0305/1483] statsd: tweak logging This logs only "resource already exist" on debug, so it sounds less warny for operators. Log something when the daemon is started and ready to process. 
Change-Id: Ia67c7679fa46d8f0c2fb48aed9fd4132a992733a --- gnocchi/statsd.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/gnocchi/statsd.py b/gnocchi/statsd.py index 693a063a..8a337d38 100644 --- a/gnocchi/statsd.py +++ b/gnocchi/statsd.py @@ -44,8 +44,8 @@ class Stats(object): self.conf.statsd.user_id, self.conf.statsd.project_id) except indexer.ResourceAlreadyExists: - LOG.info("Resource %s already exists" - % self.conf.statsd.resource_id) + LOG.debug("Resource %s already exists" + % self.conf.statsd.resource_id) else: LOG.info("Created resource %s" % self.conf.statsd.resource_id) self.gauges = {} @@ -186,6 +186,9 @@ def start(): loop.call_later(conf.statsd.flush_delay, _flush) transport, protocol = loop.run_until_complete(listen) + LOG.info("Started on %s:%d" % (conf.statsd.host, conf.statsd.port)) + LOG.info("Flush delay: %d seconds" % conf.statsd.flush_delay) + try: loop.run_forever() except KeyboardInterrupt: -- GitLab From 970aa60ef903ddfc26edd3ac5761357ed7277380 Mon Sep 17 00:00:00 2001 From: gord chung Date: Thu, 30 Jun 2016 18:21:41 +0000 Subject: [PATCH 0306/1483] offset serialisation binary serialization using struct to have consistent binary representation. serlization format is split into 9B chunks where first byte is metadata byte (only captures on/off currently) and subsequent 8Bs is double-precision float value. offset in relation to beginning of file represents offset from split time. this should offer significant compression opportunities with max size of 9B per point. also, drop to_dict/from_dict in AggregatedTimeSerie since it's not used anywhere. 
Change-Id: Ida4eeaf666c6256b66155e6e0039b998dc83ddb1 --- gnocchi/carbonara.py | 113 ++++++++++-------------- gnocchi/storage/_carbonara.py | 75 +++++++++++++--- gnocchi/storage/ceph.py | 26 +++--- gnocchi/storage/file.py | 28 +++--- gnocchi/storage/swift.py | 29 +++--- gnocchi/tests/storage/test_carbonara.py | 78 ++++++++++------ gnocchi/tests/test_carbonara.py | 6 +- gnocchi/tests/test_storage.py | 5 +- 8 files changed, 213 insertions(+), 147 deletions(-) diff --git a/gnocchi/carbonara.py b/gnocchi/carbonara.py index d2652c6a..77bb27f2 100644 --- a/gnocchi/carbonara.py +++ b/gnocchi/carbonara.py @@ -18,11 +18,13 @@ import datetime import functools +import itertools import logging import math import numbers import random import re +import struct import time import iso8601 @@ -151,8 +153,7 @@ class TimeSerie(SerializableMixin): def to_dict(self): return { 'values': dict((timestamp.value, float(v)) - for timestamp, v - in six.iteritems(self.ts.dropna())), + for timestamp, v in six.iteritems(self.ts.dropna())) } @staticmethod @@ -286,6 +287,7 @@ class AggregatedTimeSerie(TimeSerie): _AGG_METHOD_PCT_RE = re.compile(r"([1-9][0-9]?)pct") POINTS_PER_SPLIT = 14400 + SERIAL_LEN = 9 def __init__(self, sampling, aggregation_method, ts=None, max_size=None): @@ -342,7 +344,9 @@ class AggregatedTimeSerie(TimeSerie): groupby = self.ts.groupby(functools.partial( self.get_split_key_datetime, sampling=self.sampling)) for group, ts in groupby: - yield self._split_key_to_string(group), TimeSerie(ts) + yield (self._split_key_to_string(group), + AggregatedTimeSerie(self.sampling, self.aggregation_method, + ts)) @classmethod def from_timeseries(cls, timeseries, sampling, aggregation_method, @@ -371,71 +375,44 @@ class AggregatedTimeSerie(TimeSerie): ) @classmethod - def from_dict(cls, d): - """Build a time series from a dict. - - The dict format must be datetime as key and values as values. - - :param d: The dict. 
- :returns: A TimeSerie object - """ - sampling = d.get('sampling') - if 'first_timestamp' in d: - prev_timestamp = pandas.Timestamp(d.get('first_timestamp') * 10e8) - timestamps = [] - for delta in d.get('timestamps'): - prev_timestamp = datetime.timedelta( - seconds=delta * sampling) + prev_timestamp - timestamps.append(prev_timestamp) - else: - # migrate from v1.3, remove with TimeSerieArchive - timestamps, d['values'] = ( - cls._timestamps_and_values_from_dict(d['values'])) - - return cls.from_data( - sampling=sampling, - aggregation_method=d.get('aggregation_method', 'mean'), - timestamps=timestamps, - values=d.get('values'), - max_size=d.get('max_size')) - - def to_dict(self): - if self.ts.empty: - timestamps = [] - values = [] - first_timestamp = 0 - else: - first_timestamp = float( - self.get_split_key(self.ts.index[0], self.sampling)) - timestamps = [] - prev_timestamp = pandas.Timestamp( - first_timestamp * 10e8).to_pydatetime() - # Use double delta encoding for timestamps - for i in self.ts.index: - # Convert to pydatetime because it's faster to compute than - # Pandas' objects - asdt = i.to_pydatetime() - timestamps.append( - int((asdt - prev_timestamp).total_seconds() - / self.sampling)) - prev_timestamp = asdt - values = self.ts.values.tolist() - - return { - 'first_timestamp': first_timestamp, - 'aggregation_method': self.aggregation_method, - 'max_size': self.max_size, - 'sampling': self.sampling, - 'timestamps': timestamps, - 'values': values, - } - - @classmethod - def unserialize(cls, data): - return cls.from_dict(msgpack.loads(lz4.loads(data), encoding='utf-8')) - - def serialize(self): - return lz4.dumps(msgpack.dumps(self.to_dict())) + def unserialize(cls, data, start, agg_method, sampling): + x, y = [], [] + start = float(start) + decompress = lz4.loads(data) + v_len = len(decompress) // cls.SERIAL_LEN + # NOTE(gordc): use '<' for standardized, little-endian byte order. 
+ deserial = struct.unpack('<' + '?d' * v_len, decompress) + # alternating split into 2 list and drop items with False flag + for i, val in itertools.compress(six.moves.zip(six.moves.range(v_len), + deserial[1::2]), + deserial[::2]): + x.append(val) + y.append(start + (i * sampling)) + y = pandas.to_datetime(y, unit='s') + return cls.from_data(sampling, agg_method, y, x) + + def serialize(self, start=None): + # NOTE(gordc): this binary serializes series based on the split time. + # the format is 1B True/False flag which denotes whether subsequent 8B + # is a real float or zero padding. every 9B represents one second from + # start time. this is intended to be run on data already split. + # ie. False,0,True,0 serialization means start datapoint is padding, + # and 1s after start time, the aggregate value is 0. + if not self.ts.index.is_monotonic: + self.ts = self.ts.sort_index() + offset_div = self.sampling * 10e8 + start = (float(start) * 10e8 if start else + float(self.get_split_key(self.first, self.sampling)) * 10e8) + # calculate how many seconds from start the series runs until and + # initialize list to store alternating delimiter, float entries + e_offset = int((self.last.value - start) // (self.sampling * 10e8)) + 1 + serial = [False] * e_offset * 2 + for i, v in self.ts.iteritems(): + # overwrite zero padding with real points and set flag True + loc = int((i.value - start) // offset_div) + serial[loc * 2] = True + serial[loc * 2 + 1] = float(v) + return lz4.dumps(struct.pack('<' + '?d' * e_offset, *serial)) def _truncate(self, quick=False): """Truncate the timeserie.""" diff --git a/gnocchi/storage/_carbonara.py b/gnocchi/storage/_carbonara.py index df92bf37..73aa0548 100644 --- a/gnocchi/storage/_carbonara.py +++ b/gnocchi/storage/_carbonara.py @@ -16,6 +16,7 @@ # under the License. 
import collections import datetime +import itertools import operator import uuid @@ -69,7 +70,8 @@ class CarbonaraBasedStorage(storage.StorageDriver): return self.coord.get_lock(lock_name) @staticmethod - def _get_measures(metric, timestamp_key, aggregation, granularity): + def _get_measures(metric, timestamp_key, aggregation, granularity, + version=3): raise NotImplementedError @staticmethod @@ -81,8 +83,8 @@ class CarbonaraBasedStorage(storage.StorageDriver): raise NotImplementedError @staticmethod - def _store_metric_measures(metric, timestamp_key, - aggregation, granularity, data): + def _store_metric_measures(metric, timestamp_key, aggregation, + granularity, data, version=3): raise NotImplementedError @staticmethod @@ -111,12 +113,13 @@ class CarbonaraBasedStorage(storage.StorageDriver): aggregation, granularity): data = self._get_measures(metric, key, aggregation, granularity) try: - return carbonara.TimeSerie.unserialize(data) + return carbonara.AggregatedTimeSerie.unserialize( + data, key, aggregation, granularity) except ValueError: - LOG.error("Data corruption detected for %s " - "aggregated `%s' timeserie, granularity `%s' " - "around time `%s', ignoring." - % (metric.id, aggregation, granularity, key)) + LOG.error("Data corruption detected for %s " + "aggregated `%s' timeserie, granularity `%s' " + "around time `%s', ignoring." 
+ % (metric.id, aggregation, granularity, key)) def _get_measures_timeserie(self, metric, aggregation, granularity, @@ -176,7 +179,7 @@ class CarbonaraBasedStorage(storage.StorageDriver): for key, split in ts.split(): self._store_metric_measures(metric, key, aggregation, archive_policy_def.granularity, - split.serialize()) + split.serialize(key)) if ts.last and archive_policy_def.timespan: oldest_point_to_keep = ts.last - datetime.timedelta( @@ -228,7 +231,7 @@ class CarbonaraBasedStorage(storage.StorageDriver): @staticmethod def _delete_metric_measures(metric, timestamp_key, - aggregation, granularity): + aggregation, granularity, version=3): raise NotImplementedError @staticmethod @@ -243,8 +246,43 @@ class CarbonaraBasedStorage(storage.StorageDriver): return report def _check_for_metric_upgrade(self, metric): - # TODO(gordc): add upgrade for v3 - pass + lock = self._lock(metric.id) + with lock: + for agg_method, d in itertools.product( + metric.archive_policy.aggregation_methods, + metric.archive_policy.definition): + LOG.debug( + "Checking if the metric %s needs migration for %s" + % (metric, agg_method)) + + try: + all_keys = self._list_split_keys_for_metric( + metric, agg_method, d.granularity) + except storage.MetricDoesNotExist: + # Just try the next metric, this one has no measures + break + else: + LOG.info("Migrating metric %s to new format" % metric) + timeseries = filter( + lambda x: x is not None, + self._map_in_thread( + self._get_measures_and_unserialize_v2, + ((metric, key, agg_method, d.granularity) + for key in all_keys)) + ) + ts = carbonara.AggregatedTimeSerie.from_timeseries( + sampling=d.granularity, + aggregation_method=agg_method, + timeseries=timeseries, max_size=d.points) + for key, split in ts.split(): + self._store_metric_measures( + metric, key, ts.aggregation_method, + ts.sampling, split.serialize(key)) + for key in all_keys: + self._delete_metric_measures( + metric, key, agg_method, + d.granularity, version=None) + LOG.info("Migrated 
metric %s to new format" % metric) def upgrade(self, index): self._map_in_thread( @@ -456,3 +494,16 @@ class CarbonaraBasedStorage(storage.StorageDriver): # We use 'list' to iterate all threads here to raise the first # exception now, not much choice return list(executor.map(lambda args: method(*args), list_of_args)) + + def _get_measures_and_unserialize_v2(self, metric, key, + aggregation, granularity): + """Unserialization method for upgrading v2 objects. Upgrade only.""" + data = self._get_measures( + metric, key, aggregation, granularity, version=None) + try: + return carbonara.TimeSerie.unserialize(data) + except ValueError: + LOG.error("Data corruption detected for %s " + "aggregated `%s' timeserie, granularity `%s' " + "around time `%s', ignoring." + % (metric.id, aggregation, granularity, key)) diff --git a/gnocchi/storage/ceph.py b/gnocchi/storage/ceph.py index aba43c2e..8745b0bd 100644 --- a/gnocchi/storage/ceph.py +++ b/gnocchi/storage/ceph.py @@ -231,9 +231,11 @@ class CephStorage(_carbonara.CarbonaraBasedStorage): self.ioctx.aio_remove(n) @staticmethod - def _get_object_name(metric, timestamp_key, aggregation, granularity): - return str("gnocchi_%s_%s_%s_%s" % ( + def _get_object_name(metric, timestamp_key, aggregation, granularity, + version=3): + name = str("gnocchi_%s_%s_%s_%s" % ( metric.id, timestamp_key, aggregation, granularity)) + return name + '_v%s' % version if version else name def _object_exists(self, name): try: @@ -250,16 +252,16 @@ class CephStorage(_carbonara.CarbonaraBasedStorage): self.ioctx.write_full(name, "metric created") def _store_metric_measures(self, metric, timestamp_key, - aggregation, granularity, data): + aggregation, granularity, data, version=3): name = self._get_object_name(metric, timestamp_key, - aggregation, granularity) + aggregation, granularity, version) self.ioctx.write_full(name, data) self.ioctx.set_xattr("gnocchi_%s_container" % metric.id, name, "") def _delete_metric_measures(self, metric, timestamp_key, 
aggregation, - granularity): + granularity, version=3): name = self._get_object_name(metric, timestamp_key, - aggregation, granularity) + aggregation, granularity, version) self.ioctx.rm_xattr("gnocchi_%s_container" % metric.id, name) self.ioctx.aio_remove(name) @@ -274,10 +276,11 @@ class CephStorage(_carbonara.CarbonaraBasedStorage): for name in ('container', 'none'): self.ioctx.aio_remove("gnocchi_%s_%s" % (metric.id, name)) - def _get_measures(self, metric, timestamp_key, aggregation, granularity): + def _get_measures(self, metric, timestamp_key, aggregation, granularity, + version=3): try: name = self._get_object_name(metric, timestamp_key, - aggregation, granularity) + aggregation, granularity, version) return self._get_object_content(name) except rados.ObjectNotFound: if self._object_exists("gnocchi_%s_container" % metric.id): @@ -292,10 +295,9 @@ class CephStorage(_carbonara.CarbonaraBasedStorage): raise storage.MetricDoesNotExist(metric) keys = [] for xattr, value in xattrs: - _, metric_id, key, agg, g = xattr.split('_', 4) - if aggregation == agg and granularity == float(g): - keys.append(key) - + meta = xattr.split('_') + if aggregation == meta[3] and granularity == float(meta[4]): + keys.append(meta[2]) return keys def _get_unaggregated_timeserie(self, metric): diff --git a/gnocchi/storage/file.py b/gnocchi/storage/file.py index b9199bac..bce6fa91 100644 --- a/gnocchi/storage/file.py +++ b/gnocchi/storage/file.py @@ -83,9 +83,10 @@ class FileStorage(_carbonara.CarbonaraBasedStorage): "agg_" + aggregation) def _build_metric_path_for_split(self, metric, aggregation, - timestamp_key, granularity): - return os.path.join(self._build_metric_path(metric, aggregation), + timestamp_key, granularity, version=3): + path = os.path.join(self._build_metric_path(metric, aggregation), timestamp_key + "_" + str(granularity)) + return path + '_v%s' % version if version else path def _build_measure_path(self, metric_id, random_id=None): path = 
os.path.join(self.measure_path, six.text_type(metric_id)) @@ -217,22 +218,22 @@ class FileStorage(_carbonara.CarbonaraBasedStorage): raise keys = [] for f in files: - key, sep, file_granularity = f.partition("_") - if file_granularity == str(granularity): - keys.append(key) + meta = f.split("_") + if meta[1] == str(granularity): + keys.append(meta[0]) return keys def _delete_metric_measures(self, metric, timestamp_key, aggregation, - granularity): + granularity, version=3): os.unlink(self._build_metric_path_for_split( - metric, aggregation, timestamp_key, granularity)) + metric, aggregation, timestamp_key, granularity, version)) def _store_metric_measures(self, metric, timestamp_key, aggregation, - granularity, data): + granularity, data, version=3): self._atomic_file_store( self._build_metric_path_for_split(metric, aggregation, - timestamp_key, granularity), - data) + timestamp_key, granularity, + version), data) def _delete_metric(self, metric): path = self._build_metric_dir(metric) @@ -244,9 +245,10 @@ class FileStorage(_carbonara.CarbonaraBasedStorage): # measures) raise - def _get_measures(self, metric, timestamp_key, aggregation, granularity): - path = self._build_metric_path_for_split(metric, aggregation, - timestamp_key, granularity) + def _get_measures(self, metric, timestamp_key, aggregation, granularity, + version=3): + path = self._build_metric_path_for_split( + metric, aggregation, timestamp_key, granularity, version) try: with open(path, 'rb') as aggregation_file: return aggregation_file.read() diff --git a/gnocchi/storage/swift.py b/gnocchi/storage/swift.py index 035f3d63..a1e5cc8d 100644 --- a/gnocchi/storage/swift.py +++ b/gnocchi/storage/swift.py @@ -97,8 +97,9 @@ class SwiftStorage(_carbonara.CarbonaraBasedStorage): return '%s.%s' % (self._container_prefix, str(metric.id)) @staticmethod - def _object_name(split_key, aggregation, granularity): - return '%s_%s_%s' % (split_key, aggregation, granularity) + def _object_name(split_key, aggregation, 
granularity, version=3): + name = '%s_%s_%s' % (split_key, aggregation, granularity) + return name + '_v%s' % version if version else name def _create_metric(self, metric): # TODO(jd) A container per user in their account? @@ -188,17 +189,18 @@ class SwiftStorage(_carbonara.CarbonaraBasedStorage): self._bulk_delete(self.MEASURE_PREFIX, files) def _store_metric_measures(self, metric, timestamp_key, - aggregation, granularity, data): + aggregation, granularity, data, version=3): self.swift.put_object( self._container_name(metric), - self._object_name(timestamp_key, aggregation, granularity), - data) + self._object_name(timestamp_key, aggregation, granularity, + version), data) def _delete_metric_measures(self, metric, timestamp_key, aggregation, - granularity): + granularity, version=3): self.swift.delete_object( self._container_name(metric), - self._object_name(timestamp_key, aggregation, granularity)) + self._object_name(timestamp_key, aggregation, granularity, + version)) def _delete_metric(self, metric): self._delete_unaggregated_timeserie(metric) @@ -222,11 +224,12 @@ class SwiftStorage(_carbonara.CarbonaraBasedStorage): @retrying.retry(stop_max_attempt_number=4, wait_fixed=500, retry_on_result=retry_if_result_empty) - def _get_measures(self, metric, timestamp_key, aggregation, granularity): + def _get_measures(self, metric, timestamp_key, aggregation, granularity, + version=3): try: headers, contents = self.swift.get_object( self._container_name(metric), self._object_name( - timestamp_key, aggregation, granularity)) + timestamp_key, aggregation, granularity, version)) except swclient.ClientException as e: if e.http_status == 404: try: @@ -251,12 +254,12 @@ class SwiftStorage(_carbonara.CarbonaraBasedStorage): keys = [] for f in files: try: - key, agg, g = f['name'].split('_', 2) - except ValueError: + meta = f['name'].split('_') + if aggregation == meta[1] and granularity == float(meta[2]): + keys.append(meta[0]) + except (ValueError, IndexError): # Might be 
"none", or any other file. Be resilient. continue - if aggregation == agg and granularity == float(g): - keys.append(key) return keys @retrying.retry(stop_max_attempt_number=4, diff --git a/gnocchi/tests/storage/test_carbonara.py b/gnocchi/tests/storage/test_carbonara.py index bad6c754..ee9749f8 100644 --- a/gnocchi/tests/storage/test_carbonara.py +++ b/gnocchi/tests/storage/test_carbonara.py @@ -18,6 +18,8 @@ import itertools import uuid import mock +import msgpack +import six from gnocchi import carbonara from gnocchi import storage @@ -26,6 +28,13 @@ from gnocchi.tests import base as tests_base from gnocchi import utils +def _serialize_v2(self): + d = {'values': dict((timestamp.value, float(v)) + for timestamp, v + in six.iteritems(self.ts.dropna()))} + return msgpack.dumps(d) + + class TestCarbonaraMigration(tests_base.TestCase): def setUp(self): super(TestCarbonaraMigration, self).setUp() @@ -37,22 +46,27 @@ class TestCarbonaraMigration(tests_base.TestCase): self.storage._create_metric(self.metric) - for d, agg in itertools.product( - self.metric.archive_policy.definition, ['mean', 'max']): - ts = carbonara.AggregatedTimeSerie( - sampling=d.granularity, aggregation_method=agg, - max_size=d.points) + # serialise in old format + with mock.patch('gnocchi.carbonara.AggregatedTimeSerie.serialize', + autospec=True) as f: + f.side_effect = _serialize_v2 - ts.update(carbonara.TimeSerie.from_data( - [datetime.datetime(2014, 1, 1, 12, 0, 0), - datetime.datetime(2014, 1, 1, 12, 0, 4), - datetime.datetime(2014, 1, 1, 12, 0, 9)], - [4, 5, 6])) + for d, agg in itertools.product( + self.metric.archive_policy.definition, ['mean', 'max']): + ts = carbonara.AggregatedTimeSerie( + sampling=d.granularity, aggregation_method=agg, + max_size=d.points) - for key, split in ts.split(): - self.storage._store_metric_measures( - self.metric, key, agg, d.granularity, - split.serialize()) + ts.update(carbonara.TimeSerie.from_data( + [datetime.datetime(2014, 1, 1, 12, 0, 0), + 
datetime.datetime(2014, 1, 1, 12, 0, 4), + datetime.datetime(2014, 1, 1, 12, 0, 9)], + [4, 5, 6])) + + for key, split in ts.split(): + self.storage._store_metric_measures( + self.metric, key, agg, d.granularity, + split.serialize(), version=None) def upgrade(self): with mock.patch.object(self.index, 'list_metrics') as f: @@ -60,17 +74,20 @@ class TestCarbonaraMigration(tests_base.TestCase): self.storage.upgrade(self.index) def test_get_measures(self): - self.assertEqual([ - (utils.datetime_utc(2014, 1, 1), 86400, 5), - (utils.datetime_utc(2014, 1, 1, 12), 3600, 5), - (utils.datetime_utc(2014, 1, 1, 12), 300, 5) - ], self.storage.get_measures(self.metric)) - - self.assertEqual([ - (utils.datetime_utc(2014, 1, 1), 86400, 6), - (utils.datetime_utc(2014, 1, 1, 12), 3600, 6), - (utils.datetime_utc(2014, 1, 1, 12), 300, 6) - ], self.storage.get_measures(self.metric, aggregation='max')) + with mock.patch.object( + self.storage, '_get_measures_and_unserialize', + side_effect=self.storage._get_measures_and_unserialize_v2): + self.assertEqual([ + (utils.datetime_utc(2014, 1, 1), 86400, 5), + (utils.datetime_utc(2014, 1, 1, 12), 3600, 5), + (utils.datetime_utc(2014, 1, 1, 12), 300, 5) + ], self.storage.get_measures(self.metric)) + + self.assertEqual([ + (utils.datetime_utc(2014, 1, 1), 86400, 6), + (utils.datetime_utc(2014, 1, 1, 12), 3600, 6), + (utils.datetime_utc(2014, 1, 1, 12), 300, 6) + ], self.storage.get_measures(self.metric, aggregation='max')) self.upgrade() @@ -86,6 +103,17 @@ class TestCarbonaraMigration(tests_base.TestCase): (utils.datetime_utc(2014, 1, 1, 12), 300, 6) ], self.storage.get_measures(self.metric, aggregation='max')) + with mock.patch.object( + self.storage, '_get_measures_and_unserialize', + side_effect=self.storage._get_measures_and_unserialize_v2): + self.assertRaises( + storage.AggregationDoesNotExist, + self.storage.get_measures, self.metric) + + self.assertRaises( + storage.AggregationDoesNotExist, + self.storage.get_measures, self.metric, 
aggregation='max') + def test_delete_metric_not_upgraded(self): # Make sure that we delete everything (e.g. objects + container) # correctly even if the metric has not been upgraded. diff --git a/gnocchi/tests/test_carbonara.py b/gnocchi/tests/test_carbonara.py index e69a07c8..67bd9e6f 100644 --- a/gnocchi/tests/test_carbonara.py +++ b/gnocchi/tests/test_carbonara.py @@ -172,7 +172,8 @@ class TestAggregatedTimeSerie(base.BaseTestCase): self.assertEqual(5.48, ts[datetime.datetime(2014, 1, 1, 12, 0, 0)]) # Serialize and unserialize - ts = carbonara.AggregatedTimeSerie.unserialize(ts.serialize()) + ts = carbonara.AggregatedTimeSerie.unserialize( + ts.serialize(), ts.get_split_key(ts.first, 60), '74pct', 60) ts.update(carbonara.TimeSerie.from_tuples( [(datetime.datetime(2014, 1, 1, 12, 0, 0), 3), @@ -612,7 +613,8 @@ class TestAggregatedTimeSerie(base.BaseTestCase): self.assertEqual(ts, carbonara.AggregatedTimeSerie.unserialize( - ts.serialize())) + ts.serialize(), ts.get_split_key(ts.first, 0.5), + 'mean', 0.5)) def test_no_truncation(self): ts = carbonara.AggregatedTimeSerie(sampling=60, diff --git a/gnocchi/tests/test_storage.py b/gnocchi/tests/test_storage.py index e0be09b6..05c84702 100644 --- a/gnocchi/tests/test_storage.py +++ b/gnocchi/tests/test_storage.py @@ -63,9 +63,10 @@ class TestStorageDriver(tests_base.TestCase): self.storage.add_measures(self.metric, [ storage.Measure(datetime.datetime(2014, 1, 1, 13, 0, 1), 1), ]) - with mock.patch('gnocchi.carbonara.msgpack.unpack', + + with mock.patch('gnocchi.carbonara.AggregatedTimeSerie.unserialize', side_effect=ValueError("boom!")): - with mock.patch('gnocchi.carbonara.msgpack.loads', + with mock.patch('gnocchi.carbonara.TimeSerie.unserialize', side_effect=ValueError("boom!")): self.storage.process_background_tasks(self.index, sync=True) -- GitLab From b6bc6704a1ca10eb033abe52739a9f5210772875 Mon Sep 17 00:00:00 2001 From: gord chung Date: Tue, 5 Jul 2016 22:48:10 +0000 Subject: [PATCH 0307/1483] ceph - write 
only new points this patch will only write the updated datapoints in ceph rather than the entire object. swift and file backends will continue to read+update+write objects as they don't support atomic writes to arbitrary offsets Change-Id: I60c166f734b64ce4a8d3df1882e8b27601a04207 --- doc/source/architecture.rst | 9 ++++++++ gnocchi/carbonara.py | 28 +++++++++++++++++-------- gnocchi/storage/_carbonara.py | 19 +++++++++++------ gnocchi/storage/ceph.py | 19 ++++++++++++++--- gnocchi/storage/file.py | 11 +++++++--- gnocchi/storage/swift.py | 11 ++++++---- gnocchi/tests/base.py | 10 +++++++++ gnocchi/tests/storage/test_carbonara.py | 2 +- gnocchi/tests/test_storage.py | 12 +++++------ 9 files changed, 89 insertions(+), 32 deletions(-) diff --git a/doc/source/architecture.rst b/doc/source/architecture.rst index ee63edfb..9f340e05 100755 --- a/doc/source/architecture.rst +++ b/doc/source/architecture.rst @@ -83,6 +83,15 @@ the 8 default aggregation methods (mean, min, max, sum, std, median, count, 95pct) with the same "one year, one minute aggregations" resolution, the space used will go up to a maximum of 8 × 4.5 MiB = 36 MiB. +.. note:: + + The Ceph driver does not utilize compression as the Swift and File drivers + do in favour of more efficient write support. Therefore, each point is + always 9B in Ceph where as the Swift and File backends may have a smaller + storage footprint but higher I/O requirements. It also requires some + additional formatting which may add to disk size. 
+ + How to set the archive policy and granularity --------------------------------------------- diff --git a/gnocchi/carbonara.py b/gnocchi/carbonara.py index 77bb27f2..859d1a26 100644 --- a/gnocchi/carbonara.py +++ b/gnocchi/carbonara.py @@ -28,7 +28,6 @@ import struct import time import iso8601 -import lz4 import msgpack import pandas import six @@ -378,10 +377,9 @@ class AggregatedTimeSerie(TimeSerie): def unserialize(cls, data, start, agg_method, sampling): x, y = [], [] start = float(start) - decompress = lz4.loads(data) - v_len = len(decompress) // cls.SERIAL_LEN - # NOTE(gordc): use '<' for standardized, little-endian byte order. - deserial = struct.unpack('<' + '?d' * v_len, decompress) + v_len = len(data) // cls.SERIAL_LEN + # NOTE(gordc): use '<' for standardized, little-endian byte order + deserial = struct.unpack('<' + '?d' * v_len, data) # alternating split into 2 list and drop items with False flag for i, val in itertools.compress(six.moves.zip(six.moves.range(v_len), deserial[1::2]), @@ -391,7 +389,7 @@ class AggregatedTimeSerie(TimeSerie): y = pandas.to_datetime(y, unit='s') return cls.from_data(sampling, agg_method, y, x) - def serialize(self, start=None): + def serialize(self, start=None, padded=True): # NOTE(gordc): this binary serializes series based on the split time. # the format is 1B True/False flag which denotes whether subsequent 8B # is a real float or zero padding. 
every 9B represents one second from @@ -401,8 +399,9 @@ class AggregatedTimeSerie(TimeSerie): if not self.ts.index.is_monotonic: self.ts = self.ts.sort_index() offset_div = self.sampling * 10e8 - start = (float(start) * 10e8 if start else - float(self.get_split_key(self.first, self.sampling)) * 10e8) + start = ((float(start) * 10e8 if start else + float(self.get_split_key(self.first, self.sampling)) * 10e8) + if padded else self.first.value) # calculate how many seconds from start the series runs until and # initialize list to store alternating delimiter, float entries e_offset = int((self.last.value - start) // (self.sampling * 10e8)) + 1 @@ -412,7 +411,18 @@ class AggregatedTimeSerie(TimeSerie): loc = int((i.value - start) // offset_div) serial[loc * 2] = True serial[loc * 2 + 1] = float(v) - return lz4.dumps(struct.pack('<' + '?d' * e_offset, *serial)) + return struct.pack('<' + '?d' * e_offset, *serial) + + def offset_from_split(self): + split = float(self.get_split_key(self.first, self.sampling)) * 10e8 + return int((self.first.value - split) // (self.sampling * 10e8) + * self.SERIAL_LEN) + + @staticmethod + def padding(offset): + offset = offset // AggregatedTimeSerie.SERIAL_LEN + pad = [False] * offset * 2 + return struct.pack('<' + '?d' * offset, *pad) def _truncate(self, quick=False): """Truncate the timeserie.""" diff --git a/gnocchi/storage/_carbonara.py b/gnocchi/storage/_carbonara.py index 73aa0548..797ac1d0 100644 --- a/gnocchi/storage/_carbonara.py +++ b/gnocchi/storage/_carbonara.py @@ -84,7 +84,7 @@ class CarbonaraBasedStorage(storage.StorageDriver): @staticmethod def _store_metric_measures(metric, timestamp_key, aggregation, - granularity, data, version=3): + granularity, data, offset=0, version=3): raise NotImplementedError @staticmethod @@ -170,16 +170,21 @@ class CarbonaraBasedStorage(storage.StorageDriver): timeseries=timeseries, max_size=points) + def _get_measures_to_update(self, metric, agg, apolicy, timeserie): + return 
self._get_measures_timeserie(metric, agg, apolicy.granularity, + timeserie.first, timeserie.last) + def _add_measures(self, aggregation, archive_policy_def, metric, timeserie): - ts = self._get_measures_timeserie(metric, aggregation, - archive_policy_def.granularity, - timeserie.first, timeserie.last) + ts = self._get_measures_to_update(metric, aggregation, + archive_policy_def, timeserie) ts.update(timeserie) for key, split in ts.split(): self._store_metric_measures(metric, key, aggregation, archive_policy_def.granularity, - split.serialize(key)) + split.serialize(key, self.WRITE_FULL), + offset=(0 if self.WRITE_FULL else + split.offset_from_split())) if ts.last and archive_policy_def.timespan: oldest_point_to_keep = ts.last - datetime.timedelta( @@ -277,7 +282,9 @@ class CarbonaraBasedStorage(storage.StorageDriver): for key, split in ts.split(): self._store_metric_measures( metric, key, ts.aggregation_method, - ts.sampling, split.serialize(key)) + ts.sampling, split.serialize(key, self.WRITE_FULL), + offset=(0 if self.WRITE_FULL else + split.offset_from_split())) for key in all_keys: self._delete_metric_measures( metric, key, agg_method, diff --git a/gnocchi/storage/ceph.py b/gnocchi/storage/ceph.py index 8745b0bd..acb2b229 100644 --- a/gnocchi/storage/ceph.py +++ b/gnocchi/storage/ceph.py @@ -24,6 +24,7 @@ from oslo_config import cfg from oslo_log import log from oslo_utils import importutils +from gnocchi import carbonara from gnocchi import storage from gnocchi.storage import _carbonara @@ -57,6 +58,9 @@ OPTS = [ class CephStorage(_carbonara.CarbonaraBasedStorage): + + WRITE_FULL = False + def __init__(self, conf): super(CephStorage, self).__init__(conf) self.pool = conf.ceph_pool @@ -251,11 +255,16 @@ class CephStorage(_carbonara.CarbonaraBasedStorage): else: self.ioctx.write_full(name, "metric created") - def _store_metric_measures(self, metric, timestamp_key, - aggregation, granularity, data, version=3): + def _store_metric_measures(self, metric, 
timestamp_key, aggregation, + granularity, data, offset=0, version=3): name = self._get_object_name(metric, timestamp_key, aggregation, granularity, version) - self.ioctx.write_full(name, data) + try: + self.ioctx.write(name, data, offset=offset) + except rados.ObjectNotFound: + # first time writing data + self.ioctx.write_full( + name, carbonara.AggregatedTimeSerie.padding(offset) + data) self.ioctx.set_xattr("gnocchi_%s_container" % metric.id, name, "") def _delete_metric_measures(self, metric, timestamp_key, aggregation, @@ -288,6 +297,10 @@ class CephStorage(_carbonara.CarbonaraBasedStorage): else: raise storage.MetricDoesNotExist(metric) + def _get_measures_to_update(self, metric, agg, apolicy, timeserie): + return carbonara.AggregatedTimeSerie( + apolicy.granularity, agg, max_size=apolicy.points) + def _list_split_keys_for_metric(self, metric, aggregation, granularity): try: xattrs = self.ioctx.get_xattrs("gnocchi_%s_container" % metric.id) diff --git a/gnocchi/storage/file.py b/gnocchi/storage/file.py index bce6fa91..197c2448 100644 --- a/gnocchi/storage/file.py +++ b/gnocchi/storage/file.py @@ -22,6 +22,7 @@ import shutil import tempfile import uuid +import lz4 from oslo_config import cfg import six @@ -40,6 +41,9 @@ OPTS = [ class FileStorage(_carbonara.CarbonaraBasedStorage): + + WRITE_FULL = True + def __init__(self, conf): super(FileStorage, self).__init__(conf) self.basepath = conf.file_basepath @@ -229,11 +233,12 @@ class FileStorage(_carbonara.CarbonaraBasedStorage): metric, aggregation, timestamp_key, granularity, version)) def _store_metric_measures(self, metric, timestamp_key, aggregation, - granularity, data, version=3): + granularity, data, offset=0, version=3): self._atomic_file_store( self._build_metric_path_for_split(metric, aggregation, timestamp_key, granularity, - version), data) + version), + lz4.dumps(data)) def _delete_metric(self, metric): path = self._build_metric_dir(metric) @@ -251,7 +256,7 @@ class 
FileStorage(_carbonara.CarbonaraBasedStorage): metric, aggregation, timestamp_key, granularity, version) try: with open(path, 'rb') as aggregation_file: - return aggregation_file.read() + return lz4.loads(aggregation_file.read()) except IOError as e: if e.errno == errno.ENOENT: if os.path.exists(self._build_metric_dir(metric)): diff --git a/gnocchi/storage/swift.py b/gnocchi/storage/swift.py index a1e5cc8d..2f6619c1 100644 --- a/gnocchi/storage/swift.py +++ b/gnocchi/storage/swift.py @@ -18,6 +18,7 @@ import contextlib import datetime import uuid +import lz4 from oslo_config import cfg from oslo_log import log import retrying @@ -74,6 +75,7 @@ def retry_if_result_empty(result): class SwiftStorage(_carbonara.CarbonaraBasedStorage): + WRITE_FULL = True POST_HEADERS = {'Accept': 'application/json', 'Content-Type': 'text/plain'} def __init__(self, conf): @@ -188,12 +190,13 @@ class SwiftStorage(_carbonara.CarbonaraBasedStorage): # Now clean objects self._bulk_delete(self.MEASURE_PREFIX, files) - def _store_metric_measures(self, metric, timestamp_key, - aggregation, granularity, data, version=3): + def _store_metric_measures(self, metric, timestamp_key, aggregation, + granularity, data, offset=0, version=3): self.swift.put_object( self._container_name(metric), self._object_name(timestamp_key, aggregation, granularity, - version), data) + version), + lz4.dumps(data)) def _delete_metric_measures(self, metric, timestamp_key, aggregation, granularity, version=3): @@ -240,7 +243,7 @@ class SwiftStorage(_carbonara.CarbonaraBasedStorage): raise raise storage.AggregationDoesNotExist(metric, aggregation) raise - return contents + return lz4.loads(contents) def _list_split_keys_for_metric(self, metric, aggregation, granularity): container = self._container_name(metric) diff --git a/gnocchi/tests/base.py b/gnocchi/tests/base.py index cd04e49a..298dcf9e 100644 --- a/gnocchi/tests/base.py +++ b/gnocchi/tests/base.py @@ -136,6 +136,16 @@ class FakeRadosModule(object): 
self._ensure_key_exists(key) self.kvs[key] = value + def write(self, key, value, offset): + self._validate_key(key) + try: + current = self.kvs[key] + if len(current) < offset: + current += b'\x00' * (offset - len(current)) + self.kvs[key] = current[:offset] + value + except KeyError: + raise FakeRadosModule.ObjectNotFound + def stat(self, key): self._validate_key(key) if key not in self.kvs: diff --git a/gnocchi/tests/storage/test_carbonara.py b/gnocchi/tests/storage/test_carbonara.py index ee9749f8..b2889d1d 100644 --- a/gnocchi/tests/storage/test_carbonara.py +++ b/gnocchi/tests/storage/test_carbonara.py @@ -66,7 +66,7 @@ class TestCarbonaraMigration(tests_base.TestCase): for key, split in ts.split(): self.storage._store_metric_measures( self.metric, key, agg, d.granularity, - split.serialize(), version=None) + split.serialize(), offset=0, version=None) def upgrade(self): with mock.patch.object(self.index, 'list_metrics') as f: diff --git a/gnocchi/tests/test_storage.py b/gnocchi/tests/test_storage.py index 05c84702..d86606ad 100644 --- a/gnocchi/tests/test_storage.py +++ b/gnocchi/tests/test_storage.py @@ -70,11 +70,10 @@ class TestStorageDriver(tests_base.TestCase): side_effect=ValueError("boom!")): self.storage.process_background_tasks(self.index, sync=True) - self.assertEqual([ - (utils.datetime_utc(2014, 1, 1), 86400.0, 1), - (utils.datetime_utc(2014, 1, 1, 13), 3600.0, 1), - (utils.datetime_utc(2014, 1, 1, 13), 300.0, 1), - ], self.storage.get_measures(self.metric)) + m = self.storage.get_measures(self.metric) + self.assertIn((utils.datetime_utc(2014, 1, 1), 86400.0, 1), m) + self.assertIn((utils.datetime_utc(2014, 1, 1, 13), 3600.0, 1), m) + self.assertIn((utils.datetime_utc(2014, 1, 1, 13), 300.0, 1), m) def test_delete_nonempty_metric(self): self.storage.add_measures(self.metric, [ @@ -144,7 +143,8 @@ class TestStorageDriver(tests_base.TestCase): count = 0 for call in c.mock_calls: # policy is 60 points and split is 48. 
should only update 2nd half - if mock.call(m_sql, mock.ANY, 'mean', 60.0, mock.ANY) == call: + args = call[1] + if args[0] == m_sql and args[2] == 'mean' and args[3] == 60.0: count += 1 self.assertEqual(1, count) -- GitLab From 8a8f19e33e6775cd4b8abd5f40bf48d449a1ae56 Mon Sep 17 00:00:00 2001 From: Hanxi Liu Date: Wed, 3 Aug 2016 23:33:17 +0800 Subject: [PATCH 0308/1483] Put py34 first in the env order of tox To solve the problem of "db type could not be determined" on py34 we have to run first the py34 env to, then, run py27. This patch puts py34 first on the tox.ini list of envs to avoid this problem to happen. Change-Id: I9502b0b42ed742d64517be5463e3c5f65eba61c1 Closes-bug: #1604734 --- tox.ini | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/tox.ini b/tox.ini index 3a3f41ff..b6f9be55 100644 --- a/tox.ini +++ b/tox.ini @@ -1,24 +1,24 @@ [tox] minversion = 1.8 -envlist = py{27,34,35},py{27,34,35}-{postgresql,mysql}{,-file,-swift,-ceph},pep8,bashate +envlist = py{34,35,27},py{34,35,27}-{postgresql,mysql}{,-file,-swift,-ceph},pep8,bashate [testenv] usedevelop = True sitepackages = False passenv = LANG OS_TEST_TIMEOUT OS_STDOUT_CAPTURE OS_STDERR_CAPTURE OS_LOG_CAPTURE GNOCCHI_TEST_* deps = .[test] - py{27,34,35}-postgresql: .[postgresql,swift,ceph,file] - py{27,34,35}-mysql: .[mysql,swift,ceph,file] + py{34,35,27}-postgresql: .[postgresql,swift,ceph,file] + py{34,35,27}-mysql: .[mysql,swift,ceph,file] setenv = GNOCCHI_TEST_STORAGE_DRIVER=file GNOCCHI_TEST_INDEXER_DRIVER=postgresql GNOCCHI_TEST_STORAGE_DRIVERS=file swift ceph GNOCCHI_TEST_INDEXER_DRIVERS=postgresql mysql - py{27,34,35}-{postgresql,mysql}-file: GNOCCHI_TEST_STORAGE_DRIVERS=file - py{27,34,35}-{postgresql,mysql}-swift: GNOCCHI_TEST_STORAGE_DRIVERS=swift - py{27,34,35}-{postgresql,mysql}-ceph: GNOCCHI_TEST_STORAGE_DRIVERS=ceph - py{27,34,35}-postgresql{,-file,-swift,-ceph}: GNOCCHI_TEST_INDEXER_DRIVERS=postgresql - py{27,34,35}-mysql{,-file,-swift,-ceph}: 
GNOCCHI_TEST_INDEXER_DRIVERS=mysql + py{34,35,27}-{postgresql,mysql}-file: GNOCCHI_TEST_STORAGE_DRIVERS=file + py{34,35,27}-{postgresql,mysql}-swift: GNOCCHI_TEST_STORAGE_DRIVERS=swift + py{34,35,27}-{postgresql,mysql}-ceph: GNOCCHI_TEST_STORAGE_DRIVERS=ceph + py{34,35,27}-postgresql{,-file,-swift,-ceph}: GNOCCHI_TEST_INDEXER_DRIVERS=postgresql + py{34,35,27}-mysql{,-file,-swift,-ceph}: GNOCCHI_TEST_INDEXER_DRIVERS=mysql commands = doc8 --ignore-path doc/source/rest.rst doc/source -- GitLab From c13a21a8ae693adb4ff2f25bbf4d1908c0eb84dc Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Tue, 2 Aug 2016 15:16:56 +0200 Subject: [PATCH 0309/1483] storage: allow to specify regaggregation over aggregation retrieval Currently, when retrieving e.g. 'mean' aggregation of several metrics, the aggregation of those metrics is always done using 'mean'. This patches allows to specify a reaggregation methods, which applies on top of the measures retrieved. You can know retrieve e.g. the 'max' of the 'mean' of several metrics. 
Related-Bug: #1573023 Change-Id: I9ea60de498e6f81c738a7236a392019b47b0c269 --- gnocchi/rest/__init__.py | 1 + gnocchi/storage/__init__.py | 3 +++ gnocchi/storage/_carbonara.py | 8 ++++++-- gnocchi/tests/test_storage.py | 10 ++++++++++ 4 files changed, 20 insertions(+), 2 deletions(-) diff --git a/gnocchi/rest/__init__.py b/gnocchi/rest/__init__.py index 701fb2fd..c740383d 100644 --- a/gnocchi/rest/__init__.py +++ b/gnocchi/rest/__init__.py @@ -483,6 +483,7 @@ class AggregatedMetricController(rest.RestController): else: measures = pecan.request.storage.get_cross_metric_measures( metrics, start, stop, aggregation, + None, granularity, needed_overlap) # Replace timestamp keys by their string versions diff --git a/gnocchi/storage/__init__.py b/gnocchi/storage/__init__.py index 2a568803..80ed0ae0 100644 --- a/gnocchi/storage/__init__.py +++ b/gnocchi/storage/__init__.py @@ -267,6 +267,7 @@ class StorageDriver(object): @staticmethod def get_cross_metric_measures(metrics, from_timestamp=None, to_timestamp=None, aggregation='mean', + reaggregation=None, granularity=None, needed_overlap=None): """Get aggregated measures of multiple entities. @@ -276,6 +277,8 @@ class StorageDriver(object): :param to timestamp: The timestamp to get the measure to. :param granularity: The granularity to retrieve. :param aggregation: The type of aggregation to retrieve. + :param reaggregation: The type of aggregation to compute + on the retrieved measures. 
""" for metric in metrics: if aggregation not in metric.archive_policy.aggregation_methods: diff --git a/gnocchi/storage/_carbonara.py b/gnocchi/storage/_carbonara.py index 17434acf..44eaae8e 100644 --- a/gnocchi/storage/_carbonara.py +++ b/gnocchi/storage/_carbonara.py @@ -418,11 +418,15 @@ class CarbonaraBasedStorage(storage.StorageDriver): def get_cross_metric_measures(self, metrics, from_timestamp=None, to_timestamp=None, aggregation='mean', + reaggregation=None, granularity=None, needed_overlap=100.0): super(CarbonaraBasedStorage, self).get_cross_metric_measures( metrics, from_timestamp, to_timestamp, - aggregation, granularity, needed_overlap) + aggregation, reaggregation, granularity, needed_overlap) + + if reaggregation is None: + reaggregation = aggregation if granularity is None: granularities = ( @@ -452,7 +456,7 @@ class CarbonaraBasedStorage(storage.StorageDriver): return [(timestamp.replace(tzinfo=iso8601.iso8601.UTC), r, v) for timestamp, r, v in carbonara.AggregatedTimeSerie.aggregated( - tss, aggregation, from_timestamp, to_timestamp, + tss, reaggregation, from_timestamp, to_timestamp, needed_overlap)] except carbonara.UnAggregableTimeseries as e: raise storage.MetricUnaggregatable(metrics, e.reason) diff --git a/gnocchi/tests/test_storage.py b/gnocchi/tests/test_storage.py index 58936c7d..c5fc78fe 100644 --- a/gnocchi/tests/test_storage.py +++ b/gnocchi/tests/test_storage.py @@ -425,6 +425,16 @@ class TestStorageDriver(tests_base.TestCase): (utils.datetime_utc(2014, 1, 1, 12, 10, 0), 300.0, 24.0) ], values) + values = self.storage.get_cross_metric_measures([self.metric, metric2], + reaggregation='max') + self.assertEqual([ + (utils.datetime_utc(2014, 1, 1, 0, 0, 0), 86400.0, 39.75), + (utils.datetime_utc(2014, 1, 1, 12, 0, 0), 3600.0, 39.75), + (utils.datetime_utc(2014, 1, 1, 12, 0, 0), 300.0, 69), + (utils.datetime_utc(2014, 1, 1, 12, 5, 0), 300.0, 23), + (utils.datetime_utc(2014, 1, 1, 12, 10, 0), 300.0, 44) + ], values) + values = 
self.storage.get_cross_metric_measures( [self.metric, metric2], from_timestamp=utils.to_timestamp('2014-01-01 12:10:00')) -- GitLab From cf34b97bf287e1090575ec7eb42e97c7996045cb Mon Sep 17 00:00:00 2001 From: gord chung Date: Fri, 5 Aug 2016 08:59:17 -0400 Subject: [PATCH 0310/1483] highlight pagination more prominently it's easy to skip over the fact that results are paginate. we should highlight this point so users don't overlook it. Change-Id: I8e49ede4bb17a79ead78c33a31a1dd27c90e61b9 Partial-Bug: #1607392 --- doc/source/rest.j2 | 22 ++++++++++++++++------ 1 file changed, 16 insertions(+), 6 deletions(-) diff --git a/doc/source/rest.j2 b/doc/source/rest.j2 index 167b9714..834bf9b2 100644 --- a/doc/source/rest.j2 +++ b/doc/source/rest.j2 @@ -41,10 +41,16 @@ To retrieve the list of all the metrics created, use the following request: {{ scenarios['list-metric']['doc'] }} -Considering the large volume of metrics Gnocchi will store, query results are -limited to `max_limit` value set in the configuration file. Returned results -are ordered by metrics' id values. Default ordering and limits as well as page -start can be modified using query parameters: +.. note:: + + Considering the large volume of metrics Gnocchi will store, query results are + limited to `max_limit` value set in the configuration file. Returned results + are ordered by metrics' id values. To retrieve the next page of results, the + id of a metric should be given as `marker` for the beginning of the next page + of results. + +Default ordering and limits as well as page start can be modified +using query parameters: {{ scenarios['list-metric-pagination']['doc'] }} @@ -299,8 +305,12 @@ or using `details=true` in the query parameter: {{ scenarios['list-resource-generic-details']['doc'] }} -Similar to metric list, query results are limited to `max_limit` value set in -the configuration file. Returned results are ordered by resouces' +.. 
note:: + + Similar to metric list, query results are limited to `max_limit` value set in + the configuration file. + +Returned results represent a single page of data and are ordered by resouces' revision_start time and started_at values: {{ scenarios['list-resource-generic-pagination']['doc'] }} -- GitLab From acf23cc031d919d9bc985c87b908acf62d0ce356 Mon Sep 17 00:00:00 2001 From: Thomas Goirand Date: Mon, 8 Aug 2016 14:20:29 +0200 Subject: [PATCH 0311/1483] Now using gnocchi-upgrade, and not gnocchi-dbsync (Closes: #832792). --- debian/changelog | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/debian/changelog b/debian/changelog index e74774ef..82af6f1f 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,12 +1,13 @@ -gnocchi (2.0.2-8) UNRELEASED; urgency=medium +gnocchi (2.0.2-9) UNRELEASED; urgency=medium [ Ondřej Nový ] * d/watch: Fixed upstream URL [ Thomas Goirand ] * Updated Danish translation of the debconf templates (Closes: #830650). + * Now using gnocchi-upgrade, and not gnocchi-dbsync (Closes: #832792). - -- Thomas Goirand Mon, 11 Jul 2016 14:36:26 +0200 + -- Thomas Goirand Mon, 08 Aug 2016 14:19:38 +0200 gnocchi (2.0.2-6) unstable; urgency=medium -- GitLab From cf2bfcb159bef506e86e5abdcae713fa2d5feded Mon Sep 17 00:00:00 2001 From: Thomas Goirand Date: Mon, 8 Aug 2016 12:21:17 +0000 Subject: [PATCH 0312/1483] Releasing gnocchi to unstable. --- debian/changelog | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/debian/changelog b/debian/changelog index 82af6f1f..9549609b 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,4 +1,4 @@ -gnocchi (2.0.2-9) UNRELEASED; urgency=medium +gnocchi (2.0.2-7) unstable; urgency=medium [ Ondřej Nový ] * d/watch: Fixed upstream URL @@ -7,7 +7,7 @@ gnocchi (2.0.2-9) UNRELEASED; urgency=medium * Updated Danish translation of the debconf templates (Closes: #830650). * Now using gnocchi-upgrade, and not gnocchi-dbsync (Closes: #832792). 
- -- Thomas Goirand Mon, 08 Aug 2016 14:19:38 +0200 + -- Thomas Goirand Mon, 08 Aug 2016 12:21:06 +0000 gnocchi (2.0.2-6) unstable; urgency=medium -- GitLab From 9bc426d901294a1dbbc4a389751eac752bb8aab2 Mon Sep 17 00:00:00 2001 From: gord chung Date: Tue, 2 Aug 2016 15:21:20 +0000 Subject: [PATCH 0313/1483] change to 3600 point object size currently we store 14400 points per object which equals roughly 128KB (max) object size. this is fine but all our default policies do not come anywhere near 14400 points which means the split policy doesn't offer much benefit. this patch proposes change to 3600 points per object to encourage more splits. it also benefits padding scenario to have less potential for zero padding. Change-Id: I0b243db4bd0882a4b5646fb56dc7c6c1c8fd788a --- gnocchi/carbonara.py | 2 +- gnocchi/tests/storage/test_carbonara.py | 79 +++++++++++++++---------- gnocchi/tests/test_carbonara.py | 12 ++-- 3 files changed, 56 insertions(+), 37 deletions(-) diff --git a/gnocchi/carbonara.py b/gnocchi/carbonara.py index 859d1a26..214e0953 100644 --- a/gnocchi/carbonara.py +++ b/gnocchi/carbonara.py @@ -285,7 +285,7 @@ class AggregatedTimeSerie(TimeSerie): _AGG_METHOD_PCT_RE = re.compile(r"([1-9][0-9]?)pct") - POINTS_PER_SPLIT = 14400 + POINTS_PER_SPLIT = 3600 SERIAL_LEN = 9 def __init__(self, sampling, aggregation_method, diff --git a/gnocchi/tests/storage/test_carbonara.py b/gnocchi/tests/storage/test_carbonara.py index b2889d1d..99947e35 100644 --- a/gnocchi/tests/storage/test_carbonara.py +++ b/gnocchi/tests/storage/test_carbonara.py @@ -49,24 +49,31 @@ class TestCarbonaraMigration(tests_base.TestCase): # serialise in old format with mock.patch('gnocchi.carbonara.AggregatedTimeSerie.serialize', autospec=True) as f: - f.side_effect = _serialize_v2 - - for d, agg in itertools.product( - self.metric.archive_policy.definition, ['mean', 'max']): - ts = carbonara.AggregatedTimeSerie( - sampling=d.granularity, aggregation_method=agg, - max_size=d.points) - - 
ts.update(carbonara.TimeSerie.from_data( - [datetime.datetime(2014, 1, 1, 12, 0, 0), - datetime.datetime(2014, 1, 1, 12, 0, 4), - datetime.datetime(2014, 1, 1, 12, 0, 9)], - [4, 5, 6])) - - for key, split in ts.split(): - self.storage._store_metric_measures( - self.metric, key, agg, d.granularity, - split.serialize(), offset=0, version=None) + with mock.patch('gnocchi.carbonara.AggregatedTimeSerie.' + 'POINTS_PER_SPLIT', 14400): + f.side_effect = _serialize_v2 + + for d, agg in itertools.product( + self.metric.archive_policy.definition, + ['mean', 'max']): + ts = carbonara.AggregatedTimeSerie( + sampling=d.granularity, aggregation_method=agg, + max_size=d.points) + + # NOTE: there is a split at 2016-07-18 on granularity 300 + ts.update(carbonara.TimeSerie.from_data( + [datetime.datetime(2016, 7, 17, 23, 59, 0), + datetime.datetime(2016, 7, 17, 23, 59, 4), + datetime.datetime(2016, 7, 17, 23, 59, 9), + datetime.datetime(2016, 7, 18, 0, 0, 0), + datetime.datetime(2016, 7, 18, 0, 0, 4), + datetime.datetime(2016, 7, 18, 0, 0, 9)], + [4, 5, 6, 7, 8, 9])) + + for key, split in ts.split(): + self.storage._store_metric_measures( + self.metric, key, agg, d.granularity, + split.serialize(), offset=0, version=None) def upgrade(self): with mock.patch.object(self.index, 'list_metrics') as f: @@ -78,29 +85,41 @@ class TestCarbonaraMigration(tests_base.TestCase): self.storage, '_get_measures_and_unserialize', side_effect=self.storage._get_measures_and_unserialize_v2): self.assertEqual([ - (utils.datetime_utc(2014, 1, 1), 86400, 5), - (utils.datetime_utc(2014, 1, 1, 12), 3600, 5), - (utils.datetime_utc(2014, 1, 1, 12), 300, 5) + (utils.datetime_utc(2016, 7, 17), 86400, 5), + (utils.datetime_utc(2016, 7, 18), 86400, 8), + (utils.datetime_utc(2016, 7, 17, 23), 3600, 5), + (utils.datetime_utc(2016, 7, 18, 0), 3600, 8), + (utils.datetime_utc(2016, 7, 17, 23, 55), 300, 5), + (utils.datetime_utc(2016, 7, 18, 0), 300, 8) ], self.storage.get_measures(self.metric)) self.assertEqual([ - 
(utils.datetime_utc(2014, 1, 1), 86400, 6), - (utils.datetime_utc(2014, 1, 1, 12), 3600, 6), - (utils.datetime_utc(2014, 1, 1, 12), 300, 6) + (utils.datetime_utc(2016, 7, 17), 86400, 6), + (utils.datetime_utc(2016, 7, 18), 86400, 9), + (utils.datetime_utc(2016, 7, 17, 23), 3600, 6), + (utils.datetime_utc(2016, 7, 18, 0), 3600, 9), + (utils.datetime_utc(2016, 7, 17, 23, 55), 300, 6), + (utils.datetime_utc(2016, 7, 18, 0), 300, 9) ], self.storage.get_measures(self.metric, aggregation='max')) self.upgrade() self.assertEqual([ - (utils.datetime_utc(2014, 1, 1), 86400, 5), - (utils.datetime_utc(2014, 1, 1, 12), 3600, 5), - (utils.datetime_utc(2014, 1, 1, 12), 300, 5) + (utils.datetime_utc(2016, 7, 17), 86400, 5), + (utils.datetime_utc(2016, 7, 18), 86400, 8), + (utils.datetime_utc(2016, 7, 17, 23), 3600, 5), + (utils.datetime_utc(2016, 7, 18, 0), 3600, 8), + (utils.datetime_utc(2016, 7, 17, 23, 55), 300, 5), + (utils.datetime_utc(2016, 7, 18, 0), 300, 8) ], self.storage.get_measures(self.metric)) self.assertEqual([ - (utils.datetime_utc(2014, 1, 1), 86400, 6), - (utils.datetime_utc(2014, 1, 1, 12), 3600, 6), - (utils.datetime_utc(2014, 1, 1, 12), 300, 6) + (utils.datetime_utc(2016, 7, 17), 86400, 6), + (utils.datetime_utc(2016, 7, 18), 86400, 9), + (utils.datetime_utc(2016, 7, 17, 23), 3600, 6), + (utils.datetime_utc(2016, 7, 18, 0), 3600, 9), + (utils.datetime_utc(2016, 7, 17, 23, 55), 300, 6), + (utils.datetime_utc(2016, 7, 18, 0), 300, 9) ], self.storage.get_measures(self.metric, aggregation='max')) with mock.patch.object( diff --git a/gnocchi/tests/test_carbonara.py b/gnocchi/tests/test_carbonara.py index 67bd9e6f..9a4fc0dc 100644 --- a/gnocchi/tests/test_carbonara.py +++ b/gnocchi/tests/test_carbonara.py @@ -882,21 +882,21 @@ class TestAggregatedTimeSerie(base.BaseTestCase): def test_split_key(self): self.assertEqual( - "1420128000.0", + "1420146000.0", carbonara.AggregatedTimeSerie.get_split_key( datetime.datetime(2015, 1, 1, 23, 34), 5)) self.assertEqual( - 
"1420056000.0", + "1420110000.0", carbonara.AggregatedTimeSerie.get_split_key( datetime.datetime(2015, 1, 1, 15, 3), 5)) def test_split_key_datetime(self): self.assertEqual( - datetime.datetime(2014, 5, 10), + datetime.datetime(2014, 10, 7), carbonara.AggregatedTimeSerie.get_split_key_datetime( datetime.datetime(2015, 1, 1, 15, 3), 3600)) self.assertEqual( - datetime.datetime(2014, 12, 29, 8), + datetime.datetime(2014, 12, 31, 18), carbonara.AggregatedTimeSerie.get_split_key_datetime( datetime.datetime(2015, 1, 1, 15, 3), 58)) @@ -919,8 +919,8 @@ class TestAggregatedTimeSerie(base.BaseTestCase): len(grouped_points)) self.assertEqual("0.0", grouped_points[0][0]) - # 14400 × 5s = 20 hours - self.assertEqual("72000.0", + # 3600 × 5s = 5 hours + self.assertEqual("18000.0", grouped_points[1][0]) self.assertEqual(carbonara.AggregatedTimeSerie.POINTS_PER_SPLIT, len(grouped_points[0][1])) -- GitLab From d012a601c07117fa53ab5a47b6c9dd17400eef7b Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Mon, 8 Aug 2016 18:38:23 +0200 Subject: [PATCH 0314/1483] rest: remove aggregation dead code When we changed the aggregated metric request form a while back, we forgot to remove some dead code. This patch removes it and dispatch the code we still use in proper places. 
Change-Id: I1d96b1e505e327b1295e8a5eed819716e84e67dc --- gnocchi/rest/__init__.py | 179 +++++++++++++++++---------------------- 1 file changed, 78 insertions(+), 101 deletions(-) diff --git a/gnocchi/rest/__init__.py b/gnocchi/rest/__init__.py index c740383d..f521aa60 100644 --- a/gnocchi/rest/__init__.py +++ b/gnocchi/rest/__init__.py @@ -401,103 +401,6 @@ class ArchivePolicyRulesController(rest.RestController): abort(400, e) -class AggregatedMetricController(rest.RestController): - _custom_actions = { - 'measures': ['GET'] - } - - def __init__(self, metric_ids): - self.metric_ids = metric_ids - - @pecan.expose('json') - def get_measures(self, start=None, stop=None, aggregation='mean', - granularity=None, needed_overlap=100.0): - return self.get_cross_metric_measures_from_ids( - self.metric_ids, start, stop, - aggregation, granularity, needed_overlap) - - @classmethod - def get_cross_metric_measures_from_ids(cls, metric_ids, start=None, - stop=None, aggregation='mean', - granularity=None, - needed_overlap=100.0): - # Check RBAC policy - metrics = pecan.request.indexer.list_metrics(ids=metric_ids) - missing_metric_ids = (set(metric_ids) - - set(six.text_type(m.id) for m in metrics)) - if missing_metric_ids: - # Return one of the missing one in the error - abort(404, storage.MetricDoesNotExist( - missing_metric_ids.pop())) - return cls.get_cross_metric_measures_from_objs( - metrics, start, stop, aggregation, granularity, needed_overlap) - - @staticmethod - def get_cross_metric_measures_from_objs(metrics, start=None, stop=None, - aggregation='mean', - granularity=None, - needed_overlap=100.0): - try: - needed_overlap = float(needed_overlap) - except ValueError: - abort(400, 'needed_overlap must be a number') - - if start is not None: - try: - start = Timestamp(start) - except Exception: - abort(400, "Invalid value for start") - - if stop is not None: - try: - stop = Timestamp(stop) - except Exception: - abort(400, "Invalid value for stop") - - if (aggregation - not 
in archive_policy.ArchivePolicy.VALID_AGGREGATION_METHODS): - abort( - 400, - 'Invalid aggregation value %s, must be one of %s' - % (aggregation, - archive_policy.ArchivePolicy.VALID_AGGREGATION_METHODS)) - - for metric in metrics: - enforce("get metric", metric) - - number_of_metrics = len(metrics) - try: - if number_of_metrics == 0: - return [] - if granularity is not None: - try: - granularity = float(granularity) - except ValueError as e: - abort(400, "granularity must be a float: %s" % e) - if number_of_metrics == 1: - # NOTE(sileht): don't do the aggregation if we only have one - # metric - measures = pecan.request.storage.get_measures( - metrics[0], start, stop, aggregation, - granularity) - else: - measures = pecan.request.storage.get_cross_metric_measures( - metrics, start, stop, aggregation, - None, - granularity, - needed_overlap) - # Replace timestamp keys by their string versions - return [(timestamp.isoformat(), offset, v) - for timestamp, offset, v in measures] - except storage.MetricUnaggregatable as e: - abort(400, ("One of the metrics being aggregated doesn't have " - "matching granularity: %s") % str(e)) - except storage.MetricDoesNotExist as e: - abort(404, e) - except storage.AggregationDoesNotExist as e: - abort(404, e) - - def MeasureSchema(m): # NOTE(sileht): don't use voluptuous for performance reasons try: @@ -1364,7 +1267,7 @@ class AggregationResourceController(rest.RestController): metrics = list(filter(None, (r.get_metric(self.metric_name) for r in resources))) - return AggregatedMetricController.get_cross_metric_measures_from_objs( # noqa + return AggregationController.get_cross_metric_measures_from_objs( metrics, start, stop, aggregation, granularity, needed_overlap) def groupper(r): @@ -1377,7 +1280,7 @@ class AggregationResourceController(rest.RestController): for r in resources))) results.append({ "group": dict(key), - "measures": AggregatedMetricController.get_cross_metric_measures_from_objs( # noqa + "measures": 
AggregationController.get_cross_metric_measures_from_objs( # noqa metrics, start, stop, aggregation, granularity, needed_overlap) }) @@ -1404,12 +1307,86 @@ class AggregationController(rest.RestController): return AggregationResourceController(resource_type, metric_name), remainder + @staticmethod + def get_cross_metric_measures_from_objs(metrics, start=None, stop=None, + aggregation='mean', + granularity=None, + needed_overlap=100.0): + try: + needed_overlap = float(needed_overlap) + except ValueError: + abort(400, 'needed_overlap must be a number') + + if start is not None: + try: + start = Timestamp(start) + except Exception: + abort(400, "Invalid value for start") + + if stop is not None: + try: + stop = Timestamp(stop) + except Exception: + abort(400, "Invalid value for stop") + + if (aggregation + not in archive_policy.ArchivePolicy.VALID_AGGREGATION_METHODS): + abort( + 400, + 'Invalid aggregation value %s, must be one of %s' + % (aggregation, + archive_policy.ArchivePolicy.VALID_AGGREGATION_METHODS)) + + for metric in metrics: + enforce("get metric", metric) + + number_of_metrics = len(metrics) + try: + if number_of_metrics == 0: + return [] + if granularity is not None: + try: + granularity = float(granularity) + except ValueError as e: + abort(400, "granularity must be a float: %s" % e) + if number_of_metrics == 1: + # NOTE(sileht): don't do the aggregation if we only have one + # metric + measures = pecan.request.storage.get_measures( + metrics[0], start, stop, aggregation, + granularity) + else: + measures = pecan.request.storage.get_cross_metric_measures( + metrics, start, stop, aggregation, + None, + granularity, + needed_overlap) + # Replace timestamp keys by their string versions + return [(timestamp.isoformat(), offset, v) + for timestamp, offset, v in measures] + except storage.MetricUnaggregatable as e: + abort(400, ("One of the metrics being aggregated doesn't have " + "matching granularity: %s") % str(e)) + except storage.MetricDoesNotExist as 
e: + abort(404, e) + except storage.AggregationDoesNotExist as e: + abort(404, e) + @pecan.expose('json') def get_metric(self, metric=None, start=None, stop=None, aggregation='mean', granularity=None, needed_overlap=100.0): - return AggregatedMetricController.get_cross_metric_measures_from_ids( - arg_to_list(metric), start, stop, aggregation, + # Check RBAC policy + metric_ids = arg_to_list(metric) + metrics = pecan.request.indexer.list_metrics(ids=metric_ids) + missing_metric_ids = (set(metric_ids) + - set(six.text_type(m.id) for m in metrics)) + if missing_metric_ids: + # Return one of the missing one in the error + abort(404, storage.MetricDoesNotExist( + missing_metric_ids.pop())) + return self.get_cross_metric_measures_from_objs( + metrics, start, stop, aggregation, granularity, needed_overlap) -- GitLab From 82509d782e8a21d70a848065f4948f98419aca54 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Sat, 6 Aug 2016 15:48:17 +0200 Subject: [PATCH 0315/1483] rest: allow to specify reaggregation methods in aggregation Change-Id: Ib37c6b7e8b8214bd8b3e8f2e92547fed56923374 Closes-Bug: #1573023 --- doc/source/rest.j2 | 6 +++++ doc/source/rest.yaml | 4 +++ gnocchi/rest/__init__.py | 12 ++++++--- gnocchi/tests/gabbi/gabbits/aggregation.yaml | 28 ++++++++++++++++++++ 4 files changed, 46 insertions(+), 4 deletions(-) diff --git a/doc/source/rest.j2 b/doc/source/rest.j2 index 834bf9b2..a37743ac 100644 --- a/doc/source/rest.j2 +++ b/doc/source/rest.j2 @@ -457,6 +457,12 @@ It can also be done by providing the list of metrics to aggregate: of this already aggregated data may not have sense for certain kind of aggregation method (e.g. stdev). +By default, the measures are aggregated using the aggregation method provided, +e.g. you'll get a mean of means, or a max of maxs. 
You can specify what method +to use over the retrieved aggregation by using the `reaggregate` parameter: + +{{ scenarios['get-across-metrics-measures-by-metric-ids-reaggregate']['doc'] }} + It's also possible to do that aggregation on metrics linked to resources. In order to select these resources, the following endpoint accepts a query such as the one described in `Searching for resources`_. diff --git a/doc/source/rest.yaml b/doc/source/rest.yaml index 764ccafb..46c35467 100644 --- a/doc/source/rest.yaml +++ b/doc/source/rest.yaml @@ -541,6 +541,10 @@ request: | GET /v1/aggregation/metric?metric={{ scenarios['create-resource-instance-with-metrics']['response'].json['metrics']['cpu.util'] }}&metric={{ scenarios['create-resource-instance-with-dynamic-metrics']['response'].json['metrics']['cpu.util'] }}&start=2014-10-06T14:34&aggregation=mean HTTP/1.1 +- name: get-across-metrics-measures-by-metric-ids-reaggregate + request: | + GET /v1/aggregation/metric?metric={{ scenarios['create-resource-instance-with-metrics']['response'].json['metrics']['cpu.util'] }}&metric={{ scenarios['create-resource-instance-with-dynamic-metrics']['response'].json['metrics']['cpu.util'] }}&aggregation=mean&reaggregation=min HTTP/1.1 + - name: append-metrics-to-resource request: | POST /v1/resource/generic/{{ scenarios['create-resource-instance-with-metrics']['response'].json['id'] }}/metric HTTP/1.1 diff --git a/gnocchi/rest/__init__.py b/gnocchi/rest/__init__.py index f521aa60..ff996d30 100644 --- a/gnocchi/rest/__init__.py +++ b/gnocchi/rest/__init__.py @@ -1244,6 +1244,7 @@ class AggregationResourceController(rest.RestController): @pecan.expose('json') def post(self, start=None, stop=None, aggregation='mean', + reaggregation=None, granularity=None, needed_overlap=100.0, groupby=None): # First, set groupby in the right format: a sorted list of unique @@ -1268,7 +1269,8 @@ class AggregationResourceController(rest.RestController): (r.get_metric(self.metric_name) for r in resources))) 
return AggregationController.get_cross_metric_measures_from_objs( - metrics, start, stop, aggregation, granularity, needed_overlap) + metrics, start, stop, aggregation, reaggregation, + granularity, needed_overlap) def groupper(r): return tuple((attr, r[attr]) for attr in groupby) @@ -1281,7 +1283,7 @@ class AggregationResourceController(rest.RestController): results.append({ "group": dict(key), "measures": AggregationController.get_cross_metric_measures_from_objs( # noqa - metrics, start, stop, aggregation, + metrics, start, stop, aggregation, reaggregation, granularity, needed_overlap) }) @@ -1310,6 +1312,7 @@ class AggregationController(rest.RestController): @staticmethod def get_cross_metric_measures_from_objs(metrics, start=None, stop=None, aggregation='mean', + reaggregation=None, granularity=None, needed_overlap=100.0): try: @@ -1358,7 +1361,7 @@ class AggregationController(rest.RestController): else: measures = pecan.request.storage.get_cross_metric_measures( metrics, start, stop, aggregation, - None, + reaggregation, granularity, needed_overlap) # Replace timestamp keys by their string versions @@ -1375,6 +1378,7 @@ class AggregationController(rest.RestController): @pecan.expose('json') def get_metric(self, metric=None, start=None, stop=None, aggregation='mean', + reaggregation=None, granularity=None, needed_overlap=100.0): # Check RBAC policy metric_ids = arg_to_list(metric) @@ -1386,7 +1390,7 @@ class AggregationController(rest.RestController): abort(404, storage.MetricDoesNotExist( missing_metric_ids.pop())) return self.get_cross_metric_measures_from_objs( - metrics, start, stop, aggregation, + metrics, start, stop, aggregation, reaggregation, granularity, needed_overlap) diff --git a/gnocchi/tests/gabbi/gabbits/aggregation.yaml b/gnocchi/tests/gabbi/gabbits/aggregation.yaml index c1e883a9..f23663ed 100644 --- a/gnocchi/tests/gabbi/gabbits/aggregation.yaml +++ b/gnocchi/tests/gabbi/gabbits/aggregation.yaml @@ -94,6 +94,20 @@ tests: - 
['2015-03-06T14:30:00+00:00', 300.0, 15.05] - ['2015-03-06T14:33:57+00:00', 1.0, 23.1] + - name: get metric list to push metric 4 + GET: /v1/metric + + - name: get measure aggregates and reaggregate + GET: /v1/aggregation/metric?metric=$RESPONSE['$[0].id']&metric=$RESPONSE['$[1].id']&reaggregation=min + poll: + count: 10 + delay: 1 + response_json_paths: + $: + - ['2015-03-06T14:30:00+00:00', 300.0, 2.55] + - ['2015-03-06T14:33:57+00:00', 1.0, 3.1] + - ['2015-03-06T14:34:12+00:00', 1.0, 2.0] + # Aggregation by resource and metric_name - name: post a resource @@ -176,6 +190,20 @@ tests: - ['2015-03-06T14:30:00+00:00', 300.0, 15.05] - ['2015-03-06T14:33:57+00:00', 1.0, 23.1] + - name: get measure aggregates by granularity from resources and reaggregate + POST: /v1/aggregation/resource/generic/metric/agg_meter?granularity=1&reaggregate=min + request_headers: + x-user-id: 0fbb231484614b1a80131fc22f6afc9c + x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea + content-type: application/json + poll: + count: 10 + delay: 1 + response_json_paths: + $: + - ['2015-03-06T14:33:57+00:00', 1.0, 23.1] + - ['2015-03-06T14:34:12+00:00', 1.0, 7.0] + # Some negative tests - name: get measure aggregates with wrong GET -- GitLab From 5f9e39be8b5e6ebe9eb16d2a5f9005b4c51520e1 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Tue, 9 Aug 2016 09:39:49 +0200 Subject: [PATCH 0316/1483] carbonara: remove unused class The mixin is not used anymore, the methods are overwritten where needed anyway. 
Change-Id: I6bc36f8686aef84caf90ee153e99a5acb7c45831 --- gnocchi/carbonara.py | 19 ++++++++----------- 1 file changed, 8 insertions(+), 11 deletions(-) diff --git a/gnocchi/carbonara.py b/gnocchi/carbonara.py index 214e0953..c0852ebc 100644 --- a/gnocchi/carbonara.py +++ b/gnocchi/carbonara.py @@ -77,17 +77,7 @@ class UnknownAggregationMethod(Exception): "Unknown aggregation method `%s'" % agg) -class SerializableMixin(object): - - @classmethod - def unserialize(cls, data): - return cls.from_dict(msgpack.loads(data, encoding='utf-8')) - - def serialize(self): - return msgpack.dumps(self.to_dict()) - - -class TimeSerie(SerializableMixin): +class TimeSerie(object): """A representation of series of a timestamp with a value. Duplicate timestamps are not allowed and will be filtered to use the @@ -185,6 +175,13 @@ class TimeSerie(SerializableMixin): except IndexError: return + @classmethod + def unserialize(cls, data): + return cls.from_dict(msgpack.loads(data, encoding='utf-8')) + + def serialize(self): + return msgpack.dumps(self.to_dict()) + class BoundTimeSerie(TimeSerie): def __init__(self, ts=None, block_size=None, back_window=0): -- GitLab From fec094aa6ff4e2a5d0c03ed43c521a501372f644 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Tue, 9 Aug 2016 09:43:38 +0200 Subject: [PATCH 0317/1483] carbonara: fix benchmark output now that compression is out Just indicate the serialization speed. 
Closes-Bug: #1611089 Change-Id: I2326f1f036a93d713b023dfcfaab86b6f42673b7 --- gnocchi/carbonara.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/gnocchi/carbonara.py b/gnocchi/carbonara.py index c0852ebc..c0db53b3 100644 --- a/gnocchi/carbonara.py +++ b/gnocchi/carbonara.py @@ -533,8 +533,8 @@ class AggregatedTimeSerie(TimeSerie): t1 = time.time() print(title) print(" Bytes per point: %.2f" % (len(s) / float(points))) - print(" Compression speed: %.2f MB/s" - % ((len(msgpack.dumps(ts.to_dict())) + print(" Serialization speed: %.2f MB/s" + % (((points * 2 * 8) / ((t1 - t0) / compress_times)) / (1024.0 * 1024.0))) @staticmethod -- GitLab From 4390238970ab180114e75dee7e9e2fd670dfbdbc Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Tue, 9 Aug 2016 20:12:23 +0200 Subject: [PATCH 0318/1483] swift: optimize metric reporting in detail mode This only makes one listing request if details are requested. Change-Id: If637230b1d8913f1574f15dc9e93a5ecdac54fad --- gnocchi/storage/swift.py | 18 +++++++++++------- 1 file changed, 11 insertions(+), 7 deletions(-) diff --git a/gnocchi/storage/swift.py b/gnocchi/storage/swift.py index df6932c4..cbf1ee04 100644 --- a/gnocchi/storage/swift.py +++ b/gnocchi/storage/swift.py @@ -121,19 +121,23 @@ class SwiftStorage(_carbonara.CarbonaraBasedStorage): data) def _build_report(self, details): - headers, files = self.swift.get_container(self.MEASURE_PREFIX, - delimiter='/', - full_listing=True) - metrics = len(files) - measures = int(headers.get('x-container-object-count')) metric_details = defaultdict(int) if details: headers, files = self.swift.get_container(self.MEASURE_PREFIX, full_listing=True) + metrics = set() for f in files: - metric = f['name'].split('/', 1)[0] + metric, metric_files = f['name'].split("/", 1) metric_details[metric] += 1 - return metrics, measures, metric_details if details else None + metrics.add(metric) + nb_metrics = len(metrics) + else: + headers, files = 
self.swift.get_container(self.MEASURE_PREFIX, + delimiter='/', + full_listing=True) + nb_metrics = len(files) + measures = int(headers.get('x-container-object-count')) + return nb_metrics, measures, metric_details if details else None def list_metric_with_measures_to_process(self, size, part, full=False): limit = None -- GitLab From c5a8465539c2407f621fab2c33e56858e6906fe5 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Tue, 9 Aug 2016 21:22:50 +0200 Subject: [PATCH 0319/1483] rest: fix status test There is race condition in one of the test as we cannot be sure that the measures_to_process is empty: we use a common database/storage for all metrics, so that might be wrong. Also, use assertIsInstance. Change-Id: I3799efc24937ef20085bd807c9ba1689a7f87d34 --- gnocchi/tests/test_rest.py | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/gnocchi/tests/test_rest.py b/gnocchi/tests/test_rest.py index fe18f3b6..02239097 100644 --- a/gnocchi/tests/test_rest.py +++ b/gnocchi/tests/test_rest.py @@ -175,11 +175,9 @@ class RootTest(RestTest): with self.app.use_admin_user(): r = self.app.get("/v1/status") status = json.loads(r.text) - # We are sure this is empty because we call process_measures() each - # time we do a REST request in this TestingApp. 
- self.assertEqual({}, status['storage']['measures_to_process']) - self.assertIs(type(status['storage']['summary']['metrics']), int) - self.assertIs(type(status['storage']['summary']['measures']), int) + self.assertIsInstance(status['storage']['measures_to_process'], dict) + self.assertIsInstance(status['storage']['summary']['metrics'], int) + self.assertIsInstance(status['storage']['summary']['measures'], int) class ArchivePolicyTest(RestTest): -- GitLab From f25e1a49da5d8747b915ed4206caa38f1b2d1b33 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Thu, 11 Aug 2016 15:05:41 +0200 Subject: [PATCH 0320/1483] storage: remove unused mock in tests Change-Id: I0abfbd5b2b5f966a42c36a0743b3801683c6a37b --- gnocchi/tests/test_storage.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/gnocchi/tests/test_storage.py b/gnocchi/tests/test_storage.py index 607fbf30..f5b48f6d 100644 --- a/gnocchi/tests/test_storage.py +++ b/gnocchi/tests/test_storage.py @@ -56,8 +56,7 @@ class TestStorageDriver(tests_base.TestCase): driver = storage.get_driver(self.conf) self.assertIsInstance(driver, null.NullStorage) - @mock.patch('gnocchi.storage._carbonara.LOG') - def test_corrupted_data(self, logger): + def test_corrupted_data(self): if not isinstance(self.storage, _carbonara.CarbonaraBasedStorage): self.skipTest("This driver is not based on Carbonara") -- GitLab From 72a2091727431555eba65c6ef8ff89448f3432f0 Mon Sep 17 00:00:00 2001 From: gord chung Date: Fri, 5 Aug 2016 09:47:09 -0400 Subject: [PATCH 0321/1483] add support to process measures on GET depending on the backlog, the returned measures from get-measures may not include what sits in carbonara backlog. to allow for more flexibility on metricd deployments, this patch offers the ability to allow users to get all measures even if they don't have enough metricd workers, at the expense of responsiveness. 
refresh blocks until it can process measures Change-Id: I588ae6879474d780e8ec9e893d4ecc2b367b832e Closes-Bug: #1603495 --- doc/source/rest.j2 | 15 +++++++ doc/source/rest.yaml | 3 ++ gnocchi/rest/__init__.py | 45 +++++++++++--------- gnocchi/tests/gabbi/gabbits-live/live.yaml | 20 +++++++++ gnocchi/tests/gabbi/gabbits/aggregation.yaml | 21 +++++++++ 5 files changed, 85 insertions(+), 19 deletions(-) diff --git a/doc/source/rest.j2 b/doc/source/rest.j2 index a37743ac..7e2d309b 100644 --- a/doc/source/rest.j2 +++ b/doc/source/rest.j2 @@ -73,6 +73,17 @@ endpoint: {{ scenarios['get-measures']['doc'] }} +Depending on the driver, there may be some lag after POSTing measures before +they are processed and queryable. To ensure your query returns all measures +that have been POSTed, you can force any unprocessed measures to be handled: + +{{ scenarios['get-measures-refresh']['doc'] }} + +.. note:: + + Depending on the amount of data that is unprocessed, `refresh` may add + some overhead to your query. + The list of points returned is composed of tuples with (timestamp, granularity, value) sorted by timestamp. The granularity is the timespan covered by aggregation for this point. @@ -474,6 +485,10 @@ requested resource type, and the compute the aggregation: {{ scenarios['get-across-metrics-measures-by-attributes-lookup-groupby']['doc'] }} +Similar to retrieving measures for a single metric, the `refresh` parameter +can be provided to force all POSTed measures to be processed across all +metrics before computing the result. + Also aggregation across metrics have different behavior depending on if boundary are set ('start' and 'stop') and if 'needed_overlap' is set. 
diff --git a/doc/source/rest.yaml b/doc/source/rest.yaml index 46c35467..8204394c 100644 --- a/doc/source/rest.yaml +++ b/doc/source/rest.yaml @@ -214,6 +214,9 @@ - name: get-measures-granularity request: GET /v1/metric/{{ scenarios['create-metric']['response'].json['id'] }}/measures?granularity=1 HTTP/1.1 +- name: get-measures-refresh + request: GET /v1/metric/{{ scenarios['create-metric']['response'].json['id'] }}/measures?refresh=true HTTP/1.1 + - name: create-resource-generic request: | POST /v1/resource/generic HTTP/1.1 diff --git a/gnocchi/rest/__init__.py b/gnocchi/rest/__init__.py index ff996d30..f80e0a0f 100644 --- a/gnocchi/rest/__init__.py +++ b/gnocchi/rest/__init__.py @@ -449,7 +449,7 @@ class MetricController(rest.RestController): @pecan.expose('json') def get_measures(self, start=None, stop=None, aggregation='mean', - granularity=None, **param): + granularity=None, refresh=False, **param): self.enforce_metric("get measures") if not (aggregation in archive_policy.ArchivePolicy.VALID_AGGREGATION_METHODS @@ -473,6 +473,10 @@ class MetricController(rest.RestController): except Exception: abort(400, "Invalid value for stop") + if strutils.bool_from_string(refresh): + pecan.request.storage.process_new_measures( + pecan.request.indexer, [six.text_type(self.metric.id)], True) + try: if aggregation in self.custom_agg: measures = self.custom_agg[aggregation].compute( @@ -1244,9 +1248,8 @@ class AggregationResourceController(rest.RestController): @pecan.expose('json') def post(self, start=None, stop=None, aggregation='mean', - reaggregation=None, - granularity=None, needed_overlap=100.0, - groupby=None): + reaggregation=None, granularity=None, needed_overlap=100.0, + groupby=None, refresh=False): # First, set groupby in the right format: a sorted list of unique # strings. 
groupby = sorted(set(arg_to_list(groupby))) @@ -1270,7 +1273,7 @@ class AggregationResourceController(rest.RestController): for r in resources))) return AggregationController.get_cross_metric_measures_from_objs( metrics, start, stop, aggregation, reaggregation, - granularity, needed_overlap) + granularity, needed_overlap, refresh) def groupper(r): return tuple((attr, r[attr]) for attr in groupby) @@ -1284,7 +1287,7 @@ class AggregationResourceController(rest.RestController): "group": dict(key), "measures": AggregationController.get_cross_metric_measures_from_objs( # noqa metrics, start, stop, aggregation, reaggregation, - granularity, needed_overlap) + granularity, needed_overlap, refresh) }) return results @@ -1314,7 +1317,8 @@ class AggregationController(rest.RestController): aggregation='mean', reaggregation=None, granularity=None, - needed_overlap=100.0): + needed_overlap=100.0, + refresh=False): try: needed_overlap = float(needed_overlap) except ValueError: @@ -1344,14 +1348,18 @@ class AggregationController(rest.RestController): enforce("get metric", metric) number_of_metrics = len(metrics) + if number_of_metrics == 0: + return [] + if granularity is not None: + try: + granularity = float(granularity) + except ValueError as e: + abort(400, "granularity must be a float: %s" % e) try: - if number_of_metrics == 0: - return [] - if granularity is not None: - try: - granularity = float(granularity) - except ValueError as e: - abort(400, "granularity must be a float: %s" % e) + if strutils.bool_from_string(refresh): + pecan.request.storage.process_new_measures( + pecan.request.indexer, + [six.text_type(m.id) for m in metrics], True) if number_of_metrics == 1: # NOTE(sileht): don't do the aggregation if we only have one # metric @@ -1376,10 +1384,9 @@ class AggregationController(rest.RestController): abort(404, e) @pecan.expose('json') - def get_metric(self, metric=None, start=None, - stop=None, aggregation='mean', - reaggregation=None, - granularity=None, 
needed_overlap=100.0): + def get_metric(self, metric=None, start=None, stop=None, + aggregation='mean', reaggregation=None, granularity=None, + needed_overlap=100.0, refresh=False): # Check RBAC policy metric_ids = arg_to_list(metric) metrics = pecan.request.indexer.list_metrics(ids=metric_ids) @@ -1391,7 +1398,7 @@ class AggregationController(rest.RestController): missing_metric_ids.pop())) return self.get_cross_metric_measures_from_objs( metrics, start, stop, aggregation, reaggregation, - granularity, needed_overlap) + granularity, needed_overlap, refresh) class CapabilityController(rest.RestController): diff --git a/gnocchi/tests/gabbi/gabbits-live/live.yaml b/gnocchi/tests/gabbi/gabbits-live/live.yaml index bbc924fb..226e4d69 100644 --- a/gnocchi/tests/gabbi/gabbits-live/live.yaml +++ b/gnocchi/tests/gabbi/gabbits-live/live.yaml @@ -617,6 +617,26 @@ tests: $[0][2]: 2 $[1][2]: 2 + - name: post some more measures to the metric on myresource + POST: /v1/resource/myresource/2ae35573-7f9f-4bb1-aae8-dad8dff5706e/metric/vcpus/measures + request_headers: + content-type: application/json + data: + - timestamp: "2015-03-06T14:34:15" + value: 5 + - timestamp: "2015-03-06T14:34:20" + value: 5 + status: 202 + + - name: get myresource measures with refresh + GET: /v1/resource/myresource/2ae35573-7f9f-4bb1-aae8-dad8dff5706e/metric/vcpus/measures?refresh=true + response_json_paths: + $[0][2]: 2 + $[1][2]: 4 + $[2][2]: 2 + $[3][2]: 2 + $[4][2]: 5 + $[5][2]: 5 # # Search for resources diff --git a/gnocchi/tests/gabbi/gabbits/aggregation.yaml b/gnocchi/tests/gabbi/gabbits/aggregation.yaml index f23663ed..6cb11d6c 100644 --- a/gnocchi/tests/gabbi/gabbits/aggregation.yaml +++ b/gnocchi/tests/gabbi/gabbits/aggregation.yaml @@ -68,6 +68,16 @@ tests: GET: /v1/aggregation/metric?metric=$RESPONSE['$[0].id']&metric=$RESPONSE['$[1].id']&granularity=foobar status: 400 + - name: get metric list to get aggregates for get with refresh + GET: /v1/metric + + - name: get measure aggregates by 
granularity with refresh + GET: /v1/aggregation/metric?metric=$RESPONSE['$[0].id']&metric=$RESPONSE['$[1].id']&granularity=1&refresh=true + response_json_paths: + $: + - ['2015-03-06T14:33:57+00:00', 1.0, 23.1] + - ['2015-03-06T14:34:12+00:00', 1.0, 7.0] + - name: get metric list to get aggregates 2 GET: /v1/metric @@ -162,6 +172,17 @@ tests: value: 2 status: 202 + - name: get measure aggregates by granularity from resources with refresh + POST: /v1/aggregation/resource/generic/metric/agg_meter?granularity=1&refresh=true + request_headers: + x-user-id: 0fbb231484614b1a80131fc22f6afc9c + x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea + content-type: application/json + response_json_paths: + $: + - ['2015-03-06T14:33:57+00:00', 1.0, 23.1] + - ['2015-03-06T14:34:12+00:00', 1.0, 7.0] + - name: get measure aggregates by granularity from resources POST: /v1/aggregation/resource/generic/metric/agg_meter?granularity=1 request_headers: -- GitLab From 0798a1e4e0cdb489f09417b4b56fe6c70553e475 Mon Sep 17 00:00:00 2001 From: gord chung Date: Thu, 11 Aug 2016 12:51:37 +0000 Subject: [PATCH 0322/1483] only look for v2 objects on upgrade upgrade fails if we upgrade an already upgraded storage because it grabs split keys and isn't aware of what version to look for. this patch makes sure upgrade only looks for v2 objects to upgrade and filters anything else. 
Change-Id: Ifb27c845b716a4d0cf9215e78617398ebd09b6e8 Closes-Bug: #1611912 --- gnocchi/storage/_carbonara.py | 15 +++++++++++++-- gnocchi/storage/ceph.py | 6 ++++-- gnocchi/storage/file.py | 5 +++-- gnocchi/storage/swift.py | 6 ++++-- gnocchi/tests/storage/test_carbonara.py | 25 +++++++++++++++++++++++++ 5 files changed, 49 insertions(+), 8 deletions(-) diff --git a/gnocchi/storage/_carbonara.py b/gnocchi/storage/_carbonara.py index c7fc464b..ff456531 100644 --- a/gnocchi/storage/_carbonara.py +++ b/gnocchi/storage/_carbonara.py @@ -94,9 +94,20 @@ class CarbonaraBasedStorage(storage.StorageDriver): raise NotImplementedError @staticmethod - def _list_split_keys_for_metric(metric, aggregation, granularity): + def _list_split_keys_for_metric(metric, aggregation, granularity, + version=None): raise NotImplementedError + @staticmethod + def _version_check(name, v): + """Validate object matches expected version. + + Version should be last attribute and start with 'v' + """ + attrs = name.split("_") + return not v or (not attrs[-1].startswith('v') if v == 2 + else attrs[-1] == 'v%s' % v) + def get_measures(self, metric, from_timestamp=None, to_timestamp=None, aggregation='mean', granularity=None): super(CarbonaraBasedStorage, self).get_measures( @@ -268,7 +279,7 @@ class CarbonaraBasedStorage(storage.StorageDriver): try: all_keys = self._list_split_keys_for_metric( - metric, agg_method, d.granularity) + metric, agg_method, d.granularity, version=2) except storage.MetricDoesNotExist: # Just try the next metric, this one has no measures break diff --git a/gnocchi/storage/ceph.py b/gnocchi/storage/ceph.py index 23de86ba..18aefabe 100644 --- a/gnocchi/storage/ceph.py +++ b/gnocchi/storage/ceph.py @@ -299,7 +299,8 @@ class CephStorage(_carbonara.CarbonaraBasedStorage): return carbonara.AggregatedTimeSerie( apolicy.granularity, agg, max_size=apolicy.points) - def _list_split_keys_for_metric(self, metric, aggregation, granularity): + def _list_split_keys_for_metric(self, metric, 
aggregation, granularity, + version=None): try: xattrs = self.ioctx.get_xattrs("gnocchi_%s_container" % metric.id) except rados.ObjectNotFound: @@ -307,7 +308,8 @@ class CephStorage(_carbonara.CarbonaraBasedStorage): keys = [] for xattr, value in xattrs: meta = xattr.split('_') - if aggregation == meta[3] and granularity == float(meta[4]): + if (aggregation == meta[3] and granularity == float(meta[4]) and + self._version_check(xattr, version)): keys.append(meta[2]) return keys diff --git a/gnocchi/storage/file.py b/gnocchi/storage/file.py index 243846b1..b77ab667 100644 --- a/gnocchi/storage/file.py +++ b/gnocchi/storage/file.py @@ -212,7 +212,8 @@ class FileStorage(_carbonara.CarbonaraBasedStorage): raise storage.MetricDoesNotExist(metric) raise - def _list_split_keys_for_metric(self, metric, aggregation, granularity): + def _list_split_keys_for_metric(self, metric, aggregation, granularity, + version=None): try: files = os.listdir(self._build_metric_path(metric, aggregation)) except OSError as e: @@ -222,7 +223,7 @@ class FileStorage(_carbonara.CarbonaraBasedStorage): keys = [] for f in files: meta = f.split("_") - if meta[1] == str(granularity): + if meta[1] == str(granularity) and self._version_check(f, version): keys.append(meta[0]) return keys diff --git a/gnocchi/storage/swift.py b/gnocchi/storage/swift.py index df6932c4..231092d7 100644 --- a/gnocchi/storage/swift.py +++ b/gnocchi/storage/swift.py @@ -245,7 +245,8 @@ class SwiftStorage(_carbonara.CarbonaraBasedStorage): raise return lz4.loads(contents) - def _list_split_keys_for_metric(self, metric, aggregation, granularity): + def _list_split_keys_for_metric(self, metric, aggregation, granularity, + version=None): container = self._container_name(metric) try: headers, files = self.swift.get_container( @@ -258,7 +259,8 @@ class SwiftStorage(_carbonara.CarbonaraBasedStorage): for f in files: try: meta = f['name'].split('_') - if aggregation == meta[1] and granularity == float(meta[2]): + if (aggregation == 
meta[1] and granularity == float(meta[2]) + and self._version_check(f['name'], version)): keys.append(meta[0]) except (ValueError, IndexError): # Might be "none", or any other file. Be resilient. diff --git a/gnocchi/tests/storage/test_carbonara.py b/gnocchi/tests/storage/test_carbonara.py index 99947e35..a8f9fbe2 100644 --- a/gnocchi/tests/storage/test_carbonara.py +++ b/gnocchi/tests/storage/test_carbonara.py @@ -133,6 +133,31 @@ class TestCarbonaraMigration(tests_base.TestCase): storage.AggregationDoesNotExist, self.storage.get_measures, self.metric, aggregation='max') + def test_upgrade_upgraded_storage(self): + with mock.patch.object( + self.storage, '_get_measures_and_unserialize', + side_effect=self.storage._get_measures_and_unserialize_v2): + self.assertEqual([ + (utils.datetime_utc(2016, 7, 17), 86400, 5), + (utils.datetime_utc(2016, 7, 18), 86400, 8), + (utils.datetime_utc(2016, 7, 17, 23), 3600, 5), + (utils.datetime_utc(2016, 7, 18, 0), 3600, 8), + (utils.datetime_utc(2016, 7, 17, 23, 55), 300, 5), + (utils.datetime_utc(2016, 7, 18, 0), 300, 8) + ], self.storage.get_measures(self.metric)) + + self.assertEqual([ + (utils.datetime_utc(2016, 7, 17), 86400, 6), + (utils.datetime_utc(2016, 7, 18), 86400, 9), + (utils.datetime_utc(2016, 7, 17, 23), 3600, 6), + (utils.datetime_utc(2016, 7, 18, 0), 3600, 9), + (utils.datetime_utc(2016, 7, 17, 23, 55), 300, 6), + (utils.datetime_utc(2016, 7, 18, 0), 300, 9) + ], self.storage.get_measures(self.metric, aggregation='max')) + + self.upgrade() + self.upgrade() + def test_delete_metric_not_upgraded(self): # Make sure that we delete everything (e.g. objects + container) # correctly even if the metric has not been upgraded. 
-- GitLab From a8c8260d2a41f939a8403d32e2956cb8665db710 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Thu, 11 Aug 2016 17:32:27 +0200 Subject: [PATCH 0323/1483] Add 2.2 series release note Change-Id: I9f7430b006bc88dc9707beeb026ddf287c12f0b2 Signed-off-by: Julien Danjou --- releasenotes/source/2.2.rst | 6 ++++++ releasenotes/source/index.rst | 1 + 2 files changed, 7 insertions(+) create mode 100644 releasenotes/source/2.2.rst diff --git a/releasenotes/source/2.2.rst b/releasenotes/source/2.2.rst new file mode 100644 index 00000000..fea024d6 --- /dev/null +++ b/releasenotes/source/2.2.rst @@ -0,0 +1,6 @@ +=================================== + 2.2 Series Release Notes +=================================== + +.. release-notes:: + :branch: origin/stable/2.2 diff --git a/releasenotes/source/index.rst b/releasenotes/source/index.rst index 3377f194..47b33d6b 100644 --- a/releasenotes/source/index.rst +++ b/releasenotes/source/index.rst @@ -8,6 +8,7 @@ Contents :maxdepth: 2 2.1 + 2.2 unreleased -- GitLab From 313f0330242b05913cc0a3214dfd743a857e4212 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Thu, 11 Aug 2016 17:45:26 +0200 Subject: [PATCH 0324/1483] doc: remove unused devstack.rst The content is install.rst now. 
Change-Id: I03b4c11da7322d5f438eadf4ee166100261dd0a7 --- doc/source/devstack.rst | 21 --------------------- 1 file changed, 21 deletions(-) delete mode 100644 doc/source/devstack.rst diff --git a/doc/source/devstack.rst b/doc/source/devstack.rst deleted file mode 100644 index b116c163..00000000 --- a/doc/source/devstack.rst +++ /dev/null @@ -1,21 +0,0 @@ -========== - Devstack -========== - -To enable Gnocchi in devstack, add the following to local.conf: - -:: - - enable_plugin gnocchi https://github.com/openstack/gnocchi master - enable_service gnocchi-api,gnocchi-metricd - -To enable Grafana support in devstack, you can also enable `gnocchi-grafana`:: - - enable_service gnocchi-grafana - -Then, you can start devstack: - -:: - - ./stack.sh - -- GitLab From ccfe751f274066b4c11d15901b375dc1177e1887 Mon Sep 17 00:00:00 2001 From: Doug Hellmann Date: Fri, 12 Aug 2016 10:44:36 -0400 Subject: [PATCH 0325/1483] Update reno for stable/2.2 Change-Id: I962d8999903765fdd3dd3d11a4c11ad0b1789fd2 --- releasenotes/source/2.2.rst | 6 ++++++ releasenotes/source/index.rst | 1 + 2 files changed, 7 insertions(+) create mode 100644 releasenotes/source/2.2.rst diff --git a/releasenotes/source/2.2.rst b/releasenotes/source/2.2.rst new file mode 100644 index 00000000..fea024d6 --- /dev/null +++ b/releasenotes/source/2.2.rst @@ -0,0 +1,6 @@ +=================================== + 2.2 Series Release Notes +=================================== + +.. 
release-notes:: + :branch: origin/stable/2.2 diff --git a/releasenotes/source/index.rst b/releasenotes/source/index.rst index 3377f194..47b33d6b 100644 --- a/releasenotes/source/index.rst +++ b/releasenotes/source/index.rst @@ -8,6 +8,7 @@ Contents :maxdepth: 2 2.1 + 2.2 unreleased -- GitLab From b8e9d91c0bfb18ce84520e43ca8021687af08094 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Thu, 11 Aug 2016 18:00:11 +0200 Subject: [PATCH 0326/1483] Merge documentation and release notes This will output only one complete website with all the documentation and the release note directly available in it. Change-Id: I1258c917bbf2309091595c250d4f94f98f442677 --- doc/source/conf.py | 1 + doc/source/index.rst | 1 + .../source/releasenotes}/2.1.rst | 0 .../source/releasenotes}/2.2.rst | 0 doc/source/releasenotes/index.rst | 9 + .../source/releasenotes}/unreleased.rst | 0 releasenotes/source/_static/.placeholder | 0 releasenotes/source/_templates/.placeholder | 0 releasenotes/source/conf.py | 274 ------------------ releasenotes/source/index.rst | 19 -- tox.ini | 7 +- 11 files changed, 13 insertions(+), 298 deletions(-) rename {releasenotes/source => doc/source/releasenotes}/2.1.rst (100%) rename {releasenotes/source => doc/source/releasenotes}/2.2.rst (100%) create mode 100644 doc/source/releasenotes/index.rst rename {releasenotes/source => doc/source/releasenotes}/unreleased.rst (100%) delete mode 100644 releasenotes/source/_static/.placeholder delete mode 100644 releasenotes/source/_templates/.placeholder delete mode 100644 releasenotes/source/conf.py delete mode 100644 releasenotes/source/index.rst diff --git a/doc/source/conf.py b/doc/source/conf.py index ea782e69..51909160 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -32,6 +32,7 @@ extensions = [ 'gnocchi.gendoc', 'sphinxcontrib.httpdomain', 'sphinx.ext.autodoc', + 'reno.sphinxext', ] # Add any paths that contain templates here, relative to this directory. 
diff --git a/doc/source/index.rst b/doc/source/index.rst index a23ef034..5cebbbfe 100644 --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -68,5 +68,6 @@ Documentation statsd grafana glossary + releasenotes/index.rst .. _`OpenStack`: http://openstack.org diff --git a/releasenotes/source/2.1.rst b/doc/source/releasenotes/2.1.rst similarity index 100% rename from releasenotes/source/2.1.rst rename to doc/source/releasenotes/2.1.rst diff --git a/releasenotes/source/2.2.rst b/doc/source/releasenotes/2.2.rst similarity index 100% rename from releasenotes/source/2.2.rst rename to doc/source/releasenotes/2.2.rst diff --git a/doc/source/releasenotes/index.rst b/doc/source/releasenotes/index.rst new file mode 100644 index 00000000..00837e3e --- /dev/null +++ b/doc/source/releasenotes/index.rst @@ -0,0 +1,9 @@ +Release Notes +============= + +.. toctree:: + :maxdepth: 2 + + 2.1 + 2.2 + unreleased diff --git a/releasenotes/source/unreleased.rst b/doc/source/releasenotes/unreleased.rst similarity index 100% rename from releasenotes/source/unreleased.rst rename to doc/source/releasenotes/unreleased.rst diff --git a/releasenotes/source/_static/.placeholder b/releasenotes/source/_static/.placeholder deleted file mode 100644 index e69de29b..00000000 diff --git a/releasenotes/source/_templates/.placeholder b/releasenotes/source/_templates/.placeholder deleted file mode 100644 index e69de29b..00000000 diff --git a/releasenotes/source/conf.py b/releasenotes/source/conf.py deleted file mode 100644 index 9e1ccdb2..00000000 --- a/releasenotes/source/conf.py +++ /dev/null @@ -1,274 +0,0 @@ -# -*- coding: utf-8 -*- -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Gnocchi Release Notes documentation build configuration file, created by -# sphinx-quickstart on Mon Nov 23 20:38:38 2015. -# -# This file is execfile()d with the current directory set to its -# containing dir. -# -# Note that not all possible configuration values are present in this -# autogenerated file. -# -# All configuration values have a default; values that are commented out -# serve to show the default. - -# If extensions (or modules to document with autodoc) are in another directory, -# add these directories to sys.path here. If the directory is relative to the -# documentation root, use os.path.abspath to make it absolute, like shown here. -# sys.path.insert(0, os.path.abspath('.')) - -# -- General configuration ------------------------------------------------ - -# If your documentation needs a minimal Sphinx version, state it here. -# needs_sphinx = '1.0' - -# Add any Sphinx extension module names here, as strings. They can be -# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom -# ones. -extensions = [ - 'oslosphinx', - 'reno.sphinxext', -] - -# Add any paths that contain templates here, relative to this directory. -templates_path = ['_templates'] - -# The suffix of source filenames. -source_suffix = '.rst' - -# The encoding of source files. -# source_encoding = 'utf-8-sig' - -# The master toctree document. -master_doc = 'index' - -# General information about the project. 
-project = u'Gnocchi Release Notes' -copyright = u'2015-present, Gnocchi developers' - -# The version info for the project you're documenting, acts as replacement for -# |version| and |release|, also used in various other places throughout the -# built documents. -# -import pbr.version -gnocchi_version = pbr.version.VersionInfo('gnocchi') -# The short X.Y version. -version = gnocchi_version.canonical_version_string() -# The full version, including alpha/beta/rc tags. -release = gnocchi_version.version_string_with_vcs() - -# The language for content autogenerated by Sphinx. Refer to documentation -# for a list of supported languages. -# language = None - -# There are two options for replacing |today|: either, you set today to some -# non-false value, then it is used: -# today = '' -# Else, today_fmt is used as the format for a strftime call. -# today_fmt = '%B %d, %Y' - -# List of patterns, relative to source directory, that match files and -# directories to ignore when looking for source files. -exclude_patterns = [] - -# The reST default role (used for this markup: `text`) to use for all -# documents. -# default_role = None - -# If true, '()' will be appended to :func: etc. cross-reference text. -# add_function_parentheses = True - -# If true, the current module name will be prepended to all description -# unit titles (such as .. function::). -# add_module_names = True - -# If true, sectionauthor and moduleauthor directives will be shown in the -# output. They are ignored by default. -# show_authors = False - -# The name of the Pygments (syntax highlighting) style to use. -pygments_style = 'sphinx' - -# A list of ignored prefixes for module index sorting. -# modindex_common_prefix = [] - -# If true, keep warnings as "system message" paragraphs in the built documents. -# keep_warnings = False - - -# -- Options for HTML output ---------------------------------------------- - -# The theme to use for HTML and HTML Help pages. 
See the documentation for -# a list of builtin themes. -html_theme = 'default' - -# Theme options are theme-specific and customize the look and feel of a theme -# further. For a list of options available for each theme, see the -# documentation. -# html_theme_options = {} - -# Add any paths that contain custom themes here, relative to this directory. -# html_theme_path = [] - -# The name for this set of Sphinx documents. If None, it defaults to -# " v documentation". -# html_title = None - -# A shorter title for the navigation bar. Default is the same as html_title. -# html_short_title = None - -# The name of an image file (relative to this directory) to place at the top -# of the sidebar. -# html_logo = None - -# The name of an image file (within the static path) to use as favicon of the -# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 -# pixels large. -# html_favicon = None - -# Add any paths that contain custom static files (such as style sheets) here, -# relative to this directory. They are copied after the builtin static files, -# so a file named "default.css" will overwrite the builtin "default.css". -html_static_path = ['_static'] - -# Add any extra paths that contain custom files (such as robots.txt or -# .htaccess) here, relative to this directory. These files are copied -# directly to the root of the documentation. -# html_extra_path = [] - -# If not '', a 'Last updated on:' timestamp is inserted at every page bottom, -# using the given strftime format. -# html_last_updated_fmt = '%b %d, %Y' - -# If true, SmartyPants will be used to convert quotes and dashes to -# typographically correct entities. -# html_use_smartypants = True - -# Custom sidebar templates, maps document names to template names. -# html_sidebars = {} - -# Additional templates that should be rendered to pages, maps page names to -# template names. -# html_additional_pages = {} - -# If false, no module index is generated. 
-# html_domain_indices = True - -# If false, no index is generated. -# html_use_index = True - -# If true, the index is split into individual pages for each letter. -# html_split_index = False - -# If true, links to the reST sources are added to the pages. -# html_show_sourcelink = True - -# If true, "Created using Sphinx" is shown in the HTML footer. Default is True. -# html_show_sphinx = True - -# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. -# html_show_copyright = True - -# If true, an OpenSearch description file will be output, and all pages will -# contain a tag referring to it. The value of this option must be the -# base URL from which the finished HTML is served. -# html_use_opensearch = '' - -# This is the file name suffix for HTML files (e.g. ".xhtml"). -# html_file_suffix = None - -# Output file base name for HTML help builder. -htmlhelp_basename = 'GnocchiReleaseNotestdoc' - - -# -- Options for LaTeX output --------------------------------------------- - -latex_elements = { - # The paper size ('letterpaper' or 'a4paper'). - # 'papersize': 'letterpaper', - - # The font size ('10pt', '11pt' or '12pt'). - # 'pointsize': '10pt', - - # Additional stuff for the LaTeX preamble. - # 'preamble': '', -} - -# Grouping the document tree into LaTeX files. List of tuples -# (source start file, target name, title, -# author, documentclass [howto, manual, or own class]). -latex_documents = [ - ('index', 'Gnocchi.tex', - u'Gnocchi Release Notes Documentation', - u'Gnocchi developers', 'manual'), -] - -# The name of an image file (relative to this directory) to place at the top of -# the title page. -# latex_logo = None - -# For "manual" documents, if this is true, then toplevel headings are parts, -# not chapters. -# latex_use_parts = False - -# If true, show page references after internal links. -# latex_show_pagerefs = False - -# If true, show URL addresses after external links. 
-# latex_show_urls = False - -# Documents to append as an appendix to all manuals. -# latex_appendices = [] - -# If false, no module index is generated. -# latex_domain_indices = True - - -# -- Options for manual page output --------------------------------------- - -# One entry per manual page. List of tuples -# (source start file, name, description, authors, manual section). -man_pages = [ - ('index', 'gnocchi', u'Gnocchi Release Notes Documentation', - [u'Gnocchi developers'], 1) -] - -# If true, show URL addresses after external links. -# man_show_urls = False - - -# -- Options for Texinfo output ------------------------------------------- - -# Grouping the document tree into Texinfo files. List of tuples -# (source start file, target name, title, author, -# dir menu entry, description, category) -texinfo_documents = [ - ('index', 'Gnocchi', u'Gnocchi Release Notes Documentation', - u'Gnocchi developers', 'Gnocchi', - 'Gnocchi is a multi-tenant timeseries, metrics and resources database.', - 'Miscellaneous'), -] - -# Documents to append as an appendix to all manuals. -# texinfo_appendices = [] - -# If false, no module index is generated. -# texinfo_domain_indices = True - -# How to display URL addresses: 'footnote', 'no', or 'inline'. -# texinfo_show_urls = 'footnote' - -# If true, do not generate a @detailmenu in the "Top" node's menu. -# texinfo_no_detailmenu = False diff --git a/releasenotes/source/index.rst b/releasenotes/source/index.rst deleted file mode 100644 index 47b33d6b..00000000 --- a/releasenotes/source/index.rst +++ /dev/null @@ -1,19 +0,0 @@ -Welcome to Gnocchi Release Notes documentation! -=================================================== - -Contents -======== - -.. 
toctree:: - :maxdepth: 2 - - 2.1 - 2.2 - unreleased - - -Indices and tables -================== - -* :ref:`genindex` -* :ref:`search` diff --git a/tox.ini b/tox.ini index b6f9be55..9d71d92b 100644 --- a/tox.ini +++ b/tox.ini @@ -65,10 +65,6 @@ show-source = true deps = .[mysql,postgresql,test,file,ceph,swift] commands = oslo-config-generator --config-file=etc/gnocchi/gnocchi-config-generator.conf -[testenv:releasenotes] -deps = .[test,doc] -commands = sphinx-build -a -E -W -d releasenotes/build/doctrees -b html releasenotes/source releasenotes/build/html - [testenv:docs] # This does not work, see: https://bitbucket.org/hpk42/tox/issues/302 # deps = {[testenv]deps} @@ -82,4 +78,5 @@ commands = doc8 --ignore-path doc/source/rest.rst doc/source [testenv:docs-gnocchi.xyz] deps = .[file,postgresql,test,doc] sphinx_rtd_theme -commands = pifpaf -g GNOCCHI_INDEXER_URL run postgresql -- sphinx-build -D html_theme=sphinx_rtd_theme doc/source doc/build/html +commands = + pifpaf -g GNOCCHI_INDEXER_URL run postgresql -- sphinx-build -D html_theme=sphinx_rtd_theme doc/source doc/build/html -- GitLab From f1f81c552b934bc5d60902c7c7fa94032a1efb33 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Tue, 16 Aug 2016 12:50:09 +0200 Subject: [PATCH 0327/1483] statsd: simplify testing code No need to list anything, we know the metric id already. It's more sure. 
Change-Id: I4ec70c5c8f74b252ec06773c45c8eb040e40ac22 --- gnocchi/tests/test_statsd.py | 16 ++++------------ 1 file changed, 4 insertions(+), 12 deletions(-) diff --git a/gnocchi/tests/test_statsd.py b/gnocchi/tests/test_statsd.py index 912f13dc..4d820bc0 100644 --- a/gnocchi/tests/test_statsd.py +++ b/gnocchi/tests/test_statsd.py @@ -68,10 +68,8 @@ class TestStatsd(tests_base.TestCase): metric = r.get_metric(metric_key) - metrics = self.stats.storage.list_metric_with_measures_to_process( - None, None, full=True) self.stats.storage.process_background_tasks( - self.stats.indexer, metrics, sync=True) + self.stats.indexer, [str(metric.id)], sync=True) measures = self.stats.storage.get_measures(metric) self.assertEqual([ @@ -90,10 +88,8 @@ class TestStatsd(tests_base.TestCase): ("127.0.0.1", 12345)) self.stats.flush() - metrics = self.stats.storage.list_metric_with_measures_to_process( - None, None, full=True) self.stats.storage.process_background_tasks( - self.stats.indexer, metrics, sync=True) + self.stats.indexer, [str(metric.id)], sync=True) measures = self.stats.storage.get_measures(metric) self.assertEqual([ @@ -125,10 +121,8 @@ class TestStatsd(tests_base.TestCase): metric = r.get_metric(metric_key) self.assertIsNotNone(metric) - metrics = self.stats.storage.list_metric_with_measures_to_process( - None, None, full=True) self.stats.storage.process_background_tasks( - self.stats.indexer, metrics, sync=True) + self.stats.indexer, [str(metric.id)], sync=True) measures = self.stats.storage.get_measures(metric) self.assertEqual([ @@ -145,10 +139,8 @@ class TestStatsd(tests_base.TestCase): ("127.0.0.1", 12345)) self.stats.flush() - metrics = self.stats.storage.list_metric_with_measures_to_process( - None, None, full=True) self.stats.storage.process_background_tasks( - self.stats.indexer, metrics, sync=True) + self.stats.indexer, [str(metric.id)], sync=True) measures = self.stats.storage.get_measures(metric) self.assertEqual([ -- GitLab From 
abcd34c616ff388376f232317a31fcb9acf8d81b Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Tue, 16 Aug 2016 10:36:38 +0200 Subject: [PATCH 0328/1483] Add bindep.txt to express binary dependencies This is used at least by the infra tools to know which binary packages should be installed to deploy Gnocchi. Change-Id: I75c657298974dfa263ad788a6e2ef9a13eb5e3d2 --- bindep.txt | 6 ++++++ 1 file changed, 6 insertions(+) create mode 100644 bindep.txt diff --git a/bindep.txt b/bindep.txt new file mode 100644 index 00000000..613ee2bc --- /dev/null +++ b/bindep.txt @@ -0,0 +1,6 @@ +libpq-dev [platform:dpkg] +postgresql [platform:dpkg] +mysql-client [platform:dpkg] +mysql-server [platform:dpkg] +build-essential [platform:dpkg] +libffi-dev [platform:dpkg] -- GitLab From 9c0a918a655b80ca2391fd5620cf0fa1e1d44b18 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Tue, 16 Aug 2016 15:30:46 +0200 Subject: [PATCH 0329/1483] Remove null drivers There are not used anywhere nor really useful. Change-Id: I86061a4d82ae5eddee18971478431475b8131f96 --- gnocchi/indexer/null.py | 20 -------------------- gnocchi/storage/null.py | 20 -------------------- gnocchi/tests/base.py | 2 +- gnocchi/tests/gabbi/fixtures.py | 19 ++++++++----------- gnocchi/tests/test_storage.py | 4 +--- setup.cfg | 2 -- 6 files changed, 10 insertions(+), 57 deletions(-) delete mode 100644 gnocchi/indexer/null.py delete mode 100644 gnocchi/storage/null.py diff --git a/gnocchi/indexer/null.py b/gnocchi/indexer/null.py deleted file mode 100644 index 850e2aeb..00000000 --- a/gnocchi/indexer/null.py +++ /dev/null @@ -1,20 +0,0 @@ -# -*- encoding: utf-8 -*- -# -# Copyright © 2014 eNovance -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -from gnocchi import indexer - - -class NullIndexer(indexer.IndexerDriver): - pass diff --git a/gnocchi/storage/null.py b/gnocchi/storage/null.py deleted file mode 100644 index 21eed341..00000000 --- a/gnocchi/storage/null.py +++ /dev/null @@ -1,20 +0,0 @@ -# -*- encoding: utf-8 -*- -# -# Copyright © 2014-2015 eNovance -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-from gnocchi import storage - - -class NullStorage(storage.StorageDriver): - pass diff --git a/gnocchi/tests/base.py b/gnocchi/tests/base.py index 298dcf9e..2d614c84 100644 --- a/gnocchi/tests/base.py +++ b/gnocchi/tests/base.py @@ -436,7 +436,7 @@ class TestCase(base.BaseTestCase): self.conf.set_override( 'driver', - os.getenv("GNOCCHI_TEST_STORAGE_DRIVER", "null"), + os.getenv("GNOCCHI_TEST_STORAGE_DRIVER", "file"), 'storage') def setUp(self): diff --git a/gnocchi/tests/gabbi/fixtures.py b/gnocchi/tests/gabbi/fixtures.py index 1d94bec4..df83524f 100644 --- a/gnocchi/tests/gabbi/fixtures.py +++ b/gnocchi/tests/gabbi/fixtures.py @@ -83,10 +83,7 @@ class ConfigFixture(fixture.GabbiFixture): self.conf = conf self.tmp_dir = data_tmp_dir - # TODO(jd) It would be cool if Gabbi was able to use the null:// - # indexer, but this makes the API returns a lot of 501 error, which - # Gabbi does not want to see, so let's just disable it. - if conf.indexer.url is None or conf.indexer.url == "null://": + if conf.indexer.url is None: raise case.SkipTest("No indexer configured") # Use the presence of DEVSTACK_GATE_TEMPEST as a semaphore @@ -141,13 +138,13 @@ class ConfigFixture(fixture.GabbiFixture): if hasattr(self, 'index'): self.index.disconnect() - if not self.conf.indexer.url.startswith("null://"): - # Swallow noise from missing tables when dropping - # database. - with warnings.catch_warnings(): - warnings.filterwarnings('ignore', - module='sqlalchemy.engine.default') - sqlalchemy_utils.drop_database(self.conf.indexer.url) + # Swallow noise from missing tables when dropping + # database. 
+ with warnings.catch_warnings(): + warnings.filterwarnings('ignore', + module='sqlalchemy.engine.default') + sqlalchemy_utils.drop_database(self.conf.indexer.url) + if self.tmp_dir: shutil.rmtree(self.tmp_dir) diff --git a/gnocchi/tests/test_storage.py b/gnocchi/tests/test_storage.py index f5b48f6d..4aa6bd60 100644 --- a/gnocchi/tests/test_storage.py +++ b/gnocchi/tests/test_storage.py @@ -26,7 +26,6 @@ from gnocchi import carbonara from gnocchi import indexer from gnocchi import storage from gnocchi.storage import _carbonara -from gnocchi.storage import null from gnocchi.tests import base as tests_base from gnocchi import utils @@ -52,9 +51,8 @@ class TestStorageDriver(tests_base.TestCase): storage.process_background_tasks(index, metrics, sync=True) def test_get_driver(self): - self.conf.set_override('driver', 'null', 'storage') driver = storage.get_driver(self.conf) - self.assertIsInstance(driver, null.NullStorage) + self.assertIsInstance(driver, storage.StorageDriver) def test_corrupted_data(self): if not isinstance(self.storage, _carbonara.CarbonaraBasedStorage): diff --git a/setup.cfg b/setup.cfg index 4b0f3783..812a88a1 100644 --- a/setup.cfg +++ b/setup.cfg @@ -97,13 +97,11 @@ gnocchi.indexer.sqlalchemy.resource_type_attribute = bool = gnocchi.indexer.sqlalchemy_extension:BoolSchema gnocchi.storage = - null = gnocchi.storage.null:NullStorage swift = gnocchi.storage.swift:SwiftStorage ceph = gnocchi.storage.ceph:CephStorage file = gnocchi.storage.file:FileStorage gnocchi.indexer = - null = gnocchi.indexer.null:NullIndexer mysql = gnocchi.indexer.sqlalchemy:SQLAlchemyIndexer mysql+pymysql = gnocchi.indexer.sqlalchemy:SQLAlchemyIndexer postgresql = gnocchi.indexer.sqlalchemy:SQLAlchemyIndexer -- GitLab From fc3838243a0ca29927afabee424a6956c5b52037 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Sat, 20 Aug 2016 10:32:34 +0200 Subject: [PATCH 0330/1483] ceph: fix write emulation Ceph never raises ObjectNotFound on write. 
Change-Id: Ida84fcbcb9fbd75b90eb11b57f3db271c1425102 --- gnocchi/carbonara.py | 6 ------ gnocchi/storage/ceph.py | 7 +------ gnocchi/tests/base.py | 8 ++++---- 3 files changed, 5 insertions(+), 16 deletions(-) diff --git a/gnocchi/carbonara.py b/gnocchi/carbonara.py index c0db53b3..2413fbf8 100644 --- a/gnocchi/carbonara.py +++ b/gnocchi/carbonara.py @@ -415,12 +415,6 @@ class AggregatedTimeSerie(TimeSerie): return int((self.first.value - split) // (self.sampling * 10e8) * self.SERIAL_LEN) - @staticmethod - def padding(offset): - offset = offset // AggregatedTimeSerie.SERIAL_LEN - pad = [False] * offset * 2 - return struct.pack('<' + '?d' * offset, *pad) - def _truncate(self, quick=False): """Truncate the timeserie.""" if self.max_size is not None: diff --git a/gnocchi/storage/ceph.py b/gnocchi/storage/ceph.py index 23de86ba..6e345ea1 100644 --- a/gnocchi/storage/ceph.py +++ b/gnocchi/storage/ceph.py @@ -257,12 +257,7 @@ class CephStorage(_carbonara.CarbonaraBasedStorage): granularity, data, offset=0, version=3): name = self._get_object_name(metric, timestamp_key, aggregation, granularity, version) - try: - self.ioctx.write(name, data, offset=offset) - except rados.ObjectNotFound: - # first time writing data - self.ioctx.write_full( - name, carbonara.AggregatedTimeSerie.padding(offset) + data) + self.ioctx.write(name, data, offset=offset) self.ioctx.set_xattr("gnocchi_%s_container" % metric.id, name, "") def _delete_metric_measures(self, metric, timestamp_key, aggregation, diff --git a/gnocchi/tests/base.py b/gnocchi/tests/base.py index 298dcf9e..1a0f5ad4 100644 --- a/gnocchi/tests/base.py +++ b/gnocchi/tests/base.py @@ -140,11 +140,11 @@ class FakeRadosModule(object): self._validate_key(key) try: current = self.kvs[key] - if len(current) < offset: - current += b'\x00' * (offset - len(current)) - self.kvs[key] = current[:offset] + value except KeyError: - raise FakeRadosModule.ObjectNotFound + current = b"" + if len(current) < offset: + current += b'\x00' * (offset 
- len(current)) + self.kvs[key] = current[:offset] + value def stat(self, key): self._validate_key(key) -- GitLab From b8366fd96a4769d4fc8ab0a2b50edbca312f2e14 Mon Sep 17 00:00:00 2001 From: Hanxi Date: Mon, 22 Aug 2016 19:46:16 +0800 Subject: [PATCH 0331/1483] Fix Gnocchi tempest.conf generation [service_available] isn't being generated. This patch fixes it. Change-Id: I55716e72f7613b47d1832d78fe1658aaa471bd95 Closes-Bug: #1613542 --- gnocchi/tempest/plugin.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/gnocchi/tempest/plugin.py b/gnocchi/tempest/plugin.py index b0a9fd82..d4453694 100644 --- a/gnocchi/tempest/plugin.py +++ b/gnocchi/tempest/plugin.py @@ -41,4 +41,5 @@ class GnocchiTempestPlugin(plugins.TempestPlugin): def get_opt_lists(self): return [(tempest_config.metric_group.name, - tempest_config.metric_opts)] + tempest_config.metric_opts), + ('service_available', tempest_config.service_available_opts)] -- GitLab From ec2ef510696d4c8395ac31b961bd598474f3f7fa Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Thu, 18 Aug 2016 15:06:12 +0200 Subject: [PATCH 0332/1483] carbonara: expose first_block_timestamp as public Change-Id: Ifb87acf1fd59408ce72d6a8a20e0a3fdba01e9a7 --- gnocchi/carbonara.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/gnocchi/carbonara.py b/gnocchi/carbonara.py index 2413fbf8..26876cb4 100644 --- a/gnocchi/carbonara.py +++ b/gnocchi/carbonara.py @@ -221,7 +221,7 @@ class BoundTimeSerie(TimeSerie): ignore_too_old_timestamps=False): # NOTE: values must be sorted when passed in. 
if self.block_size is not None and not self.ts.empty: - first_block_timestamp = self._first_block_timestamp() + first_block_timestamp = self.first_block_timestamp() if ignore_too_old_timestamps: for index, (timestamp, value) in enumerate(values): if timestamp >= first_block_timestamp: @@ -263,7 +263,8 @@ class BoundTimeSerie(TimeSerie): }) return basic - def _first_block_timestamp(self): + def first_block_timestamp(self): + """Return the timestamp of the first block.""" rounded = self.round_timestamp(self.ts.index[-1], self.block_size.delta.value) @@ -275,7 +276,7 @@ class BoundTimeSerie(TimeSerie): # Change that to remove the amount of block needed to have # the size <= max_size. A block is a number of "seconds" (a # timespan) - self.ts = self.ts[self._first_block_timestamp():] + self.ts = self.ts[self.first_block_timestamp():] class AggregatedTimeSerie(TimeSerie): -- GitLab From c27b8b73e52cdbabeb253ae64d92f8be66eeefce Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Thu, 18 Aug 2016 16:58:11 +0200 Subject: [PATCH 0333/1483] storage: add an intermediate verification Change-Id: I0a1275a644d8068a5fed1324b418ba5e3bbcf887 --- gnocchi/tests/test_storage.py | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/gnocchi/tests/test_storage.py b/gnocchi/tests/test_storage.py index 4aa6bd60..c8c34508 100644 --- a/gnocchi/tests/test_storage.py +++ b/gnocchi/tests/test_storage.py @@ -207,6 +207,13 @@ class TestStorageDriver(tests_base.TestCase): ]) self.trigger_processing(self.storage, self.index) + self.assertEqual([ + (utils.datetime_utc(2014, 1, 1), 86400.0, 55.5), + (utils.datetime_utc(2014, 1, 1, 12), 3600.0, 55.5), + (utils.datetime_utc(2014, 1, 1, 12), 300.0, 69), + (utils.datetime_utc(2014, 1, 1, 12, 5), 300.0, 42.0), + ], self.storage.get_measures(self.metric)) + self.storage.add_measures(self.metric, [ storage.Measure(datetime.datetime(2014, 1, 1, 12, 9, 31), 4), storage.Measure(datetime.datetime(2014, 1, 1, 12, 12, 45), 44), -- GitLab From 
96b9aa60d4e90e0fe73d84d6c672bb78ccc8dbdd Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Tue, 16 Aug 2016 14:28:35 +0200 Subject: [PATCH 0334/1483] storage: do not list metrics on each measure processing This should simplify and speed-up test run. Also add a specific test to check list_metric_with_measures_to_process(). Change-Id: I981f19ec658a4d9928b9170876b3f5782041806b --- gnocchi/storage/file.py | 5 +-- gnocchi/tests/test_storage.py | 66 +++++++++++++++++++++-------------- 2 files changed, 43 insertions(+), 28 deletions(-) diff --git a/gnocchi/storage/file.py b/gnocchi/storage/file.py index 243846b1..7b1d127d 100644 --- a/gnocchi/storage/file.py +++ b/gnocchi/storage/file.py @@ -147,8 +147,9 @@ class FileStorage(_carbonara.CarbonaraBasedStorage): def list_metric_with_measures_to_process(self, size, part, full=False): if full: - return os.listdir(self.measure_path) - return os.listdir(self.measure_path)[size * part:size * (part + 1)] + return set(os.listdir(self.measure_path)) + return set( + os.listdir(self.measure_path)[size * part:size * (part + 1)]) def _list_measures_container_for_metric_id(self, metric_id): try: diff --git a/gnocchi/tests/test_storage.py b/gnocchi/tests/test_storage.py index 4aa6bd60..e1320064 100644 --- a/gnocchi/tests/test_storage.py +++ b/gnocchi/tests/test_storage.py @@ -44,11 +44,10 @@ class TestStorageDriver(tests_base.TestCase): archive_policy_name) return m, m_sql - @staticmethod - def trigger_processing(storage, index): - metrics = storage.list_metric_with_measures_to_process( - None, None, full=True) - storage.process_background_tasks(index, metrics, sync=True) + def trigger_processing(self, metrics=None): + if metrics is None: + metrics = [str(self.metric.id)] + self.storage.process_background_tasks(self.index, metrics, sync=True) def test_get_driver(self): driver = storage.get_driver(self.conf) @@ -61,7 +60,7 @@ class TestStorageDriver(tests_base.TestCase): self.storage.add_measures(self.metric, [ 
storage.Measure(datetime.datetime(2014, 1, 1, 12, 0, 1), 69), ]) - self.trigger_processing(self.storage, self.index) + self.trigger_processing() self.storage.add_measures(self.metric, [ storage.Measure(datetime.datetime(2014, 1, 1, 13, 0, 1), 1), @@ -71,33 +70,48 @@ class TestStorageDriver(tests_base.TestCase): side_effect=ValueError("boom!")): with mock.patch('gnocchi.carbonara.TimeSerie.unserialize', side_effect=ValueError("boom!")): - self.trigger_processing(self.storage, self.index) + self.trigger_processing() m = self.storage.get_measures(self.metric) self.assertIn((utils.datetime_utc(2014, 1, 1), 86400.0, 1), m) self.assertIn((utils.datetime_utc(2014, 1, 1, 13), 3600.0, 1), m) self.assertIn((utils.datetime_utc(2014, 1, 1, 13), 300.0, 1), m) + def test_list_metric_with_measures_to_process(self): + metrics = self.storage.list_metric_with_measures_to_process( + None, None, full=True) + self.assertEqual(set(), metrics) + self.storage.add_measures(self.metric, [ + storage.Measure(datetime.datetime(2014, 1, 1, 12, 0, 1), 69), + ]) + metrics = self.storage.list_metric_with_measures_to_process( + None, None, full=True) + self.assertEqual(set([str(self.metric.id)]), metrics) + self.trigger_processing() + metrics = self.storage.list_metric_with_measures_to_process( + None, None, full=True) + self.assertEqual(set([]), metrics) + def test_delete_nonempty_metric(self): self.storage.add_measures(self.metric, [ storage.Measure(datetime.datetime(2014, 1, 1, 12, 0, 1), 69), ]) - self.trigger_processing(self.storage, self.index) + self.trigger_processing() self.storage.delete_metric(self.metric) - self.trigger_processing(self.storage, self.index) + self.trigger_processing() def test_delete_nonempty_metric_unprocessed(self): self.storage.add_measures(self.metric, [ storage.Measure(datetime.datetime(2014, 1, 1, 12, 0, 1), 69), ]) self.storage.delete_metric(self.metric) - self.trigger_processing(self.storage, self.index) + self.trigger_processing() def 
test_delete_expunge_metric(self): self.storage.add_measures(self.metric, [ storage.Measure(datetime.datetime(2014, 1, 1, 12, 0, 1), 69), ]) - self.trigger_processing(self.storage, self.index) + self.trigger_processing() self.index.delete_metric(self.metric.id) self.storage.expunge_metrics(self.index, sync=True) self.assertRaises(indexer.NoSuchMetric, self.index.delete_metric, @@ -123,7 +137,7 @@ class TestStorageDriver(tests_base.TestCase): self.storage.add_measures(m, [ storage.Measure(datetime.datetime(2014, 1, 1, 12, i, j), 100) for i in six.moves.range(0, 60) for j in six.moves.range(0, 60)]) - self.trigger_processing(self.storage, self.index) + self.trigger_processing([str(m.id)]) self.assertEqual(3661, len(self.storage.get_measures(m))) @@ -134,7 +148,7 @@ class TestStorageDriver(tests_base.TestCase): storage.Measure(datetime.datetime(2014, 1, 6, i, j, 0), 100) for i in six.moves.range(2) for j in six.moves.range(0, 60, 2)] self.storage.add_measures(m, measures) - self.trigger_processing(self.storage, self.index) + self.trigger_processing([str(m.id)]) # add measure to end, in same aggregate time as last point. self.storage.add_measures(m, [ @@ -142,7 +156,7 @@ class TestStorageDriver(tests_base.TestCase): with mock.patch.object(self.storage, '_store_metric_measures') as c: # should only resample last aggregate - self.trigger_processing(self.storage, self.index) + self.trigger_processing([str(m.id)]) count = 0 for call in c.mock_calls: # policy is 60 points and split is 48. should only update 2nd half @@ -157,14 +171,14 @@ class TestStorageDriver(tests_base.TestCase): storage.Measure(datetime.datetime(2014, 1, 6, i, j, 0), 100) for i in six.moves.range(2) for j in six.moves.range(0, 60, 2)] self.storage.add_measures(m, measures) - self.trigger_processing(self.storage, self.index) + self.trigger_processing([str(m.id)]) # add measure to end, in same aggregate time as last point. 
new_point = datetime.datetime(2014, 1, 6, 1, 58, 1) self.storage.add_measures(m, [storage.Measure(new_point, 100)]) with mock.patch.object(self.storage, '_add_measures') as c: - self.trigger_processing(self.storage, self.index) + self.trigger_processing([str(m.id)]) for __, args, __ in c.mock_calls: self.assertEqual( args[3].first, carbonara.TimeSerie.round_timestamp( @@ -177,7 +191,7 @@ class TestStorageDriver(tests_base.TestCase): storage.Measure(datetime.datetime(2014, 1, 1, 12, 9, 31), 4), storage.Measure(datetime.datetime(2014, 1, 1, 12, 12, 45), 44), ]) - self.trigger_processing(self.storage, self.index) + self.trigger_processing() self.assertEqual([ (utils.datetime_utc(2014, 1, 1), 86400.0, 39.75), @@ -191,7 +205,7 @@ class TestStorageDriver(tests_base.TestCase): self.storage.add_measures(self.metric, [ storage.Measure(datetime.datetime(2015, 1, 1, 12, 0, 1), 69), ]) - self.trigger_processing(self.storage, self.index) + self.trigger_processing() self.assertEqual([ (utils.datetime_utc(2014, 1, 1), 86400.0, 39.75), @@ -205,13 +219,13 @@ class TestStorageDriver(tests_base.TestCase): storage.Measure(datetime.datetime(2014, 1, 1, 12, 0, 1), 69), storage.Measure(datetime.datetime(2014, 1, 1, 12, 7, 31), 42), ]) - self.trigger_processing(self.storage, self.index) + self.trigger_processing() self.storage.add_measures(self.metric, [ storage.Measure(datetime.datetime(2014, 1, 1, 12, 9, 31), 4), storage.Measure(datetime.datetime(2014, 1, 1, 12, 12, 45), 44), ]) - self.trigger_processing(self.storage, self.index) + self.trigger_processing() self.assertEqual([ (utils.datetime_utc(2014, 1, 1), 86400.0, 39.75), @@ -244,7 +258,7 @@ class TestStorageDriver(tests_base.TestCase): storage.Measure(datetime.datetime(2014, 1, 1, 12, 9, 31), 4), storage.Measure(datetime.datetime(2014, 1, 1, 12, 12, 45), 44), ]) - self.trigger_processing(self.storage, self.index) + self.trigger_processing() self.assertEqual([ (utils.datetime_utc(2014, 1, 1), 86400.0, 39.75), @@ -412,7 +426,7 @@ 
class TestStorageDriver(tests_base.TestCase): storage.Measure(datetime.datetime(2014, 1, 1, 12, 10, 31), 4), storage.Measure(datetime.datetime(2014, 1, 1, 12, 13, 10), 4), ]) - self.trigger_processing(self.storage, self.index) + self.trigger_processing([str(self.metric.id), str(metric2.id)]) values = self.storage.get_cross_metric_measures([self.metric, metric2]) self.assertEqual([ @@ -498,7 +512,7 @@ class TestStorageDriver(tests_base.TestCase): storage.Measure(datetime.datetime(2014, 1, 1, 12, 9, 31), 6), storage.Measure(datetime.datetime(2014, 1, 1, 12, 13, 10), 2), ]) - self.trigger_processing(self.storage, self.index) + self.trigger_processing([str(self.metric.id), str(metric2.id)]) values = self.storage.get_cross_metric_measures([self.metric, metric2]) self.assertEqual([ @@ -525,7 +539,7 @@ class TestStorageDriver(tests_base.TestCase): storage.Measure(datetime.datetime(2014, 1, 1, 12, 9, 31), 6), storage.Measure(datetime.datetime(2014, 1, 1, 12, 13, 10), 2), ]) - self.trigger_processing(self.storage, self.index) + self.trigger_processing([str(self.metric.id), str(metric2.id)]) self.assertEqual( {metric2: [], @@ -558,7 +572,7 @@ class TestStorageDriver(tests_base.TestCase): storage.Measure(datetime.datetime(2014, 1, 1, 12, 0, 5), 1), storage.Measure(datetime.datetime(2014, 1, 1, 12, 0, 10), 1), ]) - self.trigger_processing(self.storage, self.index) + self.trigger_processing([str(m.id)]) self.assertEqual([ (utils.datetime_utc(2014, 1, 1, 12, 0, 0), 5.0, 1.0), (utils.datetime_utc(2014, 1, 1, 12, 0, 5), 5.0, 1.0), @@ -571,7 +585,7 @@ class TestStorageDriver(tests_base.TestCase): self.storage.add_measures(m, [ storage.Measure(datetime.datetime(2014, 1, 1, 12, 0, 15), 1), ]) - self.trigger_processing(self.storage, self.index) + self.trigger_processing([str(m.id)]) self.assertEqual([ (utils.datetime_utc(2014, 1, 1, 12, 0, 0), 5.0, 1.0), (utils.datetime_utc(2014, 1, 1, 12, 0, 5), 5.0, 1.0), -- GitLab From 039e8dbf5f2d112a6c3044d3b487701d08ec730a Mon Sep 17 00:00:00 
2001 From: Julien Danjou Date: Wed, 17 Aug 2016 19:11:58 +0200 Subject: [PATCH 0335/1483] storage: make sure the deletion tests are synchronous Otherwise there's a little chance it fails on storage backends that share their test run. Change-Id: Idbd9db3cbd55fa50e782e13d85602fdd3a949f9c --- gnocchi/tests/test_storage.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/gnocchi/tests/test_storage.py b/gnocchi/tests/test_storage.py index e1320064..8840a8fb 100644 --- a/gnocchi/tests/test_storage.py +++ b/gnocchi/tests/test_storage.py @@ -97,14 +97,14 @@ class TestStorageDriver(tests_base.TestCase): storage.Measure(datetime.datetime(2014, 1, 1, 12, 0, 1), 69), ]) self.trigger_processing() - self.storage.delete_metric(self.metric) + self.storage.delete_metric(self.metric, sync=True) self.trigger_processing() def test_delete_nonempty_metric_unprocessed(self): self.storage.add_measures(self.metric, [ storage.Measure(datetime.datetime(2014, 1, 1, 12, 0, 1), 69), ]) - self.storage.delete_metric(self.metric) + self.storage.delete_metric(self.metric, sync=True) self.trigger_processing() def test_delete_expunge_metric(self): -- GitLab From f916f434e33cfc20ef65701711092763791d7580 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Sat, 20 Aug 2016 11:01:05 +0200 Subject: [PATCH 0336/1483] carbonara: avoid using futures altogether if no aggregation workers Change-Id: I1308797aded0f18651fc6bbd04046d2140a24714 --- gnocchi/storage/_carbonara.py | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/gnocchi/storage/_carbonara.py b/gnocchi/storage/_carbonara.py index c7fc464b..d9316b83 100644 --- a/gnocchi/storage/_carbonara.py +++ b/gnocchi/storage/_carbonara.py @@ -58,6 +58,11 @@ class CarbonaraBasedStorage(storage.StorageDriver): conf.coordination_url, str(uuid.uuid4()).encode('ascii')) self.aggregation_workers_number = conf.aggregation_workers_number + if self.aggregation_workers_number == 1: + # NOTE(jd) Avoid using futures at all if we 
don't want any threads. + self._map_in_thread = self._map_no_thread + else: + self._map_in_thread = self._map_in_futures_threads self.start() @utils.retry @@ -503,7 +508,11 @@ class CarbonaraBasedStorage(storage.StorageDriver): return result - def _map_in_thread(self, method, list_of_args): + @staticmethod + def _map_no_thread(method, list_of_args): + return list(itertools.starmap(method, list_of_args)) + + def _map_in_futures_threads(self, method, list_of_args): with futures.ThreadPoolExecutor( max_workers=self.aggregation_workers_number) as executor: # We use 'list' to iterate all threads here to raise the first -- GitLab From 73ed97906b1c82e655b879e24f3c472ecd9acfe3 Mon Sep 17 00:00:00 2001 From: Hanxi Date: Tue, 23 Aug 2016 00:19:15 +0800 Subject: [PATCH 0337/1483] remove default=None for config options In the cfg module default=None is set as the default value. Change-Id: If7edb70aff5c7b50acca0fc513250731ec15f0c6 Closes-bug: #1323975 --- tools/measures_injector.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/measures_injector.py b/tools/measures_injector.py index d4b0a582..2d58ca62 100755 --- a/tools/measures_injector.py +++ b/tools/measures_injector.py @@ -29,7 +29,7 @@ from gnocchi import utils def injector(): conf = cfg.ConfigOpts() conf.register_cli_opts([ - cfg.IntOpt("metrics", default=None), + cfg.IntOpt("metrics"), cfg.IntOpt("batch-of-measures", default=1000), cfg.IntOpt("measures-per-batch", default=10), ]) -- GitLab From 21e57add2e81ab0cc84c0f42caf2b33da40b5ee4 Mon Sep 17 00:00:00 2001 From: gord chung Date: Thu, 18 Aug 2016 13:47:04 +0000 Subject: [PATCH 0338/1483] paginate on upgrade we can have millions (billions?) of metrics. pulling this into memory is slow and bad. turning pagination on for upgrade. 
Partial-Bug: #1600796 Change-Id: I8fdaa149d738c1a6ef32b3597d573914dd9e6a86 --- gnocchi/storage/_carbonara.py | 13 ++++-- gnocchi/tests/storage/test_carbonara.py | 62 ++++++++++++++++++++++++- 2 files changed, 71 insertions(+), 4 deletions(-) diff --git a/gnocchi/storage/_carbonara.py b/gnocchi/storage/_carbonara.py index ff456531..0fdd98d8 100644 --- a/gnocchi/storage/_carbonara.py +++ b/gnocchi/storage/_carbonara.py @@ -51,6 +51,7 @@ LOG = log.getLogger(__name__) class CarbonaraBasedStorage(storage.StorageDriver): MEASURE_PREFIX = "measure" + UPGRADE_BATCH_SIZE = 1000 def __init__(self, conf): super(CarbonaraBasedStorage, self).__init__(conf) @@ -309,9 +310,15 @@ class CarbonaraBasedStorage(storage.StorageDriver): LOG.info("Migrated metric %s to new format" % metric) def upgrade(self, index): - self._map_in_thread( - self._check_for_metric_upgrade, - ((metric,) for metric in index.list_metrics())) + marker = None + while True: + metrics = [(metric,) for metric in + index.list_metrics(limit=self.UPGRADE_BATCH_SIZE, + marker=marker)] + self._map_in_thread(self._check_for_metric_upgrade, metrics) + if len(metrics) == 0: + break + marker = metrics[-1][0].id def process_new_measures(self, indexer, metrics_to_process, sync=False): metrics = indexer.list_metrics(ids=metrics_to_process) diff --git a/gnocchi/tests/storage/test_carbonara.py b/gnocchi/tests/storage/test_carbonara.py index a8f9fbe2..aa375277 100644 --- a/gnocchi/tests/storage/test_carbonara.py +++ b/gnocchi/tests/storage/test_carbonara.py @@ -77,7 +77,7 @@ class TestCarbonaraMigration(tests_base.TestCase): def upgrade(self): with mock.patch.object(self.index, 'list_metrics') as f: - f.return_value = [self.metric] + f.side_effect = [[self.metric], []] self.storage.upgrade(self.index) def test_get_measures(self): @@ -158,6 +158,66 @@ class TestCarbonaraMigration(tests_base.TestCase): self.upgrade() self.upgrade() + def test_get_measures_upgrade_limit(self): + self.metric2 = storage.Metric(uuid.uuid4(), + 
self.archive_policies['low']) + self.storage._create_metric(self.metric2) + + # serialise in old format + with mock.patch('gnocchi.carbonara.AggregatedTimeSerie.serialize', + autospec=True) as f: + with mock.patch('gnocchi.carbonara.AggregatedTimeSerie.' + 'POINTS_PER_SPLIT', 14400): + f.side_effect = _serialize_v2 + + for d, agg in itertools.product( + self.metric2.archive_policy.definition, + ['mean', 'max']): + ts = carbonara.AggregatedTimeSerie( + sampling=d.granularity, aggregation_method=agg, + max_size=d.points) + + # NOTE: there is a split at 2016-07-18 on granularity 300 + ts.update(carbonara.TimeSerie.from_data( + [datetime.datetime(2016, 7, 17, 23, 59, 0), + datetime.datetime(2016, 7, 17, 23, 59, 4), + datetime.datetime(2016, 7, 17, 23, 59, 9), + datetime.datetime(2016, 7, 18, 0, 0, 0), + datetime.datetime(2016, 7, 18, 0, 0, 4), + datetime.datetime(2016, 7, 18, 0, 0, 9)], + [4, 5, 6, 7, 8, 9])) + + for key, split in ts.split(): + self.storage._store_metric_measures( + self.metric2, key, agg, d.granularity, + split.serialize(), offset=0, version=None) + + with mock.patch.object( + self.storage, '_get_measures_and_unserialize', + side_effect=self.storage._get_measures_and_unserialize_v2): + self.assertEqual([ + (utils.datetime_utc(2016, 7, 17), 86400, 5), + (utils.datetime_utc(2016, 7, 18), 86400, 8), + (utils.datetime_utc(2016, 7, 17, 23), 3600, 5), + (utils.datetime_utc(2016, 7, 18, 0), 3600, 8), + (utils.datetime_utc(2016, 7, 17, 23, 55), 300, 5), + (utils.datetime_utc(2016, 7, 18, 0), 300, 8) + ], self.storage.get_measures(self.metric2)) + + with mock.patch.object(self.index, 'list_metrics') as f: + f.side_effect = [[self.metric], [self.metric2], []] + with mock.patch.object(self.storage, 'UPGRADE_BATCH_SIZE', 1): + self.storage.upgrade(self.index) + + self.assertEqual([ + (utils.datetime_utc(2016, 7, 17), 86400, 5), + (utils.datetime_utc(2016, 7, 18), 86400, 8), + (utils.datetime_utc(2016, 7, 17, 23), 3600, 5), + (utils.datetime_utc(2016, 7, 18, 0), 
3600, 8), + (utils.datetime_utc(2016, 7, 17, 23, 55), 300, 5), + (utils.datetime_utc(2016, 7, 18, 0), 300, 8) + ], self.storage.get_measures(self.metric2)) + def test_delete_metric_not_upgraded(self): # Make sure that we delete everything (e.g. objects + container) # correctly even if the metric has not been upgraded. -- GitLab From 84443ca4d47cbee9efa0e1ad1df63ff2ae7f8b1f Mon Sep 17 00:00:00 2001 From: gord chung Date: Fri, 19 Aug 2016 14:17:57 +0000 Subject: [PATCH 0339/1483] drop non-I/O threading in upgrade threading does nothing in python unless the task is heavily I/O-dependent. we already use threads to retrieve data so we really shouldn't wrap entire upgrade step in threading as it doesn't offer any benefits. Partial-Bug: #1600796 Change-Id: Ia4e834e6566cf053b9f26b2bee9142dff0c72444 --- gnocchi/storage/_carbonara.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/gnocchi/storage/_carbonara.py b/gnocchi/storage/_carbonara.py index 0fdd98d8..9c87d19e 100644 --- a/gnocchi/storage/_carbonara.py +++ b/gnocchi/storage/_carbonara.py @@ -312,13 +312,13 @@ class CarbonaraBasedStorage(storage.StorageDriver): def upgrade(self, index): marker = None while True: - metrics = [(metric,) for metric in - index.list_metrics(limit=self.UPGRADE_BATCH_SIZE, - marker=marker)] - self._map_in_thread(self._check_for_metric_upgrade, metrics) + metrics = index.list_metrics(limit=self.UPGRADE_BATCH_SIZE, + marker=marker) + for m in metrics: + self._check_for_metric_upgrade(m) if len(metrics) == 0: break - marker = metrics[-1][0].id + marker = metrics[-1].id def process_new_measures(self, indexer, metrics_to_process, sync=False): metrics = indexer.list_metrics(ids=metrics_to_process) -- GitLab From 8ca82d92c547bc4ba50138cf8553f7c833cd2b98 Mon Sep 17 00:00:00 2001 From: xiaozhuangqing Date: Fri, 26 Aug 2016 11:08:43 +0800 Subject: [PATCH 0340/1483] correct the debug log info, add a blank in log info the log info lack a blank , mix two words in log message 
Change-Id: I93068d67e1423f008cc4e39776ed91fc40bee6f5 --- gnocchi/storage/_carbonara.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/gnocchi/storage/_carbonara.py b/gnocchi/storage/_carbonara.py index ff456531..05c3d82e 100644 --- a/gnocchi/storage/_carbonara.py +++ b/gnocchi/storage/_carbonara.py @@ -327,7 +327,7 @@ class CarbonaraBasedStorage(storage.StorageDriver): with self._lock(metric_id)(blocking=sync): self._delete_unprocessed_measures_for_metric_id(metric_id) except coordination.LockAcquireFailed: - LOG.debug("Cannot acquire lock for metric %s, postponing" + LOG.debug("Cannot acquire lock for metric %s, postponing " "unprocessed measures deletion" % metric_id) for metric in metrics: lock = self._lock(metric.id) -- GitLab From c19bd9e1b64a09e59ddc23095a64115bd6201b9c Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Fri, 19 Aug 2016 15:43:06 +0200 Subject: [PATCH 0341/1483] carbonara: factorize out _get_unaggregated_timeserie_and_unserialize We'll need it later on its own. 
Change-Id: Ibf726cc01b76efaec6d8cd92e257377dd56bc0c4 --- gnocchi/storage/_carbonara.py | 51 +++++++++++++++++++++-------------- 1 file changed, 31 insertions(+), 20 deletions(-) diff --git a/gnocchi/storage/_carbonara.py b/gnocchi/storage/_carbonara.py index 24cc3f73..89218146 100644 --- a/gnocchi/storage/_carbonara.py +++ b/gnocchi/storage/_carbonara.py @@ -49,6 +49,13 @@ OPTS = [ LOG = log.getLogger(__name__) +class CorruptionError(ValueError): + """Data corrupted, damn it.""" + + def __init__(self, message): + super(CorruptionError, self).__init__(message) + + class CarbonaraBasedStorage(storage.StorageDriver): MEASURE_PREFIX = "measure" @@ -89,6 +96,24 @@ class CarbonaraBasedStorage(storage.StorageDriver): def _get_unaggregated_timeserie(metric): raise NotImplementedError + def _get_unaggregated_timeserie_and_unserialize(self, metric): + with timeutils.StopWatch() as sw: + raw_measures = ( + self._get_unaggregated_timeserie( + metric) + ) + LOG.debug( + "Retrieve unaggregated measures " + "for %s in %.2fs" + % (metric.id, sw.elapsed())) + try: + return carbonara.BoundTimeSerie.unserialize( + raw_measures) + except ValueError: + raise CorruptionError( + "Data corruption detected for %s " + "unaggregated timeserie" % metric.id) + @staticmethod def _store_unaggregated_timeserie(metric, data): raise NotImplementedError @@ -352,16 +377,10 @@ class CarbonaraBasedStorage(storage.StorageDriver): continue measures = sorted(measures, key=operator.itemgetter(0)) + try: - with timeutils.StopWatch() as sw: - raw_measures = ( - self._get_unaggregated_timeserie( - metric) - ) - LOG.debug( - "Retrieve unaggregated measures " - "for %s in %.2fs" - % (metric.id, sw.elapsed())) + ts = self._get_unaggregated_timeserie_and_unserialize( # noqa + metric) except storage.MetricDoesNotExist: try: self._create_metric(metric) @@ -369,17 +388,9 @@ class CarbonaraBasedStorage(storage.StorageDriver): # Created in the mean time, do not worry pass ts = None - else: - try: - ts = 
carbonara.BoundTimeSerie.unserialize( - raw_measures) - except ValueError: - ts = None - LOG.error( - "Data corruption detected for %s " - "unaggregated timeserie, " - "recreating an empty one." - % metric.id) + except CorruptionError as e: + LOG.error(e) + ts = None if ts is None: # This is the first time we treat measures for this -- GitLab From a373a9a8f3cb7f750255cf0f5578684bdce8b3db Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Sun, 21 Aug 2016 15:30:51 +0200 Subject: [PATCH 0342/1483] swift: remove retrying code This code became useless and bugged since we included LZ4 usage directly into the Swift driver. It was meant to retry if the data from Swift was null, not from lz4.loads(). So let's remove this. Change-Id: I555d1366974ba599c63c8aa4ce316fd3e4e91253 --- gnocchi/storage/swift.py | 11 ----------- 1 file changed, 11 deletions(-) diff --git a/gnocchi/storage/swift.py b/gnocchi/storage/swift.py index 9f4e2660..5ef96412 100644 --- a/gnocchi/storage/swift.py +++ b/gnocchi/storage/swift.py @@ -21,7 +21,6 @@ import uuid import lz4 from oslo_config import cfg from oslo_log import log -import retrying import six from six.moves.urllib.parse import quote try: @@ -69,10 +68,6 @@ OPTS = [ ] -def retry_if_result_empty(result): - return len(result) == 0 - - class SwiftStorage(_carbonara.CarbonaraBasedStorage): WRITE_FULL = True @@ -228,9 +223,6 @@ class SwiftStorage(_carbonara.CarbonaraBasedStorage): # Deleted in the meantime? Whatever. 
raise - @retrying.retry(stop_max_attempt_number=4, - wait_fixed=500, - retry_on_result=retry_if_result_empty) def _get_measures(self, metric, timestamp_key, aggregation, granularity, version=3): try: @@ -271,9 +263,6 @@ class SwiftStorage(_carbonara.CarbonaraBasedStorage): continue return keys - @retrying.retry(stop_max_attempt_number=4, - wait_fixed=500, - retry_on_result=retry_if_result_empty) def _get_unaggregated_timeserie(self, metric): try: headers, contents = self.swift.get_object( -- GitLab From 30050ee61bd739dc872911e284e54aa7d85cf99e Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Mon, 29 Aug 2016 18:22:36 +0200 Subject: [PATCH 0343/1483] storage: return list of split as a set Change-Id: Ib01808d2f03779c4066d0643e1c5482a15988465 --- gnocchi/storage/ceph.py | 4 ++-- gnocchi/storage/file.py | 4 ++-- gnocchi/storage/swift.py | 4 ++-- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/gnocchi/storage/ceph.py b/gnocchi/storage/ceph.py index 24e160ed..8aadea90 100644 --- a/gnocchi/storage/ceph.py +++ b/gnocchi/storage/ceph.py @@ -300,12 +300,12 @@ class CephStorage(_carbonara.CarbonaraBasedStorage): xattrs = self.ioctx.get_xattrs("gnocchi_%s_container" % metric.id) except rados.ObjectNotFound: raise storage.MetricDoesNotExist(metric) - keys = [] + keys = set() for xattr, value in xattrs: meta = xattr.split('_') if (aggregation == meta[3] and granularity == float(meta[4]) and self._version_check(xattr, version)): - keys.append(meta[2]) + keys.add(meta[2]) return keys def _get_unaggregated_timeserie(self, metric): diff --git a/gnocchi/storage/file.py b/gnocchi/storage/file.py index e5a84c36..413271ef 100644 --- a/gnocchi/storage/file.py +++ b/gnocchi/storage/file.py @@ -221,11 +221,11 @@ class FileStorage(_carbonara.CarbonaraBasedStorage): if e.errno == errno.ENOENT: raise storage.MetricDoesNotExist(metric) raise - keys = [] + keys = set() for f in files: meta = f.split("_") if meta[1] == str(granularity) and self._version_check(f, version): - 
keys.append(meta[0]) + keys.add(meta[0]) return keys def _delete_metric_measures(self, metric, timestamp_key, aggregation, diff --git a/gnocchi/storage/swift.py b/gnocchi/storage/swift.py index 5ef96412..48d4633e 100644 --- a/gnocchi/storage/swift.py +++ b/gnocchi/storage/swift.py @@ -251,13 +251,13 @@ class SwiftStorage(_carbonara.CarbonaraBasedStorage): if e.http_status == 404: raise storage.MetricDoesNotExist(metric) raise - keys = [] + keys = set() for f in files: try: meta = f['name'].split('_') if (aggregation == meta[1] and granularity == float(meta[2]) and self._version_check(f['name'], version)): - keys.append(meta[0]) + keys.add(meta[0]) except (ValueError, IndexError): # Might be "none", or any other file. Be resilient. continue -- GitLab From 2576d304552622778580d2b7f95109c6ee634323 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Thu, 1 Sep 2016 17:23:29 +0200 Subject: [PATCH 0344/1483] test: fix a random failure with metric listing The test `gabbi.suitemaker.test_gabbi_prefix_resource_request_metrics_from_one_of_the_instances.test_request' can fail in random cases with: AssertionError: Unable to match $[0].resource_id as 85c44741-cc60-4033-804e-2d3098c7d2e9, got d13982cb-4cce-4f84-a96e-7581be1e599c because there are 2 resources with disk.util metrics and the list is not be ordered. 
Change-Id: I233c83028cb7295d81b00b2181eb23b8ab96ce97 --- gnocchi/tests/gabbi/gabbits/resource.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/gnocchi/tests/gabbi/gabbits/resource.yaml b/gnocchi/tests/gabbi/gabbits/resource.yaml index 217a2f4f..31e778d3 100644 --- a/gnocchi/tests/gabbi/gabbits/resource.yaml +++ b/gnocchi/tests/gabbi/gabbits/resource.yaml @@ -567,7 +567,7 @@ tests: host: compute3 display_name: myvm2 metrics: - cpu.util: + disk.util: archive_policy_name: medium status: 201 response_json_paths: -- GitLab From 8b3b78096a660c1da7f83db81c290283d03d71ad Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Thu, 1 Sep 2016 17:37:13 +0200 Subject: [PATCH 0345/1483] indexer: fix retry on PostgreSQL transaction error This makes sure that we retry with a brand new connection, and not a broken one. Change-Id: Ia180487c57e4c67caebd49a9654f13c37cd4cc15 --- gnocchi/indexer/sqlalchemy.py | 43 +++++++++++++++++------------------ 1 file changed, 21 insertions(+), 22 deletions(-) diff --git a/gnocchi/indexer/sqlalchemy.py b/gnocchi/indexer/sqlalchemy.py index 17a0e339..3e0f1b7b 100644 --- a/gnocchi/indexer/sqlalchemy.py +++ b/gnocchi/indexer/sqlalchemy.py @@ -179,7 +179,7 @@ class ResourceClassMapper(object): and inn_e.orig.pgcode == '25P02') @retry_on_deadlock - def unmap_and_delete_tables(self, resource_type, connection): + def unmap_and_delete_tables(self, resource_type, facade): if resource_type.state != "deleting": raise RuntimeError("unmap_and_delete_tables must be called in " "state deleting") @@ -190,15 +190,15 @@ class ResourceClassMapper(object): tables = [Base.metadata.tables[klass.__tablename__] for klass in mappers.values()] - if connection is not None: - # NOTE(sileht): Base.metadata.drop_all doesn't - # issue CASCADE stuffs correctly at least on postgresql - # We drop foreign keys manually to not lock the destination - # table for too long during drop table. 
- # It's safe to not use a transaction since - # the resource_type table is already cleaned and commited - # so this code cannot be triggerred anymore for this - # resource_type + # NOTE(sileht): Base.metadata.drop_all doesn't + # issue CASCADE stuffs correctly at least on postgresql + # We drop foreign keys manually to not lock the destination + # table for too long during drop table. + # It's safe to not use a transaction since + # the resource_type table is already cleaned and commited + # so this code cannot be triggerred anymore for this + # resource_type + with facade.writer_connection() as connection: try: for table in tables: for fk in table.foreign_key_constraints: @@ -219,15 +219,15 @@ class ResourceClassMapper(object): raise exception.RetryRequest(e) raise - # NOTE(sileht): If something goes wrong here, we are currently - # fucked, that why we expose the state to the superuser. - # TODO(sileht): The idea is to make the delete resource_type more - # like a cleanup method, I mean we should don't fail if the - # constraint have already been dropped or the table have already - # been deleted. So, when the superuser have fixed it's backend - # issue, it can rerun 'DELETE ../resource_type/foobar' even the - # state is already error and if we are sure all underlying - # resources have been cleaned we really deleted the resource_type. + # NOTE(sileht): If something goes wrong here, we are currently + # fucked, that why we expose the state to the superuser. + # TODO(sileht): The idea is to make the delete resource_type more + # like a cleanup method, I mean we should don't fail if the + # constraint have already been dropped or the table have already + # been deleted. So, when the superuser have fixed it's backend + # issue, it can rerun 'DELETE ../resource_type/foobar' even the + # state is already error and if we are sure all underlying + # resources have been cleaned we really deleted the resource_type. 
# TODO(sileht): Remove this resource on other workers # by using expiration on cache ? @@ -464,9 +464,8 @@ class SQLAlchemyIndexer(indexer.IndexerDriver): rt = self._mark_as_deleting_resource_type(name) try: - with self.facade.writer_connection() as connection: - self._RESOURCE_TYPE_MANAGER.unmap_and_delete_tables( - rt, connection) + self._RESOURCE_TYPE_MANAGER.unmap_and_delete_tables( + rt, self.facade) except Exception: # NOTE(sileht): We fail the DDL, we have no way to automatically # recover, just set a particular state -- GitLab From 10fd2690d46f12709b9666ffd602843a666e94ed Mon Sep 17 00:00:00 2001 From: Danek Duvall Date: Thu, 1 Sep 2016 17:26:53 -0700 Subject: [PATCH 0346/1483] Ignore EEXIST when removing measures directories On Linux, rmdir() sets errno to ENOTEMPTY when the directory is not empty. On Solaris, it sets errno to EEXIST. They should be treated equivalently here. Change-Id: I1024e757ac46936a421cc2a1013a0b5735747719 Closes-Bug: #1619509 --- gnocchi/storage/file.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/gnocchi/storage/file.py b/gnocchi/storage/file.py index e5a84c36..84b35969 100644 --- a/gnocchi/storage/file.py +++ b/gnocchi/storage/file.py @@ -175,7 +175,8 @@ class FileStorage(_carbonara.CarbonaraBasedStorage): # by another process # ENOTEMPTY: ok, someone pushed measure in the meantime, # we'll delete the measures and directory later - if e.errno != errno.ENOENT and e.errno != errno.ENOTEMPTY: + # EEXIST: some systems use this instead of ENOTEMPTY + if e.errno not in (errno.ENOENT, errno.ENOTEMPTY, errno.EEXIST): raise def _delete_unprocessed_measures_for_metric_id(self, metric_id): -- GitLab From c062916a81af2961a1fbebda82ac8bef88ba62cc Mon Sep 17 00:00:00 2001 From: Jake Yip Date: Fri, 2 Sep 2016 07:07:16 +0000 Subject: [PATCH 0347/1483] Pin oslo.db<=4.13.0 oslo.db==4.13.1 introduced some changes causing the tests to fail File "/xxx/lib/python2.7/site-packages/oslo_db/sqlalchemy/utils.py", line 77, in 
_get_unique_keys info = model.__table__.info AttributeError: type object 'Result' has no attribute '__table__' Seems to be introduced in: https://github.com/openstack/oslo.db/blame/4.13.1/oslo_db/sqlalchemy/utils.py#L77 Change-Id: I679a958e7b95f594118c2061fa19469ba6ef59dd --- setup.cfg | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/setup.cfg b/setup.cfg index 812a88a1..1374ad51 100644 --- a/setup.cfg +++ b/setup.cfg @@ -25,13 +25,13 @@ keystone = keystonemiddleware>=4.0.0 mysql = pymysql - oslo.db>=4.8.0 + oslo.db>=4.8.0,!=4.13.1 sqlalchemy sqlalchemy-utils alembic>=0.7.6,!=0.8.1 postgresql = psycopg2 - oslo.db>=4.8.0 + oslo.db>=4.8.0,!=4.13.1 sqlalchemy sqlalchemy-utils alembic>=0.7.6,!=0.8.1 -- GitLab From 5ab6cf313bbb25a9b66d390da70811569d160cd5 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Wed, 31 Aug 2016 20:21:52 +0200 Subject: [PATCH 0348/1483] ceph: fix write emulation Ceph does _not_ truncate data when writing(), which means we need to copy back data that are not overwritten. 
Change-Id: I27e839a0904f378bbab9b5d911e6a9ce6d4ac03c --- gnocchi/tests/base.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/gnocchi/tests/base.py b/gnocchi/tests/base.py index 39c1b324..5a4af3cc 100644 --- a/gnocchi/tests/base.py +++ b/gnocchi/tests/base.py @@ -144,7 +144,9 @@ class FakeRadosModule(object): current = b"" if len(current) < offset: current += b'\x00' * (offset - len(current)) - self.kvs[key] = current[:offset] + value + self.kvs[key] = ( + current[:offset] + value + current[offset + len(value):] + ) def stat(self, key): self._validate_key(key) -- GitLab From 0903b5cbde7c2445d34cc659005c1e243a37be68 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Mon, 29 Aug 2016 17:58:59 +0200 Subject: [PATCH 0349/1483] storage: test splits existing after delete Change-Id: I8e9ccf2d1f97616c0c5cdf9d6eef81daf9ce861b --- gnocchi/tests/test_storage.py | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/gnocchi/tests/test_storage.py b/gnocchi/tests/test_storage.py index 2f10894e..cc439cf4 100644 --- a/gnocchi/tests/test_storage.py +++ b/gnocchi/tests/test_storage.py @@ -214,6 +214,16 @@ class TestStorageDriver(tests_base.TestCase): (utils.datetime_utc(2015, 1, 1, 12), 300.0, 69), ], self.storage.get_measures(self.metric)) + self.assertEqual({"1244160000.0"}, + self.storage._list_split_keys_for_metric( + self.metric, "mean", 86400.0)) + self.assertEqual({"1412640000.0"}, + self.storage._list_split_keys_for_metric( + self.metric, "mean", 3600.0)) + self.assertEqual({"1419120000.0"}, + self.storage._list_split_keys_for_metric( + self.metric, "mean", 300.0)) + def test_updated_measures(self): self.storage.add_measures(self.metric, [ storage.Measure(datetime.datetime(2014, 1, 1, 12, 0, 1), 69), -- GitLab From 6a8054cfefb041f4055406fc374e97190e04c1ce Mon Sep 17 00:00:00 2001 From: fengchaoyang Date: Sat, 27 Aug 2016 23:55:26 +0800 Subject: [PATCH 0350/1483] Check whether the resource type attribute min is less than or equal to max When use 
resource type api to create a new resource type, check whether min and min_length is less than max and max_length Closes-Bug: #1617561 Change-Id: Iccfe751f274066b4c11d15901b375dc1177e1887 --- gnocchi/resource_type.py | 18 +++++++++-- gnocchi/rest/__init__.py | 2 ++ .../tests/gabbi/gabbits/resource-type.yaml | 30 +++++++++++++++++++ 3 files changed, 48 insertions(+), 2 deletions(-) diff --git a/gnocchi/resource_type.py b/gnocchi/resource_type.py index 367acb88..79c0e8b7 100644 --- a/gnocchi/resource_type.py +++ b/gnocchi/resource_type.py @@ -40,6 +40,16 @@ class InvalidResourceAttributeName(Exception): self.name = name +class InvalidResourceAttributeValue(ValueError): + """Error raised when the resource attribute min is greater than max""" + def __init__(self, min, max): + super(InvalidResourceAttributeValue, self).__init__( + "Resource attribute value min (or min_length) %s must be less " + "than or equal to max (or max_length) %s!" % (str(min), str(max))) + self.min = min + self.max = max + + class CommonAttributeSchema(object): meta_schema_ext = {} schema_ext = None @@ -80,10 +90,12 @@ class StringSchema(CommonAttributeSchema): def __init__(self, min_length, max_length, *args, **kwargs): super(StringSchema, self).__init__(*args, **kwargs) + if min_length > max_length: + raise InvalidResourceAttributeValue(min_length, max_length) + self.min_length = min_length self.max_length = max_length - # TODO(sileht): ensure min_length <= max_length meta_schema_ext = { voluptuous.Required('min_length', default=0): voluptuous.All(int, voluptuous.Range(min=0, max=255)), @@ -115,10 +127,12 @@ class NumberSchema(CommonAttributeSchema): def __init__(self, min, max, *args, **kwargs): super(NumberSchema, self).__init__(*args, **kwargs) + if max is not None and min > max: + raise InvalidResourceAttributeValue(min, max) + self.min = min self.max = max - # TODO(sileht): ensure min_length <= max_length meta_schema_ext = { voluptuous.Required('min', default=None): voluptuous.Any( None, 
numbers.Real), diff --git a/gnocchi/rest/__init__.py b/gnocchi/rest/__init__.py index f80e0a0f..3ac68403 100644 --- a/gnocchi/rest/__init__.py +++ b/gnocchi/rest/__init__.py @@ -790,6 +790,8 @@ class ResourceTypesController(rest.RestController): rt = schema.resource_type_from_dict(**body) except resource_type.InvalidResourceAttributeName as e: abort(400, e) + except resource_type.InvalidResourceAttributeValue as e: + abort(400, e) enforce("create resource type", body) try: diff --git a/gnocchi/tests/gabbi/gabbits/resource-type.yaml b/gnocchi/tests/gabbi/gabbits/resource-type.yaml index 2204bbaa..9fa32a5c 100644 --- a/gnocchi/tests/gabbi/gabbits/resource-type.yaml +++ b/gnocchi/tests/gabbi/gabbits/resource-type.yaml @@ -58,6 +58,36 @@ tests: # - "Invalid input: not a valid value for dictionary value @ data[u'attributes'][u'foo'][u'type']" - "Invalid input:" + - name: post resource type bad min_length value + POST: $LAST_URL + request_headers: + x-roles: admin + content-type: application/json + data: + name: my_custom_resource + attributes: + name: + type: string + required: true + max_length: 2 + min_length: 5 + status: 400 + + - name: post resource type bad min value + POST: $LAST_URL + request_headers: + x-roles: admin + content-type: application/json + data: + name: my_custom_resource + attributes: + int: + type: number + required: false + max: 3 + min: 8 + status: 400 + # Create a type - name: post resource type -- GitLab From b2d64d26acd0f525ef45721f3a07b269eec48e3d Mon Sep 17 00:00:00 2001 From: gord chung Date: Tue, 6 Sep 2016 22:49:31 +0000 Subject: [PATCH 0351/1483] block oslo.db 4.13.2 it's broken. 
Change-Id: I06989336f90ae74f38a23276eda4ed688b5a51bd Partial-Bug: #1620848 --- setup.cfg | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/setup.cfg b/setup.cfg index 1374ad51..f32100fb 100644 --- a/setup.cfg +++ b/setup.cfg @@ -25,13 +25,13 @@ keystone = keystonemiddleware>=4.0.0 mysql = pymysql - oslo.db>=4.8.0,!=4.13.1 + oslo.db>=4.8.0,!=4.13.1,!=4.13.2 sqlalchemy sqlalchemy-utils alembic>=0.7.6,!=0.8.1 postgresql = psycopg2 - oslo.db>=4.8.0,!=4.13.1 + oslo.db>=4.8.0,!=4.13.1,!=4.13.2 sqlalchemy sqlalchemy-utils alembic>=0.7.6,!=0.8.1 -- GitLab From ab0d2a5e014a10dd4655eb18e46a04c83c571b41 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Mon, 5 Sep 2016 12:10:26 +0000 Subject: [PATCH 0352/1483] Put CORS middleware at the pipeline beginning This change places the CORS middleware at the beginning of the pipeline otherwise keystonemiddleware will anwser 401 before CORS interprets OPTIONS calls. Change-Id: I968a0e18a67f6ae952bbb49f59f9a37f745b336d --- gnocchi/rest/app.py | 8 ++--- gnocchi/tests/test_rest.py | 62 +++++++++++++++++++++++++++++++++++++- 2 files changed, 65 insertions(+), 5 deletions(-) diff --git a/gnocchi/rest/app.py b/gnocchi/rest/app.py index 5b1c13ed..f2f09404 100644 --- a/gnocchi/rest/app.py +++ b/gnocchi/rest/app.py @@ -107,8 +107,9 @@ def load_app(conf, appname=None, indexer=None, storage=None, APPCONFIGS[configkey] = config LOG.info("WSGI config used: %s" % cfg_path) - return deploy.loadapp("config:" + cfg_path, name=appname, - global_conf={'configkey': configkey}) + app = deploy.loadapp("config:" + cfg_path, name=appname, + global_conf={'configkey': configkey}) + return cors.CORS(app, conf=conf) def _setup_app(root, conf, indexer, storage, not_implemented_middleware): @@ -129,8 +130,7 @@ def _setup_app(root, conf, indexer, storage, not_implemented_middleware): def app_factory(global_config, **local_conf): global APPCONFIGS appconfig = APPCONFIGS.get(global_config.get('configkey')) - app = 
_setup_app(root=local_conf.get('root'), **appconfig) - return cors.CORS(app, conf=appconfig['conf']) + return _setup_app(root=local_conf.get('root'), **appconfig) def build_wsgi_app(): diff --git a/gnocchi/tests/test_rest.py b/gnocchi/tests/test_rest.py index 02239097..d9ac6037 100644 --- a/gnocchi/tests/test_rest.py +++ b/gnocchi/tests/test_rest.py @@ -24,6 +24,8 @@ import uuid from keystonemiddleware import fixture as ksm_fixture import mock +from oslo_config import cfg +from oslo_middleware import cors from oslo_utils import timeutils import six from stevedore import extension @@ -53,6 +55,8 @@ class TestingApp(webtest.TestApp): USER_ID_2 = str(uuid.uuid4()) PROJECT_ID_2 = str(uuid.uuid4()) + INVALID_TOKEN = str(uuid.uuid4()) + def __init__(self, *args, **kwargs): self.auth = kwargs.pop('auth') self.storage = kwargs.pop('storage') @@ -83,8 +87,30 @@ class TestingApp(webtest.TestApp): finally: self.token = old_token + @contextlib.contextmanager + def use_invalid_token(self): + if not self.auth: + raise testcase.TestSkipped("No auth enabled") + old_token = self.token + self.token = self.INVALID_TOKEN + try: + yield + finally: + self.token = old_token + + @contextlib.contextmanager + def use_no_token(self): + # We don't skip for no self.auth to ensure + # some test returns the same thing with auth or not + old_token = self.token + self.token = None + try: + yield + finally: + self.token = old_token + def do_request(self, req, *args, **kwargs): - if self.auth: + if self.auth and self.token is not None: req.headers['X-Auth-Token'] = self.token response = super(TestingApp, self).do_request(req, *args, **kwargs) metrics = self.storage.list_metric_with_measures_to_process( @@ -106,6 +132,13 @@ class RestTest(tests_base.TestCase, testscenarios.TestWithScenarios): self.path_get('etc/gnocchi/api-paste.ini'), group="api") + # NOTE(sileht): This is not concurrency safe, but only this tests file + # deal with cors, so we are fine. 
set_override don't work because + # cors group doesn't yet exists, and we the CORS middleware is created + # it register the option and directly copy value of all configurations + # options making impossible to override them properly... + cfg.set_defaults(cors.CORS_OPTS, allowed_origin="http://foobar.com") + self.auth_token_fixture = self.useFixture( ksm_fixture.AuthTokenFixture()) self.auth_token_fixture.add_token_data( @@ -147,6 +180,33 @@ class RestTest(tests_base.TestCase, testscenarios.TestWithScenarios): class RootTest(RestTest): + + def _do_test_cors(self): + resp = self.app.options( + "/v1/status", + headers={'Origin': 'http://notallowed.com', + 'Access-Control-Request-Method': 'GET'}, + status=200) + headers = dict(resp.headers) + self.assertNotIn("Access-Control-Allow-Origin", headers) + self.assertNotIn("Access-Control-Allow-Methods", headers) + resp = self.app.options( + "/v1/status", + headers={'origin': 'http://foobar.com', + 'Access-Control-Request-Method': 'GET'}, + status=200) + headers = dict(resp.headers) + self.assertIn("Access-Control-Allow-Origin", headers) + self.assertIn("Access-Control-Allow-Methods", headers) + + def test_cors_invalid_token(self): + with self.app.use_invalid_token(): + self._do_test_cors() + + def test_cors_no_token(self): + with self.app.use_no_token(): + self._do_test_cors() + def test_deserialize_force_json(self): with self.app.use_admin_user(): self.app.post( -- GitLab From 0ca090c0775200e96325022795de3024a874700c Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Thu, 8 Sep 2016 15:48:02 +0200 Subject: [PATCH 0353/1483] search: Fix in operator The in operator was not working, this fixes it. 
Co-Authored-By: shengping zhang Change-Id: I6f20aac492b0ec1ce9f74c205f81bad5b6493d5c --- gnocchi/indexer/sqlalchemy.py | 6 ++++- gnocchi/tests/gabbi/gabbits/search.yaml | 34 +++++++++++++++++++++++++ 2 files changed, 39 insertions(+), 1 deletion(-) diff --git a/gnocchi/indexer/sqlalchemy.py b/gnocchi/indexer/sqlalchemy.py index 17a0e339..5f8c431d 100644 --- a/gnocchi/indexer/sqlalchemy.py +++ b/gnocchi/indexer/sqlalchemy.py @@ -1080,7 +1080,11 @@ class QueryTransformer(object): if converter: try: - value = converter(value) + if isinstance(value, list): + # we got a list for in_ operator + value = [converter(v) for v in value] + else: + value = converter(value) except Exception: raise indexer.QueryValueError(value, field_name) diff --git a/gnocchi/tests/gabbi/gabbits/search.yaml b/gnocchi/tests/gabbi/gabbits/search.yaml index f0d7abd7..f13621c8 100644 --- a/gnocchi/tests/gabbi/gabbits/search.yaml +++ b/gnocchi/tests/gabbi/gabbits/search.yaml @@ -22,3 +22,37 @@ tests: data: =: id: "cd9eef" + + - name: post generic resource + POST: /v1/resource/generic + request_headers: + content-type: application/json + data: + id: faef212f-0bf4-4030-a461-2186fef79be0 + started_at: "2014-01-03T02:02:02.000000" + user_id: 0fbb231484614b1a80131fc22f6afc9c + project_id: f3d41b770cc14f0bb94a1d5be9c0e3ea + status: 201 + + - name: post generic resource twice + POST: /v1/resource/generic + request_headers: + content-type: application/json + data: + id: df7e5e75-6a1d-4ff7-85cb-38eb9d75da7e + started_at: "2014-01-03T02:02:02.000000" + user_id: 0fbb231484614b1a80131fc22f6afc9c + project_id: f3d41b770cc14f0bb94a1d5be9c0e3ea + status: 201 + + - name: search in_ + POST: /v1/search/resource/generic + request_headers: + content-type: application/json + data: + in: + id: + - faef212f-0bf4-4030-a461-2186fef79be0 + - df7e5e75-6a1d-4ff7-85cb-38eb9d75da7e + response_json_paths: + $.`len`: 2 -- GitLab From 187304c51131aec387ec3fcad0854af1acbbfdbb Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Fri, 
19 Aug 2016 12:02:00 +0200 Subject: [PATCH 0354/1483] carbonara: write full data when the split is going read-only This change the driver behavior to write an entire split once it becomes in read-only mode, meaning the back window can't touch it anymore. Change-Id: I514cebc7717d9dc5a4902895433014dd20cda23e --- gnocchi/carbonara.py | 126 +++++++++++++++++------- gnocchi/storage/_carbonara.py | 96 +++++++++++++----- gnocchi/storage/ceph.py | 12 +-- gnocchi/storage/file.py | 2 +- gnocchi/storage/swift.py | 2 +- gnocchi/tests/storage/test_carbonara.py | 42 ++++++-- gnocchi/tests/test_carbonara.py | 38 +++---- gnocchi/tests/test_storage.py | 4 +- 8 files changed, 223 insertions(+), 99 deletions(-) diff --git a/gnocchi/carbonara.py b/gnocchi/carbonara.py index 26876cb4..4279031a 100644 --- a/gnocchi/carbonara.py +++ b/gnocchi/carbonara.py @@ -77,6 +77,11 @@ class UnknownAggregationMethod(Exception): "Unknown aggregation method `%s'" % agg) +def round_timestamp(ts, freq): + return pandas.Timestamp( + (pandas.Timestamp(ts).value // freq) * freq) + + class TimeSerie(object): """A representation of series of a timestamp with a value. @@ -150,11 +155,6 @@ class TimeSerie(object): if value: return value.nanos / 10e8 - @staticmethod - def round_timestamp(ts, freq): - return pandas.Timestamp( - (pandas.Timestamp(ts).value // freq) * freq) - @staticmethod def _to_offset(value): if isinstance(value, numbers.Real): @@ -265,8 +265,8 @@ class BoundTimeSerie(TimeSerie): def first_block_timestamp(self): """Return the timestamp of the first block.""" - rounded = self.round_timestamp(self.ts.index[-1], - self.block_size.delta.value) + rounded = round_timestamp(self.ts.index[-1], + self.block_size.delta.value) return rounded - (self.block_size * self.back_window) @@ -279,11 +279,66 @@ class BoundTimeSerie(TimeSerie): self.ts = self.ts[self.first_block_timestamp():] +class SplitKey(pandas.Timestamp): + """A class representing a split key. 
+ + A split key is basically a timestamp that can be used to split + `AggregatedTimeSerie` objects in multiple parts. Each part will contain + `SplitKey.POINTS_PER_SPLIT` points. The split key for a given granularity + are regularly spaced. + """ + + POINTS_PER_SPLIT = 3600 + + @classmethod + def _init(cls, value, sampling): + # NOTE(jd) This should be __init__ but it does not work, because of… + # Pandas, Cython, whatever. + self = cls(value) + self._carbonara_sampling = sampling + return self + + @classmethod + def from_timestamp_and_sampling(cls, timestamp, sampling): + return cls._init( + round_timestamp( + timestamp, freq=sampling * cls.POINTS_PER_SPLIT * 10e8), + sampling) + + def __next__(self): + """Get the split key of the next split. + + :return: A `SplitKey` object. + """ + return self._init( + self + datetime.timedelta( + seconds=(self.POINTS_PER_SPLIT * self._carbonara_sampling)), + self._carbonara_sampling) + + next = __next__ + + def __iter__(self): + return self + + def __str__(self): + return str(float(self)) + + def __float__(self): + ts = self.to_datetime() + if ts.tzinfo is None: + ts = ts.replace(tzinfo=iso8601.iso8601.UTC) + return utils.datetime_to_unix(ts) + + def __repr__(self): + return "<%s: %s / %fs>" % (self.__class__.__name__, + pandas.Timestamp.__repr__(self), + self._carbonara_sampling) + + class AggregatedTimeSerie(TimeSerie): _AGG_METHOD_PCT_RE = re.compile(r"([1-9][0-9]?)pct") - POINTS_PER_SPLIT = 3600 SERIAL_LEN = 9 def __init__(self, sampling, aggregation_method, @@ -320,28 +375,11 @@ class AggregatedTimeSerie(TimeSerie): ts=pandas.Series(values, timestamps), max_size=max_size) - @classmethod - def get_split_key_datetime(cls, timestamp, sampling): - return cls.round_timestamp( - timestamp, freq=sampling * cls.POINTS_PER_SPLIT * 10e8) - - @staticmethod - def _split_key_to_string(timestamp): - ts = timestamp.to_datetime() - if ts.tzinfo is None: - ts = ts.replace(tzinfo=iso8601.iso8601.UTC) - return str(utils.datetime_to_unix(ts)) 
- - @classmethod - def get_split_key(cls, timestamp, sampling): - return cls._split_key_to_string( - cls.get_split_key_datetime(timestamp, sampling)) - def split(self): groupby = self.ts.groupby(functools.partial( - self.get_split_key_datetime, sampling=self.sampling)) + SplitKey.from_timestamp_and_sampling, sampling=self.sampling)) for group, ts in groupby: - yield (self._split_key_to_string(group), + yield (SplitKey._init(group, self.sampling), AggregatedTimeSerie(self.sampling, self.aggregation_method, ts)) @@ -387,7 +425,16 @@ class AggregatedTimeSerie(TimeSerie): y = pandas.to_datetime(y, unit='s') return cls.from_data(sampling, agg_method, y, x) + def get_split_key(self): + return SplitKey.from_timestamp_and_sampling( + self.first, self.sampling) + def serialize(self, start=None, padded=True): + """Serialize an aggregated timeserie. + + :param start: timestamp to start serialization + :param padded: pad the beginning of the serialization with zeroes + """ # NOTE(gordc): this binary serializes series based on the split time. # the format is 1B True/False flag which denotes whether subsequent 8B # is a real float or zero padding. 
every 9B represents one second from @@ -397,9 +444,13 @@ class AggregatedTimeSerie(TimeSerie): if not self.ts.index.is_monotonic: self.ts = self.ts.sort_index() offset_div = self.sampling * 10e8 - start = ((float(start) * 10e8 if start else - float(self.get_split_key(self.first, self.sampling)) * 10e8) - if padded else self.first.value) + if padded: + if start is None: + start = pandas.Timestamp(start).value + else: + start = self.get_split_key().value + else: + start = self.first.value # calculate how many seconds from start the series runs until and # initialize list to store alternating delimiter, float entries e_offset = int((self.last.value - start) // (self.sampling * 10e8)) + 1 @@ -411,9 +462,9 @@ class AggregatedTimeSerie(TimeSerie): serial[loc * 2 + 1] = float(v) return struct.pack('<' + '?d' * e_offset, *serial) - def offset_from_split(self): - split = float(self.get_split_key(self.first, self.sampling)) * 10e8 - return int((self.first.value - split) // (self.sampling * 10e8) + def offset_from_timestamp(self, timestamp): + return int((self.first.value - pandas.Timestamp(timestamp).value) + // (self.sampling * 10e8) * self.SERIAL_LEN) def _truncate(self, quick=False): @@ -427,7 +478,7 @@ class AggregatedTimeSerie(TimeSerie): # Group by the sampling, and then apply the aggregation method on # the points after `after' groupedby = self.ts[after:].groupby( - functools.partial(self.round_timestamp, + functools.partial(round_timestamp, freq=self.sampling * 10e8)) agg_func = getattr(groupedby, self.aggregation_method_func_name) if self.aggregation_method_func_name == 'quantile': @@ -449,7 +500,7 @@ class AggregatedTimeSerie(TimeSerie): if from_timestamp is None: from_ = None else: - from_ = self.round_timestamp(from_timestamp, self.sampling * 10e8) + from_ = round_timestamp(from_timestamp, self.sampling * 10e8) points = self[from_:to_timestamp] try: # Do not include stop timestamp @@ -493,7 +544,7 @@ class AggregatedTimeSerie(TimeSerie): @classmethod def 
benchmark(cls): """Run a speed benchmark!""" - points = cls.POINTS_PER_SPLIT + points = SplitKey.POINTS_PER_SPLIT sampling = 5 compress_times = 50 @@ -523,8 +574,9 @@ class AggregatedTimeSerie(TimeSerie): for i in six.moves.range(points)]) ts = cls(ts=pts, sampling=sampling, aggregation_method='mean') t0 = time.time() + key = ts.get_split_key() for i in six.moves.range(compress_times): - s = ts.serialize() + s = ts.serialize(key) t1 = time.time() print(title) print(" Bytes per point: %.2f" % (len(s) / float(points))) diff --git a/gnocchi/storage/_carbonara.py b/gnocchi/storage/_carbonara.py index 45830464..8d642b00 100644 --- a/gnocchi/storage/_carbonara.py +++ b/gnocchi/storage/_carbonara.py @@ -98,6 +98,12 @@ class CarbonaraBasedStorage(storage.StorageDriver): raise NotImplementedError def _get_unaggregated_timeserie_and_unserialize(self, metric): + """Retrieve unaggregated timeserie for a metric and unserialize it. + + Returns a gnocchi.carbonara.BoundTimeSerie object. If the data cannot + be retrieved, returns None. 
+ + """ with timeutils.StopWatch() as sw: raw_measures = ( self._get_unaggregated_timeserie( @@ -121,7 +127,7 @@ class CarbonaraBasedStorage(storage.StorageDriver): @staticmethod def _store_metric_measures(metric, timestamp_key, aggregation, - granularity, data, offset=0, version=3): + granularity, data, offset=None, version=3): raise NotImplementedError @staticmethod @@ -195,12 +201,14 @@ class CarbonaraBasedStorage(storage.StorageDriver): raise storage.GranularityDoesNotExist(metric, granularity) if from_timestamp: - from_timestamp = carbonara.AggregatedTimeSerie.get_split_key( - from_timestamp, granularity) + from_timestamp = str( + carbonara.SplitKey.from_timestamp_and_sampling( + from_timestamp, granularity)) if to_timestamp: - to_timestamp = carbonara.AggregatedTimeSerie.get_split_key( - to_timestamp, granularity) + to_timestamp = str( + carbonara.SplitKey.from_timestamp_and_sampling( + to_timestamp, granularity)) timeseries = filter( lambda x: x is not None, @@ -218,21 +226,44 @@ class CarbonaraBasedStorage(storage.StorageDriver): timeseries=timeseries, max_size=points) - def _get_measures_to_update(self, metric, agg, apolicy, timeserie): - return self._get_measures_timeserie(metric, agg, apolicy.granularity, - timeserie.first, timeserie.last) + def _store_timeserie_split(self, metric, key, split, + aggregation, archive_policy_def, + oldest_mutable_timestamp): + # NOTE(jd) We write the full split only if the driver works that way + # (self.WRITE_FULL) or if the oldest_mutable_timestamp is out of range. 
+ write_full = self.WRITE_FULL or oldest_mutable_timestamp >= next(key) + key_as_str = str(key) + if write_full: + offset = None + try: + existing = self._get_measures_and_unserialize( + metric, key_as_str, aggregation, + archive_policy_def.granularity) + except storage.AggregationDoesNotExist: + pass + else: + if existing is not None: + # FIXME(jd) not update but rather concat ts + split + existing.update(split) + split = existing + else: + offset = split.offset_from_timestamp(key) + return self._store_metric_measures( + metric, key_as_str, aggregation, archive_policy_def.granularity, + split.serialize(key, write_full), offset=offset) def _add_measures(self, aggregation, archive_policy_def, - metric, timeserie): - ts = self._get_measures_to_update(metric, aggregation, - archive_policy_def, timeserie) + metric, timeserie, + oldest_mutable_timestamp): + ts = carbonara.AggregatedTimeSerie( + archive_policy_def.granularity, + aggregation, + max_size=archive_policy_def.points) ts.update(timeserie) for key, split in ts.split(): - self._store_metric_measures(metric, key, aggregation, - archive_policy_def.granularity, - split.serialize(key, self.WRITE_FULL), - offset=(0 if self.WRITE_FULL else - split.offset_from_split())) + self._store_timeserie_split( + metric, key, split, aggregation, archive_policy_def, + oldest_mutable_timestamp) if ts.last and archive_policy_def.timespan: oldest_point_to_keep = ts.last - datetime.timedelta( @@ -270,8 +301,8 @@ class CarbonaraBasedStorage(storage.StorageDriver): def _delete_metric_measures_before(self, metric, aggregation_method, granularity, timestamp): """Delete measures for a metric before a timestamp.""" - ts = carbonara.AggregatedTimeSerie.get_split_key( - timestamp, granularity) + ts = str(carbonara.SplitKey.from_timestamp_and_sampling( + timestamp, granularity)) for key in self._list_split_keys_for_metric( metric, aggregation_method, granularity): # NOTE(jd) Only delete if the key is strictly inferior to @@ -327,12 +358,26 @@ 
class CarbonaraBasedStorage(storage.StorageDriver): sampling=d.granularity, aggregation_method=agg_method, timeseries=timeseries, max_size=d.points) + try: + unaggregated = self._get_unaggregated_timeserie_and_unserialize( # noqa + metric) + except (storage.MetricDoesNotExist, CorruptionError) as e: + # NOTE(jd) This case is not really possible – you can't + # have archives with splits and no unaggregated + # timeserie… + LOG.error( + "Unable to find unaggregated timeserie for " + "metric %s, unable to upgrade data: %s", + metric.id, e) + break + oldest_mutable_timestamp = ( + unaggregated.first_block_timestamp() + ) for key, split in ts.split(): - self._store_metric_measures( - metric, key, ts.aggregation_method, - ts.sampling, split.serialize(key, self.WRITE_FULL), - offset=(0 if self.WRITE_FULL else - split.offset_from_split())) + self._store_timeserie_split( + metric, key, split, + ts.aggregation_method, + d, oldest_mutable_timestamp) for key in all_keys: self._delete_metric_measures( metric, key, agg_method, @@ -423,8 +468,9 @@ class CarbonaraBasedStorage(storage.StorageDriver): self._add_measures, ((aggregation, d, metric, carbonara.TimeSerie(bound_timeserie.ts[ - carbonara.TimeSerie.round_timestamp( - tstamp, d.granularity * 10e8):])) + carbonara.round_timestamp( + tstamp, d.granularity * 10e8):]), + bound_timeserie.first_block_timestamp()) for aggregation in agg_methods for d in metric.archive_policy.definition)) diff --git a/gnocchi/storage/ceph.py b/gnocchi/storage/ceph.py index 8aadea90..e234c194 100644 --- a/gnocchi/storage/ceph.py +++ b/gnocchi/storage/ceph.py @@ -24,7 +24,6 @@ from oslo_config import cfg from oslo_log import log from oslo_utils import importutils -from gnocchi import carbonara from gnocchi import storage from gnocchi.storage import _carbonara @@ -254,10 +253,13 @@ class CephStorage(_carbonara.CarbonaraBasedStorage): self.ioctx.write_full(name, "metric created") def _store_metric_measures(self, metric, timestamp_key, aggregation, - 
granularity, data, offset=0, version=3): + granularity, data, offset=None, version=3): name = self._get_object_name(metric, timestamp_key, aggregation, granularity, version) - self.ioctx.write(name, data, offset=offset) + if offset is None: + self.ioctx.write_full(name, data) + else: + self.ioctx.write(name, data, offset=offset) self.ioctx.set_xattr("gnocchi_%s_container" % metric.id, name, "") def _delete_metric_measures(self, metric, timestamp_key, aggregation, @@ -290,10 +292,6 @@ class CephStorage(_carbonara.CarbonaraBasedStorage): else: raise storage.MetricDoesNotExist(metric) - def _get_measures_to_update(self, metric, agg, apolicy, timeserie): - return carbonara.AggregatedTimeSerie( - apolicy.granularity, agg, max_size=apolicy.points) - def _list_split_keys_for_metric(self, metric, aggregation, granularity, version=None): try: diff --git a/gnocchi/storage/file.py b/gnocchi/storage/file.py index 45b01c1b..7b5bb53b 100644 --- a/gnocchi/storage/file.py +++ b/gnocchi/storage/file.py @@ -235,7 +235,7 @@ class FileStorage(_carbonara.CarbonaraBasedStorage): metric, aggregation, timestamp_key, granularity, version)) def _store_metric_measures(self, metric, timestamp_key, aggregation, - granularity, data, offset=0, version=3): + granularity, data, offset=None, version=3): self._atomic_file_store( self._build_metric_path_for_split(metric, aggregation, timestamp_key, granularity, diff --git a/gnocchi/storage/swift.py b/gnocchi/storage/swift.py index 48d4633e..3c4d1004 100644 --- a/gnocchi/storage/swift.py +++ b/gnocchi/storage/swift.py @@ -190,7 +190,7 @@ class SwiftStorage(_carbonara.CarbonaraBasedStorage): self._bulk_delete(self.MEASURE_PREFIX, files) def _store_metric_measures(self, metric, timestamp_key, aggregation, - granularity, data, offset=0, version=3): + granularity, data, offset=None, version=3): self.swift.put_object( self._container_name(metric), self._object_name(timestamp_key, aggregation, granularity, diff --git 
a/gnocchi/tests/storage/test_carbonara.py b/gnocchi/tests/storage/test_carbonara.py index aa375277..afd7ed46 100644 --- a/gnocchi/tests/storage/test_carbonara.py +++ b/gnocchi/tests/storage/test_carbonara.py @@ -28,7 +28,7 @@ from gnocchi.tests import base as tests_base from gnocchi import utils -def _serialize_v2(self): +def _serialize_v2(self, key): d = {'values': dict((timestamp.value, float(v)) for timestamp, v in six.iteritems(self.ts.dropna()))} @@ -49,10 +49,22 @@ class TestCarbonaraMigration(tests_base.TestCase): # serialise in old format with mock.patch('gnocchi.carbonara.AggregatedTimeSerie.serialize', autospec=True) as f: - with mock.patch('gnocchi.carbonara.AggregatedTimeSerie.' + with mock.patch('gnocchi.carbonara.SplitKey.' 'POINTS_PER_SPLIT', 14400): f.side_effect = _serialize_v2 + # NOTE(jd) This is just to have an unaggregated timserie for + # the upgrade code, I don't think the values are correct lol + ts = carbonara.BoundTimeSerie( + block_size=self.metric.archive_policy.max_block_size, + back_window=self.metric.archive_policy.back_window) + ts.set_values([ + storage.Measure( + datetime.datetime(2016, 7, 17, 23, 59, 0), 23), + ]) + self.storage._store_unaggregated_timeserie(self.metric, + ts.serialize()) + for d, agg in itertools.product( self.metric.archive_policy.definition, ['mean', 'max']): @@ -72,8 +84,10 @@ class TestCarbonaraMigration(tests_base.TestCase): for key, split in ts.split(): self.storage._store_metric_measures( - self.metric, key, agg, d.granularity, - split.serialize(), offset=0, version=None) + self.metric, + str(key), + agg, d.granularity, + split.serialize(key), offset=0, version=None) def upgrade(self): with mock.patch.object(self.index, 'list_metrics') as f: @@ -166,10 +180,22 @@ class TestCarbonaraMigration(tests_base.TestCase): # serialise in old format with mock.patch('gnocchi.carbonara.AggregatedTimeSerie.serialize', autospec=True) as f: - with mock.patch('gnocchi.carbonara.AggregatedTimeSerie.' 
- 'POINTS_PER_SPLIT', 14400): + with mock.patch('gnocchi.carbonara.SplitKey.POINTS_PER_SPLIT', + 14400): f.side_effect = _serialize_v2 + # NOTE(jd) This is just to have an unaggregated timserie for + # the upgrade code, I don't think the values are correct lol + ts = carbonara.BoundTimeSerie( + block_size=self.metric2.archive_policy.max_block_size, + back_window=self.metric2.archive_policy.back_window) + ts.set_values([ + storage.Measure( + datetime.datetime(2016, 7, 17, 23, 59, 0), 23), + ]) + self.storage._store_unaggregated_timeserie(self.metric2, + ts.serialize()) + for d, agg in itertools.product( self.metric2.archive_policy.definition, ['mean', 'max']): @@ -189,8 +215,8 @@ class TestCarbonaraMigration(tests_base.TestCase): for key, split in ts.split(): self.storage._store_metric_measures( - self.metric2, key, agg, d.granularity, - split.serialize(), offset=0, version=None) + self.metric2, str(key), agg, d.granularity, + split.serialize(key), offset=0, version=None) with mock.patch.object( self.storage, '_get_measures_and_unserialize', diff --git a/gnocchi/tests/test_carbonara.py b/gnocchi/tests/test_carbonara.py index 9a4fc0dc..9ea38dae 100644 --- a/gnocchi/tests/test_carbonara.py +++ b/gnocchi/tests/test_carbonara.py @@ -172,8 +172,9 @@ class TestAggregatedTimeSerie(base.BaseTestCase): self.assertEqual(5.48, ts[datetime.datetime(2014, 1, 1, 12, 0, 0)]) # Serialize and unserialize + key = ts.get_split_key() ts = carbonara.AggregatedTimeSerie.unserialize( - ts.serialize(), ts.get_split_key(ts.first, 60), '74pct', 60) + ts.serialize(key), key, '74pct', 60) ts.update(carbonara.TimeSerie.from_tuples( [(datetime.datetime(2014, 1, 1, 12, 0, 0), 3), @@ -611,9 +612,10 @@ class TestAggregatedTimeSerie(base.BaseTestCase): (datetime.datetime(2014, 1, 1, 12, 2, 12, 532), 1), ], before_truncate_callback=ts.update) + key = ts.get_split_key() self.assertEqual(ts, carbonara.AggregatedTimeSerie.unserialize( - ts.serialize(), ts.get_split_key(ts.first, 0.5), + 
ts.serialize(key), key, 'mean', 0.5)) def test_no_truncation(self): @@ -881,25 +883,25 @@ class TestAggregatedTimeSerie(base.BaseTestCase): ], output) def test_split_key(self): - self.assertEqual( - "1420146000.0", - carbonara.AggregatedTimeSerie.get_split_key( - datetime.datetime(2015, 1, 1, 23, 34), 5)) - self.assertEqual( - "1420110000.0", - carbonara.AggregatedTimeSerie.get_split_key( - datetime.datetime(2015, 1, 1, 15, 3), 5)) - - def test_split_key_datetime(self): self.assertEqual( datetime.datetime(2014, 10, 7), - carbonara.AggregatedTimeSerie.get_split_key_datetime( + carbonara.SplitKey.from_timestamp_and_sampling( datetime.datetime(2015, 1, 1, 15, 3), 3600)) self.assertEqual( datetime.datetime(2014, 12, 31, 18), - carbonara.AggregatedTimeSerie.get_split_key_datetime( + carbonara.SplitKey.from_timestamp_and_sampling( datetime.datetime(2015, 1, 1, 15, 3), 58)) + def test_split_key_next(self): + self.assertEqual( + datetime.datetime(2015, 3, 6), + next(carbonara.SplitKey.from_timestamp_and_sampling( + datetime.datetime(2015, 1, 1, 15, 3), 3600))) + self.assertEqual( + datetime.datetime(2015, 8, 3), + next(next(carbonara.SplitKey.from_timestamp_and_sampling( + datetime.datetime(2015, 1, 1, 15, 3), 3600)))) + def test_split(self): sampling = 5 points = 100000 @@ -915,14 +917,14 @@ class TestAggregatedTimeSerie(base.BaseTestCase): self.assertEqual( math.ceil((points / float(sampling)) - / carbonara.AggregatedTimeSerie.POINTS_PER_SPLIT), + / carbonara.SplitKey.POINTS_PER_SPLIT), len(grouped_points)) self.assertEqual("0.0", - grouped_points[0][0]) + str(carbonara.SplitKey(grouped_points[0][0]))) # 3600 × 5s = 5 hours - self.assertEqual("18000.0", + self.assertEqual(datetime.datetime(1970, 1, 1, 5), grouped_points[1][0]) - self.assertEqual(carbonara.AggregatedTimeSerie.POINTS_PER_SPLIT, + self.assertEqual(carbonara.SplitKey.POINTS_PER_SPLIT, len(grouped_points[0][1])) def test_from_timeseries(self): diff --git a/gnocchi/tests/test_storage.py 
b/gnocchi/tests/test_storage.py index cc439cf4..36512cb7 100644 --- a/gnocchi/tests/test_storage.py +++ b/gnocchi/tests/test_storage.py @@ -141,7 +141,7 @@ class TestStorageDriver(tests_base.TestCase): self.assertEqual(3661, len(self.storage.get_measures(m))) - @mock.patch('gnocchi.carbonara.AggregatedTimeSerie.POINTS_PER_SPLIT', 48) + @mock.patch('gnocchi.carbonara.SplitKey.POINTS_PER_SPLIT', 48) def test_add_measures_update_subset_split(self): m, m_sql = self._create_metric('medium') measures = [ @@ -181,7 +181,7 @@ class TestStorageDriver(tests_base.TestCase): self.trigger_processing([str(m.id)]) for __, args, __ in c.mock_calls: self.assertEqual( - args[3].first, carbonara.TimeSerie.round_timestamp( + args[3].first, carbonara.round_timestamp( new_point, args[1].granularity * 10e8)) def test_delete_old_measures(self): -- GitLab From 452a23864cc100b2683cc82b79892f919234ead3 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Mon, 29 Aug 2016 18:04:47 +0200 Subject: [PATCH 0355/1483] storage: re-store/write read-only splits Change-Id: I24b6c25999e7b6854ba79bf6972a6c4dea5ff0fc --- gnocchi/carbonara.py | 16 +++++- gnocchi/storage/_carbonara.py | 93 +++++++++++++++++++++++++---------- gnocchi/tests/test_storage.py | 47 ++++++++++++++++++ 3 files changed, 129 insertions(+), 27 deletions(-) diff --git a/gnocchi/carbonara.py b/gnocchi/carbonara.py index 4279031a..e8684cfb 100644 --- a/gnocchi/carbonara.py +++ b/gnocchi/carbonara.py @@ -305,6 +305,10 @@ class SplitKey(pandas.Timestamp): timestamp, freq=sampling * cls.POINTS_PER_SPLIT * 10e8), sampling) + @classmethod + def from_key_string(cls, keystr, sampling): + return cls._init(float(keystr) * 10e8, sampling) + def __next__(self): """Get the split key of the next split. 
@@ -425,9 +429,17 @@ class AggregatedTimeSerie(TimeSerie): y = pandas.to_datetime(y, unit='s') return cls.from_data(sampling, agg_method, y, x) - def get_split_key(self): + def get_split_key(self, timestamp=None): + """Return the split key for a particular timestamp. + + :param timestamp: If None, the first timestamp of the timeserie + is used. + :return: A SplitKey object. + """ + if timestamp is None: + timestamp = self.first return SplitKey.from_timestamp_and_sampling( - self.first, self.sampling) + timestamp, self.sampling) def serialize(self, start=None, padded=True): """Serialize an aggregated timeserie. diff --git a/gnocchi/storage/_carbonara.py b/gnocchi/storage/_carbonara.py index 8d642b00..589bbe53 100644 --- a/gnocchi/storage/_carbonara.py +++ b/gnocchi/storage/_carbonara.py @@ -243,8 +243,9 @@ class CarbonaraBasedStorage(storage.StorageDriver): pass else: if existing is not None: - # FIXME(jd) not update but rather concat ts + split - existing.update(split) + if split is not None: + # FIXME(jd) not update but rather concat ts + split + existing.update(split) split = existing else: offset = split.offset_from_timestamp(key) @@ -254,23 +255,74 @@ class CarbonaraBasedStorage(storage.StorageDriver): def _add_measures(self, aggregation, archive_policy_def, metric, timeserie, + previous_oldest_mutable_timestamp, oldest_mutable_timestamp): ts = carbonara.AggregatedTimeSerie( archive_policy_def.granularity, aggregation, max_size=archive_policy_def.points) ts.update(timeserie) - for key, split in ts.split(): - self._store_timeserie_split( - metric, key, split, aggregation, archive_policy_def, - oldest_mutable_timestamp) - if ts.last and archive_policy_def.timespan: + # Don't do anything if the timeserie is empty + if not ts: + return + + # We only need to check for rewrite if driver is not in WRITE_FULL mode + # and if we already stored splits once + need_rewrite = ( + not self.WRITE_FULL + and previous_oldest_mutable_timestamp is not None + ) + + if 
archive_policy_def.timespan or need_rewrite: + existing_keys = self._list_split_keys_for_metric( + metric, aggregation, archive_policy_def.granularity) + + # First delete old splits + if archive_policy_def.timespan: oldest_point_to_keep = ts.last - datetime.timedelta( seconds=archive_policy_def.timespan) - self._delete_metric_measures_before( - metric, aggregation, archive_policy_def.granularity, - oldest_point_to_keep) + oldest_key_to_keep = ts.get_split_key(oldest_point_to_keep) + oldest_key_to_keep_s = str(oldest_key_to_keep) + for key in list(existing_keys): + # NOTE(jd) Only delete if the key is strictly inferior to + # the timestamp; we don't delete any timeserie split that + # contains our timestamp, so we prefer to keep a bit more + # than deleting too much + if key < oldest_key_to_keep_s: + self._delete_metric_measures( + metric, key, aggregation, + archive_policy_def.granularity) + existing_keys.remove(key) + else: + oldest_key_to_keep = carbonara.SplitKey(0) + + # Rewrite all read-only splits just for fun (and compression). This + # only happens if `previous_oldest_mutable_timestamp' exists, which + # means we already wrote some splits at some point – so this is not the + # first time we treat this timeserie. + if need_rewrite: + previous_oldest_mutable_key = str(ts.get_split_key( + previous_oldest_mutable_timestamp)) + oldest_mutable_key = str(ts.get_split_key( + oldest_mutable_timestamp)) + + if previous_oldest_mutable_key != oldest_mutable_key: + for key in existing_keys: + if previous_oldest_mutable_key <= key < oldest_mutable_key: + # NOTE(jd) Rewrite it entirely for fun (and later for + # compression). For that, we just pass None as split. 
+ self._store_timeserie_split( + metric, carbonara.SplitKey.from_key_string( + key, archive_policy_def.granularity), + None, aggregation, archive_policy_def, + oldest_mutable_timestamp) + + for key, split in ts.split(): + if key >= oldest_key_to_keep: + self._store_timeserie_split( + metric, key, split, aggregation, archive_policy_def, + oldest_mutable_timestamp) def add_measures(self, metric, measures): self._store_new_measures(metric, msgpackutils.dumps( @@ -298,21 +350,6 @@ class CarbonaraBasedStorage(storage.StorageDriver): # here too self._delete_metric(metric) - def _delete_metric_measures_before(self, metric, aggregation_method, - granularity, timestamp): - """Delete measures for a metric before a timestamp.""" - ts = str(carbonara.SplitKey.from_timestamp_and_sampling( - timestamp, granularity)) - for key in self._list_split_keys_for_metric( - metric, aggregation_method, granularity): - # NOTE(jd) Only delete if the key is strictly inferior to - # the timestamp; we don't delete any timeserie split that - # contains our timestamp, so we prefer to keep a bit more - # than deleting too much - if key < ts: - self._delete_metric_measures( - metric, key, aggregation_method, granularity) - @staticmethod def _delete_metric_measures(metric, timestamp_key, aggregation, granularity, version=3): @@ -451,6 +488,11 @@ class CarbonaraBasedStorage(storage.StorageDriver): ts = carbonara.BoundTimeSerie( block_size=mbs, back_window=metric.archive_policy.back_window) + current_first_block_timestamp = None + else: + current_first_block_timestamp = ( + ts.first_block_timestamp() + ) # NOTE(jd) This is Python where you need such # hack to pass a variable around a closure, @@ -470,6 +512,7 @@ class CarbonaraBasedStorage(storage.StorageDriver): carbonara.TimeSerie(bound_timeserie.ts[ carbonara.round_timestamp( tstamp, d.granularity * 10e8):]), + current_first_block_timestamp, bound_timeserie.first_block_timestamp()) for aggregation in agg_methods for d in 
metric.archive_policy.definition)) diff --git a/gnocchi/tests/test_storage.py b/gnocchi/tests/test_storage.py index 36512cb7..6553df37 100644 --- a/gnocchi/tests/test_storage.py +++ b/gnocchi/tests/test_storage.py @@ -224,6 +224,53 @@ class TestStorageDriver(tests_base.TestCase): self.storage._list_split_keys_for_metric( self.metric, "mean", 300.0)) + def test_rewrite_measures(self): + self.metric, metric_sql = self._create_metric("high") + + # First store some points scattered across different splits + self.storage.add_measures(self.metric, [ + storage.Measure(datetime.datetime(2016, 1, 1, 12, 0, 1), 69), + storage.Measure(datetime.datetime(2016, 1, 2, 13, 7, 31), 42), + storage.Measure(datetime.datetime(2016, 1, 4, 14, 9, 31), 4), + storage.Measure(datetime.datetime(2016, 1, 6, 15, 12, 45), 44), + ]) + self.trigger_processing() + + self.assertEqual({'1451520000.0', '1451736000.0', '1451952000.0'}, + self.storage._list_split_keys_for_metric( + self.metric, "mean", 60.0)) + + self.assertEqual([ + (utils.datetime_utc(2016, 1, 1, 12), 60.0, 69), + (utils.datetime_utc(2016, 1, 2, 13, 7), 60.0, 42), + (utils.datetime_utc(2016, 1, 4, 14, 9), 60.0, 4), + (utils.datetime_utc(2016, 1, 6, 15, 12), 60.0, 44), + ], self.storage.get_measures(self.metric, granularity=60.0)) + + # Now store brand new points that should force a rewrite of one of the + # split (keep in mind the back window size in one hour here). We move + # the BoundTimeSerie processing timeserie to be between + # "2016-01-07 16:12:45" and "2016-01-07 17:12:45". 
+ self.storage.add_measures(self.metric, [ + storage.Measure(datetime.datetime(2016, 1, 7, 16, 18, 45), 45), + storage.Measure(datetime.datetime(2016, 1, 7, 17, 12, 45), 46), + ]) + self.trigger_processing() + + self.assertEqual({'1452168000.0', '1451736000.0', + '1451520000.0', '1451952000.0'}, + self.storage._list_split_keys_for_metric( + self.metric, "mean", 60.0)) + + self.assertEqual([ + (utils.datetime_utc(2016, 1, 1, 12), 60.0, 69), + (utils.datetime_utc(2016, 1, 2, 13, 7), 60.0, 42), + (utils.datetime_utc(2016, 1, 4, 14, 9), 60.0, 4), + (utils.datetime_utc(2016, 1, 6, 15, 12), 60.0, 44), + (utils.datetime_utc(2016, 1, 7, 16, 18), 60.0, 45), + (utils.datetime_utc(2016, 1, 7, 17, 12), 60.0, 46), + ], self.storage.get_measures(self.metric, granularity=60.0)) + def test_updated_measures(self): self.storage.add_measures(self.metric, [ storage.Measure(datetime.datetime(2014, 1, 1, 12, 0, 1), 69), -- GitLab From 1a425863e9e370c0d6307e1b078c6f8647d88940 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Fri, 19 Aug 2016 19:17:10 +0200 Subject: [PATCH 0356/1483] carbonara: use calcsize rather than hard coded value Change-Id: I166b8ea3753359fb457f91ada53aed466db97f11 --- gnocchi/carbonara.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/gnocchi/carbonara.py b/gnocchi/carbonara.py index e8684cfb..215d74bf 100644 --- a/gnocchi/carbonara.py +++ b/gnocchi/carbonara.py @@ -343,7 +343,7 @@ class AggregatedTimeSerie(TimeSerie): _AGG_METHOD_PCT_RE = re.compile(r"([1-9][0-9]?)pct") - SERIAL_LEN = 9 + SERIAL_LEN = struct.calcsize(" Date: Fri, 19 Aug 2016 20:41:47 +0200 Subject: [PATCH 0357/1483] carbonara: compress non padded timeseries This creates 2 distinct formats available when serializing AggregatedTimeSerie: once is "padded" so it can be written based on offset (Ceph) and the other one is "compressed" so it can be written to object stores (Swift, file). 
This compressed format is also used to store timeseries splits that are out of reach of the back window. Change-Id: I6f5431945ad9873d280eeb0f6b4907ade493406b --- doc/source/architecture.rst | 24 +-- gnocchi/carbonara.py | 157 +++++++++++++----- gnocchi/storage/_carbonara.py | 16 +- gnocchi/storage/file.py | 5 +- gnocchi/storage/swift.py | 5 +- gnocchi/tests/test_carbonara.py | 8 +- gnocchi/tests/test_storage.py | 57 ++++++- .../storage-engine-v3-b34bd0723abf292f.yaml | 13 ++ 8 files changed, 202 insertions(+), 83 deletions(-) create mode 100644 releasenotes/notes/storage-engine-v3-b34bd0723abf292f.yaml diff --git a/doc/source/architecture.rst b/doc/source/architecture.rst index 9f340e05..d7c682f6 100755 --- a/doc/source/architecture.rst +++ b/doc/source/architecture.rst @@ -59,37 +59,29 @@ How to plan for Gnocchi’s storage Gnocchi uses a custom file format based on its library *Carbonara*. In Gnocchi, a time series is a collection of points, where a point is a given measure, or sample, in the lifespan of a time series. 
The storage format is compressed -using various techniques, therefore the computing of a time series' size can -be estimated based on its worst case scenario with the following formula:: +using various techniques, therefore the computing of a time series' size can be +estimated based on its **worst** case scenario with the following formula:: - number of points × 9 bytes = size in bytes + number of points × 8 bytes = size in bytes The number of points you want to keep is usually determined by the following formula:: - number of points = timespan ÷ granularity + number of points = timespan ÷ granularity For example, if you want to keep a year of data with a one minute resolution:: - number of points = (365 days × 24 hours × 60 minutes) ÷ 1 minute - number of points = 525 600 + number of points = (365 days × 24 hours × 60 minutes) ÷ 1 minute + number of points = 525 600 Then:: - size in bytes = 525 600 × 9 = 4 730 400 bytes = 4 620 KiB + size in bytes = 525 600 × 8 = 4 204 800 bytes = 4 106 KiB This is just for a single aggregated time series. If your archive policy uses the 8 default aggregation methods (mean, min, max, sum, std, median, count, 95pct) with the same "one year, one minute aggregations" resolution, the space -used will go up to a maximum of 8 × 4.5 MiB = 36 MiB. - -.. note:: - - The Ceph driver does not utilize compression as the Swift and File drivers - do in favour of more efficient write support. Therefore, each point is - always 9B in Ceph where as the Swift and File backends may have a smaller - storage footprint but higher I/O requirements. It also requires some - additional formatting which may add to disk size. +used will go up to a maximum of 8 × 4.1 MiB = 32.8 MiB. 
How to set the archive policy and granularity diff --git a/gnocchi/carbonara.py b/gnocchi/carbonara.py index 215d74bf..25833a0a 100644 --- a/gnocchi/carbonara.py +++ b/gnocchi/carbonara.py @@ -28,6 +28,7 @@ import struct import time import iso8601 +import lz4 import msgpack import pandas import six @@ -343,7 +344,8 @@ class AggregatedTimeSerie(TimeSerie): _AGG_METHOD_PCT_RE = re.compile(r"([1-9][0-9]?)pct") - SERIAL_LEN = struct.calcsize("= next(key) key_as_str = str(key) if write_full: - offset = None try: existing = self._get_measures_and_unserialize( metric, key_as_str, aggregation, @@ -243,15 +242,16 @@ class CarbonaraBasedStorage(storage.StorageDriver): pass else: if existing is not None: - if split is not None: - # FIXME(jd) not update but rather concat ts + split - existing.update(split) - split = existing - else: - offset = split.offset_from_timestamp(key) + if split is None: + split = existing + else: + split.merge(existing) + + offset, data = split.serialize(key, compressed=write_full) + return self._store_metric_measures( metric, key_as_str, aggregation, archive_policy_def.granularity, - split.serialize(key, write_full), offset=offset) + data, offset=offset) def _add_measures(self, aggregation, archive_policy_def, metric, timeserie, diff --git a/gnocchi/storage/file.py b/gnocchi/storage/file.py index 7b5bb53b..10ff4c68 100644 --- a/gnocchi/storage/file.py +++ b/gnocchi/storage/file.py @@ -22,7 +22,6 @@ import shutil import tempfile import uuid -import lz4 from oslo_config import cfg import six @@ -240,7 +239,7 @@ class FileStorage(_carbonara.CarbonaraBasedStorage): self._build_metric_path_for_split(metric, aggregation, timestamp_key, granularity, version), - lz4.dumps(data)) + data) def _delete_metric(self, metric): path = self._build_metric_dir(metric) @@ -258,7 +257,7 @@ class FileStorage(_carbonara.CarbonaraBasedStorage): metric, aggregation, timestamp_key, granularity, version) try: with open(path, 'rb') as aggregation_file: - return 
lz4.loads(aggregation_file.read()) + return aggregation_file.read() except IOError as e: if e.errno == errno.ENOENT: if os.path.exists(self._build_metric_dir(metric)): diff --git a/gnocchi/storage/swift.py b/gnocchi/storage/swift.py index 3c4d1004..ee2bf7a1 100644 --- a/gnocchi/storage/swift.py +++ b/gnocchi/storage/swift.py @@ -18,7 +18,6 @@ import contextlib import datetime import uuid -import lz4 from oslo_config import cfg from oslo_log import log import six @@ -195,7 +194,7 @@ class SwiftStorage(_carbonara.CarbonaraBasedStorage): self._container_name(metric), self._object_name(timestamp_key, aggregation, granularity, version), - lz4.dumps(data)) + data) def _delete_metric_measures(self, metric, timestamp_key, aggregation, granularity, version=3): @@ -239,7 +238,7 @@ class SwiftStorage(_carbonara.CarbonaraBasedStorage): raise raise storage.AggregationDoesNotExist(metric, aggregation) raise - return lz4.loads(contents) + return contents def _list_split_keys_for_metric(self, metric, aggregation, granularity, version=None): diff --git a/gnocchi/tests/test_carbonara.py b/gnocchi/tests/test_carbonara.py index 9ea38dae..f674d0bd 100644 --- a/gnocchi/tests/test_carbonara.py +++ b/gnocchi/tests/test_carbonara.py @@ -16,6 +16,7 @@ import datetime import math +import fixtures from oslo_utils import timeutils from oslotest import base # TODO(jd) We shouldn't use pandas here @@ -27,6 +28,7 @@ from gnocchi import carbonara class TestBoundTimeSerie(base.BaseTestCase): def test_benchmark(self): + self.useFixture(fixtures.Timeout(120, gentle=True)) carbonara.AggregatedTimeSerie.benchmark() @staticmethod @@ -173,8 +175,9 @@ class TestAggregatedTimeSerie(base.BaseTestCase): # Serialize and unserialize key = ts.get_split_key() + o, s = ts.serialize(key) ts = carbonara.AggregatedTimeSerie.unserialize( - ts.serialize(key), key, '74pct', 60) + s, key, '74pct', ts.sampling) ts.update(carbonara.TimeSerie.from_tuples( [(datetime.datetime(2014, 1, 1, 12, 0, 0), 3), @@ -613,9 +616,10 @@ 
class TestAggregatedTimeSerie(base.BaseTestCase): ], before_truncate_callback=ts.update) key = ts.get_split_key() + o, s = ts.serialize(key) self.assertEqual(ts, carbonara.AggregatedTimeSerie.unserialize( - ts.serialize(key), key, + s, key, 'mean', 0.5)) def test_no_truncation(self): diff --git a/gnocchi/tests/test_storage.py b/gnocchi/tests/test_storage.py index 6553df37..f56b412b 100644 --- a/gnocchi/tests/test_storage.py +++ b/gnocchi/tests/test_storage.py @@ -225,7 +225,15 @@ class TestStorageDriver(tests_base.TestCase): self.metric, "mean", 300.0)) def test_rewrite_measures(self): - self.metric, metric_sql = self._create_metric("high") + # Create an archive policy that spans on several splits. Each split + # being 3600 points, let's go for 36k points so we have 10 splits. + apname = str(uuid.uuid4()) + ap = archive_policy.ArchivePolicy(apname, 0, [(36000, 60)]) + self.index.create_archive_policy(ap) + self.metric = storage.Metric(uuid.uuid4(), ap) + self.index.create_metric(self.metric.id, str(uuid.uuid4()), + str(uuid.uuid4()), + apname) # First store some points scattered across different splits self.storage.add_measures(self.metric, [ @@ -236,10 +244,27 @@ class TestStorageDriver(tests_base.TestCase): ]) self.trigger_processing() - self.assertEqual({'1451520000.0', '1451736000.0', '1451952000.0'}, + splits = {'1451520000.0', '1451736000.0', '1451952000.0'} + self.assertEqual(splits, self.storage._list_split_keys_for_metric( self.metric, "mean", 60.0)) + if self.storage.WRITE_FULL: + assertCompressedIfWriteFull = self.assertTrue + else: + assertCompressedIfWriteFull = self.assertFalse + + data = self.storage._get_measures( + self.metric, '1451520000.0', "mean", 60.0) + self.assertTrue(carbonara.AggregatedTimeSerie.is_compressed(data)) + data = self.storage._get_measures( + self.metric, '1451736000.0', "mean", 60.0) + self.assertTrue(carbonara.AggregatedTimeSerie.is_compressed(data)) + data = self.storage._get_measures( + self.metric, '1451952000.0', "mean", 
60.0) + assertCompressedIfWriteFull( + carbonara.AggregatedTimeSerie.is_compressed(data)) + self.assertEqual([ (utils.datetime_utc(2016, 1, 1, 12), 60.0, 69), (utils.datetime_utc(2016, 1, 2, 13, 7), 60.0, 42), @@ -249,26 +274,40 @@ class TestStorageDriver(tests_base.TestCase): # Now store brand new points that should force a rewrite of one of the # split (keep in mind the back window size in one hour here). We move - # the BoundTimeSerie processing timeserie to be between - # "2016-01-07 16:12:45" and "2016-01-07 17:12:45". + # the BoundTimeSerie processing timeserie far away from its current + # range. self.storage.add_measures(self.metric, [ - storage.Measure(datetime.datetime(2016, 1, 7, 16, 18, 45), 45), - storage.Measure(datetime.datetime(2016, 1, 7, 17, 12, 45), 46), + storage.Measure(datetime.datetime(2016, 1, 10, 16, 18, 45), 45), + storage.Measure(datetime.datetime(2016, 1, 10, 17, 12, 45), 46), ]) self.trigger_processing() - self.assertEqual({'1452168000.0', '1451736000.0', + self.assertEqual({'1452384000.0', '1451736000.0', '1451520000.0', '1451952000.0'}, self.storage._list_split_keys_for_metric( self.metric, "mean", 60.0)) + data = self.storage._get_measures( + self.metric, '1451520000.0', "mean", 60.0) + self.assertTrue(carbonara.AggregatedTimeSerie.is_compressed(data)) + data = self.storage._get_measures( + self.metric, '1451736000.0', "mean", 60.0) + self.assertTrue(carbonara.AggregatedTimeSerie.is_compressed(data)) + data = self.storage._get_measures( + self.metric, '1451952000.0', "mean", 60.0) + # Now this one is compressed because it has been rewritten! 
+ self.assertTrue(carbonara.AggregatedTimeSerie.is_compressed(data)) + data = self.storage._get_measures( + self.metric, '1452384000.0', "mean", 60.0) + assertCompressedIfWriteFull( + carbonara.AggregatedTimeSerie.is_compressed(data)) self.assertEqual([ (utils.datetime_utc(2016, 1, 1, 12), 60.0, 69), (utils.datetime_utc(2016, 1, 2, 13, 7), 60.0, 42), (utils.datetime_utc(2016, 1, 4, 14, 9), 60.0, 4), (utils.datetime_utc(2016, 1, 6, 15, 12), 60.0, 44), - (utils.datetime_utc(2016, 1, 7, 16, 18), 60.0, 45), - (utils.datetime_utc(2016, 1, 7, 17, 12), 60.0, 46), + (utils.datetime_utc(2016, 1, 10, 16, 18), 60.0, 45), + (utils.datetime_utc(2016, 1, 10, 17, 12), 60.0, 46), ], self.storage.get_measures(self.metric, granularity=60.0)) def test_updated_measures(self): diff --git a/releasenotes/notes/storage-engine-v3-b34bd0723abf292f.yaml b/releasenotes/notes/storage-engine-v3-b34bd0723abf292f.yaml new file mode 100644 index 00000000..83aa09df --- /dev/null +++ b/releasenotes/notes/storage-engine-v3-b34bd0723abf292f.yaml @@ -0,0 +1,13 @@ +--- +features: + - The Carbonara based storage engine has been updated and greatly improved. + It now features fast write for Ceph (no change for file and Swift based + drivers) by using an append method. + It also features on the fly data compression (using LZ4) of the aggregated + time serie, reducing the data space usage by at least 50 %. +upgrade: + - gnocchi-upgrade must be run before running the new version of + gnocchi-metric and the HTTP REST API in order to upgrade from version 2 of + the Carbonara storage engine to version 3. It will read all metrics and + convert them to new version 3 serialization format (compressing the data), + which might take some time. 
-- GitLab From c6e7751eb3a82dd9a69ef754d07af8f57d44cbff Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Sun, 21 Aug 2016 09:46:21 +0200 Subject: [PATCH 0358/1483] carbonara: optimize uncompressed serialization Storing the value in nanoseconds is faster than calling for it each time in Pandas. Also do not do the multiplication twice for the index location. And supposedly they are already float, so that just slows down the serialization. Change-Id: Iaba7f65d8f46770d8b5438cc360f1892a5bcfd3d --- gnocchi/carbonara.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/gnocchi/carbonara.py b/gnocchi/carbonara.py index 25833a0a..4e8d4cb7 100644 --- a/gnocchi/carbonara.py +++ b/gnocchi/carbonara.py @@ -507,15 +507,15 @@ class AggregatedTimeSerie(TimeSerie): # aggregate value is 0. calculate how many seconds from start the # series runs until and initialize list to store alternating # delimiter, float entries + first = self.first.value # NOTE(jd) needed because faster e_offset = int( - (self.last.value - self.first.value) // offset_div) + 1 + (self.last.value - first) // offset_div) + 1 serial = [False] * e_offset * 2 - first = self.first.value # NOTE(jd) needed because faster for i, v in self.ts.iteritems(): # overwrite zero padding with real points and set flag True - loc = int((i.value - first) // offset_div) - serial[loc * 2] = True - serial[loc * 2 + 1] = float(v) + loc = int((i.value - first) // offset_div) * 2 + serial[loc] = True + serial[loc + 1] = v offset = int((first - start) // offset_div) * self.PADDED_SERIAL_LEN return offset, struct.pack('<' + '?d' * e_offset, *serial) -- GitLab From 0b50825ac34d70c4a8503eb1cc09c787d3855d5d Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Wed, 31 Aug 2016 10:30:50 +0200 Subject: [PATCH 0359/1483] carbonara: Timeserie.aggregate Change-Id: If71ebb78ce0b8fb5818f2d62cd1575384ea54995 Signed-off-by: Julien Danjou --- gnocchi/carbonara.py | 6 ++++++ gnocchi/storage/_carbonara.py | 5 ++--- 2 files 
changed, 8 insertions(+), 3 deletions(-) diff --git a/gnocchi/carbonara.py b/gnocchi/carbonara.py index 4e8d4cb7..03bbce29 100644 --- a/gnocchi/carbonara.py +++ b/gnocchi/carbonara.py @@ -183,6 +183,12 @@ class TimeSerie(object): def serialize(self): return msgpack.dumps(self.to_dict()) + def aggregate(self, granularity, aggregation_method='mean', max_size=None): + ats = AggregatedTimeSerie( + granularity, aggregation_method, max_size=max_size) + ats.update(self) + return ats + class BoundTimeSerie(TimeSerie): def __init__(self, ts=None, block_size=None, back_window=0): diff --git a/gnocchi/storage/_carbonara.py b/gnocchi/storage/_carbonara.py index 9e8a58d3..82b79837 100644 --- a/gnocchi/storage/_carbonara.py +++ b/gnocchi/storage/_carbonara.py @@ -257,11 +257,10 @@ class CarbonaraBasedStorage(storage.StorageDriver): metric, timeserie, previous_oldest_mutable_timestamp, oldest_mutable_timestamp): - ts = carbonara.AggregatedTimeSerie( + ts = timeserie.aggregate( archive_policy_def.granularity, aggregation, - max_size=archive_policy_def.points) - ts.update(timeserie) + archive_policy_def.points) # Don't do anything if the timeserie is empty if not ts: -- GitLab From cf413a437ff734325d1beced65d6651f282704c0 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Thu, 1 Sep 2016 19:53:00 +0200 Subject: [PATCH 0360/1483] tests/carbonara: use _serialize_v2 without mocking Change-Id: I29f4dfca070337eec4fb144f58cca38d0a5d37ba --- gnocchi/tests/storage/test_carbonara.py | 158 +++++++++++------------- 1 file changed, 75 insertions(+), 83 deletions(-) diff --git a/gnocchi/tests/storage/test_carbonara.py b/gnocchi/tests/storage/test_carbonara.py index afd7ed46..4fd5316d 100644 --- a/gnocchi/tests/storage/test_carbonara.py +++ b/gnocchi/tests/storage/test_carbonara.py @@ -28,10 +28,10 @@ from gnocchi.tests import base as tests_base from gnocchi import utils -def _serialize_v2(self, key): +def _serialize_v2(split): d = {'values': dict((timestamp.value, float(v)) for timestamp, v - 
in six.iteritems(self.ts.dropna()))} + in six.iteritems(split.ts.dropna()))} return msgpack.dumps(d) @@ -46,48 +46,43 @@ class TestCarbonaraMigration(tests_base.TestCase): self.storage._create_metric(self.metric) - # serialise in old format - with mock.patch('gnocchi.carbonara.AggregatedTimeSerie.serialize', - autospec=True) as f: - with mock.patch('gnocchi.carbonara.SplitKey.' - 'POINTS_PER_SPLIT', 14400): - f.side_effect = _serialize_v2 - - # NOTE(jd) This is just to have an unaggregated timserie for - # the upgrade code, I don't think the values are correct lol - ts = carbonara.BoundTimeSerie( - block_size=self.metric.archive_policy.max_block_size, - back_window=self.metric.archive_policy.back_window) - ts.set_values([ - storage.Measure( - datetime.datetime(2016, 7, 17, 23, 59, 0), 23), - ]) - self.storage._store_unaggregated_timeserie(self.metric, - ts.serialize()) - - for d, agg in itertools.product( - self.metric.archive_policy.definition, - ['mean', 'max']): - ts = carbonara.AggregatedTimeSerie( - sampling=d.granularity, aggregation_method=agg, - max_size=d.points) - - # NOTE: there is a split at 2016-07-18 on granularity 300 - ts.update(carbonara.TimeSerie.from_data( - [datetime.datetime(2016, 7, 17, 23, 59, 0), - datetime.datetime(2016, 7, 17, 23, 59, 4), - datetime.datetime(2016, 7, 17, 23, 59, 9), - datetime.datetime(2016, 7, 18, 0, 0, 0), - datetime.datetime(2016, 7, 18, 0, 0, 4), - datetime.datetime(2016, 7, 18, 0, 0, 9)], - [4, 5, 6, 7, 8, 9])) - - for key, split in ts.split(): - self.storage._store_metric_measures( - self.metric, - str(key), - agg, d.granularity, - split.serialize(key), offset=0, version=None) + with mock.patch('gnocchi.carbonara.SplitKey.' 
+ 'POINTS_PER_SPLIT', 14400): + # NOTE(jd) This is just to have an unaggregated timserie for + # the upgrade code, I don't think the values are correct lol + ts = carbonara.BoundTimeSerie( + block_size=self.metric.archive_policy.max_block_size, + back_window=self.metric.archive_policy.back_window) + ts.set_values([ + storage.Measure( + datetime.datetime(2016, 7, 17, 23, 59, 0), 23), + ]) + self.storage._store_unaggregated_timeserie(self.metric, + ts.serialize()) + + for d, agg in itertools.product( + self.metric.archive_policy.definition, + ['mean', 'max']): + ts = carbonara.AggregatedTimeSerie( + sampling=d.granularity, aggregation_method=agg, + max_size=d.points) + + # NOTE: there is a split at 2016-07-18 on granularity 300 + ts.update(carbonara.TimeSerie.from_data( + [datetime.datetime(2016, 7, 17, 23, 59, 0), + datetime.datetime(2016, 7, 17, 23, 59, 4), + datetime.datetime(2016, 7, 17, 23, 59, 9), + datetime.datetime(2016, 7, 18, 0, 0, 0), + datetime.datetime(2016, 7, 18, 0, 0, 4), + datetime.datetime(2016, 7, 18, 0, 0, 9)], + [4, 5, 6, 7, 8, 9])) + + for key, split in ts.split(): + self.storage._store_metric_measures( + self.metric, + str(key), + agg, d.granularity, + _serialize_v2(split), offset=None, version=None) def upgrade(self): with mock.patch.object(self.index, 'list_metrics') as f: @@ -178,45 +173,42 @@ class TestCarbonaraMigration(tests_base.TestCase): self.storage._create_metric(self.metric2) # serialise in old format - with mock.patch('gnocchi.carbonara.AggregatedTimeSerie.serialize', - autospec=True) as f: - with mock.patch('gnocchi.carbonara.SplitKey.POINTS_PER_SPLIT', - 14400): - f.side_effect = _serialize_v2 - - # NOTE(jd) This is just to have an unaggregated timserie for - # the upgrade code, I don't think the values are correct lol - ts = carbonara.BoundTimeSerie( - block_size=self.metric2.archive_policy.max_block_size, - back_window=self.metric2.archive_policy.back_window) - ts.set_values([ - storage.Measure( - datetime.datetime(2016, 7, 17, 
23, 59, 0), 23), - ]) - self.storage._store_unaggregated_timeserie(self.metric2, - ts.serialize()) - - for d, agg in itertools.product( - self.metric2.archive_policy.definition, - ['mean', 'max']): - ts = carbonara.AggregatedTimeSerie( - sampling=d.granularity, aggregation_method=agg, - max_size=d.points) - - # NOTE: there is a split at 2016-07-18 on granularity 300 - ts.update(carbonara.TimeSerie.from_data( - [datetime.datetime(2016, 7, 17, 23, 59, 0), - datetime.datetime(2016, 7, 17, 23, 59, 4), - datetime.datetime(2016, 7, 17, 23, 59, 9), - datetime.datetime(2016, 7, 18, 0, 0, 0), - datetime.datetime(2016, 7, 18, 0, 0, 4), - datetime.datetime(2016, 7, 18, 0, 0, 9)], - [4, 5, 6, 7, 8, 9])) - - for key, split in ts.split(): - self.storage._store_metric_measures( - self.metric2, str(key), agg, d.granularity, - split.serialize(key), offset=0, version=None) + with mock.patch('gnocchi.carbonara.SplitKey.POINTS_PER_SPLIT', + 14400): + + # NOTE(jd) This is just to have an unaggregated timserie for + # the upgrade code, I don't think the values are correct lol + ts = carbonara.BoundTimeSerie( + block_size=self.metric2.archive_policy.max_block_size, + back_window=self.metric2.archive_policy.back_window) + ts.set_values([ + storage.Measure( + datetime.datetime(2016, 7, 17, 23, 59, 0), 23), + ]) + self.storage._store_unaggregated_timeserie(self.metric2, + ts.serialize()) + + for d, agg in itertools.product( + self.metric2.archive_policy.definition, + ['mean', 'max']): + ts = carbonara.AggregatedTimeSerie( + sampling=d.granularity, aggregation_method=agg, + max_size=d.points) + + # NOTE: there is a split at 2016-07-18 on granularity 300 + ts.update(carbonara.TimeSerie.from_data( + [datetime.datetime(2016, 7, 17, 23, 59, 0), + datetime.datetime(2016, 7, 17, 23, 59, 4), + datetime.datetime(2016, 7, 17, 23, 59, 9), + datetime.datetime(2016, 7, 18, 0, 0, 0), + datetime.datetime(2016, 7, 18, 0, 0, 4), + datetime.datetime(2016, 7, 18, 0, 0, 9)], + [4, 5, 6, 7, 8, 9])) + + for key, 
split in ts.split(): + self.storage._store_metric_measures( + self.metric2, str(key), agg, d.granularity, + _serialize_v2(split), offset=0, version=None) with mock.patch.object( self.storage, '_get_measures_and_unserialize', -- GitLab From 5474eaecce47436b511e4b7acab877da058342ca Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Fri, 9 Sep 2016 13:40:35 +0200 Subject: [PATCH 0361/1483] swift: switch default auth version to 3 Change-Id: I7fb5677b0d41cd1fa4e06ee4d4dd8c8cfbf7f1f7 --- devstack/plugin.sh | 6 +++--- gnocchi/storage/swift.py | 16 ++++++++++++---- .../swift_keystone_v3-606da8228fc13a32.yaml | 3 +++ 3 files changed, 18 insertions(+), 7 deletions(-) create mode 100644 releasenotes/notes/swift_keystone_v3-606da8228fc13a32.yaml diff --git a/devstack/plugin.sh b/devstack/plugin.sh index a7f0cc65..da6c9679 100644 --- a/devstack/plugin.sh +++ b/devstack/plugin.sh @@ -264,9 +264,9 @@ function configure_gnocchi { iniset $GNOCCHI_CONF storage driver swift iniset $GNOCCHI_CONF storage swift_user gnocchi_swift iniset $GNOCCHI_CONF storage swift_key $SERVICE_PASSWORD - iniset $GNOCCHI_CONF storage swift_tenant_name "gnocchi_swift" - iniset $GNOCCHI_CONF storage swift_auth_version 2 - iniset $GNOCCHI_CONF storage swift_authurl $KEYSTONE_SERVICE_URI/v2.0/ + iniset $GNOCCHI_CONF storage swift_project_name "gnocchi_swift" + iniset $GNOCCHI_CONF storage swift_auth_version 3 + iniset $GNOCCHI_CONF storage swift_authurl $KEYSTONE_SERVICE_URI_V3 elif [[ "$GNOCCHI_STORAGE_BACKEND" = 'file' ]] ; then iniset $GNOCCHI_CONF storage driver file iniset $GNOCCHI_CONF storage file_basepath $GNOCCHI_DATA_DIR/ diff --git a/gnocchi/storage/swift.py b/gnocchi/storage/swift.py index ee2bf7a1..ff4f7cbf 100644 --- a/gnocchi/storage/swift.py +++ b/gnocchi/storage/swift.py @@ -48,12 +48,19 @@ OPTS = [ cfg.StrOpt('swift_user', default="admin:admin", help='Swift user.'), + cfg.StrOpt('swift_user_domain_name', + default='Default', + help='Swift user domain name.'), cfg.StrOpt('swift_key', 
secret=True, default="admin", help='Swift key/password.'), - cfg.StrOpt('swift_tenant_name', - help='Swift tenant name, only used in v2 auth.'), + cfg.StrOpt('swift_project_name', + help='Swift tenant name, only used in v2/v3 auth.', + deprecated_name="swift_tenant_name"), + cfg.StrOpt('swift_project_domain_name', + default='Default', + help='Swift project domain name.'), cfg.StrOpt('swift_container_prefix', default='gnocchi', help='Prefix to namespace metric containers.'), @@ -82,9 +89,10 @@ class SwiftStorage(_carbonara.CarbonaraBasedStorage): preauthtoken=conf.swift_preauthtoken, user=conf.swift_user, key=conf.swift_key, - tenant_name=conf.swift_tenant_name, + tenant_name=conf.swift_project_name, timeout=conf.swift_timeout, - os_options={'endpoint_type': conf.swift_endpoint_type}, + os_options={'endpoint_type': conf.swift_endpoint_type, + 'user_domain_name': conf.swift_user_domain_name}, retries=1) self._container_prefix = conf.swift_container_prefix self.swift.put_container(self.MEASURE_PREFIX) diff --git a/releasenotes/notes/swift_keystone_v3-606da8228fc13a32.yaml b/releasenotes/notes/swift_keystone_v3-606da8228fc13a32.yaml new file mode 100644 index 00000000..9a52e062 --- /dev/null +++ b/releasenotes/notes/swift_keystone_v3-606da8228fc13a32.yaml @@ -0,0 +1,3 @@ +--- +features: + - Swift now supports authentication with Keystone v3 API. -- GitLab From e326a0612f21f353091d1d6aacc45167f8cee3a8 Mon Sep 17 00:00:00 2001 From: ghanshyam Date: Mon, 12 Sep 2016 15:25:08 +0900 Subject: [PATCH 0362/1483] Put the regex first This commit add regex first while running tempest tox There is changes in way tempest tox runs tests by using the tempest run command directly instead of some bash script. With tempest run you specify a regex with a '--regex' parameter and to keep backwards compatibility this is added to the tempest tox definitions. 
But in gnocchi post_test_hook regex is being used after concurrency and ends up having a call equivalent to: tempest run --regex --concurrency=2 gnocchi which obviously is incorrect. Simply switching the arg order should work here. Change-Id: Ibaffa3a8568ea058d964463df4b76196c4d2bc7a Need-by: I3684fce66a799579fa68af119652cafef25a9f03 --- devstack/gate/post_test_hook.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/devstack/gate/post_test_hook.sh b/devstack/gate/post_test_hook.sh index 6fe8e139..9c3ff39a 100755 --- a/devstack/gate/post_test_hook.sh +++ b/devstack/gate/post_test_hook.sh @@ -57,7 +57,7 @@ sudo chown -R tempest:stack $BASE/data/tempest # Run tests with tempst cd $BASE/new/tempest set +e -sudo -H -u tempest OS_TEST_TIMEOUT=$TEMPEST_OS_TEST_TIMEOUT tox -eall-plugin -- --concurrency=$TEMPEST_CONCURRENCY gnocchi +sudo -H -u tempest OS_TEST_TIMEOUT=$TEMPEST_OS_TEST_TIMEOUT tox -eall-plugin -- gnocchi --concurrency=$TEMPEST_CONCURRENCY TEMPEST_EXIT_CODE=$? set -e if [[ $TEMPEST_EXIT_CODE != 0 ]]; then -- GitLab From 8abc05b22ad541df3abe1cbe3200de5a5e9f3ce0 Mon Sep 17 00:00:00 2001 From: gord chung Date: Fri, 9 Sep 2016 20:37:28 +0000 Subject: [PATCH 0363/1483] share groupings across aggregates this changes it so we don't compute a completely new series over and over for each aggregte when in reality, they are all the same. should save on round_timestamp calculations as well. 
Change-Id: I525fe97b2674eaf06c7170dce8d40523f15443da Closes-Bug: #1621498 Closes-Bug: #1621510 --- gnocchi/carbonara.py | 57 +++++++++++++++++++++++------------ gnocchi/storage/_carbonara.py | 29 +++++++++--------- gnocchi/tests/test_storage.py | 2 +- 3 files changed, 52 insertions(+), 36 deletions(-) diff --git a/gnocchi/carbonara.py b/gnocchi/carbonara.py index 03bbce29..ad392835 100644 --- a/gnocchi/carbonara.py +++ b/gnocchi/carbonara.py @@ -183,11 +183,9 @@ class TimeSerie(object): def serialize(self): return msgpack.dumps(self.to_dict()) - def aggregate(self, granularity, aggregation_method='mean', max_size=None): - ats = AggregatedTimeSerie( - granularity, aggregation_method, max_size=max_size) - ats.update(self) - return ats + def group_serie(self, granularity, start=None): + return self.ts[start:].groupby(functools.partial( + round_timestamp, freq=granularity * 10e8)) class BoundTimeSerie(TimeSerie): @@ -363,16 +361,8 @@ class AggregatedTimeSerie(TimeSerie): """ super(AggregatedTimeSerie, self).__init__(ts) - m = self._AGG_METHOD_PCT_RE.match(aggregation_method) - - if m: - self.q = float(m.group(1)) / 100 - self.aggregation_method_func_name = 'quantile' - else: - if not hasattr(pandas.core.groupby.SeriesGroupBy, - aggregation_method): - raise UnknownAggregationMethod(aggregation_method) - self.aggregation_method_func_name = aggregation_method + self.aggregation_method_func_name, self.q = self._get_agg_method( + aggregation_method) self.sampling = self._to_offset(sampling).nanos / 10e8 self.max_size = max_size @@ -387,6 +377,20 @@ class AggregatedTimeSerie(TimeSerie): ts=pandas.Series(values, timestamps), max_size=max_size) + @staticmethod + def _get_agg_method(aggregation_method): + q = None + m = AggregatedTimeSerie._AGG_METHOD_PCT_RE.match(aggregation_method) + if m: + q = float(m.group(1)) / 100 + aggregation_method_func_name = 'quantile' + else: + if not hasattr(pandas.core.groupby.SeriesGroupBy, + aggregation_method): + raise 
UnknownAggregationMethod(aggregation_method) + aggregation_method_func_name = aggregation_method + return aggregation_method_func_name, q + def split(self): groupby = self.ts.groupby(functools.partial( SplitKey.from_timestamp_and_sampling, sampling=self.sampling)) @@ -405,6 +409,15 @@ class AggregatedTimeSerie(TimeSerie): aggregation_method=aggregation_method, ts=ts, max_size=max_size) + @classmethod + def from_grouped_serie(cls, grouped_serie, sampling, aggregation_method, + max_size=None): + agg_name, q = cls._get_agg_method(aggregation_method) + return cls(sampling, aggregation_method, + ts=cls._resample_grouped(grouped_serie, agg_name, + q).dropna(), + max_size=max_size) + def __eq__(self, other): return (isinstance(other, AggregatedTimeSerie) and super(AggregatedTimeSerie, self).__eq__(other) @@ -538,15 +551,18 @@ class AggregatedTimeSerie(TimeSerie): groupedby = self.ts[after:].groupby( functools.partial(round_timestamp, freq=self.sampling * 10e8)) - agg_func = getattr(groupedby, self.aggregation_method_func_name) - if self.aggregation_method_func_name == 'quantile': - aggregated = agg_func(self.q) - else: - aggregated = agg_func() + aggregated = self._resample_grouped(groupedby, + self.aggregation_method_func_name, + self.q) # Now combine the result with the rest of the point – everything # that is before `after' self.ts = aggregated.combine_first(self.ts[:after][:-1]) + @staticmethod + def _resample_grouped(grouped_serie, agg_name, q=None): + agg_func = getattr(grouped_serie, agg_name) + return agg_func(q) if agg_name == 'quantile' else agg_func() + def fetch(self, from_timestamp=None, to_timestamp=None): """Fetch aggregated time value. 
@@ -578,6 +594,7 @@ class AggregatedTimeSerie(TimeSerie): self.ts = self.ts.combine_first(ts.ts) def update(self, ts): + # TODO(gordc): remove this since it's not used if ts.ts.empty: return ts.ts = self.clean_ts(ts.ts) diff --git a/gnocchi/storage/_carbonara.py b/gnocchi/storage/_carbonara.py index 82b79837..9cc729cd 100644 --- a/gnocchi/storage/_carbonara.py +++ b/gnocchi/storage/_carbonara.py @@ -254,13 +254,12 @@ class CarbonaraBasedStorage(storage.StorageDriver): data, offset=offset) def _add_measures(self, aggregation, archive_policy_def, - metric, timeserie, + metric, grouped_serie, previous_oldest_mutable_timestamp, oldest_mutable_timestamp): - ts = timeserie.aggregate( - archive_policy_def.granularity, - aggregation, - archive_policy_def.points) + ts = carbonara.AggregatedTimeSerie.from_grouped_serie( + grouped_serie, archive_policy_def.granularity, + aggregation, max_size=archive_policy_def.points) # Don't do anything if the timeserie is empty if not ts: @@ -505,16 +504,16 @@ class CarbonaraBasedStorage(storage.StorageDriver): # affected by new measures for specific granularity tstamp = max(bound_timeserie.first, measures[0][0]) computed_points['number'] = len(bound_timeserie) - self._map_in_thread( - self._add_measures, - ((aggregation, d, metric, - carbonara.TimeSerie(bound_timeserie.ts[ - carbonara.round_timestamp( - tstamp, d.granularity * 10e8):]), - current_first_block_timestamp, - bound_timeserie.first_block_timestamp()) - for aggregation in agg_methods - for d in metric.archive_policy.definition)) + for d in metric.archive_policy.definition: + ts = bound_timeserie.group_serie( + d.granularity, carbonara.round_timestamp( + tstamp, d.granularity * 10e8)) + self._map_in_thread( + self._add_measures, + ((aggregation, d, metric, ts, + current_first_block_timestamp, + bound_timeserie.first_block_timestamp()) + for aggregation in agg_methods)) with timeutils.StopWatch() as sw: ts.set_values( diff --git a/gnocchi/tests/test_storage.py 
b/gnocchi/tests/test_storage.py index f56b412b..51aaa9f4 100644 --- a/gnocchi/tests/test_storage.py +++ b/gnocchi/tests/test_storage.py @@ -181,7 +181,7 @@ class TestStorageDriver(tests_base.TestCase): self.trigger_processing([str(m.id)]) for __, args, __ in c.mock_calls: self.assertEqual( - args[3].first, carbonara.round_timestamp( + list(args[3])[0][0], carbonara.round_timestamp( new_point, args[1].granularity * 10e8)) def test_delete_old_measures(self): -- GitLab From 7acd3b5ea73148c6a00629a23e1e88fc41dd33b4 Mon Sep 17 00:00:00 2001 From: gord chung Date: Mon, 12 Sep 2016 15:25:12 +0000 Subject: [PATCH 0364/1483] cleanup carbonara - drop update method previous workflow retrieved existing, merged in unaggregated back_window+new, aggregated existing+back_window+new, and saved. new workflow aggregates back_window+new, retrieves/overwrites existing. this removes methods used for previous workflow. Change-Id: Ie921b4bfdc4c612870989ec4b7961154006cecea --- gnocchi/carbonara.py | 49 +-- gnocchi/tests/storage/test_carbonara.py | 26 +- gnocchi/tests/test_carbonara.py | 400 +++++++++++++----------- 3 files changed, 239 insertions(+), 236 deletions(-) diff --git a/gnocchi/carbonara.py b/gnocchi/carbonara.py index ad392835..78fcdf71 100644 --- a/gnocchi/carbonara.py +++ b/gnocchi/carbonara.py @@ -184,6 +184,11 @@ class TimeSerie(object): return msgpack.dumps(self.to_dict()) def group_serie(self, granularity, start=None): + # NOTE(jd) Our whole serialization system is based on Epoch, and we + # store unsigned integer, so we can't store anything before Epoch. + # Sorry! 
+ if self.ts.index[0].value < 0: + raise BeforeEpochError(self.ts.index[0]) return self.ts[start:].groupby(functools.partial( round_timestamp, freq=granularity * 10e8)) @@ -545,19 +550,6 @@ class AggregatedTimeSerie(TimeSerie): self.ts = (self.ts[-self.max_size:] if quick else self.ts.dropna()[-self.max_size:]) - def _resample(self, after): - # Group by the sampling, and then apply the aggregation method on - # the points after `after' - groupedby = self.ts[after:].groupby( - functools.partial(round_timestamp, - freq=self.sampling * 10e8)) - aggregated = self._resample_grouped(groupedby, - self.aggregation_method_func_name, - self.q) - # Now combine the result with the rest of the point – everything - # that is before `after' - self.ts = aggregated.combine_first(self.ts[:after][:-1]) - @staticmethod def _resample_grouped(grouped_serie, agg_name, q=None): agg_func = getattr(grouped_serie, agg_name) @@ -593,37 +585,6 @@ class AggregatedTimeSerie(TimeSerie): """ self.ts = self.ts.combine_first(ts.ts) - def update(self, ts): - # TODO(gordc): remove this since it's not used - if ts.ts.empty: - return - ts.ts = self.clean_ts(ts.ts) - index = ts.ts.index - first_timestamp = index[0] - last_timestamp = index[-1] - - # NOTE(jd) Our whole serialization system is based on Epoch, and we - # store unsigned integer, so we can't store anything before Epoch. - # Sorry! - if first_timestamp.value < 0: - raise BeforeEpochError(first_timestamp) - - # Build a new time serie excluding all data points in the range of the - # timeserie passed as argument - new_ts = self.ts.drop(self.ts[first_timestamp:last_timestamp].index) - - # Build a new timeserie where we replaced the timestamp range covered - # by the timeserie passed as argument - self.ts = ts.ts.combine_first(new_ts) - - # Resample starting from the first timestamp we received - # TODO(jd) So this only works correctly because we expect that we are - # not going to replace a range in the middle of our timeserie. 
So we re - # resample EVERYTHING FROM first timestamp. We should rather resample - # from first timestamp AND TO LAST TIMESTAMP! - self._resample(first_timestamp) - self._truncate() - @classmethod def benchmark(cls): """Run a speed benchmark!""" diff --git a/gnocchi/tests/storage/test_carbonara.py b/gnocchi/tests/storage/test_carbonara.py index 4fd5316d..660b5030 100644 --- a/gnocchi/tests/storage/test_carbonara.py +++ b/gnocchi/tests/storage/test_carbonara.py @@ -61,21 +61,20 @@ class TestCarbonaraMigration(tests_base.TestCase): ts.serialize()) for d, agg in itertools.product( - self.metric.archive_policy.definition, - ['mean', 'max']): - ts = carbonara.AggregatedTimeSerie( - sampling=d.granularity, aggregation_method=agg, - max_size=d.points) + self.metric.archive_policy.definition, ['mean', 'max']): # NOTE: there is a split at 2016-07-18 on granularity 300 - ts.update(carbonara.TimeSerie.from_data( + ts = carbonara.TimeSerie.from_data( [datetime.datetime(2016, 7, 17, 23, 59, 0), datetime.datetime(2016, 7, 17, 23, 59, 4), datetime.datetime(2016, 7, 17, 23, 59, 9), datetime.datetime(2016, 7, 18, 0, 0, 0), datetime.datetime(2016, 7, 18, 0, 0, 4), datetime.datetime(2016, 7, 18, 0, 0, 9)], - [4, 5, 6, 7, 8, 9])) + [4, 5, 6, 7, 8, 9]) + grouped = ts.group_serie(d.granularity) + ts = carbonara.AggregatedTimeSerie.from_grouped_serie( + grouped, d.granularity, agg, max_size=d.points) for key, split in ts.split(): self.storage._store_metric_measures( @@ -189,21 +188,20 @@ class TestCarbonaraMigration(tests_base.TestCase): ts.serialize()) for d, agg in itertools.product( - self.metric2.archive_policy.definition, - ['mean', 'max']): - ts = carbonara.AggregatedTimeSerie( - sampling=d.granularity, aggregation_method=agg, - max_size=d.points) + self.metric2.archive_policy.definition, ['mean', 'max']): # NOTE: there is a split at 2016-07-18 on granularity 300 - ts.update(carbonara.TimeSerie.from_data( + ts = carbonara.TimeSerie.from_data( [datetime.datetime(2016, 7, 17, 23, 59, 
0), datetime.datetime(2016, 7, 17, 23, 59, 4), datetime.datetime(2016, 7, 17, 23, 59, 9), datetime.datetime(2016, 7, 18, 0, 0, 0), datetime.datetime(2016, 7, 18, 0, 0, 4), datetime.datetime(2016, 7, 18, 0, 0, 9)], - [4, 5, 6, 7, 8, 9])) + [4, 5, 6, 7, 8, 9]) + grouped = ts.group_serie(d.granularity) + ts = carbonara.AggregatedTimeSerie.from_grouped_serie( + grouped, d.granularity, agg, max_size=d.points) for key, split in ts.split(): self.storage._store_metric_measures( diff --git a/gnocchi/tests/test_carbonara.py b/gnocchi/tests/test_carbonara.py index f674d0bd..f0276e5f 100644 --- a/gnocchi/tests/test_carbonara.py +++ b/gnocchi/tests/test_carbonara.py @@ -14,6 +14,7 @@ # License for the specific language governing permissions and limitations # under the License. import datetime +import functools import math import fixtures @@ -145,15 +146,13 @@ class TestAggregatedTimeSerie(base.BaseTestCase): "2014-01-01 13:00:04+01:00"))) def test_before_epoch(self): - ts = carbonara.AggregatedTimeSerie(sampling='1Min', - aggregation_method='74pct') + ts = carbonara.TimeSerie.from_tuples( + [(datetime.datetime(1950, 1, 1, 12), 3), + (datetime.datetime(2014, 1, 1, 12), 5), + (datetime.datetime(2014, 1, 1, 12), 6)]) self.assertRaises(carbonara.BeforeEpochError, - ts.update, - carbonara.TimeSerie.from_tuples( - [(datetime.datetime(1950, 1, 1, 12, 0, 0), 3), - (datetime.datetime(2014, 1, 1, 12, 0, 4), 5), - (datetime.datetime(2014, 1, 1, 12, 0, 9), 6)])) + ts.group_serie, 60) def test_bad_percentile(self): for bad_percentile in ('0pct', '100pct', '-1pct', '123pct'): @@ -162,13 +161,18 @@ class TestAggregatedTimeSerie(base.BaseTestCase): sampling='1Min', aggregation_method=bad_percentile) + @staticmethod + def _resample(ts, sampling, agg, max_size=None): + grouped = ts.group_serie(sampling) + return carbonara.AggregatedTimeSerie.from_grouped_serie( + grouped, sampling, agg, max_size=max_size) + def test_74_percentile_serialized(self): - ts = 
carbonara.AggregatedTimeSerie(sampling='1Min', - aggregation_method='74pct') - ts.update(carbonara.TimeSerie.from_tuples( + ts = carbonara.TimeSerie.from_tuples( [(datetime.datetime(2014, 1, 1, 12, 0, 0), 3), (datetime.datetime(2014, 1, 1, 12, 0, 4), 5), - (datetime.datetime(2014, 1, 1, 12, 0, 9), 6)])) + (datetime.datetime(2014, 1, 1, 12, 0, 9), 6)]) + ts = self._resample(ts, 60, '74pct') self.assertEqual(1, len(ts)) self.assertEqual(5.48, ts[datetime.datetime(2014, 1, 1, 12, 0, 0)]) @@ -176,24 +180,25 @@ class TestAggregatedTimeSerie(base.BaseTestCase): # Serialize and unserialize key = ts.get_split_key() o, s = ts.serialize(key) - ts = carbonara.AggregatedTimeSerie.unserialize( + saved_ts = carbonara.AggregatedTimeSerie.unserialize( s, key, '74pct', ts.sampling) - ts.update(carbonara.TimeSerie.from_tuples( + ts = carbonara.TimeSerie.from_tuples( [(datetime.datetime(2014, 1, 1, 12, 0, 0), 3), (datetime.datetime(2014, 1, 1, 12, 0, 4), 5), - (datetime.datetime(2014, 1, 1, 12, 0, 9), 6)])) + (datetime.datetime(2014, 1, 1, 12, 0, 9), 6)]) + ts = self._resample(ts, 60, '74pct') + ts.merge(saved_ts) self.assertEqual(1, len(ts)) self.assertEqual(5.48, ts[datetime.datetime(2014, 1, 1, 12, 0, 0)]) def test_95_percentile(self): - ts = carbonara.AggregatedTimeSerie(sampling='1Min', - aggregation_method='95pct') - ts.update(carbonara.TimeSerie.from_tuples( + ts = carbonara.TimeSerie.from_tuples( [(datetime.datetime(2014, 1, 1, 12, 0, 0), 3), (datetime.datetime(2014, 1, 1, 12, 0, 4), 5), - (datetime.datetime(2014, 1, 1, 12, 0, 9), 6)])) + (datetime.datetime(2014, 1, 1, 12, 0, 9), 6)]) + ts = self._resample(ts, 60, '95pct') self.assertEqual(1, len(ts)) self.assertEqual(5.9000000000000004, @@ -209,54 +214,50 @@ class TestAggregatedTimeSerie(base.BaseTestCase): [3, 5]) def test_max_size(self): - ts = carbonara.AggregatedTimeSerie(sampling=1, max_size=2, - aggregation_method='mean') - ts.update(carbonara.TimeSerie.from_data( + ts = carbonara.TimeSerie.from_data( 
[datetime.datetime(2014, 1, 1, 12, 0, 0), datetime.datetime(2014, 1, 1, 12, 0, 4), datetime.datetime(2014, 1, 1, 12, 0, 9)], - [3, 5, 6])) + [3, 5, 6]) + ts = self._resample(ts, 1, 'mean', max_size=2) + self.assertEqual(2, len(ts)) self.assertEqual(5, ts[0]) self.assertEqual(6, ts[1]) def test_down_sampling(self): - ts = carbonara.AggregatedTimeSerie(sampling='5Min', - aggregation_method='mean') - ts.update(carbonara.TimeSerie.from_data( + ts = carbonara.TimeSerie.from_data( [datetime.datetime(2014, 1, 1, 12, 0, 0), datetime.datetime(2014, 1, 1, 12, 0, 4), datetime.datetime(2014, 1, 1, 12, 0, 9)], - [3, 5, 7])) + [3, 5, 7]) + ts = self._resample(ts, 300, 'mean') + self.assertEqual(1, len(ts)) self.assertEqual(5, ts[datetime.datetime(2014, 1, 1, 12, 0, 0)]) def test_down_sampling_with_max_size(self): - ts = carbonara.AggregatedTimeSerie( - sampling='1Min', - aggregation_method='mean', - max_size=2) - ts.update(carbonara.TimeSerie.from_data( + ts = carbonara.TimeSerie.from_data( [datetime.datetime(2014, 1, 1, 12, 0, 0), datetime.datetime(2014, 1, 1, 12, 1, 4), datetime.datetime(2014, 1, 1, 12, 1, 9), datetime.datetime(2014, 1, 1, 12, 2, 12)], - [3, 5, 7, 1])) + [3, 5, 7, 1]) + ts = self._resample(ts, 60, 'mean', max_size=2) + self.assertEqual(2, len(ts)) self.assertEqual(6, ts[datetime.datetime(2014, 1, 1, 12, 1, 0)]) self.assertEqual(1, ts[datetime.datetime(2014, 1, 1, 12, 2, 0)]) def test_down_sampling_with_max_size_and_method_max(self): - ts = carbonara.AggregatedTimeSerie( - sampling='1Min', - max_size=2, - aggregation_method='max') - ts.update(carbonara.TimeSerie.from_data( + ts = carbonara.TimeSerie.from_data( [datetime.datetime(2014, 1, 1, 12, 0, 0), datetime.datetime(2014, 1, 1, 12, 1, 4), datetime.datetime(2014, 1, 1, 12, 1, 9), datetime.datetime(2014, 1, 1, 12, 2, 12)], - [3, 5, 70, 1])) + [3, 5, 70, 1]) + ts = self._resample(ts, 60, 'max', max_size=2) + self.assertEqual(2, len(ts)) self.assertEqual(70, ts[datetime.datetime(2014, 1, 1, 12, 1, 0)]) 
self.assertEqual(1, ts[datetime.datetime(2014, 1, 1, 12, 2, 0)]) @@ -271,45 +272,54 @@ class TestAggregatedTimeSerie(base.BaseTestCase): ts2 = carbonara.TimeSerie.from_dict(ts.to_dict()) self.assertEqual(ts, ts2) + @staticmethod + def _resample_and_merge(ts, agg_dict): + """Helper method that mimics _add_measures workflow.""" + grouped = ts.group_serie(agg_dict['sampling']) + existing = agg_dict.get('return') + agg_dict['return'] = carbonara.AggregatedTimeSerie.from_grouped_serie( + grouped, agg_dict['sampling'], agg_dict['agg'], + max_size=agg_dict.get('size')) + if existing: + agg_dict['return'].merge(existing) + def test_aggregated_different_archive_no_overlap(self): - tsc1 = carbonara.AggregatedTimeSerie(sampling=60, max_size=50, - aggregation_method='mean') - tsb1 = carbonara.BoundTimeSerie(block_size=tsc1.sampling) - tsc2 = carbonara.AggregatedTimeSerie(sampling=60, max_size=50, - aggregation_method='mean') - tsb2 = carbonara.BoundTimeSerie(block_size=tsc2.sampling) + tsc1 = {'sampling': 60, 'size': 50, 'agg': 'mean'} + tsb1 = carbonara.BoundTimeSerie(block_size=tsc1['sampling']) + tsc2 = {'sampling': 60, 'size': 50, 'agg': 'mean'} + tsb2 = carbonara.BoundTimeSerie(block_size=tsc2['sampling']) tsb1.set_values([(datetime.datetime(2014, 1, 1, 11, 46, 4), 4)], - before_truncate_callback=tsc1.update) + before_truncate_callback=functools.partial( + self._resample_and_merge, agg_dict=tsc1)) tsb2.set_values([(datetime.datetime(2014, 1, 1, 9, 1, 4), 4)], - before_truncate_callback=tsc2.update) + before_truncate_callback=functools.partial( + self._resample_and_merge, agg_dict=tsc2)) dtfrom = datetime.datetime(2014, 1, 1, 11, 0, 0) self.assertRaises(carbonara.UnAggregableTimeseries, carbonara.AggregatedTimeSerie.aggregated, - [tsc1, tsc2], from_timestamp=dtfrom, - aggregation='mean') + [tsc1['return'], tsc2['return']], + from_timestamp=dtfrom, aggregation='mean') def test_aggregated_different_archive_no_overlap2(self): - tsc1 = 
carbonara.AggregatedTimeSerie(sampling=60, max_size=50, - aggregation_method='mean') - tsb1 = carbonara.BoundTimeSerie(block_size=tsc1.sampling) + tsc1 = {'sampling': 60, 'size': 50, 'agg': 'mean'} + tsb1 = carbonara.BoundTimeSerie(block_size=tsc1['sampling']) tsc2 = carbonara.AggregatedTimeSerie(sampling=60, max_size=50, aggregation_method='mean') tsb1.set_values([(datetime.datetime(2014, 1, 1, 12, 3, 0), 4)], - before_truncate_callback=tsc1.update) + before_truncate_callback=functools.partial( + self._resample_and_merge, agg_dict=tsc1)) self.assertRaises(carbonara.UnAggregableTimeseries, carbonara.AggregatedTimeSerie.aggregated, - [tsc1, tsc2], aggregation='mean') + [tsc1['return'], tsc2], aggregation='mean') def test_aggregated_different_archive_overlap(self): - tsc1 = carbonara.AggregatedTimeSerie(sampling=60, max_size=10, - aggregation_method='mean') - tsb1 = carbonara.BoundTimeSerie(block_size=tsc1.sampling) - tsc2 = carbonara.AggregatedTimeSerie(sampling=60, max_size=10, - aggregation_method='mean') - tsb2 = carbonara.BoundTimeSerie(block_size=tsc2.sampling) + tsc1 = {'sampling': 60, 'size': 10, 'agg': 'mean'} + tsb1 = carbonara.BoundTimeSerie(block_size=tsc1['sampling']) + tsc2 = {'sampling': 60, 'size': 10, 'agg': 'mean'} + tsb2 = carbonara.BoundTimeSerie(block_size=tsc2['sampling']) # NOTE(sileht): minute 8 is missing in both and # minute 7 in tsc2 too, but it looks like we have @@ -324,7 +334,8 @@ class TestAggregatedTimeSerie(base.BaseTestCase): (datetime.datetime(2014, 1, 1, 12, 6, 0), 4), (datetime.datetime(2014, 1, 1, 12, 7, 0), 10), (datetime.datetime(2014, 1, 1, 12, 9, 0), 2), - ], before_truncate_callback=tsc1.update) + ], before_truncate_callback=functools.partial( + self._resample_and_merge, agg_dict=tsc1)) tsb2.set_values([ (datetime.datetime(2014, 1, 1, 12, 1, 0), 3), @@ -336,7 +347,8 @@ class TestAggregatedTimeSerie(base.BaseTestCase): (datetime.datetime(2014, 1, 1, 12, 9, 0), 2), (datetime.datetime(2014, 1, 1, 12, 11, 0), 2), 
(datetime.datetime(2014, 1, 1, 12, 12, 0), 2), - ], before_truncate_callback=tsc2.update) + ], before_truncate_callback=functools.partial( + self._resample_and_merge, agg_dict=tsc2)) dtfrom = datetime.datetime(2014, 1, 1, 12, 0, 0) dtto = datetime.datetime(2014, 1, 1, 12, 10, 0) @@ -345,12 +357,14 @@ class TestAggregatedTimeSerie(base.BaseTestCase): # so that fail self.assertRaises(carbonara.UnAggregableTimeseries, carbonara.AggregatedTimeSerie.aggregated, - [tsc1, tsc2], from_timestamp=dtfrom, + [tsc1['return'], tsc2['return']], + from_timestamp=dtfrom, to_timestamp=dtto, aggregation='mean') # Retry with 80% and it works output = carbonara.AggregatedTimeSerie.aggregated([ - tsc1, tsc2], from_timestamp=dtfrom, to_timestamp=dtto, + tsc1['return'], tsc2['return']], + from_timestamp=dtfrom, to_timestamp=dtto, aggregation='mean', needed_percent_of_overlap=80.0) self.assertEqual([ @@ -365,12 +379,10 @@ class TestAggregatedTimeSerie(base.BaseTestCase): ], output) def test_aggregated_different_archive_overlap_edge_missing1(self): - tsc1 = carbonara.AggregatedTimeSerie(sampling=60, max_size=10, - aggregation_method='mean') - tsb1 = carbonara.BoundTimeSerie(block_size=tsc1.sampling) - tsc2 = carbonara.AggregatedTimeSerie(sampling=60, max_size=10, - aggregation_method='mean') - tsb2 = carbonara.BoundTimeSerie(block_size=tsc2.sampling) + tsc1 = {'sampling': 60, 'size': 10, 'agg': 'mean'} + tsb1 = carbonara.BoundTimeSerie(block_size=tsc1['sampling']) + tsc2 = {'sampling': 60, 'size': 10, 'agg': 'mean'} + tsb2 = carbonara.BoundTimeSerie(block_size=tsc2['sampling']) tsb1.set_values([ (datetime.datetime(2014, 1, 1, 12, 3, 0), 9), @@ -379,7 +391,8 @@ class TestAggregatedTimeSerie(base.BaseTestCase): (datetime.datetime(2014, 1, 1, 12, 6, 0), 7), (datetime.datetime(2014, 1, 1, 12, 7, 0), 5), (datetime.datetime(2014, 1, 1, 12, 8, 0), 3), - ], before_truncate_callback=tsc1.update) + ], before_truncate_callback=functools.partial( + self._resample_and_merge, agg_dict=tsc1)) 
tsb2.set_values([ (datetime.datetime(2014, 1, 1, 11, 0, 0), 6), @@ -389,13 +402,14 @@ class TestAggregatedTimeSerie(base.BaseTestCase): (datetime.datetime(2014, 1, 1, 12, 4, 0), 4), (datetime.datetime(2014, 1, 1, 12, 5, 0), 16), (datetime.datetime(2014, 1, 1, 12, 6, 0), 12), - ], before_truncate_callback=tsc2.update) + ], before_truncate_callback=functools.partial( + self._resample_and_merge, agg_dict=tsc2)) # By default we require 100% of point that overlap # but we allow that the last datapoint is missing # of the precisest granularity output = carbonara.AggregatedTimeSerie.aggregated([ - tsc1, tsc2], aggregation='sum') + tsc1['return'], tsc2['return']], aggregation='sum') self.assertEqual([ (pandas.Timestamp('2014-01-01 12:03:00'), 60.0, 33.0), @@ -405,32 +419,31 @@ class TestAggregatedTimeSerie(base.BaseTestCase): ], output) def test_aggregated_different_archive_overlap_edge_missing2(self): - tsc1 = carbonara.AggregatedTimeSerie(sampling=60, max_size=10, - aggregation_method='mean') - tsb1 = carbonara.BoundTimeSerie(block_size=tsc1.sampling) - tsc2 = carbonara.AggregatedTimeSerie(sampling=60, max_size=10, - aggregation_method='mean') - tsb2 = carbonara.BoundTimeSerie(block_size=tsc2.sampling) + tsc1 = {'sampling': 60, 'size': 10, 'agg': 'mean'} + tsb1 = carbonara.BoundTimeSerie(block_size=tsc1['sampling']) + tsc2 = {'sampling': 60, 'size': 10, 'agg': 'mean'} + tsb2 = carbonara.BoundTimeSerie(block_size=tsc2['sampling']) tsb1.set_values([ (datetime.datetime(2014, 1, 1, 12, 3, 0), 4), - ], before_truncate_callback=tsc1.update) + ], before_truncate_callback=functools.partial( + self._resample_and_merge, agg_dict=tsc1)) tsb2.set_values([ (datetime.datetime(2014, 1, 1, 11, 0, 0), 4), (datetime.datetime(2014, 1, 1, 12, 3, 0), 4), - ], before_truncate_callback=tsc2.update) + ], before_truncate_callback=functools.partial( + self._resample_and_merge, agg_dict=tsc2)) output = carbonara.AggregatedTimeSerie.aggregated( - [tsc1, tsc2], aggregation='mean') + [tsc1['return'], 
tsc2['return']], aggregation='mean') self.assertEqual([ (pandas.Timestamp('2014-01-01 12:03:00'), 60.0, 4.0), ], output) def test_fetch(self): - ts = carbonara.AggregatedTimeSerie(sampling=60, max_size=10, - aggregation_method='mean') - tsb = carbonara.BoundTimeSerie(block_size=ts.sampling) + ts = {'sampling': 60, 'size': 10, 'agg': 'mean'} + tsb = carbonara.BoundTimeSerie(block_size=ts['sampling']) tsb.set_values([ (datetime.datetime(2014, 1, 1, 11, 46, 4), 4), @@ -449,11 +462,13 @@ class TestAggregatedTimeSerie(base.BaseTestCase): (datetime.datetime(2014, 1, 1, 12, 5, 1), 15), (datetime.datetime(2014, 1, 1, 12, 5, 12), 1), (datetime.datetime(2014, 1, 1, 12, 6, 0, 2), 3), - ], before_truncate_callback=ts.update) + ], before_truncate_callback=functools.partial( + self._resample_and_merge, agg_dict=ts)) tsb.set_values([ (datetime.datetime(2014, 1, 1, 12, 6), 5), - ], before_truncate_callback=ts.update) + ], before_truncate_callback=functools.partial( + self._resample_and_merge, agg_dict=ts)) self.assertEqual([ (datetime.datetime(2014, 1, 1, 11, 54), 60.0, 4.0), @@ -466,7 +481,7 @@ class TestAggregatedTimeSerie(base.BaseTestCase): (datetime.datetime(2014, 1, 1, 12, 4), 60.0, 7.0), (datetime.datetime(2014, 1, 1, 12, 5), 60.0, 8.0), (datetime.datetime(2014, 1, 1, 12, 6), 60.0, 4.0) - ], ts.fetch()) + ], ts['return'].fetch()) self.assertEqual([ (datetime.datetime(2014, 1, 1, 12, 1), 60.0, 5.5), @@ -475,19 +490,19 @@ class TestAggregatedTimeSerie(base.BaseTestCase): (datetime.datetime(2014, 1, 1, 12, 4), 60.0, 7.0), (datetime.datetime(2014, 1, 1, 12, 5), 60.0, 8.0), (datetime.datetime(2014, 1, 1, 12, 6), 60.0, 4.0) - ], ts.fetch(datetime.datetime(2014, 1, 1, 12, 0, 0))) + ], ts['return'].fetch(datetime.datetime(2014, 1, 1, 12, 0, 0))) def test_fetch_agg_pct(self): - ts = carbonara.AggregatedTimeSerie(sampling=1, max_size=3600 * 24, - aggregation_method='90pct') - tsb = carbonara.BoundTimeSerie(block_size=ts.sampling) + ts = {'sampling': 1, 'size': 3600 * 24, 'agg': 
'90pct'} + tsb = carbonara.BoundTimeSerie(block_size=ts['sampling']) tsb.set_values([(datetime.datetime(2014, 1, 1, 12, 0, 0), 3), (datetime.datetime(2014, 1, 1, 12, 0, 0, 123), 4), (datetime.datetime(2014, 1, 1, 12, 0, 2), 4)], - before_truncate_callback=ts.update) + before_truncate_callback=functools.partial( + self._resample_and_merge, agg_dict=ts)) - result = ts.fetch(datetime.datetime(2014, 1, 1, 12, 0, 0)) + result = ts['return'].fetch(datetime.datetime(2014, 1, 1, 12, 0, 0)) reference = [ (pandas.Timestamp('2014-01-01 12:00:00'), 1.0, 3.9), @@ -504,9 +519,10 @@ class TestAggregatedTimeSerie(base.BaseTestCase): self.assertAlmostEqual(ref[2], res[2]) tsb.set_values([(datetime.datetime(2014, 1, 1, 12, 0, 2, 113), 110)], - before_truncate_callback=ts.update) + before_truncate_callback=functools.partial( + self._resample_and_merge, agg_dict=ts)) - result = ts.fetch(datetime.datetime(2014, 1, 1, 12, 0, 0)) + result = ts['return'].fetch(datetime.datetime(2014, 1, 1, 12, 0, 0)) reference = [ (pandas.Timestamp('2014-01-01 12:00:00'), 1.0, 3.9), @@ -523,9 +539,8 @@ class TestAggregatedTimeSerie(base.BaseTestCase): self.assertAlmostEqual(ref[2], res[2]) def test_fetch_nano(self): - ts = carbonara.AggregatedTimeSerie(sampling=0.2, max_size=10, - aggregation_method='mean') - tsb = carbonara.BoundTimeSerie(block_size=ts.sampling) + ts = {'sampling': 0.2, 'size': 10, 'agg': 'mean'} + tsb = carbonara.BoundTimeSerie(block_size=ts['sampling']) tsb.set_values([ (datetime.datetime(2014, 1, 1, 11, 46, 0, 200123), 4), @@ -533,79 +548,84 @@ class TestAggregatedTimeSerie(base.BaseTestCase): (datetime.datetime(2014, 1, 1, 11, 47, 0, 323154), 50), (datetime.datetime(2014, 1, 1, 11, 48, 0, 590903), 4), (datetime.datetime(2014, 1, 1, 11, 48, 0, 903291), 4), - ], before_truncate_callback=ts.update) + ], before_truncate_callback=functools.partial( + self._resample_and_merge, agg_dict=ts)) tsb.set_values([ (datetime.datetime(2014, 1, 1, 11, 48, 0, 821312), 5), - ], 
before_truncate_callback=ts.update) + ], before_truncate_callback=functools.partial( + self._resample_and_merge, agg_dict=ts)) self.assertEqual([ (datetime.datetime(2014, 1, 1, 11, 46, 0, 200000), 0.2, 6.0), (datetime.datetime(2014, 1, 1, 11, 47, 0, 200000), 0.2, 50.0), (datetime.datetime(2014, 1, 1, 11, 48, 0, 400000), 0.2, 4.0), (datetime.datetime(2014, 1, 1, 11, 48, 0, 800000), 0.2, 4.5) - ], ts.fetch()) + ], ts['return'].fetch()) def test_fetch_agg_std(self): - ts = carbonara.AggregatedTimeSerie(sampling=60, max_size=60, - aggregation_method='std') - tsb = carbonara.BoundTimeSerie(block_size=ts.sampling) + # NOTE (gordc): this is a good test to ensure we drop NaN entries + # 2014-01-01 12:00:00 will appear if we don't dropna() + ts = {'sampling': 60, 'size': 60, 'agg': 'std'} + tsb = carbonara.BoundTimeSerie(block_size=ts['sampling']) tsb.set_values([(datetime.datetime(2014, 1, 1, 12, 0, 0), 3), (datetime.datetime(2014, 1, 1, 12, 1, 4), 4), (datetime.datetime(2014, 1, 1, 12, 1, 9), 7), (datetime.datetime(2014, 1, 1, 12, 2, 1), 15), (datetime.datetime(2014, 1, 1, 12, 2, 12), 1)], - before_truncate_callback=ts.update) + before_truncate_callback=functools.partial( + self._resample_and_merge, agg_dict=ts)) self.assertEqual([ (pandas.Timestamp('2014-01-01 12:01:00'), 60.0, 2.1213203435596424), (pandas.Timestamp('2014-01-01 12:02:00'), 60.0, 9.8994949366116654), - ], ts.fetch(datetime.datetime(2014, 1, 1, 12, 0, 0))) + ], ts['return'].fetch(datetime.datetime(2014, 1, 1, 12, 0, 0))) tsb.set_values([(datetime.datetime(2014, 1, 1, 12, 2, 13), 110)], - before_truncate_callback=ts.update) + before_truncate_callback=functools.partial( + self._resample_and_merge, agg_dict=ts)) self.assertEqual([ (pandas.Timestamp('2014-01-01 12:01:00'), 60.0, 2.1213203435596424), (pandas.Timestamp('2014-01-01 12:02:00'), 60.0, 59.304300012730948), - ], ts.fetch(datetime.datetime(2014, 1, 1, 12, 0, 0))) + ], ts['return'].fetch(datetime.datetime(2014, 1, 1, 12, 0, 0))) def 
test_fetch_agg_max(self): - ts = carbonara.AggregatedTimeSerie(sampling=60, max_size=60, - aggregation_method='max') - tsb = carbonara.BoundTimeSerie(block_size=ts.sampling) + ts = {'sampling': 60, 'size': 60, 'agg': 'max'} + tsb = carbonara.BoundTimeSerie(block_size=ts['sampling']) tsb.set_values([(datetime.datetime(2014, 1, 1, 12, 0, 0), 3), (datetime.datetime(2014, 1, 1, 12, 1, 4), 4), (datetime.datetime(2014, 1, 1, 12, 1, 9), 7), (datetime.datetime(2014, 1, 1, 12, 2, 1), 15), (datetime.datetime(2014, 1, 1, 12, 2, 12), 1)], - before_truncate_callback=ts.update) + before_truncate_callback=functools.partial( + self._resample_and_merge, agg_dict=ts)) self.assertEqual([ (pandas.Timestamp('2014-01-01 12:00:00'), 60.0, 3), (pandas.Timestamp('2014-01-01 12:01:00'), 60.0, 7), (pandas.Timestamp('2014-01-01 12:02:00'), 60.0, 15), - ], ts.fetch(datetime.datetime(2014, 1, 1, 12, 0, 0))) + ], ts['return'].fetch(datetime.datetime(2014, 1, 1, 12, 0, 0))) tsb.set_values([(datetime.datetime(2014, 1, 1, 12, 2, 13), 110)], - before_truncate_callback=ts.update) + before_truncate_callback=functools.partial( + self._resample_and_merge, agg_dict=ts)) self.assertEqual([ (pandas.Timestamp('2014-01-01 12:00:00'), 60.0, 3), (pandas.Timestamp('2014-01-01 12:01:00'), 60.0, 7), (pandas.Timestamp('2014-01-01 12:02:00'), 60.0, 110), - ], ts.fetch(datetime.datetime(2014, 1, 1, 12, 0, 0))) + ], ts['return'].fetch(datetime.datetime(2014, 1, 1, 12, 0, 0))) def test_serialize(self): - ts = carbonara.AggregatedTimeSerie(sampling=0.5, - aggregation_method='mean') - tsb = carbonara.BoundTimeSerie(block_size=ts.sampling) + ts = {'sampling': 0.5, 'agg': 'mean'} + tsb = carbonara.BoundTimeSerie(block_size=ts['sampling']) tsb.set_values([ (datetime.datetime(2014, 1, 1, 12, 0, 0, 1234), 3), @@ -613,28 +633,30 @@ class TestAggregatedTimeSerie(base.BaseTestCase): (datetime.datetime(2014, 1, 1, 12, 1, 4, 234), 5), (datetime.datetime(2014, 1, 1, 12, 1, 9, 32), 7), (datetime.datetime(2014, 1, 1, 12, 2, 12, 
532), 1), - ], before_truncate_callback=ts.update) + ], before_truncate_callback=functools.partial( + self._resample_and_merge, agg_dict=ts)) - key = ts.get_split_key() - o, s = ts.serialize(key) - self.assertEqual(ts, + key = ts['return'].get_split_key() + o, s = ts['return'].serialize(key) + self.assertEqual(ts['return'], carbonara.AggregatedTimeSerie.unserialize( s, key, 'mean', 0.5)) def test_no_truncation(self): - ts = carbonara.AggregatedTimeSerie(sampling=60, - aggregation_method='mean') + ts = {'sampling': 60, 'agg': 'mean'} tsb = carbonara.BoundTimeSerie() for i in six.moves.range(1, 11): tsb.set_values([ (datetime.datetime(2014, 1, 1, 12, i, i), float(i)) - ], before_truncate_callback=ts.update) + ], before_truncate_callback=functools.partial( + self._resample_and_merge, agg_dict=ts)) tsb.set_values([ (datetime.datetime(2014, 1, 1, 12, i, i + 1), float(i + 1)) - ], before_truncate_callback=ts.update) - self.assertEqual(i, len(ts.fetch())) + ], before_truncate_callback=functools.partial( + self._resample_and_merge, agg_dict=ts)) + self.assertEqual(i, len(ts['return'].fetch())) def test_back_window(self): """Back window testing. @@ -642,9 +664,8 @@ class TestAggregatedTimeSerie(base.BaseTestCase): Test the back window on an archive is not longer than the window we aggregate on. 
""" - ts = carbonara.AggregatedTimeSerie(sampling=1, max_size=60, - aggregation_method='mean') - tsb = carbonara.BoundTimeSerie(block_size=ts.sampling) + ts = {'sampling': 1, 'size': 60, 'agg': 'mean'} + tsb = carbonara.BoundTimeSerie(block_size=ts['sampling']) tsb.set_values([ (datetime.datetime(2014, 1, 1, 12, 0, 1, 2300), 1), @@ -652,7 +673,8 @@ class TestAggregatedTimeSerie(base.BaseTestCase): (datetime.datetime(2014, 1, 1, 12, 0, 2, 4500), 3), (datetime.datetime(2014, 1, 1, 12, 0, 2, 7800), 4), (datetime.datetime(2014, 1, 1, 12, 0, 3, 8), 2.5), - ], before_truncate_callback=ts.update) + ], before_truncate_callback=functools.partial( + self._resample_and_merge, agg_dict=ts)) self.assertEqual( [ @@ -660,7 +682,7 @@ class TestAggregatedTimeSerie(base.BaseTestCase): (pandas.Timestamp('2014-01-01 12:00:02'), 1.0, 3.5), (pandas.Timestamp('2014-01-01 12:00:03'), 1.0, 2.5), ], - ts.fetch()) + ts['return'].fetch()) try: tsb.set_values([ @@ -683,9 +705,8 @@ class TestAggregatedTimeSerie(base.BaseTestCase): Test the back window on an archive is not longer than the window we aggregate on. 
""" - ts = carbonara.AggregatedTimeSerie(sampling=1, max_size=60, - aggregation_method='mean') - tsb = carbonara.BoundTimeSerie(block_size=ts.sampling) + ts = {'sampling': 1, 'size': 60, 'agg': 'mean'} + tsb = carbonara.BoundTimeSerie(block_size=ts['sampling']) tsb.set_values([ (datetime.datetime(2014, 1, 1, 12, 0, 1, 2300), 1), @@ -693,7 +714,8 @@ class TestAggregatedTimeSerie(base.BaseTestCase): (datetime.datetime(2014, 1, 1, 12, 0, 2, 4500), 3), (datetime.datetime(2014, 1, 1, 12, 0, 2, 7800), 4), (datetime.datetime(2014, 1, 1, 12, 0, 3, 8), 2.5), - ], before_truncate_callback=ts.update) + ], before_truncate_callback=functools.partial( + self._resample_and_merge, agg_dict=ts)) self.assertEqual( [ @@ -701,11 +723,13 @@ class TestAggregatedTimeSerie(base.BaseTestCase): (pandas.Timestamp('2014-01-01 12:00:02'), 1.0, 3.5), (pandas.Timestamp('2014-01-01 12:00:03'), 1.0, 2.5), ], - ts.fetch()) + ts['return'].fetch()) tsb.set_values([ (datetime.datetime(2014, 1, 1, 12, 0, 2, 99), 9), - ], ignore_too_old_timestamps=True, before_truncate_callback=ts.update) + ], ignore_too_old_timestamps=True, + before_truncate_callback=functools.partial( + self._resample_and_merge, agg_dict=ts)) self.assertEqual( [ @@ -713,12 +737,14 @@ class TestAggregatedTimeSerie(base.BaseTestCase): (pandas.Timestamp('2014-01-01 12:00:02'), 1.0, 3.5), (pandas.Timestamp('2014-01-01 12:00:03'), 1.0, 2.5), ], - ts.fetch()) + ts['return'].fetch()) tsb.set_values([ (datetime.datetime(2014, 1, 1, 12, 0, 2, 99), 9), (datetime.datetime(2014, 1, 1, 12, 0, 3, 9), 4.5), - ], ignore_too_old_timestamps=True, before_truncate_callback=ts.update) + ], ignore_too_old_timestamps=True, + before_truncate_callback=functools.partial( + self._resample_and_merge, agg_dict=ts)) self.assertEqual( [ @@ -726,27 +752,47 @@ class TestAggregatedTimeSerie(base.BaseTestCase): (pandas.Timestamp('2014-01-01 12:00:02'), 1.0, 3.5), (pandas.Timestamp('2014-01-01 12:00:03'), 1.0, 3.5), ], - ts.fetch()) + ts['return'].fetch()) def 
test_aggregated_nominal(self): - tsc1 = carbonara.AggregatedTimeSerie(sampling=60, max_size=10, - aggregation_method='mean') - tsc12 = carbonara.AggregatedTimeSerie(sampling=300, max_size=6, - aggregation_method='mean') - tsb1 = carbonara.BoundTimeSerie(block_size=tsc12.sampling) - tsc2 = carbonara.AggregatedTimeSerie(sampling=60, max_size=10, - aggregation_method='mean') - tsc22 = carbonara.AggregatedTimeSerie(sampling=300, max_size=6, - aggregation_method='mean') - tsb2 = carbonara.BoundTimeSerie(block_size=tsc22.sampling) + tsc1 = {'sampling': 60, 'size': 10, 'agg': 'mean'} + tsc12 = {'sampling': 300, 'size': 6, 'agg': 'mean'} + tsb1 = carbonara.BoundTimeSerie(block_size=tsc12['sampling']) + tsc2 = {'sampling': 60, 'size': 10, 'agg': 'mean'} + tsc22 = {'sampling': 300, 'size': 6, 'agg': 'mean'} + tsb2 = carbonara.BoundTimeSerie(block_size=tsc22['sampling']) def ts1_update(ts): - tsc1.update(ts) - tsc12.update(ts) + grouped = ts.group_serie(tsc1['sampling']) + existing = tsc1.get('return') + tsc1['return'] = carbonara.AggregatedTimeSerie.from_grouped_serie( + grouped, tsc1['sampling'], tsc1['agg'], + max_size=tsc1['size']) + if existing: + tsc1['return'].merge(existing) + grouped = ts.group_serie(tsc12['sampling']) + existing = tsc12.get('return') + tsc12['return'] = carbonara.AggregatedTimeSerie.from_grouped_serie( + grouped, tsc12['sampling'], tsc12['agg'], + max_size=tsc12['size']) + if existing: + tsc12['return'].merge(existing) def ts2_update(ts): - tsc2.update(ts) - tsc22.update(ts) + grouped = ts.group_serie(tsc2['sampling']) + existing = tsc2.get('return') + tsc2['return'] = carbonara.AggregatedTimeSerie.from_grouped_serie( + grouped, tsc2['sampling'], tsc2['agg'], + max_size=tsc2['size']) + if existing: + tsc2['return'].merge(existing) + grouped = ts.group_serie(tsc22['sampling']) + existing = tsc22.get('return') + tsc22['return'] = carbonara.AggregatedTimeSerie.from_grouped_serie( + grouped, tsc22['sampling'], tsc22['agg'], + max_size=tsc22['size']) + 
if existing: + tsc22['return'].merge(existing) tsb1.set_values([ (datetime.datetime(2014, 1, 1, 11, 46, 4), 4), @@ -787,7 +833,8 @@ class TestAggregatedTimeSerie(base.BaseTestCase): ], before_truncate_callback=ts2_update) output = carbonara.AggregatedTimeSerie.aggregated( - [tsc1, tsc12, tsc2, tsc22], 'mean') + [tsc1['return'], tsc12['return'], tsc2['return'], tsc22['return']], + 'mean') self.assertEqual([ (datetime.datetime(2014, 1, 1, 11, 45), 300.0, 5.75), (datetime.datetime(2014, 1, 1, 11, 50), 300.0, 27.5), @@ -807,29 +854,29 @@ class TestAggregatedTimeSerie(base.BaseTestCase): ], output) def test_aggregated_partial_overlap(self): - tsc1 = carbonara.AggregatedTimeSerie(sampling=1, max_size=86400, - aggregation_method='mean') - tsb1 = carbonara.BoundTimeSerie(block_size=tsc1.sampling) - tsc2 = carbonara.AggregatedTimeSerie(sampling=1, max_size=86400, - aggregation_method='mean') - tsb2 = carbonara.BoundTimeSerie(block_size=tsc2.sampling) + tsc1 = {'sampling': 1, 'size': 86400, 'agg': 'mean'} + tsb1 = carbonara.BoundTimeSerie(block_size=tsc1['sampling']) + tsc2 = {'sampling': 1, 'size': 60, 'agg': 'mean'} + tsb2 = carbonara.BoundTimeSerie(block_size=tsc2['sampling']) tsb1.set_values([ (datetime.datetime(2015, 12, 3, 13, 19, 15), 1), (datetime.datetime(2015, 12, 3, 13, 20, 15), 1), (datetime.datetime(2015, 12, 3, 13, 21, 15), 1), (datetime.datetime(2015, 12, 3, 13, 22, 15), 1), - ], before_truncate_callback=tsc1.update) + ], before_truncate_callback=functools.partial( + self._resample_and_merge, agg_dict=tsc1)) tsb2.set_values([ (datetime.datetime(2015, 12, 3, 13, 21, 15), 10), (datetime.datetime(2015, 12, 3, 13, 22, 15), 10), (datetime.datetime(2015, 12, 3, 13, 23, 15), 10), (datetime.datetime(2015, 12, 3, 13, 24, 15), 10), - ], before_truncate_callback=tsc2.update) + ], before_truncate_callback=functools.partial( + self._resample_and_merge, agg_dict=tsc2)) output = carbonara.AggregatedTimeSerie.aggregated( - [tsc1, tsc2], aggregation="sum") + [tsc1['return'], 
tsc2['return']], aggregation="sum") self.assertEqual([ (pandas.Timestamp('2015-12-03 13:21:15'), 1.0, 11.0), @@ -840,7 +887,8 @@ class TestAggregatedTimeSerie(base.BaseTestCase): dtto = datetime.datetime(2015, 12, 3, 13, 25, 0) output = carbonara.AggregatedTimeSerie.aggregated( - [tsc1, tsc2], from_timestamp=dtfrom, to_timestamp=dtto, + [tsc1['return'], tsc2['return']], + from_timestamp=dtfrom, to_timestamp=dtto, aggregation="sum", needed_percent_of_overlap=0) self.assertEqual([ @@ -856,16 +904,16 @@ class TestAggregatedTimeSerie(base.BaseTestCase): # so that fail if from or to is set self.assertRaises(carbonara.UnAggregableTimeseries, carbonara.AggregatedTimeSerie.aggregated, - [tsc1, tsc2], to_timestamp=dtto, - aggregation='mean') + [tsc1['return'], tsc2['return']], + to_timestamp=dtto, aggregation='mean') self.assertRaises(carbonara.UnAggregableTimeseries, carbonara.AggregatedTimeSerie.aggregated, - [tsc1, tsc2], from_timestamp=dtfrom, - aggregation='mean') + [tsc1['return'], tsc2['return']], + from_timestamp=dtfrom, aggregation='mean') # Retry with 50% and it works output = carbonara.AggregatedTimeSerie.aggregated( - [tsc1, tsc2], from_timestamp=dtfrom, + [tsc1['return'], tsc2['return']], from_timestamp=dtfrom, aggregation="sum", needed_percent_of_overlap=50.0) self.assertEqual([ @@ -876,7 +924,7 @@ class TestAggregatedTimeSerie(base.BaseTestCase): ], output) output = carbonara.AggregatedTimeSerie.aggregated( - [tsc1, tsc2], to_timestamp=dtto, + [tsc1['return'], tsc2['return']], to_timestamp=dtto, aggregation="sum", needed_percent_of_overlap=50.0) self.assertEqual([ @@ -913,9 +961,7 @@ class TestAggregatedTimeSerie(base.BaseTestCase): timestamps=map(datetime.datetime.utcfromtimestamp, six.moves.range(points)), values=six.moves.range(points)) - agg = carbonara.AggregatedTimeSerie(sampling=sampling, - aggregation_method='mean') - agg.update(ts) + agg = self._resample(ts, sampling, 'mean') grouped_points = list(agg.split()) @@ -938,9 +984,7 @@ class 
TestAggregatedTimeSerie(base.BaseTestCase): timestamps=map(datetime.datetime.utcfromtimestamp, six.moves.range(points)), values=six.moves.range(points)) - agg = carbonara.AggregatedTimeSerie(sampling=sampling, - aggregation_method='mean') - agg.update(ts) + agg = self._resample(ts, sampling, 'mean') split = [t[1] for t in list(agg.split())] -- GitLab From 6163dfbde885e9ff71e0edc15b48f56ba71c1667 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Mon, 4 Apr 2016 09:08:05 +0200 Subject: [PATCH 0365/1483] Allow to update resource-type (add attributes) This change allows to patch a resource type. The patch payload format is the RFC6902. For now, only operation add on /attributes path is allowed. Partial-Bug: #1615077 Change-Id: Ia88396dbe771f916f9e72d7072caa03d30a8e693 --- doc/source/rest.j2 | 4 + doc/source/rest.yaml | 14 ++ etc/gnocchi/policy.json | 1 + ...205ff_add_updating_resource_type_states.py | 74 ++++++++++ gnocchi/indexer/sqlalchemy.py | 102 +++++++++++--- gnocchi/indexer/sqlalchemy_base.py | 11 +- gnocchi/rest/__init__.py | 82 ++++++++++- .../tests/gabbi/gabbits/resource-type.yaml | 127 +++++++++++++++++- .../resource-type-patch-8b6a85009db0671c.yaml | 7 + requirements.txt | 1 + 10 files changed, 397 insertions(+), 26 deletions(-) create mode 100644 gnocchi/indexer/alembic/versions/27d2a1d205ff_add_updating_resource_type_states.py create mode 100644 releasenotes/notes/resource-type-patch-8b6a85009db0671c.yaml diff --git a/doc/source/rest.j2 b/doc/source/rest.j2 index 7e2d309b..b8ad9362 100644 --- a/doc/source/rest.j2 +++ b/doc/source/rest.j2 @@ -368,6 +368,10 @@ It can also be deleted if no more resources are associated to it: {{ scenarios['delete-resource-type']['doc'] }} +Attributes can be added: + +{{ scenarios['patch-resource-type']['doc'] }} + Creating resource type means creation of new tables on the indexer backend. This is heavy operation that will lock some tables for a short amount of times. 
When the resource type is created, its initial `state` is `creating`. When the diff --git a/doc/source/rest.yaml b/doc/source/rest.yaml index 8204394c..122fb402 100644 --- a/doc/source/rest.yaml +++ b/doc/source/rest.yaml @@ -389,6 +389,20 @@ - name: list-resource-type request: GET /v1/resource_type HTTP/1.1 +- name: patch-resource-type + request: | + PATCH /v1/resource_type/my_custom_type HTTP/1.1 + Content-Type: application/json-patch+json + + [ + { + "op": "add", + "path": "/attributes/awesome-stuff", + "value": {"type": "bool", "required": false} + } + ] + + - name: delete-resource-type request: DELETE /v1/resource_type/my_custom_type HTTP/1.1 diff --git a/etc/gnocchi/policy.json b/etc/gnocchi/policy.json index 78b0a23a..4c55b031 100644 --- a/etc/gnocchi/policy.json +++ b/etc/gnocchi/policy.json @@ -14,6 +14,7 @@ "create resource type": "role:admin", "delete resource type": "role:admin", + "update resource type": "role:admin", "list resource type": "", "get resource type": "", diff --git a/gnocchi/indexer/alembic/versions/27d2a1d205ff_add_updating_resource_type_states.py b/gnocchi/indexer/alembic/versions/27d2a1d205ff_add_updating_resource_type_states.py new file mode 100644 index 00000000..57d8ad5c --- /dev/null +++ b/gnocchi/indexer/alembic/versions/27d2a1d205ff_add_updating_resource_type_states.py @@ -0,0 +1,74 @@ +# Copyright 2016 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + +"""Add updating resource type states + +Revision ID: 27d2a1d205ff +Revises: 7e6f9d542f8b +Create Date: 2016-08-31 14:05:34.316496 + +""" + +from alembic import op +import sqlalchemy as sa + +from gnocchi.indexer import sqlalchemy_base +from gnocchi import utils + +# revision identifiers, used by Alembic. +revision = '27d2a1d205ff' +down_revision = '7e6f9d542f8b' +branch_labels = None +depends_on = None + + +resource_type = sa.sql.table( + 'resource_type', + sa.sql.column('updated_at', sqlalchemy_base.PreciseTimestamp())) + + +def upgrade(): + + op.alter_column('resource_type', 'state', + type_=sa.Enum("active", "creating", + "creation_error", "deleting", + "deletion_error", "updating", + "updating_error", + name="resource_type_state_enum"), + nullable=False, + server_default="creating") + + # NOTE(sileht): postgresql have a builtin ENUM type, so + # just altering the column won't works. + # https://bitbucket.org/zzzeek/alembic/issues/270/altering-enum-type + # Does it break offline migration because we use get_bind() ? + + # NOTE(luogangyi): since we cannot use 'ALTER TYPE' in transaction, + # we split the 'ALTER TYPE' operation into several steps. 
+ bind = op.get_bind() + if bind and bind.engine.name == "postgresql": + op.execute("ALTER TYPE resource_type_state_enum ADD VALUE 'updating';") + op.execute("ALTER TYPE resource_type_state_enum ADD VALUE " + "'updating_error';") + + op.add_column("resource_type", + sa.Column("updated_at", + sqlalchemy_base.PreciseTimestamp(), + nullable=True)) + + op.execute(resource_type.update().values({'updated_at': utils.utcnow()})) + op.alter_column("resource_type", "updated_at", + type_=sqlalchemy_base.PreciseTimestamp(), + nullable=False) diff --git a/gnocchi/indexer/sqlalchemy.py b/gnocchi/indexer/sqlalchemy.py index 457b8c6b..a03ac390 100644 --- a/gnocchi/indexer/sqlalchemy.py +++ b/gnocchi/indexer/sqlalchemy.py @@ -20,6 +20,8 @@ import os.path import threading import uuid +from alembic import migration +from alembic import operations import oslo_db.api from oslo_db import exception from oslo_db.sqlalchemy import enginefacade @@ -106,8 +108,10 @@ class PerInstanceFacade(object): class ResourceClassMapper(object): def __init__(self): + # FIXME(sileht): 3 attributes, perhaps we need a better structure. self._cache = {'generic': {'resource': base.Resource, - 'history': base.ResourceHistory}} + 'history': base.ResourceHistory, + 'updated_at': utils.utcnow()}} @staticmethod def _build_class_mappers(resource_type, baseclass=None): @@ -127,14 +131,24 @@ class ResourceClassMapper(object): {"__tablename__": ("%s_history" % tablename), "__table_args__": tables_args}) return {'resource': resource_ext, - 'history': resource_history_ext} + 'history': resource_history_ext, + 'updated_at': resource_type.updated_at} def get_classes(self, resource_type): # NOTE(sileht): We don't care about concurrency here because we allow # sqlalchemy to override its global object with extend_existing=True # this is safe because classname and tablename are uuid. 
try: - return self._cache[resource_type.tablename] + mappers = self._cache[resource_type.tablename] + # Cache is outdated + if (resource_type.name != "generic" + and resource_type.updated_at > mappers['updated_at']): + for table_purpose in ['resource', 'history']: + Base.metadata.remove(Base.metadata.tables[ + mappers[table_purpose].__tablename__]) + del self._cache[resource_type.tablename] + raise KeyError + return mappers except KeyError: mapper = self._build_class_mappers(resource_type) self._cache[resource_type.tablename] = mapper @@ -147,8 +161,8 @@ class ResourceClassMapper(object): "creating") mappers = self.get_classes(resource_type) - tables = [Base.metadata.tables[klass.__tablename__] - for klass in mappers.values()] + tables = [Base.metadata.tables[mappers["resource"].__tablename__], + Base.metadata.tables[mappers["history"].__tablename__]] try: with facade.writer_connection() as connection: @@ -187,8 +201,8 @@ class ResourceClassMapper(object): mappers = self.get_classes(resource_type) del self._cache[resource_type.tablename] - tables = [Base.metadata.tables[klass.__tablename__] - for klass in mappers.values()] + tables = [Base.metadata.tables[mappers['resource'].__tablename__], + Base.metadata.tables[mappers['history'].__tablename__]] # NOTE(sileht): Base.metadata.drop_all doesn't # issue CASCADE stuffs correctly at least on postgresql @@ -376,6 +390,48 @@ class SQLAlchemyIndexer(indexer.IndexerDriver): resource_type.state = "active" return resource_type + def update_resource_type(self, name, add_attributes=None): + if not add_attributes: + return + self._set_resource_type_state(name, "updating", "active") + + try: + with self.facade.independent_writer() as session: + rt = self._get_resource_type(session, name) + + with self.facade.writer_connection() as connection: + ctx = migration.MigrationContext.configure(connection) + op = operations.Operations(ctx) + with op.batch_alter_table(rt.tablename) as batch_op: + for attr in add_attributes: + # 
TODO(sileht): When attr.required is True, we have + # to pass a default. rest layer current protect us, + # requied = True is not yet allowed + batch_op.add_column(sqlalchemy.Column( + attr.name, attr.satype, + nullable=not attr.required)) + + rt.state = "active" + rt.updated_at = utils.utcnow() + rt.attributes.extend(add_attributes) + # FIXME(sileht): yeah that's wierd but attributes is a custom + # json column and 'extend' doesn't trigger sql update, this + # enforce the update. I wonder if sqlalchemy provides something + # on column description side. + sqlalchemy.orm.attributes.flag_modified(rt, 'attributes') + + except Exception: + # NOTE(sileht): We fail the DDL, we have no way to automatically + # recover, just set a particular state + # TODO(sileht): Create a repair REST endpoint that delete + # columns not existing in the database but in the resource type + # description. This will allow to pass wrong update_error to active + # state, that currently not possible. + self._set_resource_type_state(name, "updating_error") + raise + + return rt + def get_resource_type(self, name): with self.facade.independent_reader() as session: return self._get_resource_type(session, name) @@ -387,12 +443,20 @@ class SQLAlchemyIndexer(indexer.IndexerDriver): return resource_type @retry_on_deadlock - def _set_resource_type_state(self, name, state): + def _set_resource_type_state(self, name, state, + expected_previous_state=None): with self.facade.writer() as session: q = session.query(ResourceType) q = q.filter(ResourceType.name == name) + if expected_previous_state is not None: + q = q.filter(ResourceType.state == expected_previous_state) update = q.update({'state': state}) if update == 0: + if expected_previous_state is not None: + rt = session.query(ResourceType).get(name) + if rt: + raise indexer.UnexpectedResourceTypeState( + name, expected_previous_state, rt.state) raise indexer.IndexerException( "Fail to set resource type state of %s to %s" % (name, state)) @@ -474,7 
+538,7 @@ class SQLAlchemyIndexer(indexer.IndexerDriver): self._delete_resource_type(name) - def _resource_type_to_classes(self, session, name): + def _resource_type_to_mappers(self, session, name): resource_type = self._get_resource_type(session, name) if resource_type.state != "active": raise indexer.UnexpectedResourceTypeState( @@ -642,7 +706,7 @@ class SQLAlchemyIndexer(indexer.IndexerDriver): raise ValueError( "Start timestamp cannot be after end timestamp") with self.facade.writer() as session: - resource_cls = self._resource_type_to_classes( + resource_cls = self._resource_type_to_mappers( session, resource_type)['resource'] r = resource_cls( id=id, @@ -678,9 +742,9 @@ class SQLAlchemyIndexer(indexer.IndexerDriver): create_revision=True, **kwargs): with self.facade.writer() as session: - classes = self._resource_type_to_classes(session, resource_type) - resource_cls = classes["resource"] - resource_history_cls = classes["history"] + mappers = self._resource_type_to_mappers(session, resource_type) + resource_cls = mappers["resource"] + resource_history_cls = mappers["history"] try: # NOTE(sileht): We use FOR UPDATE that is not galera friendly, @@ -799,7 +863,7 @@ class SQLAlchemyIndexer(indexer.IndexerDriver): @retry_on_deadlock def get_resource(self, resource_type, resource_id, with_metrics=False): with self.facade.independent_reader() as session: - resource_cls = self._resource_type_to_classes( + resource_cls = self._resource_type_to_mappers( session, resource_type)['resource'] q = session.query( resource_cls).filter( @@ -809,9 +873,9 @@ class SQLAlchemyIndexer(indexer.IndexerDriver): return q.first() def _get_history_result_mapper(self, session, resource_type): - classes = self._resource_type_to_classes(session, resource_type) - resource_cls = classes['resource'] - history_cls = classes['history'] + mappers = self._resource_type_to_mappers(session, resource_type) + resource_cls = mappers['resource'] + history_cls = mappers['history'] resource_cols = {} 
history_cols = {} @@ -863,7 +927,7 @@ class SQLAlchemyIndexer(indexer.IndexerDriver): target_cls = self._get_history_result_mapper( session, resource_type) else: - target_cls = self._resource_type_to_classes( + target_cls = self._resource_type_to_mappers( session, resource_type)["resource"] q = session.query(target_cls) @@ -916,7 +980,7 @@ class SQLAlchemyIndexer(indexer.IndexerDriver): all_resources.extend(resources) else: try: - target_cls = self._resource_type_to_classes( + target_cls = self._resource_type_to_mappers( session, type)['history' if is_history else 'resource'] except (indexer.UnexpectedResourceTypeState, diff --git a/gnocchi/indexer/sqlalchemy_base.py b/gnocchi/indexer/sqlalchemy_base.py index 09788214..fa9c5021 100644 --- a/gnocchi/indexer/sqlalchemy_base.py +++ b/gnocchi/indexer/sqlalchemy_base.py @@ -244,10 +244,19 @@ class ResourceType(Base, GnocchiBase, resource_type.ResourceType): attributes = sqlalchemy.Column(ResourceTypeAttributes) state = sqlalchemy.Column(sqlalchemy.Enum("active", "creating", "creation_error", "deleting", - "deletion_error", + "deletion_error", "updating", + "updating_error", name="resource_type_state_enum"), nullable=False, server_default="creating") + updated_at = sqlalchemy.Column(PreciseTimestamp, nullable=False, + # NOTE(jd): We would like to use + # sqlalchemy.func.now, but we can't + # because the type of PreciseTimestamp in + # MySQL is not a Timestamp, so it would + # not store a timestamp but a date as an + # integer. 
+ default=lambda: utils.utcnow()) def to_baseclass(self): cols = {} diff --git a/gnocchi/rest/__init__.py b/gnocchi/rest/__init__.py index 3ac68403..70dad8fc 100644 --- a/gnocchi/rest/__init__.py +++ b/gnocchi/rest/__init__.py @@ -17,6 +17,7 @@ import itertools import uuid +import jsonpatch from oslo_utils import strutils import pecan from pecan import rest @@ -146,10 +147,13 @@ def set_resp_location_hdr(location): pecan.response.headers['Location'] = location -def deserialize(): +def deserialize(expected_content_types=None): + if expected_content_types is None: + expected_content_types = ("application/json", ) + mime_type, options = werkzeug.http.parse_options_header( pecan.request.headers.get('Content-Type')) - if mime_type != "application/json": + if mime_type not in expected_content_types: abort(415) try: params = json.load(pecan.request.body_file_raw, @@ -159,10 +163,11 @@ def deserialize(): return params -def deserialize_and_validate(schema, required=True): +def deserialize_and_validate(schema, required=True, + expected_content_types=None): try: return voluptuous.Schema(schema, required=required)( - deserialize()) + deserialize(expected_content_types=expected_content_types)) except voluptuous.Error as e: abort(400, "Invalid input: %s" % e) @@ -747,6 +752,20 @@ def etag_set_headers(obj): pecan.response.last_modified = obj.lastmodified +def AttributesPath(value): + if value.startswith("/attributes"): + return value + raise ValueError("Only attributes can be modified") + + +# TODO(sileht): Implements delete op +ResourceTypeJsonPatchSchema = voluptuous.Schema([{ + "op": "add", + "path": AttributesPath, + "value": dict, +}]) + + class ResourceTypeController(rest.RestController): def __init__(self, name): self._name = name @@ -760,6 +779,61 @@ class ResourceTypeController(rest.RestController): enforce("get resource type", rt) return rt + @pecan.expose('json') + def patch(self): + # NOTE(sileht): should we check for "application/json-patch+json" + # Content-Type ? 
+ + try: + rt = pecan.request.indexer.get_resource_type(self._name) + except indexer.NoSuchResourceType as e: + abort(404, e) + enforce("update resource type", rt) + + # Ensure this is a valid jsonpatch dict + patch = deserialize_and_validate( + ResourceTypeJsonPatchSchema, + expected_content_types=["application/json-patch+json"]) + + # Add new attributes to the resource type + rt_json_current = rt.jsonify() + try: + rt_json_next = jsonpatch.apply_patch(rt_json_current, patch) + except jsonpatch.JsonPatchException as e: + abort(400, e) + del rt_json_next['state'] + + # Validate that the whole new resource_type is valid + schema = pecan.request.indexer.get_resource_type_schema() + try: + rt_json_next = voluptuous.Schema(schema, required=True)( + rt_json_next) + except voluptuous.Error as e: + abort(400, "Invalid input: %s" % e) + + # Get only newly formatted attributes + attrs = {k: v for k, v in rt_json_next["attributes"].items() + if k not in rt_json_current["attributes"]} + + try: + attrs = schema.attributes_from_dict(attrs) + except resource_type.InvalidResourceAttributeName as e: + abort(400, e) + + # TODO(sileht): Add a default field on an attribute + # to be able to fill non-nullable column on sql side. 
+ # And obviousy remove this limitation + for attr in attrs: + if attr.required: + abort(400, ValueError("Adding required attributes is not yet " + "possible.")) + + try: + return pecan.request.indexer.update_resource_type( + self._name, add_attributes=attrs) + except indexer.NoSuchResourceType as e: + abort(400, e) + @pecan.expose() def delete(self): try: diff --git a/gnocchi/tests/gabbi/gabbits/resource-type.yaml b/gnocchi/tests/gabbi/gabbits/resource-type.yaml index 9fa32a5c..d7c6afe3 100644 --- a/gnocchi/tests/gabbi/gabbits/resource-type.yaml +++ b/gnocchi/tests/gabbi/gabbits/resource-type.yaml @@ -305,8 +305,6 @@ tests: $.uuid: e495ebad-be64-46c0-81d6-b079beb48df9 $.int: -# Ensure we can't delete the type - - name: list resource history GET: /v1/resource/my_custom_resource/d11edfca-4393-4fda-b94d-b05a3a1b3747/history?sort=revision_end:asc-nullslast request_headers: @@ -320,6 +318,131 @@ tests: $[1].name: foo $[1].foobar: what +# CRUD resource type attributes + + - name: post a new resource attribute + url: /v1/resource_type/my_custom_resource + method: patch + request_headers: + x-roles: admin + content-type: application/json-patch+json + data: + - op: add + path: /attributes/newstuff + value: + type: string + required: False + min_length: 0 + max_length: 255 + status: 200 + response_json_paths: + $.name: my_custom_resource + $.attributes: + name: + type: string + required: True + min_length: 2 + max_length: 5 + foobar: + type: string + required: False + min_length: 0 + max_length: 255 + uuid: + type: uuid + required: True + int: + type: number + required: False + min: -2 + max: 3 + float: + type: number + required: false + min: -2.3 + max: + bool: + type: bool + required: false + newstuff: + type: string + required: False + min_length: 0 + max_length: 255 + + - name: get the new custom resource type + url: /v1/resource_type/my_custom_resource + response_json_paths: + $.name: my_custom_resource + $.attributes: + name: + type: string + required: True + 
min_length: 2 + max_length: 5 + foobar: + type: string + required: False + min_length: 0 + max_length: 255 + uuid: + type: uuid + required: True + int: + type: number + required: False + min: -2 + max: 3 + float: + type: number + required: false + min: -2.3 + max: + bool: + type: bool + required: false + newstuff: + type: string + required: False + min_length: 0 + max_length: 255 + +# Invalid patch + + - name: patch a resource attribute replace + url: /v1/resource_type/my_custom_resource + method: patch + request_headers: + x-roles: admin + content-type: application/json-patch+json + data: + - op: replace + path: /attributes/newstuff + value: + type: string + required: False + min_length: 0 + max_length: 255 + status: 400 + + - name: patch a resource attribute type not exist + url: /v1/resource_type/my_custom_resource + method: patch + request_headers: + x-roles: admin + content-type: application/json-patch+json + data: + - op: add + path: /attributes/newstuff + value: + type: notexist + required: False + min_length: 0 + max_length: 255 + status: 400 + +# Ensure we can't delete the type + - name: delete in use resource_type DELETE: /v1/resource_type/my_custom_resource request_headers: diff --git a/releasenotes/notes/resource-type-patch-8b6a85009db0671c.yaml b/releasenotes/notes/resource-type-patch-8b6a85009db0671c.yaml new file mode 100644 index 00000000..c6a81713 --- /dev/null +++ b/releasenotes/notes/resource-type-patch-8b6a85009db0671c.yaml @@ -0,0 +1,7 @@ +--- +features: + - |- + a new REST API endpoint have been added to be able + to update a resource-type: "PATCH /v1/resource-type/foobar" + The expected payload is in RFC6902 format. Some examples + can be found in the documentation. 
diff --git a/requirements.txt b/requirements.txt index 88ef618f..23cf70d1 100644 --- a/requirements.txt +++ b/requirements.txt @@ -11,6 +11,7 @@ pandas>=0.17.0 pecan>=0.9 pytimeparse>=1.1.5 futures +jsonpatch cotyledon>=1.2.2 requests six -- GitLab From 593fd452686e69503dcf68a68bd88d554667a226 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Wed, 31 Aug 2016 17:25:10 +0200 Subject: [PATCH 0366/1483] Allow to update resource-type (delete attributes) This change extends the resource type patch to allow to delete attributes. Partial-Bug: #1615077 Change-Id: Iea24c075755a0e75f618ba8498515b2f8baa0a2c --- doc/source/rest.j2 | 2 +- doc/source/rest.yaml | 4 + gnocchi/indexer/sqlalchemy.py | 10 ++- gnocchi/rest/__init__.py | 25 ++++-- .../tests/gabbi/gabbits/resource-type.yaml | 89 +++++++++++++++++-- 5 files changed, 111 insertions(+), 19 deletions(-) diff --git a/doc/source/rest.j2 b/doc/source/rest.j2 index b8ad9362..aeee4a42 100644 --- a/doc/source/rest.j2 +++ b/doc/source/rest.j2 @@ -368,7 +368,7 @@ It can also be deleted if no more resources are associated to it: {{ scenarios['delete-resource-type']['doc'] }} -Attributes can be added: +Attributes can be added or removed: {{ scenarios['patch-resource-type']['doc'] }} diff --git a/doc/source/rest.yaml b/doc/source/rest.yaml index 122fb402..19871d2d 100644 --- a/doc/source/rest.yaml +++ b/doc/source/rest.yaml @@ -399,6 +399,10 @@ "op": "add", "path": "/attributes/awesome-stuff", "value": {"type": "bool", "required": false} + }, + { + "op": "remove", + "path": "/attributes/prefix" } ] diff --git a/gnocchi/indexer/sqlalchemy.py b/gnocchi/indexer/sqlalchemy.py index a03ac390..02f16fb0 100644 --- a/gnocchi/indexer/sqlalchemy.py +++ b/gnocchi/indexer/sqlalchemy.py @@ -390,8 +390,9 @@ class SQLAlchemyIndexer(indexer.IndexerDriver): resource_type.state = "active" return resource_type - def update_resource_type(self, name, add_attributes=None): - if not add_attributes: + def update_resource_type(self, name, 
add_attributes=None, + del_attributes=None): + if not add_attributes and not del_attributes: return self._set_resource_type_state(name, "updating", "active") @@ -403,6 +404,8 @@ class SQLAlchemyIndexer(indexer.IndexerDriver): ctx = migration.MigrationContext.configure(connection) op = operations.Operations(ctx) with op.batch_alter_table(rt.tablename) as batch_op: + for attr in del_attributes: + batch_op.drop_column(attr) for attr in add_attributes: # TODO(sileht): When attr.required is True, we have # to pass a default. rest layer current protect us, @@ -414,6 +417,9 @@ class SQLAlchemyIndexer(indexer.IndexerDriver): rt.state = "active" rt.updated_at = utils.utcnow() rt.attributes.extend(add_attributes) + for attr in list(rt.attributes): + if attr.name in del_attributes: + rt.attributes.remove(attr) # FIXME(sileht): yeah that's wierd but attributes is a custom # json column and 'extend' doesn't trigger sql update, this # enforce the update. I wonder if sqlalchemy provides something diff --git a/gnocchi/rest/__init__.py b/gnocchi/rest/__init__.py index 70dad8fc..f30afd19 100644 --- a/gnocchi/rest/__init__.py +++ b/gnocchi/rest/__init__.py @@ -758,11 +758,10 @@ def AttributesPath(value): raise ValueError("Only attributes can be modified") -# TODO(sileht): Implements delete op ResourceTypeJsonPatchSchema = voluptuous.Schema([{ - "op": "add", + "op": voluptuous.Any("add", "remove"), "path": AttributesPath, - "value": dict, + voluptuous.Optional("value"): dict, }]) @@ -811,26 +810,34 @@ class ResourceTypeController(rest.RestController): except voluptuous.Error as e: abort(400, "Invalid input: %s" % e) - # Get only newly formatted attributes - attrs = {k: v for k, v in rt_json_next["attributes"].items() - if k not in rt_json_current["attributes"]} + # Get only newly formatted and deleted attributes + add_attrs = {k: v for k, v in rt_json_next["attributes"].items() + if k not in rt_json_current["attributes"]} + del_attrs = [k for k in rt_json_current["attributes"] + if k 
not in rt_json_next["attributes"]] + + if not add_attrs and not del_attrs: + # NOTE(sileht): just returns the resource, the asked changes + # just do nothing + return rt try: - attrs = schema.attributes_from_dict(attrs) + add_attrs = schema.attributes_from_dict(add_attrs) except resource_type.InvalidResourceAttributeName as e: abort(400, e) # TODO(sileht): Add a default field on an attribute # to be able to fill non-nullable column on sql side. # And obviousy remove this limitation - for attr in attrs: + for attr in add_attrs: if attr.required: abort(400, ValueError("Adding required attributes is not yet " "possible.")) try: return pecan.request.indexer.update_resource_type( - self._name, add_attributes=attrs) + self._name, add_attributes=add_attrs, + del_attributes=del_attrs) except indexer.NoSuchResourceType as e: abort(400, e) diff --git a/gnocchi/tests/gabbi/gabbits/resource-type.yaml b/gnocchi/tests/gabbi/gabbits/resource-type.yaml index d7c6afe3..6079f1e1 100644 --- a/gnocchi/tests/gabbi/gabbits/resource-type.yaml +++ b/gnocchi/tests/gabbi/gabbits/resource-type.yaml @@ -334,6 +334,8 @@ tests: required: False min_length: 0 max_length: 255 + - op: remove + path: /attributes/foobar status: 200 response_json_paths: $.name: my_custom_resource @@ -343,11 +345,6 @@ tests: required: True min_length: 2 max_length: 5 - foobar: - type: string - required: False - min_length: 0 - max_length: 255 uuid: type: uuid required: True @@ -380,11 +377,55 @@ tests: required: True min_length: 2 max_length: 5 - foobar: + uuid: + type: uuid + required: True + int: + type: number + required: False + min: -2 + max: 3 + float: + type: number + required: false + min: -2.3 + max: + bool: + type: bool + required: false + newstuff: type: string required: False min_length: 0 max_length: 255 + +# Invalid patch + + - name: add/delete the same resource attribute + url: /v1/resource_type/my_custom_resource + method: patch + request_headers: + x-roles: admin + content-type: 
application/json-patch+json + data: + - op: add + path: /attributes/what + value: + type: string + required: False + min_length: 0 + max_length: 255 + - op: remove + path: /attributes/what + status: 200 + response_json_paths: + $.name: my_custom_resource + $.attributes: + name: + type: string + required: True + min_length: 2 + max_length: 5 uuid: type: uuid required: True @@ -407,7 +448,25 @@ tests: min_length: 0 max_length: 255 -# Invalid patch + - name: delete/add the same resource attribute + url: /v1/resource_type/my_custom_resource + method: patch + request_headers: + x-roles: admin + content-type: application/json-patch+json + data: + - op: remove + path: /attributes/what + - op: add + path: /attributes/what + value: + type: string + required: False + min_length: 0 + max_length: 255 + status: 400 + response_strings: + - "can't remove non-existent object 'what'" - name: patch a resource attribute replace url: /v1/resource_type/my_custom_resource @@ -424,6 +483,9 @@ tests: min_length: 0 max_length: 255 status: 400 + response_strings: + - "Invalid input: not a valid value for dictionary value @ data[0][" + - "'op']" - name: patch a resource attribute type not exist url: /v1/resource_type/my_custom_resource @@ -441,6 +503,19 @@ tests: max_length: 255 status: 400 + - name: patch a resource attribute type unknown + url: /v1/resource_type/my_custom_resource + method: patch + request_headers: + x-roles: admin + content-type: application/json-patch+json + data: + - op: remove + path: /attributes/unknown + status: 400 + response_strings: + - "can't remove non-existent object 'unknown'" + # Ensure we can't delete the type - name: delete in use resource_type -- GitLab From d3276209545e755d9db0067c837ddcf4e63add70 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Thu, 8 Sep 2016 17:05:13 +0200 Subject: [PATCH 0367/1483] swift: bump swiftclient dependency to 3.1.0 and set retries=0 Now that swiftclient has a fix for Keystone authentication, this reverts back to retries=0 
on the client and depends on the more recent version of swiftclient. Change-Id: Iae144426701a4cfe8937a811e761d9c8cb6d86db Related-Bug: #1589926 --- gnocchi/storage/swift.py | 2 +- setup.cfg | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/gnocchi/storage/swift.py b/gnocchi/storage/swift.py index ff4f7cbf..68f2f1d4 100644 --- a/gnocchi/storage/swift.py +++ b/gnocchi/storage/swift.py @@ -93,7 +93,7 @@ class SwiftStorage(_carbonara.CarbonaraBasedStorage): timeout=conf.swift_timeout, os_options={'endpoint_type': conf.swift_endpoint_type, 'user_domain_name': conf.swift_user_domain_name}, - retries=1) + retries=0) self._container_prefix = conf.swift_container_prefix self.swift.put_container(self.MEASURE_PREFIX) diff --git a/setup.cfg b/setup.cfg index f32100fb..a5ea71c7 100644 --- a/setup.cfg +++ b/setup.cfg @@ -36,7 +36,7 @@ postgresql = sqlalchemy-utils alembic>=0.7.6,!=0.8.1 swift = - python-swiftclient>=3.0.0 + python-swiftclient>=3.1.0 msgpack-python lz4 tooz>=1.38 -- GitLab From 9cec1076bf5741973db27209006e74e04bee5f27 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Wed, 14 Sep 2016 16:46:30 +0200 Subject: [PATCH 0368/1483] gendoc: allow DELETE to have body Change-Id: Ib7f49f92dfa2d13f3b34fdd16e5229b90c849071 --- gnocchi/gendoc.py | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/gnocchi/gendoc.py b/gnocchi/gendoc.py index b3db9e5e..780d4e20 100644 --- a/gnocchi/gendoc.py +++ b/gnocchi/gendoc.py @@ -116,6 +116,16 @@ def setup(app): fake_file.write(template.render(scenarios=scenarios).encode('utf-8')) fake_file.seek(0) request = webapp.RequestClass.from_file(fake_file) + + # TODO(jd) Fix this lame bug in webob + if request.method in ("DELETE"): + # Webob has a bug it does not read the body for DELETE, l4m3r + clen = request.content_length + if clen is None: + request.body = fake_file.read() + else: + request.body = fake_file.read(clen) + app.info("Doing request %s: %s" % (entry['name'], six.text_type(request))) with 
webapp.use_admin_user(): -- GitLab From d0406db979dd335810524b09df5654efa8f6d17e Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Wed, 14 Sep 2016 17:02:41 +0200 Subject: [PATCH 0369/1483] reno: fix formatting in resource type patch Change-Id: I14b0ccf5345bba93c22a3a58d94e1db8a2fa7237 Signed-off-by: Julien Danjou --- .../notes/resource-type-patch-8b6a85009db0671c.yaml | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/releasenotes/notes/resource-type-patch-8b6a85009db0671c.yaml b/releasenotes/notes/resource-type-patch-8b6a85009db0671c.yaml index c6a81713..a837c72d 100644 --- a/releasenotes/notes/resource-type-patch-8b6a85009db0671c.yaml +++ b/releasenotes/notes/resource-type-patch-8b6a85009db0671c.yaml @@ -1,7 +1,6 @@ --- features: - |- - a new REST API endpoint have been added to be able - to update a resource-type: "PATCH /v1/resource-type/foobar" - The expected payload is in RFC6902 format. Some examples - can be found in the documentation. + A new REST API endpoint have been added to be able to update a + resource-type: "PATCH /v1/resource-type/foobar". The expected payload is in + RFC6902 format. Some examples can be found in the documentation. -- GitLab From b7619c5cf7d2746a24f65a84f77ec381cb9a48ee Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Wed, 14 Sep 2016 17:03:39 +0200 Subject: [PATCH 0370/1483] doc: reverse release note order Change-Id: I62b286136d6150dc5f7debaa72017cfe15a0c07a Signed-off-by: Julien Danjou --- doc/source/releasenotes/index.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/source/releasenotes/index.rst b/doc/source/releasenotes/index.rst index 00837e3e..00b614e3 100644 --- a/doc/source/releasenotes/index.rst +++ b/doc/source/releasenotes/index.rst @@ -4,6 +4,6 @@ Release Notes .. 
toctree:: :maxdepth: 2 - 2.1 - 2.2 unreleased + 2.2 + 2.1 -- GitLab From 230bda7a9102458439e02391c97d22788fc3f1b6 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Sun, 11 Sep 2016 06:48:44 +0000 Subject: [PATCH 0371/1483] cleanup metricd shutdown process On shutdown metricd raise a bunch of backtrace like: TRACE cotyledon File "/vagrant/stack/gnocchi/gnocchi/cli.py", line 291, in close_services TRACE cotyledon self.queue.close() TRACE cotyledon AttributeError: 'AutoProxy[Queue]' object has no attribute 'close' TRACE cotyledon File "/vagrant/stack/gnocchi/gnocchi/cli.py", line 113, in close_services TRACE cotyledon raise NotImplementedError TRACE cotyledon NotImplementedError This change fixes the shutdown process to cleanly stop everything Change-Id: I23135a5c1d8551c8993bcd4c536c87f400d628af --- gnocchi/cli.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/gnocchi/cli.py b/gnocchi/cli.py index d2c214e0..ab506bac 100644 --- a/gnocchi/cli.py +++ b/gnocchi/cli.py @@ -110,7 +110,7 @@ class MetricProcessBase(cotyledon.Service): @staticmethod def close_services(): - raise NotImplementedError + pass @staticmethod def _run_job(): @@ -246,7 +246,6 @@ class MetricScheduler(MetricProcessBase): self.periodic.wait() self._coord.leave_group(self.GROUP_ID) self._coord.stop() - self.queue.close() class MetricJanitor(MetricProcessBase): @@ -287,9 +286,6 @@ class MetricProcessor(MetricProcessBase): LOG.error("Unexpected error during measures processing", exc_info=True) - def close_services(self): - self.queue.close() - class MetricdServiceManager(cotyledon.ServiceManager): def __init__(self, conf): @@ -303,6 +299,10 @@ class MetricdServiceManager(cotyledon.ServiceManager): self.add(MetricReporting, args=(self.conf,)) self.add(MetricJanitor, args=(self.conf,)) + def run(self): + super(MetricdServiceManager, self).run() + self.queue.close() + def metricd(): conf = service.prepare_service() -- GitLab From 233894812176705d2b09edbcfde0edc8771cbbf5 Mon Sep 
17 00:00:00 2001 From: Mehdi Abaakouk Date: Sun, 11 Sep 2016 08:56:46 +0200 Subject: [PATCH 0372/1483] tox: remove useless keyword Change-Id: Iae5d79717b25c6bdf689bd4dbaa00095214ad47f --- tox.ini | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) diff --git a/tox.ini b/tox.ini index 9d71d92b..032b2ef6 100644 --- a/tox.ini +++ b/tox.ini @@ -7,19 +7,18 @@ usedevelop = True sitepackages = False passenv = LANG OS_TEST_TIMEOUT OS_STDOUT_CAPTURE OS_STDERR_CAPTURE OS_LOG_CAPTURE GNOCCHI_TEST_* deps = .[test] - py{34,35,27}-postgresql: .[postgresql,swift,ceph,file] - py{34,35,27}-mysql: .[mysql,swift,ceph,file] + postgresql: .[postgresql,swift,ceph,file] + mysql: .[mysql,swift,ceph,file] setenv = GNOCCHI_TEST_STORAGE_DRIVER=file GNOCCHI_TEST_INDEXER_DRIVER=postgresql GNOCCHI_TEST_STORAGE_DRIVERS=file swift ceph GNOCCHI_TEST_INDEXER_DRIVERS=postgresql mysql - py{34,35,27}-{postgresql,mysql}-file: GNOCCHI_TEST_STORAGE_DRIVERS=file - py{34,35,27}-{postgresql,mysql}-swift: GNOCCHI_TEST_STORAGE_DRIVERS=swift - py{34,35,27}-{postgresql,mysql}-ceph: GNOCCHI_TEST_STORAGE_DRIVERS=ceph - py{34,35,27}-postgresql{,-file,-swift,-ceph}: GNOCCHI_TEST_INDEXER_DRIVERS=postgresql - py{34,35,27}-mysql{,-file,-swift,-ceph}: GNOCCHI_TEST_INDEXER_DRIVERS=mysql - + file: GNOCCHI_TEST_STORAGE_DRIVERS=file + swift: GNOCCHI_TEST_STORAGE_DRIVERS=swift + ceph: GNOCCHI_TEST_STORAGE_DRIVERS=ceph + postgresql: GNOCCHI_TEST_INDEXER_DRIVERS=postgresql + mysql: GNOCCHI_TEST_INDEXER_DRIVERS=mysql commands = doc8 --ignore-path doc/source/rest.rst doc/source oslo-config-generator --config-file=etc/gnocchi/gnocchi-config-generator.conf -- GitLab From 3a8f315e5316fe255ac3d893ccdab43d261fbbb8 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Thu, 15 Sep 2016 11:10:21 +0200 Subject: [PATCH 0373/1483] Allow to pass search filter in the query string This change moves some logic from gnocchiclient to server side. That allow to use the gnocchiclient syntax for search query on server side. 
Full javascript application (like grafana) will take a big advantage of this. Also all applications that was to propose text free query style will all have the same format instead of having each application reinventing a "almost" identical thing. Change-Id: Idb9f9fcaab7f14ef7d2c514455bbf0d3a04e200b --- gnocchi/rest/__init__.py | 96 +++++++++++++++++++++++++ gnocchi/tests/gabbi/gabbits/search.yaml | 7 ++ gnocchi/tests/test_rest.py | 71 ++++++++++++++++++ 3 files changed, 174 insertions(+) diff --git a/gnocchi/rest/__init__.py b/gnocchi/rest/__init__.py index f30afd19..7076bff3 100644 --- a/gnocchi/rest/__init__.py +++ b/gnocchi/rest/__init__.py @@ -21,6 +21,7 @@ import jsonpatch from oslo_utils import strutils import pecan from pecan import rest +import pyparsing import six from six.moves.urllib import parse as urllib_parse from stevedore import extension @@ -1077,6 +1078,90 @@ class ResourcesByTypeController(rest.RestController): return ResourcesController(resource_type), remainder +class InvalidQueryStringSearchAttrFilter(Exception): + def __init__(self, reason): + super(InvalidQueryStringSearchAttrFilter, self).__init__( + "Invalid filter: %s" % reason) + + +class QueryStringSearchAttrFilter(object): + uninary_operators = ("not", ) + binary_operator = (u">=", u"<=", u"!=", u">", u"<", u"=", u"==", u"eq", + u"ne", u"lt", u"gt", u"ge", u"le", u"in", u"like", u"≠", + u"≥", u"≤", u"like" "in") + multiple_operators = (u"and", u"or", u"∧", u"∨") + + operator = pyparsing.Regex(u"|".join(binary_operator)) + null = pyparsing.Regex("None|none|null").setParseAction( + pyparsing.replaceWith(None)) + boolean = "False|True|false|true" + boolean = pyparsing.Regex(boolean).setParseAction( + lambda t: t[0].lower() == "true") + hex_string = lambda n: pyparsing.Word(pyparsing.hexnums, exact=n) + uuid_string = pyparsing.Combine( + hex_string(8) + (pyparsing.Optional("-") + hex_string(4)) * 3 + + pyparsing.Optional("-") + hex_string(12)) + number = 
r"[+-]?\d+(:?\.\d*)?(:?[eE][+-]?\d+)?" + number = pyparsing.Regex(number).setParseAction(lambda t: float(t[0])) + identifier = pyparsing.Word(pyparsing.alphas, pyparsing.alphanums + "_") + quoted_string = pyparsing.QuotedString('"') | pyparsing.QuotedString("'") + comparison_term = pyparsing.Forward() + in_list = pyparsing.Group( + pyparsing.Suppress('[') + + pyparsing.Optional(pyparsing.delimitedList(comparison_term)) + + pyparsing.Suppress(']'))("list") + comparison_term << (null | boolean | uuid_string | identifier | number | + quoted_string | in_list) + condition = pyparsing.Group(comparison_term + operator + comparison_term) + + expr = pyparsing.operatorPrecedence(condition, [ + ("not", 1, pyparsing.opAssoc.RIGHT, ), + ("and", 2, pyparsing.opAssoc.LEFT, ), + ("∧", 2, pyparsing.opAssoc.LEFT, ), + ("or", 2, pyparsing.opAssoc.LEFT, ), + ("∨", 2, pyparsing.opAssoc.LEFT, ), + ]) + + @classmethod + def _parsed_query2dict(cls, parsed_query): + result = None + while parsed_query: + part = parsed_query.pop() + if part in cls.binary_operator: + result = {part: {parsed_query.pop(): result}} + + elif part in cls.multiple_operators: + if result.get(part): + result[part].append( + cls._parsed_query2dict(parsed_query.pop())) + else: + result = {part: [result]} + + elif part in cls.uninary_operators: + result = {part: result} + elif isinstance(part, pyparsing.ParseResults): + kind = part.getName() + if kind == "list": + res = part.asList() + else: + res = cls._parsed_query2dict(part) + if result is None: + result = res + elif isinstance(result, dict): + list(result.values())[0].append(res) + else: + result = part + return result + + @classmethod + def parse(cls, query): + try: + parsed_query = cls.expr.parseString(query, parseAll=True)[0] + except pyparsing.ParseException as e: + raise InvalidQueryStringSearchAttrFilter(six.text_type(e)) + return cls._parsed_query2dict(parsed_query) + + def _ResourceSearchSchema(v): """Helper method to indirect the recursivity of the search 
schema""" return SearchResourceTypeController.ResourceSearchSchema(v) @@ -1109,9 +1194,20 @@ class SearchResourceTypeController(rest.RestController): ) ) + @classmethod + def parse_and_validate_qs_filter(cls, query): + try: + attr_filter = QueryStringSearchAttrFilter.parse(query) + except InvalidQueryStringSearchAttrFilter as e: + raise abort(400, e) + return voluptuous.Schema(cls.ResourceSearchSchema, + required=True)(attr_filter) + def _search(self, **kwargs): if pecan.request.body: attr_filter = deserialize_and_validate(self.ResourceSearchSchema) + elif kwargs.get("filter"): + attr_filter = self.parse_and_validate_qs_filter(kwargs["filter"]) else: attr_filter = None diff --git a/gnocchi/tests/gabbi/gabbits/search.yaml b/gnocchi/tests/gabbi/gabbits/search.yaml index f13621c8..f4dc1da9 100644 --- a/gnocchi/tests/gabbi/gabbits/search.yaml +++ b/gnocchi/tests/gabbi/gabbits/search.yaml @@ -56,3 +56,10 @@ tests: - df7e5e75-6a1d-4ff7-85cb-38eb9d75da7e response_json_paths: $.`len`: 2 + + - name: search in_ query string + POST: /v1/search/resource/generic?filter=id%20in%20%5Bfaef212f-0bf4-4030-a461-2186fef79be0%2C%20df7e5e75-6a1d-4ff7-85cb-38eb9d75da7e%5D + request_headers: + content-type: application/json + response_json_paths: + $.`len`: 2 diff --git a/gnocchi/tests/test_rest.py b/gnocchi/tests/test_rest.py index d9ac6037..83655530 100644 --- a/gnocchi/tests/test_rest.py +++ b/gnocchi/tests/test_rest.py @@ -34,6 +34,7 @@ from testtools import testcase import webtest from gnocchi import archive_policy +from gnocchi import rest from gnocchi.rest import app from gnocchi.tests import base as tests_base from gnocchi import utils @@ -1878,3 +1879,73 @@ class GenericResourceTest(RestTest): "Invalid input: extra keys not allowed @ data[" + repr(u'wrongoperator') + "]", result.text) + + +class QueryStringSearchAttrFilterTest(tests_base.TestCase): + def _do_test(self, expr, expected): + req = rest.QueryStringSearchAttrFilter.parse(expr) + self.assertEqual(expected, req) + + def 
test_search_query_builder(self): + self._do_test('foo=7EED6CC3-EDC8-48C9-8EF6-8A36B9ACC91C', + {"=": {"foo": "7EED6CC3-EDC8-48C9-8EF6-8A36B9ACC91C"}}) + self._do_test('foo=7EED6CC3EDC848C98EF68A36B9ACC91C', + {"=": {"foo": "7EED6CC3EDC848C98EF68A36B9ACC91C"}}) + self._do_test('foo=bar', {"=": {"foo": "bar"}}) + self._do_test('foo!=1', {"!=": {"foo": 1.0}}) + self._do_test('foo=True', {"=": {"foo": True}}) + self._do_test('foo=null', {"=": {"foo": None}}) + self._do_test('foo="null"', {"=": {"foo": "null"}}) + self._do_test('foo in ["null", "foo"]', + {"in": {"foo": ["null", "foo"]}}) + self._do_test(u'foo="quote" and bar≠1', + {"and": [{u"≠": {"bar": 1}}, + {"=": {"foo": "quote"}}]}) + self._do_test('foo="quote" or bar like "%%foo"', + {"or": [{"like": {"bar": "%%foo"}}, + {"=": {"foo": "quote"}}]}) + + self._do_test('not (foo="quote" or bar like "%%foo" or foo="what!" ' + 'or bar="who?")', + {"not": {"or": [ + {"=": {"bar": "who?"}}, + {"=": {"foo": "what!"}}, + {"like": {"bar": "%%foo"}}, + {"=": {"foo": "quote"}}, + ]}}) + + self._do_test('(foo="quote" or bar like "%%foo" or not foo="what!" ' + 'or bar="who?") and cat="meme"', + {"and": [ + {"=": {"cat": "meme"}}, + {"or": [ + {"=": {"bar": "who?"}}, + {"not": {"=": {"foo": "what!"}}}, + {"like": {"bar": "%%foo"}}, + {"=": {"foo": "quote"}}, + ]} + ]}) + + self._do_test('foo="quote" or bar like "%%foo" or foo="what!" ' + 'or bar="who?" and cat="meme"', + {"or": [ + {"and": [ + {"=": {"cat": "meme"}}, + {"=": {"bar": "who?"}}, + ]}, + {"=": {"foo": "what!"}}, + {"like": {"bar": "%%foo"}}, + {"=": {"foo": "quote"}}, + ]}) + + self._do_test('foo="quote" or bar like "%%foo" and foo="what!" ' + 'or bar="who?" 
or cat="meme"', + {"or": [ + {"=": {"cat": "meme"}}, + {"=": {"bar": "who?"}}, + {"and": [ + {"=": {"foo": "what!"}}, + {"like": {"bar": "%%foo"}}, + ]}, + {"=": {"foo": "quote"}}, + ]}) -- GitLab From db2afd16939dcd9a01cc644b5e299aa958718ecc Mon Sep 17 00:00:00 2001 From: shengping zhang Date: Thu, 4 Aug 2016 15:29:14 +0800 Subject: [PATCH 0374/1483] This patch is used to delete batch of resources 1. Add a method delete in gnocchi/rest/__init__.py to accept an attribute filter to delete matched resources. 2. Add method delete_resources in gnocchi/indexer/sqlalchemy.py to delete related data in db. 3. Add a new rule in policy. 4. Add document to descrbe the functions The HTTP request for deleting a batch of resources by ids is looks like: DELETE /v1/resource/ Content-Type: application/json {"in": {"id":[xx_id,yy_id...]}}. The HTTP request for deleting a batch of resources filter by resources started_data is looks like: DELETE /v1/resource/ Content-Type: application/json {">=": {"started_data": "2016-08-24"}} Or even more complicated for deleing a batch of resources: DELETE /v1/resource/ Content-Type: application/json { "and": [ {">=": {"started_data":"2016-08-06"}}, {"=":{"id":"xxxx_id"}} ] } TODO: An corresponding gnocchi client CLI needs to be added later. 
Partial-Bug: #1585262 Co-Authored-By: Mehdi Abaakouk Change-Id: I2a21c9e76fe08819b60e1a198335213c3b32e96f --- doc/source/rest.j2 | 21 +- doc/source/rest.yaml | 91 ++++++ etc/gnocchi/policy.json | 1 + gnocchi/indexer/__init__.py | 5 + gnocchi/indexer/sqlalchemy.py | 31 ++ gnocchi/rest/__init__.py | 32 +- gnocchi/tests/gabbi/gabbits/resource.yaml | 305 ++++++++++++++++++ gnocchi/tests/test_indexer.py | 34 ++ .../delete-resources-f10d21fc02f53f16.yaml | 3 + 9 files changed, 519 insertions(+), 4 deletions(-) create mode 100644 releasenotes/notes/delete-resources-f10d21fc02f53f16.yaml diff --git a/doc/source/rest.j2 b/doc/source/rest.j2 index aeee4a42..c6018650 100644 --- a/doc/source/rest.j2 +++ b/doc/source/rest.j2 @@ -292,14 +292,29 @@ And to retrieve its modification history: {{ scenarios['get-patched-instance-history']['doc'] }} -It possible to delete a resource altogether: +It is possible to delete a resource altogether: {{ scenarios['delete-resource-generic']['doc'] }} +It is also possible to delete a batch of resources based on attribute values, and +returns a number of deleted resources. + +To delete resources based on ids: + +{{ scenarios['delete-resources-by-ids']['doc'] }} + +or delete resources based on time: + +{{ scenarios['delete-resources-by-time']['doc']}} + .. IMPORTANT:: - When a resource is deleted, all its associated metrics are deleted at the - same time. + When a resource is deleted, all its associated metrics are deleted at the + same time. + + When a batch of resources are deleted, an attribute filter is required to + avoid deletion of the entire database. 
+ All resources can be listed, either by using the `generic` type that will list all types of resources, or by filtering on their resource type: diff --git a/doc/source/rest.yaml b/doc/source/rest.yaml index 19871d2d..b4d665fc 100644 --- a/doc/source/rest.yaml +++ b/doc/source/rest.yaml @@ -501,6 +501,97 @@ - name: delete-resource-generic request: DELETE /v1/resource/generic/{{ scenarios['create-resource-generic']['response'].json['id'] }} HTTP/1.1 +- name: create-resources-a + request: | + POST /v1/resource/generic HTTP/1.1 + Content-Type: application/json + + { + "id": "340102AA-AA19-BBE0-E1E2-2D3JDC7D289R", + "user_id": "BD3A1E52-KKKC-2123-BGLH-WWUUD88CD7WZ", + "project_id": "BD3A1E52-KKKC-2123-BGLH-WWUUD88CD7WZ" + } + +- name: create-resources-b + request: | + POST /v1/resource/generic HTTP/1.1 + Content-Type: application/json + + { + "id": "340102AA-AAEF-AA90-E1E2-2D3JDC7D289R", + "user_id": "BD3A1E52-KKKC-2123-BGLH-WWUUD88CD7WZ", + "project_id": "BD3A1E52-KKKC-2123-BGLH-WWUUD88CD7WZ" + } + +- name: create-resources-c + request: | + POST /v1/resource/generic HTTP/1.1 + Content-Type: application/json + + { + "id": "340102AA-AAEF-BCEF-E112-2D3JDC7D289R", + "user_id": "BD3A1E52-KKKC-2123-BGLH-WWUUD88CD7WZ", + "project_id": "BD3A1E52-KKKC-2123-BGLH-WWUUD88CD7WZ" + } + +- name: create-resources-d + request: | + POST /v1/resource/generic HTTP/1.1 + Content-Type: application/json + + { + "id": "340102AA-AAEF-BCEF-E112-2D15DC7D289R", + "user_id": "BD3A1E52-KKKC-2123-BGLH-WWUUD88CD7WZ", + "project_id": "BD3A1E52-KKKC-2123-BGLH-WWUUD88CD7WZ" + } + +- name: create-resources-e + request: | + POST /v1/resource/generic HTTP/1.1 + Content-Type: application/json + + { + "id": "340102AA-AAEF-BCEF-E112-2D3JDC30289R", + "user_id": "BD3A1E52-KKKC-2123-BGLH-WWUUD88CD7WZ", + "project_id": "BD3A1E52-KKKC-2123-BGLH-WWUUD88CD7WZ" + } + +- name: create-resources-f + request: | + POST /v1/resource/generic HTTP/1.1 + Content-Type: application/json + + { + "id": 
"340102AA-AAEF-BCEF-E112-2D15349D109R", + "user_id": "BD3A1E52-KKKC-2123-BGLH-WWUUD88CD7WZ", + "project_id": "BD3A1E52-KKKC-2123-BGLH-WWUUD88CD7WZ" + } + +- name: delete-resources-by-ids + request: | + DELETE /v1/resource/generic HTTP/1.1 + Content-Type: application/json + + { + "in": { + "id": [ + "{{ scenarios['create-resources-a']['response'].json['id'] }}", + "{{ scenarios['create-resources-b']['response'].json['id'] }}", + "{{ scenarios['create-resources-c']['response'].json['id'] }}" + ] + } + } + +- name: delete-resources-by-time + request: | + DELETE /v1/resource/generic HTTP/1.1 + Content-Type: application/json + + { + ">=": {"started_at": "{{ scenarios['create-resources-f']['response'].json['started_at'] }}"} + } + + - name: get-resource-named-metrics-measures request: GET /v1/resource/generic/{{ scenarios['create-resource-instance-with-metrics']['response'].json['id'] }}/metric/cpu.util/measures?start=2014-10-06T14:34 HTTP/1.1 diff --git a/etc/gnocchi/policy.json b/etc/gnocchi/policy.json index 4c55b031..00aaeddd 100644 --- a/etc/gnocchi/policy.json +++ b/etc/gnocchi/policy.json @@ -9,6 +9,7 @@ "get resource": "rule:admin_or_creator or rule:resource_owner", "update resource": "rule:admin_or_creator", "delete resource": "rule:admin_or_creator", + "delete resources": "rule:admin_or_creator", "list resource": "rule:admin_or_creator or rule:resource_owner", "search resource": "rule:admin_or_creator or rule:resource_owner", diff --git a/gnocchi/indexer/__init__.py b/gnocchi/indexer/__init__.py index 89c3d5c8..36ba19b8 100644 --- a/gnocchi/indexer/__init__.py +++ b/gnocchi/indexer/__init__.py @@ -362,6 +362,11 @@ class IndexerDriver(object): def delete_resource(uuid): raise exceptions.NotImplementedError + @staticmethod + def delete_resources(resource_type='generic', + attribute_filter=None): + raise exceptions.NotImplementedError + @staticmethod def delete_metric(id): raise exceptions.NotImplementedError diff --git a/gnocchi/indexer/sqlalchemy.py 
b/gnocchi/indexer/sqlalchemy.py index 02f16fb0..ec15addd 100644 --- a/gnocchi/indexer/sqlalchemy.py +++ b/gnocchi/indexer/sqlalchemy.py @@ -866,6 +866,37 @@ class SQLAlchemyIndexer(indexer.IndexerDriver): Resource.id == resource_id).delete() == 0: raise indexer.NoSuchResource(resource_id) + @retry_on_deadlock + def delete_resources(self, resource_type='generic', + attribute_filter=None): + if not attribute_filter: + raise ValueError("attribute_filter must be set") + + with self.facade.writer() as session: + target_cls = self._resource_type_to_mappers( + session, resource_type)["resource"] + + q = session.query(target_cls.id) + + engine = session.connection() + try: + f = QueryTransformer.build_filter(engine.dialect.name, + target_cls, + attribute_filter) + except indexer.QueryAttributeError as e: + # NOTE(jd) The QueryAttributeError does not know about + # resource_type, so convert it + raise indexer.ResourceAttributeError(resource_type, + e.attribute) + + q = q.filter(f) + + session.query(Metric).filter( + Metric.resource_id.in_(q) + ).update({"status": "delete"}, + synchronize_session=False) + return q.delete(synchronize_session=False) + @retry_on_deadlock def get_resource(self, resource_type, resource_id, with_metrics=False): with self.facade.independent_reader() as session: diff --git a/gnocchi/rest/__init__.py b/gnocchi/rest/__init__.py index f30afd19..c23c9aee 100644 --- a/gnocchi/rest/__init__.py +++ b/gnocchi/rest/__init__.py @@ -1059,6 +1059,34 @@ class ResourcesController(rest.RestController): except indexer.IndexerException as e: abort(400, e) + @pecan.expose('json') + def delete(self, **kwargs): + # NOTE(sileht): Don't allow empty filter, this is going to delete + # the entire database. + attr_filter = deserialize_and_validate( + SearchResourceTypeController.ResourceSearchSchema) + + # the voluptuous checks everything, but it is better to + # have this here. 
+ if not attr_filter: + abort(400, "caution: the query can not be empty, or it will \ + delete entire database") + + user, project = get_user_and_project() + policy_filter = _get_list_resource_policy_filter( + "delete resources", self._resource_type, user, project) + + if policy_filter: + attr_filter = {"and": [policy_filter, attr_filter]} + + try: + delete_num = pecan.request.indexer.delete_resources( + self._resource_type, attribute_filter=attr_filter) + except indexer.IndexerException as e: + abort(400, e) + + return {"deleted": delete_num} + class ResourcesByTypeController(rest.RestController): @pecan.expose('json') @@ -1104,7 +1132,9 @@ class SearchResourceTypeController(rest.RestController): u"and", u"∨", u"or", u"∧", u"not", - ): [_ResourceSearchSchema], + ): voluptuous.All( + [_ResourceSearchSchema], voluptuous.Length(min=1) + ) } ) ) diff --git a/gnocchi/tests/gabbi/gabbits/resource.yaml b/gnocchi/tests/gabbi/gabbits/resource.yaml index 31e778d3..8f3198c3 100644 --- a/gnocchi/tests/gabbi/gabbits/resource.yaml +++ b/gnocchi/tests/gabbi/gabbits/resource.yaml @@ -826,3 +826,308 @@ tests: x-user-id: 0fbb231484614b1a80131fc22f6afc9c x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea status: 404 + +# Delete a batch of resources by attributes filter + + - name: create resource one + desc: before test batch delete, create some resources + POST: /v1/resource/generic + request_headers: + x-user-id: 0fbb231484614b1a80131fc22f6afc9c + x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea + content-type: application/json + data: + id: f93450f2-aaaa-4d67-9985-02511241e7d1 + started_at: "2014-01-03T02:02:02.000000" + user_id: 0fbb231484614b1a80131fc22f6afc9c + project_id: f3d41b770cc14f0bb94a1d5be9c0e3ea + status: 201 + + - name: create resource two + desc: before test batch delete, create some resources + POST: $LAST_URL + request_headers: + x-user-id: 0fbb231484614b1a80131fc22f6afc9c + x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea + content-type: application/json + data: + id: 
f93450f2-bbbb-4d67-9985-02511241e7d1 + started_at: "2014-01-03T02:02:02.000000" + user_id: 0fbb231484614b1a80131fc22f6afc9c + project_id: f3d41b770cc14f0bb94a1d5be9c0e3ea + status: 201 + + - name: create resource three + desc: before test batch delete, create some resources + POST: $LAST_URL + request_headers: + x-user-id: 0fbb231484614b1a80131fc22f6afc9c + x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea + content-type: application/json + data: + id: f93450f2-cccc-4d67-9985-02511241e7d1 + started_at: "2014-08-04T00:00:00.000000" + user_id: 0fbb231484614b1a80131fc22f6afc9c + project_id: f3d41b770cc14f0bb94a1d5be9c0e3ea + status: 201 + + - name: create resource four + desc: before test batch delete, create some resources + POST: $LAST_URL + request_headers: + x-user-id: 0fbb231484614b1a80131fc22f6afc9c + x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea + content-type: application/json + data: + id: f93450f2-dddd-4d67-9985-02511241e7d1 + started_at: "2014-08-04T00:00:00.000000" + user_id: 0fbb231484614b1a80131fc22f6afc9c + project_id: f3d41b770cc14f0bb94a1d5be9c0e3ea + status: 201 + + - name: create resource five + desc: before test batch delete, create some resources + POST: $LAST_URL + request_headers: + x-user-id: 0fbb231484614b1a80131fc22f6afc9c + x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea + content-type: application/json + data: + id: f93450f2-eeee-4d67-9985-02511241e7d1 + started_at: "2015-08-14T00:00:00.000000" + user_id: 0fbb231484614b1a80131fc22f6afc9c + project_id: f3d41b770cc14f0bb94a1d5be9c0e3ea + status: 201 + + - name: create resource six + desc: before test batch delete, create some resources + POST: $LAST_URL + request_headers: + x-user-id: 0fbb231484614b1a80131fc22f6afc9c + x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea + content-type: application/json + data: + id: f93450f2-ffff-4d67-9985-02511241e7d1 + started_at: "2015-08-14T00:00:00.000000" + user_id: 0fbb231484614b1a80131fc22f6afc9c + project_id: f3d41b770cc14f0bb94a1d5be9c0e3ea + status: 201 + 
+ - name: get resource one + desc: ensure the resources exists + GET: /v1/resource/generic/f93450f2-aaaa-4d67-9985-02511241e7d1 + request_headers: + x-user-id: 0fbb231484614b1a80131fc22f6afc9c + x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea + content-type: application/json + status: 200 + + - name: get resource two + desc: ensure the resources exists + GET: /v1/resource/generic/f93450f2-bbbb-4d67-9985-02511241e7d1 + request_headers: + x-user-id: 0fbb231484614b1a80131fc22f6afc9c + x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea + content-type: application/json + status: 200 + + - name: get resource three + desc: ensure the resources exists + GET: /v1/resource/generic/f93450f2-cccc-4d67-9985-02511241e7d1 + request_headers: + x-user-id: 0fbb231484614b1a80131fc22f6afc9c + x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea + content-type: application/json + status: 200 + + - name: get resource four + desc: ensure the resources exists + GET: /v1/resource/generic/f93450f2-dddd-4d67-9985-02511241e7d1 + request_headers: + x-user-id: 0fbb231484614b1a80131fc22f6afc9c + x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea + content-type: application/json + status: 200 + + - name: get resource five + desc: ensure the resources exists + GET: /v1/resource/generic/f93450f2-eeee-4d67-9985-02511241e7d1 + request_headers: + x-user-id: 0fbb231484614b1a80131fc22f6afc9c + x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea + content-type: application/json + status: 200 + + - name: get resource six + desc: ensure the resources exists + GET: /v1/resource/generic/f93450f2-ffff-4d67-9985-02511241e7d1 + request_headers: + x-user-id: 0fbb231484614b1a80131fc22f6afc9c + x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea + content-type: application/json + status: 200 + + - name: delete random data structure + desc: delete a empty list test + DELETE: /v1/resource/generic + request_headers: + x-user-id: 0fbb231484614b1a80131fc22f6afc9c + x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea + content-type: application/json 
+ data: + resource_ids: + [] + attrs: + test + status: 400 + + - name: delete something empty + desc: use empty filter for delete + DELETE: $LAST_URL + request_headers: + x-user-id: 0fbb231484614b1a80131fc22f6afc9c + x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea + content-type: application/json + data: "" + status: 400 + + - name: delete something empty a + desc: use empty filter for delete + DELETE: $LAST_URL + request_headers: + x-user-id: 0fbb231484614b1a80131fc22f6afc9c + x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea + content-type: application/json + data: + in: + id: [] + status: 200 + response_json_paths: + $.deleted: 0 + + - name: delete something empty b + desc: use empty filter for delete + DELETE: $LAST_URL + request_headers: + x-user-id: 0fbb231484614b1a80131fc22f6afc9c + x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea + content-type: application/json + data: + in: {} + status: 400 + + - name: delete something empty c + desc: use empty filter for delete + DELETE: $LAST_URL + request_headers: + x-user-id: 0fbb231484614b1a80131fc22f6afc9c + x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea + content-type: application/json + data: + in: + and: [] + status: 400 + + - name: delete something empty d + desc: use empty filter for delete + DELETE: $LAST_URL + request_headers: + x-user-id: 0fbb231484614b1a80131fc22f6afc9c + x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea + content-type: application/json + data: + in: + and: + - or: [] + - id: + =: "" + status: 400 + + - name: delete something empty e + desc: use empty filter for delete + DELETE: $LAST_URL + request_headers: + x-user-id: 0fbb231484614b1a80131fc22f6afc9c + x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea + content-type: application/json + data: + and: [] + status: 400 + + - name: delete something empty f + desc: use empty filter for delete + DELETE: $LAST_URL + request_headers: + x-user-id: 0fbb231484614b1a80131fc22f6afc9c + x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea + content-type: application/json 
+ data: + and: + - in: + id: [] + - started_at: "" + status: 400 + + - name: delete batch of resources filter by started_at + desc: delete the created resources + DELETE: /v1/resource/generic + request_headers: + x-user-id: 0fbb231484614b1a80131fc22f6afc9c + x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea + content-type: application/json + data: + eq: + started_at: "2014-08-04" + status: 200 + response_json_paths: + $.deleted: 2 + + - name: delete batch of resources filter by mutliple ids + desc: delete the created resources + DELETE: /v1/resource/generic + request_headers: + x-user-id: 0fbb231484614b1a80131fc22f6afc9c + x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea + content-type: application/json + data: + in: + id: + - f93450f2-aaaa-4d67-9985-02511241e7d1 + - f93450f2-bbbb-4d67-9985-02511241e7d1 + status: 200 + response_json_paths: + $.deleted: 2 + + - name: delete both existent and non-existent data + desc: delete exits and non-exist data + DELETE: $LAST_URL + request_headers: + x-user-id: 0fbb231484614b1a80131fc22f6afc9c + x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea + content-type: application/json + data: + in: + id: + - f93450f2-eeee-4d67-9985-02511241e7d1 + - f93450f2-ffff-4d67-9985-02511241e7d1 + - f93450f2-yyyy-4d67-9985-02511241e7d1 + - f93450f2-xxxx-4d67-9985-02511241e7d1 + status: 200 + response_json_paths: + $.deleted: 2 + + - name: delete multiple non-existent resources + desc: delete a batch of non-existent resources + DELETE: $LAST_URL + request_headers: + x-user-id: 0fbb231484614b1a80131fc22f6afc9c + x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea + content-type: application/json + data: + in: + id: + - f93450f2-zzzz-4d67-9985-02511241e7d1 + - f93450f2-kkkk-4d67-9985-02511241e7d1 + status: 200 + response_json_paths: + $.deleted: 0 diff --git a/gnocchi/tests/test_indexer.py b/gnocchi/tests/test_indexer.py index 609c71ab..af642297 100644 --- a/gnocchi/tests/test_indexer.py +++ b/gnocchi/tests/test_indexer.py @@ -950,6 +950,40 @@ class 
TestIndexerDriver(tests_base.TestCase): }) self.assertEqual(0, len(resources)) + def test_deletes_resources(self): + r1 = uuid.uuid4() + r2 = uuid.uuid4() + user = str(uuid.uuid4()) + project = str(uuid.uuid4()) + metrics = {'foo': {'archive_policy_name': 'medium'}} + g1 = self.index.create_resource('generic', r1, user, project, + user, project, metrics=metrics) + g2 = self.index.create_resource('generic', r2, user, project, + user, project, metrics=metrics) + + metrics = self.index.list_metrics(ids=[g1['metrics'][0]['id'], + g2['metrics'][0]['id']]) + self.assertEqual(2, len(metrics)) + for m in metrics: + self.assertEqual('active', m['status']) + + deleted = self.index.delete_resources( + 'generic', + attribute_filter={"=": {"user_id": user}}) + self.assertEqual(2, deleted) + + resources = self.index.list_resources( + 'generic', + attribute_filter={"=": {"user_id": user}}) + self.assertEqual(0, len(resources)) + + metrics = self.index.list_metrics(ids=[g1['metrics'][0]['id'], + g2['metrics'][0]['id']], + status='delete') + self.assertEqual(2, len(metrics)) + for m in metrics: + self.assertEqual('delete', m['status']) + def test_get_metric(self): e1 = uuid.uuid4() user = str(uuid.uuid4()) diff --git a/releasenotes/notes/delete-resources-f10d21fc02f53f16.yaml b/releasenotes/notes/delete-resources-f10d21fc02f53f16.yaml new file mode 100644 index 00000000..0f6b0421 --- /dev/null +++ b/releasenotes/notes/delete-resources-f10d21fc02f53f16.yaml @@ -0,0 +1,3 @@ +--- +feature: + - A new REST API call is provided to delete multiple resources at once using a search filter. 
-- GitLab From fccda018288efae453bedbabafca43f665cba756 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Mon, 12 Sep 2016 21:18:33 +0200 Subject: [PATCH 0375/1483] ceph: fix setup extra Change-Id: I21bfb7177c16b200dcbf1849176c50e1371128cc --- setup.cfg | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/setup.cfg b/setup.cfg index f32100fb..152244a3 100644 --- a/setup.cfg +++ b/setup.cfg @@ -44,9 +44,9 @@ ceph = msgpack-python lz4 tooz>=1.38 -ceph-pre-jewel: +ceph-pre-jewel = cradox>=1.0.9 -ceph-jewel-and-later: +ceph-jewel-and-later = python-rados>=10.1.0 # not available on pypi file = msgpack-python -- GitLab From f89f4048a722e552c4c3f96b70671115b111b9a4 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Mon, 12 Sep 2016 19:54:03 +0200 Subject: [PATCH 0376/1483] ceph: Fix metricd start metricd can be started before api, in that case metricd fail because the measure object don't yet exists. Closes-bug: #1586149 Change-Id: Id7822f16718e31d6a8916cec8a6b77194071a31e --- gnocchi/storage/ceph.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/gnocchi/storage/ceph.py b/gnocchi/storage/ceph.py index e234c194..6fe39302 100644 --- a/gnocchi/storage/ceph.py +++ b/gnocchi/storage/ceph.py @@ -170,8 +170,12 @@ class CephStorage(_carbonara.CarbonaraBasedStorage): def _list_object_names_to_process(self, prefix=""): with rados.ReadOpCtx() as op: omaps, ret = self.ioctx.get_omap_vals(op, "", prefix, -1) - self.ioctx.operate_read_op( - op, self.MEASURE_PREFIX, flag=self.OMAP_READ_FLAGS) + try: + self.ioctx.operate_read_op( + op, self.MEASURE_PREFIX, flag=self.OMAP_READ_FLAGS) + except rados.ObjectNotFound: + # API have still written nothing + return () # NOTE(sileht): after reading the libradospy, I'm # not sure that ret will have the correct value # get_omap_vals transforms the C int to python int -- GitLab From db3788506c3da22b681b3650358d8b32f55d1ed0 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Thu, 15 Sep 2016 18:52:34 +0200 
Subject: [PATCH 0377/1483] doc: remove the rolling upgrade documentation Unfortunately, the v2 -> v3 upgrade does not support any kind of rolling upgrade due to the complexity of the new workflow. Change-Id: I943124c87a27b1423421d211d9a3428673f6ebde --- doc/source/install.rst | 24 ------------------------ 1 file changed, 24 deletions(-) diff --git a/doc/source/install.rst b/doc/source/install.rst index 0d5029bb..6cf36d1e 100644 --- a/doc/source/install.rst +++ b/doc/source/install.rst @@ -100,27 +100,3 @@ that your indexer and storage are properly upgraded. Run the following: storage. 3. Start the new Gnocchi API server and metric daemon - -Minimal interruption upgrade -============================ -Gnocchi supports online upgrade of its storage system, which avoids -interrupting Gnocchi for a long time. In order to upgrade from previous -versions, you need to follow the following steps: - -1. Stop the old Gnocchi API server and metric daemon - -2. Run `gnocchi-upgrade --skip-storage` with the new version of Gnocchi. - This can take several minutes depending on the size of your index. - -3. Start the new Gnocchi API server. - -4. Run `gnocchi-upgrade` with the new version of Gnocchi - This can take several hours depending on the size of your storage. - -5. Start the new Gnocchi metric daemon. - -This will upgrade the indexer and storage in two passes. While a new version of -Gnocchi API cannot run with an old version of the indexer, it can run with an -old version of its storage back-end. For performance reasons, _metricd_ needs -to run an upgraded storage back-end, otherwise it would spend too much time -checking for upgrade pattern on each run. 
-- GitLab From e8dc20affc447339ecae680a1bb6f1b7ea0465a1 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Thu, 15 Sep 2016 18:13:24 +0200 Subject: [PATCH 0378/1483] carbonara: retrieve unaggregated timeserie only once for upgrade The unaggregated timeserie was retrieve for each archive policy aggregation method and definition, which is useless. Once is enough. Change-Id: I83002ed1f49c8b50eb56f698a9af17a333ceac4f --- gnocchi/storage/_carbonara.py | 30 +++++++++++++++--------------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/gnocchi/storage/_carbonara.py b/gnocchi/storage/_carbonara.py index 9cc729cd..a358d430 100644 --- a/gnocchi/storage/_carbonara.py +++ b/gnocchi/storage/_carbonara.py @@ -367,6 +367,21 @@ class CarbonaraBasedStorage(storage.StorageDriver): def _check_for_metric_upgrade(self, metric): lock = self._lock(metric.id) with lock: + try: + unaggregated = self._get_unaggregated_timeserie_and_unserialize( # noqa + metric) + except (storage.MetricDoesNotExist, CorruptionError) as e: + # NOTE(jd) This case is not really possible – you can't + # have archives with splits and no unaggregated + # timeserie… + LOG.error( + "Unable to find unaggregated timeserie for " + "metric %s, unable to upgrade data: %s", + metric.id, e) + return + oldest_mutable_timestamp = ( + unaggregated.first_block_timestamp() + ) for agg_method, d in itertools.product( metric.archive_policy.aggregation_methods, metric.archive_policy.definition): @@ -393,21 +408,6 @@ class CarbonaraBasedStorage(storage.StorageDriver): sampling=d.granularity, aggregation_method=agg_method, timeseries=timeseries, max_size=d.points) - try: - unaggregated = self._get_unaggregated_timeserie_and_unserialize( # noqa - metric) - except (storage.MetricDoesNotExist, CorruptionError) as e: - # NOTE(jd) This case is not really possible – you can't - # have archives with splits and no unaggregated - # timeserie… - LOG.error( - "Unable to find unaggregated timeserie for " - "metric %s, unable to 
upgrade data: %s", - metric.id, e) - break - oldest_mutable_timestamp = ( - unaggregated.first_block_timestamp() - ) for key, split in ts.split(): self._store_timeserie_split( metric, key, split, -- GitLab From 85b6854bbc574731a2ffdfa5c10e63a9ae7763e6 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Thu, 15 Sep 2016 22:14:50 +0200 Subject: [PATCH 0379/1483] releasenotes: fix typo in storage-engine-v3 Change-Id: I94fc09327fd639da3b979f7d797362b30f6cbdf1 --- releasenotes/notes/storage-engine-v3-b34bd0723abf292f.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/releasenotes/notes/storage-engine-v3-b34bd0723abf292f.yaml b/releasenotes/notes/storage-engine-v3-b34bd0723abf292f.yaml index 83aa09df..cb2ef22a 100644 --- a/releasenotes/notes/storage-engine-v3-b34bd0723abf292f.yaml +++ b/releasenotes/notes/storage-engine-v3-b34bd0723abf292f.yaml @@ -7,7 +7,7 @@ features: time serie, reducing the data space usage by at least 50 %. upgrade: - gnocchi-upgrade must be run before running the new version of - gnocchi-metric and the HTTP REST API in order to upgrade from version 2 of + gnocchi-metricd and the HTTP REST API in order to upgrade from version 2 of the Carbonara storage engine to version 3. It will read all metrics and convert them to new version 3 serialization format (compressing the data), which might take some time. -- GitLab From c4f6b6e732d584f5422a0dae1a3b83f7d7ecda52 Mon Sep 17 00:00:00 2001 From: gord chung Date: Thu, 15 Sep 2016 15:25:01 +0000 Subject: [PATCH 0380/1483] lower processor job polling and handle queuing when processing metrics which require updating very few points/aggregates/series, the job may take very little time (e.g less than 0.1s). additionally, each processing worker will grab 4 metrics every 1s. because of that, the worker may be idle -- since it finished its 4 metrics -- even though there are other jobs on queue. this removes delay and blocks until job is on queue. additionally, we process jobs in chunks. 
this creates the chunks before queuing rather than on receiving end to minimise steps involved Change-Id: I940d011b38be0615982977a02ab944f4697934d8 Partial-Bug: #1623263 --- gnocchi/cli.py | 19 +++++++------------ 1 file changed, 7 insertions(+), 12 deletions(-) diff --git a/gnocchi/cli.py b/gnocchi/cli.py index ab506bac..e5dd6313 100644 --- a/gnocchi/cli.py +++ b/gnocchi/cli.py @@ -142,6 +142,7 @@ class MetricScheduler(MetricProcessBase): GROUP_ID = "gnocchi-scheduler" SYNC_RATE = 30 TASKS_PER_WORKER = 16 + BLOCK_SIZE = 4 def _enable_coordination(self, conf): self._coord = coordination.get_coordinator( @@ -232,8 +233,9 @@ class MetricScheduler(MetricProcessBase): LOG.warning('Metric processing lagging scheduling rate. ' 'It is recommended to increase the number of ' 'workers or to lengthen processing interval.') - for m_id in metrics: - self.queue.put(m_id) + metrics = list(metrics) + for i in six.moves.range(0, len(metrics), self.BLOCK_SIZE): + self.queue.put(metrics[i:i + self.BLOCK_SIZE]) self.previously_scheduled_metrics = metrics LOG.debug("%d metrics scheduled for processing.", len(metrics)) except Exception: @@ -265,23 +267,16 @@ class MetricJanitor(MetricProcessBase): class MetricProcessor(MetricProcessBase): name = "processing" - BLOCK_SIZE = 4 def __init__(self, worker_id, conf, queue): - super(MetricProcessor, self).__init__(worker_id, conf, 1) + super(MetricProcessor, self).__init__(worker_id, conf, 0) self.queue = queue def _run_job(self): try: metrics = [] - while len(metrics) < self.BLOCK_SIZE: - try: - metrics.append(self.queue.get(block=False)) - except six.moves.queue.Empty: - # queue might be emptied by other workers, continue on. 
- break - if metrics: - self.store.process_background_tasks(self.index, metrics) + metrics = self.queue.get(block=True) + self.store.process_background_tasks(self.index, metrics) except Exception: LOG.error("Unexpected error during measures processing", exc_info=True) -- GitLab From 1a85b4c68b277497dccf14f37631c83818a17c11 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Mon, 18 Jul 2016 14:50:36 +0200 Subject: [PATCH 0381/1483] Allow to retry to delete a resource Since commit 7077a1704a5fb2a2b769034a4431dd98e5de5353, we are able to rerun the resource type deletion safely. This change removes the restriction of having the resource type in state "active" and allow "creation_error" and "deletion_error" state. Change-Id: I36312cec46b64436dbab33d83820595fb0f38e6c --- gnocchi/indexer/sqlalchemy.py | 21 ++++++++++----------- gnocchi/tests/test_indexer.py | 12 ++++++++++++ 2 files changed, 22 insertions(+), 11 deletions(-) diff --git a/gnocchi/indexer/sqlalchemy.py b/gnocchi/indexer/sqlalchemy.py index ec15addd..348d1723 100644 --- a/gnocchi/indexer/sqlalchemy.py +++ b/gnocchi/indexer/sqlalchemy.py @@ -233,15 +233,11 @@ class ResourceClassMapper(object): raise exception.RetryRequest(e) raise - # NOTE(sileht): If something goes wrong here, we are currently - # fucked, that why we expose the state to the superuser. - # TODO(sileht): The idea is to make the delete resource_type more - # like a cleanup method, I mean we should don't fail if the - # constraint have already been dropped or the table have already - # been deleted. So, when the superuser have fixed it's backend - # issue, it can rerun 'DELETE ../resource_type/foobar' even the - # state is already error and if we are sure all underlying - # resources have been cleaned we really deleted the resource_type. + # NOTE(sileht): If something goes wrong here, we are currently + # fucked, that why we expose the state to the superuser. 
+ # But we allow him to delete a resource type in error state + # in case of he cleanup the mess manually and want gnocchi to + # control and finish the cleanup. # TODO(sileht): Remove this resource on other workers # by using expiration on cache ? @@ -491,9 +487,12 @@ class SQLAlchemyIndexer(indexer.IndexerDriver): try: with self.facade.writer() as session: rt = self._get_resource_type(session, name) - if rt.state != "active": + if rt.state not in ["active", "deletion_error", + "creation_error", "updating_error"]: raise indexer.UnexpectedResourceTypeState( - name, "active", rt.state) + name, + "active/deletion_error/creation_error/updating_error", + rt.state) session.delete(rt) # FIXME(sileht): Why do I need to flush here !!! diff --git a/gnocchi/tests/test_indexer.py b/gnocchi/tests/test_indexer.py index af642297..55264b6e 100644 --- a/gnocchi/tests/test_indexer.py +++ b/gnocchi/tests/test_indexer.py @@ -1169,3 +1169,15 @@ class TestIndexerDriver(tests_base.TestCase): self.assertEqual([('after', 'deletion_error'), ('before', 'deleting')], sorted(states.items())) + + # We can cleanup the mess ! + self.index.delete_resource_type("indexer_test_fail2") + + # Ensure it's deleted + self.assertRaises(indexer.NoSuchResourceType, + self.index.get_resource_type, + "indexer_test_fail2") + + self.assertRaises(indexer.NoSuchResourceType, + self.index.delete_resource_type, + "indexer_test_fail2") -- GitLab From dc0de1db2b7f4f9f858f0e7dabc9fc7e550282f7 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Fri, 16 Sep 2016 15:59:27 +0200 Subject: [PATCH 0382/1483] storage: increase default processing delays The default processing delay are too aggressive for most common deployment that do not need real time data. Increase these values a bit. 
Change-Id: I44d0c098892be45c3faeb2da3c1242bdd1717cfa --- gnocchi/storage/__init__.py | 6 +++--- gnocchi/tests/gabbi/gabbits-live/live.yaml | 4 ++-- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/gnocchi/storage/__init__.py b/gnocchi/storage/__init__.py index 80ed0ae0..4d59d0e2 100644 --- a/gnocchi/storage/__init__.py +++ b/gnocchi/storage/__init__.py @@ -28,15 +28,15 @@ OPTS = [ default='file', help='Storage driver to use'), cfg.IntOpt('metric_processing_delay', - default=10, + default=60, help="How many seconds to wait between " "scheduling new metrics to process"), cfg.IntOpt('metric_reporting_delay', - default=60, + default=120, help="How many seconds to wait between " "metric ingestion reporting"), cfg.IntOpt('metric_cleanup_delay', - default=60, + default=300, help="How many seconds to wait between " "cleaning of expired data"), ] diff --git a/gnocchi/tests/gabbi/gabbits-live/live.yaml b/gnocchi/tests/gabbi/gabbits-live/live.yaml index 226e4d69..71b23ae9 100644 --- a/gnocchi/tests/gabbi/gabbits-live/live.yaml +++ b/gnocchi/tests/gabbi/gabbits-live/live.yaml @@ -678,11 +678,11 @@ tests: content-type: application/json status: 400 - - name: delete single archive policy cleanup. 
+ - name: delete single archive policy cleanup url: /v1/archive_policy/gabbilive method: DELETE poll: - count: 60 + count: 360 delay: 1 status: 204 -- GitLab From 52f4e5e16b59f4eed90bcf19636a97c877515a7b Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Fri, 16 Sep 2016 17:07:38 +0200 Subject: [PATCH 0383/1483] devstack-gate: do not create legacy resources Change-Id: Ieab501727ee07bb46b1a4d8a0223d4dd1c466814 --- devstack/gate/post_test_hook.sh | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/devstack/gate/post_test_hook.sh b/devstack/gate/post_test_hook.sh index 9c3ff39a..9cbb922f 100755 --- a/devstack/gate/post_test_hook.sh +++ b/devstack/gate/post_test_hook.sh @@ -43,8 +43,7 @@ export GNOCCHI_SERVICE_URL=$(openstack catalog show metric -c endpoints -f value curl -X GET ${GNOCCHI_SERVICE_URL}/v1/archive_policy -H "Content-Type: application/json" -# NOTE(sileht): gabbi tests needs assert on some Ceilometer resource types -sudo gnocchi-upgrade --create-legacy-resource-types +sudo gnocchi-upgrade # Just ensure tools still works gnocchi metric create -- GitLab From f067210bccab0b71e0fa8d12e6598a5cd0de31a4 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Thu, 15 Sep 2016 14:50:51 +0200 Subject: [PATCH 0384/1483] carbonara: implement compressed format for BoundTimeSerie This replaces msgpack by a struct+lz4 encoding, which reduces the storage per point from 18 bytes to 8-14 bytes per point. 
Change-Id: I2897dc759a7331e9c5f16d1700668be424fbb99b --- gnocchi/carbonara.py | 144 ++++++++++++++-------- gnocchi/storage/_carbonara.py | 48 ++++++-- gnocchi/storage/ceph.py | 19 ++- gnocchi/storage/file.py | 23 +++- gnocchi/storage/swift.py | 21 +++- gnocchi/tests/storage/test_carbonara.py | 153 ++++++++++-------------- gnocchi/tests/test_carbonara.py | 16 +-- gnocchi/tests/test_storage.py | 2 +- 8 files changed, 247 insertions(+), 179 deletions(-) diff --git a/gnocchi/carbonara.py b/gnocchi/carbonara.py index 78fcdf71..ee0f5330 100644 --- a/gnocchi/carbonara.py +++ b/gnocchi/carbonara.py @@ -29,7 +29,6 @@ import time import iso8601 import lz4 -import msgpack import pandas import six @@ -133,29 +132,6 @@ class TimeSerie(object): return timestamps, v return (), () - @classmethod - def from_dict(cls, d): - """Build a time series from a dict. - - The dict format must be datetime as key and values as values. - - :param d: The dict. - :returns: A TimeSerie object - """ - return cls.from_data( - *cls._timestamps_and_values_from_dict(d['values'])) - - def to_dict(self): - return { - 'values': dict((timestamp.value, float(v)) - for timestamp, v in six.iteritems(self.ts.dropna())) - } - - @staticmethod - def _serialize_time_period(value): - if value: - return value.nanos / 10e8 - @staticmethod def _to_offset(value): if isinstance(value, numbers.Real): @@ -176,13 +152,6 @@ class TimeSerie(object): except IndexError: return - @classmethod - def unserialize(cls, data): - return cls.from_dict(msgpack.loads(data, encoding='utf-8')) - - def serialize(self): - return msgpack.dumps(self.to_dict()) - def group_serie(self, granularity, start=None): # NOTE(jd) Our whole serialization system is based on Epoch, and we # store unsigned integer, so we can't store anything before Epoch. 
@@ -251,27 +220,94 @@ class BoundTimeSerie(TimeSerie): before_truncate_callback(self) self._truncate() + _SERIALIZATION_TIMESTAMP_VALUE_LEN = struct.calcsize(" Date: Fri, 16 Sep 2016 15:49:53 +0200 Subject: [PATCH 0385/1483] carbonara: replace msgpack encoding with struct for new measures This replaces the msgpack encoding that was use to store new measures to a lighter struct one. oslo_serialization.msgpackutils actually makes a pretty big encoding of the `datetime.datetime' object, whereas having just a long integer is enough in our case. Change-Id: If89e3d740a400a912c79e5731fb7907f97d2fe42 --- doc/source/install.rst | 13 +- gnocchi/storage/__init__.py | 3 +- gnocchi/storage/_carbonara.py | 29 +++- gnocchi/tests/storage/test_carbonara.py | 4 +- gnocchi/tests/test_aggregates.py | 2 +- gnocchi/tests/test_statsd.py | 15 +- gnocchi/tests/test_storage.py | 176 ++++++++++++------------ tools/measures_injector.py | 4 +- 8 files changed, 132 insertions(+), 114 deletions(-) diff --git a/doc/source/install.rst b/doc/source/install.rst index 6cf36d1e..6a5f5a87 100644 --- a/doc/source/install.rst +++ b/doc/source/install.rst @@ -91,12 +91,17 @@ Upgrading In order to upgrade from a previous version of Gnocchi, you need to make sure that your indexer and storage are properly upgraded. Run the following: -1. Stop the old version of Gnocchi API server and metric daemon +1. Stop the old version of Gnocchi API server and `gnocchi-statsd` daemon -2. Install the new version of Gnocchi +2. Make sure that the processing backlog is empty (`gnocchi status`) -2. Run `gnocchi-upgrade` +3. Stop the old version of `gnocchi-metricd` daemon + +4. Install the new version of Gnocchi + +5. Run `gnocchi-upgrade` This can take several hours depending on the size of your index and storage. -3. Start the new Gnocchi API server and metric daemon +6. 
Start the new Gnocchi API server, `gnocchi-metricd` + and `gnocchi-statsd` daemons diff --git a/gnocchi/storage/__init__.py b/gnocchi/storage/__init__.py index 80ed0ae0..420b55b6 100644 --- a/gnocchi/storage/__init__.py +++ b/gnocchi/storage/__init__.py @@ -16,7 +16,6 @@ import operator from oslo_config import cfg from oslo_log import log -from oslo_utils import timeutils from stevedore import driver from gnocchi import exceptions @@ -46,7 +45,7 @@ LOG = log.getLogger(__name__) class Measure(object): def __init__(self, timestamp, value): - self.timestamp = timeutils.normalize_time(timestamp) + self.timestamp = timestamp self.value = value def __iter__(self): diff --git a/gnocchi/storage/_carbonara.py b/gnocchi/storage/_carbonara.py index 8b2338c0..ab9f1702 100644 --- a/gnocchi/storage/_carbonara.py +++ b/gnocchi/storage/_carbonara.py @@ -18,6 +18,7 @@ import collections import datetime import itertools import operator +import struct import uuid from concurrent import futures @@ -25,9 +26,10 @@ import iso8601 import msgpack from oslo_config import cfg from oslo_log import log -from oslo_serialization import msgpackutils from oslo_utils import timeutils +import pandas import six +import six.moves from tooz import coordination from gnocchi import carbonara @@ -329,8 +331,16 @@ class CarbonaraBasedStorage(storage.StorageDriver): oldest_mutable_timestamp) def add_measures(self, metric, measures): - self._store_new_measures(metric, msgpackutils.dumps( - list(map(tuple, measures)))) + measures = list(measures) + data = struct.pack( + "<" + self._MEASURE_SERIAL_FORMAT * len(measures), + *list( + itertools.chain( + # NOTE(jd) int(10e8) to avoid rounding errors + *((int(utils.datetime_to_unix(timestamp) * int(10e8)), + value) + for timestamp, value in measures)))) + self._store_new_measures(metric, data) @staticmethod def _store_new_measures(metric, data): @@ -359,9 +369,16 @@ class CarbonaraBasedStorage(storage.StorageDriver): aggregation, granularity, version=3): raise 
NotImplementedError - @staticmethod - def _unserialize_measures(data): - return msgpackutils.loads(data) + _MEASURE_SERIAL_FORMAT = "Qd" + _MEASURE_SERIAL_LEN = struct.calcsize(_MEASURE_SERIAL_FORMAT) + + def _unserialize_measures(self, data): + nb_measures = len(data) // self._MEASURE_SERIAL_LEN + measures = struct.unpack( + "<" + self._MEASURE_SERIAL_FORMAT * nb_measures, data) + return six.moves.zip( + pandas.to_datetime(measures[::2], unit='ns'), + itertools.islice(measures, 1, len(measures), 2)) def measures_report(self, details=True): metrics, measures, full_details = self._build_report(details) diff --git a/gnocchi/tests/storage/test_carbonara.py b/gnocchi/tests/storage/test_carbonara.py index 58717c80..d8ea6159 100644 --- a/gnocchi/tests/storage/test_carbonara.py +++ b/gnocchi/tests/storage/test_carbonara.py @@ -141,8 +141,8 @@ class TestCarbonaraMigration(tests_base.TestCase): self.storage.get_measures, self.metric, aggregation='max') self.storage.add_measures(self.metric, [ - storage.Measure(datetime.datetime(2016, 7, 18), 69), - storage.Measure(datetime.datetime(2016, 7, 18, 1, 1), 64), + storage.Measure(utils.datetime_utc(2016, 7, 18), 69), + storage.Measure(utils.datetime_utc(2016, 7, 18, 1, 1), 64), ]) with mock.patch.object(self.index, 'list_metrics') as f: diff --git a/gnocchi/tests/test_aggregates.py b/gnocchi/tests/test_aggregates.py index c4c79015..c07df7ef 100644 --- a/gnocchi/tests/test_aggregates.py +++ b/gnocchi/tests/test_aggregates.py @@ -58,7 +58,7 @@ class TestAggregates(tests_base.TestCase): def _test_create_metric_and_data(self, data, spacing): metric = storage.Metric( uuid.uuid4(), self.archive_policies['medium']) - start_time = datetime.datetime(2014, 1, 1, 12) + start_time = utils.datetime_utc(2014, 1, 1, 12) incr = datetime.timedelta(seconds=spacing) measures = [storage.Measure(start_time + incr * n, val) for n, val in enumerate(data)] diff --git a/gnocchi/tests/test_statsd.py b/gnocchi/tests/test_statsd.py index 4d820bc0..7531e25c 
100644 --- a/gnocchi/tests/test_statsd.py +++ b/gnocchi/tests/test_statsd.py @@ -1,5 +1,6 @@ # -*- encoding: utf-8 -*- # +# Copyright © 2016 Red Hat, Inc. # Copyright © 2015 eNovance # # Licensed under the Apache License, Version 2.0 (the "License"); you may @@ -13,11 +14,9 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. -import datetime import uuid import mock -from oslo_utils import timeutils from gnocchi import indexer from gnocchi import statsd @@ -52,11 +51,11 @@ class TestStatsd(tests_base.TestCase): def test_flush_empty(self): self.server.stats.flush() - @mock.patch.object(timeutils, 'utcnow') + @mock.patch.object(utils, 'utcnow') def _test_gauge_or_ms(self, metric_type, utcnow): metric_name = "test_gauge_or_ms" metric_key = metric_name + "|" + metric_type - utcnow.return_value = datetime.datetime(2015, 1, 7, 13, 58, 36) + utcnow.return_value = utils.datetime_utc(2015, 1, 7, 13, 58, 36) self.server.datagram_received( ("%s:1|%s" % (metric_name, metric_type)).encode('ascii'), ("127.0.0.1", 12345)) @@ -78,7 +77,7 @@ class TestStatsd(tests_base.TestCase): (utils.datetime_utc(2015, 1, 7, 13, 58), 60.0, 1.0) ], measures) - utcnow.return_value = datetime.datetime(2015, 1, 7, 13, 59, 37) + utcnow.return_value = utils.datetime_utc(2015, 1, 7, 13, 59, 37) # This one is going to be ignored self.server.datagram_received( ("%s:45|%s" % (metric_name, metric_type)).encode('ascii'), @@ -105,11 +104,11 @@ class TestStatsd(tests_base.TestCase): def test_ms(self): self._test_gauge_or_ms("ms") - @mock.patch.object(timeutils, 'utcnow') + @mock.patch.object(utils, 'utcnow') def test_counter(self, utcnow): metric_name = "test_counter" metric_key = metric_name + "|c" - utcnow.return_value = datetime.datetime(2015, 1, 7, 13, 58, 36) + utcnow.return_value = utils.datetime_utc(2015, 1, 7, 13, 58, 36) self.server.datagram_received( ("%s:1|c" % 
metric_name).encode('ascii'), ("127.0.0.1", 12345)) @@ -130,7 +129,7 @@ class TestStatsd(tests_base.TestCase): (utils.datetime_utc(2015, 1, 7, 13), 3600.0, 1.0), (utils.datetime_utc(2015, 1, 7, 13, 58), 60.0, 1.0)], measures) - utcnow.return_value = datetime.datetime(2015, 1, 7, 13, 59, 37) + utcnow.return_value = utils.datetime_utc(2015, 1, 7, 13, 59, 37) self.server.datagram_received( ("%s:45|c" % metric_name).encode('ascii'), ("127.0.0.1", 12345)) diff --git a/gnocchi/tests/test_storage.py b/gnocchi/tests/test_storage.py index a0460972..7ab25375 100644 --- a/gnocchi/tests/test_storage.py +++ b/gnocchi/tests/test_storage.py @@ -58,12 +58,12 @@ class TestStorageDriver(tests_base.TestCase): self.skipTest("This driver is not based on Carbonara") self.storage.add_measures(self.metric, [ - storage.Measure(datetime.datetime(2014, 1, 1, 12, 0, 1), 69), + storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 0, 1), 69), ]) self.trigger_processing() self.storage.add_measures(self.metric, [ - storage.Measure(datetime.datetime(2014, 1, 1, 13, 0, 1), 1), + storage.Measure(utils.datetime_utc(2014, 1, 1, 13, 0, 1), 1), ]) with mock.patch('gnocchi.carbonara.AggregatedTimeSerie.unserialize', @@ -82,7 +82,7 @@ class TestStorageDriver(tests_base.TestCase): None, None, full=True) self.assertEqual(set(), metrics) self.storage.add_measures(self.metric, [ - storage.Measure(datetime.datetime(2014, 1, 1, 12, 0, 1), 69), + storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 0, 1), 69), ]) metrics = self.storage.list_metric_with_measures_to_process( None, None, full=True) @@ -94,7 +94,7 @@ class TestStorageDriver(tests_base.TestCase): def test_delete_nonempty_metric(self): self.storage.add_measures(self.metric, [ - storage.Measure(datetime.datetime(2014, 1, 1, 12, 0, 1), 69), + storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 0, 1), 69), ]) self.trigger_processing() self.storage.delete_metric(self.metric, sync=True) @@ -102,14 +102,14 @@ class TestStorageDriver(tests_base.TestCase): def 
test_delete_nonempty_metric_unprocessed(self): self.storage.add_measures(self.metric, [ - storage.Measure(datetime.datetime(2014, 1, 1, 12, 0, 1), 69), + storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 0, 1), 69), ]) self.storage.delete_metric(self.metric, sync=True) self.trigger_processing() def test_delete_expunge_metric(self): self.storage.add_measures(self.metric, [ - storage.Measure(datetime.datetime(2014, 1, 1, 12, 0, 1), 69), + storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 0, 1), 69), ]) self.trigger_processing() self.index.delete_metric(self.metric.id) @@ -135,7 +135,7 @@ class TestStorageDriver(tests_base.TestCase): def test_add_measures_big(self): m, __ = self._create_metric('high') self.storage.add_measures(m, [ - storage.Measure(datetime.datetime(2014, 1, 1, 12, i, j), 100) + storage.Measure(utils.datetime_utc(2014, 1, 1, 12, i, j), 100) for i in six.moves.range(0, 60) for j in six.moves.range(0, 60)]) self.trigger_processing([str(m.id)]) @@ -145,14 +145,14 @@ class TestStorageDriver(tests_base.TestCase): def test_add_measures_update_subset_split(self): m, m_sql = self._create_metric('medium') measures = [ - storage.Measure(datetime.datetime(2014, 1, 6, i, j, 0), 100) + storage.Measure(utils.datetime_utc(2014, 1, 6, i, j, 0), 100) for i in six.moves.range(2) for j in six.moves.range(0, 60, 2)] self.storage.add_measures(m, measures) self.trigger_processing([str(m.id)]) # add measure to end, in same aggregate time as last point. 
self.storage.add_measures(m, [ - storage.Measure(datetime.datetime(2014, 1, 6, 1, 58, 1), 100)]) + storage.Measure(utils.datetime_utc(2014, 1, 6, 1, 58, 1), 100)]) with mock.patch.object(self.storage, '_store_metric_measures') as c: # should only resample last aggregate @@ -168,13 +168,13 @@ class TestStorageDriver(tests_base.TestCase): def test_add_measures_update_subset(self): m, m_sql = self._create_metric('medium') measures = [ - storage.Measure(datetime.datetime(2014, 1, 6, i, j, 0), 100) + storage.Measure(utils.datetime_utc(2014, 1, 6, i, j, 0), 100) for i in six.moves.range(2) for j in six.moves.range(0, 60, 2)] self.storage.add_measures(m, measures) self.trigger_processing([str(m.id)]) # add measure to end, in same aggregate time as last point. - new_point = datetime.datetime(2014, 1, 6, 1, 58, 1) + new_point = utils.datetime_utc(2014, 1, 6, 1, 58, 1) self.storage.add_measures(m, [storage.Measure(new_point, 100)]) with mock.patch.object(self.storage, '_add_measures') as c: @@ -186,10 +186,10 @@ class TestStorageDriver(tests_base.TestCase): def test_delete_old_measures(self): self.storage.add_measures(self.metric, [ - storage.Measure(datetime.datetime(2014, 1, 1, 12, 0, 1), 69), - storage.Measure(datetime.datetime(2014, 1, 1, 12, 7, 31), 42), - storage.Measure(datetime.datetime(2014, 1, 1, 12, 9, 31), 4), - storage.Measure(datetime.datetime(2014, 1, 1, 12, 12, 45), 44), + storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 0, 1), 69), + storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 7, 31), 42), + storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 9, 31), 4), + storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 12, 45), 44), ]) self.trigger_processing() @@ -203,7 +203,7 @@ class TestStorageDriver(tests_base.TestCase): # One year later… self.storage.add_measures(self.metric, [ - storage.Measure(datetime.datetime(2015, 1, 1, 12, 0, 1), 69), + storage.Measure(utils.datetime_utc(2015, 1, 1, 12, 0, 1), 69), ]) self.trigger_processing() @@ -237,10 +237,10 @@ 
class TestStorageDriver(tests_base.TestCase): # First store some points scattered across different splits self.storage.add_measures(self.metric, [ - storage.Measure(datetime.datetime(2016, 1, 1, 12, 0, 1), 69), - storage.Measure(datetime.datetime(2016, 1, 2, 13, 7, 31), 42), - storage.Measure(datetime.datetime(2016, 1, 4, 14, 9, 31), 4), - storage.Measure(datetime.datetime(2016, 1, 6, 15, 12, 45), 44), + storage.Measure(utils.datetime_utc(2016, 1, 1, 12, 0, 1), 69), + storage.Measure(utils.datetime_utc(2016, 1, 2, 13, 7, 31), 42), + storage.Measure(utils.datetime_utc(2016, 1, 4, 14, 9, 31), 4), + storage.Measure(utils.datetime_utc(2016, 1, 6, 15, 12, 45), 44), ]) self.trigger_processing() @@ -277,8 +277,8 @@ class TestStorageDriver(tests_base.TestCase): # the BoundTimeSerie processing timeserie far away from its current # range. self.storage.add_measures(self.metric, [ - storage.Measure(datetime.datetime(2016, 1, 10, 16, 18, 45), 45), - storage.Measure(datetime.datetime(2016, 1, 10, 17, 12, 45), 46), + storage.Measure(utils.datetime_utc(2016, 1, 10, 16, 18, 45), 45), + storage.Measure(utils.datetime_utc(2016, 1, 10, 17, 12, 45), 46), ]) self.trigger_processing() @@ -312,8 +312,8 @@ class TestStorageDriver(tests_base.TestCase): def test_updated_measures(self): self.storage.add_measures(self.metric, [ - storage.Measure(datetime.datetime(2014, 1, 1, 12, 0, 1), 69), - storage.Measure(datetime.datetime(2014, 1, 1, 12, 7, 31), 42), + storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 0, 1), 69), + storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 7, 31), 42), ]) self.trigger_processing() @@ -325,8 +325,8 @@ class TestStorageDriver(tests_base.TestCase): ], self.storage.get_measures(self.metric)) self.storage.add_measures(self.metric, [ - storage.Measure(datetime.datetime(2014, 1, 1, 12, 9, 31), 4), - storage.Measure(datetime.datetime(2014, 1, 1, 12, 12, 45), 44), + storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 9, 31), 4), + storage.Measure(utils.datetime_utc(2014, 
1, 1, 12, 12, 45), 44), ]) self.trigger_processing() @@ -356,10 +356,10 @@ class TestStorageDriver(tests_base.TestCase): def test_add_and_get_measures(self): self.storage.add_measures(self.metric, [ - storage.Measure(datetime.datetime(2014, 1, 1, 12, 0, 1), 69), - storage.Measure(datetime.datetime(2014, 1, 1, 12, 7, 31), 42), - storage.Measure(datetime.datetime(2014, 1, 1, 12, 9, 31), 4), - storage.Measure(datetime.datetime(2014, 1, 1, 12, 12, 45), 44), + storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 0, 1), 69), + storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 7, 31), 42), + storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 9, 31), 4), + storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 12, 45), 44), ]) self.trigger_processing() @@ -446,10 +446,10 @@ class TestStorageDriver(tests_base.TestCase): def test_get_measure_unknown_aggregation(self): self.storage.add_measures(self.metric, [ - storage.Measure(datetime.datetime(2014, 1, 1, 12, 0, 1), 69), - storage.Measure(datetime.datetime(2014, 1, 1, 12, 7, 31), 42), - storage.Measure(datetime.datetime(2014, 1, 1, 12, 9, 31), 4), - storage.Measure(datetime.datetime(2014, 1, 1, 12, 12, 45), 44), + storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 0, 1), 69), + storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 7, 31), 42), + storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 9, 31), 4), + storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 12, 45), 44), ]) self.assertRaises(storage.AggregationDoesNotExist, self.storage.get_measures, @@ -459,16 +459,16 @@ class TestStorageDriver(tests_base.TestCase): metric2 = storage.Metric(uuid.uuid4(), self.archive_policies['low']) self.storage.add_measures(self.metric, [ - storage.Measure(datetime.datetime(2014, 1, 1, 12, 0, 1), 69), - storage.Measure(datetime.datetime(2014, 1, 1, 12, 7, 31), 42), - storage.Measure(datetime.datetime(2014, 1, 1, 12, 9, 31), 4), - storage.Measure(datetime.datetime(2014, 1, 1, 12, 12, 45), 44), + storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 
0, 1), 69), + storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 7, 31), 42), + storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 9, 31), 4), + storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 12, 45), 44), ]) self.storage.add_measures(metric2, [ - storage.Measure(datetime.datetime(2014, 1, 1, 12, 0, 1), 69), - storage.Measure(datetime.datetime(2014, 1, 1, 12, 7, 31), 42), - storage.Measure(datetime.datetime(2014, 1, 1, 12, 9, 31), 4), - storage.Measure(datetime.datetime(2014, 1, 1, 12, 12, 45), 44), + storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 0, 1), 69), + storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 7, 31), 42), + storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 9, 31), 4), + storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 12, 45), 44), ]) self.assertRaises(storage.AggregationDoesNotExist, self.storage.get_cross_metric_measures, @@ -479,16 +479,16 @@ class TestStorageDriver(tests_base.TestCase): metric2 = storage.Metric(uuid.uuid4(), self.archive_policies['low']) self.storage.add_measures(self.metric, [ - storage.Measure(datetime.datetime(2014, 1, 1, 12, 0, 1), 69), - storage.Measure(datetime.datetime(2014, 1, 1, 12, 7, 31), 42), - storage.Measure(datetime.datetime(2014, 1, 1, 12, 9, 31), 4), - storage.Measure(datetime.datetime(2014, 1, 1, 12, 12, 45), 44), + storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 0, 1), 69), + storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 7, 31), 42), + storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 9, 31), 4), + storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 12, 45), 44), ]) self.storage.add_measures(metric2, [ - storage.Measure(datetime.datetime(2014, 1, 1, 12, 0, 1), 69), - storage.Measure(datetime.datetime(2014, 1, 1, 12, 7, 31), 42), - storage.Measure(datetime.datetime(2014, 1, 1, 12, 9, 31), 4), - storage.Measure(datetime.datetime(2014, 1, 1, 12, 12, 45), 44), + storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 0, 1), 69), + storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 7, 31), 42), + 
storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 9, 31), 4), + storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 12, 45), 44), ]) self.assertRaises(storage.GranularityDoesNotExist, self.storage.get_cross_metric_measures, @@ -499,16 +499,16 @@ class TestStorageDriver(tests_base.TestCase): metric2 = storage.Metric(uuid.uuid4(), self.archive_policies['no_granularity_match']) self.storage.add_measures(self.metric, [ - storage.Measure(datetime.datetime(2014, 1, 1, 12, 0, 1), 69), - storage.Measure(datetime.datetime(2014, 1, 1, 12, 7, 31), 42), - storage.Measure(datetime.datetime(2014, 1, 1, 12, 9, 31), 4), - storage.Measure(datetime.datetime(2014, 1, 1, 12, 12, 45), 44), + storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 0, 1), 69), + storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 7, 31), 42), + storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 9, 31), 4), + storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 12, 45), 44), ]) self.storage.add_measures(metric2, [ - storage.Measure(datetime.datetime(2014, 1, 1, 12, 0, 1), 69), - storage.Measure(datetime.datetime(2014, 1, 1, 12, 7, 31), 42), - storage.Measure(datetime.datetime(2014, 1, 1, 12, 9, 31), 4), - storage.Measure(datetime.datetime(2014, 1, 1, 12, 12, 45), 44), + storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 0, 1), 69), + storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 7, 31), 42), + storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 9, 31), 4), + storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 12, 45), 44), ]) self.assertRaises(storage.MetricUnaggregatable, @@ -518,16 +518,16 @@ class TestStorageDriver(tests_base.TestCase): def test_add_and_get_cross_metric_measures(self): metric2, __ = self._create_metric() self.storage.add_measures(self.metric, [ - storage.Measure(datetime.datetime(2014, 1, 1, 12, 0, 1), 69), - storage.Measure(datetime.datetime(2014, 1, 1, 12, 7, 31), 42), - storage.Measure(datetime.datetime(2014, 1, 1, 12, 9, 31), 4), - storage.Measure(datetime.datetime(2014, 1, 1, 12, 12, 
45), 44), + storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 0, 1), 69), + storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 7, 31), 42), + storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 9, 31), 4), + storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 12, 45), 44), ]) self.storage.add_measures(metric2, [ - storage.Measure(datetime.datetime(2014, 1, 1, 12, 0, 5), 9), - storage.Measure(datetime.datetime(2014, 1, 1, 12, 7, 41), 2), - storage.Measure(datetime.datetime(2014, 1, 1, 12, 10, 31), 4), - storage.Measure(datetime.datetime(2014, 1, 1, 12, 13, 10), 4), + storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 0, 5), 9), + storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 7, 41), 2), + storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 10, 31), 4), + storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 13, 10), 4), ]) self.trigger_processing([str(self.metric.id), str(metric2.id)]) @@ -603,17 +603,17 @@ class TestStorageDriver(tests_base.TestCase): def test_add_and_get_cross_metric_measures_with_holes(self): metric2, __ = self._create_metric() self.storage.add_measures(self.metric, [ - storage.Measure(datetime.datetime(2014, 1, 1, 12, 0, 1), 69), - storage.Measure(datetime.datetime(2014, 1, 1, 12, 7, 31), 42), - storage.Measure(datetime.datetime(2014, 1, 1, 12, 5, 31), 8), - storage.Measure(datetime.datetime(2014, 1, 1, 12, 9, 31), 4), - storage.Measure(datetime.datetime(2014, 1, 1, 12, 12, 45), 42), + storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 0, 1), 69), + storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 7, 31), 42), + storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 5, 31), 8), + storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 9, 31), 4), + storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 12, 45), 42), ]) self.storage.add_measures(metric2, [ - storage.Measure(datetime.datetime(2014, 1, 1, 12, 0, 5), 9), - storage.Measure(datetime.datetime(2014, 1, 1, 12, 7, 31), 2), - storage.Measure(datetime.datetime(2014, 1, 1, 12, 9, 31), 6), - 
storage.Measure(datetime.datetime(2014, 1, 1, 12, 13, 10), 2), + storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 0, 5), 9), + storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 7, 31), 2), + storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 9, 31), 6), + storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 13, 10), 2), ]) self.trigger_processing([str(self.metric.id), str(metric2.id)]) @@ -629,18 +629,18 @@ class TestStorageDriver(tests_base.TestCase): def test_search_value(self): metric2, __ = self._create_metric() self.storage.add_measures(self.metric, [ - storage.Measure(datetime.datetime(2014, 1, 1, 12, 0, 1,), 69), - storage.Measure(datetime.datetime(2014, 1, 1, 12, 7, 31), 42), - storage.Measure(datetime.datetime(2014, 1, 1, 12, 5, 31), 8), - storage.Measure(datetime.datetime(2014, 1, 1, 12, 9, 31), 4), - storage.Measure(datetime.datetime(2014, 1, 1, 12, 12, 45), 42), + storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 0, 1,), 69), + storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 7, 31), 42), + storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 5, 31), 8), + storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 9, 31), 4), + storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 12, 45), 42), ]) self.storage.add_measures(metric2, [ - storage.Measure(datetime.datetime(2014, 1, 1, 12, 0, 5), 9), - storage.Measure(datetime.datetime(2014, 1, 1, 12, 7, 31), 2), - storage.Measure(datetime.datetime(2014, 1, 1, 12, 9, 31), 6), - storage.Measure(datetime.datetime(2014, 1, 1, 12, 13, 10), 2), + storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 0, 5), 9), + storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 7, 31), 2), + storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 9, 31), 6), + storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 13, 10), 2), ]) self.trigger_processing([str(self.metric.id), str(metric2.id)]) @@ -671,9 +671,9 @@ class TestStorageDriver(tests_base.TestCase): str(uuid.uuid4()), name) m = self.index.list_metrics(ids=[m.id])[0] 
self.storage.add_measures(m, [ - storage.Measure(datetime.datetime(2014, 1, 1, 12, 0, 0), 1), - storage.Measure(datetime.datetime(2014, 1, 1, 12, 0, 5), 1), - storage.Measure(datetime.datetime(2014, 1, 1, 12, 0, 10), 1), + storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 0, 0), 1), + storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 0, 5), 1), + storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 0, 10), 1), ]) self.trigger_processing([str(m.id)]) self.assertEqual([ @@ -686,7 +686,7 @@ class TestStorageDriver(tests_base.TestCase): name, [archive_policy.ArchivePolicyItem(granularity=5, points=6)]) m = self.index.list_metrics(ids=[m.id])[0] self.storage.add_measures(m, [ - storage.Measure(datetime.datetime(2014, 1, 1, 12, 0, 15), 1), + storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 0, 15), 1), ]) self.trigger_processing([str(m.id)]) self.assertEqual([ diff --git a/tools/measures_injector.py b/tools/measures_injector.py index 2d58ca62..dd112d55 100755 --- a/tools/measures_injector.py +++ b/tools/measures_injector.py @@ -13,7 +13,6 @@ # implied. # See the License for the specific language governing permissions and # limitations under the License. -import datetime import random from concurrent import futures @@ -45,8 +44,7 @@ def injector(): def todo(metric): for _ in six.moves.range(conf.batch_of_measures): measures = [ - storage.Measure(utils.to_timestamp(datetime.datetime.now()), - random.random()) + storage.Measure(utils.utcnow(), random.random()) for __ in six.moves.range(conf.measures_per_batch)] s.add_measures(metric, measures) -- GitLab From 12affbbff938ac871819717860736b1c1131cc96 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Mon, 19 Sep 2016 10:09:02 +0200 Subject: [PATCH 0386/1483] devstack: rename werkzeug to simple We don't use Werkzeug anymore but PBR wsgi_script entry. 
Change-Id: I52ed2776c2305fd32a9d1530d29e4d94ca534746 --- devstack/plugin.sh | 2 +- devstack/settings | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/devstack/plugin.sh b/devstack/plugin.sh index da6c9679..ae1ba4d7 100644 --- a/devstack/plugin.sh +++ b/devstack/plugin.sh @@ -40,7 +40,7 @@ GITREPO["python-gnocchiclient"]=${GNOCCHICLIENT_REPO:-${GIT_BASE}/openstack/pyth if [ -z "$GNOCCHI_DEPLOY" ]; then # Default - GNOCCHI_DEPLOY=werkzeug + GNOCCHI_DEPLOY=simple # Fallback to common wsgi devstack configuration if [ "$ENABLE_HTTPD_MOD_WSGI_SERVICES" == "True" ]; then diff --git a/devstack/settings b/devstack/settings index 0d6b3d14..b693db70 100644 --- a/devstack/settings +++ b/devstack/settings @@ -15,7 +15,7 @@ GNOCCHI_COORDINATOR_URL=${GNOCCHI_COORDINATOR_URL:-redis://localhost:6379} # GNOCCHI_DEPLOY defines how Gnocchi is deployed, allowed values: # - mod_wsgi : Run Gnocchi under Apache HTTPd mod_wsgi -# - werkzeug : Run gnocchi-api +# - simple : Run gnocchi-api # - uwsgi : Run Gnocchi under uwsgi # - : Fallback to GNOCCHI_USE_MOD_WSGI or ENABLE_HTTPD_MOD_WSGI_SERVICES GNOCCHI_DEPLOY=${GNOCCHI_DEPLOY} -- GitLab From f22caccc638f577a83aa7c86d7ef946bbfab1fd8 Mon Sep 17 00:00:00 2001 From: zhangyanxian Date: Tue, 20 Sep 2016 06:42:46 +0000 Subject: [PATCH 0387/1483] Fix a typo in sqlalchemy.py TrivialFix Change-Id: If3c1cea4d8785d42752ae47341723ba622bdab7a --- gnocchi/indexer/sqlalchemy.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/gnocchi/indexer/sqlalchemy.py b/gnocchi/indexer/sqlalchemy.py index 348d1723..fe56466f 100644 --- a/gnocchi/indexer/sqlalchemy.py +++ b/gnocchi/indexer/sqlalchemy.py @@ -209,7 +209,7 @@ class ResourceClassMapper(object): # We drop foreign keys manually to not lock the destination # table for too long during drop table. 
# It's safe to not use a transaction since - # the resource_type table is already cleaned and commited + # the resource_type table is already cleaned and committed # so this code cannot be triggerred anymore for this # resource_type with facade.writer_connection() as connection: -- GitLab From 96dd4ed08c4585b3d82e1a38d9a4aed0c62f77f9 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Mon, 19 Sep 2016 16:52:06 +0200 Subject: [PATCH 0388/1483] carbonara-drivers: elapsed can be zero If computation is really fast elapsed can be zero. Tests something fail on that with: File "gnocchi/storage/_carbonara.py", line 530, in process_new_measures * computed_points['number']) / elapsed) ZeroDivisionError: float division by zero This change fixes that. Change-Id: I350db83fb90566f985f8555fcb6f2d077bb806d4 --- gnocchi/storage/_carbonara.py | 18 ++++++++++++------ 1 file changed, 12 insertions(+), 6 deletions(-) diff --git a/gnocchi/storage/_carbonara.py b/gnocchi/storage/_carbonara.py index a358d430..40f9f33d 100644 --- a/gnocchi/storage/_carbonara.py +++ b/gnocchi/storage/_carbonara.py @@ -525,14 +525,20 @@ class CarbonaraBasedStorage(storage.StorageDriver): len(agg_methods) * len(metric.archive_policy.definition) ) - speed = ((number_of_operations - * computed_points['number']) / elapsed) + + if elapsed > 0: + perf = " (%d points/s, %d measures/s)" % ( + ((number_of_operations + * computed_points['number']) / elapsed), + ((number_of_operations + * len(measures)) / elapsed) + ) + else: + perf = "" LOG.debug( "Computed new metric %s with %d new measures " - "in %.2f seconds (%d points/s, %d measures/s)" - % (metric.id, len(measures), elapsed, speed, - (number_of_operations * len(measures)) - / elapsed)) + "in %.2f seconds%s" + % (metric.id, len(measures), elapsed, perf)) self._store_unaggregated_timeserie(metric, ts.serialize()) -- GitLab From aa3d0ae22f9004eb6ad294f4cb255f694f32c443 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Mon, 19 Sep 2016 10:28:47 +0200 Subject: 
[PATCH 0389/1483] Fix PostgreSQL migration script with resource_type_state_enum On PostgreSQL, migration scripts are run within transaction, so things can be already committed or not. Adding value on an Enum already commit in PostgreSQL won't work within a new transaction, so we have to delete the old enum and create a new one. Closes-Bug: #1624504 Change-Id: I8227021bc0e3dc63d1a28512fe7c610da4c6dc63 --- ...205ff_add_updating_resource_type_states.py | 33 ++++++++++++++----- 1 file changed, 24 insertions(+), 9 deletions(-) diff --git a/gnocchi/indexer/alembic/versions/27d2a1d205ff_add_updating_resource_type_states.py b/gnocchi/indexer/alembic/versions/27d2a1d205ff_add_updating_resource_type_states.py index 57d8ad5c..21dc7e42 100644 --- a/gnocchi/indexer/alembic/versions/27d2a1d205ff_add_updating_resource_type_states.py +++ b/gnocchi/indexer/alembic/versions/27d2a1d205ff_add_updating_resource_type_states.py @@ -38,17 +38,19 @@ resource_type = sa.sql.table( 'resource_type', sa.sql.column('updated_at', sqlalchemy_base.PreciseTimestamp())) +state_enum = sa.Enum("active", "creating", + "creation_error", "deleting", + "deletion_error", "updating", + "updating_error", + name="resource_type_state_enum") + def upgrade(): op.alter_column('resource_type', 'state', - type_=sa.Enum("active", "creating", - "creation_error", "deleting", - "deletion_error", "updating", - "updating_error", - name="resource_type_state_enum"), + type_=state_enum, nullable=False, - server_default="creating") + server_default=None) # NOTE(sileht): postgresql have a builtin ENUM type, so # just altering the column won't works. @@ -59,10 +61,23 @@ def upgrade(): # we split the 'ALTER TYPE' operation into several steps. 
bind = op.get_bind() if bind and bind.engine.name == "postgresql": - op.execute("ALTER TYPE resource_type_state_enum ADD VALUE 'updating';") - op.execute("ALTER TYPE resource_type_state_enum ADD VALUE " - "'updating_error';") + op.execute("ALTER TYPE resource_type_state_enum RENAME TO \ + old_resource_type_state_enum") + op.execute("CREATE TYPE resource_type_state_enum AS ENUM \ + ('active', 'creating', 'creation_error', \ + 'deleting', 'deletion_error', 'updating', \ + 'updating_error')") + op.execute("ALTER TABLE resource_type ALTER COLUMN state TYPE \ + resource_type_state_enum USING \ + state::text::resource_type_state_enum") + op.execute("DROP TYPE old_resource_type_state_enum") + # NOTE(sileht): we can't alter type with server_default set on + # postgresql... + op.alter_column('resource_type', 'state', + type_=state_enum, + nullable=False, + server_default="creating") op.add_column("resource_type", sa.Column("updated_at", sqlalchemy_base.PreciseTimestamp(), -- GitLab From 72fb45f3c40705359afddf2a75d9371352c49737 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Mon, 19 Sep 2016 09:49:39 +0200 Subject: [PATCH 0390/1483] ceph: rename optional extra names - is not allowed in extra name, and are silently ignored. We also rename them to make them the extra purpose clearer. 
Change-Id: I4470050e1a196577e2c667ce7c2949d82655f77c --- doc/source/install.rst | 8 ++++---- setup.cfg | 4 ++-- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/doc/source/install.rst b/doc/source/install.rst index 6cf36d1e..08d03006 100644 --- a/doc/source/install.rst +++ b/doc/source/install.rst @@ -47,8 +47,8 @@ The list of variants available is: * postgresql – provides PostgreSQL indexer support * swift – provides OpenStack Swift storage support * ceph – provides common part of Ceph storage support -* ceph-pre-jewel – provides Ceph (<10.1.0) storage support -* ceph-jewel-and-later – provides Ceph (>=10.1.0) storage support +* ceph_recommended_lib – provides Ceph (>=0.80) storage support +* ceph_alternative_lib – provides Ceph (>=10.1.0) storage support * file – provides file driver support * doc – documentation building support * test – unit and functional tests support @@ -61,7 +61,7 @@ procedure:: Again, depending on the drivers and features you want to use, you need to install extra variants using, for example:: - pip install -e .[postgresql,ceph,ceph-pre-jewel] + pip install -e .[postgresql,ceph,ceph_recommended_lib] Ceph requirements @@ -72,7 +72,7 @@ only since python-rados >= 9.1.0. To handle this, Gnocchi uses 'cradox' python library which has exactly the same API but works with Ceph >= 0.80.0. If Ceph and python-rados are >= 9.1.0, cradox python library becomes optional -but is still recommended until 10.1.0. +but is still recommended. 
Initialization diff --git a/setup.cfg b/setup.cfg index ff18611e..5a5119ab 100644 --- a/setup.cfg +++ b/setup.cfg @@ -44,9 +44,9 @@ ceph = msgpack-python lz4 tooz>=1.38 -ceph-pre-jewel = +ceph_recommended_lib = cradox>=1.0.9 -ceph-jewel-and-later = +ceph_alternative_lib = python-rados>=10.1.0 # not available on pypi file = msgpack-python -- GitLab From 51a0950121306ac49ff20679b251c41fff59ef63 Mon Sep 17 00:00:00 2001 From: shengping zhang Date: Tue, 20 Sep 2016 17:57:14 +0800 Subject: [PATCH 0391/1483] metricd: fix a data type inconsistent bug The variable 'self.previously_scheduled_metrics' initially is Set type, afterwards it is assigned a List type data ('metrics'), this causes the error: 2016-09-20 17:14:27.919 28772 ERROR gnocchi.cli TypeError: unsupported operand type(s) for -: 'set' and 'list' Change-Id: Iec5ab651d82fdd8d058f8a3232a6cd93df1e3cf5 Closes-Bug: #1625529 --- gnocchi/cli.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/gnocchi/cli.py b/gnocchi/cli.py index e5dd6313..b65d02b9 100644 --- a/gnocchi/cli.py +++ b/gnocchi/cli.py @@ -236,7 +236,7 @@ class MetricScheduler(MetricProcessBase): metrics = list(metrics) for i in six.moves.range(0, len(metrics), self.BLOCK_SIZE): self.queue.put(metrics[i:i + self.BLOCK_SIZE]) - self.previously_scheduled_metrics = metrics + self.previously_scheduled_metrics = set(metrics) LOG.debug("%d metrics scheduled for processing.", len(metrics)) except Exception: LOG.error("Unexpected error scheduling metrics for processing", -- GitLab From d66b6efd1544a2cf66551e2a95c9e154a70fe5b1 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Thu, 15 Sep 2016 23:51:07 +0200 Subject: [PATCH 0392/1483] cli: do not run tooz watchers in parallel If you run more than one tooz watcher at the same time, we might end up updating block_size and block_index in parallel, causing incorrect computing of the block to grab. It's easier and simpler to just run the watchers serially. 
Change-Id: I1d7f4f725a1c62c7511af6189d8deaebf836f31d --- gnocchi/cli.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/gnocchi/cli.py b/gnocchi/cli.py index e5dd6313..8f3e5736 100644 --- a/gnocchi/cli.py +++ b/gnocchi/cli.py @@ -18,7 +18,6 @@ import threading import time import uuid -from concurrent import futures import cotyledon from futurist import periodics import msgpack @@ -195,9 +194,7 @@ class MetricScheduler(MetricProcessBase): def run_watchers(): self._coord.run_watchers() - self.periodic = periodics.PeriodicWorker.create( - [], executor_factory=lambda: - futures.ThreadPoolExecutor(max_workers=10)) + self.periodic = periodics.PeriodicWorker.create([]) self.periodic.add(run_watchers) t = threading.Thread(target=self.periodic.start) t.daemon = True -- GitLab From 39ee5998bd34fe534c035f42333989109a9c66e9 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Tue, 20 Sep 2016 14:53:09 +0200 Subject: [PATCH 0393/1483] resource_type: check that min is not None before comparing with max None > int() is not valid in Python 3. 
Change-Id: Ifd49eda012ef7a7b60bcf34b72e4df4f9f3d083b --- gnocchi/resource_type.py | 2 +- .../tests/gabbi/gabbits/resource-type.yaml | 29 +++++++++++++++++++ 2 files changed, 30 insertions(+), 1 deletion(-) diff --git a/gnocchi/resource_type.py b/gnocchi/resource_type.py index 79c0e8b7..df6f7189 100644 --- a/gnocchi/resource_type.py +++ b/gnocchi/resource_type.py @@ -127,7 +127,7 @@ class NumberSchema(CommonAttributeSchema): def __init__(self, min, max, *args, **kwargs): super(NumberSchema, self).__init__(*args, **kwargs) - if max is not None and min > max: + if max is not None and min is not None and min > max: raise InvalidResourceAttributeValue(min, max) self.min = min diff --git a/gnocchi/tests/gabbi/gabbits/resource-type.yaml b/gnocchi/tests/gabbi/gabbits/resource-type.yaml index 6079f1e1..786cf27e 100644 --- a/gnocchi/tests/gabbi/gabbits/resource-type.yaml +++ b/gnocchi/tests/gabbi/gabbits/resource-type.yaml @@ -113,6 +113,10 @@ tests: required: false min: -2 max: 3 + intnomin: + type: number + required: false + max: 3 float: type: number required: false @@ -143,6 +147,11 @@ tests: required: False min: -2 max: 3 + intnomin: + type: number + required: False + min: + max: 3 float: type: number required: false @@ -189,6 +198,11 @@ tests: required: False min: -2 max: 3 + intnomin: + type: number + required: False + min: + max: 3 float: type: number required: false @@ -353,6 +367,11 @@ tests: required: False min: -2 max: 3 + intnomin: + type: number + required: False + min: + max: 3 float: type: number required: false @@ -385,6 +404,11 @@ tests: required: False min: -2 max: 3 + intnomin: + type: number + required: False + min: + max: 3 float: type: number required: false @@ -434,6 +458,11 @@ tests: required: False min: -2 max: 3 + intnomin: + type: number + required: False + min: + max: 3 float: type: number required: false -- GitLab From d28daa279b1d6417720e64d6db6fbd27fb284f34 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Tue, 20 Sep 2016 23:01:20 +0200 
Subject: [PATCH 0394/1483] Fix gnocchi-metricd shutdown gnocchi-metricd does not shutdown gracefully because we block on metric queue.get(). This change adds a timeout to allow the process to terminate every 10 seconds. Change-Id: Id7917464009b19a0e27ff84694803f97f43cfac7 --- gnocchi/cli.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/gnocchi/cli.py b/gnocchi/cli.py index f2bdf18a..8ae29ca4 100644 --- a/gnocchi/cli.py +++ b/gnocchi/cli.py @@ -271,8 +271,12 @@ class MetricProcessor(MetricProcessBase): def _run_job(self): try: - metrics = [] - metrics = self.queue.get(block=True) + try: + metrics = self.queue.get(block=True, timeout=10) + except six.moves.queue.Empty: + # NOTE(sileht): Allow the process to exit gracefully every + # 10 seconds + return self.store.process_background_tasks(self.index, metrics) except Exception: LOG.error("Unexpected error during measures processing", -- GitLab From 252e7bf1aef7638a59f2359627069302b602bd60 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Fri, 16 Sep 2016 01:06:32 +0200 Subject: [PATCH 0395/1483] track the metric locked time Change-Id: I108f05e58a265425622cd04dcd7524cf7bce44b1 --- gnocchi/storage/_carbonara.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/gnocchi/storage/_carbonara.py b/gnocchi/storage/_carbonara.py index c675af32..065b7870 100644 --- a/gnocchi/storage/_carbonara.py +++ b/gnocchi/storage/_carbonara.py @@ -485,6 +485,7 @@ class CarbonaraBasedStorage(storage.StorageDriver): # get back later to it if needed. 
if lock.acquire(blocking=sync): try: + locksw = timeutils.StopWatch().start() LOG.debug("Processing measures for %s" % metric) with self._process_measure_for_metric(metric) as measures: # NOTE(mnaser): The metric could have been handled by @@ -575,7 +576,12 @@ class CarbonaraBasedStorage(storage.StorageDriver): self._store_unaggregated_timeserie(metric, ts.serialize()) + + LOG.debug("Metric %s locked during %.2f seconds" % + (metric.id, locksw.elapsed())) except Exception: + LOG.debug("Metric %s locked during %.2f seconds" % + (metric.id, locksw.elapsed())) if sync: raise LOG.error("Error processing new measures", exc_info=True) -- GitLab From 7a37e4e8724de6309df9e8052eccce4792f05505 Mon Sep 17 00:00:00 2001 From: gord chung Date: Wed, 21 Sep 2016 20:57:10 +0000 Subject: [PATCH 0396/1483] compute new first_block_timestamp once first_block_timestamp doesn't change across granularities or aggregates. let's compute it once rather than granularities*aggregates times. Change-Id: If03132e6c96976f993847965645999d547f85698 --- gnocchi/storage/_carbonara.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/gnocchi/storage/_carbonara.py b/gnocchi/storage/_carbonara.py index 065b7870..5b7e0fd9 100644 --- a/gnocchi/storage/_carbonara.py +++ b/gnocchi/storage/_carbonara.py @@ -537,6 +537,8 @@ class CarbonaraBasedStorage(storage.StorageDriver): # granularity. 
the following takes only the points # affected by new measures for specific granularity tstamp = max(bound_timeserie.first, measures[0][0]) + new_first_block_timestamp = ( + bound_timeserie.first_block_timestamp()) computed_points['number'] = len(bound_timeserie) for d in metric.archive_policy.definition: ts = bound_timeserie.group_serie( @@ -546,7 +548,7 @@ class CarbonaraBasedStorage(storage.StorageDriver): self._add_measures, ((aggregation, d, metric, ts, current_first_block_timestamp, - bound_timeserie.first_block_timestamp()) + new_first_block_timestamp) for aggregation in agg_methods)) with timeutils.StopWatch() as sw: -- GitLab From bd142765daff8ced7b6919d2e33158bcc9bcfd6e Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Sat, 10 Sep 2016 23:22:57 +0200 Subject: [PATCH 0397/1483] Add simple upgrade tests This change adds a tox targets that run upgrade testing. The two jobs have been chosen for gate testing, but other can be later added for local testing. Change-Id: I7d06847243fd3be4e8cd381205d6c21bcd087f30 --- 7bcd2a25.diff | 30 ++++++++++++++ bindep.txt | 2 + run-upgrade-tests.sh | 95 ++++++++++++++++++++++++++++++++++++++++++++ tox.ini | 28 ++++++++++++- 4 files changed, 154 insertions(+), 1 deletion(-) create mode 100644 7bcd2a25.diff create mode 100755 run-upgrade-tests.sh diff --git a/7bcd2a25.diff b/7bcd2a25.diff new file mode 100644 index 00000000..6a7b18e2 --- /dev/null +++ b/7bcd2a25.diff @@ -0,0 +1,30 @@ +From 7bcd2a259be0a35d9387a24329f55250efde3aec Mon Sep 17 00:00:00 2001 +From: Mehdi Abaakouk +Date: Mon, 12 Sep 2016 19:54:03 +0200 +Subject: [PATCH] ceph: Fix metricd start + +metricd can be started before api, in that case +metricd fail because the measure object don't yet exists. 
+ +Change-Id: Id7822f16718e31d6a8916cec8a6b77194071a31e +--- + +diff --git a/gnocchi/storage/ceph.py b/gnocchi/storage/ceph.py +index 15e1dad..d2ea4f8 100644 +--- a/gnocchi/storage/ceph.py ++++ b/gnocchi/storage/ceph.py +@@ -167,8 +167,12 @@ + def _list_object_names_to_process(self, prefix=""): + with rados.ReadOpCtx() as op: + omaps, ret = self.ioctx.get_omap_vals(op, "", prefix, -1) +- self.ioctx.operate_read_op( +- op, self.MEASURE_PREFIX, flag=self.OMAP_READ_FLAGS) ++ try: ++ self.ioctx.operate_read_op( ++ op, self.MEASURE_PREFIX, flag=self.OMAP_READ_FLAGS) ++ except rados.ObjectNotFound: ++ # API have still written nothing ++ return () + # NOTE(sileht): after reading the libradospy, I'm + # not sure that ret will have the correct value + # get_omap_vals transforms the C int to python int diff --git a/bindep.txt b/bindep.txt index 613ee2bc..cd6bd714 100644 --- a/bindep.txt +++ b/bindep.txt @@ -4,3 +4,5 @@ mysql-client [platform:dpkg] mysql-server [platform:dpkg] build-essential [platform:dpkg] libffi-dev [platform:dpkg] +librados-dev [platform:dpkg] +ceph [platform:dpkg] diff --git a/run-upgrade-tests.sh b/run-upgrade-tests.sh new file mode 100755 index 00000000..8b1d30ba --- /dev/null +++ b/run-upgrade-tests.sh @@ -0,0 +1,95 @@ +#!/bin/bash +set -e + +export OS_AUTH_PLUGIN=gnocchi-noauth +export GNOCCHI_ENDPOINT=http://localhost:8041 +export GNOCCHI_USER_ID=99aae-4dc2-4fbc-b5b8-9688c470d9cc +export GNOCCHI_PROJECT_ID=c8d27445-48af-457c-8e0d-1de7103eae1f +export GNOCCHI_DATA=$(mktemp -d -t gnocchi.XXXX) + +RESOURCE_IDS=( + "5a301761-aaaa-46e2-8900-8b4f6fe6675a" + "5a301761-bbbb-46e2-8900-8b4f6fe6675a" + "5a301761-cccc-46e2-8900-8b4f6fe6675a" +) + +GDATE=$((which gdate >/dev/null && echo gdate) || echo date) + +dump_data(){ + dir="$1" + mkdir -p $dir + echo "* Dumping measures aggregations to $dir" + for resource_id in $RESOURCE_IDS; do + for agg in min max mean sum ; do + gnocchi measures show --aggregation $agg --resource-id $resource_id metric > 
$dir/${agg}.txt + done + done +} + +inject_data() { + echo "* Injecting measures in Gnocchi" + # TODO(sileht): Generate better data that ensure we have enought split that cover all + # situation + for resource_id in $RESOURCE_IDS; do + gnocchi resource create generic --attribute id:$resource_id -n metric:high >/dev/null + done + + { + echo -n '{' + resource_sep="" + for resource_id in $RESOURCE_IDS; do + echo -n "$resource_sep \"$resource_id\": { \"metric\": [ " + measures_sep="" + for i in $(seq 0 10 288000); do + now=$($GDATE --iso-8601=s -d "-${i}minute") ; value=$((RANDOM % 13 + 52)) + echo -n "$measures_sep {\"timestamp\": \"$now\", \"value\": $value }" + measures_sep="," + done + echo -n "] }" + resource_sep="," + done + echo -n '}' + } | gnocchi measures batch-resources-metrics - + + echo "* Waiting for measures computation" + while [ $(gnocchi status -f value -c "storage/total number of measures to process") -gt 0 ]; do sleep 1 ; done +} + +pifpaf_stop(){ + : +} + +cleanup(){ + pifpaf_stop + rm -rf $GNOCCHI_DATA +} +trap cleanup EXIT + + +if [ "$STORAGE_DAEMON" == "ceph" ]; then + rados -c $STORAGE_CEPH_CONF mkpool gnocchi + STORAGE_URL=ceph://$STORAGE_CEPH_CONF +else + STORAGE_URL=file://$GNOCCHI_DATA +fi + +# NOTE(sileht): temporary fix a gnocchi 2.2 bug +# https://review.openstack.org/#/c/369011/ +patch -p2 -d $VIRTUAL_ENV/lib/python*/site-packages/gnocchi < 7bcd2a25.diff + +eval $(pifpaf run gnocchi --indexer-url $INDEXER_URL --storage-url $STORAGE_URL) +inject_data $GNOCCHI_DATA +dump_data $GNOCCHI_DATA/old +pifpaf_stop + +old_version=$(pip freeze | sed -n '/gnocchi==/s/.*==\(.*\)/\1/p') +new_version=$(python setup.py --version) +echo "* Upgrading Gnocchi from $old_version to $new_version" +pip install -q -U .[${GNOCCHI_VARIANT}] + + +eval $(pifpaf run gnocchi --indexer-url $INDEXER_URL --storage-url $STORAGE_URL) +dump_data $GNOCCHI_DATA/new + +echo "* Checking output difference between Gnocchi $old_version and $new_version" +diff -uNr 
$GNOCCHI_DATA/old $GNOCCHI_DATA/new diff --git a/tox.ini b/tox.ini index 032b2ef6..c4b4b6b9 100644 --- a/tox.ini +++ b/tox.ini @@ -1,6 +1,6 @@ [tox] minversion = 1.8 -envlist = py{34,35,27},py{34,35,27}-{postgresql,mysql}{,-file,-swift,-ceph},pep8,bashate +envlist = py{34,35,27},py{34,35,27}-{postgresql,mysql}{,-file,-swift,-ceph},pep8,bashate,py35-postgresql-file-upgrade-from-2.2,py27-mysql-ceph-upgrade-from-2.2 [testenv] usedevelop = True @@ -24,6 +24,32 @@ commands = oslo-config-generator --config-file=etc/gnocchi/gnocchi-config-generator.conf {toxinidir}/run-tests.sh {posargs} +[testenv:py35-postgresql-file-upgrade-from-2.2] +# We should always recreate since the script upgrade +# Gnocchi we can't reuse the virtualenv +recreate = True +skip_install = True +usedevelop = False +setenv = GNOCCHI_VARIANT=test,postgresql,file +deps = gnocchi[{env:GNOCCHI_VARIANT}]>=2.2,<2.3 + pifpaf>=0.13 + gnocchiclient +commands = pifpaf --env-prefix INDEXER run postgresql {toxinidir}/run-upgrade-tests.sh {posargs} + +[testenv:py27-mysql-ceph-upgrade-from-2.2] +# We should always recreate since the script upgrade +# Gnocchi we can't reuse the virtualenv +recreate = True +skip_install = True +usedevelop = False +setenv = GNOCCHI_VARIANT=test,mysql,ceph,ceph_recommended_lib +deps = gnocchi[{env:GNOCCHI_VARIANT}]>=2.2,<2.3 + gnocchiclient + pifpaf>=0.13 + cradox +# cradox is required because 2.2 extra names are incorrect +commands = pifpaf --env-prefix INDEXER run mysql -- pifpaf --env-prefix STORAGE run ceph {toxinidir}/run-upgrade-tests.sh {posargs} + [testenv:bashate] deps = bashate commands = bashate -v devstack/plugin.sh devstack/gate/gate_hook.sh devstack/gate/post_test_hook.sh -- GitLab From 31cdcb5b4fa8450ac831978f02314b86d5d1ef28 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Mon, 19 Sep 2016 15:28:47 +0200 Subject: [PATCH 0398/1483] ceph: fix python3 issue Change-Id: I089f15e085780eab236a1c57810cbf6645e07d42 --- gnocchi/storage/ceph.py | 8 ++++---- 1 file changed, 4 
insertions(+), 4 deletions(-) diff --git a/gnocchi/storage/ceph.py b/gnocchi/storage/ceph.py index dc4b1696..312276c4 100644 --- a/gnocchi/storage/ceph.py +++ b/gnocchi/storage/ceph.py @@ -127,7 +127,7 @@ class CephStorage(_carbonara.CarbonaraBasedStorage): except rados.ObjectNotFound: return with rados.WriteOpCtx() as op: - self.ioctx.set_omap(op, xattrs, xattrs) + self.ioctx.set_omap(op, xattrs, tuple([b""]*len(xattrs))) self.ioctx.operate_write_op(op, self.MEASURE_PREFIX, flags=self.OMAP_WRITE_FLAGS) @@ -150,7 +150,7 @@ class CephStorage(_carbonara.CarbonaraBasedStorage): self.ioctx.write_full(name, data) with rados.WriteOpCtx() as op: - self.ioctx.set_omap(op, (name,), ("",)) + self.ioctx.set_omap(op, (name,), (b"",)) self.ioctx.operate_write_op(op, self.MEASURE_PREFIX, flags=self.OMAP_WRITE_FLAGS) @@ -254,7 +254,7 @@ class CephStorage(_carbonara.CarbonaraBasedStorage): if self._object_exists(name): raise storage.MetricAlreadyExists(metric) else: - self.ioctx.write_full(name, "metric created") + self.ioctx.write_full(name, b"metric created") def _store_metric_measures(self, metric, timestamp_key, aggregation, granularity, data, offset=None, version=3): @@ -264,7 +264,7 @@ class CephStorage(_carbonara.CarbonaraBasedStorage): self.ioctx.write_full(name, data) else: self.ioctx.write(name, data, offset=offset) - self.ioctx.set_xattr("gnocchi_%s_container" % metric.id, name, "") + self.ioctx.set_xattr("gnocchi_%s_container" % metric.id, name, b"") def _delete_metric_measures(self, metric, timestamp_key, aggregation, granularity, version=3): -- GitLab From c6b2c5185c67038545759aef067198207756bad0 Mon Sep 17 00:00:00 2001 From: Davanum Srinivas Date: Thu, 22 Sep 2016 08:30:08 -0400 Subject: [PATCH 0399/1483] Update .gitreview for stable/3.0 Change-Id: I860b493122e63183ed81c1df1a80112ae8ae9ce0 --- .gitreview | 1 + 1 file changed, 1 insertion(+) diff --git a/.gitreview b/.gitreview index e4b8477d..2cc8cebc 100644 --- a/.gitreview +++ b/.gitreview @@ -2,3 +2,4 @@ 
host=review.openstack.org port=29418 project=openstack/gnocchi.git +defaultbranch=stable/3.0 -- GitLab From 723557ff647ec3b62820a8448a378effc91e35f9 Mon Sep 17 00:00:00 2001 From: Boden R Date: Thu, 8 Sep 2016 14:35:34 -0600 Subject: [PATCH 0400/1483] Replace retrying with tenacity We are replacing all usages of the 'retrying' package with 'tenacity' as the author of retrying is not actively maintaining the project. Tenacity is a fork of retrying, but has improved the interface and extensibility (see [1] for more details). Our end goal here is removing the retrying package from our requirements. Tenacity provides the same functionality as retrying, but has the following major differences to account for: - tenacity uses seconds rather than ms as retrying did. - tenacity has different kwargs for the decorator and Retrying class itself. - tenacity has a different approach for retrying args by using classes for its stop/wait/retry kwargs. - By default tenacity raises a RetryError if a retried callable times out; retrying raises the last exception from the callable. Tenacity provides backwards compatibility here by offering the 'reraise' kwarg. - tenacity defines 'time.sleep' as a default value for a kwarg. That said consumers who need to mock patch time.sleep need to account for this via mocking of time.sleep before tenacity is imported. This patch updates all usages of retrying with tenacity. Unit tests will be added where applicable. Note: This change is not newton critical so projects are welcome to hold off on committing until post-newton. Ideally this change will merge by the first part of Ocata so dependant functionality can land and have time to solidify for Ocata. 
[1] https://github.com/jd/tenacity Change-Id: Id9507d1a192768cd636be308f85275a80df0157c --- gnocchi/utils.py | 13 +++++-------- requirements.txt | 2 +- 2 files changed, 6 insertions(+), 9 deletions(-) diff --git a/gnocchi/utils.py b/gnocchi/utils.py index a49b161d..b7145654 100644 --- a/gnocchi/utils.py +++ b/gnocchi/utils.py @@ -18,8 +18,8 @@ import datetime import iso8601 from oslo_utils import timeutils from pytimeparse import timeparse -import retrying import six +import tenacity import uuid # uuid5 namespace for id transformation. @@ -54,14 +54,11 @@ class Retry(Exception): pass -def retry_if_retry_is_raised(exception): - return isinstance(exception, Retry) - - # Retry with exponential backoff for up to 1 minute -retry = retrying.retry(wait_exponential_multiplier=500, - wait_exponential_max=60000, - retry_on_exception=retry_if_retry_is_raised) +retry = tenacity.retry( + wait=tenacity.wait_exponential(multiplier=0.5, max=60), + retry=tenacity.retry_if_exception_type(Retry), + reraise=True) def to_timestamp(v): diff --git a/requirements.txt b/requirements.txt index 23cf70d1..dfec86af 100644 --- a/requirements.txt +++ b/requirements.txt @@ -19,7 +19,7 @@ stevedore voluptuous werkzeug trollius; python_version < '3.4' -retrying +tenacity>=3.1.0 # Apache-2.0 WebOb>=1.4.1 Paste PasteDeploy -- GitLab From b8c0c23dd5b47624c580c3cb5b72307fdc23fdff Mon Sep 17 00:00:00 2001 From: Xiang Li Date: Fri, 23 Sep 2016 07:20:31 -0400 Subject: [PATCH 0401/1483] Modify api startup parameters in devstack plugin Because the api binary script is generated by pbr now, the api startup parameters need to be adapted accordingly in devstack script or the api will be unable to start if the devstack deploy mode is 'simple'. 
Change-Id: I72abc3a83a4a2cf993198b0b1ac98c79ee26ec56 Closes-Bug: #1626979 --- devstack/plugin.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/devstack/plugin.sh b/devstack/plugin.sh index ae1ba4d7..ff9db54a 100644 --- a/devstack/plugin.sh +++ b/devstack/plugin.sh @@ -426,7 +426,7 @@ function start_gnocchi { elif [ "$GNOCCHI_DEPLOY" == "uwsgi" ]; then run_process gnocchi-api "$GNOCCHI_BIN_DIR/uwsgi $GNOCCHI_UWSGI_FILE" else - run_process gnocchi-api "$GNOCCHI_BIN_DIR/gnocchi-api -d -v --config-file $GNOCCHI_CONF" + run_process gnocchi-api "$GNOCCHI_BIN_DIR/gnocchi-api --port $GNOCCHI_SERVICE_PORT" fi # only die on API if it was actually intended to be turned on if is_service_enabled gnocchi-api; then -- GitLab From b5ad7170a078a89581aed61a07e302ee911f0f1b Mon Sep 17 00:00:00 2001 From: shengping zhang Date: Thu, 22 Sep 2016 16:14:22 +0800 Subject: [PATCH 0402/1483] Add granularity in searching for values in metrics Added parameter granularity in 'search_value()' method to allow search values by granularities. The http reqeust is like this: method: POST uri: /v1/search/metric?metric_id= &granularity=1second&granularity=2s data body: {...} Change-Id: Ifed526e56a052fec29f9b8b16a3979d959aa814b Closes-Bug: #1626409 --- doc/source/rest.j2 | 7 +- doc/source/rest.yaml | 52 +++++++++ gnocchi/rest/__init__.py | 12 +- gnocchi/storage/_carbonara.py | 12 +- .../tests/gabbi/gabbits/search-metric.yaml | 110 +++++++++++++++++- ...paramter-granularity-7f22c677dc1b1238.yaml | 4 + 6 files changed, 186 insertions(+), 11 deletions(-) create mode 100644 releasenotes/notes/add-paramter-granularity-7f22c677dc1b1238.yaml diff --git a/doc/source/rest.j2 b/doc/source/rest.j2 index c6018650..143cc5bf 100644 --- a/doc/source/rest.j2 +++ b/doc/source/rest.j2 @@ -449,13 +449,18 @@ is also available in the filtering queries. Searching for values in metrics =============================== -It's possible to search for values in metrics. 
For example, this will look for +It is possible to search for values in metrics. For example, this will look for all values that are greater than or equal to 50 if we add 23 to them and that are not equal to 55. You have to specify the list of metrics to look into by using the `metric_id` query parameter several times. {{ scenarios['search-value-in-metric']['doc'] }} +And it is possible to search for values in metrics by using one or multiple +granularities: + +{{ scenarios['search-value-in-metrics-by-granularity']['doc'] }} + You can specify a time range to look for by specifying the `start` and/or `stop` query parameter, and the aggregation method to use by specifying the `aggregation` query parameter. diff --git a/doc/source/rest.yaml b/doc/source/rest.yaml index b4d665fc..78aceb16 100644 --- a/doc/source/rest.yaml +++ b/doc/source/rest.yaml @@ -202,6 +202,58 @@ {"and": [{">=": [{"+": 23}, 50]}, {"!=": 55}]} +- name: create-metric-a + request: | + POST /v1/metric HTTP/1.1 + Content-Type: application/json + + { + "archive_policy_name": "short" + } + +- name: post-measures-for-granularity-search + request: | + POST /v1/metric/{{ scenarios['create-metric-a']['response'].json['id'] }}/measures HTTP/1.1 + Content-Type: application/json + + [ + { + "timestamp": "2014-10-06T14:34:12", + "value": 12 + }, + { + "timestamp": "2014-10-06T14:34:14", + "value": 12 + }, + { + "timestamp": "2014-10-06T14:34:16", + "value": 12 + }, + { + "timestamp": "2014-10-06T14:34:18", + "value": 12 + }, + { + "timestamp": "2014-10-06T14:34:20", + "value": 12 + }, + { + "timestamp": "2014-10-06T14:34:22", + "value": 12 + }, + { + "timestamp": "2014-10-06T14:34:24", + "value": 12 + } + ] + +- name: search-value-in-metrics-by-granularity + request: | + POST /v1/search/metric?metric_id={{ scenarios['create-metric-a']['response'].json['id'] }}&granularity=1second&granularity=1800s HTTP/1.1 + Content-Type: application/json + + {"=": 12} + - name: get-measures request: GET /v1/metric/{{ 
scenarios['create-metric']['response'].json['id'] }}/measures HTTP/1.1 diff --git a/gnocchi/rest/__init__.py b/gnocchi/rest/__init__.py index c23c9aee..a2871a61 100644 --- a/gnocchi/rest/__init__.py +++ b/gnocchi/rest/__init__.py @@ -1245,7 +1245,11 @@ class SearchMetricController(rest.RestController): ) @pecan.expose('json') - def post(self, metric_id, start=None, stop=None, aggregation='mean'): + def post(self, metric_id, start=None, stop=None, aggregation='mean', + granularity=[]): + + granularity = [Timespan(g) + for g in arg_to_list(granularity)] metrics = pecan.request.indexer.list_metrics( ids=arg_to_list(metric_id)) @@ -1274,11 +1278,15 @@ class SearchMetricController(rest.RestController): str(metric.id): values for metric, values in six.iteritems( pecan.request.storage.search_value( - metrics, query, start, stop, aggregation) + metrics, query, start, stop, aggregation, + granularity + ) ) } except storage.InvalidQuery as e: abort(400, e) + except storage.GranularityDoesNotExist as e: + abort(400, e) class ResourcesMetricsMeasuresBatchController(rest.RestController): diff --git a/gnocchi/storage/_carbonara.py b/gnocchi/storage/_carbonara.py index 065b7870..5e050fa0 100644 --- a/gnocchi/storage/_carbonara.py +++ b/gnocchi/storage/_carbonara.py @@ -645,18 +645,20 @@ class CarbonaraBasedStorage(storage.StorageDriver): for timestamp, g, value in values if predicate(value)]} - # TODO(jd) Add granularity parameter here and in the REST API - # rather than fetching all granularities def search_value(self, metrics, query, from_timestamp=None, - to_timestamp=None, aggregation='mean'): + to_timestamp=None, aggregation='mean', + granularity=[]): predicate = storage.MeasureQuery(query) + results = self._map_in_thread( self._find_measure, [(metric, aggregation, - ap.granularity, predicate, + gran, predicate, from_timestamp, to_timestamp) for metric in metrics - for ap in metric.archive_policy.definition]) + for gran in granularity or + (defin.granularity + for defin in 
metric.archive_policy.definition)]) result = collections.defaultdict(list) for r in results: for metric, metric_result in six.iteritems(r): diff --git a/gnocchi/tests/gabbi/gabbits/search-metric.yaml b/gnocchi/tests/gabbi/gabbits/search-metric.yaml index 95c31a37..13492971 100644 --- a/gnocchi/tests/gabbi/gabbits/search-metric.yaml +++ b/gnocchi/tests/gabbi/gabbits/search-metric.yaml @@ -16,7 +16,12 @@ tests: data: name: high definition: - - granularity: 1 second + - granularity: 1 second + timespan: 1 hour + - granularity: 2 second + timespan: 1 hour + response_headers: + location: $SCHEME://$NETLOC/v1/archive_policy/high status: 201 - name: create metric @@ -24,11 +29,110 @@ tests: request_headers: content-type: application/json data: - archive_policy_name: "high" + archive_policy_name: high status: 201 + - name: post measures + desc: for later use + POST: /v1/batch/metrics/measures + request_headers: + content-type: application/json + data: + $RESPONSE['$.id']: + - timestamp: "2014-10-06T14:34:12" + value: 12 + - timestamp: "2014-10-06T14:34:14" + value: 12 + - timestamp: "2014-10-06T14:34:16" + value: 12 + - timestamp: "2014-10-06T14:34:18" + value: 12 + - timestamp: "2014-10-06T14:34:20" + value: 12 + - timestamp: "2014-10-06T14:34:22" + value: 12 + - timestamp: "2014-10-06T14:34:24" + value: 12 + - timestamp: "2014-10-06T14:34:26" + value: 12 + - timestamp: "2014-10-06T14:34:28" + value: 12 + - timestamp: "2014-10-06T14:34:30" + value: 12 + - timestamp: "2014-10-06T14:34:32" + value: 12 + - timestamp: "2014-10-06T14:34:34" + value: 12 + status: 202 + + - name: get metric id for search one + GET: /v1/metric + status: 200 + response_json_paths: + $[0].archive_policy.name: high + + - name: search with one correct granularity + POST: /v1/search/metric?metric_id=$RESPONSE['$[0].id']&granularity=1s + request_headers: + content-type: application/json + data: + "=": 12 + status: 200 + + - name: get metric id for search two + GET: /v1/metric + status: 200 + 
response_json_paths: + $[0].archive_policy.name: high + + - name: search with multiple correct granularities + POST: /v1/search/metric?metric_id=$RESPONSE['$[0].id']&granularity=1second&granularity=2s + request_headers: + content-type: application/json + data: + "=": 12 + status: 200 + + - name: get metric id for search three + GET: /v1/metric + status: 200 + response_json_paths: + $[0].archive_policy.name: high + + - name: search with correct and incorrect granularities + POST: /v1/search/metric?metric_id=$RESPONSE['$[0].id']&granularity=1s&granularity=300 + request_headers: + content-type: application/json + data: + "=": 12 + status: 400 + response_strings: + - Granularity '300.0' for metric $RESPONSE['$[0].id'] does not exist + + - name: get metric id for search four + GET: /v1/metric + status: 200 + response_json_paths: + $[0].archive_policy.name: high + + - name: search with incorrect granularity + POST: /v1/search/metric?metric_id=$RESPONSE['$[0].id']&granularity=300 + request_headers: + content-type: application/json + data: + "=": 12 + status: 400 + response_strings: + - Granularity '300.0' for metric $RESPONSE['$[0].id'] does not exist + + - name: get metric id for search five + GET: /v1/metric + status: 200 + response_json_paths: + $[0].archive_policy.name: high + - name: search measure with wrong start - POST: /v1/search/metric?metric_id=$RESPONSE['$.id']&start=foobar + POST: /v1/search/metric?metric_id=$RESPONSE['$[0].id']&start=foobar request_headers: content-type: application/json data: diff --git a/releasenotes/notes/add-paramter-granularity-7f22c677dc1b1238.yaml b/releasenotes/notes/add-paramter-granularity-7f22c677dc1b1238.yaml new file mode 100644 index 00000000..2f833808 --- /dev/null +++ b/releasenotes/notes/add-paramter-granularity-7f22c677dc1b1238.yaml @@ -0,0 +1,4 @@ +--- +features: + - Allow to search for values in metrics by using + one or more granularities. 
-- GitLab From b5106ebb8c8ae72e97033db5cef1e0987d46120b Mon Sep 17 00:00:00 2001 From: Hanxi Liu Date: Sun, 25 Sep 2016 10:32:55 +0800 Subject: [PATCH 0403/1483] Fix typos in tests/gabbi/gabbits/resource.yaml Change-Id: Ie2882db6f59d8c43e3c0ef846a80b74f927757ca --- gnocchi/tests/gabbi/gabbits/resource.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/gnocchi/tests/gabbi/gabbits/resource.yaml b/gnocchi/tests/gabbi/gabbits/resource.yaml index 8f3198c3..02da4d97 100644 --- a/gnocchi/tests/gabbi/gabbits/resource.yaml +++ b/gnocchi/tests/gabbi/gabbits/resource.yaml @@ -968,7 +968,7 @@ tests: status: 200 - name: delete random data structure - desc: delete a empty list test + desc: delete an empty list test DELETE: /v1/resource/generic request_headers: x-user-id: 0fbb231484614b1a80131fc22f6afc9c @@ -1082,7 +1082,7 @@ tests: response_json_paths: $.deleted: 2 - - name: delete batch of resources filter by mutliple ids + - name: delete batch of resources filter by multiple ids desc: delete the created resources DELETE: /v1/resource/generic request_headers: -- GitLab From 07034a1ab93bbd0a6132ecb7f2173e8c438efa80 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Wed, 21 Sep 2016 22:40:53 +0200 Subject: [PATCH 0404/1483] doc: include stable/3.0 release notes Change-Id: I85b876d16305647af4e3929bad83d62f29c13cdf Signed-off-by: Julien Danjou --- doc/source/releasenotes/3.0.rst | 6 ++++++ doc/source/releasenotes/index.rst | 1 + 2 files changed, 7 insertions(+) create mode 100644 doc/source/releasenotes/3.0.rst diff --git a/doc/source/releasenotes/3.0.rst b/doc/source/releasenotes/3.0.rst new file mode 100644 index 00000000..4f664099 --- /dev/null +++ b/doc/source/releasenotes/3.0.rst @@ -0,0 +1,6 @@ +=================================== + 3.0 Series Release Notes +=================================== + +.. 
release-notes:: + :branch: origin/stable/3.0 diff --git a/doc/source/releasenotes/index.rst b/doc/source/releasenotes/index.rst index 00b614e3..f4ff78f2 100644 --- a/doc/source/releasenotes/index.rst +++ b/doc/source/releasenotes/index.rst @@ -5,5 +5,6 @@ Release Notes :maxdepth: 2 unreleased + 3.0 2.2 2.1 -- GitLab From 21845a85a987e0fdc7bfada304297772e43b6ea7 Mon Sep 17 00:00:00 2001 From: shengping zhang Date: Fri, 23 Sep 2016 18:09:49 +0800 Subject: [PATCH 0405/1483] remove the pandas module in test test_carbonara.py I saw this: # TODO(jd) We shouldn't use pandas here so try to do some helps I remove the pandas module and use datetime.datetime instead. Change-Id: I5bcd89ed36b1c969346757027959218ea63d9ad3 --- gnocchi/tests/test_carbonara.py | 230 +++++++++++++++++++++++--------- 1 file changed, 165 insertions(+), 65 deletions(-) diff --git a/gnocchi/tests/test_carbonara.py b/gnocchi/tests/test_carbonara.py index 3b06b2a1..07d42db5 100644 --- a/gnocchi/tests/test_carbonara.py +++ b/gnocchi/tests/test_carbonara.py @@ -20,8 +20,6 @@ import math import fixtures from oslo_utils import timeutils from oslotest import base -# TODO(jd) We shouldn't use pandas here -import pandas import six from gnocchi import carbonara @@ -362,14 +360,30 @@ class TestAggregatedTimeSerie(base.BaseTestCase): aggregation='mean', needed_percent_of_overlap=80.0) self.assertEqual([ - (pandas.Timestamp('2014-01-01 12:01:00'), 60.0, 3.0), - (pandas.Timestamp('2014-01-01 12:02:00'), 60.0, 3.0), - (pandas.Timestamp('2014-01-01 12:03:00'), 60.0, 4.0), - (pandas.Timestamp('2014-01-01 12:04:00'), 60.0, 4.0), - (pandas.Timestamp('2014-01-01 12:05:00'), 60.0, 3.0), - (pandas.Timestamp('2014-01-01 12:06:00'), 60.0, 5.0), - (pandas.Timestamp('2014-01-01 12:07:00'), 60.0, 10.0), - (pandas.Timestamp('2014-01-01 12:09:00'), 60.0, 2.0), + (datetime.datetime( + 2014, 1, 1, 12, 1, 0 + ), 60.0, 3.0), + (datetime.datetime( + 2014, 1, 1, 12, 2, 0 + ), 60.0, 3.0), + (datetime.datetime( + 2014, 1, 1, 12, 3, 0 + ), 
60.0, 4.0), + (datetime.datetime( + 2014, 1, 1, 12, 4, 0 + ), 60.0, 4.0), + (datetime.datetime( + 2014, 1, 1, 12, 5, 0 + ), 60.0, 3.0), + (datetime.datetime( + 2014, 1, 1, 12, 6, 0 + ), 60.0, 5.0), + (datetime.datetime( + 2014, 1, 1, 12, 7, 0 + ), 60.0, 10.0), + (datetime.datetime( + 2014, 1, 1, 12, 9, 0 + ), 60.0, 2.0), ], output) def test_aggregated_different_archive_overlap_edge_missing1(self): @@ -406,10 +420,18 @@ class TestAggregatedTimeSerie(base.BaseTestCase): tsc1['return'], tsc2['return']], aggregation='sum') self.assertEqual([ - (pandas.Timestamp('2014-01-01 12:03:00'), 60.0, 33.0), - (pandas.Timestamp('2014-01-01 12:04:00'), 60.0, 5.0), - (pandas.Timestamp('2014-01-01 12:05:00'), 60.0, 18.0), - (pandas.Timestamp('2014-01-01 12:06:00'), 60.0, 19.0), + (datetime.datetime( + 2014, 1, 1, 12, 3, 0 + ), 60.0, 33.0), + (datetime.datetime( + 2014, 1, 1, 12, 4, 0 + ), 60.0, 5.0), + (datetime.datetime( + 2014, 1, 1, 12, 5, 0 + ), 60.0, 18.0), + (datetime.datetime( + 2014, 1, 1, 12, 6, 0 + ), 60.0, 19.0), ], output) def test_aggregated_different_archive_overlap_edge_missing2(self): @@ -432,7 +454,9 @@ class TestAggregatedTimeSerie(base.BaseTestCase): output = carbonara.AggregatedTimeSerie.aggregated( [tsc1['return'], tsc2['return']], aggregation='mean') self.assertEqual([ - (pandas.Timestamp('2014-01-01 12:03:00'), 60.0, 4.0), + (datetime.datetime( + 2014, 1, 1, 12, 3, 0 + ), 60.0, 4.0), ], output) def test_fetch(self): @@ -498,10 +522,12 @@ class TestAggregatedTimeSerie(base.BaseTestCase): result = ts['return'].fetch(datetime.datetime(2014, 1, 1, 12, 0, 0)) reference = [ - (pandas.Timestamp('2014-01-01 12:00:00'), - 1.0, 3.9), - (pandas.Timestamp('2014-01-01 12:00:02'), - 1.0, 4) + (datetime.datetime( + 2014, 1, 1, 12, 0, 0 + ), 1.0, 3.9), + (datetime.datetime( + 2014, 1, 1, 12, 0, 2 + ), 1.0, 4) ] self.assertEqual(len(reference), len(result)) @@ -518,10 +544,12 @@ class TestAggregatedTimeSerie(base.BaseTestCase): result = 
ts['return'].fetch(datetime.datetime(2014, 1, 1, 12, 0, 0)) reference = [ - (pandas.Timestamp('2014-01-01 12:00:00'), - 1.0, 3.9), - (pandas.Timestamp('2014-01-01 12:00:02'), - 1.0, 99.4) + (datetime.datetime( + 2014, 1, 1, 12, 0, 0 + ), 1.0, 3.9), + (datetime.datetime( + 2014, 1, 1, 12, 0, 2 + ), 1.0, 99.4) ] self.assertEqual(len(reference), len(result)) @@ -572,10 +600,12 @@ class TestAggregatedTimeSerie(base.BaseTestCase): self._resample_and_merge, agg_dict=ts)) self.assertEqual([ - (pandas.Timestamp('2014-01-01 12:01:00'), - 60.0, 2.1213203435596424), - (pandas.Timestamp('2014-01-01 12:02:00'), - 60.0, 9.8994949366116654), + (datetime.datetime( + 2014, 1, 1, 12, 1, 0 + ), 60.0, 2.1213203435596424), + (datetime.datetime( + 2014, 1, 1, 12, 2, 0 + ), 60.0, 9.8994949366116654), ], ts['return'].fetch(datetime.datetime(2014, 1, 1, 12, 0, 0))) tsb.set_values([(datetime.datetime(2014, 1, 1, 12, 2, 13), 110)], @@ -583,10 +613,12 @@ class TestAggregatedTimeSerie(base.BaseTestCase): self._resample_and_merge, agg_dict=ts)) self.assertEqual([ - (pandas.Timestamp('2014-01-01 12:01:00'), - 60.0, 2.1213203435596424), - (pandas.Timestamp('2014-01-01 12:02:00'), - 60.0, 59.304300012730948), + (datetime.datetime( + 2014, 1, 1, 12, 1, 0 + ), 60.0, 2.1213203435596424), + (datetime.datetime( + 2014, 1, 1, 12, 2, 0 + ), 60.0, 59.304300012730948), ], ts['return'].fetch(datetime.datetime(2014, 1, 1, 12, 0, 0))) def test_fetch_agg_max(self): @@ -602,9 +634,15 @@ class TestAggregatedTimeSerie(base.BaseTestCase): self._resample_and_merge, agg_dict=ts)) self.assertEqual([ - (pandas.Timestamp('2014-01-01 12:00:00'), 60.0, 3), - (pandas.Timestamp('2014-01-01 12:01:00'), 60.0, 7), - (pandas.Timestamp('2014-01-01 12:02:00'), 60.0, 15), + (datetime.datetime( + 2014, 1, 1, 12, 0, 0 + ), 60.0, 3), + (datetime.datetime( + 2014, 1, 1, 12, 1, 0 + ), 60.0, 7), + (datetime.datetime( + 2014, 1, 1, 12, 2, 0 + ), 60.0, 15), ], ts['return'].fetch(datetime.datetime(2014, 1, 1, 12, 0, 0))) 
tsb.set_values([(datetime.datetime(2014, 1, 1, 12, 2, 13), 110)], @@ -612,9 +650,15 @@ class TestAggregatedTimeSerie(base.BaseTestCase): self._resample_and_merge, agg_dict=ts)) self.assertEqual([ - (pandas.Timestamp('2014-01-01 12:00:00'), 60.0, 3), - (pandas.Timestamp('2014-01-01 12:01:00'), 60.0, 7), - (pandas.Timestamp('2014-01-01 12:02:00'), 60.0, 110), + (datetime.datetime( + 2014, 1, 1, 12, 0, 0 + ), 60.0, 3), + (datetime.datetime( + 2014, 1, 1, 12, 1, 0 + ), 60.0, 7), + (datetime.datetime( + 2014, 1, 1, 12, 2, 0 + ), 60.0, 110), ], ts['return'].fetch(datetime.datetime(2014, 1, 1, 12, 0, 0))) def test_serialize(self): @@ -672,9 +716,15 @@ class TestAggregatedTimeSerie(base.BaseTestCase): self.assertEqual( [ - (pandas.Timestamp('2014-01-01 12:00:01'), 1.0, 1.5), - (pandas.Timestamp('2014-01-01 12:00:02'), 1.0, 3.5), - (pandas.Timestamp('2014-01-01 12:00:03'), 1.0, 2.5), + (datetime.datetime( + 2014, 1, 1, 12, 0, 1 + ), 1.0, 1.5), + (datetime.datetime( + 2014, 1, 1, 12, 0, 2 + ), 1.0, 3.5), + (datetime.datetime( + 2014, 1, 1, 12, 0, 3 + ), 1.0, 2.5), ], ts['return'].fetch()) @@ -713,9 +763,15 @@ class TestAggregatedTimeSerie(base.BaseTestCase): self.assertEqual( [ - (pandas.Timestamp('2014-01-01 12:00:01'), 1.0, 1.5), - (pandas.Timestamp('2014-01-01 12:00:02'), 1.0, 3.5), - (pandas.Timestamp('2014-01-01 12:00:03'), 1.0, 2.5), + (datetime.datetime( + 2014, 1, 1, 12, 0, 1 + ), 1.0, 1.5), + (datetime.datetime( + 2014, 1, 1, 12, 0, 2 + ), 1.0, 3.5), + (datetime.datetime( + 2014, 1, 1, 12, 0, 3 + ), 1.0, 2.5), ], ts['return'].fetch()) @@ -727,9 +783,15 @@ class TestAggregatedTimeSerie(base.BaseTestCase): self.assertEqual( [ - (pandas.Timestamp('2014-01-01 12:00:01'), 1.0, 1.5), - (pandas.Timestamp('2014-01-01 12:00:02'), 1.0, 3.5), - (pandas.Timestamp('2014-01-01 12:00:03'), 1.0, 2.5), + (datetime.datetime( + 2014, 1, 1, 12, 0, 1 + ), 1.0, 1.5), + (datetime.datetime( + 2014, 1, 1, 12, 0, 2 + ), 1.0, 3.5), + (datetime.datetime( + 2014, 1, 1, 12, 0, 3 + ), 1.0, 2.5), 
], ts['return'].fetch()) @@ -742,9 +804,15 @@ class TestAggregatedTimeSerie(base.BaseTestCase): self.assertEqual( [ - (pandas.Timestamp('2014-01-01 12:00:01'), 1.0, 1.5), - (pandas.Timestamp('2014-01-01 12:00:02'), 1.0, 3.5), - (pandas.Timestamp('2014-01-01 12:00:03'), 1.0, 3.5), + (datetime.datetime( + 2014, 1, 1, 12, 0, 1 + ), 1.0, 1.5), + (datetime.datetime( + 2014, 1, 1, 12, 0, 2 + ), 1.0, 3.5), + (datetime.datetime( + 2014, 1, 1, 12, 0, 3 + ), 1.0, 3.5), ], ts['return'].fetch()) @@ -873,8 +941,12 @@ class TestAggregatedTimeSerie(base.BaseTestCase): [tsc1['return'], tsc2['return']], aggregation="sum") self.assertEqual([ - (pandas.Timestamp('2015-12-03 13:21:15'), 1.0, 11.0), - (pandas.Timestamp('2015-12-03 13:22:15'), 1.0, 11.0), + (datetime.datetime( + 2015, 12, 3, 13, 21, 15 + ), 1.0, 11.0), + (datetime.datetime( + 2015, 12, 3, 13, 22, 15 + ), 1.0, 11.0), ], output) dtfrom = datetime.datetime(2015, 12, 3, 13, 17, 0) @@ -886,12 +958,24 @@ class TestAggregatedTimeSerie(base.BaseTestCase): aggregation="sum", needed_percent_of_overlap=0) self.assertEqual([ - (pandas.Timestamp('2015-12-03 13:19:15'), 1.0, 1.0), - (pandas.Timestamp('2015-12-03 13:20:15'), 1.0, 1.0), - (pandas.Timestamp('2015-12-03 13:21:15'), 1.0, 11.0), - (pandas.Timestamp('2015-12-03 13:22:15'), 1.0, 11.0), - (pandas.Timestamp('2015-12-03 13:23:15'), 1.0, 10.0), - (pandas.Timestamp('2015-12-03 13:24:15'), 1.0, 10.0), + (datetime.datetime( + 2015, 12, 3, 13, 19, 15 + ), 1.0, 1.0), + (datetime.datetime( + 2015, 12, 3, 13, 20, 15 + ), 1.0, 1.0), + (datetime.datetime( + 2015, 12, 3, 13, 21, 15 + ), 1.0, 11.0), + (datetime.datetime( + 2015, 12, 3, 13, 22, 15 + ), 1.0, 11.0), + (datetime.datetime( + 2015, 12, 3, 13, 23, 15 + ), 1.0, 10.0), + (datetime.datetime( + 2015, 12, 3, 13, 24, 15 + ), 1.0, 10.0), ], output) # By default we require 100% of point that overlap @@ -911,10 +995,18 @@ class TestAggregatedTimeSerie(base.BaseTestCase): aggregation="sum", needed_percent_of_overlap=50.0) 
self.assertEqual([ - (pandas.Timestamp('2015-12-03 13:19:15'), 1.0, 1.0), - (pandas.Timestamp('2015-12-03 13:20:15'), 1.0, 1.0), - (pandas.Timestamp('2015-12-03 13:21:15'), 1.0, 11.0), - (pandas.Timestamp('2015-12-03 13:22:15'), 1.0, 11.0), + (datetime.datetime( + 2015, 12, 3, 13, 19, 15 + ), 1.0, 1.0), + (datetime.datetime( + 2015, 12, 3, 13, 20, 15 + ), 1.0, 1.0), + (datetime.datetime( + 2015, 12, 3, 13, 21, 15 + ), 1.0, 11.0), + (datetime.datetime( + 2015, 12, 3, 13, 22, 15 + ), 1.0, 11.0), ], output) output = carbonara.AggregatedTimeSerie.aggregated( @@ -922,10 +1014,18 @@ class TestAggregatedTimeSerie(base.BaseTestCase): aggregation="sum", needed_percent_of_overlap=50.0) self.assertEqual([ - (pandas.Timestamp('2015-12-03 13:21:15'), 1.0, 11.0), - (pandas.Timestamp('2015-12-03 13:22:15'), 1.0, 11.0), - (pandas.Timestamp('2015-12-03 13:23:15'), 1.0, 10.0), - (pandas.Timestamp('2015-12-03 13:24:15'), 1.0, 10.0), + (datetime.datetime( + 2015, 12, 3, 13, 21, 15 + ), 1.0, 11.0), + (datetime.datetime( + 2015, 12, 3, 13, 22, 15 + ), 1.0, 11.0), + (datetime.datetime( + 2015, 12, 3, 13, 23, 15 + ), 1.0, 10.0), + (datetime.datetime( + 2015, 12, 3, 13, 24, 15 + ), 1.0, 10.0), ], output) def test_split_key(self): -- GitLab From e26bf9078f0d15cd9756a604e1fc7a13522aab99 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Mon, 26 Sep 2016 09:31:16 +0200 Subject: [PATCH 0406/1483] tox: shorter envdir name for upgrade target The shebang of a script in linux is limited to 127 chars (execve limitation). In gate, within the virtualenv the python path in all binaries shebang is a bit long: /home/jenkins/workspace/gate-gnocchi-tox-db-py35-postgresql-file-upgrade-from-2.2-ubuntu-xenial/.tox/py35-postgresql-file-upgrade-from-2.2/bin/python This change sets a envdir manually to workaround the issue and keep explicit name in tox target. 
Change-Id: I6edadc5e03891595db23afc12ee36a519989bd8a --- tox.ini | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tox.ini b/tox.ini index c4b4b6b9..efab18a3 100644 --- a/tox.ini +++ b/tox.ini @@ -27,6 +27,7 @@ commands = [testenv:py35-postgresql-file-upgrade-from-2.2] # We should always recreate since the script upgrade # Gnocchi we can't reuse the virtualenv +envdir = upgrade recreate = True skip_install = True usedevelop = False @@ -39,6 +40,7 @@ commands = pifpaf --env-prefix INDEXER run postgresql {toxinidir}/run-upgrade-te [testenv:py27-mysql-ceph-upgrade-from-2.2] # We should always recreate since the script upgrade # Gnocchi we can't reuse the virtualenv +envdir = upgrade recreate = True skip_install = True usedevelop = False -- GitLab From 984a9768651b9e8bcd1a629767fb95c1564a9c36 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Wed, 28 Sep 2016 07:57:23 +0200 Subject: [PATCH 0407/1483] Add upgrade targets for Gnocchi 3.0 Change-Id: I77f2ee82d6b630dd24f5364dbe3207ca2f8b9be4 --- tox.ini | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) diff --git a/tox.ini b/tox.ini index efab18a3..3be5c1df 100644 --- a/tox.ini +++ b/tox.ini @@ -24,6 +24,32 @@ commands = oslo-config-generator --config-file=etc/gnocchi/gnocchi-config-generator.conf {toxinidir}/run-tests.sh {posargs} +[testenv:py35-postgresql-file-upgrade-from-3.0] +# We should always recreate since the script upgrade +# Gnocchi we can't reuse the virtualenv +envdir = upgrade +recreate = True +skip_install = True +usedevelop = False +setenv = GNOCCHI_VARIANT=test,postgresql,file +deps = gnocchi[{env:GNOCCHI_VARIANT}]>=3.0,<3.1 + pifpaf>=0.13 + gnocchiclient +commands = pifpaf --env-prefix INDEXER run postgresql {toxinidir}/run-upgrade-tests.sh {posargs} + +[testenv:py27-mysql-ceph-upgrade-from-3.0] +# We should always recreate since the script upgrade +# Gnocchi we can't reuse the virtualenv +envdir = upgrade +recreate = True +skip_install = True +usedevelop = False +setenv = 
GNOCCHI_VARIANT=test,mysql,ceph,ceph_recommended_lib +deps = gnocchi[{env:GNOCCHI_VARIANT}]>=3.0,<3.1 + gnocchiclient + pifpaf>=0.13 +commands = pifpaf --env-prefix INDEXER run mysql -- pifpaf --env-prefix STORAGE run ceph {toxinidir}/run-upgrade-tests.sh {posargs} + [testenv:py35-postgresql-file-upgrade-from-2.2] # We should always recreate since the script upgrade # Gnocchi we can't reuse the virtualenv -- GitLab From 15c0c710365e9988d9dfc09d3f7e93c75a75852e Mon Sep 17 00:00:00 2001 From: Chris Dent Date: Wed, 28 Sep 2016 17:24:43 +0100 Subject: [PATCH 0408/1483] Add STRICT_RESPONSE_HEADERS check to gabbi tests This uses a new feature in wsgi_intercept 1.4.1 that checks to make sure that response headers are native str type (different things in Python 2 and 3). Since Pecan is well behaved this "just works". Tested locally with both py27 and py35. Change-Id: I1406cfbfa14f69132fc91549779424b8427e7b78 --- gnocchi/tests/gabbi/test_gabbi.py | 2 ++ setup.cfg | 1 + 2 files changed, 3 insertions(+) diff --git a/gnocchi/tests/gabbi/test_gabbi.py b/gnocchi/tests/gabbi/test_gabbi.py index 48c1edb9..ea2222d0 100644 --- a/gnocchi/tests/gabbi/test_gabbi.py +++ b/gnocchi/tests/gabbi/test_gabbi.py @@ -18,10 +18,12 @@ import os from gabbi import driver +import wsgi_intercept from gnocchi.tests.gabbi import fixtures +wsgi_intercept.STRICT_RESPONSE_HEADERS = True TESTS_DIR = 'gabbits' diff --git a/setup.cfg b/setup.cfg index 5a5119ab..93d67546 100644 --- a/setup.cfg +++ b/setup.cfg @@ -75,6 +75,7 @@ test = doc8 tooz>=1.38 keystonemiddleware>=4.0.0 + wsgi_intercept>=1.4.1 [global] setup-hooks = -- GitLab From 35c4a08dd4163e42d4af82695fb8642952b63b91 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Thu, 29 Sep 2016 17:52:03 +0200 Subject: [PATCH 0409/1483] Fix some gabbi tests Last gabbi version checks that data have the right content-type. 
In theses tests we say that data in plain/text and we put json that's not correct, old version was silencly ignored and now this is correctly checked. Same when we put data but no content-type. Since our tests are wrong, this change fixes them. Change-Id: I9a60db296467b08ef32f956471ef5e4f81f462af --- gnocchi/tests/gabbi/gabbits-live/live.yaml | 3 +-- gnocchi/tests/gabbi/gabbits/archive.yaml | 1 + gnocchi/tests/gabbi/gabbits/metric.yaml | 3 +-- gnocchi/tests/gabbi/gabbits/resource.yaml | 7 ++----- 4 files changed, 5 insertions(+), 9 deletions(-) diff --git a/gnocchi/tests/gabbi/gabbits-live/live.yaml b/gnocchi/tests/gabbi/gabbits-live/live.yaml index 71b23ae9..9ed086c8 100644 --- a/gnocchi/tests/gabbi/gabbits-live/live.yaml +++ b/gnocchi/tests/gabbi/gabbits-live/live.yaml @@ -441,8 +441,7 @@ tests: method: POST request_headers: content-type: plain/text - data: - archive_policy_name: cookies + data: '{"archive_policy_name": "cookies"}' status: 415 diff --git a/gnocchi/tests/gabbi/gabbits/archive.yaml b/gnocchi/tests/gabbi/gabbits/archive.yaml index 5519d63a..42fe13c8 100644 --- a/gnocchi/tests/gabbi/gabbits/archive.yaml +++ b/gnocchi/tests/gabbi/gabbits/archive.yaml @@ -485,6 +485,7 @@ tests: - name: fail to create policy non-admin POST: /v1/archive_policy request_headers: + content-type: application/json x-user-id: b45187c5-150b-4730-bcb2-b5e04e234220 x-project-id: 16764ee0-bffe-4843-aa36-04b002cdbc7c data: diff --git a/gnocchi/tests/gabbi/gabbits/metric.yaml b/gnocchi/tests/gabbi/gabbits/metric.yaml index b31b3d00..45155f3d 100644 --- a/gnocchi/tests/gabbi/gabbits/metric.yaml +++ b/gnocchi/tests/gabbi/gabbits/metric.yaml @@ -115,8 +115,7 @@ tests: POST: /v1/metric request_headers: content-type: plain/text - data: - archive_policy_name: cookies + data: '{"archive_policy_name": "cookies"}' status: 415 - name: create valid metric diff --git a/gnocchi/tests/gabbi/gabbits/resource.yaml b/gnocchi/tests/gabbi/gabbits/resource.yaml index 02da4d97..e9ce2882 100644 --- 
a/gnocchi/tests/gabbi/gabbits/resource.yaml +++ b/gnocchi/tests/gabbi/gabbits/resource.yaml @@ -157,11 +157,7 @@ tests: x-user-id: 0fbb231484614b1a80131fc22f6afc9c x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea content-type: text/plain - data: - id: f93450f2-d8a5-4d67-9985-02511241e7d1 - started_at: "2014-01-03T02:02:02.000000" - user_id: 0fbb231484614b1a80131fc22f6afc9c - project_id: f3d41b770cc14f0bb94a1d5be9c0e3ea + data: '{"id": "f93450f2-d8a5-4d67-9985-02511241e7d1", "started_at": "2014-01-03T02:02:02.000000", "user_id": "0fbb231484614b1a80131fc22f6afc9c", "project_id": "f3d41b770cc14f0bb94a1d5be9c0e3ea"}' status: 415 # Create a new instance resource, demonstrate that including no data @@ -339,6 +335,7 @@ tests: request_headers: x-user-id: 0fbb231484614b1a80131fc22f6afc9c x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea + content-type: application/json data: host: compute2 -- GitLab From 0198b5bac50b01b6de0efeca0c4af8a7569d156e Mon Sep 17 00:00:00 2001 From: Hanxi Liu Date: Fri, 30 Sep 2016 11:42:27 +0800 Subject: [PATCH 0410/1483] Use xx=None instead of xx=[] to initialize the default value When using a "mutable" xx = [] as the default parameter value, this parameter will only be initialized at the first call. If executed multiple times, it wil create a new function object each time. Modifying the default value to None to avoid more confusion. The solution is initialize xx with xx= xx or [] in method body. 
More details:http://effbot.org/zone/default-values.htm Change-Id: I8266e5dbf0b4bf1b12e584cb0a58e277ba4d4334 --- gnocchi/rest/__init__.py | 5 ++--- gnocchi/storage/_carbonara.py | 3 ++- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/gnocchi/rest/__init__.py b/gnocchi/rest/__init__.py index a2871a61..32aafbfc 100644 --- a/gnocchi/rest/__init__.py +++ b/gnocchi/rest/__init__.py @@ -1246,10 +1246,9 @@ class SearchMetricController(rest.RestController): @pecan.expose('json') def post(self, metric_id, start=None, stop=None, aggregation='mean', - granularity=[]): - + granularity=None): granularity = [Timespan(g) - for g in arg_to_list(granularity)] + for g in arg_to_list(granularity or [])] metrics = pecan.request.indexer.list_metrics( ids=arg_to_list(metric_id)) diff --git a/gnocchi/storage/_carbonara.py b/gnocchi/storage/_carbonara.py index 5e050fa0..822f04f2 100644 --- a/gnocchi/storage/_carbonara.py +++ b/gnocchi/storage/_carbonara.py @@ -647,7 +647,8 @@ class CarbonaraBasedStorage(storage.StorageDriver): def search_value(self, metrics, query, from_timestamp=None, to_timestamp=None, aggregation='mean', - granularity=[]): + granularity=None): + granularity = granularity or [] predicate = storage.MeasureQuery(query) results = self._map_in_thread( -- GitLab From 1157caca38b862df405f7ec4f6fce282e34e5191 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Wed, 21 Sep 2016 15:06:18 +0200 Subject: [PATCH 0411/1483] tests: Cover resource-type modification This change adds basic live tests for downstream integration testing. 
Change-Id: Ie2eb059ba699d342adf29888c66252642065d838 --- gnocchi/tests/gabbi/gabbits-live/live.yaml | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/gnocchi/tests/gabbi/gabbits-live/live.yaml b/gnocchi/tests/gabbi/gabbits-live/live.yaml index 9ed086c8..f63e1bb9 100644 --- a/gnocchi/tests/gabbi/gabbits-live/live.yaml +++ b/gnocchi/tests/gabbi/gabbits-live/live.yaml @@ -498,6 +498,25 @@ tests: response_headers: location: $SCHEME://$NETLOC/v1/resource_type/myresource + - name: add an attribute + url: /v1/resource/myresource + request_headers: + content-type: application/json-patch+json + data: + - op: "add" + path: "/attributes/awesome-stuff" + value: {"type": "bool", "required": false} + status: 200 + + - name: remove an attribute + url: /v1/resource/myresource + request_headers: + content-type: application/json-patch+json + data: + - op: "remove" + path: "/attributes/awesome-stuff" + status: 200 + - name: myresource resource bad accept desc: Expect 406 on bad accept type request_headers: -- GitLab From 0a06089bc13a44394dcfe39ac91dff7ac128e3d0 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Fri, 30 Sep 2016 16:41:56 +0200 Subject: [PATCH 0412/1483] upgrade tests: don't patch gnocchi 3.0 The upgrade tests was patching gnocchi 2.2 with a patch not yet release on that branch. This is not needed when Gnocchi version is not 2.2.0. So this change applies the patch only on Gnocchi 2.2.0. 
Change-Id: I33ceb9ec06bf10ac896d037b7a76f6296f9e5a09 --- run-upgrade-tests.sh | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/run-upgrade-tests.sh b/run-upgrade-tests.sh index 8b1d30ba..2b69558e 100755 --- a/run-upgrade-tests.sh +++ b/run-upgrade-tests.sh @@ -73,16 +73,18 @@ else STORAGE_URL=file://$GNOCCHI_DATA fi -# NOTE(sileht): temporary fix a gnocchi 2.2 bug -# https://review.openstack.org/#/c/369011/ -patch -p2 -d $VIRTUAL_ENV/lib/python*/site-packages/gnocchi < 7bcd2a25.diff +old_version=$(pip freeze | sed -n '/gnocchi==/s/.*==\(.*\)/\1/p') +if [ "${old_version:0:5}" == "2.2.0" ]; then + # NOTE(sileht): temporary fix a gnocchi 2.2.0 bug + # https://review.openstack.org/#/c/369011/ + patch -p2 -d $VIRTUAL_ENV/lib/python*/site-packages/gnocchi < 7bcd2a25.diff +fi eval $(pifpaf run gnocchi --indexer-url $INDEXER_URL --storage-url $STORAGE_URL) inject_data $GNOCCHI_DATA dump_data $GNOCCHI_DATA/old pifpaf_stop -old_version=$(pip freeze | sed -n '/gnocchi==/s/.*==\(.*\)/\1/p') new_version=$(python setup.py --version) echo "* Upgrading Gnocchi from $old_version to $new_version" pip install -q -U .[${GNOCCHI_VARIANT}] -- GitLab From c34b246ee95c9b8f0296d338d5ab2d63952d12a4 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Thu, 15 Sep 2016 22:18:09 +0200 Subject: [PATCH 0413/1483] Don't mock ceph The rados module was mocked because it was not possible to easy ceph into gate. But now pifpaf support it and we can install anything we want with bindeps.txt file. Also future changes will use more and more API from Ceph, making Rados mock class more hard to maintain. This change removes the mock and uses a real ceph cluster setuped with pifpaf to run tests. 
Change-Id: I50bd1cac0017565ef678c2ce9235a21368eeea93 --- gnocchi/tests/base.py | 220 +++--------------------------------------- run-tests.sh | 6 +- tox.ini | 4 +- 3 files changed, 19 insertions(+), 211 deletions(-) diff --git a/gnocchi/tests/base.py b/gnocchi/tests/base.py index 5a4af3cc..667a1468 100644 --- a/gnocchi/tests/base.py +++ b/gnocchi/tests/base.py @@ -13,10 +13,10 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. -import errno import functools import json import os +import subprocess import uuid import fixtures @@ -58,206 +58,6 @@ def _skip_decorator(func): return skip_if_not_implemented -class FakeRadosModule(object): - class OpCtx(object): - def __enter__(self): - self.ops = [] - return self - - def __exit__(self, *args, **kwargs): - pass - - WriteOpCtx = ReadOpCtx = OpCtx - - class OmapIterator(object): - class OpRetCode(object): - def __init__(self): - self.ret = 0 - - def __eq__(self, other): - return self.ret == other - - def __init__(self, start_filter, prefix_filter, number): - self.start_filter = start_filter - self.prefix_filter = prefix_filter - self.number = number - self.data = {} - self.op_ret = self.OpRetCode() - - def set_data(self, data): - if not data: - self.op_ret.ret = errno.ENOENT - else: - self.data = data - - def __iter__(self): - # NOTE(sileht): we use only the prefix for now - return ((k, v) for k, v in self.data.items() - if k.startswith(self.prefix_filter)) - - LIBRADOS_OPERATION_BALANCE_READS = 1 - LIBRADOS_OPERATION_SKIPRWLOCKS = 16 - - class ObjectNotFound(Exception): - pass - - class ioctx(object): - def __init__(self, kvs, kvs_xattrs, kvs_omaps): - self.kvs = kvs - self.kvs_xattrs = kvs_xattrs - self.kvs_omaps = kvs_omaps - self.librados = self - self.io = self - - def __enter__(self): - return self - - @staticmethod - def __exit__(exc_type, exc_value, traceback): - pass - - def 
_ensure_key_exists(self, key): - if key not in self.kvs: - self.kvs[key] = "" - self.kvs_xattrs[key] = {} - self.kvs_omaps[key] = {} - - @staticmethod - def close(): - pass - - @staticmethod - def _validate_key(name): - if not isinstance(name, str): - raise TypeError("key is not a 'str' object") - - def write_full(self, key, value): - self._validate_key(key) - self._ensure_key_exists(key) - self.kvs[key] = value - - def write(self, key, value, offset): - self._validate_key(key) - try: - current = self.kvs[key] - except KeyError: - current = b"" - if len(current) < offset: - current += b'\x00' * (offset - len(current)) - self.kvs[key] = ( - current[:offset] + value + current[offset + len(value):] - ) - - def stat(self, key): - self._validate_key(key) - if key not in self.kvs: - raise FakeRadosModule.ObjectNotFound - else: - return (1024, "timestamp") - - def read(self, key, length=8192, offset=0): - self._validate_key(key) - if key not in self.kvs: - raise FakeRadosModule.ObjectNotFound - else: - return self.kvs[key][offset:offset+length] - - def operate_read_op(self, op, key, flag=0): - for op in op.ops: - op(key) - - def get_omap_vals(self, op, start_filter, prefix_filter, number): - oi = FakeRadosModule.OmapIterator(start_filter, prefix_filter, - number) - op.ops.append(lambda oid: oi.set_data(self.kvs_omaps.get(oid))) - return oi, oi.op_ret - - def operate_write_op(self, op, key, flags=0): - for op in op.ops: - op(key) - - def set_omap(self, op, keys, values): - def add(oid): - self._ensure_key_exists(oid) - omaps = self.kvs_omaps.setdefault(oid, {}) - omaps.update(dict(zip(keys, values))) - op.ops.append(add) - - def remove_omap_keys(self, op, keys): - def rm(oid): - for key in keys: - del self.kvs_omaps[oid][key] - op.ops.append(rm) - - def get_xattrs(self, key): - if key not in self.kvs: - raise FakeRadosModule.ObjectNotFound - return six.iteritems(self.kvs_xattrs.get(key, {}).copy()) - - def set_xattr(self, key, attr, value): - self._ensure_key_exists(key) - 
xattrs = self.kvs_xattrs.setdefault(key, {}) - xattrs[attr] = value - - def rm_xattr(self, key, attr): - if key not in self.kvs: - raise FakeRadosModule.ObjectNotFound - del self.kvs_xattrs[key][attr] - - def remove_object(self, key): - self._validate_key(key) - if key not in self.kvs: - raise FakeRadosModule.ObjectNotFound - del self.kvs[key] - del self.kvs_xattrs[key] - del self.kvs_omaps[key] - - def aio_remove(self, key): - self._validate_key(key) - self.kvs.pop(key, None) - self.kvs_xattrs.pop(key, None) - self.kvs_omaps.pop(key, None) - - @staticmethod - def aio_flush(): - pass - - class FakeRados(object): - def __init__(self, kvs, kvs_xattrs, kvs_omaps): - self.kvs = kvs - self.kvs_xattrs = kvs_xattrs - self.kvs_omaps = kvs_omaps - - @staticmethod - def connect(): - pass - - @staticmethod - def shutdown(): - pass - - def open_ioctx(self, pool): - return FakeRadosModule.ioctx(self.kvs, self.kvs_xattrs, - self.kvs_omaps) - - def __init__(self): - self.kvs = {} - self.kvs_xattrs = {} - self.kvs_omaps = {} - - def Rados(self, *args, **kwargs): - return FakeRadosModule.FakeRados(self.kvs, self.kvs_xattrs, - self.kvs_omaps) - - @staticmethod - def run_in_thread(method, args): - return method(*args) - - @staticmethod - def make_ex(ret, reason): - raise Exception(reason) - - class FakeSwiftClient(object): def __init__(self, *args, **kwargs): self.kvs = {} @@ -436,10 +236,12 @@ class TestCase(base.BaseTestCase): except indexer.ArchivePolicyAlreadyExists: pass - self.conf.set_override( - 'driver', - os.getenv("GNOCCHI_TEST_STORAGE_DRIVER", "file"), - 'storage') + storage_driver = os.getenv("GNOCCHI_TEST_STORAGE_DRIVER", "file") + self.conf.set_override('driver', storage_driver, 'storage') + if storage_driver == 'ceph': + self.conf.set_override('ceph_conffile', + os.getenv("CEPH_CONF"), + 'storage') def setUp(self): super(TestCase, self).setUp() @@ -448,14 +250,16 @@ class TestCase(base.BaseTestCase): 'swiftclient.client.Connection', FakeSwiftClient)) - 
self.useFixture(mockpatch.Patch('gnocchi.storage.ceph.rados', - FakeRadosModule())) - if self.conf.storage.driver == 'file': tempdir = self.useFixture(fixtures.TempDir()) self.conf.set_override('file_basepath', tempdir.path, 'storage') + elif self.conf.storage.driver == 'ceph': + pool_name = uuid.uuid4().hex + subprocess.call("rados -c %s mkpool %s" % ( + os.getenv("CEPH_CONF"), pool_name), shell=True) + self.conf.set_override('ceph_pool', pool_name, 'storage') self.storage = storage.get_driver(self.conf) # NOTE(jd) Do not upgrade the storage. We don't really need the storage diff --git a/run-tests.sh b/run-tests.sh index 06c35801..b9d87eae 100755 --- a/run-tests.sh +++ b/run-tests.sh @@ -7,6 +7,10 @@ do export GNOCCHI_TEST_STORAGE_DRIVER=$storage for indexer in ${GNOCCHI_TEST_INDEXER_DRIVERS} do - pifpaf -g GNOCCHI_INDEXER_URL run $indexer -- ./tools/pretty_tox.sh $* + if [ "$GNOCCHI_TEST_STORAGE_DRIVER" == "ceph" ]; then + pifpaf run ceph -- pifpaf -g GNOCCHI_INDEXER_URL run $indexer -- ./tools/pretty_tox.sh $* + else + pifpaf -g GNOCCHI_INDEXER_URL run $indexer -- ./tools/pretty_tox.sh $* + fi done done diff --git a/tox.ini b/tox.ini index 3be5c1df..98b7db85 100644 --- a/tox.ini +++ b/tox.ini @@ -7,8 +7,8 @@ usedevelop = True sitepackages = False passenv = LANG OS_TEST_TIMEOUT OS_STDOUT_CAPTURE OS_STDERR_CAPTURE OS_LOG_CAPTURE GNOCCHI_TEST_* deps = .[test] - postgresql: .[postgresql,swift,ceph,file] - mysql: .[mysql,swift,ceph,file] + postgresql: .[postgresql,swift,file,ceph,ceph_recommended_lib] + mysql: .[mysql,swift,file,ceph,ceph_recommended_lib] setenv = GNOCCHI_TEST_STORAGE_DRIVER=file GNOCCHI_TEST_INDEXER_DRIVER=postgresql -- GitLab From 83f02996556af62d085ecf5bfe240440914fee8f Mon Sep 17 00:00:00 2001 From: gord chung Date: Tue, 20 Sep 2016 13:53:25 +0000 Subject: [PATCH 0414/1483] switch to stop validating batch individually deserialize_and_validate takes up the majority of the POST time when using batch interface. 
this is because it validates each point individually so the larger the POST, the processing time increases nearly linearly. this changes it so we process it at as a batch Closes-Bug: #1624537 Change-Id: I4f8b8559bfee0994fbc975856b592b13d00e57c8 --- gnocchi/rest/__init__.py | 30 ++-- gnocchi/statsd.py | 6 +- gnocchi/storage/_carbonara.py | 7 +- gnocchi/tests/gabbi/gabbits/metric.yaml | 13 ++ gnocchi/tests/storage/test_carbonara.py | 4 +- gnocchi/tests/test_aggregates.py | 5 +- gnocchi/tests/test_storage.py | 176 ++++++++++++------------ gnocchi/utils.py | 5 + tools/measures_injector.py | 3 +- 9 files changed, 134 insertions(+), 115 deletions(-) diff --git a/gnocchi/rest/__init__.py b/gnocchi/rest/__init__.py index 32aafbfc..3129fbde 100644 --- a/gnocchi/rest/__init__.py +++ b/gnocchi/rest/__init__.py @@ -18,7 +18,9 @@ import itertools import uuid import jsonpatch +import numpy as np from oslo_utils import strutils +import pandas as pd import pecan from pecan import rest import six @@ -406,20 +408,22 @@ class ArchivePolicyRulesController(rest.RestController): abort(400, e) -def MeasureSchema(m): - # NOTE(sileht): don't use voluptuous for performance reasons +def MeasuresListSchema(measures): try: - value = float(m['value']) - except Exception: - abort(400, "Invalid input for a value") + times = pd.to_datetime([i['timestamp'] for i in measures], utc=True, + unit='ns', box=False) + if np.any(times < np.datetime64('1970')): + raise ValueError('Timestamp must be after Epoch') + except ValueError as e: + abort(400, "Invalid input for timestamp: %s" % e) try: - timestamp = Timestamp(m['timestamp']) - except Exception as e: - abort(400, - "Invalid input for timestamp `%s': %s" % (m['timestamp'], e)) + values = [float(i['value']) for i in measures] + except Exception: + abort(400, "Invalid input for a value") - return storage.Measure(timestamp, value) + return (storage.Measure(t, v) for t, v in six.moves.zip( + times.tolist(), values)) class 
MetricController(rest.RestController): @@ -449,7 +453,7 @@ class MetricController(rest.RestController): abort(400, "Invalid input for measures") if params: pecan.request.storage.add_measures( - self.metric, six.moves.map(MeasureSchema, params)) + self.metric, MeasuresListSchema(params)) pecan.response.status = 202 @pecan.expose('json') @@ -1290,7 +1294,7 @@ class SearchMetricController(rest.RestController): class ResourcesMetricsMeasuresBatchController(rest.RestController): MeasuresBatchSchema = voluptuous.Schema( - {utils.ResourceUUID: {six.text_type: [MeasureSchema]}} + {utils.ResourceUUID: {six.text_type: MeasuresListSchema}} ) @pecan.expose() @@ -1334,7 +1338,7 @@ class MetricsMeasuresBatchController(rest.RestController): # only the last key will be retain by json python module to # build the python dict. MeasuresBatchSchema = voluptuous.Schema( - {utils.UUID: [MeasureSchema]} + {utils.UUID: MeasuresListSchema} ) @pecan.expose() diff --git a/gnocchi/statsd.py b/gnocchi/statsd.py index 8a337d38..6798ac56 100644 --- a/gnocchi/statsd.py +++ b/gnocchi/statsd.py @@ -65,14 +65,14 @@ class Stats(object): "Invalid sampling for ms: `%d`, should be none" % sampling) self.times[metric_name] = storage.Measure( - utils.utcnow(), value) + int(utils.datetime_to_unix(utils.utcnow()) * int(10e8)), value) elif metric_type == "g": if sampling is not None: raise ValueError( "Invalid sampling for g: `%d`, should be none" % sampling) self.gauges[metric_name] = storage.Measure( - utils.utcnow(), value) + int(utils.datetime_to_unix(utils.utcnow()) * int(10e8)), value) elif metric_type == "c": sampling = 1 if sampling is None else sampling if metric_name in self.counters: @@ -80,7 +80,7 @@ class Stats(object): else: current_value = 0 self.counters[metric_name] = storage.Measure( - utils.utcnow(), + int(utils.datetime_to_unix(utils.utcnow()) * int(10e8)), current_value + (value * (1 / sampling))) # TODO(jd) Support "set" type # elif metric_type == "s": diff --git 
a/gnocchi/storage/_carbonara.py b/gnocchi/storage/_carbonara.py index 822f04f2..0dbb89f1 100644 --- a/gnocchi/storage/_carbonara.py +++ b/gnocchi/storage/_carbonara.py @@ -334,12 +334,7 @@ class CarbonaraBasedStorage(storage.StorageDriver): measures = list(measures) data = struct.pack( "<" + self._MEASURE_SERIAL_FORMAT * len(measures), - *list( - itertools.chain( - # NOTE(jd) int(10e8) to avoid rounding errors - *((int(utils.datetime_to_unix(timestamp) * int(10e8)), - value) - for timestamp, value in measures)))) + *list(itertools.chain.from_iterable(measures))) self._store_new_measures(metric, data) @staticmethod diff --git a/gnocchi/tests/gabbi/gabbits/metric.yaml b/gnocchi/tests/gabbi/gabbits/metric.yaml index 45155f3d..52dd36af 100644 --- a/gnocchi/tests/gabbi/gabbits/metric.yaml +++ b/gnocchi/tests/gabbi/gabbits/metric.yaml @@ -145,6 +145,19 @@ tests: response_strings: - Timestamp must be after Epoch + - name: get valid metric id for bad timestamp + GET: /v1/metric + status: 200 + + - name: push measurements to metric with bad timestamp + POST: /v1/metric/$RESPONSE['$[0].id']/measures + request_headers: + content-type: application/json + data: + - timestamp: "1915-100-06T14:33:57" + value: 43.1 + status: 400 + - name: get valid metric id again GET: /v1/metric status: 200 diff --git a/gnocchi/tests/storage/test_carbonara.py b/gnocchi/tests/storage/test_carbonara.py index d8ea6159..9df631a2 100644 --- a/gnocchi/tests/storage/test_carbonara.py +++ b/gnocchi/tests/storage/test_carbonara.py @@ -141,8 +141,8 @@ class TestCarbonaraMigration(tests_base.TestCase): self.storage.get_measures, self.metric, aggregation='max') self.storage.add_measures(self.metric, [ - storage.Measure(utils.datetime_utc(2016, 7, 18), 69), - storage.Measure(utils.datetime_utc(2016, 7, 18, 1, 1), 64), + storage.Measure(utils.dt_to_unix_ns(2016, 7, 18), 69), + storage.Measure(utils.dt_to_unix_ns(2016, 7, 18, 1, 1), 64), ]) with mock.patch.object(self.index, 'list_metrics') as f: diff --git 
a/gnocchi/tests/test_aggregates.py b/gnocchi/tests/test_aggregates.py index c07df7ef..b11da8da 100644 --- a/gnocchi/tests/test_aggregates.py +++ b/gnocchi/tests/test_aggregates.py @@ -60,8 +60,9 @@ class TestAggregates(tests_base.TestCase): uuid.uuid4(), self.archive_policies['medium']) start_time = utils.datetime_utc(2014, 1, 1, 12) incr = datetime.timedelta(seconds=spacing) - measures = [storage.Measure(start_time + incr * n, val) - for n, val in enumerate(data)] + measures = [storage.Measure(int( + utils.datetime_to_unix(start_time + incr * n) * int(10e8)), val) + for n, val in enumerate(data)] self.index.create_metric(metric.id, str(uuid.uuid4()), str(uuid.uuid4()), 'medium') diff --git a/gnocchi/tests/test_storage.py b/gnocchi/tests/test_storage.py index 7ab25375..65b42240 100644 --- a/gnocchi/tests/test_storage.py +++ b/gnocchi/tests/test_storage.py @@ -58,12 +58,12 @@ class TestStorageDriver(tests_base.TestCase): self.skipTest("This driver is not based on Carbonara") self.storage.add_measures(self.metric, [ - storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 0, 1), 69), + storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 0, 1), 69), ]) self.trigger_processing() self.storage.add_measures(self.metric, [ - storage.Measure(utils.datetime_utc(2014, 1, 1, 13, 0, 1), 1), + storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 13, 0, 1), 1), ]) with mock.patch('gnocchi.carbonara.AggregatedTimeSerie.unserialize', @@ -82,7 +82,7 @@ class TestStorageDriver(tests_base.TestCase): None, None, full=True) self.assertEqual(set(), metrics) self.storage.add_measures(self.metric, [ - storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 0, 1), 69), + storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 0, 1), 69), ]) metrics = self.storage.list_metric_with_measures_to_process( None, None, full=True) @@ -94,7 +94,7 @@ class TestStorageDriver(tests_base.TestCase): def test_delete_nonempty_metric(self): self.storage.add_measures(self.metric, [ - storage.Measure(utils.datetime_utc(2014, 
1, 1, 12, 0, 1), 69), + storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 0, 1), 69), ]) self.trigger_processing() self.storage.delete_metric(self.metric, sync=True) @@ -102,14 +102,14 @@ class TestStorageDriver(tests_base.TestCase): def test_delete_nonempty_metric_unprocessed(self): self.storage.add_measures(self.metric, [ - storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 0, 1), 69), + storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 0, 1), 69), ]) self.storage.delete_metric(self.metric, sync=True) self.trigger_processing() def test_delete_expunge_metric(self): self.storage.add_measures(self.metric, [ - storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 0, 1), 69), + storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 0, 1), 69), ]) self.trigger_processing() self.index.delete_metric(self.metric.id) @@ -135,7 +135,7 @@ class TestStorageDriver(tests_base.TestCase): def test_add_measures_big(self): m, __ = self._create_metric('high') self.storage.add_measures(m, [ - storage.Measure(utils.datetime_utc(2014, 1, 1, 12, i, j), 100) + storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, i, j), 100) for i in six.moves.range(0, 60) for j in six.moves.range(0, 60)]) self.trigger_processing([str(m.id)]) @@ -145,14 +145,14 @@ class TestStorageDriver(tests_base.TestCase): def test_add_measures_update_subset_split(self): m, m_sql = self._create_metric('medium') measures = [ - storage.Measure(utils.datetime_utc(2014, 1, 6, i, j, 0), 100) + storage.Measure(utils.dt_to_unix_ns(2014, 1, 6, i, j, 0), 100) for i in six.moves.range(2) for j in six.moves.range(0, 60, 2)] self.storage.add_measures(m, measures) self.trigger_processing([str(m.id)]) # add measure to end, in same aggregate time as last point. 
self.storage.add_measures(m, [ - storage.Measure(utils.datetime_utc(2014, 1, 6, 1, 58, 1), 100)]) + storage.Measure(utils.dt_to_unix_ns(2014, 1, 6, 1, 58, 1), 100)]) with mock.patch.object(self.storage, '_store_metric_measures') as c: # should only resample last aggregate @@ -168,13 +168,13 @@ class TestStorageDriver(tests_base.TestCase): def test_add_measures_update_subset(self): m, m_sql = self._create_metric('medium') measures = [ - storage.Measure(utils.datetime_utc(2014, 1, 6, i, j, 0), 100) + storage.Measure(utils.dt_to_unix_ns(2014, 1, 6, i, j, 0), 100) for i in six.moves.range(2) for j in six.moves.range(0, 60, 2)] self.storage.add_measures(m, measures) self.trigger_processing([str(m.id)]) # add measure to end, in same aggregate time as last point. - new_point = utils.datetime_utc(2014, 1, 6, 1, 58, 1) + new_point = utils.dt_to_unix_ns(2014, 1, 6, 1, 58, 1) self.storage.add_measures(m, [storage.Measure(new_point, 100)]) with mock.patch.object(self.storage, '_add_measures') as c: @@ -186,10 +186,10 @@ class TestStorageDriver(tests_base.TestCase): def test_delete_old_measures(self): self.storage.add_measures(self.metric, [ - storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 0, 1), 69), - storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 7, 31), 42), - storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 9, 31), 4), - storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 12, 45), 44), + storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 0, 1), 69), + storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 7, 31), 42), + storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 9, 31), 4), + storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 12, 45), 44), ]) self.trigger_processing() @@ -203,7 +203,7 @@ class TestStorageDriver(tests_base.TestCase): # One year later… self.storage.add_measures(self.metric, [ - storage.Measure(utils.datetime_utc(2015, 1, 1, 12, 0, 1), 69), + storage.Measure(utils.dt_to_unix_ns(2015, 1, 1, 12, 0, 1), 69), ]) self.trigger_processing() @@ 
-237,10 +237,10 @@ class TestStorageDriver(tests_base.TestCase): # First store some points scattered across different splits self.storage.add_measures(self.metric, [ - storage.Measure(utils.datetime_utc(2016, 1, 1, 12, 0, 1), 69), - storage.Measure(utils.datetime_utc(2016, 1, 2, 13, 7, 31), 42), - storage.Measure(utils.datetime_utc(2016, 1, 4, 14, 9, 31), 4), - storage.Measure(utils.datetime_utc(2016, 1, 6, 15, 12, 45), 44), + storage.Measure(utils.dt_to_unix_ns(2016, 1, 1, 12, 0, 1), 69), + storage.Measure(utils.dt_to_unix_ns(2016, 1, 2, 13, 7, 31), 42), + storage.Measure(utils.dt_to_unix_ns(2016, 1, 4, 14, 9, 31), 4), + storage.Measure(utils.dt_to_unix_ns(2016, 1, 6, 15, 12, 45), 44), ]) self.trigger_processing() @@ -277,8 +277,8 @@ class TestStorageDriver(tests_base.TestCase): # the BoundTimeSerie processing timeserie far away from its current # range. self.storage.add_measures(self.metric, [ - storage.Measure(utils.datetime_utc(2016, 1, 10, 16, 18, 45), 45), - storage.Measure(utils.datetime_utc(2016, 1, 10, 17, 12, 45), 46), + storage.Measure(utils.dt_to_unix_ns(2016, 1, 10, 16, 18, 45), 45), + storage.Measure(utils.dt_to_unix_ns(2016, 1, 10, 17, 12, 45), 46), ]) self.trigger_processing() @@ -312,8 +312,8 @@ class TestStorageDriver(tests_base.TestCase): def test_updated_measures(self): self.storage.add_measures(self.metric, [ - storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 0, 1), 69), - storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 7, 31), 42), + storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 0, 1), 69), + storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 7, 31), 42), ]) self.trigger_processing() @@ -325,8 +325,8 @@ class TestStorageDriver(tests_base.TestCase): ], self.storage.get_measures(self.metric)) self.storage.add_measures(self.metric, [ - storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 9, 31), 4), - storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 12, 45), 44), + storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 9, 31), 4), + 
storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 12, 45), 44), ]) self.trigger_processing() @@ -356,10 +356,10 @@ class TestStorageDriver(tests_base.TestCase): def test_add_and_get_measures(self): self.storage.add_measures(self.metric, [ - storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 0, 1), 69), - storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 7, 31), 42), - storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 9, 31), 4), - storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 12, 45), 44), + storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 0, 1), 69), + storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 7, 31), 42), + storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 9, 31), 4), + storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 12, 45), 44), ]) self.trigger_processing() @@ -446,10 +446,10 @@ class TestStorageDriver(tests_base.TestCase): def test_get_measure_unknown_aggregation(self): self.storage.add_measures(self.metric, [ - storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 0, 1), 69), - storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 7, 31), 42), - storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 9, 31), 4), - storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 12, 45), 44), + storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 0, 1), 69), + storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 7, 31), 42), + storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 9, 31), 4), + storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 12, 45), 44), ]) self.assertRaises(storage.AggregationDoesNotExist, self.storage.get_measures, @@ -459,16 +459,16 @@ class TestStorageDriver(tests_base.TestCase): metric2 = storage.Metric(uuid.uuid4(), self.archive_policies['low']) self.storage.add_measures(self.metric, [ - storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 0, 1), 69), - storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 7, 31), 42), - storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 9, 31), 4), - storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 12, 
45), 44), + storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 0, 1), 69), + storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 7, 31), 42), + storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 9, 31), 4), + storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 12, 45), 44), ]) self.storage.add_measures(metric2, [ - storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 0, 1), 69), - storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 7, 31), 42), - storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 9, 31), 4), - storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 12, 45), 44), + storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 0, 1), 69), + storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 7, 31), 42), + storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 9, 31), 4), + storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 12, 45), 44), ]) self.assertRaises(storage.AggregationDoesNotExist, self.storage.get_cross_metric_measures, @@ -479,16 +479,16 @@ class TestStorageDriver(tests_base.TestCase): metric2 = storage.Metric(uuid.uuid4(), self.archive_policies['low']) self.storage.add_measures(self.metric, [ - storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 0, 1), 69), - storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 7, 31), 42), - storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 9, 31), 4), - storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 12, 45), 44), + storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 0, 1), 69), + storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 7, 31), 42), + storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 9, 31), 4), + storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 12, 45), 44), ]) self.storage.add_measures(metric2, [ - storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 0, 1), 69), - storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 7, 31), 42), - storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 9, 31), 4), - storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 12, 45), 44), + storage.Measure(utils.dt_to_unix_ns(2014, 1, 
1, 12, 0, 1), 69), + storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 7, 31), 42), + storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 9, 31), 4), + storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 12, 45), 44), ]) self.assertRaises(storage.GranularityDoesNotExist, self.storage.get_cross_metric_measures, @@ -499,16 +499,16 @@ class TestStorageDriver(tests_base.TestCase): metric2 = storage.Metric(uuid.uuid4(), self.archive_policies['no_granularity_match']) self.storage.add_measures(self.metric, [ - storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 0, 1), 69), - storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 7, 31), 42), - storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 9, 31), 4), - storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 12, 45), 44), + storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 0, 1), 69), + storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 7, 31), 42), + storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 9, 31), 4), + storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 12, 45), 44), ]) self.storage.add_measures(metric2, [ - storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 0, 1), 69), - storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 7, 31), 42), - storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 9, 31), 4), - storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 12, 45), 44), + storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 0, 1), 69), + storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 7, 31), 42), + storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 9, 31), 4), + storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 12, 45), 44), ]) self.assertRaises(storage.MetricUnaggregatable, @@ -518,16 +518,16 @@ class TestStorageDriver(tests_base.TestCase): def test_add_and_get_cross_metric_measures(self): metric2, __ = self._create_metric() self.storage.add_measures(self.metric, [ - storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 0, 1), 69), - storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 7, 31), 42), - 
storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 9, 31), 4), - storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 12, 45), 44), + storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 0, 1), 69), + storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 7, 31), 42), + storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 9, 31), 4), + storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 12, 45), 44), ]) self.storage.add_measures(metric2, [ - storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 0, 5), 9), - storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 7, 41), 2), - storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 10, 31), 4), - storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 13, 10), 4), + storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 0, 5), 9), + storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 7, 41), 2), + storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 10, 31), 4), + storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 13, 10), 4), ]) self.trigger_processing([str(self.metric.id), str(metric2.id)]) @@ -603,17 +603,17 @@ class TestStorageDriver(tests_base.TestCase): def test_add_and_get_cross_metric_measures_with_holes(self): metric2, __ = self._create_metric() self.storage.add_measures(self.metric, [ - storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 0, 1), 69), - storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 7, 31), 42), - storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 5, 31), 8), - storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 9, 31), 4), - storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 12, 45), 42), + storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 0, 1), 69), + storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 7, 31), 42), + storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 5, 31), 8), + storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 9, 31), 4), + storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 12, 45), 42), ]) self.storage.add_measures(metric2, [ - storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 0, 5), 
9), - storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 7, 31), 2), - storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 9, 31), 6), - storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 13, 10), 2), + storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 0, 5), 9), + storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 7, 31), 2), + storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 9, 31), 6), + storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 13, 10), 2), ]) self.trigger_processing([str(self.metric.id), str(metric2.id)]) @@ -629,18 +629,18 @@ class TestStorageDriver(tests_base.TestCase): def test_search_value(self): metric2, __ = self._create_metric() self.storage.add_measures(self.metric, [ - storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 0, 1,), 69), - storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 7, 31), 42), - storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 5, 31), 8), - storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 9, 31), 4), - storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 12, 45), 42), + storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 0, 1,), 69), + storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 7, 31), 42), + storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 5, 31), 8), + storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 9, 31), 4), + storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 12, 45), 42), ]) self.storage.add_measures(metric2, [ - storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 0, 5), 9), - storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 7, 31), 2), - storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 9, 31), 6), - storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 13, 10), 2), + storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 0, 5), 9), + storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 7, 31), 2), + storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 9, 31), 6), + storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 13, 10), 2), ]) self.trigger_processing([str(self.metric.id), str(metric2.id)]) 
@@ -671,9 +671,9 @@ class TestStorageDriver(tests_base.TestCase): str(uuid.uuid4()), name) m = self.index.list_metrics(ids=[m.id])[0] self.storage.add_measures(m, [ - storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 0, 0), 1), - storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 0, 5), 1), - storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 0, 10), 1), + storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 0, 0), 1), + storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 0, 5), 1), + storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 0, 10), 1), ]) self.trigger_processing([str(m.id)]) self.assertEqual([ @@ -686,7 +686,7 @@ class TestStorageDriver(tests_base.TestCase): name, [archive_policy.ArchivePolicyItem(granularity=5, points=6)]) m = self.index.list_metrics(ids=[m.id])[0] self.storage.add_measures(m, [ - storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 0, 15), 1), + storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 0, 15), 1), ]) self.trigger_processing([str(m.id)]) self.assertEqual([ diff --git a/gnocchi/utils.py b/gnocchi/utils.py index b7145654..8a3d2a4a 100644 --- a/gnocchi/utils.py +++ b/gnocchi/utils.py @@ -110,3 +110,8 @@ unix_universal_start = datetime_utc(1970, 1, 1) def datetime_to_unix(timestamp): return (timestamp - unix_universal_start).total_seconds() + + +def dt_to_unix_ns(*args): + return int(datetime_to_unix(datetime.datetime( + *args, tzinfo=iso8601.iso8601.UTC)) * int(10e8)) diff --git a/tools/measures_injector.py b/tools/measures_injector.py index dd112d55..bdf64610 100755 --- a/tools/measures_injector.py +++ b/tools/measures_injector.py @@ -44,7 +44,8 @@ def injector(): def todo(metric): for _ in six.moves.range(conf.batch_of_measures): measures = [ - storage.Measure(utils.utcnow(), random.random()) + storage.Measure(int(utils.datetime_to_unix( + utils.utcnow()) * int(10e8)), random.random()) for __ in six.moves.range(conf.measures_per_batch)] s.add_measures(metric, measures) -- GitLab From bbbe67e3ad3900a7a6f5fcb6253b50a5a1cdb13b Mon 
Sep 17 00:00:00 2001 From: gord chung Date: Thu, 15 Sep 2016 22:22:27 +0000 Subject: [PATCH 0415/1483] enable threading of batched writes when adding measures for batch POST, we sequentially write each metric and its measures. this is all IO and can be done with threads. as benchmark, POST 50 batches, 20metrics each 1point/metric: with threading: ~1.7109s (34.22ms/batch) without: ~4.3594s (87.19ms/batch) 60points/metric: with threading: ~4.9793s (99.59ms/batch) without: ~7.1350s (142.7ms/batch) 720points/metric: with threading: ~20.3034s (0.4061ms/batch) without: ~21.7325 (0.4347ms/batch) Change-Id: I8f49f6d69a7bf5ee348a9b243b596da73c844e97 --- gnocchi/rest/__init__.py | 17 ++++++++++++----- gnocchi/service.py | 10 ++-------- gnocchi/utils.py | 9 +++++++++ 3 files changed, 23 insertions(+), 13 deletions(-) diff --git a/gnocchi/rest/__init__.py b/gnocchi/rest/__init__.py index 3129fbde..5a4763dd 100644 --- a/gnocchi/rest/__init__.py +++ b/gnocchi/rest/__init__.py @@ -17,6 +17,7 @@ import itertools import uuid +from concurrent import futures import jsonpatch import numpy as np from oslo_utils import strutils @@ -227,6 +228,8 @@ RESOURCE_DEFAULT_PAGINATION = ['revision_start:asc', METRIC_DEFAULT_PAGINATION = ['id:asc'] +THREADS = utils.get_default_workers() + def get_pagination_options(params, default): max_limit = pecan.request.conf.api.max_limit @@ -1323,9 +1326,11 @@ class ResourcesMetricsMeasuresBatchController(rest.RestController): for metric in known_metrics: enforce("post measures", metric) - for metric in known_metrics: - measures = body[metric.resource_id][metric.name] - pecan.request.storage.add_measures(metric, measures) + storage = pecan.request.storage + with futures.ThreadPoolExecutor(max_workers=THREADS) as executor: + executor.map(lambda x: storage.add_measures(*x), + ((metric, body[metric.resource_id][metric.name]) + for metric in known_metrics)) pecan.response.status = 202 @@ -1354,8 +1359,10 @@ class 
MetricsMeasuresBatchController(rest.RestController): for metric in metrics: enforce("post measures", metric) - for metric in metrics: - pecan.request.storage.add_measures(metric, body[metric.id]) + storage = pecan.request.storage + with futures.ThreadPoolExecutor(max_workers=THREADS) as executor: + executor.map(lambda x: storage.add_measures(*x), + ((metric, body[metric.id]) for metric in metrics)) pecan.response.status = 202 diff --git a/gnocchi/service.py b/gnocchi/service.py index cada5554..9557e5cc 100644 --- a/gnocchi/service.py +++ b/gnocchi/service.py @@ -15,8 +15,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -import multiprocessing - from oslo_config import cfg from oslo_db import options as db_options from oslo_log import log @@ -26,6 +24,7 @@ from six.moves.urllib import parse as urlparse from gnocchi import archive_policy from gnocchi import opts +from gnocchi import utils LOG = log.getLogger(__name__) @@ -50,12 +49,7 @@ def prepare_service(args=None, conf=None, conf.archive_policy.default_aggregation_methods ) - try: - default_workers = multiprocessing.cpu_count() or 1 - except NotImplementedError: - default_workers = 1 - - conf.set_default("workers", default_workers, group="metricd") + conf.set_default("workers", utils.get_default_workers(), group="metricd") conf(args, project='gnocchi', validate_default_values=True, default_config_files=default_config_files, diff --git a/gnocchi/utils.py b/gnocchi/utils.py index 8a3d2a4a..139b89c8 100644 --- a/gnocchi/utils.py +++ b/gnocchi/utils.py @@ -14,6 +14,7 @@ # License for the specific language governing permissions and limitations # under the License. 
import datetime +import multiprocessing import iso8601 from oslo_utils import timeutils @@ -115,3 +116,11 @@ def datetime_to_unix(timestamp): def dt_to_unix_ns(*args): return int(datetime_to_unix(datetime.datetime( *args, tzinfo=iso8601.iso8601.UTC)) * int(10e8)) + + +def get_default_workers(): + try: + default_workers = multiprocessing.cpu_count() or 1 + except NotImplementedError: + default_workers = 1 + return default_workers -- GitLab From bbc821d3da1e5f3774b44d7bd878ee2d93475c5e Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Wed, 8 Apr 2015 16:54:49 +0200 Subject: [PATCH 0416/1483] sqlalchemy: use precise DATETIME rather than DECIMAL in MySQL Recent MySQL versions have the support for precise timestamps. It's not on by default, but it's enable so we use a special type for that. Change-Id: I6717ab793d0b105edbca1054d80f17be6e3b4286 --- doc/source/configuration.rst | 2 +- .../5c4f93e5bb4_mysql_float_to_timestamp.py | 76 +++++++++++++++++++ gnocchi/indexer/sqlalchemy.py | 2 +- gnocchi/indexer/sqlalchemy_base.py | 43 ++++++++--- ...sql_precise_datetime-57f868f3f42302e2.yaml | 4 + 5 files changed, 113 insertions(+), 14 deletions(-) create mode 100644 gnocchi/indexer/alembic/versions/5c4f93e5bb4_mysql_float_to_timestamp.py create mode 100644 releasenotes/notes/mysql_precise_datetime-57f868f3f42302e2.yaml diff --git a/doc/source/configuration.rst b/doc/source/configuration.rst index 2480c1a3..87ed7f68 100644 --- a/doc/source/configuration.rst +++ b/doc/source/configuration.rst @@ -43,7 +43,7 @@ Gnocchi provides these storage drivers: Gnocchi provides these indexer drivers: - `PostgreSQL`_ (recommended) -- `MySQL`_ +- `MySQL`_ (at least version 5.6.4) .. _`Swift`: https://launchpad.net/swift .. 
_`Ceph`: http://ceph.com/ diff --git a/gnocchi/indexer/alembic/versions/5c4f93e5bb4_mysql_float_to_timestamp.py b/gnocchi/indexer/alembic/versions/5c4f93e5bb4_mysql_float_to_timestamp.py new file mode 100644 index 00000000..9df79fae --- /dev/null +++ b/gnocchi/indexer/alembic/versions/5c4f93e5bb4_mysql_float_to_timestamp.py @@ -0,0 +1,76 @@ +# -*- encoding: utf-8 -*- +# +# Copyright 2016 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""mysql_float_to_timestamp + +Revision ID: 5c4f93e5bb4 +Revises: 7e6f9d542f8b +Create Date: 2016-07-25 15:36:36.469847 + +""" + +from alembic import op +import sqlalchemy as sa +from sqlalchemy.sql import func + +from gnocchi.indexer import sqlalchemy_base + +# revision identifiers, used by Alembic. +revision = '5c4f93e5bb4' +down_revision = '27d2a1d205ff' +branch_labels = None +depends_on = None + + +def upgrade(): + bind = op.get_bind() + if bind and bind.engine.name == "mysql": + # NOTE(jd) So that crappy engine that is MySQL does not have "ALTER + # TABLE … USING …". 
We need to copy everything and convert… + for table_name, column_name in (("resource", "started_at"), + ("resource", "ended_at"), + ("resource", "revision_start"), + ("resource_history", "started_at"), + ("resource_history", "ended_at"), + ("resource_history", "revision_start"), + ("resource_history", "revision_end"), + ("resource_type", "updated_at")): + + nullable = column_name == "ended_at" + + existing_type = sa.types.DECIMAL( + precision=20, scale=6, asdecimal=True) + existing_col = sa.Column( + column_name, + existing_type, + nullable=nullable) + temp_col = sa.Column( + column_name + "_ts", + sqlalchemy_base.TimestampUTC(), + nullable=nullable) + op.add_column(table_name, temp_col) + t = sa.sql.table(table_name, existing_col, temp_col) + op.execute(t.update().values( + **{column_name + "_ts": func.from_unixtime(existing_col)})) + op.drop_column(table_name, column_name) + op.alter_column(table_name, + column_name + "_ts", + nullable=nullable, + type_=sqlalchemy_base.TimestampUTC(), + existing_nullable=nullable, + existing_type=existing_type, + new_column_name=column_name) diff --git a/gnocchi/indexer/sqlalchemy.py b/gnocchi/indexer/sqlalchemy.py index fe56466f..dfe13971 100644 --- a/gnocchi/indexer/sqlalchemy.py +++ b/gnocchi/indexer/sqlalchemy.py @@ -1165,7 +1165,7 @@ class QueryTransformer(object): if value is not None: converter = None - if isinstance(attr.type, base.PreciseTimestamp): + if isinstance(attr.type, base.TimestampUTC): converter = utils.to_timestamp elif (isinstance(attr.type, sqlalchemy_utils.UUIDType) and not isinstance(value, uuid.UUID)): diff --git a/gnocchi/indexer/sqlalchemy_base.py b/gnocchi/indexer/sqlalchemy_base.py index fa9c5021..74fc33f6 100644 --- a/gnocchi/indexer/sqlalchemy_base.py +++ b/gnocchi/indexer/sqlalchemy_base.py @@ -1,5 +1,6 @@ # -*- encoding: utf-8 -*- # +# Copyright © 2016 Red Hat, Inc. 
# Copyright © 2014-2015 eNovance # # Licensed under the Apache License, Version 2.0 (the "License"); you may @@ -24,6 +25,7 @@ from oslo_utils import timeutils from oslo_utils import units import six import sqlalchemy +from sqlalchemy.dialects import mysql from sqlalchemy.ext import declarative from sqlalchemy import types import sqlalchemy_utils @@ -42,7 +44,11 @@ COMMON_TABLES_ARGS = {'mysql_charset': "utf8", class PreciseTimestamp(types.TypeDecorator): - """Represents a timestamp precise to the microsecond.""" + """Represents a timestamp precise to the microsecond. + + Deprecated in favor of TimestampUTC. + Still used in alembic migrations. + """ impl = sqlalchemy.DateTime @@ -100,6 +106,25 @@ class PreciseTimestamp(types.TypeDecorator): tzinfo=iso8601.iso8601.UTC) +class TimestampUTC(types.TypeDecorator): + """Represents a timestamp precise to the microsecond.""" + + impl = sqlalchemy.DateTime + + def load_dialect_impl(self, dialect): + if dialect.name == 'mysql': + return dialect.type_descriptor(mysql.DATETIME(fsp=6)) + return self.impl + + def process_bind_param(self, value, dialect): + if value is not None: + return timeutils.normalize_time(value) + + def process_result_value(self, value, dialect): + if value is not None: + return value.replace(tzinfo=iso8601.iso8601.UTC) + + class GnocchiBase(models.ModelBase): __table_args__ = ( COMMON_TABLES_ARGS, @@ -249,7 +274,7 @@ class ResourceType(Base, GnocchiBase, resource_type.ResourceType): name="resource_type_state_enum"), nullable=False, server_default="creating") - updated_at = sqlalchemy.Column(PreciseTimestamp, nullable=False, + updated_at = sqlalchemy.Column(TimestampUTC, nullable=False, # NOTE(jd): We would like to use # sqlalchemy.func.now, but we can't # because the type of PreciseTimestamp in @@ -297,17 +322,11 @@ class ResourceMixin(ResourceJsonifier): sqlalchemy.String(255)) created_by_project_id = sqlalchemy.Column( sqlalchemy.String(255)) - started_at = sqlalchemy.Column(PreciseTimestamp, 
nullable=False, - # NOTE(jd): We would like to use - # sqlalchemy.func.now, but we can't - # because the type of PreciseTimestamp in - # MySQL is not a Timestamp, so it would - # not store a timestamp but a date as an - # integer. + started_at = sqlalchemy.Column(TimestampUTC, nullable=False, default=lambda: utils.utcnow()) - revision_start = sqlalchemy.Column(PreciseTimestamp, nullable=False, + revision_start = sqlalchemy.Column(TimestampUTC, nullable=False, default=lambda: utils.utcnow()) - ended_at = sqlalchemy.Column(PreciseTimestamp) + ended_at = sqlalchemy.Column(TimestampUTC) user_id = sqlalchemy.Column(sqlalchemy.String(255)) project_id = sqlalchemy.Column(sqlalchemy.String(255)) original_resource_id = sqlalchemy.Column(sqlalchemy.String(255)) @@ -346,7 +365,7 @@ class ResourceHistory(ResourceMixin, Base, GnocchiBase): ondelete="CASCADE", name="fk_rh_id_resource_id"), nullable=False) - revision_end = sqlalchemy.Column(PreciseTimestamp, nullable=False, + revision_end = sqlalchemy.Column(TimestampUTC, nullable=False, default=lambda: utils.utcnow()) metrics = sqlalchemy.orm.relationship( Metric, primaryjoin="Metric.resource_id == ResourceHistory.id", diff --git a/releasenotes/notes/mysql_precise_datetime-57f868f3f42302e2.yaml b/releasenotes/notes/mysql_precise_datetime-57f868f3f42302e2.yaml new file mode 100644 index 00000000..579c835d --- /dev/null +++ b/releasenotes/notes/mysql_precise_datetime-57f868f3f42302e2.yaml @@ -0,0 +1,4 @@ +--- +other: + - Gnocchi now leverages microseconds timestamps available since MySQL 5.6.4, + meaning it is now the minimum required version of MySQL. -- GitLab From 811a8fd3ce4ff40bb3398835595c13529ed9f650 Mon Sep 17 00:00:00 2001 From: Nishant Kumar Date: Sat, 1 Oct 2016 12:31:49 +0530 Subject: [PATCH 0417/1483] Stop adding ServiceAvailable group option Service available group already exists.Therefore we don't need to register this group here again. 
Change-Id: I86a7cecbafb1785c9fc15a1d78fe2a79c5527bf1 Closes-Bug: #1621036 --- gnocchi/tempest/config.py | 13 ++++--------- gnocchi/tempest/plugin.py | 13 +++++-------- 2 files changed, 9 insertions(+), 17 deletions(-) diff --git a/gnocchi/tempest/config.py b/gnocchi/tempest/config.py index 54bf8ff9..74d7ef3e 100644 --- a/gnocchi/tempest/config.py +++ b/gnocchi/tempest/config.py @@ -13,15 +13,10 @@ from oslo_config import cfg - -service_available_group = cfg.OptGroup(name="service_available", - title="Available OpenStack Services") - -service_available_opts = [ - cfg.BoolOpt("gnocchi", - default=True, - help="Whether or not Gnocchi is expected to be available"), -] +service_option = cfg.BoolOpt('gnocchi', + default=True, + help="Whether or not Gnocchi is expected to be" + "available") metric_group = cfg.OptGroup(name='metric', title='Metric Service Options') diff --git a/gnocchi/tempest/plugin.py b/gnocchi/tempest/plugin.py index d4453694..3410471f 100644 --- a/gnocchi/tempest/plugin.py +++ b/gnocchi/tempest/plugin.py @@ -16,7 +16,6 @@ from __future__ import absolute_import import os -from tempest import config from tempest.test_discover import plugins import gnocchi @@ -32,14 +31,12 @@ class GnocchiTempestPlugin(plugins.TempestPlugin): return full_test_dir, base_path def register_opts(self, conf): - config.register_opt_group(conf, - tempest_config.service_available_group, - tempest_config.service_available_opts) - config.register_opt_group(conf, - tempest_config.metric_group, - tempest_config.metric_opts) + conf.register_opt(tempest_config.service_option, + group='service_available') + conf.register_group(tempest_config.metric_group) + conf.register_opts(tempest_config.metric_opts, group='metric') def get_opt_lists(self): return [(tempest_config.metric_group.name, tempest_config.metric_opts), - ('service_available', tempest_config.service_available_opts)] + ('service_available', [tempest_config.service_option])] -- GitLab From 
bb3cd40774fcb6633ea2a24551eab28e13403fc1 Mon Sep 17 00:00:00 2001 From: Hanxi Liu Date: Wed, 5 Oct 2016 00:11:06 +0800 Subject: [PATCH 0418/1483] Remove the file named MANIFEST.in This file is automatically generated by pbr[1]. There appears to be no good reason to keep it around. Note: we don't generate MANIFEST.in in a file format from pbr, but as input for setuptools.setup(). [1]https://github.com/openstack-dev/pbr/blob/master/pbr/packaging.py#L454 Change-Id: I7a62adada41d130a40698f71043f3774a6c69f40 --- MANIFEST.in | 1 - 1 file changed, 1 deletion(-) delete mode 100644 MANIFEST.in diff --git a/MANIFEST.in b/MANIFEST.in deleted file mode 100644 index 8f248e6e..00000000 --- a/MANIFEST.in +++ /dev/null @@ -1 +0,0 @@ -include etc/gnocchi/gnocchi.conf -- GitLab From b36b66eedfcc18e7c6f1944c73e422712bb816ef Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Tue, 4 Oct 2016 18:28:54 +0200 Subject: [PATCH 0419/1483] Fix oslo.log minimum requirement Gnocchi actually uses oslo_log.log.get_default_log_levels which is provided by oslo.log starting at 2.3.0. Change-Id: Idd01d577a0b85a6603f5c69f783c951c7a0bd438 --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index dfec86af..818e3b45 100644 --- a/requirements.txt +++ b/requirements.txt @@ -2,7 +2,7 @@ pbr numpy iso8601 oslo.config>=2.6.0 -oslo.log>=1.0.0 +oslo.log>=2.3.0 oslo.policy>=0.3.0 oslo.serialization>=1.4.0 oslo.utils>=3.3.0 -- GitLab From afdb60597a156c3968ea324893099ae8ddf44d21 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Tue, 4 Oct 2016 18:28:54 +0200 Subject: [PATCH 0420/1483] Fix oslo.log minimum requirement Gnocchi actually uses oslo_log.log.get_default_log_levels which is provided by oslo.log starting at 2.3.0. 
Change-Id: Idd01d577a0b85a6603f5c69f783c951c7a0bd438 (cherry picked from commit b36b66eedfcc18e7c6f1944c73e422712bb816ef) --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 23cf70d1..818c2e75 100644 --- a/requirements.txt +++ b/requirements.txt @@ -2,7 +2,7 @@ pbr numpy iso8601 oslo.config>=2.6.0 -oslo.log>=1.0.0 +oslo.log>=2.3.0 oslo.policy>=0.3.0 oslo.serialization>=1.4.0 oslo.utils>=3.3.0 -- GitLab From 35e22d1f422f3e9e802272ac9e4f5ea922436824 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Thu, 29 Sep 2016 17:52:03 +0200 Subject: [PATCH 0421/1483] Fix some gabbi tests Last gabbi version checks that data have the right content-type. In theses tests we say that data in plain/text and we put json that's not correct, old version was silencly ignored and now this is correctly checked. Same when we put data but no content-type. Since our tests are wrong, this change fixes them. Change-Id: I9a60db296467b08ef32f956471ef5e4f81f462af (cherry picked from commit 35c4a08dd4163e42d4af82695fb8642952b63b91) --- gnocchi/tests/gabbi/gabbits-live/live.yaml | 3 +-- gnocchi/tests/gabbi/gabbits/archive.yaml | 1 + gnocchi/tests/gabbi/gabbits/metric.yaml | 3 +-- gnocchi/tests/gabbi/gabbits/resource.yaml | 7 ++----- 4 files changed, 5 insertions(+), 9 deletions(-) diff --git a/gnocchi/tests/gabbi/gabbits-live/live.yaml b/gnocchi/tests/gabbi/gabbits-live/live.yaml index 71b23ae9..9ed086c8 100644 --- a/gnocchi/tests/gabbi/gabbits-live/live.yaml +++ b/gnocchi/tests/gabbi/gabbits-live/live.yaml @@ -441,8 +441,7 @@ tests: method: POST request_headers: content-type: plain/text - data: - archive_policy_name: cookies + data: '{"archive_policy_name": "cookies"}' status: 415 diff --git a/gnocchi/tests/gabbi/gabbits/archive.yaml b/gnocchi/tests/gabbi/gabbits/archive.yaml index 5519d63a..42fe13c8 100644 --- a/gnocchi/tests/gabbi/gabbits/archive.yaml +++ b/gnocchi/tests/gabbi/gabbits/archive.yaml @@ -485,6 +485,7 @@ 
tests: - name: fail to create policy non-admin POST: /v1/archive_policy request_headers: + content-type: application/json x-user-id: b45187c5-150b-4730-bcb2-b5e04e234220 x-project-id: 16764ee0-bffe-4843-aa36-04b002cdbc7c data: diff --git a/gnocchi/tests/gabbi/gabbits/metric.yaml b/gnocchi/tests/gabbi/gabbits/metric.yaml index b31b3d00..45155f3d 100644 --- a/gnocchi/tests/gabbi/gabbits/metric.yaml +++ b/gnocchi/tests/gabbi/gabbits/metric.yaml @@ -115,8 +115,7 @@ tests: POST: /v1/metric request_headers: content-type: plain/text - data: - archive_policy_name: cookies + data: '{"archive_policy_name": "cookies"}' status: 415 - name: create valid metric diff --git a/gnocchi/tests/gabbi/gabbits/resource.yaml b/gnocchi/tests/gabbi/gabbits/resource.yaml index 8f3198c3..2730e093 100644 --- a/gnocchi/tests/gabbi/gabbits/resource.yaml +++ b/gnocchi/tests/gabbi/gabbits/resource.yaml @@ -157,11 +157,7 @@ tests: x-user-id: 0fbb231484614b1a80131fc22f6afc9c x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea content-type: text/plain - data: - id: f93450f2-d8a5-4d67-9985-02511241e7d1 - started_at: "2014-01-03T02:02:02.000000" - user_id: 0fbb231484614b1a80131fc22f6afc9c - project_id: f3d41b770cc14f0bb94a1d5be9c0e3ea + data: '{"id": "f93450f2-d8a5-4d67-9985-02511241e7d1", "started_at": "2014-01-03T02:02:02.000000", "user_id": "0fbb231484614b1a80131fc22f6afc9c", "project_id": "f3d41b770cc14f0bb94a1d5be9c0e3ea"}' status: 415 # Create a new instance resource, demonstrate that including no data @@ -339,6 +335,7 @@ tests: request_headers: x-user-id: 0fbb231484614b1a80131fc22f6afc9c x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea + content-type: application/json data: host: compute2 -- GitLab From 93bb23d4a00b89727522c700e82b60c18856cee1 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Wed, 5 Oct 2016 18:16:45 +0200 Subject: [PATCH 0422/1483] doc,tests: fix reaggregate/reaggregation mispelling The query argument used was mistyped, and the results were actually 'mean' and not 'min' in the end. 
Same in doc. Change-Id: Iac5683eecc923a4150e93ecb23cff343faad988b Related-Bug: #1630306 --- doc/source/rest.j2 | 2 +- gnocchi/tests/gabbi/gabbits/aggregation.yaml | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/doc/source/rest.j2 b/doc/source/rest.j2 index 143cc5bf..fbae6e55 100644 --- a/doc/source/rest.j2 +++ b/doc/source/rest.j2 @@ -494,7 +494,7 @@ It can also be done by providing the list of metrics to aggregate: By default, the measures are aggregated using the aggregation method provided, e.g. you'll get a mean of means, or a max of maxs. You can specify what method -to use over the retrieved aggregation by using the `reaggregate` parameter: +to use over the retrieved aggregation by using the `reaggregation` parameter: {{ scenarios['get-across-metrics-measures-by-metric-ids-reaggregate']['doc'] }} diff --git a/gnocchi/tests/gabbi/gabbits/aggregation.yaml b/gnocchi/tests/gabbi/gabbits/aggregation.yaml index 6cb11d6c..196f18ac 100644 --- a/gnocchi/tests/gabbi/gabbits/aggregation.yaml +++ b/gnocchi/tests/gabbi/gabbits/aggregation.yaml @@ -212,7 +212,7 @@ tests: - ['2015-03-06T14:33:57+00:00', 1.0, 23.1] - name: get measure aggregates by granularity from resources and reaggregate - POST: /v1/aggregation/resource/generic/metric/agg_meter?granularity=1&reaggregate=min + POST: /v1/aggregation/resource/generic/metric/agg_meter?granularity=1&reaggregation=min request_headers: x-user-id: 0fbb231484614b1a80131fc22f6afc9c x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea @@ -222,8 +222,8 @@ tests: delay: 1 response_json_paths: $: - - ['2015-03-06T14:33:57+00:00', 1.0, 23.1] - - ['2015-03-06T14:34:12+00:00', 1.0, 7.0] + - ['2015-03-06T14:33:57+00:00', 1.0, 3.1] + - ['2015-03-06T14:34:12+00:00', 1.0, 2.0] # Some negative tests -- GitLab From 50c6141343f0a932cb8d5066fc22f4fc4b152ffb Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Thu, 6 Oct 2016 18:25:37 +0200 Subject: [PATCH 0423/1483] Remove Python 3.4 support Change-Id: 
I12e6a50ac2cf1c96a9fa59054a5a751b6ca775e5 --- setup.cfg | 1 - tox.ini | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/setup.cfg b/setup.cfg index 93d67546..d26939fd 100644 --- a/setup.cfg +++ b/setup.cfg @@ -16,7 +16,6 @@ classifier = Programming Language :: Python Programming Language :: Python :: 2 Programming Language :: Python :: 2.7 - Programming Language :: Python :: 3.4 Programming Language :: Python :: 3.5 Topic :: System :: Monitoring diff --git a/tox.ini b/tox.ini index 98b7db85..8cdf8c98 100644 --- a/tox.ini +++ b/tox.ini @@ -1,6 +1,6 @@ [tox] minversion = 1.8 -envlist = py{34,35,27},py{34,35,27}-{postgresql,mysql}{,-file,-swift,-ceph},pep8,bashate,py35-postgresql-file-upgrade-from-2.2,py27-mysql-ceph-upgrade-from-2.2 +envlist = py{35,27},py{35,27}-{postgresql,mysql}{,-file,-swift,-ceph},pep8,bashate,py35-postgresql-file-upgrade-from-2.2,py27-mysql-ceph-upgrade-from-2.2 [testenv] usedevelop = True -- GitLab From 5186bcc6ba8ce258b36f7ea3301dd7f25ebdff19 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Thu, 6 Oct 2016 18:13:37 +0200 Subject: [PATCH 0424/1483] tox: only install all storage drivers in py-$index or py-#index-all There's no need to install e.g. Ceph when testing with Swift. 
Change-Id: I302f3fe4cd8e886e964961ece31228fa349ee797 --- tox.ini | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/tox.ini b/tox.ini index 8cdf8c98..448c6522 100644 --- a/tox.ini +++ b/tox.ini @@ -1,14 +1,11 @@ [tox] minversion = 1.8 -envlist = py{35,27},py{35,27}-{postgresql,mysql}{,-file,-swift,-ceph},pep8,bashate,py35-postgresql-file-upgrade-from-2.2,py27-mysql-ceph-upgrade-from-2.2 +envlist = py{35,27},py{35,27}-{postgresql,mysql}{,-all,-file,-swift,-ceph},pep8,bashate,py35-postgresql-file-upgrade-from-2.2,py27-mysql-ceph-upgrade-from-2.2 [testenv] usedevelop = True sitepackages = False passenv = LANG OS_TEST_TIMEOUT OS_STDOUT_CAPTURE OS_STDERR_CAPTURE OS_LOG_CAPTURE GNOCCHI_TEST_* -deps = .[test] - postgresql: .[postgresql,swift,file,ceph,ceph_recommended_lib] - mysql: .[mysql,swift,file,ceph,ceph_recommended_lib] setenv = GNOCCHI_TEST_STORAGE_DRIVER=file GNOCCHI_TEST_INDEXER_DRIVER=postgresql @@ -19,6 +16,14 @@ setenv = ceph: GNOCCHI_TEST_STORAGE_DRIVERS=ceph postgresql: GNOCCHI_TEST_INDEXER_DRIVERS=postgresql mysql: GNOCCHI_TEST_INDEXER_DRIVERS=mysql + + GNOCCHI_STORAGE_DEPS=file,swift,ceph,ceph_recommended_lib + ceph: GNOCCHI_STORAGE_DEPS=ceph,ceph_recommended_lib + swift: GNOCCHI_STORAGE_DEPS=swift + file: GNOCCHI_STORAGE_DEPS=file +deps = .[test] + postgresql: .[postgresql,{env:GNOCCHI_STORAGE_DEPS}] + mysql: .[mysql,{env:GNOCCHI_STORAGE_DEPS}] commands = doc8 --ignore-path doc/source/rest.rst doc/source oslo-config-generator --config-file=etc/gnocchi/gnocchi-config-generator.conf -- GitLab From 6baf05ba4d80b5c41bc4b67936818511eb7a673f Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Fri, 15 Apr 2016 16:40:57 +0200 Subject: [PATCH 0425/1483] Add a S3 based storage driver This relies on s3rver to test (via pifpaf), but also has been tested against AWS S3 and works perfectly. 
Change-Id: I99d6394f662fdecd4021990aa3377516bebff674 --- doc/source/architecture.rst | 7 +- doc/source/configuration.rst | 5 + doc/source/install.rst | 1 + doc/source/running.rst | 4 +- gnocchi/opts.py | 4 +- gnocchi/storage/s3.py | 359 ++++++++++++++++++ gnocchi/tests/base.py | 12 +- gnocchi/tests/test_storage.py | 16 + gnocchi/utils.py | 10 + .../notes/s3_driver-4b30122bdbe0385d.yaml | 5 + run-tests.sh | 13 + setup.cfg | 8 +- tox.ini | 12 +- 13 files changed, 443 insertions(+), 13 deletions(-) create mode 100644 gnocchi/storage/s3.py create mode 100644 releasenotes/notes/s3_driver-4b30122bdbe0385d.yaml diff --git a/doc/source/architecture.rst b/doc/source/architecture.rst index d7c682f6..8ce43846 100755 --- a/doc/source/architecture.rst +++ b/doc/source/architecture.rst @@ -36,6 +36,7 @@ Gnocchi currently offers different storage drivers: * File * Swift +* S3 * Ceph (preferred) The drivers are based on an intermediate library, named *Carbonara*, which @@ -49,9 +50,9 @@ than the file driver. Depending on the size of your architecture, using the file driver and storing your data on a disk might be enough. If you need to scale the number of server with the file driver, you can export and share the data via NFS among all -Gnocchi processes. In any case, it is obvious that Ceph and Swift drivers are -largely more scalable. Ceph also offers better consistency, and hence is the -recommended driver. +Gnocchi processes. In any case, it is obvious that S3, Ceph and Swift drivers +are largely more scalable. Ceph also offers better consistency, and hence is +the recommended driver. How to plan for Gnocchi’s storage --------------------------------- diff --git a/doc/source/configuration.rst b/doc/source/configuration.rst index 87ed7f68..9dce1a8f 100644 --- a/doc/source/configuration.rst +++ b/doc/source/configuration.rst @@ -32,6 +32,9 @@ options you want to change and configure: | storage.ceph_* | Configuration options to access Ceph | | | if you use the Ceph storage driver. 
| +---------------------+---------------------------------------------------+ +| storage.s3_* | Configuration options to access S3 | +| | if you use the S3 storage driver. | ++---------------------+---------------------------------------------------+ Gnocchi provides these storage drivers: @@ -39,6 +42,7 @@ Gnocchi provides these storage drivers: - File (default) - `Swift`_ - `Ceph`_ +- `S3`_ Gnocchi provides these indexer drivers: @@ -47,6 +51,7 @@ Gnocchi provides these indexer drivers: .. _`Swift`: https://launchpad.net/swift .. _`Ceph`: http://ceph.com/ +.. _`S3`: https://aws.amazon.com/s3/ .. _`PostgreSQL`: http://postgresql.org .. _`MySQL`: http://mysql.com diff --git a/doc/source/install.rst b/doc/source/install.rst index 72a64c86..31af07bf 100644 --- a/doc/source/install.rst +++ b/doc/source/install.rst @@ -46,6 +46,7 @@ The list of variants available is: * mysql - provides MySQL indexer support * postgresql – provides PostgreSQL indexer support * swift – provides OpenStack Swift storage support +* s3 – provides Amazon S3 storage support * ceph – provides common part of Ceph storage support * ceph_recommended_lib – provides Ceph (>=0.80) storage support * ceph_alternative_lib – provides Ceph (>=10.1.0) storage support diff --git a/doc/source/running.rst b/doc/source/running.rst index 3e83dfbf..5a9f91eb 100644 --- a/doc/source/running.rst +++ b/doc/source/running.rst @@ -68,7 +68,7 @@ How to backup and restore Gnocchi In order to be able to recover from an unfortunate event, you need to backup both the index and the storage. That means creating a database dump (PostgreSQL -or MySQL) and doing snapshots or copy of your data storage (Ceph, Swift or your -file system). The procedure to restore is no more complicated than initial +or MySQL) and doing snapshots or copy of your data storage (Ceph, S3, Swift or +your file system). 
The procedure to restore is no more complicated than initial deployment: restore your index and storage backups, reinstall Gnocchi if necessary, and restart it. diff --git a/gnocchi/opts.py b/gnocchi/opts.py index 453510d2..54197ba3 100644 --- a/gnocchi/opts.py +++ b/gnocchi/opts.py @@ -22,6 +22,7 @@ import gnocchi.indexer import gnocchi.storage import gnocchi.storage.ceph import gnocchi.storage.file +import gnocchi.storage.s3 import gnocchi.storage.swift @@ -49,7 +50,8 @@ def list_opts(): gnocchi.storage.OPTS, gnocchi.storage.ceph.OPTS, gnocchi.storage.file.OPTS, - gnocchi.storage.swift.OPTS)), + gnocchi.storage.swift.OPTS, + gnocchi.storage.s3.OPTS)), ("statsd", ( cfg.StrOpt('host', default='0.0.0.0', diff --git a/gnocchi/storage/s3.py b/gnocchi/storage/s3.py new file mode 100644 index 00000000..091a49ca --- /dev/null +++ b/gnocchi/storage/s3.py @@ -0,0 +1,359 @@ +# -*- encoding: utf-8 -*- +# +# Copyright © 2016 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+from collections import defaultdict +import contextlib +import datetime +import logging +import os +import uuid + +from oslo_config import cfg +import retrying +import six +try: + import boto3 + import botocore.exceptions +except ImportError: + boto3 = None + +from gnocchi import storage +from gnocchi.storage import _carbonara +from gnocchi import utils + +LOG = logging.getLogger(__name__) + +OPTS = [ + cfg.StrOpt('s3_endpoint_url', + help='S3 endpoint URL'), + cfg.StrOpt('s3_region_name', + default=os.getenv("AWS_DEFAULT_REGION"), + help='S3 region name'), + cfg.StrOpt('s3_access_key_id', + default=os.getenv("AWS_ACCESS_KEY_ID"), + help='S3 access key id'), + cfg.StrOpt('s3_secret_access_key', + default=os.getenv("AWS_SECRET_ACCESS_KEY"), + help='S3 secret access key'), + cfg.StrOpt('s3_bucket_prefix', + default='gnocchi', + help='Prefix to namespace metric bucket.'), +] + + +def retry_if_operationaborted(exception): + return (isinstance(exception, botocore.exceptions.ClientError) + and exception.response['Error'].get('Code') == "OperationAborted") + + +class S3Storage(_carbonara.CarbonaraBasedStorage): + + WRITE_FULL = True + + def __init__(self, conf): + super(S3Storage, self).__init__(conf) + if boto3 is None: + raise RuntimeError("boto3 unavailable") + self.s3 = boto3.client( + 's3', + endpoint_url=conf.s3_endpoint_url, + region_name=conf.s3_region_name, + aws_access_key_id=conf.s3_access_key_id, + aws_secret_access_key=conf.s3_secret_access_key) + self._region_name = conf.s3_region_name + self._bucket_prefix = conf.s3_bucket_prefix + self._bucket_name_measures = ( + self._bucket_prefix + "-" + self.MEASURE_PREFIX + ) + try: + self._create_bucket(self._bucket_name_measures) + except botocore.exceptions.ClientError as e: + if e.response['Error'].get('Code') not in ( + "BucketAlreadyExists", "BucketAlreadyOwnedByYou" + ): + raise + + # NOTE(jd) OperationAborted might be raised if we try to create the bucket + # for the first time at the same time + 
@retrying.retry(stop_max_attempt_number=10, + wait_fixed=500, + retry_on_exception=retry_if_operationaborted) + def _create_bucket(self, name): + if self._region_name: + kwargs = dict(CreateBucketConfiguration={ + "LocationConstraint": self._region_name, + }) + else: + kwargs = {} + return self.s3.create_bucket(Bucket=name, **kwargs) + + def _bucket_name(self, metric): + return '%s-%s' % (self._bucket_prefix, str(metric.id)) + + @staticmethod + def _object_name(split_key, aggregation, granularity, version=3): + name = '%s_%s_%s' % (split_key, aggregation, granularity) + return name + '_v%s' % version if version else name + + def _create_metric(self, metric): + try: + self._create_bucket(self._bucket_name(metric)) + except botocore.exceptions.ClientError as e: + if e.response['Error'].get('Code') != "BucketAlreadyExists": + raise + # raise storage.MetricAlreadyExists(metric) + + def _store_new_measures(self, metric, data): + now = datetime.datetime.utcnow().strftime("_%Y%m%d_%H:%M:%S") + self.s3.put_object( + Bucket=self._bucket_name_measures, + Key=(six.text_type(metric.id) + + "/" + + six.text_type(uuid.uuid4()) + + now), + Body=data) + + def _build_report(self, details): + metric_details = defaultdict(int) + response = {} + while response.get('IsTruncated', True): + if 'NextContinuationToken' in response: + kwargs = { + 'ContinuationToken': response['NextContinuationToken'] + } + else: + kwargs = {} + response = self.s3.list_objects_v2( + Bucket=self._bucket_name_measures, + **kwargs) + for c in response.get('Contents', ()): + metric, metric_file = c['Key'].split("/", 1) + metric_details[metric] += 1 + return (len(metric_details), sum(metric_details.values()), + metric_details if details else None) + + def list_metric_with_measures_to_process(self, size, part, full=False): + if full: + limit = 1000 # 1000 is the default anyway + else: + limit = size * (part + 1) + + metrics = set() + response = {} + # Handle pagination + while response.get('IsTruncated', True): + 
if 'NextContinuationToken' in response: + kwargs = { + 'ContinuationToken': response['NextContinuationToken'] + } + else: + kwargs = {} + response = self.s3.list_objects_v2( + Bucket=self._bucket_name_measures, + Delimiter="/", + MaxKeys=limit, + **kwargs) + for p in response.get('CommonPrefixes', ()): + metrics.add(p['Prefix'].rstrip('/')) + + if full: + return metrics + + return metrics[size * part:] + + def _list_measure_files_for_metric_id(self, metric_id): + files = set() + response = {} + while response.get('IsTruncated', True): + if 'NextContinuationToken' in response: + kwargs = { + 'ContinuationToken': response['NextContinuationToken'] + } + else: + kwargs = {} + response = self.s3.list_objects_v2( + Bucket=self._bucket_name_measures, + Prefix=six.text_type(metric_id) + "/", + **kwargs) + + for c in response.get('Contents', ()): + files.add(c['Key']) + + return files + + def _pending_measures_to_process_count(self, metric_id): + return len(self._list_measure_files_for_metric_id(metric_id)) + + def _bulk_delete(self, bucket, objects): + # NOTE(jd) The maximum object to delete at once is 1000 + # TODO(jd) Parallelize? 
+ deleted = 0 + for obj_slice in utils.grouper(objects, 1000): + d = { + 'Objects': [{'Key': o} for o in obj_slice], + # FIXME(jd) Use Quiet mode, but s3rver does not seem to + # support it + # 'Quiet': True, + } + response = self.s3.delete_objects( + Bucket=bucket, + Delete=d) + deleted += len(response['Deleted']) + LOG.debug('%s objects deleted, %s objects skipped', + deleted, + len(objects) - deleted) + + def _delete_unprocessed_measures_for_metric_id(self, metric_id): + files = self._list_measure_files_for_metric_id(metric_id) + self._bulk_delete(self._bucket_name_measures, files) + + @contextlib.contextmanager + def _process_measure_for_metric(self, metric): + files = self._list_measure_files_for_metric_id(metric.id) + + measures = [] + for f in files: + response = self.s3.get_object( + Bucket=self._bucket_name_measures, + Key=f) + measures.extend( + self._unserialize_measures(response['Body'].read())) + + yield measures + + # Now clean objects + self._bulk_delete(self._bucket_name_measures, files) + + def _store_metric_measures(self, metric, timestamp_key, aggregation, + granularity, data, offset=0, version=3): + self.s3.put_object( + Bucket=self._bucket_name(metric), + Key=self._object_name( + timestamp_key, aggregation, granularity, version), + Body=data) + + def _delete_metric_measures(self, metric, timestamp_key, aggregation, + granularity, version=3): + self.s3.delete_object( + Bucket=self._bucket_name(metric), + Key=self._object_name( + timestamp_key, aggregation, granularity, version)) + + def _delete_metric(self, metric): + self._delete_unaggregated_timeserie(metric) + bucket = self._bucket_name(metric) + response = {} + while response.get('IsTruncated', True): + if 'NextContinuationToken' in response: + kwargs = { + 'ContinuationToken': response['NextContinuationToken'] + } + else: + kwargs = {} + try: + response = self.s3.list_objects_v2( + Bucket=bucket, **kwargs) + except botocore.exceptions.ClientError as e: + if e.response['Error'].get('Code') 
== "NoSuchBucket": + # Maybe it never has been created (no measure) + return + raise + self._bulk_delete(bucket, [c['Key'] + for c in response.get('Contents', ())]) + try: + self.s3.delete_bucket(Bucket=bucket) + except botocore.exceptions.ClientError as e: + if e.response['Error'].get('Code') != "NoSuchBucket": + raise + + def _get_measures(self, metric, timestamp_key, aggregation, granularity, + version=3): + try: + response = self.s3.get_object( + Bucket=self._bucket_name(metric), + Key=self._object_name( + timestamp_key, aggregation, granularity, version)) + except botocore.exceptions.ClientError as e: + code = e.response['Error'].get('Code') + if code == "NoSuchBucket": + raise storage.MetricDoesNotExist(metric) + elif code == "NoSuchKey": + raise storage.AggregationDoesNotExist(metric, aggregation) + raise + return response['Body'].read() + + def _list_split_keys_for_metric(self, metric, aggregation, granularity, + version=None): + bucket = self._bucket_name(metric) + keys = set() + response = {} + while response.get('IsTruncated', True): + if 'NextContinuationToken' in response: + kwargs = { + 'ContinuationToken': response['NextContinuationToken'] + } + else: + kwargs = {} + try: + response = self.s3.list_objects_v2( + Bucket=bucket, + **kwargs) + except botocore.exceptions.ClientError as e: + if e.response['Error'].get('Code') == "NoSuchBucket": + raise storage.MetricDoesNotExist(metric) + raise + for f in response.get('Contents', ()): + try: + meta = f['Key'].split('_') + if (aggregation == meta[1] + and granularity == float(meta[2]) + and self._version_check(f['Key'], version)): + keys.add(meta[0]) + except (ValueError, IndexError): + # Might be "none", or any other file. Be resilient. 
+ continue + return keys + + @staticmethod + def _build_unaggregated_timeserie_path(version): + return 'none' + ("_v%s" % version if version else "") + + def _get_unaggregated_timeserie(self, metric, version=3): + try: + response = self.s3.get_object( + Bucket=self._bucket_name(metric), + Key=self._build_unaggregated_timeserie_path(version)) + except botocore.exceptions.ClientError as e: + if e.response['Error'].get('Code') in ("NoSuchBucket", + "NoSuchKey"): + raise storage.MetricDoesNotExist(metric) + raise + return response['Body'].read() + + def _store_unaggregated_timeserie(self, metric, data, version=3): + self.s3.put_object( + Bucket=self._bucket_name(metric), + Key=self._build_unaggregated_timeserie_path(version), + Body=data) + + def _delete_unaggregated_timeserie(self, metric, version=3): + try: + self.s3.delete_object( + Bucket=self._bucket_name(metric), + Key=self._build_unaggregated_timeserie_path(version)) + except botocore.exceptions.ClientError as e: + code = e.response['Error'].get('Code') + if code not in ("NoSuchKey", "NoSuchBucket"): + raise diff --git a/gnocchi/tests/base.py b/gnocchi/tests/base.py index 667a1468..5da70b57 100644 --- a/gnocchi/tests/base.py +++ b/gnocchi/tests/base.py @@ -1,6 +1,6 @@ # -*- encoding: utf-8 -*- # -# Copyright © 2014-2015 eNovance +# Copyright © 2014-2016 eNovance # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain @@ -208,6 +208,16 @@ class TestCase(base.BaseTestCase): self.path_get('etc/gnocchi/policy.json'), group="oslo_policy") + # NOTE(jd) This allows to test S3 on AWS + if not os.getenv("AWS_ACCESS_KEY_ID"): + self.conf.set_override('s3_endpoint_url', + os.getenv("GNOCCHI_STORAGE_HTTP_URL"), + group="storage") + self.conf.set_override('s3_access_key_id', "gnocchi", + group="storage") + self.conf.set_override('s3_secret_access_key', "anythingworks", + group="storage") + self.index = indexer.get_driver(self.conf) self.index.connect() diff --git a/gnocchi/tests/test_storage.py b/gnocchi/tests/test_storage.py index 65b42240..0e468f00 100644 --- a/gnocchi/tests/test_storage.py +++ b/gnocchi/tests/test_storage.py @@ -56,6 +56,10 @@ class TestStorageDriver(tests_base.TestCase): def test_corrupted_data(self): if not isinstance(self.storage, _carbonara.CarbonaraBasedStorage): self.skipTest("This driver is not based on Carbonara") + if self.conf.storage.driver == "s3": + self.skipTest( + "This test does not work with S3 as backend as the S3 driver " + "has no fake client, and tests run in parallel.") self.storage.add_measures(self.metric, [ storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 0, 1), 69), @@ -78,6 +82,10 @@ class TestStorageDriver(tests_base.TestCase): self.assertIn((utils.datetime_utc(2014, 1, 1, 13), 300.0, 1), m) def test_list_metric_with_measures_to_process(self): + if self.conf.storage.driver == "s3": + self.skipTest( + "This test does not work with S3 as backend as the S3 driver " + "has no fake client, and tests run in parallel.") metrics = self.storage.list_metric_with_measures_to_process( None, None, full=True) self.assertEqual(set(), metrics) @@ -143,6 +151,10 @@ class TestStorageDriver(tests_base.TestCase): @mock.patch('gnocchi.carbonara.SplitKey.POINTS_PER_SPLIT', 48) def test_add_measures_update_subset_split(self): + if self.conf.storage.driver == "s3": + self.skipTest( + "This test does not work with S3 as backend as the S3 driver " + 
"has no fake client, and tests run in parallel.") m, m_sql = self._create_metric('medium') measures = [ storage.Measure(utils.dt_to_unix_ns(2014, 1, 6, i, j, 0), 100) @@ -166,6 +178,10 @@ class TestStorageDriver(tests_base.TestCase): self.assertEqual(1, count) def test_add_measures_update_subset(self): + if self.conf.storage.driver == "s3": + self.skipTest( + "This test does not work with S3 as backend as the S3 driver " + "has no fake client, and tests run in parallel.") m, m_sql = self._create_metric('medium') measures = [ storage.Measure(utils.dt_to_unix_ns(2014, 1, 6, i, j, 0), 100) diff --git a/gnocchi/utils.py b/gnocchi/utils.py index 139b89c8..97c13deb 100644 --- a/gnocchi/utils.py +++ b/gnocchi/utils.py @@ -14,6 +14,7 @@ # License for the specific language governing permissions and limitations # under the License. import datetime +import itertools import multiprocessing import iso8601 @@ -124,3 +125,12 @@ def get_default_workers(): except NotImplementedError: default_workers = 1 return default_workers + + +def grouper(iterable, n): + it = iter(iterable) + while True: + chunk = tuple(itertools.islice(it, n)) + if not chunk: + return + yield chunk diff --git a/releasenotes/notes/s3_driver-4b30122bdbe0385d.yaml b/releasenotes/notes/s3_driver-4b30122bdbe0385d.yaml new file mode 100644 index 00000000..535c6d1e --- /dev/null +++ b/releasenotes/notes/s3_driver-4b30122bdbe0385d.yaml @@ -0,0 +1,5 @@ +--- +features: + - New storage driver for AWS S3. + This new driver works in the same way that the Swift driver, expect that it + leverages the Amazon Web Services S3 object storage API. diff --git a/run-tests.sh b/run-tests.sh index b9d87eae..6577fd52 100755 --- a/run-tests.sh +++ b/run-tests.sh @@ -9,6 +9,19 @@ do do if [ "$GNOCCHI_TEST_STORAGE_DRIVER" == "ceph" ]; then pifpaf run ceph -- pifpaf -g GNOCCHI_INDEXER_URL run $indexer -- ./tools/pretty_tox.sh $* + elif [ "$GNOCCHI_TEST_STORAGE_DRIVER" == "s3" ] + then + if ! 
which s3rver >/dev/null 2>&1 + then + mkdir npm-s3rver + export NPM_CONFIG_PREFIX=npm-s3rver + npm install s3rver --global + export PATH=$PWD/npm-s3rver/bin:$PATH + fi + pifpaf -e GNOCCHI_STORAGE run s3rver -- \ + pifpaf -e GNOCCHI_INDEXER run $indexer -- \ + ./tools/pretty_tox.sh $* + else pifpaf -g GNOCCHI_INDEXER_URL run $indexer -- ./tools/pretty_tox.sh $* fi diff --git a/setup.cfg b/setup.cfg index d26939fd..380ab9e6 100644 --- a/setup.cfg +++ b/setup.cfg @@ -34,6 +34,11 @@ postgresql = sqlalchemy sqlalchemy-utils alembic>=0.7.6,!=0.8.1 +s3 = + boto3 + msgpack-python + lz4 + tooz>=1.38 swift = python-swiftclient>=3.1.0 msgpack-python @@ -59,7 +64,7 @@ doc = Jinja2 reno>=1.6.2 test = - pifpaf>=0.2.0 + pifpaf>=0.12.0 gabbi>=1.21.0 coverage>=3.6 fixtures @@ -100,6 +105,7 @@ gnocchi.storage = swift = gnocchi.storage.swift:SwiftStorage ceph = gnocchi.storage.ceph:CephStorage file = gnocchi.storage.file:FileStorage + s3 = gnocchi.storage.s3:S3Storage gnocchi.indexer = mysql = gnocchi.indexer.sqlalchemy:SQLAlchemyIndexer diff --git a/tox.ini b/tox.ini index 448c6522..e4b682ef 100644 --- a/tox.ini +++ b/tox.ini @@ -1,26 +1,28 @@ [tox] minversion = 1.8 -envlist = py{35,27},py{35,27}-{postgresql,mysql}{,-all,-file,-swift,-ceph},pep8,bashate,py35-postgresql-file-upgrade-from-2.2,py27-mysql-ceph-upgrade-from-2.2 +envlist = py{35,27},py{35,27}-{postgresql,mysql}{,-all,-file,-swift,-ceph,-s3},pep8,bashate,py35-postgresql-file-upgrade-from-2.2,py27-mysql-ceph-upgrade-from-2.2 [testenv] usedevelop = True sitepackages = False -passenv = LANG OS_TEST_TIMEOUT OS_STDOUT_CAPTURE OS_STDERR_CAPTURE OS_LOG_CAPTURE GNOCCHI_TEST_* +passenv = LANG OS_TEST_TIMEOUT OS_STDOUT_CAPTURE OS_STDERR_CAPTURE OS_LOG_CAPTURE GNOCCHI_TEST_* AWS_* setenv = GNOCCHI_TEST_STORAGE_DRIVER=file GNOCCHI_TEST_INDEXER_DRIVER=postgresql - GNOCCHI_TEST_STORAGE_DRIVERS=file swift ceph + GNOCCHI_TEST_STORAGE_DRIVERS=file swift ceph s3 GNOCCHI_TEST_INDEXER_DRIVERS=postgresql mysql file: 
GNOCCHI_TEST_STORAGE_DRIVERS=file swift: GNOCCHI_TEST_STORAGE_DRIVERS=swift ceph: GNOCCHI_TEST_STORAGE_DRIVERS=ceph + s3: GNOCCHI_TEST_STORAGE_DRIVERS=s3 postgresql: GNOCCHI_TEST_INDEXER_DRIVERS=postgresql mysql: GNOCCHI_TEST_INDEXER_DRIVERS=mysql - GNOCCHI_STORAGE_DEPS=file,swift,ceph,ceph_recommended_lib + GNOCCHI_STORAGE_DEPS=file,swift,s3,ceph,ceph_recommended_lib ceph: GNOCCHI_STORAGE_DEPS=ceph,ceph_recommended_lib swift: GNOCCHI_STORAGE_DEPS=swift file: GNOCCHI_STORAGE_DEPS=file + s3: GNOCCHI_STORAGE_DEPS=s3 deps = .[test] postgresql: .[postgresql,{env:GNOCCHI_STORAGE_DEPS}] mysql: .[mysql,{env:GNOCCHI_STORAGE_DEPS}] @@ -120,7 +122,7 @@ exclude = .tox,.eggs,doc show-source = true [testenv:genconfig] -deps = .[mysql,postgresql,test,file,ceph,swift] +deps = .[mysql,postgresql,test,file,ceph,swift,s3] commands = oslo-config-generator --config-file=etc/gnocchi/gnocchi-config-generator.conf [testenv:docs] -- GitLab From 510d799d05c738038159e74bf4d13140b6f2edca Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Wed, 21 Sep 2016 10:26:02 +0200 Subject: [PATCH 0426/1483] Use Cotyledon oslo config glue Oslo.service was providing some oslo config configuration. mainly to print all cfg options on startup and a timeout option in case of service doesn't stop gracefully in an timely fashion. It also reload the configuration file on sighup. This change uses the optional cotyledon helper to do that stuffs. 
Change-Id: I6227fab55b2f4d33611ca3c4bcf01239ff585748 --- etc/gnocchi/gnocchi-config-generator.conf | 1 + gnocchi/cli.py | 19 +++++++++++++++++-- .../notes/reloading-734a639a667c93ee.yaml | 6 ++++++ requirements.txt | 2 +- 4 files changed, 25 insertions(+), 3 deletions(-) create mode 100644 releasenotes/notes/reloading-734a639a667c93ee.yaml diff --git a/etc/gnocchi/gnocchi-config-generator.conf b/etc/gnocchi/gnocchi-config-generator.conf index fa6ae57b..a7918068 100644 --- a/etc/gnocchi/gnocchi-config-generator.conf +++ b/etc/gnocchi/gnocchi-config-generator.conf @@ -6,4 +6,5 @@ namespace = oslo.db namespace = oslo.log namespace = oslo.middleware namespace = oslo.policy +namespace = cotyledon namespace = keystonemiddleware.auth_token diff --git a/gnocchi/cli.py b/gnocchi/cli.py index 8ae29ca4..a4e7cdbc 100644 --- a/gnocchi/cli.py +++ b/gnocchi/cli.py @@ -19,6 +19,7 @@ import time import uuid import cotyledon +from cotyledon import oslo_config_glue from futurist import periodics import msgpack from oslo_config import cfg @@ -286,15 +287,29 @@ class MetricProcessor(MetricProcessBase): class MetricdServiceManager(cotyledon.ServiceManager): def __init__(self, conf): super(MetricdServiceManager, self).__init__() + oslo_config_glue.setup(self, conf) + self.conf = conf self.queue = multiprocessing.Manager().Queue() self.add(MetricScheduler, args=(self.conf, self.queue)) - self.add(MetricProcessor, args=(self.conf, self.queue), - workers=conf.metricd.workers) + self.metric_processor_id = self.add( + MetricProcessor, args=(self.conf, self.queue), + workers=conf.metricd.workers) self.add(MetricReporting, args=(self.conf,)) self.add(MetricJanitor, args=(self.conf,)) + self.register_hooks(on_reload=self.on_reload) + + def on_reload(self): + # NOTE(sileht): We do not implement reload() in Workers so all workers + # will received SIGHUP and exit gracefully, then their will be + # restarted with the new number of workers. 
This is important because + # we use the number of worker to declare the capability in tooz and + # to select the block of metrics to proceed. + self.reconfigure(self.metric_processor_id, + workers=self.conf.metricd.workers) + def run(self): super(MetricdServiceManager, self).run() self.queue.close() diff --git a/releasenotes/notes/reloading-734a639a667c93ee.yaml b/releasenotes/notes/reloading-734a639a667c93ee.yaml new file mode 100644 index 00000000..0cf2eb73 --- /dev/null +++ b/releasenotes/notes/reloading-734a639a667c93ee.yaml @@ -0,0 +1,6 @@ +--- +features: + - gnocchi-metricd now uses the cotyledon/oslo.config helper to handle + configuration file reloading. You can dynamically change the number + of workers by changing the configuration file and sending SIGHUP to the + metricd master process. diff --git a/requirements.txt b/requirements.txt index 818e3b45..5c83463c 100644 --- a/requirements.txt +++ b/requirements.txt @@ -12,7 +12,7 @@ pecan>=0.9 pytimeparse>=1.1.5 futures jsonpatch -cotyledon>=1.2.2 +cotyledon>=1.5.0 requests six stevedore -- GitLab From 2b1ed8d4528df8c3071343d1cd5764b6a7122fd1 Mon Sep 17 00:00:00 2001 From: Juan Antonio Osorio Robles Date: Mon, 10 Oct 2016 09:16:45 +0300 Subject: [PATCH 0427/1483] Add http_proxy_to_wsgi to api-paste This sets up the HTTPProxyToWSGI middleware in front of Gnocchi. The purpose of thise middleware is to set up the request URL correctly in case there is a proxy (For instance, a loadbalancer such as HAProxy) in front of Gnocchi. So, for instance, when TLS connections are being terminated in the proxy, and one tries to get the versions from the / resource of Gnocchi, one will notice that the protocol is incorrect; It will show 'http' instead of 'https'. So this middleware handles such cases. Thus helping Keystone discovery work correctly. The HTTPProxyToWSGI is off by default and needs to be enabled via a configuration value. 
Change-Id: Ic5526cf37e70335fa2cc70946a271253f227f129 Closes-Bug: #1590608 --- etc/gnocchi/api-paste.ini | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) diff --git a/etc/gnocchi/api-paste.ini b/etc/gnocchi/api-paste.ini index cb7becb0..ad56b17e 100644 --- a/etc/gnocchi/api-paste.ini +++ b/etc/gnocchi/api-paste.ini @@ -4,19 +4,22 @@ pipeline = gnocchi+noauth [composite:gnocchi+noauth] use = egg:Paste#urlmap -/ = gnocchiversions +/ = gnocchiversions_pipeline /v1 = gnocchiv1+noauth [composite:gnocchi+auth] use = egg:Paste#urlmap -/ = gnocchiversions +/ = gnocchiversions_pipeline /v1 = gnocchiv1+auth [pipeline:gnocchiv1+noauth] -pipeline = gnocchiv1 +pipeline = http_proxy_to_wsgi gnocchiv1 [pipeline:gnocchiv1+auth] -pipeline = keystone_authtoken gnocchiv1 +pipeline = http_proxy_to_wsgi keystone_authtoken gnocchiv1 + +[pipeline:gnocchiversions_pipeline] +pipeline = http_proxy_to_wsgi gnocchiversions [app:gnocchiversions] paste.app_factory = gnocchi.rest.app:app_factory @@ -29,3 +32,7 @@ root = gnocchi.rest.V1Controller [filter:keystone_authtoken] paste.filter_factory = keystonemiddleware.auth_token:filter_factory oslo_config_project = gnocchi + +[filter:http_proxy_to_wsgi] +paste.filter_factory = oslo_middleware.http_proxy_to_wsgi:HTTPProxyToWSGI.factory +oslo_config_project = gnocchi -- GitLab From 2ac7a209478a57d019b179b4e0bb331b232724f1 Mon Sep 17 00:00:00 2001 From: Hanxi Liu Date: Tue, 11 Oct 2016 01:19:59 +0800 Subject: [PATCH 0428/1483] Add helper for utcnow to epoch nano We use int(utils.datetime_to_unix() * int(10e8)) so much. So we need add a helper to simply them. 
Change-Id: I948c545b7366b66dd39d5d95383bff17e42fc7f2 Closes-Bug: #1630576 --- gnocchi/statsd.py | 6 +++--- gnocchi/tests/test_aggregates.py | 4 ++-- gnocchi/utils.py | 4 ++++ tools/measures_injector.py | 4 ++-- 4 files changed, 11 insertions(+), 7 deletions(-) diff --git a/gnocchi/statsd.py b/gnocchi/statsd.py index 6798ac56..ea00796a 100644 --- a/gnocchi/statsd.py +++ b/gnocchi/statsd.py @@ -65,14 +65,14 @@ class Stats(object): "Invalid sampling for ms: `%d`, should be none" % sampling) self.times[metric_name] = storage.Measure( - int(utils.datetime_to_unix(utils.utcnow()) * int(10e8)), value) + utils.dt_in_unix_ns(utils.utcnow()), value) elif metric_type == "g": if sampling is not None: raise ValueError( "Invalid sampling for g: `%d`, should be none" % sampling) self.gauges[metric_name] = storage.Measure( - int(utils.datetime_to_unix(utils.utcnow()) * int(10e8)), value) + utils.dt_in_unix_ns(utils.utcnow()), value) elif metric_type == "c": sampling = 1 if sampling is None else sampling if metric_name in self.counters: @@ -80,7 +80,7 @@ class Stats(object): else: current_value = 0 self.counters[metric_name] = storage.Measure( - int(utils.datetime_to_unix(utils.utcnow()) * int(10e8)), + utils.dt_in_unix_ns(utils.utcnow()), current_value + (value * (1 / sampling))) # TODO(jd) Support "set" type # elif metric_type == "s": diff --git a/gnocchi/tests/test_aggregates.py b/gnocchi/tests/test_aggregates.py index b11da8da..7ff7d490 100644 --- a/gnocchi/tests/test_aggregates.py +++ b/gnocchi/tests/test_aggregates.py @@ -60,8 +60,8 @@ class TestAggregates(tests_base.TestCase): uuid.uuid4(), self.archive_policies['medium']) start_time = utils.datetime_utc(2014, 1, 1, 12) incr = datetime.timedelta(seconds=spacing) - measures = [storage.Measure(int( - utils.datetime_to_unix(start_time + incr * n) * int(10e8)), val) + measures = [storage.Measure( + utils.dt_in_unix_ns(start_time + incr * n), val) for n, val in enumerate(data)] self.index.create_metric(metric.id, 
str(uuid.uuid4()), str(uuid.uuid4()), diff --git a/gnocchi/utils.py b/gnocchi/utils.py index 139b89c8..639a7ac7 100644 --- a/gnocchi/utils.py +++ b/gnocchi/utils.py @@ -118,6 +118,10 @@ def dt_to_unix_ns(*args): *args, tzinfo=iso8601.iso8601.UTC)) * int(10e8)) +def dt_in_unix_ns(timestamp): + return int(datetime_to_unix(timestamp) * int(10e8)) + + def get_default_workers(): try: default_workers = multiprocessing.cpu_count() or 1 diff --git a/tools/measures_injector.py b/tools/measures_injector.py index bdf64610..daa5f4d8 100755 --- a/tools/measures_injector.py +++ b/tools/measures_injector.py @@ -44,8 +44,8 @@ def injector(): def todo(metric): for _ in six.moves.range(conf.batch_of_measures): measures = [ - storage.Measure(int(utils.datetime_to_unix( - utils.utcnow()) * int(10e8)), random.random()) + storage.Measure( + utils.dt_in_unix_ns(utils.utcnow()), random.random()) for __ in six.moves.range(conf.measures_per_batch)] s.add_measures(metric, measures) -- GitLab From bc6a61d51df96efa4de074436ee98143d824bfb2 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Wed, 5 Oct 2016 18:16:45 +0200 Subject: [PATCH 0429/1483] doc,tests: fix reaggregate/reaggregation mispelling The query argument used was mistyped, and the results were actually 'mean' and not 'min' in the end. Same in doc. Change-Id: Iac5683eecc923a4150e93ecb23cff343faad988b Related-Bug: #1630306 --- doc/source/rest.j2 | 2 +- gnocchi/tests/gabbi/gabbits/aggregation.yaml | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/doc/source/rest.j2 b/doc/source/rest.j2 index c6018650..b76cedb5 100644 --- a/doc/source/rest.j2 +++ b/doc/source/rest.j2 @@ -489,7 +489,7 @@ It can also be done by providing the list of metrics to aggregate: By default, the measures are aggregated using the aggregation method provided, e.g. you'll get a mean of means, or a max of maxs. 
You can specify what method -to use over the retrieved aggregation by using the `reaggregate` parameter: +to use over the retrieved aggregation by using the `reaggregation` parameter: {{ scenarios['get-across-metrics-measures-by-metric-ids-reaggregate']['doc'] }} diff --git a/gnocchi/tests/gabbi/gabbits/aggregation.yaml b/gnocchi/tests/gabbi/gabbits/aggregation.yaml index 6cb11d6c..196f18ac 100644 --- a/gnocchi/tests/gabbi/gabbits/aggregation.yaml +++ b/gnocchi/tests/gabbi/gabbits/aggregation.yaml @@ -212,7 +212,7 @@ tests: - ['2015-03-06T14:33:57+00:00', 1.0, 23.1] - name: get measure aggregates by granularity from resources and reaggregate - POST: /v1/aggregation/resource/generic/metric/agg_meter?granularity=1&reaggregate=min + POST: /v1/aggregation/resource/generic/metric/agg_meter?granularity=1&reaggregation=min request_headers: x-user-id: 0fbb231484614b1a80131fc22f6afc9c x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea @@ -222,8 +222,8 @@ tests: delay: 1 response_json_paths: $: - - ['2015-03-06T14:33:57+00:00', 1.0, 23.1] - - ['2015-03-06T14:34:12+00:00', 1.0, 7.0] + - ['2015-03-06T14:33:57+00:00', 1.0, 3.1] + - ['2015-03-06T14:34:12+00:00', 1.0, 2.0] # Some negative tests -- GitLab From 201dc6d2cfedbc535435d84977b392dee38b4e02 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Tue, 11 Oct 2016 12:24:51 +0200 Subject: [PATCH 0430/1483] Revert "Remove the file named MANIFEST.in" This breaks gnocchi installation gnocchi.conf is no more generated without. This reverts commit bb3cd40774fcb6633ea2a24551eab28e13403fc1. 
Change-Id: Ieeb33bc06a741c75bc1438363f1a30f904ef0e2f --- MANIFEST.in | 1 + 1 file changed, 1 insertion(+) create mode 100644 MANIFEST.in diff --git a/MANIFEST.in b/MANIFEST.in new file mode 100644 index 00000000..8f248e6e --- /dev/null +++ b/MANIFEST.in @@ -0,0 +1 @@ +include etc/gnocchi/gnocchi.conf -- GitLab From 9a95873f4016792bd9c53aac0df93824375a127b Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Thu, 6 Oct 2016 23:08:25 +0200 Subject: [PATCH 0431/1483] carbonara: fix SplitKey with datetime greater than 32bits value Current implementation based on pandas.Timestamp can't handle keys that go further than 2^32 seconds after epoch, which makes e.g. archive policies with very high granularity failing. Change-Id: Idb81345544cc25e36447473e5115d9d856766c83 --- gnocchi/carbonara.py | 67 ++++++++++++++++++++------------- gnocchi/storage/_carbonara.py | 8 ++-- gnocchi/tests/test_carbonara.py | 30 +++++++++++---- 3 files changed, 68 insertions(+), 37 deletions(-) diff --git a/gnocchi/carbonara.py b/gnocchi/carbonara.py index ee0f5330..b0af065c 100644 --- a/gnocchi/carbonara.py +++ b/gnocchi/carbonara.py @@ -27,13 +27,10 @@ import re import struct import time -import iso8601 import lz4 import pandas import six -from gnocchi import utils - # NOTE(sileht): pandas relies on time.strptime() # and often triggers http://bugs.python.org/issue7980 # its dues to our heavy threads usage, this is the workaround @@ -325,7 +322,8 @@ class BoundTimeSerie(TimeSerie): self.ts = self.ts[self.first_block_timestamp():] -class SplitKey(pandas.Timestamp): +@functools.total_ordering +class SplitKey(object): """A class representing a split key. A split key is basically a timestamp that can be used to split @@ -336,33 +334,30 @@ class SplitKey(pandas.Timestamp): POINTS_PER_SPLIT = 3600 - @classmethod - def _init(cls, value, sampling): - # NOTE(jd) This should be __init__ but it does not work, because of… - # Pandas, Cython, whatever. 
- self = cls(value) - self._carbonara_sampling = sampling - return self + def __init__(self, value, sampling): + if isinstance(value, SplitKey): + self.key = value.key + elif isinstance(value, pandas.Timestamp): + self.key = value.value / 10e8 + else: + self.key = float(value) + + self._carbonara_sampling = float(sampling) @classmethod def from_timestamp_and_sampling(cls, timestamp, sampling): - return cls._init( + return cls( round_timestamp( timestamp, freq=sampling * cls.POINTS_PER_SPLIT * 10e8), sampling) - @classmethod - def from_key_string(cls, keystr, sampling): - return cls._init(float(keystr) * 10e8, sampling) - def __next__(self): """Get the split key of the next split. :return: A `SplitKey` object. """ - return self._init( - self + datetime.timedelta( - seconds=(self.POINTS_PER_SPLIT * self._carbonara_sampling)), + return self.__class__( + self.key + self._carbonara_sampling * self.POINTS_PER_SPLIT, self._carbonara_sampling) next = __next__ @@ -370,18 +365,35 @@ class SplitKey(pandas.Timestamp): def __iter__(self): return self + def __hash__(self): + return hash(self.key) + + def __lt__(self, other): + if isinstance(other, SplitKey): + return self.key < other.key + if isinstance(other, pandas.Timestamp): + return self.key * 10e8 < other.value + return self.key < other + + def __eq__(self, other): + if isinstance(other, SplitKey): + return self.key == other.key + if isinstance(other, pandas.Timestamp): + return self.key * 10e8 == other.value + return self.key == other + def __str__(self): return str(float(self)) def __float__(self): - ts = self.to_datetime() - if ts.tzinfo is None: - ts = ts.replace(tzinfo=iso8601.iso8601.UTC) - return utils.datetime_to_unix(ts) + return self.key + + def as_datetime(self): + return pandas.Timestamp(self.key, unit='s') def __repr__(self): return "<%s: %s / %fs>" % (self.__class__.__name__, - pandas.Timestamp.__repr__(self), + repr(self.key), self._carbonara_sampling) @@ -436,7 +448,7 @@ class 
AggregatedTimeSerie(TimeSerie): groupby = self.ts.groupby(functools.partial( SplitKey.from_timestamp_and_sampling, sampling=self.sampling)) for group, ts in groupby: - yield (SplitKey._init(group, self.sampling), + yield (SplitKey(group, self.sampling), AggregatedTimeSerie(self.sampling, self.aggregation_method, ts)) @@ -544,7 +556,10 @@ class AggregatedTimeSerie(TimeSerie): if not self.ts.index.is_monotonic: self.ts = self.ts.sort_index() offset_div = self.sampling * 10e8 - start = pandas.Timestamp(start).value + if isinstance(start, SplitKey): + start = start.as_datetime().value + else: + start = pandas.Timestamp(start).value # calculate how many seconds from start the series runs until and # initialize list to store alternating delimiter, float entries if compressed: diff --git a/gnocchi/storage/_carbonara.py b/gnocchi/storage/_carbonara.py index 8118dbee..f2ccdef6 100644 --- a/gnocchi/storage/_carbonara.py +++ b/gnocchi/storage/_carbonara.py @@ -239,7 +239,7 @@ class CarbonaraBasedStorage(storage.StorageDriver): oldest_mutable_timestamp): # NOTE(jd) We write the full split only if the driver works that way # (self.WRITE_FULL) or if the oldest_mutable_timestamp is out of range. - write_full = self.WRITE_FULL or oldest_mutable_timestamp >= next(key) + write_full = self.WRITE_FULL or next(key) < oldest_mutable_timestamp key_as_str = str(key) if write_full: try: @@ -301,7 +301,7 @@ class CarbonaraBasedStorage(storage.StorageDriver): archive_policy_def.granularity) existing_keys.remove(key) else: - oldest_key_to_keep = carbonara.SplitKey(0) + oldest_key_to_keep = carbonara.SplitKey(0, 0) # Rewrite all read-only splits just for fun (and compression). This # only happens if `previous_oldest_mutable_timestamp' exists, which @@ -319,8 +319,8 @@ class CarbonaraBasedStorage(storage.StorageDriver): # NOTE(jd) Rewrite it entirely for fun (and later for # compression). For that, we just pass None as split. 
self._store_timeserie_split( - metric, carbonara.SplitKey.from_key_string( - key, archive_policy_def.granularity), + metric, carbonara.SplitKey( + float(key), archive_policy_def.granularity), None, aggregation, archive_policy_def, oldest_mutable_timestamp) diff --git a/gnocchi/tests/test_carbonara.py b/gnocchi/tests/test_carbonara.py index 07d42db5..71fd64d0 100644 --- a/gnocchi/tests/test_carbonara.py +++ b/gnocchi/tests/test_carbonara.py @@ -1,6 +1,6 @@ # -*- encoding: utf-8 -*- # -# Copyright © 2014-2015 eNovance +# Copyright © 2014-2016 eNovance # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain @@ -20,6 +20,7 @@ import math import fixtures from oslo_utils import timeutils from oslotest import base +import pandas import six from gnocchi import carbonara @@ -1032,21 +1033,36 @@ class TestAggregatedTimeSerie(base.BaseTestCase): self.assertEqual( datetime.datetime(2014, 10, 7), carbonara.SplitKey.from_timestamp_and_sampling( - datetime.datetime(2015, 1, 1, 15, 3), 3600)) + datetime.datetime(2015, 1, 1, 15, 3), 3600).as_datetime()) self.assertEqual( datetime.datetime(2014, 12, 31, 18), carbonara.SplitKey.from_timestamp_and_sampling( - datetime.datetime(2015, 1, 1, 15, 3), 58)) + datetime.datetime(2015, 1, 1, 15, 3), 58).as_datetime()) + self.assertEqual( + 1420048800.0, + float(carbonara.SplitKey.from_timestamp_and_sampling( + datetime.datetime(2015, 1, 1, 15, 3), 58))) + + key = carbonara.SplitKey.from_timestamp_and_sampling( + datetime.datetime(2015, 1, 1, 15, 3), 3600) + + self.assertGreater(key, pandas.Timestamp(0)) + + self.assertGreaterEqual(key, pandas.Timestamp(0)) def test_split_key_next(self): self.assertEqual( datetime.datetime(2015, 3, 6), next(carbonara.SplitKey.from_timestamp_and_sampling( - datetime.datetime(2015, 1, 1, 15, 3), 3600))) + datetime.datetime(2015, 1, 1, 15, 3), 3600)).as_datetime()) self.assertEqual( datetime.datetime(2015, 8, 3), 
next(next(carbonara.SplitKey.from_timestamp_and_sampling( - datetime.datetime(2015, 1, 1, 15, 3), 3600)))) + datetime.datetime(2015, 1, 1, 15, 3), 3600))).as_datetime()) + self.assertEqual( + 113529600000.0, + float(next(carbonara.SplitKey.from_timestamp_and_sampling( + datetime.datetime(2015, 1, 1, 15, 3), 3600 * 24 * 365)))) def test_split(self): sampling = 5 @@ -1064,10 +1080,10 @@ class TestAggregatedTimeSerie(base.BaseTestCase): / carbonara.SplitKey.POINTS_PER_SPLIT), len(grouped_points)) self.assertEqual("0.0", - str(carbonara.SplitKey(grouped_points[0][0]))) + str(carbonara.SplitKey(grouped_points[0][0], 0))) # 3600 × 5s = 5 hours self.assertEqual(datetime.datetime(1970, 1, 1, 5), - grouped_points[1][0]) + grouped_points[1][0].as_datetime()) self.assertEqual(carbonara.SplitKey.POINTS_PER_SPLIT, len(grouped_points[0][1])) -- GitLab From 16a05dedfc505a31856d79e9a850606044fa5bc1 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Wed, 12 Oct 2016 11:03:37 +0200 Subject: [PATCH 0432/1483] Remove unused requests dependency This has been introduced for the Ceilometer dispatcher, but does not seem to be required anymore. 
Change-Id: Ic2d57524f33bf1f0e42c7134476c674b7fe142fa --- requirements.txt | 1 - 1 file changed, 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 5c83463c..ed07797a 100644 --- a/requirements.txt +++ b/requirements.txt @@ -13,7 +13,6 @@ pytimeparse>=1.1.5 futures jsonpatch cotyledon>=1.5.0 -requests six stevedore voluptuous -- GitLab From 0e453f27e11250a4ebb21b0d7e5a2af58a269f95 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Wed, 12 Oct 2016 14:17:37 +0200 Subject: [PATCH 0433/1483] test: allow to pass OS_DEBUG Change-Id: I916e94dff9b50c89406054e2934c1ab54b946a6f --- tox.ini | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tox.ini b/tox.ini index e4b682ef..6949277b 100644 --- a/tox.ini +++ b/tox.ini @@ -5,7 +5,7 @@ envlist = py{35,27},py{35,27}-{postgresql,mysql}{,-all,-file,-swift,-ceph,-s3},p [testenv] usedevelop = True sitepackages = False -passenv = LANG OS_TEST_TIMEOUT OS_STDOUT_CAPTURE OS_STDERR_CAPTURE OS_LOG_CAPTURE GNOCCHI_TEST_* AWS_* +passenv = LANG OS_DEBUG OS_TEST_TIMEOUT OS_STDOUT_CAPTURE OS_STDERR_CAPTURE OS_LOG_CAPTURE GNOCCHI_TEST_* AWS_* setenv = GNOCCHI_TEST_STORAGE_DRIVER=file GNOCCHI_TEST_INDEXER_DRIVER=postgresql -- GitLab From 4bc517028a96f8ae6f07c79397c487fe6014461f Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Mon, 10 Oct 2016 10:02:11 +0200 Subject: [PATCH 0434/1483] Remove pecan_debug option There's no need to have that option exposed to users, as they never need it. A developer who needs it can just add it as it fits. Though I never used it. 
Change-Id: I8568b59d8a27eb7b6d4d0e3b8e8080641968ffe9 --- gnocchi/opts.py | 3 --- gnocchi/rest/app.py | 1 - gnocchi/tests/gabbi/fixtures.py | 2 -- releasenotes/notes/pecan-debug-removed-1a9dbc4a0a6ad581.yaml | 3 +++ 4 files changed, 3 insertions(+), 6 deletions(-) create mode 100644 releasenotes/notes/pecan-debug-removed-1a9dbc4a0a6ad581.yaml diff --git a/gnocchi/opts.py b/gnocchi/opts.py index 54197ba3..54be4427 100644 --- a/gnocchi/opts.py +++ b/gnocchi/opts.py @@ -38,9 +38,6 @@ def list_opts(): cfg.StrOpt('paste_config', default='api-paste.ini', help='Path to API Paste configuration.'), - cfg.BoolOpt('pecan_debug', - default=False, - help='Toggle Pecan Debug Middleware.'), cfg.IntOpt('max_limit', default=1000, help=('The maximum number of items returned in a ' diff --git a/gnocchi/rest/app.py b/gnocchi/rest/app.py index f2f09404..927a7811 100644 --- a/gnocchi/rest/app.py +++ b/gnocchi/rest/app.py @@ -115,7 +115,6 @@ def load_app(conf, appname=None, indexer=None, storage=None, def _setup_app(root, conf, indexer, storage, not_implemented_middleware): app = pecan.make_app( root, - debug=conf.api.pecan_debug, hooks=(GnocchiHook(storage, indexer, conf),), guess_content_type_from_ext=False, custom_renderers={'json': OsloJSONRenderer}, diff --git a/gnocchi/tests/gabbi/fixtures.py b/gnocchi/tests/gabbi/fixtures.py index df83524f..1c891f36 100644 --- a/gnocchi/tests/gabbi/fixtures.py +++ b/gnocchi/tests/gabbi/fixtures.py @@ -108,8 +108,6 @@ class ConfigFixture(fixture.GabbiFixture): index.connect() index.upgrade(create_legacy_resource_types=True) - conf.set_override('pecan_debug', False, 'api') - # Set pagination to a testable value conf.set_override('max_limit', 7, 'api') diff --git a/releasenotes/notes/pecan-debug-removed-1a9dbc4a0a6ad581.yaml b/releasenotes/notes/pecan-debug-removed-1a9dbc4a0a6ad581.yaml new file mode 100644 index 00000000..9098b81f --- /dev/null +++ b/releasenotes/notes/pecan-debug-removed-1a9dbc4a0a6ad581.yaml @@ -0,0 +1,3 @@ +--- +upgrade: + - The 
api.pecan_debug has been removed. -- GitLab From 34beae67ee42ecab0a019e8620b85e23e0d840c8 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Wed, 12 Oct 2016 12:08:43 +0200 Subject: [PATCH 0435/1483] test: rewrite test_post_unix_timestamp in Gabbi The test is actually poorly written as it uses a string rather than a float. There's no reason that the test should pass, even if it does. The real test that we need passing is a float, not a string parseable with a float. Change-Id: I6bf9df329e434bd477612780544813d1107b51c1 --- gnocchi/tests/gabbi/gabbits/resource.yaml | 4 ++-- gnocchi/tests/test_rest.py | 10 ---------- 2 files changed, 2 insertions(+), 12 deletions(-) diff --git a/gnocchi/tests/gabbi/gabbits/resource.yaml b/gnocchi/tests/gabbi/gabbits/resource.yaml index e9ce2882..da3ab42b 100644 --- a/gnocchi/tests/gabbi/gabbits/resource.yaml +++ b/gnocchi/tests/gabbi/gabbits/resource.yaml @@ -827,7 +827,7 @@ tests: # Delete a batch of resources by attributes filter - name: create resource one - desc: before test batch delete, create some resources + desc: before test batch delete, create some resources using a float in started_at POST: /v1/resource/generic request_headers: x-user-id: 0fbb231484614b1a80131fc22f6afc9c @@ -835,7 +835,7 @@ tests: content-type: application/json data: id: f93450f2-aaaa-4d67-9985-02511241e7d1 - started_at: "2014-01-03T02:02:02.000000" + started_at: 1388714522.0 user_id: 0fbb231484614b1a80131fc22f6afc9c project_id: f3d41b770cc14f0bb94a1d5be9c0e3ea status: 201 diff --git a/gnocchi/tests/test_rest.py b/gnocchi/tests/test_rest.py index 83655530..e26e4a85 100644 --- a/gnocchi/tests/test_rest.py +++ b/gnocchi/tests/test_rest.py @@ -745,16 +745,6 @@ class ResourceTest(RestTest): self.assertIn("Resource %s already exists" % self.attributes['id'], result.text) - def test_post_unix_timestamp(self): - self.attributes['started_at'] = "1400580045.856219" - result = self.app.post_json( - "/v1/resource/" + self.resource_type, - params=self.attributes, - 
status=201) - resource = json.loads(result.text) - self.assertEqual(u"2014-05-20T10:00:45.856219+00:00", - resource['started_at']) - def test_post_invalid_timestamp(self): self.attributes['started_at'] = "2014-01-01 02:02:02" self.attributes['ended_at'] = "2013-01-01 02:02:02" -- GitLab From 537cd2992203771dede5be914305996fef7f5b27 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Tue, 11 Oct 2016 15:26:29 +0200 Subject: [PATCH 0436/1483] rest: fix Epoch timestamp parsing The recent change in 83f02996556af62d085ecf5bfe240440914fee8f leverages pandas to convert timestamps, but specifies unit as being nanoseconds. That broke the use case where the timestamps are sent as float representing the number of seconds after Epoch, which was supported. Change-Id: Ic9df7357b7006d2f592232e793ac12136e950f2e --- gnocchi/rest/__init__.py | 10 ++++- gnocchi/tests/gabbi/gabbits/metric.yaml | 57 ++++++++++++++----------- 2 files changed, 41 insertions(+), 26 deletions(-) diff --git a/gnocchi/rest/__init__.py b/gnocchi/rest/__init__.py index 77a56a4b..a8953eb1 100644 --- a/gnocchi/rest/__init__.py +++ b/gnocchi/rest/__init__.py @@ -15,6 +15,7 @@ # License for the specific language governing permissions and limitations # under the License. 
import itertools +import numbers import uuid from concurrent import futures @@ -413,9 +414,14 @@ class ArchivePolicyRulesController(rest.RestController): def MeasuresListSchema(measures): + timestamps = [ + float(i['timestamp']) * 10e8 + if isinstance(i['timestamp'], numbers.Real) + else i['timestamp'] + for i in measures + ] try: - times = pd.to_datetime([i['timestamp'] for i in measures], utc=True, - unit='ns', box=False) + times = pd.to_datetime(timestamps, utc=True, unit='ns', box=False) if np.any(times < np.datetime64('1970')): raise ValueError('Timestamp must be after Epoch') except ValueError as e: diff --git a/gnocchi/tests/gabbi/gabbits/metric.yaml b/gnocchi/tests/gabbi/gabbits/metric.yaml index 52dd36af..c76a805f 100644 --- a/gnocchi/tests/gabbi/gabbits/metric.yaml +++ b/gnocchi/tests/gabbi/gabbits/metric.yaml @@ -147,7 +147,8 @@ tests: - name: get valid metric id for bad timestamp GET: /v1/metric - status: 200 + response_json_paths: + $[0].archive_policy.name: cookies - name: push measurements to metric with bad timestamp POST: /v1/metric/$RESPONSE['$[0].id']/measures @@ -160,19 +161,47 @@ tests: - name: get valid metric id again GET: /v1/metric - status: 200 - - name: push measurements to metric + - name: push measurements to metric epoch format POST: /v1/metric/$RESPONSE['$[0].id']/measures request_headers: content-type: application/json data: - - timestamp: "2015-03-06T14:33:57" + - timestamp: 1425652437.0 value: 43.1 + status: 202 + + - name: get valid metric id again 2 + GET: /v1/metric + + - name: push measurements to metric + POST: /v1/metric/$RESPONSE['$[0].id']/measures + request_headers: + content-type: application/json + data: - timestamp: "2015-03-06T14:34:12" value: 12 status: 202 + - name: get valid metric id again 3 + GET: /v1/metric + + - name: get measurements by start + GET: /v1/metric/$RESPONSE['$[0].id']/measures?refresh=true&start=2015-03-06T14:34 + response_json_paths: + $: + - ["2015-03-06T14:34:12+00:00", 1.0, 12.0] + + - name: 
get valid metric id again 4 + GET: /v1/metric + + - name: get measurements from metric + GET: /v1/metric/$RESPONSE['$[0].id']/measures?refresh=true + response_json_paths: + $: + - ["2015-03-06T14:33:57+00:00", 1.0, 43.1] + - ["2015-03-06T14:34:12+00:00", 1.0, 12.0] + - name: create valid metric two POST: /v1/metric request_headers: @@ -226,26 +255,6 @@ tests: x-project-id: bar GET: /v1/metric - - name: get metric list - GET: /v1/metric - status: 200 - response_json_paths: - $[0].archive_policy.name: cookies - - - name: get measurements from metric - GET: /v1/metric/$RESPONSE['$[0].id']/measures - status: 200 - - - name: get metric list for start test - GET: /v1/metric - status: 200 - response_json_paths: - $[0].archive_policy.name: cookies - - - name: get measurements by start - GET: /v1/metric/$RESPONSE['$[0].id']/measures?start=2015-03-06T14:33:57 - status: 200 - - name: get measures unknown metric GET: /v1/metric/fake/measures status: 404 -- GitLab From 6430afbdad15ce9ba6389602777fe5bf0c2c3bb8 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Tue, 11 Oct 2016 18:58:48 +0200 Subject: [PATCH 0437/1483] Unify timestamp parsing This unifies all timestamp parsing across Gnocchi. It introduces a new function to_timestamps() which is responsible for parsing an array of anything into an array of numpy.datetime64. This array can then be converted to datetime.datetime object (e.g. for using in the indexer which supports only microsecond precision) or kept as is for storage (which supports nanoseconds). This also include a test plan for making sure we support relative timestamps even in measures sending (e.g. "-1 minute"). 
Change-Id: I394a9c14929435504ad6eaa6ce7016fb02f5b26e --- gnocchi/indexer/sqlalchemy.py | 2 +- gnocchi/json.py | 7 ++- gnocchi/rest/__init__.py | 36 +++--------- .../gabbits/metric-timestamp-format.yaml | 34 ++++++++++++ gnocchi/tests/test_rest.py | 6 +- gnocchi/utils.py | 55 ++++++++++++++----- 6 files changed, 93 insertions(+), 47 deletions(-) create mode 100644 gnocchi/tests/gabbi/gabbits/metric-timestamp-format.yaml diff --git a/gnocchi/indexer/sqlalchemy.py b/gnocchi/indexer/sqlalchemy.py index dfe13971..bb76185d 100644 --- a/gnocchi/indexer/sqlalchemy.py +++ b/gnocchi/indexer/sqlalchemy.py @@ -1166,7 +1166,7 @@ class QueryTransformer(object): converter = None if isinstance(attr.type, base.TimestampUTC): - converter = utils.to_timestamp + converter = utils.to_datetime elif (isinstance(attr.type, sqlalchemy_utils.UUIDType) and not isinstance(value, uuid.UUID)): converter = utils.ResourceUUID diff --git a/gnocchi/json.py b/gnocchi/json.py index 2dfb2180..63b63c70 100644 --- a/gnocchi/json.py +++ b/gnocchi/json.py @@ -1,6 +1,6 @@ # -*- encoding: utf-8 -*- # -# Copyright © 2015 Red Hat +# Copyright © 2015-2016 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain @@ -15,6 +15,7 @@ # under the License. 
import datetime +import numpy from oslo_serialization import jsonutils @@ -26,6 +27,9 @@ def _to_primitive(value, *args, **kwargs): # https://review.openstack.org/#/c/166861/ if isinstance(value, datetime.datetime): return value.isoformat() + if isinstance(value, numpy.datetime64): + # Do not include nanoseconds if null + return str(value).rpartition(".000000000")[0] + "+00:00" # This mimics what Pecan implements in its default JSON encoder if hasattr(value, "jsonify"): return _to_primitive(value.jsonify(), *args, **kwargs) @@ -43,6 +47,7 @@ def to_primitive(*args, **kwargs): def dumps(obj, *args, **kwargs): return jsonutils.dumps(obj, default=to_primitive) + # For convenience loads = jsonutils.loads load = jsonutils.load diff --git a/gnocchi/rest/__init__.py b/gnocchi/rest/__init__.py index a8953eb1..8df70505 100644 --- a/gnocchi/rest/__init__.py +++ b/gnocchi/rest/__init__.py @@ -15,14 +15,11 @@ # License for the specific language governing permissions and limitations # under the License. 
import itertools -import numbers import uuid from concurrent import futures import jsonpatch -import numpy as np from oslo_utils import strutils -import pandas as pd import pecan from pecan import rest import pyparsing @@ -177,13 +174,6 @@ def deserialize_and_validate(schema, required=True, abort(400, "Invalid input: %s" % e) -def Timestamp(v): - t = utils.to_timestamp(v) - if t < utils.unix_universal_start: - raise ValueError("Timestamp must be after Epoch") - return t - - def PositiveOrNullInt(value): value = int(value) if value < 0: @@ -414,16 +404,8 @@ class ArchivePolicyRulesController(rest.RestController): def MeasuresListSchema(measures): - timestamps = [ - float(i['timestamp']) * 10e8 - if isinstance(i['timestamp'], numbers.Real) - else i['timestamp'] - for i in measures - ] try: - times = pd.to_datetime(timestamps, utc=True, unit='ns', box=False) - if np.any(times < np.datetime64('1970')): - raise ValueError('Timestamp must be after Epoch') + times = utils.to_timestamps((m['timestamp'] for m in measures)) except ValueError as e: abort(400, "Invalid input for timestamp: %s" % e) @@ -482,13 +464,13 @@ class MetricController(rest.RestController): if start is not None: try: - start = Timestamp(start) + start = utils.to_datetime(start) except Exception: abort(400, "Invalid value for start") if stop is not None: try: - stop = Timestamp(stop) + stop = utils.to_datetime(stop) except Exception: abort(400, "Invalid value for stop") @@ -908,8 +890,8 @@ class ResourceTypesController(rest.RestController): def ResourceSchema(schema): base_schema = { - voluptuous.Optional('started_at'): Timestamp, - voluptuous.Optional('ended_at'): Timestamp, + voluptuous.Optional('started_at'): utils.to_datetime, + voluptuous.Optional('ended_at'): utils.to_datetime, voluptuous.Optional('user_id'): voluptuous.Any(None, six.text_type), voluptuous.Optional('project_id'): voluptuous.Any(None, six.text_type), voluptuous.Optional('metrics'): MetricsSchema, @@ -1371,13 +1353,13 @@ class 
SearchMetricController(rest.RestController): if start is not None: try: - start = Timestamp(start) + start = utils.to_datetime(start) except Exception: abort(400, "Invalid value for start") if stop is not None: try: - stop = Timestamp(stop) + stop = utils.to_datetime(stop) except Exception: abort(400, "Invalid value for stop") @@ -1559,13 +1541,13 @@ class AggregationController(rest.RestController): if start is not None: try: - start = Timestamp(start) + start = utils.to_datetime(start) except Exception: abort(400, "Invalid value for start") if stop is not None: try: - stop = Timestamp(stop) + stop = utils.to_datetime(stop) except Exception: abort(400, "Invalid value for stop") diff --git a/gnocchi/tests/gabbi/gabbits/metric-timestamp-format.yaml b/gnocchi/tests/gabbi/gabbits/metric-timestamp-format.yaml new file mode 100644 index 00000000..cdfaa677 --- /dev/null +++ b/gnocchi/tests/gabbi/gabbits/metric-timestamp-format.yaml @@ -0,0 +1,34 @@ +fixtures: + - ConfigFixture + +tests: + - name: create archive policy + desc: for later use + POST: /v1/archive_policy + request_headers: + content-type: application/json + x-roles: admin + data: + name: cookies + definition: + - granularity: 1 second + status: 201 + + - name: create metric + POST: /v1/metric + request_headers: + content-type: application/json + data: + archive_policy_name: cookies + status: 201 + response_json_paths: + $.archive_policy_name: cookies + + - name: push measurements to metric with relative timestamp + POST: /v1/metric/$RESPONSE['$.id']/measures + request_headers: + content-type: application/json + data: + - timestamp: "-5 minutes" + value: 43.1 + status: 202 diff --git a/gnocchi/tests/test_rest.py b/gnocchi/tests/test_rest.py index e26e4a85..3fb645c5 100644 --- a/gnocchi/tests/test_rest.py +++ b/gnocchi/tests/test_rest.py @@ -444,10 +444,10 @@ class MetricTest(RestTest): self.app.get("/v1/metric/%s/measures" % metric['id'], status=403) - @mock.patch.object(utils, 'utcnow') + 
@mock.patch.object(timeutils, 'utcnow') def test_get_measure_start_relative(self, utcnow): """Make sure the timestamps can be relative to now.""" - utcnow.return_value = utils.datetime_utc(2014, 1, 1, 10, 23) + utcnow.return_value = datetime.datetime(2014, 1, 1, 10, 23) result = self.app.post_json("/v1/metric", params={"archive_policy_name": "high"}) metric = json.loads(result.text) @@ -459,7 +459,7 @@ class MetricTest(RestTest): % metric['id'], status=200) result = json.loads(ret.text) - now = utils.utcnow() + now = utils.datetime_utc(2014, 1, 1, 10, 23) self.assertEqual([ ['2014-01-01T10:00:00+00:00', 3600.0, 1234.2], [(now diff --git a/gnocchi/utils.py b/gnocchi/utils.py index 25a30b31..ff5981ac 100644 --- a/gnocchi/utils.py +++ b/gnocchi/utils.py @@ -16,9 +16,12 @@ import datetime import itertools import multiprocessing +import numbers import iso8601 +import numpy from oslo_utils import timeutils +import pandas from pytimeparse import timeparse import six import tenacity @@ -63,22 +66,44 @@ retry = tenacity.retry( reraise=True) -def to_timestamp(v): - if isinstance(v, datetime.datetime): - return v - try: - v = float(v) - except (ValueError, TypeError): - v = six.text_type(v) - try: - return timeutils.parse_isotime(v) - except ValueError: +unix_universal_start64 = numpy.datetime64("1970") + + +def to_timestamps(values): + timestamps = [] + for v in values: + if isinstance(v, numbers.Real): + timestamps.append(float(v) * 10e8) + elif isinstance(v, datetime.datetime): + timestamps.append(v) + else: delta = timeparse.timeparse(v) - if delta is None: - raise ValueError("Unable to parse timestamp %s" % v) - return utcnow() + datetime.timedelta(seconds=delta) - return datetime.datetime.utcfromtimestamp(v).replace( - tzinfo=iso8601.iso8601.UTC) + timestamps.append(v + if delta is None + else numpy.datetime64(timeutils.utcnow()) + + numpy.timedelta64(delta)) + try: + times = pandas.to_datetime(timestamps, utc=True, box=False) + except ValueError: + raise 
ValueError("Unable to convert timestamps") + + if (times < unix_universal_start64).any(): + raise ValueError('Timestamp must be after Epoch') + + return times + + +def to_timestamp(value): + return to_timestamps((value,))[0] + + +def to_datetime(value): + return timestamp_to_datetime(to_timestamp(value)) + + +def timestamp_to_datetime(v): + return datetime.datetime.utcfromtimestamp( + v.astype(float) / 10e8).replace(tzinfo=iso8601.iso8601.UTC) def to_timespan(value): -- GitLab From 37d8feab1a372e7ffa71debbb42aedbc6b80cec1 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Wed, 12 Oct 2016 21:40:45 +0200 Subject: [PATCH 0438/1483] Fix typo in release note file Change-Id: Iea81bb104e26dcbf35d2154991da1f55623f41fa Signed-off-by: Julien Danjou --- ...b1238.yaml => add-parameter-granularity-7f22c677dc1b1238.yaml} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename releasenotes/notes/{add-paramter-granularity-7f22c677dc1b1238.yaml => add-parameter-granularity-7f22c677dc1b1238.yaml} (100%) diff --git a/releasenotes/notes/add-paramter-granularity-7f22c677dc1b1238.yaml b/releasenotes/notes/add-parameter-granularity-7f22c677dc1b1238.yaml similarity index 100% rename from releasenotes/notes/add-paramter-granularity-7f22c677dc1b1238.yaml rename to releasenotes/notes/add-parameter-granularity-7f22c677dc1b1238.yaml -- GitLab From 67cdbb737ae8438a535640ebefecdaaa0bcbfe63 Mon Sep 17 00:00:00 2001 From: Juan Antonio Osorio Robles Date: Mon, 10 Oct 2016 09:16:45 +0300 Subject: [PATCH 0439/1483] Add http_proxy_to_wsgi to api-paste This sets up the HTTPProxyToWSGI middleware in front of Gnocchi. The purpose of thise middleware is to set up the request URL correctly in case there is a proxy (For instance, a loadbalancer such as HAProxy) in front of Gnocchi. 
So, for instance, when TLS connections are being terminated in the proxy, and one tries to get the versions from the / resource of Gnocchi, one will notice that the protocol is incorrect; It will show 'http' instead of 'https'. So this middleware handles such cases. Thus helping Keystone discovery work correctly. The HTTPProxyToWSGI is off by default and needs to be enabled via a configuration value. Change-Id: Ic5526cf37e70335fa2cc70946a271253f227f129 Closes-Bug: #1590608 (cherry picked from commit 2b1ed8d4528df8c3071343d1cd5764b6a7122fd1) --- etc/gnocchi/api-paste.ini | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) diff --git a/etc/gnocchi/api-paste.ini b/etc/gnocchi/api-paste.ini index cb7becb0..ad56b17e 100644 --- a/etc/gnocchi/api-paste.ini +++ b/etc/gnocchi/api-paste.ini @@ -4,19 +4,22 @@ pipeline = gnocchi+noauth [composite:gnocchi+noauth] use = egg:Paste#urlmap -/ = gnocchiversions +/ = gnocchiversions_pipeline /v1 = gnocchiv1+noauth [composite:gnocchi+auth] use = egg:Paste#urlmap -/ = gnocchiversions +/ = gnocchiversions_pipeline /v1 = gnocchiv1+auth [pipeline:gnocchiv1+noauth] -pipeline = gnocchiv1 +pipeline = http_proxy_to_wsgi gnocchiv1 [pipeline:gnocchiv1+auth] -pipeline = keystone_authtoken gnocchiv1 +pipeline = http_proxy_to_wsgi keystone_authtoken gnocchiv1 + +[pipeline:gnocchiversions_pipeline] +pipeline = http_proxy_to_wsgi gnocchiversions [app:gnocchiversions] paste.app_factory = gnocchi.rest.app:app_factory @@ -29,3 +32,7 @@ root = gnocchi.rest.V1Controller [filter:keystone_authtoken] paste.filter_factory = keystonemiddleware.auth_token:filter_factory oslo_config_project = gnocchi + +[filter:http_proxy_to_wsgi] +paste.filter_factory = oslo_middleware.http_proxy_to_wsgi:HTTPProxyToWSGI.factory +oslo_config_project = gnocchi -- GitLab From 839316ac17f79e2dbcc81d9abbb9e54f8e67bcbe Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Thu, 13 Oct 2016 11:09:32 +0200 Subject: [PATCH 0440/1483] doc: add s3 to the list of Carbonara 
based drivers Change-Id: I609e246bfcef1341efdbac2fda7a92fcb27e4444 Signed-off-by: Julien Danjou --- doc/source/configuration.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/source/configuration.rst b/doc/source/configuration.rst index 9dce1a8f..76b98624 100644 --- a/doc/source/configuration.rst +++ b/doc/source/configuration.rst @@ -79,8 +79,8 @@ edit the `api-paste.ini` file to add the Keystone authentication middleware:: Driver notes ============ -Carbonara based drivers (file, swift, ceph) -------------------------------------------- +Carbonara based drivers (file, swift, ceph, s3) +----------------------------------------------- To ensure consistency across all *gnocchi-api* and *gnocchi-metricd* workers, these drivers need a distributed locking mechanism. This is provided by the -- GitLab From e7d8fafa77b0cef20b653020425c15be76e389b5 Mon Sep 17 00:00:00 2001 From: Ali Shariat Date: Fri, 14 Oct 2016 14:48:38 -0400 Subject: [PATCH 0441/1483] rest: using ujson to deserialize use faster ujson to deserialize the json data in the rest api. 
when loading in 20metrics x 720points each: - native json lib takes 0.105s - ujson lib takes 0.00758s Change-Id: I0f545a14237ae090833a30a5530f5c6d1e4587c3 Closes-Bug: #1625654 --- gnocchi/json.py | 5 +++-- gnocchi/rest/__init__.py | 3 +-- requirements.txt | 1 + 3 files changed, 5 insertions(+), 4 deletions(-) diff --git a/gnocchi/json.py b/gnocchi/json.py index 2dfb2180..8b3c93f7 100644 --- a/gnocchi/json.py +++ b/gnocchi/json.py @@ -16,6 +16,7 @@ import datetime from oslo_serialization import jsonutils +import ujson _ORIG_TP = jsonutils.to_primitive @@ -44,5 +45,5 @@ def dumps(obj, *args, **kwargs): return jsonutils.dumps(obj, default=to_primitive) # For convenience -loads = jsonutils.loads -load = jsonutils.load +loads = ujson.loads +load = ujson.load diff --git a/gnocchi/rest/__init__.py b/gnocchi/rest/__init__.py index 77a56a4b..37a19c74 100644 --- a/gnocchi/rest/__init__.py +++ b/gnocchi/rest/__init__.py @@ -160,8 +160,7 @@ def deserialize(expected_content_types=None): if mime_type not in expected_content_types: abort(415) try: - params = json.load(pecan.request.body_file_raw, - encoding=options.get('charset', 'ascii')) + params = json.load(pecan.request.body_file_raw) except Exception as e: abort(400, "Unable to decode body: " + six.text_type(e)) return params diff --git a/requirements.txt b/requirements.txt index ed07797a..725e54cf 100644 --- a/requirements.txt +++ b/requirements.txt @@ -15,6 +15,7 @@ jsonpatch cotyledon>=1.5.0 six stevedore +ujson voluptuous werkzeug trollius; python_version < '3.4' -- GitLab From b04b37010f76d90a02073817b2c24b7fc8092672 Mon Sep 17 00:00:00 2001 From: Hanxi Liu Date: Mon, 17 Oct 2016 16:37:44 +0800 Subject: [PATCH 0442/1483] Fix incorrect EXTRA_FLAVOR in plugin.sh Change-Id: I881e49abacd554b387d4fc156262f00220b64de0 --- devstack/plugin.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/devstack/plugin.sh b/devstack/plugin.sh index ff9db54a..9fca9b4b 100644 --- a/devstack/plugin.sh +++ b/devstack/plugin.sh 
@@ -390,7 +390,7 @@ function install_gnocchi { install_gnocchiclient - [ "$GNOCCHI_USE_KEYSTONE" == "True" ] && EXTRA_FLAVOR=,keystonemiddleware + [ "$GNOCCHI_USE_KEYSTONE" == "True" ] && EXTRA_FLAVOR=,keystone # We don't use setup_package because we don't follow openstack/requirements sudo -H pip install -e "$GNOCCHI_DIR"[test,$GNOCCHI_STORAGE_BACKEND,${DATABASE_TYPE}${EXTRA_FLAVOR}] -- GitLab From 175d8bd3bf6453a1935fc08972199b7d96eec82d Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Thu, 6 Oct 2016 23:08:25 +0200 Subject: [PATCH 0443/1483] carbonara: fix SplitKey with datetime greater than 32bits value Current implementation based on pandas.Timestamp can't handle keys that go further than 2^32 seconds after epoch, which makes e.g. archive policies with very high granularity failing. Change-Id: Idb81345544cc25e36447473e5115d9d856766c83 (cherry picked from commit 9a95873f4016792bd9c53aac0df93824375a127b) --- gnocchi/carbonara.py | 67 ++++++++++++++++++++------------- gnocchi/storage/_carbonara.py | 8 ++-- gnocchi/tests/test_carbonara.py | 29 ++++++++++---- 3 files changed, 67 insertions(+), 37 deletions(-) diff --git a/gnocchi/carbonara.py b/gnocchi/carbonara.py index ee0f5330..b0af065c 100644 --- a/gnocchi/carbonara.py +++ b/gnocchi/carbonara.py @@ -27,13 +27,10 @@ import re import struct import time -import iso8601 import lz4 import pandas import six -from gnocchi import utils - # NOTE(sileht): pandas relies on time.strptime() # and often triggers http://bugs.python.org/issue7980 # its dues to our heavy threads usage, this is the workaround @@ -325,7 +322,8 @@ class BoundTimeSerie(TimeSerie): self.ts = self.ts[self.first_block_timestamp():] -class SplitKey(pandas.Timestamp): +@functools.total_ordering +class SplitKey(object): """A class representing a split key. 
A split key is basically a timestamp that can be used to split @@ -336,33 +334,30 @@ class SplitKey(pandas.Timestamp): POINTS_PER_SPLIT = 3600 - @classmethod - def _init(cls, value, sampling): - # NOTE(jd) This should be __init__ but it does not work, because of… - # Pandas, Cython, whatever. - self = cls(value) - self._carbonara_sampling = sampling - return self + def __init__(self, value, sampling): + if isinstance(value, SplitKey): + self.key = value.key + elif isinstance(value, pandas.Timestamp): + self.key = value.value / 10e8 + else: + self.key = float(value) + + self._carbonara_sampling = float(sampling) @classmethod def from_timestamp_and_sampling(cls, timestamp, sampling): - return cls._init( + return cls( round_timestamp( timestamp, freq=sampling * cls.POINTS_PER_SPLIT * 10e8), sampling) - @classmethod - def from_key_string(cls, keystr, sampling): - return cls._init(float(keystr) * 10e8, sampling) - def __next__(self): """Get the split key of the next split. :return: A `SplitKey` object. 
""" - return self._init( - self + datetime.timedelta( - seconds=(self.POINTS_PER_SPLIT * self._carbonara_sampling)), + return self.__class__( + self.key + self._carbonara_sampling * self.POINTS_PER_SPLIT, self._carbonara_sampling) next = __next__ @@ -370,18 +365,35 @@ class SplitKey(pandas.Timestamp): def __iter__(self): return self + def __hash__(self): + return hash(self.key) + + def __lt__(self, other): + if isinstance(other, SplitKey): + return self.key < other.key + if isinstance(other, pandas.Timestamp): + return self.key * 10e8 < other.value + return self.key < other + + def __eq__(self, other): + if isinstance(other, SplitKey): + return self.key == other.key + if isinstance(other, pandas.Timestamp): + return self.key * 10e8 == other.value + return self.key == other + def __str__(self): return str(float(self)) def __float__(self): - ts = self.to_datetime() - if ts.tzinfo is None: - ts = ts.replace(tzinfo=iso8601.iso8601.UTC) - return utils.datetime_to_unix(ts) + return self.key + + def as_datetime(self): + return pandas.Timestamp(self.key, unit='s') def __repr__(self): return "<%s: %s / %fs>" % (self.__class__.__name__, - pandas.Timestamp.__repr__(self), + repr(self.key), self._carbonara_sampling) @@ -436,7 +448,7 @@ class AggregatedTimeSerie(TimeSerie): groupby = self.ts.groupby(functools.partial( SplitKey.from_timestamp_and_sampling, sampling=self.sampling)) for group, ts in groupby: - yield (SplitKey._init(group, self.sampling), + yield (SplitKey(group, self.sampling), AggregatedTimeSerie(self.sampling, self.aggregation_method, ts)) @@ -544,7 +556,10 @@ class AggregatedTimeSerie(TimeSerie): if not self.ts.index.is_monotonic: self.ts = self.ts.sort_index() offset_div = self.sampling * 10e8 - start = pandas.Timestamp(start).value + if isinstance(start, SplitKey): + start = start.as_datetime().value + else: + start = pandas.Timestamp(start).value # calculate how many seconds from start the series runs until and # initialize list to store alternating 
delimiter, float entries if compressed: diff --git a/gnocchi/storage/_carbonara.py b/gnocchi/storage/_carbonara.py index 065b7870..372800c8 100644 --- a/gnocchi/storage/_carbonara.py +++ b/gnocchi/storage/_carbonara.py @@ -239,7 +239,7 @@ class CarbonaraBasedStorage(storage.StorageDriver): oldest_mutable_timestamp): # NOTE(jd) We write the full split only if the driver works that way # (self.WRITE_FULL) or if the oldest_mutable_timestamp is out of range. - write_full = self.WRITE_FULL or oldest_mutable_timestamp >= next(key) + write_full = self.WRITE_FULL or next(key) < oldest_mutable_timestamp key_as_str = str(key) if write_full: try: @@ -301,7 +301,7 @@ class CarbonaraBasedStorage(storage.StorageDriver): archive_policy_def.granularity) existing_keys.remove(key) else: - oldest_key_to_keep = carbonara.SplitKey(0) + oldest_key_to_keep = carbonara.SplitKey(0, 0) # Rewrite all read-only splits just for fun (and compression). This # only happens if `previous_oldest_mutable_timestamp' exists, which @@ -319,8 +319,8 @@ class CarbonaraBasedStorage(storage.StorageDriver): # NOTE(jd) Rewrite it entirely for fun (and later for # compression). For that, we just pass None as split. self._store_timeserie_split( - metric, carbonara.SplitKey.from_key_string( - key, archive_policy_def.granularity), + metric, carbonara.SplitKey( + float(key), archive_policy_def.granularity), None, aggregation, archive_policy_def, oldest_mutable_timestamp) diff --git a/gnocchi/tests/test_carbonara.py b/gnocchi/tests/test_carbonara.py index 3b06b2a1..647e407f 100644 --- a/gnocchi/tests/test_carbonara.py +++ b/gnocchi/tests/test_carbonara.py @@ -1,6 +1,6 @@ # -*- encoding: utf-8 -*- # -# Copyright © 2014-2015 eNovance +# Copyright © 2014-2016 eNovance # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain @@ -932,21 +932,36 @@ class TestAggregatedTimeSerie(base.BaseTestCase): self.assertEqual( datetime.datetime(2014, 10, 7), carbonara.SplitKey.from_timestamp_and_sampling( - datetime.datetime(2015, 1, 1, 15, 3), 3600)) + datetime.datetime(2015, 1, 1, 15, 3), 3600).as_datetime()) self.assertEqual( datetime.datetime(2014, 12, 31, 18), carbonara.SplitKey.from_timestamp_and_sampling( - datetime.datetime(2015, 1, 1, 15, 3), 58)) + datetime.datetime(2015, 1, 1, 15, 3), 58).as_datetime()) + self.assertEqual( + 1420048800.0, + float(carbonara.SplitKey.from_timestamp_and_sampling( + datetime.datetime(2015, 1, 1, 15, 3), 58))) + + key = carbonara.SplitKey.from_timestamp_and_sampling( + datetime.datetime(2015, 1, 1, 15, 3), 3600) + + self.assertGreater(key, pandas.Timestamp(0)) + + self.assertGreaterEqual(key, pandas.Timestamp(0)) def test_split_key_next(self): self.assertEqual( datetime.datetime(2015, 3, 6), next(carbonara.SplitKey.from_timestamp_and_sampling( - datetime.datetime(2015, 1, 1, 15, 3), 3600))) + datetime.datetime(2015, 1, 1, 15, 3), 3600)).as_datetime()) self.assertEqual( datetime.datetime(2015, 8, 3), next(next(carbonara.SplitKey.from_timestamp_and_sampling( - datetime.datetime(2015, 1, 1, 15, 3), 3600)))) + datetime.datetime(2015, 1, 1, 15, 3), 3600))).as_datetime()) + self.assertEqual( + 113529600000.0, + float(next(carbonara.SplitKey.from_timestamp_and_sampling( + datetime.datetime(2015, 1, 1, 15, 3), 3600 * 24 * 365)))) def test_split(self): sampling = 5 @@ -964,10 +979,10 @@ class TestAggregatedTimeSerie(base.BaseTestCase): / carbonara.SplitKey.POINTS_PER_SPLIT), len(grouped_points)) self.assertEqual("0.0", - str(carbonara.SplitKey(grouped_points[0][0]))) + str(carbonara.SplitKey(grouped_points[0][0], 0))) # 3600 × 5s = 5 hours self.assertEqual(datetime.datetime(1970, 1, 1, 5), - grouped_points[1][0]) + grouped_points[1][0].as_datetime()) self.assertEqual(carbonara.SplitKey.POINTS_PER_SPLIT, len(grouped_points[0][1])) -- GitLab From 
a44d88f594e0661c7435e941c75795409c252d43 Mon Sep 17 00:00:00 2001 From: Hanxi Liu Date: Mon, 17 Oct 2016 16:37:44 +0800 Subject: [PATCH 0444/1483] Fix incorrect EXTRA_FLAVOR in plugin.sh Change-Id: I881e49abacd554b387d4fc156262f00220b64de0 --- devstack/plugin.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/devstack/plugin.sh b/devstack/plugin.sh index ae1ba4d7..72883b39 100644 --- a/devstack/plugin.sh +++ b/devstack/plugin.sh @@ -390,7 +390,7 @@ function install_gnocchi { install_gnocchiclient - [ "$GNOCCHI_USE_KEYSTONE" == "True" ] && EXTRA_FLAVOR=,keystonemiddleware + [ "$GNOCCHI_USE_KEYSTONE" == "True" ] && EXTRA_FLAVOR=,keystone # We don't use setup_package because we don't follow openstack/requirements sudo -H pip install -e "$GNOCCHI_DIR"[test,$GNOCCHI_STORAGE_BACKEND,${DATABASE_TYPE}${EXTRA_FLAVOR}] -- GitLab From 06fcd34c73fe4a7f8157ac1d3502cf89a74601f8 Mon Sep 17 00:00:00 2001 From: gord chung Date: Mon, 17 Oct 2016 21:44:55 +0000 Subject: [PATCH 0445/1483] devstack: stop all gnocchi services, not just api Change-Id: I666ce4f24d81f6f6c140ec7021d105ff951a88e3 --- devstack/plugin.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/devstack/plugin.sh b/devstack/plugin.sh index 9fca9b4b..336fe719 100644 --- a/devstack/plugin.sh +++ b/devstack/plugin.sh @@ -457,7 +457,7 @@ function stop_gnocchi { restart_apache_server fi # Kill the gnocchi screen windows - for serv in gnocchi-api; do + for serv in gnocchi-api gnocchi-metricd gnocchi-statsd; do stop_process $serv done } -- GitLab From f13cd69e390c4eb60cf4add9f769f17a143ec135 Mon Sep 17 00:00:00 2001 From: gord chung Date: Mon, 17 Oct 2016 21:44:55 +0000 Subject: [PATCH 0446/1483] devstack: stop all gnocchi services, not just api Change-Id: I666ce4f24d81f6f6c140ec7021d105ff951a88e3 --- devstack/plugin.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/devstack/plugin.sh b/devstack/plugin.sh index 72883b39..63dd0500 100644 --- a/devstack/plugin.sh +++ 
b/devstack/plugin.sh @@ -457,7 +457,7 @@ function stop_gnocchi { restart_apache_server fi # Kill the gnocchi screen windows - for serv in gnocchi-api; do + for serv in gnocchi-api gnocchi-metricd gnocchi-statsd; do stop_process $serv done } -- GitLab From 4637aeb2d3aae4dd4b980612f9728b002a9a6de5 Mon Sep 17 00:00:00 2001 From: gord chung Date: Wed, 19 Oct 2016 21:23:42 +0000 Subject: [PATCH 0447/1483] accommodate new oslo.config new oslo.config enforces type. it should give warning but we uuid type doesn't give ValueError or TypeError, which are caught. rather it gives AttributeError Change-Id: Ib8fe3f0e6f37f2ca5c0785e73b2c770b71fac9e6 --- gnocchi/tests/test_statsd.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/gnocchi/tests/test_statsd.py b/gnocchi/tests/test_statsd.py index 7531e25c..6d6cb790 100644 --- a/gnocchi/tests/test_statsd.py +++ b/gnocchi/tests/test_statsd.py @@ -34,7 +34,7 @@ class TestStatsd(tests_base.TestCase): super(TestStatsd, self).setUp() self.conf.set_override("resource_id", - uuid.uuid4(), "statsd") + str(uuid.uuid4()), "statsd") self.conf.set_override("user_id", self.STATSD_USER_ID, "statsd") self.conf.set_override("project_id", -- GitLab From aab337a912a6a13d9bd48eadd32251d7d997a9fd Mon Sep 17 00:00:00 2001 From: gord chung Date: Wed, 19 Oct 2016 21:23:42 +0000 Subject: [PATCH 0448/1483] accommodate new oslo.config new oslo.config enforces type. it should give warning but we uuid type doesn't give ValueError or TypeError, which are caught. 
rather it gives AttributeError Change-Id: Ib8fe3f0e6f37f2ca5c0785e73b2c770b71fac9e6 --- gnocchi/tests/test_statsd.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/gnocchi/tests/test_statsd.py b/gnocchi/tests/test_statsd.py index 7531e25c..6d6cb790 100644 --- a/gnocchi/tests/test_statsd.py +++ b/gnocchi/tests/test_statsd.py @@ -34,7 +34,7 @@ class TestStatsd(tests_base.TestCase): super(TestStatsd, self).setUp() self.conf.set_override("resource_id", - uuid.uuid4(), "statsd") + str(uuid.uuid4()), "statsd") self.conf.set_override("user_id", self.STATSD_USER_ID, "statsd") self.conf.set_override("project_id", -- GitLab From 2ba63d4947f47d7a675d0b84a6a49422c5256568 Mon Sep 17 00:00:00 2001 From: Hanxi Liu Date: Thu, 20 Oct 2016 15:11:42 +0800 Subject: [PATCH 0449/1483] Update doc because default services are all being added to settings Change-Id: Ib69d05c9f820af89b382444efd2a5ff2783708c5 --- doc/source/install.rst | 1 - 1 file changed, 1 deletion(-) diff --git a/doc/source/install.rst b/doc/source/install.rst index 31af07bf..cf927029 100644 --- a/doc/source/install.rst +++ b/doc/source/install.rst @@ -10,7 +10,6 @@ To enable Gnocchi in devstack, add the following to local.conf: :: enable_plugin gnocchi https://github.com/openstack/gnocchi master - enable_service gnocchi-api,gnocchi-metricd To enable Grafana support in devstack, you can also enable `gnocchi-grafana`:: -- GitLab From 386c163d931154809e616a3b1b83844612a16b09 Mon Sep 17 00:00:00 2001 From: Hanxi Liu Date: Thu, 20 Oct 2016 15:11:42 +0800 Subject: [PATCH 0450/1483] Update doc because default services are all being added to settings Change-Id: Ib69d05c9f820af89b382444efd2a5ff2783708c5 --- doc/source/install.rst | 1 - 1 file changed, 1 deletion(-) diff --git a/doc/source/install.rst b/doc/source/install.rst index 72a64c86..91a5c142 100644 --- a/doc/source/install.rst +++ b/doc/source/install.rst @@ -10,7 +10,6 @@ To enable Gnocchi in devstack, add the following to local.conf: :: enable_plugin 
gnocchi https://github.com/openstack/gnocchi master - enable_service gnocchi-api,gnocchi-metricd To enable Grafana support in devstack, you can also enable `gnocchi-grafana`:: -- GitLab From e1ab93cb0c7d6debb9e5834268124d5f675ea25d Mon Sep 17 00:00:00 2001 From: Hanxi Liu Date: Thu, 20 Oct 2016 16:51:51 +0800 Subject: [PATCH 0451/1483] json: remove outdated comment The patch that is referred in the comment was merged, and then reverted. It did not please oslo.serialization as it broke the format they were using. So we need to carry this forever in Gnocchi: let's remove the comment. Change-Id: I1f647e764c3928976e5a1c7ff9d32174192cbbe7 --- gnocchi/json.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/gnocchi/json.py b/gnocchi/json.py index 8b3c93f7..47859c16 100644 --- a/gnocchi/json.py +++ b/gnocchi/json.py @@ -23,8 +23,6 @@ _ORIG_TP = jsonutils.to_primitive def _to_primitive(value, *args, **kwargs): - # TODO(jd): Remove that once oslo.serialization is released with - # https://review.openstack.org/#/c/166861/ if isinstance(value, datetime.datetime): return value.isoformat() # This mimics what Pecan implements in its default JSON encoder -- GitLab From 234b0f0504cb052b4c87da0fa96e567655d2da33 Mon Sep 17 00:00:00 2001 From: Sam Morrison Date: Wed, 26 Oct 2016 02:46:42 +1100 Subject: [PATCH 0452/1483] Add missing granularity to base storage class This was already implemented in the carbonara storage driver and was inadvertently missed from the parent class Change-Id: Ida2747a3a7cb04c9faca4c9dd7245e1e03bd82a0 --- gnocchi/storage/__init__.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/gnocchi/storage/__init__.py b/gnocchi/storage/__init__.py index ec255f9d..555b72d8 100644 --- a/gnocchi/storage/__init__.py +++ b/gnocchi/storage/__init__.py @@ -290,7 +290,8 @@ class StorageDriver(object): @staticmethod def search_value(metrics, query, from_timestamp=None, to_timestamp=None, - aggregation='mean'): + aggregation='mean', + granularity=None): 
"""Search for an aggregated value that realizes a predicate. :param metrics: The list of metrics to look into. @@ -298,6 +299,7 @@ class StorageDriver(object): :param from_timestamp: The timestamp to get the measure from. :param to_timestamp: The timestamp to get the measure to. :param aggregation: The type of aggregation to retrieve. + :param granularity: The granularity to retrieve. """ raise exceptions.NotImplementedError -- GitLab From 1e8b6de7f17f972e83e0b05a9c0c8b2267a3aaed Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Thu, 27 Oct 2016 10:55:16 +0200 Subject: [PATCH 0453/1483] Don't hang with wsgiref This change adds a workaround to not hangs with certain wsgi server when we read the body. Change-Id: I512041231a03308c61a98cf626fdc4c074ea3564 --- gnocchi/rest/__init__.py | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/gnocchi/rest/__init__.py b/gnocchi/rest/__init__.py index 37a19c74..6e57e18f 100644 --- a/gnocchi/rest/__init__.py +++ b/gnocchi/rest/__init__.py @@ -160,7 +160,18 @@ def deserialize(expected_content_types=None): if mime_type not in expected_content_types: abort(415) try: - params = json.load(pecan.request.body_file_raw) + # NOTE(sileht): We prefer use seekable if possible because it takes + # care of Content-Length when you use read()/readlines(). + # webob and wsgiref people are both well following RFCs, but sometimes + # their are case when the application can hang, because read is waiting + # for new data even the content-length is reach when keepalive + # connection are used. 
For more detail see: + # https://bugs.python.org/issue21878 + # https://github.com/Pylons/webob/issues/279 + if pecan.request.is_body_seekable: + params = json.load(pecan.request.body_file_seekable) + else: + params = json.load(pecan.request.body_file_raw) except Exception as e: abort(400, "Unable to decode body: " + six.text_type(e)) return params -- GitLab From dd7cda948b87d2a20a8a56455f388b2ad205314e Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Fri, 28 Oct 2016 14:33:02 +0200 Subject: [PATCH 0454/1483] rest: register default JSON converter using pecan.jsonify This has the upside of making sure that this is used when encoding errors too, not only regular output. Change-Id: I8f311bde8f285675a029d44bbffa7d5e720a39eb --- gnocchi/rest/app.py | 18 ++++++------------ 1 file changed, 6 insertions(+), 12 deletions(-) diff --git a/gnocchi/rest/app.py b/gnocchi/rest/app.py index 927a7811..9ef6e279 100644 --- a/gnocchi/rest/app.py +++ b/gnocchi/rest/app.py @@ -1,6 +1,6 @@ # -*- encoding: utf-8 -*- # -# Copyright © 2014-2015 eNovance +# Copyright © 2014-2016 eNovance # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain @@ -22,6 +22,7 @@ from oslo_middleware import cors from oslo_policy import policy from paste import deploy import pecan +from pecan import jsonify import webob.exc from gnocchi import exceptions @@ -34,6 +35,10 @@ from gnocchi import storage as gnocchi_storage LOG = log.getLogger(__name__) +# Register our encoder by default for everything +jsonify.jsonify.register(object)(json.to_primitive) + + class GnocchiHook(pecan.hooks.PecanHook): def __init__(self, storage, indexer, conf): @@ -49,16 +54,6 @@ class GnocchiHook(pecan.hooks.PecanHook): state.request.policy_enforcer = self.policy_enforcer -class OsloJSONRenderer(object): - @staticmethod - def __init__(*args, **kwargs): - pass - - @staticmethod - def render(template_path, namespace): - return json.dumps(namespace) - - class NotImplementedMiddleware(object): def __init__(self, app): self.app = app @@ -117,7 +112,6 @@ def _setup_app(root, conf, indexer, storage, not_implemented_middleware): root, hooks=(GnocchiHook(storage, indexer, conf),), guess_content_type_from_ext=False, - custom_renderers={'json': OsloJSONRenderer}, ) if not_implemented_middleware: -- GitLab From c95b98f76574af1138129f6696fe0d30f1b1155f Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Tue, 1 Nov 2016 17:15:42 +0100 Subject: [PATCH 0455/1483] Revert "drop non-I/O threading in upgrade" This reverts commit 84443ca4d47cbee9efa0e1ad1df63ff2ae7f8b1f. Upgrade with some storage drivers such as Swift consumes a lot of I/O with high latency, so running in parallel improve speed. 
Change-Id: I65106a5fbb8cd19235274ea55fb19fe2ecff9093 --- gnocchi/storage/_carbonara.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/gnocchi/storage/_carbonara.py b/gnocchi/storage/_carbonara.py index f2ccdef6..37460b4b 100644 --- a/gnocchi/storage/_carbonara.py +++ b/gnocchi/storage/_carbonara.py @@ -448,13 +448,13 @@ class CarbonaraBasedStorage(storage.StorageDriver): def upgrade(self, index): marker = None while True: - metrics = index.list_metrics(limit=self.UPGRADE_BATCH_SIZE, - marker=marker) - for m in metrics: - self._check_for_metric_upgrade(m) + metrics = [(metric,) for metric in + index.list_metrics(limit=self.UPGRADE_BATCH_SIZE, + marker=marker)] + self._map_in_thread(self._check_for_metric_upgrade, metrics) if len(metrics) == 0: break - marker = metrics[-1].id + marker = metrics[-1][0].id def process_new_measures(self, indexer, metrics_to_process, sync=False): metrics = indexer.list_metrics(ids=metrics_to_process) -- GitLab From b97a91a854404a77751547c485bc2a790e75b19c Mon Sep 17 00:00:00 2001 From: gord chung Date: Tue, 1 Nov 2016 16:59:08 +0000 Subject: [PATCH 0456/1483] support consistent timestamps supporting a single post with multiple timestamp formats slows down performance and isn't very realistic. this patch assumes a single format is used. 
Change-Id: If180455a2edd43317ddfddf008bf1bb0f8aae3ee --- .../gabbits/metric-timestamp-format.yaml | 21 ++++++++++++++ gnocchi/utils.py | 28 ++++++++++--------- 2 files changed, 36 insertions(+), 13 deletions(-) diff --git a/gnocchi/tests/gabbi/gabbits/metric-timestamp-format.yaml b/gnocchi/tests/gabbi/gabbits/metric-timestamp-format.yaml index cdfaa677..7f212071 100644 --- a/gnocchi/tests/gabbi/gabbits/metric-timestamp-format.yaml +++ b/gnocchi/tests/gabbi/gabbits/metric-timestamp-format.yaml @@ -32,3 +32,24 @@ tests: - timestamp: "-5 minutes" value: 43.1 status: 202 + + - name: create metric 2 + POST: /v1/metric + request_headers: + content-type: application/json + data: + archive_policy_name: cookies + status: 201 + response_json_paths: + $.archive_policy_name: cookies + + - name: push measurements to metric with mixed timestamps + POST: /v1/metric/$RESPONSE['$.id']/measures + request_headers: + content-type: application/json + data: + - timestamp: 1478012832 + value: 43.1 + - timestamp: "-5 minutes" + value: 43.1 + status: 400 diff --git a/gnocchi/utils.py b/gnocchi/utils.py index ff5981ac..a29cec9b 100644 --- a/gnocchi/utils.py +++ b/gnocchi/utils.py @@ -70,20 +70,22 @@ unix_universal_start64 = numpy.datetime64("1970") def to_timestamps(values): - timestamps = [] - for v in values: - if isinstance(v, numbers.Real): - timestamps.append(float(v) * 10e8) - elif isinstance(v, datetime.datetime): - timestamps.append(v) - else: - delta = timeparse.timeparse(v) - timestamps.append(v - if delta is None - else numpy.datetime64(timeutils.utcnow()) - + numpy.timedelta64(delta)) try: - times = pandas.to_datetime(timestamps, utc=True, box=False) + values = list(values) + if isinstance(values[0], numbers.Real): + times = pandas.to_datetime(values, utc=True, box=False, + unit='s') + elif isinstance(values[0], datetime.datetime): + times = pandas.to_datetime(values, utc=True, box=False) + else: + timestamps = [] + for v in values: + delta = timeparse.timeparse(v) + 
timestamps.append(v + if delta is None + else numpy.datetime64(timeutils.utcnow()) + + numpy.timedelta64(delta)) + times = pandas.to_datetime(timestamps, utc=True, box=False) except ValueError: raise ValueError("Unable to convert timestamps") -- GitLab From f3c9a9a8ea9b5562d8a8c6bb7c06dfae0c96b0fa Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Tue, 1 Nov 2016 17:15:42 +0100 Subject: [PATCH 0457/1483] Revert "drop non-I/O threading in upgrade" This reverts commit 84443ca4d47cbee9efa0e1ad1df63ff2ae7f8b1f. Upgrade with some storage drivers such as Swift consumes a lot of I/O with high latency, so running in parallel improve speed. Change-Id: I65106a5fbb8cd19235274ea55fb19fe2ecff9093 (cherry picked from commit c95b98f76574af1138129f6696fe0d30f1b1155f) --- gnocchi/storage/_carbonara.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/gnocchi/storage/_carbonara.py b/gnocchi/storage/_carbonara.py index 372800c8..55992877 100644 --- a/gnocchi/storage/_carbonara.py +++ b/gnocchi/storage/_carbonara.py @@ -453,13 +453,13 @@ class CarbonaraBasedStorage(storage.StorageDriver): def upgrade(self, index): marker = None while True: - metrics = index.list_metrics(limit=self.UPGRADE_BATCH_SIZE, - marker=marker) - for m in metrics: - self._check_for_metric_upgrade(m) + metrics = [(metric,) for metric in + index.list_metrics(limit=self.UPGRADE_BATCH_SIZE, + marker=marker)] + self._map_in_thread(self._check_for_metric_upgrade, metrics) if len(metrics) == 0: break - marker = metrics[-1].id + marker = metrics[-1][0].id def process_new_measures(self, indexer, metrics_to_process, sync=False): metrics = indexer.list_metrics(ids=metrics_to_process) -- GitLab From a8c170d5c05295640276deda672ea71d7e85dacb Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Thu, 15 Sep 2016 16:12:31 +0200 Subject: [PATCH 0458/1483] ceph: move out of xattr completely Change-Id: I18582ae01681c9d1d36d0cb87ae9dd3f4d69c38b --- gnocchi/storage/ceph.py | 73 +++++++++++++------ 
.../notes/ceph-omap-34e069dfb3df764d.yaml | 5 ++ 2 files changed, 56 insertions(+), 22 deletions(-) create mode 100644 releasenotes/notes/ceph-omap-34e069dfb3df764d.yaml diff --git a/gnocchi/storage/ceph.py b/gnocchi/storage/ceph.py index 312276c4..35120e35 100644 --- a/gnocchi/storage/ceph.py +++ b/gnocchi/storage/ceph.py @@ -134,6 +134,22 @@ class CephStorage(_carbonara.CarbonaraBasedStorage): for xattr in xattrs: self.ioctx.rm_xattr(self.MEASURE_PREFIX, xattr) + def _check_for_metric_upgrade(self, metric): + lock = self._lock(metric.id) + with lock: + container = "gnocchi_%s_container" % metric.id + try: + xattrs = tuple(k for k, v in self.ioctx.get_xattrs(container)) + except rados.ObjectNotFound: + pass + else: + with rados.WriteOpCtx() as op: + self.ioctx.set_omap(op, xattrs, tuple([b""] * len(xattrs))) + self.ioctx.operate_write_op(op, container) + for xattr in xattrs: + self.ioctx.rm_xattr(container, xattr) + super(CephStorage, self)._check_for_metric_upgrade(metric) + def _store_new_measures(self, metric, data): # NOTE(sileht): list all objects in a pool is too slow with # many objects (2min for 20000 objects in 50osds cluster), @@ -201,7 +217,7 @@ class CephStorage(_carbonara.CarbonaraBasedStorage): def _delete_unprocessed_measures_for_metric_id(self, metric_id): object_prefix = self.MEASURE_PREFIX + "_" + str(metric_id) object_names = self._list_object_names_to_process(object_prefix) - # Now clean objects and xattrs + # Now clean objects and omap with rados.WriteOpCtx() as op: # NOTE(sileht): come on Ceph, no return code # for this operation ?!! @@ -224,7 +240,7 @@ class CephStorage(_carbonara.CarbonaraBasedStorage): yield measures - # Now clean objects and xattrs + # Now clean objects and omap with rados.WriteOpCtx() as op: # NOTE(sileht): come on Ceph, no return code # for this operation ?!! 
@@ -264,23 +280,31 @@ class CephStorage(_carbonara.CarbonaraBasedStorage): self.ioctx.write_full(name, data) else: self.ioctx.write(name, data, offset=offset) - self.ioctx.set_xattr("gnocchi_%s_container" % metric.id, name, b"") + with rados.WriteOpCtx() as op: + self.ioctx.set_omap(op, (name,), (b"",)) + self.ioctx.operate_write_op(op, "gnocchi_%s_container" % metric.id) def _delete_metric_measures(self, metric, timestamp_key, aggregation, granularity, version=3): name = self._get_object_name(metric, timestamp_key, aggregation, granularity, version) - self.ioctx.rm_xattr("gnocchi_%s_container" % metric.id, name) + with rados.WriteOpCtx() as op: + self.ioctx.remove_omap_keys(op, (name,)) + self.ioctx.operate_write_op(op, "gnocchi_%s_container" % metric.id) self.ioctx.aio_remove(name) def _delete_metric(self, metric): - try: - xattrs = self.ioctx.get_xattrs("gnocchi_%s_container" % metric.id) - except rados.ObjectNotFound: - pass - else: - for xattr, _ in xattrs: - self.ioctx.aio_remove(xattr) + with rados.ReadOpCtx() as op: + omaps, ret = self.ioctx.get_omap_vals(op, "", "", -1) + try: + self.ioctx.operate_read_op( + op, "gnocchi_%s_container" % metric.id) + except rados.ObjectNotFound: + return + if ret == errno.ENOENT: + return + for name, _ in omaps: + self.ioctx.aio_remove(name) for name in ('container', 'none'): self.ioctx.aio_remove("gnocchi_%s_%s" % (metric.id, name)) @@ -298,17 +322,22 @@ class CephStorage(_carbonara.CarbonaraBasedStorage): def _list_split_keys_for_metric(self, metric, aggregation, granularity, version=None): - try: - xattrs = self.ioctx.get_xattrs("gnocchi_%s_container" % metric.id) - except rados.ObjectNotFound: - raise storage.MetricDoesNotExist(metric) - keys = set() - for xattr, value in xattrs: - meta = xattr.split('_') - if (aggregation == meta[3] and granularity == float(meta[4]) and - self._version_check(xattr, version)): - keys.add(meta[2]) - return keys + with rados.ReadOpCtx() as op: + omaps, ret = self.ioctx.get_omap_vals(op, 
"", "", -1) + try: + self.ioctx.operate_read_op( + op, "gnocchi_%s_container" % metric.id) + except rados.ObjectNotFound: + raise storage.MetricDoesNotExist(metric) + if ret == errno.ENOENT: + raise storage.MetricDoesNotExist(metric) + keys = set() + for name, value in omaps: + meta = name.split('_') + if (aggregation == meta[3] and granularity == float(meta[4]) + and self._version_check(name, version)): + keys.add(meta[2]) + return keys @staticmethod def _build_unaggregated_timeserie_path(metric, version): diff --git a/releasenotes/notes/ceph-omap-34e069dfb3df764d.yaml b/releasenotes/notes/ceph-omap-34e069dfb3df764d.yaml new file mode 100644 index 00000000..d053330b --- /dev/null +++ b/releasenotes/notes/ceph-omap-34e069dfb3df764d.yaml @@ -0,0 +1,5 @@ +--- +upgrade: + - Ceph driver has moved the storage of measures metadata + from xattr to omap API. Already created measures are migrated + during gnocchi-upgrade run. -- GitLab From 021a041b43af6b916f4381a3b8bcf770d4da3339 Mon Sep 17 00:00:00 2001 From: Ali Shariat Date: Thu, 3 Nov 2016 15:33:14 -0400 Subject: [PATCH 0459/1483] rest: empty search query in resource search resource search should support empty query. Right now, if the query is not passed at all the api returns all of the resources. but with empty query, i.e. {}, it breaks. 
Change-Id: I5f85bae0b710b67e6cbd0c3165666cf09c17e759 --- gnocchi/rest/__init__.py | 2 +- gnocchi/tests/gabbi/gabbits/search.yaml | 8 ++++++++ 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/gnocchi/rest/__init__.py b/gnocchi/rest/__init__.py index 6b03c075..ed22c06b 100644 --- a/gnocchi/rest/__init__.py +++ b/gnocchi/rest/__init__.py @@ -1224,7 +1224,7 @@ class SearchResourceTypeController(rest.RestController): ResourceSearchSchema = voluptuous.Schema( voluptuous.All( - voluptuous.Length(min=1, max=1), + voluptuous.Length(min=0, max=1), { voluptuous.Any( u"=", u"==", u"eq", diff --git a/gnocchi/tests/gabbi/gabbits/search.yaml b/gnocchi/tests/gabbi/gabbits/search.yaml index f4dc1da9..4503454a 100644 --- a/gnocchi/tests/gabbi/gabbits/search.yaml +++ b/gnocchi/tests/gabbi/gabbits/search.yaml @@ -63,3 +63,11 @@ tests: content-type: application/json response_json_paths: $.`len`: 2 + + - name: search empty query + POST: /v1/search/resource/generic + request_headers: + content-type: application/json + data: {} + response_json_paths: + $.`len`: 2 \ No newline at end of file -- GitLab From ad5b4fc620509335dc63436cb8cb28e40df27275 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Mon, 7 Nov 2016 15:59:52 +0100 Subject: [PATCH 0460/1483] Allow timespan to be floating values Everything supports precision up to micro or nanoseconds, so there's no reason to convert timespan to seconds only. 
Change-Id: I9750a810600be3937c7f5cbb8b920012b3dcf2e4 --- gnocchi/utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/gnocchi/utils.py b/gnocchi/utils.py index 25a30b31..867f1154 100644 --- a/gnocchi/utils.py +++ b/gnocchi/utils.py @@ -85,7 +85,7 @@ def to_timespan(value): if value is None: raise ValueError("Invalid timespan") try: - seconds = int(value) + seconds = float(value) except Exception: try: seconds = timeparse.timeparse(six.text_type(value)) -- GitLab From e4d3b573c6299f02ae7a0f023994c4057a400302 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Mon, 7 Nov 2016 16:20:37 +0100 Subject: [PATCH 0461/1483] rest: don't use is_body_seekable The condition was wrongly reversed in previous attempt to fix the wsgiref hanging bug. Making the change useless the code was always returning an unseekable version of the wsgi.input. Also using is_body_seekable is useless body_file_seekable already do this check (correctly) and return the raw one when the wsgi input is already seekable. Change-Id: Ifdfd419d964c8cfc9295bbf1c772429c68e5618d --- gnocchi/rest/__init__.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/gnocchi/rest/__init__.py b/gnocchi/rest/__init__.py index a553c8e0..da1dad38 100644 --- a/gnocchi/rest/__init__.py +++ b/gnocchi/rest/__init__.py @@ -166,10 +166,7 @@ def deserialize(expected_content_types=None): # connection are used. 
For more detail see: # https://bugs.python.org/issue21878 # https://github.com/Pylons/webob/issues/279 - if pecan.request.is_body_seekable: - params = json.load(pecan.request.body_file_seekable) - else: - params = json.load(pecan.request.body_file_raw) + params = json.load(pecan.request.body_file_seekable) except Exception as e: abort(400, "Unable to decode body: " + six.text_type(e)) return params -- GitLab From 04917de5852a5a2c4a740f71772cf30f6d0a9a54 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Mon, 7 Nov 2016 12:53:09 +0100 Subject: [PATCH 0462/1483] metricd: retry slowly coordination connection failure If the coordinator fails to start (e.g. ToozConnectionError), the metricd subprocess started by Cotyledon fails to start and raises an error. That means Cotyledon will retry to spawn the process with no delay at all, spamming the CPU with forking request. This patches fatorize the coordination retrieval and connection code into one function in gnocchi.utils, which makes sure both metricd and the Carbonara based drivers leverage the same code to retry to connection to the coordinator with some delay. 
Change-Id: I83157c5fdb0a3e488a9b788d48d974de80219dbb --- gnocchi/cli.py | 10 ++-------- gnocchi/storage/_carbonara.py | 14 ++------------ gnocchi/utils.py | 24 +++++++++++++++++++++++- 3 files changed, 27 insertions(+), 21 deletions(-) diff --git a/gnocchi/cli.py b/gnocchi/cli.py index a4e7cdbc..dae30b88 100644 --- a/gnocchi/cli.py +++ b/gnocchi/cli.py @@ -16,7 +16,6 @@ import multiprocessing import threading import time -import uuid import cotyledon from cotyledon import oslo_config_glue @@ -144,16 +143,11 @@ class MetricScheduler(MetricProcessBase): TASKS_PER_WORKER = 16 BLOCK_SIZE = 4 - def _enable_coordination(self, conf): - self._coord = coordination.get_coordinator( - conf.storage.coordination_url, self._my_id) - self._coord.start(start_heart=True) - def __init__(self, worker_id, conf, queue): super(MetricScheduler, self).__init__( worker_id, conf, conf.storage.metric_processing_delay) - self._my_id = str(uuid.uuid4()) - self._enable_coordination(conf) + self._coord, self._my_id = utils.get_coordinator_and_start( + conf.storage.coordination_url) self.queue = queue self.previously_scheduled_metrics = set() self.workers = conf.metricd.workers diff --git a/gnocchi/storage/_carbonara.py b/gnocchi/storage/_carbonara.py index 37460b4b..04dd719b 100644 --- a/gnocchi/storage/_carbonara.py +++ b/gnocchi/storage/_carbonara.py @@ -65,24 +65,14 @@ class CarbonaraBasedStorage(storage.StorageDriver): def __init__(self, conf): super(CarbonaraBasedStorage, self).__init__(conf) - self.coord = coordination.get_coordinator( - conf.coordination_url, - str(uuid.uuid4()).encode('ascii')) self.aggregation_workers_number = conf.aggregation_workers_number if self.aggregation_workers_number == 1: # NOTE(jd) Avoid using futures at all if we don't want any threads. 
self._map_in_thread = self._map_no_thread else: self._map_in_thread = self._map_in_futures_threads - self.start() - - @utils.retry - def start(self): - try: - self.coord.start(start_heart=True) - except Exception as e: - LOG.error("Unable to start coordinator: %s" % e) - raise utils.Retry(e) + self.coord, my_id = utils.get_coordinator_and_start( + conf.coordination_url) def stop(self): self.coord.stop() diff --git a/gnocchi/utils.py b/gnocchi/utils.py index a29cec9b..2d6fa984 100644 --- a/gnocchi/utils.py +++ b/gnocchi/utils.py @@ -17,15 +17,20 @@ import datetime import itertools import multiprocessing import numbers +import uuid import iso8601 import numpy +from oslo_log import log from oslo_utils import timeutils import pandas from pytimeparse import timeparse import six import tenacity -import uuid +from tooz import coordination + + +LOG = log.getLogger(__name__) # uuid5 namespace for id transformation. # NOTE(chdent): This UUID must stay the same, forever, across all @@ -66,6 +71,23 @@ retry = tenacity.retry( reraise=True) +# TODO(jd) Move this to tooz? +@retry +def _enable_coordination(coord): + try: + coord.start(start_heart=True) + except Exception as e: + LOG.error("Unable to start coordinator: %s", e) + raise Retry(e) + + +def get_coordinator_and_start(url): + my_id = str(uuid.uuid4()) + coord = coordination.get_coordinator(url, my_id) + _enable_coordination(coord) + return coord, my_id + + unix_universal_start64 = numpy.datetime64("1970") -- GitLab From 04dde36fa1bc05743e1f298f71509691e6d862d0 Mon Sep 17 00:00:00 2001 From: zhangyanxian Date: Mon, 7 Nov 2016 01:33:01 +0000 Subject: [PATCH 0463/1483] Interpolate strings using logging own methods String interpolation should be delayed to be handled by the logging code, rather than being done at the point of the logging call. 
Change-Id: Ia6bd6c25c3bd21ab019755195428a3a4753f1784 --- gnocchi/cli.py | 4 ++-- gnocchi/rest/app.py | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/gnocchi/cli.py b/gnocchi/cli.py index a4e7cdbc..72586211 100644 --- a/gnocchi/cli.py +++ b/gnocchi/cli.py @@ -56,12 +56,12 @@ def upgrade(): index = indexer.get_driver(conf) index.connect() if not conf.skip_index: - LOG.info("Upgrading indexer %s" % index) + LOG.info("Upgrading indexer %s", index) index.upgrade( create_legacy_resource_types=conf.create_legacy_resource_types) if not conf.skip_storage: s = storage.get_driver(conf) - LOG.info("Upgrading storage %s" % s) + LOG.info("Upgrading storage %s", s) s.upgrade(index) if (not conf.skip_archive_policies_creation diff --git a/gnocchi/rest/app.py b/gnocchi/rest/app.py index 9ef6e279..58966f48 100644 --- a/gnocchi/rest/app.py +++ b/gnocchi/rest/app.py @@ -101,7 +101,7 @@ def load_app(conf, appname=None, indexer=None, storage=None, configkey = str(uuid.uuid4()) APPCONFIGS[configkey] = config - LOG.info("WSGI config used: %s" % cfg_path) + LOG.info("WSGI config used: %s", cfg_path) app = deploy.loadapp("config:" + cfg_path, name=appname, global_conf={'configkey': configkey}) return cors.CORS(app, conf=conf) -- GitLab From f9b19ad8d05728b6938dfa7cb90cca5143c38927 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Mon, 10 Oct 2016 16:56:44 +0200 Subject: [PATCH 0464/1483] rest: allow to create missing metrics when sending measures in batch Change-Id: If1f684ad368d4fb797267bbc4c099938f2722dfe --- doc/source/rest.j2 | 5 +++ doc/source/rest.yaml | 14 ++++++ gnocchi/indexer/sqlalchemy.py | 2 + gnocchi/rest/__init__.py | 40 ++++++++++++++--- .../tests/gabbi/gabbits/batch-measures.yaml | 45 +++++++++++++++++++ ...sures_create_metrics-f73790a8475ad628.yaml | 5 +++ 6 files changed, 105 insertions(+), 6 deletions(-) create mode 100644 releasenotes/notes/batch_resource_measures_create_metrics-f73790a8475ad628.yaml diff --git a/doc/source/rest.j2 
b/doc/source/rest.j2 index fbae6e55..50a44a39 100644 --- a/doc/source/rest.j2 +++ b/doc/source/rest.j2 @@ -121,6 +121,11 @@ Or using named metrics of resources: {{ scenarios['post-measures-batch-named']['doc'] }} +If some named metrics specified in the batch request do not exist, Gnocchi can +try to create them as long as an archive policy rule matches: + +{{ scenarios['post-measures-batch-named-create']['doc'] }} + Archive Policy ============== diff --git a/doc/source/rest.yaml b/doc/source/rest.yaml index 78aceb16..87c8314e 100644 --- a/doc/source/rest.yaml +++ b/doc/source/rest.yaml @@ -550,6 +550,20 @@ } } +- name: post-measures-batch-named-create + request: | + POST /v1/batch/resources/metrics/measures?create_metrics=true HTTP/1.1 + Content-Type: application/json + + { + "{{ scenarios['create-resource-with-new-metrics']['response'].json['id'] }}": { + "disk.io.test": [ + { "timestamp": "2014-10-06T14:34:12", "value": 71 }, + { "timestamp": "2014-10-06T14:34:20", "value": 81 } + ] + } + } + - name: delete-resource-generic request: DELETE /v1/resource/generic/{{ scenarios['create-resource-generic']['response'].json['id'] }} HTTP/1.1 diff --git a/gnocchi/indexer/sqlalchemy.py b/gnocchi/indexer/sqlalchemy.py index bb76185d..3bdba1bd 100644 --- a/gnocchi/indexer/sqlalchemy.py +++ b/gnocchi/indexer/sqlalchemy.py @@ -653,6 +653,8 @@ class SQLAlchemyIndexer(indexer.IndexerDriver): if (e.constraint == 'fk_metric_ap_name_ap_name'): raise indexer.NoSuchArchivePolicy(archive_policy_name) + if e.constraint == 'fk_metric_resource_id_resource_id': + raise indexer.NoSuchResource(resource_id) raise return m diff --git a/gnocchi/rest/__init__.py b/gnocchi/rest/__init__.py index a553c8e0..db0826e6 100644 --- a/gnocchi/rest/__init__.py +++ b/gnocchi/rest/__init__.py @@ -51,8 +51,9 @@ def abort(status_code, detail='', headers=None, comment=None, **kw): """Like pecan.abort, but make sure detail is a string.""" if status_code == 404 and not detail: raise RuntimeError("http code 404 
must have 'detail' set") - return pecan.abort(status_code, six.text_type(detail), - headers, comment, **kw) + if isinstance(detail, Exception): + detail = six.text_type(detail) + return pecan.abort(status_code, detail, headers, comment, **kw) def get_user_and_project(): @@ -1394,8 +1395,8 @@ class ResourcesMetricsMeasuresBatchController(rest.RestController): {utils.ResourceUUID: {six.text_type: MeasuresListSchema}} ) - @pecan.expose() - def post(self): + @pecan.expose('json') + def post(self, create_metrics=False): body = deserialize_and_validate(self.MeasuresBatchSchema) known_metrics = [] @@ -1405,8 +1406,35 @@ class ResourcesMetricsMeasuresBatchController(rest.RestController): metrics = pecan.request.indexer.list_metrics( names=names, resource_id=resource_id) - if len(names) != len(metrics): - known_names = [m.name for m in metrics] + known_names = [m.name for m in metrics] + if strutils.bool_from_string(create_metrics): + user_id, project_id = get_user_and_project() + unknown_resources = set() + for name in names: + if name not in known_names: + metric = MetricsController.MetricSchema({ + "name": name + }) + try: + pecan.request.indexer.create_metric( + uuid.uuid4(), + user_id, project_id, + resource_id=resource_id, + name=metric.get('name'), + unit=metric.get('unit'), + archive_policy_name=metric[ + 'archive_policy_name']) + except indexer.NoSuchResource as e: + unknown_resources.add(resource_id) + except indexer.IndexerException as e: + # This catch NoSuchArchivePolicy, which is unlikely + # be still possible + abort(400, e) + if unknown_resources: + abort(400, {"cause": "Unknown resources", + "detail": unknown_resources}) + + elif len(names) != len(metrics): unknown_metrics.extend( ["%s/%s" % (six.text_type(resource_id), m) for m in names if m not in known_names]) diff --git a/gnocchi/tests/gabbi/gabbits/batch-measures.yaml b/gnocchi/tests/gabbi/gabbits/batch-measures.yaml index e5b748dd..7eae012d 100644 --- a/gnocchi/tests/gabbi/gabbits/batch-measures.yaml 
+++ b/gnocchi/tests/gabbi/gabbits/batch-measures.yaml @@ -160,3 +160,48 @@ tests: - timestamp: "2015-03-06T14:34:12" value: 12 status: 202 + + - name: create archive policy rule for auto + POST: /v1/archive_policy_rule + request_headers: + content-type: application/json + x-roles: admin + data: + name: rule_auto + metric_pattern: "auto.*" + archive_policy_name: simple + status: 201 + + - name: push measurements to unknown named metrics and create it + POST: /v1/batch/resources/metrics/measures?create_metrics=true + request_headers: + content-type: application/json + data: + 46c9418d-d63b-4cdd-be89-8f57ffc5952e: + auto.test: + - timestamp: "2015-03-06T14:33:57" + value: 43.1 + - timestamp: "2015-03-06T14:34:12" + value: 12 + status: 202 + + - name: get created metric to check creation + GET: /v1/resource/generic/46c9418d-d63b-4cdd-be89-8f57ffc5952e/metric/auto.test + + - name: push measurements to unknown named metrics and resource with create_metrics + POST: /v1/batch/resources/metrics/measures?create_metrics=true + request_headers: + content-type: application/json + accept: application/json + data: + aaaaaaaa-d63b-4cdd-be89-111111111111: + auto.test: + - timestamp: "2015-03-06T14:33:57" + value: 43.1 + - timestamp: "2015-03-06T14:34:12" + value: 12 + status: 400 + response_json_paths: + $.description.cause: "Unknown resources" + $.description.detail: + - "aaaaaaaa-d63b-4cdd-be89-111111111111" diff --git a/releasenotes/notes/batch_resource_measures_create_metrics-f73790a8475ad628.yaml b/releasenotes/notes/batch_resource_measures_create_metrics-f73790a8475ad628.yaml new file mode 100644 index 00000000..afccc58b --- /dev/null +++ b/releasenotes/notes/batch_resource_measures_create_metrics-f73790a8475ad628.yaml @@ -0,0 +1,5 @@ +--- +features: + - "When sending measures in batch for resources, it is now possible to pass + `create_metric=true` to the query parameters so missing metrics are created. 
+ This only works if an archive policy rule matching those named metrics matches." -- GitLab From abfa908429ce3d0f5013d707197b5f5382afab85 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Mon, 7 Nov 2016 13:07:59 +0100 Subject: [PATCH 0465/1483] Use tenacity.TryAgain rather than a own-defined exception Change-Id: I9f895ab2a636aaa866fd422cf902e19987d34354 --- gnocchi/cli.py | 3 ++- gnocchi/utils.py | 7 +------ 2 files changed, 3 insertions(+), 7 deletions(-) diff --git a/gnocchi/cli.py b/gnocchi/cli.py index dae30b88..9edae53f 100644 --- a/gnocchi/cli.py +++ b/gnocchi/cli.py @@ -25,6 +25,7 @@ from oslo_config import cfg from oslo_log import log from oslo_utils import timeutils import six +import tenacity import tooz from tooz import coordination @@ -203,7 +204,7 @@ class MetricScheduler(MetricProcessBase): create_group_req.get() except coordination.GroupAlreadyExist: pass - raise utils.Retry(e) + raise tenacity.TryAgain(e) except tooz.NotImplemented: LOG.warning('Configured coordination driver does not support ' 'required functionality. 
Coordination is disabled.') diff --git a/gnocchi/utils.py b/gnocchi/utils.py index 2d6fa984..2c741111 100644 --- a/gnocchi/utils.py +++ b/gnocchi/utils.py @@ -60,14 +60,9 @@ def UUID(value): raise ValueError(e) -class Retry(Exception): - pass - - # Retry with exponential backoff for up to 1 minute retry = tenacity.retry( wait=tenacity.wait_exponential(multiplier=0.5, max=60), - retry=tenacity.retry_if_exception_type(Retry), reraise=True) @@ -78,7 +73,7 @@ def _enable_coordination(coord): coord.start(start_heart=True) except Exception as e: LOG.error("Unable to start coordinator: %s", e) - raise Retry(e) + raise tenacity.TryAgain(e) def get_coordinator_and_start(url): -- GitLab From 7418017e7f9a78b387f94ef41a0cd271c3ae4f0b Mon Sep 17 00:00:00 2001 From: gord chung Date: Fri, 4 Nov 2016 21:47:34 +0000 Subject: [PATCH 0466/1483] support resampling support ability to resample data to another granularity that isn't necessarily in archive policy. Change-Id: I7bd83a3f550e25f438b50cef8e663f96fc0073a6 Related-Bug: #1631046 --- doc/source/rest.j2 | 9 ++++++ doc/source/rest.yaml | 3 ++ gnocchi/carbonara.py | 4 +++ gnocchi/rest/__init__.py | 13 ++++++-- gnocchi/storage/__init__.py | 3 +- gnocchi/storage/_carbonara.py | 10 ++++-- gnocchi/tests/gabbi/gabbits/metric.yaml | 43 +++++++++++++++++++++++++ gnocchi/tests/test_carbonara.py | 16 +++++++++ 8 files changed, 95 insertions(+), 6 deletions(-) diff --git a/doc/source/rest.j2 b/doc/source/rest.j2 index fbae6e55..87ba7eea 100644 --- a/doc/source/rest.j2 +++ b/doc/source/rest.j2 @@ -109,6 +109,15 @@ to retrieve, rather than all the granularities available: {{ scenarios['get-measures-granularity']['doc'] }} +In addition to granularities defined by the archive policy, measures can be +resampled to a new granularity. + +{{ scenarios['get-measures-resample']['doc'] }} + +.. note:: + + Depending on the aggregation method and frequency of measures, resampled + data may lack accuracy as it is working against previously aggregated data. 
Measures batching ================= diff --git a/doc/source/rest.yaml b/doc/source/rest.yaml index 78aceb16..3b5be4f2 100644 --- a/doc/source/rest.yaml +++ b/doc/source/rest.yaml @@ -269,6 +269,9 @@ - name: get-measures-refresh request: GET /v1/metric/{{ scenarios['create-metric']['response'].json['id'] }}/measures?refresh=true HTTP/1.1 +- name: get-measures-resample + request: GET /v1/metric/{{ scenarios['create-metric']['response'].json['id'] }}/measures?resample=5&granularity=1 HTTP/1.1 + - name: create-resource-generic request: | POST /v1/resource/generic HTTP/1.1 diff --git a/gnocchi/carbonara.py b/gnocchi/carbonara.py index b0af065c..58718088 100644 --- a/gnocchi/carbonara.py +++ b/gnocchi/carbonara.py @@ -422,6 +422,10 @@ class AggregatedTimeSerie(TimeSerie): self.aggregation_method = aggregation_method self._truncate(quick=True) + def resample(self, sampling): + return AggregatedTimeSerie.from_grouped_serie( + self.group_serie(sampling), sampling, self.aggregation_method) + @classmethod def from_data(cls, sampling, aggregation_method, timestamps=None, values=None, max_size=None): diff --git a/gnocchi/rest/__init__.py b/gnocchi/rest/__init__.py index ed22c06b..41db45bb 100644 --- a/gnocchi/rest/__init__.py +++ b/gnocchi/rest/__init__.py @@ -478,7 +478,7 @@ class MetricController(rest.RestController): @pecan.expose('json') def get_measures(self, start=None, stop=None, aggregation='mean', - granularity=None, refresh=False, **param): + granularity=None, resample=None, refresh=False, **param): self.enforce_metric("get measures") if not (aggregation in archive_policy.ArchivePolicy.VALID_AGGREGATION_METHODS @@ -502,6 +502,14 @@ class MetricController(rest.RestController): except Exception: abort(400, "Invalid value for stop") + if resample: + if not granularity: + abort(400, 'A granularity must be specified to resample') + try: + resample = Timespan(resample) + except ValueError as e: + abort(400, e) + if strutils.bool_from_string(refresh): 
pecan.request.storage.process_new_measures( pecan.request.indexer, [six.text_type(self.metric.id)], True) @@ -514,7 +522,8 @@ class MetricController(rest.RestController): else: measures = pecan.request.storage.get_measures( self.metric, start, stop, aggregation, - float(granularity) if granularity is not None else None) + float(granularity) if granularity is not None else None, + resample) # Replace timestamp keys by their string versions return [(timestamp.isoformat(), offset, v) for timestamp, offset, v in measures] diff --git a/gnocchi/storage/__init__.py b/gnocchi/storage/__init__.py index 555b72d8..ff2d4a14 100644 --- a/gnocchi/storage/__init__.py +++ b/gnocchi/storage/__init__.py @@ -247,7 +247,7 @@ class StorageDriver(object): @staticmethod def get_measures(metric, from_timestamp=None, to_timestamp=None, - aggregation='mean', granularity=None): + aggregation='mean', granularity=None, resample=None): """Get a measure to a metric. :param metric: The metric measured. @@ -255,6 +255,7 @@ class StorageDriver(object): :param to timestamp: The timestamp to get the measure to. :param aggregation: The type of aggregation to retrieve. :param granularity: The granularity to retrieve. + :param resample: The granularity to resample to. 
""" if aggregation not in metric.archive_policy.aggregation_methods: raise AggregationDoesNotExist(metric, aggregation) diff --git a/gnocchi/storage/_carbonara.py b/gnocchi/storage/_carbonara.py index 37460b4b..21f45133 100644 --- a/gnocchi/storage/_carbonara.py +++ b/gnocchi/storage/_carbonara.py @@ -154,7 +154,7 @@ class CarbonaraBasedStorage(storage.StorageDriver): else attrs[-1] == 'v%s' % v) def get_measures(self, metric, from_timestamp=None, to_timestamp=None, - aggregation='mean', granularity=None): + aggregation='mean', granularity=None, resample=None): super(CarbonaraBasedStorage, self).get_measures( metric, from_timestamp, to_timestamp, aggregation) if granularity is None: @@ -164,9 +164,13 @@ class CarbonaraBasedStorage(storage.StorageDriver): from_timestamp, to_timestamp) for ap in reversed(metric.archive_policy.definition))) else: - agg_timeseries = [self._get_measures_timeserie( + agg_timeseries = self._get_measures_timeserie( metric, aggregation, granularity, - from_timestamp, to_timestamp)] + from_timestamp, to_timestamp) + if resample: + agg_timeseries = agg_timeseries.resample(resample) + agg_timeseries = [agg_timeseries] + return [(timestamp.replace(tzinfo=iso8601.iso8601.UTC), r, v) for ts in agg_timeseries for timestamp, r, v in ts.fetch(from_timestamp, to_timestamp)] diff --git a/gnocchi/tests/gabbi/gabbits/metric.yaml b/gnocchi/tests/gabbi/gabbits/metric.yaml index c76a805f..1b4b9c8b 100644 --- a/gnocchi/tests/gabbi/gabbits/metric.yaml +++ b/gnocchi/tests/gabbi/gabbits/metric.yaml @@ -202,6 +202,49 @@ tests: - ["2015-03-06T14:33:57+00:00", 1.0, 43.1] - ["2015-03-06T14:34:12+00:00", 1.0, 12.0] + - name: get valid metric id again 5 + GET: /v1/metric + + - name: push measurements to metric again + POST: /v1/metric/$RESPONSE['$[0].id']/measures + request_headers: + content-type: application/json + data: + - timestamp: "2015-03-06T14:34:15" + value: 16 + - timestamp: "2015-03-06T14:35:12" + value: 9 + - timestamp: "2015-03-06T14:35:15" + value: 11 
+ status: 202 + + - name: get valid metric id again 6 + GET: /v1/metric + + - name: get measurements from metric and resample + GET: /v1/metric/$RESPONSE['$[0].id']/measures?refresh=true&resample=60&granularity=1 + response_json_paths: + $: + - ["2015-03-06T14:33:00+00:00", 60.0, 43.1] + - ["2015-03-06T14:34:00+00:00", 60.0, 14.0] + - ["2015-03-06T14:35:00+00:00", 60.0, 10.0] + + - name: get valid metric id again 7 + GET: /v1/metric + + - name: get measurements from metric and resample no granularity + GET: /v1/metric/$RESPONSE['$[0].id']/measures?resample=60 + status: 400 + response_strings: + - A granularity must be specified to resample + + - name: get valid metric id again 8 + GET: /v1/metric + + - name: get measurements from metric and bad resample + GET: /v1/metric/$RESPONSE['$[0].id']/measures?resample=abc + status: 400 + - name: create valid metric two POST: /v1/metric request_headers: diff --git a/gnocchi/tests/test_carbonara.py b/gnocchi/tests/test_carbonara.py index 71fd64d0..5f88379c 100644 --- a/gnocchi/tests/test_carbonara.py +++ b/gnocchi/tests/test_carbonara.py @@ -1104,3 +1104,19 @@ class TestAggregatedTimeSerie(base.BaseTestCase): sampling=agg.sampling, max_size=agg.max_size, aggregation_method=agg.aggregation_method)) + + def test_resample(self): + ts = carbonara.TimeSerie.from_data( + [datetime.datetime(2014, 1, 1, 12, 0, 0), + datetime.datetime(2014, 1, 1, 12, 0, 4), + datetime.datetime(2014, 1, 1, 12, 0, 9), + datetime.datetime(2014, 1, 1, 12, 0, 11), + datetime.datetime(2014, 1, 1, 12, 0, 12)], + [3, 5, 6, 2, 4]) + agg_ts = self._resample(ts, 5, 'mean') + self.assertEqual(3, len(agg_ts)) + + agg_ts = agg_ts.resample(10) + self.assertEqual(2, len(agg_ts)) + self.assertEqual(5, agg_ts[0]) + self.assertEqual(3, agg_ts[1]) -- GitLab From 68a2a356679b9d9ea7224c99e1d0670ca0c45be8 Mon Sep 17 00:00:00 2001 From: gord chung Date: Fri, 4 Nov 2016 23:02:57 +0000 Subject: [PATCH 0467/1483] support resampling on aggregation endpoint support ability to 
resample data to another granularity that isn't necessarily in archive policy. resampling is done prior to reaggregation Change-Id: Ibda001e13ef03a1cd550eed8b662944bb51e9d98 Closes-Bug: #1631046 --- doc/source/rest.j2 | 8 +++- gnocchi/rest/__init__.py | 27 ++++++++----- gnocchi/storage/__init__.py | 3 +- gnocchi/storage/_carbonara.py | 26 ++++++++----- gnocchi/tests/gabbi/gabbits/aggregation.yaml | 39 +++++++++++++++++++ .../dynamic-resampling-b5e545b1485c152f.yaml | 6 +++ 6 files changed, 88 insertions(+), 21 deletions(-) create mode 100644 releasenotes/notes/dynamic-resampling-b5e545b1485c152f.yaml diff --git a/doc/source/rest.j2 b/doc/source/rest.j2 index 87ba7eea..b6a1af7a 100644 --- a/doc/source/rest.j2 +++ b/doc/source/rest.j2 @@ -520,7 +520,13 @@ requested resource type, and the compute the aggregation: Similar to retrieving measures for a single metric, the `refresh` parameter can be provided to force all POSTed measures to be processed across all -metrics before computing the result. +metrics before computing the result. The `resample` parameter may be used as +well. + +.. note:: + + Resampling is done prior to any reaggregation if both parameters are + specified. Also aggregation across metrics have different behavior depending on if boundary are set ('start' and 'stop') and if 'needed_overlap' is set. diff --git a/gnocchi/rest/__init__.py b/gnocchi/rest/__init__.py index 41db45bb..90a70184 100644 --- a/gnocchi/rest/__init__.py +++ b/gnocchi/rest/__init__.py @@ -1501,7 +1501,7 @@ class AggregationResourceController(rest.RestController): @pecan.expose('json') def post(self, start=None, stop=None, aggregation='mean', reaggregation=None, granularity=None, needed_overlap=100.0, - groupby=None, refresh=False): + groupby=None, refresh=False, resample=None): # First, set groupby in the right format: a sorted list of unique # strings. 
groupby = sorted(set(arg_to_list(groupby))) @@ -1525,7 +1525,7 @@ class AggregationResourceController(rest.RestController): for r in resources))) return AggregationController.get_cross_metric_measures_from_objs( metrics, start, stop, aggregation, reaggregation, - granularity, needed_overlap, refresh) + granularity, needed_overlap, refresh, resample) def groupper(r): return tuple((attr, r[attr]) for attr in groupby) @@ -1539,7 +1539,7 @@ class AggregationResourceController(rest.RestController): "group": dict(key), "measures": AggregationController.get_cross_metric_measures_from_objs( # noqa metrics, start, stop, aggregation, reaggregation, - granularity, needed_overlap, refresh) + granularity, needed_overlap, refresh, resample) }) return results @@ -1570,7 +1570,7 @@ class AggregationController(rest.RestController): reaggregation=None, granularity=None, needed_overlap=100.0, - refresh=False): + refresh=False, resample=None): try: needed_overlap = float(needed_overlap) except ValueError: @@ -1607,6 +1607,15 @@ class AggregationController(rest.RestController): granularity = float(granularity) except ValueError as e: abort(400, "granularity must be a float: %s" % e) + + if resample: + if not granularity: + abort(400, 'A granularity must be specified to resample') + try: + resample = Timespan(resample) + except ValueError as e: + abort(400, e) + try: if strutils.bool_from_string(refresh): pecan.request.storage.process_new_measures( @@ -1617,13 +1626,11 @@ class AggregationController(rest.RestController): # metric measures = pecan.request.storage.get_measures( metrics[0], start, stop, aggregation, - granularity) + granularity, resample) else: measures = pecan.request.storage.get_cross_metric_measures( metrics, start, stop, aggregation, - reaggregation, - granularity, - needed_overlap) + reaggregation, resample, granularity, needed_overlap) # Replace timestamp keys by their string versions return [(timestamp.isoformat(), offset, v) for timestamp, offset, v in measures] @@ 
-1638,7 +1645,7 @@ class AggregationController(rest.RestController): @pecan.expose('json') def get_metric(self, metric=None, start=None, stop=None, aggregation='mean', reaggregation=None, granularity=None, - needed_overlap=100.0, refresh=False): + needed_overlap=100.0, refresh=False, resample=None): # Check RBAC policy metric_ids = arg_to_list(metric) metrics = pecan.request.indexer.list_metrics(ids=metric_ids) @@ -1650,7 +1657,7 @@ class AggregationController(rest.RestController): missing_metric_ids.pop())) return self.get_cross_metric_measures_from_objs( metrics, start, stop, aggregation, reaggregation, - granularity, needed_overlap, refresh) + granularity, needed_overlap, refresh, resample) class CapabilityController(rest.RestController): diff --git a/gnocchi/storage/__init__.py b/gnocchi/storage/__init__.py index ff2d4a14..152aa20b 100644 --- a/gnocchi/storage/__init__.py +++ b/gnocchi/storage/__init__.py @@ -267,7 +267,7 @@ class StorageDriver(object): @staticmethod def get_cross_metric_measures(metrics, from_timestamp=None, to_timestamp=None, aggregation='mean', - reaggregation=None, + reaggregation=None, resample=None, granularity=None, needed_overlap=None): """Get aggregated measures of multiple entities. @@ -279,6 +279,7 @@ class StorageDriver(object): :param aggregation: The type of aggregation to retrieve. :param reaggregation: The type of aggregation to compute on the retrieved measures. + :param resample: The granularity to resample to. 
""" for metric in metrics: if aggregation not in metric.archive_policy.aggregation_methods: diff --git a/gnocchi/storage/_carbonara.py b/gnocchi/storage/_carbonara.py index 21f45133..9460b8bc 100644 --- a/gnocchi/storage/_carbonara.py +++ b/gnocchi/storage/_carbonara.py @@ -591,12 +591,11 @@ class CarbonaraBasedStorage(storage.StorageDriver): def get_cross_metric_measures(self, metrics, from_timestamp=None, to_timestamp=None, aggregation='mean', - reaggregation=None, - granularity=None, - needed_overlap=100.0): + reaggregation=None, resample=None, + granularity=None, needed_overlap=100.0): super(CarbonaraBasedStorage, self).get_cross_metric_measures( metrics, from_timestamp, to_timestamp, - aggregation, reaggregation, granularity, needed_overlap) + aggregation, reaggregation, resample, granularity, needed_overlap) if reaggregation is None: reaggregation = aggregation @@ -620,11 +619,20 @@ class CarbonaraBasedStorage(storage.StorageDriver): else: granularities_in_common = [granularity] - tss = self._map_in_thread(self._get_measures_timeserie, - [(metric, aggregation, g, - from_timestamp, to_timestamp) - for metric in metrics - for g in granularities_in_common]) + if resample and granularity: + tss = self._map_in_thread(self._get_measures_timeserie, + [(metric, aggregation, granularity, + from_timestamp, to_timestamp) + for metric in metrics]) + for i, ts in enumerate(tss): + tss[i] = ts.resample(resample) + else: + tss = self._map_in_thread(self._get_measures_timeserie, + [(metric, aggregation, g, + from_timestamp, to_timestamp) + for metric in metrics + for g in granularities_in_common]) + try: return [(timestamp.replace(tzinfo=iso8601.iso8601.UTC), r, v) for timestamp, r, v diff --git a/gnocchi/tests/gabbi/gabbits/aggregation.yaml b/gnocchi/tests/gabbi/gabbits/aggregation.yaml index 196f18ac..19fdd897 100644 --- a/gnocchi/tests/gabbi/gabbits/aggregation.yaml +++ b/gnocchi/tests/gabbi/gabbits/aggregation.yaml @@ -118,6 +118,16 @@ tests: - 
['2015-03-06T14:33:57+00:00', 1.0, 3.1] - ['2015-03-06T14:34:12+00:00', 1.0, 2.0] + - name: get metric list to get aggregates 5 + GET: /v1/metric + + - name: get measure aggregates and resample + GET: /v1/aggregation/metric?metric=$RESPONSE['$[0].id']&metric=$RESPONSE['$[1].id']&granularity=1&resample=60 + response_json_paths: + $: + - ['2015-03-06T14:33:00+00:00', 60.0, 23.1] + - ['2015-03-06T14:34:00+00:00', 60.0, 7.0] + # Aggregation by resource and metric_name - name: post a resource @@ -197,6 +207,35 @@ tests: - ['2015-03-06T14:33:57+00:00', 1.0, 23.1] - ['2015-03-06T14:34:12+00:00', 1.0, 7.0] + - name: get measure aggregates by granularity from resources and resample + POST: /v1/aggregation/resource/generic/metric/agg_meter?granularity=1&resample=60 + request_headers: + x-user-id: 0fbb231484614b1a80131fc22f6afc9c + x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea + content-type: application/json + response_json_paths: + $: + - ['2015-03-06T14:33:00+00:00', 60.0, 23.1] + - ['2015-03-06T14:34:00+00:00', 60.0, 7.0] + + - name: get measure aggregates by granularity from resources and bad resample + POST: /v1/aggregation/resource/generic/metric/agg_meter?resample=abc + request_headers: + x-user-id: 0fbb231484614b1a80131fc22f6afc9c + x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea + content-type: application/json + status: 400 + + - name: get measure aggregates by granularity from resources and resample no granularity + POST: /v1/aggregation/resource/generic/metric/agg_meter?resample=60 + request_headers: + x-user-id: 0fbb231484614b1a80131fc22f6afc9c + x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea + content-type: application/json + status: 400 + response_strings: + - A granularity must be specified to resample + - name: get measure aggregates by granularity with timestamps from resources POST: /v1/aggregation/resource/generic/metric/agg_meter?start=2015-03-06T15:33:57%2B01:00&stop=2015-03-06T15:34:00%2B01:00 request_headers: diff --git 
a/releasenotes/notes/dynamic-resampling-b5e545b1485c152f.yaml b/releasenotes/notes/dynamic-resampling-b5e545b1485c152f.yaml new file mode 100644 index 00000000..b2c5167b --- /dev/null +++ b/releasenotes/notes/dynamic-resampling-b5e545b1485c152f.yaml @@ -0,0 +1,6 @@ +--- +features: + - Add `resample` parameter to support resampling stored time-series to + another granularity not necessarily in existing archive policy. If both + resampling and reaggregation parameters are specified, resampling will + occur prior to reaggregation. -- GitLab From 0c66859f82cbfd5cf1ee79bfc8c8c4bd373dca6e Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Mon, 7 Nov 2016 18:56:18 +0100 Subject: [PATCH 0468/1483] carbonara: add support for Gnocchi v2 measures format MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This adds a fallback method when unserializing measures so Gnocchi 3 is able to read measures stored in Gnocchi 2 format. Change-Id: Ic863ace3ac8df430e5de93f17a45f73398ed143f --- doc/source/install.rst | 10 ++++------ gnocchi/storage/_carbonara.py | 17 ++++++++++++++--- gnocchi/storage/ceph.py | 2 +- gnocchi/storage/file.py | 2 +- gnocchi/storage/s3.py | 2 +- gnocchi/storage/swift.py | 2 +- 6 files changed, 22 insertions(+), 13 deletions(-) diff --git a/doc/source/install.rst b/doc/source/install.rst index cf927029..e7260517 100644 --- a/doc/source/install.rst +++ b/doc/source/install.rst @@ -93,15 +93,13 @@ that your indexer and storage are properly upgraded. Run the following: 1. Stop the old version of Gnocchi API server and `gnocchi-statsd` daemon -2. Make sure that the processing backlog is empty (`gnocchi status`) +2. Stop the old version of `gnocchi-metricd` daemon -3. Stop the old version of `gnocchi-metricd` daemon +3. Install the new version of Gnocchi -4. Install the new version of Gnocchi - -5. Run `gnocchi-upgrade` +4. Run `gnocchi-upgrade` This can take several hours depending on the size of your index and storage. -6. 
Start the new Gnocchi API server, `gnocchi-metricd` +5. Start the new Gnocchi API server, `gnocchi-metricd` and `gnocchi-statsd` daemons diff --git a/gnocchi/storage/_carbonara.py b/gnocchi/storage/_carbonara.py index 37460b4b..1ea8f0cf 100644 --- a/gnocchi/storage/_carbonara.py +++ b/gnocchi/storage/_carbonara.py @@ -26,6 +26,7 @@ import iso8601 import msgpack from oslo_config import cfg from oslo_log import log +from oslo_serialization import msgpackutils from oslo_utils import timeutils import pandas import six @@ -367,10 +368,20 @@ class CarbonaraBasedStorage(storage.StorageDriver): _MEASURE_SERIAL_FORMAT = "Qd" _MEASURE_SERIAL_LEN = struct.calcsize(_MEASURE_SERIAL_FORMAT) - def _unserialize_measures(self, data): + def _unserialize_measures(self, measure_id, data): nb_measures = len(data) // self._MEASURE_SERIAL_LEN - measures = struct.unpack( - "<" + self._MEASURE_SERIAL_FORMAT * nb_measures, data) + try: + measures = struct.unpack( + "<" + self._MEASURE_SERIAL_FORMAT * nb_measures, data) + except struct.error: + # This either a corruption, either a v2 measures + try: + return msgpackutils.loads(data) + except ValueError: + LOG.error( + "Unable to decode measure %s, possible data corruption", + measure_id) + raise return six.moves.zip( pandas.to_datetime(measures[::2], unit='ns'), itertools.islice(measures, 1, len(measures), 2)) diff --git a/gnocchi/storage/ceph.py b/gnocchi/storage/ceph.py index 35120e35..1d9a7089 100644 --- a/gnocchi/storage/ceph.py +++ b/gnocchi/storage/ceph.py @@ -236,7 +236,7 @@ class CephStorage(_carbonara.CarbonaraBasedStorage): measures = [] for n in object_names: data = self._get_object_content(n) - measures.extend(self._unserialize_measures(data)) + measures.extend(self._unserialize_measures(n, data)) yield measures diff --git a/gnocchi/storage/file.py b/gnocchi/storage/file.py index 8c91df59..4643dd49 100644 --- a/gnocchi/storage/file.py +++ b/gnocchi/storage/file.py @@ -194,7 +194,7 @@ class 
FileStorage(_carbonara.CarbonaraBasedStorage): for f in files: abspath = self._build_measure_path(metric.id, f) with open(abspath, "rb") as e: - measures.extend(self._unserialize_measures(e.read())) + measures.extend(self._unserialize_measures(f, e.read())) yield measures diff --git a/gnocchi/storage/s3.py b/gnocchi/storage/s3.py index 091a49ca..46d85367 100644 --- a/gnocchi/storage/s3.py +++ b/gnocchi/storage/s3.py @@ -229,7 +229,7 @@ class S3Storage(_carbonara.CarbonaraBasedStorage): Bucket=self._bucket_name_measures, Key=f) measures.extend( - self._unserialize_measures(response['Body'].read())) + self._unserialize_measures(f, response['Body'].read())) yield measures diff --git a/gnocchi/storage/swift.py b/gnocchi/storage/swift.py index ff8eafa9..2ef9d4fd 100644 --- a/gnocchi/storage/swift.py +++ b/gnocchi/storage/swift.py @@ -189,7 +189,7 @@ class SwiftStorage(_carbonara.CarbonaraBasedStorage): for f in files: headers, data = self.swift.get_object( self.MEASURE_PREFIX, f['name']) - measures.extend(self._unserialize_measures(data)) + measures.extend(self._unserialize_measures(f['name'], data)) yield measures -- GitLab From 219fd0d03900e72d4e176ec085b6d90d64d1c86e Mon Sep 17 00:00:00 2001 From: Hanxi Liu Date: Wed, 2 Nov 2016 12:50:10 +0800 Subject: [PATCH 0469/1483] rest: use flatten_dict_to_keypairs instead of recursive_keypairs flatten_dict_to_keypairs has been added to oslo.utils recently. So we should use it to replace recursive_keypairs. This has been released in oslo.utils 3.18.0 and we have to bump it. 
Change-Id: I3b2b18a7bd2651d6c7f1515e885e9ce79ea24201 --- gnocchi/rest/__init__.py | 14 ++------------ requirements.txt | 2 +- 2 files changed, 3 insertions(+), 13 deletions(-) diff --git a/gnocchi/rest/__init__.py b/gnocchi/rest/__init__.py index da1dad38..1036ff86 100644 --- a/gnocchi/rest/__init__.py +++ b/gnocchi/rest/__init__.py @@ -19,6 +19,7 @@ import uuid from concurrent import futures import jsonpatch +from oslo_utils import dictutils from oslo_utils import strutils import pecan from pecan import rest @@ -62,17 +63,6 @@ def get_user_and_project(): return (user_id, project_id) -# TODO(jd) Move this to oslo.utils as I stole it from Ceilometer -def recursive_keypairs(d, separator='.'): - """Generator that produces sequence of keypairs for nested dictionaries.""" - for name, value in sorted(six.iteritems(d)): - if isinstance(value, dict): - for subname, subvalue in recursive_keypairs(value, separator): - yield ('%s%s%s' % (name, separator, subname), subvalue) - else: - yield name, value - - def enforce(rule, target): """Return the user and project the request should be limited to. 
@@ -93,7 +83,7 @@ def enforce(rule, target): target = target.__dict__ # Flatten dict - target = dict(recursive_keypairs(target)) + target = dict(dictutils.flatten_dict_to_keypairs(d=target, separator='.')) if not pecan.request.policy_enforcer.enforce(rule, target, creds): abort(403) diff --git a/requirements.txt b/requirements.txt index 725e54cf..ad86cbc2 100644 --- a/requirements.txt +++ b/requirements.txt @@ -5,7 +5,7 @@ oslo.config>=2.6.0 oslo.log>=2.3.0 oslo.policy>=0.3.0 oslo.serialization>=1.4.0 -oslo.utils>=3.3.0 +oslo.utils>=3.18.0 oslo.middleware>=3.11.0 pandas>=0.17.0 pecan>=0.9 -- GitLab From fd8b81b230bc1e67225959332466f29726c9fdfd Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Mon, 7 Nov 2016 22:03:53 +0100 Subject: [PATCH 0470/1483] Bump hacking to 0.12 Change-Id: Ib844f02e9865e9c90e1f1de597362468aae110bc --- tox.ini | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tox.ini b/tox.ini index 6949277b..f4447303 100644 --- a/tox.ini +++ b/tox.ini @@ -91,7 +91,7 @@ commands = bashate -v devstack/plugin.sh devstack/gate/gate_hook.sh devstack/gat whitelist_externals = bash [testenv:pep8] -deps = hacking>=0.11,<0.12 +deps = hacking>=0.12,<0.13 commands = flake8 [testenv:py27-gate] -- GitLab From 21966a74e9ab127e02cde4915cd2d5a95d5b0814 Mon Sep 17 00:00:00 2001 From: gord chung Date: Tue, 1 Nov 2016 20:21:44 +0000 Subject: [PATCH 0471/1483] support pandas.to_timedelta pandas supports batch processing timedeltas. we should use that instead of parsing each item individually. testing showed it took 0.009s vs 0.026s to convert 1000 timestamps using pandas vs current implementation respectively. 
Change-Id: Iaf8bad65584a83c36761e614a07dcb0312697548 --- gnocchi/utils.py | 27 ++++++++++++++------------- 1 file changed, 14 insertions(+), 13 deletions(-) diff --git a/gnocchi/utils.py b/gnocchi/utils.py index 4b952765..851d9150 100644 --- a/gnocchi/utils.py +++ b/gnocchi/utils.py @@ -21,7 +21,7 @@ import numbers import iso8601 import numpy from oslo_utils import timeutils -import pandas +import pandas as pd from pytimeparse import timeparse import six import tenacity @@ -73,19 +73,12 @@ def to_timestamps(values): try: values = list(values) if isinstance(values[0], numbers.Real): - times = pandas.to_datetime(values, utc=True, box=False, - unit='s') - elif isinstance(values[0], datetime.datetime): - times = pandas.to_datetime(values, utc=True, box=False) + times = pd.to_datetime(values, utc=True, box=False, unit='s') + elif (isinstance(values[0], datetime.datetime) or + is_valid_timestamp(values[0])): + times = pd.to_datetime(values, utc=True, box=False) else: - timestamps = [] - for v in values: - delta = timeparse.timeparse(v) - timestamps.append(v - if delta is None - else numpy.datetime64(timeutils.utcnow()) - + numpy.timedelta64(delta)) - times = pandas.to_datetime(timestamps, utc=True, box=False) + times = (utcnow() + pd.to_timedelta(values)).values except ValueError: raise ValueError("Unable to convert timestamps") @@ -95,6 +88,14 @@ def to_timestamps(values): return times +def is_valid_timestamp(value): + try: + pd.to_datetime(value) + except Exception: + return False + return True + + def to_timestamp(value): return to_timestamps((value,))[0] -- GitLab From 69597091962744a24bbeb61bd8ed56ccc626fd0a Mon Sep 17 00:00:00 2001 From: gord chung Date: Tue, 8 Nov 2016 17:12:58 +0000 Subject: [PATCH 0472/1483] drop pytimeparse requirement use pandas timedelta as it offers comparable single item parsing and better batch parsing Change-Id: Ifd79385a4363ae4a9da5e9e94513ac98c7cdb1cb --- gnocchi/aggregates/moving_stats.py | 9 ++++----- gnocchi/tests/test_aggregates.py 
| 11 +++-------- gnocchi/utils.py | 5 +---- requirements.txt | 1 - 4 files changed, 8 insertions(+), 18 deletions(-) diff --git a/gnocchi/aggregates/moving_stats.py b/gnocchi/aggregates/moving_stats.py index fa4290ae..3645a0f3 100644 --- a/gnocchi/aggregates/moving_stats.py +++ b/gnocchi/aggregates/moving_stats.py @@ -16,14 +16,13 @@ import datetime import numpy +from oslo_utils import strutils +from oslo_utils import timeutils import pandas import six from gnocchi import aggregates - -from oslo_utils import strutils -from oslo_utils import timeutils -from pytimeparse import timeparse +from gnocchi import utils class MovingAverage(aggregates.CustomAggregator): @@ -35,7 +34,7 @@ class MovingAverage(aggregates.CustomAggregator): msg = 'Moving aggregate must have window specified.' raise aggregates.CustomAggFailure(msg) try: - return float(timeparse.timeparse(six.text_type(window))) + return utils.to_timespan(six.text_type(window)).total_seconds() except Exception: raise aggregates.CustomAggFailure('Invalid value for window') diff --git a/gnocchi/tests/test_aggregates.py b/gnocchi/tests/test_aggregates.py index 7ff7d490..1100e33c 100644 --- a/gnocchi/tests/test_aggregates.py +++ b/gnocchi/tests/test_aggregates.py @@ -46,14 +46,9 @@ class TestAggregates(tests_base.TestCase): self.assertEqual(60.0, result) window = '60' - self.assertRaises(aggregates.CustomAggFailure, - agg_obj.check_window_valid, - window) - - window = None - self.assertRaises(aggregates.CustomAggFailure, - agg_obj.check_window_valid, - window) + agg_obj = self.custom_agg[agg_method] + result = agg_obj.check_window_valid(window) + self.assertEqual(60.0, result) def _test_create_metric_and_data(self, data, spacing): metric = storage.Metric( diff --git a/gnocchi/utils.py b/gnocchi/utils.py index 851d9150..590088b4 100644 --- a/gnocchi/utils.py +++ b/gnocchi/utils.py @@ -22,7 +22,6 @@ import iso8601 import numpy from oslo_utils import timeutils import pandas as pd -from pytimeparse import timeparse import 
six import tenacity import uuid @@ -116,11 +115,9 @@ def to_timespan(value): seconds = float(value) except Exception: try: - seconds = timeparse.timeparse(six.text_type(value)) + seconds = pd.Timedelta(six.text_type(value)).total_seconds() except Exception: raise ValueError("Unable to parse timespan") - if seconds is None: - raise ValueError("Unable to parse timespan") if seconds <= 0: raise ValueError("Timespan must be positive") return datetime.timedelta(seconds=seconds) diff --git a/requirements.txt b/requirements.txt index ad86cbc2..79626e71 100644 --- a/requirements.txt +++ b/requirements.txt @@ -9,7 +9,6 @@ oslo.utils>=3.18.0 oslo.middleware>=3.11.0 pandas>=0.17.0 pecan>=0.9 -pytimeparse>=1.1.5 futures jsonpatch cotyledon>=1.5.0 -- GitLab From 9af1d884201e54f0efcb3f83b2148e715a63754a Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Mon, 7 Nov 2016 12:53:09 +0100 Subject: [PATCH 0473/1483] metricd: retry slowly coordination connection failure If the coordinator fails to start (e.g. ToozConnectionError), the metricd subprocess started by Cotyledon fails to start and raises an error. That means Cotyledon will retry to spawn the process with no delay at all, spamming the CPU with forking request. This patches fatorize the coordination retrieval and connection code into one function in gnocchi.utils, which makes sure both metricd and the Carbonara based drivers leverage the same code to retry to connection to the coordinator with some delay. 
Change-Id: I83157c5fdb0a3e488a9b788d48d974de80219dbb (cherry picked from commit 04917de5852a5a2c4a740f71772cf30f6d0a9a54) --- gnocchi/cli.py | 10 ++-------- gnocchi/storage/_carbonara.py | 14 ++------------ gnocchi/utils.py | 25 ++++++++++++++++++++++++- 3 files changed, 28 insertions(+), 21 deletions(-) diff --git a/gnocchi/cli.py b/gnocchi/cli.py index 8ae29ca4..f7c68dcc 100644 --- a/gnocchi/cli.py +++ b/gnocchi/cli.py @@ -16,7 +16,6 @@ import multiprocessing import threading import time -import uuid import cotyledon from futurist import periodics @@ -143,16 +142,11 @@ class MetricScheduler(MetricProcessBase): TASKS_PER_WORKER = 16 BLOCK_SIZE = 4 - def _enable_coordination(self, conf): - self._coord = coordination.get_coordinator( - conf.storage.coordination_url, self._my_id) - self._coord.start(start_heart=True) - def __init__(self, worker_id, conf, queue): super(MetricScheduler, self).__init__( worker_id, conf, conf.storage.metric_processing_delay) - self._my_id = str(uuid.uuid4()) - self._enable_coordination(conf) + self._coord, self._my_id = utils.get_coordinator_and_start( + conf.storage.coordination_url) self.queue = queue self.previously_scheduled_metrics = set() self.workers = conf.metricd.workers diff --git a/gnocchi/storage/_carbonara.py b/gnocchi/storage/_carbonara.py index 55992877..ee6061a4 100644 --- a/gnocchi/storage/_carbonara.py +++ b/gnocchi/storage/_carbonara.py @@ -65,24 +65,14 @@ class CarbonaraBasedStorage(storage.StorageDriver): def __init__(self, conf): super(CarbonaraBasedStorage, self).__init__(conf) - self.coord = coordination.get_coordinator( - conf.coordination_url, - str(uuid.uuid4()).encode('ascii')) self.aggregation_workers_number = conf.aggregation_workers_number if self.aggregation_workers_number == 1: # NOTE(jd) Avoid using futures at all if we don't want any threads. 
self._map_in_thread = self._map_no_thread else: self._map_in_thread = self._map_in_futures_threads - self.start() - - @utils.retry - def start(self): - try: - self.coord.start(start_heart=True) - except Exception as e: - LOG.error("Unable to start coordinator: %s" % e) - raise utils.Retry(e) + self.coord, my_id = utils.get_coordinator_and_start( + conf.coordination_url) def stop(self): self.coord.stop() diff --git a/gnocchi/utils.py b/gnocchi/utils.py index a49b161d..d823aace 100644 --- a/gnocchi/utils.py +++ b/gnocchi/utils.py @@ -14,13 +14,19 @@ # License for the specific language governing permissions and limitations # under the License. import datetime +import uuid import iso8601 + +from oslo_log import log from oslo_utils import timeutils from pytimeparse import timeparse import retrying import six -import uuid +from tooz import coordination + + +LOG = log.getLogger(__name__) # uuid5 namespace for id transformation. # NOTE(chdent): This UUID must stay the same, forever, across all @@ -64,6 +70,23 @@ retry = retrying.retry(wait_exponential_multiplier=500, retry_on_exception=retry_if_retry_is_raised) +# TODO(jd) Move this to tooz? +@retry +def _enable_coordination(coord): + try: + coord.start(start_heart=True) + except Exception as e: + LOG.error("Unable to start coordinator: %s", e) + raise Retry(e) + + +def get_coordinator_and_start(url): + my_id = str(uuid.uuid4()) + coord = coordination.get_coordinator(url, my_id) + _enable_coordination(coord) + return coord, my_id + + def to_timestamp(v): if isinstance(v, datetime.datetime): return v -- GitLab From 2cf420db477335608602352957626de5bf79dceb Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Mon, 7 Nov 2016 18:56:18 +0100 Subject: [PATCH 0474/1483] carbonara: add support for Gnocchi v2 measures format MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This adds a fallback method when unserializing measures so Gnocchi 3 is able to read measures stored in Gnocchi 2 format. 
Change-Id: Ic863ace3ac8df430e5de93f17a45f73398ed143f (cherry picked from commit 0c66859f82cbfd5cf1ee79bfc8c8c4bd373dca6e) --- doc/source/install.rst | 10 ++++------ gnocchi/storage/_carbonara.py | 17 ++++++++++++++--- gnocchi/storage/ceph.py | 2 +- gnocchi/storage/file.py | 2 +- gnocchi/storage/swift.py | 2 +- 5 files changed, 21 insertions(+), 12 deletions(-) diff --git a/doc/source/install.rst b/doc/source/install.rst index 91a5c142..3f75bfb4 100644 --- a/doc/source/install.rst +++ b/doc/source/install.rst @@ -92,15 +92,13 @@ that your indexer and storage are properly upgraded. Run the following: 1. Stop the old version of Gnocchi API server and `gnocchi-statsd` daemon -2. Make sure that the processing backlog is empty (`gnocchi status`) +2. Stop the old version of `gnocchi-metricd` daemon -3. Stop the old version of `gnocchi-metricd` daemon +3. Install the new version of Gnocchi -4. Install the new version of Gnocchi - -5. Run `gnocchi-upgrade` +4. Run `gnocchi-upgrade` This can take several hours depending on the size of your index and storage. -6. Start the new Gnocchi API server, `gnocchi-metricd` +5. 
Start the new Gnocchi API server, `gnocchi-metricd` and `gnocchi-statsd` daemons diff --git a/gnocchi/storage/_carbonara.py b/gnocchi/storage/_carbonara.py index ee6061a4..a32772d7 100644 --- a/gnocchi/storage/_carbonara.py +++ b/gnocchi/storage/_carbonara.py @@ -26,6 +26,7 @@ import iso8601 import msgpack from oslo_config import cfg from oslo_log import log +from oslo_serialization import msgpackutils from oslo_utils import timeutils import pandas import six @@ -362,10 +363,20 @@ class CarbonaraBasedStorage(storage.StorageDriver): _MEASURE_SERIAL_FORMAT = "Qd" _MEASURE_SERIAL_LEN = struct.calcsize(_MEASURE_SERIAL_FORMAT) - def _unserialize_measures(self, data): + def _unserialize_measures(self, measure_id, data): nb_measures = len(data) // self._MEASURE_SERIAL_LEN - measures = struct.unpack( - "<" + self._MEASURE_SERIAL_FORMAT * nb_measures, data) + try: + measures = struct.unpack( + "<" + self._MEASURE_SERIAL_FORMAT * nb_measures, data) + except struct.error: + # This either a corruption, either a v2 measures + try: + return msgpackutils.loads(data) + except ValueError: + LOG.error( + "Unable to decode measure %s, possible data corruption", + measure_id) + raise return six.moves.zip( pandas.to_datetime(measures[::2], unit='ns'), itertools.islice(measures, 1, len(measures), 2)) diff --git a/gnocchi/storage/ceph.py b/gnocchi/storage/ceph.py index dc4b1696..628ee019 100644 --- a/gnocchi/storage/ceph.py +++ b/gnocchi/storage/ceph.py @@ -220,7 +220,7 @@ class CephStorage(_carbonara.CarbonaraBasedStorage): measures = [] for n in object_names: data = self._get_object_content(n) - measures.extend(self._unserialize_measures(data)) + measures.extend(self._unserialize_measures(n, data)) yield measures diff --git a/gnocchi/storage/file.py b/gnocchi/storage/file.py index 8c91df59..4643dd49 100644 --- a/gnocchi/storage/file.py +++ b/gnocchi/storage/file.py @@ -194,7 +194,7 @@ class FileStorage(_carbonara.CarbonaraBasedStorage): for f in files: abspath = 
self._build_measure_path(metric.id, f) with open(abspath, "rb") as e: - measures.extend(self._unserialize_measures(e.read())) + measures.extend(self._unserialize_measures(f, e.read())) yield measures diff --git a/gnocchi/storage/swift.py b/gnocchi/storage/swift.py index ff8eafa9..2ef9d4fd 100644 --- a/gnocchi/storage/swift.py +++ b/gnocchi/storage/swift.py @@ -189,7 +189,7 @@ class SwiftStorage(_carbonara.CarbonaraBasedStorage): for f in files: headers, data = self.swift.get_object( self.MEASURE_PREFIX, f['name']) - measures.extend(self._unserialize_measures(data)) + measures.extend(self._unserialize_measures(f['name'], data)) yield measures -- GitLab From c5779e4dc26c3b059adb2743bbf4384e5d2a52b6 Mon Sep 17 00:00:00 2001 From: fengchaoyang Date: Mon, 14 Nov 2016 17:56:27 +0800 Subject: [PATCH 0475/1483] Remove usage of deprecated operatorPrecedence and remove duplicate operators "operatorPrecedence" has been deprecated, and it will be dropped in a future release. (see http://pythonhosted.org/pyparsing/pyparsing-module.html#operatorPrecedence) Operators "like" "in" operators are already included earlier in the list Change-Id: If84d79e0b8c4620a99233c768c33a9dd43fc551f --- gnocchi/rest/__init__.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/gnocchi/rest/__init__.py b/gnocchi/rest/__init__.py index abf3b5ac..c050ca10 100644 --- a/gnocchi/rest/__init__.py +++ b/gnocchi/rest/__init__.py @@ -1109,7 +1109,7 @@ class QueryStringSearchAttrFilter(object): uninary_operators = ("not", ) binary_operator = (u">=", u"<=", u"!=", u">", u"<", u"=", u"==", u"eq", u"ne", u"lt", u"gt", u"ge", u"le", u"in", u"like", u"≠", - u"≥", u"≤", u"like" "in") + u"≥", u"≤") multiple_operators = (u"and", u"or", u"∧", u"∨") operator = pyparsing.Regex(u"|".join(binary_operator)) @@ -1135,7 +1135,7 @@ class QueryStringSearchAttrFilter(object): quoted_string | in_list) condition = pyparsing.Group(comparison_term + operator + comparison_term) - expr = 
pyparsing.operatorPrecedence(condition, [ + expr = pyparsing.infixNotation(condition, [ ("not", 1, pyparsing.opAssoc.RIGHT, ), ("and", 2, pyparsing.opAssoc.LEFT, ), ("∧", 2, pyparsing.opAssoc.LEFT, ), -- GitLab From ff53ff89929acc57ec81b24d40ef047261b8bcc3 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Thu, 10 Nov 2016 15:01:14 +0100 Subject: [PATCH 0476/1483] utils: do not retry on any exception We only want to retry when asking for it, not on any exception as it is by default. Change-Id: I5519996031087c8a0e8ae4e0bca5a661dfc95a0d --- gnocchi/utils.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/gnocchi/utils.py b/gnocchi/utils.py index 7125f069..8f794dec 100644 --- a/gnocchi/utils.py +++ b/gnocchi/utils.py @@ -63,6 +63,8 @@ def UUID(value): # Retry with exponential backoff for up to 1 minute retry = tenacity.retry( wait=tenacity.wait_exponential(multiplier=0.5, max=60), + # Never retry except when explicitly asked by raising TryAgain + retry=tenacity.retry_never, reraise=True) -- GitLab From e2732aff9a3ec4d3b9bceca1fd317f92b3bfa85d Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Thu, 15 Sep 2016 16:39:58 +0200 Subject: [PATCH 0477/1483] ceph: make computed measures read async With this change Gnocchi will fetch all measures associated with a metric in parallel using the Ceph aio API. 
Change-Id: Idcab0eb447962f0d3c049f301b543806b673c869 --- gnocchi/storage/ceph.py | 33 +++++++++++++++++-- .../ceph-read-async-ca2f7512c6842adb.yaml | 4 +++ 2 files changed, 34 insertions(+), 3 deletions(-) create mode 100644 releasenotes/notes/ceph-read-async-ca2f7512c6842adb.yaml diff --git a/gnocchi/storage/ceph.py b/gnocchi/storage/ceph.py index 1d9a7089..273786ac 100644 --- a/gnocchi/storage/ceph.py +++ b/gnocchi/storage/ceph.py @@ -17,6 +17,7 @@ from collections import defaultdict import contextlib import datetime import errno +import functools import itertools import uuid @@ -234,9 +235,35 @@ class CephStorage(_carbonara.CarbonaraBasedStorage): object_names = list(self._list_object_names_to_process(object_prefix)) measures = [] - for n in object_names: - data = self._get_object_content(n) - measures.extend(self._unserialize_measures(n, data)) + ops = [] + bufsize = 8192 # Same sa rados_read one + + tmp_measures = {} + + def add_to_measures(name, comp, data): + if name in tmp_measures: + tmp_measures[name] += data + else: + tmp_measures[name] = data + if len(data) < bufsize: + measures.extend(self._unserialize_measures(name, + tmp_measures[name])) + del tmp_measures[name] + else: + ops.append(self.ioctx.aio_read( + name, bufsize, len(tmp_measures[name]), + functools.partial(add_to_measures, name) + )) + + for name in object_names: + ops.append(self.ioctx.aio_read( + name, bufsize, 0, + functools.partial(add_to_measures, name) + )) + + while ops: + op = ops.pop() + op.wait_for_complete_and_cb() yield measures diff --git a/releasenotes/notes/ceph-read-async-ca2f7512c6842adb.yaml b/releasenotes/notes/ceph-read-async-ca2f7512c6842adb.yaml new file mode 100644 index 00000000..2dfe37de --- /dev/null +++ b/releasenotes/notes/ceph-read-async-ca2f7512c6842adb.yaml @@ -0,0 +1,4 @@ +--- +other: + - ceph driver now uses the rados async api to retrieve + measurements to process in parallel. 
-- GitLab From db7ea6aea5c94b6f13c0d1d9fdae5308a816c1d0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E2=80=9Czhangshengping2012=E2=80=9D?= Date: Wed, 16 Nov 2016 11:46:04 +0800 Subject: [PATCH 0478/1483] Replace retry with tenacity.retry Change-Id: I9b6e5e1adfb2d836378ea0fa463a9adbed8983d2 --- gnocchi/storage/s3.py | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/gnocchi/storage/s3.py b/gnocchi/storage/s3.py index 46d85367..29156e12 100644 --- a/gnocchi/storage/s3.py +++ b/gnocchi/storage/s3.py @@ -21,8 +21,8 @@ import os import uuid from oslo_config import cfg -import retrying import six +import tenacity try: import boto3 import botocore.exceptions @@ -87,9 +87,11 @@ class S3Storage(_carbonara.CarbonaraBasedStorage): # NOTE(jd) OperationAborted might be raised if we try to create the bucket # for the first time at the same time - @retrying.retry(stop_max_attempt_number=10, - wait_fixed=500, - retry_on_exception=retry_if_operationaborted) + @tenacity.retry( + stop=tenacity.stop_after_attempt(10), + wait=tenacity.wait_fixed(0.5), + retry=tenacity.retry_if_exception(retry_if_operationaborted) + ) def _create_bucket(self, name): if self._region_name: kwargs = dict(CreateBucketConfiguration={ -- GitLab From 516e68111a7055641131bc22485dbaaafa856304 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Mon, 21 Nov 2016 12:42:09 +0100 Subject: [PATCH 0479/1483] rest: don't fail if the batch measure is not a dict Change-Id: I59a044d3086d0d45fa5ce74fae62ce298799e32e --- gnocchi/rest/__init__.py | 2 ++ gnocchi/tests/gabbi/gabbits/batch-measures.yaml | 14 ++++++++++++++ 2 files changed, 16 insertions(+) diff --git a/gnocchi/rest/__init__.py b/gnocchi/rest/__init__.py index c050ca10..67ebdc91 100644 --- a/gnocchi/rest/__init__.py +++ b/gnocchi/rest/__init__.py @@ -404,6 +404,8 @@ class ArchivePolicyRulesController(rest.RestController): def MeasuresListSchema(measures): try: times = utils.to_timestamps((m['timestamp'] for m in measures)) + except 
TypeError: + abort(400, "Invalid format for measures") except ValueError as e: abort(400, "Invalid input for timestamp: %s" % e) diff --git a/gnocchi/tests/gabbi/gabbits/batch-measures.yaml b/gnocchi/tests/gabbi/gabbits/batch-measures.yaml index 7eae012d..47dd13c6 100644 --- a/gnocchi/tests/gabbi/gabbits/batch-measures.yaml +++ b/gnocchi/tests/gabbi/gabbits/batch-measures.yaml @@ -205,3 +205,17 @@ tests: $.description.cause: "Unknown resources" $.description.detail: - "aaaaaaaa-d63b-4cdd-be89-111111111111" + + - name: push measurements to named metrics and resource with create_metrics with wrong measure objects + POST: /v1/batch/resources/metrics/measures?create_metrics=true + request_headers: + content-type: application/json + accept: application/json + data: + 46c9418d-d63b-4cdd-be89-8f57ffc5952e: + auto.test: + - [ "2015-03-06T14:33:57", 43.1] + - [ "2015-03-06T14:34:12", 12] + status: 400 + response_strings: + - "Invalid format for measures" -- GitLab From 4e5b18d2d5e0b3e27502d457f9f28b839a560438 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Tue, 22 Nov 2016 12:59:25 +0100 Subject: [PATCH 0480/1483] rest: fix batching error handling When using measure batching and some resources doesn't exists we must return the list of all missing resources. 
Change-Id: I2752c71bfc77166c7d48e1f5031ac0ee175c09ac --- gnocchi/rest/__init__.py | 9 +++++---- gnocchi/tests/gabbi/gabbits/batch-measures.yaml | 10 +++++++++- 2 files changed, 14 insertions(+), 5 deletions(-) diff --git a/gnocchi/rest/__init__.py b/gnocchi/rest/__init__.py index 67ebdc91..ac580bb3 100644 --- a/gnocchi/rest/__init__.py +++ b/gnocchi/rest/__init__.py @@ -1390,6 +1390,7 @@ class ResourcesMetricsMeasuresBatchController(rest.RestController): known_metrics = [] unknown_metrics = [] + unknown_resources = set() for resource_id in body: names = body[resource_id].keys() metrics = pecan.request.indexer.list_metrics( @@ -1398,7 +1399,6 @@ class ResourcesMetricsMeasuresBatchController(rest.RestController): known_names = [m.name for m in metrics] if strutils.bool_from_string(create_metrics): user_id, project_id = get_user_and_project() - unknown_resources = set() for name in names: if name not in known_names: metric = MetricsController.MetricSchema({ @@ -1419,9 +1419,6 @@ class ResourcesMetricsMeasuresBatchController(rest.RestController): # This catch NoSuchArchivePolicy, which is unlikely # be still possible abort(400, e) - if unknown_resources: - abort(400, {"cause": "Unknown resources", - "detail": unknown_resources}) elif len(names) != len(metrics): unknown_metrics.extend( @@ -1430,6 +1427,10 @@ class ResourcesMetricsMeasuresBatchController(rest.RestController): known_metrics.extend(metrics) + if unknown_resources: + abort(400, {"cause": "Unknown resources", + "detail": unknown_resources}) + if unknown_metrics: abort(400, "Unknown metrics: %s" % ", ".join( sorted(unknown_metrics))) diff --git a/gnocchi/tests/gabbi/gabbits/batch-measures.yaml b/gnocchi/tests/gabbi/gabbits/batch-measures.yaml index 47dd13c6..ef820412 100644 --- a/gnocchi/tests/gabbi/gabbits/batch-measures.yaml +++ b/gnocchi/tests/gabbi/gabbits/batch-measures.yaml @@ -200,11 +200,19 @@ tests: value: 43.1 - timestamp: "2015-03-06T14:34:12" value: 12 + bbbbbbbb-d63b-4cdd-be89-111111111111: + 
auto.test: + - timestamp: "2015-03-06T14:33:57" + value: 43.1 + - timestamp: "2015-03-06T14:34:12" + value: 12 + status: 400 response_json_paths: $.description.cause: "Unknown resources" - $.description.detail: + $.description.detail.`sorted`: - "aaaaaaaa-d63b-4cdd-be89-111111111111" + - "bbbbbbbb-d63b-4cdd-be89-111111111111" - name: push measurements to named metrics and resource with create_metrics with wrong measure objects POST: /v1/batch/resources/metrics/measures?create_metrics=true -- GitLab From e6d3cee03aa4490a872b0ca478592c218f561f9c Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Tue, 22 Nov 2016 13:54:32 +0100 Subject: [PATCH 0481/1483] rest: Don't use private webob API We previously used body_file_raw, then switch to body_file_seekable to make it works with wsgiref. But body_file_seekable changes the request content length. Making the Gnocchi 'content length' different from the one previously read by the WSGI Server. If the Gnocchi one is smaller than the WSGI server one. They Gnocchi will read the wsgi.input only partially. While the WSGI server will not close or reuse the connection until the wsgi.input have been entirely read. In case of uwsgi, it will not return the status line neither. Also these two methods are undocumented, the public API for a file-like wsgi.input is body_file and it does a ton of crap to read the wsgi.input correctly. So just uses body_file. Change-Id: Iac81cf3d3a755abec97bf0b933e09d87b55657e2 --- gnocchi/rest/__init__.py | 10 +--------- 1 file changed, 1 insertion(+), 9 deletions(-) diff --git a/gnocchi/rest/__init__.py b/gnocchi/rest/__init__.py index ac580bb3..d5351d49 100644 --- a/gnocchi/rest/__init__.py +++ b/gnocchi/rest/__init__.py @@ -149,15 +149,7 @@ def deserialize(expected_content_types=None): if mime_type not in expected_content_types: abort(415) try: - # NOTE(sileht): We prefer use seekable if possible because it takes - # care of Content-Length when you use read()/readlines(). 
- # webob and wsgiref people are both well following RFCs, but sometimes - # their are case when the application can hang, because read is waiting - # for new data even the content-length is reach when keepalive - # connection are used. For more detail see: - # https://bugs.python.org/issue21878 - # https://github.com/Pylons/webob/issues/279 - params = json.load(pecan.request.body_file_seekable) + params = json.load(pecan.request.body_file) except Exception as e: abort(400, "Unable to decode body: " + six.text_type(e)) return params -- GitLab From 00b349ea7a1c230693b8950fd4d7290a04c32884 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Tue, 22 Nov 2016 14:30:53 +0100 Subject: [PATCH 0482/1483] indexer: list_metric(), skip sql if names is empty If list_metrics is called with an empty list this creates a sql request can will not match anything. Sqlachemey is complaining about this when we batch measure for resource without metrics. SAWarning: The IN-predicate on "metric.name" was invoked with an empty sequence. This results in a contradiction, which nonetheless can be expensive to evaluate. Consider alternative strategies for improved performance. 'strategies for improved performance.' % expr) This change fixes that. 
Change-Id: I9287c8d8ef1a5787f9245d78c197d870f959b0c6 --- gnocchi/indexer/sqlalchemy.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/gnocchi/indexer/sqlalchemy.py b/gnocchi/indexer/sqlalchemy.py index 3bdba1bd..bcfc2c28 100644 --- a/gnocchi/indexer/sqlalchemy.py +++ b/gnocchi/indexer/sqlalchemy.py @@ -665,6 +665,8 @@ class SQLAlchemyIndexer(indexer.IndexerDriver): sorts = sorts or [] if ids is not None and not ids: return [] + if names is not None and not names: + return [] with self.facade.independent_reader() as session: q = session.query(Metric).filter( Metric.status == status) -- GitLab From 38c2d32c010adac10ff5a045c95f813a0febc1bc Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Wed, 23 Nov 2016 09:03:16 +0100 Subject: [PATCH 0483/1483] rest: wait for the thread pool executor result Currently we return to wsgi server once all threads have been started and don't wait for their return. Keeping running threads not attached to a request make some wsgi server mad. And also returning 202 when it should 500 doesn't help to debug application. 
Change-Id: I922e0b7d61280ac42b1068afdcafe68fd5835ba8 --- gnocchi/rest/__init__.py | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/gnocchi/rest/__init__.py b/gnocchi/rest/__init__.py index 998630b2..737e6d75 100644 --- a/gnocchi/rest/__init__.py +++ b/gnocchi/rest/__init__.py @@ -1441,9 +1441,9 @@ class ResourcesMetricsMeasuresBatchController(rest.RestController): storage = pecan.request.storage with futures.ThreadPoolExecutor(max_workers=THREADS) as executor: - executor.map(lambda x: storage.add_measures(*x), - ((metric, body[metric.resource_id][metric.name]) - for metric in known_metrics)) + list(executor.map(lambda x: storage.add_measures(*x), + ((metric, body[metric.resource_id][metric.name]) + for metric in known_metrics))) pecan.response.status = 202 @@ -1474,8 +1474,9 @@ class MetricsMeasuresBatchController(rest.RestController): storage = pecan.request.storage with futures.ThreadPoolExecutor(max_workers=THREADS) as executor: - executor.map(lambda x: storage.add_measures(*x), - ((metric, body[metric.id]) for metric in metrics)) + list(executor.map(lambda x: storage.add_measures(*x), + ((metric, body[metric.id]) for metric in + metrics))) pecan.response.status = 202 -- GitLab From b48483af463ac3f0551b90f813baa82ac65cb7b0 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Wed, 23 Nov 2016 16:24:06 +0100 Subject: [PATCH 0484/1483] Remove 95pct and median from default archive policies This should makes the default at least 25% faster for metricd, which is quite of a big deal. These are still available as custom methods to add anyway. 
Change-Id: If390cd55f65fee836b12c04fa3b5fdabfb0c77d8 --- doc/source/architecture.rst | 10 +++++----- gnocchi/archive_policy.py | 3 +-- ...m-default-aggregation-methods-2f5ec059855e17f9.yaml | 5 +++++ 3 files changed, 11 insertions(+), 7 deletions(-) create mode 100644 releasenotes/notes/removed-median-and-95pct-from-default-aggregation-methods-2f5ec059855e17f9.yaml diff --git a/doc/source/architecture.rst b/doc/source/architecture.rst index 8ce43846..7babef96 100755 --- a/doc/source/architecture.rst +++ b/doc/source/architecture.rst @@ -77,12 +77,12 @@ For example, if you want to keep a year of data with a one minute resolution:: Then:: - size in bytes = 525 600 × 8 = 4 204 800 bytes = 4 106 KiB + size in bytes = 525 600 bytes × 6 = 3 159 600 bytes = 3 085 KiB This is just for a single aggregated time series. If your archive policy uses -the 8 default aggregation methods (mean, min, max, sum, std, median, count, -95pct) with the same "one year, one minute aggregations" resolution, the space -used will go up to a maximum of 8 × 4.1 MiB = 32.8 MiB. +the 6 default aggregation methods (mean, min, max, sum, std, count) with the +same "one year, one minute aggregations" resolution, the space used will go up +to a maximum of 6 × 4.1 MiB = 24.6 MiB. How to set the archive policy and granularity @@ -117,7 +117,7 @@ Default archive policies By default, 3 archive policies are created using the default archive policy list (listed in `default_aggregation_methods`, i.e. 
mean, min, max, sum, std, -median, count, 95pct): +count): - low (maximum estimated size per metric: 5 KiB) diff --git a/gnocchi/archive_policy.py b/gnocchi/archive_policy.py index 79217391..b5685f34 100644 --- a/gnocchi/archive_policy.py +++ b/gnocchi/archive_policy.py @@ -141,8 +141,7 @@ OPTS = [ 'default_aggregation_methods', item_type=types.String( choices=ArchivePolicy.VALID_AGGREGATION_METHODS), - default=['mean', 'min', 'max', 'sum', - 'std', 'median', 'count', '95pct'], + default=['mean', 'min', 'max', 'sum', 'std', 'count'], help='Default aggregation methods to use in created archive policies'), ] diff --git a/releasenotes/notes/removed-median-and-95pct-from-default-aggregation-methods-2f5ec059855e17f9.yaml b/releasenotes/notes/removed-median-and-95pct-from-default-aggregation-methods-2f5ec059855e17f9.yaml new file mode 100644 index 00000000..75ff241a --- /dev/null +++ b/releasenotes/notes/removed-median-and-95pct-from-default-aggregation-methods-2f5ec059855e17f9.yaml @@ -0,0 +1,5 @@ +--- +other: + - The default archive policies list does not contain the 95pct and median + aggregation methods by default. These are the least used methods and should + make gnocchi-metricd faster by more than 25% in the default scenario. -- GitLab From 8612ab884c161aa0b28d16a8233b12607f8cb70f Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Wed, 23 Nov 2016 12:18:23 +0100 Subject: [PATCH 0485/1483] storage: split process_new_measures() This change splits process_new_measures(), by separating the metrics loop and the locking mechanism from the the computation and the storage of the timeseries. 
Change-Id: Id98c67e3dfec4615564f096f10ac34603047292f --- gnocchi/storage/_carbonara.py | 207 ++++++++++++++++------------------ 1 file changed, 100 insertions(+), 107 deletions(-) diff --git a/gnocchi/storage/_carbonara.py b/gnocchi/storage/_carbonara.py index db470394..d3c4a72f 100644 --- a/gnocchi/storage/_carbonara.py +++ b/gnocchi/storage/_carbonara.py @@ -477,118 +477,111 @@ class CarbonaraBasedStorage(storage.StorageDriver): except coordination.LockAcquireFailed: LOG.debug("Cannot acquire lock for metric %s, postponing " "unprocessed measures deletion" % metric_id) + for metric in metrics: lock = self._lock(metric.id) - agg_methods = list(metric.archive_policy.aggregation_methods) # Do not block if we cannot acquire the lock, that means some other # worker is doing the job. We'll just ignore this metric and may # get back later to it if needed. - if lock.acquire(blocking=sync): - try: - locksw = timeutils.StopWatch().start() - LOG.debug("Processing measures for %s" % metric) - with self._process_measure_for_metric(metric) as measures: - # NOTE(mnaser): The metric could have been handled by - # another worker, ignore if no measures. 
- if len(measures) == 0: - LOG.debug("Skipping %s (already processed)" - % metric) - continue - - measures = sorted(measures, key=operator.itemgetter(0)) - - block_size = metric.archive_policy.max_block_size - try: - ts = self._get_unaggregated_timeserie_and_unserialize( # noqa - metric, - block_size=block_size, - back_window=metric.archive_policy.back_window) - except storage.MetricDoesNotExist: - try: - self._create_metric(metric) - except storage.MetricAlreadyExists: - # Created in the mean time, do not worry - pass - ts = None - except CorruptionError as e: - LOG.error(e) - ts = None - - if ts is None: - # This is the first time we treat measures for this - # metric, or data are corrupted, create a new one - ts = carbonara.BoundTimeSerie( - block_size=block_size, - back_window=metric.archive_policy.back_window) - current_first_block_timestamp = None - else: - current_first_block_timestamp = ( - ts.first_block_timestamp() - ) - - # NOTE(jd) This is Python where you need such - # hack to pass a variable around a closure, - # sorry. - computed_points = {"number": 0} - - def _map_add_measures(bound_timeserie): - # NOTE (gordc): bound_timeserie is entire set of - # unaggregated measures matching largest - # granularity. 
the following takes only the points - # affected by new measures for specific granularity - tstamp = max(bound_timeserie.first, measures[0][0]) - new_first_block_timestamp = ( - bound_timeserie.first_block_timestamp()) - computed_points['number'] = len(bound_timeserie) - for d in metric.archive_policy.definition: - ts = bound_timeserie.group_serie( - d.granularity, carbonara.round_timestamp( - tstamp, d.granularity * 10e8)) - self._map_in_thread( - self._add_measures, - ((aggregation, d, metric, ts, - current_first_block_timestamp, - new_first_block_timestamp) - for aggregation in agg_methods)) - - with timeutils.StopWatch() as sw: - ts.set_values( - measures, - before_truncate_callback=_map_add_measures, - ignore_too_old_timestamps=True) - elapsed = sw.elapsed() - number_of_operations = ( - len(agg_methods) - * len(metric.archive_policy.definition) - ) - - if elapsed > 0: - perf = " (%d points/s, %d measures/s)" % ( - ((number_of_operations - * computed_points['number']) / elapsed), - ((number_of_operations - * len(measures)) / elapsed) - ) - else: - perf = "" - LOG.debug( - "Computed new metric %s with %d new measures " - "in %.2f seconds%s" - % (metric.id, len(measures), elapsed, perf)) - - self._store_unaggregated_timeserie(metric, - ts.serialize()) - - LOG.debug("Metric %s locked during %.2f seconds" % - (metric.id, locksw.elapsed())) - except Exception: - LOG.debug("Metric %s locked during %.2f seconds" % - (metric.id, locksw.elapsed())) - if sync: - raise - LOG.error("Error processing new measures", exc_info=True) - finally: - lock.release() + if not lock.acquire(blocking=sync): + continue + try: + locksw = timeutils.StopWatch().start() + LOG.debug("Processing measures for %s" % metric) + with self._process_measure_for_metric(metric) as measures: + self._compute_and_store_timeseries(metric, measures) + LOG.debug("Metric %s locked during %.2f seconds" % + (metric.id, locksw.elapsed())) + except Exception: + LOG.debug("Metric %s locked during %.2f seconds" % + 
(metric.id, locksw.elapsed())) + if sync: + raise + LOG.error("Error processing new measures", exc_info=True) + finally: + lock.release() + + def _compute_and_store_timeseries(self, metric, measures): + # NOTE(mnaser): The metric could have been handled by + # another worker, ignore if no measures. + if len(measures) == 0: + LOG.debug("Skipping %s (already processed)" % metric) + return + + measures = sorted(measures, key=operator.itemgetter(0)) + + agg_methods = list(metric.archive_policy.aggregation_methods) + block_size = metric.archive_policy.max_block_size + back_window = metric.archive_policy.back_window + definition = metric.archive_policy.definition + + try: + ts = self._get_unaggregated_timeserie_and_unserialize( + metric, block_size=block_size, back_window=back_window) + except storage.MetricDoesNotExist: + try: + self._create_metric(metric) + except storage.MetricAlreadyExists: + # Created in the mean time, do not worry + pass + ts = None + except CorruptionError as e: + LOG.error(e) + ts = None + + if ts is None: + # This is the first time we treat measures for this + # metric, or data are corrupted, create a new one + ts = carbonara.BoundTimeSerie(block_size=block_size, + back_window=back_window) + current_first_block_timestamp = None + else: + current_first_block_timestamp = ts.first_block_timestamp() + + # NOTE(jd) This is Python where you need such + # hack to pass a variable around a closure, + # sorry. + computed_points = {"number": 0} + + def _map_add_measures(bound_timeserie): + # NOTE (gordc): bound_timeserie is entire set of + # unaggregated measures matching largest + # granularity. 
the following takes only the points + # affected by new measures for specific granularity + tstamp = max(bound_timeserie.first, measures[0][0]) + new_first_block_timestamp = bound_timeserie.first_block_timestamp() + computed_points['number'] = len(bound_timeserie) + for d in definition: + ts = bound_timeserie.group_serie( + d.granularity, carbonara.round_timestamp( + tstamp, d.granularity * 10e8)) + + self._map_in_thread( + self._add_measures, + ((aggregation, d, metric, ts, + current_first_block_timestamp, + new_first_block_timestamp) + for aggregation in agg_methods)) + + with timeutils.StopWatch() as sw: + ts.set_values(measures, + before_truncate_callback=_map_add_measures, + ignore_too_old_timestamps=True) + + elapsed = sw.elapsed() + number_of_operations = (len(agg_methods) * len(definition)) + perf = "" + if elapsed > 0: + perf = " (%d points/s, %d measures/s)" % ( + ((number_of_operations * computed_points['number']) / + elapsed), + ((number_of_operations * len(measures)) / elapsed) + ) + LOG.debug("Computed new metric %s with %d new measures " + "in %.2f seconds%s" + % (metric.id, len(measures), elapsed, perf)) + + self._store_unaggregated_timeserie(metric, ts.serialize()) def get_cross_metric_measures(self, metrics, from_timestamp=None, to_timestamp=None, aggregation='mean', -- GitLab From 5b833bb42617c6b8568aa5045b74a2c917f52ceb Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Thu, 24 Nov 2016 10:14:05 +0100 Subject: [PATCH 0486/1483] storage: remove _pending_measures_to_process_count() _pending_measures_to_process_count is used only in file driver and don't need to be part of the _carbonara storage interface Change-Id: I563e6143a69c9d93aeafd2b6d2faf1ebcd540615 --- gnocchi/storage/_carbonara.py | 4 ---- gnocchi/storage/ceph.py | 4 ---- gnocchi/storage/file.py | 7 ++----- gnocchi/storage/s3.py | 3 --- gnocchi/storage/swift.py | 3 --- 5 files changed, 2 insertions(+), 19 deletions(-) diff --git a/gnocchi/storage/_carbonara.py 
b/gnocchi/storage/_carbonara.py index d3c4a72f..34fbd8ef 100644 --- a/gnocchi/storage/_carbonara.py +++ b/gnocchi/storage/_carbonara.py @@ -344,10 +344,6 @@ class CarbonaraBasedStorage(storage.StorageDriver): def list_metric_with_measures_to_process(size, part, full=False): raise NotImplementedError - @staticmethod - def _pending_measures_to_process_count(metric_id): - raise NotImplementedError - def delete_metric(self, metric, sync=False): with self._lock(metric.id)(blocking=sync): # If the metric has never been upgraded, we need to delete this diff --git a/gnocchi/storage/ceph.py b/gnocchi/storage/ceph.py index 273786ac..97ab02af 100644 --- a/gnocchi/storage/ceph.py +++ b/gnocchi/storage/ceph.py @@ -203,10 +203,6 @@ class CephStorage(_carbonara.CarbonaraBasedStorage): return () return (k for k, v in omaps) - def _pending_measures_to_process_count(self, metric_id): - object_prefix = self.MEASURE_PREFIX + "_" + str(metric_id) - return len(list(self._list_object_names_to_process(object_prefix))) - def list_metric_with_measures_to_process(self, size, part, full=False): names = self._list_object_names_to_process() if full: diff --git a/gnocchi/storage/file.py b/gnocchi/storage/file.py index 4643dd49..29fc718b 100644 --- a/gnocchi/storage/file.py +++ b/gnocchi/storage/file.py @@ -141,8 +141,8 @@ class FileStorage(_carbonara.CarbonaraBasedStorage): def _build_report(self, details): metric_details = {} for metric in os.listdir(self.measure_path): - metric_details[metric] = ( - self._pending_measures_to_process_count(metric)) + metric_details[metric] = len( + self._list_measures_container_for_metric_id(metric)) return (len(metric_details.keys()), sum(metric_details.values()), metric_details if details else None) @@ -184,9 +184,6 @@ class FileStorage(_carbonara.CarbonaraBasedStorage): files = self._list_measures_container_for_metric_id(metric_id) self._delete_measures_files_for_metric_id(metric_id, files) - def _pending_measures_to_process_count(self, metric_id): - return 
len(self._list_measures_container_for_metric_id(metric_id)) - @contextlib.contextmanager def _process_measure_for_metric(self, metric): files = self._list_measures_container_for_metric_id(metric.id) diff --git a/gnocchi/storage/s3.py b/gnocchi/storage/s3.py index 29156e12..66990c33 100644 --- a/gnocchi/storage/s3.py +++ b/gnocchi/storage/s3.py @@ -195,9 +195,6 @@ class S3Storage(_carbonara.CarbonaraBasedStorage): return files - def _pending_measures_to_process_count(self, metric_id): - return len(self._list_measure_files_for_metric_id(metric_id)) - def _bulk_delete(self, bucket, objects): # NOTE(jd) The maximum object to delete at once is 1000 # TODO(jd) Parallelize? diff --git a/gnocchi/storage/swift.py b/gnocchi/storage/swift.py index 2ef9d4fd..e8975e69 100644 --- a/gnocchi/storage/swift.py +++ b/gnocchi/storage/swift.py @@ -159,9 +159,6 @@ class SwiftStorage(_carbonara.CarbonaraBasedStorage): full_listing=True) return files - def _pending_measures_to_process_count(self, metric_id): - return len(self._list_measure_files_for_metric_id(metric_id)) - def _bulk_delete(self, container, objects): objects = [quote(('/%s/%s' % (container, obj['name'])).encode('utf-8')) for obj in objects] -- GitLab From db389384bfa1dc5d662263512c3315a79ff8420e Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Thu, 24 Nov 2016 12:40:19 +0100 Subject: [PATCH 0487/1483] file: remove tmp configuration os.rename don't allow to put the tmp into another fs safelty so that doesn't make sense to be able to configure it. This will avoid to build a non working setup. 
Change-Id: Ice2830633141249dcfc9a11009a44fe6e4ee2279 --- gnocchi/storage/file.py | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/gnocchi/storage/file.py b/gnocchi/storage/file.py index 29fc718b..02b435e8 100644 --- a/gnocchi/storage/file.py +++ b/gnocchi/storage/file.py @@ -33,9 +33,6 @@ OPTS = [ cfg.StrOpt('file_basepath', default='/var/lib/gnocchi', help='Path used to store gnocchi data files.'), - cfg.StrOpt('file_basepath_tmp', - default='${file_basepath}/tmp', - help='Path used to store Gnocchi temporary files.'), ] @@ -46,7 +43,8 @@ class FileStorage(_carbonara.CarbonaraBasedStorage): def __init__(self, conf): super(FileStorage, self).__init__(conf) self.basepath = conf.file_basepath - self.basepath_tmp = conf.file_basepath_tmp + self.basepath_tmp = os.path.join(conf.file_basepath, + 'tmp') try: os.mkdir(self.basepath) except OSError as e: -- GitLab From 0b970f09c404300878284877f5d44837f8c644be Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Tue, 18 Oct 2016 18:33:20 +0200 Subject: [PATCH 0488/1483] doc: remove unused links Change-Id: If6dadcc595034003cd97d41feae73a19b5ea7424 --- doc/source/configuration.rst | 2 -- 1 file changed, 2 deletions(-) diff --git a/doc/source/configuration.rst b/doc/source/configuration.rst index 76b98624..7c5fcce7 100644 --- a/doc/source/configuration.rst +++ b/doc/source/configuration.rst @@ -72,8 +72,6 @@ edit the `api-paste.ini` file to add the Keystone authentication middleware:: .. _`Paste Deployment`: http://pythonpaste.org/deploy/ .. _`OpenStack Keystone`: http://launchpad.net/keystone -.. _`CORS`: https://en.wikipedia.org/wiki/Cross-origin_resource_sharing -.. 
_`Grafana`: http://grafana.org/ Driver notes -- GitLab From 14a7cbaa645c14dd04f859d56b937371518831f1 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Thu, 24 Nov 2016 15:06:09 +0100 Subject: [PATCH 0489/1483] config: only include oslo.middleware options that are shipped Adding options from middleware not enabled by default is not a good idea. Change-Id: I3ed922c84e6d37f2424262d108e6bdc5a03e2858 --- etc/gnocchi/gnocchi-config-generator.conf | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/etc/gnocchi/gnocchi-config-generator.conf b/etc/gnocchi/gnocchi-config-generator.conf index a7918068..ffbe5311 100644 --- a/etc/gnocchi/gnocchi-config-generator.conf +++ b/etc/gnocchi/gnocchi-config-generator.conf @@ -4,7 +4,8 @@ wrap_width = 79 namespace = gnocchi namespace = oslo.db namespace = oslo.log -namespace = oslo.middleware +namespace = oslo.middleware.cors +namespace = oslo.middleware.http_proxy_to_wsgi namespace = oslo.policy namespace = cotyledon namespace = keystonemiddleware.auth_token -- GitLab From e413a93036952245d3cfd56347857b928e61ad4e Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Thu, 24 Nov 2016 10:50:31 +0100 Subject: [PATCH 0490/1483] storage: split the storage interface Change-Id: I4933b99405c6c382b50d7ef5aa28146301ed5f0f --- gnocchi/cli.py | 7 +- gnocchi/rest/__init__.py | 8 +-- gnocchi/statsd.py | 2 +- gnocchi/storage/__init__.py | 31 ++------- gnocchi/storage/_carbonara.py | 56 ++-------------- gnocchi/storage/ceph.py | 8 ++- gnocchi/storage/file.py | 8 ++- gnocchi/storage/incoming/__init__.py | 46 +++++++++++++ gnocchi/storage/incoming/_carbonara.py | 85 +++++++++++++++++++++++++ gnocchi/storage/s3.py | 8 ++- gnocchi/storage/swift.py | 8 ++- gnocchi/tests/gabbi/fixtures.py | 3 +- gnocchi/tests/storage/test_carbonara.py | 2 +- gnocchi/tests/test_aggregates.py | 4 +- gnocchi/tests/test_rest.py | 2 +- gnocchi/tests/test_storage.py | 79 +++++++++++------------ tools/measures_injector.py | 2 +- 17 files changed, 220 insertions(+), 
139 deletions(-) create mode 100644 gnocchi/storage/incoming/__init__.py create mode 100644 gnocchi/storage/incoming/_carbonara.py diff --git a/gnocchi/cli.py b/gnocchi/cli.py index 3fc966a0..65dfceb3 100644 --- a/gnocchi/cli.py +++ b/gnocchi/cli.py @@ -126,7 +126,7 @@ class MetricReporting(MetricProcessBase): def _run_job(self): try: - report = self.store.measures_report(details=False) + report = self.store.incoming.measures_report(details=False) LOG.info("%d measurements bundles across %d " "metrics wait to be processed.", report['summary']['measures'], @@ -214,8 +214,9 @@ class MetricScheduler(MetricProcessBase): def _run_job(self): try: - metrics = set(self.store.list_metric_with_measures_to_process( - self.block_size, self.block_index)) + metrics = set( + self.store.incoming.list_metric_with_measures_to_process( + self.block_size, self.block_index)) if metrics and not self.queue.empty(): # NOTE(gordc): drop metrics we previously process to avoid # handling twice diff --git a/gnocchi/rest/__init__.py b/gnocchi/rest/__init__.py index 737e6d75..41148935 100644 --- a/gnocchi/rest/__init__.py +++ b/gnocchi/rest/__init__.py @@ -436,7 +436,7 @@ class MetricController(rest.RestController): if not isinstance(params, list): abort(400, "Invalid input for measures") if params: - pecan.request.storage.add_measures( + pecan.request.storage.incoming.add_measures( self.metric, MeasuresListSchema(params)) pecan.response.status = 202 @@ -1439,7 +1439,7 @@ class ResourcesMetricsMeasuresBatchController(rest.RestController): for metric in known_metrics: enforce("post measures", metric) - storage = pecan.request.storage + storage = pecan.request.storage.incoming with futures.ThreadPoolExecutor(max_workers=THREADS) as executor: list(executor.map(lambda x: storage.add_measures(*x), ((metric, body[metric.resource_id][metric.name]) @@ -1472,7 +1472,7 @@ class MetricsMeasuresBatchController(rest.RestController): for metric in metrics: enforce("post measures", metric) - storage = 
pecan.request.storage + storage = pecan.request.storage.incoming with futures.ThreadPoolExecutor(max_workers=THREADS) as executor: list(executor.map(lambda x: storage.add_measures(*x), ((metric, body[metric.id]) for metric in @@ -1671,7 +1671,7 @@ class StatusController(rest.RestController): @pecan.expose('json') def get(details=True): enforce("get status", {}) - report = pecan.request.storage.measures_report( + report = pecan.request.storage.incoming.measures_report( strutils.bool_from_string(details)) report_dict = {"storage": {"summary": report['summary']}} if 'details' in report: diff --git a/gnocchi/statsd.py b/gnocchi/statsd.py index ea00796a..da0e4f01 100644 --- a/gnocchi/statsd.py +++ b/gnocchi/statsd.py @@ -112,7 +112,7 @@ class Stats(object): archive_policy_name=ap_name, name=metric_name, resource_id=self.conf.statsd.resource_id) - self.storage.add_measures(metric, (measure,)) + self.storage.incoming.add_measures(metric, (measure,)) except Exception as e: LOG.error("Unable to add measure %s: %s" % (metric_name, e)) diff --git a/gnocchi/storage/__init__.py b/gnocchi/storage/__init__.py index 152aa20b..8b793548 100644 --- a/gnocchi/storage/__init__.py +++ b/gnocchi/storage/__init__.py @@ -155,13 +155,16 @@ def get_driver_class(conf): def get_driver(conf): """Return the configured driver.""" - return get_driver_class(conf)(conf.storage) + d = get_driver_class(conf)(conf.storage) + # TODO(sileht): Temporary set incoming driver here + # until we split all drivers + d.incoming = d + return d class StorageDriver(object): - @staticmethod - def __init__(conf): - pass + def __init__(self, conf): + self.incoming = None @staticmethod def stop(): @@ -217,15 +220,6 @@ class StorageDriver(object): # time, not a big deal pass - @staticmethod - def add_measures(metric, measures): - """Add a measure to a metric. - - :param metric: The metric measured. - :param measures: The actual measures. 
- """ - raise exceptions.NotImplementedError - @staticmethod def process_new_measures(indexer, metrics, sync=False): """Process added measures in background. @@ -234,17 +228,6 @@ class StorageDriver(object): the measures sent to metrics. This is used for that. """ - @staticmethod - def measures_report(details=True): - """Return a report of pending to process measures. - - Only useful for drivers that process measurements in background - - :return: {'summary': {'metrics': count, 'measures': count}, - 'details': {metric_id: pending_measures_count}} - """ - raise exceptions.NotImplementedError - @staticmethod def get_measures(metric, from_timestamp=None, to_timestamp=None, aggregation='mean', granularity=None, resample=None): diff --git a/gnocchi/storage/_carbonara.py b/gnocchi/storage/_carbonara.py index 34fbd8ef..0ee2e283 100644 --- a/gnocchi/storage/_carbonara.py +++ b/gnocchi/storage/_carbonara.py @@ -18,7 +18,6 @@ import collections import datetime import itertools import operator -import struct import uuid from concurrent import futures @@ -26,9 +25,7 @@ import iso8601 import msgpack from oslo_config import cfg from oslo_log import log -from oslo_serialization import msgpackutils from oslo_utils import timeutils -import pandas import six import six.moves from tooz import coordination @@ -61,7 +58,6 @@ class CorruptionError(ValueError): class CarbonaraBasedStorage(storage.StorageDriver): - MEASURE_PREFIX = "measure" UPGRADE_BATCH_SIZE = 1000 def __init__(self, conf): @@ -325,25 +321,10 @@ class CarbonaraBasedStorage(storage.StorageDriver): metric, key, split, aggregation, archive_policy_def, oldest_mutable_timestamp) - def add_measures(self, metric, measures): - measures = list(measures) - data = struct.pack( - "<" + self._MEASURE_SERIAL_FORMAT * len(measures), - *list(itertools.chain.from_iterable(measures))) - self._store_new_measures(metric, data) - - @staticmethod - def _store_new_measures(metric, data): - raise NotImplementedError - @staticmethod def 
_delete_metric(metric): raise NotImplementedError - @staticmethod - def list_metric_with_measures_to_process(size, part, full=False): - raise NotImplementedError - def delete_metric(self, metric, sync=False): with self._lock(metric.id)(blocking=sync): # If the metric has never been upgraded, we need to delete this @@ -355,34 +336,6 @@ class CarbonaraBasedStorage(storage.StorageDriver): aggregation, granularity, version=3): raise NotImplementedError - _MEASURE_SERIAL_FORMAT = "Qd" - _MEASURE_SERIAL_LEN = struct.calcsize(_MEASURE_SERIAL_FORMAT) - - def _unserialize_measures(self, measure_id, data): - nb_measures = len(data) // self._MEASURE_SERIAL_LEN - try: - measures = struct.unpack( - "<" + self._MEASURE_SERIAL_FORMAT * nb_measures, data) - except struct.error: - # This either a corruption, either a v2 measures - try: - return msgpackutils.loads(data) - except ValueError: - LOG.error( - "Unable to decode measure %s, possible data corruption", - measure_id) - raise - return six.moves.zip( - pandas.to_datetime(measures[::2], unit='ns'), - itertools.islice(measures, 1, len(measures), 2)) - - def measures_report(self, details=True): - metrics, measures, full_details = self._build_report(details) - report = {'summary': {'metrics': metrics, 'measures': measures}} - if full_details is not None: - report['details'] = full_details - return report - def _check_for_metric_upgrade(self, metric): lock = self._lock(metric.id) with lock: @@ -457,7 +410,8 @@ class CarbonaraBasedStorage(storage.StorageDriver): break marker = metrics[-1][0].id - def process_new_measures(self, indexer, metrics_to_process, sync=False): + def process_new_measures(self, indexer, metrics_to_process, + sync=False): metrics = indexer.list_metrics(ids=metrics_to_process) # This build the list of deleted metrics, i.e. the metrics we have # measures to process for but that are not in the indexer anymore. 
@@ -469,7 +423,8 @@ class CarbonaraBasedStorage(storage.StorageDriver): # measurement files under its feet is not nice! try: with self._lock(metric_id)(blocking=sync): - self._delete_unprocessed_measures_for_metric_id(metric_id) + self.incoming.delete_unprocessed_measures_for_metric_id( + metric_id) except coordination.LockAcquireFailed: LOG.debug("Cannot acquire lock for metric %s, postponing " "unprocessed measures deletion" % metric_id) @@ -484,7 +439,8 @@ class CarbonaraBasedStorage(storage.StorageDriver): try: locksw = timeutils.StopWatch().start() LOG.debug("Processing measures for %s" % metric) - with self._process_measure_for_metric(metric) as measures: + with self.incoming.process_measure_for_metric(metric) \ + as measures: self._compute_and_store_timeseries(metric, measures) LOG.debug("Metric %s locked during %.2f seconds" % (metric.id, locksw.elapsed())) diff --git a/gnocchi/storage/ceph.py b/gnocchi/storage/ceph.py index 97ab02af..9816b891 100644 --- a/gnocchi/storage/ceph.py +++ b/gnocchi/storage/ceph.py @@ -27,6 +27,7 @@ from oslo_utils import importutils from gnocchi import storage from gnocchi.storage import _carbonara +from gnocchi.storage.incoming import _carbonara as incoming_carbonara LOG = log.getLogger(__name__) @@ -57,7 +58,8 @@ OPTS = [ ] -class CephStorage(_carbonara.CarbonaraBasedStorage): +class CephStorage(_carbonara.CarbonaraBasedStorage, + incoming_carbonara.CarbonaraBasedStorage): WRITE_FULL = False @@ -211,7 +213,7 @@ class CephStorage(_carbonara.CarbonaraBasedStorage): objs_it = itertools.islice(names, size * part, size * (part + 1)) return set([name.split("_")[1] for name in objs_it]) - def _delete_unprocessed_measures_for_metric_id(self, metric_id): + def delete_unprocessed_measures_for_metric_id(self, metric_id): object_prefix = self.MEASURE_PREFIX + "_" + str(metric_id) object_names = self._list_object_names_to_process(object_prefix) # Now clean objects and omap @@ -226,7 +228,7 @@ class 
CephStorage(_carbonara.CarbonaraBasedStorage): self.ioctx.aio_remove(n) @contextlib.contextmanager - def _process_measure_for_metric(self, metric): + def process_measure_for_metric(self, metric): object_prefix = self.MEASURE_PREFIX + "_" + str(metric.id) object_names = list(self._list_object_names_to_process(object_prefix)) diff --git a/gnocchi/storage/file.py b/gnocchi/storage/file.py index 02b435e8..43d25c36 100644 --- a/gnocchi/storage/file.py +++ b/gnocchi/storage/file.py @@ -27,6 +27,7 @@ import six from gnocchi import storage from gnocchi.storage import _carbonara +from gnocchi.storage.incoming import _carbonara as incoming_carbonara OPTS = [ @@ -36,7 +37,8 @@ OPTS = [ ] -class FileStorage(_carbonara.CarbonaraBasedStorage): +class FileStorage(_carbonara.CarbonaraBasedStorage, + incoming_carbonara.CarbonaraBasedStorage): WRITE_FULL = True @@ -178,12 +180,12 @@ class FileStorage(_carbonara.CarbonaraBasedStorage): if e.errno not in (errno.ENOENT, errno.ENOTEMPTY, errno.EEXIST): raise - def _delete_unprocessed_measures_for_metric_id(self, metric_id): + def delete_unprocessed_measures_for_metric_id(self, metric_id): files = self._list_measures_container_for_metric_id(metric_id) self._delete_measures_files_for_metric_id(metric_id, files) @contextlib.contextmanager - def _process_measure_for_metric(self, metric): + def process_measure_for_metric(self, metric): files = self._list_measures_container_for_metric_id(metric.id) measures = [] for f in files: diff --git a/gnocchi/storage/incoming/__init__.py b/gnocchi/storage/incoming/__init__.py new file mode 100644 index 00000000..dfbe9396 --- /dev/null +++ b/gnocchi/storage/incoming/__init__.py @@ -0,0 +1,46 @@ +# -*- encoding: utf-8 -*- +# +# Copyright © 2014-2015 eNovance +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from gnocchi import exceptions + + +# TODO(sileht): We inherit from this storage driver temporary +# until we moved out all incoming code from here. +class StorageDriver(object): + + @staticmethod + def add_measures(metric, measures): + """Add a measure to a metric. + + :param metric: The metric measured. + :param measures: The actual measures. + """ + raise exceptions.NotImplementedError + + @staticmethod + def measures_report(details=True): + """Return a report of pending to process measures. + + Only useful for drivers that process measurements in background + + :return: {'summary': {'metrics': count, 'measures': count}, + 'details': {metric_id: pending_measures_count}} + """ + raise exceptions.NotImplementedError + + @staticmethod + def list_metric_with_measures_to_process(size, part, full=False): + raise NotImplementedError diff --git a/gnocchi/storage/incoming/_carbonara.py b/gnocchi/storage/incoming/_carbonara.py new file mode 100644 index 00000000..0c349e9d --- /dev/null +++ b/gnocchi/storage/incoming/_carbonara.py @@ -0,0 +1,85 @@ +# -*- encoding: utf-8 -*- +# +# Copyright © 2016 Red Hat, Inc. +# Copyright © 2014-2015 eNovance +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +import itertools +import struct + +from oslo_log import log +from oslo_serialization import msgpackutils +import pandas +import six.moves + +from gnocchi.storage import incoming + +LOG = log.getLogger(__name__) + + +class CarbonaraBasedStorage(incoming.StorageDriver): + MEASURE_PREFIX = "measure" + _MEASURE_SERIAL_FORMAT = "Qd" + _MEASURE_SERIAL_LEN = struct.calcsize(_MEASURE_SERIAL_FORMAT) + + def _unserialize_measures(self, measure_id, data): + nb_measures = len(data) // self._MEASURE_SERIAL_LEN + try: + measures = struct.unpack( + "<" + self._MEASURE_SERIAL_FORMAT * nb_measures, data) + except struct.error: + # This either a corruption, either a v2 measures + try: + return msgpackutils.loads(data) + except ValueError: + LOG.error( + "Unable to decode measure %s, possible data corruption", + measure_id) + raise + return six.moves.zip( + pandas.to_datetime(measures[::2], unit='ns'), + itertools.islice(measures, 1, len(measures), 2)) + + def add_measures(self, metric, measures): + measures = list(measures) + data = struct.pack( + "<" + self._MEASURE_SERIAL_FORMAT * len(measures), + *list(itertools.chain.from_iterable(measures))) + self._store_new_measures(metric, data) + + @staticmethod + def _store_new_measures(metric, data): + raise NotImplementedError + + def measures_report(self, details=True): + metrics, measures, full_details = self._build_report(details) + report = {'summary': {'metrics': metrics, 'measures': measures}} + if full_details is not None: + report['details'] = full_details + return report + + @staticmethod + def _build_report(details): + 
raise NotImplementedError + + @staticmethod + def list_metric_with_measures_to_process(size, part, full=False): + raise NotImplementedError + + @staticmethod + def delete_unprocessed_measures_for_metric_id(metric_id): + raise NotImplementedError + + @staticmethod + def process_measure_for_metric(metric): + raise NotImplementedError diff --git a/gnocchi/storage/s3.py b/gnocchi/storage/s3.py index 66990c33..e4d5a838 100644 --- a/gnocchi/storage/s3.py +++ b/gnocchi/storage/s3.py @@ -31,6 +31,7 @@ except ImportError: from gnocchi import storage from gnocchi.storage import _carbonara +from gnocchi.storage.incoming import _carbonara as incoming_carbonara from gnocchi import utils LOG = logging.getLogger(__name__) @@ -58,7 +59,8 @@ def retry_if_operationaborted(exception): and exception.response['Error'].get('Code') == "OperationAborted") -class S3Storage(_carbonara.CarbonaraBasedStorage): +class S3Storage(_carbonara.CarbonaraBasedStorage, + incoming_carbonara.CarbonaraBasedStorage): WRITE_FULL = True @@ -214,12 +216,12 @@ class S3Storage(_carbonara.CarbonaraBasedStorage): deleted, len(objects) - deleted) - def _delete_unprocessed_measures_for_metric_id(self, metric_id): + def delete_unprocessed_measures_for_metric_id(self, metric_id): files = self._list_measure_files_for_metric_id(metric_id) self._bulk_delete(self._bucket_name_measures, files) @contextlib.contextmanager - def _process_measure_for_metric(self, metric): + def process_measure_for_metric(self, metric): files = self._list_measure_files_for_metric_id(metric.id) measures = [] diff --git a/gnocchi/storage/swift.py b/gnocchi/storage/swift.py index e8975e69..fca66b91 100644 --- a/gnocchi/storage/swift.py +++ b/gnocchi/storage/swift.py @@ -30,6 +30,7 @@ except ImportError: from gnocchi import storage from gnocchi.storage import _carbonara +from gnocchi.storage.incoming import _carbonara as incoming_carbonara LOG = log.getLogger(__name__) @@ -74,7 +75,8 @@ OPTS = [ ] -class 
SwiftStorage(_carbonara.CarbonaraBasedStorage): +class SwiftStorage(_carbonara.CarbonaraBasedStorage, + incoming_carbonara.CarbonaraBasedStorage): WRITE_FULL = True POST_HEADERS = {'Accept': 'application/json', 'Content-Type': 'text/plain'} @@ -174,12 +176,12 @@ class SwiftStorage(_carbonara.CarbonaraBasedStorage): LOG.debug('# of objects deleted: %s, # of objects skipped: %s', resp['Number Deleted'], resp['Number Not Found']) - def _delete_unprocessed_measures_for_metric_id(self, metric_id): + def delete_unprocessed_measures_for_metric_id(self, metric_id): files = self._list_measure_files_for_metric_id(metric_id) self._bulk_delete(self.MEASURE_PREFIX, files) @contextlib.contextmanager - def _process_measure_for_metric(self, metric): + def process_measure_for_metric(self, metric): files = self._list_measure_files_for_metric_id(metric.id) measures = [] diff --git a/gnocchi/tests/gabbi/fixtures.py b/gnocchi/tests/gabbi/fixtures.py index 1c891f36..5694e206 100644 --- a/gnocchi/tests/gabbi/fixtures.py +++ b/gnocchi/tests/gabbi/fixtures.py @@ -159,8 +159,9 @@ class MetricdThread(threading.Thread): self.flag = True def run(self): + incoming = self.storage.incoming while self.flag: - metrics = self.storage.list_metric_with_measures_to_process( + metrics = incoming.list_metric_with_measures_to_process( None, None, full=True) self.storage.process_background_tasks(self.index, metrics) time.sleep(0.1) diff --git a/gnocchi/tests/storage/test_carbonara.py b/gnocchi/tests/storage/test_carbonara.py index 9df631a2..62b89482 100644 --- a/gnocchi/tests/storage/test_carbonara.py +++ b/gnocchi/tests/storage/test_carbonara.py @@ -140,7 +140,7 @@ class TestCarbonaraMigration(tests_base.TestCase): storage.AggregationDoesNotExist, self.storage.get_measures, self.metric, aggregation='max') - self.storage.add_measures(self.metric, [ + self.storage.incoming.add_measures(self.metric, [ storage.Measure(utils.dt_to_unix_ns(2016, 7, 18), 69), storage.Measure(utils.dt_to_unix_ns(2016, 7, 18, 1, 
1), 64), ]) diff --git a/gnocchi/tests/test_aggregates.py b/gnocchi/tests/test_aggregates.py index 1100e33c..ba2d6246 100644 --- a/gnocchi/tests/test_aggregates.py +++ b/gnocchi/tests/test_aggregates.py @@ -61,8 +61,8 @@ class TestAggregates(tests_base.TestCase): self.index.create_metric(metric.id, str(uuid.uuid4()), str(uuid.uuid4()), 'medium') - self.storage.add_measures(metric, measures) - metrics = self.storage.list_metric_with_measures_to_process( + self.storage.incoming.add_measures(metric, measures) + metrics = self.storage.incoming.list_metric_with_measures_to_process( None, None, full=True) self.storage.process_background_tasks(self.index, metrics, sync=True) diff --git a/gnocchi/tests/test_rest.py b/gnocchi/tests/test_rest.py index 3fb645c5..72cf41c4 100644 --- a/gnocchi/tests/test_rest.py +++ b/gnocchi/tests/test_rest.py @@ -114,7 +114,7 @@ class TestingApp(webtest.TestApp): if self.auth and self.token is not None: req.headers['X-Auth-Token'] = self.token response = super(TestingApp, self).do_request(req, *args, **kwargs) - metrics = self.storage.list_metric_with_measures_to_process( + metrics = self.storage.incoming.list_metric_with_measures_to_process( None, None, full=True) self.storage.process_background_tasks(self.indexer, metrics, sync=True) return response diff --git a/gnocchi/tests/test_storage.py b/gnocchi/tests/test_storage.py index 0e468f00..961a6c95 100644 --- a/gnocchi/tests/test_storage.py +++ b/gnocchi/tests/test_storage.py @@ -61,12 +61,12 @@ class TestStorageDriver(tests_base.TestCase): "This test does not work with S3 as backend as the S3 driver " "has no fake client, and tests run in parallel.") - self.storage.add_measures(self.metric, [ + self.storage.incoming.add_measures(self.metric, [ storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 0, 1), 69), ]) self.trigger_processing() - self.storage.add_measures(self.metric, [ + self.storage.incoming.add_measures(self.metric, [ storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 13, 0, 1), 1), 
]) @@ -86,22 +86,22 @@ class TestStorageDriver(tests_base.TestCase): self.skipTest( "This test does not work with S3 as backend as the S3 driver " "has no fake client, and tests run in parallel.") - metrics = self.storage.list_metric_with_measures_to_process( + metrics = self.storage.incoming.list_metric_with_measures_to_process( None, None, full=True) self.assertEqual(set(), metrics) - self.storage.add_measures(self.metric, [ + self.storage.incoming.add_measures(self.metric, [ storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 0, 1), 69), ]) - metrics = self.storage.list_metric_with_measures_to_process( + metrics = self.storage.incoming.list_metric_with_measures_to_process( None, None, full=True) self.assertEqual(set([str(self.metric.id)]), metrics) self.trigger_processing() - metrics = self.storage.list_metric_with_measures_to_process( + metrics = self.storage.incoming.list_metric_with_measures_to_process( None, None, full=True) self.assertEqual(set([]), metrics) def test_delete_nonempty_metric(self): - self.storage.add_measures(self.metric, [ + self.storage.incoming.add_measures(self.metric, [ storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 0, 1), 69), ]) self.trigger_processing() @@ -109,14 +109,14 @@ class TestStorageDriver(tests_base.TestCase): self.trigger_processing() def test_delete_nonempty_metric_unprocessed(self): - self.storage.add_measures(self.metric, [ + self.storage.incoming.add_measures(self.metric, [ storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 0, 1), 69), ]) self.storage.delete_metric(self.metric, sync=True) self.trigger_processing() def test_delete_expunge_metric(self): - self.storage.add_measures(self.metric, [ + self.storage.incoming.add_measures(self.metric, [ storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 0, 1), 69), ]) self.trigger_processing() @@ -126,14 +126,14 @@ class TestStorageDriver(tests_base.TestCase): self.metric.id) def test_measures_reporting(self): - report = self.storage.measures_report(True) + report = 
self.storage.incoming.measures_report(True) self.assertIsInstance(report, dict) self.assertIn('summary', report) self.assertIn('metrics', report['summary']) self.assertIn('measures', report['summary']) self.assertIn('details', report) self.assertIsInstance(report['details'], dict) - report = self.storage.measures_report(False) + report = self.storage.incoming.measures_report(False) self.assertIsInstance(report, dict) self.assertIn('summary', report) self.assertIn('metrics', report['summary']) @@ -142,7 +142,7 @@ class TestStorageDriver(tests_base.TestCase): def test_add_measures_big(self): m, __ = self._create_metric('high') - self.storage.add_measures(m, [ + self.storage.incoming.add_measures(m, [ storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, i, j), 100) for i in six.moves.range(0, 60) for j in six.moves.range(0, 60)]) self.trigger_processing([str(m.id)]) @@ -159,11 +159,11 @@ class TestStorageDriver(tests_base.TestCase): measures = [ storage.Measure(utils.dt_to_unix_ns(2014, 1, 6, i, j, 0), 100) for i in six.moves.range(2) for j in six.moves.range(0, 60, 2)] - self.storage.add_measures(m, measures) + self.storage.incoming.add_measures(m, measures) self.trigger_processing([str(m.id)]) # add measure to end, in same aggregate time as last point. - self.storage.add_measures(m, [ + self.storage.incoming.add_measures(m, [ storage.Measure(utils.dt_to_unix_ns(2014, 1, 6, 1, 58, 1), 100)]) with mock.patch.object(self.storage, '_store_metric_measures') as c: @@ -186,14 +186,15 @@ class TestStorageDriver(tests_base.TestCase): measures = [ storage.Measure(utils.dt_to_unix_ns(2014, 1, 6, i, j, 0), 100) for i in six.moves.range(2) for j in six.moves.range(0, 60, 2)] - self.storage.add_measures(m, measures) + self.storage.incoming.add_measures(m, measures) self.trigger_processing([str(m.id)]) # add measure to end, in same aggregate time as last point. 
new_point = utils.dt_to_unix_ns(2014, 1, 6, 1, 58, 1) - self.storage.add_measures(m, [storage.Measure(new_point, 100)]) + self.storage.incoming.add_measures( + m, [storage.Measure(new_point, 100)]) - with mock.patch.object(self.storage, '_add_measures') as c: + with mock.patch.object(self.storage.incoming, 'add_measures') as c: self.trigger_processing([str(m.id)]) for __, args, __ in c.mock_calls: self.assertEqual( @@ -201,7 +202,7 @@ class TestStorageDriver(tests_base.TestCase): new_point, args[1].granularity * 10e8)) def test_delete_old_measures(self): - self.storage.add_measures(self.metric, [ + self.storage.incoming.add_measures(self.metric, [ storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 0, 1), 69), storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 7, 31), 42), storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 9, 31), 4), @@ -218,7 +219,7 @@ class TestStorageDriver(tests_base.TestCase): ], self.storage.get_measures(self.metric)) # One year later… - self.storage.add_measures(self.metric, [ + self.storage.incoming.add_measures(self.metric, [ storage.Measure(utils.dt_to_unix_ns(2015, 1, 1, 12, 0, 1), 69), ]) self.trigger_processing() @@ -252,7 +253,7 @@ class TestStorageDriver(tests_base.TestCase): apname) # First store some points scattered across different splits - self.storage.add_measures(self.metric, [ + self.storage.incoming.add_measures(self.metric, [ storage.Measure(utils.dt_to_unix_ns(2016, 1, 1, 12, 0, 1), 69), storage.Measure(utils.dt_to_unix_ns(2016, 1, 2, 13, 7, 31), 42), storage.Measure(utils.dt_to_unix_ns(2016, 1, 4, 14, 9, 31), 4), @@ -292,7 +293,7 @@ class TestStorageDriver(tests_base.TestCase): # split (keep in mind the back window size in one hour here). We move # the BoundTimeSerie processing timeserie far away from its current # range. 
- self.storage.add_measures(self.metric, [ + self.storage.incoming.add_measures(self.metric, [ storage.Measure(utils.dt_to_unix_ns(2016, 1, 10, 16, 18, 45), 45), storage.Measure(utils.dt_to_unix_ns(2016, 1, 10, 17, 12, 45), 46), ]) @@ -327,7 +328,7 @@ class TestStorageDriver(tests_base.TestCase): ], self.storage.get_measures(self.metric, granularity=60.0)) def test_updated_measures(self): - self.storage.add_measures(self.metric, [ + self.storage.incoming.add_measures(self.metric, [ storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 0, 1), 69), storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 7, 31), 42), ]) @@ -340,7 +341,7 @@ class TestStorageDriver(tests_base.TestCase): (utils.datetime_utc(2014, 1, 1, 12, 5), 300.0, 42.0), ], self.storage.get_measures(self.metric)) - self.storage.add_measures(self.metric, [ + self.storage.incoming.add_measures(self.metric, [ storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 9, 31), 4), storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 12, 45), 44), ]) @@ -371,7 +372,7 @@ class TestStorageDriver(tests_base.TestCase): ], self.storage.get_measures(self.metric, aggregation='min')) def test_add_and_get_measures(self): - self.storage.add_measures(self.metric, [ + self.storage.incoming.add_measures(self.metric, [ storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 0, 1), 69), storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 7, 31), 42), storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 9, 31), 4), @@ -461,7 +462,7 @@ class TestStorageDriver(tests_base.TestCase): self.archive_policies['low'])])) def test_get_measure_unknown_aggregation(self): - self.storage.add_measures(self.metric, [ + self.storage.incoming.add_measures(self.metric, [ storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 0, 1), 69), storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 7, 31), 42), storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 9, 31), 4), @@ -474,13 +475,13 @@ class TestStorageDriver(tests_base.TestCase): def 
test_get_cross_metric_measures_unknown_aggregation(self): metric2 = storage.Metric(uuid.uuid4(), self.archive_policies['low']) - self.storage.add_measures(self.metric, [ + self.storage.incoming.add_measures(self.metric, [ storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 0, 1), 69), storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 7, 31), 42), storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 9, 31), 4), storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 12, 45), 44), ]) - self.storage.add_measures(metric2, [ + self.storage.incoming.add_measures(metric2, [ storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 0, 1), 69), storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 7, 31), 42), storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 9, 31), 4), @@ -494,13 +495,13 @@ class TestStorageDriver(tests_base.TestCase): def test_get_cross_metric_measures_unknown_granularity(self): metric2 = storage.Metric(uuid.uuid4(), self.archive_policies['low']) - self.storage.add_measures(self.metric, [ + self.storage.incoming.add_measures(self.metric, [ storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 0, 1), 69), storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 7, 31), 42), storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 9, 31), 4), storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 12, 45), 44), ]) - self.storage.add_measures(metric2, [ + self.storage.incoming.add_measures(metric2, [ storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 0, 1), 69), storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 7, 31), 42), storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 9, 31), 4), @@ -514,13 +515,13 @@ class TestStorageDriver(tests_base.TestCase): def test_add_and_get_cross_metric_measures_different_archives(self): metric2 = storage.Metric(uuid.uuid4(), self.archive_policies['no_granularity_match']) - self.storage.add_measures(self.metric, [ + self.storage.incoming.add_measures(self.metric, [ storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 0, 1), 69), 
storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 7, 31), 42), storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 9, 31), 4), storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 12, 45), 44), ]) - self.storage.add_measures(metric2, [ + self.storage.incoming.add_measures(metric2, [ storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 0, 1), 69), storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 7, 31), 42), storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 9, 31), 4), @@ -533,13 +534,13 @@ class TestStorageDriver(tests_base.TestCase): def test_add_and_get_cross_metric_measures(self): metric2, __ = self._create_metric() - self.storage.add_measures(self.metric, [ + self.storage.incoming.add_measures(self.metric, [ storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 0, 1), 69), storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 7, 31), 42), storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 9, 31), 4), storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 12, 45), 44), ]) - self.storage.add_measures(metric2, [ + self.storage.incoming.add_measures(metric2, [ storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 0, 5), 9), storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 7, 41), 2), storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 10, 31), 4), @@ -618,14 +619,14 @@ class TestStorageDriver(tests_base.TestCase): def test_add_and_get_cross_metric_measures_with_holes(self): metric2, __ = self._create_metric() - self.storage.add_measures(self.metric, [ + self.storage.incoming.add_measures(self.metric, [ storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 0, 1), 69), storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 7, 31), 42), storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 5, 31), 8), storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 9, 31), 4), storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 12, 45), 42), ]) - self.storage.add_measures(metric2, [ + self.storage.incoming.add_measures(metric2, [ storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 0, 5), 
9), storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 7, 31), 2), storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 9, 31), 6), @@ -644,7 +645,7 @@ class TestStorageDriver(tests_base.TestCase): def test_search_value(self): metric2, __ = self._create_metric() - self.storage.add_measures(self.metric, [ + self.storage.incoming.add_measures(self.metric, [ storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 0, 1,), 69), storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 7, 31), 42), storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 5, 31), 8), @@ -652,7 +653,7 @@ class TestStorageDriver(tests_base.TestCase): storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 12, 45), 42), ]) - self.storage.add_measures(metric2, [ + self.storage.incoming.add_measures(metric2, [ storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 0, 5), 9), storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 7, 31), 2), storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 9, 31), 6), @@ -686,7 +687,7 @@ class TestStorageDriver(tests_base.TestCase): m = self.index.create_metric(uuid.uuid4(), str(uuid.uuid4()), str(uuid.uuid4()), name) m = self.index.list_metrics(ids=[m.id])[0] - self.storage.add_measures(m, [ + self.storage.incoming.add_measures(m, [ storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 0, 0), 1), storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 0, 5), 1), storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 0, 10), 1), @@ -701,7 +702,7 @@ class TestStorageDriver(tests_base.TestCase): self.index.update_archive_policy( name, [archive_policy.ArchivePolicyItem(granularity=5, points=6)]) m = self.index.list_metrics(ids=[m.id])[0] - self.storage.add_measures(m, [ + self.storage.incoming.add_measures(m, [ storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 0, 15), 1), ]) self.trigger_processing([str(m.id)]) diff --git a/tools/measures_injector.py b/tools/measures_injector.py index daa5f4d8..01e6a385 100755 --- a/tools/measures_injector.py +++ b/tools/measures_injector.py @@ -47,7 +47,7 @@ 
def injector(): storage.Measure( utils.dt_in_unix_ns(utils.utcnow()), random.random()) for __ in six.moves.range(conf.measures_per_batch)] - s.add_measures(metric, measures) + s.incoming.add_measures(metric, measures) with futures.ThreadPoolExecutor(max_workers=len(metrics)) as executor: # We use 'list' to iterate all threads here to raise the first -- GitLab From 9055624e5c30daf1978c4aabc74d9962022449e6 Mon Sep 17 00:00:00 2001 From: zhangguoqing Date: Sun, 27 Nov 2016 03:22:07 +0000 Subject: [PATCH 0491/1483] Fix two trivial docs errors in architecture.rst 1 Correct a grammar about not only...but also... 2 Now, there are four storage drivers rather than three in gnocchi. Change-Id: I8744adda401e7f92dc3688c51d28f958e9d85b08 --- doc/source/architecture.rst | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/doc/source/architecture.rst b/doc/source/architecture.rst index 7babef96..ca4e882e 100755 --- a/doc/source/architecture.rst +++ b/doc/source/architecture.rst @@ -24,10 +24,10 @@ receives timestamps and values, and pre-computes aggregations according to the defined archive policies. The *indexer* is responsible for storing the index of all resources, along with -their types and properties. Gnocchi only knows about resource types from the -OpenStack project, but also provides a *generic* type so you can create basic -resources and handle the resource properties yourself. The indexer is also -responsible for linking resources with metrics. +their types and properties. Gnocchi not only knows about resource types from +the OpenStack project, but also provides a *generic* type so you can create +basic resources and handle the resource properties yourself. The indexer is +also responsible for linking resources with metrics. 
How to choose back-ends ~~~~~~~~~~~~~~~~~~~~~~~ @@ -43,7 +43,7 @@ The drivers are based on an intermediate library, named *Carbonara*, which handles the time series manipulation, since none of these storage technologies handle time series natively. -The three *Carbonara* based drivers are working well and are as scalable as +The four *Carbonara* based drivers are working well and are as scalable as their back-end technology permits. Ceph and Swift are inherently more scalable than the file driver. -- GitLab From 049116d1d16691fe9a7e54c7ea9ca5c2e7dbb5bb Mon Sep 17 00:00:00 2001 From: melissaml Date: Mon, 28 Nov 2016 18:44:26 +0800 Subject: [PATCH 0492/1483] Modify variable's using method in Log Messages String interpolation should be delayed to be handled by the logging code, rather than being done at the point of the logging call. Ref:http://docs.openstack.org/developer/oslo.i18n/guidelines.html#log-translation For example: LOG.info(_LI('some message: variable=%s') % variable) LOG.info(_LI('some message: variable=%s'), variable) Change-Id: I70d42077e16f8b23507a5a1c70339d06c93a2e5a --- gnocchi/statsd.py | 16 ++++++++-------- gnocchi/storage/__init__.py | 2 +- gnocchi/storage/_carbonara.py | 14 +++++++------- gnocchi/storage/ceph.py | 2 +- 4 files changed, 17 insertions(+), 17 deletions(-) diff --git a/gnocchi/statsd.py b/gnocchi/statsd.py index ea00796a..cb61024a 100644 --- a/gnocchi/statsd.py +++ b/gnocchi/statsd.py @@ -47,7 +47,7 @@ class Stats(object): LOG.debug("Resource %s already exists" % self.conf.statsd.resource_id) else: - LOG.info("Created resource %s" % self.conf.statsd.resource_id) + LOG.info("Created resource %s", self.conf.statsd.resource_id) self.gauges = {} self.counters = {} self.times = {} @@ -114,8 +114,8 @@ class Stats(object): resource_id=self.conf.statsd.resource_id) self.storage.add_measures(metric, (measure,)) except Exception as e: - LOG.error("Unable to add measure %s: %s" - % (metric_name, e)) + LOG.error("Unable to add measure %s: %s", + 
(metric_name, e)) self.reset() @@ -140,7 +140,7 @@ class StatsdServer(object): try: messages = [m for m in data.decode().split("\n") if m] except Exception as e: - LOG.error("Unable to decode datagram: %s" % e) + LOG.error("Unable to decode datagram: %s", e) return for message in messages: metric = message.split("|") @@ -150,7 +150,7 @@ class StatsdServer(object): elif len(metric) == 3: metric_name, metric_type, sampling = metric else: - LOG.error("Invalid number of | in `%s'" % message) + LOG.error("Invalid number of | in `%s'", message) continue sampling = float(sampling[1:]) if sampling is not None else None metric_name, metric_str_val = metric_name.split(':') @@ -161,7 +161,7 @@ class StatsdServer(object): self.stats.treat_metric(metric_name, metric_type, value, sampling) except Exception as e: - LOG.error("Unable to treat metric %s: %s" % (message, str(e))) + LOG.error("Unable to treat metric %s: %s", (message, str(e))) def start(): @@ -186,8 +186,8 @@ def start(): loop.call_later(conf.statsd.flush_delay, _flush) transport, protocol = loop.run_until_complete(listen) - LOG.info("Started on %s:%d" % (conf.statsd.host, conf.statsd.port)) - LOG.info("Flush delay: %d seconds" % conf.statsd.flush_delay) + LOG.info("Started on %s:%d", (conf.statsd.host, conf.statsd.port)) + LOG.info("Flush delay: %d seconds", conf.statsd.flush_delay) try: loop.run_forever() diff --git a/gnocchi/storage/__init__.py b/gnocchi/storage/__init__.py index 152aa20b..22a4dee6 100644 --- a/gnocchi/storage/__init__.py +++ b/gnocchi/storage/__init__.py @@ -207,7 +207,7 @@ class StorageDriver(object): except Exception: if sync: raise - LOG.error("Unable to expunge metric %s from storage" % m, + LOG.error("Unable to expunge metric %s from storage", m, exc_info=True) continue try: diff --git a/gnocchi/storage/_carbonara.py b/gnocchi/storage/_carbonara.py index 34fbd8ef..9562602d 100644 --- a/gnocchi/storage/_carbonara.py +++ b/gnocchi/storage/_carbonara.py @@ -175,8 +175,8 @@ class 
CarbonaraBasedStorage(storage.StorageDriver): except ValueError: LOG.error("Data corruption detected for %s " "aggregated `%s' timeserie, granularity `%s' " - "around time `%s', ignoring." - % (metric.id, aggregation, granularity, key)) + "around time `%s', ignoring.", + (metric.id, aggregation, granularity, key)) def _get_measures_timeserie(self, metric, aggregation, granularity, @@ -422,7 +422,7 @@ class CarbonaraBasedStorage(storage.StorageDriver): # Just try the next metric, this one has no measures break else: - LOG.info("Migrating metric %s to new format" % metric) + LOG.info("Migrating metric %s to new format", metric) timeseries = filter( lambda x: x is not None, self._map_in_thread( @@ -444,7 +444,7 @@ class CarbonaraBasedStorage(storage.StorageDriver): metric, key, agg_method, d.granularity, version=None) self._delete_unaggregated_timeserie(metric, version=None) - LOG.info("Migrated metric %s to new format" % metric) + LOG.info("Migrated metric %s to new format", metric) def upgrade(self, index): marker = None @@ -694,7 +694,7 @@ class CarbonaraBasedStorage(storage.StorageDriver): try: return self._unserialize_timeserie_v2(data) except ValueError: - LOG.error("Data corruption detected for %s ignoring." % metric.id) + LOG.error("Data corruption detected for %s ignoring.", metric.id) def _get_measures_and_unserialize_v2(self, metric, key, aggregation, granularity): @@ -706,5 +706,5 @@ class CarbonaraBasedStorage(storage.StorageDriver): except ValueError: LOG.error("Data corruption detected for %s " "aggregated `%s' timeserie, granularity `%s' " - "around time `%s', ignoring." - % (metric.id, aggregation, granularity, key)) + "around time `%s', ignoring.", + (metric.id, aggregation, granularity, key)) diff --git a/gnocchi/storage/ceph.py b/gnocchi/storage/ceph.py index 97ab02af..62addf2e 100644 --- a/gnocchi/storage/ceph.py +++ b/gnocchi/storage/ceph.py @@ -78,7 +78,7 @@ class CephStorage(_carbonara.CarbonaraBasedStorage): "omap feature. 
Install 'cradox' (recommended) " "or upgrade 'python-rados' >= 9.1.0 ") - LOG.info("Ceph storage backend use '%s' python library" % + LOG.info("Ceph storage backend use '%s' python library", RADOS_MODULE_NAME) # NOTE(sileht): librados handles reconnection itself, -- GitLab From 77c0aa0c9f51b164d79a9e8716b3328189372a37 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Wed, 30 Nov 2016 13:59:06 +0100 Subject: [PATCH 0493/1483] rest: don't ignore measures of created metrics When we batch measurements with create_metrics=True, and a metric is created, we miss to add their measures. This change fixes that. Change-Id: Ic2ef94cb620c1e819ab76d973442037b8de59672 --- gnocchi/rest/__init__.py | 4 +++- gnocchi/tests/gabbi/gabbits/batch-measures.yaml | 6 ++++++ 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/gnocchi/rest/__init__.py b/gnocchi/rest/__init__.py index 41148935..da4d2e7e 100644 --- a/gnocchi/rest/__init__.py +++ b/gnocchi/rest/__init__.py @@ -1406,7 +1406,7 @@ class ResourcesMetricsMeasuresBatchController(rest.RestController): "name": name }) try: - pecan.request.indexer.create_metric( + m = pecan.request.indexer.create_metric( uuid.uuid4(), user_id, project_id, resource_id=resource_id, @@ -1420,6 +1420,8 @@ class ResourcesMetricsMeasuresBatchController(rest.RestController): # This catch NoSuchArchivePolicy, which is unlikely # be still possible abort(400, e) + else: + known_metrics.append(m) elif len(names) != len(metrics): unknown_metrics.extend( diff --git a/gnocchi/tests/gabbi/gabbits/batch-measures.yaml b/gnocchi/tests/gabbi/gabbits/batch-measures.yaml index ef820412..9eedcea5 100644 --- a/gnocchi/tests/gabbi/gabbits/batch-measures.yaml +++ b/gnocchi/tests/gabbi/gabbits/batch-measures.yaml @@ -188,6 +188,12 @@ tests: - name: get created metric to check creation GET: /v1/resource/generic/46c9418d-d63b-4cdd-be89-8f57ffc5952e/metric/auto.test + - name: ensure measure have been posted + GET: 
/v1/resource/generic/46c9418d-d63b-4cdd-be89-8f57ffc5952e/metric/auto.test/measures?refresh=true&start=2015-03-06T14:34 + response_json_paths: + $: + - ["2015-03-06T14:34:12+00:00", 1.0, 12.0] + - name: push measurements to unknown named metrics and resource with create_metrics POST: /v1/batch/resources/metrics/measures?create_metrics=true request_headers: -- GitLab From b6d08c2c576eeb225cb1add743b9d44d02a569c1 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Wed, 30 Nov 2016 14:15:11 +0100 Subject: [PATCH 0494/1483] fix logging... Recently [1] we move some (why not all?) logging to do lazy formatting (why ? we don't use i18n...). But introduce some bugs because just replacing % by , is not enough when tuple are using for formatting... This change fixes that. [1] 049116d1d16691fe9a7e54c7ea9ca5c2e7dbb5bb Change-Id: Iff677b84030f6c0db261e9003636764cbeed2cf7 --- gnocchi/statsd.py | 12 ++++++------ gnocchi/storage/_carbonara.py | 4 ++-- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/gnocchi/statsd.py b/gnocchi/statsd.py index 4d70beb9..87f6595b 100644 --- a/gnocchi/statsd.py +++ b/gnocchi/statsd.py @@ -44,8 +44,8 @@ class Stats(object): self.conf.statsd.user_id, self.conf.statsd.project_id) except indexer.ResourceAlreadyExists: - LOG.debug("Resource %s already exists" - % self.conf.statsd.resource_id) + LOG.debug("Resource %s already exists", + self.conf.statsd.resource_id) else: LOG.info("Created resource %s", self.conf.statsd.resource_id) self.gauges = {} @@ -115,7 +115,7 @@ class Stats(object): self.storage.incoming.add_measures(metric, (measure,)) except Exception as e: LOG.error("Unable to add measure %s: %s", - (metric_name, e)) + metric_name, e) self.reset() @@ -136,7 +136,7 @@ class StatsdServer(object): pass def datagram_received(self, data, addr): - LOG.debug("Received data `%r' from %s" % (data, addr)) + LOG.debug("Received data `%r' from %s", data, addr) try: messages = [m for m in data.decode().split("\n") if m] except Exception as e: @@ 
-161,7 +161,7 @@ class StatsdServer(object): self.stats.treat_metric(metric_name, metric_type, value, sampling) except Exception as e: - LOG.error("Unable to treat metric %s: %s", (message, str(e))) + LOG.error("Unable to treat metric %s: %s", message, str(e)) def start(): @@ -186,7 +186,7 @@ def start(): loop.call_later(conf.statsd.flush_delay, _flush) transport, protocol = loop.run_until_complete(listen) - LOG.info("Started on %s:%d", (conf.statsd.host, conf.statsd.port)) + LOG.info("Started on %s:%d", conf.statsd.host, conf.statsd.port) LOG.info("Flush delay: %d seconds", conf.statsd.flush_delay) try: diff --git a/gnocchi/storage/_carbonara.py b/gnocchi/storage/_carbonara.py index b7487131..23362c67 100644 --- a/gnocchi/storage/_carbonara.py +++ b/gnocchi/storage/_carbonara.py @@ -172,7 +172,7 @@ class CarbonaraBasedStorage(storage.StorageDriver): LOG.error("Data corruption detected for %s " "aggregated `%s' timeserie, granularity `%s' " "around time `%s', ignoring.", - (metric.id, aggregation, granularity, key)) + metric.id, aggregation, granularity, key) def _get_measures_timeserie(self, metric, aggregation, granularity, @@ -663,4 +663,4 @@ class CarbonaraBasedStorage(storage.StorageDriver): LOG.error("Data corruption detected for %s " "aggregated `%s' timeserie, granularity `%s' " "around time `%s', ignoring.", - (metric.id, aggregation, granularity, key)) + metric.id, aggregation, granularity, key) -- GitLab From dbcfa9ccbdae5c8f2e7141a08fbdeed146cb0f06 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Wed, 30 Nov 2016 17:09:21 +0100 Subject: [PATCH 0495/1483] doc: Add reference to gnocchi-nagios tool Change-Id: Iaabf427c948fa1bd095d374f1ca2bc8fda628bac --- doc/source/index.rst | 2 ++ doc/source/nagios.rst | 19 +++++++++++++++++++ 2 files changed, 21 insertions(+) create mode 100644 doc/source/nagios.rst diff --git a/doc/source/index.rst b/doc/source/index.rst index 5cebbbfe..36757a1f 100644 --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -50,6 
+50,7 @@ Key Features - Queryable resource indexer - Multi-tenant - Grafana support +- Nagios/Icinga support - Statsd protocol support @@ -67,6 +68,7 @@ Documentation rest statsd grafana + nagios glossary releasenotes/index.rst diff --git a/doc/source/nagios.rst b/doc/source/nagios.rst new file mode 100644 index 00000000..72d2556c --- /dev/null +++ b/doc/source/nagios.rst @@ -0,0 +1,19 @@ +===================== +Nagios/Icinga support +===================== + +`Nagios`_ and `Icinga`_ has support for Gnocchi through a Gnocchi-nagios +service. It can be installed with pip:: + + pip install gnocchi-nagios + +`Source`_ and `Documentation`_ are also available. + +Gnocchi-nagios collects perfdata files generated by `Nagios`_ or `Icinga`_; +transforms them into Gnocchi resources, metrics and measures format; and +publishes them to the Gnocchi REST API. + +.. _`Nagios`: https://www.nagios.org/ +.. _`Icinga`: https://www.icinga.com/ +.. _`Documentation`: http://gnocchi-nagios.readthedocs.io/en/latest/ +.. 
_`Source`: https://github.com/sileht/gnocchi-nagios -- GitLab From fcf08aa0450c160fceb5a75a76233d4acd94acb6 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Mon, 28 Nov 2016 16:04:08 +0100 Subject: [PATCH 0496/1483] api: use egg entry_point rather than code path Change-Id: I7ecfafe966572eebea06b40aed2e7e6174e51996 --- etc/gnocchi/api-paste.ini | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/etc/gnocchi/api-paste.ini b/etc/gnocchi/api-paste.ini index ad56b17e..8f3fc26a 100644 --- a/etc/gnocchi/api-paste.ini +++ b/etc/gnocchi/api-paste.ini @@ -30,9 +30,9 @@ paste.app_factory = gnocchi.rest.app:app_factory root = gnocchi.rest.V1Controller [filter:keystone_authtoken] -paste.filter_factory = keystonemiddleware.auth_token:filter_factory +use = egg:keystonemiddleware#auth_token oslo_config_project = gnocchi [filter:http_proxy_to_wsgi] -paste.filter_factory = oslo_middleware.http_proxy_to_wsgi:HTTPProxyToWSGI.factory +use = egg:oslo.middleware#http_proxy_to_wsgi oslo_config_project = gnocchi -- GitLab From 4bdb88bfaf656046e7b70115d38c048fe9a2cb60 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Mon, 5 Dec 2016 18:32:29 +0100 Subject: [PATCH 0497/1483] rest: remove user_id and project_id from metric schema Those fields are never used nor set anywhere. We probably forgot to delete them at some point. 
Change-Id: Iea46102de6a8342538712da8bcaba974fd15897a --- gnocchi/rest/__init__.py | 4 ---- 1 file changed, 4 deletions(-) diff --git a/gnocchi/rest/__init__.py b/gnocchi/rest/__init__.py index da4d2e7e..8e4e9ab5 100644 --- a/gnocchi/rest/__init__.py +++ b/gnocchi/rest/__init__.py @@ -522,8 +522,6 @@ class MetricsController(rest.RestController): return MetricController(metrics[0]), remainder _MetricSchema = voluptuous.Schema({ - "user_id": six.text_type, - "project_id": six.text_type, "archive_policy_name": six.text_type, "name": six.text_type, voluptuous.Optional("unit"): @@ -559,8 +557,6 @@ class MetricsController(rest.RestController): enforce("create metric", { "created_by_user_id": user_id, "created_by_project_id": project_id, - "user_id": definition.get('user_id'), - "project_id": definition.get('project_id'), "archive_policy_name": archive_policy_name, "name": name, "unit": definition.get('unit'), -- GitLab From f5794af695b9a105f500ef56751a13c8a1dbe6cf Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Mon, 5 Dec 2016 11:57:28 +0100 Subject: [PATCH 0498/1483] fix oslo.db 4.15.0 breakage oslo.db 4.15.0 breaks our gate because it assumes we use the oslo.db provision module. This have been fixed by Ie8c454528ce3aa816c04fbb4beb69f4b5ec57e9c Also oslo.db now cleans the database resources. We was mocking self.db to avoid that before because of the provision module dependencies. But since this module is no more required, we can use the oslo.db facility. 
This change does that but keep compatibility with oslo.db < 4.15.0 Change-Id: I69f8ff9f702064e8fc5bf4018ebc6f3b2a8ea1a8 --- gnocchi/tests/indexer/sqlalchemy/test_migrations.py | 11 ++++++++--- setup.cfg | 5 +++-- 2 files changed, 11 insertions(+), 5 deletions(-) diff --git a/gnocchi/tests/indexer/sqlalchemy/test_migrations.py b/gnocchi/tests/indexer/sqlalchemy/test_migrations.py index 62445b0b..da44cb29 100644 --- a/gnocchi/tests/indexer/sqlalchemy/test_migrations.py +++ b/gnocchi/tests/indexer/sqlalchemy/test_migrations.py @@ -16,6 +16,7 @@ import abc import fixtures import mock +import oslo_db.exception from oslo_db.sqlalchemy import test_migrations import six import sqlalchemy as sa @@ -50,10 +51,14 @@ class ModelsMigrationsSync( self.index = indexer.get_driver(self.conf) self.index.connect() self.index.upgrade(nocreate=True, create_legacy_resource_types=True) + self.addCleanup(self._drop_database) - def tearDown(self): - sqlalchemy_utils.drop_database(self.conf.indexer.url) - super(ModelsMigrationsSync, self).tearDown() + def _drop_database(self): + try: + sqlalchemy_utils.drop_database(self.conf.indexer.url) + except oslo_db.exception.DBNonExistentDatabase: + # NOTE(sileht): oslo db >= 4.15.0 cleanup this for us + pass @staticmethod def get_metadata(): diff --git a/setup.cfg b/setup.cfg index 380ab9e6..28e2fe29 100644 --- a/setup.cfg +++ b/setup.cfg @@ -24,13 +24,13 @@ keystone = keystonemiddleware>=4.0.0 mysql = pymysql - oslo.db>=4.8.0,!=4.13.1,!=4.13.2 + oslo.db>=4.8.0,!=4.13.1,!=4.13.2,!=4.15.0 sqlalchemy sqlalchemy-utils alembic>=0.7.6,!=0.8.1 postgresql = psycopg2 - oslo.db>=4.8.0,!=4.13.1,!=4.13.2 + oslo.db>=4.8.0,!=4.13.1,!=4.13.2,!=4.15.0 sqlalchemy sqlalchemy-utils alembic>=0.7.6,!=0.8.1 @@ -74,6 +74,7 @@ test = os-testr testrepository testscenarios + testresources>=0.2.4 # Apache-2.0/BSD testtools>=0.9.38 WebTest>=2.0.16 doc8 -- GitLab From ecbb5fd4ca9aac10a42659b1657eb6f4122e9a3b Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Thu, 24 Nov 2016 
13:00:20 +0100 Subject: [PATCH 0499/1483] storage: split file driver Change-Id: I9a125ffc40853ec689b88f1164a60b242d3f37d9 --- gnocchi/storage/__init__.py | 14 ++- gnocchi/storage/common/__init__.py | 0 gnocchi/storage/file.py | 128 ++------------------------- gnocchi/storage/incoming/__init__.py | 4 + gnocchi/storage/incoming/file.py | 124 ++++++++++++++++++++++++++ gnocchi/utils.py | 11 +++ setup.cfg | 3 + 7 files changed, 159 insertions(+), 125 deletions(-) create mode 100644 gnocchi/storage/common/__init__.py create mode 100644 gnocchi/storage/incoming/file.py diff --git a/gnocchi/storage/__init__.py b/gnocchi/storage/__init__.py index 7dddc15c..2851d959 100644 --- a/gnocchi/storage/__init__.py +++ b/gnocchi/storage/__init__.py @@ -17,6 +17,7 @@ import operator from oslo_config import cfg from oslo_log import log from stevedore import driver +import stevedore.exception from gnocchi import exceptions from gnocchi import indexer @@ -144,21 +145,26 @@ class MetricUnaggregatable(StorageError): % (", ".join((str(m.id) for m in metrics)), reason)) -def get_driver_class(conf): +def get_driver_class(namespace, conf): """Return the storage driver class. :param conf: The conf to use to determine the driver. 
""" - return driver.DriverManager('gnocchi.storage', + return driver.DriverManager(namespace, conf.storage.driver).driver def get_driver(conf): """Return the configured driver.""" - d = get_driver_class(conf)(conf.storage) + d = get_driver_class('gnocchi.storage', conf)(conf.storage) # TODO(sileht): Temporary set incoming driver here # until we split all drivers - d.incoming = d + try: + d.incoming = get_driver_class( + 'gnocchi.storage.incoming', conf)(conf.storage) + except stevedore.exception.NoMatches: + # Fallback to legacy driver + d.incoming = d return d diff --git a/gnocchi/storage/common/__init__.py b/gnocchi/storage/common/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/gnocchi/storage/file.py b/gnocchi/storage/file.py index 43d25c36..12d40a33 100644 --- a/gnocchi/storage/file.py +++ b/gnocchi/storage/file.py @@ -14,20 +14,16 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
-import contextlib -import datetime import errno import os import shutil import tempfile -import uuid from oslo_config import cfg -import six from gnocchi import storage from gnocchi.storage import _carbonara -from gnocchi.storage.incoming import _carbonara as incoming_carbonara +from gnocchi import utils OPTS = [ @@ -37,40 +33,19 @@ OPTS = [ ] -class FileStorage(_carbonara.CarbonaraBasedStorage, - incoming_carbonara.CarbonaraBasedStorage): - +class FileStorage(_carbonara.CarbonaraBasedStorage): WRITE_FULL = True def __init__(self, conf): super(FileStorage, self).__init__(conf) self.basepath = conf.file_basepath - self.basepath_tmp = os.path.join(conf.file_basepath, - 'tmp') - try: - os.mkdir(self.basepath) - except OSError as e: - if e.errno != errno.EEXIST: - raise - self.measure_path = os.path.join(self.basepath, self.MEASURE_PREFIX) - try: - os.mkdir(self.measure_path) - except OSError as e: - if e.errno != errno.EEXIST: - raise - try: - os.mkdir(self.basepath_tmp) - except OSError as e: - if e.errno != errno.EEXIST: - raise - - def _get_tempfile(self): - return tempfile.NamedTemporaryFile(prefix='gnocchi', - dir=self.basepath_tmp, - delete=False) + self.basepath_tmp = os.path.join(self.basepath, 'tmp') + utils.ensure_paths([self.basepath_tmp]) def _atomic_file_store(self, dest, data): - tmpfile = self._get_tempfile() + tmpfile = tempfile.NamedTemporaryFile( + prefix='gnocchi', dir=self.basepath_tmp, + delete=False) tmpfile.write(data) tmpfile.close() os.rename(tmpfile.name, dest) @@ -93,15 +68,6 @@ class FileStorage(_carbonara.CarbonaraBasedStorage, timestamp_key + "_" + str(granularity)) return path + '_v%s' % version if version else path - def _build_measure_path(self, metric_id, random_id=None): - path = os.path.join(self.measure_path, six.text_type(metric_id)) - if random_id: - if random_id is True: - now = datetime.datetime.utcnow().strftime("_%Y%m%d_%H:%M:%S") - random_id = six.text_type(uuid.uuid4()) + now - return os.path.join(path, random_id) - return 
path - def _create_metric(self, metric): path = self._build_metric_dir(metric) try: @@ -117,86 +83,6 @@ class FileStorage(_carbonara.CarbonaraBasedStorage, if e.errno != errno.EEXIST: raise - def _store_new_measures(self, metric, data): - tmpfile = self._get_tempfile() - tmpfile.write(data) - tmpfile.close() - path = self._build_measure_path(metric.id, True) - while True: - try: - os.rename(tmpfile.name, path) - break - except OSError as e: - if e.errno != errno.ENOENT: - raise - try: - os.mkdir(self._build_measure_path(metric.id)) - except OSError as e: - # NOTE(jd) It's possible that another process created the - # path just before us! In this case, good for us, let's do - # nothing then! (see bug #1475684) - if e.errno != errno.EEXIST: - raise - - def _build_report(self, details): - metric_details = {} - for metric in os.listdir(self.measure_path): - metric_details[metric] = len( - self._list_measures_container_for_metric_id(metric)) - return (len(metric_details.keys()), sum(metric_details.values()), - metric_details if details else None) - - def list_metric_with_measures_to_process(self, size, part, full=False): - if full: - return set(os.listdir(self.measure_path)) - return set( - os.listdir(self.measure_path)[size * part:size * (part + 1)]) - - def _list_measures_container_for_metric_id(self, metric_id): - try: - return os.listdir(self._build_measure_path(metric_id)) - except OSError as e: - # Some other process treated this one, then do nothing - if e.errno == errno.ENOENT: - return [] - raise - - def _delete_measures_files_for_metric_id(self, metric_id, files): - for f in files: - try: - os.unlink(self._build_measure_path(metric_id, f)) - except OSError as e: - # Another process deleted it in the meantime, no prob' - if e.errno != errno.ENOENT: - raise - try: - os.rmdir(self._build_measure_path(metric_id)) - except OSError as e: - # ENOENT: ok, it has been removed at almost the same time - # by another process - # ENOTEMPTY: ok, someone pushed measure in 
the meantime, - # we'll delete the measures and directory later - # EEXIST: some systems use this instead of ENOTEMPTY - if e.errno not in (errno.ENOENT, errno.ENOTEMPTY, errno.EEXIST): - raise - - def delete_unprocessed_measures_for_metric_id(self, metric_id): - files = self._list_measures_container_for_metric_id(metric_id) - self._delete_measures_files_for_metric_id(metric_id, files) - - @contextlib.contextmanager - def process_measure_for_metric(self, metric): - files = self._list_measures_container_for_metric_id(metric.id) - measures = [] - for f in files: - abspath = self._build_measure_path(metric.id, f) - with open(abspath, "rb") as e: - measures.extend(self._unserialize_measures(f, e.read())) - - yield measures - - self._delete_measures_files_for_metric_id(metric.id, files) - def _store_unaggregated_timeserie(self, metric, data, version=3): self._atomic_file_store( self._build_unaggregated_timeserie_path(metric, version), diff --git a/gnocchi/storage/incoming/__init__.py b/gnocchi/storage/incoming/__init__.py index dfbe9396..0dbcc98e 100644 --- a/gnocchi/storage/incoming/__init__.py +++ b/gnocchi/storage/incoming/__init__.py @@ -21,6 +21,10 @@ from gnocchi import exceptions # until we moved out all incoming code from here. class StorageDriver(object): + @staticmethod + def __init__(conf): + pass + @staticmethod def add_measures(metric, measures): """Add a measure to a metric. diff --git a/gnocchi/storage/incoming/file.py b/gnocchi/storage/incoming/file.py new file mode 100644 index 00000000..743e68ab --- /dev/null +++ b/gnocchi/storage/incoming/file.py @@ -0,0 +1,124 @@ +# -*- encoding: utf-8 -*- +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +import contextlib +import datetime +import errno +import os +import tempfile +import uuid + +import six + +from gnocchi.storage.incoming import _carbonara +from gnocchi import utils + + +class FileStorage(_carbonara.CarbonaraBasedStorage): + def __init__(self, conf): + super(FileStorage, self).__init__(conf) + self.basepath = conf.file_basepath + self.basepath_tmp = os.path.join(self.basepath, 'tmp') + self.measure_path = os.path.join(self.basepath, 'measure') + utils.ensure_paths([self.basepath_tmp, self.measure_path]) + + def _build_measure_path(self, metric_id, random_id=None): + path = os.path.join(self.measure_path, six.text_type(metric_id)) + if random_id: + if random_id is True: + now = datetime.datetime.utcnow().strftime("_%Y%m%d_%H:%M:%S") + random_id = six.text_type(uuid.uuid4()) + now + return os.path.join(path, random_id) + return path + + def _store_new_measures(self, metric, data): + tmpfile = tempfile.NamedTemporaryFile( + prefix='gnocchi', dir=self.basepath_tmp, + delete=False) + tmpfile.write(data) + tmpfile.close() + path = self._build_measure_path(metric.id, True) + while True: + try: + os.rename(tmpfile.name, path) + break + except OSError as e: + if e.errno != errno.ENOENT: + raise + try: + os.mkdir(self._build_measure_path(metric.id)) + except OSError as e: + # NOTE(jd) It's possible that another process created the + # path just before us! In this case, good for us, let's do + # nothing then! 
(see bug #1475684) + if e.errno != errno.EEXIST: + raise + + def _build_report(self, details): + metric_details = {} + for metric in os.listdir(self.measure_path): + metric_details[metric] = len( + self._list_measures_container_for_metric_id(metric)) + return (len(metric_details.keys()), sum(metric_details.values()), + metric_details if details else None) + + def list_metric_with_measures_to_process(self, size, part, full=False): + if full: + return set(os.listdir(self.measure_path)) + return set( + os.listdir(self.measure_path)[size * part:size * (part + 1)]) + + def _list_measures_container_for_metric_id(self, metric_id): + try: + return os.listdir(self._build_measure_path(metric_id)) + except OSError as e: + # Some other process treated this one, then do nothing + if e.errno == errno.ENOENT: + return [] + raise + + def _delete_measures_files_for_metric_id(self, metric_id, files): + for f in files: + try: + os.unlink(self._build_measure_path(metric_id, f)) + except OSError as e: + # Another process deleted it in the meantime, no prob' + if e.errno != errno.ENOENT: + raise + try: + os.rmdir(self._build_measure_path(metric_id)) + except OSError as e: + # ENOENT: ok, it has been removed at almost the same time + # by another process + # ENOTEMPTY: ok, someone pushed measure in the meantime, + # we'll delete the measures and directory later + # EEXIST: some systems use this instead of ENOTEMPTY + if e.errno not in (errno.ENOENT, errno.ENOTEMPTY, errno.EEXIST): + raise + + def delete_unprocessed_measures_for_metric_id(self, metric_id): + files = self._list_measures_container_for_metric_id(metric_id) + self._delete_measures_files_for_metric_id(metric_id, files) + + @contextlib.contextmanager + def process_measure_for_metric(self, metric): + files = self._list_measures_container_for_metric_id(metric.id) + measures = [] + for f in files: + abspath = self._build_measure_path(metric.id, f) + with open(abspath, "rb") as e: + measures.extend(self._unserialize_measures(f, 
e.read())) + + yield measures + + self._delete_measures_files_for_metric_id(metric.id, files) diff --git a/gnocchi/utils.py b/gnocchi/utils.py index c3045780..59e0b56d 100644 --- a/gnocchi/utils.py +++ b/gnocchi/utils.py @@ -14,9 +14,11 @@ # License for the specific language governing permissions and limitations # under the License. import datetime +import errno import itertools import multiprocessing import numbers +import os import uuid import iso8601 @@ -182,3 +184,12 @@ def grouper(iterable, n): if not chunk: return yield chunk + + +def ensure_paths(paths): + for p in paths: + try: + os.makedirs(p) + except OSError as e: + if e.errno != errno.EEXIST: + raise diff --git a/setup.cfg b/setup.cfg index 380ab9e6..eb03ae47 100644 --- a/setup.cfg +++ b/setup.cfg @@ -107,6 +107,9 @@ gnocchi.storage = file = gnocchi.storage.file:FileStorage s3 = gnocchi.storage.s3:S3Storage +gnocchi.storage.incoming = + file = gnocchi.storage.incoming.file:FileStorage + gnocchi.indexer = mysql = gnocchi.indexer.sqlalchemy:SQLAlchemyIndexer mysql+pymysql = gnocchi.indexer.sqlalchemy:SQLAlchemyIndexer -- GitLab From 9d0192a4c998756e3194d650d99d6b956f66b81b Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Thu, 24 Nov 2016 12:25:42 +0100 Subject: [PATCH 0500/1483] storage: split ceph driver Change-Id: I97967198f4d4559c868a5b4a6276c02e64a36e61 --- gnocchi/storage/__init__.py | 6 +- gnocchi/storage/ceph.py | 218 +-------------------------- gnocchi/storage/common/ceph.py | 66 ++++++++ gnocchi/storage/incoming/__init__.py | 4 + gnocchi/storage/incoming/ceph.py | 196 ++++++++++++++++++++++++ setup.cfg | 1 + 6 files changed, 275 insertions(+), 216 deletions(-) create mode 100644 gnocchi/storage/common/ceph.py create mode 100644 gnocchi/storage/incoming/ceph.py diff --git a/gnocchi/storage/__init__.py b/gnocchi/storage/__init__.py index 2851d959..6d8fa5f0 100644 --- a/gnocchi/storage/__init__.py +++ b/gnocchi/storage/__init__.py @@ -176,9 +176,9 @@ class StorageDriver(object): def stop(): pass 
- @staticmethod - def upgrade(index): - pass + def upgrade(self, index): + if self.incoming is not self: + self.incoming.upgrade(index) def process_background_tasks(self, index, metrics, sync=False): """Process background tasks for this storage. diff --git a/gnocchi/storage/ceph.py b/gnocchi/storage/ceph.py index 67e3c55e..cc9f06a8 100644 --- a/gnocchi/storage/ceph.py +++ b/gnocchi/storage/ceph.py @@ -13,37 +13,18 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. -from collections import defaultdict -import contextlib -import datetime import errno -import functools -import itertools -import uuid from oslo_config import cfg from oslo_log import log -from oslo_utils import importutils from gnocchi import storage from gnocchi.storage import _carbonara -from gnocchi.storage.incoming import _carbonara as incoming_carbonara +from gnocchi.storage.common import ceph LOG = log.getLogger(__name__) -for RADOS_MODULE_NAME in ('cradox', 'rados'): - rados = importutils.try_import(RADOS_MODULE_NAME) - if rados is not None: - break -else: - RADOS_MODULE_NAME = None - -if rados is not None and hasattr(rados, 'run_in_thread'): - rados.run_in_thread = lambda target, args, timeout=None: target(*args) - LOG.info("rados.run_in_thread is monkeypatched.") - - OPTS = [ cfg.StrOpt('ceph_pool', default='gnocchi', @@ -57,86 +38,20 @@ OPTS = [ help='Ceph configuration file.'), ] +rados = ceph.rados -class CephStorage(_carbonara.CarbonaraBasedStorage, - incoming_carbonara.CarbonaraBasedStorage): +class CephStorage(_carbonara.CarbonaraBasedStorage): WRITE_FULL = False def __init__(self, conf): super(CephStorage, self).__init__(conf) - self.pool = conf.ceph_pool - options = {} - if conf.ceph_keyring: - options['keyring'] = conf.ceph_keyring - if conf.ceph_secret: - options['key'] = conf.ceph_secret - - if not rados: - raise ImportError("No module named 'rados' nor 'cradox'") 
- - if not hasattr(rados, 'OmapIterator'): - raise ImportError("Your rados python module does not support " - "omap feature. Install 'cradox' (recommended) " - "or upgrade 'python-rados' >= 9.1.0 ") - - LOG.info("Ceph storage backend use '%s' python library", - RADOS_MODULE_NAME) - - # NOTE(sileht): librados handles reconnection itself, - # by default if a call timeout (30sec), it raises - # a rados.Timeout exception, and librados - # still continues to reconnect on the next call - self.rados = rados.Rados(conffile=conf.ceph_conffile, - rados_id=conf.ceph_username, - conf=options) - self.rados.connect() - self.ioctx = self.rados.open_ioctx(self.pool) - - # NOTE(sileht): constants can't be class attributes because - # they rely on presence of rados module - - # NOTE(sileht): We allow to read the measure object on - # outdated replicats, that safe for us, we will - # get the new stuffs on next metricd pass. - self.OMAP_READ_FLAGS = (rados.LIBRADOS_OPERATION_BALANCE_READS | - rados.LIBRADOS_OPERATION_SKIPRWLOCKS) - - # NOTE(sileht): That should be safe to manipulate the omap keys - # with any OSDs at the same times, each osd should replicate the - # new key to others and same thing for deletion. - # I wonder how ceph handle rm_omap and set_omap run at same time - # on the same key. I assume the operation are timestamped so that will - # be same. If not, they are still one acceptable race here, a rm_omap - # can finish before all replicats of set_omap are done, but we don't - # care, if that occurs next metricd run, will just remove it again, no - # object with the measure have already been delected by previous, so - # we are safe and good. 
- self.OMAP_WRITE_FLAGS = rados.LIBRADOS_OPERATION_SKIPRWLOCKS + self.rados, self.ioctx = ceph.create_rados_connection(conf) def stop(self): - self.ioctx.aio_flush() - self.ioctx.close() - self.rados.shutdown() + ceph.close_rados_connection(self.rados, self.ioctx) super(CephStorage, self).stop() - def upgrade(self, index): - super(CephStorage, self).upgrade(index) - - # Move names stored in xattrs to omap - try: - xattrs = tuple(k for k, v in - self.ioctx.get_xattrs(self.MEASURE_PREFIX)) - except rados.ObjectNotFound: - return - with rados.WriteOpCtx() as op: - self.ioctx.set_omap(op, xattrs, tuple([b""]*len(xattrs))) - self.ioctx.operate_write_op(op, self.MEASURE_PREFIX, - flags=self.OMAP_WRITE_FLAGS) - - for xattr in xattrs: - self.ioctx.rm_xattr(self.MEASURE_PREFIX, xattr) - def _check_for_metric_upgrade(self, metric): lock = self._lock(metric.id) with lock: @@ -153,129 +68,6 @@ class CephStorage(_carbonara.CarbonaraBasedStorage, self.ioctx.rm_xattr(container, xattr) super(CephStorage, self)._check_for_metric_upgrade(metric) - def _store_new_measures(self, metric, data): - # NOTE(sileht): list all objects in a pool is too slow with - # many objects (2min for 20000 objects in 50osds cluster), - # and enforce us to iterrate over all objects - # So we create an object MEASURE_PREFIX, that have as - # omap the list of objects to process (not xattr because - # it doesn't allow to configure the locking behavior) - name = "_".join(( - self.MEASURE_PREFIX, - str(metric.id), - str(uuid.uuid4()), - datetime.datetime.utcnow().strftime("%Y%m%d_%H:%M:%S"))) - - self.ioctx.write_full(name, data) - - with rados.WriteOpCtx() as op: - self.ioctx.set_omap(op, (name,), (b"",)) - self.ioctx.operate_write_op(op, self.MEASURE_PREFIX, - flags=self.OMAP_WRITE_FLAGS) - - def _build_report(self, details): - names = self._list_object_names_to_process() - metrics = set() - count = 0 - metric_details = defaultdict(int) - for name in names: - count += 1 - metric = name.split("_")[1] - 
metrics.add(metric) - if details: - metric_details[metric] += 1 - return len(metrics), count, metric_details if details else None - - def _list_object_names_to_process(self, prefix=""): - with rados.ReadOpCtx() as op: - omaps, ret = self.ioctx.get_omap_vals(op, "", prefix, -1) - try: - self.ioctx.operate_read_op( - op, self.MEASURE_PREFIX, flag=self.OMAP_READ_FLAGS) - except rados.ObjectNotFound: - # API have still written nothing - return () - # NOTE(sileht): after reading the libradospy, I'm - # not sure that ret will have the correct value - # get_omap_vals transforms the C int to python int - # before operate_read_op is called, I dunno if the int - # content is copied during this transformation or if - # this is a pointer to the C int, I think it's copied... - if ret == errno.ENOENT: - return () - return (k for k, v in omaps) - - def list_metric_with_measures_to_process(self, size, part, full=False): - names = self._list_object_names_to_process() - if full: - objs_it = names - else: - objs_it = itertools.islice(names, size * part, size * (part + 1)) - return set([name.split("_")[1] for name in objs_it]) - - def delete_unprocessed_measures_for_metric_id(self, metric_id): - object_prefix = self.MEASURE_PREFIX + "_" + str(metric_id) - object_names = self._list_object_names_to_process(object_prefix) - # Now clean objects and omap - with rados.WriteOpCtx() as op: - # NOTE(sileht): come on Ceph, no return code - # for this operation ?!! 
- self.ioctx.remove_omap_keys(op, tuple(object_names)) - self.ioctx.operate_write_op(op, self.MEASURE_PREFIX, - flags=self.OMAP_WRITE_FLAGS) - - for n in object_names: - self.ioctx.aio_remove(n) - - @contextlib.contextmanager - def process_measure_for_metric(self, metric): - object_prefix = self.MEASURE_PREFIX + "_" + str(metric.id) - object_names = list(self._list_object_names_to_process(object_prefix)) - - measures = [] - ops = [] - bufsize = 8192 # Same sa rados_read one - - tmp_measures = {} - - def add_to_measures(name, comp, data): - if name in tmp_measures: - tmp_measures[name] += data - else: - tmp_measures[name] = data - if len(data) < bufsize: - measures.extend(self._unserialize_measures(name, - tmp_measures[name])) - del tmp_measures[name] - else: - ops.append(self.ioctx.aio_read( - name, bufsize, len(tmp_measures[name]), - functools.partial(add_to_measures, name) - )) - - for name in object_names: - ops.append(self.ioctx.aio_read( - name, bufsize, 0, - functools.partial(add_to_measures, name) - )) - - while ops: - op = ops.pop() - op.wait_for_complete_and_cb() - - yield measures - - # Now clean objects and omap - with rados.WriteOpCtx() as op: - # NOTE(sileht): come on Ceph, no return code - # for this operation ?!! - self.ioctx.remove_omap_keys(op, tuple(object_names)) - self.ioctx.operate_write_op(op, self.MEASURE_PREFIX, - flags=self.OMAP_WRITE_FLAGS) - - for n in object_names: - self.ioctx.aio_remove(n) - @staticmethod def _get_object_name(metric, timestamp_key, aggregation, granularity, version=3): diff --git a/gnocchi/storage/common/ceph.py b/gnocchi/storage/common/ceph.py new file mode 100644 index 00000000..468bdb19 --- /dev/null +++ b/gnocchi/storage/common/ceph.py @@ -0,0 +1,66 @@ +# -*- encoding: utf-8 -*- +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from oslo_log import log +from oslo_utils import importutils + +LOG = log.getLogger(__name__) + + +for RADOS_MODULE_NAME in ('cradox', 'rados'): + rados = importutils.try_import(RADOS_MODULE_NAME) + if rados is not None: + break +else: + RADOS_MODULE_NAME = None + +if rados is not None and hasattr(rados, 'run_in_thread'): + rados.run_in_thread = lambda target, args, timeout=None: target(*args) + LOG.info("rados.run_in_thread is monkeypatched.") + + +def create_rados_connection(conf): + options = {} + if conf.ceph_keyring: + options['keyring'] = conf.ceph_keyring + if conf.ceph_secret: + options['key'] = conf.ceph_secret + + if not rados: + raise ImportError("No module named 'rados' nor 'cradox'") + + if not hasattr(rados, 'OmapIterator'): + raise ImportError("Your rados python module does not support " + "omap feature. 
Install 'cradox' (recommended) " + "or upgrade 'python-rados' >= 9.1.0 ") + + LOG.info("Ceph storage backend use '%s' python library", + RADOS_MODULE_NAME) + + # NOTE(sileht): librados handles reconnection itself, + # by default if a call timeout (30sec), it raises + # a rados.Timeout exception, and librados + # still continues to reconnect on the next call + conn = rados.Rados(conffile=conf.ceph_conffile, + rados_id=conf.ceph_username, + conf=options) + conn.connect() + ioctx = conn.open_ioctx(conf.ceph_pool) + return conn, ioctx + + +def close_rados_connection(conn, ioctx): + ioctx.aio_flush() + ioctx.close() + conn.shutdown() diff --git a/gnocchi/storage/incoming/__init__.py b/gnocchi/storage/incoming/__init__.py index 0dbcc98e..3cdd4a57 100644 --- a/gnocchi/storage/incoming/__init__.py +++ b/gnocchi/storage/incoming/__init__.py @@ -25,6 +25,10 @@ class StorageDriver(object): def __init__(conf): pass + @staticmethod + def upgrade(indexer): + pass + @staticmethod def add_measures(metric, measures): """Add a measure to a metric. diff --git a/gnocchi/storage/incoming/ceph.py b/gnocchi/storage/incoming/ceph.py new file mode 100644 index 00000000..1ea98780 --- /dev/null +++ b/gnocchi/storage/incoming/ceph.py @@ -0,0 +1,196 @@ +# -*- encoding: utf-8 -*- +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+from collections import defaultdict +import contextlib +import datetime +import errno +import functools +import itertools +import uuid + + +from gnocchi.storage.common import ceph +from gnocchi.storage.incoming import _carbonara + +rados = ceph.rados + + +class CephStorage(_carbonara.CarbonaraBasedStorage): + def __init__(self, conf): + super(CephStorage, self).__init__(conf) + self.rados, self.ioctx = ceph.create_rados_connection(conf) + # NOTE(sileht): constants can't be class attributes because + # they rely on presence of rados module + + # NOTE(sileht): We allow to read the measure object on + # outdated replicats, that safe for us, we will + # get the new stuffs on next metricd pass. + self.OMAP_READ_FLAGS = (rados.LIBRADOS_OPERATION_BALANCE_READS | + rados.LIBRADOS_OPERATION_SKIPRWLOCKS) + + # NOTE(sileht): That should be safe to manipulate the omap keys + # with any OSDs at the same times, each osd should replicate the + # new key to others and same thing for deletion. + # I wonder how ceph handle rm_omap and set_omap run at same time + # on the same key. I assume the operation are timestamped so that will + # be same. If not, they are still one acceptable race here, a rm_omap + # can finish before all replicats of set_omap are done, but we don't + # care, if that occurs next metricd run, will just remove it again, no + # object with the measure have already been delected by previous, so + # we are safe and good. 
+ self.OMAP_WRITE_FLAGS = rados.LIBRADOS_OPERATION_SKIPRWLOCKS + + def stop(self): + ceph.close_rados_connection(self.rados, self.ioctx) + super(CephStorage, self).stop() + + def upgrade(self, index): + super(CephStorage, self).upgrade(index) + + # Move names stored in xattrs to omap + try: + xattrs = tuple(k for k, v in + self.ioctx.get_xattrs(self.MEASURE_PREFIX)) + except rados.ObjectNotFound: + return + with rados.WriteOpCtx() as op: + self.ioctx.set_omap(op, xattrs, tuple([b""]*len(xattrs))) + self.ioctx.operate_write_op(op, self.MEASURE_PREFIX, + flags=self.OMAP_WRITE_FLAGS) + + for xattr in xattrs: + self.ioctx.rm_xattr(self.MEASURE_PREFIX, xattr) + + def _store_new_measures(self, metric, data): + # NOTE(sileht): list all objects in a pool is too slow with + # many objects (2min for 20000 objects in 50osds cluster), + # and enforce us to iterrate over all objects + # So we create an object MEASURE_PREFIX, that have as + # omap the list of objects to process (not xattr because + # it doesn't allow to configure the locking behavior) + name = "_".join(( + self.MEASURE_PREFIX, + str(metric.id), + str(uuid.uuid4()), + datetime.datetime.utcnow().strftime("%Y%m%d_%H:%M:%S"))) + + self.ioctx.write_full(name, data) + + with rados.WriteOpCtx() as op: + self.ioctx.set_omap(op, (name,), (b"",)) + self.ioctx.operate_write_op(op, self.MEASURE_PREFIX, + flags=self.OMAP_WRITE_FLAGS) + + def _build_report(self, details): + names = self._list_object_names_to_process() + metrics = set() + count = 0 + metric_details = defaultdict(int) + for name in names: + count += 1 + metric = name.split("_")[1] + metrics.add(metric) + if details: + metric_details[metric] += 1 + return len(metrics), count, metric_details if details else None + + def _list_object_names_to_process(self, prefix=""): + with rados.ReadOpCtx() as op: + omaps, ret = self.ioctx.get_omap_vals(op, "", prefix, -1) + try: + self.ioctx.operate_read_op( + op, self.MEASURE_PREFIX, flag=self.OMAP_READ_FLAGS) + except 
rados.ObjectNotFound: + # API have still written nothing + return () + # NOTE(sileht): after reading the libradospy, I'm + # not sure that ret will have the correct value + # get_omap_vals transforms the C int to python int + # before operate_read_op is called, I dunno if the int + # content is copied during this transformation or if + # this is a pointer to the C int, I think it's copied... + if ret == errno.ENOENT: + return () + return (k for k, v in omaps) + + def list_metric_with_measures_to_process(self, size, part, full=False): + names = self._list_object_names_to_process() + if full: + objs_it = names + else: + objs_it = itertools.islice(names, size * part, size * (part + 1)) + return set([name.split("_")[1] for name in objs_it]) + + def delete_unprocessed_measures_for_metric_id(self, metric_id): + object_prefix = self.MEASURE_PREFIX + "_" + str(metric_id) + object_names = self._list_object_names_to_process(object_prefix) + # Now clean objects and omap + with rados.WriteOpCtx() as op: + # NOTE(sileht): come on Ceph, no return code + # for this operation ?!! 
+ self.ioctx.remove_omap_keys(op, tuple(object_names)) + self.ioctx.operate_write_op(op, self.MEASURE_PREFIX, + flags=self.OMAP_WRITE_FLAGS) + + for n in object_names: + self.ioctx.aio_remove(n) + + @contextlib.contextmanager + def process_measure_for_metric(self, metric): + object_prefix = self.MEASURE_PREFIX + "_" + str(metric.id) + object_names = list(self._list_object_names_to_process(object_prefix)) + + measures = [] + ops = [] + bufsize = 8192 # Same sa rados_read one + + tmp_measures = {} + + def add_to_measures(name, comp, data): + if name in tmp_measures: + tmp_measures[name] += data + else: + tmp_measures[name] = data + if len(data) < bufsize: + measures.extend(self._unserialize_measures(name, + tmp_measures[name])) + del tmp_measures[name] + else: + ops.append(self.ioctx.aio_read( + name, bufsize, len(tmp_measures[name]), + functools.partial(add_to_measures, name) + )) + + for name in object_names: + ops.append(self.ioctx.aio_read( + name, bufsize, 0, + functools.partial(add_to_measures, name) + )) + + while ops: + op = ops.pop() + op.wait_for_complete_and_cb() + + yield measures + + # Now clean objects and omap + with rados.WriteOpCtx() as op: + # NOTE(sileht): come on Ceph, no return code + # for this operation ?!! 
+ self.ioctx.remove_omap_keys(op, tuple(object_names)) + self.ioctx.operate_write_op(op, self.MEASURE_PREFIX, + flags=self.OMAP_WRITE_FLAGS) + + for n in object_names: + self.ioctx.aio_remove(n) diff --git a/setup.cfg b/setup.cfg index eb03ae47..0e9e76d2 100644 --- a/setup.cfg +++ b/setup.cfg @@ -108,6 +108,7 @@ gnocchi.storage = s3 = gnocchi.storage.s3:S3Storage gnocchi.storage.incoming = + ceph = gnocchi.storage.incoming.ceph:CephStorage file = gnocchi.storage.incoming.file:FileStorage gnocchi.indexer = -- GitLab From bbe2d378f9c14e2a3811247ed873560360d0f521 Mon Sep 17 00:00:00 2001 From: gord chung Date: Tue, 6 Dec 2016 22:30:13 +0000 Subject: [PATCH 0501/1483] use datetime when defining series range the rest api passes in datetime when retreiving measures. our tests should consistently use datetime even though the carbonara driver will handle epoch as well Change-Id: I79997de48ee6c128210731d57115020710afd837 --- gnocchi/tests/test_storage.py | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/gnocchi/tests/test_storage.py b/gnocchi/tests/test_storage.py index 961a6c95..9815e33c 100644 --- a/gnocchi/tests/test_storage.py +++ b/gnocchi/tests/test_storage.py @@ -569,7 +569,7 @@ class TestStorageDriver(tests_base.TestCase): values = self.storage.get_cross_metric_measures( [self.metric, metric2], - from_timestamp=utils.to_timestamp('2014-01-01 12:10:00')) + from_timestamp=datetime.datetime(2014, 1, 1, 12, 10, 0)) self.assertEqual([ (utils.datetime_utc(2014, 1, 1), 86400.0, 22.25), (utils.datetime_utc(2014, 1, 1, 12), 3600.0, 22.25), @@ -578,7 +578,7 @@ class TestStorageDriver(tests_base.TestCase): values = self.storage.get_cross_metric_measures( [self.metric, metric2], - to_timestamp=utils.to_timestamp('2014-01-01 12:05:00')) + to_timestamp=datetime.datetime(2014, 1, 1, 12, 5, 0)) self.assertEqual([ (utils.datetime_utc(2014, 1, 1, 0, 0, 0), 86400.0, 22.25), @@ -588,8 +588,8 @@ class TestStorageDriver(tests_base.TestCase): values = 
self.storage.get_cross_metric_measures( [self.metric, metric2], - to_timestamp=utils.to_timestamp('2014-01-01 12:10:10'), - from_timestamp=utils.to_timestamp('2014-01-01 12:10:10')) + from_timestamp=datetime.datetime(2014, 1, 1, 12, 10, 10), + to_timestamp=datetime.datetime(2014, 1, 1, 12, 10, 10)) self.assertEqual([ (utils.datetime_utc(2014, 1, 1), 86400.0, 22.25), (utils.datetime_utc(2014, 1, 1, 12), 3600.0, 22.25), @@ -598,8 +598,8 @@ class TestStorageDriver(tests_base.TestCase): values = self.storage.get_cross_metric_measures( [self.metric, metric2], - from_timestamp=utils.to_timestamp('2014-01-01 12:00:00'), - to_timestamp=utils.to_timestamp('2014-01-01 12:00:01')) + from_timestamp=datetime.datetime(2014, 1, 1, 12, 0, 0), + to_timestamp=datetime.datetime(2014, 1, 1, 12, 0, 1)) self.assertEqual([ (utils.datetime_utc(2014, 1, 1), 86400.0, 22.25), @@ -609,8 +609,8 @@ class TestStorageDriver(tests_base.TestCase): values = self.storage.get_cross_metric_measures( [self.metric, metric2], - from_timestamp=utils.to_timestamp('2014-01-01 12:00:00'), - to_timestamp=utils.to_timestamp('2014-01-01 12:00:01'), + from_timestamp=datetime.datetime(2014, 1, 1, 12, 0, 0), + to_timestamp=datetime.datetime(2014, 1, 1, 12, 0, 1), granularity=300.0) self.assertEqual([ -- GitLab From 399f0d3bf1af0a475deb9e55f6cd443e28b7b6db Mon Sep 17 00:00:00 2001 From: Xiang Li Date: Fri, 23 Sep 2016 07:20:31 -0400 Subject: [PATCH 0502/1483] Modify api startup parameters in devstack plugin Because the api binary script is generated by pbr now, the api startup parameters need to be adapted accordingly in devstack script or the api will be unable to start if the devstack deploy mode is 'simple'. 
Change-Id: I72abc3a83a4a2cf993198b0b1ac98c79ee26ec56 Closes-Bug: #1626979 (cherry picked from commit b8c0c23dd5b47624c580c3cb5b72307fdc23fdff) --- devstack/plugin.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/devstack/plugin.sh b/devstack/plugin.sh index 63dd0500..336fe719 100644 --- a/devstack/plugin.sh +++ b/devstack/plugin.sh @@ -426,7 +426,7 @@ function start_gnocchi { elif [ "$GNOCCHI_DEPLOY" == "uwsgi" ]; then run_process gnocchi-api "$GNOCCHI_BIN_DIR/uwsgi $GNOCCHI_UWSGI_FILE" else - run_process gnocchi-api "$GNOCCHI_BIN_DIR/gnocchi-api -d -v --config-file $GNOCCHI_CONF" + run_process gnocchi-api "$GNOCCHI_BIN_DIR/gnocchi-api --port $GNOCCHI_SERVICE_PORT" fi # only die on API if it was actually intended to be turned on if is_service_enabled gnocchi-api; then -- GitLab From 2a787565d557792e613be38ae90cc83541fbb904 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Mon, 5 Dec 2016 11:57:28 +0100 Subject: [PATCH 0503/1483] fix oslo.db 4.15.0 breakage oslo.db 4.15.0 breaks our gate because it assumes we use the oslo.db provision module. This have been fixed by Ie8c454528ce3aa816c04fbb4beb69f4b5ec57e9c Also oslo.db now cleans the database resources. We was mocking self.db to avoid that before because of the provision module dependencies. But since this module is no more required, we can use the oslo.db facility. 
This change does that but keep compatibility with oslo.db < 4.15.0 Change-Id: I69f8ff9f702064e8fc5bf4018ebc6f3b2a8ea1a8 (cherry picked from commit f5794af695b9a105f500ef56751a13c8a1dbe6cf) --- gnocchi/tests/indexer/sqlalchemy/test_migrations.py | 11 ++++++++--- setup.cfg | 5 +++-- 2 files changed, 11 insertions(+), 5 deletions(-) diff --git a/gnocchi/tests/indexer/sqlalchemy/test_migrations.py b/gnocchi/tests/indexer/sqlalchemy/test_migrations.py index 62445b0b..da44cb29 100644 --- a/gnocchi/tests/indexer/sqlalchemy/test_migrations.py +++ b/gnocchi/tests/indexer/sqlalchemy/test_migrations.py @@ -16,6 +16,7 @@ import abc import fixtures import mock +import oslo_db.exception from oslo_db.sqlalchemy import test_migrations import six import sqlalchemy as sa @@ -50,10 +51,14 @@ class ModelsMigrationsSync( self.index = indexer.get_driver(self.conf) self.index.connect() self.index.upgrade(nocreate=True, create_legacy_resource_types=True) + self.addCleanup(self._drop_database) - def tearDown(self): - sqlalchemy_utils.drop_database(self.conf.indexer.url) - super(ModelsMigrationsSync, self).tearDown() + def _drop_database(self): + try: + sqlalchemy_utils.drop_database(self.conf.indexer.url) + except oslo_db.exception.DBNonExistentDatabase: + # NOTE(sileht): oslo db >= 4.15.0 cleanup this for us + pass @staticmethod def get_metadata(): diff --git a/setup.cfg b/setup.cfg index 5a5119ab..cd061727 100644 --- a/setup.cfg +++ b/setup.cfg @@ -25,13 +25,13 @@ keystone = keystonemiddleware>=4.0.0 mysql = pymysql - oslo.db>=4.8.0,!=4.13.1,!=4.13.2 + oslo.db>=4.8.0,!=4.13.1,!=4.13.2,!=4.15.0 sqlalchemy sqlalchemy-utils alembic>=0.7.6,!=0.8.1 postgresql = psycopg2 - oslo.db>=4.8.0,!=4.13.1,!=4.13.2 + oslo.db>=4.8.0,!=4.13.1,!=4.13.2,!=4.15.0 sqlalchemy sqlalchemy-utils alembic>=0.7.6,!=0.8.1 @@ -70,6 +70,7 @@ test = os-testr testrepository testscenarios + testresources>=0.2.4 # Apache-2.0/BSD testtools>=0.9.38 WebTest>=2.0.16 doc8 -- GitLab From 8ce4215763c798e19cf9b85211b6eea08abfd15d 
Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Thu, 24 Nov 2016 15:06:09 +0100 Subject: [PATCH 0504/1483] config: only include oslo.middleware options that are shipped Adding options from middleware not enabled by default is not a good idea. Change-Id: I3ed922c84e6d37f2424262d108e6bdc5a03e2858 (cherry picked from commit 14a7cbaa645c14dd04f859d56b937371518831f1) --- etc/gnocchi/gnocchi-config-generator.conf | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/etc/gnocchi/gnocchi-config-generator.conf b/etc/gnocchi/gnocchi-config-generator.conf index fa6ae57b..4a9d23b8 100644 --- a/etc/gnocchi/gnocchi-config-generator.conf +++ b/etc/gnocchi/gnocchi-config-generator.conf @@ -4,6 +4,7 @@ wrap_width = 79 namespace = gnocchi namespace = oslo.db namespace = oslo.log -namespace = oslo.middleware +namespace = oslo.middleware.cors +namespace = oslo.middleware.http_proxy_to_wsgi namespace = oslo.policy namespace = keystonemiddleware.auth_token -- GitLab From 7cff2f7f63c5ac233ad490275248bb49ffa5362b Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Mon, 28 Nov 2016 14:18:29 +0100 Subject: [PATCH 0505/1483] Enable oslo_middleware healthcheck middleware by default This allows to get the health of the API endpoint easily. 
Change-Id: I2d11dfe914be1453d380c1302180448b295df43d --- etc/gnocchi/api-paste.ini | 6 ++++++ etc/gnocchi/gnocchi-config-generator.conf | 1 + gnocchi/tests/gabbi/gabbits/healthcheck.yaml | 7 +++++++ .../notes/healthcheck-middleware-81c2f0d02ebdb5cc.yaml | 5 +++++ requirements.txt | 2 +- 5 files changed, 20 insertions(+), 1 deletion(-) create mode 100644 gnocchi/tests/gabbi/gabbits/healthcheck.yaml create mode 100644 releasenotes/notes/healthcheck-middleware-81c2f0d02ebdb5cc.yaml diff --git a/etc/gnocchi/api-paste.ini b/etc/gnocchi/api-paste.ini index ad56b17e..8dc07c0f 100644 --- a/etc/gnocchi/api-paste.ini +++ b/etc/gnocchi/api-paste.ini @@ -6,11 +6,13 @@ pipeline = gnocchi+noauth use = egg:Paste#urlmap / = gnocchiversions_pipeline /v1 = gnocchiv1+noauth +/healthcheck = healthcheck [composite:gnocchi+auth] use = egg:Paste#urlmap / = gnocchiversions_pipeline /v1 = gnocchiv1+auth +/healthcheck = healthcheck [pipeline:gnocchiv1+noauth] pipeline = http_proxy_to_wsgi gnocchiv1 @@ -36,3 +38,7 @@ oslo_config_project = gnocchi [filter:http_proxy_to_wsgi] paste.filter_factory = oslo_middleware.http_proxy_to_wsgi:HTTPProxyToWSGI.factory oslo_config_project = gnocchi + +[app:healthcheck] +use = egg:oslo.middleware#healthcheck +oslo_config_project = gnocchi diff --git a/etc/gnocchi/gnocchi-config-generator.conf b/etc/gnocchi/gnocchi-config-generator.conf index ffbe5311..741f015f 100644 --- a/etc/gnocchi/gnocchi-config-generator.conf +++ b/etc/gnocchi/gnocchi-config-generator.conf @@ -5,6 +5,7 @@ namespace = gnocchi namespace = oslo.db namespace = oslo.log namespace = oslo.middleware.cors +namespace = oslo.middleware.healthcheck namespace = oslo.middleware.http_proxy_to_wsgi namespace = oslo.policy namespace = cotyledon diff --git a/gnocchi/tests/gabbi/gabbits/healthcheck.yaml b/gnocchi/tests/gabbi/gabbits/healthcheck.yaml new file mode 100644 index 00000000..a2cf6fd1 --- /dev/null +++ b/gnocchi/tests/gabbi/gabbits/healthcheck.yaml @@ -0,0 +1,7 @@ +fixtures: + - ConfigFixture 
+ +tests: + - name: healthcheck + GET: /healthcheck + status: 200 diff --git a/releasenotes/notes/healthcheck-middleware-81c2f0d02ebdb5cc.yaml b/releasenotes/notes/healthcheck-middleware-81c2f0d02ebdb5cc.yaml new file mode 100644 index 00000000..5e28af9c --- /dev/null +++ b/releasenotes/notes/healthcheck-middleware-81c2f0d02ebdb5cc.yaml @@ -0,0 +1,5 @@ +--- +features: + - A healthcheck endpoint is provided by default at /healthcheck. It leverages + oslo_middleware healthcheck middleware. It allows to retrieve information + about the health of the API service. diff --git a/requirements.txt b/requirements.txt index 79626e71..a341dcf4 100644 --- a/requirements.txt +++ b/requirements.txt @@ -6,7 +6,7 @@ oslo.log>=2.3.0 oslo.policy>=0.3.0 oslo.serialization>=1.4.0 oslo.utils>=3.18.0 -oslo.middleware>=3.11.0 +oslo.middleware>=3.22.0 pandas>=0.17.0 pecan>=0.9 futures -- GitLab From 33851864ad1b487de13159f8a51fdb4253d200f4 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Fri, 9 Dec 2016 11:37:54 +0100 Subject: [PATCH 0506/1483] doc: add a page talking about collectd support Change-Id: I3ae3c169b1575bed6960339a9776ac6cfe80b448 --- doc/source/collectd.rst | 14 ++++++++++++++ doc/source/index.rst | 3 ++- 2 files changed, 16 insertions(+), 1 deletion(-) create mode 100644 doc/source/collectd.rst diff --git a/doc/source/collectd.rst b/doc/source/collectd.rst new file mode 100644 index 00000000..835df4ba --- /dev/null +++ b/doc/source/collectd.rst @@ -0,0 +1,14 @@ +================== + Collectd support +================== + +`Collectd`_ can use Gnocchi to store its data through a plugin called +`collectd-gnocchi`. It can be installed with _pip_:: + + pip install collectd-gnocchi + +`Sources and documentation`_ are also available. + + +.. _`Collectd`: https://www.collectd.org/ +.. 
_`Sources and documentation`: https://github.com/jd/collectd-gnocchi diff --git a/doc/source/index.rst b/doc/source/index.rst index 36757a1f..4d72f9c0 100644 --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -52,7 +52,7 @@ Key Features - Grafana support - Nagios/Icinga support - Statsd protocol support - +- Collectd plugin support Documentation ------------- @@ -69,6 +69,7 @@ Documentation statsd grafana nagios + collectd glossary releasenotes/index.rst -- GitLab From 05c2656e034468cb457bed2d20ca3889c9d495e3 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Thu, 24 Nov 2016 17:22:37 +0100 Subject: [PATCH 0507/1483] storage: split swift driver Change-Id: I3e791f92a9a3ef19927abb577403521f94418cf8 --- gnocchi/storage/common/swift.py | 63 ++++++++++++++++ gnocchi/storage/incoming/swift.py | 98 +++++++++++++++++++++++++ gnocchi/storage/swift.py | 115 ++---------------------------- setup.cfg | 1 + 4 files changed, 169 insertions(+), 108 deletions(-) create mode 100644 gnocchi/storage/common/swift.py create mode 100644 gnocchi/storage/incoming/swift.py diff --git a/gnocchi/storage/common/swift.py b/gnocchi/storage/common/swift.py new file mode 100644 index 00000000..95003fdd --- /dev/null +++ b/gnocchi/storage/common/swift.py @@ -0,0 +1,63 @@ +# -*- encoding: utf-8 -*- +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ + +from oslo_log import log +from six.moves.urllib.parse import quote + +try: + from swiftclient import client as swclient + from swiftclient import utils as swift_utils +except ImportError: + swclient = None + swift_utils = None + +from gnocchi import storage + +LOG = log.getLogger(__name__) + + +def get_connection(conf): + if swclient is None: + raise RuntimeError("python-swiftclient unavailable") + return swclient.Connection( + auth_version=conf.swift_auth_version, + authurl=conf.swift_authurl, + preauthtoken=conf.swift_preauthtoken, + user=conf.swift_user, + key=conf.swift_key, + tenant_name=conf.swift_project_name, + timeout=conf.swift_timeout, + os_options={'endpoint_type': conf.swift_endpoint_type, + 'user_domain_name': conf.swift_user_domain_name}, + retries=0) + + +POST_HEADERS = {'Accept': 'application/json', 'Content-Type': 'text/plain'} + + +def bulk_delete(conn, container, objects): + objects = [quote(('/%s/%s' % (container, obj['name'])).encode('utf-8')) + for obj in objects] + resp = {} + headers, body = conn.post_account( + headers=POST_HEADERS, query_string='bulk-delete', + data=b''.join(obj.encode('utf-8') + b'\n' for obj in objects), + response_dict=resp) + if resp['status'] != 200: + raise storage.StorageError( + "Unable to bulk-delete, is bulk-delete enabled in Swift?") + resp = swift_utils.parse_api_response(headers, body) + LOG.debug('# of objects deleted: %s, # of objects skipped: %s', + resp['Number Deleted'], resp['Number Not Found']) diff --git a/gnocchi/storage/incoming/swift.py b/gnocchi/storage/incoming/swift.py new file mode 100644 index 00000000..7b996c0c --- /dev/null +++ b/gnocchi/storage/incoming/swift.py @@ -0,0 +1,98 @@ +# -*- encoding: utf-8 -*- +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +from collections import defaultdict +import contextlib +import datetime +import uuid + +from oslo_log import log +import six + +from gnocchi.storage.common import swift +from gnocchi.storage.incoming import _carbonara + +swclient = swift.swclient +swift_utils = swift.swift_utils + +LOG = log.getLogger(__name__) + + +class SwiftStorage(_carbonara.CarbonaraBasedStorage): + def __init__(self, conf): + super(SwiftStorage, self).__init__(conf) + self.swift = swift.get_connection(conf) + self.swift.put_container(self.MEASURE_PREFIX) + + def _store_new_measures(self, metric, data): + now = datetime.datetime.utcnow().strftime("_%Y%m%d_%H:%M:%S") + self.swift.put_object( + self.MEASURE_PREFIX, + six.text_type(metric.id) + "/" + six.text_type(uuid.uuid4()) + now, + data) + + def _build_report(self, details): + metric_details = defaultdict(int) + if details: + headers, files = self.swift.get_container(self.MEASURE_PREFIX, + full_listing=True) + metrics = set() + for f in files: + metric, metric_files = f['name'].split("/", 1) + metric_details[metric] += 1 + metrics.add(metric) + nb_metrics = len(metrics) + else: + headers, files = self.swift.get_container(self.MEASURE_PREFIX, + delimiter='/', + full_listing=True) + nb_metrics = len(files) + measures = int(headers.get('x-container-object-count')) + return nb_metrics, measures, metric_details if details else None + + def list_metric_with_measures_to_process(self, size, part, full=False): + limit = None + if not full: + limit = size * (part + 1) + headers, files = self.swift.get_container(self.MEASURE_PREFIX, + 
delimiter='/', + full_listing=full, + limit=limit) + if not full: + files = files[size * part:] + return set(f['subdir'][:-1] for f in files if 'subdir' in f) + + def _list_measure_files_for_metric_id(self, metric_id): + headers, files = self.swift.get_container( + self.MEASURE_PREFIX, path=six.text_type(metric_id), + full_listing=True) + return files + + def delete_unprocessed_measures_for_metric_id(self, metric_id): + files = self._list_measure_files_for_metric_id(metric_id) + swift.bulk_delete(self.swift, self.MEASURE_PREFIX, files) + + @contextlib.contextmanager + def process_measure_for_metric(self, metric): + files = self._list_measure_files_for_metric_id(metric.id) + + measures = [] + for f in files: + headers, data = self.swift.get_object( + self.MEASURE_PREFIX, f['name']) + measures.extend(self._unserialize_measures(f['name'], data)) + + yield measures + + # Now clean objects + swift.bulk_delete(self.swift, self.MEASURE_PREFIX, files) diff --git a/gnocchi/storage/swift.py b/gnocchi/storage/swift.py index fca66b91..6df94ac7 100644 --- a/gnocchi/storage/swift.py +++ b/gnocchi/storage/swift.py @@ -13,24 +13,16 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
-from collections import defaultdict -import contextlib -import datetime -import uuid from oslo_config import cfg from oslo_log import log -import six -from six.moves.urllib.parse import quote -try: - from swiftclient import client as swclient - from swiftclient import utils as swift_utils -except ImportError: - swclient = None from gnocchi import storage from gnocchi.storage import _carbonara -from gnocchi.storage.incoming import _carbonara as incoming_carbonara +from gnocchi.storage.common import swift + +swclient = swift.swclient +swift_utils = swift.swift_utils LOG = log.getLogger(__name__) @@ -75,29 +67,14 @@ OPTS = [ ] -class SwiftStorage(_carbonara.CarbonaraBasedStorage, - incoming_carbonara.CarbonaraBasedStorage): +class SwiftStorage(_carbonara.CarbonaraBasedStorage): WRITE_FULL = True - POST_HEADERS = {'Accept': 'application/json', 'Content-Type': 'text/plain'} def __init__(self, conf): super(SwiftStorage, self).__init__(conf) - if swclient is None: - raise RuntimeError("python-swiftclient unavailable") - self.swift = swclient.Connection( - auth_version=conf.swift_auth_version, - authurl=conf.swift_authurl, - preauthtoken=conf.swift_preauthtoken, - user=conf.swift_user, - key=conf.swift_key, - tenant_name=conf.swift_project_name, - timeout=conf.swift_timeout, - os_options={'endpoint_type': conf.swift_endpoint_type, - 'user_domain_name': conf.swift_user_domain_name}, - retries=0) + self.swift = swift.get_connection(conf) self._container_prefix = conf.swift_container_prefix - self.swift.put_container(self.MEASURE_PREFIX) def _container_name(self, metric): return '%s.%s' % (self._container_prefix, str(metric.id)) @@ -117,84 +94,6 @@ class SwiftStorage(_carbonara.CarbonaraBasedStorage, if resp['status'] == 204: raise storage.MetricAlreadyExists(metric) - def _store_new_measures(self, metric, data): - now = datetime.datetime.utcnow().strftime("_%Y%m%d_%H:%M:%S") - self.swift.put_object( - self.MEASURE_PREFIX, - six.text_type(metric.id) + "/" + 
six.text_type(uuid.uuid4()) + now, - data) - - def _build_report(self, details): - metric_details = defaultdict(int) - if details: - headers, files = self.swift.get_container(self.MEASURE_PREFIX, - full_listing=True) - metrics = set() - for f in files: - metric, metric_files = f['name'].split("/", 1) - metric_details[metric] += 1 - metrics.add(metric) - nb_metrics = len(metrics) - else: - headers, files = self.swift.get_container(self.MEASURE_PREFIX, - delimiter='/', - full_listing=True) - nb_metrics = len(files) - measures = int(headers.get('x-container-object-count')) - return nb_metrics, measures, metric_details if details else None - - def list_metric_with_measures_to_process(self, size, part, full=False): - limit = None - if not full: - limit = size * (part + 1) - headers, files = self.swift.get_container(self.MEASURE_PREFIX, - delimiter='/', - full_listing=full, - limit=limit) - if not full: - files = files[size * part:] - return set(f['subdir'][:-1] for f in files if 'subdir' in f) - - def _list_measure_files_for_metric_id(self, metric_id): - headers, files = self.swift.get_container( - self.MEASURE_PREFIX, path=six.text_type(metric_id), - full_listing=True) - return files - - def _bulk_delete(self, container, objects): - objects = [quote(('/%s/%s' % (container, obj['name'])).encode('utf-8')) - for obj in objects] - resp = {} - headers, body = self.swift.post_account( - headers=self.POST_HEADERS, query_string='bulk-delete', - data=b''.join(obj.encode('utf-8') + b'\n' for obj in objects), - response_dict=resp) - if resp['status'] != 200: - raise storage.StorageError( - "Unable to bulk-delete, is bulk-delete enabled in Swift?") - resp = swift_utils.parse_api_response(headers, body) - LOG.debug('# of objects deleted: %s, # of objects skipped: %s', - resp['Number Deleted'], resp['Number Not Found']) - - def delete_unprocessed_measures_for_metric_id(self, metric_id): - files = self._list_measure_files_for_metric_id(metric_id) - 
self._bulk_delete(self.MEASURE_PREFIX, files) - - @contextlib.contextmanager - def process_measure_for_metric(self, metric): - files = self._list_measure_files_for_metric_id(metric.id) - - measures = [] - for f in files: - headers, data = self.swift.get_object( - self.MEASURE_PREFIX, f['name']) - measures.extend(self._unserialize_measures(f['name'], data)) - - yield measures - - # Now clean objects - self._bulk_delete(self.MEASURE_PREFIX, files) - def _store_metric_measures(self, metric, timestamp_key, aggregation, granularity, data, offset=None, version=3): self.swift.put_object( @@ -221,7 +120,7 @@ class SwiftStorage(_carbonara.CarbonaraBasedStorage, # Maybe it never has been created (no measure) raise else: - self._bulk_delete(container, files) + swift.bulk_delete(self.swift, container, files) try: self.swift.delete_container(container) except swclient.ClientException as e: diff --git a/setup.cfg b/setup.cfg index 5f6ab156..150a0276 100644 --- a/setup.cfg +++ b/setup.cfg @@ -111,6 +111,7 @@ gnocchi.storage = gnocchi.storage.incoming = ceph = gnocchi.storage.incoming.ceph:CephStorage file = gnocchi.storage.incoming.file:FileStorage + swift = gnocchi.storage.incoming.swift:SwiftStorage gnocchi.indexer = mysql = gnocchi.indexer.sqlalchemy:SQLAlchemyIndexer -- GitLab From fb312488dd45b12d78efc2894546129c40e184ea Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Tue, 13 Dec 2016 15:57:55 +0100 Subject: [PATCH 0508/1483] Enable H904 hacking check This makes sure we interpolate log only if needed. Change-Id: Ib2c9553f2acc59c8bd47561803aebe1b770bcbe8 --- gnocchi/carbonara.py | 2 +- gnocchi/storage/_carbonara.py | 26 +++++++++++++------------- tox.ini | 1 + 3 files changed, 15 insertions(+), 14 deletions(-) diff --git a/gnocchi/carbonara.py b/gnocchi/carbonara.py index 58718088..7684e882 100644 --- a/gnocchi/carbonara.py +++ b/gnocchi/carbonara.py @@ -782,7 +782,7 @@ class AggregatedTimeSerie(TimeSerie): "timeseries. 
" "right_boundary_ts=%(right_boundary_ts)s, " "left_boundary_ts=%(left_boundary_ts)s, " - "groups=%(groups)s" % { + "groups=%(groups)s", { 'right_boundary_ts': right_boundary_ts, 'left_boundary_ts': left_boundary_ts, 'groups': list(grouped) diff --git a/gnocchi/storage/_carbonara.py b/gnocchi/storage/_carbonara.py index 23362c67..5b26a6f1 100644 --- a/gnocchi/storage/_carbonara.py +++ b/gnocchi/storage/_carbonara.py @@ -102,8 +102,8 @@ class CarbonaraBasedStorage(storage.StorageDriver): ) LOG.debug( "Retrieve unaggregated measures " - "for %s in %.2fs" - % (metric.id, sw.elapsed())) + "for %s in %.2fs", + metric.id, sw.elapsed()) try: return carbonara.BoundTimeSerie.unserialize( raw_measures, block_size, back_window) @@ -365,8 +365,8 @@ class CarbonaraBasedStorage(storage.StorageDriver): metric.archive_policy.aggregation_methods, metric.archive_policy.definition): LOG.debug( - "Checking if the metric %s needs migration for %s" - % (metric, agg_method)) + "Checking if the metric %s needs migration for %s", + metric, agg_method) try: all_keys = self._list_split_keys_for_metric( @@ -427,7 +427,7 @@ class CarbonaraBasedStorage(storage.StorageDriver): metric_id) except coordination.LockAcquireFailed: LOG.debug("Cannot acquire lock for metric %s, postponing " - "unprocessed measures deletion" % metric_id) + "unprocessed measures deletion", metric_id) for metric in metrics: lock = self._lock(metric.id) @@ -438,15 +438,15 @@ class CarbonaraBasedStorage(storage.StorageDriver): continue try: locksw = timeutils.StopWatch().start() - LOG.debug("Processing measures for %s" % metric) + LOG.debug("Processing measures for %s", metric) with self.incoming.process_measure_for_metric(metric) \ as measures: self._compute_and_store_timeseries(metric, measures) - LOG.debug("Metric %s locked during %.2f seconds" % - (metric.id, locksw.elapsed())) + LOG.debug("Metric %s locked during %.2f seconds", + metric.id, locksw.elapsed()) except Exception: - LOG.debug("Metric %s locked during %.2f 
seconds" % - (metric.id, locksw.elapsed())) + LOG.debug("Metric %s locked during %.2f seconds", + metric.id, locksw.elapsed()) if sync: raise LOG.error("Error processing new measures", exc_info=True) @@ -457,7 +457,7 @@ class CarbonaraBasedStorage(storage.StorageDriver): # NOTE(mnaser): The metric could have been handled by # another worker, ignore if no measures. if len(measures) == 0: - LOG.debug("Skipping %s (already processed)" % metric) + LOG.debug("Skipping %s (already processed)", metric) return measures = sorted(measures, key=operator.itemgetter(0)) @@ -530,8 +530,8 @@ class CarbonaraBasedStorage(storage.StorageDriver): ((number_of_operations * len(measures)) / elapsed) ) LOG.debug("Computed new metric %s with %d new measures " - "in %.2f seconds%s" - % (metric.id, len(measures), elapsed, perf)) + "in %.2f seconds%s", + metric.id, len(measures), elapsed, perf) self._store_unaggregated_timeserie(metric, ts.serialize()) diff --git a/tox.ini b/tox.ini index f4447303..edfcadae 100644 --- a/tox.ini +++ b/tox.ini @@ -120,6 +120,7 @@ commands = pifpaf -g GNOCCHI_INDEXER_URL run postgresql -- {posargs} [flake8] exclude = .tox,.eggs,doc show-source = true +enable-extensions = H904 [testenv:genconfig] deps = .[mysql,postgresql,test,file,ceph,swift,s3] -- GitLab From 14c58fc17ce90cc16de5c11d80ac352677abcb25 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Tue, 6 Dec 2016 18:13:13 +0100 Subject: [PATCH 0509/1483] carbonara: log a message and do not fail if a file is missing When rewriting data for compression, it's possible a file ___ is missing, because of some data corruption. 
In that case the following traceback happens: Traceback (most recent call last): File "/usr/lib/python2.7/site-packages/gnocchi/storage/_carbonara.py", line 557, in process_new_measures ignore_too_old_timestamps=True) File "/usr/lib/python2.7/site-packages/gnocchi/carbonara.py", line 217, in set_values before_truncate_callback(self) File "/usr/lib/python2.7/site-packages/gnocchi/storage/_carbonara.py", line 551, in _map_add_measures for aggregation in agg_methods)) File "/usr/lib/python2.7/site-packages/gnocchi/storage/_carbonara.py", line 675, in _map_no_thread return list(itertools.starmap(method, list_of_args)) File "/usr/lib/python2.7/site-packages/gnocchi/storage/_carbonara.py", line 316, in _add_measures oldest_mutable_timestamp) File "/usr/lib/python2.7/site-packages/gnocchi/storage/_carbonara.py", line 249, in _store_timeserie_split offset, data = split.serialize(key, compressed=write_full) AttributeError: 'NoneType' object has no attribute 'serialize' This patch makes the driver log a warning and return, so it just ignores the failure and continues anyway. 
Change-Id: I4f367b2418c8be0067746c88bcce74ca756acf4e --- gnocchi/carbonara.py | 27 +++++-- gnocchi/storage/_carbonara.py | 16 ++++- gnocchi/tests/test_storage.py | 130 +++++++++++++++++++++++++++++++++- 3 files changed, 164 insertions(+), 9 deletions(-) diff --git a/gnocchi/carbonara.py b/gnocchi/carbonara.py index 58718088..1f9d37fa 100644 --- a/gnocchi/carbonara.py +++ b/gnocchi/carbonara.py @@ -74,6 +74,12 @@ class UnknownAggregationMethod(Exception): "Unknown aggregation method `%s'" % agg) +class InvalidData(ValueError): + """Error raised when data are corrupted.""" + def __init__(self): + super(InvalidData, self).__init__("Unable to unpack, invalid data") + + def round_timestamp(ts, freq): return pandas.Timestamp( (pandas.Timestamp(ts).value // freq) * freq) @@ -225,8 +231,11 @@ class BoundTimeSerie(TimeSerie): nb_points = ( len(uncompressed) // cls._SERIALIZATION_TIMESTAMP_VALUE_LEN ) - deserial = struct.unpack("<" + "Q" * nb_points + "d" * nb_points, - uncompressed) + try: + deserial = struct.unpack("<" + "Q" * nb_points + "d" * nb_points, + uncompressed) + except struct.error: + raise InvalidData start = deserial[0] timestamps = [start] for delta in itertools.islice(deserial, 1, nb_points): @@ -505,9 +514,12 @@ class AggregatedTimeSerie(TimeSerie): # Compressed format uncompressed = lz4.loads(memoryview(data)[1:].tobytes()) nb_points = len(uncompressed) // cls.COMPRESSED_SERIAL_LEN - deserial = struct.unpack( - '<' + 'H' * nb_points + 'd' * nb_points, - uncompressed) + try: + deserial = struct.unpack( + '<' + 'H' * nb_points + 'd' * nb_points, + uncompressed) + except struct.error: + raise InvalidData for delta in itertools.islice(deserial, nb_points): ts = start + (delta * sampling) y.append(ts) @@ -518,7 +530,10 @@ class AggregatedTimeSerie(TimeSerie): nb_points = len(data) // cls.PADDED_SERIAL_LEN # NOTE(gordc): use '<' for standardized # little-endian byte order - deserial = struct.unpack('<' + '?d' * nb_points, data) + try: + deserial = 
struct.unpack('<' + '?d' * nb_points, data) + except struct.error: + raise InvalidData() # alternating split into 2 list and drop items with False flag for i, val in itertools.compress( six.moves.zip(six.moves.range(nb_points), diff --git a/gnocchi/storage/_carbonara.py b/gnocchi/storage/_carbonara.py index 23362c67..5db37c9e 100644 --- a/gnocchi/storage/_carbonara.py +++ b/gnocchi/storage/_carbonara.py @@ -168,7 +168,7 @@ class CarbonaraBasedStorage(storage.StorageDriver): try: return carbonara.AggregatedTimeSerie.unserialize( data, key, aggregation, granularity) - except ValueError: + except carbonara.InvalidData: LOG.error("Data corruption detected for %s " "aggregated `%s' timeserie, granularity `%s' " "around time `%s', ignoring.", @@ -246,6 +246,20 @@ class CarbonaraBasedStorage(storage.StorageDriver): else: split.merge(existing) + if split is None: + # `split' can be none if existing is None and no split was passed + # in order to rewrite and compress the data; in that case, it means + # the split key is present and listed, but some aggregation method + # or granularity is missing. That means data is corrupted, but it + # does not mean we have to fail, we can just do nothing and log a + # warning. 
+ LOG.warning("No data found for metric %s, granularity %f " + "and aggregation method %s (split key %s): " + "possible data corruption", + metric, archive_policy_def.granularity, + aggregation, key) + return + offset, data = split.serialize(key, compressed=write_full) return self._store_metric_measures( diff --git a/gnocchi/tests/test_storage.py b/gnocchi/tests/test_storage.py index 9815e33c..33cec577 100644 --- a/gnocchi/tests/test_storage.py +++ b/gnocchi/tests/test_storage.py @@ -71,9 +71,9 @@ class TestStorageDriver(tests_base.TestCase): ]) with mock.patch('gnocchi.carbonara.AggregatedTimeSerie.unserialize', - side_effect=ValueError("boom!")): + side_effect=carbonara.InvalidData()): with mock.patch('gnocchi.carbonara.BoundTimeSerie.unserialize', - side_effect=ValueError("boom!")): + side_effect=carbonara.InvalidData()): self.trigger_processing() m = self.storage.get_measures(self.metric) @@ -327,6 +327,132 @@ class TestStorageDriver(tests_base.TestCase): (utils.datetime_utc(2016, 1, 10, 17, 12), 60.0, 46), ], self.storage.get_measures(self.metric, granularity=60.0)) + def test_rewrite_measures_corruption_missing_file(self): + # Create an archive policy that spans on several splits. Each split + # being 3600 points, let's go for 36k points so we have 10 splits. 
+ apname = str(uuid.uuid4()) + ap = archive_policy.ArchivePolicy(apname, 0, [(36000, 60)]) + self.index.create_archive_policy(ap) + self.metric = storage.Metric(uuid.uuid4(), ap) + self.index.create_metric(self.metric.id, str(uuid.uuid4()), + str(uuid.uuid4()), + apname) + + # First store some points scattered across different splits + self.storage.incoming.add_measures(self.metric, [ + storage.Measure(utils.dt_to_unix_ns(2016, 1, 1, 12, 0, 1), 69), + storage.Measure(utils.dt_to_unix_ns(2016, 1, 2, 13, 7, 31), 42), + storage.Measure(utils.dt_to_unix_ns(2016, 1, 4, 14, 9, 31), 4), + storage.Measure(utils.dt_to_unix_ns(2016, 1, 6, 15, 12, 45), 44), + ]) + self.trigger_processing() + + splits = {'1451520000.0', '1451736000.0', '1451952000.0'} + self.assertEqual(splits, + self.storage._list_split_keys_for_metric( + self.metric, "mean", 60.0)) + + if self.storage.WRITE_FULL: + assertCompressedIfWriteFull = self.assertTrue + else: + assertCompressedIfWriteFull = self.assertFalse + + data = self.storage._get_measures( + self.metric, '1451520000.0', "mean", 60.0) + self.assertTrue(carbonara.AggregatedTimeSerie.is_compressed(data)) + data = self.storage._get_measures( + self.metric, '1451736000.0', "mean", 60.0) + self.assertTrue(carbonara.AggregatedTimeSerie.is_compressed(data)) + data = self.storage._get_measures( + self.metric, '1451952000.0', "mean", 60.0) + assertCompressedIfWriteFull( + carbonara.AggregatedTimeSerie.is_compressed(data)) + + self.assertEqual([ + (utils.datetime_utc(2016, 1, 1, 12), 60.0, 69), + (utils.datetime_utc(2016, 1, 2, 13, 7), 60.0, 42), + (utils.datetime_utc(2016, 1, 4, 14, 9), 60.0, 4), + (utils.datetime_utc(2016, 1, 6, 15, 12), 60.0, 44), + ], self.storage.get_measures(self.metric, granularity=60.0)) + + # Test what happens if we delete the latest split and then need to + # compress it! 
+ self.storage._delete_metric_measures(self.metric, + '1451952000.0', + 'mean', 60.0) + + # Now store brand new points that should force a rewrite of one of the + # split (keep in mind the back window size in one hour here). We move + # the BoundTimeSerie processing timeserie far away from its current + # range. + self.storage.incoming.add_measures(self.metric, [ + storage.Measure(utils.dt_to_unix_ns(2016, 1, 10, 16, 18, 45), 45), + storage.Measure(utils.dt_to_unix_ns(2016, 1, 10, 17, 12, 45), 46), + ]) + self.trigger_processing() + + def test_rewrite_measures_corruption_bad_data(self): + # Create an archive policy that spans on several splits. Each split + # being 3600 points, let's go for 36k points so we have 10 splits. + apname = str(uuid.uuid4()) + ap = archive_policy.ArchivePolicy(apname, 0, [(36000, 60)]) + self.index.create_archive_policy(ap) + self.metric = storage.Metric(uuid.uuid4(), ap) + self.index.create_metric(self.metric.id, str(uuid.uuid4()), + str(uuid.uuid4()), + apname) + + # First store some points scattered across different splits + self.storage.incoming.add_measures(self.metric, [ + storage.Measure(utils.dt_to_unix_ns(2016, 1, 1, 12, 0, 1), 69), + storage.Measure(utils.dt_to_unix_ns(2016, 1, 2, 13, 7, 31), 42), + storage.Measure(utils.dt_to_unix_ns(2016, 1, 4, 14, 9, 31), 4), + storage.Measure(utils.dt_to_unix_ns(2016, 1, 6, 15, 12, 45), 44), + ]) + self.trigger_processing() + + splits = {'1451520000.0', '1451736000.0', '1451952000.0'} + self.assertEqual(splits, + self.storage._list_split_keys_for_metric( + self.metric, "mean", 60.0)) + + if self.storage.WRITE_FULL: + assertCompressedIfWriteFull = self.assertTrue + else: + assertCompressedIfWriteFull = self.assertFalse + + data = self.storage._get_measures( + self.metric, '1451520000.0', "mean", 60.0) + self.assertTrue(carbonara.AggregatedTimeSerie.is_compressed(data)) + data = self.storage._get_measures( + self.metric, '1451736000.0', "mean", 60.0) + 
self.assertTrue(carbonara.AggregatedTimeSerie.is_compressed(data)) + data = self.storage._get_measures( + self.metric, '1451952000.0', "mean", 60.0) + assertCompressedIfWriteFull( + carbonara.AggregatedTimeSerie.is_compressed(data)) + + self.assertEqual([ + (utils.datetime_utc(2016, 1, 1, 12), 60.0, 69), + (utils.datetime_utc(2016, 1, 2, 13, 7), 60.0, 42), + (utils.datetime_utc(2016, 1, 4, 14, 9), 60.0, 4), + (utils.datetime_utc(2016, 1, 6, 15, 12), 60.0, 44), + ], self.storage.get_measures(self.metric, granularity=60.0)) + + # Test what happens if we write garbage + self.storage._store_metric_measures( + self.metric, '1451952000.0', "mean", 60.0, b"oh really?") + + # Now store brand new points that should force a rewrite of one of the + # split (keep in mind the back window size in one hour here). We move + # the BoundTimeSerie processing timeserie far away from its current + # range. + self.storage.incoming.add_measures(self.metric, [ + storage.Measure(utils.dt_to_unix_ns(2016, 1, 10, 16, 18, 45), 45), + storage.Measure(utils.dt_to_unix_ns(2016, 1, 10, 17, 12, 45), 46), + ]) + self.trigger_processing() + def test_updated_measures(self): self.storage.incoming.add_measures(self.metric, [ storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 0, 1), 69), -- GitLab From 5edc39ad91aaac65811a6f61c563812850e565fd Mon Sep 17 00:00:00 2001 From: gord chung Date: Thu, 17 Nov 2016 23:37:00 +0000 Subject: [PATCH 0510/1483] fill series when aggregating cross metrics this fills in series so that overlap of all series is not required to aggregate. we only fill in if at least one of the metrics contains a point in the timeslot. 
Closes-Bug: #1642661 Change-Id: I1a29f0095387c326bdc87af760a3ccb4a95de828 --- doc/source/rest.j2 | 27 +++- doc/source/rest.yaml | 4 + gnocchi/carbonara.py | 105 ++++++++------- gnocchi/rest/__init__.py | 24 +++- gnocchi/storage/__init__.py | 5 +- gnocchi/storage/_carbonara.py | 5 +- gnocchi/tests/gabbi/gabbits/aggregation.yaml | 54 ++++++++ gnocchi/tests/test_carbonara.py | 120 ++++++++++++++++++ ...ll-cross-aggregation-2de54c7c30b2eb67.yaml | 6 + 9 files changed, 284 insertions(+), 66 deletions(-) create mode 100644 releasenotes/notes/backfill-cross-aggregation-2de54c7c30b2eb67.yaml diff --git a/doc/source/rest.j2 b/doc/source/rest.j2 index b41e33f9..b67d375c 100644 --- a/doc/source/rest.j2 +++ b/doc/source/rest.j2 @@ -533,16 +533,29 @@ well. Resampling is done prior to any reaggregation if both parameters are specified. -Also aggregation across metrics have different behavior depending -on if boundary are set ('start' and 'stop') and if 'needed_overlap' is set. +Also, aggregation across metrics have different behavior depending +on whether boundary values are set ('start' and 'stop') and if 'needed_overlap' +is set. If boundaries are not set, Gnocchi makes the aggregation only with points -at timestamp present in all timeseries. +at timestamp present in all timeseries. When boundaries are set, Gnocchi +expects that we have certain percent of timestamps common between timeseries, +this percent is controlled by needed_overlap (defaulted with 100%). If this +percent is not reached an error is returned. + +The ability to fill in points missing from a subset of timeseries is supported +by specifying a `fill` value. Valid fill values include any valid float or +`null` which will compute aggregation with only the points that exist. The +`fill` parameter will not backfill timestamps which contain no points in any +of the timeseries. Only timestamps which have datapoints in at least one of +the timeseries is returned. + +.. 
note:: + + A granularity must be specified when using the `fill` parameter. + +{{ scenarios['get-across-metrics-measures-by-metric-ids-fill']['doc'] }} -But when boundaries are set, Gnocchi expects that we have certain -percent of timestamps common between timeseries, this percent is controlled -by needed_overlap (defaulted with 100%). If this percent is not reached an -error is returned. Capabilities ============ diff --git a/doc/source/rest.yaml b/doc/source/rest.yaml index aba6b785..4d95c460 100644 --- a/doc/source/rest.yaml +++ b/doc/source/rest.yaml @@ -726,6 +726,10 @@ request: | GET /v1/aggregation/metric?metric={{ scenarios['create-resource-instance-with-metrics']['response'].json['metrics']['cpu.util'] }}&metric={{ scenarios['create-resource-instance-with-dynamic-metrics']['response'].json['metrics']['cpu.util'] }}&aggregation=mean&reaggregation=min HTTP/1.1 +- name: get-across-metrics-measures-by-metric-ids-fill + request: | + GET /v1/aggregation/metric?metric={{ scenarios['create-resource-instance-with-metrics']['response'].json['metrics']['cpu.util'] }}&metric={{ scenarios['create-resource-instance-with-dynamic-metrics']['response'].json['metrics']['cpu.util'] }}&fill=0&granularity=1 HTTP/1.1 + - name: append-metrics-to-resource request: | POST /v1/resource/generic/{{ scenarios['create-resource-instance-with-metrics']['response'].json['id'] }}/metric HTTP/1.1 diff --git a/gnocchi/carbonara.py b/gnocchi/carbonara.py index 7684e882..5c32a7ec 100644 --- a/gnocchi/carbonara.py +++ b/gnocchi/carbonara.py @@ -713,7 +713,8 @@ class AggregatedTimeSerie(TimeSerie): @staticmethod def aggregated(timeseries, aggregation, from_timestamp=None, - to_timestamp=None, needed_percent_of_overlap=100.0): + to_timestamp=None, needed_percent_of_overlap=100.0, + fill=None): index = ['timestamp', 'granularity'] columns = ['timestamp', 'granularity', 'value'] @@ -737,57 +738,65 @@ class AggregatedTimeSerie(TimeSerie): set(ts.sampling for ts in timeseries) ) - grouped = 
pandas.concat(dataframes).groupby(level=index) left_boundary_ts = None right_boundary_ts = None - maybe_next_timestamp_is_left_boundary = False - - left_holes = 0 - right_holes = 0 - holes = 0 - for (timestamp, __), group in grouped: - if group.count()['value'] != number_of_distinct_datasource: - maybe_next_timestamp_is_left_boundary = True - if left_boundary_ts is not None: - right_holes += 1 + if fill is not None: + fill_df = pandas.concat(dataframes, axis=1) + if fill != 'null': + fill_df = fill_df.fillna(fill) + single_df = pandas.concat([series for __, series in + fill_df.iteritems()]).to_frame() + grouped = single_df.groupby(level=index) + else: + grouped = pandas.concat(dataframes).groupby(level=index) + maybe_next_timestamp_is_left_boundary = False + + left_holes = 0 + right_holes = 0 + holes = 0 + for (timestamp, __), group in grouped: + if group.count()['value'] != number_of_distinct_datasource: + maybe_next_timestamp_is_left_boundary = True + if left_boundary_ts is not None: + right_holes += 1 + else: + left_holes += 1 + elif maybe_next_timestamp_is_left_boundary: + left_boundary_ts = timestamp + maybe_next_timestamp_is_left_boundary = False else: - left_holes += 1 - elif maybe_next_timestamp_is_left_boundary: - left_boundary_ts = timestamp - maybe_next_timestamp_is_left_boundary = False - else: - right_boundary_ts = timestamp + right_boundary_ts = timestamp + holes += right_holes + right_holes = 0 + + if to_timestamp is not None: + holes += left_holes + if from_timestamp is not None: holes += right_holes - right_holes = 0 - - if to_timestamp is not None: - holes += left_holes - if from_timestamp is not None: - holes += right_holes - - if to_timestamp is not None or from_timestamp is not None: - maximum = len(grouped) - percent_of_overlap = (float(maximum - holes) * 100.0 / - float(maximum)) - if percent_of_overlap < needed_percent_of_overlap: - raise UnAggregableTimeseries( - 'Less than %f%% of datapoints overlap in this ' - 'timespan (%.2f%%)' % 
(needed_percent_of_overlap, - percent_of_overlap)) - if (needed_percent_of_overlap > 0 and - (right_boundary_ts == left_boundary_ts or - (right_boundary_ts is None - and maybe_next_timestamp_is_left_boundary))): - LOG.debug("We didn't find points that overlap in those " - "timeseries. " - "right_boundary_ts=%(right_boundary_ts)s, " - "left_boundary_ts=%(left_boundary_ts)s, " - "groups=%(groups)s", { - 'right_boundary_ts': right_boundary_ts, - 'left_boundary_ts': left_boundary_ts, - 'groups': list(grouped) - }) - raise UnAggregableTimeseries('No overlap') + + if to_timestamp is not None or from_timestamp is not None: + maximum = len(grouped) + percent_of_overlap = (float(maximum - holes) * 100.0 / + float(maximum)) + if percent_of_overlap < needed_percent_of_overlap: + raise UnAggregableTimeseries( + 'Less than %f%% of datapoints overlap in this ' + 'timespan (%.2f%%)' % (needed_percent_of_overlap, + percent_of_overlap)) + if (needed_percent_of_overlap > 0 and + (right_boundary_ts == left_boundary_ts or + (right_boundary_ts is None + and maybe_next_timestamp_is_left_boundary))): + LOG.debug("We didn't find points that overlap in those " + "timeseries. 
" + "right_boundary_ts=%(right_boundary_ts)s, " + "left_boundary_ts=%(left_boundary_ts)s, " + "groups=%(groups)s", { + 'right_boundary_ts': right_boundary_ts, + 'left_boundary_ts': left_boundary_ts, + 'groups': list(grouped) + }) + raise UnAggregableTimeseries('No overlap') # NOTE(sileht): this call the aggregation method on already # aggregated values, for some kind of aggregation this can diff --git a/gnocchi/rest/__init__.py b/gnocchi/rest/__init__.py index da4d2e7e..98238608 100644 --- a/gnocchi/rest/__init__.py +++ b/gnocchi/rest/__init__.py @@ -1496,7 +1496,7 @@ class AggregationResourceController(rest.RestController): @pecan.expose('json') def post(self, start=None, stop=None, aggregation='mean', reaggregation=None, granularity=None, needed_overlap=100.0, - groupby=None, refresh=False, resample=None): + groupby=None, fill=None, refresh=False, resample=None): # First, set groupby in the right format: a sorted list of unique # strings. groupby = sorted(set(arg_to_list(groupby))) @@ -1520,7 +1520,7 @@ class AggregationResourceController(rest.RestController): for r in resources))) return AggregationController.get_cross_metric_measures_from_objs( metrics, start, stop, aggregation, reaggregation, - granularity, needed_overlap, refresh, resample) + granularity, needed_overlap, fill, refresh, resample) def groupper(r): return tuple((attr, r[attr]) for attr in groupby) @@ -1534,7 +1534,7 @@ class AggregationResourceController(rest.RestController): "group": dict(key), "measures": AggregationController.get_cross_metric_measures_from_objs( # noqa metrics, start, stop, aggregation, reaggregation, - granularity, needed_overlap, refresh, resample) + granularity, needed_overlap, fill, refresh, resample) }) return results @@ -1564,7 +1564,7 @@ class AggregationController(rest.RestController): aggregation='mean', reaggregation=None, granularity=None, - needed_overlap=100.0, + needed_overlap=100.0, fill=None, refresh=False, resample=None): try: needed_overlap = 
float(needed_overlap) @@ -1611,6 +1611,15 @@ class AggregationController(rest.RestController): except ValueError as e: abort(400, e) + if fill is not None: + if granularity is None: + abort(400, "Unable to fill without a granularity") + try: + fill = float(fill) + except ValueError as e: + if fill != 'null': + abort(400, "fill must be a float or \'null\': %s" % e) + try: if strutils.bool_from_string(refresh): pecan.request.storage.process_new_measures( @@ -1625,7 +1634,7 @@ class AggregationController(rest.RestController): else: measures = pecan.request.storage.get_cross_metric_measures( metrics, start, stop, aggregation, - reaggregation, resample, granularity, needed_overlap) + reaggregation, resample, granularity, needed_overlap, fill) # Replace timestamp keys by their string versions return [(timestamp.isoformat(), offset, v) for timestamp, offset, v in measures] @@ -1640,7 +1649,8 @@ class AggregationController(rest.RestController): @pecan.expose('json') def get_metric(self, metric=None, start=None, stop=None, aggregation='mean', reaggregation=None, granularity=None, - needed_overlap=100.0, refresh=False, resample=None): + needed_overlap=100.0, fill=None, + refresh=False, resample=None): # Check RBAC policy metric_ids = arg_to_list(metric) metrics = pecan.request.indexer.list_metrics(ids=metric_ids) @@ -1652,7 +1662,7 @@ class AggregationController(rest.RestController): missing_metric_ids.pop())) return self.get_cross_metric_measures_from_objs( metrics, start, stop, aggregation, reaggregation, - granularity, needed_overlap, refresh, resample) + granularity, needed_overlap, fill, refresh, resample) class CapabilityController(rest.RestController): diff --git a/gnocchi/storage/__init__.py b/gnocchi/storage/__init__.py index 6d8fa5f0..a9619019 100644 --- a/gnocchi/storage/__init__.py +++ b/gnocchi/storage/__init__.py @@ -257,8 +257,8 @@ class StorageDriver(object): def get_cross_metric_measures(metrics, from_timestamp=None, to_timestamp=None, aggregation='mean', 
reaggregation=None, resample=None, - granularity=None, - needed_overlap=None): + granularity=None, needed_overlap=None, + fill=None): """Get aggregated measures of multiple entities. :param entities: The entities measured to aggregate. @@ -269,6 +269,7 @@ class StorageDriver(object): :param reaggregation: The type of aggregation to compute on the retrieved measures. :param resample: The granularity to resample to. + :param fill: The value to use to fill in missing data in series. """ for metric in metrics: if aggregation not in metric.archive_policy.aggregation_methods: diff --git a/gnocchi/storage/_carbonara.py b/gnocchi/storage/_carbonara.py index 5b26a6f1..02c40b63 100644 --- a/gnocchi/storage/_carbonara.py +++ b/gnocchi/storage/_carbonara.py @@ -538,7 +538,8 @@ class CarbonaraBasedStorage(storage.StorageDriver): def get_cross_metric_measures(self, metrics, from_timestamp=None, to_timestamp=None, aggregation='mean', reaggregation=None, resample=None, - granularity=None, needed_overlap=100.0): + granularity=None, needed_overlap=100.0, + fill=None): super(CarbonaraBasedStorage, self).get_cross_metric_measures( metrics, from_timestamp, to_timestamp, aggregation, reaggregation, resample, granularity, needed_overlap) @@ -584,7 +585,7 @@ class CarbonaraBasedStorage(storage.StorageDriver): for timestamp, r, v in carbonara.AggregatedTimeSerie.aggregated( tss, reaggregation, from_timestamp, to_timestamp, - needed_overlap)] + needed_overlap, fill)] except carbonara.UnAggregableTimeseries as e: raise storage.MetricUnaggregatable(metrics, e.reason) diff --git a/gnocchi/tests/gabbi/gabbits/aggregation.yaml b/gnocchi/tests/gabbi/gabbits/aggregation.yaml index 19fdd897..71a237f8 100644 --- a/gnocchi/tests/gabbi/gabbits/aggregation.yaml +++ b/gnocchi/tests/gabbi/gabbits/aggregation.yaml @@ -59,6 +59,8 @@ tests: value: 3.1 - timestamp: "2015-03-06T14:34:12" value: 2 + - timestamp: "2015-03-06T14:35:12" + value: 5 status: 202 - name: get metric list to get aggregates @@ -128,6 
+130,43 @@ tests: - ['2015-03-06T14:33:00+00:00', 60.0, 23.1] - ['2015-03-06T14:34:00+00:00', 60.0, 7.0] + - name: get metric list to push metric 6 + GET: /v1/metric + + - name: get measure aggregates with fill zero + GET: /v1/aggregation/metric?metric=$RESPONSE['$[0].id']&metric=$RESPONSE['$[1].id']&granularity=1&fill=0 + response_json_paths: + $: + - ['2015-03-06T14:33:57+00:00', 1.0, 23.1] + - ['2015-03-06T14:34:12+00:00', 1.0, 7.0] + - ['2015-03-06T14:35:12+00:00', 1.0, 2.5] + + - name: get metric list to push metric 7 + GET: /v1/metric + + - name: get measure aggregates with fill null + GET: /v1/aggregation/metric?metric=$RESPONSE['$[0].id']&metric=$RESPONSE['$[1].id']&granularity=1&fill=null + response_json_paths: + $: + - ['2015-03-06T14:33:57+00:00', 1.0, 23.1] + - ['2015-03-06T14:34:12+00:00', 1.0, 7.0] + - ['2015-03-06T14:35:12+00:00', 1.0, 5.0] + + - name: get metric list to push metric 8 + GET: /v1/metric + + - name: get measure aggregates with fill missing granularity + GET: /v1/aggregation/metric?metric=$RESPONSE['$[0].id']&metric=$RESPONSE['$[1].id']&fill=0 + status: 400 + + - name: get metric list to push metric 9 + GET: /v1/metric + + - name: get measure aggregates with bad fill + GET: /v1/aggregation/metric?metric=$RESPONSE['$[0].id']&metric=$RESPONSE['$[1].id']&granularity=1&fill=asdf + status: 400 + + # Aggregation by resource and metric_name - name: post a resource @@ -180,6 +219,8 @@ tests: value: 3.1 - timestamp: "2015-03-06T14:34:12" value: 2 + - timestamp: "2015-03-06T14:35:12" + value: 5 status: 202 - name: get measure aggregates by granularity from resources with refresh @@ -264,6 +305,19 @@ tests: - ['2015-03-06T14:33:57+00:00', 1.0, 3.1] - ['2015-03-06T14:34:12+00:00', 1.0, 2.0] + - name: get measure aggregates from resources with fill zero + POST: /v1/aggregation/resource/generic/metric/agg_meter?granularity=1&fill=0 + request_headers: + x-user-id: 0fbb231484614b1a80131fc22f6afc9c + x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea + 
content-type: application/json + response_json_paths: + $: + - ['2015-03-06T14:33:57+00:00', 1.0, 23.1] + - ['2015-03-06T14:34:12+00:00', 1.0, 7.0] + - ['2015-03-06T14:35:12+00:00', 1.0, 2.5] + + # Some negative tests - name: get measure aggregates with wrong GET diff --git a/gnocchi/tests/test_carbonara.py b/gnocchi/tests/test_carbonara.py index 5f88379c..1ba41c9b 100644 --- a/gnocchi/tests/test_carbonara.py +++ b/gnocchi/tests/test_carbonara.py @@ -511,6 +511,126 @@ class TestAggregatedTimeSerie(base.BaseTestCase): (datetime.datetime(2014, 1, 1, 12, 6), 60.0, 4.0) ], ts['return'].fetch(datetime.datetime(2014, 1, 1, 12, 0, 0))) + def test_aggregated_some_overlap_with_fill_zero(self): + tsc1 = {'sampling': 60, 'size': 10, 'agg': 'mean'} + tsb1 = carbonara.BoundTimeSerie(block_size=tsc1['sampling']) + tsc2 = {'sampling': 60, 'size': 10, 'agg': 'mean'} + tsb2 = carbonara.BoundTimeSerie(block_size=tsc2['sampling']) + + tsb1.set_values([ + (datetime.datetime(2014, 1, 1, 12, 3, 0), 9), + (datetime.datetime(2014, 1, 1, 12, 4, 0), 1), + (datetime.datetime(2014, 1, 1, 12, 5, 0), 2), + (datetime.datetime(2014, 1, 1, 12, 6, 0), 7), + (datetime.datetime(2014, 1, 1, 12, 7, 0), 5), + (datetime.datetime(2014, 1, 1, 12, 8, 0), 3), + ], before_truncate_callback=functools.partial( + self._resample_and_merge, agg_dict=tsc1)) + + tsb2.set_values([ + (datetime.datetime(2014, 1, 1, 12, 0, 0), 6), + (datetime.datetime(2014, 1, 1, 12, 1, 0), 2), + (datetime.datetime(2014, 1, 1, 12, 2, 0), 13), + (datetime.datetime(2014, 1, 1, 12, 3, 0), 24), + (datetime.datetime(2014, 1, 1, 12, 4, 0), 4), + (datetime.datetime(2014, 1, 1, 12, 5, 0), 16), + (datetime.datetime(2014, 1, 1, 12, 6, 0), 12), + ], before_truncate_callback=functools.partial( + self._resample_and_merge, agg_dict=tsc2)) + + output = carbonara.AggregatedTimeSerie.aggregated([ + tsc1['return'], tsc2['return']], aggregation='mean', fill=0) + + self.assertEqual([ + (datetime.datetime(2014, 1, 1, 12, 0, 0), 60.0, 3.0), + 
(datetime.datetime(2014, 1, 1, 12, 1, 0), 60.0, 1.0), + (datetime.datetime(2014, 1, 1, 12, 2, 0), 60.0, 6.5), + (datetime.datetime(2014, 1, 1, 12, 3, 0), 60.0, 16.5), + (datetime.datetime(2014, 1, 1, 12, 4, 0), 60.0, 2.5), + (datetime.datetime(2014, 1, 1, 12, 5, 0), 60.0, 9.0), + (datetime.datetime(2014, 1, 1, 12, 6, 0), 60.0, 9.5), + (datetime.datetime(2014, 1, 1, 12, 7, 0), 60.0, 2.5), + (datetime.datetime(2014, 1, 1, 12, 8, 0), 60.0, 1.5), + ], output) + + def test_aggregated_some_overlap_with_fill_null(self): + tsc1 = {'sampling': 60, 'size': 10, 'agg': 'mean'} + tsb1 = carbonara.BoundTimeSerie(block_size=tsc1['sampling']) + tsc2 = {'sampling': 60, 'size': 10, 'agg': 'mean'} + tsb2 = carbonara.BoundTimeSerie(block_size=tsc2['sampling']) + + tsb1.set_values([ + (datetime.datetime(2014, 1, 1, 12, 3, 0), 9), + (datetime.datetime(2014, 1, 1, 12, 4, 0), 1), + (datetime.datetime(2014, 1, 1, 12, 5, 0), 2), + (datetime.datetime(2014, 1, 1, 12, 6, 0), 7), + (datetime.datetime(2014, 1, 1, 12, 7, 0), 5), + (datetime.datetime(2014, 1, 1, 12, 8, 0), 3), + ], before_truncate_callback=functools.partial( + self._resample_and_merge, agg_dict=tsc1)) + + tsb2.set_values([ + (datetime.datetime(2014, 1, 1, 12, 0, 0), 6), + (datetime.datetime(2014, 1, 1, 12, 1, 0), 2), + (datetime.datetime(2014, 1, 1, 12, 2, 0), 13), + (datetime.datetime(2014, 1, 1, 12, 3, 0), 24), + (datetime.datetime(2014, 1, 1, 12, 4, 0), 4), + (datetime.datetime(2014, 1, 1, 12, 5, 0), 16), + (datetime.datetime(2014, 1, 1, 12, 6, 0), 12), + ], before_truncate_callback=functools.partial( + self._resample_and_merge, agg_dict=tsc2)) + + output = carbonara.AggregatedTimeSerie.aggregated([ + tsc1['return'], tsc2['return']], aggregation='mean', fill='null') + + self.assertEqual([ + (datetime.datetime(2014, 1, 1, 12, 0, 0), 60.0, 6.0), + (datetime.datetime(2014, 1, 1, 12, 1, 0), 60.0, 2.0), + (datetime.datetime(2014, 1, 1, 12, 2, 0), 60.0, 13.0), + (datetime.datetime(2014, 1, 1, 12, 3, 0), 60.0, 16.5), + 
(datetime.datetime(2014, 1, 1, 12, 4, 0), 60.0, 2.5), + (datetime.datetime(2014, 1, 1, 12, 5, 0), 60.0, 9.0), + (datetime.datetime(2014, 1, 1, 12, 6, 0), 60.0, 9.5), + (datetime.datetime(2014, 1, 1, 12, 7, 0), 60.0, 5.0), + (datetime.datetime(2014, 1, 1, 12, 8, 0), 60.0, 3.0), + ], output) + + def test_aggregate_no_points_with_fill_zero(self): + tsc1 = {'sampling': 60, 'size': 10, 'agg': 'mean'} + tsb1 = carbonara.BoundTimeSerie(block_size=tsc1['sampling']) + tsc2 = {'sampling': 60, 'size': 10, 'agg': 'mean'} + tsb2 = carbonara.BoundTimeSerie(block_size=tsc2['sampling']) + + tsb1.set_values([ + (datetime.datetime(2014, 1, 1, 12, 3, 0), 9), + (datetime.datetime(2014, 1, 1, 12, 4, 0), 1), + (datetime.datetime(2014, 1, 1, 12, 7, 0), 5), + (datetime.datetime(2014, 1, 1, 12, 8, 0), 3), + ], before_truncate_callback=functools.partial( + self._resample_and_merge, agg_dict=tsc1)) + + tsb2.set_values([ + (datetime.datetime(2014, 1, 1, 12, 0, 0), 6), + (datetime.datetime(2014, 1, 1, 12, 1, 0), 2), + (datetime.datetime(2014, 1, 1, 12, 2, 0), 13), + (datetime.datetime(2014, 1, 1, 12, 3, 0), 24), + (datetime.datetime(2014, 1, 1, 12, 4, 0), 4), + ], before_truncate_callback=functools.partial( + self._resample_and_merge, agg_dict=tsc2)) + + output = carbonara.AggregatedTimeSerie.aggregated([ + tsc1['return'], tsc2['return']], aggregation='mean', fill=0) + + self.assertEqual([ + (datetime.datetime(2014, 1, 1, 12, 0, 0), 60.0, 3.0), + (datetime.datetime(2014, 1, 1, 12, 1, 0), 60.0, 1.0), + (datetime.datetime(2014, 1, 1, 12, 2, 0), 60.0, 6.5), + (datetime.datetime(2014, 1, 1, 12, 3, 0), 60.0, 16.5), + (datetime.datetime(2014, 1, 1, 12, 4, 0), 60.0, 2.5), + (datetime.datetime(2014, 1, 1, 12, 7, 0), 60.0, 2.5), + (datetime.datetime(2014, 1, 1, 12, 8, 0), 60.0, 1.5), + ], output) + def test_fetch_agg_pct(self): ts = {'sampling': 1, 'size': 3600 * 24, 'agg': '90pct'} tsb = carbonara.BoundTimeSerie(block_size=ts['sampling']) diff --git 
a/releasenotes/notes/backfill-cross-aggregation-2de54c7c30b2eb67.yaml b/releasenotes/notes/backfill-cross-aggregation-2de54c7c30b2eb67.yaml new file mode 100644 index 00000000..cdfeee45 --- /dev/null +++ b/releasenotes/notes/backfill-cross-aggregation-2de54c7c30b2eb67.yaml @@ -0,0 +1,6 @@ +--- +features: + - Add support to backfill timestamps with missing points in a subset of + timeseries when computing aggregation across multiple metrics. User can + specify `fill` value with either a float or `null` value. A granularity + must be specified in addition to `fill`. -- GitLab From 85a39e37fb6397edc06b2aa327a0073ba138032f Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Wed, 14 Dec 2016 15:21:54 +0100 Subject: [PATCH 0511/1483] All granularity input should be parsed as timespan Closes-bug: #1649908 Change-Id: I35279564f93dedf7eeb80ec3dae98013175a7aa1 --- gnocchi/rest/__init__.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/gnocchi/rest/__init__.py b/gnocchi/rest/__init__.py index da4d2e7e..f36cd588 100644 --- a/gnocchi/rest/__init__.py +++ b/gnocchi/rest/__init__.py @@ -486,7 +486,7 @@ class MetricController(rest.RestController): else: measures = pecan.request.storage.get_measures( self.metric, start, stop, aggregation, - float(granularity) if granularity is not None else None, + Timespan(granularity) if granularity is not None else None, resample) # Replace timestamp keys by their string versions return [(timestamp.isoformat(), offset, v) @@ -1599,9 +1599,9 @@ class AggregationController(rest.RestController): return [] if granularity is not None: try: - granularity = float(granularity) + granularity = Timespan(granularity) except ValueError as e: - abort(400, "granularity must be a float: %s" % e) + abort(400, e) if resample: if not granularity: -- GitLab From 57bbee6e54d2cc90e11041d7bb1dc6543fbf7c5a Mon Sep 17 00:00:00 2001 From: "xuan.mingyi" Date: Tue, 18 Oct 2016 06:34:42 +0000 Subject: [PATCH 0512/1483] add mysql minimum version 
check gnocchi indexer can't be used under mysql 5.6.4 Change-Id: Iab008e5f99fa759de8ea6d00d079fc5bc86fadc8 Closes-Bug: #1634374 Co-Authored-By: Hanxi Liu --- gnocchi/indexer/sqlalchemy.py | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/gnocchi/indexer/sqlalchemy.py b/gnocchi/indexer/sqlalchemy.py index dfe13971..e1caec05 100644 --- a/gnocchi/indexer/sqlalchemy.py +++ b/gnocchi/indexer/sqlalchemy.py @@ -267,6 +267,12 @@ class SQLAlchemyIndexer(indexer.IndexerDriver): sqlalchemy_utils.create_database(new_url) return new_url + @staticmethod + def _is_rdbms_compatible(connection): + dialect = connection.dialect.dialect_description + version = connection.dialect.server_version_info + return not (dialect.startswith('mysql') and version < (5, 6, 4)) + @staticmethod def dress_url(url): # If no explicit driver has been set, we default to pymysql @@ -283,6 +289,11 @@ class SQLAlchemyIndexer(indexer.IndexerDriver): self.conf = conf self.facade = PerInstanceFacade(conf) + with self.facade.reader_connection() as connection: + if not self._is_rdbms_compatible(connection): + raise RuntimeError("MySQL minimum required version is " + "5.6.4") + def disconnect(self): self.facade.dispose() -- GitLab From cd82f8dd062f5463f0959f74996bdbb9cdf4f2ec Mon Sep 17 00:00:00 2001 From: Andy McCrae Date: Fri, 16 Dec 2016 11:27:24 +0000 Subject: [PATCH 0513/1483] Revert "add mysql minimum version check" This reverts commit 57bbee6e54d2cc90e11041d7bb1dc6543fbf7c5a. This patch fails with MariaDB, and other MySQL variants, as the version numbering is not consistent. 
Change-Id: I789eeb026c69d6e150403cd4e248eace34f443d0 --- gnocchi/indexer/sqlalchemy.py | 11 ----------- 1 file changed, 11 deletions(-) diff --git a/gnocchi/indexer/sqlalchemy.py b/gnocchi/indexer/sqlalchemy.py index 8a057747..bcfc2c28 100644 --- a/gnocchi/indexer/sqlalchemy.py +++ b/gnocchi/indexer/sqlalchemy.py @@ -267,12 +267,6 @@ class SQLAlchemyIndexer(indexer.IndexerDriver): sqlalchemy_utils.create_database(new_url) return new_url - @staticmethod - def _is_rdbms_compatible(connection): - dialect = connection.dialect.dialect_description - version = connection.dialect.server_version_info - return not (dialect.startswith('mysql') and version < (5, 6, 4)) - @staticmethod def dress_url(url): # If no explicit driver has been set, we default to pymysql @@ -289,11 +283,6 @@ class SQLAlchemyIndexer(indexer.IndexerDriver): self.conf = conf self.facade = PerInstanceFacade(conf) - with self.facade.reader_connection() as connection: - if not self._is_rdbms_compatible(connection): - raise RuntimeError("MySQL minimum required version is " - "5.6.4") - def disconnect(self): self.facade.dispose() -- GitLab From 4a7a3cc1c7c9a1a3aaa117944499f31cb01028e6 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Fri, 16 Dec 2016 14:05:13 +0100 Subject: [PATCH 0514/1483] rest: catch create_metric duplicate This fixes the following error: DBDuplicateEntry: (pymysql.err.IntegrityError) (1062, u"Duplicate entry 'xxxxxxxx-disk.ephemeral' for key 'uniq_metric0resource_id0name'") Change-Id: I5c1cf3753d0282a8e095f3b3ce7d7f0559eaffc0 --- gnocchi/indexer/sqlalchemy.py | 2 ++ gnocchi/rest/__init__.py | 14 +++++++++++++- gnocchi/tests/test_indexer.py | 22 ++++++++++++++++++++++ 3 files changed, 37 insertions(+), 1 deletion(-) diff --git a/gnocchi/indexer/sqlalchemy.py b/gnocchi/indexer/sqlalchemy.py index bcfc2c28..c0ed0a2d 100644 --- a/gnocchi/indexer/sqlalchemy.py +++ b/gnocchi/indexer/sqlalchemy.py @@ -649,6 +649,8 @@ class SQLAlchemyIndexer(indexer.IndexerDriver): try: with 
self.facade.writer() as session: session.add(m) + except exception.DBDuplicateEntry: + raise indexer.NamedMetricAlreadyExists(name) except exception.DBReferenceError as e: if (e.constraint == 'fk_metric_ap_name_ap_name'): diff --git a/gnocchi/rest/__init__.py b/gnocchi/rest/__init__.py index da4d2e7e..5ca8a229 100644 --- a/gnocchi/rest/__init__.py +++ b/gnocchi/rest/__init__.py @@ -1400,6 +1400,7 @@ class ResourcesMetricsMeasuresBatchController(rest.RestController): known_names = [m.name for m in metrics] if strutils.bool_from_string(create_metrics): user_id, project_id = get_user_and_project() + already_exists_names = [] for name in names: if name not in known_names: metric = MetricsController.MetricSchema({ @@ -1414,7 +1415,9 @@ class ResourcesMetricsMeasuresBatchController(rest.RestController): unit=metric.get('unit'), archive_policy_name=metric[ 'archive_policy_name']) - except indexer.NoSuchResource as e: + except indexer.NamedMetricAlreadyExists as e: + already_exists_names.append(e.metric) + except indexer.NoSuchResource: unknown_resources.add(resource_id) except indexer.IndexerException as e: # This catch NoSuchArchivePolicy, which is unlikely @@ -1423,6 +1426,15 @@ class ResourcesMetricsMeasuresBatchController(rest.RestController): else: known_metrics.append(m) + if already_exists_names: + # Add metrics created in the meantime + known_names.extend(already_exists_names) + known_metrics.extend( + pecan.request.indexer.list_metrics( + names=already_exists_names, + resource_id=resource_id) + ) + elif len(names) != len(metrics): unknown_metrics.extend( ["%s/%s" % (six.text_type(resource_id), m) diff --git a/gnocchi/tests/test_indexer.py b/gnocchi/tests/test_indexer.py index 55264b6e..148bdecc 100644 --- a/gnocchi/tests/test_indexer.py +++ b/gnocchi/tests/test_indexer.py @@ -162,6 +162,28 @@ class TestIndexerDriver(tests_base.TestCase): m2 = self.index.list_metrics(id=r1) self.assertEqual([m], m2) + def test_create_named_metric_duplicate(self): + m1 = 
uuid.uuid4() + r1 = uuid.uuid4() + name = "foobar" + user = str(uuid.uuid4()) + project = str(uuid.uuid4()) + self.index.create_resource('generic', r1, user, project) + m = self.index.create_metric(m1, user, project, "low", + name=name, + resource_id=r1) + self.assertEqual(m1, m.id) + self.assertEqual(m.created_by_user_id, user) + self.assertEqual(m.created_by_project_id, project) + self.assertEqual(name, m.name) + self.assertEqual(r1, m.resource_id) + m2 = self.index.list_metrics(id=m1) + self.assertEqual([m], m2) + + self.assertRaises(indexer.NamedMetricAlreadyExists, + self.index.create_metric, m1, user, project, "low", + name=name, resource_id=r1) + def test_expunge_metric(self): r1 = uuid.uuid4() user = str(uuid.uuid4()) -- GitLab From c5dd0d9031e34ef4fb52c95788adcf6f83d451ee Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Tue, 20 Dec 2016 11:42:01 +0100 Subject: [PATCH 0515/1483] gabbi: remove unused variable Change-Id: Ic39a7ec53b513f59d2d8fec5b88e80dda42720ea Signed-off-by: Julien Danjou --- gnocchi/tests/gabbi/fixtures.py | 1 - 1 file changed, 1 deletion(-) diff --git a/gnocchi/tests/gabbi/fixtures.py b/gnocchi/tests/gabbi/fixtures.py index 5694e206..4f818e4a 100644 --- a/gnocchi/tests/gabbi/fixtures.py +++ b/gnocchi/tests/gabbi/fixtures.py @@ -59,7 +59,6 @@ class ConfigFixture(fixture.GabbiFixture): def __init__(self): self.conf = None - self.db_url = None self.tmp_dir = None def start_fixture(self): -- GitLab From 1dcd2fd163836dc344437eee70740143ee11e842 Mon Sep 17 00:00:00 2001 From: Tony Breeds Date: Wed, 21 Dec 2016 09:46:39 +1100 Subject: [PATCH 0516/1483] [doc] Note lack of constraints is a choice Change-Id: Iffefe7d79e773cf2b8df903353b5f81bc5b30ba1 --- tox.ini | 1 + 1 file changed, 1 insertion(+) diff --git a/tox.ini b/tox.ini index edfcadae..b0e37db1 100644 --- a/tox.ini +++ b/tox.ini @@ -26,6 +26,7 @@ setenv = deps = .[test] postgresql: .[postgresql,{env:GNOCCHI_STORAGE_DEPS}] mysql: .[mysql,{env:GNOCCHI_STORAGE_DEPS}] +# NOTE(tonyb): This 
project has chosen to *NOT* consume upper-constraints.txt commands = doc8 --ignore-path doc/source/rest.rst doc/source oslo-config-generator --config-file=etc/gnocchi/gnocchi-config-generator.conf -- GitLab From 36329117728e1ca75d951f87c99430ff3c29e864 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Wed, 28 Dec 2016 15:30:30 +0100 Subject: [PATCH 0517/1483] Fix expected content-type and move CORS tests to gabbi MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The Content-Type is not automatically set by WebOb anymore. That also broke the CORS test as keystonemiddleware is broken. This patch moves the test to gabbi. While not strictly identical – it does not test Keystone anymore – it ought to be enough. Change-Id: I0f23481d5f75694da23d05c9ef88005a0f2c27d7 --- gnocchi/gendoc.py | 6 ++-- gnocchi/tests/gabbi/fixtures.py | 9 ++++++ gnocchi/tests/gabbi/gabbits/base.yaml | 6 ++-- gnocchi/tests/gabbi/gabbits/cors.yaml | 21 +++++++++++++ gnocchi/tests/gabbi/gabbits/history.yaml | 2 +- gnocchi/tests/gabbi/gabbits/resource.yaml | 2 +- gnocchi/tests/test_rest.py | 36 ----------------------- 7 files changed, 39 insertions(+), 43 deletions(-) create mode 100644 gnocchi/tests/gabbi/gabbits/cors.yaml diff --git a/gnocchi/gendoc.py b/gnocchi/gendoc.py index 780d4e20..240b8319 100644 --- a/gnocchi/gendoc.py +++ b/gnocchi/gendoc.py @@ -19,6 +19,7 @@ import json import jinja2 import six import six.moves +import webob.request import yaml from gnocchi.tests import test_rest @@ -117,8 +118,9 @@ def setup(app): fake_file.seek(0) request = webapp.RequestClass.from_file(fake_file) - # TODO(jd) Fix this lame bug in webob - if request.method in ("DELETE"): + # TODO(jd) Fix this lame bug in webob < 1.7 + if (hasattr(webob.request, "http_method_probably_has_body") + and request.method == "DELETE"): # Webob has a bug it does not read the body for DELETE, l4m3r clen = request.content_length if clen is None: diff --git a/gnocchi/tests/gabbi/fixtures.py 
b/gnocchi/tests/gabbi/fixtures.py index 4f818e4a..37ce9ebe 100644 --- a/gnocchi/tests/gabbi/fixtures.py +++ b/gnocchi/tests/gabbi/fixtures.py @@ -23,6 +23,8 @@ from unittest import case import warnings from gabbi import fixture +from oslo_config import cfg +from oslo_middleware import cors import sqlalchemy_utils from gnocchi import indexer @@ -79,6 +81,13 @@ class ConfigFixture(fixture.GabbiFixture): os.path.abspath('etc/gnocchi/api-paste.ini'), 'api') + # NOTE(sileht): This is not concurrency safe, but only this tests file + # deal with cors, so we are fine. set_override don't work because cors + # group doesn't yet exists, and we the CORS middleware is created it + # register the option and directly copy value of all configurations + # options making impossible to override them properly... + cfg.set_defaults(cors.CORS_OPTS, allowed_origin="http://foobar.com") + self.conf = conf self.tmp_dir = data_tmp_dir diff --git a/gnocchi/tests/gabbi/gabbits/base.yaml b/gnocchi/tests/gabbi/gabbits/base.yaml index 675407c7..5410524f 100644 --- a/gnocchi/tests/gabbi/gabbits/base.yaml +++ b/gnocchi/tests/gabbi/gabbits/base.yaml @@ -7,7 +7,7 @@ tests: desc: Root URL must return information about API versions GET: / response_headers: - content-type: application/json; charset=UTF-8 + content-type: /^application\/json/ response_json_paths: $.versions.[0].id: "v1.0" $.versions.[0].status: "CURRENT" @@ -24,7 +24,7 @@ tests: points: 20 status: 201 response_headers: - content-type: /application\/json/ + content-type: /^application\/json/ location: $SCHEME://$NETLOC/v1/archive_policy/test1 response_json_paths: $.name: test1 @@ -91,7 +91,7 @@ tests: desc: Resources index page should return list of type associated with a URL GET: /v1/resource/ response_headers: - content-type: application/json; charset=UTF-8 + content-type: /^application\/json/ status: 200 response_json_paths: $.generic: $SCHEME://$NETLOC/v1/resource/generic diff --git a/gnocchi/tests/gabbi/gabbits/cors.yaml 
b/gnocchi/tests/gabbi/gabbits/cors.yaml new file mode 100644 index 00000000..bd2395d5 --- /dev/null +++ b/gnocchi/tests/gabbi/gabbits/cors.yaml @@ -0,0 +1,21 @@ +fixtures: + - ConfigFixture + +tests: + - name: get CORS headers for non-allowed + OPTIONS: /v1/status + request_headers: + Origin: http://notallowed.com + Access-Control-Request-Method: GET + response_forbidden_headers: + - Access-Control-Allow-Origin + - Access-Control-Allow-Methods + + - name: get CORS headers for allowed + OPTIONS: /v1/status + request_headers: + Origin: http://foobar.com + Access-Control-Request-Method: GET + response_headers: + Access-Control-Allow-Origin: http://foobar.com + Access-Control-Allow-Methods: GET diff --git a/gnocchi/tests/gabbi/gabbits/history.yaml b/gnocchi/tests/gabbi/gabbits/history.yaml index 11b5983f..4b3b2bb2 100644 --- a/gnocchi/tests/gabbi/gabbits/history.yaml +++ b/gnocchi/tests/gabbi/gabbits/history.yaml @@ -35,7 +35,7 @@ tests: status: 201 response_headers: location: $SCHEME://$NETLOC/v1/resource/generic/f93450f2-d8a5-4d67-9985-02511241e7d1 - content-type: application/json; charset=UTF-8 + content-type: /^application\/json/ response_json_paths: $.created_by_project_id: f3d41b770cc14f0bb94a1d5be9c0e3ea $.created_by_user_id: 0fbb231484614b1a80131fc22f6afc9c diff --git a/gnocchi/tests/gabbi/gabbits/resource.yaml b/gnocchi/tests/gabbi/gabbits/resource.yaml index da3ab42b..83c71623 100644 --- a/gnocchi/tests/gabbi/gabbits/resource.yaml +++ b/gnocchi/tests/gabbi/gabbits/resource.yaml @@ -131,7 +131,7 @@ tests: status: 201 response_headers: location: $SCHEME://$NETLOC/v1/resource/generic/f93450f2-d8a5-4d67-9985-02511241e7d1 - content-type: application/json; charset=UTF-8 + content-type: /^application\/json/ response_json_paths: $.created_by_project_id: f3d41b770cc14f0bb94a1d5be9c0e3ea $.created_by_user_id: 0fbb231484614b1a80131fc22f6afc9c diff --git a/gnocchi/tests/test_rest.py b/gnocchi/tests/test_rest.py index 72cf41c4..9772dbe2 100644 --- 
a/gnocchi/tests/test_rest.py +++ b/gnocchi/tests/test_rest.py @@ -24,8 +24,6 @@ import uuid from keystonemiddleware import fixture as ksm_fixture import mock -from oslo_config import cfg -from oslo_middleware import cors from oslo_utils import timeutils import six from stevedore import extension @@ -133,13 +131,6 @@ class RestTest(tests_base.TestCase, testscenarios.TestWithScenarios): self.path_get('etc/gnocchi/api-paste.ini'), group="api") - # NOTE(sileht): This is not concurrency safe, but only this tests file - # deal with cors, so we are fine. set_override don't work because - # cors group doesn't yet exists, and we the CORS middleware is created - # it register the option and directly copy value of all configurations - # options making impossible to override them properly... - cfg.set_defaults(cors.CORS_OPTS, allowed_origin="http://foobar.com") - self.auth_token_fixture = self.useFixture( ksm_fixture.AuthTokenFixture()) self.auth_token_fixture.add_token_data( @@ -181,33 +172,6 @@ class RestTest(tests_base.TestCase, testscenarios.TestWithScenarios): class RootTest(RestTest): - - def _do_test_cors(self): - resp = self.app.options( - "/v1/status", - headers={'Origin': 'http://notallowed.com', - 'Access-Control-Request-Method': 'GET'}, - status=200) - headers = dict(resp.headers) - self.assertNotIn("Access-Control-Allow-Origin", headers) - self.assertNotIn("Access-Control-Allow-Methods", headers) - resp = self.app.options( - "/v1/status", - headers={'origin': 'http://foobar.com', - 'Access-Control-Request-Method': 'GET'}, - status=200) - headers = dict(resp.headers) - self.assertIn("Access-Control-Allow-Origin", headers) - self.assertIn("Access-Control-Allow-Methods", headers) - - def test_cors_invalid_token(self): - with self.app.use_invalid_token(): - self._do_test_cors() - - def test_cors_no_token(self): - with self.app.use_no_token(): - self._do_test_cors() - def test_deserialize_force_json(self): with self.app.use_admin_user(): self.app.post( -- GitLab From 
5a6d3935bc935eb11f2c1dee9ba5ed9c8fe0e86e Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Wed, 28 Dec 2016 15:30:30 +0100 Subject: [PATCH 0518/1483] Fix expected content-type and move CORS tests to gabbi MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The Content-Type is not automatically set by WebOb anymore. That also broke the CORS test as keystonemiddleware is broken. This patch moves the test to gabbi. While not strictly identical – it does not test Keystone anymore – it ought to be enough. Change-Id: I0f23481d5f75694da23d05c9ef88005a0f2c27d7 --- gnocchi/gendoc.py | 6 ++-- gnocchi/tests/gabbi/fixtures.py | 9 ++++++ gnocchi/tests/gabbi/gabbits/base.yaml | 6 ++-- gnocchi/tests/gabbi/gabbits/cors.yaml | 21 +++++++++++++ gnocchi/tests/gabbi/gabbits/history.yaml | 2 +- gnocchi/tests/gabbi/gabbits/resource.yaml | 2 +- gnocchi/tests/test_rest.py | 36 ----------------------- 7 files changed, 39 insertions(+), 43 deletions(-) create mode 100644 gnocchi/tests/gabbi/gabbits/cors.yaml diff --git a/gnocchi/gendoc.py b/gnocchi/gendoc.py index 780d4e20..240b8319 100644 --- a/gnocchi/gendoc.py +++ b/gnocchi/gendoc.py @@ -19,6 +19,7 @@ import json import jinja2 import six import six.moves +import webob.request import yaml from gnocchi.tests import test_rest @@ -117,8 +118,9 @@ def setup(app): fake_file.seek(0) request = webapp.RequestClass.from_file(fake_file) - # TODO(jd) Fix this lame bug in webob - if request.method in ("DELETE"): + # TODO(jd) Fix this lame bug in webob < 1.7 + if (hasattr(webob.request, "http_method_probably_has_body") + and request.method == "DELETE"): # Webob has a bug it does not read the body for DELETE, l4m3r clen = request.content_length if clen is None: diff --git a/gnocchi/tests/gabbi/fixtures.py b/gnocchi/tests/gabbi/fixtures.py index df83524f..0df7fb9e 100644 --- a/gnocchi/tests/gabbi/fixtures.py +++ b/gnocchi/tests/gabbi/fixtures.py @@ -23,6 +23,8 @@ from unittest import case import warnings from gabbi 
import fixture +from oslo_config import cfg +from oslo_middleware import cors import sqlalchemy_utils from gnocchi import indexer @@ -80,6 +82,13 @@ class ConfigFixture(fixture.GabbiFixture): os.path.abspath('etc/gnocchi/api-paste.ini'), 'api') + # NOTE(sileht): This is not concurrency safe, but only this tests file + # deal with cors, so we are fine. set_override don't work because cors + # group doesn't yet exists, and we the CORS middleware is created it + # register the option and directly copy value of all configurations + # options making impossible to override them properly... + cfg.set_defaults(cors.CORS_OPTS, allowed_origin="http://foobar.com") + self.conf = conf self.tmp_dir = data_tmp_dir diff --git a/gnocchi/tests/gabbi/gabbits/base.yaml b/gnocchi/tests/gabbi/gabbits/base.yaml index 675407c7..5410524f 100644 --- a/gnocchi/tests/gabbi/gabbits/base.yaml +++ b/gnocchi/tests/gabbi/gabbits/base.yaml @@ -7,7 +7,7 @@ tests: desc: Root URL must return information about API versions GET: / response_headers: - content-type: application/json; charset=UTF-8 + content-type: /^application\/json/ response_json_paths: $.versions.[0].id: "v1.0" $.versions.[0].status: "CURRENT" @@ -24,7 +24,7 @@ tests: points: 20 status: 201 response_headers: - content-type: /application\/json/ + content-type: /^application\/json/ location: $SCHEME://$NETLOC/v1/archive_policy/test1 response_json_paths: $.name: test1 @@ -91,7 +91,7 @@ tests: desc: Resources index page should return list of type associated with a URL GET: /v1/resource/ response_headers: - content-type: application/json; charset=UTF-8 + content-type: /^application\/json/ status: 200 response_json_paths: $.generic: $SCHEME://$NETLOC/v1/resource/generic diff --git a/gnocchi/tests/gabbi/gabbits/cors.yaml b/gnocchi/tests/gabbi/gabbits/cors.yaml new file mode 100644 index 00000000..bd2395d5 --- /dev/null +++ b/gnocchi/tests/gabbi/gabbits/cors.yaml @@ -0,0 +1,21 @@ +fixtures: + - ConfigFixture + +tests: + - name: get CORS headers 
for non-allowed + OPTIONS: /v1/status + request_headers: + Origin: http://notallowed.com + Access-Control-Request-Method: GET + response_forbidden_headers: + - Access-Control-Allow-Origin + - Access-Control-Allow-Methods + + - name: get CORS headers for allowed + OPTIONS: /v1/status + request_headers: + Origin: http://foobar.com + Access-Control-Request-Method: GET + response_headers: + Access-Control-Allow-Origin: http://foobar.com + Access-Control-Allow-Methods: GET diff --git a/gnocchi/tests/gabbi/gabbits/history.yaml b/gnocchi/tests/gabbi/gabbits/history.yaml index 11b5983f..4b3b2bb2 100644 --- a/gnocchi/tests/gabbi/gabbits/history.yaml +++ b/gnocchi/tests/gabbi/gabbits/history.yaml @@ -35,7 +35,7 @@ tests: status: 201 response_headers: location: $SCHEME://$NETLOC/v1/resource/generic/f93450f2-d8a5-4d67-9985-02511241e7d1 - content-type: application/json; charset=UTF-8 + content-type: /^application\/json/ response_json_paths: $.created_by_project_id: f3d41b770cc14f0bb94a1d5be9c0e3ea $.created_by_user_id: 0fbb231484614b1a80131fc22f6afc9c diff --git a/gnocchi/tests/gabbi/gabbits/resource.yaml b/gnocchi/tests/gabbi/gabbits/resource.yaml index 2730e093..a9e9c131 100644 --- a/gnocchi/tests/gabbi/gabbits/resource.yaml +++ b/gnocchi/tests/gabbi/gabbits/resource.yaml @@ -131,7 +131,7 @@ tests: status: 201 response_headers: location: $SCHEME://$NETLOC/v1/resource/generic/f93450f2-d8a5-4d67-9985-02511241e7d1 - content-type: application/json; charset=UTF-8 + content-type: /^application\/json/ response_json_paths: $.created_by_project_id: f3d41b770cc14f0bb94a1d5be9c0e3ea $.created_by_user_id: 0fbb231484614b1a80131fc22f6afc9c diff --git a/gnocchi/tests/test_rest.py b/gnocchi/tests/test_rest.py index d9ac6037..159c0610 100644 --- a/gnocchi/tests/test_rest.py +++ b/gnocchi/tests/test_rest.py @@ -24,8 +24,6 @@ import uuid from keystonemiddleware import fixture as ksm_fixture import mock -from oslo_config import cfg -from oslo_middleware import cors from oslo_utils import 
timeutils import six from stevedore import extension @@ -132,13 +130,6 @@ class RestTest(tests_base.TestCase, testscenarios.TestWithScenarios): self.path_get('etc/gnocchi/api-paste.ini'), group="api") - # NOTE(sileht): This is not concurrency safe, but only this tests file - # deal with cors, so we are fine. set_override don't work because - # cors group doesn't yet exists, and we the CORS middleware is created - # it register the option and directly copy value of all configurations - # options making impossible to override them properly... - cfg.set_defaults(cors.CORS_OPTS, allowed_origin="http://foobar.com") - self.auth_token_fixture = self.useFixture( ksm_fixture.AuthTokenFixture()) self.auth_token_fixture.add_token_data( @@ -180,33 +171,6 @@ class RestTest(tests_base.TestCase, testscenarios.TestWithScenarios): class RootTest(RestTest): - - def _do_test_cors(self): - resp = self.app.options( - "/v1/status", - headers={'Origin': 'http://notallowed.com', - 'Access-Control-Request-Method': 'GET'}, - status=200) - headers = dict(resp.headers) - self.assertNotIn("Access-Control-Allow-Origin", headers) - self.assertNotIn("Access-Control-Allow-Methods", headers) - resp = self.app.options( - "/v1/status", - headers={'origin': 'http://foobar.com', - 'Access-Control-Request-Method': 'GET'}, - status=200) - headers = dict(resp.headers) - self.assertIn("Access-Control-Allow-Origin", headers) - self.assertIn("Access-Control-Allow-Methods", headers) - - def test_cors_invalid_token(self): - with self.app.use_invalid_token(): - self._do_test_cors() - - def test_cors_no_token(self): - with self.app.use_no_token(): - self._do_test_cors() - def test_deserialize_force_json(self): with self.app.use_admin_user(): self.app.post( -- GitLab From 9200ae3048623fcd69b566388ba77aba3a1b23c3 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Thu, 24 Nov 2016 17:58:24 +0100 Subject: [PATCH 0519/1483] storage: split s3 driver Change-Id: Ie572227bc7c0574e7686696f5605f79a46c0b406 --- 
gnocchi/storage/common/s3.py | 81 +++++++++++++++ gnocchi/storage/incoming/s3.py | 151 +++++++++++++++++++++++++++ gnocchi/storage/s3.py | 182 ++------------------------------- setup.cfg | 1 + 4 files changed, 244 insertions(+), 171 deletions(-) create mode 100644 gnocchi/storage/common/s3.py create mode 100644 gnocchi/storage/incoming/s3.py diff --git a/gnocchi/storage/common/s3.py b/gnocchi/storage/common/s3.py new file mode 100644 index 00000000..eb6c0660 --- /dev/null +++ b/gnocchi/storage/common/s3.py @@ -0,0 +1,81 @@ +# -*- encoding: utf-8 -*- +# +# Copyright © 2016 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from oslo_log import log +import tenacity +try: + import boto3 + import botocore.exceptions +except ImportError: + boto3 = None + botocore = None + +from gnocchi import utils + +LOG = log.getLogger(__name__) + + +def retry_if_operationaborted(exception): + return (isinstance(exception, botocore.exceptions.ClientError) + and exception.response['Error'].get('Code') == "OperationAborted") + + +def get_connection(conf): + if boto3 is None: + raise RuntimeError("boto3 unavailable") + conn = boto3.client( + 's3', + endpoint_url=conf.s3_endpoint_url, + region_name=conf.s3_region_name, + aws_access_key_id=conf.s3_access_key_id, + aws_secret_access_key=conf.s3_secret_access_key) + return conn, conf.s3_region_name, conf.s3_bucket_prefix + + +# NOTE(jd) OperationAborted might be raised if we try to create the bucket +# for the first time at the same time +@tenacity.retry( + stop=tenacity.stop_after_attempt(10), + wait=tenacity.wait_fixed(0.5), + retry=tenacity.retry_if_exception(retry_if_operationaborted) +) +def create_bucket(conn, name, region_name): + if region_name: + kwargs = dict(CreateBucketConfiguration={ + "LocationConstraint": region_name, + }) + else: + kwargs = {} + return conn.create_bucket(Bucket=name, **kwargs) + + +def bulk_delete(conn, bucket, objects): + # NOTE(jd) The maximum object to delete at once is 1000 + # TODO(jd) Parallelize? 
+ deleted = 0 + for obj_slice in utils.grouper(objects, 1000): + d = { + 'Objects': [{'Key': o} for o in obj_slice], + # FIXME(jd) Use Quiet mode, but s3rver does not seem to + # support it + # 'Quiet': True, + } + response = conn.delete_objects( + Bucket=bucket, + Delete=d) + deleted += len(response['Deleted']) + LOG.debug('%s objects deleted, %s objects skipped', + deleted, len(objects) - deleted) diff --git a/gnocchi/storage/incoming/s3.py b/gnocchi/storage/incoming/s3.py new file mode 100644 index 00000000..1554833f --- /dev/null +++ b/gnocchi/storage/incoming/s3.py @@ -0,0 +1,151 @@ +# -*- encoding: utf-8 -*- +# +# Copyright © 2016 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+from collections import defaultdict +import contextlib +import datetime +import logging +import uuid + +import six + + +from gnocchi.storage.common import s3 +from gnocchi.storage.incoming import _carbonara + +boto3 = s3.boto3 +botocore = s3.botocore + +LOG = logging.getLogger(__name__) + + +class S3Storage(_carbonara.CarbonaraBasedStorage): + + def __init__(self, conf): + super(S3Storage, self).__init__(conf) + self.s3, self._region_name, self._bucket_prefix = ( + s3.get_connection(conf) + ) + + self._bucket_name_measures = ( + self._bucket_prefix + "-" + self.MEASURE_PREFIX + ) + try: + s3.create_bucket(self.s3, self._bucket_name_measures, + self._region_name) + except botocore.exceptions.ClientError as e: + if e.response['Error'].get('Code') not in ( + "BucketAlreadyExists", "BucketAlreadyOwnedByYou" + ): + raise + + def _store_new_measures(self, metric, data): + now = datetime.datetime.utcnow().strftime("_%Y%m%d_%H:%M:%S") + self.s3.put_object( + Bucket=self._bucket_name_measures, + Key=(six.text_type(metric.id) + + "/" + + six.text_type(uuid.uuid4()) + + now), + Body=data) + + def _build_report(self, details): + metric_details = defaultdict(int) + response = {} + while response.get('IsTruncated', True): + if 'NextContinuationToken' in response: + kwargs = { + 'ContinuationToken': response['NextContinuationToken'] + } + else: + kwargs = {} + response = self.s3.list_objects_v2( + Bucket=self._bucket_name_measures, + **kwargs) + for c in response.get('Contents', ()): + metric, metric_file = c['Key'].split("/", 1) + metric_details[metric] += 1 + return (len(metric_details), sum(metric_details.values()), + metric_details if details else None) + + def list_metric_with_measures_to_process(self, size, part, full=False): + if full: + limit = 1000 # 1000 is the default anyway + else: + limit = size * (part + 1) + + metrics = set() + response = {} + # Handle pagination + while response.get('IsTruncated', True): + if 'NextContinuationToken' in response: + kwargs = { + 
'ContinuationToken': response['NextContinuationToken'] + } + else: + kwargs = {} + response = self.s3.list_objects_v2( + Bucket=self._bucket_name_measures, + Delimiter="/", + MaxKeys=limit, + **kwargs) + for p in response.get('CommonPrefixes', ()): + metrics.add(p['Prefix'].rstrip('/')) + + if full: + return metrics + + return metrics[size * part:] + + def _list_measure_files_for_metric_id(self, metric_id): + files = set() + response = {} + while response.get('IsTruncated', True): + if 'NextContinuationToken' in response: + kwargs = { + 'ContinuationToken': response['NextContinuationToken'] + } + else: + kwargs = {} + response = self.s3.list_objects_v2( + Bucket=self._bucket_name_measures, + Prefix=six.text_type(metric_id) + "/", + **kwargs) + + for c in response.get('Contents', ()): + files.add(c['Key']) + + return files + + def delete_unprocessed_measures_for_metric_id(self, metric_id): + files = self._list_measure_files_for_metric_id(metric_id) + s3.bulk_delete(self.s3, self._bucket_name_measures, files) + + @contextlib.contextmanager + def process_measure_for_metric(self, metric): + files = self._list_measure_files_for_metric_id(metric.id) + + measures = [] + for f in files: + response = self.s3.get_object( + Bucket=self._bucket_name_measures, + Key=f) + measures.extend( + self._unserialize_measures(f, response['Body'].read())) + + yield measures + + # Now clean objects + s3.bulk_delete(self.s3, self._bucket_name_measures, files) diff --git a/gnocchi/storage/s3.py b/gnocchi/storage/s3.py index e4d5a838..5b789629 100644 --- a/gnocchi/storage/s3.py +++ b/gnocchi/storage/s3.py @@ -13,26 +13,17 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
-from collections import defaultdict -import contextlib -import datetime import logging import os -import uuid from oslo_config import cfg -import six -import tenacity -try: - import boto3 - import botocore.exceptions -except ImportError: - boto3 = None from gnocchi import storage from gnocchi.storage import _carbonara -from gnocchi.storage.incoming import _carbonara as incoming_carbonara -from gnocchi import utils +from gnocchi.storage.common import s3 + +boto3 = s3.boto3 +botocore = s3.botocore LOG = logging.getLogger(__name__) @@ -59,49 +50,15 @@ def retry_if_operationaborted(exception): and exception.response['Error'].get('Code') == "OperationAborted") -class S3Storage(_carbonara.CarbonaraBasedStorage, - incoming_carbonara.CarbonaraBasedStorage): +class S3Storage(_carbonara.CarbonaraBasedStorage): WRITE_FULL = True def __init__(self, conf): super(S3Storage, self).__init__(conf) - if boto3 is None: - raise RuntimeError("boto3 unavailable") - self.s3 = boto3.client( - 's3', - endpoint_url=conf.s3_endpoint_url, - region_name=conf.s3_region_name, - aws_access_key_id=conf.s3_access_key_id, - aws_secret_access_key=conf.s3_secret_access_key) - self._region_name = conf.s3_region_name - self._bucket_prefix = conf.s3_bucket_prefix - self._bucket_name_measures = ( - self._bucket_prefix + "-" + self.MEASURE_PREFIX + self.s3, self._region_name, self._bucket_prefix = ( + s3.get_connection(conf) ) - try: - self._create_bucket(self._bucket_name_measures) - except botocore.exceptions.ClientError as e: - if e.response['Error'].get('Code') not in ( - "BucketAlreadyExists", "BucketAlreadyOwnedByYou" - ): - raise - - # NOTE(jd) OperationAborted might be raised if we try to create the bucket - # for the first time at the same time - @tenacity.retry( - stop=tenacity.stop_after_attempt(10), - wait=tenacity.wait_fixed(0.5), - retry=tenacity.retry_if_exception(retry_if_operationaborted) - ) - def _create_bucket(self, name): - if self._region_name: - kwargs = 
dict(CreateBucketConfiguration={ - "LocationConstraint": self._region_name, - }) - else: - kwargs = {} - return self.s3.create_bucket(Bucket=name, **kwargs) def _bucket_name(self, metric): return '%s-%s' % (self._bucket_prefix, str(metric.id)) @@ -113,130 +70,13 @@ class S3Storage(_carbonara.CarbonaraBasedStorage, def _create_metric(self, metric): try: - self._create_bucket(self._bucket_name(metric)) + s3.create_bucket(self.s3, self._bucket_name(metric), + self._region_name) except botocore.exceptions.ClientError as e: if e.response['Error'].get('Code') != "BucketAlreadyExists": raise # raise storage.MetricAlreadyExists(metric) - def _store_new_measures(self, metric, data): - now = datetime.datetime.utcnow().strftime("_%Y%m%d_%H:%M:%S") - self.s3.put_object( - Bucket=self._bucket_name_measures, - Key=(six.text_type(metric.id) - + "/" - + six.text_type(uuid.uuid4()) - + now), - Body=data) - - def _build_report(self, details): - metric_details = defaultdict(int) - response = {} - while response.get('IsTruncated', True): - if 'NextContinuationToken' in response: - kwargs = { - 'ContinuationToken': response['NextContinuationToken'] - } - else: - kwargs = {} - response = self.s3.list_objects_v2( - Bucket=self._bucket_name_measures, - **kwargs) - for c in response.get('Contents', ()): - metric, metric_file = c['Key'].split("/", 1) - metric_details[metric] += 1 - return (len(metric_details), sum(metric_details.values()), - metric_details if details else None) - - def list_metric_with_measures_to_process(self, size, part, full=False): - if full: - limit = 1000 # 1000 is the default anyway - else: - limit = size * (part + 1) - - metrics = set() - response = {} - # Handle pagination - while response.get('IsTruncated', True): - if 'NextContinuationToken' in response: - kwargs = { - 'ContinuationToken': response['NextContinuationToken'] - } - else: - kwargs = {} - response = self.s3.list_objects_v2( - Bucket=self._bucket_name_measures, - Delimiter="/", - MaxKeys=limit, - 
**kwargs) - for p in response.get('CommonPrefixes', ()): - metrics.add(p['Prefix'].rstrip('/')) - - if full: - return metrics - - return metrics[size * part:] - - def _list_measure_files_for_metric_id(self, metric_id): - files = set() - response = {} - while response.get('IsTruncated', True): - if 'NextContinuationToken' in response: - kwargs = { - 'ContinuationToken': response['NextContinuationToken'] - } - else: - kwargs = {} - response = self.s3.list_objects_v2( - Bucket=self._bucket_name_measures, - Prefix=six.text_type(metric_id) + "/", - **kwargs) - - for c in response.get('Contents', ()): - files.add(c['Key']) - - return files - - def _bulk_delete(self, bucket, objects): - # NOTE(jd) The maximum object to delete at once is 1000 - # TODO(jd) Parallelize? - deleted = 0 - for obj_slice in utils.grouper(objects, 1000): - d = { - 'Objects': [{'Key': o} for o in obj_slice], - # FIXME(jd) Use Quiet mode, but s3rver does not seem to - # support it - # 'Quiet': True, - } - response = self.s3.delete_objects( - Bucket=bucket, - Delete=d) - deleted += len(response['Deleted']) - LOG.debug('%s objects deleted, %s objects skipped', - deleted, - len(objects) - deleted) - - def delete_unprocessed_measures_for_metric_id(self, metric_id): - files = self._list_measure_files_for_metric_id(metric_id) - self._bulk_delete(self._bucket_name_measures, files) - - @contextlib.contextmanager - def process_measure_for_metric(self, metric): - files = self._list_measure_files_for_metric_id(metric.id) - - measures = [] - for f in files: - response = self.s3.get_object( - Bucket=self._bucket_name_measures, - Key=f) - measures.extend( - self._unserialize_measures(f, response['Body'].read())) - - yield measures - - # Now clean objects - self._bulk_delete(self._bucket_name_measures, files) - def _store_metric_measures(self, metric, timestamp_key, aggregation, granularity, data, offset=0, version=3): self.s3.put_object( @@ -271,8 +111,8 @@ class S3Storage(_carbonara.CarbonaraBasedStorage, # 
Maybe it never has been created (no measure) return raise - self._bulk_delete(bucket, [c['Key'] - for c in response.get('Contents', ())]) + s3.bulk_delete(self.s3, bucket, + [c['Key'] for c in response.get('Contents', ())]) try: self.s3.delete_bucket(Bucket=bucket) except botocore.exceptions.ClientError as e: diff --git a/setup.cfg b/setup.cfg index 150a0276..e89ee8df 100644 --- a/setup.cfg +++ b/setup.cfg @@ -112,6 +112,7 @@ gnocchi.storage.incoming = ceph = gnocchi.storage.incoming.ceph:CephStorage file = gnocchi.storage.incoming.file:FileStorage swift = gnocchi.storage.incoming.swift:SwiftStorage + s3 = gnocchi.storage.incoming.s3:S3Storage gnocchi.indexer = mysql = gnocchi.indexer.sqlalchemy:SQLAlchemyIndexer -- GitLab From ca43bec43ea108d94a39f7602f61e87215a58668 Mon Sep 17 00:00:00 2001 From: Chris Dent Date: Mon, 2 Jan 2017 14:15:45 +0000 Subject: [PATCH 0520/1483] Adjust testr group_regex to not group on 'prefix' The previous regex meant that all the prefix tests were running in their own group of >400 tests. The new regex speads them out but keeps the desired-per YAML file grouping. In a sufficiently concurrent setting things speeds things up nicely. Change-Id: I64a61b49e230e92bd91c950c63843e5ad7712030 --- .testr.conf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.testr.conf b/.testr.conf index cafb2f6e..c274843c 100644 --- a/.testr.conf +++ b/.testr.conf @@ -2,4 +2,4 @@ test_command=OS_STDOUT_CAPTURE=${OS_STDOUT_CAPTURE:-1} OS_STDERR_CAPTURE=${OS_STDERR_CAPTURE:-1} OS_TEST_TIMEOUT=${OS_TEST_TIMEOUT:-60} ${PYTHON:-python} -m subunit.run discover -t . 
${OS_TEST_PATH:-gnocchi/tests} $LISTOPT $IDOPTION test_id_option=--load-list $IDFILE test_list_option=--list -group_regex=(gabbi\.(suitemaker|driver)\.test_gabbi_([^_]+))_ +group_regex=(gabbi\.suitemaker\.test_gabbi((_prefix_|_live_|_)([^_]+)))_ -- GitLab From 2cb85a7a4f885b9c2cd6ab27c4395de7be469500 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Tue, 3 Jan 2017 12:30:01 +0100 Subject: [PATCH 0521/1483] devstack: prepare ceph keyring before using it Change-Id: I0918f4193f97a9427aa25daf1ee92455383f8bc5 --- devstack/plugin.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/devstack/plugin.sh b/devstack/plugin.sh index 336fe719..d920802b 100644 --- a/devstack/plugin.sh +++ b/devstack/plugin.sh @@ -472,12 +472,12 @@ if is_service_enabled gnocchi-api; then configure_keystone_for_gnocchi elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then echo_summary "Configuring Gnocchi" - configure_gnocchi - create_gnocchi_accounts if _is_ceph_enabled && [[ "$GNOCCHI_STORAGE_BACKEND" = 'ceph' ]] ; then echo_summary "Configuring Gnocchi for Ceph" configure_ceph_gnocchi fi + configure_gnocchi + create_gnocchi_accounts elif [[ "$1" == "stack" && "$2" == "extra" ]]; then echo_summary "Initializing Gnocchi" init_gnocchi -- GitLab From 9376c00cb42d370e0dd12474da683133fc79d154 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Thu, 24 Nov 2016 09:19:07 +0100 Subject: [PATCH 0522/1483] Introduce new storage groups for storage Change-Id: Ic965b0c7589c5e6ecfff6d4f5a9e0c4c36c8aaa3 --- gnocchi/opts.py | 20 ++++++++++++++------ gnocchi/storage/__init__.py | 4 +--- setup.cfg | 2 +- 3 files changed, 16 insertions(+), 10 deletions(-) diff --git a/gnocchi/opts.py b/gnocchi/opts.py index 54be4427..f31499b3 100644 --- a/gnocchi/opts.py +++ b/gnocchi/opts.py @@ -11,6 +11,7 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
+import copy import itertools from oslo_config import cfg @@ -25,6 +26,16 @@ import gnocchi.storage.file import gnocchi.storage.s3 import gnocchi.storage.swift +_STORAGE_OPTS = list(itertools.chain(gnocchi.storage.ceph.OPTS, + gnocchi.storage.file.OPTS, + gnocchi.storage.swift.OPTS, + gnocchi.storage.s3.OPTS)) + + +_INCOMING_OPTS = copy.deepcopy(_STORAGE_OPTS) +for opt in _INCOMING_OPTS: + opt.default = '${storage.%s}' % opt.name + def list_opts(): return [ @@ -43,12 +54,9 @@ def list_opts(): help=('The maximum number of items returned in a ' 'single response from a collection resource')), )), - ("storage", itertools.chain(gnocchi.storage._carbonara.OPTS, - gnocchi.storage.OPTS, - gnocchi.storage.ceph.OPTS, - gnocchi.storage.file.OPTS, - gnocchi.storage.swift.OPTS, - gnocchi.storage.s3.OPTS)), + ("storage", (_STORAGE_OPTS + gnocchi.storage._carbonara.OPTS + + gnocchi.storage.OPTS)), + ("incoming", _INCOMING_OPTS), ("statsd", ( cfg.StrOpt('host', default='0.0.0.0', diff --git a/gnocchi/storage/__init__.py b/gnocchi/storage/__init__.py index a9619019..b69574b1 100644 --- a/gnocchi/storage/__init__.py +++ b/gnocchi/storage/__init__.py @@ -157,11 +157,9 @@ def get_driver_class(namespace, conf): def get_driver(conf): """Return the configured driver.""" d = get_driver_class('gnocchi.storage', conf)(conf.storage) - # TODO(sileht): Temporary set incoming driver here - # until we split all drivers try: d.incoming = get_driver_class( - 'gnocchi.storage.incoming', conf)(conf.storage) + 'gnocchi.incoming', conf)(conf.incoming) except stevedore.exception.NoMatches: # Fallback to legacy driver d.incoming = d diff --git a/setup.cfg b/setup.cfg index e89ee8df..6e19983c 100644 --- a/setup.cfg +++ b/setup.cfg @@ -108,7 +108,7 @@ gnocchi.storage = file = gnocchi.storage.file:FileStorage s3 = gnocchi.storage.s3:S3Storage -gnocchi.storage.incoming = +gnocchi.incoming = ceph = gnocchi.storage.incoming.ceph:CephStorage file = gnocchi.storage.incoming.file:FileStorage swift = 
gnocchi.storage.incoming.swift:SwiftStorage -- GitLab From 00d842cb285c230feeb839077b07a98ea49450aa Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Thu, 24 Nov 2016 20:00:18 +0100 Subject: [PATCH 0523/1483] storage: remove temporary incoming setup Change-Id: I28a0f5547367ac3b062ad0026e4bbaa9585c686a --- gnocchi/storage/__init__.py | 18 +++++------------- gnocchi/storage/_carbonara.py | 4 ++-- gnocchi/storage/ceph.py | 4 ++-- gnocchi/storage/file.py | 4 ++-- gnocchi/storage/s3.py | 4 ++-- gnocchi/storage/swift.py | 4 ++-- 6 files changed, 15 insertions(+), 23 deletions(-) diff --git a/gnocchi/storage/__init__.py b/gnocchi/storage/__init__.py index b69574b1..401869a6 100644 --- a/gnocchi/storage/__init__.py +++ b/gnocchi/storage/__init__.py @@ -17,7 +17,6 @@ import operator from oslo_config import cfg from oslo_log import log from stevedore import driver -import stevedore.exception from gnocchi import exceptions from gnocchi import indexer @@ -156,27 +155,20 @@ def get_driver_class(namespace, conf): def get_driver(conf): """Return the configured driver.""" - d = get_driver_class('gnocchi.storage', conf)(conf.storage) - try: - d.incoming = get_driver_class( - 'gnocchi.incoming', conf)(conf.incoming) - except stevedore.exception.NoMatches: - # Fallback to legacy driver - d.incoming = d - return d + incoming = get_driver_class('gnocchi.incoming', conf)(conf.incoming) + return get_driver_class('gnocchi.storage', conf)(conf.storage, incoming) class StorageDriver(object): - def __init__(self, conf): - self.incoming = None + def __init__(self, conf, incoming): + self.incoming = incoming @staticmethod def stop(): pass def upgrade(self, index): - if self.incoming is not self: - self.incoming.upgrade(index) + self.incoming.upgrade(index) def process_background_tasks(self, index, metrics, sync=False): """Process background tasks for this storage. 
diff --git a/gnocchi/storage/_carbonara.py b/gnocchi/storage/_carbonara.py index 0a542e59..0f561c2c 100644 --- a/gnocchi/storage/_carbonara.py +++ b/gnocchi/storage/_carbonara.py @@ -60,8 +60,8 @@ class CorruptionError(ValueError): class CarbonaraBasedStorage(storage.StorageDriver): UPGRADE_BATCH_SIZE = 1000 - def __init__(self, conf): - super(CarbonaraBasedStorage, self).__init__(conf) + def __init__(self, conf, incoming): + super(CarbonaraBasedStorage, self).__init__(conf, incoming) self.aggregation_workers_number = conf.aggregation_workers_number if self.aggregation_workers_number == 1: # NOTE(jd) Avoid using futures at all if we don't want any threads. diff --git a/gnocchi/storage/ceph.py b/gnocchi/storage/ceph.py index cc9f06a8..d4cc31a2 100644 --- a/gnocchi/storage/ceph.py +++ b/gnocchi/storage/ceph.py @@ -44,8 +44,8 @@ rados = ceph.rados class CephStorage(_carbonara.CarbonaraBasedStorage): WRITE_FULL = False - def __init__(self, conf): - super(CephStorage, self).__init__(conf) + def __init__(self, conf, incoming): + super(CephStorage, self).__init__(conf, incoming) self.rados, self.ioctx = ceph.create_rados_connection(conf) def stop(self): diff --git a/gnocchi/storage/file.py b/gnocchi/storage/file.py index 12d40a33..a9934d23 100644 --- a/gnocchi/storage/file.py +++ b/gnocchi/storage/file.py @@ -36,8 +36,8 @@ OPTS = [ class FileStorage(_carbonara.CarbonaraBasedStorage): WRITE_FULL = True - def __init__(self, conf): - super(FileStorage, self).__init__(conf) + def __init__(self, conf, incoming): + super(FileStorage, self).__init__(conf, incoming) self.basepath = conf.file_basepath self.basepath_tmp = os.path.join(self.basepath, 'tmp') utils.ensure_paths([self.basepath_tmp]) diff --git a/gnocchi/storage/s3.py b/gnocchi/storage/s3.py index 5b789629..917036a3 100644 --- a/gnocchi/storage/s3.py +++ b/gnocchi/storage/s3.py @@ -54,8 +54,8 @@ class S3Storage(_carbonara.CarbonaraBasedStorage): WRITE_FULL = True - def __init__(self, conf): - super(S3Storage, 
self).__init__(conf) + def __init__(self, conf, incoming): + super(S3Storage, self).__init__(conf, incoming) self.s3, self._region_name, self._bucket_prefix = ( s3.get_connection(conf) ) diff --git a/gnocchi/storage/swift.py b/gnocchi/storage/swift.py index 6df94ac7..76eedb3b 100644 --- a/gnocchi/storage/swift.py +++ b/gnocchi/storage/swift.py @@ -71,8 +71,8 @@ class SwiftStorage(_carbonara.CarbonaraBasedStorage): WRITE_FULL = True - def __init__(self, conf): - super(SwiftStorage, self).__init__(conf) + def __init__(self, conf, incoming): + super(SwiftStorage, self).__init__(conf, incoming) self.swift = swift.get_connection(conf) self._container_prefix = conf.swift_container_prefix -- GitLab From 72fdba704d8b3862b2b0adfb0e38c8429149c1b0 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Fri, 25 Nov 2016 08:57:20 +0100 Subject: [PATCH 0524/1483] metricd: move metricd options in metricd group Change-Id: I1b3ac1f62c7b91eb287eac7d145a90a2fb577a24 --- gnocchi/cli.py | 6 +++--- gnocchi/opts.py | 15 +++++++++++++++ gnocchi/storage/__init__.py | 12 ------------ 3 files changed, 18 insertions(+), 15 deletions(-) diff --git a/gnocchi/cli.py b/gnocchi/cli.py index 65dfceb3..3a7ba02a 100644 --- a/gnocchi/cli.py +++ b/gnocchi/cli.py @@ -122,7 +122,7 @@ class MetricReporting(MetricProcessBase): def __init__(self, worker_id, conf): super(MetricReporting, self).__init__( - worker_id, conf, conf.storage.metric_reporting_delay) + worker_id, conf, conf.metricd.metric_reporting_delay) def _run_job(self): try: @@ -146,7 +146,7 @@ class MetricScheduler(MetricProcessBase): def __init__(self, worker_id, conf, queue): super(MetricScheduler, self).__init__( - worker_id, conf, conf.storage.metric_processing_delay) + worker_id, conf, conf.metricd.metric_processing_delay) self._coord, self._my_id = utils.get_coordinator_and_start( conf.storage.coordination_url) self.queue = queue @@ -249,7 +249,7 @@ class MetricJanitor(MetricProcessBase): def __init__(self, worker_id, conf): 
super(MetricJanitor, self).__init__( - worker_id, conf, conf.storage.metric_cleanup_delay) + worker_id, conf, conf.metricd.metric_cleanup_delay) def _run_job(self): try: diff --git a/gnocchi/opts.py b/gnocchi/opts.py index f31499b3..f816409c 100644 --- a/gnocchi/opts.py +++ b/gnocchi/opts.py @@ -44,6 +44,21 @@ def list_opts(): cfg.IntOpt('workers', min=1, help='Number of workers for Gnocchi metric daemons. ' 'By default the available number of CPU is used.'), + cfg.IntOpt('metric_processing_delay', + default=60, + deprecated_group='storage', + help="How many seconds to wait between " + "scheduling new metrics to process"), + cfg.IntOpt('metric_reporting_delay', + deprecated_group='storage', + default=120, + help="How many seconds to wait between " + "metric ingestion reporting"), + cfg.IntOpt('metric_cleanup_delay', + deprecated_group='storage', + default=300, + help="How many seconds to wait between " + "cleaning of expired data"), )), ("api", ( cfg.StrOpt('paste_config', diff --git a/gnocchi/storage/__init__.py b/gnocchi/storage/__init__.py index 401869a6..bb3f000f 100644 --- a/gnocchi/storage/__init__.py +++ b/gnocchi/storage/__init__.py @@ -26,18 +26,6 @@ OPTS = [ cfg.StrOpt('driver', default='file', help='Storage driver to use'), - cfg.IntOpt('metric_processing_delay', - default=60, - help="How many seconds to wait between " - "scheduling new metrics to process"), - cfg.IntOpt('metric_reporting_delay', - default=120, - help="How many seconds to wait between " - "metric ingestion reporting"), - cfg.IntOpt('metric_cleanup_delay', - default=300, - help="How many seconds to wait between " - "cleaning of expired data"), ] LOG = log.getLogger(__name__) -- GitLab From bb687e17150bcf9ac8f8fa3b84043d4c58ae6181 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Tue, 6 Dec 2016 18:13:13 +0100 Subject: [PATCH 0525/1483] carbonara: log a message and do not fail if a file is missing When rewriting data for compression, it's possible a file ___ is missing, because of some 
data corruption. In that case the following traceback happens: Traceback (most recent call last): File "/usr/lib/python2.7/site-packages/gnocchi/storage/_carbonara.py", line 557, in process_new_measures ignore_too_old_timestamps=True) File "/usr/lib/python2.7/site-packages/gnocchi/carbonara.py", line 217, in set_values before_truncate_callback(self) File "/usr/lib/python2.7/site-packages/gnocchi/storage/_carbonara.py", line 551, in _map_add_measures for aggregation in agg_methods)) File "/usr/lib/python2.7/site-packages/gnocchi/storage/_carbonara.py", line 675, in _map_no_thread return list(itertools.starmap(method, list_of_args)) File "/usr/lib/python2.7/site-packages/gnocchi/storage/_carbonara.py", line 316, in _add_measures oldest_mutable_timestamp) File "/usr/lib/python2.7/site-packages/gnocchi/storage/_carbonara.py", line 249, in _store_timeserie_split offset, data = split.serialize(key, compressed=write_full) AttributeError: 'NoneType' object has no attribute 'serialize' This patch makes the driver log a warning and return, so it just ignores the failure and continues anyway. 
Change-Id: I4f367b2418c8be0067746c88bcce74ca756acf4e --- gnocchi/carbonara.py | 27 +++++-- gnocchi/storage/_carbonara.py | 16 ++++- gnocchi/tests/test_storage.py | 130 +++++++++++++++++++++++++++++++++- 3 files changed, 164 insertions(+), 9 deletions(-) diff --git a/gnocchi/carbonara.py b/gnocchi/carbonara.py index b0af065c..c5f9e773 100644 --- a/gnocchi/carbonara.py +++ b/gnocchi/carbonara.py @@ -74,6 +74,12 @@ class UnknownAggregationMethod(Exception): "Unknown aggregation method `%s'" % agg) +class InvalidData(ValueError): + """Error raised when data are corrupted.""" + def __init__(self): + super(InvalidData, self).__init__("Unable to unpack, invalid data") + + def round_timestamp(ts, freq): return pandas.Timestamp( (pandas.Timestamp(ts).value // freq) * freq) @@ -225,8 +231,11 @@ class BoundTimeSerie(TimeSerie): nb_points = ( len(uncompressed) // cls._SERIALIZATION_TIMESTAMP_VALUE_LEN ) - deserial = struct.unpack("<" + "Q" * nb_points + "d" * nb_points, - uncompressed) + try: + deserial = struct.unpack("<" + "Q" * nb_points + "d" * nb_points, + uncompressed) + except struct.error: + raise InvalidData start = deserial[0] timestamps = [start] for delta in itertools.islice(deserial, 1, nb_points): @@ -501,9 +510,12 @@ class AggregatedTimeSerie(TimeSerie): # Compressed format uncompressed = lz4.loads(memoryview(data)[1:].tobytes()) nb_points = len(uncompressed) // cls.COMPRESSED_SERIAL_LEN - deserial = struct.unpack( - '<' + 'H' * nb_points + 'd' * nb_points, - uncompressed) + try: + deserial = struct.unpack( + '<' + 'H' * nb_points + 'd' * nb_points, + uncompressed) + except struct.error: + raise InvalidData for delta in itertools.islice(deserial, nb_points): ts = start + (delta * sampling) y.append(ts) @@ -514,7 +526,10 @@ class AggregatedTimeSerie(TimeSerie): nb_points = len(data) // cls.PADDED_SERIAL_LEN # NOTE(gordc): use '<' for standardized # little-endian byte order - deserial = struct.unpack('<' + '?d' * nb_points, data) + try: + deserial = 
struct.unpack('<' + '?d' * nb_points, data) + except struct.error: + raise InvalidData() # alternating split into 2 list and drop items with False flag for i, val in itertools.compress( six.moves.zip(six.moves.range(nb_points), diff --git a/gnocchi/storage/_carbonara.py b/gnocchi/storage/_carbonara.py index a32772d7..c418dff4 100644 --- a/gnocchi/storage/_carbonara.py +++ b/gnocchi/storage/_carbonara.py @@ -168,7 +168,7 @@ class CarbonaraBasedStorage(storage.StorageDriver): try: return carbonara.AggregatedTimeSerie.unserialize( data, key, aggregation, granularity) - except ValueError: + except carbonara.InvalidData: LOG.error("Data corruption detected for %s " "aggregated `%s' timeserie, granularity `%s' " "around time `%s', ignoring." @@ -246,6 +246,20 @@ class CarbonaraBasedStorage(storage.StorageDriver): else: split.merge(existing) + if split is None: + # `split' can be none if existing is None and no split was passed + # in order to rewrite and compress the data; in that case, it means + # the split key is present and listed, but some aggregation method + # or granularity is missing. That means data is corrupted, but it + # does not mean we have to fail, we can just do nothing and log a + # warning. 
+ LOG.warning("No data found for metric %s, granularity %f " + "and aggregation method %s (split key %s): " + "possible data corruption", + metric, archive_policy_def.granularity, + aggregation, key) + return + offset, data = split.serialize(key, compressed=write_full) return self._store_metric_measures( diff --git a/gnocchi/tests/test_storage.py b/gnocchi/tests/test_storage.py index 7ab25375..96d64fa1 100644 --- a/gnocchi/tests/test_storage.py +++ b/gnocchi/tests/test_storage.py @@ -67,9 +67,9 @@ class TestStorageDriver(tests_base.TestCase): ]) with mock.patch('gnocchi.carbonara.AggregatedTimeSerie.unserialize', - side_effect=ValueError("boom!")): + side_effect=carbonara.InvalidData()): with mock.patch('gnocchi.carbonara.BoundTimeSerie.unserialize', - side_effect=ValueError("boom!")): + side_effect=carbonara.InvalidData()): self.trigger_processing() m = self.storage.get_measures(self.metric) @@ -310,6 +310,132 @@ class TestStorageDriver(tests_base.TestCase): (utils.datetime_utc(2016, 1, 10, 17, 12), 60.0, 46), ], self.storage.get_measures(self.metric, granularity=60.0)) + def test_rewrite_measures_corruption_missing_file(self): + # Create an archive policy that spans on several splits. Each split + # being 3600 points, let's go for 36k points so we have 10 splits. 
+ apname = str(uuid.uuid4()) + ap = archive_policy.ArchivePolicy(apname, 0, [(36000, 60)]) + self.index.create_archive_policy(ap) + self.metric = storage.Metric(uuid.uuid4(), ap) + self.index.create_metric(self.metric.id, str(uuid.uuid4()), + str(uuid.uuid4()), + apname) + + # First store some points scattered across different splits + self.storage.add_measures(self.metric, [ + storage.Measure(utils.datetime_utc(2016, 1, 1, 12, 0, 1), 69), + storage.Measure(utils.datetime_utc(2016, 1, 2, 13, 7, 31), 42), + storage.Measure(utils.datetime_utc(2016, 1, 4, 14, 9, 31), 4), + storage.Measure(utils.datetime_utc(2016, 1, 6, 15, 12, 45), 44), + ]) + self.trigger_processing() + + splits = {'1451520000.0', '1451736000.0', '1451952000.0'} + self.assertEqual(splits, + self.storage._list_split_keys_for_metric( + self.metric, "mean", 60.0)) + + if self.storage.WRITE_FULL: + assertCompressedIfWriteFull = self.assertTrue + else: + assertCompressedIfWriteFull = self.assertFalse + + data = self.storage._get_measures( + self.metric, '1451520000.0', "mean", 60.0) + self.assertTrue(carbonara.AggregatedTimeSerie.is_compressed(data)) + data = self.storage._get_measures( + self.metric, '1451736000.0', "mean", 60.0) + self.assertTrue(carbonara.AggregatedTimeSerie.is_compressed(data)) + data = self.storage._get_measures( + self.metric, '1451952000.0', "mean", 60.0) + assertCompressedIfWriteFull( + carbonara.AggregatedTimeSerie.is_compressed(data)) + + self.assertEqual([ + (utils.datetime_utc(2016, 1, 1, 12), 60.0, 69), + (utils.datetime_utc(2016, 1, 2, 13, 7), 60.0, 42), + (utils.datetime_utc(2016, 1, 4, 14, 9), 60.0, 4), + (utils.datetime_utc(2016, 1, 6, 15, 12), 60.0, 44), + ], self.storage.get_measures(self.metric, granularity=60.0)) + + # Test what happens if we delete the latest split and then need to + # compress it! 
+ self.storage._delete_metric_measures(self.metric, + '1451952000.0', + 'mean', 60.0) + + # Now store brand new points that should force a rewrite of one of the + # split (keep in mind the back window size in one hour here). We move + # the BoundTimeSerie processing timeserie far away from its current + # range. + self.storage.add_measures(self.metric, [ + storage.Measure(utils.datetime_utc(2016, 1, 10, 16, 18, 45), 45), + storage.Measure(utils.datetime_utc(2016, 1, 10, 17, 12, 45), 46), + ]) + self.trigger_processing() + + def test_rewrite_measures_corruption_bad_data(self): + # Create an archive policy that spans on several splits. Each split + # being 3600 points, let's go for 36k points so we have 10 splits. + apname = str(uuid.uuid4()) + ap = archive_policy.ArchivePolicy(apname, 0, [(36000, 60)]) + self.index.create_archive_policy(ap) + self.metric = storage.Metric(uuid.uuid4(), ap) + self.index.create_metric(self.metric.id, str(uuid.uuid4()), + str(uuid.uuid4()), + apname) + + # First store some points scattered across different splits + self.storage.add_measures(self.metric, [ + storage.Measure(utils.datetime_utc(2016, 1, 1, 12, 0, 1), 69), + storage.Measure(utils.datetime_utc(2016, 1, 2, 13, 7, 31), 42), + storage.Measure(utils.datetime_utc(2016, 1, 4, 14, 9, 31), 4), + storage.Measure(utils.datetime_utc(2016, 1, 6, 15, 12, 45), 44), + ]) + self.trigger_processing() + + splits = {'1451520000.0', '1451736000.0', '1451952000.0'} + self.assertEqual(splits, + self.storage._list_split_keys_for_metric( + self.metric, "mean", 60.0)) + + if self.storage.WRITE_FULL: + assertCompressedIfWriteFull = self.assertTrue + else: + assertCompressedIfWriteFull = self.assertFalse + + data = self.storage._get_measures( + self.metric, '1451520000.0', "mean", 60.0) + self.assertTrue(carbonara.AggregatedTimeSerie.is_compressed(data)) + data = self.storage._get_measures( + self.metric, '1451736000.0', "mean", 60.0) + 
self.assertTrue(carbonara.AggregatedTimeSerie.is_compressed(data)) + data = self.storage._get_measures( + self.metric, '1451952000.0', "mean", 60.0) + assertCompressedIfWriteFull( + carbonara.AggregatedTimeSerie.is_compressed(data)) + + self.assertEqual([ + (utils.datetime_utc(2016, 1, 1, 12), 60.0, 69), + (utils.datetime_utc(2016, 1, 2, 13, 7), 60.0, 42), + (utils.datetime_utc(2016, 1, 4, 14, 9), 60.0, 4), + (utils.datetime_utc(2016, 1, 6, 15, 12), 60.0, 44), + ], self.storage.get_measures(self.metric, granularity=60.0)) + + # Test what happens if we write garbage + self.storage._store_metric_measures( + self.metric, '1451952000.0', "mean", 60.0, b"oh really?") + + # Now store brand new points that should force a rewrite of one of the + # split (keep in mind the back window size in one hour here). We move + # the BoundTimeSerie processing timeserie far away from its current + # range. + self.storage.add_measures(self.metric, [ + storage.Measure(utils.datetime_utc(2016, 1, 10, 16, 18, 45), 45), + storage.Measure(utils.datetime_utc(2016, 1, 10, 17, 12, 45), 46), + ]) + self.trigger_processing() + def test_updated_measures(self): self.storage.add_measures(self.metric, [ storage.Measure(utils.datetime_utc(2014, 1, 1, 12, 0, 1), 69), -- GitLab From 6449ed81ea84a1d77d88707de0d1940c277cf15c Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Tue, 3 Jan 2017 14:07:19 +0100 Subject: [PATCH 0526/1483] run-tests: use case rather than if/elif/else Change-Id: I4ec57a2ff3057ee2fe3ac49cab66cfe8628ab2d6 --- run-tests.sh | 38 ++++++++++++++++++++------------------ 1 file changed, 20 insertions(+), 18 deletions(-) diff --git a/run-tests.sh b/run-tests.sh index 6577fd52..ecb9797d 100755 --- a/run-tests.sh +++ b/run-tests.sh @@ -7,23 +7,25 @@ do export GNOCCHI_TEST_STORAGE_DRIVER=$storage for indexer in ${GNOCCHI_TEST_INDEXER_DRIVERS} do - if [ "$GNOCCHI_TEST_STORAGE_DRIVER" == "ceph" ]; then - pifpaf run ceph -- pifpaf -g GNOCCHI_INDEXER_URL run $indexer -- ./tools/pretty_tox.sh $* - 
elif [ "$GNOCCHI_TEST_STORAGE_DRIVER" == "s3" ] - then - if ! which s3rver >/dev/null 2>&1 - then - mkdir npm-s3rver - export NPM_CONFIG_PREFIX=npm-s3rver - npm install s3rver --global - export PATH=$PWD/npm-s3rver/bin:$PATH - fi - pifpaf -e GNOCCHI_STORAGE run s3rver -- \ - pifpaf -e GNOCCHI_INDEXER run $indexer -- \ - ./tools/pretty_tox.sh $* - - else - pifpaf -g GNOCCHI_INDEXER_URL run $indexer -- ./tools/pretty_tox.sh $* - fi + case $GNOCCHI_TEST_STORAGE_DRIVER in + ceph) + pifpaf run ceph -- pifpaf -g GNOCCHI_INDEXER_URL run $indexer -- ./tools/pretty_tox.sh $* + ;; + s3) + if ! which s3rver >/dev/null 2>&1 + then + mkdir npm-s3rver + export NPM_CONFIG_PREFIX=npm-s3rver + npm install s3rver --global + export PATH=$PWD/npm-s3rver/bin:$PATH + fi + pifpaf -e GNOCCHI_STORAGE run s3rver -- \ + pifpaf -e GNOCCHI_INDEXER run $indexer -- \ + ./tools/pretty_tox.sh $* + ;; + *) + pifpaf -g GNOCCHI_INDEXER_URL run $indexer -- ./tools/pretty_tox.sh $* + ;; + esac done done -- GitLab From 246b7873b413418bc244daba6a424f27f5f26e01 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Tue, 27 Dec 2016 18:32:04 +0100 Subject: [PATCH 0527/1483] archive_policy: lighten the default archive policies This reduces by one or two thirds the default archive policies CPU consumption for 'low' and 'medium' by removing one or two definitions. Also make it clearer that using low and medium is going to be fasten in terms of CPU as the consumption depends on the number of definition in an archive policy. 
Change-Id: Iaba3b2ef88858ad777147d2859180d9a27658f1c --- doc/source/architecture.rst | 37 ++++++++------- gnocchi/archive_policy.py | 21 ++++----- gnocchi/tests/base.py | 45 +++++++++++++++++-- ...ult-archive-policies-455561c027edf4ad.yaml | 5 +++ 4 files changed, 75 insertions(+), 33 deletions(-) create mode 100644 releasenotes/notes/lighten-default-archive-policies-455561c027edf4ad.yaml diff --git a/doc/source/architecture.rst b/doc/source/architecture.rst index ca4e882e..91d156ca 100755 --- a/doc/source/architecture.rst +++ b/doc/source/architecture.rst @@ -85,14 +85,14 @@ same "one year, one minute aggregations" resolution, the space used will go up to a maximum of 6 × 4.1 MiB = 24.6 MiB. -How to set the archive policy and granularity ---------------------------------------------- - -In Gnocchi, the archive policy is expressed in number of points. If your -archive policy defines a policy of 10 points with a granularity of 1 second, -the time series archive will keep up to 10 seconds, each representing an -aggregation over 1 second. This means the time series will at maximum retain 10 -seconds of data (sometimes a bit more) between the more recent point and the +How to define archive policies +------------------------------ + +In Gnocchi, the archive policy definitions are expressed in number of points. +If your archive policy defines a policy of 10 points with a granularity of 1 +second, the time series archive will keep up to 10 seconds, each representing +an aggregation over 1 second. This means the time series will at maximum retain +10 seconds of data (sometimes a bit more) between the more recent point and the oldest point. That does not mean it will be 10 consecutive seconds: there might be a gap if data is fed irregularly. @@ -112,6 +112,12 @@ This would represent 6125 points × 9 = 54 KiB per aggregation method. If you use the 8 standard aggregation method, your metric will take up to 8 × 54 KiB = 432 KiB of disk space. 
+Be aware that the more definitions you set in an archive policy, the more CPU +it will consume. Therefore, creating an archive policy with 2 definitons (e.g. +1 second granularity for 1 day and 1 minute granularity for 1 month) will +consume twice CPU than just one definition (e.g. just 1 second granularity for +1 day). + Default archive policies ------------------------ @@ -119,19 +125,16 @@ By default, 3 archive policies are created using the default archive policy list (listed in `default_aggregation_methods`, i.e. mean, min, max, sum, std, count): -- low (maximum estimated size per metric: 5 KiB) +- low (maximum estimated size per metric: 406 MiB) - * 5 minutes granularity over 1 hour - * 1 hour granularity over 1 day - * 1 day granularity over 1 month + * 5 minutes granularity over 30 days -- medium (maximum estimated size per metric: 139 KiB) +- medium (maximum estimated size per metric: 887 KiB) - * 1 minute granularity over 1 day - * 1 hour granularity over 1 week - * 1 day granularity over 1 year + * 1 minute granularity over 7 days + * 1 hour granularity over 365 days -- high (maximum estimated size per metric: 1 578 KiB) +- high (maximum estimated size per metric: 1 057 KiB) * 1 second granularity over 1 hour * 1 minute granularity over 1 week diff --git a/gnocchi/archive_policy.py b/gnocchi/archive_policy.py index b5685f34..c4b1904d 100644 --- a/gnocchi/archive_policy.py +++ b/gnocchi/archive_policy.py @@ -211,22 +211,19 @@ class ArchivePolicyItem(dict): DEFAULT_ARCHIVE_POLICIES = { 'low': ArchivePolicy( "low", 0, [ - # 5 minutes resolution for an hour - ArchivePolicyItem(granularity=300, points=12), - # 1 hour resolution for a day - ArchivePolicyItem(granularity=3600, points=24), - # 1 day resolution for a month - ArchivePolicyItem(granularity=3600 * 24, points=30), + # 5 minutes resolution for 30 days + ArchivePolicyItem(granularity=300, + timespan=30 * 24 * 60 * 60), ], ), 'medium': ArchivePolicy( "medium", 0, [ - # 1 minute resolution for an day - 
ArchivePolicyItem(granularity=60, points=60 * 24), - # 1 hour resolution for a week - ArchivePolicyItem(granularity=3600, points=7 * 24), - # 1 day resolution for a year - ArchivePolicyItem(granularity=3600 * 24, points=365), + # 1 minute resolution for 7 days + ArchivePolicyItem(granularity=60, + timespan=7 * 24 * 60 * 60), + # 1 hour resolution for 365 days + ArchivePolicyItem(granularity=3600, + timespan=365 * 24 * 60 * 60), ], ), 'high': ArchivePolicy( diff --git a/gnocchi/tests/base.py b/gnocchi/tests/base.py index 5da70b57..f1ab226d 100644 --- a/gnocchi/tests/base.py +++ b/gnocchi/tests/base.py @@ -179,12 +179,50 @@ class TestCase(base.BaseTestCase): ARCHIVE_POLICIES = { 'no_granularity_match': archive_policy.ArchivePolicy( "no_granularity_match", - 0, - [ + 0, [ # 2 second resolution for a day archive_policy.ArchivePolicyItem( granularity=2, points=3600 * 24), - ], + ], + ), + 'low': archive_policy.ArchivePolicy( + "low", 0, [ + # 5 minutes resolution for an hour + archive_policy.ArchivePolicyItem( + granularity=300, points=12), + # 1 hour resolution for a day + archive_policy.ArchivePolicyItem( + granularity=3600, points=24), + # 1 day resolution for a month + archive_policy.ArchivePolicyItem( + granularity=3600 * 24, points=30), + ], + ), + 'medium': archive_policy.ArchivePolicy( + "medium", 0, [ + # 1 minute resolution for an day + archive_policy.ArchivePolicyItem( + granularity=60, points=60 * 24), + # 1 hour resolution for a week + archive_policy.ArchivePolicyItem( + granularity=3600, points=7 * 24), + # 1 day resolution for a year + archive_policy.ArchivePolicyItem( + granularity=3600 * 24, points=365), + ], + ), + 'high': archive_policy.ArchivePolicy( + "high", 0, [ + # 1 second resolution for an hour + archive_policy.ArchivePolicyItem( + granularity=1, points=3600), + # 1 minute resolution for a week + archive_policy.ArchivePolicyItem( + granularity=60, points=60 * 24 * 7), + # 1 hour resolution for a year + archive_policy.ArchivePolicyItem( + 
granularity=3600, points=365 * 24), + ], ), } @@ -238,7 +276,6 @@ class TestCase(base.BaseTestCase): self.coord.stop() self.archive_policies = self.ARCHIVE_POLICIES.copy() - self.archive_policies.update(archive_policy.DEFAULT_ARCHIVE_POLICIES) for name, ap in six.iteritems(self.archive_policies): # Create basic archive policies try: diff --git a/releasenotes/notes/lighten-default-archive-policies-455561c027edf4ad.yaml b/releasenotes/notes/lighten-default-archive-policies-455561c027edf4ad.yaml new file mode 100644 index 00000000..a213d3e3 --- /dev/null +++ b/releasenotes/notes/lighten-default-archive-policies-455561c027edf4ad.yaml @@ -0,0 +1,5 @@ +--- +other: + - The default archive policies "low" and "medium" are now storing less data + than they used to be. They are only using respectively 1 and 2 definition + of archiving policy, which speeds up by 66% and 33% their computing speed. -- GitLab From c751087f41a2a108fab1f1ac6b6edafc0821d4a4 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Tue, 27 Dec 2016 18:45:02 +0100 Subject: [PATCH 0528/1483] tools: import a small tools to compute size of archive policies Change-Id: I131664319cc9f9c8eda9b5c24e0de04664da3f25 --- tools/gnocchi-archive-policy-size.py | 49 ++++++++++++++++++++++++++++ 1 file changed, 49 insertions(+) create mode 100755 tools/gnocchi-archive-policy-size.py diff --git a/tools/gnocchi-archive-policy-size.py b/tools/gnocchi-archive-policy-size.py new file mode 100755 index 00000000..f3fbe784 --- /dev/null +++ b/tools/gnocchi-archive-policy-size.py @@ -0,0 +1,49 @@ +#!/usr/bin/env python +# +# Copyright (c) 2016 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import sys + +from gnocchi import utils + + +WORST_CASE_BYTES_PER_POINT = 8.04 + + +if (len(sys.argv) - 1) % 2 != 0: + print("Usage: %s ... " + % sys.argv[0]) + sys.exit(1) + + +def sizeof_fmt(num, suffix='B'): + for unit in ('', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi'): + if abs(num) < 1024.0: + return "%3.1f%s%s" % (num, unit, suffix) + num /= 1024.0 + return "%.1f%s%s" % (num, 'Yi', suffix) + + +size = 0 +for g, t in utils.grouper(sys.argv[1:], 2): + granularity = utils.to_timespan(g) + timespan = utils.to_timespan(t) + points = timespan.total_seconds() / granularity.total_seconds() + cursize = points * WORST_CASE_BYTES_PER_POINT + size += cursize + print("%s over %s = %d points = %s" % (g, t, points, sizeof_fmt(cursize))) + +print("Total: " + sizeof_fmt(size)) -- GitLab From 3b1415ebe40b4b7d1daafee228cec5ac4fed9296 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Mon, 19 Dec 2016 12:17:37 +0100 Subject: [PATCH 0529/1483] rest: make sure 409 is returned when double creating resource with non-UUID Change-Id: If2cc8b3c952923180044b46f652f40d844864a35 --- .../tests/gabbi/gabbits/transformedids.yaml | 28 +++++++++++++++++++ 1 file changed, 28 insertions(+) diff --git a/gnocchi/tests/gabbi/gabbits/transformedids.yaml b/gnocchi/tests/gabbi/gabbits/transformedids.yaml index fdf1d974..a4d9a357 100644 --- a/gnocchi/tests/gabbi/gabbits/transformedids.yaml +++ b/gnocchi/tests/gabbi/gabbits/transformedids.yaml @@ -30,6 +30,34 @@ tests: status: 201 # Check transformed uuids across the URL hierarchy + - name: post new resource non uuid for duplication test + 
POST: /v1/resource/generic + data: + id: generic zero + user_id: 0fbb231484614b1a80131fc22f6afc9c + project_id: f3d41b770cc14f0bb94a1d5be9c0e3ea + metrics: + cpu.util: + archive_policy_name: medium + status: 201 + response_json_paths: + created_by_user_id: 0fbb231484614b1a80131fc22f6afc9c + created_by_project_id: f3d41b770cc14f0bb94a1d5be9c0e3ea + response_headers: + # is a UUID + location: /v1/resource/generic/[a-f0-9-]{36}/ + + - name: post new resource non uuid duplication + POST: /v1/resource/generic + data: + id: generic zero + user_id: 0fbb231484614b1a80131fc22f6afc9c + project_id: f3d41b770cc14f0bb94a1d5be9c0e3ea + metrics: + cpu.util: + archive_policy_name: medium + status: 409 + - name: post new resource non uuid POST: /v1/resource/generic data: -- GitLab From 6964ac23a2f9e15047f18c3c1f5ab86457c991b8 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Tue, 3 Jan 2017 14:16:30 +0100 Subject: [PATCH 0530/1483] Ship api-paste.ini out of etc/ It makes less sense to change this file by default, but the user still can specify a different path, copy and edit this file if needed. This just hides more of the internal soup to end users. 
Change-Id: I0f64eaa73f30d7e45a867ecd14c22bc4ad36ca50 --- devstack/settings | 2 +- gnocchi/opts.py | 8 ++++++-- {etc/gnocchi => gnocchi/rest}/api-paste.ini | 0 gnocchi/tests/gabbi/fixtures.py | 4 ---- gnocchi/tests/test_rest.py | 3 --- 5 files changed, 7 insertions(+), 10 deletions(-) rename {etc/gnocchi => gnocchi/rest}/api-paste.ini (100%) diff --git a/devstack/settings b/devstack/settings index b693db70..21db8c03 100644 --- a/devstack/settings +++ b/devstack/settings @@ -6,7 +6,7 @@ enable_service gnocchi-statsd GNOCCHI_DIR=$DEST/gnocchi GNOCCHI_CONF_DIR=/etc/gnocchi GNOCCHI_CONF=$GNOCCHI_CONF_DIR/gnocchi.conf -GNOCCHI_PASTE_CONF=$GNOCCHI_CONF_DIR/api-paste.ini +GNOCCHI_PASTE_CONF=$GNOCCHI_DIR/gnocchi/rest/api-paste.ini GNOCCHI_LOG_DIR=/var/log/gnocchi GNOCCHI_AUTH_CACHE_DIR=${GNOCCHI_AUTH_CACHE_DIR:-/var/cache/gnocchi} GNOCCHI_WSGI_DIR=${GNOCCHI_WSGI_DIR:-/var/www/gnocchi} diff --git a/gnocchi/opts.py b/gnocchi/opts.py index f816409c..f9672bb6 100644 --- a/gnocchi/opts.py +++ b/gnocchi/opts.py @@ -13,10 +13,11 @@ # under the License. 
import copy import itertools +import os +import uuid from oslo_config import cfg from oslo_middleware import cors -import uuid import gnocchi.archive_policy import gnocchi.indexer @@ -62,7 +63,10 @@ def list_opts(): )), ("api", ( cfg.StrOpt('paste_config', - default='api-paste.ini', + default=os.path.abspath( + os.path.join( + os.path.dirname(__file__), + "rest", "api-paste.ini")), help='Path to API Paste configuration.'), cfg.IntOpt('max_limit', default=1000, diff --git a/etc/gnocchi/api-paste.ini b/gnocchi/rest/api-paste.ini similarity index 100% rename from etc/gnocchi/api-paste.ini rename to gnocchi/rest/api-paste.ini diff --git a/gnocchi/tests/gabbi/fixtures.py b/gnocchi/tests/gabbi/fixtures.py index 37ce9ebe..632d1531 100644 --- a/gnocchi/tests/gabbi/fixtures.py +++ b/gnocchi/tests/gabbi/fixtures.py @@ -77,10 +77,6 @@ class ConfigFixture(fixture.GabbiFixture): conf = service.prepare_service([], default_config_files=dcf) - conf.set_override('paste_config', - os.path.abspath('etc/gnocchi/api-paste.ini'), - 'api') - # NOTE(sileht): This is not concurrency safe, but only this tests file # deal with cors, so we are fine. 
set_override don't work because cors # group doesn't yet exists, and we the CORS middleware is created it diff --git a/gnocchi/tests/test_rest.py b/gnocchi/tests/test_rest.py index 9772dbe2..e9be2c06 100644 --- a/gnocchi/tests/test_rest.py +++ b/gnocchi/tests/test_rest.py @@ -127,9 +127,6 @@ class RestTest(tests_base.TestCase, testscenarios.TestWithScenarios): def setUp(self): super(RestTest, self).setUp() - self.conf.set_override('paste_config', - self.path_get('etc/gnocchi/api-paste.ini'), - group="api") self.auth_token_fixture = self.useFixture( ksm_fixture.AuthTokenFixture()) -- GitLab From 37db178436f3b0c8b6214c2d1a51789f49e8387a Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Tue, 3 Jan 2017 14:23:35 +0100 Subject: [PATCH 0531/1483] Move default policy.json away from etc/ This makes it easier to install Gnocchi while still having the proper default easy to override. Change-Id: Ie923e05d0c25d0352d6352dc6c47d90bdf3249d5 --- {etc/gnocchi => gnocchi/rest}/policy.json | 0 gnocchi/service.py | 8 ++++++-- gnocchi/tests/base.py | 14 -------------- gnocchi/tests/gabbi/fixtures.py | 3 --- 4 files changed, 6 insertions(+), 19 deletions(-) rename {etc/gnocchi => gnocchi/rest}/policy.json (100%) diff --git a/etc/gnocchi/policy.json b/gnocchi/rest/policy.json similarity index 100% rename from etc/gnocchi/policy.json rename to gnocchi/rest/policy.json diff --git a/gnocchi/service.py b/gnocchi/service.py index 9557e5cc..069b0bc7 100644 --- a/gnocchi/service.py +++ b/gnocchi/service.py @@ -1,4 +1,4 @@ -# Copyright (c) 2016 Red Hat, Inc. +# Copyright (c) 2016-2017 Red Hat, Inc. # Copyright (c) 2015 eNovance # Copyright (c) 2013 Mirantis Inc. # @@ -14,6 +14,7 @@ # implied. # See the License for the specific language governing permissions and # limitations under the License. 
+import os from oslo_config import cfg from oslo_db import options as db_options @@ -37,7 +38,10 @@ def prepare_service(args=None, conf=None, # FIXME(jd) Use the pkg_entry info to register the options of these libs log.register_options(conf) db_options.set_defaults(conf) - policy_opts.set_defaults(conf) + policy_opts.set_defaults(conf, policy_file=os.path.abspath( + os.path.join( + os.path.dirname(__file__), + "rest", "policy.json"))) # Register our own Gnocchi options for group, options in opts.list_opts(): diff --git a/gnocchi/tests/base.py b/gnocchi/tests/base.py index 5da70b57..ebb30f3d 100644 --- a/gnocchi/tests/base.py +++ b/gnocchi/tests/base.py @@ -188,25 +188,11 @@ class TestCase(base.BaseTestCase): ), } - @staticmethod - def path_get(project_file=None): - root = os.path.abspath(os.path.join(os.path.dirname(__file__), - '..', - '..', - ) - ) - if project_file: - return os.path.join(root, project_file) - return root - @classmethod def setUpClass(self): super(TestCase, self).setUpClass() self.conf = service.prepare_service([], default_config_files=[]) - self.conf.set_override('policy_file', - self.path_get('etc/gnocchi/policy.json'), - group="oslo_policy") # NOTE(jd) This allows to test S3 on AWS if not os.getenv("AWS_ACCESS_KEY_ID"): diff --git a/gnocchi/tests/gabbi/fixtures.py b/gnocchi/tests/gabbi/fixtures.py index 632d1531..49b01cd4 100644 --- a/gnocchi/tests/gabbi/fixtures.py +++ b/gnocchi/tests/gabbi/fixtures.py @@ -95,9 +95,6 @@ class ConfigFixture(fixture.GabbiFixture): # and thus should override conf settings. 
if 'DEVSTACK_GATE_TEMPEST' not in os.environ: conf.set_override('driver', 'file', 'storage') - conf.set_override('policy_file', - os.path.abspath('etc/gnocchi/policy.json'), - group="oslo_policy") conf.set_override('file_basepath', data_tmp_dir, 'storage') # NOTE(jd) All of that is still very SQL centric but we only support -- GitLab From 98a49616c2697554128dcdffb5c443baa0165329 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Tue, 18 Oct 2016 18:33:44 +0200 Subject: [PATCH 0532/1483] rest: add auth_mode to pick authentication mode Change-Id: Id517841ac4e7074402f6090cc6779235efa8b314 --- devstack/plugin.sh | 4 ++-- devstack/settings | 1 - doc/source/configuration.rst | 15 ++++----------- gnocchi/opts.py | 2 ++ gnocchi/rest/api-paste.ini | 12 ++++-------- gnocchi/rest/app.py | 4 +++- gnocchi/tests/gabbi/fixtures.py | 3 ++- gnocchi/tests/test_rest.py | 7 +++++-- .../notes/auth_type_option-c335b219afba5569.yaml | 5 +++++ 9 files changed, 27 insertions(+), 26 deletions(-) create mode 100644 releasenotes/notes/auth_type_option-c335b219afba5569.yaml diff --git a/devstack/plugin.sh b/devstack/plugin.sh index d920802b..5fa2ba63 100644 --- a/devstack/plugin.sh +++ b/devstack/plugin.sh @@ -276,12 +276,12 @@ function configure_gnocchi { fi if [ "$GNOCCHI_USE_KEYSTONE" == "True" ] ; then - iniset $GNOCCHI_PASTE_CONF pipeline:main pipeline gnocchi+auth + iniset $GNOCCHI_CONF api auth_mode keystone if is_service_enabled gnocchi-grafana; then iniset $GNOCCHI_CONF cors allowed_origin ${GRAFANA_URL} fi else - iniset $GNOCCHI_PASTE_CONF pipeline:main pipeline gnocchi+noauth + inidelete $GNOCCHI_CONF api auth_mode fi # Configure the indexer database diff --git a/devstack/settings b/devstack/settings index 21db8c03..b45ceebb 100644 --- a/devstack/settings +++ b/devstack/settings @@ -6,7 +6,6 @@ enable_service gnocchi-statsd GNOCCHI_DIR=$DEST/gnocchi GNOCCHI_CONF_DIR=/etc/gnocchi GNOCCHI_CONF=$GNOCCHI_CONF_DIR/gnocchi.conf -GNOCCHI_PASTE_CONF=$GNOCCHI_DIR/gnocchi/rest/api-paste.ini 
GNOCCHI_LOG_DIR=/var/log/gnocchi GNOCCHI_AUTH_CACHE_DIR=${GNOCCHI_AUTH_CACHE_DIR:-/var/cache/gnocchi} GNOCCHI_WSGI_DIR=${GNOCCHI_WSGI_DIR:-/var/www/gnocchi} diff --git a/doc/source/configuration.rst b/doc/source/configuration.rst index 7c5fcce7..c923ee49 100644 --- a/doc/source/configuration.rst +++ b/doc/source/configuration.rst @@ -55,20 +55,13 @@ Gnocchi provides these indexer drivers: .. _`PostgreSQL`: http://postgresql.org .. _`MySQL`: http://mysql.com -Configuring the WSGI pipeline +Configuring authentication ----------------------------- -The API server leverages `Paste Deployment`_ to manage its configuration. You -can edit the `/etc/gnocchi/api-paste.ini` to tweak the WSGI pipeline of the -Gnocchi REST HTTP server. By default, no authentication middleware is enabled, -meaning your request will have to provides the authentication headers. - -Gnocchi is easily connectable with `OpenStack Keystone`_. If you successfully +The API server supports different authentication methods. `OpenStack Keystone`_ +is supported but by default, no authentication is enabled. If you successfully installed the `keystone` flavor using `pip` (see :ref:`installation`), you can -edit the `api-paste.ini` file to add the Keystone authentication middleware:: - - [pipeline:main] - pipeline = gnocchi+auth +set `api.auth_mode` to `keystone` to enable Keystone authentication. .. _`Paste Deployment`: http://pythonpaste.org/deploy/ .. 
_`OpenStack Keystone`: http://launchpad.net/keystone diff --git a/gnocchi/opts.py b/gnocchi/opts.py index f9672bb6..3631ab88 100644 --- a/gnocchi/opts.py +++ b/gnocchi/opts.py @@ -68,6 +68,8 @@ def list_opts(): os.path.dirname(__file__), "rest", "api-paste.ini")), help='Path to API Paste configuration.'), + cfg.StrOpt('auth_mode', + help='Authentication mode to use.'), cfg.IntOpt('max_limit', default=1000, help=('The maximum number of items returned in a ' diff --git a/gnocchi/rest/api-paste.ini b/gnocchi/rest/api-paste.ini index 8ff7fffd..0dd92d5e 100644 --- a/gnocchi/rest/api-paste.ini +++ b/gnocchi/rest/api-paste.ini @@ -1,23 +1,19 @@ -# Use gnocchi+auth in the pipeline if you want to use keystone authentication -[pipeline:main] -pipeline = gnocchi+noauth - -[composite:gnocchi+noauth] +[composite:gnocchi] use = egg:Paste#urlmap / = gnocchiversions_pipeline /v1 = gnocchiv1+noauth /healthcheck = healthcheck -[composite:gnocchi+auth] +[composite:gnocchi+keystone] use = egg:Paste#urlmap / = gnocchiversions_pipeline -/v1 = gnocchiv1+auth +/v1 = gnocchiv1+keystone /healthcheck = healthcheck [pipeline:gnocchiv1+noauth] pipeline = http_proxy_to_wsgi gnocchiv1 -[pipeline:gnocchiv1+auth] +[pipeline:gnocchiv1+keystone] pipeline = http_proxy_to_wsgi keystone_authtoken gnocchiv1 [pipeline:gnocchiversions_pipeline] diff --git a/gnocchi/rest/app.py b/gnocchi/rest/app.py index 58966f48..94837919 100644 --- a/gnocchi/rest/app.py +++ b/gnocchi/rest/app.py @@ -76,7 +76,7 @@ global APPCONFIGS APPCONFIGS = {} -def load_app(conf, appname=None, indexer=None, storage=None, +def load_app(conf, indexer=None, storage=None, not_implemented_middleware=True): global APPCONFIGS @@ -102,6 +102,8 @@ def load_app(conf, appname=None, indexer=None, storage=None, APPCONFIGS[configkey] = config LOG.info("WSGI config used: %s", cfg_path) + appname = "gnocchi" + ("+" + conf.api.auth_mode + if conf.api.auth_mode else "") app = deploy.loadapp("config:" + cfg_path, name=appname, global_conf={'configkey': 
configkey}) return cors.CORS(app, conf=conf) diff --git a/gnocchi/tests/gabbi/fixtures.py b/gnocchi/tests/gabbi/fixtures.py index 49b01cd4..ce14a69a 100644 --- a/gnocchi/tests/gabbi/fixtures.py +++ b/gnocchi/tests/gabbi/fixtures.py @@ -111,6 +111,8 @@ class ConfigFixture(fixture.GabbiFixture): # Set pagination to a testable value conf.set_override('max_limit', 7, 'api') + # Those tests do not use any auth + conf.set_override("auth_mode", None, 'api') self.index = index @@ -118,7 +120,6 @@ class ConfigFixture(fixture.GabbiFixture): s.upgrade(index) LOAD_APP_KWARGS = { - 'appname': 'gnocchi+noauth', 'storage': s, 'indexer': index, 'conf': conf, diff --git a/gnocchi/tests/test_rest.py b/gnocchi/tests/test_rest.py index e9be2c06..130af7ce 100644 --- a/gnocchi/tests/test_rest.py +++ b/gnocchi/tests/test_rest.py @@ -152,9 +152,12 @@ class RestTest(tests_base.TestCase, testscenarios.TestWithScenarios): project_id=TestingApp.PROJECT_ID_2, role_list=["member"]) + if self.auth: + self.conf.set_override("auth_mode", "keystone", group="api") + else: + self.conf.set_override("auth_mode", None, group="api") + self.app = TestingApp(app.load_app(conf=self.conf, - appname="gnocchi+auth" - if self.auth else "gnocchi+noauth", indexer=self.index, storage=self.storage, not_implemented_middleware=False), diff --git a/releasenotes/notes/auth_type_option-c335b219afba5569.yaml b/releasenotes/notes/auth_type_option-c335b219afba5569.yaml new file mode 100644 index 00000000..53727864 --- /dev/null +++ b/releasenotes/notes/auth_type_option-c335b219afba5569.yaml @@ -0,0 +1,5 @@ +--- +upgrade: + - >- + The new `auth_type` option specifies which authentication system to use for + the REST API. Its default is still `noauth`. 
-- GitLab From da0498e3f6e6d4d562c5eddba0121a6dd8570ddb Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Thu, 20 Oct 2016 15:24:16 +0200 Subject: [PATCH 0533/1483] rest: introduce auth_helper to filter resources This introduce a new plugin system called auth_helper, that can be used to decide how to filter resources depending on the authentication done. That does not change the current filtering rules applied with Keystone authentication, but it removes the artificial filtering that was done when no authentication method was configured. Change-Id: I8b6834a10812f16aed808d3a219be9fd86214f4e --- doc/source/configuration.rst | 4 +- gnocchi/opts.py | 4 ++ gnocchi/rest/__init__.py | 53 ++-------------- gnocchi/rest/api-paste.ini | 2 +- gnocchi/rest/app.py | 8 ++- gnocchi/rest/auth_helper.py | 63 +++++++++++++++++++ gnocchi/tests/gabbi/fixtures.py | 2 +- gnocchi/tests/test_rest.py | 2 +- .../auth_type_pluggable-76a3c73cac8eec6a.yaml | 5 ++ setup.cfg | 4 ++ 10 files changed, 93 insertions(+), 54 deletions(-) create mode 100644 gnocchi/rest/auth_helper.py create mode 100644 releasenotes/notes/auth_type_pluggable-76a3c73cac8eec6a.yaml diff --git a/doc/source/configuration.rst b/doc/source/configuration.rst index c923ee49..63e3506b 100644 --- a/doc/source/configuration.rst +++ b/doc/source/configuration.rst @@ -58,8 +58,8 @@ Gnocchi provides these indexer drivers: Configuring authentication ----------------------------- -The API server supports different authentication methods. `OpenStack Keystone`_ -is supported but by default, no authentication is enabled. If you successfully +The API server supports different authentication methods: `noauth` (the +default) or `keystone` to use `OpenStack Keystone`_. If you successfully installed the `keystone` flavor using `pip` (see :ref:`installation`), you can set `api.auth_mode` to `keystone` to enable Keystone authentication. 
diff --git a/gnocchi/opts.py b/gnocchi/opts.py index 3631ab88..69e4d344 100644 --- a/gnocchi/opts.py +++ b/gnocchi/opts.py @@ -18,6 +18,7 @@ import uuid from oslo_config import cfg from oslo_middleware import cors +from stevedore import extension import gnocchi.archive_policy import gnocchi.indexer @@ -69,6 +70,9 @@ def list_opts(): "rest", "api-paste.ini")), help='Path to API Paste configuration.'), cfg.StrOpt('auth_mode', + default="noauth", + choices=extension.ExtensionManager( + "gnocchi.rest.auth_helper").names(), help='Authentication mode to use.'), cfg.IntOpt('max_limit', default=1000, diff --git a/gnocchi/rest/__init__.py b/gnocchi/rest/__init__.py index 739944a0..aaad46d1 100644 --- a/gnocchi/rest/__init__.py +++ b/gnocchi/rest/__init__.py @@ -90,44 +90,6 @@ def enforce(rule, target): abort(403) -def _get_list_resource_policy_filter(rule, resource_type, user, project): - try: - # Check if the policy allows the user to list any resource - enforce(rule, { - "resource_type": resource_type, - }) - except webob.exc.HTTPForbidden: - policy_filter = [] - try: - # Check if the policy allows the user to list resources linked - # to their project - enforce(rule, { - "resource_type": resource_type, - "project_id": project, - }) - except webob.exc.HTTPForbidden: - pass - else: - policy_filter.append({"=": {"project_id": project}}) - try: - # Check if the policy allows the user to list resources linked - # to their created_by_project - enforce(rule, { - "resource_type": resource_type, - "created_by_project_id": project, - }) - except webob.exc.HTTPForbidden: - pass - else: - policy_filter.append({"=": {"created_by_project_id": project}}) - - if not policy_filter: - # We need to have at least one policy filter in place - abort(403, "Insufficient privileges") - - return {"or": policy_filter} - - def set_resp_location_hdr(location): location = '%s%s' % (pecan.request.script_name, location) # NOTE(sileht): according the pep-3333 the headers must be @@ -1035,9 +997,8 @@ 
class ResourcesController(rest.RestController): history = get_history(kwargs) pagination_opts = get_pagination_options( kwargs, RESOURCE_DEFAULT_PAGINATION) - user, project = get_user_and_project() - policy_filter = _get_list_resource_policy_filter( - "list resource", self._resource_type, user, project) + policy_filter = pecan.request.auth_helper.get_resource_policy_filter( + "list resource", self._resource_type) try: # FIXME(sileht): next API version should returns @@ -1065,9 +1026,8 @@ class ResourcesController(rest.RestController): abort(400, "caution: the query can not be empty, or it will \ delete entire database") - user, project = get_user_and_project() - policy_filter = _get_list_resource_policy_filter( - "delete resources", self._resource_type, user, project) + policy_filter = pecan.request.auth_helper.get_resource_policy_filter( + "delete resources", self._resource_type) if policy_filter: attr_filter = {"and": [policy_filter, attr_filter]} @@ -1238,9 +1198,8 @@ class SearchResourceTypeController(rest.RestController): pagination_opts = get_pagination_options( kwargs, RESOURCE_DEFAULT_PAGINATION) - user, project = get_user_and_project() - policy_filter = _get_list_resource_policy_filter( - "search resource", self._resource_type, user, project) + policy_filter = pecan.request.auth_helper.get_resource_policy_filter( + "search resource", self._resource_type) if policy_filter: if attr_filter: attr_filter = {"and": [ diff --git a/gnocchi/rest/api-paste.ini b/gnocchi/rest/api-paste.ini index 0dd92d5e..d198362d 100644 --- a/gnocchi/rest/api-paste.ini +++ b/gnocchi/rest/api-paste.ini @@ -1,4 +1,4 @@ -[composite:gnocchi] +[composite:gnocchi+noauth] use = egg:Paste#urlmap / = gnocchiversions_pipeline /v1 = gnocchiv1+noauth diff --git a/gnocchi/rest/app.py b/gnocchi/rest/app.py index 94837919..05658a4b 100644 --- a/gnocchi/rest/app.py +++ b/gnocchi/rest/app.py @@ -23,6 +23,7 @@ from oslo_policy import policy from paste import deploy import pecan from pecan import 
jsonify +from stevedore import driver import webob.exc from gnocchi import exceptions @@ -46,12 +47,16 @@ class GnocchiHook(pecan.hooks.PecanHook): self.indexer = indexer self.conf = conf self.policy_enforcer = policy.Enforcer(conf) + self.auth_helper = driver.DriverManager("gnocchi.rest.auth_helper", + conf.api.auth_mode, + invoke_on_load=True).driver def on_route(self, state): state.request.storage = self.storage state.request.indexer = self.indexer state.request.conf = self.conf state.request.policy_enforcer = self.policy_enforcer + state.request.auth_helper = self.auth_helper class NotImplementedMiddleware(object): @@ -102,8 +107,7 @@ def load_app(conf, indexer=None, storage=None, APPCONFIGS[configkey] = config LOG.info("WSGI config used: %s", cfg_path) - appname = "gnocchi" + ("+" + conf.api.auth_mode - if conf.api.auth_mode else "") + appname = "gnocchi+" + conf.api.auth_mode app = deploy.loadapp("config:" + cfg_path, name=appname, global_conf={'configkey': configkey}) return cors.CORS(app, conf=conf) diff --git a/gnocchi/rest/auth_helper.py b/gnocchi/rest/auth_helper.py new file mode 100644 index 00000000..2edb65e2 --- /dev/null +++ b/gnocchi/rest/auth_helper.py @@ -0,0 +1,63 @@ +# -*- encoding: utf-8 -*- +# +# Copyright © 2016 Red Hat, Inc. +# Copyright © 2014-2015 eNovance +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+import webob + +from gnocchi import rest + + +class KeystoneAuthHelper(object): + @staticmethod + def get_resource_policy_filter(rule, resource_type): + try: + # Check if the policy allows the user to list any resource + rest.enforce(rule, { + "resource_type": resource_type, + }) + except webob.exc.HTTPForbidden: + user, project = rest.get_user_and_project() + policy_filter = [] + try: + # Check if the policy allows the user to list resources linked + # to their project + rest.enforce(rule, { + "resource_type": resource_type, + "project_id": project, + }) + except webob.exc.HTTPForbidden: + pass + else: + policy_filter.append({"=": {"project_id": project}}) + try: + # Check if the policy allows the user to list resources linked + # to their created_by_project + rest.enforce(rule, { + "resource_type": resource_type, + "created_by_project_id": project, + }) + except webob.exc.HTTPForbidden: + pass + else: + policy_filter.append({"=": {"created_by_project_id": project}}) + + if not policy_filter: + # We need to have at least one policy filter in place + rest.abort(403, "Insufficient privileges") + + return {"or": policy_filter} + + +NoAuthHelper = KeystoneAuthHelper diff --git a/gnocchi/tests/gabbi/fixtures.py b/gnocchi/tests/gabbi/fixtures.py index ce14a69a..39a94dc6 100644 --- a/gnocchi/tests/gabbi/fixtures.py +++ b/gnocchi/tests/gabbi/fixtures.py @@ -112,7 +112,7 @@ class ConfigFixture(fixture.GabbiFixture): # Set pagination to a testable value conf.set_override('max_limit', 7, 'api') # Those tests do not use any auth - conf.set_override("auth_mode", None, 'api') + conf.set_override("auth_mode", "noauth", 'api') self.index = index diff --git a/gnocchi/tests/test_rest.py b/gnocchi/tests/test_rest.py index 130af7ce..b352a150 100644 --- a/gnocchi/tests/test_rest.py +++ b/gnocchi/tests/test_rest.py @@ -155,7 +155,7 @@ class RestTest(tests_base.TestCase, testscenarios.TestWithScenarios): if self.auth: self.conf.set_override("auth_mode", "keystone", group="api") else: - 
self.conf.set_override("auth_mode", None, group="api") + self.conf.set_override("auth_mode", "noauth", group="api") self.app = TestingApp(app.load_app(conf=self.conf, indexer=self.index, diff --git a/releasenotes/notes/auth_type_pluggable-76a3c73cac8eec6a.yaml b/releasenotes/notes/auth_type_pluggable-76a3c73cac8eec6a.yaml new file mode 100644 index 00000000..f198eb8a --- /dev/null +++ b/releasenotes/notes/auth_type_pluggable-76a3c73cac8eec6a.yaml @@ -0,0 +1,5 @@ +--- +features: + - >- + The REST API authentication mechanism is now pluggable. You can write your + own plugin to specify how segregation and policy should be enforced. diff --git a/setup.cfg b/setup.cfg index 6e19983c..fbd25736 100644 --- a/setup.cfg +++ b/setup.cfg @@ -122,6 +122,10 @@ gnocchi.indexer = gnocchi.aggregates = moving-average = gnocchi.aggregates.moving_stats:MovingAverage +gnocchi.rest.auth_helper = + noauth = gnocchi.rest.auth_helper:NoAuthHelper + keystone = gnocchi.rest.auth_helper:KeystoneAuthHelper + console_scripts = gnocchi-upgrade = gnocchi.cli:upgrade gnocchi-statsd = gnocchi.cli:statsd -- GitLab From fc10dba6eb98e9eae583b251789e2941dcdfe515 Mon Sep 17 00:00:00 2001 From: Yurii Prokulevych Date: Wed, 14 Dec 2016 10:49:32 +0100 Subject: [PATCH 0534/1483] Tests to confirm resources are searchable. Change-Id: Ifa2e3a3f008595d3c9b5b8ff7cf3a642b58c43e9 --- .../gabbi/gabbits-live/search-resource.yaml | 274 ++++++++++++++++++ 1 file changed, 274 insertions(+) create mode 100644 gnocchi/tests/gabbi/gabbits-live/search-resource.yaml diff --git a/gnocchi/tests/gabbi/gabbits-live/search-resource.yaml b/gnocchi/tests/gabbi/gabbits-live/search-resource.yaml new file mode 100644 index 00000000..1242e19a --- /dev/null +++ b/gnocchi/tests/gabbi/gabbits-live/search-resource.yaml @@ -0,0 +1,274 @@ +# +# Tests to confirm resources are searchable. Run against a live setup. 
+# URL: http://gnocchi.xyz/rest.html#searching-for-resources +# +# Instance-ResourceID-1: a64ca14f-bc7c-45b0-aa85-42cd2179e1e2 +# Instance-ResourceID-2: 7ccccfa0-92ce-4225-80ca-3ac9cb122d6a +# Instance-ResourceID-3: c442a47c-eb33-46ce-9665-f3aa0bef54e7 +# +# UserID-1: 33ba83ca-2f12-4ad6-8fa2-bc8b55d36e07 +# UserID-2: 81d82ef3-4deb-499d-9270-9aeb5a3ec5fe +# +# ProjectID-1: c9a5f184-c0d0-4daa-83c3-af6fdc0879e6 +# ProjectID-2: 40eba01c-b348-49b8-803f-67123251a00a +# +# ImageID-1: 7ab2f7ae-7af5-4469-bdc8-3c0f6dfab75d +# ImageID-2: b01f2588-89dc-46b2-897b-fffae1e10975 +# + +defaults: + request_headers: + x-auth-token: $ENVIRON['GNOCCHI_SERVICE_TOKEN'] + +tests: + # + # Setup resource types if don't exist + # + + - name: create new resource type 'instance' + POST: /v1/resource_type + status: 201 + request_headers: + content-type: application/json + data: + name: instance + attributes: + display_name: + type: string + required: True + flavor_id: + type: string + required: True + host: + type: string + required: True + image_ref: + type: string + required: False + server_group: + type: string + required: False + + - name: create new resource type 'image' + POST: /v1/resource_type + status: 201 + request_headers: + content-type: application/json + data: + name: image + attributes: + name: + type: string + required: True + disk_format: + type: string + required: True + container_format: + type: string + required: True + + # + # Setup test resources + # + - name: helper. create instance resource-1 + POST: /v1/resource/instance + request_headers: + content-type: application/json + data: + display_name: vm-gabbi-1 + id: a64ca14f-bc7c-45b0-aa85-42cd2179e1e2 + user_id: 33ba83ca-2f12-4ad6-8fa2-bc8b55d36e07 + flavor_id: "1" + image_ref: 7ab2f7ae-7af5-4469-bdc8-3c0f6dfab75d + host: compute-0-gabbi.localdomain + project_id: c9a5f184-c0d0-4daa-83c3-af6fdc0879e6 + status: 201 + + - name: helper. 
create instance resource-2 + POST: /v1/resource/instance + request_headers: + content-type: application/json + data: + display_name: vm-gabbi-2 + id: 7ccccfa0-92ce-4225-80ca-3ac9cb122d6a + user_id: 33ba83ca-2f12-4ad6-8fa2-bc8b55d36e07 + flavor_id: "2" + image_ref: b01f2588-89dc-46b2-897b-fffae1e10975 + host: compute-1-gabbi.localdomain + project_id: c9a5f184-c0d0-4daa-83c3-af6fdc0879e6 + status: 201 + + - name: helper. create instance resource-3 + POST: /v1/resource/instance + request_headers: + content-type: application/json + data: + display_name: vm-gabbi-3 + id: c442a47c-eb33-46ce-9665-f3aa0bef54e7 + user_id: 81d82ef3-4deb-499d-9270-9aeb5a3ec5fe + flavor_id: "2" + image_ref: b01f2588-89dc-46b2-897b-fffae1e10975 + host: compute-1-gabbi.localdomain + project_id: 40eba01c-b348-49b8-803f-67123251a00a + status: 201 + + - name: helper. create image resource-1 + POST: /v1/resource/image + request_headers: + content-type: application/json + data: + id: 7ab2f7ae-7af5-4469-bdc8-3c0f6dfab75d + container_format: bare + disk_format: qcow2 + name: gabbi-image-1 + user_id: 81d82ef3-4deb-499d-9270-9aeb5a3ec5fe + project_id: 40eba01c-b348-49b8-803f-67123251a00a + status: 201 + + # + # Actual tests + # + + - name: search for all resources with a specific user_id + desc: search through all resource types + POST: /v1/search/resource/generic + request_headers: + content-type: application/json + data: + =: + user_id: 81d82ef3-4deb-499d-9270-9aeb5a3ec5fe + status: 200 + response_json_paths: + $.`len`: 2 + response_json_paths: + $.[0].type: instance + $.[1].type: image + $.[0].id: c442a47c-eb33-46ce-9665-f3aa0bef54e7 + $.[1].id: 7ab2f7ae-7af5-4469-bdc8-3c0f6dfab75d + + - name: search for all resources of instance type create by specific user_id + desc: all instances created by a specified user + POST: /v1/search/resource/generic + request_headers: + content-type: application/json + data: + and: + - =: + type: instance + - =: + user_id: 33ba83ca-2f12-4ad6-8fa2-bc8b55d36e07 + status: 
200 + response_json_paths: + $.`len`: 2 + response_strings: + - '"id": "a64ca14f-bc7c-45b0-aa85-42cd2179e1e2"' + - '"id": "7ccccfa0-92ce-4225-80ca-3ac9cb122d6a"' + response_json_paths: + $.[0].id: a64ca14f-bc7c-45b0-aa85-42cd2179e1e2 + $.[1].id: 7ccccfa0-92ce-4225-80ca-3ac9cb122d6a + $.[0].type: instance + $.[1].type: instance + $.[0].metrics.`len`: 0 + $.[1].metrics.`len`: 0 + + - name: search for all resources with a specific project_id + desc: search for all resources in a specific project + POST: /v1/search/resource/generic + request_headers: + content-type: application/json + data: + =: + project_id: c9a5f184-c0d0-4daa-83c3-af6fdc0879e6 + status: 200 + response_json_paths: + $.`len`: 2 + + - name: search for intances on a specific compute using "like" keyword + desc: search for vms hosted on a specific compute node + POST: /v1/search/resource/instance + request_headers: + content-type: application/json + data: + like: + host: 'compute-1-gabbi%' + response_json_paths: + $.`len`: 2 + response_strings: + - '"project_id": "40eba01c-b348-49b8-803f-67123251a00a"' + - '"project_id": "c9a5f184-c0d0-4daa-83c3-af6fdc0879e6"' + - '"user_id": "33ba83ca-2f12-4ad6-8fa2-bc8b55d36e07"' + - '"user_id": "81d82ef3-4deb-499d-9270-9aeb5a3ec5fe"' + - '"display_name": "vm-gabbi-2"' + - '"display_name": "vm-gabbi-3"' + + - name: search for instances using complex search with "like" keyword and user_id + desc: search for vms of specified user hosted on a specific compute node + POST: /v1/search/resource/instance + request_headers: + content-type: application/json + data: + and: + - like: + host: 'compute-%-gabbi%' + - =: + user_id: 33ba83ca-2f12-4ad6-8fa2-bc8b55d36e07 + response_json_paths: + $.`len`: 2 + response_strings: + - '"display_name": "vm-gabbi-1"' + - '"display_name": "vm-gabbi-2"' + - '"project_id": "c9a5f184-c0d0-4daa-83c3-af6fdc0879e6"' + + - name: search for resources of instance or image type with specific user_id + desc: search for all image or instance resources 
created by a specific user + POST: /v1/search/resource/generic + request_headers: + content-type: application/json + data: + and: + - =: + user_id: 81d82ef3-4deb-499d-9270-9aeb5a3ec5fe + + - or: + - =: + type: instance + + - =: + type: image + status: 200 + response_json_paths: + $.`len`: 2 + response_strings: + - '"type": "image"' + - '"type": "instance"' + - '"id": "7ab2f7ae-7af5-4469-bdc8-3c0f6dfab75d"' + - '"id": "c442a47c-eb33-46ce-9665-f3aa0bef54e7"' + + # + # Tear down resources + # + + - name: helper. delete instance resource-1 + DELETE: /v1/resource/instance/a64ca14f-bc7c-45b0-aa85-42cd2179e1e2 + status: 204 + + - name: helper. delete instance resource-2 + DELETE: /v1/resource/instance/7ccccfa0-92ce-4225-80ca-3ac9cb122d6a + status: 204 + + - name: helper. delete instance resource-3 + DELETE: /v1/resource/instance/c442a47c-eb33-46ce-9665-f3aa0bef54e7 + status: 204 + + - name: helper. delete image resource + DELETE: /v1/resource/image/7ab2f7ae-7af5-4469-bdc8-3c0f6dfab75d + status: 204 + + - name: helper. delete resource-type instance + DELETE: /v1/resource_type/instance + status: 204 + + - name: helper. delete resource-type image + DELETE: /v1/resource_type/image + status: 204 + -- GitLab From 178a34916231d0bde14a914d216ec7fb0b25c818 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Fri, 6 Jan 2017 11:10:56 +0100 Subject: [PATCH 0535/1483] indexer: fix resource type update Currently when we update a resource type the history table is broken because a column is missing. This change fixes that. 
Closes-bug: #1649261 Change-Id: I938c7263824bae0a01634fa48fa784a91ae49499 --- gnocchi/indexer/sqlalchemy.py | 24 ++++++++++++++---------- gnocchi/tests/test_indexer.py | 23 +++++++++++++++++++++++ 2 files changed, 37 insertions(+), 10 deletions(-) diff --git a/gnocchi/indexer/sqlalchemy.py b/gnocchi/indexer/sqlalchemy.py index c0ed0a2d..df53ee7a 100644 --- a/gnocchi/indexer/sqlalchemy.py +++ b/gnocchi/indexer/sqlalchemy.py @@ -390,6 +390,9 @@ class SQLAlchemyIndexer(indexer.IndexerDriver): del_attributes=None): if not add_attributes and not del_attributes: return + add_attributes = add_attributes or [] + del_attributes = del_attributes or [] + self._set_resource_type_state(name, "updating", "active") try: @@ -399,16 +402,17 @@ class SQLAlchemyIndexer(indexer.IndexerDriver): with self.facade.writer_connection() as connection: ctx = migration.MigrationContext.configure(connection) op = operations.Operations(ctx) - with op.batch_alter_table(rt.tablename) as batch_op: - for attr in del_attributes: - batch_op.drop_column(attr) - for attr in add_attributes: - # TODO(sileht): When attr.required is True, we have - # to pass a default. rest layer current protect us, - # requied = True is not yet allowed - batch_op.add_column(sqlalchemy.Column( - attr.name, attr.satype, - nullable=not attr.required)) + for table in [rt.tablename, '%s_history' % rt.tablename]: + with op.batch_alter_table(table) as batch_op: + for attr in del_attributes: + batch_op.drop_column(attr) + for attr in add_attributes: + # TODO(sileht): When attr.required is True, we + # have to pass a default. 
rest layer current + # protect us, requied = True is not yet allowed + batch_op.add_column(sqlalchemy.Column( + attr.name, attr.satype, + nullable=not attr.required)) rt.state = "active" rt.updated_at = utils.utcnow() diff --git a/gnocchi/tests/test_indexer.py b/gnocchi/tests/test_indexer.py index 148bdecc..53f62c6e 100644 --- a/gnocchi/tests/test_indexer.py +++ b/gnocchi/tests/test_indexer.py @@ -1120,6 +1120,29 @@ class TestIndexerDriver(tests_base.TestCase): self.assertEqual("indexer_test", r.type) self.assertEqual("col1_value", r.col1) + # Update the resource type + add_attrs = mgr.resource_type_from_dict("indexer_test", { + "col2": {"type": "number", "required": False, + "max": 100, "min": 0} + }, "creating").attributes + self.index.update_resource_type("indexer_test", + add_attributes=add_attrs) + + # Check the new attribute + r = self.index.get_resource("indexer_test", rid) + self.assertIsNone(r.col2) + + self.index.update_resource("indexer_test", rid, col2=10) + + rl = self.index.list_resources('indexer_test', + {"=": {"id": rid}}, + history=True, + sorts=['revision_start:asc', + 'started_at:asc']) + self.assertEqual(2, len(rl)) + self.assertIsNone(rl[0].col2) + self.assertEqual(10, rl[1].col2) + # Deletion self.assertRaises(indexer.ResourceTypeInUse, self.index.delete_resource_type, -- GitLab From d55f6f0b01af4f941db5aef002061bb092e9de07 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Fri, 6 Jan 2017 21:30:30 +0100 Subject: [PATCH 0536/1483] ceph: Workaround for oslo.config interpolation bug oslo.config interpolation doesn't work if the interpoled value is None. This change workaround the issue. 
Related-bug: #1654621 Change-Id: I61bebc5d0753d10db5b8bc2c74b661e7e9c81aed --- gnocchi/opts.py | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/gnocchi/opts.py b/gnocchi/opts.py index 69e4d344..bbfc1835 100644 --- a/gnocchi/opts.py +++ b/gnocchi/opts.py @@ -28,6 +28,21 @@ import gnocchi.storage.file import gnocchi.storage.s3 import gnocchi.storage.swift + +# NOTE(sileht): The oslo.config interpolation is buggy when the value +# is None, this replaces it by the expected empty string. +# Fix will perhaps be fixed by https://review.openstack.org/#/c/417496/ +# But it seems some projects are relaying on the bug... +class CustomStrSubWrapper(cfg.ConfigOpts.StrSubWrapper): + def __getitem__(self, key): + value = super(CustomStrSubWrapper, self).__getitem__(key) + if value is None: + return '' + return value + +cfg.ConfigOpts.StrSubWrapper = CustomStrSubWrapper + + _STORAGE_OPTS = list(itertools.chain(gnocchi.storage.ceph.OPTS, gnocchi.storage.file.OPTS, gnocchi.storage.swift.OPTS, -- GitLab From 054b306d7d0911362c14f82c691ebe4b942609a7 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Fri, 6 Jan 2017 10:32:27 +0100 Subject: [PATCH 0537/1483] Create a base exception for resource type error To avoid to catch different types of exception in rest layer. This change transforms any exception due to validation failure in resource_type module into a InvalidResourceAttribute. 
Change-Id: I46d066cf3f7fcf7b17f96545e87a954ff7c515d3 --- gnocchi/resource_type.py | 8 ++++++-- gnocchi/rest/__init__.py | 10 ++++------ 2 files changed, 10 insertions(+), 8 deletions(-) diff --git a/gnocchi/resource_type.py b/gnocchi/resource_type.py index df6f7189..c78ff73e 100644 --- a/gnocchi/resource_type.py +++ b/gnocchi/resource_type.py @@ -32,7 +32,11 @@ INVALID_NAMES = [ VALID_CHARS = re.compile("[a-zA-Z0-9][a-zA-Z0-9_]*") -class InvalidResourceAttributeName(Exception): +class InvalidResourceAttribute(ValueError): + pass + + +class InvalidResourceAttributeName(InvalidResourceAttribute): """Error raised when the resource attribute name is invalid.""" def __init__(self, name): super(InvalidResourceAttributeName, self).__init__( @@ -40,7 +44,7 @@ class InvalidResourceAttributeName(Exception): self.name = name -class InvalidResourceAttributeValue(ValueError): +class InvalidResourceAttributeValue(InvalidResourceAttribute): """Error raised when the resource attribute min is greater than max""" def __init__(self, min, max): super(InvalidResourceAttributeValue, self).__init__( diff --git a/gnocchi/rest/__init__.py b/gnocchi/rest/__init__.py index aaad46d1..138c3a5d 100644 --- a/gnocchi/rest/__init__.py +++ b/gnocchi/rest/__init__.py @@ -778,8 +778,8 @@ class ResourceTypeController(rest.RestController): try: add_attrs = schema.attributes_from_dict(add_attrs) - except resource_type.InvalidResourceAttributeName as e: - abort(400, e) + except resource_type.InvalidResourceAttribute as e: + abort(400, "Invalid input: %s" % e) # TODO(sileht): Add a default field on an attribute # to be able to fill non-nullable column on sql side. 
@@ -824,10 +824,8 @@ class ResourceTypesController(rest.RestController): try: rt = schema.resource_type_from_dict(**body) - except resource_type.InvalidResourceAttributeName as e: - abort(400, e) - except resource_type.InvalidResourceAttributeValue as e: - abort(400, e) + except resource_type.InvalidResourceAttribute as e: + abort(400, "Invalid input: %s" % e) enforce("create resource type", body) try: -- GitLab From 020dce5c790e4be3251f58ce2081c084cb85d263 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Tue, 10 Jan 2017 16:56:39 +0100 Subject: [PATCH 0538/1483] test: remove unnused method Change-Id: Ie27274cf48d927c4603868b7615e46bcf7fd8f8c --- gnocchi/tests/test_rest.py | 11 ----------- 1 file changed, 11 deletions(-) diff --git a/gnocchi/tests/test_rest.py b/gnocchi/tests/test_rest.py index b352a150..858ccb2d 100644 --- a/gnocchi/tests/test_rest.py +++ b/gnocchi/tests/test_rest.py @@ -97,17 +97,6 @@ class TestingApp(webtest.TestApp): finally: self.token = old_token - @contextlib.contextmanager - def use_no_token(self): - # We don't skip for no self.auth to ensure - # some test returns the same thing with auth or not - old_token = self.token - self.token = None - try: - yield - finally: - self.token = old_token - def do_request(self, req, *args, **kwargs): if self.auth and self.token is not None: req.headers['X-Auth-Token'] = self.token -- GitLab From 39265b4916c967a56ff379634545517bc879a12c Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Mon, 28 Nov 2016 14:11:07 +0100 Subject: [PATCH 0539/1483] Merge project and user id in a creator field This patches merge the user and project identifier in a single field which is creator (for created_by_*) for resources and metrics. The old fields are still returned, and still works as they are translated to the equivalent creator fields. 
Change-Id: I05dad874218aa9139a83e73c5ba2af638501ad9e --- gnocchi/indexer/__init__.py | 8 +- .../aba5a217ca9b_merge_created_in_creator.py | 53 ++++ gnocchi/indexer/sqlalchemy.py | 41 ++- gnocchi/indexer/sqlalchemy_base.py | 32 +- gnocchi/opts.py | 6 + gnocchi/resource_type.py | 3 +- gnocchi/rest/__init__.py | 78 +++-- gnocchi/rest/auth_helper.py | 50 ++- gnocchi/statsd.py | 11 +- gnocchi/storage/__init__.py | 9 +- gnocchi/tests/gabbi/gabbits/async.yaml | 3 + gnocchi/tests/test_aggregates.py | 4 +- gnocchi/tests/test_indexer.py | 299 +++++++++--------- gnocchi/tests/test_rest.py | 4 + gnocchi/tests/test_statsd.py | 4 +- gnocchi/tests/test_storage.py | 7 +- .../notes/creator_field-6b715c917f6afc93.yaml | 6 + 17 files changed, 361 insertions(+), 257 deletions(-) create mode 100644 gnocchi/indexer/alembic/versions/aba5a217ca9b_merge_created_in_creator.py create mode 100644 releasenotes/notes/creator_field-6b715c917f6afc93.yaml diff --git a/gnocchi/indexer/__init__.py b/gnocchi/indexer/__init__.py index 36ba19b8..af8f43fa 100644 --- a/gnocchi/indexer/__init__.py +++ b/gnocchi/indexer/__init__.py @@ -49,8 +49,7 @@ class Resource(object): and self.revision == other.revision and self.revision_start == other.revision_start and self.revision_end == other.revision_end - and self.created_by_user_id == other.created_by_user_id - and self.created_by_project_id == other.created_by_project_id + and self.creator == other.creator and self.user_id == other.user_id and self.project_id == other.project_id and self.started_at == other.started_at @@ -330,7 +329,7 @@ class IndexerDriver(object): raise exceptions.NotImplementedError @staticmethod - def create_metric(id, created_by_user_id, created_by_project_id, + def create_metric(id, creator, archive_policy_name, name=None, unit=None, resource_id=None): raise exceptions.NotImplementedError @@ -345,7 +344,8 @@ class IndexerDriver(object): raise exceptions.NotImplementedError @staticmethod - def create_resource(resource_type, id, user_id, 
project_id, + def create_resource(resource_type, id, creator, + user_id=None, project_id=None, started_at=None, ended_at=None, metrics=None, **kwargs): raise exceptions.NotImplementedError diff --git a/gnocchi/indexer/alembic/versions/aba5a217ca9b_merge_created_in_creator.py b/gnocchi/indexer/alembic/versions/aba5a217ca9b_merge_created_in_creator.py new file mode 100644 index 00000000..72339057 --- /dev/null +++ b/gnocchi/indexer/alembic/versions/aba5a217ca9b_merge_created_in_creator.py @@ -0,0 +1,53 @@ +# Copyright 2016 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""merge_created_in_creator + +Revision ID: aba5a217ca9b +Revises: 5c4f93e5bb4 +Create Date: 2016-12-06 17:40:25.344578 + +""" + +from alembic import op +import sqlalchemy as sa + + +# revision identifiers, used by Alembic. 
+revision = 'aba5a217ca9b' +down_revision = '5c4f93e5bb4' +branch_labels = None +depends_on = None + + +def upgrade(): + for table_name in ("resource", "resource_history", "metric"): + creator_col = sa.Column("creator", sa.String(255)) + created_by_user_id_col = sa.Column("created_by_user_id", + sa.String(255)) + created_by_project_id_col = sa.Column("created_by_project_id", + sa.String(255)) + op.add_column(table_name, creator_col) + t = sa.sql.table( + table_name, creator_col, + created_by_user_id_col, created_by_project_id_col) + op.execute( + t.update().values( + creator=( + created_by_user_id_col + ":" + created_by_project_id_col + )).where((created_by_user_id_col is not None) + | (created_by_project_id_col is not None))) + op.drop_column(table_name, "created_by_user_id") + op.drop_column(table_name, "created_by_project_id") diff --git a/gnocchi/indexer/sqlalchemy.py b/gnocchi/indexer/sqlalchemy.py index df53ee7a..1e805ecd 100644 --- a/gnocchi/indexer/sqlalchemy.py +++ b/gnocchi/indexer/sqlalchemy.py @@ -640,12 +640,10 @@ class SQLAlchemyIndexer(indexer.IndexerDriver): return apr @retry_on_deadlock - def create_metric(self, id, created_by_user_id, created_by_project_id, - archive_policy_name, + def create_metric(self, id, creator, archive_policy_name, name=None, unit=None, resource_id=None): m = Metric(id=id, - created_by_user_id=created_by_user_id, - created_by_project_id=created_by_project_id, + creator=creator, archive_policy_name=archive_policy_name, name=name, unit=unit, @@ -711,8 +709,7 @@ class SQLAlchemyIndexer(indexer.IndexerDriver): @retry_on_deadlock def create_resource(self, resource_type, id, - created_by_user_id, created_by_project_id, - user_id=None, project_id=None, + creator, user_id=None, project_id=None, started_at=None, ended_at=None, metrics=None, **kwargs): if (started_at is not None @@ -726,8 +723,7 @@ class SQLAlchemyIndexer(indexer.IndexerDriver): r = resource_cls( id=id, type=resource_type, - created_by_user_id=created_by_user_id, - 
created_by_project_id=created_by_project_id, + creator=creator, user_id=user_id, project_id=project_id, started_at=started_at, @@ -830,10 +826,7 @@ class SQLAlchemyIndexer(indexer.IndexerDriver): update = session.query(Metric).filter( Metric.id == value, Metric.status == 'active', - (Metric.created_by_user_id - == r.created_by_user_id), - (Metric.created_by_project_id - == r.created_by_project_id), + Metric.creator == r.creator, ).update({"resource_id": r.id, "name": name}) except exception.DBDuplicateEntry: raise indexer.NamedMetricAlreadyExists(name) @@ -843,8 +836,7 @@ class SQLAlchemyIndexer(indexer.IndexerDriver): unit = value.get('unit') ap_name = value['archive_policy_name'] m = Metric(id=uuid.uuid4(), - created_by_user_id=r.created_by_user_id, - created_by_project_id=r.created_by_project_id, + creator=r.creator, archive_policy_name=ap_name, name=name, unit=unit, @@ -1146,8 +1138,8 @@ class QueryTransformer(object): def _handle_unary_op(cls, engine, table, op, node): return op(cls.build_filter(engine, table, node)) - @staticmethod - def _handle_binary_op(engine, table, op, nodes): + @classmethod + def _handle_binary_op(cls, engine, table, op, nodes): try: field_name, value = list(nodes.items())[0] except Exception: @@ -1161,6 +1153,23 @@ class QueryTransformer(object): # weird results based on string comparison. It's useless and it # does not work at all with seconds or anything. Just skip it. 
raise exceptions.NotImplementedError + elif field_name == "created_by_user_id": + creator = getattr(table, "creator") + if op == operator.eq: + return creator.like("%s:%%" % value) + elif op == operator.ne: + return sqlalchemy.not_(creator.like("%s:%%" % value)) + elif op == cls.binary_operators[u"like"]: + return creator.like("%s:%%" % value) + raise indexer.QueryValueError(value, field_name) + elif field_name == "created_by_project_id": + if op == operator.eq: + return creator.like("%%:%s" % value) + elif op == operator.ne: + return sqlalchemy.not_(creator.like("%%:%s" % value)) + elif op == cls.binary_operators[u"like"]: + return creator.like("%%:%s" % value) + raise indexer.QueryValueError(value, field_name) else: try: attr = getattr(table, field_name) diff --git a/gnocchi/indexer/sqlalchemy_base.py b/gnocchi/indexer/sqlalchemy_base.py index 74fc33f6..db1a1408 100644 --- a/gnocchi/indexer/sqlalchemy_base.py +++ b/gnocchi/indexer/sqlalchemy_base.py @@ -174,10 +174,7 @@ class Metric(Base, GnocchiBase, storage.Metric): name="fk_metric_ap_name_ap_name"), nullable=False) archive_policy = sqlalchemy.orm.relationship(ArchivePolicy, lazy="joined") - created_by_user_id = sqlalchemy.Column( - sqlalchemy.String(255)) - created_by_project_id = sqlalchemy.Column( - sqlalchemy.String(255)) + creator = sqlalchemy.Column(sqlalchemy.String(255)) resource_id = sqlalchemy.Column( sqlalchemy_utils.UUIDType(), sqlalchemy.ForeignKey('resource.id', @@ -193,8 +190,7 @@ class Metric(Base, GnocchiBase, storage.Metric): def jsonify(self): d = { "id": self.id, - "created_by_user_id": self.created_by_user_id, - "created_by_project_id": self.created_by_project_id, + "creator": self.creator, "name": self.name, "unit": self.unit, } @@ -207,6 +203,14 @@ class Metric(Base, GnocchiBase, storage.Metric): d['archive_policy_name'] = self.archive_policy_name else: d['archive_policy'] = self.archive_policy + + if self.creator is None: + d['created_by_user_id'] = d['created_by_project_id'] = None + 
else: + d['created_by_user_id'], _, d['created_by_project_id'] = ( + self.creator.partition(":") + ) + return d def __eq__(self, other): @@ -217,8 +221,7 @@ class Metric(Base, GnocchiBase, storage.Metric): return ((isinstance(other, Metric) and self.id == other.id and self.archive_policy_name == other.archive_policy_name - and self.created_by_user_id == other.created_by_user_id - and self.created_by_project_id == other.created_by_project_id + and self.creator == other.creator and self.name == other.name and self.unit == other.unit and self.resource_id == other.resource_id) @@ -298,6 +301,14 @@ class ResourceJsonifier(indexer.Resource): if 'metrics' not in sqlalchemy.inspect(self).unloaded: d['metrics'] = dict((m.name, six.text_type(m.id)) for m in self.metrics) + + if self.creator is None: + d['created_by_user_id'] = d['created_by_project_id'] = None + else: + d['created_by_user_id'], _, d['created_by_project_id'] = ( + self.creator.partition(":") + ) + return d @@ -318,10 +329,7 @@ class ResourceMixin(ResourceJsonifier): cls.__tablename__), nullable=False) - created_by_user_id = sqlalchemy.Column( - sqlalchemy.String(255)) - created_by_project_id = sqlalchemy.Column( - sqlalchemy.String(255)) + creator = sqlalchemy.Column(sqlalchemy.String(255)) started_at = sqlalchemy.Column(TimestampUTC, nullable=False, default=lambda: utils.utcnow()) revision_start = sqlalchemy.Column(TimestampUTC, nullable=False, diff --git a/gnocchi/opts.py b/gnocchi/opts.py index bbfc1835..02cdb3f4 100644 --- a/gnocchi/opts.py +++ b/gnocchi/opts.py @@ -110,10 +110,16 @@ def list_opts(): help='Resource UUID to use to identify statsd in Gnocchi'), cfg.StrOpt( 'user_id', + deprecated_for_removal=True, help='User ID to use to identify statsd in Gnocchi'), cfg.StrOpt( 'project_id', + deprecated_for_removal=True, help='Project ID to use to identify statsd in Gnocchi'), + cfg.StrOpt( + 'creator', + default="${statsd.user_id}:${statsd.project_id}", + help='Creator value to use to identify statsd in 
Gnocchi'), cfg.StrOpt( 'archive_policy_name', help='Archive policy name to use when creating metrics'), diff --git a/gnocchi/resource_type.py b/gnocchi/resource_type.py index c78ff73e..ad1bcddb 100644 --- a/gnocchi/resource_type.py +++ b/gnocchi/resource_type.py @@ -26,7 +26,8 @@ INVALID_NAMES = [ "revision", "revision_start", "revision_end", "started_at", "ended_at", "user_id", "project_id", - "created_by_user_id", "created_by_project_id", "get_metric" + "created_by_user_id", "created_by_project_id", "get_metric", + "creator", ] VALID_CHARS = re.compile("[a-zA-Z0-9][a-zA-Z0-9_]*") diff --git a/gnocchi/rest/__init__.py b/gnocchi/rest/__init__.py index 138c3a5d..f534d60f 100644 --- a/gnocchi/rest/__init__.py +++ b/gnocchi/rest/__init__.py @@ -57,13 +57,6 @@ def abort(status_code, detail='', headers=None, comment=None, **kw): return pecan.abort(status_code, detail, headers, comment, **kw) -def get_user_and_project(): - headers = pecan.request.headers - user_id = headers.get("X-User-Id") - project_id = headers.get("X-Project-Id") - return (user_id, project_id) - - def enforce(rule, target): """Return the user and project the request should be limited to. @@ -71,17 +64,13 @@ def enforce(rule, target): :param target: The target to enforce on. 
""" - headers = pecan.request.headers - user_id, project_id = get_user_and_project() - creds = { - 'roles': headers.get("X-Roles", "").split(","), - 'user_id': user_id, - 'project_id': project_id, - 'domain_id': headers.get("X-Domain-Id", ""), - } + creds = pecan.request.auth_helper.get_auth_info(pecan.request.headers) if not isinstance(target, dict): - target = target.__dict__ + if hasattr(target, "jsonify"): + target = target.jsonify() + else: + target = target.__dict__ # Flatten dict target = dict(dictutils.flatten_dict_to_keypairs(d=target, separator='.')) @@ -514,11 +503,11 @@ class MetricsController(rest.RestController): else: definition['archive_policy_name'] = ap.name - user_id, project_id = get_user_and_project() + creator = pecan.request.auth_helper.get_current_user( + pecan.request.headers) enforce("create metric", { - "created_by_user_id": user_id, - "created_by_project_id": project_id, + "creator": creator, "archive_policy_name": archive_policy_name, "name": name, "unit": definition.get('unit'), @@ -528,12 +517,13 @@ class MetricsController(rest.RestController): @pecan.expose('json') def post(self): - user, project = get_user_and_project() + creator = pecan.request.auth_helper.get_current_user( + pecan.request.headers) body = deserialize_and_validate(self.MetricSchema) try: m = pecan.request.indexer.create_metric( uuid.uuid4(), - user, project, + creator, name=body.get('name'), unit=body.get('unit'), archive_policy_name=body['archive_policy_name']) @@ -546,25 +536,28 @@ class MetricsController(rest.RestController): @staticmethod @pecan.expose('json') def get_all(**kwargs): + # Compat with old user/project API + provided_user_id = kwargs.get('user_id') + provided_project_id = kwargs.get('project_id') + if provided_user_id is None and provided_project_id is None: + provided_creator = kwargs.get('creator') + else: + provided_creator = ( + (provided_user_id or "") + + ":" + + (provided_project_id or "") + ) try: enforce("list all metric", {}) except 
webob.exc.HTTPForbidden: enforce("list metric", {}) - user_id, project_id = get_user_and_project() - provided_user_id = kwargs.get('user_id') - provided_project_id = kwargs.get('project_id') - if ((provided_user_id and user_id != provided_user_id) - or (provided_project_id and project_id != provided_project_id)): - abort( - 403, "Insufficient privileges to filter by user/project") - else: - user_id = kwargs.get('user_id') - project_id = kwargs.get('project_id') + creator = pecan.request.auth_helper.get_current_user( + pecan.request.headers) + if provided_creator and creator != provided_creator: + abort(403, "Insufficient privileges to filter by user/project") attr_filter = {} - if user_id is not None: - attr_filter['created_by_user_id'] = user_id - if project_id is not None: - attr_filter['created_by_project_id'] = project_id + if provided_creator is not None: + attr_filter['creator'] = provided_creator attr_filter.update(get_pagination_options( kwargs, METRIC_DEFAULT_PAGINATION)) try: @@ -969,12 +962,13 @@ class ResourcesController(rest.RestController): } target.update(body) enforce("create resource", target) - user, project = get_user_and_project() + creator = pecan.request.auth_helper.get_current_user( + pecan.request.headers) rid = body['id'] del body['id'] try: resource = pecan.request.indexer.create_resource( - self._resource_type, rid, user, project, + self._resource_type, rid, creator, **body) except (ValueError, indexer.NoSuchMetric, @@ -996,7 +990,7 @@ class ResourcesController(rest.RestController): pagination_opts = get_pagination_options( kwargs, RESOURCE_DEFAULT_PAGINATION) policy_filter = pecan.request.auth_helper.get_resource_policy_filter( - "list resource", self._resource_type) + pecan.request.headers, "list resource", self._resource_type) try: # FIXME(sileht): next API version should returns @@ -1025,6 +1019,7 @@ class ResourcesController(rest.RestController): delete entire database") policy_filter = 
pecan.request.auth_helper.get_resource_policy_filter( + pecan.request.headers, "delete resources", self._resource_type) if policy_filter: @@ -1197,7 +1192,7 @@ class SearchResourceTypeController(rest.RestController): kwargs, RESOURCE_DEFAULT_PAGINATION) policy_filter = pecan.request.auth_helper.get_resource_policy_filter( - "search resource", self._resource_type) + pecan.request.headers, "search resource", self._resource_type) if policy_filter: if attr_filter: attr_filter = {"and": [ @@ -1352,7 +1347,8 @@ class ResourcesMetricsMeasuresBatchController(rest.RestController): known_names = [m.name for m in metrics] if strutils.bool_from_string(create_metrics): - user_id, project_id = get_user_and_project() + creator = pecan.request.auth_helper.get_current_user( + pecan.request.headers) already_exists_names = [] for name in names: if name not in known_names: @@ -1362,7 +1358,7 @@ class ResourcesMetricsMeasuresBatchController(rest.RestController): try: m = pecan.request.indexer.create_metric( uuid.uuid4(), - user_id, project_id, + creator=creator, resource_id=resource_id, name=metric.get('name'), unit=metric.get('unit'), diff --git a/gnocchi/rest/auth_helper.py b/gnocchi/rest/auth_helper.py index 2edb65e2..bb4a2d99 100644 --- a/gnocchi/rest/auth_helper.py +++ b/gnocchi/rest/auth_helper.py @@ -21,37 +21,62 @@ from gnocchi import rest class KeystoneAuthHelper(object): @staticmethod - def get_resource_policy_filter(rule, resource_type): + def get_current_user(headers): + # FIXME(jd) should have domain but should not break existing :( + user_id = headers.get("X-User-Id", "") + project_id = headers.get("X-Project-Id", "") + return user_id + ":" + project_id + + @staticmethod + def get_auth_info(headers): + user_id = headers.get("X-User-Id") + project_id = headers.get("X-Project-Id") + return { + "user": (user_id or "") + ":" + (project_id or ""), + "user_id": user_id, + "project_id": project_id, + 'domain_id': headers.get("X-Domain-Id"), + 'roles': headers.get("X-Roles", 
"").split(","), + } + + @staticmethod + def get_resource_policy_filter(headers, rule, resource_type): try: # Check if the policy allows the user to list any resource rest.enforce(rule, { "resource_type": resource_type, }) except webob.exc.HTTPForbidden: - user, project = rest.get_user_and_project() policy_filter = [] + project_id = headers.get("X-Project-Id") + try: # Check if the policy allows the user to list resources linked # to their project rest.enforce(rule, { "resource_type": resource_type, - "project_id": project, + "project_id": project_id, }) except webob.exc.HTTPForbidden: pass else: - policy_filter.append({"=": {"project_id": project}}) + policy_filter.append({"=": {"project_id": project_id}}) + try: # Check if the policy allows the user to list resources linked # to their created_by_project rest.enforce(rule, { "resource_type": resource_type, - "created_by_project_id": project, + "created_by_project_id": project_id, }) except webob.exc.HTTPForbidden: pass else: - policy_filter.append({"=": {"created_by_project_id": project}}) + if project_id: + policy_filter.append( + {"like": {"creator": "%:" + project_id}}) + else: + policy_filter.append({"=": {"creator": None}}) if not policy_filter: # We need to have at least one policy filter in place @@ -60,4 +85,15 @@ class KeystoneAuthHelper(object): return {"or": policy_filter} -NoAuthHelper = KeystoneAuthHelper +class NoAuthHelper(KeystoneAuthHelper): + @staticmethod + def get_current_user(headers): + # FIXME(jd) Should be a single header + user_id = headers.get("X-User-Id") + project_id = headers.get("X-Project-Id") + if user_id: + if project_id: + return user_id + ":" + project_id + return user_id + if project_id: + return project_id diff --git a/gnocchi/statsd.py b/gnocchi/statsd.py index 87f6595b..267df497 100644 --- a/gnocchi/statsd.py +++ b/gnocchi/statsd.py @@ -41,8 +41,7 @@ class Stats(object): try: self.indexer.create_resource('generic', self.conf.statsd.resource_id, - self.conf.statsd.user_id, - 
self.conf.statsd.project_id) + self.conf.statsd.creator) except indexer.ResourceAlreadyExists: LOG.debug("Resource %s already exists", self.conf.statsd.resource_id) @@ -107,8 +106,7 @@ class Stats(object): ap_name = self._get_archive_policy_name(metric_name) metric = self.indexer.create_metric( uuid.uuid4(), - self.conf.statsd.user_id, - self.conf.statsd.project_id, + self.conf.statsd.creator, archive_policy_name=ap_name, name=metric_name, resource_id=self.conf.statsd.resource_id) @@ -167,9 +165,8 @@ class StatsdServer(object): def start(): conf = service.prepare_service() - for field in ["resource_id", "user_id", "project_id"]: - if conf.statsd[field] is None: - raise cfg.RequiredOptError(field, cfg.OptGroup("statsd")) + if conf.statsd.resource_id is None: + raise cfg.RequiredOptError("resource_id", cfg.OptGroup("statsd")) stats = Stats(conf) diff --git a/gnocchi/storage/__init__.py b/gnocchi/storage/__init__.py index bb3f000f..958f0065 100644 --- a/gnocchi/storage/__init__.py +++ b/gnocchi/storage/__init__.py @@ -44,14 +44,12 @@ class Measure(object): class Metric(object): def __init__(self, id, archive_policy, - created_by_user_id=None, - created_by_project_id=None, + creator=None, name=None, resource_id=None): self.id = id self.archive_policy = archive_policy - self.created_by_user_id = created_by_user_id - self.created_by_project_id = created_by_project_id + self.creator = creator self.name = name self.resource_id = resource_id @@ -65,8 +63,7 @@ class Metric(object): return (isinstance(other, Metric) and self.id == other.id and self.archive_policy == other.archive_policy - and self.created_by_user_id == other.created_by_user_id - and self.created_by_project_id == other.created_by_project_id + and self.creator == other.creator and self.name == other.name and self.resource_id == other.resource_id) diff --git a/gnocchi/tests/gabbi/gabbits/async.yaml b/gnocchi/tests/gabbi/gabbits/async.yaml index d31e4692..fd2f97ae 100644 --- 
a/gnocchi/tests/gabbi/gabbits/async.yaml +++ b/gnocchi/tests/gabbi/gabbits/async.yaml @@ -59,6 +59,9 @@ tests: - name: get some measures GET: /v1/resource/generic/41937416-1644-497d-a0ed-b43d55a2b0ea/metric/some.counter/measures + request_headers: + x-user-id: edca16f4-684e-4a91-85e9-0c1ceecdd147 + x-project-id: e8459971-fae4-4670-8ed3-55dd9139d26d poll: count: 50 delay: .1 diff --git a/gnocchi/tests/test_aggregates.py b/gnocchi/tests/test_aggregates.py index ba2d6246..9fc0b9d5 100644 --- a/gnocchi/tests/test_aggregates.py +++ b/gnocchi/tests/test_aggregates.py @@ -58,9 +58,7 @@ class TestAggregates(tests_base.TestCase): measures = [storage.Measure( utils.dt_in_unix_ns(start_time + incr * n), val) for n, val in enumerate(data)] - self.index.create_metric(metric.id, - str(uuid.uuid4()), str(uuid.uuid4()), - 'medium') + self.index.create_metric(metric.id, str(uuid.uuid4()), 'medium') self.storage.incoming.add_measures(metric, measures) metrics = self.storage.incoming.list_metric_with_measures_to_process( None, None, full=True) diff --git a/gnocchi/tests/test_indexer.py b/gnocchi/tests/test_indexer.py index 53f62c6e..80d0d0e6 100644 --- a/gnocchi/tests/test_indexer.py +++ b/gnocchi/tests/test_indexer.py @@ -117,8 +117,7 @@ class TestIndexerDriver(tests_base.TestCase): self.index.delete_archive_policy, str(uuid.uuid4())) metric_id = uuid.uuid4() - self.index.create_metric(metric_id, str(uuid.uuid4()), - str(uuid.uuid4()), "low") + self.index.create_metric(metric_id, str(uuid.uuid4()), "low") self.assertRaises(indexer.ArchivePolicyInUse, self.index.delete_archive_policy, "low") @@ -150,12 +149,10 @@ class TestIndexerDriver(tests_base.TestCase): def test_create_metric(self): r1 = uuid.uuid4() - user = str(uuid.uuid4()) - project = str(uuid.uuid4()) - m = self.index.create_metric(r1, user, project, "low") + creator = str(uuid.uuid4()) + m = self.index.create_metric(r1, creator, "low") self.assertEqual(r1, m.id) - self.assertEqual(m.created_by_user_id, user) - 
self.assertEqual(m.created_by_project_id, project) + self.assertEqual(m.creator, creator) self.assertIsNone(m.name) self.assertIsNone(m.unit) self.assertIsNone(m.resource_id) @@ -166,29 +163,26 @@ class TestIndexerDriver(tests_base.TestCase): m1 = uuid.uuid4() r1 = uuid.uuid4() name = "foobar" - user = str(uuid.uuid4()) - project = str(uuid.uuid4()) - self.index.create_resource('generic', r1, user, project) - m = self.index.create_metric(m1, user, project, "low", + creator = str(uuid.uuid4()) + self.index.create_resource('generic', r1, creator) + m = self.index.create_metric(m1, creator, "low", name=name, resource_id=r1) self.assertEqual(m1, m.id) - self.assertEqual(m.created_by_user_id, user) - self.assertEqual(m.created_by_project_id, project) + self.assertEqual(m.creator, creator) self.assertEqual(name, m.name) self.assertEqual(r1, m.resource_id) m2 = self.index.list_metrics(id=m1) self.assertEqual([m], m2) self.assertRaises(indexer.NamedMetricAlreadyExists, - self.index.create_metric, m1, user, project, "low", + self.index.create_metric, m1, creator, "low", name=name, resource_id=r1) def test_expunge_metric(self): r1 = uuid.uuid4() - user = str(uuid.uuid4()) - project = str(uuid.uuid4()) - m = self.index.create_metric(r1, user, project, "low") + creator = str(uuid.uuid4()) + m = self.index.create_metric(r1, creator, "low") self.index.delete_metric(m.id) try: self.index.expunge_metric(m.id) @@ -205,15 +199,41 @@ class TestIndexerDriver(tests_base.TestCase): m.id) def test_create_resource(self): + r1 = uuid.uuid4() + creator = str(uuid.uuid4()) + rc = self.index.create_resource('generic', r1, creator) + self.assertIsNotNone(rc.started_at) + self.assertIsNotNone(rc.revision_start) + self.assertEqual({"id": r1, + "revision_start": rc.revision_start, + "revision_end": None, + "creator": creator, + "created_by_user_id": creator, + "created_by_project_id": "", + "user_id": None, + "project_id": None, + "started_at": rc.started_at, + "ended_at": None, + 
"original_resource_id": None, + "type": "generic", + "metrics": {}}, + rc.jsonify()) + rg = self.index.get_resource('generic', r1, with_metrics=True) + self.assertEqual(rc, rg) + self.assertEqual(rc.metrics, rg.metrics) + + def test_split_user_project_for_legacy_reasons(self): r1 = uuid.uuid4() user = str(uuid.uuid4()) project = str(uuid.uuid4()) - rc = self.index.create_resource('generic', r1, user, project) + creator = user + ":" + project + rc = self.index.create_resource('generic', r1, creator) self.assertIsNotNone(rc.started_at) self.assertIsNotNone(rc.revision_start) self.assertEqual({"id": r1, "revision_start": rc.revision_start, "revision_end": None, + "creator": creator, "created_by_user_id": user, "created_by_project_id": project, "user_id": None, @@ -241,19 +261,17 @@ class TestIndexerDriver(tests_base.TestCase): def test_create_resource_already_exists(self): r1 = uuid.uuid4() - user = str(uuid.uuid4()) - project = str(uuid.uuid4()) - self.index.create_resource('generic', r1, user, project) + creator = str(uuid.uuid4()) + self.index.create_resource('generic', r1, creator) self.assertRaises(indexer.ResourceAlreadyExists, self.index.create_resource, - 'generic', r1, user, project) + 'generic', r1, creator) def test_create_resource_with_new_metrics(self): r1 = uuid.uuid4() - user = str(uuid.uuid4()) - project = str(uuid.uuid4()) + creator = str(uuid.uuid4()) rc = self.index.create_resource( - 'generic', r1, user, project, + 'generic', r1, creator, metrics={"foobar": {"archive_policy_name": "low"}}) self.assertEqual(1, len(rc.metrics)) m = self.index.list_metrics(id=rc.metrics[0].id) @@ -269,16 +287,13 @@ class TestIndexerDriver(tests_base.TestCase): r1) def test_delete_resource_with_metrics(self): - user = str(uuid.uuid4()) - project = str(uuid.uuid4()) + creator = str(uuid.uuid4()) e1 = uuid.uuid4() e2 = uuid.uuid4() - self.index.create_metric(e1, user, project, - archive_policy_name="low") - self.index.create_metric(e2, user, project, - 
archive_policy_name="low") + self.index.create_metric(e1, creator, archive_policy_name="low") + self.index.create_metric(e2, creator, archive_policy_name="low") r1 = uuid.uuid4() - self.index.create_resource('generic', r1, user, project, + self.index.create_resource('generic', r1, creator, metrics={'foo': e1, 'bar': e2}) self.index.delete_resource(r1) self.assertRaises(indexer.NoSuchResource, @@ -296,17 +311,14 @@ class TestIndexerDriver(tests_base.TestCase): def test_create_resource_with_start_timestamp(self): r1 = uuid.uuid4() ts = utils.datetime_utc(2014, 1, 1, 23, 34, 23, 1234) - user = str(uuid.uuid4()) - project = str(uuid.uuid4()) - rc = self.index.create_resource( - 'generic', - r1, user, project, - started_at=ts) + creator = str(uuid.uuid4()) + rc = self.index.create_resource('generic', r1, creator, started_at=ts) self.assertEqual({"id": r1, "revision_start": rc.revision_start, "revision_end": None, - "created_by_user_id": user, - "created_by_project_id": project, + "creator": creator, + "created_by_user_id": creator, + "created_by_project_id": "", "user_id": None, "project_id": None, "started_at": ts, @@ -321,23 +333,21 @@ class TestIndexerDriver(tests_base.TestCase): r1 = uuid.uuid4() e1 = uuid.uuid4() e2 = uuid.uuid4() - user = str(uuid.uuid4()) - project = str(uuid.uuid4()) - self.index.create_metric(e1, - user, project, + creator = str(uuid.uuid4()) + self.index.create_metric(e1, creator, archive_policy_name="low") - self.index.create_metric(e2, - user, project, + self.index.create_metric(e2, creator, archive_policy_name="low") - rc = self.index.create_resource('generic', r1, user, project, + rc = self.index.create_resource('generic', r1, creator, metrics={'foo': e1, 'bar': e2}) self.assertIsNotNone(rc.started_at) self.assertIsNotNone(rc.revision_start) self.assertEqual({"id": r1, "revision_start": rc.revision_start, "revision_end": None, - "created_by_user_id": user, - "created_by_project_id": project, + "creator": creator, + "created_by_user_id": 
creator, + "created_by_project_id": "", "user_id": None, "project_id": None, "started_at": rc.started_at, @@ -351,8 +361,9 @@ class TestIndexerDriver(tests_base.TestCase): self.assertEqual({"id": r1, "revision_start": r.revision_start, "revision_end": None, - "created_by_user_id": user, - "created_by_project_id": project, + "creator": creator, + "created_by_user_id": creator, + "created_by_project_id": "", "type": "generic", "started_at": rc.started_at, "ended_at": None, @@ -373,9 +384,8 @@ class TestIndexerDriver(tests_base.TestCase): def test_update_resource_end_timestamp(self): r1 = uuid.uuid4() - user = str(uuid.uuid4()) - project = str(uuid.uuid4()) - self.index.create_resource('generic', r1, user, project) + creator = str(uuid.uuid4()) + self.index.create_resource('generic', r1, creator) self.index.update_resource( 'generic', r1, @@ -387,8 +397,7 @@ class TestIndexerDriver(tests_base.TestCase): self.assertIsNone(r.revision_end) self.assertIsNotNone(r.revision_start) self.assertEqual(r1, r.id) - self.assertEqual(user, r.created_by_user_id) - self.assertEqual(project, r.created_by_project_id) + self.assertEqual(creator, r.creator) self.assertEqual(utils.datetime_utc(2043, 1, 1, 2, 3, 4), r.ended_at) self.assertEqual("generic", r.type) self.assertEqual(0, len(r.metrics)) @@ -403,8 +412,9 @@ class TestIndexerDriver(tests_base.TestCase): "revision_start": r.revision_start, "revision_end": None, "ended_at": None, - "created_by_user_id": user, - "created_by_project_id": project, + "created_by_project_id": "", + "created_by_user_id": creator, + "creator": creator, "user_id": None, "project_id": None, "type": "generic", @@ -416,14 +426,10 @@ class TestIndexerDriver(tests_base.TestCase): r1 = uuid.uuid4() e1 = uuid.uuid4() e2 = uuid.uuid4() - user = str(uuid.uuid4()) - project = str(uuid.uuid4()) - self.index.create_metric(e1, user, project, - archive_policy_name="low") - self.index.create_resource('generic', r1, user, project, - metrics={'foo': e1}) - 
self.index.create_metric(e2, user, project, - archive_policy_name="low") + creator = str(uuid.uuid4()) + self.index.create_metric(e1, creator, archive_policy_name="low") + self.index.create_resource('generic', r1, creator, metrics={'foo': e1}) + self.index.create_metric(e2, creator, archive_policy_name="low") rc = self.index.update_resource('generic', r1, metrics={'bar': e2}) r = self.index.get_resource('generic', r1, with_metrics=True) self.assertEqual(rc, r) @@ -432,13 +438,12 @@ class TestIndexerDriver(tests_base.TestCase): r1 = uuid.uuid4() e1 = uuid.uuid4() e2 = uuid.uuid4() - user = str(uuid.uuid4()) - project = str(uuid.uuid4()) - self.index.create_metric(e1, user, project, + creator = str(uuid.uuid4()) + self.index.create_metric(e1, creator, archive_policy_name="low") - self.index.create_metric(e2, user, project, + self.index.create_metric(e2, creator, archive_policy_name="low") - self.index.create_resource('generic', r1, user, project, + self.index.create_resource('generic', r1, creator, metrics={'foo': e1}) rc = self.index.update_resource('generic', r1, metrics={'bar': e2}, append_metrics=True) @@ -452,13 +457,12 @@ class TestIndexerDriver(tests_base.TestCase): r1 = uuid.uuid4() e1 = uuid.uuid4() e2 = uuid.uuid4() - user = str(uuid.uuid4()) - project = str(uuid.uuid4()) - self.index.create_metric(e1, user, project, + creator = str(uuid.uuid4()) + self.index.create_metric(e1, creator, archive_policy_name="low") - self.index.create_metric(e2, user, project, + self.index.create_metric(e2, creator, archive_policy_name="low") - self.index.create_resource('generic', r1, user, project, + self.index.create_resource('generic', r1, creator, metrics={'foo': e1}) self.assertRaises(indexer.NamedMetricAlreadyExists, @@ -476,12 +480,11 @@ class TestIndexerDriver(tests_base.TestCase): "min_length": 2, "max_length": 15} }, 'creating') r1 = uuid.uuid4() - user = str(uuid.uuid4()) - project = str(uuid.uuid4()) + creator = str(uuid.uuid4()) # Create 
self.index.create_resource_type(rtype) - rc = self.index.create_resource(resource_type, r1, user, project, + rc = self.index.create_resource(resource_type, r1, creator, col1="foo") rc = self.index.update_resource(resource_type, r1, col1="foo") r = self.index.get_resource(resource_type, r1, with_metrics=True) @@ -496,9 +499,8 @@ class TestIndexerDriver(tests_base.TestCase): }, 'creating') self.index.create_resource_type(rtype) r1 = uuid.uuid4() - user = str(uuid.uuid4()) - project = str(uuid.uuid4()) - rc = self.index.create_resource(resource_type, r1, user, project, + creator = str(uuid.uuid4()) + rc = self.index.create_resource(resource_type, r1, creator, col1="foo") updated = self.index.update_resource(resource_type, r1, col1="foo", create_revision=False) @@ -511,9 +513,8 @@ class TestIndexerDriver(tests_base.TestCase): def test_update_resource_ended_at_fail(self): r1 = uuid.uuid4() - user = str(uuid.uuid4()) - project = str(uuid.uuid4()) - self.index.create_resource('generic', r1, user, project) + creator = str(uuid.uuid4()) + self.index.create_resource('generic', r1, creator) self.assertRaises( indexer.ResourceValueError, self.index.update_resource, @@ -549,7 +550,7 @@ class TestIndexerDriver(tests_base.TestCase): def test_update_non_existent_resource(self): r1 = uuid.uuid4() e1 = uuid.uuid4() - self.index.create_metric(e1, str(uuid.uuid4()), str(uuid.uuid4()), + self.index.create_metric(e1, str(uuid.uuid4()), archive_policy_name="low") self.assertRaises(indexer.NoSuchResource, self.index.update_resource, @@ -569,13 +570,12 @@ class TestIndexerDriver(tests_base.TestCase): r1 = uuid.uuid4() e1 = uuid.uuid4() e2 = uuid.uuid4() - user = str(uuid.uuid4()) - project = str(uuid.uuid4()) - self.index.create_metric(e1, user, project, + creator = str(uuid.uuid4()) + self.index.create_metric(e1, creator, archive_policy_name="low") - self.index.create_metric(e2, user, project, + self.index.create_metric(e2, creator, archive_policy_name="low") - rc = 
self.index.create_resource('generic', r1, user, project, + rc = self.index.create_resource('generic', r1, creator, metrics={'foo': e1, 'bar': e2}) self.index.delete_metric(e1) self.assertRaises(indexer.NoSuchMetric, self.index.delete_metric, e1) @@ -587,8 +587,9 @@ class TestIndexerDriver(tests_base.TestCase): "revision_start": rc.revision_start, "revision_end": None, "ended_at": None, - "created_by_user_id": user, - "created_by_project_id": project, + "creator": creator, + "created_by_project_id": "", + "created_by_user_id": creator, "user_id": None, "project_id": None, "original_resource_id": None, @@ -626,7 +627,7 @@ class TestIndexerDriver(tests_base.TestCase): r1 = uuid.uuid4() user = str(uuid.uuid4()) project = str(uuid.uuid4()) - g = self.index.create_resource('generic', r1, user, project, + g = self.index.create_resource('generic', r1, user + ":" + project, user, project) resources = self.index.list_resources( 'generic', @@ -638,34 +639,46 @@ class TestIndexerDriver(tests_base.TestCase): attribute_filter={"=": {"user_id": 'bad-user'}}) self.assertEqual(0, len(resources)) - def test_list_resources_by_created_by_user(self): + def test_list_resources_by_created_by_user_id(self): r1 = uuid.uuid4() - user = str(uuid.uuid4()) - project = str(uuid.uuid4()) - g = self.index.create_resource('generic', r1, user, project) + creator = str(uuid.uuid4()) + g = self.index.create_resource('generic', r1, creator + ":" + creator) + resources = self.index.list_resources( + 'generic', + attribute_filter={"=": {"created_by_user_id": creator}}) + self.assertEqual([g], resources) + resources = self.index.list_resources( + 'generic', + attribute_filter={"=": {"created_by_user_id": 'bad-user'}}) + self.assertEqual([], resources) + + def test_list_resources_by_creator(self): + r1 = uuid.uuid4() + creator = str(uuid.uuid4()) + g = self.index.create_resource('generic', r1, creator) resources = self.index.list_resources( 'generic', - attribute_filter={"=": {"created_by_user_id": 
user}}) + attribute_filter={"=": {"creator": creator}}) self.assertEqual(1, len(resources)) self.assertEqual(g, resources[0]) resources = self.index.list_resources( 'generic', - attribute_filter={"=": {"created_by_user_id": 'bad-user'}}) + attribute_filter={"=": {"creator": 'bad-user'}}) self.assertEqual(0, len(resources)) def test_list_resources_by_user_with_details(self): r1 = uuid.uuid4() user = str(uuid.uuid4()) project = str(uuid.uuid4()) - g = self.index.create_resource('generic', r1, user, project, + creator = user + ":" + project + g = self.index.create_resource('generic', r1, creator, user, project) mgr = self.index.get_resource_type_schema() resource_type = str(uuid.uuid4()) self.index.create_resource_type( mgr.resource_type_from_dict(resource_type, {}, 'creating')) r2 = uuid.uuid4() - i = self.index.create_resource(resource_type, r2, - user, project, + i = self.index.create_resource(resource_type, r2, creator, user, project) resources = self.index.list_resources( 'generic', @@ -680,8 +693,8 @@ class TestIndexerDriver(tests_base.TestCase): r1 = uuid.uuid4() user = str(uuid.uuid4()) project = str(uuid.uuid4()) - g = self.index.create_resource('generic', r1, user, project, - user, project) + creator = user + ":" + project + g = self.index.create_resource('generic', r1, creator, user, project) resources = self.index.list_resources( 'generic', attribute_filter={"=": {"project_id": project}}) @@ -697,14 +710,13 @@ class TestIndexerDriver(tests_base.TestCase): user = str(uuid.uuid4()) project = str(uuid.uuid4()) g = self.index.create_resource( - 'generic', r1, user, project, - user_id=user, project_id=project, + 'generic', r1, user + ":" + project, user, project, started_at=utils.datetime_utc(2010, 1, 1, 12, 0), ended_at=utils.datetime_utc(2010, 1, 1, 13, 0)) resources = self.index.list_resources( 'generic', attribute_filter={"and": [ - {"=": {"project_id": project}}, + {"=": {"user_id": user}}, {">": {"lifespan": 1800}}, ]}) self.assertEqual(1, len(resources)) 
@@ -795,10 +807,10 @@ class TestIndexerDriver(tests_base.TestCase): new_user = str(uuid.uuid4()) new_project = str(uuid.uuid4()) - self.index.create_metric(e, user, project, + self.index.create_metric(e, user + ":" + project, archive_policy_name="low") - self.index.create_resource('generic', rid, user, project, + self.index.create_resource('generic', rid, user + ":" + project, user, project, metrics={'foo': e}) r2 = self.index.update_resource('generic', rid, user_id=new_user, @@ -821,18 +833,16 @@ class TestIndexerDriver(tests_base.TestCase): rid = uuid.uuid4() user = str(uuid.uuid4()) project = str(uuid.uuid4()) + creator = user + ":" + project new_user = str(uuid.uuid4()) new_project = str(uuid.uuid4()) - self.index.create_metric(e1, user, project, - archive_policy_name="low") - self.index.create_metric(e2, user, project, - archive_policy_name="low") - self.index.create_metric(uuid.uuid4(), user, project, + self.index.create_metric(e1, creator, archive_policy_name="low") + self.index.create_metric(e2, creator, archive_policy_name="low") + self.index.create_metric(uuid.uuid4(), creator, archive_policy_name="low") - r1 = self.index.create_resource('generic', rid, user, project, - user, project, + r1 = self.index.create_resource('generic', rid, creator, user, project, metrics={'foo': e1, 'bar': e2} ).jsonify() r2 = self.index.update_resource('generic', rid, user_id=new_user, @@ -859,6 +869,7 @@ class TestIndexerDriver(tests_base.TestCase): e1 = uuid.uuid4() e2 = uuid.uuid4() rid = uuid.uuid4() + creator = str(uuid.uuid4()) user = str(uuid.uuid4()) project = str(uuid.uuid4()) new_user = str(uuid.uuid4()) @@ -872,14 +883,14 @@ class TestIndexerDriver(tests_base.TestCase): "min_length": 2, "max_length": 15} }, 'creating')) - self.index.create_metric(e1, user, project, + self.index.create_metric(e1, creator, archive_policy_name="low") - self.index.create_metric(e2, user, project, + self.index.create_metric(e2, creator, archive_policy_name="low") - 
self.index.create_metric(uuid.uuid4(), user, project, + self.index.create_metric(uuid.uuid4(), creator, archive_policy_name="low") - r1 = self.index.create_resource(resource_type, rid, user, project, + r1 = self.index.create_resource(resource_type, rid, creator, user, project, col1="foo", metrics={'foo': e1, 'bar': e2} @@ -911,10 +922,9 @@ class TestIndexerDriver(tests_base.TestCase): # database for all tests and the tests are running concurrently, but # for now it'll be better than nothing. r1 = uuid.uuid4() - user = str(uuid.uuid4()) - project = str(uuid.uuid4()) + creator = str(uuid.uuid4()) g = self.index.create_resource( - 'generic', r1, user, project, + 'generic', r1, creator, started_at=utils.datetime_utc(2000, 1, 1, 23, 23, 23), ended_at=utils.datetime_utc(2000, 1, 3, 23, 23, 23)) r2 = uuid.uuid4() @@ -923,7 +933,7 @@ class TestIndexerDriver(tests_base.TestCase): self.index.create_resource_type( mgr.resource_type_from_dict(resource_type, {}, 'creating')) i = self.index.create_resource( - resource_type, r2, user, project, + resource_type, r2, creator, started_at=utils.datetime_utc(2000, 1, 1, 23, 23, 23), ended_at=utils.datetime_utc(2000, 1, 4, 23, 23, 23)) resources = self.index.list_resources( @@ -977,10 +987,11 @@ class TestIndexerDriver(tests_base.TestCase): r2 = uuid.uuid4() user = str(uuid.uuid4()) project = str(uuid.uuid4()) + creator = user + ":" + project metrics = {'foo': {'archive_policy_name': 'medium'}} - g1 = self.index.create_resource('generic', r1, user, project, + g1 = self.index.create_resource('generic', r1, creator, user, project, metrics=metrics) - g2 = self.index.create_resource('generic', r2, user, project, + g2 = self.index.create_resource('generic', r2, creator, user, project, metrics=metrics) metrics = self.index.list_metrics(ids=[g1['metrics'][0]['id'], @@ -1008,35 +1019,29 @@ class TestIndexerDriver(tests_base.TestCase): def test_get_metric(self): e1 = uuid.uuid4() - user = str(uuid.uuid4()) - project = str(uuid.uuid4()) - 
self.index.create_metric(e1, - user, project, - archive_policy_name="low") + creator = str(uuid.uuid4()) + self.index.create_metric(e1, creator, archive_policy_name="low") metric = self.index.list_metrics(id=e1) self.assertEqual(1, len(metric)) metric = metric[0] self.assertEqual(e1, metric.id) - self.assertEqual(metric.created_by_user_id, user) - self.assertEqual(metric.created_by_project_id, project) + self.assertEqual(metric.creator, creator) self.assertIsNone(metric.name) self.assertIsNone(metric.resource_id) def test_get_metric_with_details(self): e1 = uuid.uuid4() - user = str(uuid.uuid4()) - project = str(uuid.uuid4()) + creator = str(uuid.uuid4()) self.index.create_metric(e1, - user, project, + creator, archive_policy_name="low") metric = self.index.list_metrics(id=e1) self.assertEqual(1, len(metric)) metric = metric[0] self.assertEqual(e1, metric.id) - self.assertEqual(metric.created_by_user_id, user) - self.assertEqual(metric.created_by_project_id, project) + self.assertEqual(metric.creator, creator) self.assertIsNone(metric.name) self.assertIsNone(metric.resource_id) self.assertEqual(self.archive_policies['low'], metric.archive_policy) @@ -1050,15 +1055,10 @@ class TestIndexerDriver(tests_base.TestCase): def test_list_metrics(self): e1 = uuid.uuid4() - user = str(uuid.uuid4()) - project = str(uuid.uuid4()) - self.index.create_metric(e1, - user, project, - archive_policy_name="low") + creator = str(uuid.uuid4()) + self.index.create_metric(e1, creator, archive_policy_name="low") e2 = uuid.uuid4() - self.index.create_metric(e2, - user, project, - archive_policy_name="low") + self.index.create_metric(e2, creator, archive_policy_name="low") metrics = self.index.list_metrics() id_list = [m.id for m in metrics] self.assertIn(e1, id_list) @@ -1070,10 +1070,7 @@ class TestIndexerDriver(tests_base.TestCase): def test_list_metrics_delete_status(self): e1 = uuid.uuid4() - user = str(uuid.uuid4()) - project = str(uuid.uuid4()) - self.index.create_metric(e1, - user, 
project, + self.index.create_metric(e1, str(uuid.uuid4()), archive_policy_name="low") self.index.delete_metric(e1) metrics = self.index.list_metrics() diff --git a/gnocchi/tests/test_rest.py b/gnocchi/tests/test_rest.py index 858ccb2d..a5374ec3 100644 --- a/gnocchi/tests/test_rest.py +++ b/gnocchi/tests/test_rest.py @@ -622,9 +622,13 @@ class ResourceTest(RestTest): if self.auth: self.resource['created_by_user_id'] = TestingApp.USER_ID self.resource['created_by_project_id'] = TestingApp.PROJECT_ID + self.resource['creator'] = ( + TestingApp.USER_ID + ":" + TestingApp.PROJECT_ID + ) else: self.resource['created_by_user_id'] = None self.resource['created_by_project_id'] = None + self.resource['creator'] = None self.resource['ended_at'] = None self.resource['metrics'] = {} if 'user_id' not in self.resource: diff --git a/gnocchi/tests/test_statsd.py b/gnocchi/tests/test_statsd.py index 6d6cb790..4a35af02 100644 --- a/gnocchi/tests/test_statsd.py +++ b/gnocchi/tests/test_statsd.py @@ -35,10 +35,8 @@ class TestStatsd(tests_base.TestCase): self.conf.set_override("resource_id", str(uuid.uuid4()), "statsd") - self.conf.set_override("user_id", + self.conf.set_override("creator", self.STATSD_USER_ID, "statsd") - self.conf.set_override("project_id", - self.STATSD_PROJECT_ID, "statsd") self.conf.set_override("archive_policy_name", self.STATSD_ARCHIVE_POLICY_NAME, "statsd") diff --git a/gnocchi/tests/test_storage.py b/gnocchi/tests/test_storage.py index 33cec577..816cb78c 100644 --- a/gnocchi/tests/test_storage.py +++ b/gnocchi/tests/test_storage.py @@ -40,7 +40,6 @@ class TestStorageDriver(tests_base.TestCase): m = storage.Metric(uuid.uuid4(), self.archive_policies[archive_policy_name]) m_sql = self.index.create_metric(m.id, str(uuid.uuid4()), - str(uuid.uuid4()), archive_policy_name) return m, m_sql @@ -249,7 +248,6 @@ class TestStorageDriver(tests_base.TestCase): self.index.create_archive_policy(ap) self.metric = storage.Metric(uuid.uuid4(), ap) 
self.index.create_metric(self.metric.id, str(uuid.uuid4()), - str(uuid.uuid4()), apname) # First store some points scattered across different splits @@ -335,7 +333,6 @@ class TestStorageDriver(tests_base.TestCase): self.index.create_archive_policy(ap) self.metric = storage.Metric(uuid.uuid4(), ap) self.index.create_metric(self.metric.id, str(uuid.uuid4()), - str(uuid.uuid4()), apname) # First store some points scattered across different splits @@ -399,7 +396,6 @@ class TestStorageDriver(tests_base.TestCase): self.index.create_archive_policy(ap) self.metric = storage.Metric(uuid.uuid4(), ap) self.index.create_metric(self.metric.id, str(uuid.uuid4()), - str(uuid.uuid4()), apname) # First store some points scattered across different splits @@ -810,8 +806,7 @@ class TestStorageDriver(tests_base.TestCase): name = str(uuid.uuid4()) ap = archive_policy.ArchivePolicy(name, 0, [(3, 5)]) self.index.create_archive_policy(ap) - m = self.index.create_metric(uuid.uuid4(), str(uuid.uuid4()), - str(uuid.uuid4()), name) + m = self.index.create_metric(uuid.uuid4(), str(uuid.uuid4()), name) m = self.index.list_metrics(ids=[m.id])[0] self.storage.incoming.add_measures(m, [ storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 0, 0), 1), diff --git a/releasenotes/notes/creator_field-6b715c917f6afc93.yaml b/releasenotes/notes/creator_field-6b715c917f6afc93.yaml new file mode 100644 index 00000000..e9b3bfd1 --- /dev/null +++ b/releasenotes/notes/creator_field-6b715c917f6afc93.yaml @@ -0,0 +1,6 @@ +--- +deprecations: + - >- + The `created_by_user_id` and `created_by_project_id` field are now + deprecated and being merged into a unique `creator` field. The old fields + are still returned and managed by the API for now. 
-- GitLab From e534ebb642466855d32e67fc7094e5e336732782 Mon Sep 17 00:00:00 2001 From: akrzos Date: Wed, 11 Jan 2017 07:49:23 -0500 Subject: [PATCH 0540/1483] Fix a typo in estimated sizing per metric under archive policies Change-Id: Ib86f7247ba2ff27336ef4529e64fd86503690304 --- doc/source/architecture.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/source/architecture.rst b/doc/source/architecture.rst index 91d156ca..e5313f47 100755 --- a/doc/source/architecture.rst +++ b/doc/source/architecture.rst @@ -125,7 +125,7 @@ By default, 3 archive policies are created using the default archive policy list (listed in `default_aggregation_methods`, i.e. mean, min, max, sum, std, count): -- low (maximum estimated size per metric: 406 MiB) +- low (maximum estimated size per metric: 406 KiB) * 5 minutes granularity over 30 days -- GitLab From 1aaf8bec4d36509dea1e01a29488574d9e52a079 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Wed, 11 Jan 2017 18:12:34 +0100 Subject: [PATCH 0541/1483] Remove broken script The dump_archive_file does not exist anymore in Carbonara. Change-Id: Ia5ecb2778c219e40ce638c57d9a4723093a686b3 --- setup.cfg | 1 - 1 file changed, 1 deletion(-) diff --git a/setup.cfg b/setup.cfg index fbd25736..3812d341 100644 --- a/setup.cfg +++ b/setup.cfg @@ -130,7 +130,6 @@ console_scripts = gnocchi-upgrade = gnocchi.cli:upgrade gnocchi-statsd = gnocchi.cli:statsd gnocchi-metricd = gnocchi.cli:metricd - carbonara-dump = gnocchi.carbonara:dump_archive_file wsgi_scripts = gnocchi-api = gnocchi.rest.app:build_wsgi_app -- GitLab From 410b1643bf2aebfaae53261cbe2045d461d058f2 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Wed, 11 Jan 2017 18:24:50 +0100 Subject: [PATCH 0542/1483] Add gnocchi-config-generator This is a new tool that allows to not worry about the oslo-config-generator configuration file path. 
Change-Id: Iec9097c67fcf0161c69f0bf76e89a910238ea210 --- devstack/plugin.sh | 4 ---- doc/source/configuration.rst | 2 +- gnocchi/cli.py | 8 ++++++- gnocchi/genconfig.py | 13 ++++++---- .../gnocchi-config-generator.conf | 1 - gnocchi/tests/test_bin.py | 24 +++++++++++++++++++ ...chi_config_generator-0fc337ba8e3afd5f.yaml | 5 ++++ setup.cfg | 3 +-- tox.ini | 4 ++-- 9 files changed, 49 insertions(+), 15 deletions(-) rename {etc/gnocchi => gnocchi}/gnocchi-config-generator.conf (88%) create mode 100644 gnocchi/tests/test_bin.py create mode 100644 releasenotes/notes/gnocchi_config_generator-0fc337ba8e3afd5f.yaml diff --git a/devstack/plugin.sh b/devstack/plugin.sh index 5fa2ba63..f271aa62 100644 --- a/devstack/plugin.sh +++ b/devstack/plugin.sh @@ -228,10 +228,6 @@ function configure_gnocchi { # Configure logging iniset $GNOCCHI_CONF DEFAULT debug "$ENABLE_DEBUG_LOG_LEVEL" - # Install the configuration files - cp $GNOCCHI_DIR/etc/gnocchi/* $GNOCCHI_CONF_DIR - - # Set up logging if [ "$SYSLOG" != "False" ]; then iniset $GNOCCHI_CONF DEFAULT use_syslog "True" diff --git a/doc/source/configuration.rst b/doc/source/configuration.rst index 63e3506b..0f471cf1 100644 --- a/doc/source/configuration.rst +++ b/doc/source/configuration.rst @@ -10,7 +10,7 @@ easily created by running: :: - oslo-config-generator --config-file=/etc/gnocchi/gnocchi-config-generator.conf --output-file=/etc/gnocchi/gnocchi.conf + gnocchi-config-generator > /etc/gnocchi/gnocchi.conf The configuration file should be pretty explicit, but here are some of the base options you want to change and configure: diff --git a/gnocchi/cli.py b/gnocchi/cli.py index 3a7ba02a..7ba3b229 100644 --- a/gnocchi/cli.py +++ b/gnocchi/cli.py @@ -1,5 +1,5 @@ # Copyright (c) 2013 Mirantis Inc. -# Copyright (c) 2015-2016 Red Hat +# Copyright (c) 2015-2017 Red Hat # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -14,6 +14,7 @@ # See the License for the specific language governing permissions and # limitations under the License. import multiprocessing +import sys import threading import time @@ -30,6 +31,7 @@ import tooz from tooz import coordination from gnocchi import archive_policy +from gnocchi import genconfig from gnocchi import indexer from gnocchi import service from gnocchi import statsd as statsd_service @@ -40,6 +42,10 @@ from gnocchi import utils LOG = log.getLogger(__name__) +def config_generator(): + return genconfig.prehook(None, sys.argv[1:]) + + def upgrade(): conf = cfg.ConfigOpts() conf.register_cli_opts([ diff --git a/gnocchi/genconfig.py b/gnocchi/genconfig.py index 84a2feb9..0eba7359 100644 --- a/gnocchi/genconfig.py +++ b/gnocchi/genconfig.py @@ -1,6 +1,6 @@ # -*- encoding: utf-8 -*- # -# Copyright © 2016 Red Hat, Inc. +# Copyright © 2016-2017 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain @@ -13,12 +13,17 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
+import os -def prehook(cmd): +def prehook(cmd, args=None): + if args is None: + args = ['--output-file', 'etc/gnocchi/gnocchi.conf'] try: from oslo_config import generator - generator.main(['--config-file', - 'etc/gnocchi/gnocchi-config-generator.conf']) + generator.main( + ['--config-file', + '%s/gnocchi-config-generator.conf' % os.path.dirname(__file__)] + + args) except Exception as e: print("Unable to build sample configuration file: %s" % e) diff --git a/etc/gnocchi/gnocchi-config-generator.conf b/gnocchi/gnocchi-config-generator.conf similarity index 88% rename from etc/gnocchi/gnocchi-config-generator.conf rename to gnocchi/gnocchi-config-generator.conf index 741f015f..df6e9880 100644 --- a/etc/gnocchi/gnocchi-config-generator.conf +++ b/gnocchi/gnocchi-config-generator.conf @@ -1,5 +1,4 @@ [DEFAULT] -output_file = etc/gnocchi/gnocchi.conf wrap_width = 79 namespace = gnocchi namespace = oslo.db diff --git a/gnocchi/tests/test_bin.py b/gnocchi/tests/test_bin.py new file mode 100644 index 00000000..e70bb865 --- /dev/null +++ b/gnocchi/tests/test_bin.py @@ -0,0 +1,24 @@ +# -*- encoding: utf-8 -*- +# +# Copyright © 2017 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+import subprocess + +from oslotest import base + + +class BinTestCase(base.BaseTestCase): + def test_gnocchi_config_generator_run(self): + subp = subprocess.Popen(['gnocchi-config-generator']) + self.assertEqual(0, subp.wait()) diff --git a/releasenotes/notes/gnocchi_config_generator-0fc337ba8e3afd5f.yaml b/releasenotes/notes/gnocchi_config_generator-0fc337ba8e3afd5f.yaml new file mode 100644 index 00000000..73af05f2 --- /dev/null +++ b/releasenotes/notes/gnocchi_config_generator-0fc337ba8e3afd5f.yaml @@ -0,0 +1,5 @@ +--- +features: + - >- + The `gnocchi-config-generator` program can now generates a default + configuration file, usable as a template for custom tweaking. diff --git a/setup.cfg b/setup.cfg index fbd25736..08fe573e 100644 --- a/setup.cfg +++ b/setup.cfg @@ -92,8 +92,6 @@ pre-hook.build_config = gnocchi.genconfig.prehook [files] packages = gnocchi -data_files = - etc/gnocchi = etc/gnocchi/* [entry_points] gnocchi.indexer.sqlalchemy.resource_type_attribute = @@ -127,6 +125,7 @@ gnocchi.rest.auth_helper = keystone = gnocchi.rest.auth_helper:KeystoneAuthHelper console_scripts = + gnocchi-config-generator = gnocchi.cli:config_generator gnocchi-upgrade = gnocchi.cli:upgrade gnocchi-statsd = gnocchi.cli:statsd gnocchi-metricd = gnocchi.cli:metricd diff --git a/tox.ini b/tox.ini index b0e37db1..3816a770 100644 --- a/tox.ini +++ b/tox.ini @@ -29,7 +29,7 @@ deps = .[test] # NOTE(tonyb): This project has chosen to *NOT* consume upper-constraints.txt commands = doc8 --ignore-path doc/source/rest.rst doc/source - oslo-config-generator --config-file=etc/gnocchi/gnocchi-config-generator.conf + gnocchi-config-generator {toxinidir}/run-tests.sh {posargs} [testenv:py35-postgresql-file-upgrade-from-3.0] @@ -125,7 +125,7 @@ enable-extensions = H904 [testenv:genconfig] deps = .[mysql,postgresql,test,file,ceph,swift,s3] -commands = oslo-config-generator --config-file=etc/gnocchi/gnocchi-config-generator.conf +commands = gnocchi-config-generator [testenv:docs] # This does 
not work, see: https://bitbucket.org/hpk42/tox/issues/302 -- GitLab From 546bd69b22a4bca500b0e23520f261e9f01dd052 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Wed, 11 Jan 2017 19:03:45 +0100 Subject: [PATCH 0543/1483] ceph: fix data compression when oldest_mutable_timestamp == next(key) Change-Id: I1c66b720d2c1424f022898ed4afd9ca820965b68 Closes-Bug: #1655422 --- gnocchi/storage/_carbonara.py | 2 +- gnocchi/tests/test_storage.py | 87 +++++++++++++++++++++++++++++++++++ 2 files changed, 88 insertions(+), 1 deletion(-) diff --git a/gnocchi/storage/_carbonara.py b/gnocchi/storage/_carbonara.py index 0f561c2c..ba0ee626 100644 --- a/gnocchi/storage/_carbonara.py +++ b/gnocchi/storage/_carbonara.py @@ -230,7 +230,7 @@ class CarbonaraBasedStorage(storage.StorageDriver): oldest_mutable_timestamp): # NOTE(jd) We write the full split only if the driver works that way # (self.WRITE_FULL) or if the oldest_mutable_timestamp is out of range. - write_full = self.WRITE_FULL or next(key) < oldest_mutable_timestamp + write_full = self.WRITE_FULL or next(key) <= oldest_mutable_timestamp key_as_str = str(key) if write_full: try: diff --git a/gnocchi/tests/test_storage.py b/gnocchi/tests/test_storage.py index 816cb78c..eb55e839 100644 --- a/gnocchi/tests/test_storage.py +++ b/gnocchi/tests/test_storage.py @@ -325,6 +325,93 @@ class TestStorageDriver(tests_base.TestCase): (utils.datetime_utc(2016, 1, 10, 17, 12), 60.0, 46), ], self.storage.get_measures(self.metric, granularity=60.0)) + def test_rewrite_measures_oldest_mutable_timestamp_eq_next_key(self): + """See LP#1655422""" + # Create an archive policy that spans on several splits. Each split + # being 3600 points, let's go for 36k points so we have 10 splits. 
+ apname = str(uuid.uuid4()) + ap = archive_policy.ArchivePolicy(apname, 0, [(36000, 60)]) + self.index.create_archive_policy(ap) + self.metric = storage.Metric(uuid.uuid4(), ap) + self.index.create_metric(self.metric.id, str(uuid.uuid4()), + apname) + + # First store some points scattered across different splits + self.storage.incoming.add_measures(self.metric, [ + storage.Measure(utils.dt_to_unix_ns(2016, 1, 1, 12, 0, 1), 69), + storage.Measure(utils.dt_to_unix_ns(2016, 1, 2, 13, 7, 31), 42), + storage.Measure(utils.dt_to_unix_ns(2016, 1, 4, 14, 9, 31), 4), + storage.Measure(utils.dt_to_unix_ns(2016, 1, 6, 15, 12, 45), 44), + ]) + self.trigger_processing() + + splits = {'1451520000.0', '1451736000.0', '1451952000.0'} + self.assertEqual(splits, + self.storage._list_split_keys_for_metric( + self.metric, "mean", 60.0)) + + if self.storage.WRITE_FULL: + assertCompressedIfWriteFull = self.assertTrue + else: + assertCompressedIfWriteFull = self.assertFalse + + data = self.storage._get_measures( + self.metric, '1451520000.0', "mean", 60.0) + self.assertTrue(carbonara.AggregatedTimeSerie.is_compressed(data)) + data = self.storage._get_measures( + self.metric, '1451736000.0', "mean", 60.0) + self.assertTrue(carbonara.AggregatedTimeSerie.is_compressed(data)) + data = self.storage._get_measures( + self.metric, '1451952000.0', "mean", 60.0) + assertCompressedIfWriteFull( + carbonara.AggregatedTimeSerie.is_compressed(data)) + + self.assertEqual([ + (utils.datetime_utc(2016, 1, 1, 12), 60.0, 69), + (utils.datetime_utc(2016, 1, 2, 13, 7), 60.0, 42), + (utils.datetime_utc(2016, 1, 4, 14, 9), 60.0, 4), + (utils.datetime_utc(2016, 1, 6, 15, 12), 60.0, 44), + ], self.storage.get_measures(self.metric, granularity=60.0)) + + # Now store brand new points that should force a rewrite of one of the + # split (keep in mind the back window size in one hour here). We move + # the BoundTimeSerie processing timeserie far away from its current + # range. 
+ + # Here we test a special case where the oldest_mutable_timestamp will + # be 2016-01-10TOO:OO:OO = 1452384000.0, our new split key. + self.storage.incoming.add_measures(self.metric, [ + storage.Measure(utils.dt_to_unix_ns(2016, 1, 10, 0, 12), 45), + ]) + self.trigger_processing() + + self.assertEqual({'1452384000.0', '1451736000.0', + '1451520000.0', '1451952000.0'}, + self.storage._list_split_keys_for_metric( + self.metric, "mean", 60.0)) + data = self.storage._get_measures( + self.metric, '1451520000.0', "mean", 60.0) + self.assertTrue(carbonara.AggregatedTimeSerie.is_compressed(data)) + data = self.storage._get_measures( + self.metric, '1451736000.0', "mean", 60.0) + self.assertTrue(carbonara.AggregatedTimeSerie.is_compressed(data)) + data = self.storage._get_measures( + self.metric, '1451952000.0', "mean", 60.0) + # Now this one is compressed because it has been rewritten! + self.assertTrue(carbonara.AggregatedTimeSerie.is_compressed(data)) + data = self.storage._get_measures( + self.metric, '1452384000.0', "mean", 60.0) + assertCompressedIfWriteFull( + carbonara.AggregatedTimeSerie.is_compressed(data)) + + self.assertEqual([ + (utils.datetime_utc(2016, 1, 1, 12), 60.0, 69), + (utils.datetime_utc(2016, 1, 2, 13, 7), 60.0, 42), + (utils.datetime_utc(2016, 1, 4, 14, 9), 60.0, 4), + (utils.datetime_utc(2016, 1, 6, 15, 12), 60.0, 44), + (utils.datetime_utc(2016, 1, 10, 0, 12), 60.0, 45), + ], self.storage.get_measures(self.metric, granularity=60.0)) + def test_rewrite_measures_corruption_missing_file(self): # Create an archive policy that spans on several splits. Each split # being 3600 points, let's go for 36k points so we have 10 splits. 
-- GitLab From a9c1383992ed2039cf42beee988e81113a7d33e9 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Wed, 11 Jan 2017 19:03:45 +0100 Subject: [PATCH 0545/1483] ceph: fix data compression when oldest_mutable_timestamp == next(key) Change-Id: I1c66b720d2c1424f022898ed4afd9ca820965b68 Closes-Bug: #1655422 --- gnocchi/storage/_carbonara.py | 2 +- gnocchi/tests/test_storage.py | 88 +++++++++++++++++++++++++++++++++++ 2 files changed, 89 insertions(+), 1 deletion(-) diff --git a/gnocchi/storage/_carbonara.py b/gnocchi/storage/_carbonara.py index c418dff4..e3d934c4 100644 --- a/gnocchi/storage/_carbonara.py +++ b/gnocchi/storage/_carbonara.py @@ -230,7 +230,7 @@ class CarbonaraBasedStorage(storage.StorageDriver): oldest_mutable_timestamp): # NOTE(jd) We write the full split only if the driver works that way # (self.WRITE_FULL) or if the oldest_mutable_timestamp is out of range. - write_full = self.WRITE_FULL or next(key) < oldest_mutable_timestamp + write_full = self.WRITE_FULL or next(key) <= oldest_mutable_timestamp key_as_str = str(key) if write_full: try: diff --git a/gnocchi/tests/test_storage.py b/gnocchi/tests/test_storage.py index 96d64fa1..eb32ad5e 100644 --- a/gnocchi/tests/test_storage.py +++ b/gnocchi/tests/test_storage.py @@ -310,6 +310,94 @@ class TestStorageDriver(tests_base.TestCase): (utils.datetime_utc(2016, 1, 10, 17, 12), 60.0, 46), ], self.storage.get_measures(self.metric, granularity=60.0)) + def test_rewrite_measures_oldest_mutable_timestamp_eq_next_key(self): + """See LP#1655422""" + # Create an archive policy that spans on several splits. Each split + # being 3600 points, let's go for 36k points so we have 10 splits. 
+ apname = str(uuid.uuid4()) + ap = archive_policy.ArchivePolicy(apname, 0, [(36000, 60)]) + self.index.create_archive_policy(ap) + self.metric = storage.Metric(uuid.uuid4(), ap) + self.index.create_metric(self.metric.id, str(uuid.uuid4()), + str(uuid.uuid4()), + apname) + + # First store some points scattered across different splits + self.storage.add_measures(self.metric, [ + storage.Measure(utils.datetime_utc(2016, 1, 1, 12, 0, 1), 69), + storage.Measure(utils.datetime_utc(2016, 1, 2, 13, 7, 31), 42), + storage.Measure(utils.datetime_utc(2016, 1, 4, 14, 9, 31), 4), + storage.Measure(utils.datetime_utc(2016, 1, 6, 15, 12, 45), 44), + ]) + self.trigger_processing() + + splits = {'1451520000.0', '1451736000.0', '1451952000.0'} + self.assertEqual(splits, + self.storage._list_split_keys_for_metric( + self.metric, "mean", 60.0)) + + if self.storage.WRITE_FULL: + assertCompressedIfWriteFull = self.assertTrue + else: + assertCompressedIfWriteFull = self.assertFalse + + data = self.storage._get_measures( + self.metric, '1451520000.0', "mean", 60.0) + self.assertTrue(carbonara.AggregatedTimeSerie.is_compressed(data)) + data = self.storage._get_measures( + self.metric, '1451736000.0', "mean", 60.0) + self.assertTrue(carbonara.AggregatedTimeSerie.is_compressed(data)) + data = self.storage._get_measures( + self.metric, '1451952000.0', "mean", 60.0) + assertCompressedIfWriteFull( + carbonara.AggregatedTimeSerie.is_compressed(data)) + + self.assertEqual([ + (utils.datetime_utc(2016, 1, 1, 12), 60.0, 69), + (utils.datetime_utc(2016, 1, 2, 13, 7), 60.0, 42), + (utils.datetime_utc(2016, 1, 4, 14, 9), 60.0, 4), + (utils.datetime_utc(2016, 1, 6, 15, 12), 60.0, 44), + ], self.storage.get_measures(self.metric, granularity=60.0)) + + # Now store brand new points that should force a rewrite of one of the + # split (keep in mind the back window size in one hour here). We move + # the BoundTimeSerie processing timeserie far away from its current + # range. 
+ + # Here we test a special case where the oldest_mutable_timestamp will + # be 2016-01-10TOO:OO:OO = 1452384000.0, our new split key. + self.storage.add_measures(self.metric, [ + storage.Measure(utils.datetime_utc(2016, 1, 10, 0, 12), 45), + ]) + self.trigger_processing() + + self.assertEqual({'1452384000.0', '1451736000.0', + '1451520000.0', '1451952000.0'}, + self.storage._list_split_keys_for_metric( + self.metric, "mean", 60.0)) + data = self.storage._get_measures( + self.metric, '1451520000.0', "mean", 60.0) + self.assertTrue(carbonara.AggregatedTimeSerie.is_compressed(data)) + data = self.storage._get_measures( + self.metric, '1451736000.0', "mean", 60.0) + self.assertTrue(carbonara.AggregatedTimeSerie.is_compressed(data)) + data = self.storage._get_measures( + self.metric, '1451952000.0', "mean", 60.0) + # Now this one is compressed because it has been rewritten! + self.assertTrue(carbonara.AggregatedTimeSerie.is_compressed(data)) + data = self.storage._get_measures( + self.metric, '1452384000.0', "mean", 60.0) + assertCompressedIfWriteFull( + carbonara.AggregatedTimeSerie.is_compressed(data)) + + self.assertEqual([ + (utils.datetime_utc(2016, 1, 1, 12), 60.0, 69), + (utils.datetime_utc(2016, 1, 2, 13, 7), 60.0, 42), + (utils.datetime_utc(2016, 1, 4, 14, 9), 60.0, 4), + (utils.datetime_utc(2016, 1, 6, 15, 12), 60.0, 44), + (utils.datetime_utc(2016, 1, 10, 0, 12), 60.0, 45), + ], self.storage.get_measures(self.metric, granularity=60.0)) + def test_rewrite_measures_corruption_missing_file(self): # Create an archive policy that spans on several splits. Each split # being 3600 points, let's go for 36k points so we have 10 splits. -- GitLab From c03958bdbd7e6747d6c8a84a822719f6949d1a80 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Fri, 13 Jan 2017 11:01:15 +0100 Subject: [PATCH 0546/1483] upgrade: fix OS_AUTH_TYPE variable name The variable name is wrong. It still works because pifpaf exports the right one later in the script though. 
Change-Id: If99f9530f13861f5acd240416947d827e7b70b3d --- run-upgrade-tests.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/run-upgrade-tests.sh b/run-upgrade-tests.sh index 2b69558e..8c970135 100755 --- a/run-upgrade-tests.sh +++ b/run-upgrade-tests.sh @@ -1,7 +1,7 @@ #!/bin/bash set -e -export OS_AUTH_PLUGIN=gnocchi-noauth +export OS_AUTH_TYPE=gnocchi-noauth export GNOCCHI_ENDPOINT=http://localhost:8041 export GNOCCHI_USER_ID=99aae-4dc2-4fbc-b5b8-9688c470d9cc export GNOCCHI_PROJECT_ID=c8d27445-48af-457c-8e0d-1de7103eae1f -- GitLab From 5633edd6e0927ebab1b544aa9f16c20d0d774aa1 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Sat, 14 Jan 2017 20:12:40 +0100 Subject: [PATCH 0547/1483] don't override logging when loading alembic module Change-Id: I7aca56498846c9079d51faacfbc3c989f568d096 --- gnocchi/indexer/alembic/alembic.ini | 34 ----------------------------- gnocchi/indexer/alembic/env.py | 5 ----- 2 files changed, 39 deletions(-) diff --git a/gnocchi/indexer/alembic/alembic.ini b/gnocchi/indexer/alembic/alembic.ini index 6986fc52..db7340ac 100644 --- a/gnocchi/indexer/alembic/alembic.ini +++ b/gnocchi/indexer/alembic/alembic.ini @@ -1,37 +1,3 @@ [alembic] script_location = gnocchi.indexer:alembic sqlalchemy.url = postgresql://localhost/gnocchi - -[loggers] -keys = root,sqlalchemy,alembic - -[handlers] -keys = console - -[formatters] -keys = generic - -[logger_root] -level = WARN -handlers = console -qualname = - -[logger_sqlalchemy] -level = WARN -handlers = -qualname = sqlalchemy.engine - -[logger_alembic] -level = WARN -handlers = -qualname = alembic - -[handler_console] -class = StreamHandler -args = (sys.stderr,) -level = NOTSET -formatter = generic - -[formatter_generic] -format = %(levelname)-5.5s [%(name)s] %(message)s -datefmt = %H:%M:%S diff --git a/gnocchi/indexer/alembic/env.py b/gnocchi/indexer/alembic/env.py index cf636cfa..47f58efb 100644 --- a/gnocchi/indexer/alembic/env.py +++ b/gnocchi/indexer/alembic/env.py @@ -16,7 +16,6 
@@ """A test module to exercise the Gnocchi API with gabbi.""" from alembic import context -from logging import config as logconfig from gnocchi.indexer import sqlalchemy from gnocchi.indexer import sqlalchemy_base @@ -25,10 +24,6 @@ from gnocchi.indexer import sqlalchemy_base # access to the values within the .ini file in use. config = context.config -# Interpret the config file for Python logging. -# This line sets up loggers basically. -logconfig.fileConfig(config.config_file_name) - # add your model's MetaData object here # for 'autogenerate' support # from myapp import mymodel -- GitLab From 9da4e07b94023ea92c47b3d0a801730404f95ae1 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Thu, 15 Dec 2016 11:40:44 +0100 Subject: [PATCH 0548/1483] Introduce "basic" authentication mechanism Change-Id: I369de25a656fa56e960cfba2b768ea10eedd3957 --- doc/source/configuration.rst | 9 +- doc/source/rest.j2 | 25 ++-- gnocchi/gendoc.py | 2 +- gnocchi/opts.py | 2 +- gnocchi/rest/api-paste.ini | 6 + gnocchi/rest/auth_helper.py | 25 ++++ gnocchi/rest/policy.json | 2 +- gnocchi/tests/gabbi/fixtures.py | 3 +- gnocchi/tests/test_rest.py | 123 ++++++++++-------- ...auth-keystone-compat-e8f760591d593f07.yaml | 9 ++ run-upgrade-tests.sh | 3 + setup.cfg | 1 + tox.ini | 8 +- 13 files changed, 141 insertions(+), 77 deletions(-) create mode 100644 releasenotes/notes/noauth-keystone-compat-e8f760591d593f07.yaml diff --git a/doc/source/configuration.rst b/doc/source/configuration.rst index 63e3506b..2d9408f1 100644 --- a/doc/source/configuration.rst +++ b/doc/source/configuration.rst @@ -58,10 +58,11 @@ Gnocchi provides these indexer drivers: Configuring authentication ----------------------------- -The API server supports different authentication methods: `noauth` (the -default) or `keystone` to use `OpenStack Keystone`_. If you successfully -installed the `keystone` flavor using `pip` (see :ref:`installation`), you can -set `api.auth_mode` to `keystone` to enable Keystone authentication. 
+The API server supports different authentication methods: `basic` (the default) +which uses the standard HTTP `Authorization` header or `keystone` to use +`OpenStack Keystone`_. If you successfully installed the `keystone` flavor +using `pip` (see :ref:`installation`), you can set `api.auth_mode` to +`keystone` to enable Keystone authentication. .. _`Paste Deployment`: http://pythonpaste.org/deploy/ .. _`OpenStack Keystone`: http://launchpad.net/keystone diff --git a/doc/source/rest.j2 b/doc/source/rest.j2 index b67d375c..1c83ed72 100644 --- a/doc/source/rest.j2 +++ b/doc/source/rest.j2 @@ -5,19 +5,18 @@ Authentication ============== -By default, no authentication is configured in Gnocchi. You need to provides -these headers in your HTTP requests: - -* X-User-Id -* X-Project-Id - -The `X-Roles` header can also be provided in order to match role based ACL -specified in `policy.json`, as `X-Domain-Id` to match domain based ACL. - -If you enable the OpenStack Keystone middleware, you only need to authenticate -against Keystone and provide `X-Auth-Token` header with a valid token for each -request sent to Gnocchi. The headers mentioned above will be filled -automatically based on your Keystone authorizations. +By default, the authentication is configured to the "basic" mode. You need to +provide an `Authorization' header in your HTTP requests with a valid username +(the password is not used). The "admin" password is granted all privileges, +whereas any other username is recognize as having standard permissions. + +You can customize permissions by specifying a different `policy_file` than the +default one. + +If you set the `api.auth_mode` value to `keystone`, the OpenStack Keystone +middleware will be enabled for authentication. It is then needed to +authenticate against Keystone and provide a `X-Auth-Token` header with a valid +token for each request sent to Gnocchi's API. 
Metrics ======= diff --git a/gnocchi/gendoc.py b/gnocchi/gendoc.py index 240b8319..996c715b 100644 --- a/gnocchi/gendoc.py +++ b/gnocchi/gendoc.py @@ -32,7 +32,7 @@ _RUN = False def _setup_test_app(): t = test_rest.RestTest() - t.auth = True + t.auth_mode = "basic" t.setUpClass() t.setUp() return t.app diff --git a/gnocchi/opts.py b/gnocchi/opts.py index 02cdb3f4..6e7dca4a 100644 --- a/gnocchi/opts.py +++ b/gnocchi/opts.py @@ -85,7 +85,7 @@ def list_opts(): "rest", "api-paste.ini")), help='Path to API Paste configuration.'), cfg.StrOpt('auth_mode', - default="noauth", + default="basic", choices=extension.ExtensionManager( "gnocchi.rest.auth_helper").names(), help='Authentication mode to use.'), diff --git a/gnocchi/rest/api-paste.ini b/gnocchi/rest/api-paste.ini index d198362d..47bb3c32 100644 --- a/gnocchi/rest/api-paste.ini +++ b/gnocchi/rest/api-paste.ini @@ -4,6 +4,12 @@ use = egg:Paste#urlmap /v1 = gnocchiv1+noauth /healthcheck = healthcheck +[composite:gnocchi+basic] +use = egg:Paste#urlmap +/ = gnocchiversions_pipeline +/v1 = gnocchiv1+noauth +/healthcheck = healthcheck + [composite:gnocchi+keystone] use = egg:Paste#urlmap / = gnocchiversions_pipeline diff --git a/gnocchi/rest/auth_helper.py b/gnocchi/rest/auth_helper.py index bb4a2d99..c173c8de 100644 --- a/gnocchi/rest/auth_helper.py +++ b/gnocchi/rest/auth_helper.py @@ -15,6 +15,7 @@ # License for the specific language governing permissions and limitations # under the License. 
import webob +import werkzeug.http from gnocchi import rest @@ -97,3 +98,27 @@ class NoAuthHelper(KeystoneAuthHelper): return user_id if project_id: return project_id + + +class BasicAuthHelper(object): + @staticmethod + def get_current_user(headers): + auth = werkzeug.http.parse_authorization_header( + headers.get("Authorization")) + if auth is None: + rest.abort(401) + return auth.username + + def get_auth_info(self, headers): + user = self.get_current_user(headers) + roles = [] + if user == "admin": + roles.append("admin") + return { + "user": user, + "roles": roles + } + + @staticmethod + def get_resource_policy_filter(headers, rule, resource_type): + return None diff --git a/gnocchi/rest/policy.json b/gnocchi/rest/policy.json index 00aaeddd..51d39674 100644 --- a/gnocchi/rest/policy.json +++ b/gnocchi/rest/policy.json @@ -1,5 +1,5 @@ { - "admin_or_creator": "role:admin or project_id:%(created_by_project_id)s", + "admin_or_creator": "role:admin or user:%(creator)s or project_id:%(created_by_project_id)s", "resource_owner": "project_id:%(project_id)s", "metric_owner": "project_id:%(resource.project_id)s", diff --git a/gnocchi/tests/gabbi/fixtures.py b/gnocchi/tests/gabbi/fixtures.py index 39a94dc6..df98ed34 100644 --- a/gnocchi/tests/gabbi/fixtures.py +++ b/gnocchi/tests/gabbi/fixtures.py @@ -111,7 +111,8 @@ class ConfigFixture(fixture.GabbiFixture): # Set pagination to a testable value conf.set_override('max_limit', 7, 'api') - # Those tests do not use any auth + # Those tests uses noauth mode + # TODO(jd) Rewrite them for basic conf.set_override("auth_mode", "noauth", 'api') self.index = index diff --git a/gnocchi/tests/test_rest.py b/gnocchi/tests/test_rest.py index a5374ec3..1c73bb81 100644 --- a/gnocchi/tests/test_rest.py +++ b/gnocchi/tests/test_rest.py @@ -14,6 +14,7 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
+import base64 import calendar import contextlib import datetime @@ -57,28 +58,40 @@ class TestingApp(webtest.TestApp): INVALID_TOKEN = str(uuid.uuid4()) def __init__(self, *args, **kwargs): - self.auth = kwargs.pop('auth') + self.auth_mode = kwargs.pop('auth_mode') self.storage = kwargs.pop('storage') self.indexer = kwargs.pop('indexer') super(TestingApp, self).__init__(*args, **kwargs) # Setup Keystone auth_token fake cache self.token = self.VALID_TOKEN + # Setup default user for basic auth + self.user = self.USER_ID.encode('ascii') @contextlib.contextmanager def use_admin_user(self): - if not self.auth: - raise testcase.TestSkipped("No auth enabled") - old_token = self.token - self.token = self.VALID_TOKEN_ADMIN - try: - yield - finally: - self.token = old_token + if self.auth_mode == "keystone": + old_token = self.token + self.token = self.VALID_TOKEN_ADMIN + try: + yield + finally: + self.token = old_token + elif self.auth_mode == "basic": + old_user = self.user + self.user = b"admin" + try: + yield + finally: + self.user = old_user + elif self.auth_mode == "noauth": + raise testcase.TestSkipped("auth mode is noauth") + else: + raise RuntimeError("Unknown auth_mode") @contextlib.contextmanager def use_another_user(self): - if not self.auth: - raise testcase.TestSkipped("No auth enabled") + if self.auth_mode != "keystone": + raise testcase.TestSkipped("Auth mode is not Keystone") old_token = self.token self.token = self.VALID_TOKEN_2 try: @@ -88,8 +101,8 @@ class TestingApp(webtest.TestApp): @contextlib.contextmanager def use_invalid_token(self): - if not self.auth: - raise testcase.TestSkipped("No auth enabled") + if self.auth_mode != "keystone": + raise testcase.TestSkipped("Auth mode is not Keystone") old_token = self.token self.token = self.INVALID_TOKEN try: @@ -98,8 +111,16 @@ class TestingApp(webtest.TestApp): self.token = old_token def do_request(self, req, *args, **kwargs): - if self.auth and self.token is not None: - req.headers['X-Auth-Token'] = 
self.token + if self.auth_mode in "keystone": + if self.token is not None: + req.headers['X-Auth-Token'] = self.token + elif self.auth_mode == "basic": + req.headers['Authorization'] = ( + b"basic " + base64.b64encode(self.user + b":") + ) + elif self.auth_mode == "noauth": + req.headers['X-User-Id'] = self.USER_ID + req.headers['X-Project-Id'] = self.PROJECT_ID response = super(TestingApp, self).do_request(req, *args, **kwargs) metrics = self.storage.incoming.list_metric_with_measures_to_process( None, None, full=True) @@ -110,41 +131,40 @@ class TestingApp(webtest.TestApp): class RestTest(tests_base.TestCase, testscenarios.TestWithScenarios): scenarios = [ - ('noauth', dict(auth=False)), - ('keystone', dict(auth=True)), + ('basic', dict(auth_mode="basic")), + ('keystone', dict(auth_mode="keystone")), + ('noauth', dict(auth_mode="noauth")), ] def setUp(self): super(RestTest, self).setUp() - self.auth_token_fixture = self.useFixture( - ksm_fixture.AuthTokenFixture()) - self.auth_token_fixture.add_token_data( - is_v2=True, - token_id=TestingApp.VALID_TOKEN_ADMIN, - user_id=TestingApp.USER_ID_ADMIN, - user_name='adminusername', - project_id=TestingApp.PROJECT_ID_ADMIN, - role_list=['admin']) - self.auth_token_fixture.add_token_data( - is_v2=True, - token_id=TestingApp.VALID_TOKEN, - user_id=TestingApp.USER_ID, - user_name='myusername', - project_id=TestingApp.PROJECT_ID, - role_list=["member"]) - self.auth_token_fixture.add_token_data( - is_v2=True, - token_id=TestingApp.VALID_TOKEN_2, - user_id=TestingApp.USER_ID_2, - user_name='myusername2', - project_id=TestingApp.PROJECT_ID_2, - role_list=["member"]) - - if self.auth: - self.conf.set_override("auth_mode", "keystone", group="api") - else: - self.conf.set_override("auth_mode", "noauth", group="api") + if self.auth_mode == "keystone": + self.auth_token_fixture = self.useFixture( + ksm_fixture.AuthTokenFixture()) + self.auth_token_fixture.add_token_data( + is_v2=True, + token_id=TestingApp.VALID_TOKEN_ADMIN, + 
user_id=TestingApp.USER_ID_ADMIN, + user_name='adminusername', + project_id=TestingApp.PROJECT_ID_ADMIN, + role_list=['admin']) + self.auth_token_fixture.add_token_data( + is_v2=True, + token_id=TestingApp.VALID_TOKEN, + user_id=TestingApp.USER_ID, + user_name='myusername', + project_id=TestingApp.PROJECT_ID, + role_list=["member"]) + self.auth_token_fixture.add_token_data( + is_v2=True, + token_id=TestingApp.VALID_TOKEN_2, + user_id=TestingApp.USER_ID_2, + user_name='myusername2', + project_id=TestingApp.PROJECT_ID_2, + role_list=["member"]) + + self.conf.set_override("auth_mode", self.auth_mode, group="api") self.app = TestingApp(app.load_app(conf=self.conf, indexer=self.index, @@ -152,7 +172,7 @@ class RestTest(tests_base.TestCase, testscenarios.TestWithScenarios): not_implemented_middleware=False), storage=self.storage, indexer=self.index, - auth=self.auth) + auth_mode=self.auth_mode) # NOTE(jd) Used at least by docs @staticmethod @@ -619,16 +639,15 @@ class ResourceTest(RestTest): self.resource = self.attributes.copy() # Set original_resource_id self.resource['original_resource_id'] = self.resource['id'] - if self.auth: - self.resource['created_by_user_id'] = TestingApp.USER_ID + self.resource['created_by_user_id'] = TestingApp.USER_ID + if self.auth_mode in ("keystone", "noauth"): self.resource['created_by_project_id'] = TestingApp.PROJECT_ID self.resource['creator'] = ( TestingApp.USER_ID + ":" + TestingApp.PROJECT_ID ) - else: - self.resource['created_by_user_id'] = None - self.resource['created_by_project_id'] = None - self.resource['creator'] = None + elif self.auth_mode == "basic": + self.resource['created_by_project_id'] = "" + self.resource['creator'] = TestingApp.USER_ID self.resource['ended_at'] = None self.resource['metrics'] = {} if 'user_id' not in self.resource: diff --git a/releasenotes/notes/noauth-keystone-compat-e8f760591d593f07.yaml b/releasenotes/notes/noauth-keystone-compat-e8f760591d593f07.yaml new file mode 100644 index 
00000000..0aaffc38 --- /dev/null +++ b/releasenotes/notes/noauth-keystone-compat-e8f760591d593f07.yaml @@ -0,0 +1,9 @@ +--- +upgrade: + - >- + The `auth_type` option has a new default value set to "basic". This mode + does not do any segregation and uses the standard HTTP `Authorization` + header for authentication. The old "noauth" authentication mechanism based + on the Keystone headers (`X-User-Id`, `X-Creator-Id` and `X-Roles`) and the + Keystone segregation rules, which was the default up to Gnocchi 3.0, is + still available. diff --git a/run-upgrade-tests.sh b/run-upgrade-tests.sh index 2b69558e..33fe24ae 100755 --- a/run-upgrade-tests.sh +++ b/run-upgrade-tests.sh @@ -91,6 +91,9 @@ pip install -q -U .[${GNOCCHI_VARIANT}] eval $(pifpaf run gnocchi --indexer-url $INDEXER_URL --storage-url $STORAGE_URL) +# Gnocchi 3.1 uses basic auth by default +export OS_AUTH_TYPE=gnocchi-basic +export GNOCCHI_USER=$GNOCCHI_USER_ID dump_data $GNOCCHI_DATA/new echo "* Checking output difference between Gnocchi $old_version and $new_version" diff --git a/setup.cfg b/setup.cfg index 3812d341..10297471 100644 --- a/setup.cfg +++ b/setup.cfg @@ -125,6 +125,7 @@ gnocchi.aggregates = gnocchi.rest.auth_helper = noauth = gnocchi.rest.auth_helper:NoAuthHelper keystone = gnocchi.rest.auth_helper:KeystoneAuthHelper + basic = gnocchi.rest.auth_helper:BasicAuthHelper console_scripts = gnocchi-upgrade = gnocchi.cli:upgrade diff --git a/tox.ini b/tox.ini index b0e37db1..d8382577 100644 --- a/tox.ini +++ b/tox.ini @@ -42,7 +42,7 @@ usedevelop = False setenv = GNOCCHI_VARIANT=test,postgresql,file deps = gnocchi[{env:GNOCCHI_VARIANT}]>=3.0,<3.1 pifpaf>=0.13 - gnocchiclient + gnocchiclient>=2.8.0 commands = pifpaf --env-prefix INDEXER run postgresql {toxinidir}/run-upgrade-tests.sh {posargs} [testenv:py27-mysql-ceph-upgrade-from-3.0] @@ -54,7 +54,7 @@ skip_install = True usedevelop = False setenv = GNOCCHI_VARIANT=test,mysql,ceph,ceph_recommended_lib deps = 
gnocchi[{env:GNOCCHI_VARIANT}]>=3.0,<3.1 - gnocchiclient + gnocchiclient>=2.8.0 pifpaf>=0.13 commands = pifpaf --env-prefix INDEXER run mysql -- pifpaf --env-prefix STORAGE run ceph {toxinidir}/run-upgrade-tests.sh {posargs} @@ -68,7 +68,7 @@ usedevelop = False setenv = GNOCCHI_VARIANT=test,postgresql,file deps = gnocchi[{env:GNOCCHI_VARIANT}]>=2.2,<2.3 pifpaf>=0.13 - gnocchiclient + gnocchiclient>=2.8.0 commands = pifpaf --env-prefix INDEXER run postgresql {toxinidir}/run-upgrade-tests.sh {posargs} [testenv:py27-mysql-ceph-upgrade-from-2.2] @@ -80,7 +80,7 @@ skip_install = True usedevelop = False setenv = GNOCCHI_VARIANT=test,mysql,ceph,ceph_recommended_lib deps = gnocchi[{env:GNOCCHI_VARIANT}]>=2.2,<2.3 - gnocchiclient + gnocchiclient>=2.8.0 pifpaf>=0.13 cradox # cradox is required because 2.2 extra names are incorrect -- GitLab From ab4d74cc8e42197bcd4d53a0d31eaa20794268a5 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Fri, 13 Jan 2017 15:26:21 +0100 Subject: [PATCH 0549/1483] ceph: enhance the documentation Change-Id: I713c1e1741c76018e845408b4ab9059e45feecba --- doc/source/install.rst | 18 ++++++++++++++---- 1 file changed, 14 insertions(+), 4 deletions(-) diff --git a/doc/source/install.rst b/doc/source/install.rst index e7260517..056e0677 100644 --- a/doc/source/install.rst +++ b/doc/source/install.rst @@ -67,11 +67,21 @@ install extra variants using, for example:: Ceph requirements ----------------- -Gnocchi leverages omap API of librados, but this is available in python binding -only since python-rados >= 9.1.0. To handle this, Gnocchi uses 'cradox' python -library which has exactly the same API but works with Ceph >= 0.80.0. +The ceph driver need to have a ceph user and a pool already created. 
They can +be created for example with: -If Ceph and python-rados are >= 9.1.0, cradox python library becomes optional +:: + + ceph osd pool create metrics 8 8 + ceph auth get-or-create client.gnocchi mon "allow r" osd "allow rwx pool=metrics" + + +Gnocchi leverages some librados features (omap, async, operation context) +available in python binding only since python-rados >= 10.1.0. To handle this, +Gnocchi uses 'cradox' python library which has exactly the same API but works +with Ceph >= 0.80.0. + +If Ceph and python-rados are >= 10.1.0, cradox python library becomes optional but is still recommended. -- GitLab From b0b8c1aeb2b582ab298802da8519b80a30ccd8e9 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Thu, 12 Jan 2017 16:41:27 +0100 Subject: [PATCH 0550/1483] tests: Fix upgrade script Currently the upgrade tests create only one resource instead of four. This change fixes that. Change-Id: I6c4ec1365228b1e092328ef1732f07b770e70c74 --- run-upgrade-tests.sh | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/run-upgrade-tests.sh b/run-upgrade-tests.sh index d2078c32..4ac340bd 100755 --- a/run-upgrade-tests.sh +++ b/run-upgrade-tests.sh @@ -19,7 +19,7 @@ dump_data(){ dir="$1" mkdir -p $dir echo "* Dumping measures aggregations to $dir" - for resource_id in $RESOURCE_IDS; do + for resource_id in ${RESOURCE_IDS[@]}; do for agg in min max mean sum ; do gnocchi measures show --aggregation $agg --resource-id $resource_id metric > $dir/${agg}.txt done @@ -30,14 +30,14 @@ inject_data() { echo "* Injecting measures in Gnocchi" # TODO(sileht): Generate better data that ensure we have enought split that cover all # situation - for resource_id in $RESOURCE_IDS; do + for resource_id in ${RESOURCE_IDS[@]}; do gnocchi resource create generic --attribute id:$resource_id -n metric:high >/dev/null done { echo -n '{' resource_sep="" - for resource_id in $RESOURCE_IDS; do + for resource_id in ${RESOURCE_IDS[@]}; do echo -n "$resource_sep \"$resource_id\": { 
\"metric\": [ " measures_sep="" for i in $(seq 0 10 288000); do -- GitLab From fece42914520a66dc6b613e43d502c5aa3fa714e Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Wed, 11 Jan 2017 15:53:10 +0100 Subject: [PATCH 0551/1483] rest: reject / as resource id and metric name This change reject with a 400 error any resource id or metric name with a '/'. Existing metric/resource are update to replace the '/' by a '_'. Change-Id: I7fb97b5439119ad74035003c66c2d62272f7097f --- .../versions/397987e38570_no_more_slash.py | 184 ++++++++++++++++++ gnocchi/rest/__init__.py | 2 + gnocchi/tests/gabbi/gabbits/metric.yaml | 11 ++ gnocchi/tests/gabbi/gabbits/resource.yaml | 28 +++ .../tests/gabbi/gabbits/transformedids.yaml | 11 ++ gnocchi/utils.py | 2 + .../notes/forbid-slash-b3ec2bc77cc34b49.yaml | 7 + run-upgrade-tests.sh | 37 +++- 8 files changed, 275 insertions(+), 7 deletions(-) create mode 100644 gnocchi/indexer/alembic/versions/397987e38570_no_more_slash.py create mode 100644 releasenotes/notes/forbid-slash-b3ec2bc77cc34b49.yaml diff --git a/gnocchi/indexer/alembic/versions/397987e38570_no_more_slash.py b/gnocchi/indexer/alembic/versions/397987e38570_no_more_slash.py new file mode 100644 index 00000000..e898e1a8 --- /dev/null +++ b/gnocchi/indexer/alembic/versions/397987e38570_no_more_slash.py @@ -0,0 +1,184 @@ +# Copyright 2017 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + +"""no-more-slash + +Revision ID: 397987e38570 +Revises: aba5a217ca9b +Create Date: 2017-01-11 16:32:40.421758 + +""" +import uuid + +from alembic import op +import six +import sqlalchemy as sa +import sqlalchemy_utils + +from gnocchi import utils + +# revision identifiers, used by Alembic. +revision = '397987e38570' +down_revision = 'aba5a217ca9b' +branch_labels = None +depends_on = None + +resource_type_table = sa.Table( + 'resource_type', + sa.MetaData(), + sa.Column('tablename', sa.String(35), nullable=False) +) + +resource_table = sa.Table( + 'resource', + sa.MetaData(), + sa.Column('id', + sqlalchemy_utils.types.uuid.UUIDType(), + nullable=False), + sa.Column('original_resource_id', sa.String(255)), + sa.Column('type', sa.String(255)) +) + +resourcehistory_table = sa.Table( + 'resource_history', + sa.MetaData(), + sa.Column('id', + sqlalchemy_utils.types.uuid.UUIDType(), + nullable=False), + sa.Column('original_resource_id', sa.String(255)) +) + +metric_table = sa.Table( + 'metric', + sa.MetaData(), + sa.Column('id', + sqlalchemy_utils.types.uuid.UUIDType(), + nullable=False), + sa.Column('name', sa.String(255)), + sa.Column('resource_id', sqlalchemy_utils.types.uuid.UUIDType()) + +) + + +uuidtype = sqlalchemy_utils.types.uuid.UUIDType() + + +def upgrade(): + connection = op.get_bind() + + resource_type_tables = {} + resource_type_tablenames = [ + rt.tablename + for rt in connection.execute(resource_type_table.select()) + if rt.tablename != "generic" + ] + + op.drop_constraint("fk_metric_resource_id_resource_id", "metric", + type_="foreignkey") + for table in resource_type_tablenames: + op.drop_constraint("fk_%s_id_resource_id" % table, table, + type_="foreignkey") + + resource_type_tables[table] = sa.Table( + table, + sa.MetaData(), + sa.Column('id', + sqlalchemy_utils.types.uuid.UUIDType(), + nullable=False), + ) + resource_type_tables["%s_history" % table] = sa.Table( + "%s_history" % table, + sa.MetaData(), + sa.Column('id', + 
sqlalchemy_utils.types.uuid.UUIDType(), + nullable=False), + ) + + for resource in connection.execute(resource_table.select().where( + resource_table.c.original_resource_id.like('%/%'))): + new_original_resource_id = resource.original_resource_id.replace( + '/', '_') + if six.PY2: + new_original_resource_id = new_original_resource_id.encode('utf-8') + new_id = sa.literal(uuidtype.process_bind_param( + str(uuid.uuid5(utils.RESOURCE_ID_NAMESPACE, + new_original_resource_id)), + connection.dialect)) + + # resource table + connection.execute( + resource_table.update().where( + resource_table.c.id == resource.id + ).values( + id=new_id, + original_resource_id=new_original_resource_id + ) + ) + # resource history table + connection.execute( + resourcehistory_table.update().where( + resourcehistory_table.c.id == resource.id + ).values( + id=new_id, + original_resource_id=new_original_resource_id + ) + ) + + if resource.type != "generic": + rtable = resource_type_tables[resource.type] + htable = resource_type_tables["%s_history" % resource.type] + + # resource table (type) + connection.execute( + rtable.update().where( + rtable.c.id == resource.id + ).values(id=new_id) + ) + # resource history table (type) + connection.execute( + htable.update().where( + htable.c.id == resource.id + ).values(id=new_id) + ) + + # Metric + connection.execute( + metric_table.update().where( + metric_table.c.resource_id == resource.id + ).values( + resource_id=new_id + ) + ) + + for table in resource_type_tablenames: + op.create_foreign_key("fk_%s_id_resource_id" % table, + table, "resource", + ("id",), ("id",), + ondelete="CASCADE") + + op.create_foreign_key("fk_metric_resource_id_resource_id", + "metric", "resource", + ("resource_id",), ("id",), + ondelete="SET NULL") + + for metric in connection.execute(metric_table.select().where( + metric_table.c.name.like("%/%"))): + connection.execute( + metric_table.update().where( + metric_table.c.id == metric.id + ).values( + 
name=metric.name.replace('/', '_'), + ) + ) diff --git a/gnocchi/rest/__init__.py b/gnocchi/rest/__init__.py index f534d60f..4070b337 100644 --- a/gnocchi/rest/__init__.py +++ b/gnocchi/rest/__init__.py @@ -488,6 +488,8 @@ class MetricsController(rest.RestController): archive_policy_name = definition.get('archive_policy_name') name = definition.get('name') + if name and '/' in name: + abort(400, "'/' is not supported in metric name") if archive_policy_name is None: try: ap = pecan.request.indexer.get_archive_policy_for_metric(name) diff --git a/gnocchi/tests/gabbi/gabbits/metric.yaml b/gnocchi/tests/gabbi/gabbits/metric.yaml index 1b4b9c8b..5b596a42 100644 --- a/gnocchi/tests/gabbi/gabbits/metric.yaml +++ b/gnocchi/tests/gabbi/gabbits/metric.yaml @@ -78,6 +78,17 @@ tests: $.name: disk.io.rate $.unit: B/s + - name: create metric with invalid name + POST: /v1/metric + request_headers: + content-type: application/json + data: + name: "disk/io/rate" + unit: "B/s" + status: 400 + response_strings: + - "'/' is not supported in metric name" + - name: create metric with name and over length unit POST: /v1/metric request_headers: diff --git a/gnocchi/tests/gabbi/gabbits/resource.yaml b/gnocchi/tests/gabbi/gabbits/resource.yaml index 83c71623..91b4b323 100644 --- a/gnocchi/tests/gabbi/gabbits/resource.yaml +++ b/gnocchi/tests/gabbi/gabbits/resource.yaml @@ -189,6 +189,20 @@ tests: - "Invalid input: required key not provided @ data[" - "'display_name']" + - name: post instance with invalid metric name + POST: $LAST_URL + request_headers: + x-user-id: 0fbb231484614b1a80131fc22f6afc9c + x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea + content-type: application/json + data: + metrics: + "disk/iops": + archive_policy_name: medium + status: 400 + response_strings: + - "'/' is not supported in metric name" + - name: post instance resource POST: $LAST_URL request_headers: @@ -363,6 +377,20 @@ tests: response_json_paths: $.metrics.'disk.io.rate': $RESPONSE["$.metrics.'disk.io.rate'"] 
+ - name: patch instance with invalid metric name + PATCH: $LAST_URL + request_headers: + x-user-id: 0fbb231484614b1a80131fc22f6afc9c + x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea + content-type: application/json + data: + metrics: + "disk/iops": + archive_policy_name: medium + status: 400 + response_strings: + - "'/' is not supported in metric name" + # Failure modes for history - name: post instance history diff --git a/gnocchi/tests/gabbi/gabbits/transformedids.yaml b/gnocchi/tests/gabbi/gabbits/transformedids.yaml index a4d9a357..b5ab2092 100644 --- a/gnocchi/tests/gabbi/gabbits/transformedids.yaml +++ b/gnocchi/tests/gabbi/gabbits/transformedids.yaml @@ -58,6 +58,17 @@ tests: archive_policy_name: medium status: 409 + - name: post new resource with invalid uuid + POST: /v1/resource/generic + data: + id: 'id-with-/' + user_id: 0fbb231484614b1a80131fc22f6afc9c + project_id: f3d41b770cc14f0bb94a1d5be9c0e3ea + status: 400 + response_strings: + - "Invalid input: not a valid value for dictionary value @ data[" + - "'id'] " + - name: post new resource non uuid POST: /v1/resource/generic data: diff --git a/gnocchi/utils.py b/gnocchi/utils.py index 59e0b56d..7adc8db9 100644 --- a/gnocchi/utils.py +++ b/gnocchi/utils.py @@ -40,6 +40,8 @@ RESOURCE_ID_NAMESPACE = uuid.UUID('0a7a15ff-aa13-4ac2-897c-9bdf30ce175b') def ResourceUUID(value): + if '/' in value: + raise ValueError("'/' is not supported in resource id") try: try: return uuid.UUID(value) diff --git a/releasenotes/notes/forbid-slash-b3ec2bc77cc34b49.yaml b/releasenotes/notes/forbid-slash-b3ec2bc77cc34b49.yaml new file mode 100644 index 00000000..5999cb7f --- /dev/null +++ b/releasenotes/notes/forbid-slash-b3ec2bc77cc34b49.yaml @@ -0,0 +1,7 @@ +--- +fixes: + - \'/\' in resource id and metric name have been accepted by mistake, because + they can be POSTed but not GETed/PATCHed/DELETEd. Now this char is forbidden + in resource id and metric name, REST api will return 400 if it presents. 
+ Metric name and resource id already present with a \'/\' have their \'/\' replaced + by \'_\'. diff --git a/run-upgrade-tests.sh b/run-upgrade-tests.sh index 4ac340bd..a14ae2e1 100755 --- a/run-upgrade-tests.sh +++ b/run-upgrade-tests.sh @@ -7,19 +7,25 @@ export GNOCCHI_USER_ID=99aae-4dc2-4fbc-b5b8-9688c470d9cc export GNOCCHI_PROJECT_ID=c8d27445-48af-457c-8e0d-1de7103eae1f export GNOCCHI_DATA=$(mktemp -d -t gnocchi.XXXX) +GDATE=$((which gdate >/dev/null && echo gdate) || echo date) + +old_version=$(pip freeze | sed -n '/gnocchi==/s/.*==\(.*\)/\1/p') +[ "${old_version:0:1}" == "3" ] && have_resource_type_post=1 + RESOURCE_IDS=( "5a301761-aaaa-46e2-8900-8b4f6fe6675a" "5a301761-bbbb-46e2-8900-8b4f6fe6675a" "5a301761-cccc-46e2-8900-8b4f6fe6675a" ) -GDATE=$((which gdate >/dev/null && echo gdate) || echo date) +[ "$have_resource_type_post" ] && RESOURCE_ID_EXT="5a301761/dddd/46e2/8900/8b4f6fe6675a" dump_data(){ dir="$1" mkdir -p $dir echo "* Dumping measures aggregations to $dir" - for resource_id in ${RESOURCE_IDS[@]}; do + gnocchi resource list > $dir/resources.list + for resource_id in ${RESOURCE_IDS[@]} $RESOURCE_ID_EXT; do for agg in min max mean sum ; do gnocchi measures show --aggregation $agg --resource-id $resource_id metric > $dir/${agg}.txt done @@ -30,14 +36,18 @@ inject_data() { echo "* Injecting measures in Gnocchi" # TODO(sileht): Generate better data that ensure we have enought split that cover all # situation + for resource_id in ${RESOURCE_IDS[@]}; do - gnocchi resource create generic --attribute id:$resource_id -n metric:high >/dev/null + gnocchi resource create generic --attribute id:$resource_id -n metric:high > /dev/null done + gnocchi resource-type create ext > /dev/null + gnocchi resource create ext --attribute id:$RESOURCE_ID_EXT -n metric:high > /dev/null + { echo -n '{' resource_sep="" - for resource_id in ${RESOURCE_IDS[@]}; do + for resource_id in ${RESOURCE_IDS[@]} $RESOURCE_ID_EXT; do echo -n "$resource_sep \"$resource_id\": { \"metric\": 
[ " measures_sep="" for i in $(seq 0 10 288000); do @@ -73,7 +83,6 @@ else STORAGE_URL=file://$GNOCCHI_DATA fi -old_version=$(pip freeze | sed -n '/gnocchi==/s/.*==\(.*\)/\1/p') if [ "${old_version:0:5}" == "2.2.0" ]; then # NOTE(sileht): temporary fix a gnocchi 2.2.0 bug # https://review.openstack.org/#/c/369011/ @@ -81,6 +90,7 @@ if [ "${old_version:0:5}" == "2.2.0" ]; then fi eval $(pifpaf run gnocchi --indexer-url $INDEXER_URL --storage-url $STORAGE_URL) +gnocchi resource delete $GNOCCHI_STATSD_RESOURCE_ID inject_data $GNOCCHI_DATA dump_data $GNOCCHI_DATA/old pifpaf_stop @@ -89,12 +99,25 @@ new_version=$(python setup.py --version) echo "* Upgrading Gnocchi from $old_version to $new_version" pip install -q -U .[${GNOCCHI_VARIANT}] - -eval $(pifpaf run gnocchi --indexer-url $INDEXER_URL --storage-url $STORAGE_URL) +eval $(pifpaf --debug run gnocchi --indexer-url $INDEXER_URL --storage-url $STORAGE_URL) # Gnocchi 3.1 uses basic auth by default export OS_AUTH_TYPE=gnocchi-basic export GNOCCHI_USER=$GNOCCHI_USER_ID + +gnocchi resource delete $GNOCCHI_STATSD_RESOURCE_ID + +RESOURCE_IDS=( + "5a301761-aaaa-46e2-8900-8b4f6fe6675a" + "5a301761-bbbb-46e2-8900-8b4f6fe6675a" + "5a301761-cccc-46e2-8900-8b4f6fe6675a" +) +# NOTE(sileht): / are now _ +[ "$have_resource_type_post" ] && RESOURCE_ID_EXT="5a301761_dddd_46e2_8900_8b4f6fe6675a" dump_data $GNOCCHI_DATA/new +# NOTE(sileht): change the output of the old gnocchi to compare with the new without '/' +sed -i -e "s,5a301761/dddd/46e2/8900/8b4f6fe6675a,5a301761_dddd_46e2_8900_8b4f6fe6675a,g" \ + -e "s,19235bb9-35ca-5f55-b7db-165cfb033c86,fe1bdabf-d94c-5b3a-af1e-06bdff53f228,g" $GNOCCHI_DATA/old/resources.list + echo "* Checking output difference between Gnocchi $old_version and $new_version" diff -uNr $GNOCCHI_DATA/old $GNOCCHI_DATA/new -- GitLab From ea9ae167e61e20e8417906b383ade7deaf529839 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Fri, 13 Jan 2017 13:06:12 +0100 Subject: [PATCH 0552/1483] mysql: fix timestamp 
upgrade mysql float to datetime upgrade was using from_unixtime function that depend on the locale. This change sets the locale to utf8 first before doing the migration Change-Id: I865c1f249990bd44369b33acb94d8609aefe4bab --- .../alembic/versions/5c4f93e5bb4_mysql_float_to_timestamp.py | 1 + 1 file changed, 1 insertion(+) diff --git a/gnocchi/indexer/alembic/versions/5c4f93e5bb4_mysql_float_to_timestamp.py b/gnocchi/indexer/alembic/versions/5c4f93e5bb4_mysql_float_to_timestamp.py index 9df79fae..5cc7412b 100644 --- a/gnocchi/indexer/alembic/versions/5c4f93e5bb4_mysql_float_to_timestamp.py +++ b/gnocchi/indexer/alembic/versions/5c4f93e5bb4_mysql_float_to_timestamp.py @@ -39,6 +39,7 @@ depends_on = None def upgrade(): bind = op.get_bind() if bind and bind.engine.name == "mysql": + op.execute("SET time_zone = '+00:00'") # NOTE(jd) So that crappy engine that is MySQL does not have "ALTER # TABLE … USING …". We need to copy everything and convert… for table_name, column_name in (("resource", "started_at"), -- GitLab From db02e6b170367e93fdb4e2e13644c585256f8199 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Fri, 13 Jan 2017 14:24:35 +0100 Subject: [PATCH 0553/1483] opts: list entry points with pkg_resources rather than stevedore Stevedore tries to *load* the plugin, whereas we just want the list of names. pkg_resources does not do that. Change-Id: Ib5d7d9c4e3133410d23f8718a3449df3f4347a08 --- gnocchi/opts.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/gnocchi/opts.py b/gnocchi/opts.py index 6e7dca4a..d2acd06e 100644 --- a/gnocchi/opts.py +++ b/gnocchi/opts.py @@ -13,12 +13,13 @@ # under the License. 
import copy import itertools +import operator import os +import pkg_resources import uuid from oslo_config import cfg from oslo_middleware import cors -from stevedore import extension import gnocchi.archive_policy import gnocchi.indexer @@ -86,8 +87,9 @@ def list_opts(): help='Path to API Paste configuration.'), cfg.StrOpt('auth_mode', default="basic", - choices=extension.ExtensionManager( - "gnocchi.rest.auth_helper").names(), + choices=list(map(operator.attrgetter("name"), + pkg_resources.iter_entry_points( + "gnocchi.rest.auth_helper"))), help='Authentication mode to use.'), cfg.IntOpt('max_limit', default=1000, -- GitLab From 2aae94ae4e29962764f047f0bb073dc9a2378f69 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Thu, 12 Jan 2017 16:46:07 +0100 Subject: [PATCH 0554/1483] storage: add more debug information to trace behaviour That should help when doing performances analysis to spot what is happening. Change-Id: Ic08c643117d45bf52f35a6f5be63fba5f1a1ac60 --- gnocchi/storage/_carbonara.py | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/gnocchi/storage/_carbonara.py b/gnocchi/storage/_carbonara.py index ba0ee626..d3f8eb32 100644 --- a/gnocchi/storage/_carbonara.py +++ b/gnocchi/storage/_carbonara.py @@ -321,6 +321,9 @@ class CarbonaraBasedStorage(storage.StorageDriver): if previous_oldest_mutable_key != oldest_mutable_key: for key in existing_keys: if previous_oldest_mutable_key <= key < oldest_mutable_key: + LOG.debug( + "Compressing previous split %s (%s) for metric %s", + key, aggregation, metric) # NOTE(jd) Rewrite it entirely for fun (and later for # compression). For that, we just pass None as split. 
self._store_timeserie_split( @@ -331,6 +334,9 @@ class CarbonaraBasedStorage(storage.StorageDriver): for key, split in ts.split(): if key >= oldest_key_to_keep: + LOG.debug( + "Storing split %s (%s) for metric %s", + key, aggregation, metric) self._store_timeserie_split( metric, key, split, aggregation, archive_policy_def, oldest_mutable_timestamp) @@ -340,6 +346,7 @@ class CarbonaraBasedStorage(storage.StorageDriver): raise NotImplementedError def delete_metric(self, metric, sync=False): + LOG.debug("Deleting metric %s", metric) with self._lock(metric.id)(blocking=sync): # If the metric has never been upgraded, we need to delete this # here too -- GitLab From b8ef321655462c46d4d5a64af520657d1d2999ee Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Thu, 12 Jan 2017 17:19:59 +0100 Subject: [PATCH 0555/1483] archive_policy: provide a boolean storage archive policy by default Change-Id: I02fe33fa6ba043b68eeed1499596c45cd4be2983 --- doc/source/architecture.rst | 32 +++++++++++++++---- gnocchi/archive_policy.py | 8 +++++ .../archive_policy_bool-9313cae7122c4a2f.yaml | 5 +++ 3 files changed, 39 insertions(+), 6 deletions(-) create mode 100644 releasenotes/notes/archive_policy_bool-9313cae7122c4a2f.yaml diff --git a/doc/source/architecture.rst b/doc/source/architecture.rst index e5313f47..6d89b0b7 100755 --- a/doc/source/architecture.rst +++ b/doc/source/architecture.rst @@ -121,21 +121,41 @@ consume twice CPU than just one definition (e.g. just 1 second granularity for Default archive policies ------------------------ -By default, 3 archive policies are created using the default archive policy -list (listed in `default_aggregation_methods`, i.e. mean, min, max, sum, std, -count): +By default, 3 archive policies are created when calling `gnocchi-upgrade`: +*low*, *medium* and *high*. The name both describes the storage space and CPU +usage needs. They use `default_aggregation_methods` which is by default set to +*mean*, *min*, *max*, *sum*, *std*, *count*. 
-- low (maximum estimated size per metric: 406 KiB) +A fourth archive policy named `bool` is also provided by default and is +designed to store only boolean values (i.e. 0 and 1). It only stores one data +point for each second (using the `last` aggregation method), with a one year +retention period. The maximum optimistic storage size is estimated based on the +assumption that no other value than 0 and 1 are sent as measures. If other +values are sent, the maximum pessimistic storage size is taken into account. + +- low * 5 minutes granularity over 30 days + * aggregation methods used: `default_aggregation_methods` + * maximum estimated size per metric: 406 KiB -- medium (maximum estimated size per metric: 887 KiB) +- medium * 1 minute granularity over 7 days * 1 hour granularity over 365 days + * aggregation methods used: `default_aggregation_methods` + * maximum estimated size per metric: 887 KiB -- high (maximum estimated size per metric: 1 057 KiB) +- high * 1 second granularity over 1 hour * 1 minute granularity over 1 week * 1 hour granularity over 1 year + * aggregation methods used: `default_aggregation_methods` + * maximum estimated size per metric: 1 057 KiB + +- bool + * 1 second granularity over 1 year + * aggregation methods used: *last* + * maximum optimistic size per metric: 1 539 KiB + * maximum pessimistic size per metric: 277 172 KiB diff --git a/gnocchi/archive_policy.py b/gnocchi/archive_policy.py index c4b1904d..54c64cc2 100644 --- a/gnocchi/archive_policy.py +++ b/gnocchi/archive_policy.py @@ -209,6 +209,14 @@ class ArchivePolicyItem(dict): DEFAULT_ARCHIVE_POLICIES = { + 'bool': ArchivePolicy( + "bool", 3600, [ + # 1 second resolution for 365 days + ArchivePolicyItem(granularity=1, + timespan=365 * 24 * 60 * 60), + ], + aggregation_methods=("last",), + ), 'low': ArchivePolicy( "low", 0, [ # 5 minutes resolution for 30 days diff --git a/releasenotes/notes/archive_policy_bool-9313cae7122c4a2f.yaml 
b/releasenotes/notes/archive_policy_bool-9313cae7122c4a2f.yaml new file mode 100644 index 00000000..682a4e4c --- /dev/null +++ b/releasenotes/notes/archive_policy_bool-9313cae7122c4a2f.yaml @@ -0,0 +1,5 @@ +--- +features: + - >- + A new archive policy named *bool* is provided by default. It provides a + cheap and easy way to store boolean measures (0 and 1). -- GitLab From 40b6492a12e39153706583594769f9cb4767bae0 Mon Sep 17 00:00:00 2001 From: gord chung Date: Mon, 16 Jan 2017 15:46:36 +0000 Subject: [PATCH 0556/1483] cleanup invalid upgrade errors upgrade throws error if no v2 unaggregated file exists. if we upgrade v3, these errors should be ignored. with the assumption, any storage upgrades require major version bump, if no v2 unagg files found, then assume all storage is at least v3 and upgrade of metric (from v2) can be skipped. Change-Id: Ia4802faa7f6f6a13866e01df8f6da55aa20805b5 --- gnocchi/storage/_carbonara.py | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/gnocchi/storage/_carbonara.py b/gnocchi/storage/_carbonara.py index ba0ee626..d1a93204 100644 --- a/gnocchi/storage/_carbonara.py +++ b/gnocchi/storage/_carbonara.py @@ -351,20 +351,23 @@ class CarbonaraBasedStorage(storage.StorageDriver): raise NotImplementedError def _check_for_metric_upgrade(self, metric): + # FIXME(gordc): this is only required for v2.x to v3.x storage upgrade. + # we should make storage version easily detectable rather than + # checking each metric individually lock = self._lock(metric.id) with lock: try: old_unaggregated = self._get_unaggregated_timeserie_and_unserialize_v2( # noqa metric) except (storage.MetricDoesNotExist, CorruptionError) as e: - # NOTE(jd) This case is not really possible – you can't - # have archives with splits and no unaggregated - # timeserie… - LOG.error( - "Unable to find unaggregated timeserie for " - "metric %s, unable to upgrade data: %s", + # This case can happen if v3.0 to v3.x or if no measures + # pushed. 
skip the rest of upgrade on metric. + LOG.debug( + "Unable to find v2 unaggregated timeserie for " + "metric %s, no data to upgrade: %s", metric.id, e) return + unaggregated = carbonara.BoundTimeSerie( ts=old_unaggregated.ts, block_size=metric.archive_policy.max_block_size, -- GitLab From b817a8b51dbb39859e11d0a02aa141057f5c61ba Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Mon, 19 Dec 2016 13:13:47 +0100 Subject: [PATCH 0557/1483] utils: allow ResourceUUID to convert UUID Change-Id: I8c26811577bc8175917d1860c73ec266439d49dd --- gnocchi/indexer/sqlalchemy.py | 3 +-- gnocchi/utils.py | 2 ++ 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/gnocchi/indexer/sqlalchemy.py b/gnocchi/indexer/sqlalchemy.py index 1e805ecd..964081d5 100644 --- a/gnocchi/indexer/sqlalchemy.py +++ b/gnocchi/indexer/sqlalchemy.py @@ -1186,8 +1186,7 @@ class QueryTransformer(object): if isinstance(attr.type, base.TimestampUTC): converter = utils.to_datetime - elif (isinstance(attr.type, sqlalchemy_utils.UUIDType) - and not isinstance(value, uuid.UUID)): + elif isinstance(attr.type, sqlalchemy_utils.UUIDType): converter = utils.ResourceUUID elif isinstance(attr.type, types.String): converter = six.text_type diff --git a/gnocchi/utils.py b/gnocchi/utils.py index 7adc8db9..cddbe0b4 100644 --- a/gnocchi/utils.py +++ b/gnocchi/utils.py @@ -40,6 +40,8 @@ RESOURCE_ID_NAMESPACE = uuid.UUID('0a7a15ff-aa13-4ac2-897c-9bdf30ce175b') def ResourceUUID(value): + if isinstance(value, uuid.UUID): + return value if '/' in value: raise ValueError("'/' is not supported in resource id") try: -- GitLab From b39eead9f03ae9f43dcaa13e80cd296d3c043c26 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Thu, 12 Jan 2017 16:59:02 +0100 Subject: [PATCH 0558/1483] Remove workaround to upgrade from 2.2.0 We've released 2.2.1. 
Change-Id: I739fd96b53d1271c5183154b6682a82d126ba2f2 --- 7bcd2a25.diff | 30 ------------------------------ run-upgrade-tests.sh | 6 ------ 2 files changed, 36 deletions(-) delete mode 100644 7bcd2a25.diff diff --git a/7bcd2a25.diff b/7bcd2a25.diff deleted file mode 100644 index 6a7b18e2..00000000 --- a/7bcd2a25.diff +++ /dev/null @@ -1,30 +0,0 @@ -From 7bcd2a259be0a35d9387a24329f55250efde3aec Mon Sep 17 00:00:00 2001 -From: Mehdi Abaakouk -Date: Mon, 12 Sep 2016 19:54:03 +0200 -Subject: [PATCH] ceph: Fix metricd start - -metricd can be started before api, in that case -metricd fail because the measure object don't yet exists. - -Change-Id: Id7822f16718e31d6a8916cec8a6b77194071a31e ---- - -diff --git a/gnocchi/storage/ceph.py b/gnocchi/storage/ceph.py -index 15e1dad..d2ea4f8 100644 ---- a/gnocchi/storage/ceph.py -+++ b/gnocchi/storage/ceph.py -@@ -167,8 +167,12 @@ - def _list_object_names_to_process(self, prefix=""): - with rados.ReadOpCtx() as op: - omaps, ret = self.ioctx.get_omap_vals(op, "", prefix, -1) -- self.ioctx.operate_read_op( -- op, self.MEASURE_PREFIX, flag=self.OMAP_READ_FLAGS) -+ try: -+ self.ioctx.operate_read_op( -+ op, self.MEASURE_PREFIX, flag=self.OMAP_READ_FLAGS) -+ except rados.ObjectNotFound: -+ # API have still written nothing -+ return () - # NOTE(sileht): after reading the libradospy, I'm - # not sure that ret will have the correct value - # get_omap_vals transforms the C int to python int diff --git a/run-upgrade-tests.sh b/run-upgrade-tests.sh index a14ae2e1..ae021e82 100755 --- a/run-upgrade-tests.sh +++ b/run-upgrade-tests.sh @@ -83,12 +83,6 @@ else STORAGE_URL=file://$GNOCCHI_DATA fi -if [ "${old_version:0:5}" == "2.2.0" ]; then - # NOTE(sileht): temporary fix a gnocchi 2.2.0 bug - # https://review.openstack.org/#/c/369011/ - patch -p2 -d $VIRTUAL_ENV/lib/python*/site-packages/gnocchi < 7bcd2a25.diff -fi - eval $(pifpaf run gnocchi --indexer-url $INDEXER_URL --storage-url $STORAGE_URL) gnocchi resource delete 
$GNOCCHI_STATSD_RESOURCE_ID inject_data $GNOCCHI_DATA -- GitLab From 9a4f4149b40e61bd18b1120a8f6bef9fb945208f Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Wed, 18 Jan 2017 14:26:01 +0100 Subject: [PATCH 0559/1483] indexer: fix migration script "no_more_slash" The newly introduced no_more_slash indexer upgrade script won't work with custom resource types. This change fixes that. Change-Id: Ie0fabbfde0e8a0de24680a9924e96c716e2799ba --- .../versions/397987e38570_no_more_slash.py | 27 +++++-------------- 1 file changed, 7 insertions(+), 20 deletions(-) diff --git a/gnocchi/indexer/alembic/versions/397987e38570_no_more_slash.py b/gnocchi/indexer/alembic/versions/397987e38570_no_more_slash.py index e898e1a8..77d58404 100644 --- a/gnocchi/indexer/alembic/versions/397987e38570_no_more_slash.py +++ b/gnocchi/indexer/alembic/versions/397987e38570_no_more_slash.py @@ -38,6 +38,7 @@ depends_on = None resource_type_table = sa.Table( 'resource_type', sa.MetaData(), + sa.Column('name', sa.String(255), nullable=False), sa.Column('tablename', sa.String(35), nullable=False) ) @@ -79,32 +80,25 @@ def upgrade(): connection = op.get_bind() resource_type_tables = {} - resource_type_tablenames = [ - rt.tablename + resource_type_tablenames = dict( + (rt.name, rt.tablename) for rt in connection.execute(resource_type_table.select()) if rt.tablename != "generic" - ] + ) op.drop_constraint("fk_metric_resource_id_resource_id", "metric", type_="foreignkey") - for table in resource_type_tablenames: + for name, table in resource_type_tablenames.items(): op.drop_constraint("fk_%s_id_resource_id" % table, table, type_="foreignkey") - resource_type_tables[table] = sa.Table( + resource_type_tables[name] = sa.Table( table, sa.MetaData(), sa.Column('id', sqlalchemy_utils.types.uuid.UUIDType(), nullable=False), ) - resource_type_tables["%s_history" % table] = sa.Table( - "%s_history" % table, - sa.MetaData(), - sa.Column('id', - sqlalchemy_utils.types.uuid.UUIDType(), - nullable=False), - ) for 
resource in connection.execute(resource_table.select().where( resource_table.c.original_resource_id.like('%/%'))): @@ -138,7 +132,6 @@ def upgrade(): if resource.type != "generic": rtable = resource_type_tables[resource.type] - htable = resource_type_tables["%s_history" % resource.type] # resource table (type) connection.execute( @@ -146,12 +139,6 @@ def upgrade(): rtable.c.id == resource.id ).values(id=new_id) ) - # resource history table (type) - connection.execute( - htable.update().where( - htable.c.id == resource.id - ).values(id=new_id) - ) # Metric connection.execute( @@ -162,7 +149,7 @@ def upgrade(): ) ) - for table in resource_type_tablenames: + for (name, table) in resource_type_tablenames.items(): op.create_foreign_key("fk_%s_id_resource_id" % table, table, "resource", ("id",), ("id",), -- GitLab From 5005d598bc6f8ebf99aeb931583693f429de437a Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Mon, 9 Jan 2017 08:17:17 +0100 Subject: [PATCH 0560/1483] rest: returns orignal resource id When batching measures the gnocchi id is returned in 'Unknown Resources' exception. While the caller have been using non UUID id. This change returns a dict instead of a list. With the mapping between the original id and the gnocchi one. The list to dict convertion allows to keep compatibility with the the current ceilometer dispatcher. (see: https://github.com/openstack/ceilometer/blob/master/ceilometer/dispatcher/gnocchi.py#L407) It can be update later to support both format the list and the dict and stay compatible with an old and the new Gnocchi exception. 
Depends-On: I384ca489055201f579e4549371677bff8c3d9ed8 Change-Id: I7b631d17c879b9cd5e56fa8e06b61d5348a182d5 --- gnocchi/rest/__init__.py | 18 +++++++---- .../tests/gabbi/gabbits/batch-measures.yaml | 30 ++++++++++++++++--- 2 files changed, 38 insertions(+), 10 deletions(-) diff --git a/gnocchi/rest/__init__.py b/gnocchi/rest/__init__.py index 4070b337..05514021 100644 --- a/gnocchi/rest/__init__.py +++ b/gnocchi/rest/__init__.py @@ -1332,7 +1332,7 @@ class SearchMetricController(rest.RestController): class ResourcesMetricsMeasuresBatchController(rest.RestController): MeasuresBatchSchema = voluptuous.Schema( - {utils.ResourceUUID: {six.text_type: MeasuresListSchema}} + {ResourceID: {six.text_type: MeasuresListSchema}} ) @pecan.expose('json') @@ -1341,9 +1341,12 @@ class ResourcesMetricsMeasuresBatchController(rest.RestController): known_metrics = [] unknown_metrics = [] - unknown_resources = set() - for resource_id in body: - names = body[resource_id].keys() + unknown_resources = [] + body_by_rid = {} + for original_resource_id, resource_id in body: + body_by_rid[resource_id] = body[(original_resource_id, + resource_id)] + names = body[(original_resource_id, resource_id)].keys() metrics = pecan.request.indexer.list_metrics( names=names, resource_id=resource_id) @@ -1369,7 +1372,9 @@ class ResourcesMetricsMeasuresBatchController(rest.RestController): except indexer.NamedMetricAlreadyExists as e: already_exists_names.append(e.metric) except indexer.NoSuchResource: - unknown_resources.add(resource_id) + unknown_resources.append({ + 'resource_id': six.text_type(resource_id), + 'original_resource_id': original_resource_id}) except indexer.IndexerException as e: # This catch NoSuchArchivePolicy, which is unlikely # be still possible @@ -1407,7 +1412,8 @@ class ResourcesMetricsMeasuresBatchController(rest.RestController): storage = pecan.request.storage.incoming with futures.ThreadPoolExecutor(max_workers=THREADS) as executor: list(executor.map(lambda x: 
storage.add_measures(*x), - ((metric, body[metric.resource_id][metric.name]) + ((metric, + body_by_rid[metric.resource_id][metric.name]) for metric in known_metrics))) pecan.response.status = 202 diff --git a/gnocchi/tests/gabbi/gabbits/batch-measures.yaml b/gnocchi/tests/gabbi/gabbits/batch-measures.yaml index 9eedcea5..6e9812ea 100644 --- a/gnocchi/tests/gabbi/gabbits/batch-measures.yaml +++ b/gnocchi/tests/gabbi/gabbits/batch-measures.yaml @@ -194,7 +194,7 @@ tests: $: - ["2015-03-06T14:34:12+00:00", 1.0, 12.0] - - name: push measurements to unknown named metrics and resource with create_metrics + - name: push measurements to unknown named metrics and resource with create_metrics with uuid resource id POST: /v1/batch/resources/metrics/measures?create_metrics=true request_headers: content-type: application/json @@ -216,9 +216,31 @@ tests: status: 400 response_json_paths: $.description.cause: "Unknown resources" - $.description.detail.`sorted`: - - "aaaaaaaa-d63b-4cdd-be89-111111111111" - - "bbbbbbbb-d63b-4cdd-be89-111111111111" + $.description.detail[/original_resource_id]: + - original_resource_id: "aaaaaaaa-d63b-4cdd-be89-111111111111" + resource_id: "aaaaaaaa-d63b-4cdd-be89-111111111111" + - original_resource_id: "bbbbbbbb-d63b-4cdd-be89-111111111111" + resource_id: "bbbbbbbb-d63b-4cdd-be89-111111111111" + + - name: push measurements to unknown named metrics and resource with create_metrics with non uuid resource id + POST: /v1/batch/resources/metrics/measures?create_metrics=true + request_headers: + content-type: application/json + accept: application/json + data: + foobar: + auto.test: + - timestamp: "2015-03-06T14:33:57" + value: 43.1 + - timestamp: "2015-03-06T14:34:12" + value: 12 + + status: 400 + response_json_paths: + $.description.cause: "Unknown resources" + $.description.detail: + - resource_id: "301dbf9a-4fce-52b6-9010-4484c469dcec" + original_resource_id: "foobar" - name: push measurements to named metrics and resource with create_metrics with 
wrong measure objects POST: /v1/batch/resources/metrics/measures?create_metrics=true -- GitLab From 7ceed2e4c12bdb52ca4239f4faa6a87be7409cce Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Wed, 18 Jan 2017 19:45:58 +0100 Subject: [PATCH 0561/1483] add metricd tester for profiling Change-Id: I25c4d441d4f7a41dd85eb9feb795923af844dcec --- gnocchi/cli.py | 28 ++++++++++++++++++++++++++-- 1 file changed, 26 insertions(+), 2 deletions(-) diff --git a/gnocchi/cli.py b/gnocchi/cli.py index 7ba3b229..96c3a1c7 100644 --- a/gnocchi/cli.py +++ b/gnocchi/cli.py @@ -317,6 +317,30 @@ class MetricdServiceManager(cotyledon.ServiceManager): self.queue.close() +def metricd_tester(conf): + # NOTE(sileht): This method is designed to be profiled, we + # want to avoid issues with profiler and os.fork(), that + # why we don't use the MetricdServiceManager. + index = indexer.get_driver(conf) + index.connect() + s = storage.get_driver(conf) + metrics = s.incoming.list_metric_with_measures_to_process( + conf.stop_after_processing_metrics, 0) + s.process_new_measures(index, metrics, True) + + def metricd(): - conf = service.prepare_service() - MetricdServiceManager(conf).run() + conf = cfg.ConfigOpts() + conf.register_cli_opts([ + cfg.IntOpt("stop-after-processing-metrics", + default=0, + min=0, + help="Number of metrics to process without workers, " + "for testing purpose"), + ]) + conf = service.prepare_service(conf=conf) + + if conf.stop_after_processing_metrics: + metricd_tester(conf) + else: + MetricdServiceManager(conf).run() -- GitLab From ff5bbc42a1db08e53a6d6022cf8076cf58be376f Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Tue, 17 Jan 2017 11:46:48 +0100 Subject: [PATCH 0562/1483] carbonara: prepare datetime for pandas.to_datetime() We don't need the slow pandas magic to handle our datetime format. This change built directly numpy array with the correct format and then pass it to pandas. 
This increases boundtimeserie Unserialization speed ~12% This increases aggregatedtimeserie Unserialization speed ~13-15% Change-Id: I034c73346bb5ecf738f6e6b437b5aca6d6cd2d89 --- gnocchi/carbonara.py | 18 ++++++++++++------ 1 file changed, 12 insertions(+), 6 deletions(-) diff --git a/gnocchi/carbonara.py b/gnocchi/carbonara.py index 62eb1567..e8d9c8af 100644 --- a/gnocchi/carbonara.py +++ b/gnocchi/carbonara.py @@ -28,6 +28,7 @@ import struct import time import lz4 +import numpy import pandas import six @@ -129,7 +130,8 @@ class TimeSerie(object): @staticmethod def _timestamps_and_values_from_dict(values): - timestamps = pandas.to_datetime(list(values.keys()), unit='ns') + timestamps = numpy.array(list(values.keys()), dtype='datetime64[ns]') + timestamps = pandas.to_datetime(timestamps) v = list(values.values()) if v: return timestamps, v @@ -239,11 +241,12 @@ class BoundTimeSerie(TimeSerie): start = deserial[0] timestamps = [start] for delta in itertools.islice(deserial, 1, nb_points): - ts = start + delta - timestamps.append(ts) - start = ts + start += delta + timestamps.append(start) + timestamps = numpy.array(timestamps, dtype='datetime64[ns]') + return cls.from_data( - pandas.to_datetime(timestamps, unit='ns'), + pandas.to_datetime(timestamps), deserial[nb_points:], block_size=block_size, back_window=back_window) @@ -508,6 +511,7 @@ class AggregatedTimeSerie(TimeSerie): @classmethod def unserialize(cls, data, start, agg_method, sampling): x, y = [], [] + start = float(start) if data: if cls.is_compressed(data): @@ -542,7 +546,9 @@ class AggregatedTimeSerie(TimeSerie): x.append(val) y.append(start + (i * sampling)) - y = pandas.to_datetime(y, unit='s') + y = numpy.array(y, dtype='float64') * 10e8 + y = numpy.array(y, dtype='datetime64[ns]') + y = pandas.to_datetime(y) return cls.from_data(sampling, agg_method, y, x) def get_split_key(self, timestamp=None): -- GitLab From eaa16814a522c659215112d46d7861fdfe46317b Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk 
Date: Tue, 17 Jan 2017 21:42:45 +0100 Subject: [PATCH 0563/1483] carbonara: remove a pandas.iteritems() pandas.Series.iteritems() is damn slow, this use numpy matrix to create the uncompressed binary serialization. Performance for uncompressed serialization increase is ~400% Performance for compressed serialization increase is ~700% Change-Id: Id395d971fa4e52fa989255dc14b952100ef71a99 --- gnocchi/carbonara.py | 32 +++++++++++++++++++------------- 1 file changed, 19 insertions(+), 13 deletions(-) diff --git a/gnocchi/carbonara.py b/gnocchi/carbonara.py index e8d9c8af..473e95b2 100644 --- a/gnocchi/carbonara.py +++ b/gnocchi/carbonara.py @@ -589,11 +589,10 @@ class AggregatedTimeSerie(TimeSerie): # initialize list to store alternating delimiter, float entries if compressed: # NOTE(jd) Use a double delta encoding for timestamps - timestamps = [] - for i in self.ts.index: - v = i.value - timestamps.append(int((v - start) // offset_div)) - start = v + timestamps = numpy.insert( + numpy.diff(self.ts.index) // offset_div, + 0, int((self.first.value - start) // offset_div)) + timestamps = list(numpy.array(timestamps, dtype='int')) values = self.ts.values.tolist() return None, b"c" + lz4.dumps(struct.pack( '<' + 'H' * len(timestamps) + 'd' * len(values), @@ -608,14 +607,21 @@ class AggregatedTimeSerie(TimeSerie): # series runs until and initialize list to store alternating # delimiter, float entries first = self.first.value # NOTE(jd) needed because faster - e_offset = int( - (self.last.value - first) // offset_div) + 1 - serial = [False] * e_offset * 2 - for i, v in self.ts.iteritems(): - # overwrite zero padding with real points and set flag True - loc = int((i.value - first) // offset_div) * 2 - serial[loc] = True - serial[loc + 1] = v + e_offset = int((self.last.value - first) // offset_div) + 1 + + # Fill everything with zero + serial = numpy.zeros(e_offset * 2, dtype='float64') + + # Get location of ones + locs = (numpy.cumsum(numpy.diff(self.ts.index)) // offset_div) 
* 2 + locs = numpy.insert(locs, 0, 0) + locs = numpy.array(locs, dtype='int') + + # extract values + serial[locs] = numpy.ones(len(self.ts), dtype='float64') + serial[locs + 1] = numpy.array(self.ts.values.tolist(), + dtype='float64') + offset = int((first - start) // offset_div) * self.PADDED_SERIAL_LEN return offset, struct.pack('<' + '?d' * e_offset, *serial) -- GitLab From ddf69cf6607d1c24f40027e9bca9c7aebc32c56f Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Tue, 17 Jan 2017 23:37:08 +0100 Subject: [PATCH 0564/1483] carbonara: handle timestamps from struct with numpy This increases performance for unserialize aggregatedtimeseries ~28% This increases performance for unserialize boundtimeseries ~19% This increases performance for serialize boundtimeseries ~400% This increases performance for serialize aggregatedtimeseries ~56% Change-Id: I839fe2be03e2e5466e4a77570388fb6343f79d71 --- gnocchi/carbonara.py | 34 ++++++++++++---------------------- 1 file changed, 12 insertions(+), 22 deletions(-) diff --git a/gnocchi/carbonara.py b/gnocchi/carbonara.py index 473e95b2..f403340a 100644 --- a/gnocchi/carbonara.py +++ b/gnocchi/carbonara.py @@ -18,7 +18,6 @@ import datetime import functools -import itertools import logging import math import numbers @@ -238,11 +237,8 @@ class BoundTimeSerie(TimeSerie): uncompressed) except struct.error: raise InvalidData - start = deserial[0] - timestamps = [start] - for delta in itertools.islice(deserial, 1, nb_points): - start += delta - timestamps.append(start) + timestamps = numpy.cumsum(numpy.array(deserial[:nb_points], + dtype='int')) timestamps = numpy.array(timestamps, dtype='datetime64[ns]') return cls.from_data( @@ -253,12 +249,9 @@ class BoundTimeSerie(TimeSerie): def serialize(self): # NOTE(jd) Use a double delta encoding for timestamps - timestamps = [self.first.value] - start = self.first.value - for i in self.ts.index[1:]: - v = i.value - timestamps.append(v - start) - start = v + timestamps = 
numpy.insert(numpy.diff(self.ts.index), + 0, self.first.value) + timestamps = list(numpy.array(timestamps, dtype='int')) values = self.ts.values.tolist() return lz4.dumps(struct.pack( '<' + 'Q' * len(timestamps) + 'd' * len(values), @@ -524,10 +517,9 @@ class AggregatedTimeSerie(TimeSerie): uncompressed) except struct.error: raise InvalidData - for delta in itertools.islice(deserial, nb_points): - ts = start + (delta * sampling) - y.append(ts) - start = ts + y = numpy.cumsum( + numpy.array(deserial[:nb_points]) * sampling, + ) + start x = deserial[nb_points:] else: # Padded format @@ -539,12 +531,10 @@ class AggregatedTimeSerie(TimeSerie): except struct.error: raise InvalidData() # alternating split into 2 list and drop items with False flag - for i, val in itertools.compress( - six.moves.zip(six.moves.range(nb_points), - deserial[1::2]), - deserial[::2]): - x.append(val) - y.append(start + (i * sampling)) + everything = numpy.array(deserial, dtype='float64') + index = numpy.nonzero(everything[::2])[0] + y = index * sampling + start + x = everything[1::2][index] y = numpy.array(y, dtype='float64') * 10e8 y = numpy.array(y, dtype='datetime64[ns]') -- GitLab From 5596bdea4848d0594990a5adde74045637e69431 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Wed, 18 Jan 2017 08:31:26 +0100 Subject: [PATCH 0565/1483] carbonara: Don't use clean_ts() Checking if the index have duplicate have a high cost. And everything is already ordered and unique. So we don't need to do this on each timeserie creation. We have two place where we need to order the series. When new data are added (set_values()), when we build the object from v2 format (from_data()). This increases the perf ~33% for each kind of timeseries objects. 
Change-Id: If530df7a3da07555eb6d8447fa17c72adc6015a6 --- gnocchi/carbonara.py | 15 ++++++++++----- gnocchi/storage/_carbonara.py | 3 ++- gnocchi/tests/test_carbonara.py | 7 +++---- 3 files changed, 15 insertions(+), 10 deletions(-) diff --git a/gnocchi/carbonara.py b/gnocchi/carbonara.py index f403340a..9ecb1364 100644 --- a/gnocchi/carbonara.py +++ b/gnocchi/carbonara.py @@ -95,7 +95,7 @@ class TimeSerie(object): def __init__(self, ts=None): if ts is None: ts = pandas.Series() - self.ts = self.clean_ts(ts) + self.ts = ts @staticmethod def clean_ts(ts): @@ -106,8 +106,12 @@ class TimeSerie(object): return ts @classmethod - def from_data(cls, timestamps=None, values=None): - return cls(pandas.Series(values, timestamps)) + def from_data(cls, timestamps=None, values=None, clean=False): + ts = pandas.Series(values, timestamps) + if clean: + # For format v2 + ts = cls.clean_ts(ts) + return cls(ts) @classmethod def from_tuples(cls, timestamps_values): @@ -293,6 +297,7 @@ class BoundTimeSerie(TimeSerie): seconds=i * random.randint(1, 10), microseconds=random.randint(1, 999999)) for i in six.moves.range(points)]) + pts = pts.sort_index() ts = cls(ts=pts) t0 = time.time() for i in six.moves.range(serialize_times): @@ -409,8 +414,7 @@ class AggregatedTimeSerie(TimeSerie): PADDED_SERIAL_LEN = struct.calcsize(" Date: Wed, 18 Jan 2017 08:54:53 +0100 Subject: [PATCH 0566/1483] carbonara: use numpy for serialization This change uses numpy for serialisation instead of struct. This increases serialization perf for compressed aggregated timeseries by 310%. This increases serialization perf for uncompressed aggregated timeseries by 100%. This increases serialization perf for bound timeseries by 261%. 
More details about the whole diff before and after all the patches in the branch: before: http://paste.openstack.org/show/595291/ after: http://paste.openstack.org/show/595333/ Change-Id: I94957f0744cf9eba471c4bf1a75c5bda4557f5ca --- gnocchi/carbonara.py | 45 ++++++++++++++++++++++++-------------------- 1 file changed, 25 insertions(+), 20 deletions(-) diff --git a/gnocchi/carbonara.py b/gnocchi/carbonara.py index 9ecb1364..0e8512d4 100644 --- a/gnocchi/carbonara.py +++ b/gnocchi/carbonara.py @@ -28,6 +28,7 @@ import time import lz4 import numpy +import numpy.lib.recfunctions import pandas import six @@ -255,11 +256,11 @@ class BoundTimeSerie(TimeSerie): # NOTE(jd) Use a double delta encoding for timestamps timestamps = numpy.insert(numpy.diff(self.ts.index), 0, self.first.value) - timestamps = list(numpy.array(timestamps, dtype='int')) - values = self.ts.values.tolist() - return lz4.dumps(struct.pack( - '<' + 'Q' * len(timestamps) + 'd' * len(values), - *(timestamps + values))) + timestamps = numpy.array(timestamps, dtype='uint64') + values = numpy.array(self.ts.values, dtype='float64') + payload = (timestamps.astype(' Date: Wed, 18 Jan 2017 11:32:19 +0100 Subject: [PATCH 0567/1483] carbonara: use numpy for unserialization This change uses numpy for unserialisation instead of struct. This increases unserialization perf for compressed aggregated timeseries by 279%. This increases unserialization perf for uncompressed aggregated timeseries by 311%. This increases unserialization perf for bound timeseries by 91%. 
More details about the whole diff before and after all the patches in the branch: Change-Id: If7d3f7161c5cbb0a1ae51813997434bd362a31b8 before: http://paste.openstack.org/show/595291/ after: http://paste.openstack.org/show/595345/ --- gnocchi/carbonara.py | 53 ++++++++++++++++++++++---------------------- 1 file changed, 27 insertions(+), 26 deletions(-) diff --git a/gnocchi/carbonara.py b/gnocchi/carbonara.py index 0e8512d4..f65f1296 100644 --- a/gnocchi/carbonara.py +++ b/gnocchi/carbonara.py @@ -230,6 +230,7 @@ class BoundTimeSerie(TimeSerie): self._truncate() _SERIALIZATION_TIMESTAMP_VALUE_LEN = struct.calcsize(" Date: Thu, 19 Jan 2017 09:25:35 +0100 Subject: [PATCH 0568/1483] carbonara: Add benchmark for split() Add some number about split() method of carbonara. Change-Id: I032be66a8f15ef4dbefd2c7486a305cf8929203e --- gnocchi/carbonara.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/gnocchi/carbonara.py b/gnocchi/carbonara.py index f65f1296..96f9a601 100644 --- a/gnocchi/carbonara.py +++ b/gnocchi/carbonara.py @@ -739,6 +739,12 @@ class AggregatedTimeSerie(TimeSerie): % (((points * 2 * 8) / ((t1 - t0) / serialize_times)) / (1024.0 * 1024.0))) + t0 = time.time() + for i in six.moves.range(serialize_times): + list(ts.split()) + t1 = time.time() + print(" split() speed: %.8f s" % ((t1 - t0) / serialize_times)) + @staticmethod def aggregated(timeseries, aggregation, from_timestamp=None, to_timestamp=None, needed_percent_of_overlap=100.0, -- GitLab From e1219e2997ab966b2a394e1524c2288ca04e0c9f Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Fri, 20 Jan 2017 08:06:03 +0100 Subject: [PATCH 0569/1483] indexer: fix datetime with mysql >= 5.7.17 The upgrade script 5c4f93e5bb4_mysql_float_to_timestamp haven't set a default for new datetime column make upgrade on mysql >= 5.7.17 to fail. This set a server default for datetime columns. 
Change-Id: I8bd16eaa202e8e63ff38baa0ff00c20e4c639148 --- .../5c4f93e5bb4_mysql_float_to_timestamp.py | 53 ++++++++++++------- gnocchi/indexer/sqlalchemy_base.py | 7 +++ 2 files changed, 41 insertions(+), 19 deletions(-) diff --git a/gnocchi/indexer/alembic/versions/5c4f93e5bb4_mysql_float_to_timestamp.py b/gnocchi/indexer/alembic/versions/5c4f93e5bb4_mysql_float_to_timestamp.py index 5cc7412b..b2f72b44 100644 --- a/gnocchi/indexer/alembic/versions/5c4f93e5bb4_mysql_float_to_timestamp.py +++ b/gnocchi/indexer/alembic/versions/5c4f93e5bb4_mysql_float_to_timestamp.py @@ -36,33 +36,40 @@ branch_labels = None depends_on = None +timestamp_default = '1000-01-01 00:00:00.000000' + + def upgrade(): bind = op.get_bind() - if bind and bind.engine.name == "mysql": + + if bind.engine.name == "mysql": op.execute("SET time_zone = '+00:00'") - # NOTE(jd) So that crappy engine that is MySQL does not have "ALTER - # TABLE … USING …". We need to copy everything and convert… - for table_name, column_name in (("resource", "started_at"), - ("resource", "ended_at"), - ("resource", "revision_start"), - ("resource_history", "started_at"), - ("resource_history", "ended_at"), - ("resource_history", "revision_start"), - ("resource_history", "revision_end"), - ("resource_type", "updated_at")): - nullable = column_name == "ended_at" + # NOTE(jd) So that crappy engine that is MySQL does not have "ALTER + # TABLE … USING …". 
We need to copy everything and convert… + for table_name, column_name in (("resource", "started_at"), + ("resource", "ended_at"), + ("resource", "revision_start"), + ("resource_history", "started_at"), + ("resource_history", "ended_at"), + ("resource_history", "revision_start"), + ("resource_history", "revision_end"), + ("resource_type", "updated_at")): - existing_type = sa.types.DECIMAL( - precision=20, scale=6, asdecimal=True) + nullable = column_name == "ended_at" + server_default = None if nullable else timestamp_default + + if bind.engine.name == "mysql": existing_col = sa.Column( column_name, - existing_type, + sa.types.DECIMAL(precision=20, scale=6, asdecimal=True), nullable=nullable) temp_col = sa.Column( column_name + "_ts", sqlalchemy_base.TimestampUTC(), - nullable=nullable) + nullable=nullable, + server_default=server_default, + ) op.add_column(table_name, temp_col) t = sa.sql.table(table_name, existing_col, temp_col) op.execute(t.update().values( @@ -70,8 +77,16 @@ def upgrade(): op.drop_column(table_name, column_name) op.alter_column(table_name, column_name + "_ts", - nullable=nullable, - type_=sqlalchemy_base.TimestampUTC(), + existing_type=sqlalchemy_base.TimestampUTC(), existing_nullable=nullable, - existing_type=existing_type, + existing_server_default=server_default, + server_default=server_default, new_column_name=column_name) + else: + op.alter_column( + table_name, + column_name, + existing_type=sqlalchemy_base.TimestampUTC(), + existing_nullable=nullable, + existing_server_default=None, + server_default=None if nullable else timestamp_default) diff --git a/gnocchi/indexer/sqlalchemy_base.py b/gnocchi/indexer/sqlalchemy_base.py index db1a1408..6170c23a 100644 --- a/gnocchi/indexer/sqlalchemy_base.py +++ b/gnocchi/indexer/sqlalchemy_base.py @@ -43,6 +43,9 @@ COMMON_TABLES_ARGS = {'mysql_charset': "utf8", 'mysql_engine': "InnoDB"} +timestamp_default = sqlalchemy.text("'1000-01-01 00:00:00.000000'") + + class 
PreciseTimestamp(types.TypeDecorator): """Represents a timestamp precise to the microsecond. @@ -284,6 +287,7 @@ class ResourceType(Base, GnocchiBase, resource_type.ResourceType): # MySQL is not a Timestamp, so it would # not store a timestamp but a date as an # integer. + server_default=timestamp_default, default=lambda: utils.utcnow()) def to_baseclass(self): @@ -331,8 +335,10 @@ class ResourceMixin(ResourceJsonifier): creator = sqlalchemy.Column(sqlalchemy.String(255)) started_at = sqlalchemy.Column(TimestampUTC, nullable=False, + server_default=timestamp_default, default=lambda: utils.utcnow()) revision_start = sqlalchemy.Column(TimestampUTC, nullable=False, + server_default=timestamp_default, default=lambda: utils.utcnow()) ended_at = sqlalchemy.Column(TimestampUTC) user_id = sqlalchemy.Column(sqlalchemy.String(255)) @@ -374,6 +380,7 @@ class ResourceHistory(ResourceMixin, Base, GnocchiBase): name="fk_rh_id_resource_id"), nullable=False) revision_end = sqlalchemy.Column(TimestampUTC, nullable=False, + server_default=timestamp_default, default=lambda: utils.utcnow()) metrics = sqlalchemy.orm.relationship( Metric, primaryjoin="Metric.resource_id == ResourceHistory.id", -- GitLab From a5e9cd52fae1a424bc1b481ca9d13ed52408827c Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Fri, 20 Jan 2017 13:42:29 +0100 Subject: [PATCH 0570/1483] Remove py{27,35} from valid tox targets It appears people are trying to run those but they are not valid. Let's remove them from the list of envlist at list. 
Change-Id: I21bc5bf3c3852302e75f2b2b20a22741c533eb5d Closes-Bug: #1657879 --- tox.ini | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tox.ini b/tox.ini index 355d6b80..da873315 100644 --- a/tox.ini +++ b/tox.ini @@ -1,6 +1,6 @@ [tox] minversion = 1.8 -envlist = py{35,27},py{35,27}-{postgresql,mysql}{,-all,-file,-swift,-ceph,-s3},pep8,bashate,py35-postgresql-file-upgrade-from-2.2,py27-mysql-ceph-upgrade-from-2.2 +envlist = py{35,27}-{postgresql,mysql}{,-all,-file,-swift,-ceph,-s3},pep8,bashate,py35-postgresql-file-upgrade-from-2.2,py27-mysql-ceph-upgrade-from-2.2 [testenv] usedevelop = True -- GitLab From ce79e4b30705e5ee7c277a02ee86e55d514780ff Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Fri, 20 Jan 2017 16:57:12 +0100 Subject: [PATCH 0571/1483] Remove non-existent -all suffix in tox targets Change-Id: Iab2a8aa8f056c95fcb01537b33d336e337b54c1b --- tox.ini | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tox.ini b/tox.ini index da873315..a694ff7d 100644 --- a/tox.ini +++ b/tox.ini @@ -1,6 +1,6 @@ [tox] minversion = 1.8 -envlist = py{35,27}-{postgresql,mysql}{,-all,-file,-swift,-ceph,-s3},pep8,bashate,py35-postgresql-file-upgrade-from-2.2,py27-mysql-ceph-upgrade-from-2.2 +envlist = py{35,27}-{postgresql,mysql}{,-file,-swift,-ceph,-s3},pep8,bashate,py35-postgresql-file-upgrade-from-2.2,py27-mysql-ceph-upgrade-from-2.2 [testenv] usedevelop = True -- GitLab From c7b4f407fc9e9e9a97d965acf89e6d3414cfefc1 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Wed, 18 Jan 2017 21:15:57 +0100 Subject: [PATCH 0572/1483] carbonara: don't use groupby for split We don't need to groupby for the split, the series is already sorted. The new version just split the timeseries with numpy directly. This is x100 faster This also remove mark the new timeseries as clean. Because their are already ordered and unique. 
Change-Id: Ibd4d6deda22cbe4a13454d3e52afe5f06489af33 --- gnocchi/carbonara.py | 21 ++++++++++++++++----- requirements.txt | 2 +- 2 files changed, 17 insertions(+), 6 deletions(-) diff --git a/gnocchi/carbonara.py b/gnocchi/carbonara.py index 96f9a601..f05aafb0 100644 --- a/gnocchi/carbonara.py +++ b/gnocchi/carbonara.py @@ -461,12 +461,23 @@ class AggregatedTimeSerie(TimeSerie): return aggregation_method_func_name, q def split(self): - groupby = self.ts.groupby(functools.partial( - SplitKey.from_timestamp_and_sampling, sampling=self.sampling)) - for group, ts in groupby: - yield (SplitKey(group, self.sampling), + # NOTE(sileht): We previously use groupby with + # SplitKey.from_timestamp_and_sampling, but + # this is slow because pandas can do that on any kind DataFrame + # but we have ordered timestamps, so don't need + # to iter the whole series. + freq = self.sampling * SplitKey.POINTS_PER_SPLIT + ix = numpy.array(self.ts.index, 'float64') / 10e8 + keys, counts = numpy.unique((ix // freq) * freq, return_counts=True) + start = 0 + for key, count in six.moves.zip(keys, counts): + end = start + count + if key == -0.0: + key = abs(key) + yield (SplitKey(key, self.sampling), AggregatedTimeSerie(self.sampling, self.aggregation_method, - ts)) + self.ts[start:end])) + start = end @classmethod def from_timeseries(cls, timeseries, sampling, aggregation_method, diff --git a/requirements.txt b/requirements.txt index a341dcf4..129659ae 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,5 +1,5 @@ pbr -numpy +numpy>=1.9.0 iso8601 oslo.config>=2.6.0 oslo.log>=2.3.0 -- GitLab From ed028da65cae3fcd41295ec9522ad0a70ee26db7 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Thu, 19 Jan 2017 14:47:00 +0100 Subject: [PATCH 0573/1483] carbonara: add merge() benchmark Change-Id: I3907c50509fb1136469b95ddc8f60122746dbc6f --- gnocchi/carbonara.py | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/gnocchi/carbonara.py b/gnocchi/carbonara.py index f05aafb0..1c0b32a9 
100644 --- a/gnocchi/carbonara.py +++ b/gnocchi/carbonara.py @@ -756,6 +756,18 @@ class AggregatedTimeSerie(TimeSerie): t1 = time.time() print(" split() speed: %.8f s" % ((t1 - t0) / serialize_times)) + # NOTE(sileht): propose a new series with half overload timestamps + pts = ts.ts.copy(deep=True) + tsbis = cls(ts=pts, sampling=sampling, aggregation_method='mean') + tsbis.ts.reindex(tsbis.ts.index - + datetime.timedelta(seconds=sampling * points / 2)) + + t0 = time.time() + for i in six.moves.range(serialize_times): + ts.merge(tsbis) + t1 = time.time() + print(" merge() speed: %.8f s" % ((t1 - t0) / serialize_times)) + @staticmethod def aggregated(timeseries, aggregation, from_timestamp=None, to_timestamp=None, needed_percent_of_overlap=100.0, -- GitLab From fdefb4110d7cba8621f49cf70c9793a34413ac5f Mon Sep 17 00:00:00 2001 From: gord chung Date: Thu, 19 Jan 2017 18:18:40 +0000 Subject: [PATCH 0574/1483] serialise: simplify array format serialise series into the output format right away instead of storing in intermediate formats. 
minor bump in performance: - 5% for compressed - 4% for uncomressed Change-Id: I5e6b72a2e6807a2839e89160594b2a1a42324f31 --- gnocchi/carbonara.py | 24 +++++++++++------------- 1 file changed, 11 insertions(+), 13 deletions(-) diff --git a/gnocchi/carbonara.py b/gnocchi/carbonara.py index 1c0b32a9..5643c714 100644 --- a/gnocchi/carbonara.py +++ b/gnocchi/carbonara.py @@ -257,10 +257,9 @@ class BoundTimeSerie(TimeSerie): # NOTE(jd) Use a double delta encoding for timestamps timestamps = numpy.insert(numpy.diff(self.ts.index), 0, self.first.value) - timestamps = numpy.array(timestamps, dtype='uint64') - values = numpy.array(self.ts.values, dtype='float64') - payload = (timestamps.astype(' Date: Fri, 20 Jan 2017 22:13:56 +0100 Subject: [PATCH 0575/1483] sqlalchemy: fix compat search on created_by_project_id Change-Id: I138bbceda7b2ad35e49a74e7b91d6bed760a5115 --- gnocchi/indexer/sqlalchemy.py | 1 + gnocchi/tests/gabbi/gabbits/search.yaml | 13 ++++++++++++- 2 files changed, 13 insertions(+), 1 deletion(-) diff --git a/gnocchi/indexer/sqlalchemy.py b/gnocchi/indexer/sqlalchemy.py index 964081d5..48960885 100644 --- a/gnocchi/indexer/sqlalchemy.py +++ b/gnocchi/indexer/sqlalchemy.py @@ -1163,6 +1163,7 @@ class QueryTransformer(object): return creator.like("%s:%%" % value) raise indexer.QueryValueError(value, field_name) elif field_name == "created_by_project_id": + creator = getattr(table, "creator") if op == operator.eq: return creator.like("%%:%s" % value) elif op == operator.ne: diff --git a/gnocchi/tests/gabbi/gabbits/search.yaml b/gnocchi/tests/gabbi/gabbits/search.yaml index 4503454a..d3c3a3d1 100644 --- a/gnocchi/tests/gabbi/gabbits/search.yaml +++ b/gnocchi/tests/gabbi/gabbits/search.yaml @@ -57,6 +57,17 @@ tests: response_json_paths: $.`len`: 2 + - name: search like created_by_project_id + POST: /v1/search/resource/generic + request_headers: + content-type: application/json + data: + eq: + created_by_project_id: + - f3d41b770cc14f0bb94a1d5be9c0e3ea + 
response_json_paths: + $.`len`: 0 + - name: search in_ query string POST: /v1/search/resource/generic?filter=id%20in%20%5Bfaef212f-0bf4-4030-a461-2186fef79be0%2C%20df7e5e75-6a1d-4ff7-85cb-38eb9d75da7e%5D request_headers: @@ -70,4 +81,4 @@ tests: content-type: application/json data: {} response_json_paths: - $.`len`: 2 \ No newline at end of file + $.`len`: 2 -- GitLab From 67807cdd1cde8424b9036a3559cb6138819a06d8 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Sat, 21 Jan 2017 18:05:44 +0100 Subject: [PATCH 0576/1483] tests: increase benchmark timeout for Carbonara Many bencharmsk were added on Carbonara, so it takes longer to run the bench. Change-Id: I41e290df6483d37a8408595306b4aa06ddc1d2e3 --- gnocchi/tests/test_carbonara.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/gnocchi/tests/test_carbonara.py b/gnocchi/tests/test_carbonara.py index 24f0ef14..e1e516c7 100644 --- a/gnocchi/tests/test_carbonara.py +++ b/gnocchi/tests/test_carbonara.py @@ -28,7 +28,7 @@ from gnocchi import carbonara class TestBoundTimeSerie(base.BaseTestCase): def test_benchmark(self): - self.useFixture(fixtures.Timeout(120, gentle=True)) + self.useFixture(fixtures.Timeout(300, gentle=True)) carbonara.BoundTimeSerie.benchmark() @staticmethod -- GitLab From 3b99004f6e59e0f6c1dc46adbeb30356f85fd389 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Sat, 21 Jan 2017 18:54:17 +0100 Subject: [PATCH 0577/1483] utils: use proper timedelta conversion The current conversion works in most cases with Pandas 0.19, but fails with Pandas 0.17. Use the proper parsing method. 
Change-Id: I69f34d7f8c6b2794ce88b7a9651ce36e9f38b59c --- gnocchi/utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/gnocchi/utils.py b/gnocchi/utils.py index cddbe0b4..50dc303b 100644 --- a/gnocchi/utils.py +++ b/gnocchi/utils.py @@ -140,7 +140,7 @@ def to_timespan(value): seconds = float(value) except Exception: try: - seconds = pd.Timedelta(six.text_type(value)).total_seconds() + seconds = pd.to_timedelta(value).total_seconds() except Exception: raise ValueError("Unable to parse timespan") if seconds <= 0: -- GitLab From 34cc6421348b1ac4abdf9c720efc3fe20f2b20a0 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Thu, 19 Jan 2017 16:32:42 +0100 Subject: [PATCH 0578/1483] carbonara: add resample() benchmark This change add some numbers about the resample method. Change-Id: Ifa2079fa23d1db84c4f8eb0933198098ac6914d5 --- gnocchi/carbonara.py | 14 +++++++++++++- gnocchi/tests/test_carbonara.py | 2 +- 2 files changed, 14 insertions(+), 2 deletions(-) diff --git a/gnocchi/carbonara.py b/gnocchi/carbonara.py index 5643c714..1e2498c5 100644 --- a/gnocchi/carbonara.py +++ b/gnocchi/carbonara.py @@ -681,7 +681,7 @@ class AggregatedTimeSerie(TimeSerie): """Run a speed benchmark!""" points = SplitKey.POINTS_PER_SPLIT sampling = 5 - serialize_times = 50 + resample = 35 now = datetime.datetime(2015, 4, 3, 23, 11) @@ -708,6 +708,7 @@ class AggregatedTimeSerie(TimeSerie): for x in six.moves.range(points)]), ]: print(title) + serialize_times = 50 pts = pandas.Series(values, [now + datetime.timedelta(seconds=i*sampling) for i in six.moves.range(points)]) @@ -766,6 +767,17 @@ class AggregatedTimeSerie(TimeSerie): t1 = time.time() print(" merge() speed: %.8f s" % ((t1 - t0) / serialize_times)) + for agg in ['mean', 'sum', 'max', 'min', 'std', 'median', 'first', + 'last', 'count', '5pct', '90pct']: + serialize_times = 3 if agg.endswith('pct') else 10 + ts = cls(ts=pts, sampling=sampling, aggregation_method=agg) + t0 = time.time() + for i in 
six.moves.range(serialize_times): + ts.resample(resample) + t1 = time.time() + print(" resample(%s) speed: %.8f s" % (agg, (t1 - t0) / + serialize_times)) + @staticmethod def aggregated(timeseries, aggregation, from_timestamp=None, to_timestamp=None, needed_percent_of_overlap=100.0, diff --git a/gnocchi/tests/test_carbonara.py b/gnocchi/tests/test_carbonara.py index e1e516c7..ebec7dbd 100644 --- a/gnocchi/tests/test_carbonara.py +++ b/gnocchi/tests/test_carbonara.py @@ -114,7 +114,7 @@ class TestAggregatedTimeSerie(base.BaseTestCase): [3, 5, 6]) def test_benchmark(self): - self.useFixture(fixtures.Timeout(120, gentle=True)) + self.useFixture(fixtures.Timeout(240, gentle=True)) carbonara.AggregatedTimeSerie.benchmark() def test_fetch_basic(self): -- GitLab From 417dd916a25c3d49be6f64be884dcbd3a10ace07 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Sat, 21 Jan 2017 17:11:42 +0100 Subject: [PATCH 0579/1483] carbonara: add tests for each aggregation Change-Id: Iacd056c78c059abb20bc9516d1447f5f5830e918 --- gnocchi/tests/test_carbonara.py | 43 ++++++++++++++++++++++++++++++++- 1 file changed, 42 insertions(+), 1 deletion(-) diff --git a/gnocchi/tests/test_carbonara.py b/gnocchi/tests/test_carbonara.py index ebec7dbd..bba3e094 100644 --- a/gnocchi/tests/test_carbonara.py +++ b/gnocchi/tests/test_carbonara.py @@ -114,7 +114,7 @@ class TestAggregatedTimeSerie(base.BaseTestCase): [3, 5, 6]) def test_benchmark(self): - self.useFixture(fixtures.Timeout(240, gentle=True)) + self.useFixture(fixtures.Timeout(120, gentle=True)) carbonara.AggregatedTimeSerie.benchmark() def test_fetch_basic(self): @@ -206,6 +206,47 @@ class TestAggregatedTimeSerie(base.BaseTestCase): self.assertEqual(5.9000000000000004, ts[datetime.datetime(2014, 1, 1, 12, 0, 0)]) + def _do_test_aggregation(self, name, v1, v2): + ts = carbonara.TimeSerie.from_tuples( + [(datetime.datetime(2014, 1, 1, 12, 0, 0), 3), + (datetime.datetime(2014, 1, 1, 12, 0, 4), 6), + (datetime.datetime(2014, 1, 1, 12, 0, 9), 5), 
+ (datetime.datetime(2014, 1, 1, 12, 1, 4), 8), + (datetime.datetime(2014, 1, 1, 12, 1, 6), 9)]) + ts = self._resample(ts, 60, name) + + self.assertEqual(2, len(ts)) + self.assertEqual(v1, ts[datetime.datetime(2014, 1, 1, 12, 0, 0)]) + self.assertEqual(v2, ts[datetime.datetime(2014, 1, 1, 12, 1, 0)]) + + def test_aggregation_first(self): + self._do_test_aggregation('first', 3, 8) + + def test_aggregation_last(self): + self._do_test_aggregation('last', 5, 9) + + def test_aggregation_count(self): + self._do_test_aggregation('count', 3, 2) + + def test_aggregation_sum(self): + self._do_test_aggregation('sum', 14, 17) + + def test_aggregation_mean(self): + self._do_test_aggregation('mean', 4.666666666666667, 8.5) + + def test_aggregation_median(self): + self._do_test_aggregation('median', 5.0, 8.5) + + def test_aggregation_min(self): + self._do_test_aggregation('min', 3, 8) + + def test_aggregation_max(self): + self._do_test_aggregation('max', 6, 9) + + def test_aggregation_std(self): + self._do_test_aggregation('std', 1.5275252316519465, + 0.70710678118654757) + def test_different_length_in_timestamps_and_data(self): self.assertRaises(ValueError, carbonara.AggregatedTimeSerie.from_data, -- GitLab From d2435dcbcb0f6ece7c658d59a2d803eabfc2b911 Mon Sep 17 00:00:00 2001 From: Jake Yip Date: Mon, 23 Jan 2017 12:35:42 +1100 Subject: [PATCH 0580/1483] Fix error message Change-Id: I1b160385b735af0a08ecd45059f7af5f546357a3 --- gnocchi/indexer/__init__.py | 2 +- gnocchi/tests/test_rest.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/gnocchi/indexer/__init__.py b/gnocchi/indexer/__init__.py index 46fd058c..d1a6e169 100644 --- a/gnocchi/indexer/__init__.py +++ b/gnocchi/indexer/__init__.py @@ -162,7 +162,7 @@ class ResourceAttributeError(IndexerException, AttributeError): """Error raised when an attribute does not exist for a resource type.""" def __init__(self, resource, attribute): super(ResourceAttributeError, self).__init__( - "Resource %s has no %s 
attribute" % (resource, attribute)) + "Resource type %s has no %s attribute" % (resource, attribute)) self.resource = resource, self.attribute = attribute diff --git a/gnocchi/tests/test_rest.py b/gnocchi/tests/test_rest.py index 7eb0da7c..762154a7 100644 --- a/gnocchi/tests/test_rest.py +++ b/gnocchi/tests/test_rest.py @@ -1463,7 +1463,7 @@ class ResourceTest(RestTest): "/v1/search/resource/" + self.resource_type, params={"=": {"foobar": "baz"}}, status=400) - self.assertIn("Resource " + self.resource_type + self.assertIn("Resource type " + self.resource_type + " has no foobar attribute", result.text) -- GitLab From 9f3de466179252ef8227cfec065d629f2d8c8f57 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Sat, 21 Jan 2017 14:13:47 +0100 Subject: [PATCH 0581/1483] carbonara: resample() with scipy/numpy scipy ndimage processing is very good at doing simple math of big matrix. Many aggregation methods can be replaced by scipy.ndimage aggregation method. Some other by simple numpy methods. 
Performance increase of resample is: min/max/sum/mean/median/count/first/last ~ x200 std x2 percentile x4 Co-Authored-By: gord chung Change-Id: I2174add1bccc36beeb8ff26d94942b4fa3aca679 --- gnocchi/carbonara.py | 92 +++++++++++++++++++++++++++++++++++++++++--- requirements.txt | 1 + 2 files changed, 87 insertions(+), 6 deletions(-) diff --git a/gnocchi/carbonara.py b/gnocchi/carbonara.py index 1e2498c5..9069af0a 100644 --- a/gnocchi/carbonara.py +++ b/gnocchi/carbonara.py @@ -30,6 +30,7 @@ import lz4 import numpy import numpy.lib.recfunctions import pandas +from scipy import ndimage import six # NOTE(sileht): pandas relies on time.strptime() @@ -86,6 +87,85 @@ def round_timestamp(ts, freq): (pandas.Timestamp(ts).value // freq) * freq) +class GroupedTimeSeries(object): + def __init__(self, ts, granularity): + # NOTE(sileht): The whole class assumes ts is ordered and don't have + # duplicate timestamps, it uses numpy.unique that sorted list, but + # we always assume the orderd to be the same as the input. + freq = granularity * 10e8 + self._ts = ts + self.indexes = (numpy.array(ts.index, 'float') // freq) * freq + self.tstamps, self.counts = numpy.unique(self.indexes, + return_counts=True) + + def mean(self): + return self._scipy_aggregate(ndimage.mean) + + def sum(self): + return self._scipy_aggregate(ndimage.sum) + + def min(self): + return self._scipy_aggregate(ndimage.minimum) + + def max(self): + return self._scipy_aggregate(ndimage.maximum) + + def median(self): + return self._scipy_aggregate(ndimage.median) + + def std(self): + # NOTE(sileht): ndimage.standard_deviation is really more performant + # but it use ddof=0, to get the same result as pandas we have to use + # ddof=1. If one day scipy allow to pass ddof, this should be changed. 
+ return self._scipy_aggregate(ndimage.labeled_comprehension, + remove_unique=True, + func=functools.partial(numpy.std, ddof=1), + out_dtype='float64', + default=None) + + def _count(self): + timestamps = numpy.array(self.tstamps, 'datetime64[ns]') + return (self.counts, timestamps) + + def count(self): + return pandas.Series(*self._count()) + + def last(self): + counts, timestamps = self._count() + cumcounts = numpy.cumsum(counts) - 1 + values = self._ts.values[cumcounts] + return pandas.Series(values, pandas.to_datetime(timestamps)) + + def first(self): + counts, timestamps = self._count() + counts = numpy.insert(counts[:-1], 0, 0) + cumcounts = numpy.cumsum(counts) + values = self._ts.values[cumcounts] + return pandas.Series(values, pandas.to_datetime(timestamps)) + + def quantile(self, q): + return self._scipy_aggregate(ndimage.labeled_comprehension, + func=functools.partial( + numpy.percentile, + q=q, + ), + out_dtype='float64', + default=None) + + def _scipy_aggregate(self, method, remove_unique=False, *args, **kwargs): + if remove_unique: + locs = numpy.argwhere(self.counts > 1).T[0] + + values = method(self._ts.values, self.indexes, self.tstamps, + *args, **kwargs) + timestamps = numpy.array(self.tstamps, 'datetime64[ns]') + + if remove_unique: + timestamps = timestamps[locs] + values = values[locs] + return pandas.Series(values, pandas.to_datetime(timestamps)) + + class TimeSerie(object): """A representation of series of a timestamp with a value. @@ -161,14 +241,14 @@ class TimeSerie(object): except IndexError: return - def group_serie(self, granularity, start=None): + def group_serie(self, granularity, start=0): # NOTE(jd) Our whole serialization system is based on Epoch, and we # store unsigned integer, so we can't store anything before Epoch. # Sorry! 
if self.ts.index[0].value < 0: raise BeforeEpochError(self.ts.index[0]) - return self.ts[start:].groupby(functools.partial( - round_timestamp, freq=granularity * 10e8)) + + return GroupedTimeSeries(self.ts[start:], granularity) class BoundTimeSerie(TimeSerie): @@ -450,10 +530,10 @@ class AggregatedTimeSerie(TimeSerie): q = None m = AggregatedTimeSerie._AGG_METHOD_PCT_RE.match(aggregation_method) if m: - q = float(m.group(1)) / 100 + q = float(m.group(1)) aggregation_method_func_name = 'quantile' else: - if not hasattr(pandas.core.groupby.SeriesGroupBy, + if not hasattr(GroupedTimeSeries, aggregation_method): raise UnknownAggregationMethod(aggregation_method) aggregation_method_func_name = aggregation_method @@ -494,7 +574,7 @@ class AggregatedTimeSerie(TimeSerie): agg_name, q = cls._get_agg_method(aggregation_method) return cls(sampling, aggregation_method, ts=cls._resample_grouped(grouped_serie, agg_name, - q).dropna(), + q), max_size=max_size) def __eq__(self, other): diff --git a/requirements.txt b/requirements.txt index 129659ae..6bbcbeeb 100644 --- a/requirements.txt +++ b/requirements.txt @@ -8,6 +8,7 @@ oslo.serialization>=1.4.0 oslo.utils>=3.18.0 oslo.middleware>=3.22.0 pandas>=0.17.0 +scipy>=0.18.1 # BSD pecan>=0.9 futures jsonpatch -- GitLab From 24d2210faa5da40454cb497b500c4636b493af86 Mon Sep 17 00:00:00 2001 From: Jake Yip Date: Mon, 23 Jan 2017 20:45:19 +1100 Subject: [PATCH 0582/1483] Remove redundant comma Change-Id: I722d91d111d4d2cc7455010252c16c35636bccc4 --- gnocchi/indexer/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/gnocchi/indexer/__init__.py b/gnocchi/indexer/__init__.py index d1a6e169..50c9217a 100644 --- a/gnocchi/indexer/__init__.py +++ b/gnocchi/indexer/__init__.py @@ -163,7 +163,7 @@ class ResourceAttributeError(IndexerException, AttributeError): def __init__(self, resource, attribute): super(ResourceAttributeError, self).__init__( "Resource type %s has no %s attribute" % (resource, attribute)) - 
self.resource = resource, + self.resource = resource self.attribute = attribute -- GitLab From 2e29652984711a2749067fbdacd6c9198f85b345 Mon Sep 17 00:00:00 2001 From: gord chung Date: Mon, 23 Jan 2017 13:03:47 +0000 Subject: [PATCH 0583/1483] increase timeout new aggregate benchmarks add a lot of time to test Change-Id: I7d2f6c2a514ddec47bf3d92049299700d1648fc9 --- gnocchi/tests/test_carbonara.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/gnocchi/tests/test_carbonara.py b/gnocchi/tests/test_carbonara.py index bba3e094..7c16487a 100644 --- a/gnocchi/tests/test_carbonara.py +++ b/gnocchi/tests/test_carbonara.py @@ -114,7 +114,7 @@ class TestAggregatedTimeSerie(base.BaseTestCase): [3, 5, 6]) def test_benchmark(self): - self.useFixture(fixtures.Timeout(120, gentle=True)) + self.useFixture(fixtures.Timeout(300, gentle=True)) carbonara.AggregatedTimeSerie.benchmark() def test_fetch_basic(self): -- GitLab From c4027ce04803f333a1257d03bac27911ff2c5ed3 Mon Sep 17 00:00:00 2001 From: gord chung Date: Sat, 21 Jan 2017 23:53:53 +0000 Subject: [PATCH 0584/1483] stop validating aggregation on init we never use the values and this should already be validated by either REST api or archive policy Change-Id: I68405a9600df67c76802e23add32ab3e7e4dbe9a --- gnocchi/carbonara.py | 7 +------ gnocchi/tests/test_carbonara.py | 7 ------- 2 files changed, 1 insertion(+), 13 deletions(-) diff --git a/gnocchi/carbonara.py b/gnocchi/carbonara.py index 9069af0a..f75f4b3a 100644 --- a/gnocchi/carbonara.py +++ b/gnocchi/carbonara.py @@ -504,10 +504,6 @@ class AggregatedTimeSerie(TimeSerie): """ super(AggregatedTimeSerie, self).__init__(ts) - - self.aggregation_method_func_name, self.q = self._get_agg_method( - aggregation_method) - self.sampling = self._to_offset(sampling).nanos / 10e8 self.max_size = max_size self.aggregation_method = aggregation_method @@ -533,8 +529,7 @@ class AggregatedTimeSerie(TimeSerie): q = float(m.group(1)) aggregation_method_func_name = 'quantile' 
else: - if not hasattr(GroupedTimeSeries, - aggregation_method): + if not hasattr(GroupedTimeSeries, aggregation_method): raise UnknownAggregationMethod(aggregation_method) aggregation_method_func_name = aggregation_method return aggregation_method_func_name, q diff --git a/gnocchi/tests/test_carbonara.py b/gnocchi/tests/test_carbonara.py index bba3e094..59efb21e 100644 --- a/gnocchi/tests/test_carbonara.py +++ b/gnocchi/tests/test_carbonara.py @@ -156,13 +156,6 @@ class TestAggregatedTimeSerie(base.BaseTestCase): self.assertRaises(carbonara.BeforeEpochError, ts.group_serie, 60) - def test_bad_percentile(self): - for bad_percentile in ('0pct', '100pct', '-1pct', '123pct'): - self.assertRaises(carbonara.UnknownAggregationMethod, - carbonara.AggregatedTimeSerie, - sampling='1Min', - aggregation_method=bad_percentile) - @staticmethod def _resample(ts, sampling, agg, max_size=None): grouped = ts.group_serie(sampling) -- GitLab From 23614016944b760025beb66faef4d3ce4a1862ad Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Mon, 23 Jan 2017 14:49:30 +0100 Subject: [PATCH 0585/1483] indexer: catch another mysql exception Mysql can also raise error 1412 when we change a resource type. oslo_db.exception.DBError: (pymysql.err.InternalError) (1412, u'Table definition has changed, please retry transaction') This change catches it with a pymysql contants helper. 
Change-Id: Ib20c9e05f9977af4c261432b35ec51be6670b0e3 --- gnocchi/indexer/sqlalchemy.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/gnocchi/indexer/sqlalchemy.py b/gnocchi/indexer/sqlalchemy.py index 48960885..a5e4f058 100644 --- a/gnocchi/indexer/sqlalchemy.py +++ b/gnocchi/indexer/sqlalchemy.py @@ -1050,8 +1050,9 @@ class SQLAlchemyIndexer(indexer.IndexerDriver): e, sqlalchemy.exc.ProgrammingError) or not isinstance( e.orig, pymysql.err.ProgrammingError) - or (e.orig.args[0] - != pymysql.constants.ER.NO_SUCH_TABLE)): + or (e.orig.args[0] not in + [pymysql.constants.ER.NO_SUCH_TABLE, + pymysql.constants.ER.TABLE_DEF_CHANGED])): raise return all_resources -- GitLab From ac7ae99f7ef325563875c26656cf63790b341f82 Mon Sep 17 00:00:00 2001 From: gord chung Date: Wed, 16 Nov 2016 20:36:25 +0000 Subject: [PATCH 0586/1483] drop gnocchi__container object we just use filestore properties in gnocchi__container object. the gnocchi__none object can handle this as it's only being used for object content. Change-Id: Ia26638a007be2df5641b9828ce0a60065ee8a54a Closes-Bug: #1614965 --- gnocchi/storage/ceph.py | 42 ++++++++++++++++++++++++++++------------- 1 file changed, 29 insertions(+), 13 deletions(-) diff --git a/gnocchi/storage/ceph.py b/gnocchi/storage/ceph.py index d4cc31a2..a887a85a 100644 --- a/gnocchi/storage/ceph.py +++ b/gnocchi/storage/ceph.py @@ -56,16 +56,29 @@ class CephStorage(_carbonara.CarbonaraBasedStorage): lock = self._lock(metric.id) with lock: container = "gnocchi_%s_container" % metric.id + unagg_obj = self._build_unaggregated_timeserie_path(metric, 3) try: xattrs = tuple(k for k, v in self.ioctx.get_xattrs(container)) except rados.ObjectNotFound: + # this means already upgraded or some corruption? move on. pass else: + # if xattrs are found, it means we're coming from + # gnocchiv2. migrate to omap accordingly. 
+ if xattrs: + keys = xattrs + # if no xattrs but object exists, it means it already + # migrated to v3 and now upgrade to use single object + else: + with rados.ReadOpCtx() as op: + omaps, ret = self.ioctx.get_omap_vals(op, "", "", -1) + self.ioctx.operate_read_op(op, container) + keys = (k for k, __ in omaps) with rados.WriteOpCtx() as op: - self.ioctx.set_omap(op, xattrs, tuple([b""] * len(xattrs))) - self.ioctx.operate_write_op(op, container) - for xattr in xattrs: - self.ioctx.rm_xattr(container, xattr) + self.ioctx.set_omap(op, keys, + tuple([b""] * len(keys))) + self.ioctx.operate_write_op(op, unagg_obj) + self.ioctx.remove_object(container) super(CephStorage, self)._check_for_metric_upgrade(metric) @staticmethod @@ -83,11 +96,11 @@ class CephStorage(_carbonara.CarbonaraBasedStorage): return False def _create_metric(self, metric): - name = "gnocchi_%s_container" % metric.id + name = self._build_unaggregated_timeserie_path(metric, 3) if self._object_exists(name): raise storage.MetricAlreadyExists(metric) else: - self.ioctx.write_full(name, b"metric created") + self.ioctx.write_full(name, b"") def _store_metric_measures(self, metric, timestamp_key, aggregation, granularity, data, offset=None, version=3): @@ -99,7 +112,8 @@ class CephStorage(_carbonara.CarbonaraBasedStorage): self.ioctx.write(name, data, offset=offset) with rados.WriteOpCtx() as op: self.ioctx.set_omap(op, (name,), (b"",)) - self.ioctx.operate_write_op(op, "gnocchi_%s_container" % metric.id) + self.ioctx.operate_write_op( + op, self._build_unaggregated_timeserie_path(metric, 3)) def _delete_metric_measures(self, metric, timestamp_key, aggregation, granularity, version=3): @@ -107,7 +121,8 @@ class CephStorage(_carbonara.CarbonaraBasedStorage): aggregation, granularity, version) with rados.WriteOpCtx() as op: self.ioctx.remove_omap_keys(op, (name,)) - self.ioctx.operate_write_op(op, "gnocchi_%s_container" % metric.id) + self.ioctx.operate_write_op( + op, 
self._build_unaggregated_timeserie_path(metric, 3)) self.ioctx.aio_remove(name) def _delete_metric(self, metric): @@ -115,15 +130,15 @@ class CephStorage(_carbonara.CarbonaraBasedStorage): omaps, ret = self.ioctx.get_omap_vals(op, "", "", -1) try: self.ioctx.operate_read_op( - op, "gnocchi_%s_container" % metric.id) + op, self._build_unaggregated_timeserie_path(metric, 3)) except rados.ObjectNotFound: return if ret == errno.ENOENT: return for name, _ in omaps: self.ioctx.aio_remove(name) - for name in ('container', 'none'): - self.ioctx.aio_remove("gnocchi_%s_%s" % (metric.id, name)) + self.ioctx.aio_remove( + self._build_unaggregated_timeserie_path(metric, 3)) def _get_measures(self, metric, timestamp_key, aggregation, granularity, version=3): @@ -132,7 +147,8 @@ class CephStorage(_carbonara.CarbonaraBasedStorage): aggregation, granularity, version) return self._get_object_content(name) except rados.ObjectNotFound: - if self._object_exists("gnocchi_%s_container" % metric.id): + if self._object_exists( + self._build_unaggregated_timeserie_path(metric, 3)): raise storage.AggregationDoesNotExist(metric, aggregation) else: raise storage.MetricDoesNotExist(metric) @@ -143,7 +159,7 @@ class CephStorage(_carbonara.CarbonaraBasedStorage): omaps, ret = self.ioctx.get_omap_vals(op, "", "", -1) try: self.ioctx.operate_read_op( - op, "gnocchi_%s_container" % metric.id) + op, self._build_unaggregated_timeserie_path(metric, 3)) except rados.ObjectNotFound: raise storage.MetricDoesNotExist(metric) if ret == errno.ENOENT: -- GitLab From 451dbdad875cfe077de91ca5a04a52e240670986 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Mon, 23 Jan 2017 16:04:52 +0100 Subject: [PATCH 0587/1483] Add a release note about storage/incoming split Change-Id: I816efa2159de894b2c3976360a1ab9ccae91128f --- releasenotes/notes/storage-incoming-586b3e81de8deb4f.yaml | 6 ++++++ 1 file changed, 6 insertions(+) create mode 100644 releasenotes/notes/storage-incoming-586b3e81de8deb4f.yaml diff --git 
a/releasenotes/notes/storage-incoming-586b3e81de8deb4f.yaml b/releasenotes/notes/storage-incoming-586b3e81de8deb4f.yaml new file mode 100644 index 00000000..f1d63bb6 --- /dev/null +++ b/releasenotes/notes/storage-incoming-586b3e81de8deb4f.yaml @@ -0,0 +1,6 @@ +--- +features: + - The storage of new measures that ought to be processed by *metricd* can now + be stored using different storage drivers. By default, the driver used is + still the regular storage driver configured. See the `[incoming]` section + in the configuration file. -- GitLab From f97a586419315e90e58444531954b73f089706a9 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Mon, 23 Jan 2017 14:08:04 +0100 Subject: [PATCH 0588/1483] Update Pandas requirements to 0.18 After further testing, there's a few things that are not working with Pandas 0.17: - pandas.to_timestamp() wants bytes and not unicode strings, so there'a need to convert. This has been fixed in Pandas 0.18 - The Carbonara merge() method does not work with Pandas 0.17, and has been fixed in 0.18 (merge was introduced in c74c5c9289956f0aa73eed4146ca5f3f2f6407f2) Change-Id: I8d38c58b92c21c31e67ddbed446a90acb337e179 --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 6bbcbeeb..ea3d96a0 100644 --- a/requirements.txt +++ b/requirements.txt @@ -7,7 +7,7 @@ oslo.policy>=0.3.0 oslo.serialization>=1.4.0 oslo.utils>=3.18.0 oslo.middleware>=3.22.0 -pandas>=0.17.0 +pandas>=0.18.0 scipy>=0.18.1 # BSD pecan>=0.9 futures -- GitLab From a7bc20c7a40d639ac25317101ef411f1fa4548c1 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Mon, 23 Jan 2017 19:35:05 +0100 Subject: [PATCH 0589/1483] Revert "indexer: fix datetime with mysql >= 5.7.17" This reverts commit e1219e2997ab966b2a394e1524c2288ca04e0c9f. 
And does a smaller change and does not set useful default to not trigger the mysql issue Change-Id: I8e8e2ec01bc29e88e665f3f469a2241497312e82 --- .../5c4f93e5bb4_mysql_float_to_timestamp.py | 53 +++++++------------ gnocchi/indexer/sqlalchemy_base.py | 7 --- 2 files changed, 19 insertions(+), 41 deletions(-) diff --git a/gnocchi/indexer/alembic/versions/5c4f93e5bb4_mysql_float_to_timestamp.py b/gnocchi/indexer/alembic/versions/5c4f93e5bb4_mysql_float_to_timestamp.py index b2f72b44..824a3e93 100644 --- a/gnocchi/indexer/alembic/versions/5c4f93e5bb4_mysql_float_to_timestamp.py +++ b/gnocchi/indexer/alembic/versions/5c4f93e5bb4_mysql_float_to_timestamp.py @@ -36,40 +36,33 @@ branch_labels = None depends_on = None -timestamp_default = '1000-01-01 00:00:00.000000' - - def upgrade(): bind = op.get_bind() - - if bind.engine.name == "mysql": + if bind and bind.engine.name == "mysql": op.execute("SET time_zone = '+00:00'") + # NOTE(jd) So that crappy engine that is MySQL does not have "ALTER + # TABLE … USING …". We need to copy everything and convert… + for table_name, column_name in (("resource", "started_at"), + ("resource", "ended_at"), + ("resource", "revision_start"), + ("resource_history", "started_at"), + ("resource_history", "ended_at"), + ("resource_history", "revision_start"), + ("resource_history", "revision_end"), + ("resource_type", "updated_at")): - # NOTE(jd) So that crappy engine that is MySQL does not have "ALTER - # TABLE … USING …". 
We need to copy everything and convert… - for table_name, column_name in (("resource", "started_at"), - ("resource", "ended_at"), - ("resource", "revision_start"), - ("resource_history", "started_at"), - ("resource_history", "ended_at"), - ("resource_history", "revision_start"), - ("resource_history", "revision_end"), - ("resource_type", "updated_at")): + nullable = column_name == "ended_at" - nullable = column_name == "ended_at" - server_default = None if nullable else timestamp_default - - if bind.engine.name == "mysql": + existing_type = sa.types.DECIMAL( + precision=20, scale=6, asdecimal=True) existing_col = sa.Column( column_name, - sa.types.DECIMAL(precision=20, scale=6, asdecimal=True), + existing_type, nullable=nullable) temp_col = sa.Column( column_name + "_ts", sqlalchemy_base.TimestampUTC(), - nullable=nullable, - server_default=server_default, - ) + nullable=True) op.add_column(table_name, temp_col) t = sa.sql.table(table_name, existing_col, temp_col) op.execute(t.update().values( @@ -77,16 +70,8 @@ def upgrade(): op.drop_column(table_name, column_name) op.alter_column(table_name, column_name + "_ts", - existing_type=sqlalchemy_base.TimestampUTC(), + nullable=nullable, + type_=sqlalchemy_base.TimestampUTC(), existing_nullable=nullable, - existing_server_default=server_default, - server_default=server_default, + existing_type=existing_type, new_column_name=column_name) - else: - op.alter_column( - table_name, - column_name, - existing_type=sqlalchemy_base.TimestampUTC(), - existing_nullable=nullable, - existing_server_default=None, - server_default=None if nullable else timestamp_default) diff --git a/gnocchi/indexer/sqlalchemy_base.py b/gnocchi/indexer/sqlalchemy_base.py index 6170c23a..db1a1408 100644 --- a/gnocchi/indexer/sqlalchemy_base.py +++ b/gnocchi/indexer/sqlalchemy_base.py @@ -43,9 +43,6 @@ COMMON_TABLES_ARGS = {'mysql_charset': "utf8", 'mysql_engine': "InnoDB"} -timestamp_default = sqlalchemy.text("'1000-01-01 00:00:00.000000'") - - class 
PreciseTimestamp(types.TypeDecorator): """Represents a timestamp precise to the microsecond. @@ -287,7 +284,6 @@ class ResourceType(Base, GnocchiBase, resource_type.ResourceType): # MySQL is not a Timestamp, so it would # not store a timestamp but a date as an # integer. - server_default=timestamp_default, default=lambda: utils.utcnow()) def to_baseclass(self): @@ -335,10 +331,8 @@ class ResourceMixin(ResourceJsonifier): creator = sqlalchemy.Column(sqlalchemy.String(255)) started_at = sqlalchemy.Column(TimestampUTC, nullable=False, - server_default=timestamp_default, default=lambda: utils.utcnow()) revision_start = sqlalchemy.Column(TimestampUTC, nullable=False, - server_default=timestamp_default, default=lambda: utils.utcnow()) ended_at = sqlalchemy.Column(TimestampUTC) user_id = sqlalchemy.Column(sqlalchemy.String(255)) @@ -380,7 +374,6 @@ class ResourceHistory(ResourceMixin, Base, GnocchiBase): name="fk_rh_id_resource_id"), nullable=False) revision_end = sqlalchemy.Column(TimestampUTC, nullable=False, - server_default=timestamp_default, default=lambda: utils.utcnow()) metrics = sqlalchemy.orm.relationship( Metric, primaryjoin="Metric.resource_id == ResourceHistory.id", -- GitLab From f27ec588a8dc050fde35b20dca735ae560be42ab Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Mon, 19 Dec 2016 13:17:04 +0100 Subject: [PATCH 0590/1483] sqlalchemy: use a list rather than if/elif to convert type in queries Change-Id: I4d7ab97ddd383c8c18fb4c6cba1b531f37af36c4 --- gnocchi/indexer/sqlalchemy.py | 41 ++++++++++++++++------------------- 1 file changed, 19 insertions(+), 22 deletions(-) diff --git a/gnocchi/indexer/sqlalchemy.py b/gnocchi/indexer/sqlalchemy.py index a5e4f058..bcb037dc 100644 --- a/gnocchi/indexer/sqlalchemy.py +++ b/gnocchi/indexer/sqlalchemy.py @@ -1128,6 +1128,14 @@ class QueryTransformer(object): u"∧": sqlalchemy.and_, } + converters = ( + (base.TimestampUTC, utils.to_datetime), + (sqlalchemy_utils.UUIDType, utils.ResourceUUID), + (types.String, 
six.text_type), + (types.Integer, int), + (types.Numeric, float), + ) + @classmethod def _handle_multiple_op(cls, engine, table, op, nodes): return op(*[ @@ -1184,28 +1192,17 @@ class QueryTransformer(object): # Convert value to the right type if value is not None: - converter = None - - if isinstance(attr.type, base.TimestampUTC): - converter = utils.to_datetime - elif isinstance(attr.type, sqlalchemy_utils.UUIDType): - converter = utils.ResourceUUID - elif isinstance(attr.type, types.String): - converter = six.text_type - elif isinstance(attr.type, types.Integer): - converter = int - elif isinstance(attr.type, types.Numeric): - converter = float - - if converter: - try: - if isinstance(value, list): - # we got a list for in_ operator - value = [converter(v) for v in value] - else: - value = converter(value) - except Exception: - raise indexer.QueryValueError(value, field_name) + for klass, converter in cls.converters: + if isinstance(attr.type, klass): + try: + if isinstance(value, list): + # we got a list for in_ operator + value = [converter(v) for v in value] + else: + value = converter(value) + except Exception: + raise indexer.QueryValueError(value, field_name) + break return op(attr, value) -- GitLab From 0a1b3742b6af756138ddb8fcebebbf2b4755eac7 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Mon, 19 Dec 2016 18:06:41 +0100 Subject: [PATCH 0591/1483] rest,indexer: handle ResourceUUID conversion in the REST API This avoids leaking the conversion into the indexer entirely. 
Change-Id: Ifbbacfea9bd8d625b1f5a10aeaec4b41342949a3 --- gnocchi/indexer/sqlalchemy.py | 1 - gnocchi/rest/__init__.py | 7 ++++++- gnocchi/tests/test_indexer.py | 10 ---------- 3 files changed, 6 insertions(+), 12 deletions(-) diff --git a/gnocchi/indexer/sqlalchemy.py b/gnocchi/indexer/sqlalchemy.py index bcb037dc..85645fb1 100644 --- a/gnocchi/indexer/sqlalchemy.py +++ b/gnocchi/indexer/sqlalchemy.py @@ -1130,7 +1130,6 @@ class QueryTransformer(object): converters = ( (base.TimestampUTC, utils.to_datetime), - (sqlalchemy_utils.UUIDType, utils.ResourceUUID), (types.String, six.text_type), (types.Integer, int), (types.Numeric, float), diff --git a/gnocchi/rest/__init__.py b/gnocchi/rest/__init__.py index 05514021..4e8e2f3d 100644 --- a/gnocchi/rest/__init__.py +++ b/gnocchi/rest/__init__.py @@ -1159,7 +1159,12 @@ class SearchResourceTypeController(rest.RestController): u"!=", u"≠", u"ne", u"in", u"like", - ): voluptuous.All(voluptuous.Length(min=1, max=1), dict), + ): voluptuous.All( + voluptuous.Length(min=1, max=1), + voluptuous.Any( + {"id": voluptuous.Any( + utils.ResourceUUID, [utils.ResourceUUID]), + voluptuous.Extra: voluptuous.Extra})), voluptuous.Any( u"and", u"∨", u"or", u"∧", diff --git a/gnocchi/tests/test_indexer.py b/gnocchi/tests/test_indexer.py index 80d0d0e6..4316eccb 100644 --- a/gnocchi/tests/test_indexer.py +++ b/gnocchi/tests/test_indexer.py @@ -767,16 +767,6 @@ class TestIndexerDriver(tests_base.TestCase): else: self.fail("Some resources were not found") - def test_list_resource_weird_uuid(self): - r = self.index.list_resources( - 'generic', attribute_filter={"=": {"id": "f00bar"}}) - self.assertEqual(0, len(r)) - self.assertRaises( - indexer.QueryValueError, - self.index.list_resources, - 'generic', - attribute_filter={"=": {"id": "f00bar" * 50}}) - def test_list_resource_attribute_type_numeric(self): """Test that we can pass an integer to filter on a string type.""" mgr = self.index.get_resource_type_schema() -- GitLab From 
5f580161cb3e7b57582e026f7071567f74b8a2dc Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Fri, 6 Jan 2017 09:03:15 +0100 Subject: [PATCH 0592/1483] allow required=True when patching resource type We have currently two limitations about resource type update. * We cannot set required=True on a new attribute * We cannot update a attribute This change introduces the notion of 'options' for the description of a new attribute or an attribute to update. And implements the first item on top of this "option". The option added is 'fill' to known to fill all row of existing resources with a default value. And 'fill' is required if 'required' is set to allow new attribute with required=True. Change-Id: If0bd609ed586b6fbe4fe7877ece237e55baa7d45 --- doc/source/rest.yaml | 5 + gnocchi/indexer/sqlalchemy.py | 19 +- gnocchi/indexer/sqlalchemy_extension.py | 26 ++- gnocchi/resource_type.py | 60 +++++- gnocchi/rest/__init__.py | 10 +- .../tests/gabbi/gabbits/resource-type.yaml | 177 ++++++++++++++++++ gnocchi/tests/test_indexer.py | 7 +- ...-required-attributes-f446c220d54c8eb7.yaml | 6 + 8 files changed, 287 insertions(+), 23 deletions(-) create mode 100644 releasenotes/notes/resource-type-required-attributes-f446c220d54c8eb7.yaml diff --git a/doc/source/rest.yaml b/doc/source/rest.yaml index 4d95c460..396576ee 100644 --- a/doc/source/rest.yaml +++ b/doc/source/rest.yaml @@ -455,6 +455,11 @@ "path": "/attributes/awesome-stuff", "value": {"type": "bool", "required": false} }, + { + "op": "add", + "path": "/attributes/required-stuff", + "value": {"type": "bool", "required": true, "options": {"fill": true}} + }, { "op": "remove", "path": "/attributes/prefix" diff --git a/gnocchi/indexer/sqlalchemy.py b/gnocchi/indexer/sqlalchemy.py index bcb037dc..cc2aab0b 100644 --- a/gnocchi/indexer/sqlalchemy.py +++ b/gnocchi/indexer/sqlalchemy.py @@ -397,6 +397,7 @@ class SQLAlchemyIndexer(indexer.IndexerDriver): try: with self.facade.independent_writer() as session: + engine = 
session.connection() rt = self._get_resource_type(session, name) with self.facade.writer_connection() as connection: @@ -407,12 +408,22 @@ class SQLAlchemyIndexer(indexer.IndexerDriver): for attr in del_attributes: batch_op.drop_column(attr) for attr in add_attributes: - # TODO(sileht): When attr.required is True, we - # have to pass a default. rest layer current - # protect us, requied = True is not yet allowed + server_default = attr.for_filling( + engine.dialect) batch_op.add_column(sqlalchemy.Column( attr.name, attr.satype, - nullable=not attr.required)) + nullable=not attr.required, + server_default=server_default)) + + # We have all rows filled now, we can remove + # the server_default + if server_default is not None: + batch_op.alter_column( + column_name=attr.name, + existing_type=attr.satype, + existing_server_default=server_default, + existing_nullable=not attr.required, + server_default=None) rt.state = "active" rt.updated_at = utils.utcnow() diff --git a/gnocchi/indexer/sqlalchemy_extension.py b/gnocchi/indexer/sqlalchemy_extension.py index 058d31b2..bc4d8418 100644 --- a/gnocchi/indexer/sqlalchemy_extension.py +++ b/gnocchi/indexer/sqlalchemy_extension.py @@ -20,19 +20,37 @@ import sqlalchemy_utils from gnocchi import resource_type -class StringSchema(resource_type.StringSchema): +class SchemaMixin(object): + def for_filling(self, dialect): + # NOTE(sileht): This must be used only for patching resource type + # to fill all row with a default value and then switch back the + # server_default to None + if self.fill is None: + return None + + # NOTE(sileht): server_default must be converted in sql element + return sqlalchemy.literal(self.fill) + + +class StringSchema(resource_type.StringSchema, SchemaMixin): @property def satype(self): return sqlalchemy.String(self.max_length) -class UUIDSchema(resource_type.UUIDSchema): +class UUIDSchema(resource_type.UUIDSchema, SchemaMixin): satype = sqlalchemy_utils.UUIDType() + def for_filling(self, dialect): + if 
self.fill is None: + return False # Don't set any server_default + return sqlalchemy.literal( + self.satype.process_bind_param(self.fill, dialect)) + -class NumberSchema(resource_type.NumberSchema): +class NumberSchema(resource_type.NumberSchema, SchemaMixin): satype = sqlalchemy.Float(53) -class BoolSchema(resource_type.BoolSchema): +class BoolSchema(resource_type.BoolSchema, SchemaMixin): satype = sqlalchemy.Boolean diff --git a/gnocchi/resource_type.py b/gnocchi/resource_type.py index ad1bcddb..73b75564 100644 --- a/gnocchi/resource_type.py +++ b/gnocchi/resource_type.py @@ -55,24 +55,63 @@ class InvalidResourceAttributeValue(InvalidResourceAttribute): self.max = max +class InvalidResourceAttributeOption(InvalidResourceAttribute): + """Error raised when the resource attribute name is invalid.""" + def __init__(self, name, option, reason): + super(InvalidResourceAttributeOption, self).__init__( + "Option '%s' of resource attribute %s is invalid: %s" % + (option, str(name), str(reason))) + self.name = name + self.option = option + self.reason = reason + + +# NOTE(sileht): This is to store the behavior of some operations: +# * fill, to set a default value to all existing resource type +# +# in the future for example, we can allow to change the length of +# a string attribute, if the new one is shorter, we can add a option +# to define the behavior like: +# * resize = trunc or reject +OperationOptions = { + voluptuous.Optional('fill'): object +} + + class CommonAttributeSchema(object): meta_schema_ext = {} schema_ext = None - def __init__(self, type, name, required): + def __init__(self, type, name, required, options=None): if (len(name) > 63 or name in INVALID_NAMES or not VALID_CHARS.match(name)): raise InvalidResourceAttributeName(name) self.name = name self.required = required + self.fill = None + + # options is set only when we update a resource type + if options is not None: + fill = options.get("fill") + if fill is None and required: + raise 
InvalidResourceAttributeOption( + name, "fill", "must not be empty if required=True") + elif fill is not None: + # Ensure fill have the correct attribute type + try: + self.fill = voluptuous.Schema(self.schema_ext)(fill) + except voluptuous.Error as e: + raise InvalidResourceAttributeOption(name, "fill", e) @classmethod - def meta_schema(cls): + def meta_schema(cls, for_update=False): d = { voluptuous.Required('type'): cls.typename, voluptuous.Required('required', default=True): bool } + if for_update: + d[voluptuous.Required('options', default={})] = OperationOptions if callable(cls.meta_schema_ext): d.update(cls.meta_schema_ext()) else: @@ -94,12 +133,12 @@ class StringSchema(CommonAttributeSchema): typename = "string" def __init__(self, min_length, max_length, *args, **kwargs): - super(StringSchema, self).__init__(*args, **kwargs) if min_length > max_length: raise InvalidResourceAttributeValue(min_length, max_length) self.min_length = min_length self.max_length = max_length + super(StringSchema, self).__init__(*args, **kwargs) meta_schema_ext = { voluptuous.Required('min_length', default=0): @@ -131,12 +170,11 @@ class NumberSchema(CommonAttributeSchema): typename = "number" def __init__(self, min, max, *args, **kwargs): - super(NumberSchema, self).__init__(*args, **kwargs) if max is not None and min is not None and min > max: raise InvalidResourceAttributeValue(min, max) - self.min = min self.max = max + super(NumberSchema, self).__init__(*args, **kwargs) meta_schema_ext = { voluptuous.Required('min', default=None): voluptuous.Any( @@ -182,9 +220,21 @@ class ResourceTypeSchemaManager(stevedore.ExtensionManager): } }) + type_schemas = tuple([ext.plugin.meta_schema(for_update=True) + for ext in self.extensions]) + self._schema_for_update = voluptuous.Schema({ + "name": six.text_type, + voluptuous.Required("attributes", default={}): { + six.text_type: voluptuous.Any(*tuple(type_schemas)) + } + }) + def __call__(self, definition): return self._schema(definition) + 
def for_update(self, definition): + return self._schema_for_update(definition) + def attributes_from_dict(self, attributes): return ResourceTypeAttributes( self[attr["type"]].plugin(name=name, **attr) diff --git a/gnocchi/rest/__init__.py b/gnocchi/rest/__init__.py index 05514021..5204fae9 100644 --- a/gnocchi/rest/__init__.py +++ b/gnocchi/rest/__init__.py @@ -755,7 +755,7 @@ class ResourceTypeController(rest.RestController): # Validate that the whole new resource_type is valid schema = pecan.request.indexer.get_resource_type_schema() try: - rt_json_next = voluptuous.Schema(schema, required=True)( + rt_json_next = voluptuous.Schema(schema.for_update, required=True)( rt_json_next) except voluptuous.Error as e: abort(400, "Invalid input: %s" % e) @@ -776,14 +776,6 @@ class ResourceTypeController(rest.RestController): except resource_type.InvalidResourceAttribute as e: abort(400, "Invalid input: %s" % e) - # TODO(sileht): Add a default field on an attribute - # to be able to fill non-nullable column on sql side. 
- # And obviousy remove this limitation - for attr in add_attrs: - if attr.required: - abort(400, ValueError("Adding required attributes is not yet " - "possible.")) - try: return pecan.request.indexer.update_resource_type( self._name, add_attributes=add_attrs, diff --git a/gnocchi/tests/gabbi/gabbits/resource-type.yaml b/gnocchi/tests/gabbi/gabbits/resource-type.yaml index 786cf27e..b41326be 100644 --- a/gnocchi/tests/gabbi/gabbits/resource-type.yaml +++ b/gnocchi/tests/gabbi/gabbits/resource-type.yaml @@ -348,6 +348,47 @@ tests: required: False min_length: 0 max_length: 255 + - op: add + path: /attributes/newfilled + value: + type: string + required: False + min_length: 0 + max_length: 255 + options: + fill: "filled" + - op: add + path: /attributes/newbool + value: + type: bool + required: True + options: + fill: True + - op: add + path: /attributes/newint + value: + type: number + required: True + min: 0 + max: 255 + options: + fill: 15 + - op: add + path: /attributes/newstring + value: + type: string + required: True + min_length: 0 + max_length: 255 + options: + fill: "foobar" + - op: add + path: /attributes/newuuid + value: + type: uuid + required: True + options: + fill: "00000000-0000-0000-0000-000000000000" - op: remove path: /attributes/foobar status: 200 @@ -385,6 +426,62 @@ tests: required: False min_length: 0 max_length: 255 + newfilled: + type: string + required: False + min_length: 0 + max_length: 255 + newstring: + type: string + required: True + min_length: 0 + max_length: 255 + newbool: + type: bool + required: True + newint: + type: number + required: True + min: 0 + max: 255 + newuuid: + type: uuid + required: True + + - name: post a new resource attribute with missing fill + url: /v1/resource_type/my_custom_resource + method: patch + request_headers: + x-roles: admin + content-type: application/json-patch+json + data: + - op: add + path: /attributes/missing + value: + type: bool + required: True + options: {} + status: 400 + response_strings: + 
- "Invalid input: Option 'fill' of resource attribute missing is invalid: must not be empty if required=True" + + - name: post a new resource attribute with incorrect fill + url: /v1/resource_type/my_custom_resource + method: patch + request_headers: + x-roles: admin + content-type: application/json-patch+json + data: + - op: add + path: /attributes/incorrect + value: + type: number + required: True + options: + fill: "a-string" + status: 400 + response_strings: + - "Invalid input: Option 'fill' of resource attribute incorrect is invalid: expected Real" - name: get the new custom resource type url: /v1/resource_type/my_custom_resource @@ -422,6 +519,65 @@ tests: required: False min_length: 0 max_length: 255 + newfilled: + type: string + required: False + min_length: 0 + max_length: 255 + newstring: + type: string + required: True + min_length: 0 + max_length: 255 + newbool: + type: bool + required: True + newint: + type: number + required: True + min: 0 + max: 255 + newuuid: + type: uuid + required: True + + - name: control new attributes of existing resource + GET: /v1/resource/my_custom_resource/d11edfca-4393-4fda-b94d-b05a3a1b3747 + request_headers: + content-type: application/json + status: 200 + response_json_paths: + $.id: d11edfca-4393-4fda-b94d-b05a3a1b3747 + $.name: foo + $.newstuff: null + $.newfilled: "filled" + $.newbool: true + $.newint: 15 + $.newstring: foobar + $.newuuid: "00000000-0000-0000-0000-000000000000" + + - name: control new attributes of existing resource history + GET: /v1/resource/my_custom_resource/d11edfca-4393-4fda-b94d-b05a3a1b3747/history?sort=revision_end:asc-nullslast + request_headers: + content-type: application/json + response_json_paths: + $.`len`: 2 + $[0].id: d11edfca-4393-4fda-b94d-b05a3a1b3747 + $[0].name: bar + $[0].newstuff: null + $[0].newfilled: "filled" + $[0].newbool: true + $[0].newint: 15 + $[0].newstring: foobar + $[0].newuuid: "00000000-0000-0000-0000-000000000000" + $[1].id: d11edfca-4393-4fda-b94d-b05a3a1b3747 
+ $[1].name: foo + $[1].newstuff: null + $[1].newfilled: "filled" + $[1].newbool: true + $[1].newint: 15 + $[1].newstring: foobar + $[1].newuuid: "00000000-0000-0000-0000-000000000000" # Invalid patch @@ -476,6 +632,27 @@ tests: required: False min_length: 0 max_length: 255 + newfilled: + type: string + required: False + min_length: 0 + max_length: 255 + newstring: + type: string + required: True + min_length: 0 + max_length: 255 + newbool: + type: bool + required: True + newint: + type: number + required: True + min: 0 + max: 255 + newuuid: + type: uuid + required: True - name: delete/add the same resource attribute url: /v1/resource_type/my_custom_resource diff --git a/gnocchi/tests/test_indexer.py b/gnocchi/tests/test_indexer.py index 80d0d0e6..a87f59dc 100644 --- a/gnocchi/tests/test_indexer.py +++ b/gnocchi/tests/test_indexer.py @@ -1120,7 +1120,9 @@ class TestIndexerDriver(tests_base.TestCase): # Update the resource type add_attrs = mgr.resource_type_from_dict("indexer_test", { "col2": {"type": "number", "required": False, - "max": 100, "min": 0} + "max": 100, "min": 0}, + "col3": {"type": "number", "required": True, + "max": 100, "min": 0, "options": {'fill': 15}} }, "creating").attributes self.index.update_resource_type("indexer_test", add_attributes=add_attrs) @@ -1128,6 +1130,7 @@ class TestIndexerDriver(tests_base.TestCase): # Check the new attribute r = self.index.get_resource("indexer_test", rid) self.assertIsNone(r.col2) + self.assertEqual(15, r.col3) self.index.update_resource("indexer_test", rid, col2=10) @@ -1139,6 +1142,8 @@ class TestIndexerDriver(tests_base.TestCase): self.assertEqual(2, len(rl)) self.assertIsNone(rl[0].col2) self.assertEqual(10, rl[1].col2) + self.assertEqual(15, rl[0].col3) + self.assertEqual(15, rl[1].col3) # Deletion self.assertRaises(indexer.ResourceTypeInUse, diff --git a/releasenotes/notes/resource-type-required-attributes-f446c220d54c8eb7.yaml b/releasenotes/notes/resource-type-required-attributes-f446c220d54c8eb7.yaml 
new file mode 100644 index 00000000..a91c8176 --- /dev/null +++ b/releasenotes/notes/resource-type-required-attributes-f446c220d54c8eb7.yaml @@ -0,0 +1,6 @@ +--- +features: + - When updating a resource attribute, it's now possible to pass the option + 'fill' for each attribute to fill existing resources. + - required=True is now supported when updating resource type. This requires + the option 'fill' to be set. -- GitLab From 5b974f3e9e7a26d51bfb1a893f783fdf3d6a6621 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Wed, 25 Jan 2017 15:55:36 +0100 Subject: [PATCH 0593/1483] Required some configuration options When IntOpt is used with a default, and the user put workers = the value retreived is None. If we want oslo.config to bail out if the user override the default with None and doesn't provide a 'int', we must set required=True. Change-Id: I1890ae4a156ecf0e432c96e08a24bfba91739190 --- gnocchi/opts.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/gnocchi/opts.py b/gnocchi/opts.py index d2acd06e..ffe2c222 100644 --- a/gnocchi/opts.py +++ b/gnocchi/opts.py @@ -60,21 +60,25 @@ def list_opts(): ("indexer", gnocchi.indexer.OPTS), ("metricd", ( cfg.IntOpt('workers', min=1, + required=True, help='Number of workers for Gnocchi metric daemons. 
' 'By default the available number of CPU is used.'), cfg.IntOpt('metric_processing_delay', default=60, + required=True, deprecated_group='storage', help="How many seconds to wait between " "scheduling new metrics to process"), cfg.IntOpt('metric_reporting_delay', deprecated_group='storage', default=120, + required=True, help="How many seconds to wait between " "metric ingestion reporting"), cfg.IntOpt('metric_cleanup_delay', deprecated_group='storage', default=300, + required=True, help="How many seconds to wait between " "cleaning of expired data"), )), @@ -93,6 +97,7 @@ def list_opts(): help='Authentication mode to use.'), cfg.IntOpt('max_limit', default=1000, + required=True, help=('The maximum number of items returned in a ' 'single response from a collection resource')), )), -- GitLab From 05ad54862f12a967ecee9fbdd55214cba8f54bca Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Wed, 25 Jan 2017 21:15:05 +0100 Subject: [PATCH 0594/1483] tests: clean upgrade variable These variable are actually overwritten by pifpaf (currently). As pifpaf will likely remove these variable in the future (since the default is now basic), let's set them and use the actually defined value by pifpaf. 
Change-Id: I9f157e9c74377091747b5ae699048e84ca1283c6 --- run-upgrade-tests.sh | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/run-upgrade-tests.sh b/run-upgrade-tests.sh index ae021e82..2fe0f7aa 100755 --- a/run-upgrade-tests.sh +++ b/run-upgrade-tests.sh @@ -1,10 +1,6 @@ #!/bin/bash set -e -export OS_AUTH_TYPE=gnocchi-noauth -export GNOCCHI_ENDPOINT=http://localhost:8041 -export GNOCCHI_USER_ID=99aae-4dc2-4fbc-b5b8-9688c470d9cc -export GNOCCHI_PROJECT_ID=c8d27445-48af-457c-8e0d-1de7103eae1f export GNOCCHI_DATA=$(mktemp -d -t gnocchi.XXXX) GDATE=$((which gdate >/dev/null && echo gdate) || echo date) @@ -84,6 +80,10 @@ else fi eval $(pifpaf run gnocchi --indexer-url $INDEXER_URL --storage-url $STORAGE_URL) +# Override default to be sure to use noauth +export OS_AUTH_TYPE=gnocchi-noauth +export GNOCCHI_USER_ID=admin +export GNOCCHI_PROJECT_ID=admin gnocchi resource delete $GNOCCHI_STATSD_RESOURCE_ID inject_data $GNOCCHI_DATA dump_data $GNOCCHI_DATA/old -- GitLab From c2c8ceff45b0c6ad67a296d5f810e8b9b7cb9058 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Wed, 25 Jan 2017 21:16:50 +0100 Subject: [PATCH 0595/1483] doc: move devstack at the end This is not what users are looking for first. Change-Id: Ib2c43b6eef3ec7ac1534df99b35e39de9a30ad9b --- doc/source/install.rst | 40 ++++++++++++++++++++-------------------- 1 file changed, 20 insertions(+), 20 deletions(-) diff --git a/doc/source/install.rst b/doc/source/install.rst index 056e0677..da923879 100644 --- a/doc/source/install.rst +++ b/doc/source/install.rst @@ -2,26 +2,6 @@ Installation ============== -Installation Using Devstack -=========================== - -To enable Gnocchi in devstack, add the following to local.conf: - -:: - - enable_plugin gnocchi https://github.com/openstack/gnocchi master - -To enable Grafana support in devstack, you can also enable `gnocchi-grafana`:: - - enable_service gnocchi-grafana - -Then, you can start devstack: - -:: - - ./stack.sh - - .. 
_installation: Installation @@ -113,3 +93,23 @@ that your indexer and storage are properly upgraded. Run the following: 5. Start the new Gnocchi API server, `gnocchi-metricd` and `gnocchi-statsd` daemons + + +Installation Using Devstack +=========================== + +To enable Gnocchi in devstack, add the following to local.conf: + +:: + + enable_plugin gnocchi https://github.com/openstack/gnocchi master + +To enable Grafana support in devstack, you can also enable `gnocchi-grafana`:: + + enable_service gnocchi-grafana + +Then, you can start devstack: + +:: + + ./stack.sh -- GitLab From 60ae8fa95ce8f2536946755e512befb07c929696 Mon Sep 17 00:00:00 2001 From: gord chung Date: Fri, 6 Jan 2017 19:41:44 +0000 Subject: [PATCH 0596/1483] cleanup noauth path - we don't need to set keystone_authtoken values - setting OS_AUTH_TYPE breaks all other clients since it's global so just leave it and let user set to client specifically. this will not make integration gate work with noauth as there are other commands which require gnocchi to be registered in catalog Change-Id: I3f451a40cdda12072daa69744467403236f633a7 --- devstack/plugin.sh | 13 ++----------- 1 file changed, 2 insertions(+), 11 deletions(-) diff --git a/devstack/plugin.sh b/devstack/plugin.sh index f271aa62..4cced8d4 100644 --- a/devstack/plugin.sh +++ b/devstack/plugin.sh @@ -242,9 +242,6 @@ function configure_gnocchi { iniset $GNOCCHI_CONF storage coordination_url "$GNOCCHI_COORDINATOR_URL" fi - # Configure auth token middleware - configure_auth_token_middleware $GNOCCHI_CONF gnocchi $GNOCCHI_AUTH_CACHE_DIR - if is_service_enabled gnocchi-statsd ; then iniset $GNOCCHI_CONF statsd resource_id $GNOCCHI_STATSD_RESOURCE_ID iniset $GNOCCHI_CONF statsd project_id $GNOCCHI_STATSD_PROJECT_ID @@ -272,6 +269,8 @@ function configure_gnocchi { fi if [ "$GNOCCHI_USE_KEYSTONE" == "True" ] ; then + # Configure auth token middleware + configure_auth_token_middleware $GNOCCHI_CONF gnocchi $GNOCCHI_AUTH_CACHE_DIR iniset 
$GNOCCHI_CONF api auth_mode keystone if is_service_enabled gnocchi-grafana; then iniset $GNOCCHI_CONF cors allowed_origin ${GRAFANA_URL} @@ -433,14 +432,6 @@ function start_gnocchi { fi fi - # Create a default policy - if [ "$GNOCCHI_USE_KEYSTONE" == "False" ]; then - export OS_AUTH_TYPE=gnocchi-noauth - export GNOCCHI_USER_ID=`uuidgen` - export GNOCCHI_PROJECT_ID=`uuidgen` - export GNOCCHI_ENDPOINT="$(gnocchi_service_url)" - fi - # run metricd last so we are properly waiting for swift and friends run_process gnocchi-metricd "$GNOCCHI_BIN_DIR/gnocchi-metricd -d -v --config-file $GNOCCHI_CONF" run_process gnocchi-statsd "$GNOCCHI_BIN_DIR/gnocchi-statsd -d -v --config-file $GNOCCHI_CONF" -- GitLab From 2bc624f1eba9c065c4cb0891809de33663440fd3 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Tue, 24 Jan 2017 15:53:24 +0100 Subject: [PATCH 0597/1483] noauth: force user to be provided Change-Id: If92800ed5162624340a409ee6c06cf2eb8915ba9 --- gnocchi/rest/auth_helper.py | 1 + gnocchi/tests/gabbi/gabbits/aggregation.yaml | 5 +++++ gnocchi/tests/gabbi/gabbits/base.yaml | 5 +++++ gnocchi/tests/gabbi/gabbits/batch-measures.yaml | 5 +++++ gnocchi/tests/gabbi/gabbits/metric-granularity.yaml | 5 +++++ gnocchi/tests/gabbi/gabbits/metric-timestamp-format.yaml | 5 +++++ gnocchi/tests/gabbi/gabbits/metric.yaml | 5 +++++ gnocchi/tests/gabbi/gabbits/search-metric.yaml | 5 +++++ gnocchi/tests/gabbi/gabbits/search.yaml | 5 +++++ .../notes/noauth-force-headers-dda926ce83f810e8.yaml | 5 +++++ 10 files changed, 46 insertions(+) create mode 100644 releasenotes/notes/noauth-force-headers-dda926ce83f810e8.yaml diff --git a/gnocchi/rest/auth_helper.py b/gnocchi/rest/auth_helper.py index c173c8de..46c0893c 100644 --- a/gnocchi/rest/auth_helper.py +++ b/gnocchi/rest/auth_helper.py @@ -98,6 +98,7 @@ class NoAuthHelper(KeystoneAuthHelper): return user_id if project_id: return project_id + rest.abort(401, "Unable to determine current user") class BasicAuthHelper(object): diff --git 
a/gnocchi/tests/gabbi/gabbits/aggregation.yaml b/gnocchi/tests/gabbi/gabbits/aggregation.yaml index 71a237f8..9c3b871e 100644 --- a/gnocchi/tests/gabbi/gabbits/aggregation.yaml +++ b/gnocchi/tests/gabbi/gabbits/aggregation.yaml @@ -1,6 +1,11 @@ fixtures: - ConfigFixture +defaults: + request_headers: + x-user-id: 0fbb231484614b1a80131fc22f6afc9c + x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea + tests: - name: create archive policy desc: for later use diff --git a/gnocchi/tests/gabbi/gabbits/base.yaml b/gnocchi/tests/gabbi/gabbits/base.yaml index 5410524f..eeb3ed9f 100644 --- a/gnocchi/tests/gabbi/gabbits/base.yaml +++ b/gnocchi/tests/gabbi/gabbits/base.yaml @@ -1,6 +1,11 @@ fixtures: - ConfigFixture +defaults: + request_headers: + x-user-id: 0fbb231484614b1a80131fc22f6afc9c + x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea + tests: - name: get information on APIs diff --git a/gnocchi/tests/gabbi/gabbits/batch-measures.yaml b/gnocchi/tests/gabbi/gabbits/batch-measures.yaml index 6e9812ea..6cc3710c 100644 --- a/gnocchi/tests/gabbi/gabbits/batch-measures.yaml +++ b/gnocchi/tests/gabbi/gabbits/batch-measures.yaml @@ -1,6 +1,11 @@ fixtures: - ConfigFixture +defaults: + request_headers: + x-user-id: 0fbb231484614b1a80131fc22f6afc9c + x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea + tests: - name: create archive policy desc: for later use diff --git a/gnocchi/tests/gabbi/gabbits/metric-granularity.yaml b/gnocchi/tests/gabbi/gabbits/metric-granularity.yaml index c6de61d3..d67e548f 100644 --- a/gnocchi/tests/gabbi/gabbits/metric-granularity.yaml +++ b/gnocchi/tests/gabbi/gabbits/metric-granularity.yaml @@ -1,6 +1,11 @@ fixtures: - ConfigFixture +defaults: + request_headers: + x-user-id: 0fbb231484614b1a80131fc22f6afc9c + x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea + tests: - name: create archive policy desc: for later use diff --git a/gnocchi/tests/gabbi/gabbits/metric-timestamp-format.yaml b/gnocchi/tests/gabbi/gabbits/metric-timestamp-format.yaml index 
7f212071..f4522880 100644 --- a/gnocchi/tests/gabbi/gabbits/metric-timestamp-format.yaml +++ b/gnocchi/tests/gabbi/gabbits/metric-timestamp-format.yaml @@ -1,6 +1,11 @@ fixtures: - ConfigFixture +defaults: + request_headers: + x-user-id: 0fbb231484614b1a80131fc22f6afc9c + x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea + tests: - name: create archive policy desc: for later use diff --git a/gnocchi/tests/gabbi/gabbits/metric.yaml b/gnocchi/tests/gabbi/gabbits/metric.yaml index 5b596a42..c27600f3 100644 --- a/gnocchi/tests/gabbi/gabbits/metric.yaml +++ b/gnocchi/tests/gabbi/gabbits/metric.yaml @@ -1,6 +1,11 @@ fixtures: - ConfigFixture +defaults: + request_headers: + x-user-id: 0fbb231484614b1a80131fc22f6afc9c + x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea + tests: - name: wrong metric desc: https://bugs.launchpad.net/gnocchi/+bug/1429949 diff --git a/gnocchi/tests/gabbi/gabbits/search-metric.yaml b/gnocchi/tests/gabbi/gabbits/search-metric.yaml index 13492971..ae93637c 100644 --- a/gnocchi/tests/gabbi/gabbits/search-metric.yaml +++ b/gnocchi/tests/gabbi/gabbits/search-metric.yaml @@ -6,6 +6,11 @@ fixtures: - ConfigFixture +defaults: + request_headers: + x-user-id: 0fbb231484614b1a80131fc22f6afc9c + x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea + tests: - name: create archive policy desc: for later use diff --git a/gnocchi/tests/gabbi/gabbits/search.yaml b/gnocchi/tests/gabbi/gabbits/search.yaml index d3c3a3d1..c8f9bc2d 100644 --- a/gnocchi/tests/gabbi/gabbits/search.yaml +++ b/gnocchi/tests/gabbi/gabbits/search.yaml @@ -6,6 +6,11 @@ fixtures: - ConfigFixture +defaults: + request_headers: + x-user-id: 0fbb231484614b1a80131fc22f6afc9c + x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea + tests: - name: typo of search GET: /v1/search/notexists diff --git a/releasenotes/notes/noauth-force-headers-dda926ce83f810e8.yaml b/releasenotes/notes/noauth-force-headers-dda926ce83f810e8.yaml new file mode 100644 index 00000000..004ef170 --- /dev/null +++ 
b/releasenotes/notes/noauth-force-headers-dda926ce83f810e8.yaml @@ -0,0 +1,5 @@ +--- +other: + - >- + The `noauth` authentication mode now requires that the `X-User-Id` and/or + `X-Project-Id` to be present. -- GitLab From e011cdd731a27b677ccf6a9103018da3c6d44e00 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Thu, 26 Jan 2017 10:10:14 +0100 Subject: [PATCH 0598/1483] storage: Read incoming config correctly This change reads the incoming config from the incoming section instead of the storage section. Change-Id: Ica245fa9accd7a33750725ca7e981e9fd25ff192 --- gnocchi/opts.py | 6 +++--- gnocchi/storage/__init__.py | 8 +++++--- 2 files changed, 8 insertions(+), 6 deletions(-) diff --git a/gnocchi/opts.py b/gnocchi/opts.py index d2acd06e..edd03780 100644 --- a/gnocchi/opts.py +++ b/gnocchi/opts.py @@ -44,7 +44,8 @@ class CustomStrSubWrapper(cfg.ConfigOpts.StrSubWrapper): cfg.ConfigOpts.StrSubWrapper = CustomStrSubWrapper -_STORAGE_OPTS = list(itertools.chain(gnocchi.storage.ceph.OPTS, +_STORAGE_OPTS = list(itertools.chain(gnocchi.storage.OPTS, + gnocchi.storage.ceph.OPTS, gnocchi.storage.file.OPTS, gnocchi.storage.swift.OPTS, gnocchi.storage.s3.OPTS)) @@ -96,8 +97,7 @@ def list_opts(): help=('The maximum number of items returned in a ' 'single response from a collection resource')), )), - ("storage", (_STORAGE_OPTS + gnocchi.storage._carbonara.OPTS + - gnocchi.storage.OPTS)), + ("storage", (_STORAGE_OPTS + gnocchi.storage._carbonara.OPTS)), ("incoming", _INCOMING_OPTS), ("statsd", ( cfg.StrOpt('host', diff --git a/gnocchi/storage/__init__.py b/gnocchi/storage/__init__.py index 958f0065..8b3f7f9f 100644 --- a/gnocchi/storage/__init__.py +++ b/gnocchi/storage/__init__.py @@ -135,13 +135,15 @@ def get_driver_class(namespace, conf): :param conf: The conf to use to determine the driver. 
""" return driver.DriverManager(namespace, - conf.storage.driver).driver + conf.driver).driver def get_driver(conf): """Return the configured driver.""" - incoming = get_driver_class('gnocchi.incoming', conf)(conf.incoming) - return get_driver_class('gnocchi.storage', conf)(conf.storage, incoming) + incoming = get_driver_class('gnocchi.incoming', conf.incoming)( + conf.incoming) + return get_driver_class('gnocchi.storage', conf.storage)( + conf.storage, incoming) class StorageDriver(object): -- GitLab From e87710d23f3e7ec0e2de84608596b2ab1fd486f4 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Wed, 25 Jan 2017 16:21:40 +0100 Subject: [PATCH 0599/1483] tests: use GNU sed where needed Change-Id: I8afa162938ef4ad195346f4e94563192ffd5c52b --- run-upgrade-tests.sh | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/run-upgrade-tests.sh b/run-upgrade-tests.sh index ae021e82..f772a2bc 100755 --- a/run-upgrade-tests.sh +++ b/run-upgrade-tests.sh @@ -8,6 +8,7 @@ export GNOCCHI_PROJECT_ID=c8d27445-48af-457c-8e0d-1de7103eae1f export GNOCCHI_DATA=$(mktemp -d -t gnocchi.XXXX) GDATE=$((which gdate >/dev/null && echo gdate) || echo date) +GSED=$((which gsed >/dev/null && echo gsed) || echo sed) old_version=$(pip freeze | sed -n '/gnocchi==/s/.*==\(.*\)/\1/p') [ "${old_version:0:1}" == "3" ] && have_resource_type_post=1 @@ -110,8 +111,8 @@ RESOURCE_IDS=( dump_data $GNOCCHI_DATA/new # NOTE(sileht): change the output of the old gnocchi to compare with the new without '/' -sed -i -e "s,5a301761/dddd/46e2/8900/8b4f6fe6675a,5a301761_dddd_46e2_8900_8b4f6fe6675a,g" \ - -e "s,19235bb9-35ca-5f55-b7db-165cfb033c86,fe1bdabf-d94c-5b3a-af1e-06bdff53f228,g" $GNOCCHI_DATA/old/resources.list +$GSED -i -e "s,5a301761/dddd/46e2/8900/8b4f6fe6675a,5a301761_dddd_46e2_8900_8b4f6fe6675a,g" \ + -e "s,19235bb9-35ca-5f55-b7db-165cfb033c86,fe1bdabf-d94c-5b3a-af1e-06bdff53f228,g" $GNOCCHI_DATA/old/resources.list echo "* Checking output difference between Gnocchi $old_version and 
$new_version" diff -uNr $GNOCCHI_DATA/old $GNOCCHI_DATA/new -- GitLab From 7d602de2174f99d448626f5324a50b2987e40fb8 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Thu, 26 Jan 2017 14:37:31 +0100 Subject: [PATCH 0600/1483] carbonara: numpy 1.9 have some bug with ? for bool 2017-01-26 13:30:07.205 414713 ERROR gnocchi.storage._carbonara File "/usr/lib/python2.7/site-packages/gnocchi/carbonara.py", line 629, in serialize 2017-01-26 13:30:07.205 414713 ERROR gnocchi.storage._carbonara (ones, values), names='b, v', formats=' Date: Thu, 26 Jan 2017 20:50:29 +0100 Subject: [PATCH 0601/1483] Revert "indexer: catch another mysql exception" This reverts commit 23614016944b760025beb66faef4d3ce4a1862ad. This fix is not the correct one. This kind of backtrace actually happens: Traceback (most recent call last): File "gnocchi/tests/base.py", line 55, in skip_if_not_implemented return func(*args, **kwargs) File "gnocchi/tests/test_indexer.py", line 814, in test_list_resources_without_history details=True) File "/home/jenkins/workspace/gate-gnocchi-tox-db-py27-mysql-ubuntu-xenial/.tox/py27-mysql/local/lib/python2.7/site-packages/oslo_db/api.py", line 151, in wrapper ectxt.value = e.inner_exc File "/home/jenkins/workspace/gate-gnocchi-tox-db-py27-mysql-ubuntu-xenial/.tox/py27-mysql/local/lib/python2.7/site-packages/oslo_utils/excutils.py", line 220, in __exit__ self.force_reraise() File "/home/jenkins/workspace/gate-gnocchi-tox-db-py27-mysql-ubuntu-xenial/.tox/py27-mysql/local/lib/python2.7/site-packages/oslo_utils/excutils.py", line 196, in force_reraise six.reraise(self.type_, self.value, self.tb) File "/home/jenkins/workspace/gate-gnocchi-tox-db-py27-mysql-ubuntu-xenial/.tox/py27-mysql/local/lib/python2.7/site-packages/oslo_db/api.py", line 139, in wrapper return f(*args, **kwargs) File "gnocchi/indexer/sqlalchemy.py", line 1050, in list_resources all_resources.extend(q.all()) File 
"/home/jenkins/workspace/gate-gnocchi-tox-db-py27-mysql-ubuntu-xenial/.tox/py27-mysql/local/lib/python2.7/site-packages/sqlalchemy/orm/query.py", line 2638, in all return list(self) File "/home/jenkins/workspace/gate-gnocchi-tox-db-py27-mysql-ubuntu-xenial/.tox/py27-mysql/local/lib/python2.7/site-packages/sqlalchemy/orm/query.py", line 2790, in __iter__ return self._execute_and_instances(context) File "/home/jenkins/workspace/gate-gnocchi-tox-db-py27-mysql-ubuntu-xenial/.tox/py27-mysql/local/lib/python2.7/site-packages/sqlalchemy/orm/query.py", line 2813, in _execute_and_instances result = conn.execute(querycontext.statement, self._params) File "/home/jenkins/workspace/gate-gnocchi-tox-db-py27-mysql-ubuntu-xenial/.tox/py27-mysql/local/lib/python2.7/site-packages/sqlalchemy/engine/base.py", line 945, in execute return meth(self, multiparams, params) File "/home/jenkins/workspace/gate-gnocchi-tox-db-py27-mysql-ubuntu-xenial/.tox/py27-mysql/local/lib/python2.7/site-packages/sqlalchemy/sql/elements.py", line 263, in _execute_on_connection return connection._execute_clauseelement(self, multiparams, params) File "/home/jenkins/workspace/gate-gnocchi-tox-db-py27-mysql-ubuntu-xenial/.tox/py27-mysql/local/lib/python2.7/site-packages/sqlalchemy/engine/base.py", line 1053, in _execute_clauseelement compiled_sql, distilled_params File "/home/jenkins/workspace/gate-gnocchi-tox-db-py27-mysql-ubuntu-xenial/.tox/py27-mysql/local/lib/python2.7/site-packages/sqlalchemy/engine/base.py", line 1189, in _execute_context context) File "/home/jenkins/workspace/gate-gnocchi-tox-db-py27-mysql-ubuntu-xenial/.tox/py27-mysql/local/lib/python2.7/site-packages/sqlalchemy/engine/base.py", line 1389, in _handle_dbapi_exception util.raise_from_cause(newraise, exc_info) File "/home/jenkins/workspace/gate-gnocchi-tox-db-py27-mysql-ubuntu-xenial/.tox/py27-mysql/local/lib/python2.7/site-packages/sqlalchemy/util/compat.py", line 203, in raise_from_cause reraise(type(exception), exception, tb=exc_tb, 
cause=cause) File "/home/jenkins/workspace/gate-gnocchi-tox-db-py27-mysql-ubuntu-xenial/.tox/py27-mysql/local/lib/python2.7/site-packages/sqlalchemy/engine/base.py", line 1182, in _execute_context context) File "/home/jenkins/workspace/gate-gnocchi-tox-db-py27-mysql-ubuntu-xenial/.tox/py27-mysql/local/lib/python2.7/site-packages/sqlalchemy/engine/default.py", line 470, in do_execute cursor.execute(statement, parameters) File "/home/jenkins/workspace/gate-gnocchi-tox-db-py27-mysql-ubuntu-xenial/.tox/py27-mysql/local/lib/python2.7/site-packages/pymysql/cursors.py", line 166, in execute result = self._query(query) File "/home/jenkins/workspace/gate-gnocchi-tox-db-py27-mysql-ubuntu-xenial/.tox/py27-mysql/local/lib/python2.7/site-packages/pymysql/cursors.py", line 322, in _query conn.query(q) File "/home/jenkins/workspace/gate-gnocchi-tox-db-py27-mysql-ubuntu-xenial/.tox/py27-mysql/local/lib/python2.7/site-packages/pymysql/connections.py", line 835, in query self._affected_rows = self._read_query_result(unbuffered=unbuffered) File "/home/jenkins/workspace/gate-gnocchi-tox-db-py27-mysql-ubuntu-xenial/.tox/py27-mysql/local/lib/python2.7/site-packages/pymysql/connections.py", line 1019, in _read_query_result result.read() File "/home/jenkins/workspace/gate-gnocchi-tox-db-py27-mysql-ubuntu-xenial/.tox/py27-mysql/local/lib/python2.7/site-packages/pymysql/connections.py", line 1302, in read first_packet = self.connection._read_packet() File "/home/jenkins/workspace/gate-gnocchi-tox-db-py27-mysql-ubuntu-xenial/.tox/py27-mysql/local/lib/python2.7/site-packages/pymysql/connections.py", line 981, in _read_packet packet.check_error() File "/home/jenkins/workspace/gate-gnocchi-tox-db-py27-mysql-ubuntu-xenial/.tox/py27-mysql/local/lib/python2.7/site-packages/pymysql/connections.py", line 393, in check_error err.raise_mysql_exception(self._data) File 
"/home/jenkins/workspace/gate-gnocchi-tox-db-py27-mysql-ubuntu-xenial/.tox/py27-mysql/local/lib/python2.7/site-packages/pymysql/err.py", line 107, in raise_mysql_exception raise errorclass(errno, errval) oslo_db.exception.DBError: (pymysql.err.InternalError) (1412, u'Table definition has changed, please retry transaction') [SQL: u'SELECT resource.creator AS resource_creator, resource.started_at AS resource_started_at, resource.revision_start AS resource_revision_start, resource.ended_at AS resource_ended_at, resource.user_id AS resource_user_id, resource.project_id AS resource_project_id, resource.original_resource_id AS resource_original_resource_id, rt_19d8ff7b16a44b85b5f274521cf609f2.id AS rt_19d8ff7b16a44b85b5f274521cf609f2_id, resource.id AS resource_id, resource.type AS resource_type, rt_19d8ff7b16a44b85b5f274521cf609f2.col1 AS rt_19d8ff7b16a44b85b5f274521cf609f2_col1, archive_policy_1.name AS archive_policy_1_name, archive_policy_1.back_window AS archive_policy_1_back_window, archive_policy_1.definition AS archive_policy_1_definition, archive_policy_1.aggregation_methods AS archive_policy_1_aggregation_methods, metric_1.id AS metric_1_id, metric_1.archive_policy_name AS metric_1_archive_policy_name, metric_1.creator AS metric_1_creator, metric_1.resource_id AS metric_1_resource_id, metric_1.name AS metric_1_name, metric_1.unit AS metric_1_unit, metric_1.status AS metric_1_status \nFROM resource INNER JOIN rt_19d8ff7b16a44b85b5f274521cf609f2 ON resource.id = rt_19d8ff7b16a44b85b5f274521cf609f2.id LEFT OUTER JOIN metric AS metric_1 ON resource.id = metric_1.resource_id AND metric_1.status = %(status_1)s LEFT OUTER JOIN archive_policy AS archive_policy_1 ON archive_policy_1.name = metric_1.archive_policy_name \nWHERE rt_19d8ff7b16a44b85b5f274521cf609f2.id IN (%(id_1)s)'] [parameters: {u'id_1': bytearray(b'hG\xd3J\xeadOM\xackb\x9fKb\xe4s'), u'status_1': 'active'}] which is not a ProgrammingError and which means we need a global decorator to catch it. 
Change-Id: I65fdfba31c0c282e23190ceb3daadbaa2a0c0dbf --- gnocchi/indexer/sqlalchemy.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/gnocchi/indexer/sqlalchemy.py b/gnocchi/indexer/sqlalchemy.py index a7dbb7c5..513a9fe3 100644 --- a/gnocchi/indexer/sqlalchemy.py +++ b/gnocchi/indexer/sqlalchemy.py @@ -1061,9 +1061,8 @@ class SQLAlchemyIndexer(indexer.IndexerDriver): e, sqlalchemy.exc.ProgrammingError) or not isinstance( e.orig, pymysql.err.ProgrammingError) - or (e.orig.args[0] not in - [pymysql.constants.ER.NO_SUCH_TABLE, - pymysql.constants.ER.TABLE_DEF_CHANGED])): + or (e.orig.args[0] + != pymysql.constants.ER.NO_SUCH_TABLE)): raise return all_resources -- GitLab From fc0e38c1dea9894e56bb7995f44cf29c138db034 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Thu, 26 Jan 2017 22:19:51 +0100 Subject: [PATCH 0602/1483] mysql: retry on table changes As seen in the backtrace from 0d268f5df8a5b82f3bfe1128a5043131da956bb9, MySQL now wants us to retry if the table definition changes. 
Change-Id: Ib8552529e73cee4ea92c151bbe3b9879e93a3a7c Signed-off-by: Julien Danjou --- gnocchi/indexer/sqlalchemy.py | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/gnocchi/indexer/sqlalchemy.py b/gnocchi/indexer/sqlalchemy.py index a7dbb7c5..89858482 100644 --- a/gnocchi/indexer/sqlalchemy.py +++ b/gnocchi/indexer/sqlalchemy.py @@ -61,11 +61,23 @@ _marker = indexer._marker LOG = log.getLogger(__name__) +def _retry_on_exceptions(exc): + return ( + pymysql and + isinstance(exc, exception.DBError) and + isinstance(exc.inner_exception, sqlalchemy.exc.InternalError) and + isinstance(exc.inner_exception.orig, pymysql.err.InternalError) and + (exc.inner_exception.orig.args[0] == + pymysql.constants.ER.TABLE_DEF_CHANGED) + ) + + def retry_on_deadlock(f): return oslo_db.api.wrap_db_retry(retry_on_deadlock=True, max_retries=20, retry_interval=0.1, - max_retry_interval=2)(f) + max_retry_interval=2, + exception_checker=_retry_on_exceptions)(f) class PerInstanceFacade(object): -- GitLab From 9ea432183772767634d61218bbba48508c960c9b Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Thu, 26 Jan 2017 22:39:56 +0100 Subject: [PATCH 0603/1483] sqlalchemy: factorize retry on transaction issue/table def change This moves the retry condition to a common code for MySQL and PostgreSQL. 
Change-Id: I1ddc78f64f3d909b1fa917e04717f2ccbd194d1d --- gnocchi/indexer/sqlalchemy.py | 82 +++++++++++++++-------------------- 1 file changed, 35 insertions(+), 47 deletions(-) diff --git a/gnocchi/indexer/sqlalchemy.py b/gnocchi/indexer/sqlalchemy.py index 89858482..e548a4b3 100644 --- a/gnocchi/indexer/sqlalchemy.py +++ b/gnocchi/indexer/sqlalchemy.py @@ -62,14 +62,28 @@ LOG = log.getLogger(__name__) def _retry_on_exceptions(exc): - return ( + if not isinstance(exc, exception.DBError): + return False + inn_e = exception.inner_exception + if not isinstance(inn_e, sqlalchemy.exc.InternalError): + return False + return (( pymysql and - isinstance(exc, exception.DBError) and - isinstance(exc.inner_exception, sqlalchemy.exc.InternalError) and - isinstance(exc.inner_exception.orig, pymysql.err.InternalError) and - (exc.inner_exception.orig.args[0] == - pymysql.constants.ER.TABLE_DEF_CHANGED) - ) + isinstance(inn_e.orig, pymysql.err.InternalError) and + (inn_e.orig.args[0] == pymysql.constants.ER.TABLE_DEF_CHANGED) + ) or ( + # HACK(jd) Sometimes, PostgreSQL raises an error such as "current + # transaction is aborted, commands ignored until end of transaction + # block" on its own catalog, so we need to retry, but this is not + # caught by oslo.db as a deadlock. This is likely because when we use + # Base.metadata.create_all(), sqlalchemy itself gets an error it does + # not catch or something. So this is why this function exists. To + # paperover I guess. 
+ psycopg2 + and isinstance(inn_e.orig, psycopg2.InternalError) + # current transaction is aborted + and inn_e.orig.pgcode == '25P02' + )) def retry_on_deadlock(f): @@ -176,34 +190,13 @@ class ResourceClassMapper(object): tables = [Base.metadata.tables[mappers["resource"].__tablename__], Base.metadata.tables[mappers["history"].__tablename__]] - try: - with facade.writer_connection() as connection: - Base.metadata.create_all(connection, tables=tables) - except exception.DBError as e: - if self._is_current_transaction_aborted(e): - raise exception.RetryRequest(e) - raise + with facade.writer_connection() as connection: + Base.metadata.create_all(connection, tables=tables) # NOTE(sileht): no need to protect the _cache with a lock # get_classes cannot be called in state creating self._cache[resource_type.tablename] = mappers - @staticmethod - def _is_current_transaction_aborted(exception): - # HACK(jd) Sometimes, PostgreSQL raises an error such as "current - # transaction is aborted, commands ignored until end of transaction - # block" on its own catalog, so we need to retry, but this is not - # caught by oslo.db as a deadlock. This is likely because when we use - # Base.metadata.create_all(), sqlalchemy itself gets an error it does - # not catch or something. So this is why this function exists. To - # paperover I guess. 
- inn_e = exception.inner_exception - return (psycopg2 - and isinstance(inn_e, sqlalchemy.exc.InternalError) - and isinstance(inn_e.orig, psycopg2.InternalError) - # current transaction is aborted - and inn_e.orig.pgcode == '25P02') - @retry_on_deadlock def unmap_and_delete_tables(self, resource_type, facade): if resource_type.state != "deleting": @@ -225,25 +218,20 @@ class ResourceClassMapper(object): # so this code cannot be triggerred anymore for this # resource_type with facade.writer_connection() as connection: - try: - for table in tables: - for fk in table.foreign_key_constraints: - try: - self._safe_execute( - connection, - sqlalchemy.schema.DropConstraint(fk)) - except exception.DBNonExistentConstraint: - pass - for table in tables: + for table in tables: + for fk in table.foreign_key_constraints: try: - self._safe_execute(connection, - sqlalchemy.schema.DropTable(table)) - except exception.DBNonExistentTable: + self._safe_execute( + connection, + sqlalchemy.schema.DropConstraint(fk)) + except exception.DBNonExistentConstraint: pass - except exception.DBError as e: - if self._is_current_transaction_aborted(e): - raise exception.RetryRequest(e) - raise + for table in tables: + try: + self._safe_execute(connection, + sqlalchemy.schema.DropTable(table)) + except exception.DBNonExistentTable: + pass # NOTE(sileht): If something goes wrong here, we are currently # fucked, that why we expose the state to the superuser. -- GitLab From e066d53fd57023d9a903656c6a8e0ceb87eb2912 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Fri, 27 Jan 2017 10:58:39 +0100 Subject: [PATCH 0604/1483] tests: factorize measure generation to speed up test They will all the same value unfortunately, but it should speed things up a lot. Calling date is slow. 
Change-Id: Iad7ff19528905fa16e7867fafdb662a443ab12f0 --- run-upgrade-tests.sh | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) diff --git a/run-upgrade-tests.sh b/run-upgrade-tests.sh index f772a2bc..a400a139 100755 --- a/run-upgrade-tests.sh +++ b/run-upgrade-tests.sh @@ -46,17 +46,16 @@ inject_data() { gnocchi resource create ext --attribute id:$RESOURCE_ID_EXT -n metric:high > /dev/null { + measures_sep="" + MEASURES=$(for i in $(seq 0 10 288000); do + now=$($GDATE --iso-8601=s -d "-${i}minute") ; value=$((RANDOM % 13 + 52)) + echo -n "$measures_sep {\"timestamp\": \"$now\", \"value\": $value }" + measures_sep="," + done) echo -n '{' resource_sep="" for resource_id in ${RESOURCE_IDS[@]} $RESOURCE_ID_EXT; do - echo -n "$resource_sep \"$resource_id\": { \"metric\": [ " - measures_sep="" - for i in $(seq 0 10 288000); do - now=$($GDATE --iso-8601=s -d "-${i}minute") ; value=$((RANDOM % 13 + 52)) - echo -n "$measures_sep {\"timestamp\": \"$now\", \"value\": $value }" - measures_sep="," - done - echo -n "] }" + echo -n "$resource_sep \"$resource_id\": { \"metric\": [ $MEASURES ] }" resource_sep="," done echo -n '}' -- GitLab From 77d02efad67586baed928d9fdfe8d5d24e1b54fa Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Fri, 27 Jan 2017 16:56:46 +0100 Subject: [PATCH 0605/1483] tests: do not create a resource with a custom resource type in 2.2 This actually creates a resource whose id is "" since $RESOURCE_ID_EXT is empty. 
Change-Id: I10c797dfb356de9707e17a3871af53235f576798 --- run-upgrade-tests.sh | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/run-upgrade-tests.sh b/run-upgrade-tests.sh index a400a139..4220e252 100755 --- a/run-upgrade-tests.sh +++ b/run-upgrade-tests.sh @@ -42,8 +42,11 @@ inject_data() { gnocchi resource create generic --attribute id:$resource_id -n metric:high > /dev/null done - gnocchi resource-type create ext > /dev/null - gnocchi resource create ext --attribute id:$RESOURCE_ID_EXT -n metric:high > /dev/null + if [ "$have_resource_type_post" ] + then + gnocchi resource-type create ext > /dev/null + gnocchi resource create ext --attribute id:$RESOURCE_ID_EXT -n metric:high > /dev/null + fi { measures_sep="" -- GitLab From cd8d85e9d5505667802f8a17db5fa361521714a4 Mon Sep 17 00:00:00 2001 From: gord chung Date: Fri, 27 Jan 2017 17:20:50 +0000 Subject: [PATCH 0606/1483] ceph: set return limit when grabbing metrics to process, we only grab a chunk. there's no point grabbing uncapped amount and taking small chunk near front. 
Change-Id: I9ac64037705f08a3133f6d845ced516aab50d40f --- gnocchi/storage/incoming/ceph.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/gnocchi/storage/incoming/ceph.py b/gnocchi/storage/incoming/ceph.py index 1ea98780..51fc2471 100644 --- a/gnocchi/storage/incoming/ceph.py +++ b/gnocchi/storage/incoming/ceph.py @@ -105,9 +105,9 @@ class CephStorage(_carbonara.CarbonaraBasedStorage): metric_details[metric] += 1 return len(metrics), count, metric_details if details else None - def _list_object_names_to_process(self, prefix=""): + def _list_object_names_to_process(self, prefix="", limit=-1): with rados.ReadOpCtx() as op: - omaps, ret = self.ioctx.get_omap_vals(op, "", prefix, -1) + omaps, ret = self.ioctx.get_omap_vals(op, "", prefix, limit) try: self.ioctx.operate_read_op( op, self.MEASURE_PREFIX, flag=self.OMAP_READ_FLAGS) @@ -125,7 +125,8 @@ class CephStorage(_carbonara.CarbonaraBasedStorage): return (k for k, v in omaps) def list_metric_with_measures_to_process(self, size, part, full=False): - names = self._list_object_names_to_process() + names = self._list_object_names_to_process(limit=-1 if full else + size * (part + 1)) if full: objs_it = names else: -- GitLab From dcb2a6ad75dfd217e05462228cbeaa60a787812a Mon Sep 17 00:00:00 2001 From: gord chung Date: Thu, 26 Jan 2017 22:49:44 +0000 Subject: [PATCH 0607/1483] fix live gabbi test - should be using PATCH and not default (GET) when add/delete attributes... 
also should be resource_type and not resource - seach only uses POST and not GET Change-Id: I906ed5c7c646bcb194fa94e15e4f44df6fc2df81 --- gnocchi/tests/gabbi/gabbits-live/live.yaml | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/gnocchi/tests/gabbi/gabbits-live/live.yaml b/gnocchi/tests/gabbi/gabbits-live/live.yaml index f63e1bb9..e12a63cd 100644 --- a/gnocchi/tests/gabbi/gabbits-live/live.yaml +++ b/gnocchi/tests/gabbi/gabbits-live/live.yaml @@ -499,7 +499,8 @@ tests: location: $SCHEME://$NETLOC/v1/resource_type/myresource - name: add an attribute - url: /v1/resource/myresource + url: /v1/resource_type/myresource + method: PATCH request_headers: content-type: application/json-patch+json data: @@ -507,15 +508,24 @@ tests: path: "/attributes/awesome-stuff" value: {"type": "bool", "required": false} status: 200 + response_json_paths: + $.name: myresource + $.attributes."awesome-stuff".type: bool + $.attributes.[*].`len`: 2 - name: remove an attribute - url: /v1/resource/myresource + url: /v1/resource_type/myresource + method: PATCH request_headers: content-type: application/json-patch+json data: - op: "remove" path: "/attributes/awesome-stuff" status: 200 + response_json_paths: + $.name: myresource + $.attributes.display_name.type: string + $.attributes.[*].`len`: 1 - name: myresource resource bad accept desc: Expect 406 on bad accept type @@ -662,10 +672,12 @@ tests: - name: typo of search url: /v1/search/notexists + method: POST status: 404 - name: typo of search in resource url: /v1/search/resource/foobar + method: POST status: 404 - name: search with invalid uuid -- GitLab From 7dd8a326d5fccfe811ab848a66e75edd146afecf Mon Sep 17 00:00:00 2001 From: gord chung Date: Sat, 28 Jan 2017 17:45:22 -0500 Subject: [PATCH 0608/1483] supporting disabling reporting process the report generation process may require significant resources (especially for ceph). it is only required for logging so support disabling it. 
Change-Id: I81b60eed651b75e6d1c021a837aa3be980015ed4 --- gnocchi/cli.py | 3 ++- gnocchi/opts.py | 4 +++- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/gnocchi/cli.py b/gnocchi/cli.py index 96c3a1c7..2dbf52e9 100644 --- a/gnocchi/cli.py +++ b/gnocchi/cli.py @@ -298,7 +298,8 @@ class MetricdServiceManager(cotyledon.ServiceManager): self.metric_processor_id = self.add( MetricProcessor, args=(self.conf, self.queue), workers=conf.metricd.workers) - self.add(MetricReporting, args=(self.conf,)) + if self.conf.metricd.metric_reporting_delay >= 0: + self.add(MetricReporting, args=(self.conf,)) self.add(MetricJanitor, args=(self.conf,)) self.register_hooks(on_reload=self.on_reload) diff --git a/gnocchi/opts.py b/gnocchi/opts.py index ffe2c222..dc7046aa 100644 --- a/gnocchi/opts.py +++ b/gnocchi/opts.py @@ -72,9 +72,11 @@ def list_opts(): cfg.IntOpt('metric_reporting_delay', deprecated_group='storage', default=120, + min=-1, required=True, help="How many seconds to wait between " - "metric ingestion reporting"), + "metric ingestion reporting. Set value to -1 to " + "disable reporting"), cfg.IntOpt('metric_cleanup_delay', deprecated_group='storage', default=300, -- GitLab From 35ae34536ddefc5ce0d46a59a64f9e8a2521ba8e Mon Sep 17 00:00:00 2001 From: gord chung Date: Thu, 26 Jan 2017 22:45:56 +0000 Subject: [PATCH 0609/1483] modernise gabbi tests gabbi 1.7 supports defining url+method in a single line to make syntax less verbose and more specific. 
Change-Id: Ib9ee3a26d634aab0a3f123932996e3a4835b9814 --- gnocchi/tests/gabbi/gabbits-live/live.yaml | 184 +++++++----------- gnocchi/tests/gabbi/gabbits/aggregation.yaml | 2 +- gnocchi/tests/gabbi/gabbits/archive-rule.yaml | 2 - gnocchi/tests/gabbi/gabbits/history.yaml | 1 - gnocchi/tests/gabbi/gabbits/pagination.yaml | 66 +++---- .../tests/gabbi/gabbits/resource-type.yaml | 29 +-- 6 files changed, 100 insertions(+), 184 deletions(-) diff --git a/gnocchi/tests/gabbi/gabbits-live/live.yaml b/gnocchi/tests/gabbi/gabbits-live/live.yaml index e12a63cd..8f3bbef4 100644 --- a/gnocchi/tests/gabbi/gabbits-live/live.yaml +++ b/gnocchi/tests/gabbi/gabbits-live/live.yaml @@ -9,13 +9,12 @@ defaults: tests: - name: check / - url: / + GET: / # Fail to create archive policy - name: wrong archive policy content type desc: attempt to create archive policy with invalid content-type - url: /v1/archive_policy - method: POST + POST: /v1/archive_policy request_headers: content-type: text/plain status: 415 @@ -24,16 +23,14 @@ tests: - name: wrong method desc: attempt to create archive policy with 'PUT' method - url: /v1/archive_policy - method: PUT + PUT: /v1/archive_policy request_headers: content-type: application/json status: 405 - name: invalid authZ desc: x-auth-token is invalid - url: /v1/archive_policy - method: POST + POST: /v1/archive_policy request_headers: content-type: application/json x-auth-token: 'hello' @@ -45,8 +42,7 @@ tests: - name: bad archive policy body desc: archive policy contains invalid key 'cowsay' - url: /v1/archive_policy - method: POST + POST: /v1/archive_policy request_headers: content-type: application/json data: @@ -57,8 +53,7 @@ tests: - name: missing definition desc: archive policy is missing 'definition' keyword - url: /v1/archive_policy - method: POST + POST: /v1/archive_policy request_headers: content-type: application/json data: @@ -69,8 +64,7 @@ tests: - name: empty definition desc: empty definition for archive policy - url: /v1/archive_policy - 
method: POST + POST: /v1/archive_policy request_headers: content-type: application/json data: @@ -82,8 +76,7 @@ tests: - name: wrong value definition desc: invalid type of 'definition' key - url: /v1/archive_policy - method: POST + POST: /v1/archive_policy request_headers: content-type: application/json data: @@ -95,8 +88,7 @@ tests: - name: useless definition desc: invalid archive policy definition - url: /v1/archive_policy - method: POST + POST: /v1/archive_policy request_headers: content-type: application/json data: @@ -113,8 +105,7 @@ tests: - name: create archive policy desc: create archve policy 'gabbilive' for live tests - url: /v1/archive_policy - method: POST + POST: /v1/archive_policy request_headers: content-type: application/json data: @@ -139,7 +130,7 @@ tests: - name: get archive policy desc: retrieve archive policy 'gabbilive' and asster its values - url: $LOCATION + GET: $LOCATION response_headers: content-type: /application/json/ response_json_paths: @@ -159,7 +150,7 @@ tests: - name: get wrong accept desc: invalid 'accept' header - url: /v1/archive_policy/medium + GET: /v1/archive_policy/medium request_headers: accept: text/plain status: 406 @@ -168,22 +159,19 @@ tests: - name: post single archive desc: unexpected 'POST' request to archive policy - url: /v1/archive_policy/gabbilive - method: POST + POST: /v1/archive_policy/gabbilive status: 405 - name: put single archive desc: unexpected 'PUT' request to archive policy - url: /v1/archive_policy/gabbilive - method: PUT + PUT: /v1/archive_policy/gabbilive status: 405 # Duplicated archive policy names ain't allowed - name: create duplicate archive policy desc: create archve policy 'gabbilive' for live tests - url: /v1/archive_policy - method: POST + POST: /v1/archive_policy request_headers: content-type: application/json data: @@ -198,8 +186,7 @@ tests: # Create a unicode named policy - name: post unicode policy name - url: /v1/archive_policy - method: POST + POST: /v1/archive_policy request_headers: 
content-type: application/json data: @@ -214,29 +201,26 @@ tests: name: ✔éñ☃ - name: retrieve unicode policy name - url: $LOCATION + GET: $LOCATION response_json_paths: name: ✔éñ☃ - name: delete unicode archive policy - url: /v1/archive_policy/%E2%9C%94%C3%A9%C3%B1%E2%98%83 - method: DELETE + DELETE: /v1/archive_policy/%E2%9C%94%C3%A9%C3%B1%E2%98%83 status: 204 # It really is gone - name: confirm delete desc: assert deleted unicode policy is not available - method: GET - url: /v1/archive_policy/%E2%9C%94%C3%A9%C3%B1%E2%98%83 + GET: /v1/archive_policy/%E2%9C%94%C3%A9%C3%B1%E2%98%83 status: 404 # Fail to delete one that does not exist - name: delete missing archive desc: delete non-existent archive policy - url: /v1/archive_policy/grandiose - method: DELETE + DELETE: /v1/archive_policy/grandiose status: 404 response_strings: - Archive policy grandiose does not exist @@ -244,8 +228,7 @@ tests: # Attempt to create illogical policies - name: create illogical policy - url: /v1/archive_policy - method: POST + POST: /v1/archive_policy request_headers: content-type: application/json data: @@ -259,8 +242,7 @@ tests: - timespan ≠ granularity × points - name: create identical granularities policy - url: /v1/archive_policy - method: POST + POST: /v1/archive_policy request_headers: content-type: application/json data: @@ -276,8 +258,7 @@ tests: - name: policy invalid unit desc: invalid unit for archive policy 'timespan' key - url: /v1/archive_policy - method: POST + POST: /v1/archive_policy request_headers: content-type: application/json data: @@ -292,8 +273,7 @@ tests: # - name: create archive policy rule1 - url: /v1/archive_policy_rule - method: POST + POST: /v1/archive_policy_rule request_headers: content-type: application/json data: @@ -307,8 +287,7 @@ tests: $.name: gabbilive_rule - name: create invalid archive policy rule - url: /v1/archive_policy_rule - method: POST + POST: /v1/archive_policy_rule request_headers: content-type: application/json data: @@ -317,8 +296,7 @@ 
tests: status: 400 - name: missing auth archive policy rule - url: /v1/archive_policy_rule - method: POST + POST: /v1/archive_policy_rule request_headers: content-type: application/json x-auth-token: 'hello' @@ -329,8 +307,7 @@ tests: status: 401 - name: wrong archive policy rule content type - url: /v1/archive_policy_rule - method: POST + POST: /v1/archive_policy_rule request_headers: content-type: text/plain status: 415 @@ -338,8 +315,7 @@ tests: - Unsupported Media Type - name: bad archive policy rule body - url: /v1/archive_policy_rule - method: POST + POST: /v1/archive_policy_rule request_headers: content-type: application/json data: @@ -351,18 +327,18 @@ tests: # get an archive policy rules - name: get all archive policy rules - url: /v1/archive_policy_rule + GET: /v1/archive_policy_rule status: 200 response_strings: '"metric_pattern": "live.*", "archive_policy_name": "gabbilive", "name": "gabbilive_rule"' - name: get unknown archive policy rule - url: /v1/archive_policy_rule/foo + GET: /v1/archive_policy_rule/foo status: 404 - name: get archive policy rule - url: /v1/archive_policy_rule/gabbilive_rule + GET: /v1/archive_policy_rule/gabbilive_rule status: 200 response_json_paths: $.metric_pattern: "live.*" @@ -371,8 +347,7 @@ tests: - name: delete archive policy in use desc: fails due to https://bugs.launchpad.net/gnocchi/+bug/1569781 - url: /v1/archive_policy/gabbilive - method: DELETE + DELETE: /v1/archive_policy/gabbilive status: 400 # @@ -381,14 +356,13 @@ tests: - name: get all metrics - url: /v1/metric + GET: /v1/metric status: 200 - name: create metric with name and rule - url: /v1/metric + POST: /v1/metric request_headers: content-type: application/json - method: post data: name: "live.io.rate" status: 201 @@ -397,15 +371,13 @@ tests: $.name: live.io.rate - name: delete metric - url: $LOCATION - method: DELETE + DELETE: $LOCATION status: 204 - name: create metric with name and policy - url: /v1/metric + POST: /v1/metric request_headers: content-type: 
application/json - method: post data: name: "aagabbi.live.metric" archive_policy_name: "gabbilive" @@ -415,19 +387,17 @@ tests: $.name: "aagabbi.live.metric" - name: get valid metric id - url: $LOCATION + GET: $LOCATION status: 200 response_json_paths: $.archive_policy.name: gabbilive - name: delete the metric - url: /v1/metric/$RESPONSE['$.id'] - method: DELETE + DELETE: /v1/metric/$RESPONSE['$.id'] status: 204 - name: create metric bad archive policy - url: /v1/metric - method: POST + POST: /v1/metric request_headers: content-type: application/json data: @@ -437,8 +407,7 @@ tests: - Archive policy 2e2675aa-105e-4664-a30d-c407e6a0ea7f does not exist - name: create metric bad content-type - url: /v1/metric - method: POST + POST: /v1/metric request_headers: content-type: plain/text data: '{"archive_policy_name": "cookies"}' @@ -450,13 +419,11 @@ tests: # - name: delete archive policy rule - url: /v1/archive_policy_rule/gabbilive_rule - method: DELETE + DELETE: /v1/archive_policy_rule/gabbilive_rule status: 204 - name: confirm delete archive policy rule - url: /v1/archive_policy_rule/gabbilive_rule - method: DELETE + DELETE: /v1/archive_policy_rule/gabbilive_rule status: 404 @@ -465,25 +432,24 @@ tests: # - name: root of resource - url: /v1/resource + GET: /v1/resource response_json_paths: $.generic: $SCHEME://$NETLOC/v1/resource/generic - name: typo of resource - url: /v1/resoue + GET: /v1/resoue status: 404 - name: typo of resource extra - url: /v1/resource/foobar + GET: /v1/resource/foobar status: 404 - name: generic resource - url: /v1/resource/generic + GET: /v1/resource/generic status: 200 - name: post resource type - url: /v1/resource_type - method: post + POST: /v1/resource_type request_headers: content-type: application/json data: @@ -499,8 +465,7 @@ tests: location: $SCHEME://$NETLOC/v1/resource_type/myresource - name: add an attribute - url: /v1/resource_type/myresource - method: PATCH + PATCH: /v1/resource_type/myresource request_headers: content-type: 
application/json-patch+json data: @@ -514,8 +479,7 @@ tests: $.attributes.[*].`len`: 2 - name: remove an attribute - url: /v1/resource_type/myresource - method: PATCH + PATCH: /v1/resource_type/myresource request_headers: content-type: application/json-patch+json data: @@ -531,7 +495,7 @@ tests: desc: Expect 406 on bad accept type request_headers: accept: text/plain - url: /v1/resource/myresource + GET: /v1/resource/myresource status: 406 response_strings: - 406 Not Acceptable @@ -540,12 +504,11 @@ tests: desc: failover accept media type appropriately request_headers: accept: text/plain, application/json; q=0.8 - url: /v1/resource/myresource + GET: /v1/resource/myresource status: 200 - name: post myresource resource - url: /v1/resource/myresource - method: post + POST: /v1/resource/myresource request_headers: content-type: application/json data: @@ -564,7 +527,7 @@ tests: $.display_name: "myvm" - name: get myresource resource - url: $LOCATION + GET: $LOCATION status: 200 response_json_paths: $.id: 2ae35573-7f9f-4bb1-aae8-dad8dff5706e @@ -573,9 +536,7 @@ tests: $.display_name: "myvm" - name: search for myresource resource via user_id - #url: /v1/search/resource/generic - url: /v1/search/resource/myresource - method: POST + POST: /v1/search/resource/myresource request_headers: content-type: application/json data: @@ -588,8 +549,7 @@ tests: $..display_name: myvm - name: search for myresource resource via user_id and 'generic' type - url: /v1/search/resource/generic - method: POST + POST: /v1/search/resource/generic request_headers: content-type: application/json data: @@ -599,8 +559,7 @@ tests: '"user_id": "126204ef-989a-46fd-999b-ee45c8108f31"' - name: search for myresource resource via user_id and project_id - url: /v1/search/resource/generic - method: POST + POST: /v1/search/resource/generic request_headers: content-type: application/json data: @@ -613,8 +572,7 @@ tests: '"id": "2ae35573-7f9f-4bb1-aae8-dad8dff5706e"' - name: patch myresource resource - url: 
/v1/resource/myresource/2ae35573-7f9f-4bb1-aae8-dad8dff5706e - method: patch + PATCH: /v1/resource/myresource/2ae35573-7f9f-4bb1-aae8-dad8dff5706e request_headers: content-type: application/json data: @@ -624,10 +582,9 @@ tests: display_name: myvm2 - name: post some measures to the metric on myresource - url: /v1/resource/myresource/2ae35573-7f9f-4bb1-aae8-dad8dff5706e/metric/vcpus/measures + POST: /v1/resource/myresource/2ae35573-7f9f-4bb1-aae8-dad8dff5706e/metric/vcpus/measures request_headers: content-type: application/json - method: POST data: - timestamp: "2015-03-06T14:33:57" value: 2 @@ -636,7 +593,7 @@ tests: status: 202 - name: get myresource measures with poll - url: /v1/resource/myresource/2ae35573-7f9f-4bb1-aae8-dad8dff5706e/metric/vcpus/measures + GET: /v1/resource/myresource/2ae35573-7f9f-4bb1-aae8-dad8dff5706e/metric/vcpus/measures # wait up to 60 seconds before policy is deleted poll: count: 60 @@ -671,18 +628,15 @@ tests: # - name: typo of search - url: /v1/search/notexists - method: POST + POST: /v1/search/notexists status: 404 - name: typo of search in resource - url: /v1/search/resource/foobar - method: POST + POST: /v1/search/resource/foobar status: 404 - name: search with invalid uuid - url: /v1/search/resource/generic - method: POST + POST: /v1/search/resource/generic request_headers: content-type: application/json data: @@ -691,26 +645,22 @@ tests: - name: delete myresource resource - url: /v1/resource/generic/2ae35573-7f9f-4bb1-aae8-dad8dff5706e - method: DELETE + DELETE: /v1/resource/generic/2ae35573-7f9f-4bb1-aae8-dad8dff5706e status: 204 # assert resource is really deleted - name: assert resource resource is deleted - url: /v1/resource/generic/2ae35573-7f9f-4bb1-aae8-dad8dff5706e - method: GET + GET: /v1/resource/generic/2ae35573-7f9f-4bb1-aae8-dad8dff5706e status: 404 - name: post myresource resource no data - url: /v1/resource/generic - method: post + POST: /v1/resource/generic request_headers: content-type: application/json status: 
400 - name: delete single archive policy cleanup - url: /v1/archive_policy/gabbilive - method: DELETE + DELETE: /v1/archive_policy/gabbilive poll: count: 360 delay: 1 @@ -723,5 +673,5 @@ tests: status: 204 - name: confirm delete of cleanup - url: /v1/archive_policy/gabbilive + GET: /v1/archive_policy/gabbilive status: 404 diff --git a/gnocchi/tests/gabbi/gabbits/aggregation.yaml b/gnocchi/tests/gabbi/gabbits/aggregation.yaml index 9c3b871e..39e0bd8b 100644 --- a/gnocchi/tests/gabbi/gabbits/aggregation.yaml +++ b/gnocchi/tests/gabbi/gabbits/aggregation.yaml @@ -39,7 +39,7 @@ tests: status: 201 - name: get metric list to push metric 1 - url: /v1/metric + GET: /v1/metric - name: push measurements to metric 1 POST: /v1/metric/$RESPONSE['$[0].id']/measures diff --git a/gnocchi/tests/gabbi/gabbits/archive-rule.yaml b/gnocchi/tests/gabbi/gabbits/archive-rule.yaml index 10d0c7e4..bc3ea60a 100644 --- a/gnocchi/tests/gabbi/gabbits/archive-rule.yaml +++ b/gnocchi/tests/gabbi/gabbits/archive-rule.yaml @@ -26,7 +26,6 @@ tests: - name: create archive policy rule1 POST: /v1/archive_policy_rule - method: POST request_headers: content-type: application/json x-roles: admin @@ -42,7 +41,6 @@ tests: - name: create archive policy rule 2 POST: /v1/archive_policy_rule - method: POST request_headers: content-type: application/json x-roles: admin diff --git a/gnocchi/tests/gabbi/gabbits/history.yaml b/gnocchi/tests/gabbi/gabbits/history.yaml index 4b3b2bb2..0bdc47fd 100644 --- a/gnocchi/tests/gabbi/gabbits/history.yaml +++ b/gnocchi/tests/gabbi/gabbits/history.yaml @@ -57,7 +57,6 @@ tests: - name: patch resource project_id PATCH: $LAST_URL - method: patch request_headers: x-user-id: 0fbb231484614b1a80131fc22f6afc9c x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea diff --git a/gnocchi/tests/gabbi/gabbits/pagination.yaml b/gnocchi/tests/gabbi/gabbits/pagination.yaml index 4967cad1..c6ece552 100644 --- a/gnocchi/tests/gabbi/gabbits/pagination.yaml +++ 
b/gnocchi/tests/gabbi/gabbits/pagination.yaml @@ -288,8 +288,7 @@ tests: # - name: create archive policy desc: for later use - url: /v1/archive_policy - method: POST + POST: /v1/archive_policy request_headers: content-type: application/json x-roles: admin @@ -300,76 +299,69 @@ tests: status: 201 - name: create metric with name1 - url: /v1/metric + POST: /v1/metric request_headers: x-user-id: 0fbb231484614b1a80131fc22f6afc9c x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea content-type: application/json - method: post data: name: "dummy1" archive_policy_name: dummy_policy status: 201 - name: create metric with name2 - url: /v1/metric + POST: /v1/metric request_headers: x-user-id: 0fbb231484614b1a80131fc22f6afc9c x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea content-type: application/json - method: post data: name: "dummy2" archive_policy_name: dummy_policy status: 201 - name: create metric with name3 - url: /v1/metric + POST: /v1/metric request_headers: x-user-id: 0fbb231484614b1a80131fc22f6afc9c x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea content-type: application/json - method: post data: name: "dummy3" archive_policy_name: dummy_policy status: 201 - name: create metric with name4 - url: /v1/metric + POST: /v1/metric request_headers: x-user-id: 0fbb231484614b1a80131fc22f6afc9c x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea content-type: application/json - method: post data: name: "dummy4" archive_policy_name: dummy_policy status: 201 - name: create metric with name5 - url: /v1/metric + POST: /v1/metric request_headers: x-user-id: 0fbb231484614b1a80131fc22f6afc9c x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea content-type: application/json - method: post data: name: "dummy5" archive_policy_name: dummy_policy status: 201 - name: list all default order - url: /v1/metric - method: get + GET: /v1/metric request_headers: x-user-id: 0fbb231484614b1a80131fc22f6afc9c x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea content-type: application/json - name: list first two 
metrics default order - url: /v1/metric?limit=2 - method: get + GET: /v1/metric?limit=2 request_headers: x-user-id: 0fbb231484614b1a80131fc22f6afc9c x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea @@ -380,16 +372,14 @@ tests: $[1].name: $RESPONSE['$[1].name'] - name: list all default order again - url: /v1/metric - method: get + GET: /v1/metric request_headers: x-user-id: 0fbb231484614b1a80131fc22f6afc9c x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea content-type: application/json - name: list next three metrics default order - url: /v1/metric?limit=4&marker=$RESPONSE['$[1].id'] - method: get + GET: /v1/metric?limit=4&marker=$RESPONSE['$[1].id'] request_headers: x-user-id: 0fbb231484614b1a80131fc22f6afc9c x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea @@ -401,8 +391,7 @@ tests: $[2].name: $RESPONSE['$[4].name'] - name: list first two metrics order by user without direction - url: /v1/metric?limit=2&sort=name - method: get + GET: /v1/metric?limit=2&sort=name request_headers: x-user-id: 0fbb231484614b1a80131fc22f6afc9c x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea @@ -414,8 +403,7 @@ tests: $[1].name: dummy2 - name: list first two metrics order by user - url: /v1/metric?limit=2&sort=name:asc - method: get + GET: /v1/metric?limit=2&sort=name:asc request_headers: x-user-id: 0fbb231484614b1a80131fc22f6afc9c x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea @@ -426,8 +414,7 @@ tests: $[1].name: dummy2 - name: list next third metrics order by user - url: /v1/metric?limit=4&sort=name:asc&marker=$RESPONSE['$[1].id'] - method: get + GET: /v1/metric?limit=4&sort=name:asc&marker=$RESPONSE['$[1].id'] request_headers: x-user-id: 0fbb231484614b1a80131fc22f6afc9c x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea @@ -443,41 +430,37 @@ tests: # - name: create metric with name6 - url: /v1/metric + POST: /v1/metric request_headers: x-user-id: 0fbb231484614b1a80131fc22f6afc9c x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea content-type: application/json - method: post data: 
archive_policy_name: dummy_policy status: 201 - name: create metric with name7 - url: /v1/metric + POST: /v1/metric request_headers: x-user-id: 0fbb231484614b1a80131fc22f6afc9c x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea content-type: application/json - method: post data: archive_policy_name: dummy_policy status: 201 - name: create metric with name8 - url: /v1/metric + POST: /v1/metric request_headers: x-user-id: 0fbb231484614b1a80131fc22f6afc9c x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea content-type: application/json - method: post data: archive_policy_name: dummy_policy status: 201 - name: default metric limit - url: /v1/metric - method: get + GET: /v1/metric request_headers: x-user-id: 0fbb231484614b1a80131fc22f6afc9c x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea @@ -490,8 +473,7 @@ tests: # - name: metric invalid sort_key - url: /v1/metric?sort=invalid:asc - method: get + GET: /v1/metric?sort=invalid:asc request_headers: x-user-id: 0fbb231484614b1a80131fc22f6afc9c x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea @@ -499,8 +481,7 @@ tests: status: 400 - name: metric invalid sort_dir - url: /v1/metric?sort=id:invalid - method: get + GET: /v1/metric?sort=id:invalid request_headers: x-user-id: 0fbb231484614b1a80131fc22f6afc9c x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea @@ -508,8 +489,7 @@ tests: status: 400 - name: metric invalid marker - url: /v1/metric?marker=d44b3f4c-27bc-4ace-b81c-2a8e60026874 - method: get + GET: /v1/metric?marker=d44b3f4c-27bc-4ace-b81c-2a8e60026874 request_headers: x-user-id: 0fbb231484614b1a80131fc22f6afc9c x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea @@ -517,8 +497,7 @@ tests: status: 400 - name: metric invalid negative limit - url: /v1/metric?limit=-2 - method: get + GET: /v1/metric?limit=-2 request_headers: x-user-id: 0fbb231484614b1a80131fc22f6afc9c x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea @@ -526,8 +505,7 @@ tests: status: 400 - name: metric invalid limit - url: /v1/metric?limit=invalid - method: get + GET: 
/v1/metric?limit=invalid request_headers: x-user-id: 0fbb231484614b1a80131fc22f6afc9c x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea diff --git a/gnocchi/tests/gabbi/gabbits/resource-type.yaml b/gnocchi/tests/gabbi/gabbits/resource-type.yaml index b41326be..dae792f0 100644 --- a/gnocchi/tests/gabbi/gabbits/resource-type.yaml +++ b/gnocchi/tests/gabbi/gabbits/resource-type.yaml @@ -25,8 +25,7 @@ tests: status: 403 - name: post resource type with existing name - url: /v1/resource_type - method: post + POST: /v1/resource_type request_headers: x-roles: admin content-type: application/json @@ -335,8 +334,7 @@ tests: # CRUD resource type attributes - name: post a new resource attribute - url: /v1/resource_type/my_custom_resource - method: patch + PATCH: /v1/resource_type/my_custom_resource request_headers: x-roles: admin content-type: application/json-patch+json @@ -449,8 +447,7 @@ tests: required: True - name: post a new resource attribute with missing fill - url: /v1/resource_type/my_custom_resource - method: patch + PATCH: /v1/resource_type/my_custom_resource request_headers: x-roles: admin content-type: application/json-patch+json @@ -466,8 +463,7 @@ tests: - "Invalid input: Option 'fill' of resource attribute missing is invalid: must not be empty if required=True" - name: post a new resource attribute with incorrect fill - url: /v1/resource_type/my_custom_resource - method: patch + PATCH: /v1/resource_type/my_custom_resource request_headers: x-roles: admin content-type: application/json-patch+json @@ -484,7 +480,7 @@ tests: - "Invalid input: Option 'fill' of resource attribute incorrect is invalid: expected Real" - name: get the new custom resource type - url: /v1/resource_type/my_custom_resource + GET: /v1/resource_type/my_custom_resource response_json_paths: $.name: my_custom_resource $.attributes: @@ -582,8 +578,7 @@ tests: # Invalid patch - name: add/delete the same resource attribute - url: /v1/resource_type/my_custom_resource - method: patch + PATCH: 
/v1/resource_type/my_custom_resource request_headers: x-roles: admin content-type: application/json-patch+json @@ -655,8 +650,7 @@ tests: required: True - name: delete/add the same resource attribute - url: /v1/resource_type/my_custom_resource - method: patch + PATCH: /v1/resource_type/my_custom_resource request_headers: x-roles: admin content-type: application/json-patch+json @@ -675,8 +669,7 @@ tests: - "can't remove non-existent object 'what'" - name: patch a resource attribute replace - url: /v1/resource_type/my_custom_resource - method: patch + PATCH: /v1/resource_type/my_custom_resource request_headers: x-roles: admin content-type: application/json-patch+json @@ -694,8 +687,7 @@ tests: - "'op']" - name: patch a resource attribute type not exist - url: /v1/resource_type/my_custom_resource - method: patch + PATCH: /v1/resource_type/my_custom_resource request_headers: x-roles: admin content-type: application/json-patch+json @@ -710,8 +702,7 @@ tests: status: 400 - name: patch a resource attribute type unknown - url: /v1/resource_type/my_custom_resource - method: patch + PATCH: /v1/resource_type/my_custom_resource request_headers: x-roles: admin content-type: application/json-patch+json -- GitLab From 34b7d303a6b4c15328ccdda91e5b35cf7e4966c7 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Tue, 31 Jan 2017 10:13:51 +0100 Subject: [PATCH 0610/1483] tests: specify columns to compare in resource list gnocchiclient 3.0.0 changed and changes dynamically the column names (created_by_* is now creator with Gnocchi >= 3.1) so list the column we need to compare explicitly. 
Change-Id: I44c8c519823d0a85c54390fd4faf8bde40931989 --- run-upgrade-tests.sh | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/run-upgrade-tests.sh b/run-upgrade-tests.sh index 4220e252..ec3984d4 100755 --- a/run-upgrade-tests.sh +++ b/run-upgrade-tests.sh @@ -25,7 +25,7 @@ dump_data(){ dir="$1" mkdir -p $dir echo "* Dumping measures aggregations to $dir" - gnocchi resource list > $dir/resources.list + gnocchi resource list -c id -c type -c project_id -c user_id -c original_resource_id -c started_at -c ended_at -c revision_start -c revision_end > $dir/resources.list for resource_id in ${RESOURCE_IDS[@]} $RESOURCE_ID_EXT; do for agg in min max mean sum ; do gnocchi measures show --aggregation $agg --resource-id $resource_id metric > $dir/${agg}.txt @@ -89,6 +89,8 @@ fi eval $(pifpaf run gnocchi --indexer-url $INDEXER_URL --storage-url $STORAGE_URL) gnocchi resource delete $GNOCCHI_STATSD_RESOURCE_ID inject_data $GNOCCHI_DATA +# Encode resource id as it contains slashes and gnocchiclient does not encode it +[ "$have_resource_type_post" ] && RESOURCE_ID_EXT="19235bb9-35ca-5f55-b7db-165cfb033c86" dump_data $GNOCCHI_DATA/old pifpaf_stop -- GitLab From ad4b851c7fbf4b914d1c2b6c55957d4315423326 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Mon, 19 Dec 2016 12:18:09 +0100 Subject: [PATCH 0611/1483] =?UTF-8?q?rest:=20string=20=E2=86=92=20UUID=20c?= =?UTF-8?q?onversion=20for=20resource.id=20to=20be=20unique=20per=20user?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This changes the UUID5 based mechanism so it depends on the user trying to CRUD the resource. This makes sure that when using this kind of transformation, the resource id is converted to a unique id for the user, while preventing conflicting if every user wants to create a "foobar" resource. 
Change-Id: Iebaf3b9f8e0a198af0156008710e0c1253dc5f9d Closes-Bug: #1617918 --- ...97987e38570_no_more_slash_and_reencode.py} | 26 +++++-- gnocchi/rest/__init__.py | 71 +++++++++++-------- gnocchi/tests/gabbi/gabbits/base.yaml | 4 +- .../tests/gabbi/gabbits/batch-measures.yaml | 2 +- .../tests/gabbi/gabbits/resource-type.yaml | 15 ++-- .../tests/gabbi/gabbits/transformedids.yaml | 24 ++++++- gnocchi/utils.py | 25 +++---- .../notes/uuid5-change-8a8c467d2b2d4c85.yaml | 12 ++++ run-upgrade-tests.sh | 8 ++- 9 files changed, 123 insertions(+), 64 deletions(-) rename gnocchi/indexer/alembic/versions/{397987e38570_no_more_slash.py => 397987e38570_no_more_slash_and_reencode.py} (87%) create mode 100644 releasenotes/notes/uuid5-change-8a8c467d2b2d4c85.yaml diff --git a/gnocchi/indexer/alembic/versions/397987e38570_no_more_slash.py b/gnocchi/indexer/alembic/versions/397987e38570_no_more_slash_and_reencode.py similarity index 87% rename from gnocchi/indexer/alembic/versions/397987e38570_no_more_slash.py rename to gnocchi/indexer/alembic/versions/397987e38570_no_more_slash_and_reencode.py index 77d58404..34363257 100644 --- a/gnocchi/indexer/alembic/versions/397987e38570_no_more_slash.py +++ b/gnocchi/indexer/alembic/versions/397987e38570_no_more_slash_and_reencode.py @@ -13,7 +13,7 @@ # under the License. 
# -"""no-more-slash +"""Remove slashes from original resource IDs, recompute their id with creator Revision ID: 397987e38570 Revises: aba5a217ca9b @@ -49,7 +49,8 @@ resource_table = sa.Table( sqlalchemy_utils.types.uuid.UUIDType(), nullable=False), sa.Column('original_resource_id', sa.String(255)), - sa.Column('type', sa.String(255)) + sa.Column('type', sa.String(255)), + sa.Column('creator', sa.String(255)) ) resourcehistory_table = sa.Table( @@ -100,15 +101,28 @@ def upgrade(): nullable=False), ) - for resource in connection.execute(resource_table.select().where( - resource_table.c.original_resource_id.like('%/%'))): + for resource in connection.execute(resource_table.select()): + + if resource_table.c.original_resource_id is None: + # statsd resource has no original_resource_id and is NULL + continue + + try: + orig_as_uuid = uuid.UUID( + str(resource_table.c.original_resource_id)) + except ValueError: + pass + else: + if orig_as_uuid == resource_table.c.id: + continue + new_original_resource_id = resource.original_resource_id.replace( '/', '_') if six.PY2: new_original_resource_id = new_original_resource_id.encode('utf-8') new_id = sa.literal(uuidtype.process_bind_param( - str(uuid.uuid5(utils.RESOURCE_ID_NAMESPACE, - new_original_resource_id)), + str(utils.ResourceUUID( + new_original_resource_id, resource.creator)), connection.dialect)) # resource table diff --git a/gnocchi/rest/__init__.py b/gnocchi/rest/__init__.py index 41139969..6878b5ec 100644 --- a/gnocchi/rest/__init__.py +++ b/gnocchi/rest/__init__.py @@ -14,6 +14,7 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
+import functools import itertools import uuid @@ -848,8 +849,10 @@ class ResourceController(rest.RestController): def __init__(self, resource_type, id): self._resource_type = resource_type + creator = pecan.request.auth_helper.get_current_user( + pecan.request.headers) try: - self.id = utils.ResourceUUID(id) + self.id = utils.ResourceUUID(id, creator) except ValueError: abort(404, indexer.NoSuchResource(id)) self.metric = NamedMetricController(str(self.id), self._resource_type) @@ -929,8 +932,15 @@ def schema_for(resource_type): return ResourceSchema(resource_type.schema) -def ResourceID(value): - return (six.text_type(value), utils.ResourceUUID(value)) +def ResourceUUID(value, creator): + try: + return utils.ResourceUUID(value, creator) + except ValueError as e: + raise voluptuous.Invalid(e) + + +def ResourceID(value, creator): + return (six.text_type(value), ResourceUUID(value, creator)) class ResourcesController(rest.RestController): @@ -946,7 +956,9 @@ class ResourcesController(rest.RestController): # NOTE(sileht): we need to copy the dict because when change it # and we don't want that next patch call have the "id" schema = dict(schema_for(self._resource_type)) - schema["id"] = ResourceID + creator = pecan.request.auth_helper.get_current_user( + pecan.request.headers) + schema["id"] = functools.partial(ResourceID, creator=creator) body = deserialize_and_validate(schema) body["original_resource_id"], body["id"] = body["id"] @@ -956,8 +968,6 @@ class ResourcesController(rest.RestController): } target.update(body) enforce("create resource", target) - creator = pecan.request.auth_helper.get_current_user( - pecan.request.headers) rid = body['id'] del body['id'] try: @@ -1003,8 +1013,7 @@ class ResourcesController(rest.RestController): def delete(self, **kwargs): # NOTE(sileht): Don't allow empty filter, this is going to delete # the entire database. 
- attr_filter = deserialize_and_validate( - SearchResourceTypeController.ResourceSearchSchema) + attr_filter = deserialize_and_validate(ResourceSearchSchema) # the voluptuous checks everything, but it is better to # have this here. @@ -1129,16 +1138,16 @@ class QueryStringSearchAttrFilter(object): return cls._parsed_query2dict(parsed_query) -def _ResourceSearchSchema(v): - """Helper method to indirect the recursivity of the search schema""" - return SearchResourceTypeController.ResourceSearchSchema(v) +def ResourceSearchSchema(v): + return _ResourceSearchSchema()(v) -class SearchResourceTypeController(rest.RestController): - def __init__(self, resource_type): - self._resource_type = resource_type +def _ResourceSearchSchema(): + user = pecan.request.auth_helper.get_current_user( + pecan.request.headers) + _ResourceUUID = functools.partial(ResourceUUID, creator=user) - ResourceSearchSchema = voluptuous.Schema( + return voluptuous.Schema( voluptuous.All( voluptuous.Length(min=0, max=1), { @@ -1155,31 +1164,36 @@ class SearchResourceTypeController(rest.RestController): voluptuous.Length(min=1, max=1), voluptuous.Any( {"id": voluptuous.Any( - utils.ResourceUUID, [utils.ResourceUUID]), + [_ResourceUUID], _ResourceUUID), voluptuous.Extra: voluptuous.Extra})), voluptuous.Any( u"and", u"∨", u"or", u"∧", u"not", ): voluptuous.All( - [_ResourceSearchSchema], voluptuous.Length(min=1) + [ResourceSearchSchema], voluptuous.Length(min=1) ) } ) ) - @classmethod - def parse_and_validate_qs_filter(cls, query): + +class SearchResourceTypeController(rest.RestController): + def __init__(self, resource_type): + self._resource_type = resource_type + + @staticmethod + def parse_and_validate_qs_filter(query): try: attr_filter = QueryStringSearchAttrFilter.parse(query) except InvalidQueryStringSearchAttrFilter as e: raise abort(400, e) - return voluptuous.Schema(cls.ResourceSearchSchema, + return voluptuous.Schema(ResourceSearchSchema, required=True)(attr_filter) def _search(self, **kwargs): 
if pecan.request.body: - attr_filter = deserialize_and_validate(self.ResourceSearchSchema) + attr_filter = deserialize_and_validate(ResourceSearchSchema) elif kwargs.get("filter"): attr_filter = self.parse_and_validate_qs_filter(kwargs["filter"]) else: @@ -1328,13 +1342,16 @@ class SearchMetricController(rest.RestController): class ResourcesMetricsMeasuresBatchController(rest.RestController): - MeasuresBatchSchema = voluptuous.Schema( - {ResourceID: {six.text_type: MeasuresListSchema}} - ) - @pecan.expose('json') def post(self, create_metrics=False): - body = deserialize_and_validate(self.MeasuresBatchSchema) + creator = pecan.request.auth_helper.get_current_user( + pecan.request.headers) + MeasuresBatchSchema = voluptuous.Schema( + {functools.partial(ResourceID, creator=creator): + {six.text_type: MeasuresListSchema}} + ) + + body = deserialize_and_validate(MeasuresBatchSchema) known_metrics = [] unknown_metrics = [] @@ -1349,8 +1366,6 @@ class ResourcesMetricsMeasuresBatchController(rest.RestController): known_names = [m.name for m in metrics] if strutils.bool_from_string(create_metrics): - creator = pecan.request.auth_helper.get_current_user( - pecan.request.headers) already_exists_names = [] for name in names: if name not in known_names: diff --git a/gnocchi/tests/gabbi/gabbits/base.yaml b/gnocchi/tests/gabbi/gabbits/base.yaml index eeb3ed9f..ef097711 100644 --- a/gnocchi/tests/gabbi/gabbits/base.yaml +++ b/gnocchi/tests/gabbi/gabbits/base.yaml @@ -134,13 +134,13 @@ tests: project_id: f3d41b77-0cc1-4f0b-b94a-1d5be9c0e3ea status: 201 response_headers: - location: $SCHEME://$NETLOC/v1/resource/generic/8d835270-2834-5e55-a693-fd0cf91cba3d + location: $SCHEME://$NETLOC/v1/resource/generic/2d869568-70d4-5ed6-9891-7d7a3bbf572d response_json_paths: type: generic started_at: "2014-01-03T02:02:02+00:00" project_id: f3d41b77-0cc1-4f0b-b94a-1d5be9c0e3ea created_by_project_id: 99d13f22-3618-4288-82b8-6512ded77e4f - id: 8d835270-2834-5e55-a693-fd0cf91cba3d + id: 
2d869568-70d4-5ed6-9891-7d7a3bbf572d original_resource_id: 1.2.3.4 - name: get status denied diff --git a/gnocchi/tests/gabbi/gabbits/batch-measures.yaml b/gnocchi/tests/gabbi/gabbits/batch-measures.yaml index 6cc3710c..ae3b454e 100644 --- a/gnocchi/tests/gabbi/gabbits/batch-measures.yaml +++ b/gnocchi/tests/gabbi/gabbits/batch-measures.yaml @@ -244,7 +244,7 @@ tests: response_json_paths: $.description.cause: "Unknown resources" $.description.detail: - - resource_id: "301dbf9a-4fce-52b6-9010-4484c469dcec" + - resource_id: "6b8e287d-c01a-538c-979b-a819ee49de5d" original_resource_id: "foobar" - name: push measurements to named metrics and resource with create_metrics with wrong measure objects diff --git a/gnocchi/tests/gabbi/gabbits/resource-type.yaml b/gnocchi/tests/gabbi/gabbits/resource-type.yaml index dae792f0..9ffd74e3 100644 --- a/gnocchi/tests/gabbi/gabbits/resource-type.yaml +++ b/gnocchi/tests/gabbi/gabbits/resource-type.yaml @@ -6,6 +6,11 @@ fixtures: - ConfigFixture +defaults: + request_headers: + x-user-id: 0fbb231484614b1a80131fc22f6afc9c + x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea + tests: - name: list resource type @@ -222,8 +227,6 @@ tests: - name: post invalid resource POST: /v1/resource/my_custom_resource request_headers: - x-user-id: 0fbb2314-8461-4b1a-8013-1fc22f6afc9c - x-project-id: f3d41b77-0cc1-4f0b-b94a-1d5be9c0e3ea content-type: application/json data: id: d11edfca-4393-4fda-b94d-b05a3a1b3747 @@ -239,8 +242,6 @@ tests: - name: post invalid resource uuid POST: $LAST_URL request_headers: - x-user-id: 0fbb2314-8461-4b1a-8013-1fc22f6afc9c - x-project-id: f3d41b77-0cc1-4f0b-b94a-1d5be9c0e3ea content-type: application/json data: id: d11edfca-4393-4fda-b94d-b05a3a1b3747 @@ -258,8 +259,6 @@ tests: - name: post custom resource POST: $LAST_URL request_headers: - x-user-id: 0fbb2314-8461-4b1a-8013-1fc22f6afc9c - x-project-id: f3d41b77-0cc1-4f0b-b94a-1d5be9c0e3ea content-type: application/json data: id: d11edfca-4393-4fda-b94d-b05a3a1b3747 @@ 
-276,8 +275,6 @@ tests: - name: patch custom resource PATCH: /v1/resource/my_custom_resource/d11edfca-4393-4fda-b94d-b05a3a1b3747 request_headers: - x-user-id: 0fbb2314-8461-4b1a-8013-1fc22f6afc9c - x-project-id: f3d41b77-0cc1-4f0b-b94a-1d5be9c0e3ea content-type: application/json data: name: foo @@ -303,8 +300,6 @@ tests: - name: post resource with default POST: /v1/resource/my_custom_resource request_headers: - x-user-id: 0fbb2314-8461-4b1a-8013-1fc22f6afc9c - x-project-id: f3d41b77-0cc1-4f0b-b94a-1d5be9c0e3ea content-type: application/json data: id: c4110aec-6e5c-43fa-b8c5-ffdfbca3ce59 diff --git a/gnocchi/tests/gabbi/gabbits/transformedids.yaml b/gnocchi/tests/gabbi/gabbits/transformedids.yaml index b5ab2092..cc544f11 100644 --- a/gnocchi/tests/gabbi/gabbits/transformedids.yaml +++ b/gnocchi/tests/gabbi/gabbits/transformedids.yaml @@ -66,8 +66,26 @@ tests: project_id: f3d41b770cc14f0bb94a1d5be9c0e3ea status: 400 response_strings: - - "Invalid input: not a valid value for dictionary value @ data[" - - "'id'] " + - "'/' is not supported in resource id" + + + - name: post new resource non uuid again different user + POST: /v1/resource/generic + request_headers: + x-user-id: 0fbb231484614b1a80131fc22f6afc9b + x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea + data: + id: generic zero + metrics: + cpu.util: + archive_policy_name: medium + status: 201 + response_json_paths: + created_by_user_id: 0fbb231484614b1a80131fc22f6afc9b + created_by_project_id: f3d41b770cc14f0bb94a1d5be9c0e3ea + response_headers: + # is a UUID + location: /v1/resource/generic/[a-f0-9-]{36}/ - name: post new resource non uuid POST: /v1/resource/generic @@ -151,7 +169,7 @@ tests: archive_policy_name: medium status: 400 response_strings: - - not a valid value for + - transformable resource id >255 max allowed characters for dictionary value - name: post long non uuid resource id POST: $LAST_URL diff --git a/gnocchi/utils.py b/gnocchi/utils.py index 50dc303b..1b3cd476 100644 --- a/gnocchi/utils.py 
+++ b/gnocchi/utils.py @@ -39,23 +39,24 @@ LOG = log.getLogger(__name__) RESOURCE_ID_NAMESPACE = uuid.UUID('0a7a15ff-aa13-4ac2-897c-9bdf30ce175b') -def ResourceUUID(value): +def ResourceUUID(value, creator): if isinstance(value, uuid.UUID): return value if '/' in value: raise ValueError("'/' is not supported in resource id") try: - try: - return uuid.UUID(value) - except ValueError: - if len(value) <= 255: - if six.PY2: - value = value.encode('utf-8') - return uuid.uuid5(RESOURCE_ID_NAMESPACE, value) - raise ValueError( - 'transformable resource id >255 max allowed characters') - except Exception as e: - raise ValueError(e) + return uuid.UUID(value) + except ValueError: + if len(value) <= 255: + # value/creator must be str (unicode) in Python 3 and str (bytes) + # in Python 2. It's not logical, I know. + if six.PY2: + value = value.encode('utf-8') + creator = creator.encode('utf-8') + return uuid.uuid5(RESOURCE_ID_NAMESPACE, + value + "\x00" + creator) + raise ValueError( + 'transformable resource id >255 max allowed characters') def UUID(value): diff --git a/releasenotes/notes/uuid5-change-8a8c467d2b2d4c85.yaml b/releasenotes/notes/uuid5-change-8a8c467d2b2d4c85.yaml new file mode 100644 index 00000000..ec6b6c51 --- /dev/null +++ b/releasenotes/notes/uuid5-change-8a8c467d2b2d4c85.yaml @@ -0,0 +1,12 @@ +--- +issues: + - >- + The conversion mechanism provided by the API to convert non-UUID resource + id to UUID is now also based on the user creating/accessing the resource. + This makes sure that the conversion generates a unique UUID for the user + and that several users can use the same string as `original_resource_id`. +upgrade: + - >- + Since `original_resource_id` is now unique per creator, that means users + cannot refer to resource by using the `original_resource_id` if the + resource was not created by them. 
diff --git a/run-upgrade-tests.sh b/run-upgrade-tests.sh index 4220e252..5bcdd73f 100755 --- a/run-upgrade-tests.sh +++ b/run-upgrade-tests.sh @@ -109,12 +109,16 @@ RESOURCE_IDS=( "5a301761-cccc-46e2-8900-8b4f6fe6675a" ) # NOTE(sileht): / are now _ -[ "$have_resource_type_post" ] && RESOURCE_ID_EXT="5a301761_dddd_46e2_8900_8b4f6fe6675a" +# NOTE(jdanjou): and we reencode for admin:admin, but we cannot authenticate as +# admin:admin in basic since ":" is forbidden in any username, so let's use the direct +# computed ID +[ "$have_resource_type_post" ] && RESOURCE_ID_EXT="517920a9-2e50-58b8-88e8-25fd7aae1d8f" + dump_data $GNOCCHI_DATA/new # NOTE(sileht): change the output of the old gnocchi to compare with the new without '/' $GSED -i -e "s,5a301761/dddd/46e2/8900/8b4f6fe6675a,5a301761_dddd_46e2_8900_8b4f6fe6675a,g" \ - -e "s,19235bb9-35ca-5f55-b7db-165cfb033c86,fe1bdabf-d94c-5b3a-af1e-06bdff53f228,g" $GNOCCHI_DATA/old/resources.list + -e "s,19235bb9-35ca-5f55-b7db-165cfb033c86,517920a9-2e50-58b8-88e8-25fd7aae1d8f,g" $GNOCCHI_DATA/old/resources.list echo "* Checking output difference between Gnocchi $old_version and $new_version" diff -uNr $GNOCCHI_DATA/old $GNOCCHI_DATA/new -- GitLab From 2c0f2b69308b12c1d5f4e3890837365525af326b Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Thu, 26 Jan 2017 19:30:37 +0100 Subject: [PATCH 0612/1483] indexer: make sure original_resource_id is never NULL MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Currently the original resource id is never NULL… except when statsd create its resource! So that's the only resource with a NULL original_resource_id. 
Change-Id: If0b27630961808287d6b4d84b340dc9510191096 --- ...a63d3d186_original_resource_id_not_null.py | 39 +++++++++++++++++++ gnocchi/indexer/sqlalchemy.py | 4 ++ gnocchi/indexer/sqlalchemy_base.py | 3 +- gnocchi/tests/test_indexer.py | 39 +++++++++++++++---- 4 files changed, 77 insertions(+), 8 deletions(-) create mode 100644 gnocchi/indexer/alembic/versions/1e1a63d3d186_original_resource_id_not_null.py diff --git a/gnocchi/indexer/alembic/versions/1e1a63d3d186_original_resource_id_not_null.py b/gnocchi/indexer/alembic/versions/1e1a63d3d186_original_resource_id_not_null.py new file mode 100644 index 00000000..fa29a598 --- /dev/null +++ b/gnocchi/indexer/alembic/versions/1e1a63d3d186_original_resource_id_not_null.py @@ -0,0 +1,39 @@ +# Copyright 2017 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""Make sure resource.original_resource_id is NOT NULL + +Revision ID: 1e1a63d3d186 +Revises: 397987e38570 +Create Date: 2017-01-26 19:33:35.209688 + +""" + +from alembic import op +import sqlalchemy as sa + + +# revision identifiers, used by Alembic. 
+revision = '1e1a63d3d186' +down_revision = '397987e38570' +branch_labels = None +depends_on = None + + +def upgrade(): + for table_name in ('resource', 'resource_history'): + op.alter_column(table_name, "original_resource_id", nullable=False, + existing_type=sa.String(255), + existing_nullable=True) diff --git a/gnocchi/indexer/sqlalchemy.py b/gnocchi/indexer/sqlalchemy.py index b9c58235..07ccac03 100644 --- a/gnocchi/indexer/sqlalchemy.py +++ b/gnocchi/indexer/sqlalchemy.py @@ -734,17 +734,21 @@ class SQLAlchemyIndexer(indexer.IndexerDriver): def create_resource(self, resource_type, id, creator, user_id=None, project_id=None, started_at=None, ended_at=None, metrics=None, + original_resource_id=None, **kwargs): if (started_at is not None and ended_at is not None and started_at > ended_at): raise ValueError( "Start timestamp cannot be after end timestamp") + if original_resource_id is None: + original_resource_id = str(id) with self.facade.writer() as session: resource_cls = self._resource_type_to_mappers( session, resource_type)['resource'] r = resource_cls( id=id, + original_resource_id=original_resource_id, type=resource_type, creator=creator, user_id=user_id, diff --git a/gnocchi/indexer/sqlalchemy_base.py b/gnocchi/indexer/sqlalchemy_base.py index db1a1408..a8ff6482 100644 --- a/gnocchi/indexer/sqlalchemy_base.py +++ b/gnocchi/indexer/sqlalchemy_base.py @@ -337,7 +337,8 @@ class ResourceMixin(ResourceJsonifier): ended_at = sqlalchemy.Column(TimestampUTC) user_id = sqlalchemy.Column(sqlalchemy.String(255)) project_id = sqlalchemy.Column(sqlalchemy.String(255)) - original_resource_id = sqlalchemy.Column(sqlalchemy.String(255)) + original_resource_id = sqlalchemy.Column(sqlalchemy.String(255), + nullable=False) class Resource(ResourceMixin, Base, GnocchiBase): diff --git a/gnocchi/tests/test_indexer.py b/gnocchi/tests/test_indexer.py index 0d221371..f6a29263 100644 --- a/gnocchi/tests/test_indexer.py +++ b/gnocchi/tests/test_indexer.py @@ -214,7 +214,32 @@ class 
TestIndexerDriver(tests_base.TestCase): "project_id": None, "started_at": rc.started_at, "ended_at": None, - "original_resource_id": None, + "original_resource_id": str(r1), + "type": "generic", + "metrics": {}}, + rc.jsonify()) + rg = self.index.get_resource('generic', r1, with_metrics=True) + self.assertEqual(rc, rg) + self.assertEqual(rc.metrics, rg.metrics) + + def test_create_resource_with_original_resource_id(self): + r1 = uuid.uuid4() + creator = str(uuid.uuid4()) + rc = self.index.create_resource('generic', r1, creator, + original_resource_id="foobar") + self.assertIsNotNone(rc.started_at) + self.assertIsNotNone(rc.revision_start) + self.assertEqual({"id": r1, + "revision_start": rc.revision_start, + "revision_end": None, + "creator": creator, + "created_by_user_id": creator, + "created_by_project_id": "", + "user_id": None, + "project_id": None, + "started_at": rc.started_at, + "ended_at": None, + "original_resource_id": "foobar", "type": "generic", "metrics": {}}, rc.jsonify()) @@ -240,7 +265,7 @@ class TestIndexerDriver(tests_base.TestCase): "project_id": None, "started_at": rc.started_at, "ended_at": None, - "original_resource_id": None, + "original_resource_id": str(r1), "type": "generic", "metrics": {}}, rc.jsonify()) @@ -323,7 +348,7 @@ class TestIndexerDriver(tests_base.TestCase): "project_id": None, "started_at": ts, "ended_at": None, - "original_resource_id": None, + "original_resource_id": str(r1), "type": "generic", "metrics": {}}, rc.jsonify()) r = self.index.get_resource('generic', r1, with_metrics=True) @@ -352,7 +377,7 @@ class TestIndexerDriver(tests_base.TestCase): "project_id": None, "started_at": rc.started_at, "ended_at": None, - "original_resource_id": None, + "original_resource_id": str(r1), "type": "generic", "metrics": {'foo': str(e1), 'bar': str(e2)}}, rc.jsonify()) @@ -369,7 +394,7 @@ class TestIndexerDriver(tests_base.TestCase): "ended_at": None, "user_id": None, "project_id": None, - "original_resource_id": None, + 
"original_resource_id": str(r1), "metrics": {'foo': str(e1), 'bar': str(e2)}}, r.jsonify()) @@ -419,7 +444,7 @@ class TestIndexerDriver(tests_base.TestCase): "project_id": None, "type": "generic", "started_at": r.started_at, - "original_resource_id": None, + "original_resource_id": str(r1), "metrics": {}}, r.jsonify()) def test_update_resource_metrics(self): @@ -592,7 +617,7 @@ class TestIndexerDriver(tests_base.TestCase): "created_by_user_id": creator, "user_id": None, "project_id": None, - "original_resource_id": None, + "original_resource_id": str(r1), "type": "generic", "metrics": {'bar': str(e2)}}, r.jsonify()) -- GitLab From 5a8d3a48ca1f223e3e74f2151fddd91d197791d3 Mon Sep 17 00:00:00 2001 From: gord chung Date: Mon, 30 Jan 2017 16:53:13 +0000 Subject: [PATCH 0613/1483] paginate ceph report generation ceph driver stores all data as keys in a single object. this could result in hundreds of thousands or more keys which will cause ceph to timeout if called all at once. alternatively, paginate results and if the marker is processed and removed during report generation, fail and retry next time. Change-Id: I2161ba735fb4ac72680c733251cd84a9f4ea3c15 --- gnocchi/cli.py | 4 ++++ gnocchi/rest/__init__.py | 8 ++++++-- gnocchi/storage/incoming/__init__.py | 4 ++++ gnocchi/storage/incoming/ceph.py | 30 +++++++++++++++++++--------- 4 files changed, 35 insertions(+), 11 deletions(-) diff --git a/gnocchi/cli.py b/gnocchi/cli.py index 96c3a1c7..8b4c94c3 100644 --- a/gnocchi/cli.py +++ b/gnocchi/cli.py @@ -36,6 +36,7 @@ from gnocchi import indexer from gnocchi import service from gnocchi import statsd as statsd_service from gnocchi import storage +from gnocchi.storage import incoming from gnocchi import utils @@ -137,6 +138,9 @@ class MetricReporting(MetricProcessBase): "metrics wait to be processed.", report['summary']['measures'], report['summary']['metrics']) + except incoming.ReportGenerationError: + LOG.warning("Unable to compute backlog. 
Retrying at next " + "interval.") except Exception: LOG.error("Unexpected error during pending measures reporting", exc_info=True) diff --git a/gnocchi/rest/__init__.py b/gnocchi/rest/__init__.py index 41139969..dc565e06 100644 --- a/gnocchi/rest/__init__.py +++ b/gnocchi/rest/__init__.py @@ -37,6 +37,7 @@ from gnocchi import indexer from gnocchi import json from gnocchi import resource_type from gnocchi import storage +from gnocchi.storage import incoming from gnocchi import utils @@ -1649,8 +1650,11 @@ class StatusController(rest.RestController): @pecan.expose('json') def get(details=True): enforce("get status", {}) - report = pecan.request.storage.incoming.measures_report( - strutils.bool_from_string(details)) + try: + report = pecan.request.storage.incoming.measures_report( + strutils.bool_from_string(details)) + except incoming.ReportGenerationError: + abort(503, 'Unable to generate status. Please retry.') report_dict = {"storage": {"summary": report['summary']}} if 'details' in report: report_dict["storage"]["measures_to_process"] = report['details'] diff --git a/gnocchi/storage/incoming/__init__.py b/gnocchi/storage/incoming/__init__.py index 3cdd4a57..30b26ac3 100644 --- a/gnocchi/storage/incoming/__init__.py +++ b/gnocchi/storage/incoming/__init__.py @@ -17,6 +17,10 @@ from gnocchi import exceptions +class ReportGenerationError(Exception): + pass + + # TODO(sileht): We inherit from this storage driver temporary # until we moved out all incoming code from here. 
class StorageDriver(object): diff --git a/gnocchi/storage/incoming/ceph.py b/gnocchi/storage/incoming/ceph.py index 51fc2471..ef9ded73 100644 --- a/gnocchi/storage/incoming/ceph.py +++ b/gnocchi/storage/incoming/ceph.py @@ -93,21 +93,33 @@ class CephStorage(_carbonara.CarbonaraBasedStorage): flags=self.OMAP_WRITE_FLAGS) def _build_report(self, details): - names = self._list_object_names_to_process() + LIMIT = 1000 metrics = set() count = 0 metric_details = defaultdict(int) - for name in names: - count += 1 - metric = name.split("_")[1] - metrics.add(metric) - if details: - metric_details[metric] += 1 + marker = "" + while True: + names = list(self._list_object_names_to_process(marker=marker, + limit=LIMIT)) + if names and names[0] < marker: + raise _carbonara.ReportGenerationError("Unable to cleanly " + "compute backlog.") + for name in names: + count += 1 + metric = name.split("_")[1] + metrics.add(metric) + if details: + metric_details[metric] += 1 + if len(names) < LIMIT: + break + else: + marker = name + return len(metrics), count, metric_details if details else None - def _list_object_names_to_process(self, prefix="", limit=-1): + def _list_object_names_to_process(self, prefix="", marker="", limit=-1): with rados.ReadOpCtx() as op: - omaps, ret = self.ioctx.get_omap_vals(op, "", prefix, limit) + omaps, ret = self.ioctx.get_omap_vals(op, marker, prefix, limit) try: self.ioctx.operate_read_op( op, self.MEASURE_PREFIX, flag=self.OMAP_READ_FLAGS) -- GitLab From 6e41c34c318fd91997e886fdc1a4a2502ba293fc Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Wed, 1 Feb 2017 17:36:47 +0100 Subject: [PATCH 0614/1483] tools: make measure injector works without gnocchiclient Change-Id: I838b86ef92a44bb47e059a62ca40457008b96164 --- devstack/gate/post_test_hook.sh | 1 - tools/measures_injector.py | 21 ++++++++++++--------- 2 files changed, 12 insertions(+), 10 deletions(-) diff --git a/devstack/gate/post_test_hook.sh b/devstack/gate/post_test_hook.sh index 9cbb922f..948e60ca 
100755 --- a/devstack/gate/post_test_hook.sh +++ b/devstack/gate/post_test_hook.sh @@ -46,7 +46,6 @@ curl -X GET ${GNOCCHI_SERVICE_URL}/v1/archive_policy -H "Content-Type: applicati sudo gnocchi-upgrade # Just ensure tools still works -gnocchi metric create sudo -E -H -u stack $GNOCCHI_DIR/tools/measures_injector.py --metrics 1 --batch-of-measures 2 --measures-per-batch 2 # NOTE(sileht): on swift job permissions are wrong, I don't known why diff --git a/tools/measures_injector.py b/tools/measures_injector.py index 01e6a385..ebaef520 100755 --- a/tools/measures_injector.py +++ b/tools/measures_injector.py @@ -14,6 +14,7 @@ # See the License for the specific language governing permissions and # limitations under the License. import random +import uuid from concurrent import futures from oslo_config import cfg @@ -28,7 +29,9 @@ from gnocchi import utils def injector(): conf = cfg.ConfigOpts() conf.register_cli_opts([ - cfg.IntOpt("metrics"), + cfg.IntOpt("metrics", default=1, min=1), + cfg.StrOpt("archive-policy-name", default="low"), + cfg.StrOpt("creator", default="admin"), cfg.IntOpt("batch-of-measures", default=1000), cfg.IntOpt("measures-per-batch", default=10), ]) @@ -37,11 +40,12 @@ def injector(): index.connect() s = storage.get_driver(conf) - metrics = index.list_metrics() - if conf.metrics: - metrics = metrics[:conf.metrics] + def todo(): + metric = index.create_metric( + uuid.uuid4(), + creator=conf.creator, + archive_policy_name=conf.archive_policy_name) - def todo(metric): for _ in six.moves.range(conf.batch_of_measures): measures = [ storage.Measure( @@ -49,10 +53,9 @@ def injector(): for __ in six.moves.range(conf.measures_per_batch)] s.incoming.add_measures(metric, measures) - with futures.ThreadPoolExecutor(max_workers=len(metrics)) as executor: - # We use 'list' to iterate all threads here to raise the first - # exception now, not much choice - list(executor.map(todo, metrics)) + with futures.ThreadPoolExecutor(max_workers=conf.metrics) as executor: 
+ for m in six.moves.range(conf.metrics): + executor.submit(todo) if __name__ == '__main__': -- GitLab From 66526b295be1a2d80a33f96c0297fad1d2520f59 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Wed, 1 Feb 2017 16:24:49 +0100 Subject: [PATCH 0615/1483] devstack: do not install gnocchiclient This is not needed anymore for devstack, and this brings more problem that it solves. Change-Id: Ic3db6fcc6ace226230ce8a51863b57450df40e06 --- devstack/plugin.sh | 16 ---------------- 1 file changed, 16 deletions(-) diff --git a/devstack/plugin.sh b/devstack/plugin.sh index 4cced8d4..65e54463 100644 --- a/devstack/plugin.sh +++ b/devstack/plugin.sh @@ -33,11 +33,6 @@ XTRACE=$(set +o | grep xtrace) set -o xtrace -# Defaults -# -------- -GITDIR["python-gnocchiclient"]=$DEST/python-gnocchiclient -GITREPO["python-gnocchiclient"]=${GNOCCHICLIENT_REPO:-${GIT_BASE}/openstack/python-gnocchiclient.git} - if [ -z "$GNOCCHI_DEPLOY" ]; then # Default GNOCCHI_DEPLOY=simple @@ -116,15 +111,6 @@ function gnocchi_service_url { fi } -function install_gnocchiclient { - if use_library_from_git python-gnocchiclient; then - git_clone_by_name python-gnocchiclient - setup_dev_lib python-gnocchiclient - else - pip_install gnocchiclient - fi -} - # install redis # NOTE(chdent): We shouldn't rely on ceilometer being present so cannot # use its install_redis. 
There are enough packages now using redis @@ -383,8 +369,6 @@ function install_gnocchi { _gnocchi_install_grafana fi - install_gnocchiclient - [ "$GNOCCHI_USE_KEYSTONE" == "True" ] && EXTRA_FLAVOR=,keystone # We don't use setup_package because we don't follow openstack/requirements -- GitLab From afe5c471d9905516e5b4175672e2fcabb77a026f Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Thu, 2 Feb 2017 19:45:08 +0100 Subject: [PATCH 0616/1483] Add release notes for 3.1 Change-Id: I29fd929a03390f1276ad8143f596331e1e0f5b3d --- doc/source/releasenotes/3.1.rst | 6 ++++++ doc/source/releasenotes/index.rst | 1 + 2 files changed, 7 insertions(+) create mode 100644 doc/source/releasenotes/3.1.rst diff --git a/doc/source/releasenotes/3.1.rst b/doc/source/releasenotes/3.1.rst new file mode 100644 index 00000000..9673b4a8 --- /dev/null +++ b/doc/source/releasenotes/3.1.rst @@ -0,0 +1,6 @@ +=================================== + 3.1 Series Release Notes +=================================== + +.. release-notes:: + :branch: origin/stable/3.1 diff --git a/doc/source/releasenotes/index.rst b/doc/source/releasenotes/index.rst index f4ff78f2..9b4032fa 100644 --- a/doc/source/releasenotes/index.rst +++ b/doc/source/releasenotes/index.rst @@ -5,6 +5,7 @@ Release Notes :maxdepth: 2 unreleased + 3.1 3.0 2.2 2.1 -- GitLab From ba8c287c8a564b092c8b0bab5e10431d1b94d7e1 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Fri, 3 Feb 2017 08:36:32 +0100 Subject: [PATCH 0617/1483] Remove obsolete comment Change-Id: Ia37ca1d801d91e959d1aed373a7f8673c89c7fe9 --- gnocchi/storage/incoming/__init__.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/gnocchi/storage/incoming/__init__.py b/gnocchi/storage/incoming/__init__.py index 30b26ac3..f1df9f33 100644 --- a/gnocchi/storage/incoming/__init__.py +++ b/gnocchi/storage/incoming/__init__.py @@ -21,8 +21,6 @@ class ReportGenerationError(Exception): pass -# TODO(sileht): We inherit from this storage driver temporary -# until we moved out all incoming 
code from here. class StorageDriver(object): @staticmethod -- GitLab From 1fb78ab40f095dd63a924415b9056a5767f41860 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Thu, 2 Feb 2017 09:19:16 +0100 Subject: [PATCH 0618/1483] tests: split two kind of functional tests We have two kinds of functional tests. Those run with the wsgi_intercepter and those run on a real Gnocchi server. We currently the wsgi_intercepter tests in two jobs, the classic tox pyXX and the functional one. This change rearrange tests to makje clear the purpose of each tests and to not run them twice in gate. Change-Id: Ie0339ed92b790bebb0f4a5ed0dc5b690fa8ce4fa --- gnocchi/tempest/scenario/__init__.py | 2 +- gnocchi/tests/{gabbi => functional}/__init__.py | 0 gnocchi/tests/{gabbi => functional}/fixtures.py | 0 gnocchi/tests/{gabbi => functional}/gabbits/aggregation.yaml | 0 gnocchi/tests/{gabbi => functional}/gabbits/archive-rule.yaml | 0 gnocchi/tests/{gabbi => functional}/gabbits/archive.yaml | 0 gnocchi/tests/{gabbi => functional}/gabbits/async.yaml | 0 gnocchi/tests/{gabbi => functional}/gabbits/base.yaml | 0 .../tests/{gabbi => functional}/gabbits/batch-measures.yaml | 0 gnocchi/tests/{gabbi => functional}/gabbits/cors.yaml | 0 gnocchi/tests/{gabbi => functional}/gabbits/healthcheck.yaml | 0 gnocchi/tests/{gabbi => functional}/gabbits/history.yaml | 0 .../{gabbi => functional}/gabbits/metric-granularity.yaml | 0 .../gabbits/metric-timestamp-format.yaml | 0 gnocchi/tests/{gabbi => functional}/gabbits/metric.yaml | 0 gnocchi/tests/{gabbi => functional}/gabbits/pagination.yaml | 0 .../{gabbi => functional}/gabbits/resource-aggregation.yaml | 0 .../tests/{gabbi => functional}/gabbits/resource-type.yaml | 0 gnocchi/tests/{gabbi => functional}/gabbits/resource.yaml | 0 .../tests/{gabbi => functional}/gabbits/search-metric.yaml | 0 gnocchi/tests/{gabbi => functional}/gabbits/search.yaml | 0 .../tests/{gabbi => functional}/gabbits/transformedids.yaml | 0 gnocchi/tests/{gabbi => 
functional}/test_gabbi.py | 2 +- gnocchi/tests/{gabbi => functional}/test_gabbi_prefix.py | 2 +- gnocchi/tests/functional_live/__init__.py | 0 .../{gabbi/gabbits-live => functional_live/gabbits}/live.yaml | 0 .../gabbits}/search-resource.yaml | 0 gnocchi/tests/{gabbi => functional_live}/test_gabbi_live.py | 2 +- tox.ini | 4 ++-- 29 files changed, 6 insertions(+), 6 deletions(-) rename gnocchi/tests/{gabbi => functional}/__init__.py (100%) rename gnocchi/tests/{gabbi => functional}/fixtures.py (100%) rename gnocchi/tests/{gabbi => functional}/gabbits/aggregation.yaml (100%) rename gnocchi/tests/{gabbi => functional}/gabbits/archive-rule.yaml (100%) rename gnocchi/tests/{gabbi => functional}/gabbits/archive.yaml (100%) rename gnocchi/tests/{gabbi => functional}/gabbits/async.yaml (100%) rename gnocchi/tests/{gabbi => functional}/gabbits/base.yaml (100%) rename gnocchi/tests/{gabbi => functional}/gabbits/batch-measures.yaml (100%) rename gnocchi/tests/{gabbi => functional}/gabbits/cors.yaml (100%) rename gnocchi/tests/{gabbi => functional}/gabbits/healthcheck.yaml (100%) rename gnocchi/tests/{gabbi => functional}/gabbits/history.yaml (100%) rename gnocchi/tests/{gabbi => functional}/gabbits/metric-granularity.yaml (100%) rename gnocchi/tests/{gabbi => functional}/gabbits/metric-timestamp-format.yaml (100%) rename gnocchi/tests/{gabbi => functional}/gabbits/metric.yaml (100%) rename gnocchi/tests/{gabbi => functional}/gabbits/pagination.yaml (100%) rename gnocchi/tests/{gabbi => functional}/gabbits/resource-aggregation.yaml (100%) rename gnocchi/tests/{gabbi => functional}/gabbits/resource-type.yaml (100%) rename gnocchi/tests/{gabbi => functional}/gabbits/resource.yaml (100%) rename gnocchi/tests/{gabbi => functional}/gabbits/search-metric.yaml (100%) rename gnocchi/tests/{gabbi => functional}/gabbits/search.yaml (100%) rename gnocchi/tests/{gabbi => functional}/gabbits/transformedids.yaml (100%) rename gnocchi/tests/{gabbi => functional}/test_gabbi.py (96%) rename 
gnocchi/tests/{gabbi => functional}/test_gabbi_prefix.py (95%) create mode 100644 gnocchi/tests/functional_live/__init__.py rename gnocchi/tests/{gabbi/gabbits-live => functional_live/gabbits}/live.yaml (100%) rename gnocchi/tests/{gabbi/gabbits-live => functional_live/gabbits}/search-resource.yaml (100%) rename gnocchi/tests/{gabbi => functional_live}/test_gabbi_live.py (98%) diff --git a/gnocchi/tempest/scenario/__init__.py b/gnocchi/tempest/scenario/__init__.py index 835aec82..3e011b88 100644 --- a/gnocchi/tempest/scenario/__init__.py +++ b/gnocchi/tempest/scenario/__init__.py @@ -54,7 +54,7 @@ class GnocchiGabbiTest(tempest.test.BaseTestCase): port = parsed_url.port test_dir = os.path.join(os.path.dirname(__file__), '..', '..', - 'tests', 'gabbi', 'gabbits-live') + 'tests', 'functional_live', 'gabbits') cls.tests = driver.build_tests( test_dir, unittest.TestLoader(), host=host, port=port, prefix=prefix, diff --git a/gnocchi/tests/gabbi/__init__.py b/gnocchi/tests/functional/__init__.py similarity index 100% rename from gnocchi/tests/gabbi/__init__.py rename to gnocchi/tests/functional/__init__.py diff --git a/gnocchi/tests/gabbi/fixtures.py b/gnocchi/tests/functional/fixtures.py similarity index 100% rename from gnocchi/tests/gabbi/fixtures.py rename to gnocchi/tests/functional/fixtures.py diff --git a/gnocchi/tests/gabbi/gabbits/aggregation.yaml b/gnocchi/tests/functional/gabbits/aggregation.yaml similarity index 100% rename from gnocchi/tests/gabbi/gabbits/aggregation.yaml rename to gnocchi/tests/functional/gabbits/aggregation.yaml diff --git a/gnocchi/tests/gabbi/gabbits/archive-rule.yaml b/gnocchi/tests/functional/gabbits/archive-rule.yaml similarity index 100% rename from gnocchi/tests/gabbi/gabbits/archive-rule.yaml rename to gnocchi/tests/functional/gabbits/archive-rule.yaml diff --git a/gnocchi/tests/gabbi/gabbits/archive.yaml b/gnocchi/tests/functional/gabbits/archive.yaml similarity index 100% rename from gnocchi/tests/gabbi/gabbits/archive.yaml 
rename to gnocchi/tests/functional/gabbits/archive.yaml diff --git a/gnocchi/tests/gabbi/gabbits/async.yaml b/gnocchi/tests/functional/gabbits/async.yaml similarity index 100% rename from gnocchi/tests/gabbi/gabbits/async.yaml rename to gnocchi/tests/functional/gabbits/async.yaml diff --git a/gnocchi/tests/gabbi/gabbits/base.yaml b/gnocchi/tests/functional/gabbits/base.yaml similarity index 100% rename from gnocchi/tests/gabbi/gabbits/base.yaml rename to gnocchi/tests/functional/gabbits/base.yaml diff --git a/gnocchi/tests/gabbi/gabbits/batch-measures.yaml b/gnocchi/tests/functional/gabbits/batch-measures.yaml similarity index 100% rename from gnocchi/tests/gabbi/gabbits/batch-measures.yaml rename to gnocchi/tests/functional/gabbits/batch-measures.yaml diff --git a/gnocchi/tests/gabbi/gabbits/cors.yaml b/gnocchi/tests/functional/gabbits/cors.yaml similarity index 100% rename from gnocchi/tests/gabbi/gabbits/cors.yaml rename to gnocchi/tests/functional/gabbits/cors.yaml diff --git a/gnocchi/tests/gabbi/gabbits/healthcheck.yaml b/gnocchi/tests/functional/gabbits/healthcheck.yaml similarity index 100% rename from gnocchi/tests/gabbi/gabbits/healthcheck.yaml rename to gnocchi/tests/functional/gabbits/healthcheck.yaml diff --git a/gnocchi/tests/gabbi/gabbits/history.yaml b/gnocchi/tests/functional/gabbits/history.yaml similarity index 100% rename from gnocchi/tests/gabbi/gabbits/history.yaml rename to gnocchi/tests/functional/gabbits/history.yaml diff --git a/gnocchi/tests/gabbi/gabbits/metric-granularity.yaml b/gnocchi/tests/functional/gabbits/metric-granularity.yaml similarity index 100% rename from gnocchi/tests/gabbi/gabbits/metric-granularity.yaml rename to gnocchi/tests/functional/gabbits/metric-granularity.yaml diff --git a/gnocchi/tests/gabbi/gabbits/metric-timestamp-format.yaml b/gnocchi/tests/functional/gabbits/metric-timestamp-format.yaml similarity index 100% rename from gnocchi/tests/gabbi/gabbits/metric-timestamp-format.yaml rename to 
gnocchi/tests/functional/gabbits/metric-timestamp-format.yaml diff --git a/gnocchi/tests/gabbi/gabbits/metric.yaml b/gnocchi/tests/functional/gabbits/metric.yaml similarity index 100% rename from gnocchi/tests/gabbi/gabbits/metric.yaml rename to gnocchi/tests/functional/gabbits/metric.yaml diff --git a/gnocchi/tests/gabbi/gabbits/pagination.yaml b/gnocchi/tests/functional/gabbits/pagination.yaml similarity index 100% rename from gnocchi/tests/gabbi/gabbits/pagination.yaml rename to gnocchi/tests/functional/gabbits/pagination.yaml diff --git a/gnocchi/tests/gabbi/gabbits/resource-aggregation.yaml b/gnocchi/tests/functional/gabbits/resource-aggregation.yaml similarity index 100% rename from gnocchi/tests/gabbi/gabbits/resource-aggregation.yaml rename to gnocchi/tests/functional/gabbits/resource-aggregation.yaml diff --git a/gnocchi/tests/gabbi/gabbits/resource-type.yaml b/gnocchi/tests/functional/gabbits/resource-type.yaml similarity index 100% rename from gnocchi/tests/gabbi/gabbits/resource-type.yaml rename to gnocchi/tests/functional/gabbits/resource-type.yaml diff --git a/gnocchi/tests/gabbi/gabbits/resource.yaml b/gnocchi/tests/functional/gabbits/resource.yaml similarity index 100% rename from gnocchi/tests/gabbi/gabbits/resource.yaml rename to gnocchi/tests/functional/gabbits/resource.yaml diff --git a/gnocchi/tests/gabbi/gabbits/search-metric.yaml b/gnocchi/tests/functional/gabbits/search-metric.yaml similarity index 100% rename from gnocchi/tests/gabbi/gabbits/search-metric.yaml rename to gnocchi/tests/functional/gabbits/search-metric.yaml diff --git a/gnocchi/tests/gabbi/gabbits/search.yaml b/gnocchi/tests/functional/gabbits/search.yaml similarity index 100% rename from gnocchi/tests/gabbi/gabbits/search.yaml rename to gnocchi/tests/functional/gabbits/search.yaml diff --git a/gnocchi/tests/gabbi/gabbits/transformedids.yaml b/gnocchi/tests/functional/gabbits/transformedids.yaml similarity index 100% rename from gnocchi/tests/gabbi/gabbits/transformedids.yaml 
rename to gnocchi/tests/functional/gabbits/transformedids.yaml diff --git a/gnocchi/tests/gabbi/test_gabbi.py b/gnocchi/tests/functional/test_gabbi.py similarity index 96% rename from gnocchi/tests/gabbi/test_gabbi.py rename to gnocchi/tests/functional/test_gabbi.py index ea2222d0..489bd546 100644 --- a/gnocchi/tests/gabbi/test_gabbi.py +++ b/gnocchi/tests/functional/test_gabbi.py @@ -20,7 +20,7 @@ import os from gabbi import driver import wsgi_intercept -from gnocchi.tests.gabbi import fixtures +from gnocchi.tests.functional import fixtures wsgi_intercept.STRICT_RESPONSE_HEADERS = True diff --git a/gnocchi/tests/gabbi/test_gabbi_prefix.py b/gnocchi/tests/functional/test_gabbi_prefix.py similarity index 95% rename from gnocchi/tests/gabbi/test_gabbi_prefix.py rename to gnocchi/tests/functional/test_gabbi_prefix.py index e007e4ea..0a77ceeb 100644 --- a/gnocchi/tests/gabbi/test_gabbi_prefix.py +++ b/gnocchi/tests/functional/test_gabbi_prefix.py @@ -19,7 +19,7 @@ import os from gabbi import driver -from gnocchi.tests.gabbi import fixtures +from gnocchi.tests.functional import fixtures TESTS_DIR = 'gabbits' diff --git a/gnocchi/tests/functional_live/__init__.py b/gnocchi/tests/functional_live/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/gnocchi/tests/gabbi/gabbits-live/live.yaml b/gnocchi/tests/functional_live/gabbits/live.yaml similarity index 100% rename from gnocchi/tests/gabbi/gabbits-live/live.yaml rename to gnocchi/tests/functional_live/gabbits/live.yaml diff --git a/gnocchi/tests/gabbi/gabbits-live/search-resource.yaml b/gnocchi/tests/functional_live/gabbits/search-resource.yaml similarity index 100% rename from gnocchi/tests/gabbi/gabbits-live/search-resource.yaml rename to gnocchi/tests/functional_live/gabbits/search-resource.yaml diff --git a/gnocchi/tests/gabbi/test_gabbi_live.py b/gnocchi/tests/functional_live/test_gabbi_live.py similarity index 98% rename from gnocchi/tests/gabbi/test_gabbi_live.py rename to 
gnocchi/tests/functional_live/test_gabbi_live.py index 63bc7c08..9e013a9e 100644 --- a/gnocchi/tests/gabbi/test_gabbi_live.py +++ b/gnocchi/tests/functional_live/test_gabbi_live.py @@ -21,7 +21,7 @@ from gabbi import driver import six.moves.urllib.parse as urlparse -TESTS_DIR = 'gabbits-live' +TESTS_DIR = 'gabbits' def load_tests(loader, tests, pattern): diff --git a/tox.ini b/tox.ini index a694ff7d..8f92aa3a 100644 --- a/tox.ini +++ b/tox.ini @@ -96,7 +96,7 @@ deps = hacking>=0.12,<0.13 commands = flake8 [testenv:py27-gate] -setenv = OS_TEST_PATH=gnocchi/tests/gabbi +setenv = OS_TEST_PATH=gnocchi/tests/functional_live GABBI_LIVE=1 passenv = {[testenv]passenv} GNOCCHI_SERVICE* sitepackages = True @@ -106,7 +106,7 @@ commands = {toxinidir}/tools/pretty_tox.sh '{posargs}' # This target provides a shortcut to running just the gabbi tests. [testenv:py27-gabbi] deps = .[test,postgresql,file] -setenv = OS_TEST_PATH=gnocchi/tests/gabbi +setenv = OS_TEST_PATH=gnocchi/tests/functional basepython = python2.7 commands = pifpaf -g GNOCCHI_INDEXER_URL run postgresql -- {toxinidir}/tools/pretty_tox.sh '{posargs}' -- GitLab From 6f09a699535a1b53b021a8d8f70ca55bfe5222de Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Tue, 7 Feb 2017 15:58:49 +0100 Subject: [PATCH 0619/1483] s3: fix minimum botocore version Botocore is mainly used as a dependency of boto3, but Gnocchi actually requires version 1.5 minimum to have list_objects_v2 available. 
Change-Id: Ie5e4760c630d9906b57eda48dbd42165108c96e7 --- setup.cfg | 1 + 1 file changed, 1 insertion(+) diff --git a/setup.cfg b/setup.cfg index a6badb4a..5678169c 100644 --- a/setup.cfg +++ b/setup.cfg @@ -36,6 +36,7 @@ postgresql = alembic>=0.7.6,!=0.8.1 s3 = boto3 + botocore>=1.5 msgpack-python lz4 tooz>=1.38 -- GitLab From e8139066612e2958e29c75befcbdf2583ca273cb Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Tue, 7 Feb 2017 15:58:19 +0100 Subject: [PATCH 0620/1483] s3: fix new metric listing It's impossible to index a set Change-Id: Idbd0de3452828d0b19946c09b31c27656a1d959b --- gnocchi/storage/incoming/s3.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/gnocchi/storage/incoming/s3.py b/gnocchi/storage/incoming/s3.py index 1554833f..f6628ea5 100644 --- a/gnocchi/storage/incoming/s3.py +++ b/gnocchi/storage/incoming/s3.py @@ -107,7 +107,7 @@ class S3Storage(_carbonara.CarbonaraBasedStorage): if full: return metrics - return metrics[size * part:] + return sorted(list(metrics))[size * part:] def _list_measure_files_for_metric_id(self, metric_id): files = set() -- GitLab From 17ed44ba4541defb4b580fbf261f97169ede0773 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Tue, 7 Feb 2017 18:24:14 +0100 Subject: [PATCH 0621/1483] s3: use a different bucket prefix for each test That allows to run more tests! 
Change-Id: I1c99878eef37fa59d7a806d0e8c838fd85214d3f --- gnocchi/tests/base.py | 5 +++++ gnocchi/tests/test_storage.py | 16 ---------------- 2 files changed, 5 insertions(+), 16 deletions(-) diff --git a/gnocchi/tests/base.py b/gnocchi/tests/base.py index e5412981..0388d1ed 100644 --- a/gnocchi/tests/base.py +++ b/gnocchi/tests/base.py @@ -294,6 +294,11 @@ class TestCase(base.BaseTestCase): os.getenv("CEPH_CONF"), pool_name), shell=True) self.conf.set_override('ceph_pool', pool_name, 'storage') + # Override the bucket prefix to be unique to avoid concurrent access + # with any other test + self.conf.set_override("s3_bucket_prefix", str(uuid.uuid4())[:26], + "storage") + self.storage = storage.get_driver(self.conf) # NOTE(jd) Do not upgrade the storage. We don't really need the storage # upgrade for now, and the code that upgrade from pre-1.3 diff --git a/gnocchi/tests/test_storage.py b/gnocchi/tests/test_storage.py index eb55e839..6fc8f9e9 100644 --- a/gnocchi/tests/test_storage.py +++ b/gnocchi/tests/test_storage.py @@ -55,10 +55,6 @@ class TestStorageDriver(tests_base.TestCase): def test_corrupted_data(self): if not isinstance(self.storage, _carbonara.CarbonaraBasedStorage): self.skipTest("This driver is not based on Carbonara") - if self.conf.storage.driver == "s3": - self.skipTest( - "This test does not work with S3 as backend as the S3 driver " - "has no fake client, and tests run in parallel.") self.storage.incoming.add_measures(self.metric, [ storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 0, 1), 69), @@ -81,10 +77,6 @@ class TestStorageDriver(tests_base.TestCase): self.assertIn((utils.datetime_utc(2014, 1, 1, 13), 300.0, 1), m) def test_list_metric_with_measures_to_process(self): - if self.conf.storage.driver == "s3": - self.skipTest( - "This test does not work with S3 as backend as the S3 driver " - "has no fake client, and tests run in parallel.") metrics = self.storage.incoming.list_metric_with_measures_to_process( None, None, full=True) 
self.assertEqual(set(), metrics) @@ -150,10 +142,6 @@ class TestStorageDriver(tests_base.TestCase): @mock.patch('gnocchi.carbonara.SplitKey.POINTS_PER_SPLIT', 48) def test_add_measures_update_subset_split(self): - if self.conf.storage.driver == "s3": - self.skipTest( - "This test does not work with S3 as backend as the S3 driver " - "has no fake client, and tests run in parallel.") m, m_sql = self._create_metric('medium') measures = [ storage.Measure(utils.dt_to_unix_ns(2014, 1, 6, i, j, 0), 100) @@ -177,10 +165,6 @@ class TestStorageDriver(tests_base.TestCase): self.assertEqual(1, count) def test_add_measures_update_subset(self): - if self.conf.storage.driver == "s3": - self.skipTest( - "This test does not work with S3 as backend as the S3 driver " - "has no fake client, and tests run in parallel.") m, m_sql = self._create_metric('medium') measures = [ storage.Measure(utils.dt_to_unix_ns(2014, 1, 6, i, j, 0), 100) -- GitLab From 3219e9465c4d8390ee23dd2319c9d6accdba6463 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Thu, 2 Feb 2017 11:05:38 +0100 Subject: [PATCH 0622/1483] doc: various update and add architecture diagram Change-Id: I9c92d59543272628e9183a580ed36de76cf4f382 --- README.rst | 5 +- doc/source/architecture.png | Bin 0 -> 60234 bytes doc/source/architecture.rst | 139 +++++++++------------------- doc/source/configuration.rst | 174 ----------------------------------- doc/source/index.rst | 55 +++++------ doc/source/install.rst | 63 +++++++++++-- doc/source/running.rst | 110 +++++++++++++++++++--- 7 files changed, 226 insertions(+), 320 deletions(-) create mode 100644 doc/source/architecture.png delete mode 100644 doc/source/configuration.rst diff --git a/README.rst b/README.rst index a6cc77ea..2d76f543 100644 --- a/README.rst +++ b/README.rst @@ -7,10 +7,7 @@ Gnocchi is a multi-tenant timeseries, metrics and resources database. It provides an `HTTP REST`_ interface to create and manipulate the data. 
It is designed to store metrics at a very large scale while providing access to -metrics and resources information to operators and users. - -Gnocchi is part of the `OpenStack` project. While Gnocchi has support for -OpenStack, it is fully able to work stand-alone. +metrics and resources information and history. You can read the full documentation online at http://gnocchi.xyz. diff --git a/doc/source/architecture.png b/doc/source/architecture.png new file mode 100644 index 0000000000000000000000000000000000000000..a54f873fb11a5010eec2e5322b3207bebabb6c67 GIT binary patch literal 60234 zcmZ_0Wk8f$_dZOggoKC;a-@+)I*)V+NOz}F14Fk642{x_G)RMV4Im&bJ#;g4NcaEd z9Q{4#{qTO_!!Y;0_gec}Yh5e$guYdh!NYlmgM@^HCnqbZiiCs;LqbCNiG=}tV&q2t z4GGB~Nlx;$y8Fy-I}>3!2q^(sas zJtHE0W7YEb)bH3eR)Bs?kv?T@^C$N21wvfq6)m4u`n=-7W+>Z@*cS52zG5abn{9p# zulA;OEo*r)I~m9x4C(x0WPjRKLII^{GBWX0Bc)T!aaWku)_Htb!BA zyD@a%C(->@CG}dw54qR#Q*7-EvLOyjKR20t-cAYN)qtNl{LvQSUV+WayP{YE86!~C zjBI~7pECaU41KDo6WJdA+k`)7@-5sjImsv~%4|7hXLbh6=<54YNk1*Je5>xyyRsGa zo9=->Q50M}40ngsGxR8*ygM4E&Q3P*bggf$prIe;$`4?i5Fm!{`pyp|DX^5Wrum6S9M18!&F;4}vc_dw0Y-CEWSBEAVU}cfT4nEJ zG=k%2?YH=e`)*0|bFxaKj}oXstu2@tQx1B+aZHqy?F_OV9+i=Pvy=JExwgVLkLp>l ztGExvx!cujVwNb_uk#l;TSS2xZ$HrP%I>#aNd`EXA+r_0Y70yzI#DYQR&2-Bbd8!Y z3ZCFg`KVwFdAEVQW~$m89>MN>r}>F1+Jd{*?Ho#K)}_@E7e5bc!c__v8-Ht%_AfJ> zLvOD}u_ljikGupVr*}seg^2NR`zxE$uV)d{Mk37hdp;4n7(O!W-V$vtf8_nUY&$@M z;IyaJUH7`KVQ!v+DuTU(;6#t?UwDL|nQK44yE$1p)37n4BZxD-muXzCyDI%70g96x zZ>?kcoFw6r$Hxl9SGJ2{-<{vc`s`1VzxtCRr-Jn{n)@`?E+{FS@;^_;d=9{Hl~qDR zHdG5u3FmT@{okhE+=TwsHW0j1!6Srzx*?#Id~s6ze;cA^B`$QxP^F;sSR|0j*?{yZOn^yg^IQ+q&94)r~Fh&#{o8q2}vY7 z@qbLfvHPcYiMKn_8C?L<;ut2T!)9<7LI8A04|%iwCZodW|NJN>hFgc*!G0tG(QAJh zvL$ONt+~a3PF3@&ALi_7ec)h2rr)}IeEsO9d{3x*qLFLkMoN#hfkxecIeMBvN9?KU z2aw?1Y=!C4y~) zS@ySHOtEM}sG2!(W>-}F6C+_mF)X%tM=$z0lko~vCiVZbAcoYI`^Mz+qiH-(Nh?qb zKZVgwp@db{&#=Qc|NEAt$Ylvi-8b?{iQCG9h8J+k(B*;X1d{_G%#FqDJvejwt;o)J zA4=~V`DN#bGR|b8;OqbM!j`B?QWK=g}r#l>TE6-#(`X 
z=qXfz&3pKp7ga~R-B@bp06;)|_!U(wT=+>=nt<3HwHHi7vdezn`GJ)$yOAfl7wh<_ zMEz0p8ajGx5m-=~J?PaN-9IS&20*#3v4{{;kGbSv-v7O3;I;XRdK*jH%1j4y+4rb~ zK5q2HQLh6je)ucY)4XZ^-pkg~&yICm1{qMAu4H#!d3+`cvj)l>L@LRq2K{eGsX}d= zu6A^GOJ-)IS6oN3_>Qh{Re1TtJdHdloOrcqmBYb|X9~~4P5(ty{j6gYyMx2KS7pW~ zd^`JF?mRfrHlIn2!wy;gbBxfE^{a}Vt@gcxlYu@qs1(Gg?84~>x7b72YEH*1)79I; zd$M1yCwPZ!mA+sdB$1WaPX4SkYUB^#(c>)gml%s1Dk~P)6D|6Vmyy!sZCE%^lww(U zw^P0(rvApMO(E%h4jlZCQaZD2-Xus+=KQ8D)TV3Ba$UtAgj&a9cnB{atmb{J=9tZadT;k!Mh69CgCAu0UdsL1 zIOm_Er|w7sw2fF{0U7E?8Bl6Gw(LsGo_~ef<7u%Hz2VzWe5idaP%SrU`JYz=pOAQ` z*&JBrFRk^2gF|+%T7_zTjsyAhp`{OmXs2nJ<6JK;s;!EqCW^N^#g;?IMzyH(k01M- zRcIsF>a6nQjzNUHpzj!Lu8Y0RSXDw;UIIG<;`2l)Np)3!dBf1*u|(xgr9$QTLO8gB zn?j~q6l@8Z4oMpTQ&E^wsFeol*27bvLE1 z_BGlx-o^6*`m7H|WL7Kp1LupInUO{S*Yeiz)O$o>Ht0Xb>G}A7I+0B4#JSXu738PP z(ct_Y60jamk)*P47I7xyyF@(sb3BL&vy+#=*BG|JzfCDH#H|~)^|WQkm+cR?`N%k^ z)Gi{mb>Z4(%fj5-P3386tcFnEH9IaYD}oP}PwzEwQ?Q@*!B#D=o0cD~lV+K50d7-1 z_O&4B_lrh+`4P^x(T`ZRW5Ns!#=y4TZlKn3c6?!Br0WcdfcbtIQv?tLBm1dJ0m^;> zSe0*~*qLK*E(NY5e-D&+eykE8fWzW8^T9Z-5oKIOL9djx{|}VecySaaw+6agO8EPM zA0Of`;Mme_ZfoiE8Io*J)za99I+i0yKum9XoUB`B-@@=mAWO~>8L0oKiI68M@4q`s zA=E)8LFe<8@KMbz5hGpz4@Ub?)O2tDnOCUn;uHOEVP-$%IhZ?29$9UE`mM0<4RLGN z6~fHuJ&gGeDMA!L(VN{+zZRUlHju_qa#rXh7+7WeS)M%)R^mWlK%x%KDe-?}sUkOj zX-t5L-}}A`F7pBpdV#T#zEioL>1pQYih~b;;iE6}3zJ7__9GNu{y2dUw4TJ&FhoS2ofxTLr@YA1%SfvV%Tzwj1E%NfkU^TwJV8=}As_|Siy0rd#u_FCsvA6wz9lbQL~ zRzxv|Q9`(cf!a}+N1DKuM$m>SwAVTI8UJx$4NLq#&&Wz{lF@;sWx3RBEoWJd%<*Dl zGOj$GbY~W0)l}aP$?MKU#YIHu^4$n7UHd5?T~0P$SL%?0sc`phmFl9+O5}okBAl`a zRT7c|d_ccIiYpXw9#2!{@%21Y=JCIn*Mby1b7Wwd z^4VoA4O3CO_nxwxf>1(2qE_N!ow-c=((75T=oWjMdpS3I3a-6{`Kgecd15qM>-9w=WR!eHTyN17Ap=~z8A+-w@2Yn52^v4#grDWC98b( zM4xTqgD26gu8{M7Xxn^|^1*pkIL`fk|G->)T$=aj4`;iW;d27!pZ%rW!~U;EtMw1I zhLu$`an8hmBrIFQw`EzkBYo^`X)#rnA0?c)!|=K%=l4NTKcP*;Gh%?BSA@sJ#26}{ zUjv-TdFoPN*XCsg>ggAZ-}iYvF9~&e{s3&Od9(b$o2bUR8?z}qwh*O?3)CH8%!tLb{qD@v@Qx-&+7sc=V~=SIowVR zZ%Yqc^v!lnY2GpA*i5z2(J>rE*)R<$ceTYMvZA=iZBPa(RJ0s1qDXRAiv`7;89_=N 
zQ6p(af9xhZhq1b&sqY7TB!bPr^`vp*>v>tyWlalcTj5=7v#YKB^=X&^jitKXrl#o0 z;P~#Yovi$fl`oZoBK?c*f-jmt+WJ|x^oZhX2~}jNhbTxQAli&zjFf+fys*X(NpRsG zM@LO^z$q{a{``1>vbU&6#!FrQ=q|})QS?ID&w%|!WPtb8u%EEw1fNxl2FGF5RIIA1 zYs%updwz+ZX8o9rxh7*{A1j3t)~5zh^WcjN#P@l}GTO#wCDWRL0^H*B>;D{{q)k!9 zXpGgJ7smYyrl{(k01 zi6bCeh;npq`wNeO%cou!5mV6~HG=!#2eJRyAp+WIRY1eEm3}feH{aRny?C9efBiL= zEBH8c%2#XlK5BN7{#y0@68A`HL^qkCbOwP}LiG0`_s7PT3xDl0$@eU}7-9>7u3)v> z-Xqm!-x{6%rF`rlPX1JdUu$Pu)sS7ty{lyWLcPN6riXHC%t*s4nf0pB$-qsI)4XL0 zGaPV5Y4$@UpeYXv3Xr(*bP>l*z$xa5tUT+xHHe8c3nH)ALnMX3OjvAVYDUbP@nyu- z(onq{gbt0+VPXS_>oNb>4cE48Nowy~7Zm(dHzP0HL4m$C=$UX{%~}v)wwZ~Exu>dp zUq<%lL@niu^Y}&;^75<&$Qx#;lMbS~5*CsR+scp% zTE$2Tg4eT5zzk_Cglo0?<^04z>Ww$IcqlxcdYf*&bN3|COiPY_VWNRP`%MMW8}jE_ zouA*n89=W!e>PnxsoF{QY0dA)8WZg`S<1~@6>5^!yNzUF%U}K;ib~u}s7gt076nyl zk6A3Pj!DHsKB<|0KPNC?XPzB!v8D?SO~ht4^cCV^xVGcJx51@86=yk!|FCLTTA|c} zg()^kiD$#?*f*$Q0Nz->E41mq%RlKJiGH&3&CPGh`Sop%1$=h00XEH47`=x4gvxMl z6>QDhr;Fhd5#{zW;-qzon4fnnBq&fJG+x-|mmF<6Z~0=*7VJDLtb$NHFs#o!)4^!n zEP6i;U5V-W`UwH?)+eo|EM<;S-Nq?*1Wxy0MERc#M- z0o#9)`_gDffGj`i>k`{(CMK=moMhpTtb%hk%0g*dWsya#)|TP7L`HPo%->061x;z) z!`a~2FC?+n_;_6Id;HE~&4~W)8svdIzcrJjg<~PkNVQC5^wvjrJMOn96s!80g^5l{ zrr+*IXU|Xe{K!kcLM)XFo=cLVYx*;FRA)tvIKF;z==}n`#)KV5#L$2#S1{)9^YfGS zi$H7U%eci^zvJSZ7*RTooYq@@3UfR0r>PqWu1a0KXWS)Rrn+Jde<`^KoigA4A{q3oS-`?D6C#9yIiS}iHA&nPD!oaiCVmu z&Mx3itsEA)B{){qp-doKtMI=0m30T7S~zu`{t^~%T&*CM$mZ#5eQnM&er>i7Oc^nt&^pBGHN9h|EBLR)^Mf~nlWnI&3>(dYQ$jT!E@Fxe){V(JQ)i`*n#jY;zy4s zu-;%P$3sFA^tNZSke0a6OVTNqR<`W*C?^$%jNc`&FDK;Il`Ox6O{T$(l~7{*Kt+Gt zQSc#9%uK*SX|_dk#s6WaKP3MBc}eYnlvD< zqw|Z1-Jclj7{=~0A=W01ZU+u_X4cK&Xi4dxr(XyFl^sR8_nuZOUxr?DDu(tF0gSug zHDwNs65rCI{p6exUHd38+|2OK%msr`JR|`djPNU)N$_)Qd*#+vqcmFJoupiHnJ(61 zGZpK8CUjSU&2hl^cr9@nOj&E0$IUyr5+f9wC7@sVNJzR`ZO$?_R1owzhw$S14R%G0 zaRa3sbg9G)p!3=2tsmO%Mi#gFjpja2l^<+{wiz&8s@?Fl-I)0We_qV>h~_yweawrj zq|m)Zxq9eJjdciQc%?FeQ5`xqu6c5lX-b`GX1e(hwKp9&z{?=-VmJjs%M4`vxp~oX zlDO3(6mO#zLVLv8Uo)$a$QAVZ0H*Hze~w*%w6 
z26*9qLNLvajF}BJ7DuX&K|;7eM#1o^@G39J{A%xFn+9_v~Rzo3Z+J5KMWx1w9VNvbCOz2bFBvv_Dlb}pFg z+j&a0bP5nD1K^n1d!-erT|Trn;__x7z>m0+EvlXFQy{{A`=)q{VRzU{}HPp8Y z9p}o5;<>cCPf}=w`dWhZXLuEEWPfo%1 z=fr2{yc8)rtjL;i_*fnA5_}*&4M{ngRrb%b@ zvQfsq^5BsD4-n^#HW7tEI>Lpp_P&_ZpyFy78J>uR;;l>tYHkoReBWteTCmKi`IWbw z{Hz;i70p^8!#e+|B4ti7K#+)xr9LA8I3CBYNL@2(^QJzanVQIV*;Y65oDF=%*HSNg z9B9UJe=>D?rvk}5JpbB!70Aw9ENa(rVRmt;zqWM*zt}*d)@{*ZipyV!u*XfK zO|BxG#-o%4jGbQ}$yn95zCxTmF%&HkWujYub45xMwQ@8a+18_ZyU=;+cN)C^be4g* zV8y_9dP{H$I(BXVa+0$^PwGEpsfw4@4AEhFo)cy^-oDuDnF;{)kDMep6?_h@8_F5y z+6HHved$FF4F#U+`i=pVxixXB=BYaKSH+o^GB?Ls%82+gp@g(#N5Q?6Vx^_e(3^`+ zhnc%3e8l~P{X70Xx&WZ8Xw(Q2!rU-*dx~#`p5{>y$D#?H=Yirn?}tCrMz`ci(pYw) z#K*44mh}%Jq?SjAr`Ang9o<^pY}G}S_iuWQnBZ6L@odjXXx;RSW#)fU8K5nsR9mLJ zKDghYx=3a!_=TzO#4W&c>ioVf#;iB<>O0e&sQYJX+O|dTol#38^WDw|)!k?-JbYpXsfWTn5-;piApqabNQ-6-wNKeama5yxGdX& zKT^Gi9>xHGl=B5l-_5-$ex8j!XAoh;;Q^4SCdtT`aq_f@vMYk^(aFNbhOxw-og1kt z#L)RO9^DY?7VBfZK;Ew}@tz*OEa=UwzojjXi>o`aNAJNY2Q{?q?l+x9rCeXAgmI?w z(ETE`#DCGY>@|EPBYFV~Z6y{pOw^jZ|HRIRP*;`ql<4zUP+BPyx`~uXF}llA^C0Ls zU>l$*_6F#bAHvf%ROZq^=ffVE#md{bbgT=EkO^bAK@)ET)$-8Z0y_# z`QE2?%kLL9D1#bq+R2*~>{~pP5x=rriVT|4)(&VIAc)|#7awoxZ=Z-+&K+PTGU}cl z|G3NO5UH_t7rjHYw(=83qj)Pk#`nu56pQ}#(tIhU6S6eG@MO7-qy5EB8jY|Nl*SV+ z=@$oF*1-?r2ccDykI)KXN54xzYsM{o{udyJ3~$L(_Oyc@iB~s~8pElgE6UFd4KwEo z&WqM=n$>z61PVsOy(otoN^Fg$ipxs&99Q8p;bJ~or!aPJd&ESvNF(HA407&w(xY(Z zZ95%}@ARCjD|z`@y zt*zgYUI4`B9B8uP*A48qVjqa<2Dcnzm?$gM`mMhocLo^Y@VSIxMA~2cd^JQv;JHZa zlpH~oiGT{OZu9hB41S3zDKzv8IbwX5)@iImTp~s~B~zvCgQwUFY}il4W&Lx6%gaty z6Z>y2+N*tqPgx6MZn}z;PU8aPMo#h%qzZnvREYM{CQ~($a({`xr5zZ)8mWfHnnR5JN6l(AbWX#;o?iq z2I2A1+$1U#XY}Xc8Z1G>H+h|=vf4`!pT>7`b)6y?8@%y#5~c%jXZyARmgzJT!c&~M zAfk*p#NvR4opD@evRrDc@LLR7XQB(nW4)k-g`x4I08}oZavq$^6L7V!ZQD>lfd9-x z^mm^?;lsLFma$(06~1l5$<35@2V>;sSj*5mIb0+Kh1{!>HKSR3=AnpzECa#GY z=%oU4Dm|WAq$aMvlun{XD5=qR4+rdrbw_380_VbVDZ$c7Kmid=`!fi zgPqN3!irESnoCJXKEvt&Oh4`EA|=#=VtxF-O$MvuueA&3;hie8Ja|2DIP+^|uKkqG zAt7Sja-6ewq_TbRQBP+~iMv<9}Z(6I^BM{S_z% 
zNN|=X#v9;=`+9P?Hm&r(7`wUno9XP>wa-=WWy=R0wJm3#)+${4pAGq2Q}4Js&GFK< zD<(nstfXgf`wB90TG$I~?|755CT-JP+P7CI*8_`V?&+#Qgg~@K0lfhFhiyPVHz$j5 z3>*mg>}9pGR&gUeP29fRJRt|31TEO)McUwNZU6j6!OJ*nZo&UqP#UU_20z(;ui+OT zxPz7MEJ*6ETcG-T*PmLGFK#1)_~&LY)IWbRqG|2HaOog$iF(HgD1l@tQZpn``}HJ8 zb1jvc=F~K2iefL3NyArs@;iMxWKAL#f}|;LtEFMVNSuG+)#0!i{^~F@abw9^h7~ft z09ITwBeoqhkn^#I#l3l7-~ub&9h}nwD_Ug8v4Ld^6n{&N9rUh#{>R2=M6j_N_m2ah z56(Uel}Pug*F?nQ7tGM?O=>Z}@o#i3Td4hdpLF%b3D!Q2#{Jt>Ok(RN72vH``Jk;mdfIo;1SdrWlgue18 zBn5v`y2G0Cv0Q)+lmHGa8km-_w8&lhVbBGqC~IgZ**}uv{}dlU@Jsbbz?#vX8@Biw z2pAyV!R0YPNRMtk?EFVsvi@|iFlyxA26qzcAbHY|MgqE172Dlnd~7K8Oz*yk@64)T z=`90QD_v#Sd;amn$ZGiC;S(mj0b9CXuu-d2y^m=&0G6 z^E+LxsI%er{qlHIH!rBnL*JKi!l$Z2OoZH}p~(HYlEjP0LSQC3wxaKO@o=;-Qsq(s zZ?@(yug0F|pmSabCjMth?25W*D%TPcQI@#bQ}$k*dq4m#XeRK z;6sNKwuB#_gJrp)(&d4#)IB48@3v*VA?n33#3=2w;cGr2CgV{S<4E$HV5S9_h5 z`of4lnyuqa>NEWt8EK~_n4P7IW(`UHgjKv(VaWY-)B2PO( zF5vo*xxJG0FFJxk3^3YtG~cnNZF5jaR_dr{44zW#-Vwv1;5we@?&gw7Wc>gHb&)tJ3kau zkn@Ef@s7?e&Mks^SC%UFgbXVi^PLyBjS z!c8dUwRP13qt3CRjRCAFA0Q?@uw>?aKz?zQ7E6BGG)Uh04B|PS#!(W<5`!ZD_@yNc zj9vCi9DgqfZ8`@-pU6L@qJMlgo*N~+Mh^B_G}-+YnCG~q6gg@l&vUmp;<&k+usQ6` zls4;hDDTe9pbFEXV(j{==Vf{tpI(`-g>m;iv)Z)e)T}GW&_)B0&BRcWNzNaU;lPeI z-oEvevR7$_lL2;Idb8lLrZt-TT`!+{tN!y+`WZDagz!(G!fYW&RE$R!$-!NutV#^H zAKkZH(_1y z3lFkcgudCj7|}3KK3bB5lvmQNa>C6^$FykbEmR1XyW;(`z<+(X?uDh7rxTHD^gz%3 zbyvK56KjW8>)UKUZ-G1AgpZLMzO^061TVh_75cv_@f87}lo4@Z07;_E(Lr9cWk-|s zPwhRG_AXBZ7SA9{!xiz`3*vn706?N8LXHuvt$Tx*k8Xx77^&i{t?j?6nGd5^-rN`w z9g^`wUI$rYm&7OqRc~-WA3>fAP8CG;2sqs|8TV|unA}i=k|SF}tz5k(V}OE8;PITA zKM?zul>ySPNeeT{Vx+_N+fE5C6(j#}|4(x*`Nf3{1C=Yq<2#;l4ux;{_&HRVR)fp$ zQ_(}(c%V9a3$}TFpU~jge~%46EBSTSi;?WxdK-N98m{IAO(jzwAdm1%Gs_$U z1^V)v1M8(48kBCA$M|oBb>nuKxrCAZ3yl8c`CjO)%}W(xMPMXv{c~Zjd^={X6N4lp z7zjBG@5=NBU%oFX7Z|{)n57enGngLlvQlEYAndvZ=rZgIS5SA-CElYDtDP!7bb3M&jIRxY{?T( zYN{F5^H=R8Xk*o)ZWGUXkt86A(0J(nHp5(24mKd+gqjxY2lQGV4j7R|O@l5oX&7!- z`B$4g(1=hH0_jFpO2Y3nmj*~T(oFu%!%f$Y)HZbRHUvA?k6ywyVKqUdbSc$8J#MGA 
z)LH*M*bSz>H|?ztlP%gYM*j>&Hjj7bQGJt)!Ma|=Pixp@-lb4-P25Q7{Fr^?SnV|U zVx9-Qflc-h6@3F34fRwWb`&RzTfsgBjWlBIYSoT2;&VIcXzy^oF+@;2ehoJd>pW^0 zs&E@zb_`jdlY_tTtc^c!u-56Z{#_XW;_CN&B(mI9+|FM|uUY!bo0P!ik3hr6%NXep z$+))|1G-*SlY0&-+^%1V<^yuh#8`p^E%>R2{n{4T(gaowe*?2??+r^pZ!NVlf z>mLjGo{y#^@zT}86#$F4vomidQh^!w6bb8p0___da3%{&Lvj7y#3C6q$JPCL)cL^Q z5>jL!Bb|{}lkvGlU|O=`T4*xk)?kcOrr+tXa7ct2qqlOj_p*NX=Kv3MI_>=%H1_R7 z--7lcb@urG#qa@aFauRo8PKt_dpz*Kod{^YE|LdcC9=ufehdHkDKYF-TYa`&KcmPi zG_SBx$15S!^jn`SG`!0q{+1Q_U27ZCdRkl|m;J|%Ff4w9l&NG8uTJofQh_uxs&GG0 z%(JOy|9RUuF1VFo>U6mM2pFvNvdX?2LAKV`3stjcY}az_?ycPgnneXv*ziTN>y>e! zM^jQtnHBkIibAeU7D4q`=E30WxCR6os|oLTptn?%weq(fdJ@4|d~u3-yvcek2c?yGb6aq& zXg4s_=I*Q|Ty`8Cq6SzSqBA+Ce|Z%5aRv{RV$oR4f@>5DOm=*V80aAb)&oe|hlmej zr;A=MVq2bDdfA%|L+L&^Oe{rQd=RNi+EDFzYcC@at zOsorPv2E&{)9`ECOg!w08JiTS!T~-kf!11 zYMF{2Fo?g;Tzp7|1}Pvb(K}=|*&hFZZ?)lc$3VMfCvi`9$7X72yd$d+n50cc1lN{+ zy*a%r1Z?8f!tLn~Sn@yZ7pm0+3YGNq0MB$JDU=SsyVxb}k{MhprW^r&w-5taG`M1X zym8+l&A(+wZI#k9EMvRByQXf_RO^Nezppi5zO4!k2r##?h@JN zDsEHT6A zepUD$@m}~`pwm~hTa@abKyeWby)rWrF1?-Ae`s6TZ!frWk%)6^(Bx0^Q9}3PPsy$4 zQ##4Kj|aQ8iS2S&TqArnQA!wzmL#7bBS3l|^Y8F%N;udmW?3qTc5&WE%B0M{FJ06W zn}Uxo&%I-#5Xlzmuovs@TYaYd6;;wJr<6;%=3L-~aT~F)(@yny`s(MQUdYn}(A{O9 zPl}1u-%dbqXjv9@6LHQxIB1+xTBS9a5(;9|DVz7xheD-lZi~t}Rwm(gE#1AT~kjN3E$Tj==+Sshs0rkB!x&6Do$EDoQ2~=-`=F4Yg zOurU5?NL?@T&G6nFApdPlOf5M?3UMR%zIx$>~qR2GH!<|XW35FJElAX#d@BX<6Ngr zP9p?{`rMIIHuw^^*BKs&#%4k=!NSuZJ7Ac)U)mUvCjiffd?GB(hqX31O@Uk~fSR%l z3j!WBA~GQQ^?%yWfK3l3;go=$c)YB7C9;^g|GDK-6Ky^`&K^=M_j=!|r1Y$D;A{>m z^vzvh$UcOBnr77EITL}oXo}z~zt)^cIDCH}{zPGB18*Yww3^ylQEEt9;BD}u1Pq+zR?mB=g8Ys+*ozn05stAE4S zzm&L;QMPj#7|P^%jFvRm+UHbJjf!tpNebtRR#!gxQN|2A7Ex`U3Hv1&txk0&TjMp0 z5v|T%Bi8h@dU)$hl-_HG#w%(6Z0s|BP*G2o@7yP_s|k&Z(;+2^Q+@Yvkron z>eM;nF5@5E0?8-XOBE>2(gX{6tS1gSoF6Bq)OuY)iN@TCmlUchS=$@`zNf=bA`*>; zC3t)~X+Si?G`%w@ zQ3#Vny*HGphV2^U@KKQPODqp=+GRMNbNo6S4 z+?=bL=)|{qf{gg2?o=d#{lK~mikPQ6NMbh*CEafY}tqC~Tm+{l)~$iM+{;F{G`2|qG=UV5HlzO#ah&* 
zV+>QSpP}}(Y>UT0;Ps}1Au9P<&0&M(#{JMYiLdOu9)+am6znL?EN$U_8taVr2i^LJ zd&RDaJ1T^8%o5YT0`s8UTzaKNGaHGzrIL{eE;b$4ZUCbNZI1Sc!_B%$UO&Gb!_!p; zcK+G2E05z@kMr{ln=FsA=0)T4Uc|t);3DCnJK2&=ic<{Dc=2is_-Y7Zf%2(-}1Zf+m18so8e3L z_s225k&DF!$0-jTQd0werj(nDyy48I!TH!UoTD^{G-lXC`yhDdCgVKGm&EyWb;}`> zxwX0cp4SJ4bu&rCJ+mGbm8&%xC7Q-sI@?OC(m zfzOfEM+3AFH)OU!NvVT3X%TLh76m&X;93sp7a+=D2W_ai36sBr^;(JFSk4WPi}SjI+u?yGjIx_zjMOYNHZm*U~ZC<8r9gNtOM2j#Q%l}BD) zlcmS^KN9JIVqu6ov9a%S+o#;c4J5jnA*kp{@zbNezPY&!n623s2V{)Wjax-~0&2L=Z0-FXo)6I`zua+@~OV3oP4Jz}%sw_RxLg#lf2CHH_+HbX?cp_dDnn6rI?KK)|PzLOn zMi=%NCrI{SCd2!!ZTvh(XzsEMH$4D(pc+Q;wo(Q)W>GxZ8K=#T)6P^jAnAJvRxpnz zUAo>&>GW_NT&~b@9_%yX3nR-r@}8xfX`XI#xV@?le$&Q#6h$EB zXEVw7Ea*7ClIoqS{n3uv{=2zg@xjZk!FjUt<^5=l%|x!Mq3Pg`3(EF0$pF)pm=OT;l`yb!lNW0oXKp;$Q``%`KZWWhpc11{- zID*A(aofhx;j;uR3c|?ync;fS^;KaQfzbc+^_pKt+TjAvG$@th~V=3LQ5pXVt#qbu>E!?&Ckwz4iqqO zdqlGuR>BOM)w-=cYUTen+oCKwWE6MiFa|OfB@kcUm^v7^RjMW?EnM|ZtaRyrM0yoKAn>dbNkRTTjm{$a3g_3PBj59Ae-o%ZKLu#BE76-;x zaP<>q9(%0ewqUP^mULv_dENMP75^p~xb*dr{S`XHV#9O26%}7CO=GX&H@j2s70?zIzQ3dpYOCt4qaxfi76EaH>E);tb5wyH`x zuQzUV>SE-NXh^KxGAH_dXWS{)e=?`X8T3_YiPy2*PeWB(o9mTV@0c3X3U+ZSge0fa zt(tthr-t4cPCP^2Fk3QnM2H`I5R2|lVsS_;JsjdKKmc#KhwTs8zU>ldlEC0?@pFXIWh(nK?Xr>@+fU7{p5eHmU~>Sy zE{FY{aMUA4SRKhS*yH(=UZcQGuzVyP)s|Dgz-a@s`f~Y9jvVD`gX(iF45qWD1#ob_9*OgD%SKbG~mkGLLQAJ zAVXV(NK^>TaW!4jH{V;6pAAt2im*=K#TTo@B04#_#h8q+9_zjYO1wsrqghdDsB?2f z@=Uc=0|M=QdRc}~b0oe9v?YLL9hc4-yV}9f>qeA;ZJP=V^)e(iLV+88_$`Ey(`0IE zV%1*IK@GU=x`^vyCu&4o^7k^}E}ACHL?>bRGl9n`dR32zikH(Q25uYTw=N2il)2ww zZR9`|<(+4U!~?AZWm*UIW6jRKA2Via_|KdCOhfHo?ji8a^Fa3Xb_G{jU|MKtGv$$J z*q@Kr4aJ+gF><3PiOF!TbIE>)j_u5EPR+I8j_#%$au{H;J4XL%y(l1K6w@5@ma zj6l-JmQo%jpi%fc5$~>MV|#eFt5!xh&&R6?12ZTZ%i_`OQB$h>P-c}v;~K?xIZ zO!oG>8+Go7ONP$lAXCwdSl5NAcM^zq)CiF zjL*w5XE(P_iJy^#GLY|*$w3u8d!E-s;s22!Wtc}?9_z6@w(rc^w_fsC52^(aUO?Bj zU4=kZbEN7{OZuXMHNJ^!s&%F5U{6=iuFo8(gR;IP#*GRTu&0@I`B||s+u{O(hW@mL>#%eOM^_Ww z_4xPP!^RajCA)7M%=kSKMW~h{>TN~a906wST*&t1jrTnWbdVT1ys1trDY-l6J?_HZFICbM8k)$;%tX!o-G(#lQ2Ah1}MCw71r 
z)WAsTH&!GYQkXT~ii}WI*udk9S{05griCfhce5eJ8x?cr_6O}RYM3iY-qfu^6wu+b zpZc2C9;cc*`Yl{G+bVSlRPFoJQ)Wzahpp_S^X;ZX6v*?vo&0tMDdhogX&H1Q9y4%o ze8*IU%>vWcOWXNKh0NyF?Onpj#I^X$tg74mW3PMq*FZwMxJByNL0>D2knZK5Rz5-t zXxgF7$n+M;O#FQUb#|o-s@?_hU!sl!$6_5Hek~y4AgZ%D(rG)mN~nxbW^RwaC$X;D zLqqT2x@);`@O1ghakK%}=%eZk`Rn>1YqD;_jujMUZok7Dr;75AE`1xVC`|^Kdh`Itvv~^1Y>!fg zELSd_k-bNGg1Up0h%T50rk~l1ylTKjZXdXqBk& z`3Q+BLz0?sO}E7@xG2mmiZ6i2Fe2K3Oyyl(Rqji=T>t|7KVf7T0Cj^zXEY8o#Bk|v z+u7Tq_QeBS!aTld$!d7iW}99)iVhNmR;X2YCUEx8v*V-T0nxX?`qN4!cGt+++da;> zQYm0rYvJyuH=4lGP#Zz5cs~pD+}Mtc8y4Tz8%cYA9x;%jO}y1f`{gpcqI0Jwf-=vZ zqNwrgHDut037+kT&DC~(Ia#^TAkAQhl(!a!K&@1As8R%#$~;sB7H1;;8IVkR*t=b9tLg*>w5NyM?((7YJW6*Fh<4A{@H@0DF3MH zRg14_#P8n&bESUS71~Q`H@3##8e@m@s{S-2MesB%49A7K6R}}cTRYtgcmL#kkp1ql zt?%~srTDVJf)F_HjzHY_&F}sp@w4^enUNsz#fQO^5bVmWI}>Zl(!c}z%kY(qhrlbc zCU!A4F)>u<-z#NlEJ|uU$iS`F9qxdmEy~J1zy0`S-HM)@>_0K$UTV_ybBB%ss)CX3 zXn53|KMqb8&umjiK>T}se&ukcq>4Mro58UXFj+>sVx3!I%_YBbB zBz>i-#)+S{Je{7U6OPGju7Gf#da24-Ac1_4fcMj(+3B(ANQSj(#GhX^G$NZF)^^RuIpyZSN_MnmN&DT50 zXdLMdfQplsNTdE z1~3*TzR?M6E*GYsTQE!0Hd>Zr+}`(J^cRWtS|MBe8Bo(qWSJ5dCEe;{xDxfAf8z6_ z{n?INe{elzH77O}!iqz|sBm&(ZH&cY$5 z>lCCTojq@sKBP0qkJ2_6WG^i^@I#8s(G6TW_gIP@2a>;HgCRNHwxv7$3T85a(d3+; zK5fC5!{25q?77Q?GU=PX9K5^^R!&7V}@X;KB@+Us3b21$i4rN1MpDDP+HmK z7aXc*rWQJS2iS8wABcfRb}++r{ALI|dn-nkH0knU>=&EP%xXHpkuv5OJ%^)`E1{yv(K#EP_gW>r4;D;%W-$_ z*hV>8<#=e+)OM|kyg<-l<$k}K%K)=b*4~{oOkVYYb=uDBNB$v0`Qg81ebGj0dr75%$GQ|8 zlsXfXH#4$HdZ@aZ0)t*IZ8jL5y({!{|B(t*F{=OWmUE@(uM$y{Z1DH9dGPSBVavWR ze7dVtmPiNh!LAUIh%i`7r`V+)YLz*fjIuueLzM7AuQl+<*GhENle15WP59O52kxJK z$*72anAGf~Qe(9KbVI7ia(%r(Lh)!yD8jpiRK+?#hqw}*z-ds5?Dz`IpSk;aydTQF zVgpIrd@)udNrf=k>|)etRPRqgGGCI6)$v68kw%uWVqF;p2vKU;g}l5Q-$3T$Sx2C7 zOt9~#u0aN5nkHsnBkRnX>wxoYDXcr>9?LG_dA@Cvf z;^a9XEZZ+|`br8<<7DmPnu;|FF%RY~F4M_0ZFofvybF@qT71OX$G}S3x3zS6u%jUK zQc+`#H(O@nqZTYc=G8epju@Mc+_wCHpBq3~Rw9q~_{yk$R*c%8$*#iuVdgiBvoC#P zlL8_EFRA@2`V5yGiET|Uuzz7Qb};AQo8r+^v#?CGQolZ;^?tHNY9&h&=M4&4#k9mG6yc1O4|$VcMvfK~ 
zbb=UsnafZ#&Y*h7>s$)Y;2T(L8g*r7cDkKHr<}4vI_KXiG0)(<^Y9YQP&#~fLuNHU znliV(T5H08=~sf{+AR@rI^guB{5CdjKAfRj^C@t+DAfl4Vg97`5RIVpaaF|9mGLvl zNoJ&N9&;yi+3hAm{L-HnjkT@@c3g~TE48}wyrKMcu9&O-%vBwbZ6mnCHP!3QHdCGG zl-Wb4P|arMdXto0d{zU7PFc^9!(T;#sdyr?aKPJl>XLF^rZz~g#jz9vKUU${t@)Mv zEWHC4q6HV?TvOGDl>Ox6zf3(1>_=$MOnN<%nMs^~xJ0jqYCQRYH}tf8tfnO04ADCw z$wcXp<;HJ)rgYCC!QGaSHF1S3;XH8L1Ahry`(MW?VPFE^_qGW;{B~07dKRPQ9D4-R z6SQjbWrU*kcU=m69G1-|X4=$ZuBI<#Pq541%EDWeZ6MKX!?}v|1a@)>y~2%ArdO@* z&CwO;%`?P6%+Ac2!`KzbU~(cDIS5rnT}CgoNycG~e~JcXDk5)Cfo49yFN$c_xlB(a zU{l6jU<(3zjf`hCIWj!Z*pZ|2*?xry9Mh!g$_1PN)kMA34Is(h7$eUb{+4FveBivV z#N4gk=IFRSp{U)UK8;uCq7$_mhSu0JbltUVHdO{I$2bw*M1;o-C+tS>E*@|VvggrE zSbXws92a>P?h{Y4aZ!HNP(me^@T(f8O&!rC{@-OMH8-5;GS!^FpfB)#KK2J05iY%| zb=jfy!d$&}&=dL?(>-lKWo#a&U-d^|TeEekrhj+yW!dcrul{b_a{kC;O9B9nGE!aq zXK(K5deLx7gG-EGQrzCXs`}h->(~(>`YYFFq?EapG$evDziQf8>*K}cxg6{cUBnKh z@m;_Q*UcM2`6iwpmsSf=h)JG?ccQ2yQb_}Jo!m|$o)iuL0A^?ejb2|mmX$G0xJ!+B zJw~-=CJuSO7IQ67n6K=g(_XQmi+X=cW}=*qb6qjb8W7}!Fx~kJD<*4sFhAkF;;Hw| z?b09@;uqyN)j1<-3wAi7Zk@7M8(uj^2Sox~6fOUa;?m~S!PxTVG|6c&*02~i=ese^ zbKDo-)Auf6@oT1425p=TY1S_My}!#vm_~E!JD7;$ue4Ld=Q1GcsBQN?H=wl0MQwWXIjG%8 z%*LKMD0OO?^TI8=Jp#6mB|1qVZR~7*-Zt;3S&*W;IOy~xX=N}q0PSCgp@ycuhuJlu zjgtuAw+u#-vnM-WIRz644mww1jVa@{NxQPN9cG(8<<`!nt0FrDU?QAFwS_XgR<{!6 zeObgyVgh|dioBFJKZl&E70bnKX9gY<4j>-j97`gNRBY)`d*@P68KC9waindrYfKmD zALkSE^A{#9ur_{_McpFl(cHg70D)Vwu_lwTzdmgINXWTo8V*f_KJT{nB7@ zMqL=B4D?HBsE|mzfm$PjsOS`?9~LT&&%IPr&gT|*&dSlEiPVnJNBgznCi?n=S{dX z4|1PPM4^I>O>5>U#kVc~37IIk1V@-PLj92n97~ZEH%Qqh{eNE0v{;7m_bDweeOf?~RdqAjrcCQOR9 zOjVhWm~C3@xv*9>YQCSDv-pAZCWE;R8kmurVx~24h@UWg4^#fB_)kzpaRFb^*=B6p zX$Q#EsoR?5=2KJ~IUtS%>~HA>rxVM(*XN`lH3|PNK2ky1HlqKkX)3yRu6IZf+54kt z?36W$fg0yB0&5aJ>xuAk9+WaW*Ok`mA5_tUkrsau!k|X_no;dLVEK8riqTn&Mfimi z%86S>zpPX!VOD$(c;EFo3@4xFA zC}x6e5ov_E)=-=--v72?pIEdL_~^5W#-B{=r5&W@dpfi0!5oWE6~UeQGnRu`~;)Mu^ z4W5n9r*)y_L3Z)vS%;FZa{BuOuzb3qyN_;SDgTD5?R;8T%>7$(JSvjaPbQvG%ih6J zlIUZ+6oC_(zT@C#e5X~* 
zIr;K2y{J!GwO{`@rR7=UdeZ1VHMiq#WwsU6K{STTLW7?w3{T;*G23t1!GU~vYs4Zg>V<}cwf-~*8=)+-ekG)zR`A{# z8#wM3j;Uq*9IoIOcj})Ds@hli9lsl~`w`9(d6R^7TV90q_F}+YUbR{z0FFWxRKO4! zS^sD1YH%0?mh&%WK<`U<{T4MW@zZpt46I%--2ceD|sFD-VEkFWIX6nO?U6|?V? zplzPSG$&(f(!Li)M=4fOb>b4C&5ftc0=cWIgCYBBe*oOACkf@`y09}!7mq4vL?JxS z#aalzqC)B7lV+lcXk=1Gu{WxPqRVNg4g|F-bj?vx(;95zhfpV!VJWZmSFmb-b}F@C z*Sv(bef+BFc#EY#O=&9m_a{A?OcDidO2VVQ*LFiIf4a83)Uhf^zS%uAU0_r z6;J_cm)6FwQs^tj2`Jq+a~8;f=W)@k1OB=TALC$zzyaZyZNhd!ek$KkhXoeRfM6(Q zs_i79uPT-s%B8|Wt~5O3&Yi31zMJ81)Hp9FPT+1nYzp*n_jK?em1mG*0c*E6xfJ+G za-ed4gilA_nV_8rP*vEP3PMqrz%O378K;Qq@_+>U{#cX!%}s?7sy5v%oRJ1iQ2a`{ z>)qHjn8=Ooc8mRd-b{GuozDtvFCL$C1;upef!fT%3Iihy?z-997x{ffQ7lM{8B9J zH3nOD9sh~waq97wd3FDs7BmUi?$Z*+iuGe&<=elmPIF4ksUhsDU1-FJ*Uy_oV#R8l z2bESi^pvOc0<7s^N)yNKv=(lqw{TjZ|sYk(K(mQL5M zyj-i4&u&U62U<37(>=X}{NFktfBgA4_=H_L4=)G1}f)O8aoF9#{w# zhC}%YXzBv^izI5`?tmIcboSnl3oJjW7TMQByu8=LY6*pC>?8jM>NiaLEHaQ2JLNU; znT4%LuJgOgjD-I?kSd7o*1RN%gbz`66EiRlrEZP$1MV!G{sMx%1Ae(`eq0ea0~__H zCz=TVfM={wSJFW{ACqFtlH-&@zO`2L^52z`66BIe$F-oy90fMHdSfDkaMjfHx*+qFg3t0Mh#fRo zp!X_SC#JeTUa^n0Yd^*2l_(4DYB0+-j*c2VybyhZz|c+JGq{z`h{4X6t3?z#0 z$<%k3z(hNeDq@QH!3lk+NZ!}~ZIH4S6rgaSjG`7qI&E%0uMXg6savWvG2<9%V7*NATd;Mb zVxfHQn;^q4q~oSCVf=&jHRLp#aGf#G@H*Y!QE4U3TR+EbFDY@)et*2ba{}6`z=vO+ zJdNq=yg&bUQG2{dCQ^^mfyqp6)o_P%Q72r6<&-sEUC;_HC-1q#oa+~HZzqZ^962P2 zFn?91(hh{Bzh3Zn+j4~xPXJzt2r1=Qcj0~{J1FSlc{w5L#2@j47f0h36w4*rA;BwF z9Zg%*FypWbI40j?Ae{{hn3J{$9=GHl1=U1s#rM%FHT!m*GOZh z(ot@`??IG7CeKWWiNDx?8@n7zN-yWb$dn;vl~fKFb7q;2)GE|_iCt1{lWZS(?&t*7y^5<7FQi>XpV^jq>r{ihTaVR- zd9Gm~jG*I}Xb|ED#!oqo!0#wWVXo0T;_iAoTPe^saayK zBH1Jc)!3Y(j8Fw?>PC255qozO9A3m(LWrl$92(1#q*bXC`(5U&oy@Ca*Lh2F~X7IHuW=fN+123AAR2NO=%O?;BUaC+g?ogGS# z3+Kd&xNnpip{$`#w$92;?Qu5x|IVbvr&mcmUOfa`!GVNlA`KUHgy)W6tZlNfOkdSV zYhXmTH7$kuh}AF`bi@f>^OvB3x-9(ikhzE={KhZ71+Q@yDQ)eFXu7*PgNd@VE*pAy z6RIPA8yEVWfrRl;*Cc-#1%6u&h2Qx_v#9anr1^cX!!2xC z;7}OQKAT6Ob?hR3jYXBADQR-}eEH(sQ!BrcLe-!`V(j8 zah*?b#Kt?5dx%w@fmx|TI5swbL6tWkSBS6>Lc&XQ1ar?aV6 
zg&Pil8?l4iBRZ56y1+AjpW(E-N7j?e74EEGSA4x%k@8-brAUtrv`1p0h)aGyYg;Z< z@yqr=#HnnjoO1e4ZRffqC$sYQJA7K{zt^H(X?z{OcHKco#@_MBZ6O7BCd-*CyMzD0 zQS7%x7yn_gN5-kqaTpW2qKoTKP6b~vX{pm1`gyWIjIP5Mjx)l zZR!KkbxeE>N|D8OJYLk9lxg3QRo<~V&XinwmQ&nj=!^!?pH{aam_B0UR;PoU)c36_ zCMn5hkvY^kPq7p#feLd zEhuWfVGCKtcG;)j)sy zaaqt=q>lJJL12@t!PSBI35W<5hXu-Gqw%=4pG4xeZTHiPu9O?+gs?HWcfB&~tCSfHtjlTh+DP1eQCt$d)RzP5ohk z504_W(bmqNYV$G3^nnsH80vlC?8A<3p{#Pe$e)+bhPXex*HgfcZ6#M$MHyNGzUm!_{+=X zIRifB6fGif*y>nJ(w2J~_X@1sfg8f$bmb9jmwwPdgVl7%Nh+xfRICRd99Hn2lZr4| zgrQGExB7j@t}us3qP;=RbaLwS5bBAbY`(Fpu|wPX3*2&@C5x_8&vIX}4N12J*(`Q0 zN85=Km-ubjHJeH3R~}7^pZnBvSPeFp!FNlh7k=x*%PxyAUq@NM&jxNLO~^Vwfd(Du z)tEt_(g?mRfn0F`2i%Wnq*m3t7Xw^A>9V1 zyLBSePYAFvoA9+O#>gBcW?@Xc0)8p9Gv6e`uxkpiHN-vFeJ?+REt*`6_DGfBA{oaG zr84QbyH|1>NA9~Wb!WT1@}sZC;)BHha^~;uCK`Kwo1pyj`tQ$#_HD?tzvRezX=29> z^yf_H9$CKEt>fRBppHp+`rDT>c#xqt(s&-<7$TYIzY!B^;R1Bp*&TG%{K_w0 zOI!g?hh2m_=o5TZ;A;2J>AQ36#ELHkXDjWW1IcBINA2iDfN?c`n|W#WrQvTM&!u2gXc4C%O-eWHqM_9mGaCkk2Uc339BVRSE(SE0FtY6 z?&ZpUXt?Inc9VmUWZ>xTh90&ov19k$WGujg=-W&I7mP~cZiiRr;gqL24N=>14$sC~ z?Gi49s0YP;2K=JWY=P(|YJWH%)vmP#WR8`0eKUz!{QXf97TL3O7az~;`DOo(kV3N_ zjs%^AXDO&>v6r@jd5n9j3*P(eJzMB+UT#s@S*Uz&xMgzLs~b~4`BFKF!e`JA`y%|b z>(t$0kAidro*XP7w$LH)N@nF)XoMgW`{9JXc(Mn#yz{W2ny))Bz9aT>c7K7dF%wlF zXkl7_zm9$WcaZ-KH^n;f{(0ZZ!{5y6faMk8V?E~sc2O0OS}q%JU>a^@PLb;e)3?Q@ ziwB&%g!{YkmXfyk7|iV2gdA1wafJsPM4;YS6(gt_>OX*CWb~Z7}Hwcpq{J!gC2Mo~U+NgeY|hPa~SbYq)e+ zzj8LYU8a@4^>m zFgJ=|+|PUAMPYApSpQ|;^aY4s^BU`HfT zsZ0v?5TEW3PjKsfb!TQxhHP?V(9kTM8wzEE8%jFmhS_>%+NAcAZEJyvCD0&<_~1~S zVuaTQ#7G<9ci_N-8&ETZUPjhoV>*Bwv8+m!l)OP*d(Y1D^7}gI``qSL)OKJOT%P`? 
zdfd&<`K^8Aulp6~Fa&Eg&jXMVS*xF;9ukV@Om86YznSoW>jG!%adzW!44pkwEJ&Nw zA{KKpZ#h1m-|ejMu`Vu!lOylPPC-3-xzKx?vhF<*VDs*WH(E&%nIpx9-_uhkSL#3H2WM~b8g~5^;#3RSmqaenEFQXQ zA`$QB&~7u7pS<^7F+cANyr=xt{~%ZUn&vU2$UleNeMNo&k5ByvIaLWUv;wSA485Rs zMH#CZL$~mQ2tdr3i0c=?I0Z~yh!kW znDEyxdiuUV+kejw_^w))yG^-tg--#^P2^FfkciSdH7@h z#8~*w@RW9!#*)P1z2J*R{SqMA@V3)8W ziQ06fR#jCo#0lGAQ%?>;qQINCExGT$2%+wb$mXfgV2^@4SleXCmG{J#NE0& z*e>FCK~&D#okBVyWFCb&r-842I$I6%s210#4SCm;^q$&qmMC^WF3|yDzNeTMhJBRj zs8XvceGx)OT@q;hy?Xw*<=))*K+l|X|3bGz4u96@!#-6Q$)}HQG6)Mn7}n?y4WLy8 zn+x4P7umu&wX+3bKH`fx77~heLfQy(1q!W*V^ykk{6gd!1S(MsAexivX`hkm*8p)> zWH7JqX~ftW+J(1=uZqgi12IHDEV0GKtJZqpp-|u`&>;{ZKjkN=r+m2_IyFy{=+yUV z>POrfgXaKYzDAvu#3x$!1;>JTgpt{^Xp~S;ph6)2HpkpjkcbHK*ZCRTJ}tbydVl{QzdwA!t%Y__e&xeK{&S|heRgZ1|#M)hi!u)#`0Wlec>L9X0QF^~vh3nzB#^xaI zSc0mL4s+E*gv3i{=0SfwOL$z!NQK~z(nR^lgh51^C<2siu@>^k;CV`1BlJs6?OnOF zmd?L*tr*(}4Y{oE-c6=!%6`(!SF;d>)N~5yA%L6ae4=}W&R^D(AgqGhn!kWQ!q6167t^a7vL!vZ*JZ|Ynf$F{o%~y~{S=1dhHR^1` z`fKnU!vt_eE8t%}8&}{XB2EKnn%9@GATFM>FCSGcll>e&Rv{q@qR#OZ1GnhK6l>VPt!u z<_GTJ{g!bzG8)hs2NpwkkFp~;HCE<~$Y$Z*Ct4b#tB9@Tg9$G1@!-myq&%c;1M21hfFra=J34!E% zmplVs0-m+W()EW8JhOjtXF&5hT7k`$;jMuHj?E}(fGNh2XhmC(VX=noEw*UL~-ubSM5qA*h!d?d|O>Qb6`{f(SQtO(sei$yT}h~znCTQ4cQ%3}}(7G8k< z9K2kj$C;SJQ1VG?d(Gv#LKeY0o29=_RuYP^eluSn4DXnZ7#NOFAUr8i70g^8nL>qg zTfrrk6UEo)>WZ~`uEFVBswV1uF4ZB^- z0Esj8^`$}OO0}YDYp0U^`X7H;+7h4NUi@&EfB7Rq3DEQPw{Vt!2!{Ss5+Kva<-p%! 
zerK470O71C>i1dh3i1JXRt~xa1ov3hV~d8VkuyKy>SD-3D^xC+P9`W$c;y%sx#^qd z=CI=8fHEiJZ3Fska#Lnqv!|~fbq{=o^pi@D+C_Exg`TBm3%Y0 zl)T2n|2t#22meB3`>UY~1~d>qvl9!ktU5k{5!I$o)Un@5qj56TOOoO-WdF=>I*``f zNJQ*AK>F}kh?AWj+D8%2GdPo6H#X*c42En%AZ+o1v!rq*_N>Y?pJAmqk^Hl8p~U6- z0lz!LIHrIBjeMe!^PKGLPP;X}ted(C=2m*P&O^HV?B*H2+d}DO&wWpd7S)}g>t~1c zl%2oHLmuuZJCt@ZNArwh*IRDnocye^xj2ER-da~Dj&FJw8!pZ}=L^pEXBYzJr4AR5 zuP8hCFaADURe0TcR|W*!_*TZCR3IY4Vr>1Xm-hE``Z`}Hq(HycR7}I`cnL$ceB7Yy(rFwG=UbG_cN4wK7WUXT-JVrO5{%P#?Sc7D zMtt{o5beF)&Ooa*S2D`4WX{q1W;X&}t!$>mrgoQ?z1z?ivlOIV#Jj5=8I+(!3iyVQ z^#K2*OJXGZnVEc~03j=o=j6Oj@uKD*Aiftl+t!|-nKzF`nTHeGMf3dDA_(K;ouJaL zW9qj=??hc+norkZQjza404$VX9XN0=9&i&e2*VqJ7Vh=ZX2h&PD)2t@=)o?;BnXWo zCx7-*{BD&v56-z78^Y5yj$H%MlwhQQ7XfzzKhbC#4O-O00wFcE41rG~&@6&LRu8)o z?iN7;CS8?5Wb6%g3)s$gJt6BR@^vKr>n3X3h!FlGlN6vniZ_ecMRgJdr|*|YwAtr| zFDdd>gJ_8aA0>8Q%X_Qeeb&2hq?|#&I97}bu_jijyX<9kt3QHB8t9ET07)DgDR)D- zs$|KNPD`|wHEY3RF zmt6NA*gFc&aw$(6tX7vu4#A1=pTR%wXI$E>#X({XNq8O?kqWN^&PA{Tm2LF;g?tV;f7YsG z{ghoEK}da*e7By+Zx`f#2lX)}p&`>OnZZqhu2>SOEwYcG$8WongWQdwAev1F)`u7% zyEkY5iQP8o)1l}yZlV>MWuN-93gTwZgpp1;UaxV+B$YYPqW+ZM8f~!CfaB;uL=+I* z0K~oPNeK9J#I5<8N>87-GZHa)m3TpuXs z)ujcx$Ms}!d;@URhN0?4s3CmK2=umXEA9n+2Y3VWCY@;?*+?dWT{yhU00Cs&7i1Bc z!=U7&ZmRVjy4aOUs0eKFXyr zVsXs_!q{LLJe~a^!&VSB&1#ESRDaWZCVPL}H%mE!TxroMAokEaJ99kaiIb=Fybv9& zi+HpzatyGwVCZ28Sgcz}+SE-`PbABAevi-4h5fjK6U1x0a; zOhIZMn$nLEU9-6Fs_5j8Qt0!*XIxX`IP5!3$;F(;^aQ!#?PwE^`2%^h|&FdFmUVxU+6xkoGSB+S7vTd ze3A^UAMqdft@{;tmLPDE@3a*1*y|x<7ado`g`|ACKFc~uUVr4enJ?RFc@KkM_V4-M z6@xf5GSaTKC4fy*rBZq6UPMMUd&>btcFz;8&2|r61s?B4BJ!sq3@}PKDa441*Ef8v z32<-*rut8ub(I-`^grX)@D%+H);AgTm@TkY>w&}Yd;XJ3<*|X~aik1>k5jwgr#VT( zb-!f)99rE04UfL@(T|*1OO^HpB(`OZo_}!viA131XhV%|)4l@1gAD91|NUi%sg@7l zvQt}pO5X4CIG{9*5Xzru`u}$6J<8FLIW=(7T1+U=MBfEv1R$;$K^MFkm3&E;>_u3L z$KhoGisCH)Ppd)`OzQB!Hqa8Fkm}UY@N!INuTeqXdLV)Bq1S|sljTrhpmg+y`u{EB zjWaK4<+?5saCDm?un_2O6iO#)jEmRM`fNN+~LluI+QV0-LpPKbAAHYB9rBp z<5L#9qUYvEccu%4S@Cl%GmF3Xw|8lt=?C-?kP_<=U)Jx?9Hf}1)_RBCv_whj+q-en?XLKWQ6UC7Sl 
zSKQ}FWHe0aXpm!Ldn?J@l&#y`8$ZX>I)kt$x#$%S25@+lFdnf2yuCLiQh(BbQslbzx{44L#yRqn*>RV{XfaX_l zm!9X0!C`ax-#uhgnZRQ*T$9T?{>3%chc)GY2ZKkm2r;~_2LPKu*-YUlTM}K3UErdQ zV&AqLAHmCPw4YQW3htr0 z*6$*Y)N~L@Y({63#h0E}Zga6dd(3txIgv~|aWE^kXK;Wuu^<3VboBm53l1VBZbc-j zvVT3vfr?zRU?@Lu&DI&7Ln9fS2Qe0QVyEt5K2*l+A~dbW>~t99-onnpfUFi5NW$I1 z73m{H0Ut&Hia03cYr2o$Eeln7ktc;Mc(lNFv@o#st)R)FpRA`h#x~Q@8J#~26S6u~ zL7^NXM@w88;301dLfYs7O}jVj$D@PRIhS8OfjQ$Hz|Pz45+>qSzMV}GF^F1y@Dv@&s=~aCg1kzIiDo z^R!~ed!Tx9KcK86erc00{ui82G_2WT7CT|&p!6^zpR{}tF>%wt(0*V@IN z1d`hz_!P}!Z76`wI7(YAtewTFjTdP)cbUWFVUdthu|FdCR5o>g z{V++%)c)0Yr#3C|z_8$K`%vre%`2umlMcRR&QO!0IdAmNEC8mcQTd+YPNMGFpCWTK z9^U)YY6EE4y4y+Yu`XR1A>!ja?t3(gx4=*y#RgcColN*r$uP*|hD3r71=Jgo-e#_M zAohtQ=FSb{QAKb&ZWJ#)(=LA&oN|<|(IlyKwEGx(@gUqRrU}j+>$s=~?{>f6*9P0d zoQOk=h2H903~KixS)@w+KvYfdiMG7|!IWju95hcYA}_>MKno{wqnT7*O?Gs<$9E{0 zGvpXLN230AuU~v0ccK>DGdsF&lIBhJV|6@XcUp}YQ#!Os8q zTe9fRS;d77CJ;dP`nPTRG3Pos)2D%GHTJPfZHME!pPgxGQQ>M>BURWtVi+JL1WaQ) z2|KaLD%DImLGq~G@ed+jyQYx9CMx0Dl-|!C=`O&nde=vnzMb_~(%f!+l}YP=WC7RR zYK8bbwnIm(b}`t+XFx%;EgP}1Ybq8)NajD`s~fmq)i;V0c={rmO1LN$4?tDzimplO zs%LdwfCA(UZ^+4&9fpcn;W|(Dn4kbtJW>hb$(7p?_X|Esr$63`0JI&E;6n9@EM3cH zR1B}VLOQ~qIX&lsCT@#5b)=zxQl@VwFf6zAg-7S-Y95b`oR9p`A zBM5H|t%s*ghtvI+mgR25842^<;jZ!@+Uj(p+dPt#YsBo(yymT3Q>w2vGAbxo{pX|g zUE2PjS@8$9r(IV+Tbx@ylMh{ak4_-NU-KL=#z-s)nl*ngb3^TdcWSxe1v%w4gEIj( z=paa@G~JOt4-W`y?q`MKLhEmPoZ>?%wjKH4fxT0wvR26HypEkVC*y#^yOFe=v>jZt zJpLh6@QoV{Rnp_0=kAZL)HYK^nWw{_&D#)T$!~qWnL!Oue1=@FS%8i#r1wla@SWbcx&*F}kzjzo- z{fZ5L+r_jTwK>an4+p7s5Xs{04>8?)h-51-DQVkW@pZ5#g_!S^Zo)aWD_5LN_j-bx zJw@#&58!d4J;1}>hw{45AOPS=r(;^6C z7SX+Bh{AUwgcE2Mhr8S3b9$ar>gZt&Vt{xq-&SveOo=4CZmltF!PWQH>^Xf0_u$E5 zjjDZLHjssh0DTbtJly}#^_F2(by2(U+H?rgDN;&zhlEH80wUcY(%lUkq*J7n7U>k} z-hhO3cPY}b>5gx0p7)&h{P}+ArPp3-&M`-i`=0B>WtkQLGt7{;cD6`hz=6H3mLAJg zocxBd`BY<|l;y;Y!#%L%0BkE10=k0ia}sOu)ROp>T|DQ0Q>^e~!|R*lRR|frnTHs( z!k~x$U0UTGm_%mMe~#Law>v9vpr5mWkZnz9S6t^u%>QaVprD&X0TbZDw?)4_NkP0j 
z6AK;@ze0OS&2w=g7~H8Vc0fvh^&N3NmLNbcGlLuOYJ%ibRAr-pLK-e2U$Yv*i%5AM>n1(6OAhF6EcUHi2&X8f7Go8xmb8tYXTV=(2d!u zP%Gcbd}0TT+-uHnlBksz(26zZ?eq>fEP9_K&S|YQ8159L1Sa)vNS|vlfs|Db`g~c? z-whS3eNP9~loYCphZR4m4hvr^+)u}aazDpUK9%4(Y?0^fwlP|ae+u36Gzfrj%plc^ zabQne?37zzJI&f5f8x$3$2YwvGnW(?E40%UKyV3I2&CLb?QB7seBr;0f3mB$@_`(y zozl8z5DoAqapmdiC!pX+q1<;Q+1Omws@iN-j!{xt_LXb^_f+SJbok9*kfbJl6pfw^ zyjAz^_EB{c)=2%S2g6chFj*!^+-p@Orcb4#Of11tPK6))6tV4lk(o{vM@zV*wZfvo`3hr8CcByb z3QIGvOC7dDF;3aQ+~W>*Gx4m)^SJG&?+sxU*GsOLD|-hWN8G>cnTm{WdZ3EIAeOMrWKo*Ies@8o|T| z+gTmG6#d-GoJ()l|FyyKDD{;x7USZQ+}}uFI<5@Dj%C+}-jY+V zqHUGJYIKcgE)3Z5Z>U5>s2YN2^rPHp^T@ZR$wt?^863PC!~a|6lr_ISNH-zU zV37Jnh??zAS#j{vI73pid)4E)2FXY@%fG!K7m4gXi=p@T@Lv0kt2KVAS^cK+PbBqp z0BI*#Ta>GXhdw8i%=F85=hz455QMp9^AVwT3QW?T(`0qboQvcujSCmyFW@*}yQdqB zsxrBDz}=UpR@skYQHzGcf0^0}h^VUO+DeNsoiSB->Xfg{)2V5M)WC=#^kReLvO zDDr88Y`wkwsBCaO!wP@Y`~O{CC0BRJSHy`!g2VrEc-gW(g_De6@!dW~ly_s0vAkDo zFt{azmW(D+c_ap@);$Z(?4a)^CF!emc0#)KKQSG&fcLbM_E5xwo=%oF7eyS9jt*$* z;Qr9%7!r!K*~in8C#vL@wX4^ssm}xEyBGT9ExPNMI)|L2ulKYJ6Fmz0a@?igVCg>$ z%|JU2p6Ep=wj8Z@cx7oX*GsnzWS0JFvw=fanFQVUGqLWwuHx^);{V!y-`Gt5vFcap zC;ANSdUg-$_-Tiz;D48e&;QT)J2*&L!vJbjT2(^TNi@<1GfdKml((auMCo3kTlF=# zE9W#;#ccjfeUbq8)lcD{RWoG6{=X}c*62j61o}yLe*1+S#U8^Y$1MIo(XItz0mV5N z>NI{~4pO`)C>-;7$VkjX;df_!r;ZgkFwzWc9VL*r^S?_`{U)y@2!^x5N2t9bn@|p# z-InZS`p?QyL0UGmxr}_zB-38wb4k0a{w7y7&HYx_%TMlBKIo3_tA}iuRZj|90yzEy z-E4RyKKKU@{#XXLgl!dc4ixjj91Tr6v;HKew%vy+NToCf;$ zq(Q3OSJ~A~8HQL}gPqP)2c%tPw(%wGEZW5O2=n}JQQAwM{8eDCd1TdgV@HgK=199o zqPjBK9{$pfcS#ABder1uK@s&+5c}f2FV|1^rO#%V`~v3n1~5UI_DawI5l~JrCZ3@^ zQVm4|c`#ju4j3rZ+=dm+g$^sl9qSy99}oRAX33`O`(qDf8ZWV|&dsfSV24mP3APLZ z?YieHr^f$Rt8<>XRbqmjawI&QJ|lp2EZuH`go}jlXr5E1Lg?}3gf43aZh*~AK%!ti z>0W?jd5^W%qi-E9by&&!1OHb*GD_dXyFkMwCUwJr(*E>RkI|r5VmoLOrExq}$gK=f zN>iPmS2xcUy&t%MCkft7`=fvNB*QYRLER1hf0lx@kXTbm&cs48#|ew-g_{JHc|$57 z#J1(j%H&3KL*^ML*KQwO`DnId!yQtN@}++!r3YBuO!rZeJ}LAT_`hmMl$7Rp+!P8L zSo|dZrNr{L?woeAC<=`9XMZs+_DmA;HACXn}f8TT(%0|fl(B(JOf 
zSG-<$bgyVXqQNV(GQh&0@#ig|OQ!%yr}HuO-N4}?ZeL<)~=ec$?Q zH92WYDMhE&Vcz8U;V+8qH`f}>`Nlvwung=DTiO(t##8h6bpET2z%;fH30u%Pn(M8) zXUJd85-N;N&k6THE~55r8~|+Jw#+GhjcuweiReOZS__>~<~|Rlprnp*U)pk2$W_$Y zPuLmzTP^y(qS8Q`WF4YCVYja{)<3^Gg&lZr&HEqem>|JmnOaBn9ASPF$GG)>N&$hslfQwEJgDLzl=;YJ(; z55ZLV@fGTdDy4#YGlkP-^roAOCw4+}mwnWVD*hj#uhA|gs;YZo;s5eTOu1{;w;$V8 zw;-FsV~Wwti3_VnA8{gmmk>V}z4d+RUD0^|YPm>~_iL|l&C11xF$1DT zs&sti{+63US{1~SQ_BV)L1A2UupX3rcNCPL#5|MA^lALe(qndc=IR3~NoZmeM7oU3 zjvg2Dz#W(a#R;C3mlVSxdReUwoTy=IXd516iAE)lj3C9w4lULW)@VG!ow= zk|hbKv*}VW{>|=M!+-&#i8h*(hf$m}dyITD{@%a@2+`R!%ViVML?>>x0(N?)&4y!r zg6K?+AvtEWMf$vof0Xp!}dp|WcOVHd*y~fWMQubIMN{Gg7MhjfP=NG7et0u|h_di1| z|A|jG7M4DQ3-%1c9zo;9e-`EB9BIW+)jgrz1*cPyk+}l>gU`Sw0NmC&WOMDu@e6rZ z9+PIZxPXB4;N$&xc(J*asISLfqPyX9t|utI1uTw9*eG#*+^Q(XBGQ1qk*daNfm>9{ z-D+Ok9pq0W=ycUxYe4ejK}SO{GmGfAgRyJ}^1I>P2{{P-Y*Ju#QA9i`6w*Yd76pz% zH^Oh8CU022k`7S4^)6wXPqqq^f+15+N)y5M zhgbMc6m=K()%M1i5vGOYAeFp?4nEczWHFX;Kklq;_iV$}W)}I?;W2Eu1=ah!E{d_z zL}x5&uD8OXSA$*XakLx=yh#xR7+DA=1N@JY{Mu;Vjv|DS(b!dQwakhC#V_($@11)7 zwlmB&tX0|rHp6>tWp7zbd!)vBqqo8&0(WT9RkH&jE>-ec-FMZe?ru0hH$_(3()__#)kFC0?nk-tZP%3qm^6N>vIM1eU zbS;BCujPH*Ui-`unL*)a&^@if2a!YN9eM9HK#`L}9A)0m?GzYzz~@nz!9JL3CFvNB zs;ha+(HJg%3rxFKRscsPZvuBsX;_UuuIXREX8jE{YKS1rL0vQh57U61E4(fI2|~aX zWB0)Qf}_G-Kt-CKpRcanWYdxGx2+t?thUC6@ugyi(#x)!b@Bk8qp?PbFB%ei^ep0C zF*$6|Df7TujWMU(iJ3&PuN|Dget7IYkq8-ei^`mJ)(bgw%CU5OX1Ua#8QVp#%*Y^| zF9S<#YYfHdZ?YJHaWepBv!3B|T7jVog4ZIUSLT^~dvKd=0=VH>e8O_uPQ&r($NBkF zLLZUSssOJt|HJ#Hzt)Gf}CT5V%5@FZe0uKgySp$yguzG(?mTO zp-QLLY_Hy@NS`%6@AG(r`o>Q-$zt`R_urG(s$;a&H~jL63$>Hx5qD@-bzm2wPxBSh zeK3g2GEe}NCnom9e7XpOnZR=h(V^n6D!#w&;5Bc(h4t^DaES0Nog~IL~ z2T}$1%$CT)WWks@o`~Q6#5LPq2m_$knD(qVTVYa=y0&$sYv{?C{=~7wsA?P?x5&Ad z-1}p&?;EIJ9bWsIr9V`Fd_{Da20yRA{~-)cm;;W0i4`<7G!_N@%>PcG+ctcXlnE1& z^4(&O@>dgOJycjlR__|D_Q+|F3%|Ep8)}+mTRH3Imkpo z#E*iCE0la5#LkB&4eqMSVY2%r=&Sf11Q(u7om&vIoVQu|sOol*d_SvvbBRKBPvH_}fE%Dq3?E558tF5{L!feEDL^L!t#f93u)++?=tJ7Ab<@!8c%op2O zJok^cQjcay1subkZ%0|eeU#<8!li{j!Z;=OKgG(<`GiTbLvdY}_fOlGM32arN6}II 
z3CY0*1eU(pcagvq+B=(vT0;rkN6`fT_tvXvCKhSqobQ{N*DCjRi_KO6T>+;*RliI7 zaC@+iWr-%GEcxi?WTYYHv62Sr`CVJYy)`ZuPt{CS?QWVGbqHrhAW^d`b!G z_dbPR6yC`AoCw*W9mv$I@{`vp5n+LD@psUB6Tcge|GR}%K0Db-&Zk~AAEDAc9a_Gw zhu8d71ZfvpM(Q(4#1S@xBQ`(3G9mqn7=X`dROOz8QaL*=U8isCpt!S>q3;oe>jswS z(~vQrF5U=&EdKxGqPcgRYQ4-rHd+~pG+cO<7b%;x@&n)+^Y&3|p5k0d!SL@Xr^sE= zAnNw!P+cu~;lW(7Nj$eN4;6@T1|7j9GZlM=E!O`LK%TWq>e=pTmhByTdw6U@ny&dR z_ID{6;xVuI*tzt-{L!dSAGz0+Z-01UGedVN6c7zp(kR4sxtJ678`b%t@zg>j%i#&A z7$x=*C@w5~NL(2)M%?;uRbM{b$H3=0mXq4%zp~~og-^x0+`M0UUR=y7Y-D*z2cvw} z;YA};U8jr+2;?49!1ZFx9c6aF7CW1r;wAjeR98fN-;H^EKj1smXQR z2>D-*6RT-NO}jfvc-xiYx|W{s(5Hw@ z#E^E~z1cM~EGa2wy-&KolNoi72+}oK*e}G=4j#CnDT~k1GJ%i+``l|94c!gzm5tP0 z%Tr>Z-U4H1tma{hYe4e%RcRUPwSR}=!syMkatg9_0>U->)HaE z*sw3|uEj?LAek9dZ_qnUNd5DOkmiMS$a~mp8ur>Nb>k|_Pc@!OJ5OI z#<)?hJ2Z}JChcvb4m}(K1xY|l5U0NRjjXO+XrwaJ2i_^-^s=d3s7|Q_hZ7I)`*wbpR}K4>2+QV~*Xi4XPI zb`muK#dl+(7duUgCFHzrr#c%9*ak{x!vAxY>5LMU1!~K+f1daq9?AMmt+|}ujGJ%H z&0=0(e&5RR0WIWwdtI}Q-@;@t%_YtHe!diSysfC2NJ}$X39RAUGmruoky_?aNzRLb z_&3sGG3VQd_>=Fjx{9yHn*~;+aqU>_;?i9#LJ^0*_Dwe8=rJBt&F8LwdU|}WVvN|W zsz%rSqh?${#A=<{svGqA- z(`gLPR`hAe#5|zk-kPBD6`L(2DZuD3HHyY{I040jM2gWY)33u11 zDr025@*I6aAt$zek_Aa;VdQfCPBhTlcsRLIY;3?R4MK#Ai8qxvD}3H73~|&_OdL^m zs#eKq>*swTM^ew?P$OR;n_jL&gGYhh`LV4)rSaBYtYy#HKvhsttJ-i^W|>ZX|2ea%*l5~tuBb#=od4^4J+I%yjU8;rpPsizLv_Dk zz+b6sM?Usb*UEABaH~;f6OkEiw$(ZE&Q74Co=*AC8#n?L_w5?L)i-voRi-(gG-l$- z$aU2Dx^GEnYHHlD$>K#bZN6kYicvQrDzYZh)@H>EXF7<49?UbMW>fT^ETsA38>TN) zvLJ_>1(u8ytBH8cVZwA|!>Dhi2w>)Jes95VrTH3g+r*aJT2CddH|QRJ(j2xoopTi+ zPRsIE)mrdsD{roUr8Q7KO&nou;Mcd+xZh`U`nqmW%;YM^x1kX5-C7B6&AuzPx1)J!U%rW~{deAKm+~2~Wuy)T~75Ak_;@nIA}b z68+jUtBuV)nw6G9ENn#bUHKIy=ZiHv7U?9I!`R`(!JjH$cS5E%3 zW{shYiV_aXX$`@%t;lP1F)rE4e4zp^`jDG=3)i*2YuCx2PYt}T-WhPaB$s~?Fwn~K zEzX?v*Wk%(__Gw25P$w&zbOYe(D94g`Wj;qTk=Nd>z8~|eb-k6k^MNyj`~8~icjY< zef|6RJs7gGJkHmH_DQLbH)&{o5I~ELkd1RAIbN8W|6Cz9iHI#8zJ>WpE+>ZvsGMM7 z@Cg|oB*XTtlg$22-Sk!~C^(Y6Q*w$eA;Q-0Qeu79`7Hv_C*$-yi1?7}84FqYnqp8^ 
zkmXihp1NGfYHhJu+*PnujLkW&F@3(>H~DxoGAwgq`)h@lRhvnfTiM8_nr2O zj{aKCjQlPtQw8n;Qk<8)->(MJ&`%bUSrhtgD3wQ3#IMjR(O0nH%f+lLC7<_R8H1z6 zHFQ`8tIF%tPttOJZ+;MUM|vk>5P#wxBK>g{DtSnua~6B_E*nfTtEA=V)ojC2%jrr2 ziOBI#K$vv&MtQT!@Yba~&$f<_eOjjb+*gs&A8)m$2B(O5cU(1A-)9I_$uK@*)AVrN z`W0qB9-#-Hi14AFNRYd|}r_8o98WAPpngqF0aA_9Otme;I(m-={dMZRMd2fq+0o8x=ny6aD41N+AD($l{iOUn zs)jvKPIye|HksaFnrT4o+3MNc(;6_xT#z;6ZnyKi@hnI&;X2C=bXft%JtwdIJf6ql z^k^3zHhe={>7Q=PC**f)Y_zvOa%wy^XS1A|Q%dmM_Z?@8-EFWX4~)_=VvY3iO{=)+CGu7Cn@nM==5+pF$F{T^mV8LUE4LJ?4?m1_;5y0}0uxc3jC5Eab1$$s(_D zmWUt+px5!l)WN%^y9jt{4dlPVg#oqy!VZp>UQbuO-RfAn!ERzaQKku@c)MF3WywsV zciByX>}*i~%Q=@@OQ}Sx@@X>i2MhiT-u>HgbHUBof?DNNq?p^k!qBNj27QmCF({*BbNiG#7sL2PWAcpW$V(lH_HW#UW=?PRWB_GK)NBk`0!eGC z_;&?FYTSWOMVaN~bX%bw>e3#nPevHwG#qI1I9?SThOlN6yI&yIkCnA(fZs$)w=$ZC zY>e#|?rxsozi<*5mj(QG(X@MkuT%@ORSSjuAUVM!&lvapQ-7-q)XLQ1k=H<@Cv0HP z=mx{b-p|LREb&34#vxp_m>AtobU8M0RHMc|;!q>$`r$S>D`WB>--D&CQ*0Giq!rIS zpGu=AkC#5Kb_!WRt7F8os!yCB!v>w2pkZ!K$}_F=n38eG#=gJ5hok2=!Sv(qS^8}* zMCAcxGoqHpY7hKx;ju{$-jC>9C!SL{@gLrLc~cxMI<+3-fXlM}(k%@z#$8eq&GMKzXoUp)EH^kmdL zppj?nCojYpc2sYb`6&Ip@X%uB<48Zq=i3d_D5#PE+D!S!3A+GYK@ns}Wsa3U&Et8I zgwO=)ll$yvrGnlNQtOK<)ym7MN| zTHW_mBw_*$`rUoC!MpX~;3ZXIb3{0|!)P!aDwhHxK+K8ZG8Az2`1ddx$)TXGJy9ysK@PMkh`VG;O4DDA?7> z0l4ZY*fdc^oJ+*@ezO-@{|-egT1e1sDSba-14}v0PQ2Iu8YUSSbDCe%4foCB()ZCK z8Ee2ALaZ9oH;^;IUWu50Ui8OaWk_%ACn`Z~ebtvVWK{Pyh}u#}ar4Wpr#Zo2w%p5@ z4pl%pmzQ3QOeX7x1S})KAQzXlpqTGnIAo}(yZQYi&g^ljot>~(O`JsYM{UcR--d{x zc8S81-tu;tSkO^_%Pe&lCi+{-J#!;^0KFUu2Se6-xnbxh19f^U?WY^HwU`*I-h+Np zL9)f}lIM{NyUhZ-(ut$vbNe5yDOK$mn7~WH8WT0s#fL(ANdkMp??>>0i=D;DxR=xL z>6`C@wA>qMJrt_ey)fhx+s8z;~Wmw;UpB4KGq)+n6E2;UO#iN~U zAeG#AkwTt+emZ75z5U&B?l{Hkc}C#EXzz%BO?6+ zp{5+xNj_N|h}vSPgUJ#VquU*?MrMvDC&isSdoboN9Z%FId-OCOPVMn#BTaa30jgy# zCjDm{T5^2w28jYB<0ei(kggwU{zA@e%@^P*7Myp{aqTMmy$9$XE(}T7{+Z7x$BZ`; zCH^eNn9e$c&%EYt{VZ1-gz{&i3lZ{PdP`BF${XRo75l5#h9@HGNRr@%H8?;D<$0C0cCil7 zjZrX=PPppqimk^0X`-QyMEMyGNiwS*`@SC;S<@N3)BFHYPtfhk1+dG`n)Rl&N2=P- z-h^1y@(6pbiIb7WLq-?{2GOtPlfRkMLhIe4X0( 
zRoCmhV=S0|&FjRoFNXsB;?HrjFO!ef$(G*!qQ<3E-9M8lt`osuJNI>Lo{eCt2*;oS zl?9}~aAvrg+50nHE{r0)CzW?~*a&JD*!mQBId zw?Av=?!HX!AAFrfsQOfOwMU5BnXa$1by}oPCQFysmq@!2ujLsT`@QRNiMnR`3D4;!CsD&yAf? zdgd_$#aUs?Lj z%~Zmv!EiIK6PshJqQw0|Z+t1%Qr2=12MBO|S<`*4zSIg_xcsmw>_%U3Sjk%<+Xy+uKVw z=p^5FTtpD<5qJas7uP9_?WBlaAQCARg~Qcv{;TAkr~0mzrASNy^YTc3}PE3wR}(ePj7te>`q zAGKCqzdUU{tVl|6Hy}g>3@<~DPe3MQ!l)4vG1WST)AeA~62eJRYlL^_(Iw4X)N_2t zs0H_i2Ra|K>@sJGcjcz5n4VzIFTWoHdW&yTlKGmh(>{@?!r|>IJ>f?Js+jp^*qu{E z+b0YsOGy0=VZoM+;?8I0p=MSL4bxvtb%;O ziTqb7E(8L4-1RHcj}lo%9^W^pl+z*HonUud#8ugnKn7cRgrAbs=X3z=KDo!4h$X{~eGKH27Q`7qP%c?Mw^8^8fQd4(Sh`Wgu6|>cdQh zd!RMZlvLN+9mXwL7#0-nS<5e6$4MIz+c;h;q+;1Vy>;jcIxx7 z=2!j$3r`ohq#31J)%7;mXI}n2)6B5XHLOHEFdlT#6a!}efy`>Gbj=^l;-f=!bRrFJSd}@g~nlqKLLkjQ*jcT8bPsw~YWl zSd87QQaQ9Fu2ZIwpaDA47j}tGbtO?hI6=iKJf!%feDMB%tO`tV=qE+vO^8NVx^vIM zqiGDTs35Dp19G6N-{SG8Y>fjsZ%|i1e7XVfv{Z{~MlYZ&hLdjpXAQ=*6ubG{9wbaW zyRtz(6Bk8^qNO-xXH#yxv4`JX|9uLi#~2AWB+ zkm+2qJdt<7QSs}m4dDlQd-4b^(waIFqRxPTBvp0Cm~g_7yK_d6Ol~Z4*6Mp})A-Z- z>iTByw<6+C@w96E z)VkU3uwzw4>Hm8V&$@ZV75U!ozX27;r(x|oXr^0V!}Ud)nz~ye6Hj_` zyDSP>{0b?qLBQX9O38(WBcj^~QV#hcne8^_WyD+;bkwKFoOOtGf(dhB>7i3bK|?o~ zZ0BIHJTee@ma*~B3?08bOXua7;#D?uejMPSKte9N(V0xQ$Ge6jDCv1n!+KJ-qvtx0 zPPz@6YWl(4o=QduD@#&&k>ciKamRH`PlWsRLjEzUT>^8kcv{p4<)?`GVJ7 zkDEP$$$KQ{sQY=%kRXoOX_5|aA*IKT0O6si9I~7TGrVP2$=?KUEcbGa<<2W$!US#` za(@0r0rZCLzsMr=6RZlsFFH?3#?FI&DUv0_@iBA@_xQ9;4v_%3`uc5s56CXbpKh%< z(#8v6!FbeUx+AbgM!wD`&qsv}bQ8&egzWVvwID>1NaHEeP3$^m&_f`LEvT6Jp$AsI zM*&$4D<^w52ADAft-X!{tD%T5DM^9D@)Rm!}Q?4mu97jA2=Bp zXx-=k9aKu3_#+*vDp`^N{^$_d7@{3LU50%jj`)=$##iGnbVczG!As<0)%^Y zoA0K4?I|xP31)k_{GX&;CqD;E7%Dq}B)rRWu^Rw^fUJ>NS!k$1RbSgbA`*kG&8F3e zX*ZW)Mz_fma=3Du7@WAPdZXSdijZ8KJAf0k;P1$=b)5d^LGz#gTp=95pQ%qrJZ(Y$ z<2^=?*LIA?A6h*i1flU;&CW7QNkN;Qn9Y71@%S7WxxliP%DpF|9H2*mnXlolj=rI* zM?5E~X;ktG!++wP%CaPA`I@26hh!e#`vxxFp&EGX@zM74Y_Hr+h?T;g|q|fI?FN zo#ANU6hkJ<>2(Vt!36CA7avgMBV``2$=3$GG!x4lUh}jk>q9plq@qJ$GNtp?Hf?FSHMWU{(AB? 
z&4bMVT{OfJHX2sJ{f4=9nXM6Q#-8X|EJk0 z9&xJ*DHXI%QN*RAE*!hPBxWSq!elCshf#AkCo&ss4 z0+jIap$SHWkQ3?Ogt$-C$367&L8$$@;PAyz%;U_jDQT|Kp^#1zb$tAxyi`|VB(9;g zcrDS-%qs`r2qKuEPyn2hWW4@Iy$Y**$`UMh`lM>mmkAZHF+vSE_Jv)1a{5>WmJ&>n z3aITp(LVpYsQ!~4v6dT$c+>te7Jd;b7nok4*VToQKp{}EEOB}-jy}ObZ9h{bf}2j1 zu#{zQXW6|7o6ysJ&)OQcSjuUL>FBXn>{svozjLLm-#1nRuvGQG&ezFE)~QI=z0L&D zjWVhUcT1FU6cP+v`u`1LWx7-&tZFnIz`0tLaZ#VUbza(2UFY|+ApXW~rPr|tAdCNn zNHwp~0SFo-lxqmvICvW9RYf+Iu6G&&5fYz&3hlcj^FNyxO>mRA>DBQ_Hf- zxcv?0<$ND#-ZH2oya+C*k>o@4V2+H;+9sWN;dNJYyv(`|UOx9d=F#B(nHS(w1BUAQ0jTP)?Eq^y_YESRAW+`yN3j(B z#viyjEWvlJ$hU$owO{?)<4vt(}-v%`QKA%9OL}A^S7ic{Ht{_%W6wRuOD(rRrMRbCX>111d{1y zo4`DEQ#S}@O%=g!yL9*VC%0>Dt>w8ubhY{JbkN?1u1erF$G{&yXdmbn82^BsEa?atN=X%|pUJ@s16UhsUhp4QoZp-iBv^PC~bLZe(HkXcF$g%gu2kF-exl8e>X@IW`*pxF> zu3XM88YF?5NE6tl(RCF2;+sj6C*^9-pW;!~SxpKMM4G}}pp=GnD>xUO*X-)6Zq?sJ z?MZ<+w;!CO(8|X?_Tfu^TeM?)J>CMLDT_|q5wXC$7u2*C-gBtCS=)1caL%bx{!yS3 z8heU2*yU{>SgFoD;{Hfr4$||GY_u*vhSly>2Beqx3|VqY$C$j3EeNzTE&XV!#)H;O z1l+V&7Q%(7)o|v@00WUNk=1pc9}Pv*24;UXx=Z?DJJEuPfr6BeQv&NK;p~Le0^;v< z+pgOZbzdQ3GUa128BrzsvvsC?osFDb>CKtG&eZj%#Q(qN}Nce11CL;5_6p8d%%B>GO>S^5)x!!H|cMhz@s=!mR3 z`;)8j0;d~o-0(+Ou<*IY3`W3)P2#%Gi*T%K(eCff!GeSter*WG&-u^<$d7y89du(? 
z^XxMAn?_Z881Ecz(X{7dafc!iJ_S@5&Nf4f{OzvMAZ>G2Xo%vThe7~-K*t)AplBb; zMQ9%i6um$9m9ZPS+vn;;xgJ$9L}H{yMC^c(Sy#Wl1J3gU6865VNCjFm17xrStnznx z{~Dnm0)OXk<6=Cs1*APrvuMt8CtiG;Xk(FMZ77vgl29RdoMu?n3#H;VH`uFL*se#R z!QjUO)Py=7nKPa)=IuD#X&ctfI>I}DgG|yZ)DAGt=M>gkE-~|G6=o!PQcB|S-3lrI zw%-%(3QJI5OwR%bent*yL|piRwXiVPwIUW8Km_FUsRb6@ zKjDp7X`E)5w8=wVAv`a}ToqrXCID%pk)nXs;9+asal@cae=~d$^|5d40T|GjSj>xc z7I=0pQyYIZh5wq3?1aYu?#7hEgznAgM9U5Sy9h%15%MycP?;1`JF3+D*s6Og|NYN# z#Udi+=2{*^=o)K>oX;wMOk@V=y)%nb>-1m)B=m}>m$E1IF96R4KO5dQFcY8yIn)nm z^h=HRPCL76ilXax1z>DgO0ROfYAWj9Zj8kOkfVWbLd)voI1NB54q*C%r8efUs3#J5>`!S(T`<5To$Hl*WB1wM?pxqz^-C8Zn zpZ0iv3HnX4e`I7+&Ap`O529p=WWdV?9%!Emccv7?Kqw^1efx=YyPcb{UOl#|mMCgm zh7^+;5NfCKLC>=-7t=;NC#%}1L+r+ z#5k+Nz)3>KPtiEXe83jl_uIP~%3B$q_i0Qbp{@a<#0h$A{qEv;;5pC%Agme0A21vE zwC8y*8Q8e_!Fx`t$A{jO^WL6Roq&_|4CiKty#_lGr*|LA4SiEHT}+L1K}M*tE&6j`~vU+86F}x8U-47^nT{^Yy~8#kyd_u$Rw}@Nf%h>-o9FhW-nimgb=o zjg4j*01XfN_#V(l`VeGxOby+F&HL}$ij-_MCV#7nV+0D0zf!W9=>};@SQ3DTD`!;ES8s=4*^sWL<;sYxy6ICYi9W7!_=ay_bWX)2A` z9Yz6Fn{}VHAJS>~=z14EirXh_WM=f!wsoitiHioHUV;dHPyxZt2Q`na`g^;3h!QbYHv{++yn=K7~!we{na*#p#pA$K0Tzh74Kk)5Sn zrj+nFXodPMZYzmdcTQ6R>ZyCr4S8E`&-lPQk^%8uWXCO%e7&!@@1@K--imz0e6tt( zfqX9A-YFSw&)Dw)ze*q4NE5$W*n%ZBiEW6jH)4Ar*{@qP1lPZW$B%qDm-pbM6>4u- z932q(xc?$!Bz0?TwsUbBz3Ea@@Y%2%?)sd0kdttE@O<#tzSp2v-`lm|`5V1*pQl74 z8zS9rhOgF+Dfz0!u9N+K^I&ewZa7<5L&zS)pAKuBVO8ux&7lZmZe`}!l(I9-8^b=TPYQ|xoQSqkj$814NOJl`X!O8S6 zo7B_pwAJRk+5I{5<$j(fza}i03)+gg_Hj0)X_D7-(20NA4xVi&7K;|i_3x=&5T2-S*p)vFkf}saspR z{?j9Y0)J4n9-iuP9k> z-m^inO}?)(zhW<2ZEnEqwcvw$Ssk}}@NML$M~SuCp{~4y)a$pU**kbN5LfswmI5!t zTpLL(AG>8azbY$jyU-clT3)HF6N8W$hsP*I`_soI{I}16S_a$2F_f~fw3XT#%vj8h z0={-nwc|!>skvfvU8%)AC7`(k^M?kZl*4|r(Yjwn>J}EElCfA8YYbIL_>npqS9RZI-R6Sa{00{4t*i8RSA0BINO5Xs~=v zu!|*0RICo9U^55__NLbCYEWmjVxPFC)cJv*0Q}j2>*;+r+M~RT!aDr~)XrDdzN5X= zhDY-Spz&eCI@gaFqKPq$r)xW$g0!#Ebgt95++7G$&jpP(mLDCql-HCgikeiTOiS3b zw0=)*D4JXwJo3uobYBvj7YFw(=}{5i(llGqP+qn;v%9*e#09@HXC;E$gbb7^!CJC0 zoA>Jkn(Sc`uYO*37Xd=r&1c-dGfT(?|` 
z_V|x1u~5fMJ`nikb1?al65z#g!LJC>uP|&H5^0v(3{}pq|=i zMERteHZ-O-e4E~z99Ya26u1`On%4v}S%HFAdW}ta&Segm-Fq8=MXzg+ozHCXi~Yie z?z)BBhTGCVXP>qo#ekJ|==gfXbJOKv7ncWjM(eC~x+!;&xNk0{FH&u$eYmHDw>XM2 zM1VW4vA7hx-js${-)bhWgoVtHEpVr*OZ)n_|M<`9Ei^n!U}fIu05x<>mIZPCmS4wmv>p3wDEE4c|_~ zf_YO-`^DE%*Wmo~%0g{LI`HnP;a_*_r^)N}_LfsE4TKZqV?cWBZfc!A&kmZp*CKAk zo|z)L$JB{OOTbmPR>_c8zbmPrV0E8BBh`HKkWy8C%Hr+1_o9zD7wa2~jKJY*_G8^^ zW&ruo*NESvqDx-4zma~tLptD^6*ry>wKgYnl3GGlsu7KwPtC3?!nA%KZEv6NP-@;- zXt(A)btJC0K9f>5=sq_;GxH{Go2{vGHtxMe)j}v_eqLix|N9HeTW5p)>|6H0^JeR_ z#SBhMaKL3zH4r+~SWumxHLC03QFT{r?%hyz5;91Q%#e1uCRTTyOmzp8q8LLvyJCwb z37387;ArdsDwX2x{BW6Nol5BJ?H!1K)J{1pLzObc<-q zMZ*p)JT8yA7^$2lKKmHf+q}As=WKb0P;@jD5LxHKB$4~rU$y8@8SOnPuh#-|iG5}| z#r^169rbnnM=&~}7z2oxJqWYec>!CK%4e)!b2*jf|7-7B!;;GWHKmO)m0HeJR%)F| zZ89}A?$mo^cIL{H3OGsI|b(5zC!YbTD01H!q^{-gu zg&#ikca4syJ7a;HRg%&IKUH!=>FnNYMzbHIi4G9NTd5`K=o!D`S?{JAQwC74MBuycRD| zlx@?FfW4Z?om=G%@|{C0Ls`W|l3@g;MVRXsf zubyo17vw`_QcW4-|61nQm+jVp$#lC$g1+WvPh!*yeejgn2@^P1b>_c`~B`lfzwG%LfAJE)+5#=K1a&EI!aEc{s;Tvd_(Ro?5DEHBDJFFf<-1rj+yVTt&# zCYZGtb{=WI_WhLyH`_{)AbdgU<7>&p|- zI|U;ZVECnpSA5X2Ik3Z}BYNjSWM5AhWxMZ9^OY9Sj@4eQ3g&!7?%2bILuyAu zUE#g+_E-y;19HgwtU7V>){VAG$);6&$5+Qra<6?yv&~tNu5jy*(Uc9`9oP)N5^6>j zw{?~!@iGDptlf20-;#4?`EKdY&CPKykcoXmUxVnUUdfG}$SO^UiJ|71)1HYXx#mRG zb0cD@3bE46z?>ClGUlb4pxo7^P)lm~wPZ2ZAAwq9a23GR{4Nk&@Ox-6H07DN3WONu zSma@@oYw@7+Mn*vkE)peZNP-5m^U;BXJyd5StFV6yoWFk-~Su^(X9BYWkxBvC7|&L zHy$iBw;$CR;eL0PnKvXzxso72jaTUea<>!#v{&bswbr{%uNT&r02G0^ z4WGq&STgXIE?`$6aE)&SU#_?pWz;{_pe;SZ#t*}SMp+e{DwZKVcB9?eC{6!abFwA$Z+_2j zcvWwtv77jJ$FtHvYreIF$&lhWiVS}x6^kLO(1=>DrD_PdbH!LKfGtaO$kecY^@}zE zLvc;8EasHTg~iK~)136jwr=ch)-WTM=h~*8p0u^37Q}^6OXLIBcmKh)sucq-@P zxnOTe_}iRs2lF8TOrldW9#^>F+Aq|HeE|Arb^he9d{w2C4yId4tX&(`lt4#XILLeP zgp)PfO*!mOj>aPoal*P!iLfpY+mwgU!fTzSAXifa5Zr~xHEAk&uqV`zH&)5 zJZ?FDfVTg9L`m9SFwAEBzy*u4gX^lbiQDehs`^6?@oQq$xC$4&2NoK z+sT+u#hD%{l%U_A=r&FP8yqyqW9KwuYG(rs>7|THrB2>#0uV0*9loQRF0%_A%Pd@4 z_P53WpDy*)_zK_C>N3-Y8=Kv4$ZWoB_l!Hk6u;Zvd|cjD^4&k`bv~+ze>i?K)aS9H 
z=(|*r#d3~KhvPnC%F4DA2G49UO(e4R!`N>GW{F#NwG=1bjWat3<3++d!#iU~zwu~l zO>q6HaIf1Xr)1JAL!l_L`Gs?tB|A!ozU{AMU4xCau`^eAO&`2+P$XMR#h=;394e+u zTOjIx@r4h~L2nW>Wp@d7jm@h>E$m^Kl63T{z>XxP&Tnb*mXuC|ayDmvPh^Ym8YLkq z-k1@&51@BFC6Q#rxq;cPnLtbd3iV+6)@<6T5$(~_GaJqC7Mwz7-`N{9)WhQJWl>6< zI;wX+Y6+!o7RFJ2H1-EZ1)7yvg-*p`@UOMPgM8Ut#p)y+{pS{Gxx33 z-J5NtJhl^_z-?TUk6so4tW%d2Fy&DI7&h7-9KXL{tl+z1*Y)O48%V`7E@r2~ufz@c zz69^JEOQ8@COMAx`F8hFE+VdmHs8KXuWHoHjBO}jVol>@D5t=ZTV&7;!1aj^mt7fW`pdd2+sB;91G$t<$SW+^XUvg27bC@QK)8;intctQsE zozw^ax_jXdZlA9{m{mhE0TvC%4y0@u49awcx|(l?y);-k+d0_c5Fq&at4RzcGV;3MX1R4H2o# zlsOOCPB>#3_v6!mny(cLbr(9^Z)BD)d3J?#jcph+fBo2B9mILJOpgJ4a>cK2{{(zK zgNJq109FiWiIZtwFeovt0m8HSSMkbQt$$pC@8!pvRxWaZ@NQ%W=Cstr+AoAvu_>77 z2wQLSk2j7!7t@XCcvPGL5lBlRLRrk_wa9-1u(0PMIxcQZR+#w#NvsA4?tR8D(BQRd z8q@P+X?wOonr4m669|Hd0rnf#{DH>kMME}>h1@0)CCf=|zpUXsEI9z+MI4_rtjYIJ ze*`YwlHr7UI7_`xHB2?BJu*GuqglW2W%P00!b^0;MpdlleN1p$+NY{c0AEYPmCa)w z_0jxl?6)HXUi>|UbBp>-uiYi&a<~q3swP<>_1n;J#y4|Ta+^1mhUHY0HUL^1rKyB* za+MudKLVg}Vw&uhDZqTu3}Dh*16?}l?+f;mDxp{2H7akvE>824>Tu`Z9K@CfFn z$Tc&AUV4>4FGY0e;Fli<>ru!uhgW4jfPR+n)ZP2%8r0`5xOmp8$68b$IOo$>Ex}Sg zqqHzCVpzMcvbeo;Y!qT9F3Rd<&46Kzq2*h4Z69?Xm94~D^FBNZTZTq9%jN`|?W}oa zuCW<}xWabKOp*Fr7kR^26fR}GNXVRx6pHk(d|c3Y8fP0U6Rw0IH%ew0qFm}`U+@lT zR>ECiIgj!H&mt1|0hke}gSs&BjWHM#B}NGQY`usT{4kc>6LJhn0m~gFl2l;P41y3i z3^i9rm`r_{W3W{Odj~j-+jiJ_)VN}tJZ#3BQSU1yXCfCoC-09_i?Dmq$^)VFm10pc5GCwI z?Ek7fIY?tKwr!N8u8)veLUEoWy3{AA;muxj_bn}HP8aMwnr4F=3KH3+g)ylHowP*3 z%H{joqCQ3)Cbx#O$GfF{GF1jrD1M_w(4z{~H_3MFJ{KWlKXPxkZ8}zaR3;r9P8$l$ zUf0AKAh+C8PM2+RS+{dd+jGZs+kw|+9u>|FQOX_JKXjiQ+w;|S0~dqtZDYNO@3f~- zlcKpv)TWfm8zNBjPbe6tUOjVZC->+Dxl%%a8suf1CZw;llf$ z3R08f;tau<^G>Xq8F=A^7r-l%y?!SWZ^=Vd-kwji460kcDTErm`-(`f;V z=UKYuFJWk(0H-qduDiUq@>kbIqjMHKiwliL_t*+-Twxh6*nwSv2M{D5u_^tV8Bs^FGdI{9<(sjwmx0irP8zm{imip zj9Ay)9vmonm zti*|fCwGA?bJGD}oK4Aa8v00wO%5LQG-VgbQ!@llplg)fWtp5TRXjoJ6WP@2MTyr_ zYto2<0E1mR8#pLxN)($j_$r4!H=br*zxjPUD8n(w#>0+19Z$*(lZNE3)l>pTRh}G zb=TmyIeKNT8>`@Tmcsq2GK>pV0is 
z1YvGQmk-+HuO@B|1KP_1TbG8#MxiaU`n`duwR*Y2zN&QqxG1rC{U1GgB91AIqv@PUx;I$+OZ(pX^@XWg7?Vwd_0 zyb{+k{DAhWz|LYvaY&u&Z&&j?XVo|}!)p7s+`1vCYTO03&Lp&Ep={vEBy7Qbc`#GH zX?0gXQ>X6I^WO)ltSa@5>TiaQ)+O+Xu;!{jN z0y|P@4T{R-*r_gEoS*RYZlKIjE3BKkQ#&P=3ly=Zv^SK;GAMQc9k$iHqg(xC(oyC< zTt#VK%liScJIY@a5;NtC{Cc$=3il zdo;2M_aOwjmQMtXT`sCLGou?po0o5VbHhw^VZer(1%mx4JMb7Mk2o8{MFf&p3S3Q$ zgUK?|H(_C*oq;v8`EV@rdvcH<`0QHV6tGC$EoSCbKlxhcINC~aTyrqs9(kpr5SuRR z0$v10tmoez91l&U2lF4};xZC5`&-0*4ivvHPC9ttMcHjNcl;t;r`azW=DY zH7~6N&?J&pXM>L`+W-BfX7d1`@${Ol0>DQv`#-;A9wUOdP^QA_3EvKSPWe>shG;ZN z2CO;|&!3li~UgBNI}Uq+)r&(% /etc/gnocchi/gnocchi.conf - -The configuration file should be pretty explicit, but here are some of the base -options you want to change and configure: - - -+---------------------+---------------------------------------------------+ -| Option name | Help | -+=====================+===================================================+ -| storage.driver | The storage driver for metrics. | -+---------------------+---------------------------------------------------+ -| indexer.url | URL to your indexer. | -+---------------------+---------------------------------------------------+ -| storage.file_* | Configuration options to store files | -| | if you use the file storage driver. | -+---------------------+---------------------------------------------------+ -| storage.swift_* | Configuration options to access Swift | -| | if you use the Swift storage driver. | -+---------------------+---------------------------------------------------+ -| storage.ceph_* | Configuration options to access Ceph | -| | if you use the Ceph storage driver. | -+---------------------+---------------------------------------------------+ -| storage.s3_* | Configuration options to access S3 | -| | if you use the S3 storage driver. 
| -+---------------------+---------------------------------------------------+ - - -Gnocchi provides these storage drivers: - -- File (default) -- `Swift`_ -- `Ceph`_ -- `S3`_ - -Gnocchi provides these indexer drivers: - -- `PostgreSQL`_ (recommended) -- `MySQL`_ (at least version 5.6.4) - -.. _`Swift`: https://launchpad.net/swift -.. _`Ceph`: http://ceph.com/ -.. _`S3`: https://aws.amazon.com/s3/ -.. _`PostgreSQL`: http://postgresql.org -.. _`MySQL`: http://mysql.com - -Configuring authentication ------------------------------ - -The API server supports different authentication methods: `basic` (the default) -which uses the standard HTTP `Authorization` header or `keystone` to use -`OpenStack Keystone`_. If you successfully installed the `keystone` flavor -using `pip` (see :ref:`installation`), you can set `api.auth_mode` to -`keystone` to enable Keystone authentication. - -.. _`Paste Deployment`: http://pythonpaste.org/deploy/ -.. _`OpenStack Keystone`: http://launchpad.net/keystone - - -Driver notes -============ - -Carbonara based drivers (file, swift, ceph, s3) ------------------------------------------------ - -To ensure consistency across all *gnocchi-api* and *gnocchi-metricd* workers, -these drivers need a distributed locking mechanism. This is provided by the -'coordinator' of the `tooz`_ library. - -By default, the configured backend for `tooz`_ is the same as the indexer -(*PostgreSQL* or *MySQL*). This allows locking across workers from different -nodes. - -For a more robust multi-nodes deployment, the coordinator may be changed via -the `storage.coordination_url` configuration option to one of the other `tooz -backends`_. - -For example, to use Redis backend:: - - coordination_url = redis://?sentinel= - -or alternatively, to use the Zookeeper backend:: - - coordination_url = zookeeper:///hosts=&hosts= - -.. _`tooz`: http://docs.openstack.org/developer/tooz/ -.. 
_`tooz backends`: http://docs.openstack.org/developer/tooz/drivers.html - - -Ceph driver implementation details ----------------------------------- - -Each batch of measurements to process is stored into one rados object. -These objects are named `measures___` - -Also a special empty object called `measure` has the list of measures to -process stored in its omap attributes. - -Because of the asynchronous nature of how we store measurements in Gnocchi, -`gnocchi-metricd` needs to know the list of objects that are waiting to be -processed: - -- Listing rados objects for this is not a solution since it takes too much - time. -- Using a custom format into a rados object, would force us to use a lock - each time we would change it. - -Instead, the omaps of one empty rados object are used. No lock is needed to -add/remove an omap attribute. - -Also xattrs attributes are used to store the list of aggregations used for a -metric. So depending on the filesystem used by ceph OSDs, xattrs can have -a limitation in terms of numbers and size if Ceph is not correctly configured. -See `Ceph extended attributes documentation`_ for more details. - -Then, each Carbonara generated file is stored in *one* rados object. -So each metric has one rados object per aggregation in the archive policy. - -Because of this, the filling of OSDs can look less balanced compared to RBD. -Some objects will be big and others small, depending on how archive policies -are set up. - -We can imagine an unrealistic case such as retaining 1 point per second over -a year, in which case the rados object size will be ~384MB. - -Whereas in a more realistic scenario, a 4MB rados object (like RBD uses) could -result from: - -- 20 days with 1 point every second -- 100 days with 1 point every 5 seconds - -So, in realistic scenarios, the direct relation between the archive policy and -the size of the rados objects created by Gnocchi is not a problem. - - -Also Gnocchi can use `cradox`_ Python library if installed. 
This library is a -Python binding to librados written with `Cython`_, aiming to replace the one -written with `ctypes`_ provided by Ceph. -This new library will be part of next Ceph release (10.0.4). - -The new Cython binding divides the gnocchi-metricd times to process measures -by a large factor. - -So, if the Ceph installation doesn't use latest Ceph version, `cradox`_ can be -installed to improve the Ceph backend performance. - - -.. _`Ceph extended attributes documentation`: http://docs.ceph.com/docs/master/rados/configuration/filestore-config-ref/#extended-attributes -.. _`cradox`: https://pypi.python.org/pypi/cradox -.. _`Cython`: http://cython.org/ -.. _`ctypes`: https://docs.python.org/2/library/ctypes.html -.. _`rados.py`: https://docs.python.org/2/library/ctypes.htm://github.com/ceph/ceph/blob/hammer/src/pybind/rados.py - - -Swift driver implementation details ------------------------------------ - -The Swift driver leverages the bulk delete functionality provided by the bulk_ -middleware to minimise the amount of requests made to clean storage data. This -middleware must be enabled to ensure Gnocchi functions correctly. By default, -Swift has this middleware enabled in its pipeline. - -.. _bulk: http://docs.openstack.org/liberty/config-reference/content/object-storage-bulk-delete.html diff --git a/doc/source/index.rst b/doc/source/index.rst index 4d72f9c0..d822b380 100644 --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -10,32 +10,6 @@ Gnocchi – Metric as a Service .. include:: ../../README.rst :start-line: 6 - -Why Gnocchi? ------------- - -Gnocchi has been created to fulfill the need of a time series database usable -in the context of cloud computing: providing the ability to store large -quantities of metrics and being easily scalable. 
- -The Gnocchi project was started in 2014 as a spin-off of the `OpenStack -Ceilometer`_ project to address the performance issues that Ceilometer -encountered while using standard databases as a storage backends for metrics. -More information are available on `Julien's blog post on Gnocchi -`_. - -.. _`OpenStack Ceilometer`: http://launchpad.net/ceilometer - - -Use cases ---------- -Gnocchi is meant to be used to store time series and their associated resource -metadata. It’s therefore useful for example as: - -- Storage brick for a billing system -- Alarm-triggering or monitoring system -- Statistical usage of data - Key Features ------------ @@ -54,6 +28,34 @@ Key Features - Statsd protocol support - Collectd plugin support +Community +--------- +You can join Gnocchi's community via the following channels: + +- Bug tracker: https://bugs.launchpad.net/gnocchi +- IRC: #gnocchi on `Freenode `_ +- Mailing list: `openstack-dev@lists.openstack.org + `_ with + *[gnocchi]* in the `Subject` header. + +Why Gnocchi? +------------ + +Gnocchi has been created to fulfill the need of a time series database usable +in the context of cloud computing: providing the ability to store large +quantities of metrics. It has been designed to handle large amount of measures +being stored, while being performant, scalable and fault-tolerant. While doing +this, the goal was to be sure to not build any hard dependency on any complex +storage system. + +The Gnocchi project was started in 2014 as a spin-off of the `OpenStack +Ceilometer`_ project to address the performance issues that Ceilometer +encountered while using standard databases as a storage backends for metrics. +More information are available on `Julien's blog post on Gnocchi +`_. + +.. 
_`OpenStack Ceilometer`: https://docs.openstack.org/developer/ceilometer/ + Documentation ------------- @@ -62,7 +64,6 @@ Documentation architecture install - configuration running client rest diff --git a/doc/source/install.rst b/doc/source/install.rst index da923879..134361f2 100644 --- a/doc/source/install.rst +++ b/doc/source/install.rst @@ -11,8 +11,9 @@ To install Gnocchi using `pip`, just type:: pip install gnocchi -Depending on the drivers and features you want to use, you need to install -extra variants using, for example:: +Depending on the drivers and features you want to use (see :doc:`architecture` +for which driver to pick), you need to install extra variants using, for +example:: pip install gnocchi[postgresql,ceph,keystone] @@ -47,7 +48,7 @@ install extra variants using, for example:: Ceph requirements ----------------- -The ceph driver need to have a ceph user and a pool already created. They can +The ceph driver needs to have a Ceph user and a pool already created. They can be created for example with: :: @@ -65,11 +66,59 @@ If Ceph and python-rados are >= 10.1.0, cradox python library becomes optional but is still recommended. +Configuration +============= + +Gnocchi is configured by the `/etc/gnocchi/gnocchi.conf` file. + +No config file is provided with the source code; it will be created during the +installation. In case where no configuration file was installed, one can be +easily created by running: + +:: + + gnocchi-config-generator > /etc/gnocchi/gnocchi.conf + +The configuration file should be pretty explicit, but here are some of the base +options you want to change and configure: + ++---------------------+---------------------------------------------------+ +| Option name | Help | ++=====================+===================================================+ +| storage.driver | The storage driver for metrics. | ++---------------------+---------------------------------------------------+ +| indexer.url | URL to your indexer. 
| ++---------------------+---------------------------------------------------+ +| storage.file_* | Configuration options to store files | +| | if you use the file storage driver. | ++---------------------+---------------------------------------------------+ +| storage.swift_* | Configuration options to access Swift | +| | if you use the Swift storage driver. | ++---------------------+---------------------------------------------------+ +| storage.ceph_* | Configuration options to access Ceph | +| | if you use the Ceph storage driver. | ++---------------------+---------------------------------------------------+ +| storage.s3_* | Configuration options to access S3 | +| | if you use the S3 storage driver. | ++---------------------+---------------------------------------------------+ + +Configuring authentication +----------------------------- + +The API server supports different authentication methods: `basic` (the default) +which uses the standard HTTP `Authorization` header or `keystone` to use +`OpenStack Keystone`_. If you successfully installed the `keystone` flavor +using `pip` (see :ref:`installation`), you can set `api.auth_mode` to +`keystone` to enable Keystone authentication. + +.. _`Paste Deployment`: http://pythonpaste.org/deploy/ +.. _`OpenStack Keystone`: http://launchpad.net/keystone + Initialization ============== -Once you have configured Gnocchi properly (see :doc:`configuration`), you need -to initialize the indexer and storage: +Once you have configured Gnocchi properly you need to initialize the indexer +and storage: :: @@ -98,7 +147,7 @@ that your indexer and storage are properly upgraded. Run the following: Installation Using Devstack =========================== -To enable Gnocchi in devstack, add the following to local.conf: +To enable Gnocchi in `devstack`_, add the following to local.conf: :: @@ -113,3 +162,5 @@ Then, you can start devstack: :: ./stack.sh + +.. 
_devstack: http://devstack.org diff --git a/doc/source/running.rst b/doc/source/running.rst index 5a9f91eb..0cbdbb13 100644 --- a/doc/source/running.rst +++ b/doc/source/running.rst @@ -10,20 +10,8 @@ To run Gnocchi, simply run the HTTP server and metric daemon: gnocchi-metricd -Running As A WSGI Application -============================= - -It's possible – and strongly advised – to run Gnocchi through a WSGI -service such as `mod_wsgi`_ or any other WSGI application. The file -`gnocchi/rest/app.wsgi` provided with Gnocchi allows you to enable Gnocchi as -a WSGI application. -For other WSGI setup you can refer to the `pecan deployment`_ documentation. - -.. _`pecan deployment`: http://pecan.readthedocs.org/en/latest/deployment.html#deployment - - -How to scale out the Gnocchi HTTP REST API tier -=============================================== +Running API As A WSGI Application +================================= The Gnocchi API tier runs using WSGI. This means it can be run using `Apache httpd`_ and `mod_wsgi`_, or other HTTP daemon such as `uwsgi`_. You should @@ -32,10 +20,104 @@ have, usually around 1.5 × number of CPU. If one server is not enough, you can spawn any number of new API server to scale Gnocchi out, even on different machines. +The following uwsgi configuration file can be used:: + + [uwsgi] + http = localhost:8041 + # Set the correct path depending on your installation + wsgi-file = /usr/lib/python2.7/dist-packages/gnocchi/rest/app.wsgi + master = true + die-on-term = true + threads = 32 + # Adjust based on the number of CPU + processes = 32 + enabled-threads = true + thunder-lock = true + plugins = python + buffer-size = 65535 + lazy-apps = true + +Once written to `/etc/gnocchi/uwsgi.ini`, it can be launched this way:: + + uwsgi /etc/gnocchi/uwsgi.ini + .. _Apache httpd: http://httpd.apache.org/ .. _mod_wsgi: https://modwsgi.readthedocs.org/ .. 
_uwsgi: https://uwsgi-docs.readthedocs.org/ +How to define archive policies +============================== + +In Gnocchi, the archive policy definitions are expressed in number of points. +If your archive policy defines a policy of 10 points with a granularity of 1 +second, the time series archive will keep up to 10 seconds, each representing +an aggregation over 1 second. This means the time series will at maximum retain +10 seconds of data (sometimes a bit more) between the more recent point and the +oldest point. That does not mean it will be 10 consecutive seconds: there might +be a gap if data is fed irregularly. + +There is no expiry of data relative to the current timestamp. + +Therefore, both the archive policy and the granularity entirely depends on your +use case. Depending on the usage of your data, you can define several archiving +policies. A typical low grained use case could be:: + + 3600 points with a granularity of 1 second = 1 hour + 1440 points with a granularity of 1 minute = 24 hours + 720 points with a granularity of 1 hour = 30 days + 365 points with a granularity of 1 day = 1 year + +This would represent 6125 points × 9 = 54 KiB per aggregation method. If +you use the 8 standard aggregation method, your metric will take up to 8 × 54 +KiB = 432 KiB of disk space. + +Be aware that the more definitions you set in an archive policy, the more CPU +it will consume. Therefore, creating an archive policy with 2 definitons (e.g. +1 second granularity for 1 day and 1 minute granularity for 1 month) may +consume twice CPU than just one definition (e.g. just 1 second granularity for +1 day). + +Default archive policies +======================== + +By default, 3 archive policies are created when calling `gnocchi-upgrade`: +*low*, *medium* and *high*. The name both describes the storage space and CPU +usage needs. They use `default_aggregation_methods` which is by default set to +*mean*, *min*, *max*, *sum*, *std*, *count*. 
+ +A fourth archive policy named `bool` is also provided by default and is +designed to store only boolean values (i.e. 0 and 1). It only stores one data +point for each second (using the `last` aggregation method), with a one year +retention period. The maximum optimistic storage size is estimated based on the +assumption that no other value than 0 and 1 are sent as measures. If other +values are sent, the maximum pessimistic storage size is taken into account. + +- low + + * 5 minutes granularity over 30 days + * aggregation methods used: `default_aggregation_methods` + * maximum estimated size per metric: 406 KiB + +- medium + + * 1 minute granularity over 7 days + * 1 hour granularity over 365 days + * aggregation methods used: `default_aggregation_methods` + * maximum estimated size per metric: 887 KiB + +- high + + * 1 second granularity over 1 hour + * 1 minute granularity over 1 week + * 1 hour granularity over 1 year + * aggregation methods used: `default_aggregation_methods` + * maximum estimated size per metric: 1 057 KiB + +- bool + * 1 second granularity over 1 year + * aggregation methods used: *last* + * maximum optimistic size per metric: 1 539 KiB + * maximum pessimistic size per metric: 277 172 KiB How many metricd workers do we need to run ========================================== -- GitLab From f186f89a4b0c5b4e06c1bc07983e93f1f8ce5f15 Mon Sep 17 00:00:00 2001 From: gord chung Date: Wed, 8 Feb 2017 18:10:00 +0000 Subject: [PATCH 0623/1483] fix bad slash migration we're incorrectly looking at the column when debating whether to reencode a resource. 
Closes-Bug: #1662990 Change-Id: I84a78139ca6e5e049f70fd9c84033e60bc13e035 --- .../versions/397987e38570_no_more_slash_and_reencode.py | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/gnocchi/indexer/alembic/versions/397987e38570_no_more_slash_and_reencode.py b/gnocchi/indexer/alembic/versions/397987e38570_no_more_slash_and_reencode.py index 34363257..80b9416e 100644 --- a/gnocchi/indexer/alembic/versions/397987e38570_no_more_slash_and_reencode.py +++ b/gnocchi/indexer/alembic/versions/397987e38570_no_more_slash_and_reencode.py @@ -103,17 +103,16 @@ def upgrade(): for resource in connection.execute(resource_table.select()): - if resource_table.c.original_resource_id is None: + if resource.original_resource_id is None: # statsd resource has no original_resource_id and is NULL continue try: - orig_as_uuid = uuid.UUID( - str(resource_table.c.original_resource_id)) + orig_as_uuid = uuid.UUID(str(resource.original_resource_id)) except ValueError: pass else: - if orig_as_uuid == resource_table.c.id: + if orig_as_uuid == resource.id: continue new_original_resource_id = resource.original_resource_id.replace( -- GitLab From 2d00eb339e1c81b5f50803b16dadd004768e607f Mon Sep 17 00:00:00 2001 From: gord chung Date: Thu, 9 Feb 2017 17:06:15 +0000 Subject: [PATCH 0624/1483] gabbi: use history gabbi supports grabbing historical queries. leverage that so we don't need to make redundant requests to grab same data. 
Change-Id: I2ea32c5d4a36d32b184e643ace858a38448ef098 --- .../tests/functional/gabbits/aggregation.yaml | 57 ++++--------------- .../gabbits/metric-granularity.yaml | 8 +-- gnocchi/tests/functional/gabbits/metric.yaml | 54 ++++-------------- .../tests/functional/gabbits/pagination.yaml | 15 ++--- .../functional/gabbits/search-metric.yaml | 40 +++---------- setup.cfg | 2 +- 6 files changed, 39 insertions(+), 137 deletions(-) diff --git a/gnocchi/tests/functional/gabbits/aggregation.yaml b/gnocchi/tests/functional/gabbits/aggregation.yaml index 39e0bd8b..39c31d38 100644 --- a/gnocchi/tests/functional/gabbits/aggregation.yaml +++ b/gnocchi/tests/functional/gabbits/aggregation.yaml @@ -38,7 +38,7 @@ tests: archive_policy_name: low status: 201 - - name: get metric list to push metric 1 + - name: get metric list GET: /v1/metric - name: push measurements to metric 1 @@ -52,11 +52,8 @@ tests: value: 12 status: 202 - - name: get metric list to push metric 2 - GET: /v1/metric - - name: push measurements to metric 2 - POST: /v1/metric/$RESPONSE['$[1].id']/measures + POST: /v1/metric/$HISTORY['get metric list'].$RESPONSE['$[1].id']/measures request_headers: content-type: application/json data: @@ -68,28 +65,19 @@ tests: value: 5 status: 202 - - name: get metric list to get aggregates - GET: /v1/metric - - name: get measure aggregates by granularity not float - GET: /v1/aggregation/metric?metric=$RESPONSE['$[0].id']&metric=$RESPONSE['$[1].id']&granularity=foobar + GET: /v1/aggregation/metric?metric=$HISTORY['get metric list'].$RESPONSE['$[0].id']&metric=$HISTORY['get metric list'].$RESPONSE['$[1].id']&granularity=foobar status: 400 - - name: get metric list to get aggregates for get with refresh - GET: /v1/metric - - name: get measure aggregates by granularity with refresh - GET: /v1/aggregation/metric?metric=$RESPONSE['$[0].id']&metric=$RESPONSE['$[1].id']&granularity=1&refresh=true + GET: /v1/aggregation/metric?metric=$HISTORY['get metric 
list'].$RESPONSE['$[0].id']&metric=$HISTORY['get metric list'].$RESPONSE['$[1].id']&granularity=1&refresh=true response_json_paths: $: - ['2015-03-06T14:33:57+00:00', 1.0, 23.1] - ['2015-03-06T14:34:12+00:00', 1.0, 7.0] - - name: get metric list to get aggregates 2 - GET: /v1/metric - - name: get measure aggregates by granularity - GET: /v1/aggregation/metric?metric=$RESPONSE['$[0].id']&metric=$RESPONSE['$[1].id']&granularity=1 + GET: /v1/aggregation/metric?metric=$HISTORY['get metric list'].$RESPONSE['$[0].id']&metric=$HISTORY['get metric list'].$RESPONSE['$[1].id']&granularity=1 poll: count: 10 delay: 1 @@ -98,11 +86,8 @@ tests: - ['2015-03-06T14:33:57+00:00', 1.0, 23.1] - ['2015-03-06T14:34:12+00:00', 1.0, 7.0] - - name: get metric list to push metric 3 - GET: /v1/metric - - name: get measure aggregates by granularity with timestamps - GET: /v1/aggregation/metric?metric=$RESPONSE['$[0].id']&metric=$RESPONSE['$[1].id']&start=2015-03-06T15:33:57%2B01:00&stop=2015-03-06T15:34:00%2B01:00 + GET: /v1/aggregation/metric?metric=$HISTORY['get metric list'].$RESPONSE['$[0].id']&metric=$HISTORY['get metric list'].$RESPONSE['$[1].id']&start=2015-03-06T15:33:57%2B01:00&stop=2015-03-06T15:34:00%2B01:00 poll: count: 10 delay: 1 @@ -111,11 +96,8 @@ tests: - ['2015-03-06T14:30:00+00:00', 300.0, 15.05] - ['2015-03-06T14:33:57+00:00', 1.0, 23.1] - - name: get metric list to push metric 4 - GET: /v1/metric - - name: get measure aggregates and reaggregate - GET: /v1/aggregation/metric?metric=$RESPONSE['$[0].id']&metric=$RESPONSE['$[1].id']&reaggregation=min + GET: /v1/aggregation/metric?metric=$HISTORY['get metric list'].$RESPONSE['$[0].id']&metric=$HISTORY['get metric list'].$RESPONSE['$[1].id']&reaggregation=min poll: count: 10 delay: 1 @@ -125,50 +107,35 @@ tests: - ['2015-03-06T14:33:57+00:00', 1.0, 3.1] - ['2015-03-06T14:34:12+00:00', 1.0, 2.0] - - name: get metric list to get aggregates 5 - GET: /v1/metric - - name: get measure aggregates and resample - GET: 
/v1/aggregation/metric?metric=$RESPONSE['$[0].id']&metric=$RESPONSE['$[1].id']&granularity=1&resample=60 + GET: /v1/aggregation/metric?metric=$HISTORY['get metric list'].$RESPONSE['$[0].id']&metric=$HISTORY['get metric list'].$RESPONSE['$[1].id']&granularity=1&resample=60 response_json_paths: $: - ['2015-03-06T14:33:00+00:00', 60.0, 23.1] - ['2015-03-06T14:34:00+00:00', 60.0, 7.0] - - name: get metric list to push metric 6 - GET: /v1/metric - - name: get measure aggregates with fill zero - GET: /v1/aggregation/metric?metric=$RESPONSE['$[0].id']&metric=$RESPONSE['$[1].id']&granularity=1&fill=0 + GET: /v1/aggregation/metric?metric=$HISTORY['get metric list'].$RESPONSE['$[0].id']&metric=$HISTORY['get metric list'].$RESPONSE['$[1].id']&granularity=1&fill=0 response_json_paths: $: - ['2015-03-06T14:33:57+00:00', 1.0, 23.1] - ['2015-03-06T14:34:12+00:00', 1.0, 7.0] - ['2015-03-06T14:35:12+00:00', 1.0, 2.5] - - name: get metric list to push metric 7 - GET: /v1/metric - - name: get measure aggregates with fill null - GET: /v1/aggregation/metric?metric=$RESPONSE['$[0].id']&metric=$RESPONSE['$[1].id']&granularity=1&fill=null + GET: /v1/aggregation/metric?metric=$HISTORY['get metric list'].$RESPONSE['$[0].id']&metric=$HISTORY['get metric list'].$RESPONSE['$[1].id']&granularity=1&fill=null response_json_paths: $: - ['2015-03-06T14:33:57+00:00', 1.0, 23.1] - ['2015-03-06T14:34:12+00:00', 1.0, 7.0] - ['2015-03-06T14:35:12+00:00', 1.0, 5.0] - - name: get metric list to push metric 8 - GET: /v1/metric - - name: get measure aggregates with fill missing granularity - GET: /v1/aggregation/metric?metric=$RESPONSE['$[0].id']&metric=$RESPONSE['$[1].id']&fill=0 + GET: /v1/aggregation/metric?metric=$HISTORY['get metric list'].$RESPONSE['$[0].id']&metric=$HISTORY['get metric list'].$RESPONSE['$[1].id']&fill=0 status: 400 - - name: get metric list to push metric 9 - GET: /v1/metric - - name: get measure aggregates with bad fill - GET: 
/v1/aggregation/metric?metric=$RESPONSE['$[0].id']&metric=$RESPONSE['$[1].id']&granularity=1&fill=asdf + GET: /v1/aggregation/metric?metric=$HISTORY['get metric list'].$RESPONSE['$[0].id']&metric=$HISTORY['get metric list'].$RESPONSE['$[1].id']&granularity=1&fill=asdf status: 400 diff --git a/gnocchi/tests/functional/gabbits/metric-granularity.yaml b/gnocchi/tests/functional/gabbits/metric-granularity.yaml index d67e548f..47a5efe3 100644 --- a/gnocchi/tests/functional/gabbits/metric-granularity.yaml +++ b/gnocchi/tests/functional/gabbits/metric-granularity.yaml @@ -38,7 +38,7 @@ tests: value: 12 status: 202 - - name: get metric list for invalid granularity + - name: get metric list GET: /v1/metric status: 200 @@ -48,12 +48,8 @@ tests: response_strings: - Granularity '42.0' for metric $RESPONSE['$[0].id'] does not exist - - name: get metric list for granularity - GET: /v1/metric - status: 200 - - name: get measurements granularity - GET: /v1/metric/$RESPONSE['$[0].id']/measures?granularity=1 + GET: /v1/metric/$HISTORY['get metric list'].$RESPONSE['$[0].id']/measures?granularity=1 status: 200 poll: count: 50 diff --git a/gnocchi/tests/functional/gabbits/metric.yaml b/gnocchi/tests/functional/gabbits/metric.yaml index c27600f3..dbc75645 100644 --- a/gnocchi/tests/functional/gabbits/metric.yaml +++ b/gnocchi/tests/functional/gabbits/metric.yaml @@ -161,13 +161,13 @@ tests: response_strings: - Timestamp must be after Epoch - - name: get valid metric id for bad timestamp + - name: list valid metrics GET: /v1/metric response_json_paths: $[0].archive_policy.name: cookies - name: push measurements to metric with bad timestamp - POST: /v1/metric/$RESPONSE['$[0].id']/measures + POST: /v1/metric/$HISTORY['list valid metrics'].$RESPONSE['$[0].id']/measures request_headers: content-type: application/json data: @@ -175,11 +175,8 @@ tests: value: 43.1 status: 400 - - name: get valid metric id again - GET: /v1/metric - - name: push measurements to metric epoch format - POST: 
/v1/metric/$RESPONSE['$[0].id']/measures + POST: /v1/metric/$HISTORY['list valid metrics'].$RESPONSE['$[0].id']/measures request_headers: content-type: application/json data: @@ -187,11 +184,8 @@ tests: value: 43.1 status: 202 - - name: get valid metric id again 2 - GET: /v1/metric - - name: push measurements to metric - POST: /v1/metric/$RESPONSE['$[0].id']/measures + POST: /v1/metric/$HISTORY['list valid metrics'].$RESPONSE['$[0].id']/measures request_headers: content-type: application/json data: @@ -199,30 +193,21 @@ tests: value: 12 status: 202 - - name: get valid metric id again 3 - GET: /v1/metric - - name: get measurements by start - GET: /v1/metric/$RESPONSE['$[0].id']/measures?refresh=true&start=2015-03-06T14:34 + GET: /v1/metric/$HISTORY['list valid metrics'].$RESPONSE['$[0].id']/measures?refresh=true&start=2015-03-06T14:34 response_json_paths: $: - ["2015-03-06T14:34:12+00:00", 1.0, 12.0] - - name: get valid metric id again 4 - GET: /v1/metric - - name: get measurements from metric - GET: /v1/metric/$RESPONSE['$[0].id']/measures?refresh=true + GET: /v1/metric/$HISTORY['list valid metrics'].$RESPONSE['$[0].id']/measures?refresh=true response_json_paths: $: - ["2015-03-06T14:33:57+00:00", 1.0, 43.1] - ["2015-03-06T14:34:12+00:00", 1.0, 12.0] - - name: get valid metric id again 5 - GET: /v1/metric - - name: push measurements to metric again - POST: /v1/metric/$RESPONSE['$[0].id']/measures + POST: /v1/metric/$HISTORY['list valid metrics'].$RESPONSE['$[0].id']/measures request_headers: content-type: application/json data: @@ -234,31 +219,22 @@ tests: value: 11 status: 202 - - name: get valid metric id again 6 - GET: /v1/metric - - name: get measurements from metric and resample - GET: /v1/metric/$RESPONSE['$[0].id']/measures?refresh=true&resample=60&granularity=1 + GET: /v1/metric/$HISTORY['list valid metrics'].$RESPONSE['$[0].id']/measures?refresh=true&resample=60&granularity=1 response_json_paths: $: - ["2015-03-06T14:33:00+00:00", 60.0, 43.1] - 
["2015-03-06T14:34:00+00:00", 60.0, 14.0] - ["2015-03-06T14:35:00+00:00", 60.0, 10.0] - - name: get valid metric id again 7 - GET: /v1/metric - - name: get measurements from metric and resample no granularity - GET: /v1/metric/$RESPONSE['$[0].id']/measures?resample=60 + GET: /v1/metric/$HISTORY['list valid metrics'].$RESPONSE['$[0].id']/measures?resample=60 status: 400 response_strings: - A granularity must be specified to resample - - name: get valid metric id again 8 - GET: /v1/metric - - name: get measurements from metric and bad resample - GET: /v1/metric/$RESPONSE['$[0].id']/measures?resample=abc + GET: /v1/metric/$HISTORY['list valid metrics'].$RESPONSE['$[0].id']/measures?resample=abc status: 400 - name: create valid metric two @@ -325,7 +301,7 @@ tests: $[0].archive_policy.name: cookies - name: get measure unknown aggregates - GET: /v1/aggregation/metric?metric=$RESPONSE['$[0].id']&aggregation=last + GET: /v1/aggregation/metric?metric=$HISTORY['get metric list for aggregates'].$RESPONSE['$[0].id']&aggregation=last status: 404 response_strings: - Aggregation method 'last' for metric $RESPONSE['$[0].id'] does not exist @@ -336,14 +312,8 @@ tests: response_strings: - Metric cee6ef1f-52cc-4a16-bbb5-648aedfd1c37 does not exist - - name: get metric list for delete - GET: /v1/metric - status: 200 - response_json_paths: - $[0].archive_policy.name: cookies - - name: delete metric - DELETE: /v1/metric/$RESPONSE['$[0].id'] + DELETE: /v1/metric/$HISTORY['get metric list for aggregates'].$RESPONSE['$[0].id'] status: 204 - name: delete metric again diff --git a/gnocchi/tests/functional/gabbits/pagination.yaml b/gnocchi/tests/functional/gabbits/pagination.yaml index c6ece552..ef85a379 100644 --- a/gnocchi/tests/functional/gabbits/pagination.yaml +++ b/gnocchi/tests/functional/gabbits/pagination.yaml @@ -371,24 +371,17 @@ tests: $[0].name: $RESPONSE['$[0].name'] $[1].name: $RESPONSE['$[1].name'] - - name: list all default order again - GET: /v1/metric - request_headers: - 
x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json - - name: list next three metrics default order - GET: /v1/metric?limit=4&marker=$RESPONSE['$[1].id'] + GET: /v1/metric?limit=4&marker=$HISTORY['list all default order'].$RESPONSE['$[1].id'] request_headers: x-user-id: 0fbb231484614b1a80131fc22f6afc9c x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea content-type: application/json response_json_paths: $.`len`: 3 - $[0].name: $RESPONSE['$[2].name'] - $[1].name: $RESPONSE['$[3].name'] - $[2].name: $RESPONSE['$[4].name'] + $[0].name: $HISTORY['list all default order'].$RESPONSE['$[2].name'] + $[1].name: $HISTORY['list all default order'].$RESPONSE['$[3].name'] + $[2].name: $HISTORY['list all default order'].$RESPONSE['$[4].name'] - name: list first two metrics order by user without direction GET: /v1/metric?limit=2&sort=name diff --git a/gnocchi/tests/functional/gabbits/search-metric.yaml b/gnocchi/tests/functional/gabbits/search-metric.yaml index ae93637c..4f477b71 100644 --- a/gnocchi/tests/functional/gabbits/search-metric.yaml +++ b/gnocchi/tests/functional/gabbits/search-metric.yaml @@ -70,74 +70,50 @@ tests: value: 12 status: 202 - - name: get metric id for search one + - name: get metric id GET: /v1/metric status: 200 response_json_paths: $[0].archive_policy.name: high - name: search with one correct granularity - POST: /v1/search/metric?metric_id=$RESPONSE['$[0].id']&granularity=1s + POST: /v1/search/metric?metric_id=$HISTORY['get metric id'].$RESPONSE['$[0].id']&granularity=1s request_headers: content-type: application/json data: "=": 12 status: 200 - - name: get metric id for search two - GET: /v1/metric - status: 200 - response_json_paths: - $[0].archive_policy.name: high - - name: search with multiple correct granularities - POST: /v1/search/metric?metric_id=$RESPONSE['$[0].id']&granularity=1second&granularity=2s + POST: /v1/search/metric?metric_id=$HISTORY['get metric 
id'].$RESPONSE['$[0].id']&granularity=1second&granularity=2s request_headers: content-type: application/json data: "=": 12 status: 200 - - name: get metric id for search three - GET: /v1/metric - status: 200 - response_json_paths: - $[0].archive_policy.name: high - - name: search with correct and incorrect granularities - POST: /v1/search/metric?metric_id=$RESPONSE['$[0].id']&granularity=1s&granularity=300 + POST: /v1/search/metric?metric_id=$HISTORY['get metric id'].$RESPONSE['$[0].id']&granularity=1s&granularity=300 request_headers: content-type: application/json data: "=": 12 status: 400 response_strings: - - Granularity '300.0' for metric $RESPONSE['$[0].id'] does not exist - - - name: get metric id for search four - GET: /v1/metric - status: 200 - response_json_paths: - $[0].archive_policy.name: high + - Granularity '300.0' for metric $HISTORY['get metric id'].$RESPONSE['$[0].id'] does not exist - name: search with incorrect granularity - POST: /v1/search/metric?metric_id=$RESPONSE['$[0].id']&granularity=300 + POST: /v1/search/metric?metric_id=$HISTORY['get metric id'].$RESPONSE['$[0].id']&granularity=300 request_headers: content-type: application/json data: "=": 12 status: 400 response_strings: - - Granularity '300.0' for metric $RESPONSE['$[0].id'] does not exist - - - name: get metric id for search five - GET: /v1/metric - status: 200 - response_json_paths: - $[0].archive_policy.name: high + - Granularity '300.0' for metric $HISTORY['get metric id'].$RESPONSE['$[0].id'] does not exist - name: search measure with wrong start - POST: /v1/search/metric?metric_id=$RESPONSE['$[0].id']&start=foobar + POST: /v1/search/metric?metric_id=$HISTORY['get metric id'].$RESPONSE['$[0].id']&start=foobar request_headers: content-type: application/json data: diff --git a/setup.cfg b/setup.cfg index a6badb4a..efbc99ed 100644 --- a/setup.cfg +++ b/setup.cfg @@ -65,7 +65,7 @@ doc = reno>=1.6.2 test = pifpaf>=0.12.0 - gabbi>=1.21.0 + gabbi>=1.30.0 coverage>=3.6 fixtures mock -- 
GitLab From 9b53bce0c5fb86fc8db3055d861d7b1f2715a00c Mon Sep 17 00:00:00 2001 From: gord chung Date: Fri, 10 Feb 2017 14:22:04 -0500 Subject: [PATCH 0625/1483] ensure original_resource_id is not none statsd resource doesn't have an original_resource_id so we need to set it by copying over id during upgrade Change-Id: Id516c5b1d091ca01688d6ad8cf1076a998d15333 Closes-Bug: #1662849 --- ...a63d3d186_original_resource_id_not_null.py | 27 +++++++++++++++++++ 1 file changed, 27 insertions(+) diff --git a/gnocchi/indexer/alembic/versions/1e1a63d3d186_original_resource_id_not_null.py b/gnocchi/indexer/alembic/versions/1e1a63d3d186_original_resource_id_not_null.py index fa29a598..bd73b12b 100644 --- a/gnocchi/indexer/alembic/versions/1e1a63d3d186_original_resource_id_not_null.py +++ b/gnocchi/indexer/alembic/versions/1e1a63d3d186_original_resource_id_not_null.py @@ -23,6 +23,8 @@ Create Date: 2017-01-26 19:33:35.209688 from alembic import op import sqlalchemy as sa +from sqlalchemy import func +import sqlalchemy_utils # revision identifiers, used by Alembic. @@ -32,8 +34,33 @@ branch_labels = None depends_on = None +def clean_substr(col, start, length): + return func.lower(func.substr(func.hex(col), start, length)) + + def upgrade(): + bind = op.get_bind() for table_name in ('resource', 'resource_history'): + table = sa.Table(table_name, sa.MetaData(), + sa.Column('id', + sqlalchemy_utils.types.uuid.UUIDType(), + nullable=False), + sa.Column('original_resource_id', sa.String(255))) + + # NOTE(gordc): mysql stores id as binary so we need to rebuild back to + # string uuid. 
+ if bind and bind.engine.name == "mysql": + vals = {'original_resource_id': + clean_substr(table.c.id, 1, 8) + '-' + + clean_substr(table.c.id, 9, 4) + '-' + + clean_substr(table.c.id, 13, 4) + '-' + + clean_substr(table.c.id, 17, 4) + '-' + + clean_substr(table.c.id, 21, 12)} + else: + vals = {'original_resource_id': table.c.id} + + op.execute(table.update().where( + table.c.original_resource_id.is_(None)).values(vals)) op.alter_column(table_name, "original_resource_id", nullable=False, existing_type=sa.String(255), existing_nullable=True) -- GitLab From 88cd1fc48096c5ecdf40ad9ced145285dab978e4 Mon Sep 17 00:00:00 2001 From: gord chung Date: Wed, 8 Feb 2017 18:45:34 +0000 Subject: [PATCH 0626/1483] test non-uuid and add debug for upgrade gate - show the output of dump to help debug - ensure non-uuids from v2.2 are valid - ensure statsd resource gets upgraded Change-Id: Ic31af11fcf542c8309052ee75b554aa15e8604c3 --- run-upgrade-tests.sh | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/run-upgrade-tests.sh b/run-upgrade-tests.sh index 1a1a6059..1bc27b89 100755 --- a/run-upgrade-tests.sh +++ b/run-upgrade-tests.sh @@ -13,6 +13,7 @@ RESOURCE_IDS=( "5a301761-aaaa-46e2-8900-8b4f6fe6675a" "5a301761-bbbb-46e2-8900-8b4f6fe6675a" "5a301761-cccc-46e2-8900-8b4f6fe6675a" + "non-uuid" ) [ "$have_resource_type_post" ] && RESOURCE_ID_EXT="5a301761/dddd/46e2/8900/8b4f6fe6675a" @@ -21,7 +22,7 @@ dump_data(){ dir="$1" mkdir -p $dir echo "* Dumping measures aggregations to $dir" - gnocchi resource list -c id -c type -c project_id -c user_id -c original_resource_id -c started_at -c ended_at -c revision_start -c revision_end > $dir/resources.list + gnocchi resource list -c id -c type -c project_id -c user_id -c original_resource_id -c started_at -c ended_at -c revision_start -c revision_end | tee $dir/resources.list for resource_id in ${RESOURCE_IDS[@]} $RESOURCE_ID_EXT; do for agg in min max mean sum ; do gnocchi measures show --aggregation $agg 
--resource-id $resource_id metric > $dir/${agg}.txt @@ -87,7 +88,7 @@ eval $(pifpaf run gnocchi --indexer-url $INDEXER_URL --storage-url $STORAGE_URL) export OS_AUTH_TYPE=gnocchi-noauth export GNOCCHI_USER_ID=admin export GNOCCHI_PROJECT_ID=admin -gnocchi resource delete $GNOCCHI_STATSD_RESOURCE_ID +original_statsd_resource_id=$GNOCCHI_STATSD_RESOURCE_ID inject_data $GNOCCHI_DATA # Encode resource id as it contains slashes and gnocchiclient does not encode it [ "$have_resource_type_post" ] && RESOURCE_ID_EXT="19235bb9-35ca-5f55-b7db-165cfb033c86" @@ -103,12 +104,14 @@ eval $(pifpaf --debug run gnocchi --indexer-url $INDEXER_URL --storage-url $STOR export OS_AUTH_TYPE=gnocchi-basic export GNOCCHI_USER=$GNOCCHI_USER_ID +# pifpaf creates a new statsd resource on each start gnocchi resource delete $GNOCCHI_STATSD_RESOURCE_ID RESOURCE_IDS=( "5a301761-aaaa-46e2-8900-8b4f6fe6675a" "5a301761-bbbb-46e2-8900-8b4f6fe6675a" "5a301761-cccc-46e2-8900-8b4f6fe6675a" + "24d2e3ed-c7c1-550f-8232-56c48809a6d4" ) # NOTE(sileht): / are now _ # NOTE(jdanjou): and we reencode for admin:admin, but we cannot authenticate as @@ -120,7 +123,9 @@ dump_data $GNOCCHI_DATA/new # NOTE(sileht): change the output of the old gnocchi to compare with the new without '/' $GSED -i -e "s,5a301761/dddd/46e2/8900/8b4f6fe6675a,5a301761_dddd_46e2_8900_8b4f6fe6675a,g" \ - -e "s,19235bb9-35ca-5f55-b7db-165cfb033c86,517920a9-2e50-58b8-88e8-25fd7aae1d8f,g" $GNOCCHI_DATA/old/resources.list + -e "s,19235bb9-35ca-5f55-b7db-165cfb033c86,517920a9-2e50-58b8-88e8-25fd7aae1d8f,g" \ + -e "s,None ,${original_statsd_resource_id},g" \ + -e "s,37d1416a-381a-5b6c-99ef-37d89d95f1e1,24d2e3ed-c7c1-550f-8232-56c48809a6d4,g" $GNOCCHI_DATA/old/resources.list echo "* Checking output difference between Gnocchi $old_version and $new_version" diff -uNr $GNOCCHI_DATA/old $GNOCCHI_DATA/new -- GitLab From 8a51f1f4e833aada36487250d27d33e2eb2744cf Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Fri, 3 Feb 2017 14:23:45 +0100 Subject: 
[PATCH 0627/1483] tests: make gabbi test no rely on legacy resources types Change-Id: I56291d04d7db5ff7fda547ad817c3dace0a5889e --- gnocchi/tests/functional/fixtures.py | 2 +- .../functional/gabbits/resource-type.yaml | 8 +- .../tests/functional/gabbits/resource.yaml | 206 +++++++----------- 3 files changed, 82 insertions(+), 134 deletions(-) diff --git a/gnocchi/tests/functional/fixtures.py b/gnocchi/tests/functional/fixtures.py index df98ed34..b25ad694 100644 --- a/gnocchi/tests/functional/fixtures.py +++ b/gnocchi/tests/functional/fixtures.py @@ -107,7 +107,7 @@ class ConfigFixture(fixture.GabbiFixture): index = indexer.get_driver(conf) index.connect() - index.upgrade(create_legacy_resource_types=True) + index.upgrade() # Set pagination to a testable value conf.set_override('max_limit', 7, 'api') diff --git a/gnocchi/tests/functional/gabbits/resource-type.yaml b/gnocchi/tests/functional/gabbits/resource-type.yaml index 9ffd74e3..fca3aaa3 100644 --- a/gnocchi/tests/functional/gabbits/resource-type.yaml +++ b/gnocchi/tests/functional/gabbits/resource-type.yaml @@ -17,7 +17,7 @@ tests: desc: only legacy resource types are present GET: /v1/resource_type response_json_paths: - $.`len`: 15 + $.`len`: 1 # Some bad cases @@ -174,9 +174,9 @@ tests: desc: we have a resource type now GET: $LAST_URL response_json_paths: - $.`len`: 16 - $.[11].name: my_custom_resource - $.[11].state: active + $.`len`: 2 + $.[1].name: my_custom_resource + $.[1].state: active - name: get the custom resource type GET: /v1/resource_type/my_custom_resource diff --git a/gnocchi/tests/functional/gabbits/resource.yaml b/gnocchi/tests/functional/gabbits/resource.yaml index 91b4b323..a9d7e040 100644 --- a/gnocchi/tests/functional/gabbits/resource.yaml +++ b/gnocchi/tests/functional/gabbits/resource.yaml @@ -59,7 +59,7 @@ tests: - name: root of resource GET: /v1/resource response_json_paths: - $.volume: $SCHEME://$NETLOC/v1/resource/volume + $.generic: $SCHEME://$NETLOC/v1/resource/generic - name: typo 
of resource GET: /v1/resoue @@ -72,13 +72,13 @@ tests: # Explore that GETting a list of resources demonstrates the expected # behaviors notably with regard to content negotiation. - - name: instance resource - desc: there are no instance resources yet - GET: /v1/resource/instance + - name: generic resource list + desc: there are no generic resources yet + GET: /v1/resource/generic response_strings: - "[]" - - name: instance resource bad accept + - name: generic resource bad accept desc: Expect 406 on bad accept type GET: $LAST_URL request_headers: @@ -87,7 +87,7 @@ tests: response_strings: - 406 Not Acceptable - - name: instance resource complex accept + - name: generic resource complex accept desc: failover accept media type appropriately GET: $LAST_URL request_headers: @@ -160,36 +160,18 @@ tests: data: '{"id": "f93450f2-d8a5-4d67-9985-02511241e7d1", "started_at": "2014-01-03T02:02:02.000000", "user_id": "0fbb231484614b1a80131fc22f6afc9c", "project_id": "f3d41b770cc14f0bb94a1d5be9c0e3ea"}' status: 415 -# Create a new instance resource, demonstrate that including no data +# Create a new generic resource, demonstrate that including no data # gets a useful 400 response. 
- - name: post instance resource no data - POST: /v1/resource/instance + - name: post generic resource no data + POST: /v1/resource/generic request_headers: x-user-id: 0fbb231484614b1a80131fc22f6afc9c x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea content-type: application/json status: 400 - - name: post instance resource with missing data - POST: $LAST_URL - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json - data: - id: 75C44741-CC60-4033-804E-2D3098C7D2E9 - user_id: 0fbb231484614b1a80131fc22f6afc9c - project_id: f3d41b770cc14f0bb94a1d5be9c0e3ea - flavor_id: "2" - image_ref: http://image - host: compute1 - status: 400 - response_strings: - - "Invalid input: required key not provided @ data[" - - "'display_name']" - - - name: post instance with invalid metric name + - name: post generic with invalid metric name POST: $LAST_URL request_headers: x-user-id: 0fbb231484614b1a80131fc22f6afc9c @@ -203,7 +185,7 @@ tests: response_strings: - "'/' is not supported in metric name" - - name: post instance resource + - name: post generic resource to modify POST: $LAST_URL request_headers: x-user-id: 0fbb231484614b1a80131fc22f6afc9c @@ -213,44 +195,40 @@ tests: id: 75C44741-CC60-4033-804E-2D3098C7D2E9 user_id: 0fbb231484614b1a80131fc22f6afc9c project_id: f3d41b770cc14f0bb94a1d5be9c0e3ea - flavor_id: "2" - image_ref: http://image - host: compute1 - display_name: myvm status: 201 response_json_paths: $.metrics: {} # empty dictionary -# PATCH that instance resource to change its attributes and to +# PATCH that generic resource to change its attributes and to # associate metrics. If a metric does not exist there should be a # graceful failure. 
- - name: patch instance resource + - name: patch generic resource PATCH: $LOCATION request_headers: x-user-id: 0fbb231484614b1a80131fc22f6afc9c x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea content-type: application/json data: - host: compute2 + user_id: foobar status: 200 response_json_paths: - host: compute2 + user_id: foobar - - name: patch instance resource with same data + - name: patch generic resource with same data desc: Ensure no useless revision have been created - PATCH: /v1/resource/instance/75C44741-CC60-4033-804E-2D3098C7D2E9 + PATCH: /v1/resource/generic/75C44741-CC60-4033-804E-2D3098C7D2E9 request_headers: x-user-id: 0fbb231484614b1a80131fc22f6afc9c x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea content-type: application/json data: - host: compute2 + user_id: foobar status: 200 response_json_paths: - host: compute2 + user_id: foobar revision_start: $RESPONSE['$.revision_start'] - - name: patch instance resource with id + - name: patch generic resource with id PATCH: $LAST_URL request_headers: x-user-id: 0fbb231484614b1a80131fc22f6afc9c @@ -263,7 +241,7 @@ tests: - "Invalid input: extra keys not allowed @ data[" - "'id']" - - name: patch instance with metrics + - name: patch generic with metrics PATCH: $LAST_URL request_headers: x-user-id: 0fbb231484614b1a80131fc22f6afc9c @@ -277,9 +255,9 @@ tests: response_strings: - '"disk.iops": ' - - name: get instance history + - name: get generic history desc: Ensure we can get the history - GET: /v1/resource/instance/75C44741-CC60-4033-804E-2D3098C7D2E9/history?sort=revision_end:asc-nullslast + GET: /v1/resource/generic/75C44741-CC60-4033-804E-2D3098C7D2E9/history?sort=revision_end:asc-nullslast request_headers: request_headers: x-user-id: 0fbb231484614b1a80131fc22f6afc9c @@ -287,13 +265,11 @@ tests: content-type: application/json response_json_paths: $.`len`: 2 - $[0].host: compute1 - $[1].host: compute2 $[1].revision_end: null $[1].metrics.'disk.iops': $RESPONSE["metrics.'disk.iops'"] - - name: patch 
instance bad metric association - PATCH: /v1/resource/instance/75C44741-CC60-4033-804E-2D3098C7D2E9 + - name: patch generic bad metric association + PATCH: /v1/resource/generic/75C44741-CC60-4033-804E-2D3098C7D2E9 request_headers: x-user-id: 0fbb231484614b1a80131fc22f6afc9c x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea @@ -305,7 +281,7 @@ tests: response_strings: - Metric f3d41b77-0cc1-4f0b-b94a-1d5be9c0e3ea does not exist - - name: patch instance with bad archive policy + - name: patch generic with bad archive policy PATCH: $LAST_URL request_headers: x-user-id: 0fbb231484614b1a80131fc22f6afc9c @@ -319,7 +295,7 @@ tests: response_strings: - Archive policy noexist does not exist - - name: patch instance with no archive policy rule + - name: patch generic with no archive policy rule PATCH: $LAST_URL request_headers: x-user-id: 0fbb231484614b1a80131fc22f6afc9c @@ -332,7 +308,7 @@ tests: response_strings: - No archive policy name specified and no archive policy rule found matching the metric name disk.iops - - name: patch instance with archive policy rule + - name: patch generic with archive policy rule PATCH: $LAST_URL request_headers: x-user-id: 0fbb231484614b1a80131fc22f6afc9c @@ -351,7 +327,7 @@ tests: x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea content-type: application/json data: - host: compute2 + user_id: foobar - name: patch resource empty dict desc: an empty dict in patch is an existence check @@ -363,7 +339,7 @@ tests: data: "{}" status: 200 data: - host: compute2 + user_id: foobar - name: patch resource without change with metrics in response desc: an empty dict in patch is an existence check @@ -377,7 +353,7 @@ tests: response_json_paths: $.metrics.'disk.io.rate': $RESPONSE["$.metrics.'disk.io.rate'"] - - name: patch instance with invalid metric name + - name: patch generic with invalid metric name PATCH: $LAST_URL request_headers: x-user-id: 0fbb231484614b1a80131fc22f6afc9c @@ -393,16 +369,16 @@ tests: # Failure modes for history - - name: post 
instance history + - name: post generic history desc: should don't work - POST: /v1/resource/instance/75C44741-CC60-4033-804E-2D3098C7D2E9/history + POST: /v1/resource/generic/75C44741-CC60-4033-804E-2D3098C7D2E9/history request_headers: x-user-id: 0fbb231484614b1a80131fc22f6afc9c x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea content-type: application/json status: 405 - - name: delete instance history + - name: delete generic history desc: should don't work DELETE: $LAST_URL request_headers: @@ -415,7 +391,7 @@ tests: - name: patch resource no data desc: providing no data is an error - PATCH: /v1/resource/instance/75C44741-CC60-4033-804E-2D3098C7D2E9 + PATCH: /v1/resource/generic/75C44741-CC60-4033-804E-2D3098C7D2E9 request_headers: x-user-id: 0fbb231484614b1a80131fc22f6afc9c x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea @@ -439,7 +415,7 @@ tests: - name: patch noexit resource desc: "patching something that doesn't exist is a 404" - PATCH: /v1/resource/instance/77777777-CC60-4033-804E-2D3098C7D2E9 + PATCH: /v1/resource/generic/77777777-CC60-4033-804E-2D3098C7D2E9 request_headers: x-user-id: 0fbb231484614b1a80131fc22f6afc9c x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea @@ -460,7 +436,7 @@ tests: - name: get bad resource id desc: https://bugs.launchpad.net/gnocchi/+bug/1425588 - GET: /v1/resource/instance/noexist + GET: /v1/resource/generic/noexist request_headers: x-user-id: 0fbb231484614b1a80131fc22f6afc9c x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea @@ -469,7 +445,7 @@ tests: - The resource could not be found. 
- name: get metrics for this not-existing resource - GET: /v1/resource/instance/77777777-CC60-4033-804E-2D3098C7D2E9/metric/cpu.util + GET: /v1/resource/generic/77777777-CC60-4033-804E-2D3098C7D2E9/metric/cpu.util request_headers: x-user-id: 0fbb231484614b1a80131fc22f6afc9c x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea @@ -478,19 +454,19 @@ tests: # List resources - - name: list instance resources no auth - GET: /v1/resource/instance + - name: list generic resources no auth + GET: /v1/resource/generic response_strings: - "[]" - - name: list instance resources + - name: list generic resources GET: $LAST_URL request_headers: x-user-id: 0fbb231484614b1a80131fc22f6afc9c x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea response_json_paths: - $[0].host: compute2 - $[-1].host: compute2 + $[0].user_id: 0fbb231484614b1a80131fc22f6afc9c + $[-1].user_id: foobar - name: list all resources GET: /v1/resource/generic @@ -502,8 +478,8 @@ tests: # Metric handling when POSTing resources. - - name: post new instance with non-existent metrics - POST: /v1/resource/instance + - name: post new generic with non-existent metrics + POST: /v1/resource/generic request_headers: x-user-id: 0fbb231484614b1a80131fc22f6afc9c x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea @@ -512,15 +488,11 @@ tests: id: 85C44741-CC60-4033-804E-2D3098C7D2E9 user_id: 0fbb231484614b1a80131fc22f6afc9c project_id: f3d41b770cc14f0bb94a1d5be9c0e3ea - flavor_id: "2" - image_ref: http://image - host: compute3 - display_name: myvm2 metrics: cpu.util: 10 status: 400 - - name: post new instance with metrics bad policy + - name: post new generic with metrics bad policy POST: $LAST_URL request_headers: x-user-id: 0fbb231484614b1a80131fc22f6afc9c @@ -530,16 +502,12 @@ tests: id: 85C44741-CC60-4033-804E-2D3098C7D2E9 user_id: 0fbb231484614b1a80131fc22f6afc9c project_id: f3d41b770cc14f0bb94a1d5be9c0e3ea - flavor_id: "2" - image_ref: http://image - host: compute3 - display_name: myvm2 metrics: cpu.util: archive_policy_name: noexist 
status: 400 - - name: post new instance with metrics no policy rule + - name: post new generic with metrics no policy rule POST: $LAST_URL request_headers: x-user-id: 0fbb231484614b1a80131fc22f6afc9c @@ -549,17 +517,13 @@ tests: id: 85BABE39-F7F7-455A-877B-62C22E11AA40 user_id: 0fbb231484614b1a80131fc22f6afc9c project_id: f3d41b770cc14f0bb94a1d5be9c0e3ea - flavor_id: "2" - image_ref: http://image - host: compute3 - display_name: myvm2 metrics: cpu.util: {} status: 400 response_strings: - No archive policy name specified and no archive policy rule found matching the metric name cpu.util - - name: post new instance with metrics using policy rule + - name: post new generic with metrics using policy rule POST: $LAST_URL request_headers: x-user-id: 0fbb231484614b1a80131fc22f6afc9c @@ -569,15 +533,11 @@ tests: id: 85BABE39-F7F7-455A-877B-62C22E11AA40 user_id: 0fbb231484614b1a80131fc22f6afc9c project_id: f3d41b770cc14f0bb94a1d5be9c0e3ea - flavor_id: "2" - image_ref: http://image - host: compute3 - display_name: myvm2 metrics: disk.io.rate: {} status: 201 - - name: post new instance with metrics + - name: post new generic with metrics POST: $LAST_URL request_headers: x-user-id: 0fbb231484614b1a80131fc22f6afc9c @@ -587,10 +547,6 @@ tests: id: d13982cb-4cce-4f84-a96e-7581be1e599c user_id: 0fbb231484614b1a80131fc22f6afc9c project_id: f3d41b770cc14f0bb94a1d5be9c0e3ea - flavor_id: "2" - image_ref: http://image - host: compute3 - display_name: myvm2 metrics: disk.util: archive_policy_name: medium @@ -599,7 +555,7 @@ tests: created_by_user_id: 0fbb231484614b1a80131fc22f6afc9c created_by_project_id: f3d41b770cc14f0bb94a1d5be9c0e3ea - - name: post new instance with metrics and un-normalized user/project id from keystone middleware + - name: post new generic with metrics and un-normalized user/project id from keystone middleware POST: $LAST_URL request_headers: x-user-id: 0fbb231484614b1a80131fc22f6afc9c @@ -607,10 +563,6 @@ tests: content-type: application/json data: id: 
85C44741-CC60-4033-804E-2D3098C7D2E9 - flavor_id: "2" - image_ref: http://image - host: compute3 - display_name: myvm2 metrics: cpu.util: archive_policy_name: medium @@ -622,7 +574,7 @@ tests: - name: get metrics for this resource desc: with async measure handling this is a null test - GET: /v1/resource/instance/$RESPONSE['$.id']/metric/cpu.util/measures + GET: /v1/resource/generic/$RESPONSE['$.id']/metric/cpu.util/measures request_headers: x-user-id: 0fbb231484614b1a80131fc22f6afc9c x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea @@ -632,14 +584,14 @@ tests: # Interrogate the NamedMetricController - - name: list the instances - GET: /v1/resource/instance + - name: list the generics + GET: /v1/resource/generic request_headers: x-user-id: 0fbb231484614b1a80131fc22f6afc9c x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - - name: request metrics from one of the instances - GET: /v1/resource/instance/$RESPONSE['$[-1].id']/metric + - name: request metrics from one of the generics + GET: /v1/resource/generic/$RESPONSE['$[-1].id']/metric request_headers: x-user-id: 0fbb231484614b1a80131fc22f6afc9c x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea @@ -650,15 +602,15 @@ tests: - name: request metrics from non uuid metrics desc: 404 from GenericResourceController - GET: /v1/resource/instance/not.a.uuid/metric + GET: /v1/resource/generic/not.a.uuid/metric request_headers: x-user-id: 0fbb231484614b1a80131fc22f6afc9c x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea content-type: application/json status: 404 - - name: request cpuutil metric from instance - GET: /v1/resource/instance/85C44741-CC60-4033-804E-2D3098C7D2E9/metric/cpu.util + - name: request cpuutil metric from generic + GET: /v1/resource/generic/85C44741-CC60-4033-804E-2D3098C7D2E9/metric/cpu.util request_headers: x-user-id: 0fbb231484614b1a80131fc22f6afc9c x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea @@ -666,7 +618,7 @@ tests: $.created_by_project_id: f3d41b770cc14f0bb94a1d5be9c0e3ea $.archive_policy.name: medium - - 
name: try post cpuutil metric to instance + - name: try post cpuutil metric to generic POST: $LAST_URL request_headers: x-user-id: 0fbb231484614b1a80131fc22f6afc9c @@ -674,9 +626,9 @@ tests: content-type: application/json status: 405 - - name: request cpuutil measures from instance + - name: request cpuutil measures from generic desc: with async measure handling this is a null test - GET: /v1/resource/instance/85C44741-CC60-4033-804E-2D3098C7D2E9/metric/cpu.util/measures + GET: /v1/resource/generic/85C44741-CC60-4033-804E-2D3098C7D2E9/metric/cpu.util/measures request_headers: x-user-id: 0fbb231484614b1a80131fc22f6afc9c x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea @@ -709,8 +661,8 @@ tests: $[0][1]: 1.0 $[0][2]: 43.100000000000001 - - name: post metric at instance - POST: /v1/resource/instance/85C44741-CC60-4033-804E-2D3098C7D2E9/metric + - name: post metric at generic + POST: /v1/resource/generic/85C44741-CC60-4033-804E-2D3098C7D2E9/metric request_headers: x-user-id: 0fbb231484614b1a80131fc22f6afc9c x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea @@ -721,7 +673,7 @@ tests: archive_policy_name: medium response_headers: - - name: post metric at instance with empty definition + - name: post metric at generic with empty definition POST: $LAST_URL request_headers: x-user-id: 0fbb231484614b1a80131fc22f6afc9c @@ -733,7 +685,7 @@ tests: response_strings: - No archive policy name specified and no archive policy rule found matching the metric name foo.bar - - name: post metric at instance using archive policy rule + - name: post metric at generic using archive policy rule POST: $LAST_URL request_headers: x-user-id: 0fbb231484614b1a80131fc22f6afc9c @@ -743,7 +695,7 @@ tests: data: disk.io.rate: {} - - name: duplicate metrics at instance + - name: duplicate metrics at generic POST: $LAST_URL request_headers: x-user-id: 0fbb231484614b1a80131fc22f6afc9c @@ -756,7 +708,7 @@ tests: response_strings: - Named metric electron.spin already exists - - name: post metrics at instance 
bad policy + - name: post metrics at generic bad policy POST: $LAST_URL request_headers: x-user-id: 0fbb231484614b1a80131fc22f6afc9c @@ -771,8 +723,8 @@ tests: # Check bad timestamps - - name: post new instance with bad timestamp - POST: /v1/resource/instance + - name: post new generic with bad timestamp + POST: /v1/resource/generic request_headers: x-user-id: 0fbb231484614b1a80131fc22f6afc9c x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea @@ -781,10 +733,6 @@ tests: id: 95C44741-CC60-4033-804E-2D3098C7D2E9 user_id: 0fbb231484614b1a80131fc22f6afc9c project_id: f3d41b770cc14f0bb94a1d5be9c0e3ea - flavor_id: "2" - image_ref: http://image - host: compute3 - display_name: myvm2 metrics: cpu.util: archive_policy_name: medium @@ -798,7 +746,7 @@ tests: - name: post to non uuid metrics desc: 404 from GenericResourceController - POST: /v1/resource/instance/not.a.uuid/metric + POST: /v1/resource/generic/not.a.uuid/metric request_headers: x-user-id: 0fbb231484614b1a80131fc22f6afc9c x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea @@ -810,7 +758,7 @@ tests: - name: post to missing uuid metrics desc: 404 from NamedMetricController - POST: /v1/resource/instance/d5a5994e-ee90-11e4-88cf-685b35afa334/metric + POST: /v1/resource/generic/d5a5994e-ee90-11e4-88cf-685b35afa334/metric request_headers: x-user-id: 0fbb231484614b1a80131fc22f6afc9c x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea @@ -824,7 +772,7 @@ tests: - name: post measure on unknown metric desc: 404 from NamedMetricController with metric error - POST: /v1/resource/instance/85C44741-CC60-4033-804E-2D3098C7D2E9/metric/unknown/measures + POST: /v1/resource/generic/85C44741-CC60-4033-804E-2D3098C7D2E9/metric/unknown/measures request_headers: x-user-id: 0fbb231484614b1a80131fc22f6afc9c x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea @@ -836,17 +784,17 @@ tests: response_strings: - Metric unknown does not exist -# DELETE-ing instances +# DELETE-ing generics - - name: delete instance - DELETE: 
/v1/resource/instance/75C44741-CC60-4033-804E-2D3098C7D2E9 + - name: delete generic + DELETE: /v1/resource/generic/75C44741-CC60-4033-804E-2D3098C7D2E9 request_headers: x-user-id: 0fbb231484614b1a80131fc22f6afc9c x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea status: 204 - - name: delete noexist instance - DELETE: /v1/resource/instance/77777777-CC60-4033-804E-2D3098C7D2E9 + - name: delete noexist generic + DELETE: /v1/resource/generic/77777777-CC60-4033-804E-2D3098C7D2E9 request_headers: x-user-id: 0fbb231484614b1a80131fc22f6afc9c x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea -- GitLab From dd7507426057eb3af99d9077880be021ae048330 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Fri, 3 Feb 2017 14:24:35 +0100 Subject: [PATCH 0628/1483] devstack: do not create legacy resource type if Ceilometer is enabled Ceilometer knows how to create its own resource types. Change-Id: I94ae031a72414262f091860a8611dbf171ad2737 --- devstack/plugin.sh | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/devstack/plugin.sh b/devstack/plugin.sh index 4cced8d4..62bc8d9f 100644 --- a/devstack/plugin.sh +++ b/devstack/plugin.sh @@ -347,11 +347,7 @@ function init_gnocchi { if is_service_enabled mysql postgresql; then recreate_database gnocchi fi - if is_service_enabled ceilometer; then - $GNOCCHI_BIN_DIR/gnocchi-upgrade --create-legacy-resource-types - else - $GNOCCHI_BIN_DIR/gnocchi-upgrade - fi + $GNOCCHI_BIN_DIR/gnocchi-upgrade } function preinstall_gnocchi { -- GitLab From 8d07689a6c14aea1dc618178eb63e5d2ed83c0b9 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Fri, 3 Feb 2017 14:42:24 +0100 Subject: [PATCH 0629/1483] indexer: remove Ceilometer legacy resources Change-Id: Ia7d4ea3d1b9b631ac4c399ae7245e42a531f862a --- gnocchi/cli.py | 5 +-- gnocchi/indexer/__init__.py | 2 +- gnocchi/indexer/sqlalchemy.py | 37 ++++++------------- gnocchi/indexer/sqlalchemy_base.py | 14 ------- .../indexer/sqlalchemy/test_migrations.py | 2 +- 
...ceilometer-resources-16da2061d6d3f506.yaml | 3 ++ 6 files changed, 18 insertions(+), 45 deletions(-) create mode 100644 releasenotes/notes/remove-legacy-ceilometer-resources-16da2061d6d3f506.yaml diff --git a/gnocchi/cli.py b/gnocchi/cli.py index fc9be13a..30591368 100644 --- a/gnocchi/cli.py +++ b/gnocchi/cli.py @@ -56,16 +56,13 @@ def upgrade(): help="Skip storage upgrade."), cfg.BoolOpt("skip-archive-policies-creation", default=False, help="Skip default archive policies creation."), - cfg.BoolOpt("create-legacy-resource-types", default=False, - help="Creation of Ceilometer legacy resource types.") ]) conf = service.prepare_service(conf=conf) index = indexer.get_driver(conf) index.connect() if not conf.skip_index: LOG.info("Upgrading indexer %s", index) - index.upgrade( - create_legacy_resource_types=conf.create_legacy_resource_types) + index.upgrade() if not conf.skip_storage: s = storage.get_driver(conf) LOG.info("Upgrading storage %s", s) diff --git a/gnocchi/indexer/__init__.py b/gnocchi/indexer/__init__.py index fdbe7ce8..7d29ba92 100644 --- a/gnocchi/indexer/__init__.py +++ b/gnocchi/indexer/__init__.py @@ -273,7 +273,7 @@ class IndexerDriver(object): pass @staticmethod - def upgrade(nocreate=False, create_legacy_resource_types=False): + def upgrade(nocreate=False): pass @staticmethod diff --git a/gnocchi/indexer/sqlalchemy.py b/gnocchi/indexer/sqlalchemy.py index 07ccac03..4cda9bd2 100644 --- a/gnocchi/indexer/sqlalchemy.py +++ b/gnocchi/indexer/sqlalchemy.py @@ -46,6 +46,7 @@ import sqlalchemy_utils from gnocchi import exceptions from gnocchi import indexer from gnocchi.indexer import sqlalchemy_base as base +from gnocchi import resource_type from gnocchi import utils Base = base.Base @@ -310,7 +311,7 @@ class SQLAlchemyIndexer(indexer.IndexerDriver): def get_engine(self): return self.facade.get_engine() - def upgrade(self, nocreate=False, create_legacy_resource_types=False): + def upgrade(self, nocreate=False): from alembic import command from alembic 
import migration @@ -328,30 +329,16 @@ class SQLAlchemyIndexer(indexer.IndexerDriver): else: command.upgrade(cfg, "head") - # TODO(sileht): generic shouldn't be a particular case - # we must create a rt_generic and rt_generic_history table - # like other type - for rt in base.get_legacy_resource_types(): - if not (rt.name == "generic" or create_legacy_resource_types): - continue - - try: - with self.facade.writer() as session: - session.add(rt) - except exception.DBDuplicateEntry: - continue - - if rt.name != "generic": - try: - self._RESOURCE_TYPE_MANAGER.map_and_create_tables( - rt, self.facade) - except Exception: - self._set_resource_type_state(rt.name, "creation_error") - LOG.exception('Fail to create tables for ' - 'resource_type "%s"', rt.name) - continue - - self._set_resource_type_state(rt.name, "active") + try: + with self.facade.writer() as session: + session.add( + ResourceType( + name="generic", + tablename="generic", + state="active", + attributes=resource_type.ResourceTypeAttributes())) + except exception.DBDuplicateEntry: + pass # NOTE(jd) We can have deadlock errors either here or later in # map_and_create_tables(). 
We can't decorate create_resource_type() diff --git a/gnocchi/indexer/sqlalchemy_base.py b/gnocchi/indexer/sqlalchemy_base.py index a8ff6482..da36b186 100644 --- a/gnocchi/indexer/sqlalchemy_base.py +++ b/gnocchi/indexer/sqlalchemy_base.py @@ -32,7 +32,6 @@ import sqlalchemy_utils from gnocchi import archive_policy from gnocchi import indexer -from gnocchi.indexer import sqlalchemy_legacy_resources as legacy from gnocchi import resource_type from gnocchi import storage from gnocchi import utils @@ -234,19 +233,6 @@ RESOURCE_TYPE_SCHEMA_MANAGER = resource_type.ResourceTypeSchemaManager( "gnocchi.indexer.sqlalchemy.resource_type_attribute") -def get_legacy_resource_types(): - resource_types = [] - for name, attributes in legacy.ceilometer_resources.items(): - tablename = legacy.ceilometer_tablenames.get(name, name) - attrs = RESOURCE_TYPE_SCHEMA_MANAGER.attributes_from_dict( - attributes) - resource_types.append(ResourceType(name=name, - tablename=tablename, - state="creating", - attributes=attrs)) - return resource_types - - class ResourceTypeAttributes(sqlalchemy_utils.JSONType): def process_bind_param(self, attributes, dialect): return super(ResourceTypeAttributes, self).process_bind_param( diff --git a/gnocchi/tests/indexer/sqlalchemy/test_migrations.py b/gnocchi/tests/indexer/sqlalchemy/test_migrations.py index da44cb29..781236fd 100644 --- a/gnocchi/tests/indexer/sqlalchemy/test_migrations.py +++ b/gnocchi/tests/indexer/sqlalchemy/test_migrations.py @@ -50,7 +50,7 @@ class ModelsMigrationsSync( 'indexer') self.index = indexer.get_driver(self.conf) self.index.connect() - self.index.upgrade(nocreate=True, create_legacy_resource_types=True) + self.index.upgrade(nocreate=True) self.addCleanup(self._drop_database) def _drop_database(self): diff --git a/releasenotes/notes/remove-legacy-ceilometer-resources-16da2061d6d3f506.yaml b/releasenotes/notes/remove-legacy-ceilometer-resources-16da2061d6d3f506.yaml new file mode 100644 index 00000000..4d6e0f87 --- /dev/null +++ 
b/releasenotes/notes/remove-legacy-ceilometer-resources-16da2061d6d3f506.yaml @@ -0,0 +1,3 @@ +--- +deprecations: + - The creation of the legacy Ceilometer resource types has been removed. -- GitLab From c3793dd80d3fe1f5307cc26cbc45f9fa025558ce Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Fri, 3 Feb 2017 18:25:49 +0100 Subject: [PATCH 0630/1483] docs: generate docs for all stable versions This generate the documentation of all versions from git tags This uses a fresh database for running doc build and don't hardcode the doc source directory. Closes-Bug: #1566709 Change-Id: I9d0f1fcb2fe44177d95dee0915e849e48155f53d --- doc/source/conf.py | 7 +++++++ gnocchi/gendoc.py | 39 ++++++++++++++++++++++++++++++++++++++- tox.ini | 6 +++++- 3 files changed, 50 insertions(+), 2 deletions(-) diff --git a/doc/source/conf.py b/doc/source/conf.py index 51909160..109ddcd2 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -178,3 +178,10 @@ html_static_path = ['_static'] # Output file base name for HTML help builder. htmlhelp_basename = 'gnocchidoc' + +# Multiversion docs +scv_sort = ('semver',) +scv_greatest_tag = True +scv_priority = 'branches' +scv_whitelist_branches = ('master',) +scv_whitelist_tags = ('3.1.0', '3.0.4', '2.2.1') diff --git a/gnocchi/gendoc.py b/gnocchi/gendoc.py index 996c715b..7b9a8a11 100644 --- a/gnocchi/gendoc.py +++ b/gnocchi/gendoc.py @@ -15,6 +15,10 @@ # under the License. 
from __future__ import absolute_import import json +import os +import subprocess +import sys +import tempfile import jinja2 import six @@ -24,7 +28,6 @@ import yaml from gnocchi.tests import test_rest - # HACK(jd) Not sure why but Sphinx setup this multiple times, so we just avoid # doing several times the requests by using this global variable :( _RUN = False @@ -103,10 +106,44 @@ class ScenarioList(list): return super(ScenarioList, self).__getitem__(key) +multiversion_hack = """ +import sys +import os + +srcdir = os.path.join("%s", "..", "..") +os.chdir(srcdir) +sys.path.insert(0, srcdir) + +class FakeApp(object): + def info(self, *args, **kwasrgs): + pass + +import gnocchi.gendoc +gnocchi.gendoc.setup(FakeApp()) +""" + + def setup(app): global _RUN if _RUN: return + + # NOTE(sileht): On gnocchi.xyz, we build a multiversion of the docs + # all versions are built with the master gnocchi.gendoc sphinx extension. + # So the hack here run an other python script to generate the rest.rst + # file of old version of the module. + # It also drop the database before each run. 
+ if sys.argv[0].endswith("sphinx-versioning"): + subprocess.call(["dropdb", os.environ['PGDATABASE']]) + subprocess.call(["createdb", os.environ['PGDATABASE']]) + + with tempfile.NamedTemporaryFile() as f: + f.write(multiversion_hack % app.confdir) + f.flush() + subprocess.call(['python', f.name]) + _RUN = True + return + webapp = _setup_test_app() # TODO(jd) Do not hardcode doc/source with open("doc/source/rest.yaml") as f: diff --git a/tox.ini b/tox.ini index a694ff7d..829f4be4 100644 --- a/tox.ini +++ b/tox.ini @@ -140,5 +140,9 @@ commands = doc8 --ignore-path doc/source/rest.rst doc/source [testenv:docs-gnocchi.xyz] deps = .[file,postgresql,test,doc] sphinx_rtd_theme + sphinxcontrib-versioning +# for 2.x doc + pytimeparse + retrying commands = - pifpaf -g GNOCCHI_INDEXER_URL run postgresql -- sphinx-build -D html_theme=sphinx_rtd_theme doc/source doc/build/html + pifpaf -g GNOCCHI_INDEXER_URL run postgresql -- sphinx-versioning build doc/source doc/build/html -- -D html_theme=sphinx_rtd_theme -- GitLab From 1dc358235670794119d7bf9362012445e152ce66 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Tue, 7 Feb 2017 18:23:52 +0100 Subject: [PATCH 0631/1483] s3: set maximum length for s3_bucket_prefix option Change-Id: I088d9f7b5079523f0ec5eb1a1282f0c145131f9e --- gnocchi/storage/s3.py | 3 +++ requirements.txt | 2 +- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/gnocchi/storage/s3.py b/gnocchi/storage/s3.py index 917036a3..d5163dab 100644 --- a/gnocchi/storage/s3.py +++ b/gnocchi/storage/s3.py @@ -40,6 +40,9 @@ OPTS = [ default=os.getenv("AWS_SECRET_ACCESS_KEY"), help='S3 secret access key'), cfg.StrOpt('s3_bucket_prefix', + # Max bucket length is 63 and we use "-" as separator + # 63 - 1 - len(uuid) = 26 + max_length=26, default='gnocchi', help='Prefix to namespace metric bucket.'), ] diff --git a/requirements.txt b/requirements.txt index ea3d96a0..dae4c542 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,7 +1,7 @@ pbr numpy>=1.9.0 
iso8601 -oslo.config>=2.6.0 +oslo.config>=2.7.0 oslo.log>=2.3.0 oslo.policy>=0.3.0 oslo.serialization>=1.4.0 -- GitLab From cc5c9f3e0d3b967dcf60c1b85063bfd72cf7f8c1 Mon Sep 17 00:00:00 2001 From: Anh Tran Date: Thu, 16 Feb 2017 15:23:37 +0700 Subject: [PATCH 0632/1483] Remove unused logging import Change-Id: I3a7d73881c12b502dd3ec9ce535524b907871ed2 --- gnocchi/storage/ceph.py | 3 --- gnocchi/storage/incoming/s3.py | 3 --- gnocchi/storage/incoming/swift.py | 3 --- gnocchi/storage/s3.py | 3 --- gnocchi/storage/swift.py | 3 --- 5 files changed, 15 deletions(-) diff --git a/gnocchi/storage/ceph.py b/gnocchi/storage/ceph.py index a887a85a..c082b2fb 100644 --- a/gnocchi/storage/ceph.py +++ b/gnocchi/storage/ceph.py @@ -16,15 +16,12 @@ import errno from oslo_config import cfg -from oslo_log import log from gnocchi import storage from gnocchi.storage import _carbonara from gnocchi.storage.common import ceph -LOG = log.getLogger(__name__) - OPTS = [ cfg.StrOpt('ceph_pool', default='gnocchi', diff --git a/gnocchi/storage/incoming/s3.py b/gnocchi/storage/incoming/s3.py index 1554833f..747d583a 100644 --- a/gnocchi/storage/incoming/s3.py +++ b/gnocchi/storage/incoming/s3.py @@ -16,7 +16,6 @@ from collections import defaultdict import contextlib import datetime -import logging import uuid import six @@ -28,8 +27,6 @@ from gnocchi.storage.incoming import _carbonara boto3 = s3.boto3 botocore = s3.botocore -LOG = logging.getLogger(__name__) - class S3Storage(_carbonara.CarbonaraBasedStorage): diff --git a/gnocchi/storage/incoming/swift.py b/gnocchi/storage/incoming/swift.py index 7b996c0c..74ee93be 100644 --- a/gnocchi/storage/incoming/swift.py +++ b/gnocchi/storage/incoming/swift.py @@ -16,7 +16,6 @@ import contextlib import datetime import uuid -from oslo_log import log import six from gnocchi.storage.common import swift @@ -25,8 +24,6 @@ from gnocchi.storage.incoming import _carbonara swclient = swift.swclient swift_utils = swift.swift_utils -LOG = log.getLogger(__name__) - 
class SwiftStorage(_carbonara.CarbonaraBasedStorage): def __init__(self, conf): diff --git a/gnocchi/storage/s3.py b/gnocchi/storage/s3.py index 917036a3..e3e9bb3a 100644 --- a/gnocchi/storage/s3.py +++ b/gnocchi/storage/s3.py @@ -13,7 +13,6 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. -import logging import os from oslo_config import cfg @@ -25,8 +24,6 @@ from gnocchi.storage.common import s3 boto3 = s3.boto3 botocore = s3.botocore -LOG = logging.getLogger(__name__) - OPTS = [ cfg.StrOpt('s3_endpoint_url', help='S3 endpoint URL'), diff --git a/gnocchi/storage/swift.py b/gnocchi/storage/swift.py index 76eedb3b..ce671962 100644 --- a/gnocchi/storage/swift.py +++ b/gnocchi/storage/swift.py @@ -15,7 +15,6 @@ # under the License. from oslo_config import cfg -from oslo_log import log from gnocchi import storage from gnocchi.storage import _carbonara @@ -24,8 +23,6 @@ from gnocchi.storage.common import swift swclient = swift.swclient swift_utils = swift.swift_utils -LOG = log.getLogger(__name__) - OPTS = [ cfg.StrOpt('swift_auth_version', default='1', -- GitLab From 260d5371ef8165d4bace5c776639af346ffaeeab Mon Sep 17 00:00:00 2001 From: Feth AREZKI Date: Fri, 10 Feb 2017 22:12:35 +0100 Subject: [PATCH 0633/1483] Document the possible locations of the config file. Perhaps a reference to oslo.config would be appropriate but I feel it would clutter the text. Looks like a config split into several files of a gnocchi.d directory is supported by gnocchi/oslo but I have no info about this. Change-Id: I56e5b35a63d321190d79c123f77ad4eeef3f419f --- doc/source/install.rst | 20 ++++++++++++++++++-- 1 file changed, 18 insertions(+), 2 deletions(-) diff --git a/doc/source/install.rst b/doc/source/install.rst index 134361f2..10d4037f 100644 --- a/doc/source/install.rst +++ b/doc/source/install.rst @@ -69,7 +69,21 @@ but is still recommended. 
Configuration ============= -Gnocchi is configured by the `/etc/gnocchi/gnocchi.conf` file. +Configuration file +------------------- + +By default, gnocchi looks for its configuration file in the following places, +in order: + +* ``~/.gnocchi/gnocchi.conf`` +* ``~/gnocchi.conf`` +* ``/etc/gnocchi/gnocchi.conf`` +* ``/etc/gnocchi.conf`` +* ``~/gnocchi/gnocchi.conf.d`` +* ``~/gnocchi.conf.d`` +* ``/etc/gnocchi/gnocchi.conf.d`` +* ``/etc/gnocchi.conf.d`` + No config file is provided with the source code; it will be created during the installation. In case where no configuration file was installed, one can be @@ -77,7 +91,9 @@ easily created by running: :: - gnocchi-config-generator > /etc/gnocchi/gnocchi.conf + gnocchi-config-generator > /path/to/gnocchi.conf + +Configure Gnocchi by editing the appropriate file. The configuration file should be pretty explicit, but here are some of the base options you want to change and configure: -- GitLab From 98a0a8be0b43fb43ee2559e5282d9989f7c4d9e1 Mon Sep 17 00:00:00 2001 From: gord chung Date: Tue, 21 Feb 2017 15:58:03 +0000 Subject: [PATCH 0634/1483] remove openstack link it was leftover from something that's been removed. Change-Id: I92f7459ec375755ce33ae45b4df215362b7a7228 --- doc/source/index.rst | 2 -- 1 file changed, 2 deletions(-) diff --git a/doc/source/index.rst b/doc/source/index.rst index d822b380..d5318fc7 100644 --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -73,5 +73,3 @@ Documentation collectd glossary releasenotes/index.rst - -.. _`OpenStack`: http://openstack.org -- GitLab From 19f48d42d2a0c35ed89fde812e791e7aeb1a728d Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Wed, 22 Feb 2017 13:01:49 +0100 Subject: [PATCH 0635/1483] ceph: Allow to configure timeout This change introduces ceph_timeout option to set ceph connection/operation timeout. 
Change-Id: I3bc7d1d05d0c6d00215d75ba67dfe0ce37519645 --- gnocchi/storage/ceph.py | 1 + gnocchi/storage/common/ceph.py | 4 ++++ 2 files changed, 5 insertions(+) diff --git a/gnocchi/storage/ceph.py b/gnocchi/storage/ceph.py index a887a85a..9ca5850b 100644 --- a/gnocchi/storage/ceph.py +++ b/gnocchi/storage/ceph.py @@ -33,6 +33,7 @@ OPTS = [ help='Ceph username (ie: admin without "client." prefix).'), cfg.StrOpt('ceph_secret', help='Ceph key', secret=True), cfg.StrOpt('ceph_keyring', help='Ceph keyring path.'), + cfg.IntOpt('ceph_timeout', help='Ceph connection timeout'), cfg.StrOpt('ceph_conffile', default='/etc/ceph/ceph.conf', help='Ceph configuration file.'), diff --git a/gnocchi/storage/common/ceph.py b/gnocchi/storage/common/ceph.py index 468bdb19..992c2675 100644 --- a/gnocchi/storage/common/ceph.py +++ b/gnocchi/storage/common/ceph.py @@ -36,6 +36,10 @@ def create_rados_connection(conf): options['keyring'] = conf.ceph_keyring if conf.ceph_secret: options['key'] = conf.ceph_secret + if conf.ceph_timeout: + options['rados_osd_op_timeout'] = conf.ceph_timeout + options['rados_mon_op_timeout'] = conf.ceph_timeout + options['client_mount_timeout'] = conf.ceph_timeout if not rados: raise ImportError("No module named 'rados' nor 'cradox'") -- GitLab From 46b9f3f0b62a1d84dd33b6be430d47064660d77d Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Fri, 24 Feb 2017 21:02:49 +0100 Subject: [PATCH 0636/1483] doc: use regexp for doc version whitelisting We are never going to update this manually, so let's use a regexp. 
Change-Id: I4374f454f9694d7e7ff6821408e5b3a2da9ec7b6 --- doc/source/conf.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/source/conf.py b/doc/source/conf.py index 109ddcd2..42111e7a 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -183,5 +183,5 @@ htmlhelp_basename = 'gnocchidoc' scv_sort = ('semver',) scv_greatest_tag = True scv_priority = 'branches' -scv_whitelist_branches = ('master',) -scv_whitelist_tags = ('3.1.0', '3.0.4', '2.2.1') +scv_whitelist_branches = ('master', '^stable/(2\.1|2\.2|[3-9]\.)') +scv_whitelist_tags = ("^[2-9]\.",) -- GitLab From 4e82338b016b3006c567864b9c678b7f722bb1e5 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Tue, 28 Feb 2017 08:27:45 +0100 Subject: [PATCH 0637/1483] indexer: fix typo We looks at exception.inner_exception instead of exc.inner_exception. This change fixes that. Change-Id: Ic374a760ba8d3f658011705b401b29c05b39c47b --- gnocchi/indexer/sqlalchemy.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/gnocchi/indexer/sqlalchemy.py b/gnocchi/indexer/sqlalchemy.py index 0f9cdb81..d9e98494 100644 --- a/gnocchi/indexer/sqlalchemy.py +++ b/gnocchi/indexer/sqlalchemy.py @@ -64,7 +64,7 @@ LOG = log.getLogger(__name__) def _retry_on_exceptions(exc): if not isinstance(exc, exception.DBError): return False - inn_e = exception.inner_exception + inn_e = exc.inner_exception if not isinstance(inn_e, sqlalchemy.exc.InternalError): return False return (( -- GitLab From 47b7de0e2bb1157524b7b5988b8c715bec1dcd39 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Tue, 28 Feb 2017 09:22:28 +0100 Subject: [PATCH 0638/1483] tests: Hide useless tests output Currently the tests output flow is corrupted by data logged during setUpClass() and gabbi fixtures, this change uses a oslotest fixture to hide them. 
Change-Id: Ic88d40461989a0c96f9c3dc03746c48b89bd3357 --- gnocchi/tests/base.py | 10 ++++++++++ gnocchi/tests/functional/fixtures.py | 9 +++++++++ 2 files changed, 19 insertions(+) diff --git a/gnocchi/tests/base.py b/gnocchi/tests/base.py index 0388d1ed..04cbb574 100644 --- a/gnocchi/tests/base.py +++ b/gnocchi/tests/base.py @@ -21,7 +21,9 @@ import uuid import fixtures from oslotest import base +from oslotest import log from oslotest import mockpatch +from oslotest import output import six from six.moves.urllib.parse import unquote try: @@ -229,6 +231,14 @@ class TestCase(base.BaseTestCase): @classmethod def setUpClass(self): super(TestCase, self).setUpClass() + + # NOTE(sileht): oslotest does this in setUp() but we + # need it here + self.output = output.CaptureOutput() + self.output.setUp() + self.log = log.ConfigureLogging() + self.log.setUp() + self.conf = service.prepare_service([], default_config_files=[]) diff --git a/gnocchi/tests/functional/fixtures.py b/gnocchi/tests/functional/fixtures.py index b25ad694..226ef724 100644 --- a/gnocchi/tests/functional/fixtures.py +++ b/gnocchi/tests/functional/fixtures.py @@ -25,6 +25,8 @@ import warnings from gabbi import fixture from oslo_config import cfg from oslo_middleware import cors +from oslotest import log +from oslotest import output import sqlalchemy_utils from gnocchi import indexer @@ -66,6 +68,11 @@ class ConfigFixture(fixture.GabbiFixture): def start_fixture(self): """Create necessary temp files and do the config dance.""" + self.output = output.CaptureOutput() + self.output.setUp() + self.log = log.ConfigureLogging() + self.log.setUp() + global LOAD_APP_KWARGS data_tmp_dir = tempfile.mkdtemp(prefix='gnocchi') @@ -150,6 +157,8 @@ class ConfigFixture(fixture.GabbiFixture): shutil.rmtree(self.tmp_dir) self.conf.reset() + self.output.cleanUp() + self.log.cleanUp() class MetricdThread(threading.Thread): -- GitLab From 1975a9799ccb613e0a4415875a13ca6e3da4abc1 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: 
Wed, 1 Mar 2017 07:29:38 +0100 Subject: [PATCH 0639/1483] Ban alembic 0.9.0 alembic 0.9.0 introduces a new protection about uncommited transaction that doesn't seem to work with stamp(). This change ban the bugged version. Change-Id: I4b2ee8561bbc3cc163f25876405f5007fe6ecf9e --- setup.cfg | 4 ++-- tox.ini | 4 ++++ 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/setup.cfg b/setup.cfg index 4445c8b4..0d34d9bb 100644 --- a/setup.cfg +++ b/setup.cfg @@ -27,13 +27,13 @@ mysql = oslo.db>=4.8.0,!=4.13.1,!=4.13.2,!=4.15.0 sqlalchemy sqlalchemy-utils - alembic>=0.7.6,!=0.8.1 + alembic>=0.7.6,!=0.8.1,!=0.9.0 postgresql = psycopg2 oslo.db>=4.8.0,!=4.13.1,!=4.13.2,!=4.15.0 sqlalchemy sqlalchemy-utils - alembic>=0.7.6,!=0.8.1 + alembic>=0.7.6,!=0.8.1,!=0.9.0 s3 = boto3 botocore>=1.5 diff --git a/tox.ini b/tox.ini index 32c2bebf..4a3b9b03 100644 --- a/tox.ini +++ b/tox.ini @@ -35,12 +35,14 @@ commands = [testenv:py35-postgresql-file-upgrade-from-3.0] # We should always recreate since the script upgrade # Gnocchi we can't reuse the virtualenv +# FIXME(sileht): We set alembic version until next Gnocchi 3.0 is released envdir = upgrade recreate = True skip_install = True usedevelop = False setenv = GNOCCHI_VARIANT=test,postgresql,file deps = gnocchi[{env:GNOCCHI_VARIANT}]>=3.0,<3.1 + alembic<0.9.0 pifpaf>=0.13 gnocchiclient>=2.8.0 commands = pifpaf --env-prefix INDEXER run postgresql {toxinidir}/run-upgrade-tests.sh {posargs} @@ -48,12 +50,14 @@ commands = pifpaf --env-prefix INDEXER run postgresql {toxinidir}/run-upgrade-te [testenv:py27-mysql-ceph-upgrade-from-3.0] # We should always recreate since the script upgrade # Gnocchi we can't reuse the virtualenv +# FIXME(sileht): We set alembic version until next Gnocchi 3.0 is released envdir = upgrade recreate = True skip_install = True usedevelop = False setenv = GNOCCHI_VARIANT=test,mysql,ceph,ceph_recommended_lib deps = gnocchi[{env:GNOCCHI_VARIANT}]>=3.0,<3.1 + alembic<0.9.0 gnocchiclient>=2.8.0 pifpaf>=0.13 
commands = pifpaf --env-prefix INDEXER run mysql -- pifpaf --env-prefix STORAGE run ceph {toxinidir}/run-upgrade-tests.sh {posargs} -- GitLab From cbbcc987eb4db82f08ffc11b47b8792334457411 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Wed, 1 Mar 2017 10:58:32 +0100 Subject: [PATCH 0640/1483] devstack: Allow to change the processing delay This change allows to change the processing delay of metricd. And set the default to 5 seconds since devstack is for testing. Change-Id: I80eac6726a3841514b4f9c8bb9d2fc2d9b16a103 --- devstack/plugin.sh | 1 + devstack/settings | 1 + 2 files changed, 2 insertions(+) diff --git a/devstack/plugin.sh b/devstack/plugin.sh index d18ecd16..43313145 100644 --- a/devstack/plugin.sh +++ b/devstack/plugin.sh @@ -213,6 +213,7 @@ function configure_gnocchi { # Configure logging iniset $GNOCCHI_CONF DEFAULT debug "$ENABLE_DEBUG_LOG_LEVEL" + iniset $GNOCCHI_CONF metricd metric_processing_delay "$GNOCCHI_METRICD_PROCESSING_DELAY" # Set up logging if [ "$SYSLOG" != "False" ]; then diff --git a/devstack/settings b/devstack/settings index b45ceebb..1d683536 100644 --- a/devstack/settings +++ b/devstack/settings @@ -11,6 +11,7 @@ GNOCCHI_AUTH_CACHE_DIR=${GNOCCHI_AUTH_CACHE_DIR:-/var/cache/gnocchi} GNOCCHI_WSGI_DIR=${GNOCCHI_WSGI_DIR:-/var/www/gnocchi} GNOCCHI_DATA_DIR=${GNOCCHI_DATA_DIR:-${DATA_DIR}/gnocchi} GNOCCHI_COORDINATOR_URL=${GNOCCHI_COORDINATOR_URL:-redis://localhost:6379} +GNOCCHI_METRICD_PROCESSING_DELAY=${GNOCCHI_METRICD_PROCESSING_DELAY:-5} # GNOCCHI_DEPLOY defines how Gnocchi is deployed, allowed values: # - mod_wsgi : Run Gnocchi under Apache HTTPd mod_wsgi -- GitLab From ec61d111d072ea92786c80bbe225670469581cfa Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Tue, 28 Feb 2017 22:06:15 +0100 Subject: [PATCH 0641/1483] Move msgpack to global requirements It's actually used in gnocchi.cli so it's needed by any driver. 
Change-Id: I711ecdbbf08a58cf3d26045bcbe0259e0480f976 --- requirements.txt | 1 + setup.cfg | 4 ---- 2 files changed, 1 insertion(+), 4 deletions(-) diff --git a/requirements.txt b/requirements.txt index dae4c542..f6f69b98 100644 --- a/requirements.txt +++ b/requirements.txt @@ -23,3 +23,4 @@ tenacity>=3.1.0 # Apache-2.0 WebOb>=1.4.1 Paste PasteDeploy +msgpack-python diff --git a/setup.cfg b/setup.cfg index 0d34d9bb..cd383cb3 100644 --- a/setup.cfg +++ b/setup.cfg @@ -37,16 +37,13 @@ postgresql = s3 = boto3 botocore>=1.5 - msgpack-python lz4 tooz>=1.38 swift = python-swiftclient>=3.1.0 - msgpack-python lz4 tooz>=1.38 ceph = - msgpack-python lz4 tooz>=1.38 ceph_recommended_lib = @@ -54,7 +51,6 @@ ceph_recommended_lib = ceph_alternative_lib = python-rados>=10.1.0 # not available on pypi file = - msgpack-python lz4 tooz>=1.38 doc = -- GitLab From 1d6cdda07d4425812c3f37a829e25dd577c04570 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Thu, 2 Mar 2017 18:08:34 +0100 Subject: [PATCH 0642/1483] carbonara: remove misleading comment This should have been removed in commit 2bf39a5c96e46b53880af00ab0c161bdeb86b8b0 when the migration code from 1.3 to 2.x has been removed. 
Change-Id: I64ed701cb5fb32b44c38b8824c189e95fa53539e --- gnocchi/storage/_carbonara.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/gnocchi/storage/_carbonara.py b/gnocchi/storage/_carbonara.py index 8d2eeadd..8c576957 100644 --- a/gnocchi/storage/_carbonara.py +++ b/gnocchi/storage/_carbonara.py @@ -348,8 +348,6 @@ class CarbonaraBasedStorage(storage.StorageDriver): def delete_metric(self, metric, sync=False): LOG.debug("Deleting metric %s", metric) with self._lock(metric.id)(blocking=sync): - # If the metric has never been upgraded, we need to delete this - # here too self._delete_metric(metric) @staticmethod -- GitLab From 3a8ea9c25c75a938b3d189093571ea32a4860376 Mon Sep 17 00:00:00 2001 From: gord chung Date: Thu, 2 Mar 2017 16:49:18 +0000 Subject: [PATCH 0643/1483] delete unprocessed measures on expunge instead of cleaning up unprocessed measures of deleted metrics as they are visible, just defer it to when everything else is cleaned. this will leave processing workers to only process and cleanup is done exclusively by janitor. note: this might schedule delete metrics until cleaned, for now we assume we can live with this. 
Change-Id: Id19d4e1673e660f77576eaee5420d07d7cf65ab2 --- gnocchi/storage/_carbonara.py | 21 +++------------------ gnocchi/storage/incoming/ceph.py | 3 +++ 2 files changed, 6 insertions(+), 18 deletions(-) diff --git a/gnocchi/storage/_carbonara.py b/gnocchi/storage/_carbonara.py index 8d2eeadd..533ae688 100644 --- a/gnocchi/storage/_carbonara.py +++ b/gnocchi/storage/_carbonara.py @@ -18,7 +18,6 @@ import collections import datetime import itertools import operator -import uuid from concurrent import futures import iso8601 @@ -28,7 +27,6 @@ from oslo_log import log from oslo_utils import timeutils import six import six.moves -from tooz import coordination from gnocchi import carbonara from gnocchi import storage @@ -351,6 +349,7 @@ class CarbonaraBasedStorage(storage.StorageDriver): # If the metric has never been upgraded, we need to delete this # here too self._delete_metric(metric) + self.incoming.delete_unprocessed_measures_for_metric_id(metric.id) @staticmethod def _delete_metric_measures(metric, timestamp_key, @@ -436,23 +435,9 @@ class CarbonaraBasedStorage(storage.StorageDriver): def process_new_measures(self, indexer, metrics_to_process, sync=False): + # process only active metrics. deleted metrics with unprocessed + # measures will be skipped until cleaned by janitor. metrics = indexer.list_metrics(ids=metrics_to_process) - # This build the list of deleted metrics, i.e. the metrics we have - # measures to process for but that are not in the indexer anymore. - deleted_metrics_id = (set(map(uuid.UUID, metrics_to_process)) - - set(m.id for m in metrics)) - for metric_id in deleted_metrics_id: - # NOTE(jd): We need to lock the metric otherwise we might delete - # measures that another worker might be processing. Deleting - # measurement files under its feet is not nice! 
- try: - with self._lock(metric_id)(blocking=sync): - self.incoming.delete_unprocessed_measures_for_metric_id( - metric_id) - except coordination.LockAcquireFailed: - LOG.debug("Cannot acquire lock for metric %s, postponing " - "unprocessed measures deletion", metric_id) - for metric in metrics: lock = self._lock(metric.id) # Do not block if we cannot acquire the lock, that means some other diff --git a/gnocchi/storage/incoming/ceph.py b/gnocchi/storage/incoming/ceph.py index ef9ded73..316d678e 100644 --- a/gnocchi/storage/incoming/ceph.py +++ b/gnocchi/storage/incoming/ceph.py @@ -148,6 +148,9 @@ class CephStorage(_carbonara.CarbonaraBasedStorage): def delete_unprocessed_measures_for_metric_id(self, metric_id): object_prefix = self.MEASURE_PREFIX + "_" + str(metric_id) object_names = self._list_object_names_to_process(object_prefix) + if not object_names: + return + # Now clean objects and omap with rados.WriteOpCtx() as op: # NOTE(sileht): come on Ceph, no return code -- GitLab From 09fd8c14031df1049b3563c64de90a01b8c19872 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Mon, 6 Mar 2017 10:30:11 +0100 Subject: [PATCH 0644/1483] Mark noauth authentication as deprecated Change-Id: Iac5a286ed171b5be303b8c4ba8ad1e2a4071cbf9 --- gnocchi/rest/app.py | 6 ++++++ releasenotes/notes/deprecate-noauth-01b7e961d9a17e9e.yaml | 4 ++++ 2 files changed, 10 insertions(+) create mode 100644 releasenotes/notes/deprecate-noauth-01b7e961d9a17e9e.yaml diff --git a/gnocchi/rest/app.py b/gnocchi/rest/app.py index 05658a4b..e5ee7c14 100644 --- a/gnocchi/rest/app.py +++ b/gnocchi/rest/app.py @@ -15,6 +15,7 @@ # under the License. 
import os import uuid +import warnings from oslo_config import cfg from oslo_log import log @@ -107,6 +108,11 @@ def load_app(conf, indexer=None, storage=None, APPCONFIGS[configkey] = config LOG.info("WSGI config used: %s", cfg_path) + + if conf.api.auth_mode == "noauth": + warnings.warn("The `noauth' authentication mode is deprecated", + category=DeprecationWarning) + appname = "gnocchi+" + conf.api.auth_mode app = deploy.loadapp("config:" + cfg_path, name=appname, global_conf={'configkey': configkey}) diff --git a/releasenotes/notes/deprecate-noauth-01b7e961d9a17e9e.yaml b/releasenotes/notes/deprecate-noauth-01b7e961d9a17e9e.yaml new file mode 100644 index 00000000..635097c6 --- /dev/null +++ b/releasenotes/notes/deprecate-noauth-01b7e961d9a17e9e.yaml @@ -0,0 +1,4 @@ +--- +deprecations: + - The `noauth` authentication mechanism is deprecated and will be removed in + a next version. -- GitLab From ba3dc7cd2300b0169e61080aee03c70d3cadab10 Mon Sep 17 00:00:00 2001 From: gord chung Date: Mon, 6 Mar 2017 14:16:53 +0000 Subject: [PATCH 0645/1483] simplify swift report we shouldn't need to create a new set to count number of unique metrics. just use the keys in metric_details dictionary. 
Change-Id: I85325be90d05c38702e606e2eca7ec72102785a1 --- gnocchi/storage/incoming/swift.py | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/gnocchi/storage/incoming/swift.py b/gnocchi/storage/incoming/swift.py index 74ee93be..9ddf6e19 100644 --- a/gnocchi/storage/incoming/swift.py +++ b/gnocchi/storage/incoming/swift.py @@ -43,12 +43,10 @@ class SwiftStorage(_carbonara.CarbonaraBasedStorage): if details: headers, files = self.swift.get_container(self.MEASURE_PREFIX, full_listing=True) - metrics = set() for f in files: - metric, metric_files = f['name'].split("/", 1) + metric, __ = f['name'].split("/", 1) metric_details[metric] += 1 - metrics.add(metric) - nb_metrics = len(metrics) + nb_metrics = len(metric_details) else: headers, files = self.swift.get_container(self.MEASURE_PREFIX, delimiter='/', -- GitLab From a8eb25e04cdcec26161ae31e65a26b8a3f326daf Mon Sep 17 00:00:00 2001 From: gord chung Date: Mon, 6 Mar 2017 15:14:01 +0000 Subject: [PATCH 0646/1483] use bytes for coordination tooz wants bytes, let's give it to it. Change-Id: I60b1cd6a6ab52aa8aad76a07dee8aea9f61a4633 --- gnocchi/utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/gnocchi/utils.py b/gnocchi/utils.py index 1b3cd476..08a5008d 100644 --- a/gnocchi/utils.py +++ b/gnocchi/utils.py @@ -85,7 +85,7 @@ def _enable_coordination(coord): def get_coordinator_and_start(url): - my_id = str(uuid.uuid4()) + my_id = uuid.uuid4().bytes coord = coordination.get_coordinator(url, my_id) _enable_coordination(coord) return coord, my_id -- GitLab From eb1d64782a6db8bd3640ecf560879fcaf0af3474 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Mon, 27 Feb 2017 23:05:35 +0100 Subject: [PATCH 0647/1483] storage: Add redis driver This change adds a redis driver and uses it by default in devstack. Because reading/writting from/to disks is too slow in our testing environment. 
Change-Id: If617260a9d8e38dc9ba9311c832be333346dd41e --- bindep.txt | 1 + devstack/plugin.sh | 5 +- devstack/settings | 7 +- doc/source/architecture.rst | 2 + doc/source/install.rst | 4 + gnocchi/opts.py | 2 + gnocchi/storage/common/redis.py | 127 +++++++++++++++++ gnocchi/storage/incoming/redis.py | 87 ++++++++++++ gnocchi/storage/redis.py | 129 ++++++++++++++++++ gnocchi/tests/base.py | 9 ++ .../notes/redis-driver-299dc443170364bc.yaml | 5 + run-tests.sh | 4 +- setup.cfg | 7 + tox.ini | 6 +- 14 files changed, 388 insertions(+), 7 deletions(-) create mode 100644 gnocchi/storage/common/redis.py create mode 100644 gnocchi/storage/incoming/redis.py create mode 100644 gnocchi/storage/redis.py create mode 100644 releasenotes/notes/redis-driver-299dc443170364bc.yaml diff --git a/bindep.txt b/bindep.txt index cd6bd714..50e6e0ca 100644 --- a/bindep.txt +++ b/bindep.txt @@ -6,3 +6,4 @@ build-essential [platform:dpkg] libffi-dev [platform:dpkg] librados-dev [platform:dpkg] ceph [platform:dpkg] +redis-server [platform:dpkg] diff --git a/devstack/plugin.sh b/devstack/plugin.sh index 43313145..9add5828 100644 --- a/devstack/plugin.sh +++ b/devstack/plugin.sh @@ -250,6 +250,9 @@ function configure_gnocchi { elif [[ "$GNOCCHI_STORAGE_BACKEND" = 'file' ]] ; then iniset $GNOCCHI_CONF storage driver file iniset $GNOCCHI_CONF storage file_basepath $GNOCCHI_DATA_DIR/ + elif [[ "$GNOCCHI_STORAGE_BACKEND" = 'redis' ]] ; then + iniset $GNOCCHI_CONF storage driver redis + iniset $GNOCCHI_CONF storage redis_url $GNOCCHI_REDIS_URL else echo "ERROR: could not configure storage driver" exit 1 @@ -353,7 +356,7 @@ function preinstall_gnocchi { # install_gnocchi() - Collect source and prepare function install_gnocchi { - if [ "${GNOCCHI_COORDINATOR_URL%%:*}" == "redis" ]; then + if [[ "$GNOCCHI_STORAGE_BACKEND" = 'redis' ]] || [[ "${GNOCCHI_COORDINATOR_URL%%:*}" == "redis" ]]; then _gnocchi_install_redis fi diff --git a/devstack/settings b/devstack/settings index 1d683536..769d149a 100644 --- 
a/devstack/settings +++ b/devstack/settings @@ -44,14 +44,17 @@ GNOCCHI_STATSD_RESOURCE_ID=${GNOCCHI_STATSD_RESOURCE_ID:-$(uuidgen)} GNOCCHI_STATSD_USER_ID=${GNOCCHI_STATSD_USER_ID:-$(uuidgen)} GNOCCHI_STATSD_PROJECT_ID=${GNOCCHI_STATSD_PROJECT_ID:-$(uuidgen)} -# ceph gnocchi info +# Ceph gnocchi info GNOCCHI_CEPH_USER=${GNOCCHI_CEPH_USER:-gnocchi} GNOCCHI_CEPH_POOL=${GNOCCHI_CEPH_POOL:-gnocchi} GNOCCHI_CEPH_POOL_PG=${GNOCCHI_CEPH_POOL_PG:-8} GNOCCHI_CEPH_POOL_PGP=${GNOCCHI_CEPH_POOL_PGP:-8} +# Redis gnocchi info +GNOCCHI_REDIS_URL=${GNOCCHI_REDIS_URL:-redis://localhost:6379} + # Gnocchi backend -GNOCCHI_STORAGE_BACKEND=${GNOCCHI_STORAGE_BACKEND:-file} +GNOCCHI_STORAGE_BACKEND=${GNOCCHI_STORAGE_BACKEND:-redis} # Grafana settings GRAFANA_RPM_PKG=${GRAFANA_RPM_PKG:-https://grafanarel.s3.amazonaws.com/builds/grafana-3.0.4-1464167696.x86_64.rpm} diff --git a/doc/source/architecture.rst b/doc/source/architecture.rst index 29dbd249..f62a67c7 100755 --- a/doc/source/architecture.rst +++ b/doc/source/architecture.rst @@ -44,6 +44,7 @@ Gnocchi currently offers different storage drivers: * `Ceph`_ (preferred) * `OpenStack Swift`_ * `S3`_ +* `Redis`_ The drivers are based on an intermediate library, named *Carbonara*, which handles the time series manipulation, since none of these storage technologies @@ -63,6 +64,7 @@ the recommended driver. .. _OpenStack Swift: http://docs.openstack.org/developer/swift/ .. _Ceph: https://ceph.com .. _`S3`: https://aws.amazon.com/s3/ +.. 
_`Redis`: https://redis.io Available index back-ends ~~~~~~~~~~~~~~~~~~~~~~~~~ diff --git a/doc/source/install.rst b/doc/source/install.rst index 10d4037f..5ed1b35b 100644 --- a/doc/source/install.rst +++ b/doc/source/install.rst @@ -31,6 +31,7 @@ The list of variants available is: * ceph_recommended_lib – provides Ceph (>=0.80) storage support * ceph_alternative_lib – provides Ceph (>=10.1.0) storage support * file – provides file driver support +* redis – provides Redis storage support * doc – documentation building support * test – unit and functional tests support @@ -117,6 +118,9 @@ options you want to change and configure: | storage.s3_* | Configuration options to access S3 | | | if you use the S3 storage driver. | +---------------------+---------------------------------------------------+ +| storage.redis_* | Configuration options to access Redis | +| | if you use the Redis storage driver. | ++---------------------+---------------------------------------------------+ Configuring authentication ----------------------------- diff --git a/gnocchi/opts.py b/gnocchi/opts.py index e86e9237..f8baeffd 100644 --- a/gnocchi/opts.py +++ b/gnocchi/opts.py @@ -26,6 +26,7 @@ import gnocchi.indexer import gnocchi.storage import gnocchi.storage.ceph import gnocchi.storage.file +import gnocchi.storage.redis import gnocchi.storage.s3 import gnocchi.storage.swift @@ -48,6 +49,7 @@ _STORAGE_OPTS = list(itertools.chain(gnocchi.storage.OPTS, gnocchi.storage.ceph.OPTS, gnocchi.storage.file.OPTS, gnocchi.storage.swift.OPTS, + gnocchi.storage.redis.OPTS, gnocchi.storage.s3.OPTS)) diff --git a/gnocchi/storage/common/redis.py b/gnocchi/storage/common/redis.py new file mode 100644 index 00000000..7986e25c --- /dev/null +++ b/gnocchi/storage/common/redis.py @@ -0,0 +1,127 @@ +# -*- encoding: utf-8 -*- +# +# Copyright © 2017 Red Hat +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from __future__ import absolute_import + +from six.moves.urllib import parse + +from oslo_utils import strutils + +try: + import redis + from redis import sentinel +except ImportError: + redis = None + sentinel = None + + +CLIENT_ARGS = frozenset([ + 'db', + 'encoding', + 'retry_on_timeout', + 'socket_keepalive', + 'socket_timeout', + 'ssl', + 'ssl_certfile', + 'ssl_keyfile', + 'sentinel', + 'sentinel_fallback', +]) +""" +Keys that we allow to proxy from the coordinator configuration into the +redis client (used to configure the redis client internals so that +it works as you expect/want it to). + +See: http://redis-py.readthedocs.org/en/latest/#redis.Redis + +See: https://github.com/andymccurdy/redis-py/blob/2.10.3/redis/client.py +""" + +#: Client arguments that are expected/allowed to be lists. +CLIENT_LIST_ARGS = frozenset([ + 'sentinel_fallback', +]) + +#: Client arguments that are expected to be boolean convertible. +CLIENT_BOOL_ARGS = frozenset([ + 'retry_on_timeout', + 'ssl', +]) + +#: Client arguments that are expected to be int convertible. +CLIENT_INT_ARGS = frozenset([ + 'db', + 'socket_keepalive', + 'socket_timeout', +]) + +#: Default socket timeout to use when none is provided. 
+CLIENT_DEFAULT_SOCKET_TO = 30 + + +def get_client(conf): + if redis is None: + raise RuntimeError("python-redis unavailable") + parsed_url = parse.urlparse(conf.redis_url) + options = parse.parse_qs(parsed_url.query) + + kwargs = {} + if parsed_url.hostname: + kwargs['host'] = parsed_url.hostname + if parsed_url.port: + kwargs['port'] = parsed_url.port + else: + if not parsed_url.path: + raise ValueError("Expected socket path in parsed urls path") + kwargs['unix_socket_path'] = parsed_url.path + if parsed_url.password: + kwargs['password'] = parsed_url.password + + for a in CLIENT_ARGS: + if a not in options: + continue + if a in CLIENT_BOOL_ARGS: + v = strutils.bool_from_string(options[a][-1]) + elif a in CLIENT_LIST_ARGS: + v = options[a][-1] + elif a in CLIENT_INT_ARGS: + v = int(options[a][-1]) + else: + v = options[a][-1] + kwargs[a] = v + if 'socket_timeout' not in kwargs: + kwargs['socket_timeout'] = CLIENT_DEFAULT_SOCKET_TO + + # Ask the sentinel for the current master if there is a + # sentinel arg. + if 'sentinel' in kwargs: + sentinel_hosts = [ + tuple(fallback.split(':')) + for fallback in kwargs.get('sentinel_fallback', []) + ] + sentinel_hosts.insert(0, (kwargs['host'], kwargs['port'])) + sentinel_server = sentinel.Sentinel( + sentinel_hosts, + socket_timeout=kwargs['socket_timeout']) + sentinel_name = kwargs['sentinel'] + del kwargs['sentinel'] + if 'sentinel_fallback' in kwargs: + del kwargs['sentinel_fallback'] + master_client = sentinel_server.master_for(sentinel_name, **kwargs) + # The master_client is a redis.StrictRedis using a + # Sentinel managed connection pool. 
+ return master_client + return redis.StrictRedis(**kwargs) diff --git a/gnocchi/storage/incoming/redis.py b/gnocchi/storage/incoming/redis.py new file mode 100644 index 00000000..dcc9d529 --- /dev/null +++ b/gnocchi/storage/incoming/redis.py @@ -0,0 +1,87 @@ +# -*- encoding: utf-8 -*- +# +# Copyright © 2017 Red Hat +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +import contextlib +import datetime +import os +import uuid + +import six + +from gnocchi.storage.common import redis +from gnocchi.storage.incoming import _carbonara + + +class RedisStorage(_carbonara.CarbonaraBasedStorage): + + STORAGE_PREFIX = "incoming" + + def __init__(self, conf): + super(RedisStorage, self).__init__(conf) + self._client = redis.get_client(conf) + + def _build_measure_path(self, metric_id, random_id=None): + path = os.path.join(self.STORAGE_PREFIX, six.text_type(metric_id)) + if random_id: + if random_id is True: + now = datetime.datetime.utcnow().strftime("_%Y%m%d_%H:%M:%S") + random_id = six.text_type(uuid.uuid4()) + now + return os.path.join(path, random_id) + return path + + def _store_new_measures(self, metric, data): + path = self._build_measure_path(metric.id, True) + self._client.set(path.encode("utf8"), data) + + def _build_report(self, details): + match = os.path.join(self.STORAGE_PREFIX, "*") + metric_details = {} + for key in self._client.scan_iter(match=match.encode('utf8')): + metric = key.decode('utf8').split(os.path.sep)[1] + count = metric_details.setdefault(metric, 0) + 
count += 1 + return (len(metric_details.keys()), sum(metric_details.values()), + metric_details if details else None) + + def list_metric_with_measures_to_process(self, size, part, full=False): + match = os.path.join(self.STORAGE_PREFIX, "*") + keys = self._client.scan_iter(match=match.encode('utf8')) + measures = set([k.decode('utf8').split(os.path.sep)[1] for k in keys]) + if full: + return measures + return set(list(measures)[size * part:size * (part + 1)]) + + def _list_measures_container_for_metric_id(self, metric_id): + match = os.path.join(self._build_measure_path(metric_id), "*") + return list(self._client.scan_iter(match=match.encode("utf8"))) + + def delete_unprocessed_measures_for_metric_id(self, metric_id): + keys = self._list_measures_container_for_metric_id(metric_id) + if keys: + self._client.delete(*keys) + + @contextlib.contextmanager + def process_measure_for_metric(self, metric): + keys = self._list_measures_container_for_metric_id(metric.id) + measures = [] + for k in keys: + data = self._client.get(k) + sp_key = k.decode('utf8').split("/")[-1] + measures.extend(self._unserialize_measures(sp_key, data)) + + yield measures + + if keys: + self._client.delete(*keys) diff --git a/gnocchi/storage/redis.py b/gnocchi/storage/redis.py new file mode 100644 index 00000000..bfbaa670 --- /dev/null +++ b/gnocchi/storage/redis.py @@ -0,0 +1,129 @@ +# -*- encoding: utf-8 -*- +# +# Copyright © 2017 Red Hat +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+import os + +from oslo_config import cfg + +from gnocchi import storage +from gnocchi.storage import _carbonara +from gnocchi.storage.common import redis + + +OPTS = [ + cfg.StrOpt('redis_url', + default='redis://localhost:6379/', + help='Redis URL'), +] + + +class RedisStorage(_carbonara.CarbonaraBasedStorage): + WRITE_FULL = True + + STORAGE_PREFIX = "timeseries" + + def __init__(self, conf, incoming): + super(RedisStorage, self).__init__(conf, incoming) + self._client = redis.get_client(conf) + + def _build_metric_dir(self, metric): + return os.path.join(self.STORAGE_PREFIX, str(metric.id)) + + def _build_unaggregated_timeserie_path(self, metric, version=3): + return os.path.join( + self._build_metric_dir(metric), + 'none' + ("_v%s" % version if version else "")) + + def _build_metric_path(self, metric, aggregation): + return os.path.join(self._build_metric_dir(metric), + "agg_" + aggregation) + + def _build_metric_path_for_split(self, metric, aggregation, + timestamp_key, granularity, version=3): + path = os.path.join(self._build_metric_path(metric, aggregation), + timestamp_key + "_" + str(granularity)) + return path + '_v%s' % version if version else path + + def _create_metric(self, metric): + path = self._build_metric_dir(metric) + ret = self._client.set(path.encode("utf-8"), "created", nx=True) + if ret is None: + raise storage.MetricAlreadyExists(metric) + + def _store_unaggregated_timeserie(self, metric, data, version=3): + path = self._build_unaggregated_timeserie_path(metric, version) + self._client.set(path.encode("utf8"), data) + + def _get_unaggregated_timeserie(self, metric, version=3): + path = self._build_unaggregated_timeserie_path(metric, version) + data = self._client.get(path.encode("utf8")) + if data is None: + raise storage.MetricDoesNotExist(metric) + return data + + def _delete_unaggregated_timeserie(self, metric, version=3): + path = self._build_unaggregated_timeserie_path(metric, version) + data = self._client.get(path.encode("utf8")) 
+ if data is None: + raise storage.MetricDoesNotExist(metric) + self._client.delete(path.encode("utf8")) + + def _list_split_keys_for_metric(self, metric, aggregation, granularity, + version=None): + path = self._build_metric_dir(metric) + if self._client.get(path.encode("utf8")) is None: + raise storage.MetricDoesNotExist(metric) + match = os.path.join(self._build_metric_path(metric, aggregation), + "*") + split_keys = set() + for key in self._client.scan_iter(match=match.encode("utf8")): + key = key.decode("utf8") + key = key.split(os.path.sep)[-1] + meta = key.split("_") + if meta[1] == str(granularity) and self._version_check(key, + version): + split_keys.add(meta[0]) + return split_keys + + def _delete_metric_measures(self, metric, timestamp_key, aggregation, + granularity, version=3): + path = self._build_metric_path_for_split( + metric, aggregation, timestamp_key, granularity, version) + self._client.delete(path.encode("utf8")) + + def _store_metric_measures(self, metric, timestamp_key, aggregation, + granularity, data, offset=None, version=3): + path = self._build_metric_path_for_split(metric, aggregation, + timestamp_key, granularity, + version) + self._client.set(path.encode("utf8"), data) + + def _delete_metric(self, metric): + path = self._build_metric_dir(metric) + self._client.delete(path.encode("utf8")) + + # Carbonara API + + def _get_measures(self, metric, timestamp_key, aggregation, granularity, + version=3): + path = self._build_metric_path_for_split( + metric, aggregation, timestamp_key, granularity, version) + data = self._client.get(path.encode("utf8")) + if data is None: + fpath = self._build_metric_dir(metric) + if self._client.get(fpath.encode("utf8")) is None: + raise storage.MetricDoesNotExist(metric) + raise storage.AggregationDoesNotExist(metric, aggregation) + return data diff --git a/gnocchi/tests/base.py b/gnocchi/tests/base.py index 04cbb574..b28ab604 100644 --- a/gnocchi/tests/base.py +++ b/gnocchi/tests/base.py @@ -17,6 +17,7 @@ 
import functools import json import os import subprocess +import threading import uuid import fixtures @@ -178,6 +179,9 @@ class FakeSwiftClient(object): @six.add_metaclass(SkipNotImplementedMeta) class TestCase(base.BaseTestCase): + REDIS_DB_INDEX = 0 + REDIS_DB_LOCK = threading.Lock() + ARCHIVE_POLICIES = { 'no_granularity_match': archive_policy.ArchivePolicy( "no_granularity_match", @@ -310,6 +314,11 @@ class TestCase(base.BaseTestCase): "storage") self.storage = storage.get_driver(self.conf) + if self.conf.storage.driver == 'redis': + # Create one prefix per test + self.storage.STORAGE_PREFIX = str(uuid.uuid4()) + self.storage.incoming.STORAGE_PREFIX = str(uuid.uuid4()) + # NOTE(jd) Do not upgrade the storage. We don't really need the storage # upgrade for now, and the code that upgrade from pre-1.3 # (TimeSerieArchive) uses a lot of parallel lock, which makes tooz diff --git a/releasenotes/notes/redis-driver-299dc443170364bc.yaml b/releasenotes/notes/redis-driver-299dc443170364bc.yaml new file mode 100644 index 00000000..b8214f27 --- /dev/null +++ b/releasenotes/notes/redis-driver-299dc443170364bc.yaml @@ -0,0 +1,5 @@ +--- +features: + - | + A Redis driver has been introduced for storing incoming measures and + computed timeseries. diff --git a/run-tests.sh b/run-tests.sh index ecb9797d..0e6d11f8 100755 --- a/run-tests.sh +++ b/run-tests.sh @@ -8,8 +8,8 @@ do for indexer in ${GNOCCHI_TEST_INDEXER_DRIVERS} do case $GNOCCHI_TEST_STORAGE_DRIVER in - ceph) - pifpaf run ceph -- pifpaf -g GNOCCHI_INDEXER_URL run $indexer -- ./tools/pretty_tox.sh $* + ceph|redis) + pifpaf run $GNOCCHI_TEST_STORAGE_DRIVER -- pifpaf -g GNOCCHI_INDEXER_URL run $indexer -- ./tools/pretty_tox.sh $* ;; s3) if ! 
which s3rver >/dev/null 2>&1 diff --git a/setup.cfg b/setup.cfg index 0d34d9bb..da5b5c8e 100644 --- a/setup.cfg +++ b/setup.cfg @@ -40,6 +40,11 @@ s3 = msgpack-python lz4 tooz>=1.38 +redis = + redis>=2.10.0 # MIT + msgpack-python + lz4 + tooz>=1.38 swift = python-swiftclient>=3.1.0 msgpack-python @@ -106,12 +111,14 @@ gnocchi.storage = ceph = gnocchi.storage.ceph:CephStorage file = gnocchi.storage.file:FileStorage s3 = gnocchi.storage.s3:S3Storage + redis = gnocchi.storage.redis:RedisStorage gnocchi.incoming = ceph = gnocchi.storage.incoming.ceph:CephStorage file = gnocchi.storage.incoming.file:FileStorage swift = gnocchi.storage.incoming.swift:SwiftStorage s3 = gnocchi.storage.incoming.s3:S3Storage + redis = gnocchi.storage.incoming.redis:RedisStorage gnocchi.indexer = mysql = gnocchi.indexer.sqlalchemy:SQLAlchemyIndexer diff --git a/tox.ini b/tox.ini index 4a3b9b03..7c1b72aa 100644 --- a/tox.ini +++ b/tox.ini @@ -9,19 +9,21 @@ passenv = LANG OS_DEBUG OS_TEST_TIMEOUT OS_STDOUT_CAPTURE OS_STDERR_CAPTURE OS_L setenv = GNOCCHI_TEST_STORAGE_DRIVER=file GNOCCHI_TEST_INDEXER_DRIVER=postgresql - GNOCCHI_TEST_STORAGE_DRIVERS=file swift ceph s3 + GNOCCHI_TEST_STORAGE_DRIVERS=file swift ceph s3 redis GNOCCHI_TEST_INDEXER_DRIVERS=postgresql mysql file: GNOCCHI_TEST_STORAGE_DRIVERS=file swift: GNOCCHI_TEST_STORAGE_DRIVERS=swift ceph: GNOCCHI_TEST_STORAGE_DRIVERS=ceph + redis: GNOCCHI_TEST_STORAGE_DRIVERS=redis s3: GNOCCHI_TEST_STORAGE_DRIVERS=s3 postgresql: GNOCCHI_TEST_INDEXER_DRIVERS=postgresql mysql: GNOCCHI_TEST_INDEXER_DRIVERS=mysql - GNOCCHI_STORAGE_DEPS=file,swift,s3,ceph,ceph_recommended_lib + GNOCCHI_STORAGE_DEPS=file,swift,s3,ceph,ceph_recommended_lib,redis ceph: GNOCCHI_STORAGE_DEPS=ceph,ceph_recommended_lib swift: GNOCCHI_STORAGE_DEPS=swift file: GNOCCHI_STORAGE_DEPS=file + redis: GNOCCHI_STORAGE_DEPS=redis s3: GNOCCHI_STORAGE_DEPS=s3 deps = .[test] postgresql: .[postgresql,{env:GNOCCHI_STORAGE_DEPS}] -- GitLab From 8040d06d3bd7e34d43d0c83057fda34521e6e165 Mon Sep 
17 00:00:00 2001 From: Mehdi Abaakouk Date: Tue, 7 Mar 2017 12:16:04 +0100 Subject: [PATCH 0648/1483] carbonara: remove numpy warning We ask scipy/numpy to compute std on serie with one timestamp. And numpy complains and return nan. Next we drop all nan. This change creates the list of timestamps that are not unique and then compute the std. numpy does not complain anymore and we don't have to remove nan. Closes-bug: #1663419 Change-Id: Ie1ddd244e42dcac4f91f741f37a980e452f91845 --- gnocchi/carbonara.py | 15 ++++++++------- gnocchi/tests/test_carbonara.py | 17 +++++++++++++++++ 2 files changed, 25 insertions(+), 7 deletions(-) diff --git a/gnocchi/carbonara.py b/gnocchi/carbonara.py index db020ef4..f8ca56dd 100644 --- a/gnocchi/carbonara.py +++ b/gnocchi/carbonara.py @@ -154,15 +154,16 @@ class GroupedTimeSeries(object): def _scipy_aggregate(self, method, remove_unique=False, *args, **kwargs): if remove_unique: - locs = numpy.argwhere(self.counts > 1).T[0] + tstamps = self.tstamps[self.counts > 1] + else: + tstamps = self.tstamps - values = method(self._ts.values, self.indexes, self.tstamps, - *args, **kwargs) - timestamps = numpy.array(self.tstamps, 'datetime64[ns]') + if len(tstamps) == 0: + return pandas.Series() - if remove_unique: - timestamps = timestamps[locs] - values = values[locs] + values = method(self._ts.values, self.indexes, tstamps, + *args, **kwargs) + timestamps = numpy.array(tstamps, 'datetime64[ns]') return pandas.Series(values, pandas.to_datetime(timestamps)) diff --git a/gnocchi/tests/test_carbonara.py b/gnocchi/tests/test_carbonara.py index f06675fd..4469eb2a 100644 --- a/gnocchi/tests/test_carbonara.py +++ b/gnocchi/tests/test_carbonara.py @@ -240,6 +240,23 @@ class TestAggregatedTimeSerie(base.BaseTestCase): self._do_test_aggregation('std', 1.5275252316519465, 0.70710678118654757) + def test_aggregation_std_with_unique(self): + ts = carbonara.TimeSerie.from_tuples( + [(datetime.datetime(2014, 1, 1, 12, 0, 0), 3)]) + ts = self._resample(ts, 
60, 'std') + self.assertEqual(0, len(ts), ts.ts.values) + + ts = carbonara.TimeSerie.from_tuples( + [(datetime.datetime(2014, 1, 1, 12, 0, 0), 3), + (datetime.datetime(2014, 1, 1, 12, 0, 4), 6), + (datetime.datetime(2014, 1, 1, 12, 0, 9), 5), + (datetime.datetime(2014, 1, 1, 12, 1, 6), 9)]) + ts = self._resample(ts, 60, "std") + + self.assertEqual(1, len(ts)) + self.assertEqual(1.5275252316519465, + ts[datetime.datetime(2014, 1, 1, 12, 0, 0)]) + def test_different_length_in_timestamps_and_data(self): self.assertRaises(ValueError, carbonara.AggregatedTimeSerie.from_data, -- GitLab From a3cd3b7816cb4d55064349191ef3d0d6db690f8f Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Wed, 1 Mar 2017 16:26:14 +0100 Subject: [PATCH 0649/1483] Test upgrade from 3.1 instead of 3.0 Change-Id: Iaa93967c89c2b01cd7f4576179b408abad7f1feb --- run-upgrade-tests.sh | 36 ++---------------------------------- tox.ini | 12 ++++++------ 2 files changed, 8 insertions(+), 40 deletions(-) diff --git a/run-upgrade-tests.sh b/run-upgrade-tests.sh index 1bc27b89..be2d188b 100755 --- a/run-upgrade-tests.sh +++ b/run-upgrade-tests.sh @@ -4,10 +4,8 @@ set -e export GNOCCHI_DATA=$(mktemp -d -t gnocchi.XXXX) GDATE=$((which gdate >/dev/null && echo gdate) || echo date) -GSED=$((which gsed >/dev/null && echo gsed) || echo sed) old_version=$(pip freeze | sed -n '/gnocchi==/s/.*==\(.*\)/\1/p') -[ "${old_version:0:1}" == "3" ] && have_resource_type_post=1 RESOURCE_IDS=( "5a301761-aaaa-46e2-8900-8b4f6fe6675a" @@ -16,8 +14,6 @@ RESOURCE_IDS=( "non-uuid" ) -[ "$have_resource_type_post" ] && RESOURCE_ID_EXT="5a301761/dddd/46e2/8900/8b4f6fe6675a" - dump_data(){ dir="$1" mkdir -p $dir @@ -39,12 +35,6 @@ inject_data() { gnocchi resource create generic --attribute id:$resource_id -n metric:high > /dev/null done - if [ "$have_resource_type_post" ] - then - gnocchi resource-type create ext > /dev/null - gnocchi resource create ext --attribute id:$RESOURCE_ID_EXT -n metric:high > /dev/null - fi - { measures_sep="" 
MEASURES=$(for i in $(seq 0 10 288000); do @@ -84,14 +74,10 @@ else fi eval $(pifpaf run gnocchi --indexer-url $INDEXER_URL --storage-url $STORAGE_URL) -# Override default to be sure to use noauth -export OS_AUTH_TYPE=gnocchi-noauth -export GNOCCHI_USER_ID=admin -export GNOCCHI_PROJECT_ID=admin +export OS_AUTH_TYPE=gnocchi-basic +export GNOCCHI_USER=$GNOCCHI_USER_ID original_statsd_resource_id=$GNOCCHI_STATSD_RESOURCE_ID inject_data $GNOCCHI_DATA -# Encode resource id as it contains slashes and gnocchiclient does not encode it -[ "$have_resource_type_post" ] && RESOURCE_ID_EXT="19235bb9-35ca-5f55-b7db-165cfb033c86" dump_data $GNOCCHI_DATA/old pifpaf_stop @@ -107,25 +93,7 @@ export GNOCCHI_USER=$GNOCCHI_USER_ID # pifpaf creates a new statsd resource on each start gnocchi resource delete $GNOCCHI_STATSD_RESOURCE_ID -RESOURCE_IDS=( - "5a301761-aaaa-46e2-8900-8b4f6fe6675a" - "5a301761-bbbb-46e2-8900-8b4f6fe6675a" - "5a301761-cccc-46e2-8900-8b4f6fe6675a" - "24d2e3ed-c7c1-550f-8232-56c48809a6d4" -) -# NOTE(sileht): / are now _ -# NOTE(jdanjou): and we reencode for admin:admin, but we cannot authenticate as -# admin:admin in basic since ":" is forbidden in any username, so let's use the direct -# computed ID -[ "$have_resource_type_post" ] && RESOURCE_ID_EXT="517920a9-2e50-58b8-88e8-25fd7aae1d8f" - dump_data $GNOCCHI_DATA/new -# NOTE(sileht): change the output of the old gnocchi to compare with the new without '/' -$GSED -i -e "s,5a301761/dddd/46e2/8900/8b4f6fe6675a,5a301761_dddd_46e2_8900_8b4f6fe6675a,g" \ - -e "s,19235bb9-35ca-5f55-b7db-165cfb033c86,517920a9-2e50-58b8-88e8-25fd7aae1d8f,g" \ - -e "s,None ,${original_statsd_resource_id},g" \ - -e "s,37d1416a-381a-5b6c-99ef-37d89d95f1e1,24d2e3ed-c7c1-550f-8232-56c48809a6d4,g" $GNOCCHI_DATA/old/resources.list - echo "* Checking output difference between Gnocchi $old_version and $new_version" diff -uNr $GNOCCHI_DATA/old $GNOCCHI_DATA/new diff --git a/tox.ini b/tox.ini index 4a3b9b03..d7fea39c 100644 --- a/tox.ini +++ 
b/tox.ini @@ -32,31 +32,31 @@ commands = gnocchi-config-generator {toxinidir}/run-tests.sh {posargs} -[testenv:py35-postgresql-file-upgrade-from-3.0] +[testenv:py35-postgresql-file-upgrade-from-3.1] # We should always recreate since the script upgrade # Gnocchi we can't reuse the virtualenv -# FIXME(sileht): We set alembic version until next Gnocchi 3.0 is released +# FIXME(sileht): We set alembic version until next Gnocchi 3.1 is released envdir = upgrade recreate = True skip_install = True usedevelop = False setenv = GNOCCHI_VARIANT=test,postgresql,file -deps = gnocchi[{env:GNOCCHI_VARIANT}]>=3.0,<3.1 +deps = gnocchi[{env:GNOCCHI_VARIANT}]>=3.1,<3.2 alembic<0.9.0 pifpaf>=0.13 gnocchiclient>=2.8.0 commands = pifpaf --env-prefix INDEXER run postgresql {toxinidir}/run-upgrade-tests.sh {posargs} -[testenv:py27-mysql-ceph-upgrade-from-3.0] +[testenv:py27-mysql-ceph-upgrade-from-3.1] # We should always recreate since the script upgrade # Gnocchi we can't reuse the virtualenv -# FIXME(sileht): We set alembic version until next Gnocchi 3.0 is released +# FIXME(sileht): We set alembic version until next Gnocchi 3.1 is released envdir = upgrade recreate = True skip_install = True usedevelop = False setenv = GNOCCHI_VARIANT=test,mysql,ceph,ceph_recommended_lib -deps = gnocchi[{env:GNOCCHI_VARIANT}]>=3.0,<3.1 +deps = gnocchi[{env:GNOCCHI_VARIANT}]>=3.1,<3.2 alembic<0.9.0 gnocchiclient>=2.8.0 pifpaf>=0.13 -- GitLab From 6d4abc3a6d47e883072e428211a24eb36e598b00 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Mon, 27 Feb 2017 11:56:19 +0100 Subject: [PATCH 0650/1483] Remove upgrade code from 2.2 and 3.0 We don't care anymore in this next release. 
Change-Id: I0c6f3166b1eb5226a7d435cca3d2f14ce7991741 Sem-Ver: api-break --- gnocchi/storage/_carbonara.py | 106 --------- gnocchi/storage/ceph.py | 29 --- gnocchi/storage/incoming/_carbonara.py | 13 +- gnocchi/tests/storage/test_carbonara.py | 210 ------------------ ...val-from-2.2-and-3.0-a01fc64ecb39c327.yaml | 4 + tox.ini | 30 +-- 6 files changed, 9 insertions(+), 383 deletions(-) delete mode 100644 gnocchi/tests/storage/test_carbonara.py create mode 100644 releasenotes/notes/upgrade-code-removal-from-2.2-and-3.0-a01fc64ecb39c327.yaml diff --git a/gnocchi/storage/_carbonara.py b/gnocchi/storage/_carbonara.py index 70a05148..6ab6b931 100644 --- a/gnocchi/storage/_carbonara.py +++ b/gnocchi/storage/_carbonara.py @@ -21,7 +21,6 @@ import operator from concurrent import futures import iso8601 -import msgpack from oslo_config import cfg from oslo_log import log from oslo_utils import timeutils @@ -354,83 +353,6 @@ class CarbonaraBasedStorage(storage.StorageDriver): aggregation, granularity, version=3): raise NotImplementedError - def _check_for_metric_upgrade(self, metric): - # FIXME(gordc): this is only required for v2.x to v3.x storage upgrade. - # we should make storage version easily detectable rather than - # checking each metric individually - lock = self._lock(metric.id) - with lock: - try: - old_unaggregated = self._get_unaggregated_timeserie_and_unserialize_v2( # noqa - metric) - except (storage.MetricDoesNotExist, CorruptionError) as e: - # This case can happen if v3.0 to v3.x or if no measures - # pushed. skip the rest of upgrade on metric. 
- LOG.debug( - "Unable to find v2 unaggregated timeserie for " - "metric %s, no data to upgrade: %s", - metric.id, e) - return - - unaggregated = carbonara.BoundTimeSerie( - ts=old_unaggregated.ts, - block_size=metric.archive_policy.max_block_size, - back_window=metric.archive_policy.back_window) - # Upgrade unaggregated timeserie to v3 - self._store_unaggregated_timeserie( - metric, unaggregated.serialize()) - oldest_mutable_timestamp = ( - unaggregated.first_block_timestamp() - ) - for agg_method, d in itertools.product( - metric.archive_policy.aggregation_methods, - metric.archive_policy.definition): - LOG.debug( - "Checking if the metric %s needs migration for %s", - metric, agg_method) - - try: - all_keys = self._list_split_keys_for_metric( - metric, agg_method, d.granularity, version=2) - except storage.MetricDoesNotExist: - # Just try the next metric, this one has no measures - break - else: - LOG.info("Migrating metric %s to new format", metric) - timeseries = filter( - lambda x: x is not None, - self._map_in_thread( - self._get_measures_and_unserialize_v2, - ((metric, key, agg_method, d.granularity) - for key in all_keys)) - ) - ts = carbonara.AggregatedTimeSerie.from_timeseries( - sampling=d.granularity, - aggregation_method=agg_method, - timeseries=timeseries, max_size=d.points) - for key, split in ts.split(): - self._store_timeserie_split( - metric, key, split, - ts.aggregation_method, - d, oldest_mutable_timestamp) - for key in all_keys: - self._delete_metric_measures( - metric, key, agg_method, - d.granularity, version=None) - self._delete_unaggregated_timeserie(metric, version=None) - LOG.info("Migrated metric %s to new format", metric) - - def upgrade(self, index): - marker = None - while True: - metrics = [(metric,) for metric in - index.list_metrics(limit=self.UPGRADE_BATCH_SIZE, - marker=marker)] - self._map_in_thread(self._check_for_metric_upgrade, metrics) - if len(metrics) == 0: - break - marker = metrics[-1][0].id - def 
process_new_measures(self, indexer, metrics_to_process, sync=False): # process only active metrics. deleted metrics with unprocessed @@ -645,31 +567,3 @@ class CarbonaraBasedStorage(storage.StorageDriver): # We use 'list' to iterate all threads here to raise the first # exception now, not much choice return list(executor.map(lambda args: method(*args), list_of_args)) - - @staticmethod - def _unserialize_timeserie_v2(data): - return carbonara.TimeSerie.from_data( - *carbonara.TimeSerie._timestamps_and_values_from_dict( - msgpack.loads(data, encoding='utf-8')['values']), - clean=True) - - def _get_unaggregated_timeserie_and_unserialize_v2(self, metric): - """Unserialization method for unaggregated v2 timeseries.""" - data = self._get_unaggregated_timeserie(metric, version=None) - try: - return self._unserialize_timeserie_v2(data) - except ValueError: - LOG.error("Data corruption detected for %s ignoring.", metric.id) - - def _get_measures_and_unserialize_v2(self, metric, key, - aggregation, granularity): - """Unserialization method for upgrading v2 objects. 
Upgrade only.""" - data = self._get_measures( - metric, key, aggregation, granularity, version=None) - try: - return self._unserialize_timeserie_v2(data) - except ValueError: - LOG.error("Data corruption detected for %s " - "aggregated `%s' timeserie, granularity `%s' " - "around time `%s', ignoring.", - metric.id, aggregation, granularity, key) diff --git a/gnocchi/storage/ceph.py b/gnocchi/storage/ceph.py index f261d41c..4d5d930b 100644 --- a/gnocchi/storage/ceph.py +++ b/gnocchi/storage/ceph.py @@ -50,35 +50,6 @@ class CephStorage(_carbonara.CarbonaraBasedStorage): ceph.close_rados_connection(self.rados, self.ioctx) super(CephStorage, self).stop() - def _check_for_metric_upgrade(self, metric): - lock = self._lock(metric.id) - with lock: - container = "gnocchi_%s_container" % metric.id - unagg_obj = self._build_unaggregated_timeserie_path(metric, 3) - try: - xattrs = tuple(k for k, v in self.ioctx.get_xattrs(container)) - except rados.ObjectNotFound: - # this means already upgraded or some corruption? move on. - pass - else: - # if xattrs are found, it means we're coming from - # gnocchiv2. migrate to omap accordingly. 
- if xattrs: - keys = xattrs - # if no xattrs but object exists, it means it already - # migrated to v3 and now upgrade to use single object - else: - with rados.ReadOpCtx() as op: - omaps, ret = self.ioctx.get_omap_vals(op, "", "", -1) - self.ioctx.operate_read_op(op, container) - keys = (k for k, __ in omaps) - with rados.WriteOpCtx() as op: - self.ioctx.set_omap(op, keys, - tuple([b""] * len(keys))) - self.ioctx.operate_write_op(op, unagg_obj) - self.ioctx.remove_object(container) - super(CephStorage, self)._check_for_metric_upgrade(metric) - @staticmethod def _get_object_name(metric, timestamp_key, aggregation, granularity, version=3): diff --git a/gnocchi/storage/incoming/_carbonara.py b/gnocchi/storage/incoming/_carbonara.py index 0c349e9d..dc77d2d1 100644 --- a/gnocchi/storage/incoming/_carbonara.py +++ b/gnocchi/storage/incoming/_carbonara.py @@ -18,7 +18,6 @@ import itertools import struct from oslo_log import log -from oslo_serialization import msgpackutils import pandas import six.moves @@ -38,14 +37,10 @@ class CarbonaraBasedStorage(incoming.StorageDriver): measures = struct.unpack( "<" + self._MEASURE_SERIAL_FORMAT * nb_measures, data) except struct.error: - # This either a corruption, either a v2 measures - try: - return msgpackutils.loads(data) - except ValueError: - LOG.error( - "Unable to decode measure %s, possible data corruption", - measure_id) - raise + LOG.error( + "Unable to decode measure %s, possible data corruption", + measure_id) + raise return six.moves.zip( pandas.to_datetime(measures[::2], unit='ns'), itertools.islice(measures, 1, len(measures), 2)) diff --git a/gnocchi/tests/storage/test_carbonara.py b/gnocchi/tests/storage/test_carbonara.py deleted file mode 100644 index 62b89482..00000000 --- a/gnocchi/tests/storage/test_carbonara.py +++ /dev/null @@ -1,210 +0,0 @@ -# -*- encoding: utf-8 -*- -# -# Copyright © 2015-2016 Red Hat, Inc. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -import datetime -import itertools -import uuid - -import mock -import msgpack -import six - -from gnocchi import carbonara -from gnocchi import storage -from gnocchi.storage import _carbonara -from gnocchi.tests import base as tests_base -from gnocchi import utils - - -def _serialize_v2(split): - d = {'values': dict((timestamp.value, float(v)) - for timestamp, v - in six.iteritems(split.ts.dropna()))} - return msgpack.dumps(d) - - -class TestCarbonaraMigration(tests_base.TestCase): - def setUp(self): - super(TestCarbonaraMigration, self).setUp() - if not isinstance(self.storage, _carbonara.CarbonaraBasedStorage): - self.skipTest("This driver is not based on Carbonara") - - self.metric = storage.Metric(uuid.uuid4(), - self.archive_policies['low']) - - self.storage._create_metric(self.metric) - - with mock.patch('gnocchi.carbonara.SplitKey.' 
- 'POINTS_PER_SPLIT', 14400): - bts = carbonara.BoundTimeSerie( - block_size=self.metric.archive_policy.max_block_size, - back_window=self.metric.archive_policy.back_window) - # NOTE: there is a split at 2016-07-18 on granularity 300 - values = ((datetime.datetime(2016, 7, 17, 23, 59, 0), 4), - (datetime.datetime(2016, 7, 17, 23, 59, 4), 5), - (datetime.datetime(2016, 7, 17, 23, 59, 9), 6), - (datetime.datetime(2016, 7, 18, 0, 0, 0), 7), - (datetime.datetime(2016, 7, 18, 0, 0, 4), 8), - (datetime.datetime(2016, 7, 18, 0, 0, 9), 9)) - - def _before_truncate(bound_timeserie): - for d, agg in itertools.product( - self.metric.archive_policy.definition, - ['mean', 'max']): - grouped = bound_timeserie.group_serie( - d.granularity, carbonara.round_timestamp( - bound_timeserie.first, d.granularity * 10e8)) - - aggts = carbonara.AggregatedTimeSerie.from_grouped_serie( - grouped, d.granularity, agg, max_size=d.points) - - for key, split in aggts.split(): - self.storage._store_metric_measures( - self.metric, - str(key), - agg, d.granularity, - _serialize_v2(split), offset=None, version=None) - - bts.set_values(values, before_truncate_callback=_before_truncate) - self.storage._store_unaggregated_timeserie(self.metric, - _serialize_v2(bts), - version=None) - - def upgrade(self): - with mock.patch.object(self.index, 'list_metrics') as f: - f.side_effect = [[self.metric], []] - self.storage.upgrade(self.index) - - def test_get_measures(self): - with mock.patch.object( - self.storage, '_get_measures_and_unserialize', - side_effect=self.storage._get_measures_and_unserialize_v2): - self.assertEqual([ - (utils.datetime_utc(2016, 7, 17), 86400, 5), - (utils.datetime_utc(2016, 7, 18), 86400, 8), - (utils.datetime_utc(2016, 7, 17, 23), 3600, 5), - (utils.datetime_utc(2016, 7, 18, 0), 3600, 8), - (utils.datetime_utc(2016, 7, 17, 23, 55), 300, 5), - (utils.datetime_utc(2016, 7, 18, 0), 300, 8) - ], self.storage.get_measures(self.metric)) - - self.assertEqual([ - (utils.datetime_utc(2016, 
7, 17), 86400, 6), - (utils.datetime_utc(2016, 7, 18), 86400, 9), - (utils.datetime_utc(2016, 7, 17, 23), 3600, 6), - (utils.datetime_utc(2016, 7, 18, 0), 3600, 9), - (utils.datetime_utc(2016, 7, 17, 23, 55), 300, 6), - (utils.datetime_utc(2016, 7, 18, 0), 300, 9) - ], self.storage.get_measures(self.metric, aggregation='max')) - - self.upgrade() - - self.assertEqual([ - (utils.datetime_utc(2016, 7, 17), 86400, 5), - (utils.datetime_utc(2016, 7, 18), 86400, 8), - (utils.datetime_utc(2016, 7, 17, 23), 3600, 5), - (utils.datetime_utc(2016, 7, 18, 0), 3600, 8), - (utils.datetime_utc(2016, 7, 17, 23, 55), 300, 5), - (utils.datetime_utc(2016, 7, 18, 0), 300, 8) - ], self.storage.get_measures(self.metric)) - - self.assertEqual([ - (utils.datetime_utc(2016, 7, 17), 86400, 6), - (utils.datetime_utc(2016, 7, 18), 86400, 9), - (utils.datetime_utc(2016, 7, 17, 23), 3600, 6), - (utils.datetime_utc(2016, 7, 18, 0), 3600, 9), - (utils.datetime_utc(2016, 7, 17, 23, 55), 300, 6), - (utils.datetime_utc(2016, 7, 18, 0), 300, 9) - ], self.storage.get_measures(self.metric, aggregation='max')) - - with mock.patch.object( - self.storage, '_get_measures_and_unserialize', - side_effect=self.storage._get_measures_and_unserialize_v2): - self.assertRaises( - storage.AggregationDoesNotExist, - self.storage.get_measures, self.metric) - - self.assertRaises( - storage.AggregationDoesNotExist, - self.storage.get_measures, self.metric, aggregation='max') - - self.storage.incoming.add_measures(self.metric, [ - storage.Measure(utils.dt_to_unix_ns(2016, 7, 18), 69), - storage.Measure(utils.dt_to_unix_ns(2016, 7, 18, 1, 1), 64), - ]) - - with mock.patch.object(self.index, 'list_metrics') as f: - f.side_effect = [[self.metric], []] - self.storage.process_background_tasks( - self.index, [str(self.metric.id)], sync=True) - - self.assertEqual([ - (utils.datetime_utc(2016, 7, 17), 86400, 6), - (utils.datetime_utc(2016, 7, 18), 86400, 69), - (utils.datetime_utc(2016, 7, 17, 23), 3600, 6), - 
(utils.datetime_utc(2016, 7, 18, 0), 3600, 69), - (utils.datetime_utc(2016, 7, 18, 1), 3600, 64), - (utils.datetime_utc(2016, 7, 18, 0), 300, 69), - (utils.datetime_utc(2016, 7, 18, 1), 300, 64) - ], self.storage.get_measures(self.metric, aggregation='max')) - - def test_upgrade_upgraded_storage(self): - with mock.patch.object( - self.storage, '_get_measures_and_unserialize', - side_effect=self.storage._get_measures_and_unserialize_v2): - self.assertEqual([ - (utils.datetime_utc(2016, 7, 17), 86400, 5), - (utils.datetime_utc(2016, 7, 18), 86400, 8), - (utils.datetime_utc(2016, 7, 17, 23), 3600, 5), - (utils.datetime_utc(2016, 7, 18, 0), 3600, 8), - (utils.datetime_utc(2016, 7, 17, 23, 55), 300, 5), - (utils.datetime_utc(2016, 7, 18, 0), 300, 8) - ], self.storage.get_measures(self.metric)) - - self.assertEqual([ - (utils.datetime_utc(2016, 7, 17), 86400, 6), - (utils.datetime_utc(2016, 7, 18), 86400, 9), - (utils.datetime_utc(2016, 7, 17, 23), 3600, 6), - (utils.datetime_utc(2016, 7, 18, 0), 3600, 9), - (utils.datetime_utc(2016, 7, 17, 23, 55), 300, 6), - (utils.datetime_utc(2016, 7, 18, 0), 300, 9) - ], self.storage.get_measures(self.metric, aggregation='max')) - - self.upgrade() - self.upgrade() - - self.assertEqual([ - (utils.datetime_utc(2016, 7, 17), 86400, 5), - (utils.datetime_utc(2016, 7, 18), 86400, 8), - (utils.datetime_utc(2016, 7, 17, 23), 3600, 5), - (utils.datetime_utc(2016, 7, 18, 0), 3600, 8), - (utils.datetime_utc(2016, 7, 17, 23, 55), 300, 5), - (utils.datetime_utc(2016, 7, 18, 0), 300, 8) - ], self.storage.get_measures(self.metric)) - - self.assertEqual([ - (utils.datetime_utc(2016, 7, 17), 86400, 6), - (utils.datetime_utc(2016, 7, 18), 86400, 9), - (utils.datetime_utc(2016, 7, 17, 23), 3600, 6), - (utils.datetime_utc(2016, 7, 18, 0), 3600, 9), - (utils.datetime_utc(2016, 7, 17, 23, 55), 300, 6), - (utils.datetime_utc(2016, 7, 18, 0), 300, 9) - ], self.storage.get_measures(self.metric, aggregation='max')) - - def 
test_delete_metric_not_upgraded(self): - # Make sure that we delete everything (e.g. objects + container) - # correctly even if the metric has not been upgraded. - self.storage.delete_metric(self.metric) - self.assertEqual([], self.storage.get_measures(self.metric)) diff --git a/releasenotes/notes/upgrade-code-removal-from-2.2-and-3.0-a01fc64ecb39c327.yaml b/releasenotes/notes/upgrade-code-removal-from-2.2-and-3.0-a01fc64ecb39c327.yaml new file mode 100644 index 00000000..bd0480ca --- /dev/null +++ b/releasenotes/notes/upgrade-code-removal-from-2.2-and-3.0-a01fc64ecb39c327.yaml @@ -0,0 +1,4 @@ +--- +upgrade: + - | + The storage upgrade is only supported from version 3.1. diff --git a/tox.ini b/tox.ini index d7fea39c..bf78f5bb 100644 --- a/tox.ini +++ b/tox.ini @@ -1,6 +1,6 @@ [tox] minversion = 1.8 -envlist = py{35,27}-{postgresql,mysql}{,-file,-swift,-ceph,-s3},pep8,bashate,py35-postgresql-file-upgrade-from-2.2,py27-mysql-ceph-upgrade-from-2.2 +envlist = py{35,27}-{postgresql,mysql}{,-file,-swift,-ceph,-s3},pep8,bashate [testenv] usedevelop = True @@ -62,34 +62,6 @@ deps = gnocchi[{env:GNOCCHI_VARIANT}]>=3.1,<3.2 pifpaf>=0.13 commands = pifpaf --env-prefix INDEXER run mysql -- pifpaf --env-prefix STORAGE run ceph {toxinidir}/run-upgrade-tests.sh {posargs} -[testenv:py35-postgresql-file-upgrade-from-2.2] -# We should always recreate since the script upgrade -# Gnocchi we can't reuse the virtualenv -envdir = upgrade -recreate = True -skip_install = True -usedevelop = False -setenv = GNOCCHI_VARIANT=test,postgresql,file -deps = gnocchi[{env:GNOCCHI_VARIANT}]>=2.2,<2.3 - pifpaf>=0.13 - gnocchiclient>=2.8.0 -commands = pifpaf --env-prefix INDEXER run postgresql {toxinidir}/run-upgrade-tests.sh {posargs} - -[testenv:py27-mysql-ceph-upgrade-from-2.2] -# We should always recreate since the script upgrade -# Gnocchi we can't reuse the virtualenv -envdir = upgrade -recreate = True -skip_install = True -usedevelop = False -setenv = 
GNOCCHI_VARIANT=test,mysql,ceph,ceph_recommended_lib -deps = gnocchi[{env:GNOCCHI_VARIANT}]>=2.2,<2.3 - gnocchiclient>=2.8.0 - pifpaf>=0.13 - cradox -# cradox is required because 2.2 extra names are incorrect -commands = pifpaf --env-prefix INDEXER run mysql -- pifpaf --env-prefix STORAGE run ceph {toxinidir}/run-upgrade-tests.sh {posargs} - [testenv:bashate] deps = bashate commands = bashate -v devstack/plugin.sh devstack/gate/gate_hook.sh devstack/gate/post_test_hook.sh -- GitLab From 5e3d4ec1b33500cc2fcb95f6a7785a58141253b9 Mon Sep 17 00:00:00 2001 From: gord chung Date: Tue, 7 Mar 2017 23:41:06 +0000 Subject: [PATCH 0651/1483] cleanup unused var we don't have this anymore since upgrade was removed Change-Id: Ic49bc921eaacf96595780bd4bd2e950307c703b4 --- gnocchi/storage/_carbonara.py | 1 - 1 file changed, 1 deletion(-) diff --git a/gnocchi/storage/_carbonara.py b/gnocchi/storage/_carbonara.py index 6ab6b931..9c68e21d 100644 --- a/gnocchi/storage/_carbonara.py +++ b/gnocchi/storage/_carbonara.py @@ -55,7 +55,6 @@ class CorruptionError(ValueError): class CarbonaraBasedStorage(storage.StorageDriver): - UPGRADE_BATCH_SIZE = 1000 def __init__(self, conf, incoming): super(CarbonaraBasedStorage, self).__init__(conf, incoming) -- GitLab From 35fe3b093ec97a83e41ca425b5d133202e60921b Mon Sep 17 00:00:00 2001 From: gecong1973 Date: Thu, 9 Mar 2017 10:18:45 +0800 Subject: [PATCH 0652/1483] Using fixtures.MockPatch instead of mockpatch.Patch This module has been deprecated in favor of fixtures.MockPatch. 
Change-Id: I52569c71989cbcb9ab34b7106e5d2a68abe204fb --- gnocchi/tests/base.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/gnocchi/tests/base.py b/gnocchi/tests/base.py index 04cbb574..7795ee8d 100644 --- a/gnocchi/tests/base.py +++ b/gnocchi/tests/base.py @@ -22,7 +22,6 @@ import uuid import fixtures from oslotest import base from oslotest import log -from oslotest import mockpatch from oslotest import output import six from six.moves.urllib.parse import unquote @@ -289,7 +288,7 @@ class TestCase(base.BaseTestCase): def setUp(self): super(TestCase, self).setUp() if swexc: - self.useFixture(mockpatch.Patch( + self.useFixture(fixtures.MockPatch( 'swiftclient.client.Connection', FakeSwiftClient)) -- GitLab From 693b5cb8de5469c9a9603f96ea887e0ce1064a72 Mon Sep 17 00:00:00 2001 From: gord chung Date: Thu, 9 Mar 2017 09:36:49 -0500 Subject: [PATCH 0653/1483] fix redis report report isn't keeping count properly. Change-Id: I6bd4d58ec237095c9ecbde048c440f1e531537df --- gnocchi/storage/incoming/redis.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/gnocchi/storage/incoming/redis.py b/gnocchi/storage/incoming/redis.py index dcc9d529..d1b22409 100644 --- a/gnocchi/storage/incoming/redis.py +++ b/gnocchi/storage/incoming/redis.py @@ -13,6 +13,7 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
+import collections import contextlib import datetime import os @@ -47,11 +48,10 @@ class RedisStorage(_carbonara.CarbonaraBasedStorage): def _build_report(self, details): match = os.path.join(self.STORAGE_PREFIX, "*") - metric_details = {} + metric_details = collections.defaultdict(int) for key in self._client.scan_iter(match=match.encode('utf8')): metric = key.decode('utf8').split(os.path.sep)[1] - count = metric_details.setdefault(metric, 0) - count += 1 + metric_details[metric] += 1 return (len(metric_details.keys()), sum(metric_details.values()), metric_details if details else None) -- GitLab From 14eb4c50e89e4b8c7d4f93ca836fcb7caed778df Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Tue, 28 Feb 2017 08:52:19 +0100 Subject: [PATCH 0654/1483] file, s3, swift: create incoming buckets/containers on upgrade And not on __init__ Change-Id: Id40fbfc01428d25a10a222ec5029ebf4d279e3af --- gnocchi/storage/incoming/file.py | 3 +++ gnocchi/storage/incoming/s3.py | 3 +++ gnocchi/storage/incoming/swift.py | 3 +++ gnocchi/tests/base.py | 8 ++------ gnocchi/tests/test_statsd.py | 6 +++--- 5 files changed, 14 insertions(+), 9 deletions(-) diff --git a/gnocchi/storage/incoming/file.py b/gnocchi/storage/incoming/file.py index 743e68ab..439a3ab2 100644 --- a/gnocchi/storage/incoming/file.py +++ b/gnocchi/storage/incoming/file.py @@ -30,6 +30,9 @@ class FileStorage(_carbonara.CarbonaraBasedStorage): self.basepath = conf.file_basepath self.basepath_tmp = os.path.join(self.basepath, 'tmp') self.measure_path = os.path.join(self.basepath, 'measure') + + def upgrade(self, indexer): + super(FileStorage, self).upgrade(indexer) utils.ensure_paths([self.basepath_tmp, self.measure_path]) def _build_measure_path(self, metric_id, random_id=None): diff --git a/gnocchi/storage/incoming/s3.py b/gnocchi/storage/incoming/s3.py index 885c67fd..259d1bab 100644 --- a/gnocchi/storage/incoming/s3.py +++ b/gnocchi/storage/incoming/s3.py @@ -39,6 +39,9 @@ class 
S3Storage(_carbonara.CarbonaraBasedStorage): self._bucket_name_measures = ( self._bucket_prefix + "-" + self.MEASURE_PREFIX ) + + def upgrade(self, indexer): + super(S3Storage, self).upgrade(indexer) try: s3.create_bucket(self.s3, self._bucket_name_measures, self._region_name) diff --git a/gnocchi/storage/incoming/swift.py b/gnocchi/storage/incoming/swift.py index 9ddf6e19..5052f6c7 100644 --- a/gnocchi/storage/incoming/swift.py +++ b/gnocchi/storage/incoming/swift.py @@ -29,6 +29,9 @@ class SwiftStorage(_carbonara.CarbonaraBasedStorage): def __init__(self, conf): super(SwiftStorage, self).__init__(conf) self.swift = swift.get_connection(conf) + + def upgrade(self, indexer): + super(SwiftStorage, self).upgrade(indexer) self.swift.put_container(self.MEASURE_PREFIX) def _store_new_measures(self, metric, data): diff --git a/gnocchi/tests/base.py b/gnocchi/tests/base.py index d46d0b32..455a76a2 100644 --- a/gnocchi/tests/base.py +++ b/gnocchi/tests/base.py @@ -313,17 +313,13 @@ class TestCase(base.BaseTestCase): "storage") self.storage = storage.get_driver(self.conf) + if self.conf.storage.driver == 'redis': # Create one prefix per test self.storage.STORAGE_PREFIX = str(uuid.uuid4()) self.storage.incoming.STORAGE_PREFIX = str(uuid.uuid4()) - # NOTE(jd) Do not upgrade the storage. We don't really need the storage - # upgrade for now, and the code that upgrade from pre-1.3 - # (TimeSerieArchive) uses a lot of parallel lock, which makes tooz - # explodes because MySQL does not support that many connections in real - # life. 
- # self.storage.upgrade(self.index) + self.storage.upgrade(self.index) def tearDown(self): self.index.disconnect() diff --git a/gnocchi/tests/test_statsd.py b/gnocchi/tests/test_statsd.py index 4a35af02..fc0713d6 100644 --- a/gnocchi/tests/test_statsd.py +++ b/gnocchi/tests/test_statsd.py @@ -40,10 +40,10 @@ class TestStatsd(tests_base.TestCase): self.conf.set_override("archive_policy_name", self.STATSD_ARCHIVE_POLICY_NAME, "statsd") - # NOTE(jd) Always use self.stats.storage and self.stats.indexer to - # pick at the right storage/indexer used by the statsd server, and not - # new instances from the base test class. self.stats = statsd.Stats(self.conf) + # Replace storage/indexer with correct ones that have been upgraded + self.stats.storage = self.storage + self.stats.indexer = self.index self.server = statsd.StatsdServer(self.stats) def test_flush_empty(self): -- GitLab From 2354c59c5490488659e370be09e04c805e16ef65 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Mon, 13 Mar 2017 14:26:22 +0100 Subject: [PATCH 0655/1483] carbonara: use lz4 0.9.0 new interfaces Change-Id: I3e5eb4bc0c40e9b81a83083a950d01067fbc559e --- gnocchi/carbonara.py | 11 ++++++----- setup.cfg | 10 +++++----- 2 files changed, 11 insertions(+), 10 deletions(-) diff --git a/gnocchi/carbonara.py b/gnocchi/carbonara.py index f8ca56dd..55b89466 100644 --- a/gnocchi/carbonara.py +++ b/gnocchi/carbonara.py @@ -26,7 +26,7 @@ import re import struct import time -import lz4 +import lz4.block import numpy import numpy.lib.recfunctions import pandas @@ -315,7 +315,7 @@ class BoundTimeSerie(TimeSerie): @classmethod def unserialize(cls, data, block_size, back_window): - uncompressed = lz4.loads(data) + uncompressed = lz4.block.decompress(data) nb_points = ( len(uncompressed) // cls._SERIALIZATION_TIMESTAMP_VALUE_LEN ) @@ -341,7 +341,7 @@ class BoundTimeSerie(TimeSerie): timestamps = numpy.array(timestamps, dtype='=1.5 - lz4 + lz4>=0.9.0 tooz>=1.38 redis = redis>=2.10.0 # MIT msgpack-python - lz4 + 
lz4>=0.9.0 tooz>=1.38 swift = python-swiftclient>=3.1.0 - lz4 + lz4>=0.9.0 tooz>=1.38 ceph = - lz4 + lz4>=0.9.0 tooz>=1.38 ceph_recommended_lib = cradox>=1.0.9 ceph_alternative_lib = python-rados>=10.1.0 # not available on pypi file = - lz4 + lz4>=0.9.0 tooz>=1.38 doc = oslosphinx>=2.2.0 -- GitLab From 37519248871d9092cd678f1397094c68fefdd3c5 Mon Sep 17 00:00:00 2001 From: gord chung Date: Thu, 16 Mar 2017 16:25:25 +0000 Subject: [PATCH 0656/1483] redis: switch incoming to use list redis provides data structures, we shouldn't creat a new key for every single 'measure object'. this patch stores all incoming measures in a list under a key associated with metric. Change-Id: Idfb5026fa90148c8f15b401893de196baea1f92c --- gnocchi/storage/incoming/redis.py | 42 +++++++++++-------------------- 1 file changed, 15 insertions(+), 27 deletions(-) diff --git a/gnocchi/storage/incoming/redis.py b/gnocchi/storage/incoming/redis.py index d1b22409..3e2d68fd 100644 --- a/gnocchi/storage/incoming/redis.py +++ b/gnocchi/storage/incoming/redis.py @@ -15,9 +15,7 @@ # under the License. 
import collections import contextlib -import datetime import os -import uuid import six @@ -33,25 +31,19 @@ class RedisStorage(_carbonara.CarbonaraBasedStorage): super(RedisStorage, self).__init__(conf) self._client = redis.get_client(conf) - def _build_measure_path(self, metric_id, random_id=None): - path = os.path.join(self.STORAGE_PREFIX, six.text_type(metric_id)) - if random_id: - if random_id is True: - now = datetime.datetime.utcnow().strftime("_%Y%m%d_%H:%M:%S") - random_id = six.text_type(uuid.uuid4()) + now - return os.path.join(path, random_id) - return path + def _build_measure_path(self, metric_id): + return os.path.join(self.STORAGE_PREFIX, six.text_type(metric_id)) def _store_new_measures(self, metric, data): - path = self._build_measure_path(metric.id, True) - self._client.set(path.encode("utf8"), data) + path = self._build_measure_path(metric.id) + self._client.rpush(path.encode("utf8"), data) def _build_report(self, details): match = os.path.join(self.STORAGE_PREFIX, "*") metric_details = collections.defaultdict(int) for key in self._client.scan_iter(match=match.encode('utf8')): metric = key.decode('utf8').split(os.path.sep)[1] - metric_details[metric] += 1 + metric_details[metric] = self._client.llen(key) return (len(metric_details.keys()), sum(metric_details.values()), metric_details if details else None) @@ -63,25 +55,21 @@ class RedisStorage(_carbonara.CarbonaraBasedStorage): return measures return set(list(measures)[size * part:size * (part + 1)]) - def _list_measures_container_for_metric_id(self, metric_id): - match = os.path.join(self._build_measure_path(metric_id), "*") - return list(self._client.scan_iter(match=match.encode("utf8"))) - def delete_unprocessed_measures_for_metric_id(self, metric_id): - keys = self._list_measures_container_for_metric_id(metric_id) - if keys: - self._client.delete(*keys) + self._client.delete(self._build_measure_path(metric_id)) @contextlib.contextmanager def process_measure_for_metric(self, metric): - keys = 
self._list_measures_container_for_metric_id(metric.id) + key = self._build_measure_path(metric.id) + item_len = self._client.llen(key) + # lrange is inclusive on both ends, decrease to grab exactly n items + item_len = item_len - 1 if item_len else item_len measures = [] - for k in keys: - data = self._client.get(k) - sp_key = k.decode('utf8').split("/")[-1] - measures.extend(self._unserialize_measures(sp_key, data)) + for i, data in enumerate(self._client.lrange(key, 0, item_len)): + measures.extend(self._unserialize_measures( + '%s-%s' % (metric.id, i), data)) yield measures - if keys: - self._client.delete(*keys) + # ltrim is inclusive, bump 1 to remove up to and including nth item + self._client.ltrim(key, item_len + 1, -1) -- GitLab From 6769d3403b97ac19f36fadd34d7d5263b9498d8d Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Thu, 16 Mar 2017 16:52:52 +0100 Subject: [PATCH 0657/1483] utils: fix Epoch timestamp parsing Timestamp in Epoch format (floats) parsing is broken when the values passed are strings that could be converted to floats and not floats directly. 
Change-Id: I5876f4aff065dee9894b6750e9cdbc467248e3b6 --- gnocchi/tests/functional/gabbits/metric.yaml | 6 ++++++ gnocchi/tests/test_utils.py | 18 ++++++++++++++++++ gnocchi/utils.py | 8 +++++++- 3 files changed, 31 insertions(+), 1 deletion(-) diff --git a/gnocchi/tests/functional/gabbits/metric.yaml b/gnocchi/tests/functional/gabbits/metric.yaml index dbc75645..e987c81c 100644 --- a/gnocchi/tests/functional/gabbits/metric.yaml +++ b/gnocchi/tests/functional/gabbits/metric.yaml @@ -199,6 +199,12 @@ tests: $: - ["2015-03-06T14:34:12+00:00", 1.0, 12.0] + - name: get measurements by start with epoch + GET: /v1/metric/$HISTORY['list valid metrics'].$RESPONSE['$[0].id']/measures?refresh=true&start=1425652440 + response_json_paths: + $: + - ["2015-03-06T14:34:12+00:00", 1.0, 12.0] + - name: get measurements from metric GET: /v1/metric/$HISTORY['list valid metrics'].$RESPONSE['$[0].id']/measures?refresh=true response_json_paths: diff --git a/gnocchi/tests/test_utils.py b/gnocchi/tests/test_utils.py index e14c0ecf..d8319e3d 100644 --- a/gnocchi/tests/test_utils.py +++ b/gnocchi/tests/test_utils.py @@ -39,3 +39,21 @@ class TestUtils(tests_base.TestCase): dt = datetime.datetime(2015, 1, 1, 15, 0, tzinfo=iso8601.iso8601.FixedOffset(5, 0, '+5h')) self._do_test_datetime_to_unix_timezone_change(1420106400.0, dt) + + def test_to_timestamps_epoch(self): + self.assertEqual( + utils.to_datetime("1425652440"), + datetime.datetime(2015, 3, 6, 14, 34, + tzinfo=iso8601.iso8601.UTC)) + self.assertEqual( + utils.to_datetime("1425652440.4"), + datetime.datetime(2015, 3, 6, 14, 34, 0, 400000, + tzinfo=iso8601.iso8601.UTC)) + self.assertEqual( + utils.to_datetime(1425652440), + datetime.datetime(2015, 3, 6, 14, 34, + tzinfo=iso8601.iso8601.UTC)) + self.assertEqual( + utils.to_datetime(utils.to_timestamp(1425652440.4)), + datetime.datetime(2015, 3, 6, 14, 34, 0, 400000, + tzinfo=iso8601.iso8601.UTC)) diff --git a/gnocchi/utils.py b/gnocchi/utils.py index 1b3cd476..6794a490 100644 --- 
a/gnocchi/utils.py +++ b/gnocchi/utils.py @@ -103,7 +103,13 @@ def to_timestamps(values): is_valid_timestamp(values[0])): times = pd.to_datetime(values, utc=True, box=False) else: - times = (utcnow() + pd.to_timedelta(values)).values + try: + float(values[0]) + except ValueError: + times = (utcnow() + pd.to_timedelta(values)).values + else: + times = pd.to_datetime(list(map(float, values)), + utc=True, box=False, unit='s') except ValueError: raise ValueError("Unable to convert timestamps") -- GitLab From 5c73d18477d1ca3ef745f81c1b2f4967a1eef5d7 Mon Sep 17 00:00:00 2001 From: gord chung Date: Thu, 16 Mar 2017 20:44:55 -0400 Subject: [PATCH 0658/1483] fix redis storage delete we aren't deleting the correct key Change-Id: I2c7ad7e2701d1bf639686ea5d1d6d08fdc3013ec --- gnocchi/storage/redis.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/gnocchi/storage/redis.py b/gnocchi/storage/redis.py index bfbaa670..2cc0ad21 100644 --- a/gnocchi/storage/redis.py +++ b/gnocchi/storage/redis.py @@ -111,8 +111,11 @@ class RedisStorage(_carbonara.CarbonaraBasedStorage): self._client.set(path.encode("utf8"), data) def _delete_metric(self, metric): - path = self._build_metric_dir(metric) - self._client.delete(path.encode("utf8")) + match = os.path.join(self._build_metric_dir(metric), '*') + # FIXME(gordc): this should be done in redis pipeline but i'm + # switching this to hashes anyways so it is what it is. + for key in self._client.scan_iter(match=match.encode("utf8")): + self._client.delete(key) # Carbonara API -- GitLab From db0b890e15c23e8bd870563b6ea3e564065db1d6 Mon Sep 17 00:00:00 2001 From: gord chung Date: Thu, 16 Mar 2017 22:19:43 +0000 Subject: [PATCH 0659/1483] test that storage is deleted we should actually verify that the unaggregated measures and aggregated measures are actually gone and not just that it's ignored. 
Change-Id: Ibfd18d9fa8a889656fcea2bd278bf7e6f13606e8 --- gnocchi/tests/test_storage.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/gnocchi/tests/test_storage.py b/gnocchi/tests/test_storage.py index 6fc8f9e9..95b1f47c 100644 --- a/gnocchi/tests/test_storage.py +++ b/gnocchi/tests/test_storage.py @@ -98,6 +98,10 @@ class TestStorageDriver(tests_base.TestCase): self.trigger_processing() self.storage.delete_metric(self.metric, sync=True) self.trigger_processing() + self.assertEqual([], self.storage.get_measures(self.metric)) + self.assertRaises(storage.MetricDoesNotExist, + self.storage._get_unaggregated_timeserie, + self.metric) def test_delete_nonempty_metric_unprocessed(self): self.storage.incoming.add_measures(self.metric, [ -- GitLab From bf917cde2d55ec8377566ae3f2b1d5566b2cd4d4 Mon Sep 17 00:00:00 2001 From: fengchaoyang Date: Fri, 17 Mar 2017 14:04:09 +0800 Subject: [PATCH 0660/1483] Modify outdated parameters comment Change-Id: I2278fca696a73190896318e7c22f1798460c7c20 --- gnocchi/storage/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/gnocchi/storage/__init__.py b/gnocchi/storage/__init__.py index 8b3f7f9f..e6416f44 100644 --- a/gnocchi/storage/__init__.py +++ b/gnocchi/storage/__init__.py @@ -163,7 +163,7 @@ class StorageDriver(object): This calls :func:`process_new_measures` to process new measures :param index: An indexer to be used for querying metrics - :param block_size: number of metrics to process + :param metrics: The list of metrics waiting for processing :param sync: If True, then process everything synchronously and raise on error :type sync: bool -- GitLab From 45689c1f71c998aac24896404c5b830d9862f917 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Mon, 20 Mar 2017 11:47:05 +0100 Subject: [PATCH 0661/1483] devstack: Change URL of grafana plugin source. Grafana plugin source have moved, this change changes the URL. 
Change-Id: I9592bd1eedf43fb2787431810998710216209f5c --- devstack/settings | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/devstack/settings b/devstack/settings index 769d149a..2ac7d52a 100644 --- a/devstack/settings +++ b/devstack/settings @@ -61,5 +61,5 @@ GRAFANA_RPM_PKG=${GRAFANA_RPM_PKG:-https://grafanarel.s3.amazonaws.com/builds/gr GRAFANA_DEB_PKG=${GRAFANA_DEB_PKG:-https://grafanarel.s3.amazonaws.com/builds/grafana_3.0.4-1464167696_amd64.deb} GRAFANA_PLUGIN_VERSION=${GRAFANA_PLUGIN_VERSION} GRAFANA_PLUGINS_DIR=${GRAFANA_PLUGINS_DIR:-$DEST/grafana-gnocchi-datasource} -GRAFANA_PLUGINS_REPO=${GRAFANA_PLUGINS_REPO:-http://github.com/sileht/grafana-gnocchi-datasource.git} +GRAFANA_PLUGINS_REPO=${GRAFANA_PLUGINS_REPO:-http://github.com/gnocchixyz/grafana-gnocchi-datasource.git} GRAFANA_URL=${GRAFANA_URL:-http://$HOST_IP:3000} -- GitLab From f9fc5a7449139f5fb3d410083bf7565e74a3e31b Mon Sep 17 00:00:00 2001 From: Chris Dent Date: Tue, 21 Mar 2017 16:11:37 +0000 Subject: [PATCH 0662/1483] Correct bad use response_strings in live.yaml response_strings wants a list, not a string. In gabbi earlier than 1.33.0 it accept a string and then iterated checking for each single character in the response body. So still passing. This fixes recent failures by putting the necessary '-' at the start of the search string. It's an easy bug to make and then never notice, which is why gabbi 1.33.0 is all uptight about it. 
Change-Id: I02042c0b5005a50b3760c9aae67ad4d0eeb69e04 --- gnocchi/tests/functional_live/gabbits/live.yaml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/gnocchi/tests/functional_live/gabbits/live.yaml b/gnocchi/tests/functional_live/gabbits/live.yaml index 8f3bbef4..b102da2a 100644 --- a/gnocchi/tests/functional_live/gabbits/live.yaml +++ b/gnocchi/tests/functional_live/gabbits/live.yaml @@ -146,7 +146,7 @@ tests: $.definition[2].points: 5 $.definition[2].timespan: "0:05:00" response_strings: - '"aggregation_methods": ["max", "min", "mean"]' + - '"aggregation_methods": ["max", "min", "mean"]' - name: get wrong accept desc: invalid 'accept' header @@ -330,7 +330,7 @@ tests: GET: /v1/archive_policy_rule status: 200 response_strings: - '"metric_pattern": "live.*", "archive_policy_name": "gabbilive", "name": "gabbilive_rule"' + - '"metric_pattern": "live.*", "archive_policy_name": "gabbilive", "name": "gabbilive_rule"' - name: get unknown archive policy rule GET: /v1/archive_policy_rule/foo @@ -556,7 +556,7 @@ tests: =: id: "2ae35573-7f9f-4bb1-aae8-dad8dff5706e" response_strings: - '"user_id": "126204ef-989a-46fd-999b-ee45c8108f31"' + - '"user_id": "126204ef-989a-46fd-999b-ee45c8108f31"' - name: search for myresource resource via user_id and project_id POST: /v1/search/resource/generic @@ -569,7 +569,7 @@ tests: - =: project_id: "98e785d7-9487-4159-8ab8-8230ec37537a" response_strings: - '"id": "2ae35573-7f9f-4bb1-aae8-dad8dff5706e"' + - '"id": "2ae35573-7f9f-4bb1-aae8-dad8dff5706e"' - name: patch myresource resource PATCH: /v1/resource/myresource/2ae35573-7f9f-4bb1-aae8-dad8dff5706e -- GitLab From 35866d63aa03eb0419c4cc9fc5497f6a284cf252 Mon Sep 17 00:00:00 2001 From: gord chung Date: Thu, 16 Mar 2017 21:42:37 +0000 Subject: [PATCH 0663/1483] redis: use hashes for aggregated storage redis provides hash data structure. 
so let's just store all our data under a common metric key and have a field-key for each obj Change-Id: I2d1622a5c2f26f677df143d714cbd9dad5f17284 --- gnocchi/storage/redis.py | 99 +++++++++++++++++++--------------------- 1 file changed, 46 insertions(+), 53 deletions(-) diff --git a/gnocchi/storage/redis.py b/gnocchi/storage/redis.py index 2cc0ad21..e0e69723 100644 --- a/gnocchi/storage/redis.py +++ b/gnocchi/storage/redis.py @@ -33,100 +33,93 @@ class RedisStorage(_carbonara.CarbonaraBasedStorage): WRITE_FULL = True STORAGE_PREFIX = "timeseries" + FIELD_SEP = '_' def __init__(self, conf, incoming): super(RedisStorage, self).__init__(conf, incoming) self._client = redis.get_client(conf) - def _build_metric_dir(self, metric): + def _metric_key(self, metric): return os.path.join(self.STORAGE_PREFIX, str(metric.id)) - def _build_unaggregated_timeserie_path(self, metric, version=3): - return os.path.join( - self._build_metric_dir(metric), - 'none' + ("_v%s" % version if version else "")) + @staticmethod + def _unaggregated_field(version=3): + return 'none' + ("_v%s" % version if version else "") - def _build_metric_path(self, metric, aggregation): - return os.path.join(self._build_metric_dir(metric), - "agg_" + aggregation) - - def _build_metric_path_for_split(self, metric, aggregation, - timestamp_key, granularity, version=3): - path = os.path.join(self._build_metric_path(metric, aggregation), - timestamp_key + "_" + str(granularity)) + @classmethod + def _aggregated_field_for_split(cls, aggregation, timestamp_key, + granularity, version=3): + path = cls.FIELD_SEP.join([timestamp_key, aggregation, + str(granularity)]) return path + '_v%s' % version if version else path def _create_metric(self, metric): - path = self._build_metric_dir(metric) - ret = self._client.set(path.encode("utf-8"), "created", nx=True) - if ret is None: + key = self._metric_key(metric).encode("utf8") + if self._client.exists(key): raise storage.MetricAlreadyExists(metric) + self._client.hset( + 
key, self._unaggregated_field().encode("utf8"), None) def _store_unaggregated_timeserie(self, metric, data, version=3): - path = self._build_unaggregated_timeserie_path(metric, version) - self._client.set(path.encode("utf8"), data) + self._client.hset(self._metric_key(metric).encode("utf8"), + self._unaggregated_field(version).encode("utf8"), + data) def _get_unaggregated_timeserie(self, metric, version=3): - path = self._build_unaggregated_timeserie_path(metric, version) - data = self._client.get(path.encode("utf8")) + data = self._client.hget( + self._metric_key(metric).encode("utf8"), + self._unaggregated_field(version).encode("utf8")) if data is None: raise storage.MetricDoesNotExist(metric) return data def _delete_unaggregated_timeserie(self, metric, version=3): - path = self._build_unaggregated_timeserie_path(metric, version) - data = self._client.get(path.encode("utf8")) - if data is None: - raise storage.MetricDoesNotExist(metric) - self._client.delete(path.encode("utf8")) + # FIXME(gordc): this really doesn't need to be part of abstract + # do it part of _delete_metric + pass def _list_split_keys_for_metric(self, metric, aggregation, granularity, version=None): - path = self._build_metric_dir(metric) - if self._client.get(path.encode("utf8")) is None: + key = self._metric_key(metric).encode("utf8") + if not self._client.exists(key): raise storage.MetricDoesNotExist(metric) - match = os.path.join(self._build_metric_path(metric, aggregation), - "*") split_keys = set() - for key in self._client.scan_iter(match=match.encode("utf8")): - key = key.decode("utf8") - key = key.split(os.path.sep)[-1] - meta = key.split("_") - if meta[1] == str(granularity) and self._version_check(key, - version): - split_keys.add(meta[0]) + # FIXME(gordc): version shouldn't be None but it's not used anywhere + hashes = self._client.hscan_iter( + key, match=self._aggregated_field_for_split(aggregation, '*', + granularity, 3)) + for f, __ in hashes: + meta = 
f.decode("utf8").split(self.FIELD_SEP, 1) + split_keys.add(meta[0]) return split_keys def _delete_metric_measures(self, metric, timestamp_key, aggregation, granularity, version=3): - path = self._build_metric_path_for_split( - metric, aggregation, timestamp_key, granularity, version) - self._client.delete(path.encode("utf8")) + key = self._metric_key(metric) + field = self._aggregated_field_for_split( + aggregation, timestamp_key, granularity, version) + self._client.hdel(key.encode("utf8"), field.encode("utf8")) def _store_metric_measures(self, metric, timestamp_key, aggregation, granularity, data, offset=None, version=3): - path = self._build_metric_path_for_split(metric, aggregation, - timestamp_key, granularity, - version) - self._client.set(path.encode("utf8"), data) + key = self._metric_key(metric) + field = self._aggregated_field_for_split( + aggregation, timestamp_key, granularity, version) + self._client.hset(key.encode("utf8"), field.encode("utf8"), data) def _delete_metric(self, metric): - match = os.path.join(self._build_metric_dir(metric), '*') - # FIXME(gordc): this should be done in redis pipeline but i'm - # switching this to hashes anyways so it is what it is. 
- for key in self._client.scan_iter(match=match.encode("utf8")): - self._client.delete(key) + self._client.delete(self._metric_key(metric).encode("utf8")) # Carbonara API def _get_measures(self, metric, timestamp_key, aggregation, granularity, version=3): - path = self._build_metric_path_for_split( - metric, aggregation, timestamp_key, granularity, version) - data = self._client.get(path.encode("utf8")) + key = self._metric_key(metric) + field = self._aggregated_field_for_split( + aggregation, timestamp_key, granularity, version) + data = self._client.hget(key.encode("utf8"), field.encode("utf8")) if data is None: - fpath = self._build_metric_dir(metric) - if self._client.get(fpath.encode("utf8")) is None: + if not self._client.exists(key.encode("utf8")): raise storage.MetricDoesNotExist(metric) raise storage.AggregationDoesNotExist(metric, aggregation) return data -- GitLab From 157d097083f358212acf2cc9738c52adf4d09f84 Mon Sep 17 00:00:00 2001 From: gord chung Date: Mon, 20 Mar 2017 08:49:03 -0400 Subject: [PATCH 0664/1483] redis: increase query limit scan retrieves 10 results at a time. this means it makes 100 queries if there are 1000 keys. let's return 1000 at a time to decrease number of requests. 
Change-Id: Id419e4df972835227df75cd6b0bb3b2aa8fd9d49 --- gnocchi/storage/incoming/redis.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/gnocchi/storage/incoming/redis.py b/gnocchi/storage/incoming/redis.py index 3e2d68fd..08c8c737 100644 --- a/gnocchi/storage/incoming/redis.py +++ b/gnocchi/storage/incoming/redis.py @@ -39,17 +39,17 @@ class RedisStorage(_carbonara.CarbonaraBasedStorage): self._client.rpush(path.encode("utf8"), data) def _build_report(self, details): - match = os.path.join(self.STORAGE_PREFIX, "*") + match = os.path.join(self.STORAGE_PREFIX, "*").encode('utf8') metric_details = collections.defaultdict(int) - for key in self._client.scan_iter(match=match.encode('utf8')): + for key in self._client.scan_iter(match=match, count=1000): metric = key.decode('utf8').split(os.path.sep)[1] metric_details[metric] = self._client.llen(key) return (len(metric_details.keys()), sum(metric_details.values()), metric_details if details else None) def list_metric_with_measures_to_process(self, size, part, full=False): - match = os.path.join(self.STORAGE_PREFIX, "*") - keys = self._client.scan_iter(match=match.encode('utf8')) + match = os.path.join(self.STORAGE_PREFIX, "*").encode('utf8') + keys = self._client.scan_iter(match=match, count=1000) measures = set([k.decode('utf8').split(os.path.sep)[1] for k in keys]) if full: return measures -- GitLab From 4d0d66a365a7a18e348640ad15501e7d381d6eb9 Mon Sep 17 00:00:00 2001 From: gord chung Date: Fri, 17 Mar 2017 15:29:52 +0000 Subject: [PATCH 0665/1483] default to v3 format we only really support v3 anyways Change-Id: I7ff197b7194350e71ca0783064f24f874fafbb73 --- gnocchi/storage/_carbonara.py | 6 ++---- gnocchi/storage/ceph.py | 2 +- gnocchi/storage/file.py | 2 +- gnocchi/storage/redis.py | 5 ++--- gnocchi/storage/s3.py | 2 +- gnocchi/storage/swift.py | 2 +- 6 files changed, 8 insertions(+), 11 deletions(-) diff --git a/gnocchi/storage/_carbonara.py b/gnocchi/storage/_carbonara.py index 
9c68e21d..0733ec00 100644 --- a/gnocchi/storage/_carbonara.py +++ b/gnocchi/storage/_carbonara.py @@ -123,7 +123,7 @@ class CarbonaraBasedStorage(storage.StorageDriver): @staticmethod def _list_split_keys_for_metric(metric, aggregation, granularity, - version=None): + version=3): raise NotImplementedError @staticmethod @@ -132,9 +132,7 @@ class CarbonaraBasedStorage(storage.StorageDriver): Version should be last attribute and start with 'v' """ - attrs = name.split("_") - return not v or (not attrs[-1].startswith('v') if v == 2 - else attrs[-1] == 'v%s' % v) + return name.split("_")[-1] == 'v%s' % v def get_measures(self, metric, from_timestamp=None, to_timestamp=None, aggregation='mean', granularity=None, resample=None): diff --git a/gnocchi/storage/ceph.py b/gnocchi/storage/ceph.py index 4d5d930b..e242e085 100644 --- a/gnocchi/storage/ceph.py +++ b/gnocchi/storage/ceph.py @@ -123,7 +123,7 @@ class CephStorage(_carbonara.CarbonaraBasedStorage): raise storage.MetricDoesNotExist(metric) def _list_split_keys_for_metric(self, metric, aggregation, granularity, - version=None): + version=3): with rados.ReadOpCtx() as op: omaps, ret = self.ioctx.get_omap_vals(op, "", "", -1) try: diff --git a/gnocchi/storage/file.py b/gnocchi/storage/file.py index a9934d23..5a91de3f 100644 --- a/gnocchi/storage/file.py +++ b/gnocchi/storage/file.py @@ -108,7 +108,7 @@ class FileStorage(_carbonara.CarbonaraBasedStorage): raise def _list_split_keys_for_metric(self, metric, aggregation, granularity, - version=None): + version=3): try: files = os.listdir(self._build_metric_path(metric, aggregation)) except OSError as e: diff --git a/gnocchi/storage/redis.py b/gnocchi/storage/redis.py index e0e69723..ea86ed98 100644 --- a/gnocchi/storage/redis.py +++ b/gnocchi/storage/redis.py @@ -79,15 +79,14 @@ class RedisStorage(_carbonara.CarbonaraBasedStorage): pass def _list_split_keys_for_metric(self, metric, aggregation, granularity, - version=None): + version=3): key = 
self._metric_key(metric).encode("utf8") if not self._client.exists(key): raise storage.MetricDoesNotExist(metric) split_keys = set() - # FIXME(gordc): version shouldn't be None but it's not used anywhere hashes = self._client.hscan_iter( key, match=self._aggregated_field_for_split(aggregation, '*', - granularity, 3)) + granularity, version)) for f, __ in hashes: meta = f.decode("utf8").split(self.FIELD_SEP, 1) split_keys.add(meta[0]) diff --git a/gnocchi/storage/s3.py b/gnocchi/storage/s3.py index 9decfb47..67d8d3b1 100644 --- a/gnocchi/storage/s3.py +++ b/gnocchi/storage/s3.py @@ -136,7 +136,7 @@ class S3Storage(_carbonara.CarbonaraBasedStorage): return response['Body'].read() def _list_split_keys_for_metric(self, metric, aggregation, granularity, - version=None): + version=3): bucket = self._bucket_name(metric) keys = set() response = {} diff --git a/gnocchi/storage/swift.py b/gnocchi/storage/swift.py index ce671962..da2dffa0 100644 --- a/gnocchi/storage/swift.py +++ b/gnocchi/storage/swift.py @@ -144,7 +144,7 @@ class SwiftStorage(_carbonara.CarbonaraBasedStorage): return contents def _list_split_keys_for_metric(self, metric, aggregation, granularity, - version=None): + version=3): container = self._container_name(metric) try: headers, files = self.swift.get_container( -- GitLab From fd344f6c53b5a47010546b6d7d0cc60e5569fc28 Mon Sep 17 00:00:00 2001 From: gord chung Date: Mon, 20 Mar 2017 19:08:41 +0000 Subject: [PATCH 0666/1483] drop delete_unaggregated_timeserie method unaggregated timeserie should only be deleted when metric is deleted. 
also, for the most part, unaggregated data is grouped in same key/bucket/container/folder so it can easily be deleted with other objects rather than handled individually Change-Id: Ied585409e9a08b4e73b4b470422faccce0d33db8 --- gnocchi/storage/_carbonara.py | 4 ---- gnocchi/storage/redis.py | 5 ----- gnocchi/storage/s3.py | 11 ----------- gnocchi/storage/swift.py | 10 ---------- 4 files changed, 30 deletions(-) diff --git a/gnocchi/storage/_carbonara.py b/gnocchi/storage/_carbonara.py index 9c68e21d..ec9c1b48 100644 --- a/gnocchi/storage/_carbonara.py +++ b/gnocchi/storage/_carbonara.py @@ -117,10 +117,6 @@ class CarbonaraBasedStorage(storage.StorageDriver): granularity, data, offset=None, version=3): raise NotImplementedError - @staticmethod - def _delete_unaggregated_timeserie(metric, version=3): - raise NotImplementedError - @staticmethod def _list_split_keys_for_metric(metric, aggregation, granularity, version=None): diff --git a/gnocchi/storage/redis.py b/gnocchi/storage/redis.py index e0e69723..fbf6a22f 100644 --- a/gnocchi/storage/redis.py +++ b/gnocchi/storage/redis.py @@ -73,11 +73,6 @@ class RedisStorage(_carbonara.CarbonaraBasedStorage): raise storage.MetricDoesNotExist(metric) return data - def _delete_unaggregated_timeserie(self, metric, version=3): - # FIXME(gordc): this really doesn't need to be part of abstract - # do it part of _delete_metric - pass - def _list_split_keys_for_metric(self, metric, aggregation, granularity, version=None): key = self._metric_key(metric).encode("utf8") diff --git a/gnocchi/storage/s3.py b/gnocchi/storage/s3.py index 9decfb47..7e8e3ccb 100644 --- a/gnocchi/storage/s3.py +++ b/gnocchi/storage/s3.py @@ -93,7 +93,6 @@ class S3Storage(_carbonara.CarbonaraBasedStorage): timestamp_key, aggregation, granularity, version)) def _delete_metric(self, metric): - self._delete_unaggregated_timeserie(metric) bucket = self._bucket_name(metric) response = {} while response.get('IsTruncated', True): @@ -188,13 +187,3 @@ class 
S3Storage(_carbonara.CarbonaraBasedStorage): Bucket=self._bucket_name(metric), Key=self._build_unaggregated_timeserie_path(version), Body=data) - - def _delete_unaggregated_timeserie(self, metric, version=3): - try: - self.s3.delete_object( - Bucket=self._bucket_name(metric), - Key=self._build_unaggregated_timeserie_path(version)) - except botocore.exceptions.ClientError as e: - code = e.response['Error'].get('Code') - if code not in ("NoSuchKey", "NoSuchBucket"): - raise diff --git a/gnocchi/storage/swift.py b/gnocchi/storage/swift.py index ce671962..ac31af77 100644 --- a/gnocchi/storage/swift.py +++ b/gnocchi/storage/swift.py @@ -107,7 +107,6 @@ class SwiftStorage(_carbonara.CarbonaraBasedStorage): version)) def _delete_metric(self, metric): - self._delete_unaggregated_timeserie(metric) container = self._container_name(metric) try: headers, files = self.swift.get_container( @@ -184,12 +183,3 @@ class SwiftStorage(_carbonara.CarbonaraBasedStorage): self.swift.put_object(self._container_name(metric), self._build_unaggregated_timeserie_path(version), data) - - def _delete_unaggregated_timeserie(self, metric, version=3): - try: - self.swift.delete_object( - self._container_name(metric), - self._build_unaggregated_timeserie_path(version)) - except swclient.ClientException as e: - if e.http_status != 404: - raise -- GitLab From b408b5d103b610037d509c9739b10c360238b074 Mon Sep 17 00:00:00 2001 From: gord chung Date: Mon, 20 Mar 2017 18:27:10 +0000 Subject: [PATCH 0667/1483] follow redis key naming conventions generally, redis uses ':' as a separater for key schema. no need to use a os specific implementation. 
Change-Id: If3780d6ca961f1e67643763f552d2a35c4beb057 --- gnocchi/storage/common/redis.py | 2 ++ gnocchi/storage/incoming/redis.py | 11 +++++------ gnocchi/storage/redis.py | 4 +--- 3 files changed, 8 insertions(+), 9 deletions(-) diff --git a/gnocchi/storage/common/redis.py b/gnocchi/storage/common/redis.py index 7986e25c..54a5a350 100644 --- a/gnocchi/storage/common/redis.py +++ b/gnocchi/storage/common/redis.py @@ -28,6 +28,8 @@ except ImportError: sentinel = None +SEP = ':' + CLIENT_ARGS = frozenset([ 'db', 'encoding', diff --git a/gnocchi/storage/incoming/redis.py b/gnocchi/storage/incoming/redis.py index 08c8c737..258f14cb 100644 --- a/gnocchi/storage/incoming/redis.py +++ b/gnocchi/storage/incoming/redis.py @@ -15,7 +15,6 @@ # under the License. import collections import contextlib -import os import six @@ -32,25 +31,25 @@ class RedisStorage(_carbonara.CarbonaraBasedStorage): self._client = redis.get_client(conf) def _build_measure_path(self, metric_id): - return os.path.join(self.STORAGE_PREFIX, six.text_type(metric_id)) + return redis.SEP.join([self.STORAGE_PREFIX, six.text_type(metric_id)]) def _store_new_measures(self, metric, data): path = self._build_measure_path(metric.id) self._client.rpush(path.encode("utf8"), data) def _build_report(self, details): - match = os.path.join(self.STORAGE_PREFIX, "*").encode('utf8') + match = redis.SEP.join([self.STORAGE_PREFIX, "*"]).encode('utf8') metric_details = collections.defaultdict(int) for key in self._client.scan_iter(match=match, count=1000): - metric = key.decode('utf8').split(os.path.sep)[1] + metric = key.decode('utf8').split(redis.SEP)[1] metric_details[metric] = self._client.llen(key) return (len(metric_details.keys()), sum(metric_details.values()), metric_details if details else None) def list_metric_with_measures_to_process(self, size, part, full=False): - match = os.path.join(self.STORAGE_PREFIX, "*").encode('utf8') + match = redis.SEP.join([self.STORAGE_PREFIX, "*"]).encode('utf8') keys = 
self._client.scan_iter(match=match, count=1000) - measures = set([k.decode('utf8').split(os.path.sep)[1] for k in keys]) + measures = set([k.decode('utf8').split(redis.SEP)[1] for k in keys]) if full: return measures return set(list(measures)[size * part:size * (part + 1)]) diff --git a/gnocchi/storage/redis.py b/gnocchi/storage/redis.py index 418cf98d..d8de7380 100644 --- a/gnocchi/storage/redis.py +++ b/gnocchi/storage/redis.py @@ -13,8 +13,6 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. -import os - from oslo_config import cfg from gnocchi import storage @@ -40,7 +38,7 @@ class RedisStorage(_carbonara.CarbonaraBasedStorage): self._client = redis.get_client(conf) def _metric_key(self, metric): - return os.path.join(self.STORAGE_PREFIX, str(metric.id)) + return redis.SEP.join([self.STORAGE_PREFIX, str(metric.id)]) @staticmethod def _unaggregated_field(version=3): -- GitLab From cd9551b963c171a84b85175a15b79dac5c726afd Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Wed, 22 Mar 2017 13:05:39 +0100 Subject: [PATCH 0668/1483] tempest: don't use Ceilometer's resource type When we run tempest and ceilometer is deployed, this tests will fail because image and instance type already exists. This change suffixes the resource types with -like to avoid the conflict. 
Change-Id: Ibd50a4e205550dbd144457aadf1bb45f12cb51cc --- .../gabbits/search-resource.yaml | 76 +++++++++---------- 1 file changed, 38 insertions(+), 38 deletions(-) diff --git a/gnocchi/tests/functional_live/gabbits/search-resource.yaml b/gnocchi/tests/functional_live/gabbits/search-resource.yaml index 1242e19a..a11c9879 100644 --- a/gnocchi/tests/functional_live/gabbits/search-resource.yaml +++ b/gnocchi/tests/functional_live/gabbits/search-resource.yaml @@ -25,13 +25,13 @@ tests: # Setup resource types if don't exist # - - name: create new resource type 'instance' + - name: create new resource type 'instance-like' POST: /v1/resource_type status: 201 request_headers: content-type: application/json data: - name: instance + name: instance-like attributes: display_name: type: string @@ -49,13 +49,13 @@ tests: type: string required: False - - name: create new resource type 'image' + - name: create new resource type 'image-like' POST: /v1/resource_type status: 201 request_headers: content-type: application/json data: - name: image + name: image-like attributes: name: type: string @@ -70,8 +70,8 @@ tests: # # Setup test resources # - - name: helper. create instance resource-1 - POST: /v1/resource/instance + - name: helper. create instance-like resource-1 + POST: /v1/resource/instance-like request_headers: content-type: application/json data: @@ -84,8 +84,8 @@ tests: project_id: c9a5f184-c0d0-4daa-83c3-af6fdc0879e6 status: 201 - - name: helper. create instance resource-2 - POST: /v1/resource/instance + - name: helper. create instance-like resource-2 + POST: /v1/resource/instance-like request_headers: content-type: application/json data: @@ -98,8 +98,8 @@ tests: project_id: c9a5f184-c0d0-4daa-83c3-af6fdc0879e6 status: 201 - - name: helper. create instance resource-3 - POST: /v1/resource/instance + - name: helper. 
create instance-like resource-3 + POST: /v1/resource/instance-like request_headers: content-type: application/json data: @@ -112,8 +112,8 @@ tests: project_id: 40eba01c-b348-49b8-803f-67123251a00a status: 201 - - name: helper. create image resource-1 - POST: /v1/resource/image + - name: helper. create image-like resource-1 + POST: /v1/resource/image-like request_headers: content-type: application/json data: @@ -141,12 +141,12 @@ tests: response_json_paths: $.`len`: 2 response_json_paths: - $.[0].type: instance - $.[1].type: image + $.[0].type: instance-like + $.[1].type: image-like $.[0].id: c442a47c-eb33-46ce-9665-f3aa0bef54e7 $.[1].id: 7ab2f7ae-7af5-4469-bdc8-3c0f6dfab75d - - name: search for all resources of instance type create by specific user_id + - name: search for all resources of instance-like type create by specific user_id desc: all instances created by a specified user POST: /v1/search/resource/generic request_headers: @@ -154,7 +154,7 @@ tests: data: and: - =: - type: instance + type: instance-like - =: user_id: 33ba83ca-2f12-4ad6-8fa2-bc8b55d36e07 status: 200 @@ -166,8 +166,8 @@ tests: response_json_paths: $.[0].id: a64ca14f-bc7c-45b0-aa85-42cd2179e1e2 $.[1].id: 7ccccfa0-92ce-4225-80ca-3ac9cb122d6a - $.[0].type: instance - $.[1].type: instance + $.[0].type: instance-like + $.[1].type: instance-like $.[0].metrics.`len`: 0 $.[1].metrics.`len`: 0 @@ -185,7 +185,7 @@ tests: - name: search for intances on a specific compute using "like" keyword desc: search for vms hosted on a specific compute node - POST: /v1/search/resource/instance + POST: /v1/search/resource/instance-like request_headers: content-type: application/json data: @@ -203,7 +203,7 @@ tests: - name: search for instances using complex search with "like" keyword and user_id desc: search for vms of specified user hosted on a specific compute node - POST: /v1/search/resource/instance + POST: /v1/search/resource/instance-like request_headers: content-type: application/json data: @@ -219,8 +219,8 
@@ tests: - '"display_name": "vm-gabbi-2"' - '"project_id": "c9a5f184-c0d0-4daa-83c3-af6fdc0879e6"' - - name: search for resources of instance or image type with specific user_id - desc: search for all image or instance resources created by a specific user + - name: search for resources of instance-like or image-like type with specific user_id + desc: search for all image-like or instance-like resources created by a specific user POST: /v1/search/resource/generic request_headers: content-type: application/json @@ -231,16 +231,16 @@ tests: - or: - =: - type: instance + type: instance-like - =: - type: image + type: image-like status: 200 response_json_paths: $.`len`: 2 response_strings: - - '"type": "image"' - - '"type": "instance"' + - '"type": "image-like"' + - '"type": "instance-like"' - '"id": "7ab2f7ae-7af5-4469-bdc8-3c0f6dfab75d"' - '"id": "c442a47c-eb33-46ce-9665-f3aa0bef54e7"' @@ -248,27 +248,27 @@ tests: # Tear down resources # - - name: helper. delete instance resource-1 - DELETE: /v1/resource/instance/a64ca14f-bc7c-45b0-aa85-42cd2179e1e2 + - name: helper. delete instance-like resource-1 + DELETE: /v1/resource/instance-like/a64ca14f-bc7c-45b0-aa85-42cd2179e1e2 status: 204 - - name: helper. delete instance resource-2 - DELETE: /v1/resource/instance/7ccccfa0-92ce-4225-80ca-3ac9cb122d6a + - name: helper. delete instance-like resource-2 + DELETE: /v1/resource/instance-like/7ccccfa0-92ce-4225-80ca-3ac9cb122d6a status: 204 - - name: helper. delete instance resource-3 - DELETE: /v1/resource/instance/c442a47c-eb33-46ce-9665-f3aa0bef54e7 + - name: helper. delete instance-like resource-3 + DELETE: /v1/resource/instance-like/c442a47c-eb33-46ce-9665-f3aa0bef54e7 status: 204 - - name: helper. delete image resource - DELETE: /v1/resource/image/7ab2f7ae-7af5-4469-bdc8-3c0f6dfab75d + - name: helper. delete image-like resource + DELETE: /v1/resource/image-like/7ab2f7ae-7af5-4469-bdc8-3c0f6dfab75d status: 204 - - name: helper. 
delete resource-type instance - DELETE: /v1/resource_type/instance + - name: helper. delete resource-type instance-like + DELETE: /v1/resource_type/instance-like status: 204 - - name: helper. delete resource-type image - DELETE: /v1/resource_type/image + - name: helper. delete resource-type image-like + DELETE: /v1/resource_type/image-like status: 204 -- GitLab From 3841edcb1083e7bc8e514a0548d252ae22fd6e76 Mon Sep 17 00:00:00 2001 From: gord chung Date: Tue, 21 Mar 2017 20:45:53 +0000 Subject: [PATCH 0669/1483] redis: drop encoding key is always 'incoming:uuid' and according to the internet it is default encoded to utf8 already[1]. we do need to decode the output but only for keys/fields because our values are not utf8 strings but some lz4 compressed binary so we can't blindly decode using decode_responses. [1] http://stackoverflow.com/questions/35709765/encapsulating-unicode-from-redis Change-Id: I4695c0be6a34076f086374e088f5e0492b2d73bc --- gnocchi/storage/incoming/redis.py | 6 +++--- gnocchi/storage/redis.py | 27 ++++++++++++--------------- 2 files changed, 15 insertions(+), 18 deletions(-) diff --git a/gnocchi/storage/incoming/redis.py b/gnocchi/storage/incoming/redis.py index 258f14cb..32908aca 100644 --- a/gnocchi/storage/incoming/redis.py +++ b/gnocchi/storage/incoming/redis.py @@ -35,10 +35,10 @@ class RedisStorage(_carbonara.CarbonaraBasedStorage): def _store_new_measures(self, metric, data): path = self._build_measure_path(metric.id) - self._client.rpush(path.encode("utf8"), data) + self._client.rpush(path, data) def _build_report(self, details): - match = redis.SEP.join([self.STORAGE_PREFIX, "*"]).encode('utf8') + match = redis.SEP.join([self.STORAGE_PREFIX, "*"]) metric_details = collections.defaultdict(int) for key in self._client.scan_iter(match=match, count=1000): metric = key.decode('utf8').split(redis.SEP)[1] @@ -47,7 +47,7 @@ class RedisStorage(_carbonara.CarbonaraBasedStorage): metric_details if details else None) def 
list_metric_with_measures_to_process(self, size, part, full=False): - match = redis.SEP.join([self.STORAGE_PREFIX, "*"]).encode('utf8') + match = redis.SEP.join([self.STORAGE_PREFIX, "*"]) keys = self._client.scan_iter(match=match, count=1000) measures = set([k.decode('utf8').split(redis.SEP)[1] for k in keys]) if full: diff --git a/gnocchi/storage/redis.py b/gnocchi/storage/redis.py index d8de7380..fe9fbc42 100644 --- a/gnocchi/storage/redis.py +++ b/gnocchi/storage/redis.py @@ -52,28 +52,25 @@ class RedisStorage(_carbonara.CarbonaraBasedStorage): return path + '_v%s' % version if version else path def _create_metric(self, metric): - key = self._metric_key(metric).encode("utf8") + key = self._metric_key(metric) if self._client.exists(key): raise storage.MetricAlreadyExists(metric) - self._client.hset( - key, self._unaggregated_field().encode("utf8"), None) + self._client.hset(key, self._unaggregated_field(), None) def _store_unaggregated_timeserie(self, metric, data, version=3): - self._client.hset(self._metric_key(metric).encode("utf8"), - self._unaggregated_field(version).encode("utf8"), - data) + self._client.hset(self._metric_key(metric), + self._unaggregated_field(version), data) def _get_unaggregated_timeserie(self, metric, version=3): - data = self._client.hget( - self._metric_key(metric).encode("utf8"), - self._unaggregated_field(version).encode("utf8")) + data = self._client.hget(self._metric_key(metric), + self._unaggregated_field(version)) if data is None: raise storage.MetricDoesNotExist(metric) return data def _list_split_keys_for_metric(self, metric, aggregation, granularity, version=3): - key = self._metric_key(metric).encode("utf8") + key = self._metric_key(metric) if not self._client.exists(key): raise storage.MetricDoesNotExist(metric) split_keys = set() @@ -90,17 +87,17 @@ class RedisStorage(_carbonara.CarbonaraBasedStorage): key = self._metric_key(metric) field = self._aggregated_field_for_split( aggregation, timestamp_key, granularity, 
version) - self._client.hdel(key.encode("utf8"), field.encode("utf8")) + self._client.hdel(key, field) def _store_metric_measures(self, metric, timestamp_key, aggregation, granularity, data, offset=None, version=3): key = self._metric_key(metric) field = self._aggregated_field_for_split( aggregation, timestamp_key, granularity, version) - self._client.hset(key.encode("utf8"), field.encode("utf8"), data) + self._client.hset(key, field, data) def _delete_metric(self, metric): - self._client.delete(self._metric_key(metric).encode("utf8")) + self._client.delete(self._metric_key(metric)) # Carbonara API @@ -109,9 +106,9 @@ class RedisStorage(_carbonara.CarbonaraBasedStorage): key = self._metric_key(metric) field = self._aggregated_field_for_split( aggregation, timestamp_key, granularity, version) - data = self._client.hget(key.encode("utf8"), field.encode("utf8")) + data = self._client.hget(key, field) if data is None: - if not self._client.exists(key.encode("utf8")): + if not self._client.exists(key): raise storage.MetricDoesNotExist(metric) raise storage.AggregationDoesNotExist(metric, aggregation) return data -- GitLab From 2adf032e35a474ea33c510874e4fcf86d20361ee Mon Sep 17 00:00:00 2001 From: gord chung Date: Fri, 17 Mar 2017 16:34:58 +0000 Subject: [PATCH 0670/1483] check unprocessed measures cleared on delete - validate that the unprocessed measures are gone and not just ignored - we use delete_metric against indexer as it mimics normal workflow Change-Id: Id87062ef4b277f56bd257dd28a37d4326028906f --- gnocchi/tests/test_storage.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/gnocchi/tests/test_storage.py b/gnocchi/tests/test_storage.py index 95b1f47c..21fbe248 100644 --- a/gnocchi/tests/test_storage.py +++ b/gnocchi/tests/test_storage.py @@ -107,8 +107,13 @@ class TestStorageDriver(tests_base.TestCase): self.storage.incoming.add_measures(self.metric, [ storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 0, 1), 69), ]) - 
self.storage.delete_metric(self.metric, sync=True) + self.index.delete_metric(self.metric.id) self.trigger_processing() + __, __, details = self.storage.incoming._build_report(True) + self.assertIn(str(self.metric.id), details) + self.storage.expunge_metrics(self.index, sync=True) + __, __, details = self.storage.incoming._build_report(True) + self.assertNotIn(str(self.metric.id), details) def test_delete_expunge_metric(self): self.storage.incoming.add_measures(self.metric, [ -- GitLab From fa030072b3a711a3ce879740fd815121deee19d8 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Tue, 28 Mar 2017 15:57:59 +0200 Subject: [PATCH 0671/1483] devstack: remove verbose option New oslo.utils have removed the deprecated since 2 years verbose option and we still use it :p Change-Id: I7eb6fae80539809dea6f90c5c8e71b74f6173d98 --- devstack/plugin.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/devstack/plugin.sh b/devstack/plugin.sh index 9add5828..f6e5a6dc 100644 --- a/devstack/plugin.sh +++ b/devstack/plugin.sh @@ -417,8 +417,8 @@ function start_gnocchi { fi # run metricd last so we are properly waiting for swift and friends - run_process gnocchi-metricd "$GNOCCHI_BIN_DIR/gnocchi-metricd -d -v --config-file $GNOCCHI_CONF" - run_process gnocchi-statsd "$GNOCCHI_BIN_DIR/gnocchi-statsd -d -v --config-file $GNOCCHI_CONF" + run_process gnocchi-metricd "$GNOCCHI_BIN_DIR/gnocchi-metricd -d --config-file $GNOCCHI_CONF" + run_process gnocchi-statsd "$GNOCCHI_BIN_DIR/gnocchi-statsd -d --config-file $GNOCCHI_CONF" } # stop_gnocchi() - Stop running processes -- GitLab From 4ac9d53383b0db8fd07f8073df63d61334e22cd6 Mon Sep 17 00:00:00 2001 From: gord chung Date: Mon, 27 Mar 2017 19:54:57 +0000 Subject: [PATCH 0672/1483] don't raise error if unaggregated empty new lz4 library doesn't like handling empty binary. 
if we kill agent during computation of aggregates, the unaggregated object might have been created (in ceph/redis case) but it may not have saved unaggregated measures leaving the object blank. this patch returns None and let's workflow proceed as if new if object is empty since in scenario above, the original raw measures will not have been cleared from unprocessed so they will still be processed again. also, fixes redis issue where passing in None makes the redis actually store 'None'. Change-Id: I358e50ccadff721348630688c47544db6553e96b Closes-Bug: #1676519 --- gnocchi/storage/_carbonara.py | 2 ++ gnocchi/storage/redis.py | 2 +- gnocchi/tests/test_storage.py | 20 ++++++++++++++++++++ 3 files changed, 23 insertions(+), 1 deletion(-) diff --git a/gnocchi/storage/_carbonara.py b/gnocchi/storage/_carbonara.py index d59032da..e53f1f0a 100644 --- a/gnocchi/storage/_carbonara.py +++ b/gnocchi/storage/_carbonara.py @@ -96,6 +96,8 @@ class CarbonaraBasedStorage(storage.StorageDriver): self._get_unaggregated_timeserie( metric) ) + if not raw_measures: + return LOG.debug( "Retrieve unaggregated measures " "for %s in %.2fs", diff --git a/gnocchi/storage/redis.py b/gnocchi/storage/redis.py index fe9fbc42..fc2c63ad 100644 --- a/gnocchi/storage/redis.py +++ b/gnocchi/storage/redis.py @@ -55,7 +55,7 @@ class RedisStorage(_carbonara.CarbonaraBasedStorage): key = self._metric_key(metric) if self._client.exists(key): raise storage.MetricAlreadyExists(metric) - self._client.hset(key, self._unaggregated_field(), None) + self._client.hset(key, self._unaggregated_field(), '') def _store_unaggregated_timeserie(self, metric, data, version=3): self._client.hset(self._metric_key(metric), diff --git a/gnocchi/tests/test_storage.py b/gnocchi/tests/test_storage.py index 95b1f47c..b0c4960b 100644 --- a/gnocchi/tests/test_storage.py +++ b/gnocchi/tests/test_storage.py @@ -76,6 +76,26 @@ class TestStorageDriver(tests_base.TestCase): self.assertIn((utils.datetime_utc(2014, 1, 1, 13), 3600.0, 1), 
m) self.assertIn((utils.datetime_utc(2014, 1, 1, 13), 300.0, 1), m) + def test_aborted_initial_processing(self): + self.storage.incoming.add_measures(self.metric, [ + storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 0, 1), 5), + ]) + with mock.patch.object(self.storage, '_store_unaggregated_timeserie', + side_effect=Exception): + try: + self.trigger_processing() + except Exception: + pass + + with mock.patch('gnocchi.storage._carbonara.LOG') as LOG: + self.trigger_processing() + self.assertFalse(LOG.error.called) + + m = self.storage.get_measures(self.metric) + self.assertIn((utils.datetime_utc(2014, 1, 1), 86400.0, 5.0), m) + self.assertIn((utils.datetime_utc(2014, 1, 1, 12), 3600.0, 5.0), m) + self.assertIn((utils.datetime_utc(2014, 1, 1, 12), 300.0, 5.0), m) + def test_list_metric_with_measures_to_process(self): metrics = self.storage.incoming.list_metric_with_measures_to_process( None, None, full=True) -- GitLab From aa0716be945352e2d9ac65a8861916b9e634d1f0 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Thu, 2 Feb 2017 09:10:46 +0100 Subject: [PATCH 0673/1483] tests: functional with tox+pifpaf Change-Id: I7d609a778c7c8592a3c7847e62b832d3805a71f4 --- bindep.txt | 1 + devstack/gate/post_test_hook.sh | 1 + gnocchi/tempest/scenario/__init__.py | 1 + .../tests/functional_live/gabbits/live.yaml | 13 +++-- .../gabbits/search-resource.yaml | 1 + run-func-tests.sh | 55 +++++++++++++++++++ setup.cfg | 4 +- tox.ini | 16 +++++- 8 files changed, 84 insertions(+), 8 deletions(-) create mode 100755 run-func-tests.sh diff --git a/bindep.txt b/bindep.txt index 50e6e0ca..9d9b91a5 100644 --- a/bindep.txt +++ b/bindep.txt @@ -7,3 +7,4 @@ libffi-dev [platform:dpkg] librados-dev [platform:dpkg] ceph [platform:dpkg] redis-server [platform:dpkg] +liberasurecode-dev [platform:dpkg] diff --git a/devstack/gate/post_test_hook.sh b/devstack/gate/post_test_hook.sh index 948e60ca..91e56c56 100755 --- a/devstack/gate/post_test_hook.sh +++ b/devstack/gate/post_test_hook.sh @@ -40,6 
+40,7 @@ openstack catalog list export GNOCCHI_SERVICE_TOKEN=$(openstack token issue -c id -f value) export GNOCCHI_SERVICE_URL=$(openstack catalog show metric -c endpoints -f value | awk '/public/{print $2}') +export GNOCCHI_AUTHORIZATION="" # Temporary set to transition to the new functional testing curl -X GET ${GNOCCHI_SERVICE_URL}/v1/archive_policy -H "Content-Type: application/json" diff --git a/gnocchi/tempest/scenario/__init__.py b/gnocchi/tempest/scenario/__init__.py index 3e011b88..43ec4742 100644 --- a/gnocchi/tempest/scenario/__init__.py +++ b/gnocchi/tempest/scenario/__init__.py @@ -62,6 +62,7 @@ class GnocchiGabbiTest(tempest.test.BaseTestCase): require_ssl=require_ssl) os.environ["GNOCCHI_SERVICE_TOKEN"] = token + os.environ["GNOCCHI_AUTHORIZATION"] = "not used" @classmethod def clear_credentials(cls): diff --git a/gnocchi/tests/functional_live/gabbits/live.yaml b/gnocchi/tests/functional_live/gabbits/live.yaml index b102da2a..8f568570 100644 --- a/gnocchi/tests/functional_live/gabbits/live.yaml +++ b/gnocchi/tests/functional_live/gabbits/live.yaml @@ -6,6 +6,7 @@ defaults: request_headers: x-auth-token: $ENVIRON['GNOCCHI_SERVICE_TOKEN'] + authorization: $ENVIRON['GNOCCHI_AUTHORIZATION'] tests: - name: check / @@ -34,6 +35,7 @@ tests: request_headers: content-type: application/json x-auth-token: 'hello' + authorization: 'basic hello:' data: name: medium definition: @@ -145,8 +147,8 @@ tests: $.definition[2].granularity: "0:01:00" $.definition[2].points: 5 $.definition[2].timespan: "0:05:00" - response_strings: - - '"aggregation_methods": ["max", "min", "mean"]' + response_json_paths: + $.aggregation_methods.`sorted`: ["max", "mean", "min"] - name: get wrong accept desc: invalid 'accept' header @@ -300,6 +302,7 @@ tests: request_headers: content-type: application/json x-auth-token: 'hello' + authorization: 'basic hello:' data: name: test_rule metric_pattern: "disk.foo.*" @@ -329,8 +332,10 @@ tests: - name: get all archive policy rules GET: 
/v1/archive_policy_rule status: 200 - response_strings: - - '"metric_pattern": "live.*", "archive_policy_name": "gabbilive", "name": "gabbilive_rule"' + response_json_paths: + $[\name][0].name: "gabbilive_rule" + $[\name][0].metric_pattern: "live.*" + $[\name][0].archive_policy_name: "gabbilive" - name: get unknown archive policy rule GET: /v1/archive_policy_rule/foo diff --git a/gnocchi/tests/functional_live/gabbits/search-resource.yaml b/gnocchi/tests/functional_live/gabbits/search-resource.yaml index a11c9879..fe254788 100644 --- a/gnocchi/tests/functional_live/gabbits/search-resource.yaml +++ b/gnocchi/tests/functional_live/gabbits/search-resource.yaml @@ -19,6 +19,7 @@ defaults: request_headers: x-auth-token: $ENVIRON['GNOCCHI_SERVICE_TOKEN'] + authorization: $ENVIRON['GNOCCHI_AUTHORIZATION'] tests: # diff --git a/run-func-tests.sh b/run-func-tests.sh new file mode 100755 index 00000000..4d702208 --- /dev/null +++ b/run-func-tests.sh @@ -0,0 +1,55 @@ +#!/bin/bash -x +set -e + +cleanup(){ + type -t gnocchi_stop >/dev/null && gnocchi_stop || true + type -t indexer_stop >/dev/null && indexer_stop || true + type -t storage_stop >/dev/null && storage_stop || true +} +trap cleanup EXIT + +GNOCCHI_TEST_STORAGE_DRIVERS=${GNOCCHI_TEST_STORAGE_DRIVERS:-file} +GNOCCHI_TEST_INDEXER_DRIVERS=${GNOCCHI_TEST_INDEXER_DRIVERS:-postgresql} +for storage in ${GNOCCHI_TEST_STORAGE_DRIVERS}; do + for indexer in ${GNOCCHI_TEST_INDEXER_DRIVERS}; do + case $storage in + ceph) + eval $(pifpaf -e STORAGE run ceph) + rados -c $STORAGE_CEPH_CONF mkpool gnocchi + STORAGE_URL=ceph://$STORAGE_CEPH_CONF + ;; + s3) + if ! 
which s3rver >/dev/null 2>&1 + then + mkdir -p npm-s3rver + export NPM_CONFIG_PREFIX=npm-s3rver + npm install s3rver --global + export PATH=$PWD/npm-s3rver/bin:$PATH + fi + eval $(pifpaf -e STORAGE run s3rver) + ;; + file) + STORAGE_URL=file:// + ;; + + swift|redis) + eval $(pifpaf -e STORAGE run $storage) + ;; + *) + echo "Unsupported storage backend by functional tests: $storage" + exit 1 + ;; + esac + + eval $(pifpaf -e INDEXER run $indexer) + eval $(pifpaf -e GNOCCHI run gnocchi --indexer-url $INDEXER_URL --storage-url $STORAGE_URL) + + export GNOCCHI_SERVICE_URL=$GNOCCHI_ENDPOINT + export GNOCCHI_SERVICE_TOKEN="" # Just make gabbi happy + export GNOCCHI_AUTHORIZATION="basic YWRtaW46" # admin in base64 + export OS_TEST_PATH=gnocchi/tests/functional_live + ./tools/pretty_tox.sh $* + + cleanup + done +done diff --git a/setup.cfg b/setup.cfg index 0e2e1c43..45dbbeb4 100644 --- a/setup.cfg +++ b/setup.cfg @@ -66,7 +66,7 @@ doc = Jinja2 reno>=1.6.2 test = - pifpaf>=0.12.0 + pifpaf>=0.25.0 gabbi>=1.30.0 coverage>=3.6 fixtures @@ -83,6 +83,8 @@ test = tooz>=1.38 keystonemiddleware>=4.0.0 wsgi_intercept>=1.4.1 +test-swift = + python-swiftclient [global] setup-hooks = diff --git a/tox.ini b/tox.ini index f2d2f320..d7af9206 100644 --- a/tox.ini +++ b/tox.ini @@ -19,20 +19,30 @@ setenv = postgresql: GNOCCHI_TEST_INDEXER_DRIVERS=postgresql mysql: GNOCCHI_TEST_INDEXER_DRIVERS=mysql - GNOCCHI_STORAGE_DEPS=file,swift,s3,ceph,ceph_recommended_lib,redis + GNOCCHI_STORAGE_DEPS=file,swift,test-swift,s3,ceph,ceph_recommended_lib,redis ceph: GNOCCHI_STORAGE_DEPS=ceph,ceph_recommended_lib - swift: GNOCCHI_STORAGE_DEPS=swift + swift: GNOCCHI_STORAGE_DEPS=swift,test-swift file: GNOCCHI_STORAGE_DEPS=file redis: GNOCCHI_STORAGE_DEPS=redis s3: GNOCCHI_STORAGE_DEPS=s3 + + # FIXME(sileht): pbr doesn't support url in setup.cfg extras, so we do this crap + GNOCCHI_TEST_TARBALLS=http://tarballs.openstack.org/swift/swift-master.tar.gz#egg=swift + ceph: GNOCCHI_TEST_TARBALLS= + swift: 
GNOCCHI_TEST_TARBALLS=http://tarballs.openstack.org/swift/swift-master.tar.gz#egg=swift + s3: GNOCCHI_TEST_TARBALLS= + redis: GNOCCHI_TEST_TARBALLS= + file: GNOCCHI_TEST_TARBALLS= deps = .[test] postgresql: .[postgresql,{env:GNOCCHI_STORAGE_DEPS}] mysql: .[mysql,{env:GNOCCHI_STORAGE_DEPS}] + {env:GNOCCHI_TEST_TARBALLS:} # NOTE(tonyb): This project has chosen to *NOT* consume upper-constraints.txt commands = doc8 --ignore-path doc/source/rest.rst doc/source gnocchi-config-generator {toxinidir}/run-tests.sh {posargs} + {toxinidir}/run-func-tests.sh {posargs} [testenv:py35-postgresql-file-upgrade-from-3.1] # We should always recreate since the script upgrade @@ -76,7 +86,7 @@ commands = flake8 [testenv:py27-gate] setenv = OS_TEST_PATH=gnocchi/tests/functional_live GABBI_LIVE=1 -passenv = {[testenv]passenv} GNOCCHI_SERVICE* +passenv = {[testenv]passenv} GNOCCHI_SERVICE* GNOCCHI_AUTHORIZATION sitepackages = True basepython = python2.7 commands = {toxinidir}/tools/pretty_tox.sh '{posargs}' -- GitLab From 868493e48622e954746d8d267f2935a2627e6655 Mon Sep 17 00:00:00 2001 From: gord chung Date: Tue, 28 Mar 2017 20:54:53 +0000 Subject: [PATCH 0674/1483] search to oslo.config locations for api-paste this is consistent with previous behaviour. 
look at oslo.config directories first and if nothing found, default to our internal version Change-Id: I8510abc4705b0f9e2b4b25ede3b683b909493606 --- gnocchi/opts.py | 6 +----- gnocchi/rest/app.py | 5 +++-- gnocchi/tests/base.py | 5 +++++ gnocchi/tests/functional/fixtures.py | 5 +++++ 4 files changed, 14 insertions(+), 7 deletions(-) diff --git a/gnocchi/opts.py b/gnocchi/opts.py index f8baeffd..e28a3157 100644 --- a/gnocchi/opts.py +++ b/gnocchi/opts.py @@ -14,7 +14,6 @@ import copy import itertools import operator -import os import pkg_resources import uuid @@ -89,10 +88,7 @@ def list_opts(): )), ("api", ( cfg.StrOpt('paste_config', - default=os.path.abspath( - os.path.join( - os.path.dirname(__file__), - "rest", "api-paste.ini")), + default="api-paste.ini", help='Path to API Paste configuration.'), cfg.StrOpt('auth_mode', default="basic", diff --git a/gnocchi/rest/app.py b/gnocchi/rest/app.py index e5ee7c14..02022bd9 100644 --- a/gnocchi/rest/app.py +++ b/gnocchi/rest/app.py @@ -14,10 +14,10 @@ # License for the specific language governing permissions and limitations # under the License. import os +import pkg_resources import uuid import warnings -from oslo_config import cfg from oslo_log import log from oslo_middleware import cors from oslo_policy import policy @@ -100,7 +100,8 @@ def load_app(conf, indexer=None, storage=None, cfg_path = conf.find_file(cfg_path) if cfg_path is None or not os.path.exists(cfg_path): - raise cfg.ConfigFilesNotFoundError([conf.api.paste_config]) + LOG.debug("No api-paste configuration file found! 
Using default.") + cfg_path = pkg_resources.resource_filename(__name__, "api-paste.ini") config = dict(conf=conf, indexer=indexer, storage=storage, not_implemented_middleware=not_implemented_middleware) diff --git a/gnocchi/tests/base.py b/gnocchi/tests/base.py index 455a76a2..8477345f 100644 --- a/gnocchi/tests/base.py +++ b/gnocchi/tests/base.py @@ -244,6 +244,11 @@ class TestCase(base.BaseTestCase): self.conf = service.prepare_service([], default_config_files=[]) + py_root = os.path.abspath(os.path.join(os.path.dirname(__file__), + '..',)) + self.conf.set_override('paste_config', + os.path.join(py_root, 'rest', 'api-paste.ini'), + group="api") # NOTE(jd) This allows to test S3 on AWS if not os.getenv("AWS_ACCESS_KEY_ID"): diff --git a/gnocchi/tests/functional/fixtures.py b/gnocchi/tests/functional/fixtures.py index 226ef724..7af6203f 100644 --- a/gnocchi/tests/functional/fixtures.py +++ b/gnocchi/tests/functional/fixtures.py @@ -83,6 +83,11 @@ class ConfigFixture(fixture.GabbiFixture): dcf = [] conf = service.prepare_service([], default_config_files=dcf) + py_root = os.path.abspath(os.path.join(os.path.dirname(__file__), + '..', '..',)) + conf.set_override('paste_config', + os.path.join(py_root, 'rest', 'api-paste.ini'), + group="api") # NOTE(sileht): This is not concurrency safe, but only this tests file # deal with cors, so we are fine. 
set_override don't work because cors -- GitLab From 8e3d0dab7644deade7e417fcc70a4b19773c8019 Mon Sep 17 00:00:00 2001 From: gord chung Date: Tue, 28 Mar 2017 22:27:12 +0000 Subject: [PATCH 0675/1483] load policy from oslo.config locations detect policy in following order: - conf option path - conf option filename + oslo.config locations - internal policy.json Change-Id: I1a8787015ccb6471f0021ed2e12eb7a127defcf7 --- gnocchi/service.py | 13 +++++++++---- gnocchi/tests/base.py | 3 +++ gnocchi/tests/functional/fixtures.py | 3 +++ 3 files changed, 15 insertions(+), 4 deletions(-) diff --git a/gnocchi/service.py b/gnocchi/service.py index 069b0bc7..c71681e5 100644 --- a/gnocchi/service.py +++ b/gnocchi/service.py @@ -38,10 +38,7 @@ def prepare_service(args=None, conf=None, # FIXME(jd) Use the pkg_entry info to register the options of these libs log.register_options(conf) db_options.set_defaults(conf) - policy_opts.set_defaults(conf, policy_file=os.path.abspath( - os.path.join( - os.path.dirname(__file__), - "rest", "policy.json"))) + policy_opts.set_defaults(conf) # Register our own Gnocchi options for group, options in opts.list_opts(): @@ -71,6 +68,14 @@ def prepare_service(args=None, conf=None, urlparse.urlunparse(parsed), "storage") + cfg_path = conf.oslo_policy.policy_file + if not os.path.isabs(cfg_path): + cfg_path = conf.find_file(cfg_path) + if cfg_path is None or not os.path.exists(cfg_path): + cfg_path = os.path.abspath(os.path.join(os.path.dirname(__file__), + 'rest', 'policy.json')) + conf.set_default('policy_file', cfg_path, group='oslo_policy') + log.set_defaults(default_log_levels=log.get_default_log_levels() + ["passlib.utils.compat=INFO"]) log.setup(conf, 'gnocchi') diff --git a/gnocchi/tests/base.py b/gnocchi/tests/base.py index 8477345f..2db402f1 100644 --- a/gnocchi/tests/base.py +++ b/gnocchi/tests/base.py @@ -249,6 +249,9 @@ class TestCase(base.BaseTestCase): self.conf.set_override('paste_config', os.path.join(py_root, 'rest', 'api-paste.ini'), 
group="api") + self.conf.set_override('policy_file', + os.path.join(py_root, 'rest', 'policy.json'), + group="oslo_policy") # NOTE(jd) This allows to test S3 on AWS if not os.getenv("AWS_ACCESS_KEY_ID"): diff --git a/gnocchi/tests/functional/fixtures.py b/gnocchi/tests/functional/fixtures.py index 7af6203f..6a971221 100644 --- a/gnocchi/tests/functional/fixtures.py +++ b/gnocchi/tests/functional/fixtures.py @@ -88,6 +88,9 @@ class ConfigFixture(fixture.GabbiFixture): conf.set_override('paste_config', os.path.join(py_root, 'rest', 'api-paste.ini'), group="api") + conf.set_override('policy_file', + os.path.join(py_root, 'rest', 'policy.json'), + group="oslo_policy") # NOTE(sileht): This is not concurrency safe, but only this tests file # deal with cors, so we are fine. set_override don't work because cors -- GitLab From de09f3fae90fc3a5269dcf27744009ef7d4e1326 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Fri, 31 Mar 2017 11:36:47 +0200 Subject: [PATCH 0676/1483] carbonara: reduce the number of array copy By using `numpy.array.astype(dtype, copy=False)` we can limit the number of data copy done in memory for faster processing and less memory usage. At least in theory. Change-Id: I348d5a7b818f8cbfe51e5a327f2dc7a5f8f355dd --- gnocchi/carbonara.py | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/gnocchi/carbonara.py b/gnocchi/carbonara.py index 55b89466..1f3a4547 100644 --- a/gnocchi/carbonara.py +++ b/gnocchi/carbonara.py @@ -94,7 +94,7 @@ class GroupedTimeSeries(object): # we always assume the orderd to be the same as the input. 
freq = granularity * 10e8 self._ts = ts - self.indexes = (numpy.array(ts.index, 'float') // freq) * freq + self.indexes = (numpy.array(ts.index, numpy.float) // freq) * freq self.tstamps, self.counts = numpy.unique(self.indexes, return_counts=True) @@ -124,7 +124,7 @@ class GroupedTimeSeries(object): default=None) def _count(self): - timestamps = numpy.array(self.tstamps, 'datetime64[ns]') + timestamps = self.tstamps.astype('datetime64[ns]', copy=False) return (self.counts, timestamps) def count(self): @@ -163,7 +163,7 @@ class GroupedTimeSeries(object): values = method(self._ts.values, self.indexes, tstamps, *args, **kwargs) - timestamps = numpy.array(tstamps, 'datetime64[ns]') + timestamps = tstamps.astype('datetime64[ns]', copy=False) return pandas.Series(values, pandas.to_datetime(timestamps)) @@ -323,7 +323,7 @@ class BoundTimeSerie(TimeSerie): :nb_points*cls._SERIALIZATION_TIMESTAMP_LEN] timestamps = numpy.frombuffer(timestamps_raw, dtype=' Date: Fri, 31 Mar 2017 15:25:28 +0200 Subject: [PATCH 0677/1483] docs: default to master This defaults the doc to master. And adds banner to point to last stable. 
Change-Id: I9d0554ee561a4d417631a792eed2fb47e637ae6f --- doc/source/conf.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/doc/source/conf.py b/doc/source/conf.py index 42111e7a..57fe29e4 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -181,7 +181,8 @@ htmlhelp_basename = 'gnocchidoc' # Multiversion docs scv_sort = ('semver',) -scv_greatest_tag = True +scv_show_banner = True +scv_banner_recent_tag = True scv_priority = 'branches' scv_whitelist_branches = ('master', '^stable/(2\.1|2\.2|[3-9]\.)') scv_whitelist_tags = ("^[2-9]\.",) -- GitLab From eb232de0e7988e3594280d640cc4fea40c218ebf Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Mon, 20 Mar 2017 12:16:38 +0100 Subject: [PATCH 0678/1483] tests: Add travis configuration Change-Id: I93b6a598ffd424db89c4bf5374a61a361428de60 --- .travis.yml | 44 ++++++++++++++++++++++++++++++++ tools/travis-ci-setup.dockerfile | 36 ++++++++++++++++++++++++++ tox.ini | 2 ++ 3 files changed, 82 insertions(+) create mode 100644 .travis.yml create mode 100644 tools/travis-ci-setup.dockerfile diff --git a/.travis.yml b/.travis.yml new file mode 100644 index 00000000..72b03e19 --- /dev/null +++ b/.travis.yml @@ -0,0 +1,44 @@ +language: python +sudo: required + +services: + - docker + +cache: + directories: + - ~/.cache/pip +env: + - TARGET: bashate + - TARGET: pep8 + - TARGET: docs + - TARGET: docs-gnocchi.xyz + + - TARGET: py27-mysql-ceph-upgrade-from-3.1 + - TARGET: py35-postgresql-file-upgrade-from-3.1 + + - TARGET: py27-mysql + - TARGET: py35-mysql + - TARGET: py27-postgresql + - TARGET: py35-postgresql + +before_script: +# Travis We need to fetch all tags/branches for documentation target + - case $TARGET in + docs*) + git fetch origin $(git ls-remote -q | sed -n '/refs\/heads/s,.*refs/heads\(.*\),:remotes/origin\1,gp') ; + git fetch --tags ; + git fetch --unshallow ; + ;; + esac + + - docker build --tag gnocchi-ci --file=tools/travis-ci-setup.dockerfile . 
+script: + - docker run -v ~/.cache/pip:/home/tester/.cache/pip -v $(pwd):/home/tester/src gnocchi-ci tox -e ${TARGET} + +notifications: + email: false + irc: + on_success: change + on_failure: always + channels: + - "irc.freenode.org#gnocchi" diff --git a/tools/travis-ci-setup.dockerfile b/tools/travis-ci-setup.dockerfile new file mode 100644 index 00000000..784c14c8 --- /dev/null +++ b/tools/travis-ci-setup.dockerfile @@ -0,0 +1,36 @@ +FROM ubuntu:16.04 +ENV GNOCCHI_SRC /home/tester/src +ENV DEBIAN_FRONTEND noninteractive + +#NOTE(sileht): really no utf-8 in 2017 !? +ENV LANG en_US.UTF-8 +RUN update-locale +RUN locale-gen $LANG + +RUN apt-get update -y && apt-get install -qy \ + git \ + wget \ + nodejs \ + nodejs-legacy \ + npm \ + python \ + python3 \ + python-dev \ + python3-dev \ + python-tox \ + redis-server \ + build-essential \ + libffi-dev \ + libpq-dev \ + postgresql \ + mysql-client \ + mysql-server \ + librados-dev \ + ceph \ + && apt-get clean -y + +RUN useradd -ms /bin/bash tester +RUN mkdir $GNOCCHI_SRC +RUN chown -R tester: $GNOCCHI_SRC +USER tester +WORKDIR $GNOCCHI_SRC diff --git a/tox.ini b/tox.ini index d7af9206..9518c349 100644 --- a/tox.ini +++ b/tox.ini @@ -116,6 +116,7 @@ deps = .[mysql,postgresql,test,file,ceph,swift,s3] commands = gnocchi-config-generator [testenv:docs] +basepython = python2.7 # This does not work, see: https://bitbucket.org/hpk42/tox/issues/302 # deps = {[testenv]deps} # .[doc] @@ -126,6 +127,7 @@ commands = doc8 --ignore-path doc/source/rest.rst doc/source pifpaf -g GNOCCHI_INDEXER_URL run postgresql -- python setup.py build_sphinx [testenv:docs-gnocchi.xyz] +basepython = python2.7 deps = .[file,postgresql,test,doc] sphinx_rtd_theme sphinxcontrib-versioning -- GitLab From 040239629523409e38f664990e177673f0590ded Mon Sep 17 00:00:00 2001 From: gord chung Date: Fri, 31 Mar 2017 18:18:46 +0000 Subject: [PATCH 0679/1483] cleanup empty test folder we don't have anything in tests/storage Change-Id: 
I0a5a0889d0ab66966c5dd097f7f35f5bf9f8047f --- gnocchi/tests/storage/__init__.py | 0 1 file changed, 0 insertions(+), 0 deletions(-) delete mode 100644 gnocchi/tests/storage/__init__.py diff --git a/gnocchi/tests/storage/__init__.py b/gnocchi/tests/storage/__init__.py deleted file mode 100644 index e69de29b..00000000 -- GitLab From 3c224fc179ba548e72274ba9653d8a6a18639586 Mon Sep 17 00:00:00 2001 From: liyi Date: Fri, 7 Apr 2017 16:09:31 +0800 Subject: [PATCH 0680/1483] default_aggregation_methods configuration worked in gnocchi-upgrade We need set the default_aggregation_methods when config file is loaded. Change-Id: Id25372eb7faf206fe9b526205967d6f14f0b3373 Closes-Bug: #1680740 --- gnocchi/service.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/gnocchi/service.py b/gnocchi/service.py index 069b0bc7..1744f02b 100644 --- a/gnocchi/service.py +++ b/gnocchi/service.py @@ -48,17 +48,17 @@ def prepare_service(args=None, conf=None, conf.register_opts(list(options), group=None if group == "DEFAULT" else group) - # HACK(jd) I'm not happy about that, fix AP class to handle a conf object? - archive_policy.ArchivePolicy.DEFAULT_AGGREGATION_METHODS = ( - conf.archive_policy.default_aggregation_methods - ) - conf.set_default("workers", utils.get_default_workers(), group="metricd") conf(args, project='gnocchi', validate_default_values=True, default_config_files=default_config_files, version=pbr.version.VersionInfo('gnocchi').version_string()) + # HACK(jd) I'm not happy about that, fix AP class to handle a conf object? + archive_policy.ArchivePolicy.DEFAULT_AGGREGATION_METHODS = ( + conf.archive_policy.default_aggregation_methods + ) + # If no coordination URL is provided, default to using the indexer as # coordinator if conf.storage.coordination_url is None: -- GitLab From ee81521317da9f7202798baa04c75a662dd69f14 Mon Sep 17 00:00:00 2001 From: liyi Date: Fri, 7 Apr 2017 15:57:46 +0800 Subject: [PATCH 0681/1483] Modify wrong revision. 
revision in 0735ed97e5b3_add_tablename_to_resource_type.py is not correct. Change-Id: Idf900f094591ae97496e6214f75089b33fe4fff2 --- ...e_type.py => 0718ed97e5b3_add_tablename_to_resource_type.py} | 0 .../versions/ffc7bbeec0b0_migrate_legacy_resources_to_db2.py | 2 +- 2 files changed, 1 insertion(+), 1 deletion(-) rename gnocchi/indexer/alembic/versions/{0735ed97e5b3_add_tablename_to_resource_type.py => 0718ed97e5b3_add_tablename_to_resource_type.py} (100%) diff --git a/gnocchi/indexer/alembic/versions/0735ed97e5b3_add_tablename_to_resource_type.py b/gnocchi/indexer/alembic/versions/0718ed97e5b3_add_tablename_to_resource_type.py similarity index 100% rename from gnocchi/indexer/alembic/versions/0735ed97e5b3_add_tablename_to_resource_type.py rename to gnocchi/indexer/alembic/versions/0718ed97e5b3_add_tablename_to_resource_type.py diff --git a/gnocchi/indexer/alembic/versions/ffc7bbeec0b0_migrate_legacy_resources_to_db2.py b/gnocchi/indexer/alembic/versions/ffc7bbeec0b0_migrate_legacy_resources_to_db2.py index 0fd3e5c6..1be98151 100644 --- a/gnocchi/indexer/alembic/versions/ffc7bbeec0b0_migrate_legacy_resources_to_db2.py +++ b/gnocchi/indexer/alembic/versions/ffc7bbeec0b0_migrate_legacy_resources_to_db2.py @@ -45,7 +45,7 @@ def upgrade(): ) # NOTE(gordc): fix for incorrect migration: - # 0735ed97e5b3_add_tablename_to_resource_type.py#L46 + # 0718ed97e5b3_add_tablename_to_resource_type.py#L46 op.execute(resource_type.update().where( resource_type.c.name == "instance_network_interface" ).values({'tablename': 'instance_net_int'})) -- GitLab From fc3e758d076ff00ee4ec56fa8a412d73bbfcca2e Mon Sep 17 00:00:00 2001 From: gord chung Date: Tue, 11 Apr 2017 20:28:30 +0000 Subject: [PATCH 0682/1483] cleanup live tests - delete resource using correct resource_type - actually test results for searching on bad uuid Change-Id: I54b6b83432a682c4c61ef00c7d5b4d36997b3305 --- gnocchi/tests/functional_live/gabbits/live.yaml | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) 
diff --git a/gnocchi/tests/functional_live/gabbits/live.yaml b/gnocchi/tests/functional_live/gabbits/live.yaml index 8f568570..48838885 100644 --- a/gnocchi/tests/functional_live/gabbits/live.yaml +++ b/gnocchi/tests/functional_live/gabbits/live.yaml @@ -647,19 +647,21 @@ tests: data: =: id: "cd9eef" - + status: 200 + response_json_paths: + $.`len`: 0 - name: delete myresource resource - DELETE: /v1/resource/generic/2ae35573-7f9f-4bb1-aae8-dad8dff5706e + DELETE: /v1/resource/myresource/2ae35573-7f9f-4bb1-aae8-dad8dff5706e status: 204 # assert resource is really deleted - name: assert resource resource is deleted - GET: /v1/resource/generic/2ae35573-7f9f-4bb1-aae8-dad8dff5706e + GET: /v1/resource/myresource/2ae35573-7f9f-4bb1-aae8-dad8dff5706e status: 404 - name: post myresource resource no data - POST: /v1/resource/generic + POST: /v1/resource/myresource request_headers: content-type: application/json status: 400 -- GitLab From 0e4cc74adc5922f23cf8e0c2099a4ff571ce9ba7 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Thu, 13 Apr 2017 08:20:47 +0200 Subject: [PATCH 0683/1483] doc: last release is the greatest tag We should not use the more recent tag, but the greatest. 
Currently the doc show 3.0.5 instead of 3.1.2 Change-Id: Ia7c5c57d92f406b804d4d49ebf9a7af0b87cd151 --- doc/source/conf.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/source/conf.py b/doc/source/conf.py index 57fe29e4..31c5c831 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -182,7 +182,7 @@ htmlhelp_basename = 'gnocchidoc' # Multiversion docs scv_sort = ('semver',) scv_show_banner = True -scv_banner_recent_tag = True +scv_banner_greatest_tag = True scv_priority = 'branches' scv_whitelist_branches = ('master', '^stable/(2\.1|2\.2|[3-9]\.)') scv_whitelist_tags = ("^[2-9]\.",) -- GitLab From 7edf1316b0ef138066b4b9468eb0689fad36effc Mon Sep 17 00:00:00 2001 From: lingyongxu Date: Thu, 6 Apr 2017 14:53:27 +0800 Subject: [PATCH 0684/1483] Use HostAddressOpt for opts that accept IP and hostnames Some configuration options were accepting both IP addresses and hostnames. Since there was no specific OSLO opt type to support this, we were using ``StrOpt``. The change [1] that added support for ``HostAddressOpt`` type was merged in Ocata and became available for use with oslo version 3.22. This patch changes the opt type of configuration options to use this more relevant opt type - HostAddressOpt. 
[1] I77bdb64b7e6e56ce761d76696bc4448a9bd325eb Change-Id: If8cc07c293bc44440541e208fa4bdcd6c20a0f03 --- gnocchi/opts.py | 6 +++--- requirements.txt | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/gnocchi/opts.py b/gnocchi/opts.py index f8baeffd..573b2216 100644 --- a/gnocchi/opts.py +++ b/gnocchi/opts.py @@ -109,9 +109,9 @@ def list_opts(): ("storage", (_STORAGE_OPTS + gnocchi.storage._carbonara.OPTS)), ("incoming", _INCOMING_OPTS), ("statsd", ( - cfg.StrOpt('host', - default='0.0.0.0', - help='The listen IP for statsd'), + cfg.HostAddressOpt('host', + default='0.0.0.0', + help='The listen IP for statsd'), cfg.PortOpt('port', default=8125, help='The port for statsd'), diff --git a/requirements.txt b/requirements.txt index f6f69b98..a5bbd784 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,7 +1,7 @@ pbr numpy>=1.9.0 iso8601 -oslo.config>=2.7.0 +oslo.config>=3.22.0 oslo.log>=2.3.0 oslo.policy>=0.3.0 oslo.serialization>=1.4.0 -- GitLab From 0f210a7ba66b8fcfe1aa09e2db17a2c1528b5e64 Mon Sep 17 00:00:00 2001 From: gord chung Date: Tue, 4 Apr 2017 19:18:48 +0000 Subject: [PATCH 0685/1483] s3: use a single bucket for aggregate storage s3 has a bucket limit so we can't just create a bucket per metric. s3 has some partitioning logic to divide a bucket based on prefix[1] so let us use the metric as a prefix of the object key. 
[1] https://aws.amazon.com/blogs/aws/amazon-s3-performance-tips-tricks-seattle-hiring-event/ Change-Id: If4a1ec3345d85a9937b4961b0ef61751b0a5e90d Closes-Bug: #1671925 --- gnocchi/storage/s3.py | 87 ++++++++++--------- .../s3-bucket-limit-224951bb6a81ddce.yaml | 8 ++ 2 files changed, 53 insertions(+), 42 deletions(-) create mode 100644 releasenotes/notes/s3-bucket-limit-224951bb6a81ddce.yaml diff --git a/gnocchi/storage/s3.py b/gnocchi/storage/s3.py index 2b0c2fe9..f5aef4f8 100644 --- a/gnocchi/storage/s3.py +++ b/gnocchi/storage/s3.py @@ -59,41 +59,45 @@ class S3Storage(_carbonara.CarbonaraBasedStorage): self.s3, self._region_name, self._bucket_prefix = ( s3.get_connection(conf) ) + self._bucket_name = '%s-aggregates' % self._bucket_prefix - def _bucket_name(self, metric): - return '%s-%s' % (self._bucket_prefix, str(metric.id)) + def upgrade(self, index): + super(S3Storage, self).upgrade(index) + try: + s3.create_bucket(self.s3, self._bucket_name, self._region_name) + except botocore.exceptions.ClientError as e: + if e.response['Error'].get('Code') != "BucketAlreadyExists": + raise @staticmethod def _object_name(split_key, aggregation, granularity, version=3): - name = '%s_%s_%s' % (split_key, aggregation, granularity) + name = '%s_%s_%s' % (aggregation, granularity, split_key) return name + '_v%s' % version if version else name + @staticmethod + def _prefix(metric): + return str(metric.id) + '/' + def _create_metric(self, metric): - try: - s3.create_bucket(self.s3, self._bucket_name(metric), - self._region_name) - except botocore.exceptions.ClientError as e: - if e.response['Error'].get('Code') != "BucketAlreadyExists": - raise - # raise storage.MetricAlreadyExists(metric) + pass def _store_metric_measures(self, metric, timestamp_key, aggregation, granularity, data, offset=0, version=3): self.s3.put_object( - Bucket=self._bucket_name(metric), - Key=self._object_name( + Bucket=self._bucket_name, + Key=self._prefix(metric) + self._object_name( timestamp_key, 
aggregation, granularity, version), Body=data) def _delete_metric_measures(self, metric, timestamp_key, aggregation, granularity, version=3): self.s3.delete_object( - Bucket=self._bucket_name(metric), - Key=self._object_name( + Bucket=self._bucket_name, + Key=self._prefix(metric) + self._object_name( timestamp_key, aggregation, granularity, version)) def _delete_metric(self, metric): - bucket = self._bucket_name(metric) + bucket = self._bucket_name response = {} while response.get('IsTruncated', True): if 'NextContinuationToken' in response: @@ -104,39 +108,38 @@ class S3Storage(_carbonara.CarbonaraBasedStorage): kwargs = {} try: response = self.s3.list_objects_v2( - Bucket=bucket, **kwargs) + Bucket=bucket, Prefix=self._prefix(metric), **kwargs) except botocore.exceptions.ClientError as e: - if e.response['Error'].get('Code') == "NoSuchBucket": + if e.response['Error'].get('Code') == "NoSuchKey": # Maybe it never has been created (no measure) return raise s3.bulk_delete(self.s3, bucket, [c['Key'] for c in response.get('Contents', ())]) - try: - self.s3.delete_bucket(Bucket=bucket) - except botocore.exceptions.ClientError as e: - if e.response['Error'].get('Code') != "NoSuchBucket": - raise def _get_measures(self, metric, timestamp_key, aggregation, granularity, version=3): try: response = self.s3.get_object( - Bucket=self._bucket_name(metric), - Key=self._object_name( + Bucket=self._bucket_name, + Key=self._prefix(metric) + self._object_name( timestamp_key, aggregation, granularity, version)) except botocore.exceptions.ClientError as e: - code = e.response['Error'].get('Code') - if code == "NoSuchBucket": - raise storage.MetricDoesNotExist(metric) - elif code == "NoSuchKey": + if e.response['Error'].get('Code') == 'NoSuchKey': + try: + response = self.s3.list_objects_v2( + Bucket=self._bucket_name, Prefix=self._prefix(metric)) + except botocore.exceptions.ClientError as e: + if e.response['Error'].get('Code') == 'NoSuchKey': + raise 
storage.MetricDoesNotExist(metric) + raise raise storage.AggregationDoesNotExist(metric, aggregation) raise return response['Body'].read() def _list_split_keys_for_metric(self, metric, aggregation, granularity, version=3): - bucket = self._bucket_name(metric) + bucket = self._bucket_name keys = set() response = {} while response.get('IsTruncated', True): @@ -149,41 +152,41 @@ class S3Storage(_carbonara.CarbonaraBasedStorage): try: response = self.s3.list_objects_v2( Bucket=bucket, + Prefix=self._prefix(metric) + '%s_%s' % (aggregation, + granularity), **kwargs) except botocore.exceptions.ClientError as e: - if e.response['Error'].get('Code') == "NoSuchBucket": + if e.response['Error'].get('Code') == "NoSuchKey": raise storage.MetricDoesNotExist(metric) raise for f in response.get('Contents', ()): try: meta = f['Key'].split('_') - if (aggregation == meta[1] - and granularity == float(meta[2]) - and self._version_check(f['Key'], version)): - keys.add(meta[0]) + if (self._version_check(f['Key'], version)): + keys.add(meta[2]) except (ValueError, IndexError): # Might be "none", or any other file. Be resilient. 
continue return keys @staticmethod - def _build_unaggregated_timeserie_path(version): - return 'none' + ("_v%s" % version if version else "") + def _build_unaggregated_timeserie_path(metric, version): + return S3Storage._prefix(metric) + 'none' + ("_v%s" % version + if version else "") def _get_unaggregated_timeserie(self, metric, version=3): try: response = self.s3.get_object( - Bucket=self._bucket_name(metric), - Key=self._build_unaggregated_timeserie_path(version)) + Bucket=self._bucket_name, + Key=self._build_unaggregated_timeserie_path(metric, version)) except botocore.exceptions.ClientError as e: - if e.response['Error'].get('Code') in ("NoSuchBucket", - "NoSuchKey"): + if e.response['Error'].get('Code') == "NoSuchKey": raise storage.MetricDoesNotExist(metric) raise return response['Body'].read() def _store_unaggregated_timeserie(self, metric, data, version=3): self.s3.put_object( - Bucket=self._bucket_name(metric), - Key=self._build_unaggregated_timeserie_path(version), + Bucket=self._bucket_name, + Key=self._build_unaggregated_timeserie_path(metric, version), Body=data) diff --git a/releasenotes/notes/s3-bucket-limit-224951bb6a81ddce.yaml b/releasenotes/notes/s3-bucket-limit-224951bb6a81ddce.yaml new file mode 100644 index 00000000..1dba0232 --- /dev/null +++ b/releasenotes/notes/s3-bucket-limit-224951bb6a81ddce.yaml @@ -0,0 +1,8 @@ +--- +fixes: + - | + Previously, s3 storage driver stored aggregates in a bucket per metric. + This would quickly run into bucket limit set by s3. s3 storage driver is + fixed so it stores all aggregates for all metrics in a single bucket. + Buckets previously created by Gnocchi will need to be deleted as they will + no longer be handled. 
-- GitLab From e537e7caf8bb2a91ef0309b395dae37044826feb Mon Sep 17 00:00:00 2001 From: gord chung Date: Thu, 13 Apr 2017 15:46:38 +0000 Subject: [PATCH 0686/1483] drop _delete_unaggregated_timeserie it's not used anywhere: - file driver just deletes entire folder - ceph driver just deletes as part of _delete_metric as unaggregated object is what contains omaps references to aggregate objects Change-Id: I83dfe73e8256dfa12df346b9615b3f2874e96773 --- gnocchi/storage/ceph.py | 4 ---- gnocchi/storage/file.py | 9 --------- 2 files changed, 13 deletions(-) diff --git a/gnocchi/storage/ceph.py b/gnocchi/storage/ceph.py index e242e085..aad5b521 100644 --- a/gnocchi/storage/ceph.py +++ b/gnocchi/storage/ceph.py @@ -157,10 +157,6 @@ class CephStorage(_carbonara.CarbonaraBasedStorage): self.ioctx.write_full( self._build_unaggregated_timeserie_path(metric, version), data) - def _delete_unaggregated_timeserie(self, metric, version=3): - self.ioctx.aio_remove( - self._build_unaggregated_timeserie_path(metric, version)) - def _get_object_content(self, name): offset = 0 content = b'' diff --git a/gnocchi/storage/file.py b/gnocchi/storage/file.py index 5a91de3f..3c067bef 100644 --- a/gnocchi/storage/file.py +++ b/gnocchi/storage/file.py @@ -98,15 +98,6 @@ class FileStorage(_carbonara.CarbonaraBasedStorage): raise storage.MetricDoesNotExist(metric) raise - def _delete_unaggregated_timeserie(self, metric, version=3): - path = self._build_unaggregated_timeserie_path(metric, version) - try: - os.unlink(path) - except IOError as e: - if e.errno == errno.ENOENT: - raise storage.MetricDoesNotExist(metric) - raise - def _list_split_keys_for_metric(self, metric, aggregation, granularity, version=3): try: -- GitLab From bea626591833f587798fe0fd59d90a7d3e440f63 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Fri, 14 Apr 2017 16:12:29 +0200 Subject: [PATCH 0687/1483] redis: remove useless dep on msgpack-python This seems to be a left-over since Redis has no particular usage of msgpack. 
Change-Id: I7046392ec77650b740c2a89f0069a3bf1f586558 --- setup.cfg | 1 - 1 file changed, 1 deletion(-) diff --git a/setup.cfg b/setup.cfg index 45dbbeb4..0be52afb 100644 --- a/setup.cfg +++ b/setup.cfg @@ -41,7 +41,6 @@ s3 = tooz>=1.38 redis = redis>=2.10.0 # MIT - msgpack-python lz4>=0.9.0 tooz>=1.38 swift = -- GitLab From b1f03e1b32e13b29fba608ad8a0ec775dce85083 Mon Sep 17 00:00:00 2001 From: gord chung Date: Mon, 17 Apr 2017 17:51:24 +0000 Subject: [PATCH 0688/1483] drop ceph incoming upgrade this is an upgrade from 2.x. we don't support upgrade from 2.x to gnocchi next Change-Id: Ie7857b624ae2bdda68845bf24e2dd1e5b1dfa6ce --- gnocchi/storage/incoming/ceph.py | 17 ----------------- 1 file changed, 17 deletions(-) diff --git a/gnocchi/storage/incoming/ceph.py b/gnocchi/storage/incoming/ceph.py index 316d678e..e2024281 100644 --- a/gnocchi/storage/incoming/ceph.py +++ b/gnocchi/storage/incoming/ceph.py @@ -55,23 +55,6 @@ class CephStorage(_carbonara.CarbonaraBasedStorage): ceph.close_rados_connection(self.rados, self.ioctx) super(CephStorage, self).stop() - def upgrade(self, index): - super(CephStorage, self).upgrade(index) - - # Move names stored in xattrs to omap - try: - xattrs = tuple(k for k, v in - self.ioctx.get_xattrs(self.MEASURE_PREFIX)) - except rados.ObjectNotFound: - return - with rados.WriteOpCtx() as op: - self.ioctx.set_omap(op, xattrs, tuple([b""]*len(xattrs))) - self.ioctx.operate_write_op(op, self.MEASURE_PREFIX, - flags=self.OMAP_WRITE_FLAGS) - - for xattr in xattrs: - self.ioctx.rm_xattr(self.MEASURE_PREFIX, xattr) - def _store_new_measures(self, metric, data): # NOTE(sileht): list all objects in a pool is too slow with # many objects (2min for 20000 objects in 50osds cluster), -- GitLab From 733d37a780ac4528276c0cb5af602b0e530ec07d Mon Sep 17 00:00:00 2001 From: gord chung Date: Mon, 3 Apr 2017 19:19:05 +0000 Subject: [PATCH 0689/1483] drop single bucket partitioning as we move to multiple buckets, we want workers to process buckets 
completely. this removes any internal bucket partitioning as first step Change-Id: I4ac40688e3069965d5899ae024d3183989e281e9 --- gnocchi/cli.py | 49 +++----------------------- gnocchi/storage/incoming/__init__.py | 2 +- gnocchi/storage/incoming/_carbonara.py | 4 --- gnocchi/storage/incoming/ceph.py | 21 ++++++----- gnocchi/storage/incoming/file.py | 7 ++-- gnocchi/storage/incoming/redis.py | 6 ++-- gnocchi/storage/incoming/s3.py | 14 ++------ gnocchi/storage/incoming/swift.py | 10 ++---- gnocchi/tests/functional/fixtures.py | 3 +- gnocchi/tests/test_aggregates.py | 3 +- gnocchi/tests/test_rest.py | 3 +- gnocchi/tests/test_storage.py | 9 ++--- 12 files changed, 32 insertions(+), 99 deletions(-) diff --git a/gnocchi/cli.py b/gnocchi/cli.py index 30591368..56facb39 100644 --- a/gnocchi/cli.py +++ b/gnocchi/cli.py @@ -145,10 +145,8 @@ class MetricReporting(MetricProcessBase): class MetricScheduler(MetricProcessBase): name = "scheduler" - MAX_OVERLAP = 0.3 GROUP_ID = "gnocchi-scheduler" SYNC_RATE = 30 - TASKS_PER_WORKER = 16 BLOCK_SIZE = 4 def __init__(self, worker_id, conf, queue): @@ -157,32 +155,9 @@ class MetricScheduler(MetricProcessBase): self._coord, self._my_id = utils.get_coordinator_and_start( conf.storage.coordination_url) self.queue = queue - self.previously_scheduled_metrics = set() self.workers = conf.metricd.workers - self.block_index = 0 - self.block_size_default = self.workers * self.TASKS_PER_WORKER - self.block_size = self.block_size_default self.periodic = None - def set_block(self, event): - get_members_req = self._coord.get_members(self.GROUP_ID) - try: - members = sorted(get_members_req.get()) - self.block_index = members.index(self._my_id) - reqs = list(self._coord.get_member_capabilities(self.GROUP_ID, m) - for m in members) - for req in reqs: - cap = msgpack.loads(req.get(), encoding='utf-8') - max_workers = max(cap['workers'], self.workers) - self.block_size = max_workers * self.TASKS_PER_WORKER - LOG.info('New set of agents detected. 
Now working on block: %s, ' - 'with up to %s metrics', self.block_index, - self.block_size) - except Exception: - LOG.warning('Error getting block to work on, defaulting to first') - self.block_index = 0 - self.block_size = self.block_size_default - @utils.retry def _configure(self): super(MetricScheduler, self)._configure() @@ -191,7 +166,6 @@ class MetricScheduler(MetricProcessBase): join_req = self._coord.join_group(self.GROUP_ID, cap) join_req.get() LOG.info('Joined coordination group: %s', self.GROUP_ID) - self.set_block(None) @periodics.periodic(spacing=self.SYNC_RATE, run_immediately=True) def run_watchers(): @@ -203,8 +177,6 @@ class MetricScheduler(MetricProcessBase): t.daemon = True t.start() - self._coord.watch_join_group(self.GROUP_ID, self.set_block) - self._coord.watch_leave_group(self.GROUP_ID, self.set_block) except coordination.GroupNotCreated as e: create_group_req = self._coord.create_group(self.GROUP_ID) try: @@ -221,23 +193,10 @@ class MetricScheduler(MetricProcessBase): def _run_job(self): try: - metrics = set( - self.store.incoming.list_metric_with_measures_to_process( - self.block_size, self.block_index)) - if metrics and not self.queue.empty(): - # NOTE(gordc): drop metrics we previously process to avoid - # handling twice - number_of_scheduled_metrics = len(metrics) - metrics = metrics - self.previously_scheduled_metrics - if (float(number_of_scheduled_metrics - len(metrics)) / - self.block_size > self.MAX_OVERLAP): - LOG.warning('Metric processing lagging scheduling rate. 
' - 'It is recommended to increase the number of ' - 'workers or to lengthen processing interval.') - metrics = list(metrics) + metrics = list( + self.store.incoming.list_metric_with_measures_to_process()) for i in six.moves.range(0, len(metrics), self.BLOCK_SIZE): self.queue.put(metrics[i:i + self.BLOCK_SIZE]) - self.previously_scheduled_metrics = set(metrics) LOG.debug("%d metrics scheduled for processing.", len(metrics)) except Exception: LOG.error("Unexpected error scheduling metrics for processing", @@ -326,8 +285,8 @@ def metricd_tester(conf): index = indexer.get_driver(conf) index.connect() s = storage.get_driver(conf) - metrics = s.incoming.list_metric_with_measures_to_process( - conf.stop_after_processing_metrics, 0) + metrics = s.incoming.list_metric_with_measures_to_process()[ + :conf.stop_after_processing_metrics] s.process_new_measures(index, metrics, True) diff --git a/gnocchi/storage/incoming/__init__.py b/gnocchi/storage/incoming/__init__.py index f1df9f33..ee6c90f1 100644 --- a/gnocchi/storage/incoming/__init__.py +++ b/gnocchi/storage/incoming/__init__.py @@ -52,5 +52,5 @@ class StorageDriver(object): raise exceptions.NotImplementedError @staticmethod - def list_metric_with_measures_to_process(size, part, full=False): + def list_metric_with_measures_to_process(): raise NotImplementedError diff --git a/gnocchi/storage/incoming/_carbonara.py b/gnocchi/storage/incoming/_carbonara.py index dc77d2d1..9dba7c5a 100644 --- a/gnocchi/storage/incoming/_carbonara.py +++ b/gnocchi/storage/incoming/_carbonara.py @@ -67,10 +67,6 @@ class CarbonaraBasedStorage(incoming.StorageDriver): def _build_report(details): raise NotImplementedError - @staticmethod - def list_metric_with_measures_to_process(size, part, full=False): - raise NotImplementedError - @staticmethod def delete_unprocessed_measures_for_metric_id(metric_id): raise NotImplementedError diff --git a/gnocchi/storage/incoming/ceph.py b/gnocchi/storage/incoming/ceph.py index e2024281..e7a9032e 100644 --- 
a/gnocchi/storage/incoming/ceph.py +++ b/gnocchi/storage/incoming/ceph.py @@ -16,7 +16,6 @@ import contextlib import datetime import errno import functools -import itertools import uuid @@ -119,14 +118,18 @@ class CephStorage(_carbonara.CarbonaraBasedStorage): return () return (k for k, v in omaps) - def list_metric_with_measures_to_process(self, size, part, full=False): - names = self._list_object_names_to_process(limit=-1 if full else - size * (part + 1)) - if full: - objs_it = names - else: - objs_it = itertools.islice(names, size * part, size * (part + 1)) - return set([name.split("_")[1] for name in objs_it]) + def list_metric_with_measures_to_process(self): + names = set() + marker = "" + while True: + obj_names = list(self._list_object_names_to_process( + marker=marker, limit=1000)) + names.update(name.split("_")[1] for name in obj_names) + if len(obj_names) < 1000: + break + else: + marker = obj_names[-1] + return names def delete_unprocessed_measures_for_metric_id(self, metric_id): object_prefix = self.MEASURE_PREFIX + "_" + str(metric_id) diff --git a/gnocchi/storage/incoming/file.py b/gnocchi/storage/incoming/file.py index 439a3ab2..c4d58087 100644 --- a/gnocchi/storage/incoming/file.py +++ b/gnocchi/storage/incoming/file.py @@ -75,11 +75,8 @@ class FileStorage(_carbonara.CarbonaraBasedStorage): return (len(metric_details.keys()), sum(metric_details.values()), metric_details if details else None) - def list_metric_with_measures_to_process(self, size, part, full=False): - if full: - return set(os.listdir(self.measure_path)) - return set( - os.listdir(self.measure_path)[size * part:size * (part + 1)]) + def list_metric_with_measures_to_process(self): + return set(os.listdir(self.measure_path)) def _list_measures_container_for_metric_id(self, metric_id): try: diff --git a/gnocchi/storage/incoming/redis.py b/gnocchi/storage/incoming/redis.py index 32908aca..df1edd6e 100644 --- a/gnocchi/storage/incoming/redis.py +++ b/gnocchi/storage/incoming/redis.py @@ 
-46,13 +46,11 @@ class RedisStorage(_carbonara.CarbonaraBasedStorage): return (len(metric_details.keys()), sum(metric_details.values()), metric_details if details else None) - def list_metric_with_measures_to_process(self, size, part, full=False): + def list_metric_with_measures_to_process(self): match = redis.SEP.join([self.STORAGE_PREFIX, "*"]) keys = self._client.scan_iter(match=match, count=1000) measures = set([k.decode('utf8').split(redis.SEP)[1] for k in keys]) - if full: - return measures - return set(list(measures)[size * part:size * (part + 1)]) + return measures def delete_unprocessed_measures_for_metric_id(self, metric_id): self._client.delete(self._build_measure_path(metric_id)) diff --git a/gnocchi/storage/incoming/s3.py b/gnocchi/storage/incoming/s3.py index 259d1bab..ed3f4caf 100644 --- a/gnocchi/storage/incoming/s3.py +++ b/gnocchi/storage/incoming/s3.py @@ -80,12 +80,8 @@ class S3Storage(_carbonara.CarbonaraBasedStorage): return (len(metric_details), sum(metric_details.values()), metric_details if details else None) - def list_metric_with_measures_to_process(self, size, part, full=False): - if full: - limit = 1000 # 1000 is the default anyway - else: - limit = size * (part + 1) - + def list_metric_with_measures_to_process(self): + limit = 1000 # 1000 is the default anyway metrics = set() response = {} # Handle pagination @@ -103,11 +99,7 @@ class S3Storage(_carbonara.CarbonaraBasedStorage): **kwargs) for p in response.get('CommonPrefixes', ()): metrics.add(p['Prefix'].rstrip('/')) - - if full: - return metrics - - return sorted(list(metrics))[size * part:] + return metrics def _list_measure_files_for_metric_id(self, metric_id): files = set() diff --git a/gnocchi/storage/incoming/swift.py b/gnocchi/storage/incoming/swift.py index 5052f6c7..c5d47d18 100644 --- a/gnocchi/storage/incoming/swift.py +++ b/gnocchi/storage/incoming/swift.py @@ -58,16 +58,10 @@ class SwiftStorage(_carbonara.CarbonaraBasedStorage): measures = 
int(headers.get('x-container-object-count')) return nb_metrics, measures, metric_details if details else None - def list_metric_with_measures_to_process(self, size, part, full=False): - limit = None - if not full: - limit = size * (part + 1) + def list_metric_with_measures_to_process(self): headers, files = self.swift.get_container(self.MEASURE_PREFIX, delimiter='/', - full_listing=full, - limit=limit) - if not full: - files = files[size * part:] + full_listing=True) return set(f['subdir'][:-1] for f in files if 'subdir' in f) def _list_measure_files_for_metric_id(self, metric_id): diff --git a/gnocchi/tests/functional/fixtures.py b/gnocchi/tests/functional/fixtures.py index 226ef724..fa4e6da6 100644 --- a/gnocchi/tests/functional/fixtures.py +++ b/gnocchi/tests/functional/fixtures.py @@ -173,8 +173,7 @@ class MetricdThread(threading.Thread): def run(self): incoming = self.storage.incoming while self.flag: - metrics = incoming.list_metric_with_measures_to_process( - None, None, full=True) + metrics = incoming.list_metric_with_measures_to_process() self.storage.process_background_tasks(self.index, metrics) time.sleep(0.1) diff --git a/gnocchi/tests/test_aggregates.py b/gnocchi/tests/test_aggregates.py index 9fc0b9d5..5fc0d084 100644 --- a/gnocchi/tests/test_aggregates.py +++ b/gnocchi/tests/test_aggregates.py @@ -60,8 +60,7 @@ class TestAggregates(tests_base.TestCase): for n, val in enumerate(data)] self.index.create_metric(metric.id, str(uuid.uuid4()), 'medium') self.storage.incoming.add_measures(metric, measures) - metrics = self.storage.incoming.list_metric_with_measures_to_process( - None, None, full=True) + metrics = self.storage.incoming.list_metric_with_measures_to_process() self.storage.process_background_tasks(self.index, metrics, sync=True) return metric diff --git a/gnocchi/tests/test_rest.py b/gnocchi/tests/test_rest.py index ddcf949d..df1788ce 100644 --- a/gnocchi/tests/test_rest.py +++ b/gnocchi/tests/test_rest.py @@ -122,8 +122,7 @@ class 
TestingApp(webtest.TestApp): req.headers['X-User-Id'] = self.USER_ID req.headers['X-Project-Id'] = self.PROJECT_ID response = super(TestingApp, self).do_request(req, *args, **kwargs) - metrics = self.storage.incoming.list_metric_with_measures_to_process( - None, None, full=True) + metrics = self.storage.incoming.list_metric_with_measures_to_process() self.storage.process_background_tasks(self.indexer, metrics, sync=True) return response diff --git a/gnocchi/tests/test_storage.py b/gnocchi/tests/test_storage.py index 21fbe248..9f807829 100644 --- a/gnocchi/tests/test_storage.py +++ b/gnocchi/tests/test_storage.py @@ -77,18 +77,15 @@ class TestStorageDriver(tests_base.TestCase): self.assertIn((utils.datetime_utc(2014, 1, 1, 13), 300.0, 1), m) def test_list_metric_with_measures_to_process(self): - metrics = self.storage.incoming.list_metric_with_measures_to_process( - None, None, full=True) + metrics = self.storage.incoming.list_metric_with_measures_to_process() self.assertEqual(set(), metrics) self.storage.incoming.add_measures(self.metric, [ storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 0, 1), 69), ]) - metrics = self.storage.incoming.list_metric_with_measures_to_process( - None, None, full=True) + metrics = self.storage.incoming.list_metric_with_measures_to_process() self.assertEqual(set([str(self.metric.id)]), metrics) self.trigger_processing() - metrics = self.storage.incoming.list_metric_with_measures_to_process( - None, None, full=True) + metrics = self.storage.incoming.list_metric_with_measures_to_process() self.assertEqual(set([]), metrics) def test_delete_nonempty_metric(self): -- GitLab From 2981b3f00902433395fe3ec6436cc2a2d6c4de56 Mon Sep 17 00:00:00 2001 From: gord chung Date: Tue, 4 Apr 2017 14:36:04 +0000 Subject: [PATCH 0690/1483] drop scheduler process as we move to multiple buckets, we will let each worker figure out it's whether a bucket needs to be processed and thus we don't need a central scheduler to distribute work around. 
for now, all workers will go ham and work on same bucket. Change-Id: I659f93545961b2943902eaefe7c7bd622abdc42f --- gnocchi/cli.py | 86 ++++-------------------------------------------- requirements.txt | 1 - setup.cfg | 1 - 3 files changed, 6 insertions(+), 82 deletions(-) diff --git a/gnocchi/cli.py b/gnocchi/cli.py index 56facb39..0a4a7dba 100644 --- a/gnocchi/cli.py +++ b/gnocchi/cli.py @@ -13,22 +13,16 @@ # implied. # See the License for the specific language governing permissions and # limitations under the License. -import multiprocessing import sys import threading import time import cotyledon from cotyledon import oslo_config_glue -from futurist import periodics -import msgpack from oslo_config import cfg from oslo_log import log from oslo_utils import timeutils import six -import tenacity -import tooz -from tooz import coordination from gnocchi import archive_policy from gnocchi import genconfig @@ -143,70 +137,26 @@ class MetricReporting(MetricProcessBase): exc_info=True) -class MetricScheduler(MetricProcessBase): - name = "scheduler" - GROUP_ID = "gnocchi-scheduler" - SYNC_RATE = 30 - BLOCK_SIZE = 4 +class MetricProcessor(MetricProcessBase): + name = "processing" - def __init__(self, worker_id, conf, queue): - super(MetricScheduler, self).__init__( + def __init__(self, worker_id, conf): + super(MetricProcessor, self).__init__( worker_id, conf, conf.metricd.metric_processing_delay) self._coord, self._my_id = utils.get_coordinator_and_start( conf.storage.coordination_url) - self.queue = queue - self.workers = conf.metricd.workers - self.periodic = None - - @utils.retry - def _configure(self): - super(MetricScheduler, self)._configure() - try: - cap = msgpack.dumps({'workers': self.workers}) - join_req = self._coord.join_group(self.GROUP_ID, cap) - join_req.get() - LOG.info('Joined coordination group: %s', self.GROUP_ID) - - @periodics.periodic(spacing=self.SYNC_RATE, run_immediately=True) - def run_watchers(): - self._coord.run_watchers() - - self.periodic 
= periodics.PeriodicWorker.create([]) - self.periodic.add(run_watchers) - t = threading.Thread(target=self.periodic.start) - t.daemon = True - t.start() - - except coordination.GroupNotCreated as e: - create_group_req = self._coord.create_group(self.GROUP_ID) - try: - create_group_req.get() - except coordination.GroupAlreadyExist: - pass - raise tenacity.TryAgain(e) - except tooz.NotImplemented: - LOG.warning('Configured coordination driver does not support ' - 'required functionality. Coordination is disabled.') - except Exception as e: - LOG.error('Failed to configure coordination. Coordination is ' - 'disabled: %s', e) def _run_job(self): try: metrics = list( self.store.incoming.list_metric_with_measures_to_process()) - for i in six.moves.range(0, len(metrics), self.BLOCK_SIZE): - self.queue.put(metrics[i:i + self.BLOCK_SIZE]) LOG.debug("%d metrics scheduled for processing.", len(metrics)) + self.store.process_background_tasks(self.index, metrics) except Exception: LOG.error("Unexpected error scheduling metrics for processing", exc_info=True) def close_services(self): - if self.periodic: - self.periodic.stop() - self.periodic.wait() - self._coord.leave_group(self.GROUP_ID) self._coord.stop() @@ -225,38 +175,14 @@ class MetricJanitor(MetricProcessBase): LOG.error("Unexpected error during metric cleanup", exc_info=True) -class MetricProcessor(MetricProcessBase): - name = "processing" - - def __init__(self, worker_id, conf, queue): - super(MetricProcessor, self).__init__(worker_id, conf, 0) - self.queue = queue - - def _run_job(self): - try: - try: - metrics = self.queue.get(block=True, timeout=10) - except six.moves.queue.Empty: - # NOTE(sileht): Allow the process to exit gracefully every - # 10 seconds - return - self.store.process_background_tasks(self.index, metrics) - except Exception: - LOG.error("Unexpected error during measures processing", - exc_info=True) - - class MetricdServiceManager(cotyledon.ServiceManager): def __init__(self, conf): 
super(MetricdServiceManager, self).__init__() oslo_config_glue.setup(self, conf) self.conf = conf - self.queue = multiprocessing.Manager().Queue() - - self.add(MetricScheduler, args=(self.conf, self.queue)) self.metric_processor_id = self.add( - MetricProcessor, args=(self.conf, self.queue), + MetricProcessor, args=(self.conf,), workers=conf.metricd.workers) if self.conf.metricd.metric_reporting_delay >= 0: self.add(MetricReporting, args=(self.conf,)) diff --git a/requirements.txt b/requirements.txt index f6f69b98..dae4c542 100644 --- a/requirements.txt +++ b/requirements.txt @@ -23,4 +23,3 @@ tenacity>=3.1.0 # Apache-2.0 WebOb>=1.4.1 Paste PasteDeploy -msgpack-python diff --git a/setup.cfg b/setup.cfg index 45dbbeb4..0be52afb 100644 --- a/setup.cfg +++ b/setup.cfg @@ -41,7 +41,6 @@ s3 = tooz>=1.38 redis = redis>=2.10.0 # MIT - msgpack-python lz4>=0.9.0 tooz>=1.38 swift = -- GitLab From 75e420673159fcf4a383f130e1652243fccc2b75 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Thu, 20 Apr 2017 08:45:14 +0200 Subject: [PATCH 0691/1483] Use NULL as creator in ResourceUUID conversion In older versions of Gnocchi, the creator could be null. It's unlikely now as all auth helper returns a string as creator, but it's still possible in theory. This patch uses the NULL string as the creator is it's None (so NULL in SQL). Change-Id: Id1faa16512b995fa2cd24ca4bed7912934cb5aad Closes-Bug: #1684246 --- gnocchi/tests/test_utils.py | 19 +++++++++++++++++++ gnocchi/utils.py | 2 ++ 2 files changed, 21 insertions(+) diff --git a/gnocchi/tests/test_utils.py b/gnocchi/tests/test_utils.py index d8319e3d..cbae6456 100644 --- a/gnocchi/tests/test_utils.py +++ b/gnocchi/tests/test_utils.py @@ -13,6 +13,7 @@ # under the License. 
import datetime import os +import uuid import iso8601 import mock @@ -57,3 +58,21 @@ class TestUtils(tests_base.TestCase): utils.to_datetime(utils.to_timestamp(1425652440.4)), datetime.datetime(2015, 3, 6, 14, 34, 0, 400000, tzinfo=iso8601.iso8601.UTC)) + + +class TestResourceUUID(tests_base.TestCase): + def test_conversion(self): + self.assertEqual( + uuid.UUID('ba571521-1de6-5aff-b183-1535fd6eb5d0'), + utils.ResourceUUID( + uuid.UUID('ba571521-1de6-5aff-b183-1535fd6eb5d0'), + "bar")) + self.assertEqual( + uuid.UUID('ba571521-1de6-5aff-b183-1535fd6eb5d0'), + utils.ResourceUUID("foo", "bar")) + self.assertEqual( + uuid.UUID('4efb21f6-3d19-5fe3-910b-be8f0f727846'), + utils.ResourceUUID("foo", None)) + self.assertEqual( + uuid.UUID('853e5c64-f45e-58b2-999c-96df856fbe3d'), + utils.ResourceUUID("foo", "")) diff --git a/gnocchi/utils.py b/gnocchi/utils.py index fdde916a..45c4ccc9 100644 --- a/gnocchi/utils.py +++ b/gnocchi/utils.py @@ -48,6 +48,8 @@ def ResourceUUID(value, creator): return uuid.UUID(value) except ValueError: if len(value) <= 255: + if creator is None: + creator = "\x00" # value/creator must be str (unicode) in Python 3 and str (bytes) # in Python 2. It's not logical, I know. if six.PY2: -- GitLab From 1977a1737a9ab087aeeb26c49d90503a2fb82419 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Tue, 18 Apr 2017 14:46:09 +0200 Subject: [PATCH 0692/1483] Replace oslo_serialization.jsonutils with simpler version This drops jsonutils from oslo_serialization with a much simpler version of what is needed to encode JSON in Gnocchi. Change-Id: I9239f0e8b5fdbb1fa5be78d6c523d44977cbf74f --- gnocchi/json.py | 59 ++++++++++++++++++++++++++++-------------------- requirements.txt | 1 - 2 files changed, 34 insertions(+), 26 deletions(-) diff --git a/gnocchi/json.py b/gnocchi/json.py index 152b5467..30819ebb 100644 --- a/gnocchi/json.py +++ b/gnocchi/json.py @@ -1,6 +1,6 @@ # -*- encoding: utf-8 -*- # -# Copyright © 2015-2016 Red Hat, Inc. 
+# Copyright © 2015-2017 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain @@ -13,38 +13,47 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. +from __future__ import absolute_import + import datetime +import json +import uuid import numpy -from oslo_serialization import jsonutils +import six import ujson -_ORIG_TP = jsonutils.to_primitive - - -def _to_primitive(value, *args, **kwargs): - if isinstance(value, datetime.datetime): - return value.isoformat() - if isinstance(value, numpy.datetime64): +def to_primitive(obj): + if isinstance(obj, ((six.text_type,) + + six.integer_types + + (type(None), bool, float))): + return obj + if isinstance(obj, uuid.UUID): + return six.text_type(obj) + if isinstance(obj, datetime.datetime): + return obj.isoformat() + if isinstance(obj, numpy.datetime64): # Do not include nanoseconds if null - return str(value).rpartition(".000000000")[0] + "+00:00" + return str(obj).rpartition(".000000000")[0] + "+00:00" # This mimics what Pecan implements in its default JSON encoder - if hasattr(value, "jsonify"): - return _to_primitive(value.jsonify(), *args, **kwargs) - return _ORIG_TP(value, *args, **kwargs) - - -def to_primitive(*args, **kwargs): - try: - jsonutils.to_primitive = _to_primitive - return jsonutils.to_primitive(*args, **kwargs) - finally: - jsonutils.to_primitive = _ORIG_TP - - -def dumps(obj, *args, **kwargs): - return jsonutils.dumps(obj, default=to_primitive) + if hasattr(obj, "jsonify"): + return to_primitive(obj.jsonify()) + if isinstance(obj, dict): + return {to_primitive(k): to_primitive(v) + for k, v in obj.items()} + if hasattr(obj, 'iteritems'): + return to_primitive(dict(obj.iteritems())) + # Python 3 does not have iteritems + if hasattr(obj, 'items'): + return 
to_primitive(dict(obj.items())) + if hasattr(obj, '__iter__'): + return list(map(to_primitive, obj)) + return obj + + +def dumps(*args, **kwargs): + return json.dumps(*args, default=to_primitive, **kwargs) # For convenience diff --git a/requirements.txt b/requirements.txt index a5bbd784..1265ce7d 100644 --- a/requirements.txt +++ b/requirements.txt @@ -4,7 +4,6 @@ iso8601 oslo.config>=3.22.0 oslo.log>=2.3.0 oslo.policy>=0.3.0 -oslo.serialization>=1.4.0 oslo.utils>=3.18.0 oslo.middleware>=3.22.0 pandas>=0.18.0 -- GitLab From 535afd9b135b9f46f561f68d3699d024ff227314 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Tue, 18 Apr 2017 15:25:33 +0200 Subject: [PATCH 0693/1483] json: use ujson to dumps This should improve encoding speed. Change-Id: I0492d42a80ffae6f128b9ca5875c651a6e03e59e --- gnocchi/json.py | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/gnocchi/json.py b/gnocchi/json.py index 30819ebb..eb5fa924 100644 --- a/gnocchi/json.py +++ b/gnocchi/json.py @@ -13,10 +13,7 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
-from __future__ import absolute_import - import datetime -import json import uuid import numpy @@ -52,8 +49,8 @@ def to_primitive(obj): return obj -def dumps(*args, **kwargs): - return json.dumps(*args, default=to_primitive, **kwargs) +def dumps(obj): + return ujson.dumps(to_primitive(obj)) # For convenience -- GitLab From f8e1c2192ec30bd7015ecf1bbe83ba1eb949f32b Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Thu, 20 Apr 2017 15:34:38 +0200 Subject: [PATCH 0694/1483] doc: remove oslosphinx support Change-Id: I924a60ff8635a684d2bc34870baf61c02dbe4d8a --- doc/source/conf.py | 12 +++--------- setup.cfg | 2 +- tox.ini | 3 +-- 3 files changed, 5 insertions(+), 12 deletions(-) diff --git a/doc/source/conf.py b/doc/source/conf.py index 31c5c831..be6feaea 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -14,8 +14,6 @@ import datetime import os import subprocess -import oslosphinx - # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. @@ -101,17 +99,13 @@ pygments_style = 'sphinx' # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. -html_theme = 'openstack' +html_theme = 'sphinx_rtd_theme' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. -if html_theme == "sphinx_rtd_theme": - import sphinx_rtd_theme - html_theme_path = [sphinx_rtd_theme.get_html_theme_path()] -else: - html_theme_path = [os.path.join(os.path.dirname(oslosphinx.__file__), - 'theme')] +import sphinx_rtd_theme +html_theme_path = [sphinx_rtd_theme.get_html_theme_path()] # The name for this set of Sphinx documents. 
If None, it defaults to diff --git a/setup.cfg b/setup.cfg index 0be52afb..f49af905 100644 --- a/setup.cfg +++ b/setup.cfg @@ -58,8 +58,8 @@ file = lz4>=0.9.0 tooz>=1.38 doc = - oslosphinx>=2.2.0 sphinx + sphinx_rtd_theme sphinxcontrib-httpdomain PyYAML Jinja2 diff --git a/tox.ini b/tox.ini index 9518c349..64612a36 100644 --- a/tox.ini +++ b/tox.ini @@ -129,10 +129,9 @@ commands = doc8 --ignore-path doc/source/rest.rst doc/source [testenv:docs-gnocchi.xyz] basepython = python2.7 deps = .[file,postgresql,test,doc] - sphinx_rtd_theme sphinxcontrib-versioning # for 2.x doc pytimeparse retrying commands = - pifpaf -g GNOCCHI_INDEXER_URL run postgresql -- sphinx-versioning build doc/source doc/build/html -- -D html_theme=sphinx_rtd_theme + pifpaf -g GNOCCHI_INDEXER_URL run postgresql -- sphinx-versioning build doc/source doc/build/html -- GitLab From 717d6c76ec7f316d823699764ab12a038a2562dd Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Thu, 20 Apr 2017 14:57:34 +0200 Subject: [PATCH 0695/1483] Switch to new Gnocchi logo Change-Id: I5ecff1acb98dabbac63e5e25d4bd7a42c8132477 --- README.rst | 2 +- doc/source/conf.py | 8 ++++++-- doc/source/gnocchi-icon.png | Bin 0 -> 12573 bytes doc/source/gnocchi-logo.jpg | Bin 17800 -> 0 bytes doc/source/gnocchi-logo.png | Bin 0 -> 92830 bytes doc/source/index.rst | 5 ----- 6 files changed, 7 insertions(+), 8 deletions(-) create mode 100644 doc/source/gnocchi-icon.png delete mode 100644 doc/source/gnocchi-logo.jpg create mode 100644 doc/source/gnocchi-logo.png diff --git a/README.rst b/README.rst index 2d76f543..5aad556d 100644 --- a/README.rst +++ b/README.rst @@ -2,7 +2,7 @@ Gnocchi - Metric as a Service =============================== -.. image:: doc/source/gnocchi-logo.jpg +.. image:: doc/source/gnocchi-logo.png Gnocchi is a multi-tenant timeseries, metrics and resources database. It provides an `HTTP REST`_ interface to create and manipulate the data. 
It is diff --git a/doc/source/conf.py b/doc/source/conf.py index be6feaea..064eb03c 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -117,12 +117,12 @@ html_theme_path = [sphinx_rtd_theme.get_html_theme_path()] # The name of an image file (relative to this directory) to place at the top # of the sidebar. -#html_logo = None +html_logo = 'gnocchi-logo.png' # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. -html_favicon = 'gnocchi-logo.jpg' +html_favicon = 'gnocchi-icon.png' # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, @@ -173,6 +173,10 @@ html_static_path = ['_static'] # Output file base name for HTML help builder. htmlhelp_basename = 'gnocchidoc' +html_theme_options = { + 'logo_only': True, +} + # Multiversion docs scv_sort = ('semver',) scv_show_banner = True diff --git a/doc/source/gnocchi-icon.png b/doc/source/gnocchi-icon.png new file mode 100644 index 0000000000000000000000000000000000000000..d6108c4182d54422ec09aa935f2fd0594aa2d68f GIT binary patch literal 12573 zcmZ{L1z1#F*Y=rV=p4FBQo2(Zx*KU>NI`n&9!k0-M39hfQIL)SML;P*Qep@Z1Y`&$ zMEH50_x--_`QQKh=eo`{d#|-1tBNniTu_)@fq<2jRUYBwB5SOw{%`ng?jF0VzrT;HkWf%ikYJFQpf}>7kcf8uz;7}oqrYbU*)Je`#B=qeEi+Ky;%P$*WST9!2cdQ`(G9P_wV2B z^z`}fioE>(-K*RF2?g8x2#E*^3;k~(e>a!^Y2N>Z`it}bg*gYi{eNJ8asCDSTN(dq zqx`K#W%azB++0Fb?fsngS_R>3@9&K; z_4f8qFmwxc_VD~G%PJ-)`7h)@`Tv0a)eBhzgq!oNdjC?Qf{47(|3mvvc@=L@AB3}? z-(T_)|CawJ?LXy>o&CH$0{$xgzL%T7g6O{?|3Uswl;!{M6{P+P^H1V`0xbSFz(0xq z0Q@x!vVZHzLeIdw3&EeC!>a|5DiBkbjW=Q~K|EEdQq- zF>&dC*7Fa}zmf7nf4$59c(?y$IDb=bEk}Xic1-`v3Ka-`2}fZ8fB=b>s*-8&(m@2i zzms{03tbDtFl{W;vOsN{t&`UM*}|^bdrl0RtVD%c2pmQ{UvivDCpesoC=+;3M8-wY z2Es`I9-~Jq?j>0p>pW_#TxvDFFD3%_K#@2mrlm0nWqQrB?{425hP^a>QdePR+xxZe zu=u1%VWqLMB6Kb2=Fi!Mk4>0r&tV8V!vV#&@_w%4tTmc%cUUMpN4V7#E~y5K;w*g? 
z@=6tF;?xF3+0(f6S%BUH^w6_BT?yd67LW zao*sI1rrVuU-E>EY~dbTe$_Md%HV*=W?TQ4voQqM=?~+(=Z(re?g7qTFL{I*$g&_CdU^Csu|J0(;!XZery*Nky7HjVoFDXz0QGBOQ1K({*NX*f)kkRU zp5M}|Tv1hgrrOvWe6#J5RGPH!LKW&V0gn{!oSjKuUc3<)Iym_%B~tO?vNrd8^{3U= zQ*h>Dwcv$&HxZu`=aob!(Mz)Gh^HI814h|b+4DALx{n_dp1kpn#A?C^0w_q5-;ZnycK;Y2I z>tyI6Hlz8kE*$D_D)>4h`W2YT%{XAErpMBu8otU)IZ zn2!Cl#Npx$Zjxck_8aA$ukjZ@l*_?}x;V4?Sb?@g#hH0Z*Gu0vcHjCcV-DZ)9X0(_ z$40)3fv|$v3-gQ;mpL~2syDUXz2XM)iHi94GtXYezwuBUagXdBXjf~xaSmqpZ8*89 zZI0Ky>@H+e1V`t=^9eFyf|Y$``GE3yg{NSoqK6saqzG&PW*vAmZ-S!_>4L3%$7qn1 zG*>pf>row+AS44kB?Y7Rf#W&KB*n3eu=z};vf?Jo3US&9=@z`E-+7O;;TP(v0V_q5>XUy;aKb zfvUe#UL(XfZHoJU*qzcSXvA7DrPIVxhRJGJVUrqWee#uGmzFD@)jgNs^Tb1r;dO+l zmtImUMaQ9oFcq7hNgysYbTvt~$b!V6azPZ60|sALVz3s9i?=$T2e1n4NV4Ul_4I>} zeZ{3%sjfV_!6=Id-?2NF<9kL_n{cq?BejnrUa8~8u=15Up)d+rhosT?wt-mW*DQr> z03#_-Yh!J3Dl8O?&CuhDUGcU=SPj^k$^=HXyslKYaB)xsLe5xE)@kUvZ^nR_80`+6 zjO4F7x1V&}kl&Rmv)FcS@D4B5Uv7NpiD0g>!M%2*L9d26|kH~ zWg*{ju|2sfb#u&ikjF{hNY@^P*xY`(d}%T)_0G{l^Ug*y){vui)<%^scd9^jx7TLfjc&m6_MJix6>&3T1YJ;%IN z?j}6X4;uMhY$WW?i&q8d91cL!$A5b9-Z;&sNK*t4*9xl|1sD5(?EE>;1)BN}LgOsl zB}hq-d`;4AiT_z{y{0QR{8V4K!xMx@{Dl*+l0?greR4BX?gH@*h%_nt>+%2mft777 z1i9?^8V|GJ+!i#lSQ^)!O3v)a`tV+N;M>49wLTkF=tXiGm^C(<$%7b2mQjb6uI_;! zTu05}^dbA|nS68CGgztQjrWi~2uW26dvE_r|NVLcvsAM&5#kfWz<$*RaQ%Cq5}+fr zQ0Is78kIi7dCm8_OE9_tTZV45fYkwK%nOra{KEQx8koRCN;C7gN!HOM{A6{gwk|xu z!X3b@K#(qA%I!u^{PzSRFdUkY8Uh2C?ddIjXUgMO88KhGlaV*}-Pp(%8>M8Wo#-+m z)&fQ9gE08Tgcr(AS_vQ{gVe3yoM6ReMK<@o@67LL`}lnnmX=P03;01VSZVFV7~sP` zWF26nKnG#!niZ9%`v8F?4Yqw0E8Nq>Xe)ceWo-r(`IbQQoCT3gg?zz=j89W5cd$S7 zV+Cqa-m+?dAuD8o72@Y0ig-5c2#RKr)q+fk5-lL(9tyZ9F`uKKlZN=C|rR%PpW(ChNTX(wj}0-M9D zcpHocs@s_RRVAPuLrLt06Yz0b^n%BLRL*zl*04e{f_UI54t|vRi&D7oH%%x?oDKCU z@f!K`xNchF`~i0|>btQJvpB#xG?$3HiA)55RtqiKvwIKX+1SsdO(0`?44Hc`5linG zitQ%F4nmmbE(h&p-dm-&xJ*eh+Qtu^{0Jc=dHK=GdF*@X#Tk{Z6tL;|5ckbw$xL6> z)zZo3nfCYcR=12oZ+B6Oe$QP)Ip=HYS>rI|a!Bp>RCP}h=N<6+y31pDqr&E^KorT+m*M&@3&RRzCj1pL0FI+L)g$3! 
z8{HR;KFA{r%M+dvFss74b?oHvqkqbLew6a)+HfatVwFi#Qw_BwsJflj@TAc)c&upJ zrVOwTvy0s4B;(I-=4hadiTb*#rA#=$P^;eWq_7zF?t`h}ol8}rtR6e=5MACk+i zI6MPd`)QW!cS6K0+|PZE4i1EMVjA$;GApD<4(=tu7A8!gtJ{4bc7-)gaW&Vmq@vjR zl-|OKi)6(p*KWm<+T-2!^Ajo-+b)9G0-5n2gTcAo!m?ZU2H0pEb8Wb;|7h0R(+$F7 z@NCF*)=T`-02|F;JqucQhKh{u@()$X`tYc!)6lBeY}YxE&6YX5V_Ck-b-Hx%FiS~? z#Bdw7zkNon>g}(QpSOT&gdYJ`SUF` z6r2(8K2s*-lI-G!<$-VMO@@F9F#&crZ%WsF===(_jZ59rUrD;n5E}oM44YU=dKP>h zJ}>}oCO>>n`zlZXKiPf+;DsWxsq8CZMCM6@(`+xTuFfSmgnz#n zKhhLE&ZkcU5Rv!>62)NU`lo!%soDJzZ}xSBSL^ErzCNwGb}tYcA3R?-gAIgX+j!ig z1!;?(Ij`UErXwN|eZ7a;)oedg*(xt-YbVcq%s54!RlUp#hz|qrj0JwdA>73CWt_e> zugR%xTsgPOwF(QPm3^h~0C0>gLg2KFSA~(QAXQG2&)$&FccN1n66s+l0FwHYCS9r|urFW;ZsQ9ENbt3Us@jpr%++lS%?MK*-%if*+XY z%RB9vvng#u(6W~;9XvdzYbzXp%OoiTS+LPuBjGgKnZAtU<(%TLa4biCNQ_bZDhb6r z8fHZ!CB6A<;%&CZ=PW@<#o1mIz%x2nt#IitWR{erw4l87hB_1#3+7P>J{6bhP8lZA=;C=s_sI?8g(NjJy8v=$~Hx*sNe=!g5U5yW4d-QxPo$ z3INr(_>NLR2t+eqY>fB;Eh!F~(;cqn6dES~MujX|30UBQ*d0zm)=|Apl%NVc(BT1x zMLD?aT0a%q|D}-~>A;4HokOkpqnPlDgX-}b?DGJ$nmcNrt%8!m!lx0B|7`sQ(8B|< z6PA%9>zZj~H|!0e1>{X({+~YIcOI(vZIvjsjSv4Ujb21-+&HXyMLS_~y{JG1ad8PC z%-z0+wuh*WLrbrHrp(PDa#|^pVA{oKJMl@*{KJsxB5ai!dWd|H;FS&qZYqvRQzO-g z8b%nf9VeH!w{kS%Wx7CB6V3d)dUE?Geru~x##3tyQCR2Q!g<6AHKT1{)^wKqUGspCN3%pr zAFM%>JrBnufj^EHep}z@?K1i$(?;^v#L9bdE<(=(zQX9$;?cQPjjZMOx147gk~r*J zmbeOj4yn)^JrmYtjD7wjF&05V{IbjTUXaa?rtJC#SLgd7{+eHcxTWS>JN3UHo`1HL z6PhnA(YW?#O-hKe<>lfx?ukfHOO4V9$s99rem7)_e!i)t_;H7tPb~74C=+FTZL>vD zh8rY(ubm@1$oS4n4nalC9TkD54xFvSw8#YhxB~c298Cmp-6x+d1zU}4mqmGG?mnEz%$T#G$l@^Ko$gx}*jOW;P>f zbyg!<1$}9@p%+BHJ{f8Qf+MXzB5CI!s%7JJai4+;7VgttSH^t#koeQc@eA0$YqH;# z>3RLHKbwzlqI6en$kiF)2zP#15TAOz4Wapa2%{jmW0xjv9%Pt15A7w6CQr?mi8$x0 zVf2uYE5&7OcfzZ{oBc@6(dLBT=7LA$TZd;#oK;6+RoqBmbC=Vwpqp)zeAet8`)lUh z?*oMq24ePNzUL9xPbDerKU>6tofBuc_FPpGD}eFOqp7suO;y6g#+F9#x(s32I49>< zCyZV9o5CslH{fEbe5>8L&vX8JuMWx_I?QuT-4g^7AJNgfRbJ0py#{lpNR={J>$MF@iW37MV7!I#gXI&p=AO#rc zV#4S>$>~i73DES>9PiVyVD~S4eCLntpFw?gF_}y8%1S*)fMJ^M*7uc5EQPvc0REm8 
z_>92E!3kt$6g`Zx;Du@?993bVs;r`;pUwqgPY?WvVFwKmLl+XHm;lMm$3=Om;YOUG zO!mxZJOD*~Rei~xY50)}i3h+M^I==PA3Rwe-=@V9!<%j6(P?$~fCCsXY)RU=6HpKz zv-v0)c#Q>28Q#D|)}u7lpeR!|V8Kk{JW2K^Xhn@vjWsFx*4IrdVqPtYL8yobw4s`% zK>*N}h{A2lAk#^(Yb*oc)55@KU1(pgSwAYwlnUhdn+yQtOo@!M$E$OwK)TdmWMcrh z-?&~K9hj>I7x0M&Q4T0QB`?yN*n1TsAqjN~C>Rm-Rlbw`H7=0A-)Maelrkzq{baKs zla}3~f&3xJ(#%Hw_|O`j9qQRluon+Yp3^t$BIb@+%M}}zpT}uGe2Dhua0uT!<-$2K z9(jFuqQalL4D&hTthhZ8xy!>$<751_^mKL?WM7guZj#xXR`?~6m&kmGTK$w(Lt?_r zUN=LImUb-$e}#M@ZnMDp6CPJPQXtuKg;;BMPIXS%ot@^Q7l$V6lEqY35I$o&_~AgG z6!^V&{KBt@&YP?uS#?uZX5#+6of&n&;H-C3HWg-Td^TiKpeUGCo2^&lL|*qAKU5uL z8DDjDmX-v&dsUrC*lG|QS>p^T{u8Ky{mnmW9xq4F-Kp`sdp7#@&|8LDc{2T6-nZ3( zLHN|+lP}WV)!!mu=UQ){w`ZX_q^*Q?`s34JbL`VD$T70zxqOLC5?yvd!{G!e-O!KT z!7GzYF9{Ur`Z(7XEalNK6?Pw^tyuH@yI>lM`9P}e?ztWtAw&En&ie54baGph*OD&wjngUw%r#gtxTo#_^VP-dIyoH4%gk_vmATlI_yi zHhpJ~$+bk_8)I297kfQ084H!19fsFoHMz1|(JWp!J&r4Vj^ArdJROuYXlg;pb*bum z+I&0DJWAVotmw5B3iZLVDdbG-o0;EXySWgY&x)+lpyWDB*;qjGq%q!^F7aU9sj41N zjUl;H*9UomrAs!m;+o3}6RP^yi9w zE(S78q5YM_>MaFPYXqY~B?>*Hq#*Rq!572t#`~%z1Rz5dL&`|#7BD5lf5!6j4T8rJ zbN1tqi)bALAYF{(sX&RpN$Z4D$ii-YTo)urTmW2YioU5egqdod9{~XBVj*@MA0{7c znmeizAAIX~0Ql`1jy?#RJlebFv&yZH0O)B|yq+sRnv9(NMc(j@Js1F3S^i|Ji-EKA zg&}};GHPvqRS`x==(=S==ROL+w8E>iz%Q(YU5J1j0Tw|F{osmCG;YqaUSgdRF!dm2 zm1dp_NjO?eQ{PpS^P1__iNV+r4%I&f4D4}1Kus}M>eZJbN%EDKzfeZEi!h`p!;Zo@Pe3l>F>Ulenj(tseiAeci};7sPmmn6QgZ$HkjvjIJ}64E9Q z)jA8*l=J0@fG@AH{3HUQI2oZzfbb(CfMfgKGv;MC9T6aSj6BhsCfXl^Ba{RrKL#>V zP&>cIV6x}xxA_e)a##($C$Sto<=)Ey&Q~A`?$Jk|d?AeAR@UgbCCJop1#h-O8lT!t zPUC$4sQ*#3}!T0D5yA!4oh0rFrj$t}~E z{3uafnJY!9i>}BP4+ z-UPA!@-m{fzzX&B(4VmnOFc-r3ll1#U6wjw8onum#g3n@j(#pwv) zA2~^1ykw0)TOFoRpIErR5?{P9<2HFvu%k*6LRc(Qi#lk$SlJ5?6N+6HI3=ds(`VQw zerLPOV%S)pIcmrzk2|>xZ`+H0bDz$FA?Ibp$pI7AfL!e1!Rx5zZx3xLqK`L}&Yqnz zW~S^(^-$=XXJ!ecHbq= z%U}IUK0%W-4YsP^)t;iX!U*^(xMM~WduA2RHFHTPxGt|;D40|c5uq3$HUR=ne z>il@XGeQHVPj^q@bzv)1!3*;{FMxH$kO}&osV3!TP1c{WLMXVhi$??_{Pc@HrA;_@ z2rou6O~*!kX~t`C9+VoOcb$1xZ6jITH0@FwI7>E5<{P$_2y 
zt2D0vYXvl$9bdwSM+RXN0;LB-?u$!a(fmW6v+XQ%u>q{|a07YtM}Z`y^g(xP=1$1x zFy$$MO)Onwy3rCh$p~}jX_FQbJko`9kJC`Q`UJv6vmSAGc$>~&&ilBtoOlksN9AvV zSqBYwfoW84qg$6tGL`-qPi=GZ3ICMA&e*cxU(GxRF54z*Oy)E9%T7mnvF=vl1PQVe zM4IukDpc($nF(3uK9?RrOXRf;0tENRM z?2^Mxpr!CyjNPTI?}&t^OyRkb){kineB8P?-rkwIFc~17q!1sH(8QjQIeb5#IN2`$ z`%-S@n3$N+5>>^Hn(g>FzpEn9(Y{2_|2fIdX|N=16vko=N?Sfo^Avt>fk4gnf~77R zq(4WTO$^%17gQQWbgejJ(vL6r#O^UQGQWEZ=@7qz>BvvD*>3rsll{UZ{-QX=jPh91 zLMC9OOa=>pBVZUuh58K52fmy`_&H+Fu(^04XJU&@u)~ zDGb6&71~+Qjc+IfbP#7fU0?@)HQmvdct~WEkU-#LU5R%0gho3I-jHpRf{}Nu5~99P zLoHs3-giAJg?2!Y&m6Y}c637Oe>TCUZ&Ds*FvaQsts*W80Zxv?u~cbp7bE@cwvg!; z8Sq#Uz=NpxD^u$GE=gudN8dN4KiOe=Y9`;kg&*qxpT&+yM~x6Yi&`Ybm2M_I>ML!Z zFr7R7EebsOMBy|HsUV@1UxiXyZsprspKDXG)W^QEd$+NPm0r)WS9=TN$m)g{@DD}y z+Th#Wi$eHo7F?$n$mgc?tcF7K4{eX$;W&=bV$5Jd5fu}yyV=G>Tkl=C^-Dke>>o=< z*c<3_2(rOyB#%Pc-}OX(nJ>6E8td{fLndl<3t{*9k{(3r$!9gG2BV$z3bQ3vlr;@% z=;`@sqOmL%B)az7fq@@%m=8zE}Hai$DP z(Vlhzdj>KbFTI=OFfxUzrQKU^hANwJY&O1lV^f{^;K0_Y5Z-UpAT<^{)wVW{=?lmi z_KTk};oQ!BYsd7FVcL&z4_-mYuW|2jgGVSWp0<1cY726I^fJP{l!N-0!Z4Q(YyL?Y zo@)SBzNUwlUY~8{{Jy6*y1`hf&n4@XCyM8Q=c>zG4e)%=R5zN?xfa7m_B%0xJ(Hk! zww@hX95Z)$AQgmu1MYPfihwXwNL3N_f0l%jiAB2;FxOKpEZHaK*kkeb)G9Tu^$~A8 z9X8PXsIdT@*+CdM$xeG-KiyEiNQfT9E|Tt3_2&E38U zYf#3*4+2c5bazn3(@n4~ol2YgWJC+C0lbU%oSRmcqE$UXY zr=%O!OR_y=E1R@X9?lrM7`#mDOuPGXGnh%XFP3W;m@=IEE<`m*dy z`;T=>yRo1B`N4J%d(BBqUk+RDXH1ysyA~XleiN`EWr|4uq+4~qsUP=?yK1WRy8-=? 
z(@xWS4?+>_q-B?89pI8mB+cNyeZR}BUWgwvgN(~3jB~%VWbs74tgl5ziyc* zNIgf~l_og$e&DW_!)7bjgqk#F`WmjlFi*@BVKa0?)UDw z>Tl}(JSIZWEW~8q-sw8?qDJDXMxsk6(=@gsCV7ILEZtP{IdyT>?8~0&{&sv)Q>C_7 zEuL-Ly{^Oa2aWBmK@+RhrD5tSGs)0Y?Qw&lc8_ZB3lJAGsf_$c%L zeDKX=H_hHYVD(%6aTEAsD z)+6I%6x)SEV9B^q_xMG8?Gv0O=PYUXX}TAyk9oOb*8+)mtN-|aWMQ3AcwO%{d1Fq` zoU?&y0<3ZQY4@2mQ|#xHm2YuGpISmis2_Upw6G_Xzjl8gJ%6EprlC5o)~8s}ikEmw zC?v$yB_w^6XZMW2=<~H~*DF%!3xj>-)5KH^4cD30E7!Q4e1n~4h9Z-e9IdR}r@H>d zMa{LB#`U$CURHCj;pm)6!s_QFe_kJ0r=|g`(zuv(Gp`HcnLn9ViXr6&kM-`z=H)Ml z(P^#j_iM1`kxKT~)|Z=+BICrwu9>xb8sDqg%KvWJ}($Zt9+a?)X_LOJ47&dL+PBHw(b zbL{4*I|@G3TWjB`Ys}?X^{#@u<%nBX2?P=jx2fH5`Jn-McyR4k$&27$2lJw#kp4zu z^-I&uio5QkSvR+-GOKH(wqqy@X`R>&5`f#aH)$} zSTY?yk#q6i4R9b?yUIb+HTQbO8lgd%t7>0Ti8k8hfLWcr567QaQgpW-HTOo2qnKF3 z|G?I7LqRfCk=;_)HN|hwKF2wJCm$p3q10WogscLCat3%#K0=-`8IwbDwyP%j8IiR7 zYmKw#t%t_^0&XxoegtuTgJ|6v$&pL~OOVl#KDWl>mHjY!i&aA#RFvX~mtC{r<%MZ* zc>wy2MUU9}#zxTu`nSW2Lu`1^=1P1C&M3o*Z^vY-Ecn;B^KVa`$Ub5$>19Wd{Q8GV zwze*aZ$AsMnjR(P_*p9>cV=JYMor)D$lj0~6BJn{&2mB2+?5*|9d`pI>$w;oP77pt z{cIxZqN>n8byV&y^`Sop1z8Bz!t#ctUV;LaLwk&%rM-m#QN^g@qSGS-vjbk+o|!CN z*cKBUAtwkD5L0va{e3XfbK)aFlM9)n4=iX{`iMMlWII-9UU>d}AWUid*ZT&l38spH z+Ss(0(kI%pwh!GwUZAjX1pbqO`PeZ5L9G{`cHg|On(scER|;>jL7v5Qx>#c+6%Q-z zs}I)%&E%h6Kajd}baPVib=s!Hp9*H%5`+At2yv8S1MgxIV3?I4o)PRCxy+KYyBI<2 z)i;KgQB{+C7H_As1LeTxyOZt6#v;m?iytM?`)@`L=edKJwD02;S^7TosBuzsO%`ZpLhaYrE?<8Gs(tJ11qbWXlYi*l^D}>VBMw4v! 
z&vjSfCfZq>D^2+>P9{Wf-3n4060;?rnpZp|#|+z%C2ew_Vw9?Fd?3f>{K%VugCFqn z2lwN8=ED2JhElfHgwIsk_D#09I9FrQ52T`HK_{=~W8z7q<1bM4IRe~ypnicZmV0;W zug4_MtXW*A1wK6cvuc%{mm!!kl+Uv7muHQ^{&{@G@F}wC)Sa`-+uS>5dQM63SS#!7 z9#(^&QyXi(N$eF=0Ajf)c%uvkehw?ejk-7z(KR4%_(nX^s!zGh;3H{9095(dhyZY&lJ9*kKo)i6n(pIFB z?S~B~n*`D!-jYzPZMOIMVLv>SO&XWBd@_mcXIg*9Vu7IwxgL>kM)Xve`s;g;;BCdRl_x&GSUPMHC*k`3*Ut)ABkk zY%hniV>Z)R9b~VpD^1~9Iu^a=o%3(Zw9i?#BO}_L1QRh9<1u<$49|NWAx>N&&lXeOE$JDXu@| z(P_WB;@iS^w|A-59}Eq+X;9XT=kAdvnznz4;b@A@2+H+rp>=BQ`^|oVQLI8e=*uCw zrs{2Lr#QM3-0tJkK#9B5ZBb0xlRBMw$a}k#Ewc_v_J$9VTJY3_|9aRe(@{FaSVLUk z<>q#OCo`&KGiuN3YhS3_YZDUNJ3;6$r><`cYP^*yb`^(5wg}6n>M};f$dgs+8?eqr W;?$I9q}E^GENH3et2QaaWBw0S@ynC| literal 0 HcmV?d00001 diff --git a/doc/source/gnocchi-logo.jpg b/doc/source/gnocchi-logo.jpg deleted file mode 100644 index a9eaca753678529cbbc0781ec24a6f3e2a06f517..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 17800 zcmbWe2RvNc*FStl@1pk-B}DJNiylPpL^pcxB|-EO1c@j~wCE8ndT$W~VWM{;h#7*x z;2p`mx%YjZ-}8U}@3Vd8obOqCt-aUUd+oB$KCZuBuK>5ymDQ915C{aQBL9HvH9TWA zMMbLzy4uQW_mz+t0Dz}@AMEadati?5+^;7onluJ*pZ+z+g6U97$B7!^I- zJgwb>0N}UHH&OucP1`ae8(D;3TttLhfCt(9f71Uk^AFem8Qk>kZ;m6KUwsB5|MU;- zAK8Cs9&Z3Z>=@ZLPyeCWya0fvCjdY>_YaNb6#x)M13=^Wzw9Bu@fRmwUrz~M-hhAr z9neV@UQxKZtDBfc8vGz9jyIaeHm{W)z-t+!_S-1$J5%@o{{^% zI`RKn@Ly{EOAjtxdk1@Odv|12hDa|1yE`Jg+uaWA3-)kl1iSxFJ^a5E`cOq5?61SV5d10gxC-2BZYK57GshfF6PzLGB7b17&HNz1+9X1K!>1n6jT&E6jBsAlshQAC}Jq`C>kjGC>AJ=C|)SRDA6b> zD9=&epj4tXqI9E-piHBzpzNX?qXMY7sN|@(QMpmYP!&(JP~V_d zqqd+9qE4Z%pzfibp<$qrpfRFxqe-Hvq8XssqIsc(qa~r`puI(FMC(JFLR&?HqFtfm zqtl{uqD!Ewp&O$+q6eVIpl6|%pf{lRqfeu6qMu-3V31?5VTfX=Vwhk!V}xKNVdP=d zV02+jV60;tV`5@bVRB+hV`^hsWBOvoV7|bt#O%bJ#N5P$W8q>kUkBzW6ElW%#}LOZadC5&|9qH3B<=2!fXcjRcbfKM1i2SqbF{EeV4Ovk2=5#|igt zVc)uQOYxTVt?*kfZ#Cb7+&U&AA>t>}CUPT6A}S{uCi+HnOeRETNES%;lB}I(Pb1FtEB`O!HG^!@5MQSu^ z4r(3h0P1||e(F6MQW{AbTbcx#_cU{~D6|~3y0js*g|we&kLl>?ROmeEUeNW@?a`Cd z%h5a2KcnxU-(etOkYNBbJY(o&_|8biD9`B1_?)qi5qg{Uw(4!a+plkr-M(OAV=`ch 
zWU66WWX53@V|HME#@xdUWno~^WC>-dV3}jZW))*~V$Eh9WIesZcE{*W?471N-`FVG z)YyXAD%cj-@!4hBz1ZKdf4PfsSM09K-MqV>IZ!x+Ih;6Na*T7LaEfq(IrBItxX`)8 zx!k$la6!27xaGP1xhuF=dB}P0^F;DA@%-Rr;Wgz=;~nI^;uGd`?^Dey%QUr<>vLhyqiOo&s+UMOGat1yYMrf{rqkMO04sED^nwaAVri|9kq zTv3P^sn`RtB(WiJba4go2=O-YGYJt1Uy1h;2a?>9E|TSv-=x^2?4^pN)}@)Gt)*W} zugEaUJd}AQvm$$2)>`(B?3x_2oSj^W+_wB(d1v`b`5y{=3f>A03a5$^ieZXf_fYSt z+)KDOrbMh{pp>Juq|Bu3s9d28RS{MRQRz}eSJhBWQ-!F}soAQPsX^66)x*^LH1IU^ zG;%c7?sMGtzTcvWqN%Q#p}C;Ns^zZLqz!1RX=i9JK45#`^`KP;L+63cOPwuUe%(;r zK|K;ZE4^~P6MY5!RQ-7ab^||y9z#MybHg&jQ=@xE=|(HYyvAY1BPP@)V3QA~n5KrN zC8oz_N@kg6o94piG3GNCY!-nQ!dOle2qfw_`7DpKibHAnlOu@Xb-$F~f1kN!BULX%DOj&H=-mRhW&B_I!vhQh>H>)Z-2*=baRkK&Z3im{7l)vQ*oO3k-VS{nx*8@Q_BtFC zZWG=U!5k4CvH3{l(c8y3k6j**M)F0bNB)d5ifW0bi+&Wn{zTCBt2g(wNVYVz^0bz=-EMo?PS&2(RVe`r8?pm$JyuyaUysBKtcxcQ^l$EFdHk;YHL zpBhGmM;pdO#u~>($3J`)|J*VmHPJCCJJ~&TZ))I++Lw`Ot?7vwgPE@obI8h9yRYA7 zU1wo){&N@ej~37u5*BYQW-ZY!y;;7yT(csw(!P3c_0yW(+Wflh`tF9$24XXM3x6wX zn_;``o4~iW9p#n?t1QC?8WR8?dSer|51M+ckl^n4Bdoz!>$jLj;M}Ge+vEV zIeu`wbmDe$ahd?9g1c^H$Vs}MHiX#w=O7mBJ=)Wo;ap|^onEq z3nL?nrvQM7tOt!xfS>Ov>fbt|h&)C6gVobOzj;6?s6aXhxm)_B7m8&%%AfKb($W85 z_jHUu*gqZfpL)}=|H8KbARXrqPx!09b>QdYdj;+*7At7XU0RM@Gj6;wS6M$@S;PSuRk&nMTD)}$^jVT}i_cs+lL%H!q z(68P=(LzEby74@uC!$g!uQ$0UN;mZ1R^HJ0kQ~2Cxw#-?;lDf;RTbHff6G1qetR~G z<_!WI}aOs9$ODL-auKt?Uz_KdZV( zK1nnEE4~X5zCK7fe%?POAvKmlHm-!CxBbohqVM71D)V2b8!6uZ%JGk_{|MXP zv-Y)@x#?JLJ~3{75&gf1#J|LU^ZXZSO%FS;L(o6c{CwPeNa??(wcpbJ;`x{KpSF;= zk^W_Zz1@FS;Xkwft1LTP2_HWjXM0=UUoP{fCx7{lg0&;>zsOR&|D9Wk_r@9iF9x(gt}0>XeHR1gz@LIgr3 z0$uk349KyK{!6`Ivj}qN0>}jb4XF?oHV!V5p#eF8fKXA9N~5Erp&@g_ZWahMB6Q;0 z{PGwiI@Xv>UZet#(_UjSE7W$A=}tgd1Z}({v2n;LD5uBUHcXoL*$ZSCwG9G$?iUP?R9N8 zHnX5El+4C^0*9PMXq6RqW7;pv{_hNn{9jr2$FP6wngj4qk&6ToDiI(HTq52iY<7r- zT0%>^w4>*)fdO7x4W^xNhN=7?n>H5Xcf^c82RPc4V|w>ID=H*k8k$L_{*W+HtmdPo z$&$6-PHb8E-KnN;IlchumAda0R%Q+k$N!8Ve64(_emm25_{UZ(Z8;(TSJkg8PoIyM zsvpu>UEyk91L)K+?>h7F&k~IHjF82< zm$o%h*_D_ENr$hu`*bi~R$T-39ysJyB;k|!DDUhw_nN|a>*Aga_(a_azU6Y}7eR*o 
z8JxrW#5G?@T#qg;ugR89)jH#ny{ToUG}HG|rFRWG15LO;+G$Vl8%A&h$47HT1I`QH z_G_R^@@=4awtfNUpxW{ocxNZvz@xZE>0{Gdnv$x#US8h%j&_{&7`C779v@hZ^%juG#k)$$ zdU3Q|?T&W@LS*+|`E~8)bC^Yt?##UAF;lH;SooBoD5NVHDd85Hw)@3Uca2uVD2Za( z=kx2885Qc>hFSxjrnVqGBZ%Lb$bQ)+Q7G0m5ZQXwy z?z#SXP}(>Y+ak1T@zL_|ibKcqu^^1jXJUR;E{2&_+OjR7`~2XfA<}bV!L&hH^-Bk2 zStiKNb#4&n71Ex~I4OPi%a9|ikMBOB z)sZjqdGkLZoF12!a1^UfRdk&3qnO$Ym8OlE5FPWwmV&P(KLXGwdG@r_*ic{X=Ja@xOoCP!d-pn-2O=lQZl!}R4yw*P`#1w&-=n^97=(t9k|fFYBD z)@O`x-8NcOgt=q8Ko|yDyt;*H56W8)9=g#FevGwJPrV}fAqg|nJki#KX)N!(=+`9j zOKaSn-c6Q#XyrHSsWuX``KnK65-*dv9p+s3J*?gvujB3#H)46mmVW;J2Qx(+cqF3l zy|o;O0(BvErV`wu^#WY3@>X~uLp^FUOD(h~aDYE*etxOy`^l8>g^9(I!o=+n%5Hnk zuXd-xEEJ{|Ma}`CtMOu#c#=G%cws&db`8R`5`x@OuuZ8|Tz1bgJzQUZVJ7BD<>a8J zS6i_2WqU=r0n?re6?S-!9;;R#J$yv|H01>|_wn$t(0rZ)cJUF@2YuyL=C7ZE#-%a_ z0xNQ~N?p(=aTS@ZAB$p4d5RBsVlzkOMjU?+Ds?fhv3|0>^}y)B_6@Wn>h&vXq*lD-t_H0!W}eg>jW+e_Qk z?F3<}>eR;w1MZkS*1lxs;>y~h5qfQV|7V19a(kQy5{S>&K$J@ZXLAQB!gzA|z879N zZ+#x`r$_Nr`<^y~5A>NDL(U2(J2iHHBJ#b+wIvw zN@|z;jmW_htkQ!{sw4N`exb-gjh=Uz;34Z05p8<^q_aLOJDMOq^!?x~c%f-ZuhO&~ z&AUlS?+m6PIP>k;k8&5i`x1y&q;Ai`Sr(n>>ETu@`tP*wT&?SWNZ(mYj=NmYNu!Y{ z;C|#CK%gwYZ%&oFs*NhHW)U{u%3#%Hd|yj6k?+e}O$8(O{kxpXJenp4A!b|Q3ZaV7 zA62WeMCO53Mlp*eCUcU{_Z?DJ3B^8nE1OVeqD2Z|TFDzBh};{g@4cJ<^sUp$&}E^MB)hgmQKT6r^z$MWN?_CH;J#M}X$XNFr7w)(tl_}J)m(^;rY_?nX zWw)f^%PE)M+>pTD(^v)3cirBEq3+tB4EANlb*HOeLC3ODa9<|AU2S?KrY&IfFrhd_ zHvZA@dQM@)+A{7=6Aaum7pdgSqrsnVj;=szN6?lZ0kTN#DtK~EF%XfGL-B-ga2YpH zM%=v;kr!Z|jp4MSh(%TwttNEy&WS8}TK&+&f|{3g|#DdauPyapYj^C zd9wzO9~Hxh%yIjP&Xv>%sLsBXkp;f2WAi@Uq)9j=sC-U^;MPjn(;+!FxQZU6I3WKP zaxO;=U!uR$(&TY#9z-V2A2k=UKbmgd?}!(W%>>S3CH_?Q zPSQ3Dcb<)nuH5wEt!IY?-9o`=c-)nGf8gmrX5okysrA-twrikRv!)AC1`oIfFc#s_ zT9XLv%hJ95b-lPX=xJ<@Kj$@I!R9wVEZgIjgiUT5Lx4wo%4roj0M5)#v_jD&!J0W=WC1d@=-$DJBZb8GKa8QX^?9 z`7&CH9z)(zrC3XreFXZ!(%G}FWoG}%bvZgzd7bMTkoK*8Udn!`_V(?--F9whzpV6l ztZzZw5>pGbZ2G6P)Hr+Ypl4kj|2NguAlb{{t$pEx_?BYm7e?qsqIZ9ZV}mipCM~eH z@r11mRmy+V*9x!34Wbfct-zx3cIF;oq$+xV+jfb{VRg18gye#0UAZxy>B!2jn}^~P 
z2bTFcDC3c2?s3i2Om$yJc6PRo?VpA;*eRbp>%B<&$8aXxls_J*v=s4Q)@H2IJZkCg?F=aK4SaBNInjcExPD@6Ny4K{>pm zKk9YsweLO!Oj0JzDC6u-1gF<2zC+-Z)PN#Zv56?fr@07Hreo(}7vHQb^}$_kGWa4C zvTn40bRifRsXd3Xe)1mX(N9!jR)5-o50h@w#%M1s$8ZVkLCiqzVepSo*)Qk|@UhTm zteqlAFHW9cmS5d=rYDA*T_`(6;I`32xz1$@Rs&ykK>U(ry$Fs^G%r1hzCvTh@xJKMsG->4+V&5z`RswFO%{G6a{*RRmR( zoX8qCw1+4n2eC-=nx?~)_WLG^9uh88~ z_C%`!e>;1A>+tJ3tfr@v2Eh%5^y9u2iXek84_NX~`A!ol*{@v#bj}jbc;u*Tsh+Xe z9z5ho+W1Mjh__ zLK?kJrCZ**sG@c5V|D%`q!zj@lqqAc%iDMjY=pygtTL_YQW0d^16J>P!4Y+x`!8+R zmg7vxCP(~Mt>WjrlNC;+rA@`2G#tNIt~>xe#A{vrD#pf=aLEmqgcYtj?xRA_d)G?4 zYiNxFW97bN_*EB{-e=KOSyG|X`2rp463E9QJD<_cvwv}dLD`0{B;li0oig4MT-xGN zXeh%>mu++S{jk_l1Ux_Vo&8Nx(z+C++zXuyJ*&9X*~W0H0gDGsP)LuGl{mu++c^?y>U0t&m?xR5V^NW+5sfn?26gCc>-r17I8JGB%nmVkI2nc2@W! zRP||B*9&Fe@e%bn_~da6Yl+NOk+FE)fRfK-S$vg_;)~Y$Dj&$e=Gh&q=&}TT zTLRT)ANGn0&ZUc=&6oUO3Hz+XV$*MPu9g<<#Vv|xA&G8E56T6+wE()H3gj#Dt~Z@V zvf~)@0oGP=5m4VxG3zJUa4>$uJWJyhcG^1v%<&)|B>1Y}e_6 zr!*#gTh(|P=+v)PAI8j)>W;Id^r0I7>1s7Syl^hr!Q_Z5oEn%}kI&U72DGL59#c?T z-8*<%*}!Lw${sq=p^wVe4TfCSMi?fzt?0m7Tj(jtlkUyBs`xT$-*t!0JL${g=Z-T_ zEw=A3=HBjn4ByLcZJ+xLPh92vzQlI*bwcX)tuTc-+4`gbb0JlQx~CF=iMPs3MPTBp zeh2O6ZJ0{p@kxTJk+1(u+QS;Dyg@0FuH(_LVrc3WcE*KtC`0<(#bC_t0QE|$?|xZo z-CSktf$L)ePeRR4+Ek2&v3&(_)}|gp2B&04X!e?N)oQa{$76R_p2cI%yU*yxhG_(b zaMmwY0>n@{DZ1eqJti@p(C4O;rn_vUZ_ewh&rY=WJnW~fPCNI%?{ZmLbsrtF?d^*e zUhwFsDebyk%m=-R-IKoHak>TwJLtd+^fEk7la_%USKW3~$`y^)g>w-jXiZuQ%$iGEUxM&!<96cyfeKGMAb+4Bns0_Yz!g!YoXUzAqlP9-LBVbufMr zf?Ti&wxxP85^LNsHiMn&6UrxDa;~S&Yj;^F)rv2@dLD=8z(Z@F|)^x?*z=WBQj9UJ2_|x)VJx*YR0tZ z3VM<63(#SXMlk=*wTi_6<9rWz-tNBb$tJ`@- z&vp+r2Q$(lMuvtZo88+XAJ=^kI4kH?DAGgZ%p5u+<|??g8^ft*&35z2 zr~K&H1{zOKsN_7j2So*x9__b&{va-`!qkrA@>0?TYg4XaNq-*2AUg~*ahDTQHFgVP@adA%<8#@te13OKV`>4Kez`?BN9W zf$3AGmuYuCzwDW5u`FBdq;a9<=@6PvY_8%t;NG5dcN_ikzNIql`HQFG1{t3oU|`jh zbHZd-jC#it7_d6XhOQ{!2kSLB@Sdg1qUj$$dC3>lTmphPrKZ&9LiawaR{t3Nz|o}r zk_GamvlfCN3s_@_l40tGqMrP8k$q>SbPHNAfG^Rb;U=-a7|C7zmcVe*yN}&lfqX!9 zf$|zK@f?qnlFc=YL3wuO%#z-sv(+%Y*63{5vgx5xuCm@di0@!5HbTN0*c8UIALhE8 
zxpx3BcvC`MA?=16*_Oj}u?F6=fQ+V*>g{iqH1@uAT<3lqrOtdl znj$&rV5N(ecFV=!0;XL^4$ZESX@2$_mBKE^Ye1#(rNKqW;M3wdMkNB!Vp5qHBW>pJ z1T-RUy`2P{^;z6L*;0)qV@s;wMf^ssOrEU7CL)hXPpDw_uqdg3dtgJd!!m#;&{7Rk z<{7B$TUN}09M^~p(RKraxK32sQ!hAVaJm$yc60V}=qj^otyVnD-poYL$bD|OdIs#i z@1I!(v2Ab2;jVC6$rkwWRMJzQA=9M-E)TPP07r!-_R2DFBY17AM^+bYB)KKbBdr^>E-^N@UZuC+_@033F20dOun5|ntml#9U;DZh((W_J z>#gxNgq_4r?GgXDUQ#d~1n1S;QO!}dsfw$1?Xt3$BZ9A`g!rau?W*ofCb`bEG`>9A zY=tQ9@g^`py+jeDO#}3|T{>|CD|+)u>W{Fav?iqZ$v&=2VWxjmADj}W`dPyUi$IX$ zUg5aGsISxz8W)D}lIeM;lx4*%X(kc6ff2u?a5lEX8mvhKP5F0;40*q5C2MOm_aEQ$ zZON$EBE2%^^Tn!>{d~IT1|wNttc3+UIxZ{?G{xRgC*ih%%e`PWuiz?;@Y_fS^| zQQ^#*5z$L`Au7pL$ALA>Fp=6aDsnbuU3$sDE}THUg6GNQ0>MTVq3oA6$6>gAUI?Dc z(s~yU0(iOca2=#E$kehfU7<}&i!rl{?W0f91|BVUIK!GFsf^fa61P->RweuBSdr=J zT!MMsg8=U&^WslceuXaCdYkW_fmg+%NaAWVbx%6+t3Fm=kx#0Lq$6L!k6XcT7fqbK z7b<%;t2RbQwmq{H#2$nI-~tjUqqm_laL0;Wk@R~lOo5)u}$*y_MY9a z6&oiDP|2S>wBToXh+1KxMnV*yiuUjnlj#*im|-9HZM8dNimmIR2Bke(y!j)m-LApO z&Vs`|O*{e~%Cf;6{moB>6Acqh+tp38i_T$wP9s#>U#rNPt~#r9V7z@bv+I4q+R6Je zw7g z=Qo zETmagc8GpFGRG9*DQJk?)G1j;v<3K-QB)QV(~%fz>8ejA)NX3Itt)4`&nG%kp; zDzdg#73C($)X-`%X3(iuJ!-kp&7qnD+G` za!CuCX)!o*&hn=q-0Ei8C%Km{(dhF|@~N~c+f&Y7l6x?tJ>m5iuJxMs_Hf5_)L*4+o#5C{U@Gui)9Cgw3mFF;F@^2rQK+QL|=k5pecVYiBi-TW%iw7&xdn zNVmJEix-68hv6UWORrzB!Yt#mJ7GFb=j&&Qw$2wi?p%*q zf@&q9rR9?B8{8qv**01Jya58L#ymcE7nYN~8S0~(D8d0Kgo=a+BE>=fnKgk~c3kB^PgR`+n2!64y8KYG~Pg zKQ*220YzOgNO`{U(u>8bljk;dJyN2d!DRD0*r1nV8)}PtS!g zV&r1m>Qv=n5V}jcR9chU?p3e`MBFw`huh4nOeaTAaLg`79u2yGNOlkxH&~}yhizpW zz++cDaCdZ^!v$b@)z99&GjM5EuWWtk{F-x!HEw`E7vd1`+=130N4ci|t|Uu~m07@I z>(7p1ZtUL4!mO1ukH)Ig21R|?Y^a(@vJaT-lVOvGl)POKi%FhbvZ+$nI&Eb8Kty7PNa%fI zo_YoX1V)?2OZ{|v>XcSTcOse*)aU?7MK$_BN<3+$Fjl+@;So>ou8E@8rZt?o#bN{I zt#1#T*~nPd7LWZc2st|LB)ghcm@EnXNLOnJtMT*mD?gOhow?iF1*tZk6BwA>-tx5+ zr@ILE4ZH?={bhLJL2I(W@*bb%ju9HwZIg5nEMq}tJid>pQRaQJx2%63lci-OB4aRALW~jgqF1$lE+u(v1+<{I?MdsiN!bC(wCG()m z8%2&Mo{3Mu)glxIiIJ2^D=02fQBUd+>mr4Cq|)$V$&xC1!@Ps?DYJk{h|p-={Z91A zmo=Q6uBQPOO|ti8SF$ArsWtY|&eCf@;6uYb-^+%$TwaEVo>7rw= 
zIbaQOju0O%y)!ddVsw;=e0SFlmlb{7xl(uy-1<3dF7Ht_;!-j9f;FuTCWW(6H`JFt z8C1~?xOD+|Mn7GwUUouFtl-R|shw`T$oQ{U4S@qQ7l6N!mY5qm=$%U%aiL9BUJjspWNbX!`k8weP*Y$#l{FjQShU!!noI5OI&&@JgtTK~aY zHb6*AAaPusI=GAzE*8o(%>+H@w*oo0X!gyF__=vhf(1-8S)`u#u#}4q1Vi~;@~4FH zCB0%G4>TkLjXt&z(_&v}X6qRZiDbpU&W9b?K;;LCnfksSh_&Y+V>|XV%85pqKi6MqJG%#}vAqb7q*!i$eiuEJav*5e zSVuIHIfkr#+CDcE)CW#(KZzREKn)ZS?@?UcsWw$SjKvH55!4@Ko^Yw%X29x3eK!!I z*DxdGKOw~@q7lU6@BjT^MvcYzO9q0`ogp(kA(RcdUqHUzT_UZ862=t0+{>|lU&vMG zuKE3u>UT*Gdu}*n`=qKS(95IVWl^K&aOd1Fr5PL)!KKmsq|H0V|L%Y%!Cr{bj&gdh zIrI>ot>W7}bxb4lA@T&Zoh2FC6|)od;UQ!#U&LteQV}(}Fv9k(zIWr8Mi%6!`S?U* zxKW>g_nOIcMH{$Z;oNDVx$5Zs8STYamA5WNIqQOr)2c*j=z3pL#8y@gT}G|}w`CM~ z>>AtNs^}AhtfSQ0x>nAwNfJb_;c&sO+}lLr|`d z)r0VHtA?5$?WBC#sVbwsM8c%o@5dTeL%!1ECOW7nfhicdYSULDi=GlPN0}{2Uglg8 z1wz(%YTpK?cE2$`o*$96ycpr8a*TdE{NvS%`!_A^;Por91q3?WntQAYw(>2*3HcE1sVZt83jtUYNoJeh~ndAi>iusG586> z3CHRX3?QYxt(u8wMZggc0H*-hZXsK_0%846;k_weXv#*yx zDZ8=}JbM(;8bs4se_7D$04J+l2vz^W<;v~0_vwTp_|X~nk9G8_JUR5lpz;qm*itYn ze%}3^^_TndmB<;+Dv~-W3-a})?o`8vd*{JRs$aow596Xh0lOFdhGX}wn~{OZ>E7|G zM?wTX%}LDVtA`(8+6ke$qkiOhAwhiaCrQRGzj(=23K)0|vyh1mvPp|Zh7$5d){itx zvEdPs&fLzy4Pu(7Ax%09ymwni^r<%OTE_@WHooTob+tvjHkD=WoU`UYWX5Jv=Cc9` zT`Lkbk?L%{Eq)=v`_qGDn&o^hDsB4}7D5yI0nMw_*8qal^{t{(?3}TGA*Lc=_#41@;Yd2 zS})&Ix}eLF^!-E!!#rGWPV`wmU1RbpF_lOL#+u-l&F3e3oV&HN<0y8p;)IcRq>cub z0-L&qx>^Fei1&?U$0}0q+rs(B3TB+yn|zBM-)rs9h869*I0gMc9@?VIxvBIVe(PX@ z%kn;H2&w35Y$>VoJoVRMI`(N4b`ZNQHz44xiqamja?mX;;1)it#}#2#*^Xd)&%?=aIV37Qxq0L4P4)D;e;!2dK1cslfA4Ms*}cWW;>t<)6H{5sES^vS zIF%pN{d6E$s-=xOD`Rb$$il^c-tCizy91zxSE+{QNr7gY@ckjQB()Nu)I4ol+i=8< zka=p(QDU*tnis%%H2$NKk#bN$+W8>fuduQ>Gd1QXor-RYy{#XP@u)-AW~^EXa#uwM z`*4mr+19Id`Rq7?B7BhnyMr1Yu$CJ4lTLB%2_kktR05+&WfuLZ;8`P#kHa9H%xdW_ zF!*7c#n4V{b1PQ z^vLloyyc=dpM27t!D^bDtFmf4#6SJJQi||X+q+p^<64}^?*mrSgU1JIQVO6CJctpR zaB<{u(>nw!v?-QpO3Empw2vOrce=*cc(P|q8W3Io9=mX!srn0-pyu4cR%Pb0gKUs%9X#`LrkK8rUbf;0{H*tO%tH#e^NKHpIqp z>DQUI_mJjK3=P296zK1q>I$a#SUq2ENJY>nmEGeB)wp0lXx4R}&8fDDuNx2z<@NLj 
zQIpxupT>3UTh#~|k#q9)*b0}?g;X&tg;^1LH4#GxTOU2O$GisI+Y55)j${dB z2$84CZOcs&NykK`p#~7d-IYZNHV2nxo+-Vwp_L%dd8qvMfy6qUq|x*;e#FXr=WMoI z%F_0lBY5y$6GbL2LCpW*XaR_b84Sr}eTTqXjN{K6we%V!U3tlr2@!l%R z7MA)bRHICz+#(QV=8Rid8e{8zTU7~mT+A8?~`fAr9%b?pQh8E8yxiAiAT zi8g%1dW{)KnOVLfTkP8j%4izGCvwet2<+51N#jRwz+$nz+%58EjA>gk)pav`i>ZYc z-|VgB)J>a|`krd4V$wQ!@OqNwzMmhHXbPF0kRC7Sd95xcvprhxOX)qQ4lY~>LNiZGGmH# z7?rmyIWeV0r5{JY-s+s@N>3*$9uZ(|tT0r5gp2l3&-(5flu_$tip%S!Hj~}SEaov& z{w^B1smV<}vz;A34i6UlAg}V`u2QqKd$y)W>+}+2X1nL46HVxVJ}}l29`z>hjeq2! z)0PGGJ&{w1u(@|&97nj>)&L>R&{nH9UEoNUEMYjocXI!8`!za?&36*4Nj&M_MFZ4T zvPtk)a}?m+D}d?%zr|_aatu8Wv@Cm&2!R7HaJ5PN)M?T|{(bkvYe)24=96QjLEN}F zQ%4}Y3_5a>2KyvKd-;~9)3KB0n-#7UZ-vK9sDY2e_$Cp1(%3uqMM@P$yQATAhR5BW z5ORm+BK9q@;QE~j^BHHzMoO9?{Kuraj^^TWfX|*F!R_vG^uRST<)5aDJdV@84d*|n zYzv{={caJf4Bf*rWLFcx&vw`FaVV?m6Wy4Vg4W3s(xSCxxO>3*=~j}e9KC5t(^J=g z3q$1A7jXRAdkCR*`sD2BEj69U@(*{L&GMrOGWV$i;zK3h_WG>V5IT#TPAt5$YiVRs z-7r*{XKJ~q3$Gt(C+ThRh^?;ijliG9y_CNOLIRm^4hL_saCV66BpufzROD`C)@J1M zVv0@YR)4X3P4DP(QH|fvX=OMcaw_?4)rW)9sV7tQBR1GCz{Aqb_fV`8J$$kNKh($1 zWo1{JcBjg#Y#?o?aWDPFo$T~p8o41L*Kyal3`RKEGML0R)kXsi56NtT6sQ*HG{11I zRJ~`Ppu=fbZjIqz=(U_?(G-O+g6^)7^;gwI9ZPbnYoi(WoezG+EFdd7N~cnM+e=hC zjGK-jm^3BanIwY+)in!LRB!mu2}3_#$o6u|p^LJzSm47`#0~6bq~-ktr!}kx>%~0$ zw(2SPY$Ll_WU;~H<>CW>ug$RPgAgJvCAHPdV$+WzAr;Q8E@63NdaK!8*FYjYZ?-Dz zs(szszI3?ky=bhj$wnjJ8`b%|YMGypo{EDoBrRwQjV(G!uG+3d)= z6$jY||488&_Q@skBYQ?kb=$PM^l-m%9V?CSzD}rSYHC4)e{MEIL+TTR8nkn@BhNja z;Yrq}n_nmS&~Eca%ZKC#`-*6Gki8H&?FazVPbY+S>9XTsFOt8W=(YzGIDPr$8>3j zvvJ-Df&B2J|0^v42PZCt&cJ@-Pqzy2M)ml}qS<<9_w!sO=-X+m3?Xv;cgq~eTe${e=;gtui=3YcT|fU=oypcnVFp9izhy3r<2IJuq1@majfipH+h1SQkiSd z5VE&eRki6m?{>L&wzr+gxlPGi@ev1S*S%tV+h!lRh?fA(+l0S63p6_t0tBvF3X1IsaENe-J<478Ei$L4?kPvR@6DpX!qMXGnU_pa# zIgMX@{LF_Ti~Wb~?OA3Tg^D`7q^D}a-#U>8e-ArnaZlVdMLeRcKvL3gmceOH5)GSb z8C+g8W(sJEpB4M0dv6>D4xk4glkF+;bak_oRdKsXO4j5AFNfp`4A_*0jP7nPD)9*T2ykI~>zNvXase0P;By#&Z6D>BqcPRGH zscuONyS0D2He=4c^&q){tmVFkikSdmb#wOTM*(6FKD_($>dl9a9!5oD1px!;@4-3Q 
zgSk~g!h+g;xRW#nH*SiYxTUA{UNR%tJl;1SRx4BW*G`PrYJKuGibsh6|EEuZPNv2K zV#dhqRFAyNx@Pvd6*9X9R?;*2I*+p|t})DG4@Nxp>i1`obT;PSu@e8-i=UAv_~*NR z6=!)vW(?K@ z^`4V_Lp|zpG<9Z19jquAWTCM;j3Tl&e2bxnY)~a?mj*BR)2AQ^2`f-la)aOS9oXK} zBk-vkw8z=&qatwyg()`Jm(Uc;pcMCiK3#HA0aH@W$ihn7BY>{xr;4bY6$l-aZ-Ngl zkP=i$XSpK(^GTB%ft@_Hj4Z{$&rINjvJ8pLC@WsaI68ZrDODVT?~Y6R|ATqWi^6at z%ATKY`Z5fg#2pVC5~}LXISKX=vBb7V_)EY{J%&?DKXD&mml6oO`7Em>VB55V&X<^UoQ_A9uAj1o+1jDQelpIrr)J8 zfI2BfA5j*1TfaIOwRrSF&*R<6MPAZ>4mME>?CdCGT)gtX`(JL@295OQ5vUrK}UjzmbfB>ayO`{A$;F?*9gMvXhow=vO1_KxuKJv$TeIIwax*TwUV#LW!}Jdye%MkN$WN*!drKXlKfB zn9%pH-|nYc^9Isej-|{PuQ&|QfV$uUnP+jg`@_7D&lE(T94j7dZl3$E(%2jWJG;1o zcP%q6^{R9^+Fzn%&aH0p`X5d&{e0tqa;0Ix4_*jeHb%VwL`-qP`HuhWy}X z$hwP(bKBb^laa3AAZ02JBP0cP&vwCdjF;*iil;XPxaP5j(_O5p|Ne5U&X_G+toDl! z5EOUWc>lVdk33I$397OqozFc%m2|pmOXXkRU`=K$0cLHB=7AegOGaqgezqUKLi8#- zhLFNB`ATU1%Zy9cSvB8jB)$f2=5Td$);#9p{eMx|O4F&by(-Wb^vuu8K>G)ar}LMf zDb}rl?Tr@(*R@~9j=$hCkkQ+(7q*%h))i=f3Ppn z$t1w=FD=#e1j}o5KpCsz#dBI&2$gl}rEs&t_l9Q(^Al(k7~z=M%Pe z)4#;_8CAcXE()`9y&BDLAn-3m7aLLys|7#Hu{97+&wqx-dQn{la&8~8w}X>K4SL%J zJ*)EivMihPFZDJ^3L_;h>XM=z*aXaY38h7^5O=X#p&DBA$^A?@^9bRLZs7(SlCfrNJWw?uWWCb7%lgjK#&$x*up$i&#sF*$E|ftkL#|9_~kup7{??PKZ@?mblLu;~BtW z*&GM9F!oRVidavw=2yMWa&sM6u2f9a5JX-~`LCzz(WP{5y_);f!KO;%zOA#aagZp| zlo;VXoGt~!i8DKlRn#%y>@pnwbHd2&oVs&4S15brrUP|}7dm%@@7*e-Dp^I(Y8f!= zkUQAk1Ys84{Ky>_9Q5Bq+LOydI-Q>f86>^#+GD(ytWEJ^{o^i3l`(K8IuP|)%8vdM zc}$!i=h%uA^^#;rw&!nGF=>(LeJ|ZgH)B1c9oluUEiUt*jnydof9!lJNa-~D@tn7a z#`BCE`zKorLo@-4+Z14MbAc{Tp1kft1ZDBlUrB3sjvxKcg~WNxa^NIu8acKDBOxvE z;d9MPF$9~QQ~7Pfx&7;#v`_8X1&m*#LJ|tR%@i>Ix%(6K1O-f?K%qHWqXLuhVz2GT z5Avsep5fl4=+zu$q{in$bn6Pgx90!-#zO@RAqMFtDlGQf;;CP4xF?UQOz!2gGjC9@ zM$520|JOmElEqL)D_Yc@Q+%n}DL4KNJ>zDO;xP5HRuzTe+rA|%wQP6$e^=<{@yIbq zXHo`XDXmw*oSHe=1mJ^>^UTE=_*~<9u)X;Z0XA8%98f(|X zrq2I2O8Ld=yxJ{MhQ1SUrZ=H#Ezfd?*jgT{f154bt~-WejLK#IQ%a=6d1I8>eZs^2 zrsE4K%B_93>h}e>iSEq1kxGkSiCgmYUS9_0@xLo63zDq2&97>mEg{;>dIY;8H8TuY z+ST|oEjd}^&Y}B1H)RXN#|ILVcjTPu+>hIxMF6j|pJq#^&UEqBKljDgBgkua+`UA< 
zokJ*uhiDrughxC|){*aQ!P!C3a#~%QtVPgz_~iR0@Aa^yVRdezaOWlpywAH)RrhZQ zPKEBw{xv6Bb}jYF8x%phttmx9zUMb-LL)+so$e)tbp_qoI%z%cdy@{~)jJlj5VSK3 zLH)AsGQIV zDP5K*ce})?vT8|Erf>%>rY7idvczoHS039o*>XlMfxH4*h}z+tGC$U{>0uqLnKrG< z5~(yK>xud#OY3T@ca!Vu?<){Q!CSn9m_IxQF|^{0|I@K6eKsjb5(k&${5I_xcS}`q zlON9%8D3(5_^epDhxP4r9=*)myK_?dBHN1TNR)0fuxGHjY?{Zr=4h#fMD9D;-(xe= zRK6^|tr&rqS<+NL8!W{Ako_`y^Q0AjB=$zc5<;IE)k`WbW#X>>)lttg#-E9KDNjC~ zsqEQJCmubJ>PqUGk(bFTMmIL+#fLlUS4Je6k-n^(PIYu}$l*@T8Md@!L7w5dGU;s9 zo%X(p85Z4yhIpHvUjtZLi|HPe+~Y%P2-?+ASHxt`Tegf4#ai4>Eg;H+$uWrjnIG)t&0XT^}TpZ}|}Pd^>I#TpMm zSEC)%oj~InusCST--f?X8IK*f5TaVfs*8Ycw~mPrcB?Jy+wcMYKI;m4W+inDON~m_ z3~a4L$=ffu;Ky9dcC8u{#c|(?Kb9xjFuyWyNom5p4R-}*K2iNF@sZ4^enh_V;;>{N z=~tVvRP+&COmT5gnHYkac$stwO)gl@^+t`bJL>y8Fn6*a_Ch z_xQty0JeY&cxmrEq(h{`p5p0YbgBKPu)vv4D_(+X`>QOvdqsI=2bh*Y|kWhxp>6kT|XWOeVcrx^1Qbqo-^fs4tcVDbx;+ki??);|)`5^a_}! zbW(PFJ1aY?PsAs|ELoGd_M8U8F+>=N=#8WDl92?724DZ`p7U6Ds-hE;Fc&=rNJT8q z+7iNvhqQ(}MFOwe_RE9}4}k^FYw z(A`$F2iGzj(npEHEUW0QO8oF^u0FfD`OHWDe)_FIeF)oud*9HfMq9p^X~wG%+#UFg zVTUy_`x0Il`2G@}eVzGOX0$*O$h3vqu%JrD<5eeyiGBR@}`rkj`mSRn-c5O@fpyNB}r z!$;)HAC?Sj&HYdnK+?M8+~wm{*1rAJstOpnN|Fn7_U^(}NLu&U>)E+c)se7;xBn3R z3|nULJZDY60~zIsnp0I)OH2|9FflSW6O1_lg#(M&b4J=8$bEHJNBdlQE8bT2Z@DlE{f&}OdK^c< z=b7Nrq1fIIbrh!7UdH9G*QDo&F!9Q=8Yq(12#ULT8eLjOZ(GBQ283-?#nYXh&6jy^ z(`f{C`e%|*(#c0M>EYze0CTo#PUbPhf0X`){D;5lKy?tMm`w?A83XnBP`sJQeSflj zcz-SN&%qa(ej)|T?m@iEO*abe=y7R8JMeSj4*yhD7e!H%qEOhb|6kpe%~!8IIeR@M z1a>ZtRp3{}Eb^ud@0g4%yuBB;Fvst}--%l04 zmP%^H_BT3AxzaqM>=ANT1C1jHYty|bz0+j*Fs>Pidt$&``P9z{;4QSMGLQ$fIQ{OV zMxfFyG90)!q4dkT#?(D*xcD~^3~xBJJ$?W~Xu|#zBKEt@P>MSrtO4ch0k5@Md)VBf z5#H0FFU7U`jW)nE)$`f~vgXX*!QmYWCu@ruca zaaDh;a4{KU4v?KdV>&%PkwkJnlmtOS|F`?2pt8wQGo`Vzrz&Rm<*|E`_ z^HA56E_}8}kXwsJ`V$`v=|ES=d8Qlj^;d-1*j#xT1T8#Mm%5l}>for~o~5s^KZo5) zv;$q?6vk0m?t6JF8X<#w!XcJAvU0R*pMFapLCvg7ZgOcrJm9sh5i3A%H(AF3MtR$P zjDY3GmyZ7RPZC!WBP>rclq!gzCmyy*ZFW3JnXJo^?iKxTdpKQUN*98g9D@ARF$=n@ zLa7R9gzVAoMmRe6;1<%WaBAE%2GU_O?30=<6uhoQGcA=9fg1Mjr&15kC2M*=aJ+4z 
zukRnS)rPrECEd+S4aq%Q{Ic@EU3}PY%Hr_2Z5HkH{eBf_YlM^n-l`b3X`0l1Mpym3 z6>TJ0DUn6hq}*1&LQcA!*z{=n_v09kdY^q#-V2<;7hW|+g6tTD$RTOUOQGr6#AL?= ze0T&uCShake9`df*F80^f{a_w0$x;aOMo)*flhB)dC^>2#hvyy^z?|iZ5PxwJqv;z zD?bTze9Lt+nYx^0yDl`Hc^d8$h8jNLB`9dIHjtSP%Sx4uX#F3;=B)XQZ_=3c!9hr^IWs6se_!Q|z5t{we%`>HCf#+VTgk(%{iaN$=drUhle+Tg0Y;EwR zxZ-uTI;NzKXy46h0CvvcIq7m-d7q7|;EC;X7r`{`i;!PYkE1hJh{OHHt%~0Vvdqei z0-z66#=tmD3Mb~av(tS302vK=Go>qq^UA{_L^rq$__UnQ6xLr6Wb0r&ZmfV9A&?YTHld2^w+R^z&A(ObEMd=S$044K9)~ zgPCX@$!96rujiEswtLa4jdmK!2Y={qA=r2J5)}@1^?MH}V_J`BM?%!Z2t=$T#ai)Z zQFjic+w7~!g0CH?OZ(SdL@YuHnP*-BD9l?V`qVlQ!3vn`gi{=6errS)GcT$bns%LM zX=Kg7R?zWgtDtU3C|>v(AC>C#ZsehN=becY1OgG_I_s_5UgxI%6+M_&8v~A=CS7sz zyLg?nJtCohy}(t0r=1d?Fgp)$??K5M**f{+gbNlaGksCj)zx>K>gthX`I|bRd6JK3 zN{0X_IF0e#tBe?{!?MJZ8*n)c!u=W)^C%<&2pJP}JO^U1FKVzp6-v4AEDwUB33@ckX$ z(`F#;ZD6OBJZiM%Iq5e|x4LP2xU+wzRAA?*dQP%9%jY)qs&`C#oD;t?J2uYPr~0Ra zs9}Q_>~MigmYfTbY8}|q5d#2Y9husB;d&&DQ<^*b^(P^iTq&?2RAnFcnTMx2#9hjG zV6`)b5TpHwR_~>RLT?(`X*xaP(CW3!{OLxqQXv(dT{)OIbDq~&D#KyKty_8NCztkB z)&Vs&&%JdEr|W_rhpG?9O3lL3Vb$+MG&T8H1}S4SL86?UuPvf$Gc>z!6ta7ZIT%^IgPqRbt+j;z zd^O__)0H7ZF_t(@`x6o7QO}-S5SHKO^ER8ANxg5QtKav}sw` zFIHLAOI5(k;(A?MGF9T}IcwWAIP%=oPrfQ2zemD?Mr!icN{ECZFnofz_YPh8i`36Z zXcE;&?FlZSj~8q6-Je_s(JbvPkG$_`+ySwFuu>7tqY9`(@F4=u_RVb@OH1YWLv{`C z8TX}>7mM8qciRuKn65YP)p(>lA9MOOdLbN5IVokFZV_E_z`nR^+UQo1Zg9yjP3 zikgKM6l441zQSIJ7lo6iRQVlOpN$)x_p+S8yUFve7F?8D>JwhER#B(>6mPQq49!vY z^0kl0K-~T~$e+*97e+QwnrJrs&%<|oD{?WuWr|xBrDZBkei>_gTZPhsQmz5bup2-v&Y}xC$9}-Jw;kSoQUY*1$j?S z_*0V`#ZZM*1W~_pO|-dcp^lXk*M51}umvf$aW_*?e04cEvm0k=kQH~(p}2bILHXvq zhpgC^B#Y)bC^pyr@D=JEx-cmUz#?L_GFCKHRQ=6W$q@%1!H5llxPw8YtvOdj$4$E2 zFcPJd&-;#=hst}Q{axEtWP{5}aJd;OJV-HwCfEFnYSA@XPA&CwG+!#JJ_)&d@VA39 ztz<%8>v?cjHr_~>x`QG}B2dI-rf`Amd7qW&>eKNgJW>F2>78x5q~oHhnVM@RS86ic zy=;4zPQ=WiI*nUP-S%6k7TW$k^Z0;B(N)N)S?{^bwgRw-cC6Q}AW)GJM+(ywRu2V& znK%F_-d~S4DbXO^7`p$+6+2_;dSkZHxAF0l)-t>2=XkkXHJXZt-`#NLD$XT?q=mqw z2)oO;e;**GB4VsEtUCQ_kO}0-W7vg9vQ6IU?3t)?l4LVGsi%~$M0g+UiCYRZ$-c<- 
zceH`wS!1Cs;!*WMibKn&bY1}zdVA>zLsJAn9%8I4ls#O|+S)|z{;&J%?vKJx2gr#= zS^>?u!7Jbx%->K1W38oruS1pWm<(GTIWH(3(H7#aPB5)RwnanwAeMdlqz zZPn4aY)oV_zWiL^9f^cJlugf4@?@p%30)GuKw#1|P|EE*DJ4Kp0PR{B8Bl}BjLsui z7;5gPiqnlD1a$r0Cn5uy;}a7pLTH#Z$R=Nob;DA#eU}?R5#9Yp@JB}#up~h^fn2Ou z_=9ad@l1M1j&|wWa6eUaL@r6BqlIs*ZNGHRsg|Srbz{;SHO$w6!K}PZk&Ib8I{wz9Us$z5eJv6flCwt*fHHLJZr}%^$;LXkle#h z{07f&8m1#A{ISeZF5GwTW|sI)+_iZ0U6+nlN){x65t z8I-Q#hz9sF>6`qEi3p!Oo-=saaSCM+9}Ut(+f9B^6BZE&IzH3&xe{@nmy2%T&ZnUj z@dkH2aj1Peea$<-=UsI}rUixS1?*=MI+*Uo*DBb6eh*TdP+t zGrY(ib~%xu8>LO`mwbiNm1SwOiUcZf3OAr8#K=`M+4iKZ@8bz@C+paz=bf+TLZ%vT zn83U>J^NWHVno|t0^}-lbAeQw$d8bwskbuR8G^bJICNji!H_dJO^;PgHJ;P&$CGm| zOZ#IFRVHmgtYiOUwl42}J;UKo(QwM4mY3)aFlv)~Kx(?8#}MI;a-JrFSVR!`Ycw75 zIP<4zoLVNu0*rIH9(Xc&G(%I7VJ&gj!P2(_d>8LM{5{1>K^)?K-O*2_qy;QP?{R8! z8yO)6QVY=(rI1*}+Y0h5opS#p9%O1uJH6sW>=$$6q0N0#&;tc+EY|4<5R0N`s(-)-R$g_ zdY`#dGuz>f4Jw@3v@cbf;=2@`iZ?PR*@%fAfKr42e}S63Gszbo0+Zxs_IL1$NfPtL zi;zvpQn~+1KpUpaZnmN1Y1hS|<##$e%e7=NgFOsmZmTtyXW<2xp%VUKHbw&IX`%!e zuLD#(LAIWCKXdOR{yI;l9RUeVP0csRt4TL)%6dHWzx)P>gwvh6c+pF`Jb0!dy?0LQ z^!%hCX(vSlKg%|RbnFVmeG`$Vb|6xaMLb>Ud*Wl5ufpw|JzHTFr?A^5Ezv|4^Z7at z#1N;s-7{ok1G1BecAk6*6Z8h|n)){XB`-n$1&bM{yb3$KTluM481A#`zTQ+e#e%5G zbNfECq*KoKisYjF8^?Y~m=z4tQT!~03k0%A%tYszz#w|NO#{lFE2RX>MQD3gs}npo zaw9|0=?5A)@mOVlthDDI_ifs8ho(yMn9mao10YUCN92=0PHLBPQnX1R?(QiV^CTjd zSguc%A{KymP^7Rn=W-DU5rO)JS!X%pB!{!Ze>qJXHX49!jC-r8m*bq|4|6ObIlJre z3g;l?Tb$!_7j9NfYQI$FX6#PtpHs!&g=Z5_R{)9JNoPOPwNlgP+yDx;vI@xp;GA@M z1dw!$!a2=O2&#?+rQK6#Ikdb{nokcuG?F9Hib?m>h{Kl4m067nOm}+L^_}N6e>i_8 z0c;0wv+7cVPq%LWmdd4qBD-Kt9yOzZ%FwPh-dnUY0RhP5?0j|Z_7Czb!%P)e=!(va z^Kh?gV5j?-zb(H&ZtcW_VaTsRiuWu5RGp-R_&O{f12Mi5^F7axpPHE<^OXCgL7ExN zO!fprRbUYsuLRyK4Owke26ejkQ*tsd^tL_K5Qj|I+U6amIHm~Lba$4~yaYl&-J^j{ z!1EHR9k4n6**xOvkBaY0-f>8nilwuT)S4VAl)zZ!{+JAU;>#j^F%cja!w_Rtx!}dq zqtjbXcYc%k^iFL7u9dWpfUIBj(>|WXU*N>DG0ETKqkvqH09JIVx5LK1>ioBWT(B;EG+w1}b&tK{08 z@aWF%sz#dBzusQ_bq67?y&OIiE`tgWt_g(v#^eYqc+gxcm5+he6K2;|9e!)eons#M-?A!GjQqBz& 
z^Yv7oM9$dy9L}G-n<*BXL8KAExc*v}U>J|QYuzEX>rGSHClg$Ps5$)ysXY6Hi@bu> zBOi?op%4MkC!jVe2bmo$EGDb$&D^jZTw6G)m7*FalvZPkl&kF>UzhFqZ?&7NH4Yf% zDlc4F7|l!jTP%$T6x-Lg8ctewpx2SpGaKqxTE<&}4ZgTfqeN*3cf?Di)O z>Hhi~B{zPCe*T5;M!&26M8?lvHaGJ~ID%!@yLi#O#%X0>qu*UWdwyz%V66da-pSY8?d%qhGb=Q=C?PRMjZ6r8JS%cmr&c~2^X})}G z{^YyYBV9+4t7p2*=aW~#W)^t*Inl}`F0@@t&5wDOB&4Ma&syzQ@5*Li-@j&1$;)t` ztIfL{ea9T=W3i`l>$#Qd6?37pdz;ZA1EldNbIW`(z8L zAfXcU&v=oa>n9s`$BvDsp9gi=1u18qzbj!)q4;r)o=dE@LZW36@kLg{F0k{8{LXJ= zerTaMmE1%~TG*0u8pB_=R>=dbUG^_a9DR#)*L|Cb7uY>Mq|eQ&pK9_It@C(z&uqh< z8#glJvVVm?RmyeKrM-^%7J~KV`AZR~y$(jdTN(6F|7i8E<{ur#nZxUMeWc+#*`uwF z!bqdyQM;4z{`Tj3`^zW=)mXlvmy4Te$|}-Bc`NnLj(QEPT||BNKSaX3#V-<{R_M#A z!kl`bRo3AZPt4@YTk%!EaK2M82lK<`(5uvJ8g6iB7K9bi8!XLL2HCwu(Du!B#_)yt zxqC?6Xj!Bmn{#_B%S?0@tQJAu36FFKw{e4- z9(Skt3@zVFCB*D&zBcSoGIPZaWhQT~P$l#khO+Od#0YseT)NqK_~=*0+JKMrnJ(Fl zU_-nG=~$?ekEo+8boek|@lo<|*t|Q|J_9>%x8T(5slnME>NJX+=l}z&H97Ul^47&r zsiaD&#LCvin#3qY()sP<8*`-4GVaiWx18nRuD#8)1K+w2Xx(oHcSzXg$o_N;30Au7 zkL~P_Z25(S%TxUx&HGjLX@3>OtR&{{Y{BKn>bhY;Qp{aFg?N7Sa`9>kyZf*(T9lr5 ziy{Mim5!T>c7i!`QpJPyPwvsg+R*s$;k4_lO@Z{)ptlHhTClL<$l)w@hRtixk5cBGBLcrGv8a%`oLT>DgZPxmvUm9KbErE-T(-cX1+|qH zbpYre0KTNIO+F%U=B25)SkVSc2A1#*gACXTDR_C|;kDc9Mn>5iMT-Y}8&+Pb{mE|+ zEWx7vdS%fOFhjRyS=XhR7Bv-bJ~poaxalIh_9%>UyTf zRIUxpXd>b=D24ZjU+#O&Re8C$?X8a&yxcIMtZi~QCB8qND;69duiOmE`0o5BUY^Iq zD1QX~ekMOev~j38Sv%<-!td=)em$^W(D;fRy*^(MraoA3r;|JD&dYv)4hlxPm43XO znBtU|*PKp^1<<$?hu4|_+Z+D+28PDAz7(lZJ&iGVA+^r(MV^cLVIZzc>r3oxFnEA*>&?$1i=a8ON{0}XTRlZo_Ma+sh>%u z+i6*Poz<1IfKA^YI|Me^#Fs03ZSgQ+NSUQ8#?H!B)Gnk)(72)C-mbEEIH{%Zu23P6 zfH^N!sOQ|uFNk`5)-BfT6dYMy5_nKX+%e(ur+$2RK*J`Lw*a0kk08@E@j_Tl2_ z*Qd+V{hpwj8;(NA@BkE68z}fJw>gp&OV5U{U2PYBLL0V=E3TaPGq5gA2JOi#zX28! zLjcu?j23+x4pd$kT||tu8h;hSQ6h{V{TjW8=NkTnSGS*V44Zov7*%~~a&3G~T*zI9 zooXw~R&VAA)JsZX{CX(fzTEtPZC}BOjwo|XZJy=rs~)+tsY#={yY?d`pp-{-b5lq! zWw$o!BDqZ z^{Xn7k-JzT0K;Xrk5y^ZVTg6Q$-?jj|xhu1Ac=+pQCqr>? zPg=b3;OHfz{d-5zz&wbL z&}zOdu!xb4w~xo)!Q*};6A9N*@5cD&?9yweX0p|AU|Z$1@}97ii4=?(p@L_r(%%!! 
zBXaK^TOaA}%r(U=we0mx>FtbK5_d{iYYm;ZbQP4{)sU{+ISD1$IVn6ZaH3=QJm*XJ zIpYVXS$kRLk~sdhh7T_A_OXLQolU^p8UV86@|Q)PfOad4^S)vCB>PTz(-DcAeSy&C z_jRab9fmjPu&z&*3xmd9+pS|Mhpk0kH~bym57m3k7I~Q6H|#(r5<@vA#y%nBG72U1-kQ%U9%oBQgkC(kOYfV}zk`jzp{@1K-?+XiSw( z)O#t$2cIdCQ7&XrjDYX}voc#4GsS6H>vipDK~ka3u#N+xQ0 z7*hj)^JfWcuepC6sowiZU?5i%w|TPITIAi6G53y4L)w$dkVTlf}U)h49s0MUx@>kA@Fw?x$4Y;P?<0# z#PnYhLS$gm0Jt6&S?%Y(qWTpRC;~MZ?yP~;h&NqTmJ5BG=0!m-=~#n3>Srp7A6~kA zk=tB5uuH|R&Kd4*bn284_ZxQ3_M83=Ivmbd0RZUxQDu8*C>kszdij=s?)q-dVT$Vv zIRMqS%LA}(>g3RmBHS;qc)&{B>1d7)_V~T#yMOTF$G7}x>Sz=ju0~0MJo@t&m@c1z z&tmb#mLkES3!e-u!In@a6<&}KeULil(sg97G9Jf&J7OvR4ch!Z-h4Qase;G)!jce+ zi;MKx@k~@uI;@fzk!cnwD9_ukBG2=KwJKcg((yJi_8>qCsw8IJB2%;Bhn z70?^m>Hob`yOeIPh;n)|W}@6eljC-(qQ&?3PIZwBm)#Zbg;F6gk-T=Nlo#$iDQ{3o8;hr&T_Oom zMEO;@yJRuuk#exEcZ3UqjfY@+*h-f}kETR_r7gPrQ#gjlAT@TVBl)rQOjR$SJ2rR8BKDKKJNR6RfJ1S zo??uU9~^`bMt=vE>&^@_pUn|q-@9LJg(Rlj_r3~6Od5cKzt_WqdO8Z1@KI!OWTCx{ z`q$62<7w;Pqe*sq3v0C05n?sbijC`TA};(9UAsZbx3DLLfnAN-Q2}{vW_?;>AYgKee>X`(6Pr92`8?o|$(ZLbg=@vUgnb zn!u}Q?&N1}>X}Zy7-h!k65;xcv0cMLeOmKlOLiuYT$Y>Cc`Rp~tA0hF+JecM8Sj0= zJAQBbbKxBGXF@Cun6=3-st{nyKY>-h#>`F3#!-K@YPcU1TQ#w7LJ~{dbj6sX>WyMn8s_7`h>RkJ*R!f3$O>>5y`Zx6VY%>)0;VDW0 zvDrEC_W9|gA6gr>@X58QVta#B|sG`uHGOa#X1OU9}4kck{Mwfp~E$ z@*kt1rTujiDI+Fw8Lug85q4+Zw24QwTZafrQ&iOI1ECeS1&d36rS~3*^8;?6XCCj> z6?=n%@QL~aSp9O!DJ@pEI>_t3qDu%V3l;mwx+y3LWXj>z1ijC${+c-z8@{5aPwF1u zyG@2_XgEt#uVqKv!}IpFo3&)lUk_RcL*k%sp_FL(^yG`C4s3QB+1R1dsRq8CHh>fm z_T}d<4&#PtqW34-Ij(4y88mkkNc-y0nm5#+wH&T_Oplry5k5^5eljpHFuz*x4sct} z(&Nor_0!O!Wx>z;lJp)7NIQLJ6)XtZ8Kocur6<$maG>8{G^#Xe?x-8eJ#23*M)hKT zm0&biA3!T(P6nq}e*;#H!3$?W6$wd6n#Ap78CZhnXF4^xD)KC&NB=P;Vj+Kpa{_&z zguxMh;0U&YxJM^xC!?$_`NqfTf+r)(!D7Idu{G(?i#_KNx@tmws<+c6NCzsU-$_Iy7EwQ02vD7HO{y zr9-IC=*1x%7<=M#;p}n!+*l_3de?>2sWq_u+TnfPUE3yw0sj^ww28(Gp((LSID_w5 zQ0Kulqg(M*HMudpsnVX8**oz-B8l8t0oHVTPKa&E(b6BwwC0*#Cn8p$0t#c{SaZjxiDDwH)kDjARTeZvU51h3+Ue*};bqP)x6S7Gq*V9Pp4& z;<{4^(mkR=vOihlwZpyUb`GWRKmbCM-zVf(q=*BdOJsevaaQdX)TD9B?Br>**YzD8 
z_5G}o8#w2@b|LL?-^T_dE#4=-NM~GxX@;RHc%I^g;pYiv%n#4$MTIg5^0rJEp5@)j zaC>LMFGhqh%X{F(b2cI1U4_1yFj#L1>e%0y99=d@-OJn!EG2{7lQk)$m&WjMa`a?# zKg?N{8gwqdqZKHkvrXJ7;-mOLi)ZuKv?EaVv)r3Bn5LxphX8;I$yjv#&bh#Dwp z!84^+>7b($hb^jR)0!dL{?MfLf}pN{Z+-k@lz?1BMH;cGF(s6lhyYAa;Y)`Ugs)hn zucw<+J++)grkp!P>}a0zxAqWpA(!FlD1(XtphLtjAKpmWtbL}FQ`a^B%@2eO(1S)x zE@t&s|6?@37OZwG{ISWC#|cTLD!5_)!^xP8IgnW`Igpk~EYx!#nuHt~U*zxeFa5zJ z0$7!M*Uj5JK~C~npFz+_D^OsgE3mVc+XyOd8-5VOopp4u9}>ldxGA|r*v|@}L{kqB zeRe4Q_s&WO8#2`@N%kSFuQ*Jq#Mjr&DJ2ZLao{rC3TiBlC+bfRYO5XGuQMr#B?q85z)aupJHo<2>h{P)78n; zSB`mhf?^|2bPApM@o4*B_TfFLF%F^%qXzd*0F3~*d09iKI1e<(<7F5ZR|klMWBShW zwitw5u69aPTI)}AoRoDGT|c&yVOri7g4gs0`yJd%>C{AF^(%@P$3beQSH-i9-3T~e zkm4uAhq)VAl%NKP8-LW{O1K}x9S)`ofk5C3yjqn_Iu)WS$duk=i^5?*+x0S=vc6dE767f28?} zMpyl%Jt528Jdv3_k$onX<7p_qlKdd`Z15zoQ#aT7++l$-y<(vbB^V-;gR&Y)%oBA<85W&WS2aj_ z5U|-Fc(ypkzbf*WORK@7Chx-SRaKvb_H)x61R@Kdlzjt1-<-q*w4b(D0!=&f^bs0% znqpkM*^PW4)O)qJZI}7YslUbL~6YjTRZs;TL7V6obV3j6^;$ z^u$C!+CspPs?TT7!>r>?XQbw%5`C#u)n@FOW~_DYiVl8a6KN%?+|OJSvmPX{-g%q zy=%j$_tFMI+>Bkf@5bA8dohp)&GeOAwEYQOf%8@3F?$2g)$Lqd$Sy*xR@$mWj?7z4Rz z?5Mj!)f*8rckT*F&FvDnBUk_g6y*dAkpY`9ckHLDCD$aFc5P=dtCJ#M<#4wRn6+Me zNdhw-0|Oxih;cWeoKVhpX#n*y=`$s|DO49Mnieth&+eyW2)U|BlUT!@`S7o4sdJx+ zwxu$Bw3{K8%JOKpv@eKswhA_*uVZ!-erTZaC)rnT9t~bXK-KlN9gPJIkF;o7_)%?y zTB&&H3+{hoXlYH8OWdnGsrrh_X7DwdV(1&ogZg#D>kYx2AoIoMRCS3NissvoFVQz@ zz!*cp=|Qr@RH_PDjI7r{{B=*1_Eleei4x8Pn@=ZyH4cf+H#rH=X0kVE2_7G z;AoJc4!qEE@Uo*SQS0?%x4qA!tpJ^9pV}AyqzQ9MF>%iV12nE^0tZ=vqTpxCLN|s- zLQbwtkzWoE84jWGWcN<%N|;3O?<{S3xn7@QE?V}wcIX6v1UAVg@4Y(}oO{(N89xg) zLi4Sd6lIQV){>acw9ORcgF1u^jc;o^iM!dp{q*29D z!(}hjaAkC&W>ZU>J5Spb z+G?p~e=KyXR#kE|Sy$EOPpp5KWnlI)v1yF3@U_?Ce1je?aN!xPKkor%5481Kw3?`+ zaO!wlOk^6)#S`a-nySuH*xwv&nO@?Ov~qHLjXMe&@sjT8J1KSCoEev<Itkez)av!=V|%WU%I7}tr+6l`?$I-8%=jBk^8-lW;s9ahhNabAgi`qpxH z)BR72fi*FMSt{;`Dd&RoyuLr5Sad!S^|h?f2iqq2f{ab}uQ@lQ@yVOEWmbf{qFSdJ 
zOc`V?D=tsbUt&QPe|a4;RCly!jk;!eeQIUKG2@rHKzh@3qm;#o@a{b*d}2Ae9@`)05PLZ%3LLAM_;jZ(jqeJF@5J!AmqmH2oGRA5d36Jvlxy1;ZH%f=JOg0Ye8aQ#%7>fNF$z zROySQs}|-;pL-)j^(-C@v6M+m`7S+cP5Fsa^|U`1bRK4VqRj+Q!B)PLK3I_7`}6#L z#M7M_zB{{O!OV7HPi059Z;tCcDwmlOM%eXr2$nA{TikFW8lPbpZAmHl?I`~OPTMBA z*Gw1s#r*k(=U~HcR-mv?gPkNLhdYGKs1|(`ia~ZITY4{r+I4mT9o*IYU|(RcFc}}| z%kI?Mk1`tOc&(6IB_1;AzCOHX*a5I0`qQN%2o}wPP~3;Xq$2H~6#jZ9E{4c)CHXeV z`mrjH*3IVAuxgc~{K39FsrZ{p+GHx)l@f)9y4~)IyCqEA#q|8LuEM?JK0JCday8&5 zI8X&5c0Q36tfsgy zf?2I5m0w+NPF7E@$B**!PP$4ab@l2DX{dlu7JlP{iUA*LXBVUjUB&nh1q$4>k`Dfz zV0tBrsIBk}2fy6B$>V>OV`_*9ZTdY-R>kRN8jbBAg5_bK{h!`@^`-Itu}vD-xs@`ePLeY0z;UJnUn=9vWJbuHIpwQdHEMA8YhR4X zH*}N!b0dSI;99A-qgo*{RZG9V+#XG(Y`$YtR(+y+(2JIr%SM8it|T)}aNmq|TQZIF zzxgfe&~&AZW@j=RFzH^Iz+STQu)FAJ~{Dh#SAK!Ea(Z|81Ao&|Gp_^q{%E&a8y(1To-$# zw(rH#HcbG>2b=$o>XUJ78Eq{F?v+1GIqieR`r{P?Vy!gQx;2h8X4OZ+dN zFlJ1)wG7Dbtc-YhO=z~22-+U{l<$8-=P3xbwG=+RI>juuHN{|zyzW<&@6!D_w(dJh z?{|g<(oy454WDyfDn95GX2BbFQnt7iwx&J{GVnGg^;*J8LEG~4&suSaM?H(}RMok= zN=k~E^XKZIYNiyjnEe*jhqc!$YUZ?BQ|Nm7f~2NV8TTwrSJJCDEp|N{rOxS+YJbp6 z#SKOP>2JQXy#>Q=F|;&nQ3b*F!8MRuVup@j007<0!n{yoKutP2?ATM( z2^I-VFht~15WR0Bh+;18=`Uqfw=!ITJStC^jUV%SWtj1M$tTz*=>*I?cP#|SS;XUL zxf=f;poOOR&Y?2Rc1_F2{DZyFvPJ@Bt?L#|al;?bmZWy~^-09i^Ru$BTofQ}e3mlisV5; zM$ffv5avUp3ZLyapxP;&_Pr-o?WT2y%xBX{K&dCiaH=){zwx1nCaO``pqXP@_esB< zA-VjrnNd|0gT4^+CDckE;DGu+o0+Lg4Yn&X9lxJRc?`YPJ``PX*zC*bzl)Xfpv)03kU77RH%bOSd zaEz$R#WWa3v=!@B+lKbyb+xQLOGFuBOj(G!on|}HP#LNxCp@q2p1ezL77NuYm9(!?HndXt55W=Ac7Qjt zKO+^_qYMki=!wvg4_!?L(v)istg6;3+Z?6~$(P-E-r4la89Gb*-BNKX2rPbxHPpB0 zZ&&jyp!J@ifnm#Rmm$q8;lcF^E0*UHX&r42Jdy^@8E!L=c8lXArhnV$;j+>X)GzAZ z{hh0IA|2K{w$XjJp$^?#Mw*mHZ2<|{uJ7fU$>v0H;prI~jgIicbalO8Gv?K&h%RoC zl5)7OU|I}9Da%FdMBOK}?9YhMdI{L;PzjnU^EmJ|f$xaC9o?5Z-ALVrM4HnlT zYwaNvBznaA#~7oYRR5F7JFYGszH&_#mE+>W65Pceuy>y)cji4swYX?HgFHC-`3Z0%^gyfk6%^OCG&4K zih9FkI6PsFY2$*{#r~+@9$pMb|8M2f8GsMM;MW3DU&p04&z$FjxU^)C(@xC zS%P(cpZ7Ied)}80Imq=$5-*8SV2%i0dF^M%NhC%qO3K%|ag4SU1Aa!UDaxo|3AKJs6 
z)ci8VLb=IGFl=K=WtiYDLYccaEqc1a2<3)qpR}Dv+XNrs%vmxeR=pr0@epq&rs-B)x zd)~Fj-U>R{XoBhwHJ(ju57Q9vae;*>U)RYq9yO)r2F7P_Mp+tZSle9H%_DDf4lw3? ze=w{Q4)5xs6#)3R&sM2VOYRRHW8619-0MRX;0Ml+5jgi>V9)4VB){?5tm3+N zgit3|A%YcllpsaqQdOL0(TsCrwdLre78C>cxz4YYMTv)K)+JkYc8)_oZfliPzE{iz z15B&jsj;)8PH(pjk#DL>0>ri(ml431{voQ6@zDb2Kq=z5GL^lwPv#a^HIpUi_Dh6LD!sSsJZw$Ve7CNYRBh?W?ef9S z#m=ZA%yWb^QAcA)k~ja!sX^KxXYlK&ki8B>@E*$dz^qwXe2C7og{328skq$4gE6v@ zgxpn{DRF84Sdj7dNOmDnUvDK}Nyy4;saRnDg>WQ(wh~LucG#l~REHX^>88uzBMJ7G zh_NQ?;_;n<-7SA(Zhapcl*LHFN%Gs?aqCma+U0hRr-FtWYn`24Do2Lyin~RFu%$OI zNi;#D!0jEvK0Xemoz-E}EX=P`2QWH~px;>J`;g=XBqpT$=!kaW}E zwLYd6&ZyHfVM>)bn*#+W8rnZi&)~9t^{rPP)udQjbze`XSk9l4u)CX++UB z(3}&`_iu%5Tro1+{Gh$CYGB&bWTHt+XE2%K%i^-!1a&)Upi(S#OJ%RWBeSaAd4^VS zgAby9e!9y~bsg=Z3V&6_cH7~s(&tLvjvlJEm35Hz`+ zH`7V(ZQp7cdEa>lGpV%O2O~N4E5b34-;u_- zz1P(xnJhK3bLLT7MBR7WBVL11bHUg<{^_o24FxlFH3{*lEW*CmT-XjZN>WC0%lY6$ zj18wq%GSz^M#H2GOQ6Y_lseiUE08=}*m^8^o z5q zKUI?Pi7E^VmGse9Pw3kY8+hKYob0o`EoeQZ?XH&q{wv+x}AL6B-Vq9&o^$Qt=Y&&Kfsr z7jhCp!qQ0U63tzBrKogZTKAbq)iQPPsao-+cjZ6@!})9LBiV#Vk(<%dR-z} zB0*6qFA_4>X=dT7Tz@l}eP5NM`)?Jm#b#0r!PTkZYsrg~q{?IU@sTR^^H?u$uX=1t z)Z^0}*S`((n1=bohDvS^)f|fLL%sb2WN5!;5-82bD3U108(9QxKkv#^P%;1l}KGk=ZjUM~LngKc#N3`I3he8q*M_U%(O!pRCe$mvHx zp~2joeY-kdm%6A+S30{%>wuOQ^W}Vo4YUaNrHC?{SrMZ3Y?VY_VSHpFpOJQW%ju${ zbpBpi&k`QX_Lo5qDKjO~|3LD#KnW84e&lh6j}j=w=ocL!34>`n5S~%%8Z=4kJtXav zz6X~OpLMJWuFFdv=c}H#v)sML78SgBNN@eu&@yyDZ&UH_x%LJAYClkxMMG(+_y-GO zeT}XwT_`$*NY89ulS&X6ptp-aKYB~x-rR8Ryr)%exD#eo?CtPc*pFueVJz`Nyj zn%#=E1}eaIUgW0DGM*)2uW45UmB^(<9_B*+Xz-4u$E^>2D*2cf;8A&i*-!Ln>b7HAn{aEb>+sm+$H|GMse?EbjkR z+nNgKYIM=6Gpp z+oBLpjn7LbzmgTH3uc0B2XomoKRaSSr*N_-oP7q*-E>1TWgJ}#owgyEdB^kil zBAeW1&ZIZ=!65C|M;-=4jEe!VVHI^-dAPFHxk@eByn5cXizNLe3qVOG((%9i*r$;mWJV*cU3wxA93jIjv-j^FaEN zZ?moV@|Hx>C7LbkV)09aC<|`p2<9)*_Ru}72I`s%I8b- z+qU*?SB9M#HiRG5Wh&x4%%k-o({pVna^)SBL;_{|05X28MKWY*J>vxfwS?nb4+yg zR3Voq+5D(Kp%I5eopuz`U5o^~WaXxqa~Y{yE2WZjccpb{S|7`;`rt&!Q~NpM2h(rn zO~Z=kze}!#kdYe}oBH^axm2pWJ8B05?Vud*{hUO6h-_BwKH{tx7F6ugSEvV7G^lgV 
zcNl&Bo-lWpZ$2*Wv-8>zt4q&sQSDP2QW`EbcCG*=@lQZ6;EPUK;BmPzvxth4KIP~e z-1H%eQzMO|m&6|;wxrk861oX*&(v2 zXy5~|0tiQGn*l94UA@CvC2QTF-ZeNspR+dcxr=4!j@0&j^rt09k6hn;^(@qc>U;Al za$L93{cv38&X0$|y>@dotojsjk>F%Jn0BwZ&0EUZ`OW>GPRRc%Urfi1c5)+%8&N?n zQ9gYkytSV4E56hUD=8nkyqrx~7Syn(_DPqChXTe;tUP~Quq)9KV+0cEMWOHCmNg6( zWZm1zhJXiVxJTv|AVN&mz?c3j9}f}^)fwGe!>NU?2Ow*&+~ze6TdW&e#3raYjP2ZH zU&#UATaBqhP!*zPBVaC~HQi@^Hi!l>1{oho>$$w8DJ=Po8^OcF!yR;)UATb=kQ|Xf zu6+~-M9yy1FAYh1-n5lrdE~wFv+q`mVCF`4hR#K+Zi@Ka*Xj#}Mo&F%@bS*0Iw?r_3VK|!oUG+>`q~0f^db_B<8#X|UqO?A>+;U2 z`DsX0A&2H zqGQ53+az(r>*hZ1-h8=8j_5@@NIJ`LI*Mwi09KK#!>iCwLyvUOXzgkjwJ#85FoOoi zoyndl;^~AWJs45B+-D6-b4t;N$&2bMy|_iO)M-yan1XEcG8=D<$_w#V9e|;V_Tq zOP><(%DK0ps|5Y&(tryj9%bB9xSD9PzC4&1V!R1 za5;(p2TKXp7+`ugr9|ly9A5WS zCI5Q~{SJa=(En@dK8{^5owha>&18>ia0k$n9`6y9vD6HFFLgX>S$%CH<}{1jw;ey-O3P_CYzgZ}dwE=O z${>1Hv`l!=X)z*AZ@AIR#_+;-tgRb9S?mR&=oDyn{@y8jk0~YB;d9-DKt7%9ORXhG zX%&NBJ<%f&DTDd}%!?+c*5@pFEYLP@thm8=h7le+)(#{Ivv_FWJ3NrG@WG9ofp2cs ziWSxBY5Ld~H1m6wRF92saxv#^&sK-xx{Rd51g%I}!$zErx`hL>-24@|>IW0Hi6=21 z_Hd7{k?CdOoOWZt_>Ve9o-|Rk?mMNh{A%OanqkxWr@V09JgnfFwlqHg?iqIMtA%ua z)O3jFy#{m~C ztQkt0;?@K|*$xHL#rm|tbuS4QmR+xm5nath)Pu>i9*IXMjHy6?6mP zUN?C5uY01&z3&rHQ*}~wd_+kv8P4~p{7fr!aH2Fg>Ua-tRv#drY;|4OZtgpPs{uD621evrSA~4YEzek7uae zs5~$FYD@6Mrs}%G@xD|s+qtkv0MQPf`05Jm-$gl}S{{%-J2SB~K_zPtU?7Z7?OCkn>)x`SJ^%(hL<6Tjg&6{8-o*23 zCAlWlU5!EEXLa@96pPmI8+q02NqJhH9hpWvMZFPSyzYxXk4dUz@6 z4B79`{!u@WnH9$=NCNq(k|%R0T)p}$9kFWs~8udj9iH zX5soIkmE-j*N?Y1T!b$2U#dj~VQBeFeV>kyq>gTBH*Jy)*xl%Iw@+`h`#0ryKSE%f z!T8;XG}_vHwSyP51H5GBr=H~h0q*u@fc|({EpTMxLMxrc)6C^1x})%^em~gp9@pes zn=(K#(lPRW%X;lq4MW!tpymSYljkujY3mORv&7wh``ksZAej}kA!!OwlAKn{&KGjfpQtdyDLEh<$=x*#3mZJ|2-jC>E8m@20l^&3 zD5H!j4hs@-nP(~wls{>U2Pafoibkgrtzj4`yXT$~rsxqx9ENeE$CGF=dIeJz@s zp-lnQ>ucH|G`Q)UypLiog))+NfXJ||SeCj4p7R>Ob*=*W8NT0ry0G4SXVyB1K`-T0 zDz*v}i(V|fywVLi_6>6>w3z_;I(S;&Vz=5bD3-DfEKR2CX!!Ka4d%$Wil1O#ZGQGE zC|1>Vepweul}PCf$E(5%m6LBRk=c-u?I0aN`nrWL+GkDrqcCT?_+K%>&I1b)S{Kd%=)Q 
z@(u;G{E-#~{J3<+3wwB~!W>krXB)q=`r4o;bh5a5k+ad(S)g>GASietDnw5Q6uY#7 zl+`^{ybEaa0izp3V4;8b{`RxWpV#v^=Ai5#gYQrTqId{8L!-uV2kcs+V?cTL$(l71 zvPudcGA@m~z2-W9+U@tcc#_RkI7%8_D-s z8NNYpnqfUEBAc7Yle_BxFTcOpEyt`W|F;v+#_{- zn92>nA3S!({?)pB%qZ%QXx@*Ur2#@*b^Ip|&}@aH%6;#?-cw9|ooM*sD$czblAebY|_7;RC+Q?@n(CL=dE_`w(Cc zGyA~n^Bb^N^1KDWLp{{o9;X?2p4TvbXfwB&3IA<7PiN?T_%@h>10%e&~vw^#Da%pi(i zC3tQIhCtqaM7LIYwRU{aDWq@kTanH0uQb7s%zV1!7F2h*w^3c(aW~F7hf`yZVCUF2 znXC4*dg%=MWL&eUc$uf?wmuk6^TVBK(6o|C!6hO5pcBIt z|E-mL3b4Ui=jQa6wtuPay4AVw|J#=F9a=E>3hItvpe3C@9!&Q_?YNq(JB@ZSAGvYv zSUK-+-h#aP+qVBS(R&A!W3wqSyOyn=)*H=bJf*gLR^G4s>48XKf9g(s-EgSdUI+wNt!p zZ8;H?vU|-hy@X5Pf?fRDo1V1QVjfm#_SH6EwNdZM@OH;uJt#OX=(&;2u3jp~&Z#cXiD z28Z}eT34^-MPjP~i*Ik+&R37sA}J%o4h-;?9NckBWIp^Qs%P6CBd;%th_V~J9JPOWZBS6nfg=^zlH>J+HLi!{B) zb+NUS1%2~#B^US3A!h2uv#a)%hR>pfEFEZ3t`0AfAl)uUE$3ExZEnV{*l&Xdet1;Qrp!@ehjLU(I22JCTi1X`|l z?kvIPn)Z^9aSqySf?8P{yQ=znK8jRe&&R<{4NRO)Rls4V8?)q)ex^^Ju)B?J@OZWE zX^vY7xP3ZuaSjY|zX6`c^1A7MN1|T*RHIi8m0$+QXKJq}PX3<6RYY^fLn&F-^F2$0 zom+s(AurfC;J?XN5#vz;TnTw(z7zCK8>wR+6v{GV$rwR4*7qre`-*X;3bRyM2w!Q^)lg=j@cP@k2GKESA?O9rVoj-P~jZU zt5FkAx3%;OM6QrD(QNAswmQiv9|g4#8$Y!x)6Uqh3(t|&h!zx?)J659xOz9-CbZqj04HT-kv>g)9#pe z10&0%Lv51AnwZV(z;;Ri66e8;#?WEX@ce`UAHM7yV@TAyG^u*oQKNCJxS@JMV{~3N zCQN&mE3`GQ2yDrBZ)ZzDPcO&mi2d71V7tTUaE-Xd?JIALm@7Z8rWN-7ce40R4TMp? z1aw1#VqP6+>OssG1d%JmBlhbZ#?`s1BX2#W8MpR2CsG4DfMb(tZ$u|SxX0H4S% z$+)dNCxNqYvFRFW|Ig;K5ezwaUQG8PPoGiHxWNLqltyWzf>&<%UM+R#(YX2_Aip8b zR-<5eIz*D7AnFp*tE8w%2cUaLEY7Y4s&fAhq@6#9_AU6FZI@Y&{#2}yBm!Tmaj@}9 zSp*qk#5Dj2US|>G71P95i~jaq`GnJEs~8XfC|riC)Ll}UN{u}sRk62Ms3J6 zImh|9ck5Q{L_cS<*2ikSfQ2AJb)_z80e8ifL1F+hBkcD1WSvko6PdW}QQszLp08W+ zOI8e!;jI|fGH@j;nq=l3XpuP5a;?ri|Hnfunk`zF~5aOVDg zea7a(?h69doq#3_U?Tr70GXYgEpibU^9>l|3a3Df~CIC6e z`SI9hPMy+sznxOWC^ReP4d1oQUEt@$8c{56!$*rH$x_&&=6m$iUP|4O>?gWwzQ>! 
zsBd(T9C9#b#(i)QKRzeNbh%?@-{z{;KwXdXyG`GSC_4TCx%L5={in|JZ$_TK21mGN z6khM!YSdHdf{yX&p@z&W^S!zmUr0k}kDfsf>n(H(h+TxYnB}th2M${F0I+PV{X^$dX{r&1P8}&-+2Z)kTuR^fnAn zBK6RYKQby3^mT9#l4`l^@j7(ilCwJW z>R{;Q1^jZi&G&c#>$ZrK_{r}8k7VsIQevJZ27hIJ`=A^kOB~g%L;U>nLWl`_^_bPN z@&$4?hV?ofyU1Iixcp7sv~BCkRjgJn_&ozp0v>*1YdB&SOcACU zJ*(xTEjeFZRF!%c14Coh7x2@8^$;BKUu9gL3@YgAfisvk?{kfB*}2x{;5wc`&#s7& zWD$w%Paq@J|2eSTS*}5MyV1@X=l_Ak&7FMC5E2^#yDg<;+NbgOe~ardW#L>gDOE!` z?F4*HIz05RCIEVP1RtdKP+N3^7-trkyeX4x@}5hrt9IsuX9L@4&)ye#$agkfX?0Tv zNx|>Ib`N}*Enmd}%-A#!?k_mvd65E*JyjQ~wm^Qk89diiKE@KaV&>_U9c>@JJ> z#d0SoY_1>=ewgl9_9x?w`ry}eg@3jn>ea(5dHHZX4W~_-mT4@x`4}Ixe!zkqYp##4 z?s$&>zdXiGvN{WDMhRjI|W$W^?*~z+^Zvo5D zB96QSz@%9K8C{nMvt0@NsRB@K%EG+sk$p?b!+JozS4_nNv$QbNqVjvC(t=Xpo3^9; zV0~-wP>1+1su~~J@%-w?xfOyNU~W|OkC_dL<*efBJ0{yw$u zrn^o15q2bTZKRKP3~}AzChY9!=%@%NiNz&MGriF4D|db9lR!7llYHTpCX@xsG7?$U-^pxv2wlYEZSXD@Ki?s6M`V|6e? zhg=sBpusv^0DR%4ZJ%>baA!s(+R~thrjUFS+*Z?x6V@IDi60E>>OVT&r_9~+&{rk; z-=6jh%?HCL@`C;`9h*YL3~pA2n+pW^pN^OM+&S8rp9OcY%)0x-UX>6B1tkxb{jHh|4YL9GO6lCa6sg1Ah z?CPqOdADw~=9wp%e!@tpdU`ZhSlU!TLas&nAL#T0M_Qv3s3LFzZy=lIG)qV9-ZkjK z^AjA&JvWS3eoo~#H;u}t<+*fo30}+iygBEq=1kaAs0^6l5F7+E{vudTvfh2fb)y&@ z>7sXOgQ)NOqWeXIyR!ztSwoGlrUG1%0P%ryMe2@DrlzNB>q0V}4Nn@>F&}{G?K#BJ z5^#3PE_+$QY{NEkJ$9`4yvo-1jlW~hCvfhX0KQs8wJ0!-f$Fk**7rB4gz6Ch6lf{@LJqI=~0Viqt#@|$4di)o~)-M!O`tBOzA$S>z>lPKJ23&<0depyBUA<&V5$D z%4Rtm!+4P@g=>3c%d`JiO=r5aPuKj%bGOm0j^2pJrx?(}K!?6N#+5~&zkXJ(z35uU zSw4EMHL!JUdk^ZFCcZ~Q-1q!)Xx+-L|tBamuPr-j#!y~+9XlPg8RyUQO$%EA0IBFoEZXvtJU4T4XUq6xpyOY2G8>*|yS7j`Zqz8h(`jyarxi=wFb)VB;aX}(V0D!95A z@@4lApA zj8EDwXTqmmec&ilYX_QmIx)vK z8Hkq#J(@K%r$}>|vv=7FV$pn*WQiGKPLFWT0qO%ebBLtDiA4sXm0K9q&B5OT6rbyk zWG`$0#_VL)?n0Y20sNRt5c=ew9%OKt;7rp|{T*b~8M?hR;4Vw6nXGo#=s8KL6f}CE ztTv@3OLu1V#i%CBNF5{p1Bn^DF4rl^?J%j2_ZPsOv}|Nqs3RXlgnV!HiRG;lHN3r> z+@$eQ2mACXzid1<<8=(*5T35|i!PAMVAs{|;5%}>exzxE1Lms$203W7U9U6VH$9$- zYTP-0g4&jg3V+RhFq7sn(%o=0E9)u+ID~ght^duS-bG zES|!*;zml~b~L4^P=cXQYad~bc&M#4uOH7m8LThK)Vb{bpsO@{vCBbZZ!eMDJeXXm}OU11@&S 
zu^FWYNraoeaFWhpZ1qcr@uIq222W56cm=bcm+tgNT-<+xevBAVGuH6Qc%K4ZiQDGcqo97xQN?NCll38xi=xp+Gl3k;f=@a5_JC`QIS^PVidVZSY)(Q1Ac1a}80%z^Zn>6up&DcRJd7*P^KV)0yeF zlM#lV7peJdJB*|5P}YsXeg88U^$CNw1$Z{Nu5>Vj)SP$c==i)d zOL5ZCgDU2W(^dU4~F169&^h0^miB^lC%GsH3I+V@d0!M4OD{GAP7 zyd*1g)T^G;2s*=Y0`b*fr2JE$J@T$)`Wd;YjbJks5Hbv-;Z9)4B#oaKtL zr*f9`3!dfcF_SNuPRkTkcpP6V>bf&`uL;v{$3-Z$k4vgEE)HO%c+Il8x2d;PNm4}Z zY*U*@Zs6^dR%$+b@4VrU#4I$S)xOy?Z!(>^)6(60OOjdk|7(b)P}=12@ZV$1f2@eX zRgv;vb@N@_-$bYK40O&e0w>_NB1Vo`U~FOmoP2zxw6hP|YGA7Zmc)>hiHYv^#W4Vx zQSrk{=jQa%x2^RnK5=hoAvV=9xE@f-LC60&lf*$9cr9!Gki{U}!j0v)>$7f(Yt0;y zO${8E>-%CeF4cl-D60l9Z5WB za#XcKM1U~0oTm(r{R_jIX>*&P9{=%?mQ%A#p7$P&Ov=XT6Sn+FIlzZEx65&y3~KlJ{Pm+43Fmu+`EM|EjE0F+2_cDlx_ zzk-S@iaoT@!3A-&h6}F2%-A)hMbCw^ba}v0!Nk0_xIK0EFF)2@;YSj!zYQvD!<4WAx2{`jJFfaBp9I?(%15NXc@XoJU%< zo$=^%noM6O>B(FODk~9iQBq0;9_}>ZbSfrbI6*2m39w_;O^1(x%=LNfr8CtlVn>b; z=Y4UPFE7~loxf&UU;GPW4$=A7Is&ZuJ@VD4bY=F+!E2Lbx)0J+UStJ~eP`%Voz%aU zJ0CA8eBEDA1-Hc=rq{J74&K+@Jp}zBoP=}aB=PMi5Vl@@pKA;G4dX6meZSY5s2_p6 zG-V(|hEJWMS;6I6mnOSolyvZim!jWty4~N;Fy2r7f(Pk&2L7ygDWNmCgy$Cte=Fm5 zN$s|JHWWnV0fJoY9_T|9y&>T_ZG53ZD*n^k;d5&i9tM6Y@C2gMGkw2s z+V$Z{?>Cm7reGrTO?L8pLkCW}01gTeUFvgUVe757q*1V{um*LYJYZ*%BZ0*AH}R2y zIF#dbgOfi+o&Z05@9#qVfV$}hVlDrJv&zin@3!Ujs%vwXZVz4d)Ox|Jiq61U>Lq^W z@zR3O&K+K2+6Kem9`?B3XCih8hIHu4z2Hpl62nG1Tm%uBfW+yE%gsin>R~^oB4~-zj^lxIw!(be${!IrNy5B^v)7et6lpC5=K))At%lTO&%M69MRMRO$15srzyo#m>RK1vq#ksf|0|*KsBjD0052Pv^j1>^>BmV?X_)X zguAQm4T;|^Wp+7mo!0uS{~H=@IW+mIAqhHI~^WJ=#_^LBp?W-bY zfW`$^TqaG5r+!O*SyuEH-?N|UFX}9C(_B5agGPI|JkRdTWBJXcu*Ymq)5FB|nGIXa zqj=Ar&=3!CNAb)-J)bs7X~0))IIly8DFlqvV=a4lM{t_bnN2-eMU9$*&#_HxXBK|7~TZSK0a5^TPVmgV17(d$j9D* zp!lJq(Ixpf-lH(qqjCBMBk5-V_!H2wWhUZIV+R*Z`c@5MbL}J8!(vEc+aN0x#h?kS zm44JV5RZRgYXr<@L(B$^j;CXqYOnu{CsnvEfL`Gpe_;TjfnU{)vMo7bRqOlJ+hi}- z|KZ8X97zE;W<*&0wklP$l9Mm3Mo^A4H5b{YEnM%r5e=89QfsRh+SDz$m28A6^#*_OXzGA7aB*qJ$>?*O^A<`FFUkIhwOlYPuU*~w{S^`!8{ zx!m20zrwA7hAi=ld*`ZaUIzmiRRpUM11tTR(2wp})B6mL#*@AVly;YCwn%%K*5&BO 
z`3fcKuQA}={%$ziss9EU3az!EyYyODF&fz;$yBQIMmo#CtltGT0CmENE&7EMMF72& zZOVl}lUeGG$;@0Xg`}hyaS)mPz~v5=LH*tf(dpsm4OFy7w3dxhUJ{ZBj6+w@tUr2j zf_R5;fF2J`&1)Atpm`!0JjANfbU>8lSd&3*DCxWw?Q}buH6R>0(p1h)RN%KYWKEFi zJ98syX)w8@c(y4^_+T_wjg zSbcp;^)HDOng8Th&=HX<5oKcJT?uOK4zH_#Iu#6SR&`~9yGPqSS@a0+^_l{NN%}3k zqjt!z9GE8@Js=UW`DVQ?&PoGfHQ9exn~%%<7^ubc0I!bvky;k$A+N!XmWVYcQDcn^ z70c6uhLr!C9i_iLT%?c4H3GUde(I1A#n!Hn&K+z}(suCzL(?Tq&7FFUg*x9olxp?o zny7m56uynm2r!p2RLIymEma_oFC@r;-SWn68rkU=5m&s7E=15rS-JkNjlTk*;d!)-ly9`+@kr}l(k5a+YUWT~*s)AL)56PnG~4h`n03|~#9 zqt-?dFjkZ>3=3E}c)6j~v_7ny(eI(`bOfjc<5;!OyITx)|OhS%q(66H^& zo2@K4CCk3Tm^0iwntDlk0$A~qCW;TR3$O7O9~MXXP6U@OkRj18^RO%|w@U%-nN5V>Z8W;Ms3AMLr;3v;b!HthR7H(p%*wG_jB@KGl zVL;s!48Q1I5Dq}{*c`-k%71jviI$ng*6Ei1MxRQt9=78hwl6xj93WJLn;31Rq3X zJ3l7YO|vz4d|sg8ItfN2ZA(!yB{z$Da3TuOJcO`VX}oyo!aZ4)>T`v>1;U8To0I?v z$4d?nh&OwGX}$1%j(7N+6|D$qM35HolIARoN2J`zU-%~6cqeehw??yVzz_NnE1%sa zCfWC!{ohio%$-)`LIF|xMALO4qI>>cRAxLwHEf8XSlg|atE!MQ6UYRwu(T~1qE2HckImIE&;jf z_`bz}Hr%i-!0l~ntQO-KAmYz*ly1sTu8h-ffaH?KRT>-fLGl@!x*=6uU*JdLVF~dp zpy-JB3++)Eq!gBLA1zWTdXCkWlsYxpq_$++Ti7tW=xT@-tKL2ZnBMj{-Xd6eO=L~9 zQ9w8*@uoMx z>f=U9l0N*!CdJ_3xRMqEHRQMboBxxTawJ8nt{vaVfbk!CJ+Ux^i+RD8ph_%_aU+A5 zp^F_O6XZ#JbL$qWoM6fo`Hm^#^^Vke0RA)_pc663L~yles)XlJ(fwtuGfvxeODb8P zTz3At7Qmgt0k&$G;yX-^79c8{9dCam|G#I$!83vWVEMI29~zg-JfX1yci?%&Po*^s z=cYQ+Sza6DreA3r>a-2Y`hO<3wx~n%vW}`x|GSK(`=Npn3@Ud zQ>NFi{U+5FDR%mOp8Q&`D%RBLg|wU!ael>ZjcKuF_?ZD!>&63MBh5L z7VZzoOZ7@`lCKffv^Hakm764dU>WO_Z4nRi!-ae=j&3P#)uUE0+lRDVU`k#aEg!y4 zg#bZ8;JV<70x3u1eg}PousUd4;HejhIfJV<=@@a+J~=-PJrx-0I&!nbJi}X?O1J#~ zqw2Z?v24HpZG_4ydy|SNl9m0SG$bmi?3Fz-vd1Grb_v-eRIdWYz+P*Fo~OLthx&+40?<0tsGK@f zU5b9~y2D9|jhtJ+vGBoc9fOoLB%hNt*{TA>M#vR8{wtEJ*wSQl9k08%yUD@4MCB>H zKVZ~`b<(XJIA>gSQT&h&{ba6M8vbGSWgd`yJ)BJiOW0H;7b84(HkNEd5|1>e_B%GV z8HFvG<_6{f`)|8?=k6^k+d82d(1@kf$s>lVOk9}4Son4Fw zQr`S1hotPFJ+ zevGLe#7tk#jC4k}--9uKAY!!P`VwkNSrk!nnT{hX^Itmn2>F-OIQHJ3fbm7x&o_Dg zLIbsvs9mh*eUX@az&vc@v+O8T$mVUe{WdXO 
z(b}Vm&%FI-?dMl*hLtKHsek;yBX{wkLWr8pvyg!Zn?ETBehGCSw)L$Zh(Z0~}T)^mu z5COl7%!>=uql@5fKLLWH6UU6H8hZNK>r7_vxBr;+EjYJEk`OTr5$* zhyzgG1V%&pyp@*`uiw^9Bb{odG2Qo$OJiYr{7)|?A1DfcODg;HfIZRH>5?j2-=m_; z7=?GH3{TTLuzCX}N>xvLo;l~FLEFc#! z{qu9W0PbH33c8vr=2hrBOhH_>o$Q@LZ7CvMcKZY;)gptKs{G&h%A@h(Rf|g#ecO9x zyTx;%-sL|Q4JnP4jtONP?>TJei;1wqfT~#tE=GI*so6sZNekkm1iZzor}G_eJ&`%a z`-r<_V>R=M_|9FJbcVu zj@K%w_e1yA+VA@Z!lBjP{7HQKd9G!bkF*5bt&!d<7kbh@;$)_FGx^Ro5_NAVKi>y-z zk3CDC;<_###8Vg;*c1$KvCYJbB*{Q;TDuER;yH~D76uFXS*4tArLdN$a34n`VIBd@ z8{>sq z?dP*lQO1W0oZnH-fMVtOGNlp^SxBA?S2{@GR zoW_FHP_~0(IF;B;div?PG0=Vrf7L!Hm%_XXsBFzZ4>s2TxBV*6mZD*@w}}G zZ!^EPZnw?wID-`kG6VeE=8E}3b8ssMtJhKbe2uh|u&Pd*vUt18PvxM|E;@!1juE>Z zp6B21_1!>24)%sM-W0QFwtu0w3X?DzyLRfFw%3DxY5xuugB3=l+>rBKvF=xeRbT|6+ z0Gmq3&L^Gh(z@zlbcn9C1?NVqcADS((f#{5i>~Vfrs)0-ba-W;`+IX%;V;V82T~Co zH>u;fB>^DV++UKr+qg`7S#`-{zP)j-FF0{emHJkG zGI`czE5)pyx;fH3o#UF_DFcR@3NY$>WcoLrg?L^rYHWW6Q%tjZXo(*FfGK=gWXN#L z+#SrFPm_Pf9h!UC!0n|tBr*O~JE!J_Yo>b+tUA%_BU}Qy6*H$iC}-}D>L>!tA0t{8 z-G~zqQimad=bo_J)E9T3tizcl z@jtC{tE+q{yOwDmJprl9*i+yyGNcE8 zo9*&7?m$te`D9(dq)uMfY2yzcu{dw=E{F4VG<>i2vye(IHOf!l{vFr(A)t%bLTfQP z!lrbb6JIXheP>ik8WV4#;*&6rAIK`tOf%5UzNUAb?>zCxcTPJ|3Np;sb@aLIlU)|7E+W+bAV<2vM z6n!|#nJDY6lnq1p5bPV@KR2O9^BqfDw@Mh|nwOyFsuEkK_&y<^)NRe;kf{feNGItm zv99FHM(lKRaw!Z+pL95nHzTv1*D?D9lMCMr)7WEssEy~7L^c?BNj2))WDrNiGWIB& z%cM=dIKO7O%v&&xti5h`oS|9`-N-bk{wCWZ?*tL~*Nz!u1G7>CbU#AedDF*UT!=91 zOjAFbx!k<@2~^~KqZ{7M#?jUsBv*O|ht5^4Tp9DG3od&Vw%~H|uD`0O{yn!hh*W`y zpeJWIb7mJdbbXX@{))`{y?Kh$(+G|Db_qw=M8IWIt#;SI4RfjKR!)*Zr|QdWXQ*bK z6q!XXnH2?oS3Z4*<+|K;N6KNXi{INXhgsbn&lWJ1)QEx#s~`=8wEEvH_2C>L|M;4*sqipl>O%i+s$-^DunGLGa*vC zvAQH)`y!Vt+M%3==Qi1EqViKsCGPnYcT-sN=!qie?BwLFE|4~TF86Qd@ZQDmM8*p; zbuXHqo7RP{JGBrt$;0J}sb_N`l&i#Vrt$kD7(EwUsS=~CKIJC~kIlTkn3|lUmlki6 zN0wh=%3~207M>+R7ET@yt57=fc~MsdKK1Q7HQ%%@(CCfeLUnTEU-=HUlr0IVKsSsR zjJ)1>)qoxPBfeV{XQxDUS*^^cEsME3Q45SO=8(#LyUVW6>vNu?`8a&OJly6 zcEIA0Rke!qMSoSDDV7+j0JWCh+X;QL8Yn@#U8u6Gxm-TBm(%yUCP(GgEyh~PdhBfv 
zZz}x@B%E3MyB!C^)Y#Ut2k&<`Ld-V&{rzj3ngZqXKFrR}UX+ruL}gD=XqSfN)Jzio z+5>U|Y}N9#_g!&6!e9#AU<1~vGpt+~2+sC}WHp-Pn5j}Xfr!T&$D+LJ1D#ZrB{trs z!?@YczXwc{>4*str}@9!mZ3P>4xmzwKRw+k_AF$G!FMvZaycNsLYvTCL7{|TKh-y|lA!YMS7#jwr) zaK|a$S42_->oHqjyk{mqyuEBUeq~UAq*DMsabrAKFk=ZvFj!B=jm-F)Cb^!ah#@lm zJ`&elZ798v`XQmb3w7)UCO-R4>MFiDkrI=%Rc*#E_^dy02gve_#GM-O7#h`MeSgUw z1`{-;xv)0B_=D*kzM#H;W@13b$A0nZdvv23ACrDBchjMf#t5483)UDVux@%T6!s+q zk`;`6`{u`G^%8{PcA(Gl)|RU;lEOcJT07F*3hd=vTB{MNfpI+$LPeU~--t-&qqM^XTS8EQOHL=_@xll|`bQ->6AA zJ1h=MD1vzou^*5}^dID&DTvz0X3+BxVwG|G$%!qZF&-{sD%wgt) z6CX*ls>f9=*XlqXma@~Lo#2RUOii-aNRngZerQk{*PFn7y9AG5VG$-4OGm|U`S)yQ za<9hdi;XMQ>>_@Y7C-0wk%$vxn;bCKsNOQSk{)_-P!P@pzIbs|v4oUo1L-wHwquLz zIgk4_OPP6i3{&^NM%~BBfFk?lj61nZgcv2(?TVshufHH6{?KZeg}75_&~B;iy9_2X zQOgMvT_`^jaUs*SS) z_UA;B=lUaGMDk_?elcN~$PsPR38dB_d8G3m9JX$^`lvi?ihqqis+Z>HKcSlvX2x$@ zJsT^pIFsU*Z9#@G*{xpZwm#3}J^tc3$^e%~*v>S$IK$;A`wBwb^sHY!XOe6ty$w~M zaUKk#XsZ|P|1N_qWW=7j zV^6&LgDrI#*p<7=4WHd7CMNuAGW<})KiM!ZVcpd`!;p+kJ=cfTvKJ z6n7Pi$l+{tq8Myog@^R7T!n?p#k3IrfXFNLYeM2&o=aF-_t`$DFHo zbbOJrQzT3i4GZX5MDi;k5u!K4u)61BZl-sZommu*{(qZyez7S{25yo@ng)n`h=kj_ z6O^8#te*w^8I3?Mu)vz>RTodZCHMrm$bF9s9^>DR`XYmGyUeUyhu(jHTl{rRO2-Qg~a%pE{n>ZTR4~F^b#WOs+kM8`XI8*@(og+&=tzj z9@Xn9;PxSI25D+rdkvqZ6crU`Nm#moaffV~!lTBx_uM+ECEw>{vvNAz4mloWdf$ow zt>_>poZ*vi7^=VMT46|xDQ<0TeaC_r;|uMix7bqR|J%T8$`bL=wdDAhFJDjvBf;Q2 zUf*EvwDdM(Im`mcaK`Pa9Xz>b!rmy-*HfqUxnRVPUK*W64GJQ@R4$~{o|^5qFdeMP{(4v-;j$OK^}cphg-nvL>U(S3>kZ(s6d)xD};&WAs`S6XEu>2afBHU)ZapE?8C^=fq%7b;5g!RY12MKx51bc_iXE{$uIw5YjS( zy~6Zr!_(!$Vqzv_R@YJEw|#H;rSsVo}ss9$|m#?TG~DdGg~%Jh{JbM0g2m-Ldieq3^kK5IUJFxG=EU5k+F@tgiCzAoO#C@k~+L zweb#}@0@PGfIZ-EqMc%Dn4%pa0!)Q7#G@k@tLVm(%fq_t&^2 z9)`M=qQqh%|EyVO=6F~|pRRiwr_=1}q|e{wWDg5!>s~Vvg*08NbbE=Nu{ovGCB|{x zO5yoLl5+9pTYb%`cNWsHCgdr$XXdNAq#49_9DFx+zsy0ebj;q=z4sy!9@^$xg_WXm zW_gMMvK+O)uf2-?mSxh_Q_910L>3?;5`xtb&Zfbg8JA~bnB0Uzi_#PY?kgiY(K+&kzUX0_{Ok(#(eBu4cTZ}MG-{WV%eow^ 
zRSzsL%Xqbavb;NHkyk0IuRG{0yryR{Y@wpZah+6Q(-*IVHg{ENcx@KL{FZp<<+z2Jt~3&Pu57A~wvNN7&1!Cm5xfJ)M-KdzdD%OgC`W&F zCDhh~+p`_hWmT})-rDLnFfd@fCxibpi*M%F+LP=xK(+&n?qD(t{hL)88yD*n&GVAr zl5;MJN&d=@^6dan?zZ=s(l|j_D5d>3-_ACoU`oFT9;eB|##sj#`%(NsUDatxM0TkD z-p4b)@k*t*l}fH*-hjax%piGHl@GI9`dVrKLo1R_(j||uG4kXX!#dYt&G?S^uZ~Um zb>XKCT8X~T&9RttQN-EJUc*ztOh$Y*godchtF5o!{X8=|(3#eJoAZ6jtQ z);grN?y95qw?3{JHGOnBe=H>u?*B`;x59_ATyQn5i>$}uhlr*?Cd>Slxv*lF6juX- z-@T1m16+LqLj!hW(luSfvwhNJ*#;5k^SN_nGEDgl{`9%mLfrGbe976bE^9~A%UZu% zZ)kZg@Z&UtgeQfEu*c}bs+JB_@_4GBa+Wf-6iG|jOs59C*r(!J@+lx%kiN#oE| zT*adgO1aro8a74yRz*vN$#qc&f2j_B9i?D$PaJX^k`~Gwv7Fb?3lLRpHjn16@5{Ow z(G)aQJya>_!#Lxeow__G95&K*x7`GMCJ17R0#lcJdU{qT?tXYp(n9*Gx66e^F0(&Q zs!vleHtxAnf|HpHW2IN7-U@O}`qv5P`J-{&{2&J%u`4^v9DJ&AwaRNU$)4In4s$U- zrv7SijE(Y9&i}oZ6IVf>lT(=J-RtI6Zc-vm5#RWCOMhW1yU4+9$?R_Q`lw)QYIklA z8l5r{m%4>>C-N%C&CQl% z2YHE^nYn44&ybBGW5wR|iph|xTf9|zzsIQk@kGbf>mFeecS<4+l}lURL_Wq>dXcj< zZfx4MSj<@$i%xg(b@0S6$TV%DaX@68_9uz9wKpAv({s~Z+5z!91{2mv?4~L1mKJk8 zY}>P5C~cuwH(o6Ks)TEp&5MI|LdT%ADC>705<;N}hKUf-xs%)1ec^{;lDA#JPb%+C z#q4uZw5IgMCHDkLuEj~MjGw)(z*9f{22^In+6HM|i}$iQ@Nbn$;}T}?Q-n5$7VRab z4_1X%d&O|NeW0)VT%2qiT_JhBHlOH%kLItUxZ>0!&n3M{&NP~X?{7<%3ZGtZ zQt0Rv-KZ`KlAFl3w9_~mrD)pg_g4F{XOA3W(tx-9r$>fUNf&x6&X=>fmOC##aIZ_= z4^O{Xpr`glD6imqU-8Dq>t-T@rN2b+2mWs7L^Q;`6Q2}cbTgV2$Q87}73K*{?wR7J zIvO)nrrp8t#jKy0mG2=i*HG0ZpGZ?xApA4) zCBfyYB#0WjXrgWfTH4NO-g56N>5VlJHD-^W@fpcZ!#{nn(oETwHTT(M5C6VIB0ubQ zV53C*k~cDVao~%71n0AEqrrN1fOze*zm9TVry{dbHcL) zsBq@zJv=tOZ*J@Ir~j2f!HqOs7&C--?aR zwW#vsi!mfVbGl2N&y(}xJ8+>%-RU>&pS5E4+M`#>_q6X^z83EHcd?@_btEo#Cp9MS z>#LHZ!;aT96sN9ITxxiFIj6CcKRcZ!`+-Etm(PrsoFA+84|J%kOqVj9GPTqUzg8ca zSQyiN8$Wip@h$fDbwwNi;G26&`R>qDz`uqKqNJv`Cc#^O{#Y>c*;)%RT*~A%j9djd zt#b@e9)8K-JR?zaL#qE}AG_+_&>5d)%D}J7cN=mv?Q_+v&Aq1;i?t|{#zc{7-5>2$ z@&4=9d&N!r9l9O@>ZD>e64e7HNx*!e@L4%k?SGjyxBd6t!}9vRt8< z+TY)A^aiv}>=cyk~hjt*rc+r3Tm^{d6+E zfLm2?V58@3&*@#?oSG}@EiAbw(o;90$Ff%^x9dyo8qil|GH{1HMhYJ`VwRMGTchl> zq2o3Sho;~Eiy1IEKj+pQg9}hO)4wO3mvZhId~G%ByTRCe#rtpyd=E${Qac>K%zD=4 
zZJqtP)^Jl|uW-oQ);yao$-3e)!)3uZ?r@P;YNZ=G3*(5)2(8{h`VUot;-0nSeBdy% zU;Ri(o89yP+%LY5294uUF}p(~1zN%o@TB!8=I0B6)3x2cO~1nUpCEAz#9ov7Mx>{J z8xk+5K2fzX%rkpo;Q^mzvPn!ws!DBhIYo{wt%j=NZ0L_fJuL=xRmDGR&#KN=D*uwE zh@(2~vm&#mCBJfa*_Avw65mIC?G+81w*losN_v9LizT)rj62F-J_!4Nifb#ilL6%O zZ_dH!+pfE}#lLyFqP(6T7eA{RA4q3pUzoDn3!VFJqq_d$Kx1cv*~S^DotCCl^`akK zS@}lYM!q@K8{I(`&Z<+gy-R1sdeJCO^;oR+PNTo{_4-jJyaeTgU7GDh-<-&bmJZg| zRBLC5n4P53C&w&4Zx0W1eyWNuS_pkM#X`~-)4O+T^-iQ)THNr#A56=h&Cvv^-agIt zjUL-0ZSi_(8)57(!hIv1hvbC?66uL*#}4*4>L4;9PwUvA5Q%q)Cs--57O%MTlQXUG z&Fx^G^=~v7;!`_XyTxL%?RJ(VtrJzn3rX{7-|4`oI;o2L4t=<99ThOD8p>G_ZOcP}^zwL8AxbdW6*BuWEI$Jun{QLO5K<^2x zlZAh?cL%SNshL?5bx9py;9Q6rV}5;wMpn8Ots)MNNnvX8(QquoP$614VKHFX0#VY` zW4F?BK#S{EZoE%u=ZhjCCn)0g&*Cp8{o(xcZPlIQ+e$ub>Foa96B{M0LvS_^`)t@v z^E1;&JaYM!9h)Hxs%d%b&`*&&F#B_-meDXcyM#KK3SUZHZ1P@+1S@9QcEPZzPV*Me z<@^ARY#taR);i%<@u+5ag`~-Mb9)&Y8_c>>@yz7Y;s3DhhY}5bVB$M?bqBL^w1Ys+ zSjbp6Tf^3KbkLApyeW-i|8KzT*JBq>SUp$U>Lp*W^@};E%OvRv-FUyh_k8r6Ep_c^ z{H}J+f#-&W>KJE2Qqenc&HW5Ujoi_F@v*L(RU>`t`0CQQVS&;8jd$5oH<~I61dqnW z*!Z^_Au^@a^TYYvp{Lbts>g4epUb2%%;WZ#)P4~dTHwzdZ`p}P&|~z3e1q5QV}oR>85#2AUZt*TaR&Hx5fZLHtX_ch5x11a_fvkY+?zcR;ZWk}bQ+?-HK%Z4 z7QG{u$TOGT3GIfFtg;nlWd2_JQW01woth;W;A4^*7-H2 z@vBJGAkS-u!Rbd_m29AH_}3MmAvwKAJ8LF&uH+fV<86P7e&e=d?_Ed?%&la#%G5EO zbM)w#UXzxh^Ub>|&Bf)fS@61>64gOZw8ITD4dt&7Bg@;i1{L-;fjU9rm{D;g+p zj$(!m8T|Chua7$Nq`Tn~djH@>-4SDawsuGq76MifRzuwpN`R$#QM zhK5G%j~{m*>grNvWo4!6=CV-6{veOG%rO#%@K?xBW%oOt#bDJZPTuhaO#${+j*9P^ zlvei*Hm`5wPKR(BvC|&AAc-h(vq8Xt*BOcz_(_ZY(0)F)0~Ll4LTm>bBkWsG=H%89 z3_>14(%8nPof=6|F?=KK-)dCiVP_{)8Jc`NHu6ZDzJ%4cudAV`I>^wBjv(1e8_d9qUt6LwzfOR(6TKW3J> z3>b9^GYW*OOyzfJHznu>N#R-l8ge^LTF>L;PW+rLy4$Hv z8Nrj5EbhBt|CUFphg_&(N=|=z{Dlmjn4|wiY;5f6*m6JQVXXE?aZ+PjVKfG%b*YL0 z31(CUDpq_4K|=HGo0}y7C$C}IfUdD0}U&s%3R`fa8W#yOan@QqsYPphn&wDR_?N96YVlB3!LR!A-oU?G~|S=y29DX}ehY z@Yq)|rI+C5OI(6U!V{y0id!#ec*vhwzmQ*MG)#z%6(TqDMMFVyD#%X^oveG3pYqj9 z1nIBp*I3*m&lZ8(2}HULmbyh~a?eEQj3VR2NK*>5wbaNeR+wsOX;GLexKGZ57HW`u 
zy%5(dx3bHj0}|T*wl4h5SB-C++*TVh@fzBo3$)r`(np>!elNsZ+5C(;Wq~p|xQ2{Q zrfA;nKNEtX{5kiO%VBU4Z8(1PsZBn&ZD{oJ=((ty0KNkdwzSuzff6YFYoY-|#z+(# z$mH@n=CkF8$dge*`_S6w#|;3q1We=ac#!P*7iAdEDy~6!)6tf@J;$x?>aPb-KNWw4Lk{s=<<}7s&XBq#Zh2ui^C533mg;^T060m+ zGU_?x%&;+`vOh2Sm3Q84*8rYv;hRw!UQ%p((2{dO6b;=Le5$OF`uZu9LvQ!2xjjcVfq48#^ouHO)jssOaT~Ux*6{8Bo(MfA&?}Vfp>RNSWOONy&y( zSgL2Rdk_UqsFlpI;qZ-OTDJTxl4n!@_2epQXUsB&{W7>a*8uMRf9 zNbsYYk4Ep?YNrX;%I4GUYgQ(1Ff5(??JqcXUB3Ab-dTXOn&tlf{rm4~w<-&9e9%&5 z#lf?7`+BlPTw$NC17zwQWBI2~(&1qTbFPQ^8eX^z&u~6)0#IUilgYe4JHU ziXtVUAmGgO-zbmka*MMojc3#kxq!pMS-iOLVcnMiV_RY`a8-)_NI^3CRTu{Ck|*+t z_rB6)*Q-FYruosA2a!ylAXV1(O`{5pw>eYl#_72I0OlFqpwnt5z>Wq!Xb){n<6WP7 ziX8#iOOT1Epl@fX+7>=(m`hCbj`mf{fXn;1$5fI?rw0CXW!klHW?~j}+c5-=8sx8! zf6O}p88wVpiQ)D90b9a(4QP6xI8>i_7PE0!_Xgev$sILFxuDHtqUDj&i*TG|M;Lr@ zSIs_gNU2k#QT>$ugSyxS`Pa4!Ml9&EBJhjgSI5u(1*rc@fRf?vlVY!Wlr6BMCi?F^>!sg z7s$9G5forwykml`9UXsCW7%kc=Znmof@ds1ldI40e<+8C0`DVD->sI_Z z{t3a5cC?lL5g%?ju2~Qcm8AlIGsP@bo0GOOhZEhno{kjr#Mkh$0D{f zvYD2}Zib{R{9A{Z(5?MCj{Q!~6lKGUSKlYBje2g3Jo#OF1hf&k{rQh2VUQCds|hwgrj`9( z%(;vNh;gLRo%-|-to;p!+IW&b>vqeO`?#H(C;ACPND!Rj$Zqw3;F5N zBrOEk-M_NOunW|Oa&|&$d4>nn*R0aIhy_qdIAhQ()5{E03sR?$?*F3Nuc5wPAAJvU zcoA}8SLq@nSA7zpBNWIe{5I%=p7UfV+ez*Ak4lRD99R!YG#43xsvhmnY@nweS^-yl zc^V=cf21b*D3~X3V=A=&q5AFFl_>9G zx#PY=@VTTssyv;4zk2tf;?1_*H8k~NR=vdnuX+}8MYH*<=B=f}w=ALT9W@M!lOa~| zhiZ??Jb?G?0=1)7TE-1_&Tv>J-Fj8>WxHA$1eTc!Yjj@9Vx674?x!Xfi#Ax>wjpcO*p>2AsHi<3wd@qEW+-toEZ* zHd6Jq6%uSL4_P9L4|3>NyrgDsyklahuuU&E+_XYH5cnx%U-kJ>&I97s8YZ36%^VuX zg-mwth8m9A$}5fcBxrXXh4bqT2NJ5tLrP?owDE1wVL*+w{->gV{#7pwUq$8^1xf1N za~8_3r<(#{-?PzmjRh4lBHJNwg&m-fT@fi#In+sw1&Xw)(ip0dpYKt6Mf?^^dTrJ1 zlg8&a9pudG>5TnGB|!|hXotUjyYdEnKr&>8d*AwhpLz+C$2#*)31x~%IHaZsQWcs3 z8?`T0{dv;EyFZFon;F+(EE6u`rX zjSUZSt%mINpRa-?Xg?2spv|~#F`qUU3WeAJb8zDPMj87rUG#D6?(-;ErLP5ClOLU? 
z&0(0p+EQo?|ohQ6{R$TCMI;C znqo@UpB_*KO)pv=1j8l)ul1{79wTq~S9>slOJ?UtvHNG$b*1qipCChbgdi=C9o2gH z&}?Un$#*I{8<&3f+z1H?$&Vp>7-6U&)w*xP86ul~ZAa((# zf1v>~^^U!$Iv~K7|3KLu5huVVv+eB|X}OZeHm)!Cn^hGQytc{jk}gWH6fqT_k_F03 zh~-a9JFbglSk{s!>2G-UOHVu4)%)AaE9FskYoW`mcUd}uIMXQN`a%c>Em1EXNHa&| z9eAVv5sjR27WNSvO8CVMh_$p|mDUh9bFlz}*N_a@Qz;$h{2R<1zibTO@4gbkxKp43 z27(Or&SorU58mvQ-j^)zwvC}p-d;-~6(vRa6|v%WHZg;6)tSECK%^v*LWKul41QJ>s#eC!2oRQAhFcT0BS z?FPA9bSq=(ZI9~DHo^C}%|YluXo9Fw13A<4fg2HK2lbYri#;PG(LoIKAAJ6yUd1M4 zEMRt9L?-QnG?hpc&7btnht`i`vc69jMt4M>sm!;ZE=$~xb!+#{!71@l*m$Lf{z%#$ ze9G46?U`4E?>jOui;W#S8 zD}WbnWCUR)Qd6X&s=Be@Y3bi;S=jN+an5%NKXKRRSx#MDo##_;AViTJ4@UDv7cZ^I zh2O}l(C#{)%-<%4?;T=1$w1$cu=aJeVOgnqZc)w~QrGF87kDh9m@Py9#P8}ZFI|%p zoa%7&(D`Yb8`@uXoI!w*(6|O9ggiCH#KhzW1;XnOrN%4@J|)a2{xb|r(9~D${Z9Fv zP7lv`S2@zQo%|yH zQ^l681$pw=Zq`krwGQU)zCGE`6ScUzZ?>$3mdc%m$IF01$H{H{4JrWO8w#MH8R-NF zlYjx5mf|+N?hNmE!Sz?gK7)>D6J14EeGWdEFRB*DEt~LoJ72RC@P13Nc=2pok?`Wx z-g>>Iqa6WrBromO{(R(ebgZ$I$2Z*ke75JNxUFbdk$HNFHlTD`E?8~3olZ2p#ja9~v@7zdv%l)C3B_{TD{|(1g6oJ)Q81$Fpc1JHMI6?i}H)=5FK=tB@ARK~rRWTt3&QK)<9x-AT{QQW) zu&%|wyKmm6zqM_$;xyB!(LNAPTDyAd3>xMrJbuV+&S!hNW8*FX^LBTh3o|oEr)&~; zdcUO1lMU?CFWK9R)gSV+Q=A6;gNtP)N*-zNIoY3?j9srC<P zbt6X{(E$~!Gk1KEUzBv*{W7UkVwX??7y^D?q{f%bF0*%9xms;2lfX+e=kM_8*O1LK zwe{YsINLVS){F1QBzfH)vi|9q=wHL{?4PgfTUg-Zp5;wN`v|(Vtxw#2Q+?;t&INVd zTZi0x2ucze&k`X)H+m_s078TE`DcDs^KFXbroRIrkB+aS&Y`3%VNT7JJf!iLiy2AN zgBJFm+1E8oy2Tc!%ILHzw3LcvXLN$Bf77*%bY2j8xWCvD{Pb0vNrB;yyOh0#=msOk z;cBidm)(OyQST9hw@UA+SS!+N^OB}+-?OlF8ESv3|H0A!Ewr*={V^ZK4eBUD- zM6l}hbbVJA#xN+WK6`&Z^6Ve0r<7z7W3yp3E+6uHFS0LM(UUGO01RE!&&{RHtJa__ zWg~P*Ev%8e6|PhA1kzL~HA~&D}MdZNFTM%v0VFESm(o*!T63)*op|eUr4m2duF^joaSc>XiCy zBp~4K>N4`Aq2b{u;ZW_N?cSceVpECc({%TPe2a4;^rQ-VgGp*TZB=TOZWn(}tp^M? 
zx+{8m<=GP9gc#Y2m;YMudJ7!I%N5X)ROQj~rGfE!IhVTpH*|Gnb8Xp^f3|m57$XE* z_1DL6_`yO+y$Fzgr>3VV2rrGJzP%a-7+qpe?}99<2|ZhXlaxdWn~Bm^5%I(=T-9Gd zF@aiYkpH1V*xSxL+tWkBoI_93@>ICqoRnsR52XY?O37u3VX%2tHK&~zn6 z8l`-xDG_L7BnBLb5rsR;F_YvUyeI$<_yzIm6QUM!VzewhpcLKrz1hKTn0UUP>&3r16 zi2f-w&I-6&TG6Y(&eCK=v-M-pS|f_@M!$VZRqy#Kp3)1Vy*5T~Jn|NOj@U1Sdsm~B z!1SuBaPU#sX9!W7&IMo@d)brUKsf+6KGg&@t%QSz*Qh40@4$q&V#WKQpYu|N`cRBj z{&NpovMgV&pI>Nnt^6Chhm85_5!aRrm};rjt#>l(FHd9L3%!>nW@g@#mg$2ibyGAo zRf|Fyk%D}mA-tgfdOg{o5sKTE_@ew^tZ27z)DyR5%w^YIe}mPVX%+ORau+Gmx05EF z^i-=83sfqGtr)_S`+9AjHxSskN}BU~FEs8AX$Z7Fl-6wK(n=UU+$XF?o7xts5<#Mn ztnA`Du#ebpdi#4{pX|1@pb)=-qdZt1%TpY;Y!xfZM+oEn8*IJUu$L-S_rBYd z)Xbza35v;7zyEUINSo=|FSiAhV;y0LaMEsyMv)N(0%+(zK#gE83JXshGem|{t%w(d zP!(Zi_Aegjk490Y6k8pS>XV~4nOg?ZPhZsi$WDe!C#|gs74&b;tY#p|@fLa=BNs!` zGIWABJQZIOC0870L!|fcW^prjUw8JF>(+jhT9xgY*Xy}^UOx8NbxEgpa(z9&e;ZKQ z5STX}u@r)Uf|JE~1nCdVu%EezVm(EcXvTfU(mU1<^%Yc#=<534-mSF%@&e9zO~z7X zvi#@!^VuVm=L75?UnFPMKK@pmrHP=)9}Lxr(LLAioHNG)+7Azwz9sz9XQyw$6Zap!s`adQa*F(qMmJ&odITFf%b7{jV4xuRT_8QQ1}xNIg5-UO9ykJX+SaZY-kl z^+x&?Yb(0Mbu4RTJlq(6HW}93KV5n%IH*lv?|%jf5+ULyMim?!Ft{f7K|i5xAyBVn zb8ezhnXu49uk~G@s&c(HvE@RNWzS;G#xMOMqwo8qPY_(Py1Zq&Jo9qDOchcy_!|0< zak{1?6!wDn5uRsbP7!sGX6Hh4+Z23lH2kd5+0FHqp>0nX>wBDqd&4*?D$B);yG|ZEB`}0Q07BPD&V$CPWL|lqvJce01MDHHa_)cVNWosx`>%!ol z=H?)Rr;jY}ZHZ3w%UI*AO#YLDB2JPPRaN$jfi^!@fjxgMe6`C{!tE!~u|or!!fALN z>}_0y-Kd!BY_;t*=y>L!Wr(f~jp?`dOog%z2OdhUHl zI*^93*_}jX4T(@;InoHy>))v5JAvOI3p2f!zV)>}m_bkm3N+4sRz|BKP@tuzcUSO` z0;(oVhCG~}F%agghG>qQAf=J^9jKe~9&+EtAtn1W!&BCE_w9BR>d7ijWJ>)9`#{<= zbuaYE@+Jk~+Z3!S@y*I4@&d3K5RV{O4{BDyoVC>D7sgTs=cUADWUQes0Ck3HD)(-j zL?M&1+&fwRKd}1**iTY$jUMF0fu4j_IXs)NdIu`0FfUJ^%*x`wofAtxkd z)mHE`F@Oq@#f6y@!syG8-ftIO;Lv=30~q#fVm>HinX*%(-2{@T;Kbfic3tYR%z`W? 
z(O##z7kIiNpPH z7-|lnP`1QwAX9`Yc+LLE;X#X0EBq1-i`_78PO!S%x#1Y0DhVNO$E{TZHLTE<; zVx6*9`6_z(mRY~h(Fm4jxXG?VFbFE*5afB!pcuPWz1C2}gf-+cWo+;p$6j|Ik876F zM-^zlx)p!&zj8=yeA4Ff=jdf}v%^v-nrrOle*6!V51l52smqGR)|$}Ry-jlFuQb$+(+f`R(PB(dL1sC!X5-<+NMge9L8g!>WZhs+E9qLWv`2L8;dg{b_#~Bem^F zTq=HwRgH*iAVF6Vvr?A5b9dIAiyCFBFv&FUih2n zaM|G9c3$rEViAq<*8kr}TNoRk4haimhDEDAR6Xsp+(k_fvr4_yIUL!8 zT`%QU_kxMoR3VQy4mY@5(rXuGKOps?CjuD&s~K7%vAIUmfkf#iW0-}Ao&cNc}M1D z!E368zQDx$p>9yVg)Nfu=#QmFp!fpnTdlsRZOJjWL5Z7#;4}1p=Ndci`AFF{TJ#UP zY@fRD%kppw;2x|GoLArXfoC74Du;XiF*lIsE8nV5@O6Ct2VGzE;H)0o0qsa(IfaZ-kiO~y<`lsQwf%^ih8C1jhD$SexmEK`GdEHa%ElBvwo zPBJGV^Az?r6|#-nu)XWqo$vepuIu;5xvox^JkN9A>t6R->+@NkrTO}MrI;j71v`86 z$Y1AByG{g~3YC8AwZ^B!?ge6ckQ94F2>HRC||lYa7P{ z00R732F#a-e+`|;s6S0?1`Ul!G$#YEF|-_FtxQ^;fg9;?g9Au`f~I%!89|p_ukn(W9Cu`M zVT_P(oLbqG-xpO?fgaU6Fz5grGRtTm2&mb?8>&wD{Eo?VGyjoqf~TB&550046va%C zFQdQM{7EZ+E>AT*+(a4DUZajy*o3kG>wj)-Zh2FgnLcs$pVu=b2Qoh*$R6z>X@o#w z$^EtkY1yTOLVx7vDJj}n^V-#t8CZkWBsoNayv%8J_YqZ zRkJQ-ci%jiqP6&R^b7cVif+t$s!{pXA~%fmpD|8SVGTi0)uPhqCQXz!<9-|Z25-|Zuz%?pgFd^ zO0gF16WXpK`R|6;{@x~IkPpa{9eQ!%-Y>G1I_r zBBORE0mo3yfkxE-0xjtJEeL7fA9^u-mmJQ<31#>q!T=O=3Xlz4{3oX51sQpW;V(J< ze2N_Ne1fal^ZSn<3H-%^paPP>4?i742fk-W7JHu>*YW2MMBhjNLeZM*6V?u2dg0F1 zMJWXZJ6gv!82$ObPwjSJPtjLUH8iVAnOOGo_aCP+{)z5p1 zzd0p{LwxRIF$E0a4f4otcVye(CM|??3aWGEAp9{zuFg@oKG(-2lhY0Kd1d9ovO{1` z=?F47+@$P+^tghsKv^H+H*fjSCVu4ZQrWNl`zzP`PC)C&B2w|lvMil`{0IRV;HR_m zYCC`d5(TSs9w~W&@lYt;DuTZG%)P(I=OK9;eo2B?uFm~_Kg7Gv-yz>Hf1VY~MA2=p zlSRJT&%fIMVhtm3cBmVYV7lPnmCp0Nm3xx!x%fV>jVcMDlpP3s#2(0fpD_%ERH7&U zR2IB{AA-l@(IZn+Qw2_TcJt*HXfv~G@~}geb?tKg%a=Z+Nq!hpgx2m%Ni=JQNRD0x z6H-lqJphKNgO>Ysx8ipO;#|~gFPjOUm{>z`T?I^~vsmm6-TitG;ahRRlZjN56Bw&jL+7Egy5pS@WrQU3}H8j88nMKCcbz=fK?YV;M*au8vqdrducg(>2V zu##>L-fxdKGu>i5)v}P^amRAmKR6I{Sehx;XZU~mP}k;Xcjt~PUaEba=@php9q;N@ z6>IMKvD8*9Xq=Nhn$nFh-^i?TRpZ!o$DxsT>y&HjS*t#Nx-OAm`3J18)V6=n$Eyu;& zX`i1J!q|ze)#(nu8LO0T9Wx%bHp@?}IKFEWU%X{0wg<`N@Qu%VwRDU{*fJY7=Y3sE)SU>pp9{sXd^bV(5H 
zR>@+8pCo#g^(9KOGgs5~#Y+7`S1r>94Y>F(mJ5~s^b|_83f}tE8MCE)XGJ%K=v0s0 zhlnw3Hr=PN#-W*-KW;Ik=XIp|?BjW~CxprxfecTb`uxQUnkcHg_OYR=Uc1nj<+x@G z5!`5!2EOgv7&DBOTVal4##%epu#L2bwhu8mAyGtnoytu*JzvZapIf2186&$oS|FZA%ICmRVj#+MNg81X5 zi8+VOZFkIZE;m~n8`Mh*Z)-A{XsK>?LY*B&+SarrP=BIk#0cwLKu z0~FmpBaqQ9YxeU%-hBs@?idd@8X}??6mC1pTX!eF`|Dd=8=l!@=C~9zcKErGPS$la zz3BMiXF?hB?*uj4U)b=WoH(MP$lbI0Vz7%yqn@t*U#`_;Z> zDof#nM8~1`x7>30*YCg1Bw|Se`@_Zjr>%~#HXYada|AK=%v%^mED>rn$#JO;&HX&& zd%e5~iBAGiDbNnD&YOG7h=G6EIjos$KqQMa|F^)Ls>Jfnh&gO{l(e?GEfhSTL{bhS zENMTUk~#^6=2^pDr=JaY$v>gll9C_T&qbT+Rct-jE(=dmC)OuEbRa1N5v+`je@E}| ze>Dya@QBOY-d-$e;gM!<$+cdl7b^L}59`QiUh%f9KsiEthx`^+0743GP%iX_MG)IwETmXsAdBcG29gEM?3z za$=-C9MENWsn*0ii)E=0A%#L8QFc2Kp zEo7e$Y9>0rsBj-M%A#{7SDM~ne9IhloqFI`FBaLc3EdPoU5efT6|Kbv(Of_}H3B0l zeTg6Vj~g%`9!0gcJX+kW*6YFN!C_mZ#yS`a6lFpta>Bm?m|_zkZ^`192|x;$Nn(v4 zV@wybGY?;pJ?}&wdU2}J;m!(SPbA||^FY~iW>o1LvsYhVV85$u-h&jE%0!=xqae36 znNVkj?1jZXd`Q9J2!j6P*K7k-rl4u}P&dU5m_p z`7{FuaBZ^(a3e7xbptfbG(4u}^x;DME9jsVo0_dXc!Mn;mqX3`jfJL*8D&5BHYYX% zSrgv6wMGpZK$*w0RQ8o)io%N2(89Fm?$!e;_~@TW?M(ZlDrZLMrX8Z$&uxaKrFjwB zgc@O_uFN>%W~J7DHMsVmf7a`xwT4qSFKAg-SkYsu3lic--t1?I%lPX_Nh%xbsfaGt zU=)pYeo1RnwauPrII}&0&);*mfapLY&)}%GXfC^GMF5xgGKAxFKEGs3)3W`AFRYlOq(tP__Wx;1`d z1ucflV0df6CT!t;xp7Ow!D%Y-JC_6FH1ovP`#Zb!Wnn8lq}0W!1Nh@#67%;S1t4O7 zYGT4p_!vgrC6L^LZ7qGRkNFVnd8(?)#uvIci(|?Y8+8#xoa{#+QsbH>Y1KT!<`#-_ zd^V%`ar9Ki2=6;y9@?_#I2Pir<*=^#rII_l<)5~KI~*uiVw>R1d>A_%F(Dzzl=_nB z6p$8jbTjtmRUko-2{II_9toZ+wn7m_eh?r<&SnHC{}&*|(C2hyZZrrC$MFq68jO$CHX|T z`^o`)*{O;|m*c1OlnaF{91L{)JUszq5mNippS03G7g^T5)hOSuj$;+URJbwW=M5pgQt=@VFe{Bf(ONwiEcPC*Ysrfj?U7@9|6JEIFD(Tp{BO zZg?RpRor3`8*gZ1YYR`a!OB@TB889RFOK=b?|kBM@*LJ{WhxstlX9dyT3?|C+|XSI zq{`BtrhrueID+9{W|@VuHwC^`0m~NBmy|113{pyMz34LzLOP>JGRIodO zEr6!>$d=}ntp~d)Zy&T4ttRiRz7b`tYC5s$Ku%27pjo`c4)L{o!;9aG17tpL^ca3~ zhS>N%H5m7@b zb^yz9Lx2h7>w1Bvb7i=FljBlGuJ}Z2i@b?MX!De6Za}U44;$tX`wx9Zi&|LL05J!X z#%U@o{o0|6%97sFsPuy^9gX=84J#j-AIy0g6(((O)YB!hjgWJ7xaeCT;-%AoZxXIF 
z3q`V`Ej2gyHue!Axx|{ULNzBO*tU09ySN~;h{2F?-MM48o&Q^2rL2wAR41I42xiY) zQ0CpxVcWNpE0ZIsE$GJS?BH8a!gFOSTwC@#W-VG?Qc^;;P$ae>*HHDH8z-dhL@Z_( zyz;mJ$x|B4`c<8Q3%-FsDygoFiF6StkA-y`)JzX+o!tnB%RH+D74y_Z!ijex;G^Ge zmH_f`y&Eg+JXnj>3&mBiixqjMLx;&VE%5)s=7)w#{(3FpLE+FmtU+zra4I?gyXw98 zZNqjN=r5|o&H7EHVEBRaV$|*XH_(^~*vl(N$IWQ9T>3TZO_qlIq8F!1?)f4yisXmN ze&zJhr1-9~XsR072YLH?pJ0FqL6vQAaL||?Bx`7ax~Bo@CRFwc%q|a|iTF@gn%FD8 z5|@Rfj)>2B=>a%o1|lBBdmjpjeNDt~yJNTQPU}6MOic2`3|RdL;2 z2NVC{Lhi)dyuF1|)gSA@kX9X^^_nQ>&rB0EHqMe7f~e9=5)VW&4uT zBf%d@Y$F6uSyq}JIl{1G^l)n`C80}@k^phly)`H-`f3E8oU4Wn!shKFccm)Z=>OjU zb3M3&c3xgDH)2T8)79lX=(Jk5>?h&G2H(;8ZqR_#Kfx>WM7gB;p^}V!-{O9Q{n;P0 zG#ZPqkrQs|%{9`>)h8ZucA1qAD?t~tTokHr=j9qym`Zd41Yu);?^SB*swLYftX(yw zlCQL<*#-Jupo>wd6jf!i9u7TFnsB+Tgqm4y64w?VMxi8N zpt}ai6oN>pQkJcnGz8pxwFugI*)K!OIeB@RC2IdX{<~WXMa$A5%HQwH$T22?Q`wuY z%Yyh(<|@xf+uh3An{vDVYmUMF4QZl@&s`6NzB9xKAgIdC%I{)Augp)LcH@q@XlFq zx=Y_5Mvj^tIu}(Xil;%Ual_^?$A<2&)c~iY60jr+zIniA6n=R+WnF+kq&%QxZavxRRGBxdbZQP^d_@`q%`bnHH>z z%>K5aMO;{d0UUwjdNoC7cco7|6|oCvK;le0d`_+oXuz-My{)(y~{+XZton z2t8C;AO5(qUnz$hzyzJZWp@R|7^9MKu$8y|2(_lm8{Xt)^btOq?Vc|NB}+vfrl%OI zy3t4qqKbrU+&bV6gD?p$Efe+V`97S$1hX8n7bf~Q; zy09#Mz%xKy6?BNoBbfyXrc3-#jIjZeHt>N$(fsa#{_+p7V-FG3pr#(YnGqJzao&`@XVqRTWfjN> z5CE9hC@aCYK9vuX*K;L!3cq92^V17xHFhy5fAfpR*QjPF{y9d4UzLC6iCttC0?fdR z>{I5Nb|i(iTyHWYisXX93P-uokiOB~b;6PbNV$)Aeg$Ndg5tIZ2#p--_?XEdpmzW# zb;Vrw0NyU9wj_8=N2}4x&gCfIzG;s-sk_I7iZ~a!2K*A90LDO;@g0|@+6hiKJ8u$k zmJ!79pf4*yW#!8;1b5PBNuy2Nk1Q#lKw{zp-BNnWL~CSb4mr75+L^LgqPP9FEd4}W zM6h!jpSTmpM`Tl>k27L++*!6`5OA8_NpAcU++nxl#d%6^xV2-m;LRdV%9`oZqLl{5 z@tg$ou*x*~4GbeK2rdc@NSb+Jn1>w5bU~V_IpPl`D!GFf&zRRQ<>jQ4FW!&3d+UCW z@n)_fSN#2C7(`Jg+jW=))ChhfM0)ktZl`~2`X9BY*%vfe-kHUD$o<&ZmrGzW)UYdd zZlyr$|4;>r{}qutns7a!0mX{qN8HV&Dvf~Uu88tIDC3m61ni7>9EQ(-dB%90gNwS& z!KZx9_AWifuPY}jJ*3$|gEws9PoKWxpNV*VVi|5ANqNVw?425wVZJ*FKQPfY5K(m0?>h02!y>6`Z$*!b@PeM8hu_< z+DO7o!;0qXBl}%HDa$QqD)>z+O@DLHO802L01oL|5;^C#n!A#_3+cFu4qRz2J9X>* z=rGlmzeX=)TrUk~MHg)p$AV#qPfk|x9N;4wqFXE8f%c%K4yo{`7(SNobU|q%(tPL+ 
zBB0PNAmaIFne=j{d;*j$xi35nMaKKYJbH{ryI+p+#n%grnGOAvU#boD&0i?H#Z3E& zwRRJxP%qNh*0Gs;HLw7$o*9W2kW7x$-=dlj5M=R_TTZI10Q_{xL17O7`F{t$oBx;+scwt8_yM8 z5f9w{D4X8KK>&sLBM*PK2r0v%)%o*m$8&;8j5u1c@l- zG}4$7_Y;qJHpAKZ43WB8w=6VuLZm~E-NrWCv7-20ap^h2ck653>pox_!2S8oBEl6QZV%8yMHD|RK_ zF?(UWJ6H6K7;gXQyUo2-6M9UK_IU2-+--WIVV?u^25`O9OZv0v&u>tl9%kDAd?#mr zO>h4Pw9mnQr2TBl}g4{YwqIh~4*#I4s2#IX{xN5{kf&E!h0G|&#lkLu>Wiu|(oL#i9Z zd*`{0qL6h}83Uj6^@XAkbw}0+g0#W(srYety!CbHHQ(Ofu)N^zZ{fVGkykJDBYpB=47ysompyGl)ImYE0bqSGN!C*XD0qTP~7iUIe2q(OHv)Q##` zxM=HPSmn?IIE~iBKegjsysIZPjNyNzQp?+RZ!YZ-9^{;D@JfB%#4FIOtjJwxA zkh-&O}CM3wYN@Gp*cN6c84=Dvs!{Z#hiIA%6Y5{SbOX}4WZ(cQo10%q{Ra`*c! zjn)|N0#45LYBdUhC?2_BAF?#At6s@(q(aRHr#||NDux`G#^{YPC1UU5(vFR^Pr#`Q z=98X^dR=);MK0Mv5Wd}eapkh?UE6DZw|r0(=wq7!7S#{i_$u=&dAIwFEXq_kFAB&d zwyd)+**H3i*6tEIkV`m5!;JAiTb!|N9#?=64pwgF6$*2{cJ-U0xDqVH+SkD>d-mZ8 z$tMwc1Enog#g91MhI)S03w&ko&<*_d0^x})%>$gt@V`U^ceRRUzY}|Y<694GLnIh> za?SJ3gCJIJQ(^_ncY|!q>XJG4dHG13~O^w-2JQ7-?5Zs(fLcRpGs&xO^(@ zX*q5ciE|w&7LmGSp25qo?%<=o*Q=Bz6N43j7A@R6M)V`H#dRF)$l3$(>e9rIVhG`Z zg7`gOpQxxgal0hX6s>TZO+sxOnZ#TR6*s$XxQ;WAg>A@#$dPPtYwn-{Fx(gq3|50W z8$y`}2$H?P+nyAdp7AfbE-#@yPeED*_$l*FrvsOV{X(w8pa%piHj6 z_eV7sWXJCRv=9pmk`N8;QFHLS4k_#5q>oN8^w|6XcT^<4n4A1jLqqEx-@&lTiIH0uhw>m-T6^|#g>|qBYVGp!wk*G|)Ovpz zaltMM@WL)d%sJ_Y{a@BZ7m6XxbK%EX4ZeX8By>vS9F~Z9K?8Be45I zSkv5W;H#OgNn7}UNgE9?_O*8$!dlGiTC=&$Azc3hX}jJ{@!#Eg{)A&1mY&N4@$2$D zJ3dIn52ngT)y}cSW4c)shCP zmc4gliqTRAJkk3I;tJ(8RpxaRVo?^_&Sy}^(>9yATtbkjCQ|~r7LYJ}61$tcAGdV> zLp)Hc2A?DmeDega;ep=7$7c(cthf2TEH=Q)vLJHT8~;s?@ODt!znGI+lNFYM`6A_$ zGzA!H$=}WTAi$kLx_QQ}0B`*dYUv0_t9_wykot#3Zwa%rSkVZFk}>$Il7xjn@m|Zn zl)wsd)2wH9!ke*-9MH{Bc4ApetPEBOYo8WmoxZ32vEKazL#dw zX`U4Y1v$B-`Q=S8e(i$$sa~TJ=~uJhCsdfHx4DX5ac4Y7axU)!oRFoc3DQqo>M?7)!J?k~PkI*M4{J zX^TMKRzYLpc{xE`;n6|@fA_$Y?tbO!`6Wkt^D|w7I^gLES>NvqG!X6u2b$*>mc5urYB(l(#OZXF?btlL@2m~~UCMdaQv_NtIfs_XQFh#KyWRt>cK?(HDmCl0QT z&!dD)RfWFD%9T&I%-*g&x1kwC5dLKp3Fp=$&w9=WBQ#VW@3`q>d{HD_jcWug=c93O zPoTab8a>Lvl6r57HGsOb18m&d$myOG!|x#nmhO9z_&1@xQbS4QLz?4n5?}E=Z6L6N 
zisF;50`?83a{OoKFVKzF;AD<@Bw`h-yX$`2**>bpbgG2SC@S-&iu?p=qoQ~HLQ;<1 zbDIjQ7*t_{ltf8iaivHD6@~usm%t?Wuj7Yjq#ZkbXTEm8B0$OS^&7L`Q+8uoib_gp z>83|KQBBdXMy*j0dMxzy>4Rzv^}`$BVIJewo3!0hLt1}jbMMJYshUTr5d`#kg7JZrvvThBO?ssFv@T3y|eJYHo=(V646|qs5C`S@w!g8 z5y-xZm7-y~i@NTuWS0=7#4e75r9*`yt*NDP@%$zH1APkfVeI!KT5iGUuJWXoPLj3U z(Z=UemUU46$VY{=ry50e&*W+}D)!v{3BySMN2Jbb1|HT7+1W+oH15I6A0Ke7q~vT0oF z-ol9;VU#torLje_u|=V6o;o5J+pqrC)HQvh6O3alsX2GOUn-L(`9U&7u~wr^jm=r6&LR|I z*87Ya;jTzqqBztqgZ?^A86>=P5@CJy24Z#xzdio_L@{*g?g_L7m?R0}LYmDHTgs6{ zx!4?jg22`Ekao{&BAAxW-F^m-{M#&9mqMMNwHT&^3a${@xK?FnST8$pVQL{QEcXci zp(q&UJza95g?s)j`&wiJp;C5W^1b{{@A!JjBeA75m{)fd-0GgFdln(p34g3DTnL3q zV@s8`3f8POI9*I+dR$p1$1O`Wv(n?;*-Zpt_&bmONIg+Jlp6id>Rq_Kv3-V6AYO*s z)K$XC`LqjUf!qi1i^qbk)$wcEzDrGFJT6azKC&0y((kcI;U~=WcW(Tt{mmRL_I-u8 z+Ow4<<(VTa*;+hJ@!RPDOP(zpA3& zdsFrL-&>=%haJyH^S1oNtU8REjnW8$(56lpIQD;U>pZj104;D#*&wqS142fjvobY4o3;9K1NLGTM zvt*m<>e9Re?Z&3#wK#KBX7?zjkKx~0GwGA#?;i2245ZkuRQN+Sn06j3oSW=9+&=l1 zsVr7LaRb_p+U5&8j5;&ZtX`~)OpwaeV(6rNJqWnAhlr#lX_DPh0BzOKb~jqd15M@J z|7E!VZp&SG1^Tikc!)6V5;f9lqVmDH&~*DMcMJeP(db?7_ar zDBC!LxU)f0T;H-Ha~46^T<~`8D^dHfq`Scw)JA_btAo^Xy3LBg=oB`(^O!e*;Bv(r zAvO*^{TkNpQVv(V^6sxvi%Z>cbGO~jWCVte86e$;NpVh7?E zotSTK)~@yFOy;QU<*_2D2A?!%q^7Z1H4pXXh=m%jGFEX^b3CCu4hr7*;69dileUl6 z8>gl3R{&APse8jco^7RSnDcxzY%qW*dM>GGh79+b{$elb+!x6+D~s)DT4{5BB8|#c zkkHwxZ4L*pqF;>1X32@fRjxT!%>!)tv}?$m(YH%k#C~akhP5 zT{<(zru?0q@Hn%NLuIm~>=`2kl&kE{j{XI_vQ*Cs7^gd9td4LMuX<#p7VHeSx9xO| zZO^(olH1N5Tvr|TwwJG)h7T0AVX)g8*U7tBsLfR0vP=yjk*tCO$2S<)HWH-`@6-DF zId=S5I`aVIzj_AAE>I*$PN5{|RzUPifpHX&5DaoFzzum!y0x)fyo6TR%N^RSzMfeC zhsm+WiBZ8u@Xz3OU4`7U3V+e!iYoSnuQy$9!H)@3=TDb^H|aBVKo8*hMXp&-zB~4- z@n^y2YvP0JRMTiP1=7iTYH1L3# zR*+}rDBnbz(Sa_3TDOnZj@PueAY?vlWQxYbZv@gWJlL)iypCk620Zmg+L^`a6J`fP zAxN0Zv_UJVVq5R7`g|-P4;rBuO3tY+T=uT4mRNGFcn#NkAPQ}KeDIfiAD$~rRqx9u zFC@v9s|XOgLRuLwO#_8R#llfq{A3@%qA#3Rrnh7X4l@-FTxk$QLR$$;w4sOUdev^d zh2Q!uW*(%27m%@Qxu$5$GIHqbUL)NGxbJIUGx^tE0!IgZTaLE+kF`j4){6I09z6}5 z){9z_mM0*WR`if>>Odx?rHFAno1kNk`Xk=frn 
zeTL&yAkY_#r2R3Md!_VWkIyd5s8X>kxA0VHN z@rw`s&A4{e?%eX*n>o512fEobH8lyudQm$OHc^`G24H+wQ-YRY;zh2O2`@CXVb zMxw^}@Ly8St^ds1HOKobW8^^mAFuUBDm$=>HcL+N<-EWeewOL!>x)yfu`8LC9Z)VZub#cn)^NnRRz_;@ecpog4&D% zaq%z1ZozyZ{};e^P$S^V{p;eWzxw>QYwYZ|uL3hM*Q=0qY&X(+b_4Rzx}#ifiEk+| z{mg2bEgV6$9XoW&5E#F7i^6WsqI~Os6vGiIPF}emMKp|A(a?0QZ+2Ve$-l}ZtyM2* z!7qJUd(nq)?}A*y=G7i$b6t~GBzvchGb9Y^@}S$Zv@&Ze$SfL4f}^i5YGSzyj1Kmn z<|x@5n{jf;jS&ztT071EJ8kfkWjMqJ6d1|%UrJ%!v+s1|)uCnO$zuSSfKLUquzMgW~eE|Z%fT3?uk zBQXPZ=?;7o-=9U705p`#;8~v6fuWIaWTfGQ`Odd}$&n|T5my3Bm?{$|)QKy}svXe& z&*5phP`5FkVI<$i%2ix=n!mgI>aOBx2`LiGua35xb)NfxW#kpWonM+fcn{fVe@r zU8CfznT(0ssx43Z_%pEs9^3Ggn0xX2r2{O7GocHh zuL%>}`W%!Q3XRke{seFg*5u0J{MIp#C&eLTCi=KuaihXQC+o|#FqosLhW%WO3Rt+G zy9Gcm2&E4yNU*33N!a*N-=CAe3#4toz3D`ZFXo?*yv!j(6O8PaD1U&8ae3{17LGpS z`XSA9nOGyJ!?pPIh7t6;bYoCqtu%3ng|G?an3X2j_Cgt;niAK!wdFN23fV2i`$qV6 zjCkd0i7w0qABm+kq`sntJVPrU+hb^coV;TBoR`kM>M~9lh#x z1$orVzj84Y=V!^Iv(#ibBa5qr`38?^O_T4wcxr-4&n2FbR_L!E)=WS0824}_nv}1K zNurZJfRQLCp)dBKmBLzOr6}pBm zDS7n`DwuB?R{lSgS?{BwE(RZuy*VL}V`Qa6tWjY**Hzut*7lI9^|`-eOQg}IbfC8U zJb;8wbFuY@6NQ}cyaMtQ>%|-9jvmDRih3B-4*qB6D#^Zx|M-7HbR1N*6=m8t1$DZPzV>|M=I1NWJ*u4;%SB_!$ur7o5-jl-77n0dpo)Mp*HXtxT{W-Qi+MHv{YJXP=LpqwAZcc!v9S1u0g2zBpsJEQf8GiX$y#DP zJ@Q=xaj7b+-8LGI=b|I=5cYs*c^%&QvrRdl6^x#^05@Gk{t5?Tu4!XUz;BrF(9wQBE#O%7M z_67~$Mqd_?KgKieNCtrU9CR~AK!6g7gUay#1P^|Nbo0Mjy+M3IlJBcN7L2gGI?+;= z0abeZ9P`8#7oE1*$2jY@uK@kc_Mu(s0HVUFtAulyF>;WNVl#1Ps~!|^Vr&qI9uOcx z#s;T4Wr4UEGZJcDLMkt91Dt)1VU`eFl?I{YVq5qy3U+6rLAKSB>Gs;p1hkvwvs=M8 z5Hd4@BUT2j97SpV{_ZuWT$G&%Heqx(H2i|%)VGYv(>(AHZ>NpvAlI2)Zg?FbE!O7KH%&6bMJ)cHCNlml%-786)F?ICS# zTFf%!!y|rnleo9z{K0Hh()RKw@if`m&Wyh%xncQ_>z}4eC^FrDv%h@bTmzTbY{Es< zfWD*Fb!e^yo@r}a^q?-O9cCo&{RG$(Y~U^E$ZYQUp&l(5yg(>hRAjq;T+j61{?DSm z4xJ)Dg7xa2x8&cQ&o$2v>uU-nRo_%#j16}??R1*g%N0`)i5h~sEzTjv$fe$h4FCHd zX^bc-JIR+1O@}+%Br#FZ5ac}89U|Xww(CBKQv=N5bU4f|J zh?_HZzu8^^h-dCxr=lBHf*`WOY-vJC{GxH6rmjV$Yt`}by{aUF|51&I2Nva$r(f7zRku2uBdZcUfmtozKVBOfJ>(FkdE5C<6 
z^0*>hjuhMmPY}OGQgd6a4p;O+&~b{nvfg+ahOAG9F20+$u58Rm#Vj7)KF7%KcKBCf zh0f&Xd+u|d(0v5W0DOU0IXF1Z{@B|C&vgPIHPjy%gt;+zHw=83g)rN1lJYEn*IS$7 zQC&XRo@$^{mL;;Gfxam+pfYb>jG}wmQoO|t&!z5l#n?2l66&<*iL-`{pV*T658wc` zQJ_mmj1?(KApePzJc}nxyVBL zpRFR&GyBLjSyp+#Qc`%Up%vCm%G?_W3*ZcTQ0A%sqpg47J%zC6n^y+ac^aqfp1>F_ zAeCt~;(?F!4_hOyB>UCWlbxcw#iRa?d^*oqZC<6Pdz`Q)cky%Ui~sZ^Ie+l`nbq~S z^4Jyf>N@ut>K+fq=j#l3?z1(q5ZAT;NB&*--N{1qJ$lNbb7rYhMm4=OGI`nJwgZ0v zZX2i-1dJnpG?r;pH?y{K%KtYK_#cen0HX}8=_+}@0Edppm+u$|6oOVC1)ad`kj)Nc8#lJkrti?x!10n#fu%Y zCcZ1dc(5Y6NR!iyhzcCAx{)BveBWAgC!mNlasbEKnpH6FtC#s|B<_@)>BZp$%BB$+O~d`APxYRYgk3VK0~4okGpl8+vOGvd5uv2TLL#3tJNr~@yB zLDXM4p~#Q_hfNqnVgi?xGob%K`h!Tps4;y^fU~UANpLc-r3WFDH4QvD>_XC{l^_M^ zOoj)D=zmKq#T}?KT=N+9LahLz4!{2;bO;XR+7Tv`X_ z+I`|CmWP4NzC8S|!3bMBB5=dCf6icrmyT-Svc~^T#%6t)GGjFF`@%!5cXyx@p+}4{ zW}@OK;Cxv!#+$0@G-g$tFb~Xv=~(i?_MsO^rhN3zcx)mNI?JIBN<=f@x9Je=XJo1G zw$^)be3u{hYD7-!T%|n8Lm2dRm^aci6%yXjsDm01(DXsuYi$#J1e*qCK4|UN%79O{ zf!!O*6~>VkUTD3EY1vqTxj}R3@)IJVwxkR^0Q{c43`t!el}6ot>~lm0Jk}d%T*Che zCR7=2k&mlb73SMbeqMmp7735w`42~bw@^*4;V6S!t8VnjnCo)3(8M96opiZar0j<8 z667cN)e+OJynL5Ih~p@$-bl+`FW)sy&NBnbOhc}d1d|uL;Q_x6o-QxM^j+I23Nl9| zMMH{NMWb7e{#}2+ly(hH(F|ILNIdQn(6LPGA^ZxA+UdZmu)};s>e$mTZcg94N`f&e zhmsH){=a3SxE3`Gh}okaA+HXiUv+xT;R7I8^N*1kvg zTa*miZ4pad?Sx37Kq3DqtO8+quu|v`f>z`Fkr^c4@?(%0zNUV!Jo+0LY^!FD;zawMX5Ox%ljaFH}7W7XNa}#k)QqL zKk{~scA1|%x^=(LB+z$qQXlB5*#gGq?-u6=1LUb&xPs~JylOpuV#9|UhV<)}GkQl5 zNvC>$u^%+E1D~f=EndDjPc^IqXT2QhXETNy54p%c_0uI5#UJe02OsuQ3kNeWuuyIC z_(lb1?Fd2yE-p4c|AgtO7%qDuNZ;I>k1jSq%0PLq?oH(Gb9fzQ0j*`Z<dv9FWMWMM`hEd)6LX#yH5H1V3l`t{~c7%5_UX{<(mGeNbw4~Xwbg$%;m?FZ&S z4BYg;la{l0up`Ks6B+|T82`66oLPiOrDVr}2!0D_eQQMg>(U#f-sbU)oNa78fzF%6 zozJRpb!l<70_)UKAlgFY)@Iqf$_;GYBNSbfJh~-cgV(?RBAF*xpFkoN*h4SDLi@io zaq4#OlLpkMr6kiG%O-y7Q~*I>KFOc-eViq3!ie*ID8UbE{(5KbXAr_Lh*-z?mHue783fiylzsM6j4d&%kFsqw*%{N8U{`hqv1!$KTiD#okz; z+UP&S>ghn}q{lEs8_n=)Ib&ahw5wE1vl9A$dMVL8&1qN>kU>HNR|pN@e{e8VYT67M z<0K~j1jg&w{o0x`@v7bUUN0k=E*B+4NkBKO5K|jg7Sitgx{)1Q9>W|VI?f}|l_z2} 
zAU7uiFA0Ozm#=|cLVEsbCZv7&6kQTF4muj1`aV+(P4`)VXnNh4Uja);&^y$3D316b0!B#1GPUvIW<=Bi*VsiHBob~)Qy(1~9{ zLa7+ubAGw3G=}%eLiF;7{5;b-F`x=eAbGP3>GX ziYAvITxqfhKG1iia>Y4a`swa7#b9%|OxJFBcp&l`-AuOrRT8h#QQe!H(oXyqRm&e) z!JqfC61K`BO_lH{Cp@IFH{M_48KVKjb zsf^dK^0|QgYeq{^iv{y~+M#S0`Z%EiTuB;AKn%aP00O%(z&2|nrNJ#!Sha>u+SyFn zcu9bl9OXN}$YMS^ByAzm$G28^omEtXC8oHajL0G)W1{P;R7yfo<&imJIQjam+Yl5I z3s?!A!_Dz;SK#nyOt&SKM=ld(7$fDW4Yx^E#np2H9v5fJsR;rX(y1W^@FHN8h~Auh z6N_E@Bom-^|FO?0__RCNC%`4&Qt z@~@4$cZDJu1%fTrcr3JRhR(m#7RPRmn6yGYWl&w2g$lR+CaBiH%z;bKq^Y-j<;}g_ zLZ|6$@2m_Oq}ZK&i@TQ^N`44;IAdw3GRU{gug2ZwP#drAW9XF(?P;*T^B8ySFC+Gs z-DPTXy;m1c0I>jojA3=fr4894p2@$9ruv;O(WQ%5U_|zAE4DINdFf;dXp=z7MB(9h zr{p+kI>4U~a!bC|2?yjHw{~=FhC@4tP{y)O|Lt`vCz!xtH_kvV`ylkf;$ShHn0BJT z73Eq^karco^3ACTj__w1*TBObjOHvBtCKdp}AR0g!S=BJG@`iLO) zQfke3KH{E8R;f{pue3>fTQ$3t?^4w)l7TQ%4aOpCJGi)0gTtkJ{qELMMyjWQr(jY* z)ok2ioYWd~0Rrg!PO?-Wo1P2%r6WJ&X=8?LdeWBkC?*H{_!-<|+V zA*gfo%&I2LWOPQD*JXKJYBE>q3#CQ z9Nn|Cw?F437C^Rg*it7TJxOCnU04p?(o1R2?^uX&7qUAE;-T#SgXYK}%v-b?AsqG; z3P=x&&s`a(F4{BJQBd$Vi#?u2M)^1ne=#w{s2B%Z@5a==I@o@wO>uTdg;h8wKql4g?x&NM(um|iw_1FB{JfU( z;XHD3jR=HYiz_F9E*9eCdgojo2z^Ix+0r`XqnGY2d7jV^F4nALHB2t#x>Up)jt9nW z&3gkasE_7X(6nt1pj5dxmb!E%+i$sNV!wEdLK3HOQ@r@BTJtvt)*A{o&?P}hF~`9R2TRBf%S$WM&%C4|49Y~GHuos*~@ZolzCjO&topH9|;x}yuc%ZwnG3Jjv;m_|q1KyAGJzxP#P>`IDjX4NwjT zs5$GK>ph0L>9|vN!*&{gZVnG890JS^ugZT7BC+b|Px;#y;8jHyE6w~Tl1~anGysM3 zf6rSE6fh=zd2ATTF)hS~Edc5|rJQ0W+0RHx*2d-ZT;uyq23Oh<8#LR|WUk@Y{eR%6}em`aM52t6ZSu*mC zybgrffyVba#4`IgVIX#&Co3pPiQxk66%CN{g!hV(?{uFm0nnH3U-t`bltZv% zcb*n-US1FfmXBa9$RTkipK>8MS<}XSJu|X+f-kU0oW9I8O1`!!N;=gx1Ul3uJtO9% zxk0d*t}ao?Xw;ob7HtAjTBBl|V^57MLS z<{PF6nk=f*jGkA78rC$4+29tAC9h0k55Bm5=fAVgbJRRM9^}$k)|DlL#uf}1sq5rVJO0g zH?xZ3_xaWNY-nnMNb;Lcg3eq=eh#99q>!1O0<6dwB&PBibuNHP2!l@0?mUEpkK2-p zxcElO5oqZAUF3RxAbp1J{B%JE714)-X1;SWJ>wXVm3e3xc_ItCQ;@UzgSHJ@r>=-+ zr%l!)Mb8UwzU8w3VK*h$3o`X_14acZRjCjM`fZ4-jlja0k;Y=>2o9t%Boa)L+AZX* zEo!5AfjpHYZrtnEl+Wyc(6hanp1o`dO7tMvy 
z-C3EPvQseEHtzMvTAX7fh%O$=nEp_pH@DND9k3K2z2yAE^Id8OO;}_-=cgj|ThK&0j487HB8BD3^_-g#74 zfm3*Z64^O-GA{{+D4cAl)ibO@Nk?5U+V#+?FNE%C-|Z<;$2a8Kzizz}jzmV-=;ri4 zyb3qJv1fn)hl6tN>#5=v)8dH29SCi{efeIzCIqK-iOYS&n680-+`xsCW*kflp6_;` zli@Wn^GJi}q)&RuJz^V@gvu~N^SmNRxX^}RGB0K#uH-QX@{UKRAtybt*7TqY8v zc|NoWYX`JrU%B%tNy#m)cj3Mud!mUl3JGY90Iwu)dN$w zkdNmcB92yW^b_&8ukMV6;3xuTq zXI|jUIS_E_>Ee-xs4*U6YnVFg_m{dV1fcq)#P_G#nXf_>=6fF}$9xZy>JO@5$1M$Y z_XS=C?&c;Io|+OK+!q6N%#%Z$gl3~Rn4rmjdGke1cK$CL8Wf+IT?B44Y1A}nbTlda zGg*JI_}+Uv(1gOU9L{rgU^02)K=kxiTY?0oVJ&89|}jG{@EauS2^KEpY0aggnG6ZFA3|IwG`VtBSZBW6^qd;`_- z9>t3ZgFv<%RdJJFw$@xXGRgN)AM>mor31w6l_GqdYzG>Ei>C^mP$!LB)O}PRTQ%|6 zCM9?JT)L*zPW*tW^1-BQ0pqi%mv*6_wtU=-7yDA*3BaCQqj}e(OyVv>P)%jowiB(-Y1Dq+JN#PV(xI>8h%Og$Y=4YTv z!5q->WsBkRS3e!uz#xx}xB=7NsY_dtjwN$JuoRm|91w?VNHL`vgr?5uT*dd%X- zrI@t^#=K1*J7v!|QTrcOei-;O?O`tj+bguGIs^ z+mEnnBro6yG1OHzP z;`@9C6RPUIG>2Ghl~?#1c0nT}*%M4g`&vE^MmogH+r7jooM#2WEbSVoe2*J zD`vlF^g~kM`tW>-8ZK4@!0anU=-Gb1?S1i23VQ<43RgpWqb-7q)4s{x-uIoz;gK&M z$D1uM2#*x-6@|)E*|n1ssESbob(z;474>noAVA#^T4ey;#C=Nm@ul z167U)g0W0xyW@@|ll8vlL?dg;Ny^jKZS&woT)5Hz}L#VBOBf}W42oy2>$LnKPnLDSZlViUhe~HJQ z?PG;L#B{F2RB8shbK{4yM;sW;ZA#O6Q!o_~Y;ODoWY6h2iFdC+L$Z zzK8Is(tLG?K+3NaVV}guG6u zBs^F20K=ASMf=YfwSno-1T62i2gnr3&!Ep%_v)zxi!v4u5>^h3QU$K66g3DiPrOu?MRQ?DG;Qb4dQE@SGOd zTXufhoZ?`X`SGLHTK`3Ji{HP(G8M15MrE;_VosB^-w&{am7Djoz#)U0qDL{kS^mL| zN+d%hUj#4mf;7>f7~|mQTA8%?qsU^e!C+bK!l2YAVu0Qk+>?6AiIWY3?6%b z#VvSw3ceva(fgh})%2g0zU<)MDP^@%Eg(}NTzchjw+Ep`9wrZk7+*wSp)joD9EZzI zVJlQ@B?F()M;|DtJe=57xY6KCI+;Dtxwx<5`&%;jzBEXze54fH=NyP>9psj0VoFv{ zHCb9JsJrfnVGu4eR`5~c)I{aoJSoX8mOOWyWN?<9H3RPWXzt?L@MvOI4BXK)LVUat zR16Z1@Z{`(P>YgHzBWY;Wwt4kkWBuQ+rf81Qy>@M7CGF_!=sM94`7Y%7lX_w(q<3u z0#c}<6(#L!VDjvi#I0`x{K9*9s&L3}ryA$j*GjMm0l+J;6QkZN{0713toJ7=!68*? 
z@?>AQx%NNZm}ozV3|3%1?4Uh-4)y9+{hS7U0QQLk(>ZU|3C@yFI$z5eBz?!jm}znw zt8}M&;?w&bvL5XDV7pI5kjPY9Vr4(oux0dS7WtJ+6lrA;oH^AquTmc1Y~udbSx4_U z|LfdgtY!-ri~+wj0!Z{VCo#$>84Ck?nhdIbNok@yT3m5A9x-aZaGs_0D8l~anzKRa zuZ-DftRqOyVi79B2q$RaLV6WmNMun1WE_<}y%Id91fz~mjqk$N260^+X#j745PrG; za2{JQUIqWEO~+pldj4v%%?J@{d$)YB+Y^<5Fj}=5eh368P@PWohnI!<2}PKCAH0zh z^`Xra{7$n_RG!g@nR|7d(tANQn(n!KJ!9#!(eylIkjOekZZJhI?zl?>C0))eYNUl@ ze5Zcv70psb?|bRH8~keZyYZdaxuBE!qaNUwVCbtz_KIo$9c`cUh~o8q@tE(&X=Jo% z2=T?71gO((Opc@b$&3ZQ3XC;zwH>9dCe+3J7>6`n38_;*08O!wc`SJX;h z9jxfdjUU8jOQS})xs3ehkNgi5j6L#QloSuT7LIE`iSlE4&|CrCb>bKp;bI{=SmuC} zmsTG`J;dhyxb_Pkc0g&+(#pg{1}R)?!?ke7xT^Xgu9;n7+b=l-!= z<58EQ;z%DGNU~8AA~51Q5M*u>Agj;Rsb!Kl^DtW(fINiH#fa$sB#*-`)i<%KICfQf z66E8B6&B%5eKa?Zg(2hO?))@cmYG!8jJ@6ry1w~}q;89HcO%~Y7FEIi?B)-2J?$ol z{o%U5d);?;|E+B_9RHvv^G-dK)3@FJSM5lNWr5-7f7cxre`WcEHgZRu$CH z5e$=Too(z|Nk(h8SYLXUSLDs>Qz#mcZxl$GyXkN5H8sD3kd6DkuB7;@?;~(o4!R=_ zHb*s3Yr;%RXdkQSEab`hj`V(4VB1OlW5K5An7)RkO@GB_Wy6cx5`we)9Kx{S_uBMos`ETN7e*Duhjxo z{T2V8a>qZ~kAF0`f$!8l+J=Wp!7*v{^MNXX19#Ftw{J?|l5m^$8o*h40rCkRxYupmdj?buMBX@V7GCx7jsG`Wu{}m?V0HSHzvJt-4)Jz{@N4GIc|omr@#Vk0tf<|Pge@V( zTM5S}WEv>OF_B@Qa_@sj4pJEDIaM9&b-bcuaE;YtsC#&ER}zN()&cHy)an@tY#-A? 
zC6WQx5I3jHBVeJ;Z|nvx?72|ocpU_1@d{l#37yTSgzaR=rrqSqGp-d!fm{b5cmP0O z*3aS#Ka^-1qrSQQfCl|sxW?{-bpx0q-2cgnyFHvWKRKF6*AWxSowRUi8X5Zfen8hk zj!t!;Ch8WnJ%|rHVO&d0OvhBuCvxx%EopeN={~(*9ukeVck2RLFF+-_Z#EE$wjq!` zf$@j?d34OeHH9a}!50#+yD5yzw+Dd|2VocuY@0b(RzRmMp2*v^XiXW|IsS795N%iP z`pc_OZl=noPl&{@iG_f zz%dHOZ^>0fAL(eIl)i;7uB^y?|CjhLll0?}c=Qc=DXr+vMQcEi+n-?6v`(2}*t-HJ zsbu{3xvx4E9@$a7m^CP9kHd*%@0yDp8Q~9Yg0KaHS~qMHW#pkAx4+*}9J6pq3oxAx3Pl_@zLx1^(d&VLMBOgSu!`xD zSK;l32(hP^Tj&Y9&ei1`8ffq3oFY-Z2xBAq)&5Zq^AI7F$uRIo-(0A7%>+ZTfQAt} zbP3r~Ql`)@kpyJ*ISh@HogH^jxDR%IsF~{sNTA(xeMi*jv5`B4+b+EK*+a&nDObyIA&$Dpe66CmFZ{3Qp8kH;dvCLFtbitQIE59rV#MqE6OZ4CZGXx*pg zL;iw$#?ui~1qQb(`Mzr@$$cA%(3v@Ot{#7R`{qpS-TKJ!j2xV$XDnfANZ0X+5z|R+g0a@-@|EA z`VkioHCA==6$m1_`Ps;;cy;V|8|a;UyZCH~KW0gFtg``MS*nQh2%boCd9mv@Ug3;; zW=r5=h}o1?C8cTxUZXtsqvCN2wBISUjqB~jAb$1Q z@6p{$5iBkPZg3y(55lvR*|LP4)sT}G>~=#iUbEr3Je8LldUeL_9B(zkrz-&jJ9d;0 zCl%*&>~S5}*8Xv?X6G+x+5ZAE2|yw-w9ADm9g*JD*P{-d}PNrSaN{sN>`@`ncWd*5P%ub~gwop~_a>`o{V&kzo1H$o(=~XHS+zXZ&2s zwD8uL zm&$nM?VtRy1Lz0_0w^LWaG&W#Pce=7EHA{gIucuHOsF)|w*-EjmgoCjE3RkDmNyEh z>KXWrZ2Oo!S9A?aN}A^g&<#SVs?b%>Ug4Hr4TInFu2@Ywj~>$_$Q5eU3xw6xv|b5V zRPV#o2`CQNPhMDOTs1{vF25sWQjG2^7PK(h{r;Nd7J+Tu0OIa?wTOI-_pT~8_3XiD z+WhwxEWdLAKsxD2R-At2ZKS4ojonoOLqj?ef%>pDs@N`gio#OkU~wk2#HXPI7tl=w zQo3nc5cHY)1=@;c>;Z5bvGC*XHXiJ~?904gmEvEtk_nt}w+Y$?gs~QF`sel>!bT6o z2$K;w6%P(^@5x!H1K({jji$Vf!_P528_Kos&WD9L8jn z>?&^|H9?dk8)SonCn2t~hi!XuIzp2Gt=6lPIX9XK{if9Fau0rM2dTsj6dVt7rk#;q z)Pcc7!z2O(+V?i#edSM;y_jkhlZ**#M6zNZRMIyD9#R+il5XyNf1tpy`(#(*Y%&WW zAywo0JO+5z@&-(q{R(rd+rs(6Hu1`=C-YS#X*b2Hf{N$f-aR4xcv$3;nVph=kZ@%^9!_ zdqgA-JV!?1KOdfZNECwp?$r1`z$6pitl*RT7)oJP`g6;v>HwHkEZ<%em?zRV*dqgT z?G$JbkWr63$4@eFdkxsr5W#$SbnvQO#6i~K{I@iAev;B(dBaY>?bh^9@JBEmeeL?Z zBq)WsLD#RH{8}7AlR2dd!N-UIA)^?A_@lfaJklK%E`4Vnb2*goaz7D?9W>p?!00uX zjJlsLmE}(*wzth5D@dx`o>jRJakO;K(f66&Hvh-qQAw^k^Kjlfw*P8RPds*qptDMf ztL$bdiMl~i?RfHj*c;W zm<)mN$kRq^7!g^(fXIBZjUKBoB~JJIbKT5@I`c3`4xCpJhC3U-?&BFjV%0Y)>Bjy9}~FI8B&oot+nL)#f2Vcr$UY 
z)Ns&tjQVdt%-K}JBC)-ut{4zK$)+NeXZJc^nH}f!*ngGM2dlC5065tItNIe2PWPU? zBg)g0A^WFsf=ufa_siN2>$&I4A@i7ws7}xp z6q0U8@f^0G2kkr!N=eUu55>+SU}YlmWcLyzTnGOp z8EVCgHkp~kv7cUZ#G+hjg%Lz?Djm!kGQc%;9hDnbjCz}2z)*e-omhMLy(@xM3>Wcz zBTwU?KO>1R2ujp}Gg4S$>D}2Q`&MGsdP3d=ira1EC9szQT8Xr5*_giZ?{TS~FUJ5* zaarp8)g)(EUmBe?{$xRa)50s8=9ww?Ecxv3259s2KoJ#?Td&ypjnpwfGV{lDStLVA zhcn|aXk|-je{ru&>c~2K)BNdR=jm1rhy%C#;{@I}M#Nx-B6CeBa__*P$XEN_sQWuu z^uQ&ZGZ0;|@UH>N)hpTCyY8jLE+%Dxl?@@b$~D;+4hS^%+iJ}UNRLqd=uX_Sy%->V z+cn*jWSy@1z-0sS;?ESJN^R-BtCTWrcdtT9u`{eA-AM$xyJ`#Rpt(oA{lH4^lWZ3W z(i6h)t>m|z|A`Mw?ew}<{>}Wm6x#Q;8=OXnA?X^clJ(yk^7YV-BBdlQhWlNY!Vw|O zUtq)UYfULKVAw!aav*6WS@p*VfyHF!6ea)5%zUn&tkes`T+Yc5C3d@o%__Ad`}w>B z;JxAxQTyc{^tHk6`hJnQJ~f2oEz|6qeD0VQt549X1Y8^Jn&6S!B};-v19JjCe!B4INPZ@zaGLW2qm zn}t8<@Y~kiw3(7jf*9Wm9V;76^UKhug^+xv&>T+!S_Rn5p&ahTJ3(&Avl7cw34{Y* z^7<7}BrEMaf_UG(5GL(2624e$TUR(e#aKXl%^VODe>nOQ_Ieu%8_GP zeWzpgv;87s)xpEDKypFZA;eX&qL8a^_lNBAg!1A?vg{z`iH@tIdb};nC@bp7d)JYz zu6x*rZm0AGCRQ-V+~W-rCkJ9oUr&UJwVX~g-h|zvc9U~Csj+GGAki@oyy{dLvos?S zOJFL4O;W=AE6>H-0JX8bHEpeTH3ZiVGA2u}QcD_#)WBJ0r6rQKZE~yCO&i0y9}suw zv%glEw&oTk`REHN^`j1RX*c?Hfgo^g%fo77=XJM!-GoHmaElEmbp!1()8oV^_Mc+? 
ze6Y{%Ik)E@ujPSY6@5$QtTe0APaq4Xw}L31m8}z~nxe!Kf93GO0LjEyNN;mmgTMoE z&rd_o=lU!mk_mga3O1^!Uqpdw&9u>TWaQ}rr-Fgqk= zs|{Cdrq=QjY?FyPJmieSn;XO}C_n+S{*uCPwTFb4{>$2fB8(&RBf=|n8$3E4*XuiZ zPq(Cq7n*?nTyit6T~1Nqjz$UDoZw*L&|IZz(yMP`dMi2)GFH!vlL@bxa4;uqMF@dL zlhX4YyDM$7A5B2PghSZZ-OY!@sK>_g#gi)pVv+DEdFVmu;fG4)omf1Z;W(88wY$xV@@zGSY6f z7Kbn;!+^LBBijqodn1T zWI>>vRKhC`gD8>%5wN}w#wJ9l#DnZIy0;EE%5iPAzN+|VF@1sZ%o4HdW$eU|?yu)8 z)oNr*{neUjuwZ{0ciB5e-+m?jl%W4Z!08D++Fl=jj}?Z>t9>aUv7DCaFnij%%7*;o*4iFV@Z3NI&ITe-1DqaD#$^;7h9<^o!_A_r{Tp3Vj$0}A4?=+BpCi2 z#D)^_9MD*f3vuCj1>TmM+tG64A@+KXqyLx^(~nZn3;*1$>SN=jED+C%$jsbP2@NnS zD4$)JFywR?&T=c|^}jL_wY1MoI?QUv#WfPO;74m;R@0TphNcfG^a?klwM@Dc%gW|* zRvhIAS%LpVB#cQ0%mrH?!B&e6q43SGiqJ9d7${^lc@w%{YB zyZJp{J*oKSeuDOIldQhoU=noHii^)f#3CwK9{QGQbkP?^TKU*Kb-w@XOpOo_w;##E zAr5Rnwf~wvgC7lTGjG~VQDgC87=dmZDi6j5-k5ACh5DP3}sZaqE{*%J>0v6kkNV(g%U{iiiOd)WR>0D zLl1o=8^(Gr^o=#VTUY&bJ{Vu~k*)EttcvAjW{KV5E)k$}YrDA_^>c2u)fBNpngV)#>Cp=(+~@vfe!H0NJ2S0QQ5PH< zyUnfcqNaNlFR4gCHOKoVa6EZLnR`EUJpD ze1sG_P<2WM308u3EJHLeA{NR>oc=f>^iyBoP`l0Ncfl7WRj$@LeB_O71m}` z^J0zs(=(aR#(vKPjmsEoxB7qc{oKmIGGoP|O70x=U6WP5eg;bb1fp7VIAgm{ zFu#z}m^E)EbZk}TWcd$`Glx57F{KZvcW`8`lZ=H3sy#ZATK?(M+liuXp;URr`4%Bt z_2QU)g>lF3L1kAy;$ZS3%=;B``!dM@I)E8MpYD0y<_h&7(e%JWgau{PVwwVTnP2S& z3bIwcn=+kXC4oB(YaUiWwruzgkw{v9t;e7wQ3A?;UIe=jBul>MCCU1$rdzh~_XO_} zcAtOF<4tkQti|jqrAD8;OR26-dobIehy9p3L-x_`-496S=McH@IkzD*$pobv^yE>C zqbjt>u7G{aWoaTct$NKp$BnG!y^Yq9_HUszWQ9lOngOcy%ImO4e;&Vo?CxCc3UF-q zG>?pOi`CIwTz?DG>_nN_D8xy`v9=dYAjLYJ<12RR?0U-1@643dOvFMGap9PzE8*AK zXzD)bA~grp%jsS_iIG2J-nM}=d}zsl4w4VPH=J#f<2=kRFusNM34<}^KU4g)JD#Av zL{>P6{*a*6ul|P@R74Vn-uE797#kBk@kzypc>T!U{(Mhixirdd)U%8TNU8i?q8Tp0 z4VxDB7-u!D8H7YeD^_pwF|ow=JV@e7D@I;j@%8m`Iv0}=RXrq32;R7HT*UwoH`~_-N_`@*0)On1EtBV zqvs}D%Lm?>6{vhVJk;H^db6TKTnf$kN_>Je2uGFf+E<@(hiS`YeSd zGv@tRlglRMF+7F#pXX1vcUG*D!N8dV-F@GFPqC-mgBW^p5dC18-`&CqtJIUl+4z;9 zFP`xe+&i2Inb^>txq4YT7_g=T_^5jCsh`jCc|@o}&+QO&&1F-)$3O|rZEz6|P#$Lw zM>L~F`@_U8R2m84*u-0no-aj%&gj5Ur+j|<{(P3WXoe=!6|Zc%IVmg07Rq2fWOjIg 
z7uxZ#f^NVi<{1ZemiF*hVrSP&JB1#`Eun<^+G&ys)71S1Bu4>+L&J()bvc_Q&JHF< zV)P~E;`L2yH18qLcJoU4RFhm@Jl4gOTAIl89n)KF1Lx4MH3u@6mBock3%IuZU(jMq znDT0;zug7NQ>(-&=~{{&u&s()A7Jo5lzq!Cs1&;DGC`Bs@;Sy$;>mbdKxEi-dLk<9 zuw-VuTSZG@!}O;0w&w%Hf$vrf%ah*;l;f#>IAbH}h z>dzIBpO6tnd)V>neJ6wJ=TYlW*{;3T@@89&hffob{(OOX-Gkg{TzI)Z+En*$)n-Vu z?UL6YJ=XWbY$F;_(GvALBI$~0>u>n?fpoLJX^z6ohbu~l)~%E!rIwBw%-E{hY1Wea zx;+t<61z{gC3ZgOxx}8c*n|}XR;v0|fle?T5jKgN#mxG|i4T;=sZ_jai@(zR;PpjD zP=x|*=b!#>#%K15Y^?lvPU}L;7i;G+PBh2Xu`+l2FV6k@h=Yy=2~Jm_)Kk4HjTokf zJFL0Cd(W?j)M-Y4+(JHFX}$3%_hx=Vj-%q84J%es_gJF%-hH1bXlL>6N-6NDjbFwY z7l_aJ*wOyr=BwDRZi;fwBW~OD5Baely7_S3$39`_ty|K}$Qt4x;ziZ%$(r4nz1_9~ zRMv?8AK@~5!c;I+dn}}rU?@2AQYERt{2wW^I=`)9KhFp$t=&atrDP8L)BP+Rq}aSVI#3$5N`|B>{bwe zWAsi_vD|26wQh!-yA83%R{0&tdA#YB_Y$V|YM*!aWWThz-W`Q>Plm&89Up+Ms|23* zrNO)8F=Ylrv2m6UAKQLv=L*BHLmb+XlDj-$*Ld4R$nwvEz2Po&>DlrtICY4x2I|%nuw@dBX9Y5Icqy~Xkkrz|WPmp2 zw>R9iL5g4ZBqG7Fcf9Z(qFBhN8qTA%q9Q+fUQhpH43gmo#%VcA0R0+Jj{tsX7F z28EUnD%7yPJ@BP~UX3dICdrmp%W1WCm5$CTMj5D2TX#Rh>6C1vzTDmKLATx?P?nGU zlFEISN5IUk8yInhH+DQBG9i~3^H)C_KzeHE-{uhaf#1E^-~EvD83YXcTI()AZx5dB z(AS&plgK13KWg?8@`z_6;Rwz(oS@^PZBFhgAMPHi4IkO046ku43kZ@?n%Pt5irh~7 zzuOE`{bDRwCGwY03fK(y^9mi^n+`Q399G3}y!q zOnQBit<3s4lL^l|&+=bRlIOp)B*{w|ix~~o;^NpO7w!CZx-HBoy27mr`J>fu>(Hl? 
z%!rsYQ}}-%X0WkM2Yq>H=4!jpXMAY-Z2^z?k7>w5yN1qyARA+lqpgmlT{)Pm=TSN< zDIy+Qso4|)@y?WWlD_L^;(~5nM<2H{am1SdN$%%a3{{!jZ+48Mkf`dmZe2Gzgo(+~ z80%xAA&W8nZyHBq`-?=FUaEWrSH9jeJ%sQ+C((AVL^_)9&-@uHR8ui~(sG{;8C5bSw; zuH#5m&F1#5`NZ~n-DUSezU7~J%fmff%Tuk{`mwyj3thirbckn@5B*d*F*YiDw5z@U zYcCBHBo;}Dg|jJ%vlsS?$MQC(y@8#&?|VkvjK)}4%BBq@dBlE9eoyKER{&O~BZl%2m;9~$ zGz#h<2X=%=_K`Fi%d4$?atOWzlBk^fJRthrr>>~|L+;oAbAJeg^g1`ilX(m!NT3G3 zYp3r%xyg>0`sfpq)!EbCB{_-+DKU{}Faidj9B?L6Hs?T0E%8UUoH(djU6zDBK7gM7 zkV>ZNS?3@W#uEsz7`v8Q53aSjK29v10lshEUiq3_i;(gEp;>-UqgnV|x4f_r-qF1L75{WgUPe9_VPO?K?6V=2bZhy{l851{oR`?Y?0(A;YS2jH4@2%shPeTwjJzr$r zbR=3+TLxnGWbkx<<3^hi#Mr!pr3ogy3v;H_n<9uKCin02-Ha#Pqm2P7@u|+~#lvUT z)Fc#X%m0gV000w3KumESkYjGb0!Y6Ylrz5grYQ3Nv7AqQ|6z9|&KFp411?;GwPOG) zlEH6!@z?*pMH+}qI5omgy;#NqxA?SEmD4raWWQe_n+%5XeSQ;P)6Z0Mg)q5&%Wv_wCXP)IDj&h8&od{!EsgB zkjb0PMls~1~L{)SDgaj;<{hi(z)hn8mz28fxz7cdht%cmEQ z2gL-U*H4bY4WXP}H~(G)^fCmCloCv`PdvES6%(p|pY|5bDfck{wAiCRK7n07k3Dr(2A4UH z%U;P8{rfV&mCj7OB;tc5_;kmNe+O}(_&9;!KU3>&Wm1IPo4h87>}7s2;fW(s67SIQo4@FPSm2* zwKf-pe)a#}JLp`dsWC9MlERcp0kB_%{r8AQhE$o*u7D1>qjqNDZVE}Swfdtlm;WBG z;3TL;y;KfXk(GAi5ImviJb}PtE{E1+PeD) zeqZY3cht(^ zxlW(qtRJG6^wgo)D9gIC;8}Q^IRy#=M&PiQL?1-k9b-@>}R>rq93k2=D1!{5^jw*}3 z_&2|S$h!J&bKdfc&SFQb#2ThR261Gx8Y9ZmHi}k^Xr0+_X6*XO8Jc$G|EHZJyu5mk UK}nk4X~h4Amf^K(4Tq@z4`@ENEdT%j literal 0 HcmV?d00001 diff --git a/doc/source/index.rst b/doc/source/index.rst index d5318fc7..6525abf7 100644 --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -2,11 +2,6 @@ Gnocchi – Metric as a Service ================================== -.. image:: gnocchi-logo.jpg - :align: right - :width: 20% - :alt: Gnocchi logo - .. include:: ../../README.rst :start-line: 6 -- GitLab From 977a4218cb558bec496827a74e396462ee5fc5bd Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Thu, 20 Apr 2017 17:20:54 +0200 Subject: [PATCH 0696/1483] tox: factorize docs dependencies This has been fixed in tox 2.4. 
Change-Id: I040bd1a12ade7a255e0d52c2c1fb6ff61be007d9 --- tox.ini | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/tox.ini b/tox.ini index 64612a36..dbef454f 100644 --- a/tox.ini +++ b/tox.ini @@ -1,5 +1,5 @@ [tox] -minversion = 1.8 +minversion = 2.4 envlist = py{35,27}-{postgresql,mysql}{,-file,-swift,-ceph,-s3},pep8,bashate [testenv] @@ -117,10 +117,11 @@ commands = gnocchi-config-generator [testenv:docs] basepython = python2.7 -# This does not work, see: https://bitbucket.org/hpk42/tox/issues/302 +## This does not work, see: https://github.com/tox-dev/tox/issues/509 # deps = {[testenv]deps} -# .[doc] -deps = .[test,postgresql,file,doc] +# .[postgresql,doc] +# setenv = GNOCCHI_STORAGE_DEPS=file +deps = .[test,file,postgresql,doc] setenv = GNOCCHI_TEST_STORAGE_DRIVER=file GNOCCHI_TEST_INDEXER_DRIVER=postgresql commands = doc8 --ignore-path doc/source/rest.rst doc/source @@ -128,7 +129,8 @@ commands = doc8 --ignore-path doc/source/rest.rst doc/source [testenv:docs-gnocchi.xyz] basepython = python2.7 -deps = .[file,postgresql,test,doc] +setenv = GNOCCHI_STORAGE_DEPS=file +deps = {[testenv:docs]deps} sphinxcontrib-versioning # for 2.x doc pytimeparse -- GitLab From 882548f2b31bcc1e7ac88257c5e3c57cfd174a32 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Thu, 20 Apr 2017 17:22:17 +0200 Subject: [PATCH 0697/1483] tox: remove useless variable declarations Those are only used when running tests, otherwise they're just either default or passed via pifpaf Change-Id: I3db6a69ee35be3e7cd627bbeb929dcfe4c548603 --- tox.ini | 2 -- 1 file changed, 2 deletions(-) diff --git a/tox.ini b/tox.ini index dbef454f..4bc2d69c 100644 --- a/tox.ini +++ b/tox.ini @@ -122,8 +122,6 @@ basepython = python2.7 # .[postgresql,doc] # setenv = GNOCCHI_STORAGE_DEPS=file deps = .[test,file,postgresql,doc] -setenv = GNOCCHI_TEST_STORAGE_DRIVER=file - GNOCCHI_TEST_INDEXER_DRIVER=postgresql commands = doc8 --ignore-path doc/source/rest.rst doc/source pifpaf -g 
GNOCCHI_INDEXER_URL run postgresql -- python setup.py build_sphinx -- GitLab From 657b66f1b018a07de11e63a69e857922b1733a36 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Fri, 21 Apr 2017 19:47:19 +0200 Subject: [PATCH 0698/1483] doc: fix deps for old version Change-Id: I0d2cb10afc8e5ca17cc95ee6c9fb9de347808ba5 --- tox.ini | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tox.ini b/tox.ini index 64612a36..bafeefd4 100644 --- a/tox.ini +++ b/tox.ini @@ -133,5 +133,8 @@ deps = .[file,postgresql,test,doc] # for 2.x doc pytimeparse retrying +# for 3.x doc + sphinx_rtd_theme + oslosphinx commands = pifpaf -g GNOCCHI_INDEXER_URL run postgresql -- sphinx-versioning build doc/source doc/build/html -- GitLab From b6188b69c9405079e9c8e9a08ba2d57d81a07bdb Mon Sep 17 00:00:00 2001 From: gord chung Date: Sun, 9 Apr 2017 13:46:12 -0400 Subject: [PATCH 0699/1483] hide lock error on delete_metric tooz non-blocking lock will raise an error if done through contextmanager. delete_metric uses this functionality. we shouldn't show an error log in this case since it likely means another process is handling it. this raises an non-critical error to be caught so worker skips processing, either another process handles it, or we will come back to it. also, metric should only be removed from indexer if storage completes succesfully. 
Change-Id: I3ab657072706c648cb66eee83a58b69feb919b93 --- gnocchi/storage/__init__.py | 20 +++++++++++++------- gnocchi/storage/_carbonara.py | 7 ++++++- 2 files changed, 19 insertions(+), 8 deletions(-) diff --git a/gnocchi/storage/__init__.py b/gnocchi/storage/__init__.py index e6416f44..3a7a5e06 100644 --- a/gnocchi/storage/__init__.py +++ b/gnocchi/storage/__init__.py @@ -129,6 +129,14 @@ class MetricUnaggregatable(StorageError): % (", ".join((str(m.id) for m in metrics)), reason)) +class LockedMetric(StorageError): + """Error raised when this metric is already being handled by another.""" + + def __init__(self, metric): + self.metric = metric + super(LockedMetric, self).__init__("Metric %s is locked" % metric) + + def get_driver_class(namespace, conf): """Return the storage driver class. @@ -190,18 +198,16 @@ class StorageDriver(object): for m in metrics_to_expunge: try: self.delete_metric(m, sync) + index.expunge_metric(m.id) + except (indexer.NoSuchMetric, LockedMetric): + # It's possible another process deleted or is deleting the + # metric, not a big deal + pass except Exception: if sync: raise LOG.error("Unable to expunge metric %s from storage", m, exc_info=True) - continue - try: - index.expunge_metric(m.id) - except indexer.NoSuchMetric: - # It's possible another process deleted the metric in the mean - # time, not a big deal - pass @staticmethod def process_new_measures(indexer, metrics, sync=False): diff --git a/gnocchi/storage/_carbonara.py b/gnocchi/storage/_carbonara.py index e53f1f0a..a786da2e 100644 --- a/gnocchi/storage/_carbonara.py +++ b/gnocchi/storage/_carbonara.py @@ -339,9 +339,14 @@ class CarbonaraBasedStorage(storage.StorageDriver): def delete_metric(self, metric, sync=False): LOG.debug("Deleting metric %s", metric) - with self._lock(metric.id)(blocking=sync): + lock = self._lock(metric.id) + if not lock.acquire(blocking=sync): + raise storage.LockedMetric(metric) + try: self._delete_metric(metric) 
self.incoming.delete_unprocessed_measures_for_metric_id(metric.id) + finally: + lock.release() @staticmethod def _delete_metric_measures(metric, timestamp_key, -- GitLab From f8569ccab6e433a3e3c7a4f42ea0b93e8e5a547b Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Sat, 22 Apr 2017 11:16:45 +0200 Subject: [PATCH 0700/1483] doc: override html theme for old version Change-Id: Id949ca8c1d81c0eaa8b7d2d388fee8e6a80f756b --- README.rst | 2 +- doc/source/{ => _static}/gnocchi-icon.png | Bin doc/source/{ => _static}/gnocchi-logo.png | Bin doc/source/conf.py | 9 +++++++++ tox.ini | 1 - 5 files changed, 10 insertions(+), 2 deletions(-) rename doc/source/{ => _static}/gnocchi-icon.png (100%) rename doc/source/{ => _static}/gnocchi-logo.png (100%) diff --git a/README.rst b/README.rst index 5aad556d..ca172f4d 100644 --- a/README.rst +++ b/README.rst @@ -2,7 +2,7 @@ Gnocchi - Metric as a Service =============================== -.. image:: doc/source/gnocchi-logo.png +.. image:: doc/source/_static/gnocchi-logo.png Gnocchi is a multi-tenant timeseries, metrics and resources database. It provides an `HTTP REST`_ interface to create and manipulate the data. 
It is diff --git a/doc/source/gnocchi-icon.png b/doc/source/_static/gnocchi-icon.png similarity index 100% rename from doc/source/gnocchi-icon.png rename to doc/source/_static/gnocchi-icon.png diff --git a/doc/source/gnocchi-logo.png b/doc/source/_static/gnocchi-logo.png similarity index 100% rename from doc/source/gnocchi-logo.png rename to doc/source/_static/gnocchi-logo.png diff --git a/doc/source/conf.py b/doc/source/conf.py index 064eb03c..b3019d44 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -184,3 +184,12 @@ scv_banner_greatest_tag = True scv_priority = 'branches' scv_whitelist_branches = ('master', '^stable/(2\.1|2\.2|[3-9]\.)') scv_whitelist_tags = ("^[2-9]\.",) + +here = os.path.dirname(os.path.realpath(__file__)) +html_static_path_abs = ",".join([os.path.join(here, p) for p in html_static_path]) +# Override some conf for old version +scv_overflow = ("-D", "html_theme=sphinx_rtd_theme", + "-D", "html_theme_options.logo_only=True", + "-D", "html_logo=gnocchi-logo.png", + "-D", "html_favicon=gnocchi-icon.png", + "-D", "html_static_path=%s" % html_static_path_abs) diff --git a/tox.ini b/tox.ini index bafeefd4..61315f9e 100644 --- a/tox.ini +++ b/tox.ini @@ -134,7 +134,6 @@ deps = .[file,postgresql,test,doc] pytimeparse retrying # for 3.x doc - sphinx_rtd_theme oslosphinx commands = pifpaf -g GNOCCHI_INDEXER_URL run postgresql -- sphinx-versioning build doc/source doc/build/html -- GitLab From aae3529160811d0b360c6a7d286bfb11b8cfb4ee Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Mon, 24 Apr 2017 08:06:25 +0200 Subject: [PATCH 0701/1483] doc: fix sphinx warning This will ensure future build are not broken. 
Change-Id: Ibc644db35fafbe7aeb09c1c9f14e8818527ef2d2 --- ...nocchi-icon.png => gnocchi-icon-source.png} | Bin doc/source/_static/gnocchi-icon.ico | Bin 0 -> 370334 bytes doc/source/conf.py | 10 ++++++---- doc/source/rest.j2 | 2 +- tox.ini | 2 +- 5 files changed, 8 insertions(+), 6 deletions(-) rename doc/source/_static/{gnocchi-icon.png => gnocchi-icon-source.png} (100%) create mode 100644 doc/source/_static/gnocchi-icon.ico diff --git a/doc/source/_static/gnocchi-icon.png b/doc/source/_static/gnocchi-icon-source.png similarity index 100% rename from doc/source/_static/gnocchi-icon.png rename to doc/source/_static/gnocchi-icon-source.png diff --git a/doc/source/_static/gnocchi-icon.ico b/doc/source/_static/gnocchi-icon.ico new file mode 100644 index 0000000000000000000000000000000000000000..783bde939396df274f409f34cfbae74b43b0aa68 GIT binary patch literal 370334 zcmeHw37BMARc00~J6NJXOLyfKnYDFRX4bxRkCr;9AR{U+-w2A@E+aDzh%-8hN~JIe zj5C0YI^qT>=va)>)z!6jb!B8mWY&^VjwncjAcfk{*x1ctcgB)c*Z;o}FY?99_g=h+ zcccK4UG-G<&U2{^nASD!T0$1?30?G^ZTFv z*(dXl{L5GmJh!Z#Jzi{l^JKa4^M!@_?~j-2=f@XoAHwyi(S_5kz%zz zd~x;0aIt!0%lzt1>*9O;`TXh{WNNq`A1&6tFt$+rEXw$Jp;TWPFV)UYlek6}?N>0bUmjhkJ*wyJB(UDcJ*iuu z&W}+a0WbU^+VF$7o>R|EzE`CN(gW%40qEJUpxzB#{ISI5_dYNmNC$zwLOTPz@Hen8 zet)9Wc+t?1Y8tBPek@Wu>48XlV6wdSeV8XN(OwUPrKm8PK)KWx#un-i7fQ9$;Dx(~ zPOGOz+SyXL^gv2_U~Im&gY!LbU(FltecKBk1N;mBR46y@D=gGrmXgOM)sP+ttp_;Q zjV;zbX`G%Pkq$A8wTkhD+DGAYeCzP}$8QVmK}bQ;12Oji*3@1%wov<$?;IErj*Ph{ z{0Zy$9yi(FIM)mA^MQLeUZ8IW`x*bcV`#`*!+T(2ss6(f z$D3c=yKw==!xu3we%$b(pZn%*S*}iE|IR05-WUA4@c=(VywRtIi}eG!2aACnK)s)P z(!8F1oS%MXhAQeA20|g1O@i;|{?yI=!2i~BRdvUi zs^){h<1?pm2qW{W3g-+cU{g(&>uT##U2R`(s2!Kq)E$>`U0G98S8nKQ*VP-!d0F34 zZYSjVJN};KusrNdRER55qs6MiT%pfu(r&)Z30Jt()#R@B40E9yIQiKR?7 zT5Zy2_t-F&6)vE!VES}T9l58j4uTJ8D~#f^tkmDHo`|*5x;*vp>6Q5B_Mmw8m8+=t zPc5sj&Lk{yRB4<`x8gSzv+x~pKLdT>PH+LQJ@?hL{~&9+0c~sA+tjJkJkR^$-eHG8 
zC;05lRrT#Tf>frO3S08Gvw-oSbtD(iHXsk&bx%Xv3cH{ujA30NXcD9Qmn08c8fbty`0}u)cie>w=sQT#kI3~juD6*y=R2`)e?NOoJtqFbL)c5Up$n$IH1pia^ASJ^E6XmrRr!EF4rZovK zE}+e@54OVg2y6x9G*&y|+s`xJJ=zzw#G{1<9x8)P%#?Ph&hiRw>eTTCS-ywOxx@ar(v#!86 z+Ub=WGWR?1?!^o65qxg?iaHW68uEEJ_yND12JEG+4r7irMT-OMfA`;8*YU}WOY*8e z<9Zm=<2BahbMF{t@IV#ohu@P=WX99#$#U&iyvDA4-o!P1gVTsFpgmy6zx}lpgR~8M znC$x%=<}1g-y7qB_^+bYAos)ZAR>Qvfgj*e!MgVxZtok;^|S%}xPZK1`3|NoV!csx z|4{TF=mR%rSJY4C524Yv7`eT^+djtSbucdAJkLD{+?!9cWd2d4;L6|WJ*8nom2q3`c2-%y87udf|A@$Y4}5R}`oZ}%%_E{81ot1p zE_h9T5F36=;jjLjN!W~|E!@BBg?*>3yywbIb>IPT0rY>T4&e9xPVfOaWM2vPLXP$g zIr}fjd-@5nZutG-qr?0Xk#Bx1uW`Zsa0U1C@NB&x<8p%x?Ed`=uoF)8#s%aF>Jo>} zuBrWt!Ur4rwdKbI57b~6e3SX84Y!5N8P^XM^7xyrnES(u>!qDQoxc}5!O|HtADjUn zfKPT`&FJ|-5v@Of2OfhyusvLqm|a>{&j^c~gCg700}Ssk-cU#S&Is5!?<$SV$yoKCw!xyVJ;>J^!?#Hvcc#d`)7}|RYx&hV;=?Adp{9gZZJ>tMT z_~3HB`2hOBnV~uLq<~1str*s`zCTNNjJvMHf8VzMAi6I623%j%YYGGK!IAsG2Nw~8 z0B(tc!_syS#16bMx8M%gaB#o};>J~0?w1(;o!o)-fPL@-(jVB5@4(U{j+_J^U~Pha zh`g*ha;*Tl3|<%z4!IOVY~ZJ}g2}k+8JGUw)7(kl0oN7a4;+*a@)Dy6UEs4bOX~T# z1bBc(56@RqkuNZAe0RHFWd1+r96!kW79R|%AGBi>VS5;>A5_#m0pO53k?~S}wi_1m z_&wwPdYYRD!Eso>wd@7PBo5#|&@oAjQ8X|n3HuCxFn1vD(%{K*<8^t&xL=#Ozdx+? z|HOEN{zU9Mpsr6JfaMS9^{;)7s`$M{P2*s@8|=}Y`!iZ zePR#R1h`hnHN$>=2>cy=5cEat$#6^(c;F9n1@bP99b2gVUEXl+*Iu6IpV-{(gW*u? 
zM4JJ6KF=`dS4Yr%06xiTOw#mK_4Qq#k&Caf-{ozxS9txb0Pgz@6>D)}dcgZIj)+*+ zaR@!`XR>Sy?tM6LF*`9ySSPqPG^94=0_Yx%#CZR6*(0=$f7$>+_x~Bq6YwL@ZqS^- zScE=&i#jHWF$gJ-Ny@W-5I%ta=cfK?wJYTbbbNIL799`z&VeSo{#hv!N-bSng3}_E@^wZMPj=bA7dJ05U{uKM~wx$OT)I7>aU3e|88e;ffFXqud1CmXOnq-nah_sx|qXr z3ZIc9_`S~G2iYWbk5tA2+A!A(J`<6JuE5x1+7H|Z&ApG^)*jLlg8;w4gL&}_AaBq& z4i}T%8%VtKto>=6#mzh%R=#hZyL|xf_kb(--A?KdkYjG2sC9^mocaMH=L)PnXkNfN zBG(_ZzTn|o(0qWnrlihA!Cr#zaG5fV`bg&wU#LBvk5x9fiIH+7XCHW~>5r1}8UAe>fEPxhNHN-YCGk zG^Vgv`>-1Z)A$*&O*?^jr*2^B3f3Bw{jZ%LnDa;L5AZ<*wJ&%cP-^kgFV3?1JH#Am zy#am(?sL}sfHP4JfhVGlLEyf^T@^Lq0v#hhnpkYSA&poMS|{oX^es-oMxZXxEuW9Y z5te;HUj%=<9l8Vkl~&F#y%vahNjM<*nZ1ei6`vF15~F-xD{9UNz&jc-Qc?xu_5^|x zh-J?C#5eQ(TDTATm%ljxzZL!3jC<94=`$Srniw{-s&ly18@|DhIpAZ&+q}d9w|$KP z><2%d;Mx`A;XAkizAeF626P1%T=)9iU=y5~x~%Rn0{mzM;s9P9RNppDu3!7XiyxQ| ztj!IuS9SbblzM3h`+BLd8AlfH4WQ+x5u&{u%>VTekNkt z3-3Wcr~jJk;PhG1$4y_iobPRvzObb>v@>ix8g ze+>}l){P77-_y^)HE*8fNqr&9d+%NQ&ScxAq8`awu98BjaZ+@E&e-F=&IDkb8_wb3 ze9qh*-C|$um=E%rdug~=mv)3+mk|F#YJIQp6@F^!s`}b2=mX;y)UA>$q!a(!mP6VP z2#gY|#H>EQ(#t<$T5tWw$2dGV03#f@5(By&G_XC3y+GlS>zzNnm z#6h0T0UIUKm^qW}`vKbq-@r@L$^&vf@VX8#l9Drl!Z@SLsT)}D^`!Q(=a>HTyNuw3ih4)dIG|8g zBe9(g62dxNlw7rKDI_0Y)bn4Fy3)M%mg43w# zISl>8sPjNV$N^ySfc2L?1n$chJ_uo*p`seI%j)(t@W7S_p157?fK_Gsxxj7zBG3Cq zEP;jVe*X#|49)|H8F*tFIAF3|`<1BX)~u9^-0At!acw$67;y>OUO*fIZ4p^>49*Q) zBbYj_o{}^V6c%bLS;1;l^)$)`=+zfVKLXv!6pn1TEtY4JhKTvgwm zGzW|pYyT1zwz5#}_ON1sd&OQ#<@J6@obk=`4hFRubiN?w50m%5&pFYu8Q=#T#AdMeGB6%#H_G(9U)K63jQUp87m~CGa8}SyWMQt3s*b6Q z1qx$aMNx*$W?(##rTh2#mwdqejIn2hGG~~<>k2=S1P3IYM>q4zE9z7riFW;>uq9^yGlz32$!4ayth zN4!574yfja1Gq16Ms$Ge#OL&&Beea9%+r$5XN>(rpA-&QKg%l${X^CQ(yaq@vonJF z5tyTeXDDPYJ{fz7lgLk&CtF}fVpOxE19YNnW6qP{t94nU01KSyDEr>RWt4eS^rIh-f++W&4_ud*Hj{ z<^Y_Z^RYDHHHx~JenwcH@!fOVv1XuiK*BDt_C9Le0Q(rz5N`kucxBuiP$<>UM}fOE zl_@bnIkE$K)bSWs!2I!tTIVCscG!=2gWU<`YDB#7e~6m{Cd!RBr3tTL*G2XR3;qM^ z3e*vJrW))6#xZk$L)?3sXDaGH#?1j^i;bgU<1S6*-EnzMd;vYqje#*^dbzC_C7yj0 zmz|IV2at0RKX)TdxDBf=vNu37eQ8e1?k&tRl^!%d$9rG1I$kF!_R 
z55#Q?@O~4e#s|Zid(%{o*a4ykgsKO`!HX63oyl-O^+jpIZdi4Z^#I|3tZ~51HT88# zZ~*U#nBhiP*h@n>xyu_A#~da1h}=HF`ND;`Ibd^1J!iaB`)C?)8)h9OZ;0psSHfkiIW2+78`es7OPK$i9NTnh96i}yxhKv>a*z;#CgcPC4G>wF@Q4!-W(SPz-Rv2 z;fvMS)&$2ctg5Z&s%ppCs@jIXqj(<`{<$V7_!m7Oi(>$J#~z(Ir=A%#4`3hdyOPW= zHh%xA+I707_MJqo-}`E6=b5T1;Il0u=?aCs`##v{?3B4*x4uI`{Hmdoz zpK>ieIC!G2j@(lRFVrqAj-0QLu!`oU>jzo9Nb{eXO6 z<*7M%Z(Yrvs%aa7d=W%fkh21W10uj>toG&f6?G(R9zb5frMR#h#(jwQDe?h0z~Td) zj|#fNUf2-i3NKwj;sS&Nvd#hU4gOx3902?HR2aBSstnBs@EII%@PWk*c3olO0?rGG zKBha?IU(!gS?b>hCnF#E*t~jL7(4*||2LBK|AoyPTwkCMk$!_ap%Yjpy7{{+)b6pCQ*8_hN0qiw`Vr;F*%l zdCz-|e#|FW|g6 z`W%H9gcqXZg^K!2uN*M8SbNCAMPB|EO11vRVfze8oHLDe1lj`K@bC3~do6-?1kaxn z|3ZJyBuYGGwCr7%)l5(xm|VK?s=VOd-cIcU2;=$Uyg1J5gXjo$j?jL`Q%%2Pdp#nd zGpm!dY31I7`Bbof;MGAnAP+f%?AYh)$j%I;j)1wG*yTCXgXjo$Uf^rmAo}cTOJ~4& z)=8abFZdh`pKBH$hBR8xZ#I`=mB~310gTD?C2Vm5Z2m#l9PD`S_chlosIOQ#v<_Ts>Js~wTCq@K zZ={+hU=O^}j|1Qf{IaQ7_i9(>atUjHLes~1=xqP73H@?Fkl$&Zz&Zx&r+pI0yGNTt zcp?S%Mn&D@#Q|gUwH}_BjN>a+ES1`h?9l(P4KO@N9av z1-to_2mk)#UK{|w*gL%DN>e{KHkUB4Lm!{T3!IOSoE$86DM~)@DqH&}P;bjW!F?Od z=gruQy-R2#xKH+B^rl;2eXuq(q&B(n0L}-fn2L3;b_I?L9-MpAVI$Ixz#1Xk*wsb zFxwmge$cuDa6{Yu0Bm^9yP7AkPC!2(_4l~(9oGHabPC2!kPjKJq1Q%p;?MLtV-PvQ za&txiIj!%#0jB5#{kDU42Ikd0&?kr;+8dfPfG7GR=mR8g970Sab$D_I&t1t1e`r62 z!~W25Rt9ke)(5}esRul_te$P+T<_PWotM_yx={-KHfGk1`Z)Cs*zuY}5Zi3=2yJ|F z3GIFAAev90hj7n@<`noe=wAm>uzlsEh3k&*n-jr zc^?B6b;^wc#>@5hxZ%d6&y3wGocA#;?eC4(E9(7j98k!?`r!6ui477xXkb0MH@u~g zpJrFoZ@6&)&lSLa+8ZX#`_8sVY>frEq@0zxU#R(Z$DKU}~=6@r6F~iKft=lW{CuHpm#5xuy<# zaR5I@EbXUF(*LctM3zrbI70NRjE#Yv74-r?4wzW1y}`nbNq=QcK<3Ut=1x<7ix|PG z*SW%NUZ|)iBW}2168_ni+?^quvAM|$&=|nF z;GgSjN9_pdJGc1%Q2r zNHZ64_Wc5Kewc88Z~(?YMSY+H-~rzNKYy8ZT|o8>3kL`XV4uK>dTl`1x5W@I^vfpr z1SCdKI6yc+sa;pp{x-M+yqG9A;>G_1t7`ND@}1#t*m}p-ay{}j7a3#1GpQZ}74`X{ zIrXFf(AOf^Kll@g>>UFBC(f^`?PsfM>P%J5oT{l^r)z2l-cO#ds%_a35oV)o%*t&RV<$)8%!3oq0W`Tclfipj}8%NAsXsCV2HOm~d zIxlSuyqotiC+$xU??MN-F|(weAA|$22JoTK@ETJtbNUdrovW&8*bN8nZR!b5Ua-Gs zp86xmS|!Bft>{FF=nVN0e@;z2u0ixH2|Iq`v9V+rxeq 
zumf)Hx~hh|=>dgO?R4g`?qhpw$TJL_a$Y}haZMdQv!?DmF&HP<`GK!l9=YbwS#ZSt z&?V-<5sTo6<(p~`Iieyw5%yS@@;rI0*bV=;Ej%$iTujM2fDexQ<)}b$!oVbb0sDbd zV)iiR_(63ACokCFYoEkP_$0tT)J@1m^hxX$zoqC7?IAFYN-gzN*zUcc5 zG4KNCZR+{-9fn~;*m=P3H95lWlMs8ubB+_v@Z*e%dZJKKn|(O|Imb=UHyrBuLGTB1 zuBL8JU%(N}<$hR>@;j|dz}|4`65_iQzG&M;74;rp{Nu~XQtd7FIn%V)vE&r6^Fqw} zLN|_S+Z<+=&^Msp5yDa|_;_mF?!k0A%?i+)pHT!-aFP7+?~bu9m7H~imz zMSYD4djjp4cfRi17vN`O_4^&hFvmHuKp!A+L|cMb8x*g3`n~3+*4hi#U-U(u;)VX~ zxX4}BMGmuHaU-b1d{>)l82H(ZGCWKr9PL$uxlU8lXSL*1p@bC39YoEuo^ z#sP)JTE&hR(_Y8o3yAVLk{h5SQ;$AyvAH(Ed5dE(PxF4%ZEgEZs3UORr+ow4rtLFn z`Af8~BunemTx*+HVI;(AFSE=;PD=2;e4epM=IMZF$Rv=Nc#HVbn|8;IOM1JEiAi zu46zCB8K%^24k{YK1JyxjOYJ6=qDExE;dTqh@Ya9#wav~Qk% zdR*iZebI*>=!uX#sN5M`?zVix3|Y= zf1ttRH4m7KyNx(M1oUe_RZ3xX0EAka&o|Ax$*O+ z;ofSGtP7eoeua-hd7iJ>$N_6FwJ>AaU*t%UIZyN+83VR4fEa;SJ2~LibLyF12n*jmo%xdArb z7yN{X9~1i3>rA23>ZvB4Ba+**LrfT}(uPQE7{mu0bJGL(+1Nt;VbgR0;Q*NfMGxv6 z16UjU1uqVmDA#{iIH0q?Wc)~f7&r!g+KU4UWi?`2oPhWP1j7bmcu*T*L!6+U6UK^F zqwE1$7Z45*4(J>MI9v3EcFa3pWADJPnq&`PKh&U{DZdxroKa(7x}v6?xbJw6Gsuy9 zxM2zpB$i`PaAOp1OwuK0melh*Fz@mP@wKN+;Q%>Huu~5-X)H@ydVjWv5v&c(sVBL> zzT=~@c{OHAtiafU>>CuU^o|u%`^@Vsu?_-gy}{W+7fsLwM&?&VCl$Ly#=!a*IN^qO zx6g*pKYrWr#p(?cctFk(l6lb7dA;X$dF&N%;{bk!J$D~8fdghF$B@i}>+@jV`{KvF z@b3CK)(CJ8&l6eafvGDu^8VbV{lWpwF|eZUa>csOhlS;}yR#lUxZ~2AU_dYs?ihe> zY2{+odEb%Kxt+@#IaYN})wK0F&J~}*nPEO~_xuX}!Ah2Ofo)5TaG!tLo~_g;`15V$ zQ`YzeR_*w;^0FU5esktIKV;=*Khw;Gu5+_9NBTX;(~kV;%-c@c{fo$djvUN7PjiZ& z1=a*U>BIa@d( z4j@*pwsdCuoNM8>NZjJ$yi44+=UreI*yCJ#XCK%NigC*6IsIgFPUqaeA9dQh1gu{{ z-BUcL_ft3M0sq+-AUZLv}8wZ?o ztsS%0d<#5r{v@_~!D7_1)B_Gfrzh929ePejTQfVS_rXuo*VMiMFz+f-DA(VeMVlbe z^+6}jFvsk%um(Glc;nn-VXlok>bSft{Amm}bzaV$mfgzvOJ5k@2Z>8w(1mHQYJHgY zEHT=eTeV%w`88!Y9fSw0`JCO4jBr;1fWTy_@eNti1=_iWT3FoU z&`<4i9(}tO?wU4cOINkwYy-xCFH3hNzAW1_jq~9kI7|As>`(F-xy{ns$(!WUJk95Q z*#Xzoy+QCDL>z08?NnVt7<2%JI++q)ajbqg=_j$ zP0ixkeP2!O!L|2fP3=2bR|oE`tAn@>ov5qBxQ^UYH)_sH8jDfgUt_%GZ@16o+UE}5 z?q}@FZ<;pX?23AA5PWx&xxG}~7N-tCE*QPAstPSWAQzAa$N|JZu}|FZC+6E&ucyim 
zjS4SWE4QtyLuaC$(w@iRZ=nAnmFww!*xae*oU3YgH+%>Ae!NsW8?_EFaQWbk5nN+7 z?$}*~zZZ73_Gwu@XvTG@ju4f&zxWwrjr{9gf@fN3K&F{wgvhG}$ zqaRV*-e@D+OAh540sDh%O+%wVm zz=)r(EAPua&9v$Jc&>~MoP-YF1>S+a9xc|s5C$IOE7Z%rE68HQa*8OBnK(K5AmiuhO$AjQIh|I)d z?SF)Y$+*keCTjws2jx@`M1JxA8U)`#WF|_D%$_a0Q_c|-4#*=1RMZpGCH0&j_zog7 zzF1$5d;SjVezGnowm_eHbf&p8tk__S9}0{0YhhtB?sBGAx zfSDDw%Z}eLUc(=7Icls%QGWaUFq!L&o9j%)@SyF24)DG(Fz+m*P-@&81xBMTb7Wp} zjt@Gf-}-QL zq4rqR^LE(fi%&2kV_58!f$SB;1i#7;yJ3A-DAh9Q7nHe7c);W_02|<)VPU*mIim}W zJHwu*<0wyJgoFc(=71ITgWa$j=J#U@wJ*kj+c593>#D>Gn~Z_fwvFdA$B<6}@j$N( z6XV^=nkY9iw}<$S%WDJKTB#jlVne6nxdP|Rv%s_3d5$~JZ)fh@Z%yi5bGH++*IWX4>^C&UrH1oJKpER*<Y34(vOdR^Y9rMoD zI4kVyd%_cGjnjzvEP%N6YFQ1`4sIKcfMxM zMdnW>Kj<86U9i|weL^g2YF<67H}r-l4Vz$oP;AEad+`b6Ll@Z72iEP_XP#B+{z=*P zz48cR13nubqWdTb@xzE6%*0wD&OgZaJQ(7<)9W}5U7o%<%U%zl!`m_Le68nt=>PQb zrA@~V(r@5^#XgW5>Q8X4z(M39UyEx#ALKq_4`fUiu>0ZRn>*y-b6g@xy!u@CW`tpRf)6XOrc|o73(iWX>^(8%{_Uu;z5Er5)gUo8^PYoZJn= z{d~`L`-2x+Yj+0wgTp>IbWmcLD6hRU?K~j9!3a6PveT`#HegWC=_lcX4{1*K;sdVN zQ@^J!k!Bx$m^uK;dT3D0M)7-n-q3vY<{)-^l)n{SU_cJA&F6Ye-NHHJ@>|V&&%0_t=_1-8j97dTqhx}L5>>t>=RL{mUPRf^0`<(IbjES}McC3>hiD5ps znj={KJYu&zu`8|U9jdK>y4GJ-jSY$j~ zC+Aq`&U&83+LoScF<)`M)mX)xtFa25ns{ZbsU5T2bEa`idm6fSRbCYcJbN8%5{hj{x|WUHJZDmHE&`2yfNAV zv;nk_t#ytB^?ustmd;Q6occcPbL>HF>-x0OGpg%z9;e>UxttiMEkhrjg>%|MS&ywZ zL5~3buSJ8*_(~lw)jk^foF9T7(dK}Fep%n~)bc5G`21Qv1?tGOi(P#R^a0{LI@-={ z{{npsmY;#X2EWUaZT()C#WKNN|KI?19j5&Ib&&A=P=qXWklMK(-rmd z_|O{7oyHbxpG+eM?38myBOSZKFN6C6p#%K=(7gJJXpk9Sso2N=G2wv0$Ae%he|-oJ z_{sQ?8r_`=3$>Lra=?yDvaeWpCB3|Yn4nKF=Vx@tjIZR0#oCE9a)9h7Oz-#?Zc+;8 z1pG*RD2?_`6N`;Eq>%&Ud=cS*v~mq>fWH`;Q%{N(nemkl4!A3g9FS<-u=vu&AL%m& zX0EBPiw~jk+-dmYYU=hAN*)r?15&96RMh+9L1nym8DFUVL)vr!?jua)81R`Nq`rpp zRz*EBxu~8OFFND7TVb*OzO-?Ga%IwLn!Bs3J;`4x2mY) zVdE}s4FK2J!xygwh#3Gi!ZC(1L7OZI0ta9M?SR!$hXEEiuRoEoco!3EzTTkvn%TEyx=#ic21P4FG_lU@zfQJ zlO+67a?k$yz2(niUPjLG&OFP^5zBmwabln22RTB%JTe|nTT=I!wzo`S<*m+rP4I3AED-ZMWUnS)m;2MGKDNdFL_Ry~Pnd>dw^Y4*U 
zEn7b&G2hr5^ufI0HInus-y-(&d^RZ_*tRS=Nn@Q$?RLG-TyJ4t>-+HcbG)mkf_8Hgu_F=6*ZvVa));&L8QQt7& zke8pk^uXqldJbZWKc55-$QdI8aDb)Xw_@|$u!`&K-z6VpN7>lj|f?`LJscjzC83Hme7VJlyR-UEmu z`t!Ksh(_jDRqk_>*)Y%jc2*325RALY@b`@6Ng|FfkY3}!x#w>x>WST#)L7^PT?+K= zfyr|HwQ-+A#JGVO;J|Y%1RXP3k%K+YwKpAq&zQUc=6Q<)xTeSWJ{`NClJ)#SpC{G= zUhg|t<;%!=fM*ZjjN&iG=_AlQV940*82jKhy{8^_IOFjSBksm64%fnRFMl=e;n(MW z{iyfqcC;x5t_A-8ZfssXEwVu_1$XZOJcUO(oi;ZfOp3+w&fl@^gvT5cp$F*$HVXw zM!MIyg=y;OjH%<9ceJA!N6&S0jcwT0jF)HpT$K2>aBlx=>G<>)uzsnJ-wDw+z&gM` z?w(g))x+o%(HMJRbg{k{u>=psWgl!fo1Fdv%!AZ{E&S5ArsrnZ+tk5{W6seQmg#5L zb2ad+=j)Jlb31nXea-bd#`v-Bdza8g6a69qc>*@Ti(?$pa;rXjV6wdSQpEc{6a^Oy zKd`FC?q5}t=c{V_S*#bFt7+T1jb)x$m%!ZIFTVLZ&gYPKV6Ii-^TQ7i4ZRrN| z)g1&5iAB!6`tUcPvAf1>1q~dz{00KgQv)4#c;P%OS>D&w%q_j^!#- z$JJ938p?9p0ehfzW??-l9`NJRFqlA$Zr2&o4~i;ZuLLKl#HW#(}J=Km`a3mg*gQdZ1(ss3Qtx`1$iSsVcT zKRCIlo|m$rE7b|92gaA{2gB9{gagds0O0>0rYdSAq~RvTq_hVRGq4<{F0gG$<Eq zU7(^qzw46PnbKh^)e5l(sOJsOSJ%Sg0i0=I#yWV)ee{v?Ohr91bw$mFILxG|l=lE| z`?jz+0D6URK(_hgpO7njI^{!Fsuw{I;2e@ra+&bFu}J5rY~w4{eFOXd;n}NdDuUr9 zMP{G}w$9gg4;QO9!q5k1a=e!=v;0?4pV(DV6B!t)QpE^+pio--jW9Ss&LPj-+CXJy zNj*Qpp(RCUs0RwA+9w9(0r3lFhXWA*cV=o{Ju5>)RH_P$ubPzHylR5`{Tm?$^i zG)N4g>>*C69#B!A2L2DlIF#g88SVk#_{<&uDz1`uF|t0P~dQDlb_I{S?6dU+uoCcAGF1 zr47mTz?S3c>0^ua&-KFtvQC(cE&#p%2ClbGo>osyZsf?FP3nQKzO24-Y@z;eKRh5g z%j4w%&iymj)M1mxpR^?%J#gDe^$hq2dbSIA*0972#J~f{{|)^A{7^+bIUOTLYGGs# zz&}t}tbL?ceL&6@iJSvY$eG_pA}+O!npmuz?KOTtY=ZE40Q3Hbcde*zPVGREiW%Jl z6HARZ_L8?~1bK>OoggG0sHo2%*5_42^Xe;%9%RzE)c3%|a^pq7cHsR5up`7bFu)oj zeEpBn=D)3?o{{>&Ar;I`4@{g>&xfz@Z-e>>WWBI=T>#jB6u$mrlIuG+u*zgZ3#G>Y z3VIHS_y~jX0I>f#&imWT>e-nb6jJHj^}tBEHU*vF(?N8C9hc+`VxMz|J_39H$0WB$ z?jU4LgW)gyMVv!W>&6Ab1MB(#=KLF&^H1XMw;D4Lq#?QPfm_e1XJQTUL^t~&_jUn% z{J{PjB%epF;bL^-$1bU{LaAQzJC`JHdklg3Rm}H)g1DX+%UM1~4}WQ#tp|pe)C6)I z&SO8J>-ohKrMlVchS?mu$J(1@EiQKmH#j+zCvLBle;VGJ+mw7Zt?5MaF!kjrw5=P zybR|VesJ4z^-5fC2L{jK`iGwJ+e4f_FwVIAEBO0|yI0gN;=V84jr9%hI?6ZF1JVO=_W<+b zFh?eE`mKzqV?2F}Bi;@Ceh}9K;E4tNE#ta`>k6(CzDX@eE#ot 
zT~;&Vvyb~pCVM|+|M!KVq0RR9e2wbaKdQ32P9cX3MTUls+JELOM;9|Rw8>AVsEZjI z8to9mhwJZewnQ8Sb;z`Ch0i()>X5mPcg=sdtQYR6^PD9zG<3`+giqV=+SS2__CB)9 ztXF4f$bK`s44%XKKikdhGR1X?j+WVF=Ik<0ZCBI&Ozl;+|I{v1v=?K4ulCrI zeqE-cRrq+l_3JW4ytMyYA0YU&)d2hV`gOa9+A@w-;qy_e^=%pZgV4pSSGq0Ztg@}r zZJF!MAKU)lu5??bX#2MH(yDY@W^SX*(N?9~G8;wOmG+ad9=#>Av3u4VX3038sJ^wM zKCUvYhFRro>}Gt^YM8IgX!B;4%*IE7Z(1O;WUk}IhJUTb<8xi$^$h~}pfwbIWm-?w zk}2ZF#(&53J@C2yTihssPda7hTxE`Sl;bWl8bGGmLAZq_>vFed2l>l1JIG(A*+Kp? z?Rv02Zo`oEv+C+6bBtfMWWCBc8j{SYtsK`Ok9z2iZq+A7WsX@gbKd0~4M}FyD#!a) zL`JjD0c5;~hVE!x&Y&`Q7`hzq;iHieiVPlV*K&H1@gA!BHu`^=k~!AwAKyBQj0&rN z{Kh==lCEPlY#Bc+TlWeg6Qo_feR#Clc5j+T*R*7ccf0T!3jBE~muYf%5SgOyy_)6B z1(5L-Y0gbuo~fRjx{TEzGMjmyE_|qE{HN2Q&8$lonNi;7Ixe4ofn<1}qE8|C>Sz#| z>kusB^7(h{I=-Cq5x`eLWR4agIETyUUsy7uMcic0=SKKS%lL}~lj(9F-ePkaWxCW! z*SVWaaj0uQZw?^y)G%dOmd`(ZucD6tzS?{p|10A1`4?E`x=$haN|)13hFA_F(}jOF z^ZRZxDf)*L_H0{BLCm%-4`W7V%5OJ>eT$XZhfOQsm$ zUO{B82Pnsnf2?)|k@1^UtaAJ&153tlZ~!{`c?FU28$eb$YR9N87?&9TcvzD zs4e5uL2VhI4jQ%Y<nkL}oNVIY-xJJO}i#-ef!nJ&S3z%S%Q-ik6HQF!ZD7 zBBQ&xCF2Di-GyCbMs0PbsN27zy~@nFm0|_u;5(LA>|f@Xy*}67%juBuc<{QIR{wad zfb}fn{!p8b-MW|i1M8G=e;Cc7VU^>~91a=Rj&2U0b~&z{=#X*k#O4rdm*YC>9Wt&R zG}_V68blkO>rscy+y)72h_&nEI?9eY%5iI$Q^u`f&0^N?Wpk91we<%w;S+4wYC zkJMJq#tO9_m#<9gQP^ZQK8;alyIfWPH_B{m{Lu~NY?L{+L55Z9{MQn()p>)A)rEGM zxz19o2t13Qb*WAns|)Sr6dmPQU1*oN?vNREsk2k&sEdqqBp!2-aXxrU#9klAL$yXv zN824T?WWtyaXheg)9o^RZTolBtv-%Xf6P^;Xe*^TMKs0maa;bnU1q&m9rfwx#Puq7 z$aHk#XuJ3hnT`%R<}PFJp!OhysJ+hi#;^CXL#DXiu+jBfb(V8|UFMjV%zDG@)#)r} zz0&sTbjt8u^Pjys4w<4QW2=p$U2P+x<5rGxte!sVQ% 3.1. 
so we can remove all of this when we don't +# publish version <= 3.1.X anymore scv_overflow = ("-D", "html_theme=sphinx_rtd_theme", "-D", "html_theme_options.logo_only=True", "-D", "html_logo=gnocchi-logo.png", - "-D", "html_favicon=gnocchi-icon.png", + "-D", "html_favicon=gnocchi-icon.ico", "-D", "html_static_path=%s" % html_static_path_abs) diff --git a/doc/source/rest.j2 b/doc/source/rest.j2 index 1c83ed72..c06c845d 100644 --- a/doc/source/rest.j2 +++ b/doc/source/rest.j2 @@ -6,7 +6,7 @@ Authentication ============== By default, the authentication is configured to the "basic" mode. You need to -provide an `Authorization' header in your HTTP requests with a valid username +provide an `Authorization` header in your HTTP requests with a valid username (the password is not used). The "admin" password is granted all privileges, whereas any other username is recognize as having standard permissions. diff --git a/tox.ini b/tox.ini index 61315f9e..11d0da4f 100644 --- a/tox.ini +++ b/tox.ini @@ -124,7 +124,7 @@ deps = .[test,postgresql,file,doc] setenv = GNOCCHI_TEST_STORAGE_DRIVER=file GNOCCHI_TEST_INDEXER_DRIVER=postgresql commands = doc8 --ignore-path doc/source/rest.rst doc/source - pifpaf -g GNOCCHI_INDEXER_URL run postgresql -- python setup.py build_sphinx + pifpaf -g GNOCCHI_INDEXER_URL run postgresql -- python setup.py build_sphinx -W [testenv:docs-gnocchi.xyz] basepython = python2.7 -- GitLab From a9b4e485eb2b64b6b787809d236cb815941c9ab7 Mon Sep 17 00:00:00 2001 From: gord chung Date: Tue, 21 Mar 2017 20:45:53 +0000 Subject: [PATCH 0702/1483] ceph: make sure aio_remove operations return There's no guarantee that an aio_remove operation will ever finish if Gnocchi crashes in the middle of an operation. A metric could be entirely expunged from the indexer while some of its measures/archive files would never be deleted, leaving stalled files for ever in Ceph. 
This patches makes sure this does not happen by waiting for the aio_remove operation to finish before proceeding to any next steps, or by switching to the sync remove call. Change-Id: I7c7bb15a0368c59c14926a1eec226682a9d86a93 --- gnocchi/storage/ceph.py | 20 ++++++++++++++---- gnocchi/storage/incoming/ceph.py | 36 ++++++++++++++++++++++---------- 2 files changed, 41 insertions(+), 15 deletions(-) diff --git a/gnocchi/storage/ceph.py b/gnocchi/storage/ceph.py index aad5b521..baa977d1 100644 --- a/gnocchi/storage/ceph.py +++ b/gnocchi/storage/ceph.py @@ -88,11 +88,19 @@ class CephStorage(_carbonara.CarbonaraBasedStorage): granularity, version=3): name = self._get_object_name(metric, timestamp_key, aggregation, granularity, version) + + try: + self.ioctx.remove_object(name) + except rados.ObjectNotFound: + # It's possible that we already remove that object and then crashed + # before removing it from the OMAP key list; then no big deal + # anyway. + pass + with rados.WriteOpCtx() as op: self.ioctx.remove_omap_keys(op, (name,)) self.ioctx.operate_write_op( op, self._build_unaggregated_timeserie_path(metric, 3)) - self.ioctx.aio_remove(name) def _delete_metric(self, metric): with rados.ReadOpCtx() as op: @@ -104,9 +112,13 @@ class CephStorage(_carbonara.CarbonaraBasedStorage): return if ret == errno.ENOENT: return - for name, _ in omaps: - self.ioctx.aio_remove(name) - self.ioctx.aio_remove( + + ops = [self.ioctx.aio_remove(name) for name, _ in omaps] + + for op in ops: + op.wait_for_complete_and_cb() + + self.ioctx.remove_object( self._build_unaggregated_timeserie_path(metric, 3)) def _get_measures(self, metric, timestamp_key, aggregation, granularity, diff --git a/gnocchi/storage/incoming/ceph.py b/gnocchi/storage/incoming/ceph.py index e7a9032e..a6d9acbd 100644 --- a/gnocchi/storage/incoming/ceph.py +++ b/gnocchi/storage/incoming/ceph.py @@ -133,25 +133,26 @@ class CephStorage(_carbonara.CarbonaraBasedStorage): def delete_unprocessed_measures_for_metric_id(self, 
metric_id): object_prefix = self.MEASURE_PREFIX + "_" + str(metric_id) - object_names = self._list_object_names_to_process(object_prefix) + object_names = tuple(self._list_object_names_to_process(object_prefix)) + if not object_names: return + for op in list(map(self.ioctx.aio_remove, object_names)): + op.wait_for_complete_and_cb() + # Now clean objects and omap with rados.WriteOpCtx() as op: # NOTE(sileht): come on Ceph, no return code # for this operation ?!! - self.ioctx.remove_omap_keys(op, tuple(object_names)) + self.ioctx.remove_omap_keys(op, object_names) self.ioctx.operate_write_op(op, self.MEASURE_PREFIX, flags=self.OMAP_WRITE_FLAGS) - for n in object_names: - self.ioctx.aio_remove(n) - @contextlib.contextmanager def process_measure_for_metric(self, metric): object_prefix = self.MEASURE_PREFIX + "_" + str(metric.id) - object_names = list(self._list_object_names_to_process(object_prefix)) + object_names = tuple(self._list_object_names_to_process(object_prefix)) measures = [] ops = [] @@ -160,6 +161,18 @@ class CephStorage(_carbonara.CarbonaraBasedStorage): tmp_measures = {} def add_to_measures(name, comp, data): + # Check that the measure file has not been deleted while still + # listed in the OMAP – this can happen after a crash + ret = comp.get_return_value() + if ret < 0: + exc = rados.errno_to_exception[abs(ret)] + if exc == rados.ObjectNotFound: + # Object has been deleted, so this is just a stalled entry + # in the OMAP listing, ignore + return + # This is not an "expected" error, raise it back + raise exc + if name in tmp_measures: tmp_measures[name] += data else: @@ -186,13 +199,14 @@ class CephStorage(_carbonara.CarbonaraBasedStorage): yield measures - # Now clean objects and omap + # First delete all objects + for op in list(map(self.ioctx.aio_remove, object_names)): + op.wait_for_complete_and_cb() + + # Now clean omap with rados.WriteOpCtx() as op: # NOTE(sileht): come on Ceph, no return code # for this operation ?!! 
- self.ioctx.remove_omap_keys(op, tuple(object_names)) + self.ioctx.remove_omap_keys(op, object_names) self.ioctx.operate_write_op(op, self.MEASURE_PREFIX, flags=self.OMAP_WRITE_FLAGS) - - for n in object_names: - self.ioctx.aio_remove(n) -- GitLab From 4278189688a1142d86a74f20e0d928a5ca0fda4b Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Tue, 18 Apr 2017 15:47:24 +0200 Subject: [PATCH 0703/1483] utils: replace bool_from_string by builtin distutils strtobool Change-Id: I68777282cf008658ce8d343a63b904e7f7ba720b --- gnocchi/aggregates/moving_stats.py | 3 +-- gnocchi/rest/__init__.py | 32 ++++++++++++++++-------------- gnocchi/storage/common/redis.py | 6 +++--- gnocchi/tests/test_rest.py | 6 ++---- gnocchi/utils.py | 8 ++++++++ 5 files changed, 31 insertions(+), 24 deletions(-) diff --git a/gnocchi/aggregates/moving_stats.py b/gnocchi/aggregates/moving_stats.py index 3645a0f3..cfd04adb 100644 --- a/gnocchi/aggregates/moving_stats.py +++ b/gnocchi/aggregates/moving_stats.py @@ -16,7 +16,6 @@ import datetime import numpy -from oslo_utils import strutils from oslo_utils import timeutils import pandas import six @@ -75,7 +74,7 @@ class MovingAverage(aggregates.CustomAggregator): """ if center: - center = strutils.bool_from_string(center) + center = utils.strtobool(center) def moving_window(x): msec = datetime.timedelta(milliseconds=1) diff --git a/gnocchi/rest/__init__.py b/gnocchi/rest/__init__.py index 9e64dfff..abef8b16 100644 --- a/gnocchi/rest/__init__.py +++ b/gnocchi/rest/__init__.py @@ -21,7 +21,6 @@ import uuid from concurrent import futures import jsonpatch from oslo_utils import dictutils -from oslo_utils import strutils import pecan from pecan import rest import pyparsing @@ -138,16 +137,8 @@ def Timespan(value): def get_header_option(name, params): type, options = werkzeug.http.parse_options_header( pecan.request.headers.get('Accept')) - try: - return strutils.bool_from_string( - options.get(name, params.pop(name, 'false')), - strict=True) - except 
ValueError as e: - method = 'Accept' if name in options else 'query' - abort( - 400, - "Unable to parse %s value in %s: %s" - % (name, method, six.text_type(e))) + return strtobool('Accept header' if name in options else name, + options.get(name, params.pop(name, 'false'))) def get_history(params): @@ -158,6 +149,17 @@ def get_details(params): return get_header_option('details', params) +def strtobool(varname, v): + """Convert a string to a boolean. + + Default to false if unable to convert. + """ + try: + return utils.strtobool(v) + except ValueError as e: + abort(400, "Unable to parse `%s': %s" % (varname, six.text_type(e))) + + RESOURCE_DEFAULT_PAGINATION = ['revision_start:asc', 'started_at:asc'] @@ -427,7 +429,7 @@ class MetricController(rest.RestController): except ValueError as e: abort(400, e) - if strutils.bool_from_string(refresh): + if strtobool("refresh", refresh): pecan.request.storage.process_new_measures( pecan.request.indexer, [six.text_type(self.metric.id)], True) @@ -1366,7 +1368,7 @@ class ResourcesMetricsMeasuresBatchController(rest.RestController): names=names, resource_id=resource_id) known_names = [m.name for m in metrics] - if strutils.bool_from_string(create_metrics): + if strtobool("create_metrics", create_metrics): already_exists_names = [] for name in names: if name not in known_names: @@ -1603,7 +1605,7 @@ class AggregationController(rest.RestController): abort(400, "fill must be a float or \'null\': %s" % e) try: - if strutils.bool_from_string(refresh): + if strtobool("refresh", refresh): pecan.request.storage.process_new_measures( pecan.request.indexer, [six.text_type(m.id) for m in metrics], True) @@ -1667,7 +1669,7 @@ class StatusController(rest.RestController): enforce("get status", {}) try: report = pecan.request.storage.incoming.measures_report( - strutils.bool_from_string(details)) + strtobool("details", details)) except incoming.ReportGenerationError: abort(503, 'Unable to generate status. 
Please retry.') report_dict = {"storage": {"summary": report['summary']}} diff --git a/gnocchi/storage/common/redis.py b/gnocchi/storage/common/redis.py index 54a5a350..91a90d4d 100644 --- a/gnocchi/storage/common/redis.py +++ b/gnocchi/storage/common/redis.py @@ -18,8 +18,6 @@ from __future__ import absolute_import from six.moves.urllib import parse -from oslo_utils import strutils - try: import redis from redis import sentinel @@ -27,6 +25,8 @@ except ImportError: redis = None sentinel = None +from gnocchi import utils + SEP = ':' @@ -96,7 +96,7 @@ def get_client(conf): if a not in options: continue if a in CLIENT_BOOL_ARGS: - v = strutils.bool_from_string(options[a][-1]) + v = utils.strtobool(options[a][-1]) elif a in CLIENT_LIST_ARGS: v = options[a][-1] elif a in CLIENT_INT_ARGS: diff --git a/gnocchi/tests/test_rest.py b/gnocchi/tests/test_rest.py index df1788ce..296755f4 100644 --- a/gnocchi/tests/test_rest.py +++ b/gnocchi/tests/test_rest.py @@ -1450,8 +1450,7 @@ class ResourceTest(RestTest): result = self.app.get("/v1/resource/generic?details=awesome", status=400) self.assertIn( - b"Unable to parse details value in query: " - b"Unrecognized value 'awesome', acceptable values are", + b"Unable to parse `details': invalid truth value", result.body) def test_list_resources_with_bad_details_in_accept(self): @@ -1461,8 +1460,7 @@ class ResourceTest(RestTest): }, status=400) self.assertIn( - b"Unable to parse details value in Accept: " - b"Unrecognized value 'foo', acceptable values are", + b"Unable to parse `Accept header': invalid truth value", result.body) def _do_test_list_resources_with_detail(self, request): diff --git a/gnocchi/utils.py b/gnocchi/utils.py index 45c4ccc9..816548de 100644 --- a/gnocchi/utils.py +++ b/gnocchi/utils.py @@ -1,5 +1,6 @@ # -*- encoding: utf-8 -*- # +# Copyright © 2015-2017 Red Hat, Inc. 
# Copyright © 2015-2016 eNovance # # Licensed under the Apache License, Version 2.0 (the "License"); you may @@ -14,6 +15,7 @@ # License for the specific language governing permissions and limitations # under the License. import datetime +import distutils.util import errno import itertools import multiprocessing @@ -206,3 +208,9 @@ def ensure_paths(paths): except OSError as e: if e.errno != errno.EEXIST: raise + + +def strtobool(v): + if isinstance(v, bool): + return v + return bool(distutils.util.strtobool(v)) -- GitLab From 431ea99739f040ad9f432bffe5306380246f92b6 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Tue, 18 Apr 2017 15:50:00 +0200 Subject: [PATCH 0704/1483] Drop usage of importutils It actually have no value other just using __import__ directly Change-Id: I2ef6db2e0e4e292ee3ec9c11a654fac1188b75e6 --- gnocchi/storage/common/ceph.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/gnocchi/storage/common/ceph.py b/gnocchi/storage/common/ceph.py index 992c2675..6e1dc58e 100644 --- a/gnocchi/storage/common/ceph.py +++ b/gnocchi/storage/common/ceph.py @@ -13,17 +13,20 @@ # under the License. from oslo_log import log -from oslo_utils import importutils LOG = log.getLogger(__name__) for RADOS_MODULE_NAME in ('cradox', 'rados'): - rados = importutils.try_import(RADOS_MODULE_NAME) - if rados is not None: + try: + rados = __import__(RADOS_MODULE_NAME) + except ImportError: + pass + else: break else: RADOS_MODULE_NAME = None + rados = None if rados is not None and hasattr(rados, 'run_in_thread'): rados.run_in_thread = lambda target, args, timeout=None: target(*args) -- GitLab From d9d472ed70e327f8affeefbd26135e1db2df376c Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Mon, 24 Apr 2017 16:07:57 +0200 Subject: [PATCH 0705/1483] storage: introduce add_measures_batch for Ceph This replaces the thread based approach with a faster write operation for the driver that will support it. This seems to improve performance by a large magnitude. 
Change-Id: I318b37aec3f0e274f06c2379c9caa827b5dc9650 --- gnocchi/rest/__init__.py | 21 ++++++---------- gnocchi/storage/incoming/__init__.py | 14 ++++++++--- gnocchi/storage/incoming/_carbonara.py | 19 +++++++++++--- gnocchi/storage/incoming/ceph.py | 34 ++++++++++++++------------ 4 files changed, 52 insertions(+), 36 deletions(-) diff --git a/gnocchi/rest/__init__.py b/gnocchi/rest/__init__.py index 9e64dfff..f0176d43 100644 --- a/gnocchi/rest/__init__.py +++ b/gnocchi/rest/__init__.py @@ -18,7 +18,6 @@ import functools import itertools import uuid -from concurrent import futures import jsonpatch from oslo_utils import dictutils from oslo_utils import strutils @@ -163,8 +162,6 @@ RESOURCE_DEFAULT_PAGINATION = ['revision_start:asc', METRIC_DEFAULT_PAGINATION = ['id:asc'] -THREADS = utils.get_default_workers() - def get_pagination_options(params, default): max_limit = pecan.request.conf.api.max_limit @@ -1422,12 +1419,10 @@ class ResourcesMetricsMeasuresBatchController(rest.RestController): for metric in known_metrics: enforce("post measures", metric) - storage = pecan.request.storage.incoming - with futures.ThreadPoolExecutor(max_workers=THREADS) as executor: - list(executor.map(lambda x: storage.add_measures(*x), - ((metric, - body_by_rid[metric.resource_id][metric.name]) - for metric in known_metrics))) + pecan.request.storage.incoming.add_measures_batch( + dict((metric, + body_by_rid[metric.resource_id][metric.name]) + for metric in known_metrics)) pecan.response.status = 202 @@ -1456,11 +1451,9 @@ class MetricsMeasuresBatchController(rest.RestController): for metric in metrics: enforce("post measures", metric) - storage = pecan.request.storage.incoming - with futures.ThreadPoolExecutor(max_workers=THREADS) as executor: - list(executor.map(lambda x: storage.add_measures(*x), - ((metric, body[metric.id]) for metric in - metrics))) + pecan.request.storage.incoming.add_measures_batch( + dict((metric, body[metric.id]) for metric in + metrics)) pecan.response.status 
= 202 diff --git a/gnocchi/storage/incoming/__init__.py b/gnocchi/storage/incoming/__init__.py index ee6c90f1..c1eb06af 100644 --- a/gnocchi/storage/incoming/__init__.py +++ b/gnocchi/storage/incoming/__init__.py @@ -1,5 +1,6 @@ # -*- encoding: utf-8 -*- # +# Copyright © 2017 Red Hat, Inc. # Copyright © 2014-2015 eNovance # # Licensed under the Apache License, Version 2.0 (the "License"); you may @@ -31,16 +32,23 @@ class StorageDriver(object): def upgrade(indexer): pass - @staticmethod - def add_measures(metric, measures): + def add_measures(self, metric, measures): """Add a measure to a metric. :param metric: The metric measured. :param measures: The actual measures. """ - raise exceptions.NotImplementedError + self.add_measures_batch({metric: measures}) @staticmethod + def add_measures_batch(metrics_and_measures): + """Add a batch of measures for some metrics. + + :param metrics_and_measures: A dict where keys + are metrics and value are measure. + """ + raise exceptions.NotImplementedError + def measures_report(details=True): """Return a report of pending to process measures. diff --git a/gnocchi/storage/incoming/_carbonara.py b/gnocchi/storage/incoming/_carbonara.py index 9dba7c5a..97a67a9f 100644 --- a/gnocchi/storage/incoming/_carbonara.py +++ b/gnocchi/storage/incoming/_carbonara.py @@ -14,17 +14,21 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
+from concurrent import futures import itertools import struct from oslo_log import log import pandas -import six.moves +import six from gnocchi.storage import incoming +from gnocchi import utils LOG = log.getLogger(__name__) +_NUM_WORKERS = utils.get_default_workers() + class CarbonaraBasedStorage(incoming.StorageDriver): MEASURE_PREFIX = "measure" @@ -45,12 +49,19 @@ class CarbonaraBasedStorage(incoming.StorageDriver): pandas.to_datetime(measures[::2], unit='ns'), itertools.islice(measures, 1, len(measures), 2)) - def add_measures(self, metric, measures): + def _encode_measures(self, measures): measures = list(measures) - data = struct.pack( + return struct.pack( "<" + self._MEASURE_SERIAL_FORMAT * len(measures), *list(itertools.chain.from_iterable(measures))) - self._store_new_measures(metric, data) + + def add_measures_batch(self, metrics_and_measures): + with futures.ThreadPoolExecutor(max_workers=_NUM_WORKERS) as executor: + list(executor.map( + lambda args: self._store_new_measures(*args), + ((metric, self._encode_measures(measures)) + for metric, measures + in six.iteritems(metrics_and_measures)))) @staticmethod def _store_new_measures(metric, data): diff --git a/gnocchi/storage/incoming/ceph.py b/gnocchi/storage/incoming/ceph.py index e7a9032e..c9dbbe4b 100644 --- a/gnocchi/storage/incoming/ceph.py +++ b/gnocchi/storage/incoming/ceph.py @@ -18,6 +18,7 @@ import errno import functools import uuid +import six from gnocchi.storage.common import ceph from gnocchi.storage.incoming import _carbonara @@ -54,23 +55,26 @@ class CephStorage(_carbonara.CarbonaraBasedStorage): ceph.close_rados_connection(self.rados, self.ioctx) super(CephStorage, self).stop() - def _store_new_measures(self, metric, data): - # NOTE(sileht): list all objects in a pool is too slow with - # many objects (2min for 20000 objects in 50osds cluster), - # and enforce us to iterrate over all objects - # So we create an object MEASURE_PREFIX, that have as - # omap the list of objects to process 
(not xattr because - # it doesn't allow to configure the locking behavior) - name = "_".join(( - self.MEASURE_PREFIX, - str(metric.id), - str(uuid.uuid4()), - datetime.datetime.utcnow().strftime("%Y%m%d_%H:%M:%S"))) - - self.ioctx.write_full(name, data) + def add_measures_batch(self, metrics_and_measures): + names = [] + for metric, measures in six.iteritems(metrics_and_measures): + name = "_".join(( + self.MEASURE_PREFIX, + str(metric.id), + str(uuid.uuid4()), + datetime.datetime.utcnow().strftime("%Y%m%d_%H:%M:%S"))) + names.append(name) + data = self._encode_measures(measures) + self.ioctx.write_full(name, data) with rados.WriteOpCtx() as op: - self.ioctx.set_omap(op, (name,), (b"",)) + # NOTE(sileht): list all objects in a pool is too slow with + # many objects (2min for 20000 objects in 50osds cluster), + # and enforce us to iterrate over all objects + # So we create an object MEASURE_PREFIX, that have as + # omap the list of objects to process (not xattr because + # it doesn't # allow to configure the locking behavior) + self.ioctx.set_omap(op, tuple(names), (b"",) * len(names)) self.ioctx.operate_write_op(op, self.MEASURE_PREFIX, flags=self.OMAP_WRITE_FLAGS) -- GitLab From f74aec793d87b158c6bd03c8a50d26323465bffe Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Tue, 25 Apr 2017 16:33:44 +0200 Subject: [PATCH 0706/1483] s3: check for consistency after write Change-Id: I50fb952279f736c88543867729abe3c9c7302463 --- gnocchi/storage/s3.py | 35 +++++++++++++++++-- ...stency_check_timeout-a30db3bd07a9a281.yaml | 9 +++++ 2 files changed, 41 insertions(+), 3 deletions(-) create mode 100644 releasenotes/notes/s3_consistency_check_timeout-a30db3bd07a9a281.yaml diff --git a/gnocchi/storage/s3.py b/gnocchi/storage/s3.py index f5aef4f8..8de5507e 100644 --- a/gnocchi/storage/s3.py +++ b/gnocchi/storage/s3.py @@ -1,6 +1,6 @@ # -*- encoding: utf-8 -*- # -# Copyright © 2016 Red Hat, Inc. +# Copyright © 2016-2017 Red Hat, Inc. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain @@ -16,6 +16,7 @@ import os from oslo_config import cfg +import tenacity from gnocchi import storage from gnocchi.storage import _carbonara @@ -42,6 +43,12 @@ OPTS = [ max_length=26, default='gnocchi', help='Prefix to namespace metric bucket.'), + cfg.FloatOpt('s3_check_consistency_timeout', + min=0, + default=60, + help="Maximum time to wait checking data consistency when " + "writing to S3. Set to 0 to disable data consistency " + "validation."), ] @@ -54,12 +61,19 @@ class S3Storage(_carbonara.CarbonaraBasedStorage): WRITE_FULL = True + _consistency_wait = tenacity.wait_exponential(multiplier=0.1) + def __init__(self, conf, incoming): super(S3Storage, self).__init__(conf, incoming) self.s3, self._region_name, self._bucket_prefix = ( s3.get_connection(conf) ) self._bucket_name = '%s-aggregates' % self._bucket_prefix + if conf.s3_check_consistency_timeout > 0: + self._consistency_stop = tenacity.stop_after_delay( + conf.s3_check_consistency_timeout) + else: + self._consistency_stop = None def upgrade(self, index): super(S3Storage, self).upgrade(index) @@ -81,9 +95,24 @@ class S3Storage(_carbonara.CarbonaraBasedStorage): def _create_metric(self, metric): pass + def _put_object_safe(self, Bucket, Key, Body): + put = self.s3.put_object(Bucket=Bucket, Key=Key, Body=Body) + + if self._consistency_stop: + + def _head(): + return self.s3.head_object(Bucket=Bucket, + Key=Key, IfMatch=put['ETag']) + + tenacity.Retrying( + retry=tenacity.retry_if_result( + lambda r: r['ETag'] != put['ETag']), + wait=self._consistency_wait, + stop=self._consistency_stop)(_head) + def _store_metric_measures(self, metric, timestamp_key, aggregation, granularity, data, offset=0, version=3): - self.s3.put_object( + self._put_object_safe( Bucket=self._bucket_name, Key=self._prefix(metric) + self._object_name( timestamp_key, aggregation, granularity, 
version), @@ -186,7 +215,7 @@ class S3Storage(_carbonara.CarbonaraBasedStorage): return response['Body'].read() def _store_unaggregated_timeserie(self, metric, data, version=3): - self.s3.put_object( + self._put_object_safe( Bucket=self._bucket_name, Key=self._build_unaggregated_timeserie_path(metric, version), Body=data) diff --git a/releasenotes/notes/s3_consistency_check_timeout-a30db3bd07a9a281.yaml b/releasenotes/notes/s3_consistency_check_timeout-a30db3bd07a9a281.yaml new file mode 100644 index 00000000..5b5426ee --- /dev/null +++ b/releasenotes/notes/s3_consistency_check_timeout-a30db3bd07a9a281.yaml @@ -0,0 +1,9 @@ +--- +features: + - | + The S3 driver now checks for data consistency by default. S3 does not + guarantee read-after-write consistency when overwriting data. Gnocchi now + waits up to `s3_check_consistency_timeout` seconds before returning and + unlocking a metric for new processing. This makes sure that the data that + will be read by the next workers will be consistent and that no data will + be lost. This feature can be disabled by setting the value to 0. -- GitLab From 68bd5bfed711509fe0a50e024d2a3d84cf518ea9 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Tue, 25 Apr 2017 16:36:23 +0200 Subject: [PATCH 0707/1483] coordination: use redis if available We can use redis as coordinator if storage is redis. So this change does it. 
Change-Id: I5d9f6fe155935a2bebb16ca5ece76bd90310c3e4 --- gnocchi/service.py | 25 +++++++++++++++++-------- 1 file changed, 17 insertions(+), 8 deletions(-) diff --git a/gnocchi/service.py b/gnocchi/service.py index 2864c424..26b8e7dd 100644 --- a/gnocchi/service.py +++ b/gnocchi/service.py @@ -59,14 +59,23 @@ def prepare_service(args=None, conf=None, # If no coordination URL is provided, default to using the indexer as # coordinator if conf.storage.coordination_url is None: - parsed = urlparse.urlparse(conf.indexer.url) - proto, _, _ = parsed.scheme.partition("+") - parsed = list(parsed) - # Set proto without the + part - parsed[0] = proto - conf.set_default("coordination_url", - urlparse.urlunparse(parsed), - "storage") + if conf.storage.driver == "redis": + conf.set_default("coordination_url", + conf.storage.redis_url, + "storage") + elif conf.incoming.driver == "redis": + conf.set_default("coordination_url", + conf.incoming.redis_url, + "storage") + else: + parsed = urlparse.urlparse(conf.indexer.url) + proto, _, _ = parsed.scheme.partition("+") + parsed = list(parsed) + # Set proto without the + part + parsed[0] = proto + conf.set_default("coordination_url", + urlparse.urlunparse(parsed), + "storage") cfg_path = conf.oslo_policy.policy_file if not os.path.isabs(cfg_path): -- GitLab From 7a80be1f588a9b6f9b3b04a089c0de855ea41053 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Wed, 26 Apr 2017 14:38:30 +0200 Subject: [PATCH 0708/1483] Rename GNOCCHI_SERVICE_URL to GNOCCHI_ENDPOINT That makes it possible to use directly pifpaf variables. 
Change-Id: I89dc322330ceb030965df25cc062b7255efa5775 --- devstack/gate/post_test_hook.sh | 4 ++-- gnocchi/tests/functional_live/test_gabbi_live.py | 4 ++-- run-func-tests.sh | 1 - 3 files changed, 4 insertions(+), 5 deletions(-) diff --git a/devstack/gate/post_test_hook.sh b/devstack/gate/post_test_hook.sh index 91e56c56..f4a89086 100755 --- a/devstack/gate/post_test_hook.sh +++ b/devstack/gate/post_test_hook.sh @@ -39,10 +39,10 @@ cd $GNOCCHI_DIR openstack catalog list export GNOCCHI_SERVICE_TOKEN=$(openstack token issue -c id -f value) -export GNOCCHI_SERVICE_URL=$(openstack catalog show metric -c endpoints -f value | awk '/public/{print $2}') +export GNOCCHI_ENDPOINT=$(openstack catalog show metric -c endpoints -f value | awk '/public/{print $2}') export GNOCCHI_AUTHORIZATION="" # Temporary set to transition to the new functional testing -curl -X GET ${GNOCCHI_SERVICE_URL}/v1/archive_policy -H "Content-Type: application/json" +curl -X GET ${GNOCCHI_ENDPOINT}/v1/archive_policy -H "Content-Type: application/json" sudo gnocchi-upgrade diff --git a/gnocchi/tests/functional_live/test_gabbi_live.py b/gnocchi/tests/functional_live/test_gabbi_live.py index 9e013a9e..aeed07a8 100644 --- a/gnocchi/tests/functional_live/test_gabbi_live.py +++ b/gnocchi/tests/functional_live/test_gabbi_live.py @@ -26,7 +26,7 @@ TESTS_DIR = 'gabbits' def load_tests(loader, tests, pattern): """Provide a TestSuite to the discovery process.""" - gnocchi_url = os.getenv('GNOCCHI_SERVICE_URL') + gnocchi_url = os.getenv('GNOCCHI_ENDPOINT') if gnocchi_url: parsed_url = urlparse.urlsplit(gnocchi_url) prefix = parsed_url.path.rstrip('/') # turn it into a prefix @@ -45,4 +45,4 @@ def load_tests(loader, tests, pattern): port=port, prefix=prefix) elif os.getenv("GABBI_LIVE"): - raise RuntimeError('"GNOCCHI_SERVICE_URL" is not set') + raise RuntimeError('"GNOCCHI_ENDPOINT" is not set') diff --git a/run-func-tests.sh b/run-func-tests.sh index 4d702208..2e8ecd74 100755 --- a/run-func-tests.sh +++ 
b/run-func-tests.sh @@ -44,7 +44,6 @@ for storage in ${GNOCCHI_TEST_STORAGE_DRIVERS}; do eval $(pifpaf -e INDEXER run $indexer) eval $(pifpaf -e GNOCCHI run gnocchi --indexer-url $INDEXER_URL --storage-url $STORAGE_URL) - export GNOCCHI_SERVICE_URL=$GNOCCHI_ENDPOINT export GNOCCHI_SERVICE_TOKEN="" # Just make gabbi happy export GNOCCHI_AUTHORIZATION="basic YWRtaW46" # admin in base64 export OS_TEST_PATH=gnocchi/tests/functional_live -- GitLab From 8e2972a63030294608fd113468e7f96eaa172cfb Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Tue, 25 Apr 2017 18:15:56 +0200 Subject: [PATCH 0709/1483] tests: don't use eval for last call on pifpaf run Change-Id: I2a597f68cbb04869951e1f2adcd778328115be23 --- run-func-tests.sh | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/run-func-tests.sh b/run-func-tests.sh index 2e8ecd74..5f17ad84 100755 --- a/run-func-tests.sh +++ b/run-func-tests.sh @@ -2,7 +2,6 @@ set -e cleanup(){ - type -t gnocchi_stop >/dev/null && gnocchi_stop || true type -t indexer_stop >/dev/null && indexer_stop || true type -t storage_stop >/dev/null && storage_stop || true } @@ -42,12 +41,11 @@ for storage in ${GNOCCHI_TEST_STORAGE_DRIVERS}; do esac eval $(pifpaf -e INDEXER run $indexer) - eval $(pifpaf -e GNOCCHI run gnocchi --indexer-url $INDEXER_URL --storage-url $STORAGE_URL) export GNOCCHI_SERVICE_TOKEN="" # Just make gabbi happy export GNOCCHI_AUTHORIZATION="basic YWRtaW46" # admin in base64 export OS_TEST_PATH=gnocchi/tests/functional_live - ./tools/pretty_tox.sh $* + pifpaf -e GNOCCHI run gnocchi --indexer-url $INDEXER_URL --storage-url $STORAGE_URL -- ./tools/pretty_tox.sh $* cleanup done -- GitLab From 91193eb559843525c1446b4caa26f07e0c6dc7c1 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Thu, 27 Apr 2017 07:03:38 +0200 Subject: [PATCH 0710/1483] doc: Change grafana url Grafana plugin have changed to gnocchixyz org on github. This changes links in documentation. 
Change-Id: I6fb68c8849bcb9b34e52297a243ee23bc2e646be --- doc/source/grafana.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/source/grafana.rst b/doc/source/grafana.rst index ab47dea2..d731e613 100644 --- a/doc/source/grafana.rst +++ b/doc/source/grafana.rst @@ -48,5 +48,5 @@ steps: .. _`Grafana`: http://grafana.org .. _`Documentation`: https://grafana.net/plugins/sileht-gnocchi-datasource -.. _`Source`: https://github.com/sileht/grafana-gnocchi-datasource +.. _`Source`: https://github.com/gnocchixyz/grafana-gnocchi-datasource .. _`CORS`: https://en.wikipedia.org/wiki/Cross-origin_resource_sharing -- GitLab From eebee522731cc6852e22b81a4c575ae6244b248a Mon Sep 17 00:00:00 2001 From: gord chung Date: Wed, 19 Apr 2017 20:56:42 +0000 Subject: [PATCH 0711/1483] update aggregation workers help as of v3, we are io-bound so it actually makes sense to use threads. the gate doesn't like threads so keep it defaulting to 1 and just advise users on how to set value. Change-Id: I6e3974d0c420ba5d5e41c9a27a48cb7b3d3f03fc --- gnocchi/storage/_carbonara.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/gnocchi/storage/_carbonara.py b/gnocchi/storage/_carbonara.py index a786da2e..6f7c0060 100644 --- a/gnocchi/storage/_carbonara.py +++ b/gnocchi/storage/_carbonara.py @@ -35,9 +35,9 @@ from gnocchi import utils OPTS = [ cfg.IntOpt('aggregation_workers_number', default=1, min=1, - help='Number of workers to run during adding new measures for ' - 'pre-aggregation needs. Due to the Python GIL, ' - '1 is usually faster, unless you have high latency I/O'), + help='Number of threads to process and store aggregates. 
' + 'Set value roughly equal to number of aggregates to be ' + 'computed per metric'), cfg.StrOpt('coordination_url', secret=True, help='Coordination driver URL'), -- GitLab From 1c35c7c6a63e858ac574118a2e20500f8af43697 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Thu, 27 Apr 2017 17:01:47 +0200 Subject: [PATCH 0712/1483] ceph: Don't fail if the container does not exists We previously remove this object with aio_remove so error was ignored. We have to catch it now. Change-Id: Ie036dc157e84a5665808b3b37e1c676b533d3411 --- gnocchi/storage/ceph.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/gnocchi/storage/ceph.py b/gnocchi/storage/ceph.py index baa977d1..87b510fe 100644 --- a/gnocchi/storage/ceph.py +++ b/gnocchi/storage/ceph.py @@ -118,8 +118,12 @@ class CephStorage(_carbonara.CarbonaraBasedStorage): for op in ops: op.wait_for_complete_and_cb() - self.ioctx.remove_object( - self._build_unaggregated_timeserie_path(metric, 3)) + try: + self.ioctx.remove_object( + self._build_unaggregated_timeserie_path(metric, 3)) + except rados.ObjectNotFound: + # It's possible that the object does not exists + pass def _get_measures(self, metric, timestamp_key, aggregation, granularity, version=3): -- GitLab From fcfb9c6e531d611edc3aff7c552a03bcebbfa274 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Thu, 27 Apr 2017 16:23:27 +0200 Subject: [PATCH 0713/1483] ceph: remove usage of errno This removes the usage of errno for this because ceph doesn't return the same errno depending of the platform. 
Change-Id: I38fb99e0b536d4570abe512ff88bd89428773a16 --- gnocchi/storage/ceph.py | 24 +++++++++++++++++++++--- gnocchi/storage/common/ceph.py | 5 +++++ gnocchi/storage/incoming/ceph.py | 20 ++++++++++---------- 3 files changed, 36 insertions(+), 13 deletions(-) diff --git a/gnocchi/storage/ceph.py b/gnocchi/storage/ceph.py index baa977d1..b4aa696a 100644 --- a/gnocchi/storage/ceph.py +++ b/gnocchi/storage/ceph.py @@ -13,7 +13,6 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. -import errno from oslo_config import cfg @@ -110,7 +109,16 @@ class CephStorage(_carbonara.CarbonaraBasedStorage): op, self._build_unaggregated_timeserie_path(metric, 3)) except rados.ObjectNotFound: return - if ret == errno.ENOENT: + + # NOTE(sileht): after reading the libradospy, I'm + # not sure that ret will have the correct value + # get_omap_vals transforms the C int to python int + # before operate_read_op is called, I dunno if the int + # content is copied during this transformation or if + # this is a pointer to the C int, I think it's copied... + try: + ceph.errno_to_exception(ret) + except rados.ObjectNotFound: return ops = [self.ioctx.aio_remove(name) for name, _ in omaps] @@ -143,8 +151,18 @@ class CephStorage(_carbonara.CarbonaraBasedStorage): op, self._build_unaggregated_timeserie_path(metric, 3)) except rados.ObjectNotFound: raise storage.MetricDoesNotExist(metric) - if ret == errno.ENOENT: + + # NOTE(sileht): after reading the libradospy, I'm + # not sure that ret will have the correct value + # get_omap_vals transforms the C int to python int + # before operate_read_op is called, I dunno if the int + # content is copied during this transformation or if + # this is a pointer to the C int, I think it's copied... 
+ try: + ceph.errno_to_exception(ret) + except rados.ObjectNotFound: raise storage.MetricDoesNotExist(metric) + keys = set() for name, value in omaps: meta = name.split('_') diff --git a/gnocchi/storage/common/ceph.py b/gnocchi/storage/common/ceph.py index 6e1dc58e..764cdf4f 100644 --- a/gnocchi/storage/common/ceph.py +++ b/gnocchi/storage/common/ceph.py @@ -71,3 +71,8 @@ def close_rados_connection(conn, ioctx): ioctx.aio_flush() ioctx.close() conn.shutdown() + + +def errno_to_exception(ret): + if ret < 0: + raise rados.errno_to_exception[abs(ret)] diff --git a/gnocchi/storage/incoming/ceph.py b/gnocchi/storage/incoming/ceph.py index de33a841..ada8c047 100644 --- a/gnocchi/storage/incoming/ceph.py +++ b/gnocchi/storage/incoming/ceph.py @@ -14,7 +14,6 @@ from collections import defaultdict import contextlib import datetime -import errno import functools import uuid @@ -118,8 +117,11 @@ class CephStorage(_carbonara.CarbonaraBasedStorage): # before operate_read_op is called, I dunno if the int # content is copied during this transformation or if # this is a pointer to the C int, I think it's copied... 
- if ret == errno.ENOENT: + try: + ceph.errno_to_exception(ret) + except rados.ObjectNotFound: return () + return (k for k, v in omaps) def list_metric_with_measures_to_process(self): @@ -168,14 +170,12 @@ class CephStorage(_carbonara.CarbonaraBasedStorage): # Check that the measure file has not been deleted while still # listed in the OMAP – this can happen after a crash ret = comp.get_return_value() - if ret < 0: - exc = rados.errno_to_exception[abs(ret)] - if exc == rados.ObjectNotFound: - # Object has been deleted, so this is just a stalled entry - # in the OMAP listing, ignore - return - # This is not an "expected" error, raise it back - raise exc + try: + ceph.errno_to_exception(ret) + except rados.ObjectNotFound: + # Object has been deleted, so this is just a stalled entry + # in the OMAP listing, ignore + return if name in tmp_measures: tmp_measures[name] += data -- GitLab From 0986bc24ed6d4bf1faecf6f539c4a6c467348069 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Fri, 28 Apr 2017 08:47:56 +0200 Subject: [PATCH 0714/1483] fix search by user_id/project_id Since we store user_id/project_id as creator, we bokre the per user/project metric listing. This change fixes that. 
Change-Id: I92555d0020658ddecf37e4a8be9cfa27452a7a14 --- gnocchi/indexer/sqlalchemy.py | 9 +- .../tests/functional/gabbits/metric-list.yaml | 108 ++++++++++++++++++ 2 files changed, 116 insertions(+), 1 deletion(-) create mode 100644 gnocchi/tests/functional/gabbits/metric-list.yaml diff --git a/gnocchi/indexer/sqlalchemy.py b/gnocchi/indexer/sqlalchemy.py index 7959cb4e..3497b52d 100644 --- a/gnocchi/indexer/sqlalchemy.py +++ b/gnocchi/indexer/sqlalchemy.py @@ -663,7 +663,7 @@ class SQLAlchemyIndexer(indexer.IndexerDriver): @retry_on_deadlock def list_metrics(self, names=None, ids=None, details=False, status='active', limit=None, marker=None, sorts=None, - **kwargs): + creator=None, **kwargs): sorts = sorts or [] if ids is not None and not ids: return [] @@ -676,6 +676,13 @@ class SQLAlchemyIndexer(indexer.IndexerDriver): q = q.filter(Metric.name.in_(names)) if ids is not None: q = q.filter(Metric.id.in_(ids)) + if creator is not None: + if creator[0] == ":": + q = q.filter(Metric.creator.like("%%%s" % creator)) + elif creator[-1] == ":": + q = q.filter(Metric.creator.like("%s%%" % creator)) + else: + q = q.filter(Metric.creator == creator) for attr in kwargs: q = q.filter(getattr(Metric, attr) == kwargs[attr]) if details: diff --git a/gnocchi/tests/functional/gabbits/metric-list.yaml b/gnocchi/tests/functional/gabbits/metric-list.yaml new file mode 100644 index 00000000..cd6cc28c --- /dev/null +++ b/gnocchi/tests/functional/gabbits/metric-list.yaml @@ -0,0 +1,108 @@ +fixtures: + - ConfigFixture + +defaults: + request_headers: + x-user-id: 0fbb231484614b1a80131fc22f6afc9c + x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea + x-roles: admin + +tests: + - name: create archive policy 1 + desc: for later use + POST: /v1/archive_policy + request_headers: + content-type: application/json + x-roles: admin + data: + name: first_archive + definition: + - granularity: 1 second + status: 201 + + - name: create archive policy 2 + desc: for later use + POST: /v1/archive_policy + 
request_headers: + content-type: application/json + x-roles: admin + data: + name: second_archive + definition: + - granularity: 1 second + status: 201 + + - name: create metric 1 + POST: /v1/metric + request_headers: + content-type: application/json + data: + name: "disk.io.rate" + unit: "B/s" + archive_policy_name: first_archive + status: 201 + response_json_paths: + $.archive_policy_name: first_archive + $.name: disk.io.rate + $.unit: B/s + + - name: create metric 2 + POST: /v1/metric + request_headers: + content-type: application/json + x-user-id: 4fff6179c2fc414dbedfc8cc82d6ada7 + x-project-id: f3ca498a61c84422b953133adb71cff8 + data: + name: "disk.io.rate" + unit: "B/s" + archive_policy_name: first_archive + status: 201 + response_json_paths: + $.archive_policy_name: first_archive + $.name: disk.io.rate + $.unit: B/s + + - name: create metric 3 + POST: /v1/metric + request_headers: + content-type: application/json + x-user-id: faf30294217c4e1a91387d9c8f1fb1fb + x-project-id: f3ca498a61c84422b953133adb71cff8 + data: + name: "cpu_util" + unit: "%" + archive_policy_name: first_archive + status: 201 + response_json_paths: + $.archive_policy_name: first_archive + $.name: cpu_util + $.unit: "%" + + - name: create metric 4 + POST: /v1/metric + request_headers: + content-type: application/json + data: + name: "cpu" + unit: "ns" + archive_policy_name: second_archive + status: 201 + response_json_paths: + $.archive_policy_name: second_archive + $.name: cpu + $.unit: ns + + - name: list metrics + GET: /v1/metric + response_json_paths: + $.`len`: 4 + + - name: list metrics by user_id + GET: /v1/metric?user_id=faf30294217c4e1a91387d9c8f1fb1fb + response_json_paths: + $.`len`: 1 + + - name: list metrics by project_id + GET: /v1/metric?project_id=f3ca498a61c84422b953133adb71cff8 + response_json_paths: + $.`len`: 2 -- GitLab From dd0a877361f0d457875fe3c41249fa022fe4eb17 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Fri, 28 Apr 2017 08:53:39 +0200 Subject: [PATCH 
0715/1483] rest: Extends metric list filters Change-Id: I821c2d1a8c4cf0a217e72d95d8373f274024f9e0 --- gnocchi/rest/__init__.py | 33 +++++++++++++----- .../tests/functional/gabbits/metric-list.yaml | 34 +++++++++++++++++++ 2 files changed, 59 insertions(+), 8 deletions(-) diff --git a/gnocchi/rest/__init__.py b/gnocchi/rest/__init__.py index a9fc4aa6..ec3207f5 100644 --- a/gnocchi/rest/__init__.py +++ b/gnocchi/rest/__init__.py @@ -167,9 +167,9 @@ METRIC_DEFAULT_PAGINATION = ['id:asc'] def get_pagination_options(params, default): max_limit = pecan.request.conf.api.max_limit - limit = params.get('limit', max_limit) - marker = params.get('marker') - sorts = params.get('sort', default) + limit = params.pop('limit', max_limit) + marker = params.pop('marker', None) + sorts = params.pop('sort', default) if not isinstance(sorts, list): sorts = [sorts] @@ -536,14 +536,30 @@ class MetricsController(rest.RestController): pecan.response.status = 201 return m - @staticmethod + MetricListSchema = voluptuous.Schema({ + "user_id": six.text_type, + "project_id": six.text_type, + "creator": six.text_type, + "limit": six.text_type, + "name": six.text_type, + "id": six.text_type, + "unit": six.text_type, + "archive_policy_name": six.text_type, + "status": voluptuous.Any("active", "delete"), + "sort": voluptuous.Any([six.text_type], six.text_type), + "marker": six.text_type, + }) + + @classmethod @pecan.expose('json') - def get_all(**kwargs): + def get_all(cls, **kwargs): + kwargs = cls.MetricListSchema(kwargs) + # Compat with old user/project API - provided_user_id = kwargs.get('user_id') - provided_project_id = kwargs.get('project_id') + provided_user_id = kwargs.pop('user_id', None) + provided_project_id = kwargs.pop('project_id', None) if provided_user_id is None and provided_project_id is None: - provided_creator = kwargs.get('creator') + provided_creator = kwargs.pop('creator', None) else: provided_creator = ( (provided_user_id or "") @@ -563,6 +579,7 @@ class 
MetricsController(rest.RestController): attr_filter['creator'] = provided_creator attr_filter.update(get_pagination_options( kwargs, METRIC_DEFAULT_PAGINATION)) + attr_filter.update(kwargs) try: return pecan.request.indexer.list_metrics(**attr_filter) except indexer.IndexerException as e: diff --git a/gnocchi/tests/functional/gabbits/metric-list.yaml b/gnocchi/tests/functional/gabbits/metric-list.yaml index cd6cc28c..59f58b96 100644 --- a/gnocchi/tests/functional/gabbits/metric-list.yaml +++ b/gnocchi/tests/functional/gabbits/metric-list.yaml @@ -97,6 +97,40 @@ tests: response_json_paths: $.`len`: 4 + - name: list metrics by id + GET: /v1/metric?id=$HISTORY['create metric 1'].$RESPONSE['id'] + response_json_paths: + $.`len`: 1 + $[0].name: disk.io.rate + $[0].archive_policy.name: first_archive + + - name: list metrics by name + GET: /v1/metric?name=disk.io.rate + response_json_paths: + $.`len`: 2 + $[0].name: disk.io.rate + $[1].name: disk.io.rate + $[0].archive_policy.name: first_archive + $[1].archive_policy.name: first_archive + + - name: list metrics by unit + GET: /v1/metric?unit=ns + response_json_paths: + $.`len`: 1 + $[0].name: cpu + $[0].archive_policy.name: second_archive + + - name: list metrics by archive_policy + GET: /v1/metric?archive_policy_name=first_archive&sort=name:desc + response_json_paths: + $.`len`: 3 + $[0].name: disk.io.rate + $[1].name: disk.io.rate + $[2].name: cpu_util + $[0].archive_policy.name: first_archive + $[1].archive_policy.name: first_archive + $[2].archive_policy.name: first_archive + - name: list metrics by user_id GET: /v1/metric?user_id=faf30294217c4e1a91387d9c8f1fb1fb response_json_paths: -- GitLab From 2a6c20794ad728dc6dbb985ff7780cf36b50a359 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Thu, 27 Apr 2017 15:43:33 +0200 Subject: [PATCH 0716/1483] tests: more live verification Sometime live tests fail for an yet unknown reason. This change adds tests to help the debugging of this failure. 
Related-bug: #1681955 Change-Id: Ifadf481794943b3283211dd4e276df503f73f9b0 --- .../tests/functional_live/gabbits/live.yaml | 57 ++++++++++++++++++- 1 file changed, 56 insertions(+), 1 deletion(-) diff --git a/gnocchi/tests/functional_live/gabbits/live.yaml b/gnocchi/tests/functional_live/gabbits/live.yaml index 48838885..d63cb096 100644 --- a/gnocchi/tests/functional_live/gabbits/live.yaml +++ b/gnocchi/tests/functional_live/gabbits/live.yaml @@ -375,10 +375,28 @@ tests: $.archive_policy_name: gabbilive $.name: live.io.rate + - name: assert metric is present in listing + GET: /v1/metric?id=$HISTORY['create metric with name and rule'].$RESPONSE['$.id'] + response_json_paths: + $.`len`: 1 + + - name: assert metric is the only one with this policy + GET: /v1/metric?archive_policy_name=gabbilive + response_json_paths: + $.`len`: 1 + - name: delete metric - DELETE: $LOCATION + DELETE: /v1/metric/$HISTORY['create metric with name and rule'].$RESPONSE['$.id'] status: 204 + - name: assert metric is expunged + GET: $HISTORY['assert metric is present in listing'].$URL&status=delete + poll: + count: 360 + delay: 1 + response_json_paths: + $.`len`: 0 + - name: create metric with name and policy POST: /v1/metric request_headers: @@ -401,6 +419,10 @@ tests: DELETE: /v1/metric/$RESPONSE['$.id'] status: 204 + - name: ensure the metric is delete + GET: /v1/metric/$HISTORY['get valid metric id'].$RESPONSE['$.id'] + status: 404 + - name: create metric bad archive policy POST: /v1/metric request_headers: @@ -540,6 +562,13 @@ tests: $.project_id: 98e785d7-9487-4159-8ab8-8230ec37537a $.display_name: "myvm" + - name: get vcpus metric + GET: /v1/metric/$HISTORY['get myresource resource'].$RESPONSE['$.metrics.vcpus'] + status: 200 + response_json_paths: + $.name: vcpus + $.resource.id: 2ae35573-7f9f-4bb1-aae8-dad8dff5706e + - name: search for myresource resource via user_id POST: /v1/search/resource/myresource request_headers: @@ -651,6 +680,14 @@ tests: response_json_paths: $.`len`: 0 + - 
name: assert vcpus metric exists in listing + GET: /v1/metric?id=$HISTORY['get myresource resource'].$RESPONSE['$.metrics.vcpus'] + poll: + count: 360 + delay: 1 + response_json_paths: + $.`len`: 1 + - name: delete myresource resource DELETE: /v1/resource/myresource/2ae35573-7f9f-4bb1-aae8-dad8dff5706e status: 204 @@ -660,12 +697,30 @@ tests: GET: /v1/resource/myresource/2ae35573-7f9f-4bb1-aae8-dad8dff5706e status: 404 + - name: assert vcpus metric is really expurged + GET: $HISTORY['assert vcpus metric exists in listing'].$URL&status=delete + poll: + count: 360 + delay: 1 + response_json_paths: + $.`len`: 0 + - name: post myresource resource no data POST: /v1/resource/myresource request_headers: content-type: application/json status: 400 + - name: assert no metrics have the gabbilive policy + GET: $HISTORY['assert metric is the only one with this policy'].$URL + response_json_paths: + $.`len`: 0 + + - name: assert no delete metrics have the gabbilive policy + GET: $HISTORY['assert metric is the only one with this policy'].$URL&status=delete + response_json_paths: + $.`len`: 0 + - name: delete single archive policy cleanup DELETE: /v1/archive_policy/gabbilive poll: -- GitLab From 065c0eaf4fd364fbd888e9c043a046937531daa5 Mon Sep 17 00:00:00 2001 From: gord chung Date: Thu, 27 Apr 2017 15:14:07 +0000 Subject: [PATCH 0717/1483] use redis as coordinator by default we use sql as coordinator but gnocchi has high locking requirements becuase of number of sacks which far exceeds default 100 connection limit of sql. switch to use redis as lock manager instead. 
Closes-Bug: #1681955 Change-Id: I9f7606c17535432743c3bdd0cda0e4200f03d328 --- run-func-tests.sh | 2 +- setup.cfg | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/run-func-tests.sh b/run-func-tests.sh index 5f17ad84..cf28931d 100755 --- a/run-func-tests.sh +++ b/run-func-tests.sh @@ -45,7 +45,7 @@ for storage in ${GNOCCHI_TEST_STORAGE_DRIVERS}; do export GNOCCHI_SERVICE_TOKEN="" # Just make gabbi happy export GNOCCHI_AUTHORIZATION="basic YWRtaW46" # admin in base64 export OS_TEST_PATH=gnocchi/tests/functional_live - pifpaf -e GNOCCHI run gnocchi --indexer-url $INDEXER_URL --storage-url $STORAGE_URL -- ./tools/pretty_tox.sh $* + pifpaf -e GNOCCHI run gnocchi --indexer-url $INDEXER_URL --storage-url $STORAGE_URL --coordination-driver redis -- ./tools/pretty_tox.sh $* cleanup done diff --git a/setup.cfg b/setup.cfg index f49af905..386d90f6 100644 --- a/setup.cfg +++ b/setup.cfg @@ -65,7 +65,7 @@ doc = Jinja2 reno>=1.6.2 test = - pifpaf>=0.25.0 + pifpaf>=1.0.1 gabbi>=1.30.0 coverage>=3.6 fixtures -- GitLab From 57fdce963924cd403e299663714063ca25b6ac5b Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Wed, 3 May 2017 07:43:07 +0200 Subject: [PATCH 0718/1483] tempest: rework gabbi setup The current approach is a bit hacky and create one tempest scenario per gabbi HTTP call. The side effect is that tests continue to run even the previous one have fail. This changes the approach by running gabbi to have one scenario per yaml file from tempest point of view. This will make easier to debug the scenario in case of failure. 
Change-Id: Ice4be203798a222b4b521540e637fb6ec0c9c01e --- gnocchi/tempest/scenario/__init__.py | 94 +++++++++++++++++----------- 1 file changed, 59 insertions(+), 35 deletions(-) diff --git a/gnocchi/tempest/scenario/__init__.py b/gnocchi/tempest/scenario/__init__.py index 43ec4742..7db0fd6f 100644 --- a/gnocchi/tempest/scenario/__init__.py +++ b/gnocchi/tempest/scenario/__init__.py @@ -15,31 +15,35 @@ from __future__ import absolute_import import os import unittest -from gabbi import driver +from gabbi import runner +from gabbi import suitemaker +from gabbi import utils import six.moves.urllib.parse as urlparse from tempest import config import tempest.test CONF = config.CONF +TEST_DIR = os.path.join(os.path.dirname(__file__), '..', '..', + 'tests', 'functional_live', 'gabbits') + class GnocchiGabbiTest(tempest.test.BaseTestCase): credentials = ['admin'] + TIMEOUT_SCALING_FACTOR = 5 + @classmethod def skip_checks(cls): super(GnocchiGabbiTest, cls).skip_checks() if not CONF.service_available.gnocchi: raise cls.skipException("Gnocchi support is required") - @classmethod - def resource_setup(cls): - super(GnocchiGabbiTest, cls).resource_setup() - - url = cls.os_admin.auth_provider.base_url( + def _do_test(self, filename): + token = self.os_admin.auth_provider.get_token() + url = self.os_admin.auth_provider.base_url( {'service': CONF.metric.catalog_type, 'endpoint_type': CONF.metric.endpoint_type}) - token = cls.os_admin.auth_provider.get_token() parsed_url = urlparse.urlsplit(url) prefix = parsed_url.path.rstrip('/') # turn it into a prefix @@ -53,34 +57,54 @@ class GnocchiGabbiTest(tempest.test.BaseTestCase): if parsed_url.port: port = parsed_url.port - test_dir = os.path.join(os.path.dirname(__file__), '..', '..', - 'tests', 'functional_live', 'gabbits') - cls.tests = driver.build_tests( - test_dir, unittest.TestLoader(), - host=host, port=port, prefix=prefix, - test_loader_name='tempest.scenario.gnocchi.test', - require_ssl=require_ssl) - 
os.environ["GNOCCHI_SERVICE_TOKEN"] = token os.environ["GNOCCHI_AUTHORIZATION"] = "not used" - @classmethod - def clear_credentials(cls): - # FIXME(sileht): We don't want the token to be invalided, but - # for some obcurs reason, clear_credentials is called before/during run - # So, make the one used by tearDropClass a dump, and call it manually - # in run() - pass - - def run(self, result=None): - self.setUp() - try: - self.tests.run(result) - finally: - super(GnocchiGabbiTest, self).clear_credentials() - self.tearDown() - - def test_fake(self): - # NOTE(sileht): A fake test is needed to have the class loaded - # by the test runner - pass + with file(os.path.join(TEST_DIR, filename)) as f: + suite_dict = utils.load_yaml(f) + suite_dict.setdefault('defaults', {})['ssl'] = require_ssl + test_suite = suitemaker.test_suite_from_dict( + loader=unittest.defaultTestLoader, + test_base_name="gabbi", + suite_dict=suite_dict, + test_directory=TEST_DIR, + host=host, port=port, + fixture_module=None, + intercept=None, + prefix=prefix, + handlers=runner.initialize_handlers([]), + test_loader_name="tempest") + + # NOTE(sileht): We hide stdout/stderr and reraise the failure + # manually, tempest will print it itself. 
+ with open(os.devnull, 'w') as stream: + result = unittest.TextTestRunner( + stream=stream, verbosity=0, failfast=True, + ).run(test_suite) + + if not result.wasSuccessful(): + failures = (result.errors + result.failures + + result.unexpectedSuccesses) + if failures: + test, bt = failures[0] + name = test.test_data.get('name', test.id()) + msg = 'From test "%s" :\n%s' % (name, bt) + self.fail(msg) + + self.assertTrue(result.wasSuccessful()) + + +def test_maker(name, filename): + def test(self): + self._do_test(filename) + test.__name__ = name + return test + + +# Create one scenario per yaml file +for filename in os.listdir(TEST_DIR): + if not filename.endswith('.yaml'): + continue + name = "test_%s" % filename[:-5].lower().replace("-", "_") + setattr(GnocchiGabbiTest, name, + test_maker(name, filename)) -- GitLab From d81f13dc9f33953ba4cb9ec1a5f28944193011c0 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Wed, 3 May 2017 12:21:52 +0200 Subject: [PATCH 0719/1483] swift: make sure to retry if the client cannot find Swift The following can happen: 2017-05-02 15:21:24.363 96139 CRITICAL gnocchi [-] ClientException: Endpoint for object-store not found - have you specified a region? 
2017-05-02 15:21:24.363 96139 ERROR gnocchi Traceback (most recent call last): 2017-05-02 15:21:24.363 96139 ERROR gnocchi File "/usr/bin/gnocchi-statsd", line 10, in 2017-05-02 15:21:24.363 96139 ERROR gnocchi sys.exit(statsd()) 2017-05-02 15:21:24.363 96139 ERROR gnocchi File "/usr/lib/python2.7/site-packages/gnocchi/cli.py", line 74, in statsd 2017-05-02 15:21:24.363 96139 ERROR gnocchi statsd_service.start() 2017-05-02 15:21:24.363 96139 ERROR gnocchi File "/usr/lib/python2.7/site-packages/gnocchi/statsd.py", line 174, in start 2017-05-02 15:21:24.363 96139 ERROR gnocchi stats = Stats(conf) 2017-05-02 15:21:24.363 96139 ERROR gnocchi File "/usr/lib/python2.7/site-packages/gnocchi/statsd.py", line 38, in __init__ 2017-05-02 15:21:24.363 96139 ERROR gnocchi self.storage = storage.get_driver(self.conf) 2017-05-02 15:21:24.363 96139 ERROR gnocchi File "/usr/lib/python2.7/site-packages/gnocchi/storage/__init__.py", line 158, in get_driver 2017-05-02 15:21:24.363 96139 ERROR gnocchi return get_driver_class(conf)(conf.storage) 2017-05-02 15:21:24.363 96139 ERROR gnocchi File "/usr/lib/python2.7/site-packages/gnocchi/storage/swift.py", line 98, in __init__ 2017-05-02 15:21:24.363 96139 ERROR gnocchi self.swift.put_container(self.MEASURE_PREFIX) 2017-05-02 15:21:24.363 96139 ERROR gnocchi File "/usr/lib/python2.7/site-packages/swiftclient/client.py", line 1728, in put_container 2017-05-02 15:21:24.363 96139 ERROR gnocchi query_string=query_string) 2017-05-02 15:21:24.363 96139 ERROR gnocchi File "/usr/lib/python2.7/site-packages/swiftclient/client.py", line 1635, in _retry 2017-05-02 15:21:24.363 96139 ERROR gnocchi self.url, self.token = self.get_auth() 2017-05-02 15:21:24.363 96139 ERROR gnocchi File "/usr/lib/python2.7/site-packages/swiftclient/client.py", line 1587, in get_auth 2017-05-02 15:21:24.363 96139 ERROR gnocchi timeout=self.timeout) 2017-05-02 15:21:24.363 96139 ERROR gnocchi File "/usr/lib/python2.7/site-packages/swiftclient/client.py", line 662, in 
get_auth 2017-05-02 15:21:24.363 96139 ERROR gnocchi auth_version=auth_version) 2017-05-02 15:21:24.363 96139 ERROR gnocchi File "/usr/lib/python2.7/site-packages/swiftclient/client.py", line 596, in get_auth_keystone 2017-05-02 15:21:24.363 96139 ERROR gnocchi 'have you specified a region?' % service_type) 2017-05-02 15:21:24.363 96139 ERROR gnocchi ClientException: Endpoint for object-store not found - have you specified a region? 2017-05-02 15:21:24.363 96139 ERROR gnocchi Make sure we retry. Change-Id: I12ad167962aa62c22785c32394a607929ca8b925 --- gnocchi/storage/common/swift.py | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/gnocchi/storage/common/swift.py b/gnocchi/storage/common/swift.py index 95003fdd..5d4ff47e 100644 --- a/gnocchi/storage/common/swift.py +++ b/gnocchi/storage/common/swift.py @@ -24,13 +24,13 @@ except ImportError: swift_utils = None from gnocchi import storage +from gnocchi import utils LOG = log.getLogger(__name__) -def get_connection(conf): - if swclient is None: - raise RuntimeError("python-swiftclient unavailable") +@utils.retry +def _get_connection(conf): return swclient.Connection( auth_version=conf.swift_auth_version, authurl=conf.swift_authurl, @@ -44,6 +44,13 @@ def get_connection(conf): retries=0) +def get_connection(conf): + if swclient is None: + raise RuntimeError("python-swiftclient unavailable") + + return _get_connection(conf) + + POST_HEADERS = {'Accept': 'application/json', 'Content-Type': 'text/plain'} -- GitLab From d7393f65782b9fe9895062a9b5c59e86167cbe68 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Mon, 8 May 2017 15:10:26 -0400 Subject: [PATCH 0720/1483] rest: do not duplicate non-existing resource ids Currently the code tries over and over again to create metrics for a resource that does not exist. 
Which add 2 bad effects (with N is the number of metrics that gets measures for this non-existing resource): - it calls N times create_metrics() where it's sure it'll raise NoSuchResource, so let's just break at the first error - the unknown resource is added N times to the detail of the error message Change-Id: I5b0de5989c21cd57d84b53cb9e8a13a311ad4862 --- gnocchi/rest/__init__.py | 1 + .../functional/gabbits/batch-measures.yaml | 33 +++++++++++++++++++ 2 files changed, 34 insertions(+) diff --git a/gnocchi/rest/__init__.py b/gnocchi/rest/__init__.py index ec3207f5..ef884535 100644 --- a/gnocchi/rest/__init__.py +++ b/gnocchi/rest/__init__.py @@ -1404,6 +1404,7 @@ class ResourcesMetricsMeasuresBatchController(rest.RestController): unknown_resources.append({ 'resource_id': six.text_type(resource_id), 'original_resource_id': original_resource_id}) + break except indexer.IndexerException as e: # This catch NoSuchArchivePolicy, which is unlikely # be still possible diff --git a/gnocchi/tests/functional/gabbits/batch-measures.yaml b/gnocchi/tests/functional/gabbits/batch-measures.yaml index ae3b454e..a121f6fb 100644 --- a/gnocchi/tests/functional/gabbits/batch-measures.yaml +++ b/gnocchi/tests/functional/gabbits/batch-measures.yaml @@ -227,6 +227,39 @@ tests: - original_resource_id: "bbbbbbbb-d63b-4cdd-be89-111111111111" resource_id: "bbbbbbbb-d63b-4cdd-be89-111111111111" + - name: push measurements to unknown named metrics and resource with create_metrics with uuid resource id where resources is several times listed + POST: /v1/batch/resources/metrics/measures?create_metrics=true + request_headers: + content-type: application/json + accept: application/json + data: + aaaaaaaa-d63b-4cdd-be89-111111111111: + auto.test: + - timestamp: "2015-03-06T14:33:57" + value: 43.1 + - timestamp: "2015-03-06T14:34:12" + value: 12 + auto.test2: + - timestamp: "2015-03-06T14:33:57" + value: 43.1 + - timestamp: "2015-03-06T14:34:12" + value: 12 + bbbbbbbb-d63b-4cdd-be89-111111111111: + 
auto.test: + - timestamp: "2015-03-06T14:33:57" + value: 43.1 + - timestamp: "2015-03-06T14:34:12" + value: 12 + + status: 400 + response_json_paths: + $.description.cause: "Unknown resources" + $.description.detail[/original_resource_id]: + - original_resource_id: "aaaaaaaa-d63b-4cdd-be89-111111111111" + resource_id: "aaaaaaaa-d63b-4cdd-be89-111111111111" + - original_resource_id: "bbbbbbbb-d63b-4cdd-be89-111111111111" + resource_id: "bbbbbbbb-d63b-4cdd-be89-111111111111" + - name: push measurements to unknown named metrics and resource with create_metrics with non uuid resource id POST: /v1/batch/resources/metrics/measures?create_metrics=true request_headers: -- GitLab From 9889bd04f5ae9949c739ab656771aa5b8c1bab95 Mon Sep 17 00:00:00 2001 From: gord chung Date: Fri, 3 Mar 2017 19:31:20 +0000 Subject: [PATCH 0721/1483] push incoming into different sacks having everything in one giant folder/bucket/container/object is bad because: - does not allow for good distribution of backend driver. - makes it hella hard to cleanly split work across multiple metricd - starves metrics from being processed. we used "sacks" to avoid naming same as driver paradigms. driver implementation will be done individually so this does nothing but pretend to lock multiple buckets when it's actually just one. 
Related-Bug: #1629420 Related-Bug: #1623263 Related-Bug: #1620674 Change-Id: Icc32d918fe55416385122470c47d60ddbb30dd34 --- gnocchi/cli.py | 42 +++++++++++++++++++------- gnocchi/storage/incoming/__init__.py | 2 +- gnocchi/storage/incoming/_carbonara.py | 9 ++++++ gnocchi/storage/incoming/ceph.py | 2 +- gnocchi/storage/incoming/file.py | 2 +- gnocchi/storage/incoming/redis.py | 2 +- gnocchi/storage/incoming/s3.py | 3 +- gnocchi/storage/incoming/swift.py | 2 +- gnocchi/tests/functional/fixtures.py | 4 +-- gnocchi/tests/test_aggregates.py | 3 +- gnocchi/tests/test_rest.py | 3 +- gnocchi/tests/test_storage.py | 7 +++-- gnocchi/tests/utils.py | 19 ++++++++++++ 13 files changed, 75 insertions(+), 25 deletions(-) create mode 100644 gnocchi/tests/utils.py diff --git a/gnocchi/cli.py b/gnocchi/cli.py index 0a4a7dba..853153dd 100644 --- a/gnocchi/cli.py +++ b/gnocchi/cli.py @@ -146,15 +146,31 @@ class MetricProcessor(MetricProcessBase): self._coord, self._my_id = utils.get_coordinator_and_start( conf.storage.coordination_url) + def _sack_lock(self, sack): + lock_name = b'gnocchi-sack-%s-lock' % str(sack).encode('ascii') + return self._coord.get_lock(lock_name) + def _run_job(self): - try: - metrics = list( - self.store.incoming.list_metric_with_measures_to_process()) - LOG.debug("%d metrics scheduled for processing.", len(metrics)) - self.store.process_background_tasks(self.index, metrics) - except Exception: - LOG.error("Unexpected error scheduling metrics for processing", - exc_info=True) + m_count = 0 + s_count = 0 + in_store = self.store.incoming + for s in six.moves.range(in_store.NUM_SACKS): + # TODO(gordc): support delay release lock so we don't + # process a sack right after another process + lock = self._sack_lock(s) + if not lock.acquire(blocking=False): + continue + try: + metrics = in_store.list_metric_with_measures_to_process(s) + m_count = len(metrics) + self.store.process_background_tasks(self.index, metrics) + s_count += 1 + except Exception: + 
LOG.error("Unexpected error processing assigned job", + exc_info=True) + finally: + lock.release() + LOG.debug("%d metrics processed from %d sacks", m_count, s_count) def close_services(self): self._coord.stop() @@ -211,9 +227,13 @@ def metricd_tester(conf): index = indexer.get_driver(conf) index.connect() s = storage.get_driver(conf) - metrics = s.incoming.list_metric_with_measures_to_process()[ - :conf.stop_after_processing_metrics] - s.process_new_measures(index, metrics, True) + metrics = set() + for i in six.moves.range(s.incoming.NUM_SACKS): + metrics.update(s.incoming.list_metric_with_measures_to_process(i)) + if len(metrics) >= conf.stop_after_processing_metrics: + break + s.process_new_measures( + index, list(metrics)[:conf.stop_after_processing_metrics], True) def metricd(): diff --git a/gnocchi/storage/incoming/__init__.py b/gnocchi/storage/incoming/__init__.py index c1eb06af..eb99ae4d 100644 --- a/gnocchi/storage/incoming/__init__.py +++ b/gnocchi/storage/incoming/__init__.py @@ -60,5 +60,5 @@ class StorageDriver(object): raise exceptions.NotImplementedError @staticmethod - def list_metric_with_measures_to_process(): + def list_metric_with_measures_to_process(sack): raise NotImplementedError diff --git a/gnocchi/storage/incoming/_carbonara.py b/gnocchi/storage/incoming/_carbonara.py index 97a67a9f..c17ae852 100644 --- a/gnocchi/storage/incoming/_carbonara.py +++ b/gnocchi/storage/incoming/_carbonara.py @@ -32,9 +32,12 @@ _NUM_WORKERS = utils.get_default_workers() class CarbonaraBasedStorage(incoming.StorageDriver): MEASURE_PREFIX = "measure" + SACK_PREFIX = "incoming-%s" _MEASURE_SERIAL_FORMAT = "Qd" _MEASURE_SERIAL_LEN = struct.calcsize(_MEASURE_SERIAL_FORMAT) + NUM_SACKS = 8 + def _unserialize_measures(self, measure_id, data): nb_measures = len(data) // self._MEASURE_SERIAL_LEN try: @@ -85,3 +88,9 @@ class CarbonaraBasedStorage(incoming.StorageDriver): @staticmethod def process_measure_for_metric(metric): raise NotImplementedError + + def 
sack_for_metric(self, metric_id): + return metric_id.int % self.NUM_SACKS + + def get_sack_name(self, sack): + return self.SACK_PREFIX % sack diff --git a/gnocchi/storage/incoming/ceph.py b/gnocchi/storage/incoming/ceph.py index ada8c047..8c5fe7be 100644 --- a/gnocchi/storage/incoming/ceph.py +++ b/gnocchi/storage/incoming/ceph.py @@ -124,7 +124,7 @@ class CephStorage(_carbonara.CarbonaraBasedStorage): return (k for k, v in omaps) - def list_metric_with_measures_to_process(self): + def list_metric_with_measures_to_process(self, sack): names = set() marker = "" while True: diff --git a/gnocchi/storage/incoming/file.py b/gnocchi/storage/incoming/file.py index c4d58087..35d3b912 100644 --- a/gnocchi/storage/incoming/file.py +++ b/gnocchi/storage/incoming/file.py @@ -75,7 +75,7 @@ class FileStorage(_carbonara.CarbonaraBasedStorage): return (len(metric_details.keys()), sum(metric_details.values()), metric_details if details else None) - def list_metric_with_measures_to_process(self): + def list_metric_with_measures_to_process(self, sack): return set(os.listdir(self.measure_path)) def _list_measures_container_for_metric_id(self, metric_id): diff --git a/gnocchi/storage/incoming/redis.py b/gnocchi/storage/incoming/redis.py index df1edd6e..24cde101 100644 --- a/gnocchi/storage/incoming/redis.py +++ b/gnocchi/storage/incoming/redis.py @@ -46,7 +46,7 @@ class RedisStorage(_carbonara.CarbonaraBasedStorage): return (len(metric_details.keys()), sum(metric_details.values()), metric_details if details else None) - def list_metric_with_measures_to_process(self): + def list_metric_with_measures_to_process(self, sack): match = redis.SEP.join([self.STORAGE_PREFIX, "*"]) keys = self._client.scan_iter(match=match, count=1000) measures = set([k.decode('utf8').split(redis.SEP)[1] for k in keys]) diff --git a/gnocchi/storage/incoming/s3.py b/gnocchi/storage/incoming/s3.py index ed3f4caf..862a1f88 100644 --- a/gnocchi/storage/incoming/s3.py +++ b/gnocchi/storage/incoming/s3.py @@ -20,7 
+20,6 @@ import uuid import six - from gnocchi.storage.common import s3 from gnocchi.storage.incoming import _carbonara @@ -80,7 +79,7 @@ class S3Storage(_carbonara.CarbonaraBasedStorage): return (len(metric_details), sum(metric_details.values()), metric_details if details else None) - def list_metric_with_measures_to_process(self): + def list_metric_with_measures_to_process(self, sack): limit = 1000 # 1000 is the default anyway metrics = set() response = {} diff --git a/gnocchi/storage/incoming/swift.py b/gnocchi/storage/incoming/swift.py index c5d47d18..6a5f7d7e 100644 --- a/gnocchi/storage/incoming/swift.py +++ b/gnocchi/storage/incoming/swift.py @@ -58,7 +58,7 @@ class SwiftStorage(_carbonara.CarbonaraBasedStorage): measures = int(headers.get('x-container-object-count')) return nb_metrics, measures, metric_details if details else None - def list_metric_with_measures_to_process(self): + def list_metric_with_measures_to_process(self, sack): headers, files = self.swift.get_container(self.MEASURE_PREFIX, delimiter='/', full_listing=True) diff --git a/gnocchi/tests/functional/fixtures.py b/gnocchi/tests/functional/fixtures.py index 59121cec..7d212073 100644 --- a/gnocchi/tests/functional/fixtures.py +++ b/gnocchi/tests/functional/fixtures.py @@ -34,6 +34,7 @@ from gnocchi.indexer import sqlalchemy from gnocchi.rest import app from gnocchi import service from gnocchi import storage +from gnocchi.tests import utils # NOTE(chdent): Hack to restore semblance of global configuration to @@ -179,9 +180,8 @@ class MetricdThread(threading.Thread): self.flag = True def run(self): - incoming = self.storage.incoming while self.flag: - metrics = incoming.list_metric_with_measures_to_process() + metrics = utils.list_all_incoming_metrics(self.storage.incoming) self.storage.process_background_tasks(self.index, metrics) time.sleep(0.1) diff --git a/gnocchi/tests/test_aggregates.py b/gnocchi/tests/test_aggregates.py index 5fc0d084..d5d4e900 100644 --- 
a/gnocchi/tests/test_aggregates.py +++ b/gnocchi/tests/test_aggregates.py @@ -23,6 +23,7 @@ from gnocchi import aggregates from gnocchi.aggregates import moving_stats from gnocchi import storage from gnocchi.tests import base as tests_base +from gnocchi.tests import utils as tests_utils from gnocchi import utils @@ -60,7 +61,7 @@ class TestAggregates(tests_base.TestCase): for n, val in enumerate(data)] self.index.create_metric(metric.id, str(uuid.uuid4()), 'medium') self.storage.incoming.add_measures(metric, measures) - metrics = self.storage.incoming.list_metric_with_measures_to_process() + metrics = tests_utils.list_all_incoming_metrics(self.storage.incoming) self.storage.process_background_tasks(self.index, metrics, sync=True) return metric diff --git a/gnocchi/tests/test_rest.py b/gnocchi/tests/test_rest.py index 296755f4..f5d979a6 100644 --- a/gnocchi/tests/test_rest.py +++ b/gnocchi/tests/test_rest.py @@ -36,6 +36,7 @@ from gnocchi import archive_policy from gnocchi import rest from gnocchi.rest import app from gnocchi.tests import base as tests_base +from gnocchi.tests import utils as tests_utils from gnocchi import utils @@ -122,7 +123,7 @@ class TestingApp(webtest.TestApp): req.headers['X-User-Id'] = self.USER_ID req.headers['X-Project-Id'] = self.PROJECT_ID response = super(TestingApp, self).do_request(req, *args, **kwargs) - metrics = self.storage.incoming.list_metric_with_measures_to_process() + metrics = tests_utils.list_all_incoming_metrics(self.storage.incoming) self.storage.process_background_tasks(self.indexer, metrics, sync=True) return response diff --git a/gnocchi/tests/test_storage.py b/gnocchi/tests/test_storage.py index 6151b2fb..d6a2fde1 100644 --- a/gnocchi/tests/test_storage.py +++ b/gnocchi/tests/test_storage.py @@ -27,6 +27,7 @@ from gnocchi import indexer from gnocchi import storage from gnocchi.storage import _carbonara from gnocchi.tests import base as tests_base +from gnocchi.tests import utils as tests_utils from gnocchi import 
utils @@ -97,15 +98,15 @@ class TestStorageDriver(tests_base.TestCase): self.assertIn((utils.datetime_utc(2014, 1, 1, 12), 300.0, 5.0), m) def test_list_metric_with_measures_to_process(self): - metrics = self.storage.incoming.list_metric_with_measures_to_process() + metrics = tests_utils.list_all_incoming_metrics(self.storage.incoming) self.assertEqual(set(), metrics) self.storage.incoming.add_measures(self.metric, [ storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 0, 1), 69), ]) - metrics = self.storage.incoming.list_metric_with_measures_to_process() + metrics = tests_utils.list_all_incoming_metrics(self.storage.incoming) self.assertEqual(set([str(self.metric.id)]), metrics) self.trigger_processing() - metrics = self.storage.incoming.list_metric_with_measures_to_process() + metrics = tests_utils.list_all_incoming_metrics(self.storage.incoming) self.assertEqual(set([]), metrics) def test_delete_nonempty_metric(self): diff --git a/gnocchi/tests/utils.py b/gnocchi/tests/utils.py new file mode 100644 index 00000000..e9b0b339 --- /dev/null +++ b/gnocchi/tests/utils.py @@ -0,0 +1,19 @@ +# -*- encoding: utf-8 -*- +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+import six + + +def list_all_incoming_metrics(incoming): + return set.union(*[incoming.list_metric_with_measures_to_process(i) + for i in six.moves.range(incoming.NUM_SACKS)]) -- GitLab From bfac71ad35c752b9049334c1a7d4dbec42415c24 Mon Sep 17 00:00:00 2001 From: gord chung Date: Wed, 10 May 2017 20:33:11 +0000 Subject: [PATCH 0722/1483] add note to clear backlog before upgrade upgrade is a single process, even with threads, it will be slow as hell to upgrade unprocessed measures versus just clearing it with tens or hundreds of metricd Change-Id: Ife3b58105d7cd17a824119f7ef3c369ad1b419a0 --- doc/source/install.rst | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/doc/source/install.rst b/doc/source/install.rst index 5ed1b35b..897107a1 100644 --- a/doc/source/install.rst +++ b/doc/source/install.rst @@ -154,6 +154,11 @@ that your indexer and storage are properly upgraded. Run the following: 2. Stop the old version of `gnocchi-metricd` daemon +.. note:: + + Data in backlog is never migrated between versions. Ensure the backlog is + empty before any upgrade to ensure data is not lost. + 3. Install the new version of Gnocchi 4. 
Run `gnocchi-upgrade` -- GitLab From 287117bf7adc2fe9723bd64202c8ad9fd2c6e5a1 Mon Sep 17 00:00:00 2001 From: gord chung Date: Tue, 4 Apr 2017 15:54:14 +0000 Subject: [PATCH 0723/1483] redis: push to multiple sacks enable redis to push to multiple sacks and handle them accordingly Change-Id: I84c046b80fa46d172dfee69dc4987e8b5a860a1f --- gnocchi/storage/incoming/redis.py | 13 ++++++------- gnocchi/tests/base.py | 3 ++- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/gnocchi/storage/incoming/redis.py b/gnocchi/storage/incoming/redis.py index 24cde101..f4f70a11 100644 --- a/gnocchi/storage/incoming/redis.py +++ b/gnocchi/storage/incoming/redis.py @@ -24,21 +24,21 @@ from gnocchi.storage.incoming import _carbonara class RedisStorage(_carbonara.CarbonaraBasedStorage): - STORAGE_PREFIX = "incoming" - def __init__(self, conf): super(RedisStorage, self).__init__(conf) self._client = redis.get_client(conf) def _build_measure_path(self, metric_id): - return redis.SEP.join([self.STORAGE_PREFIX, six.text_type(metric_id)]) + return redis.SEP.join([ + self.get_sack_name(self.sack_for_metric(metric_id)), + six.text_type(metric_id)]) def _store_new_measures(self, metric, data): path = self._build_measure_path(metric.id) self._client.rpush(path, data) def _build_report(self, details): - match = redis.SEP.join([self.STORAGE_PREFIX, "*"]) + match = redis.SEP.join([self.get_sack_name("*"), "*"]) metric_details = collections.defaultdict(int) for key in self._client.scan_iter(match=match, count=1000): metric = key.decode('utf8').split(redis.SEP)[1] @@ -47,10 +47,9 @@ class RedisStorage(_carbonara.CarbonaraBasedStorage): metric_details if details else None) def list_metric_with_measures_to_process(self, sack): - match = redis.SEP.join([self.STORAGE_PREFIX, "*"]) + match = redis.SEP.join([self.get_sack_name(sack), "*"]) keys = self._client.scan_iter(match=match, count=1000) - measures = set([k.decode('utf8').split(redis.SEP)[1] for k in keys]) - return measures + return 
set([k.decode('utf8').split(redis.SEP)[1] for k in keys]) def delete_unprocessed_measures_for_metric_id(self, metric_id): self._client.delete(self._build_measure_path(metric_id)) diff --git a/gnocchi/tests/base.py b/gnocchi/tests/base.py index 2db402f1..9cc68ddf 100644 --- a/gnocchi/tests/base.py +++ b/gnocchi/tests/base.py @@ -325,7 +325,8 @@ class TestCase(base.BaseTestCase): if self.conf.storage.driver == 'redis': # Create one prefix per test self.storage.STORAGE_PREFIX = str(uuid.uuid4()) - self.storage.incoming.STORAGE_PREFIX = str(uuid.uuid4()) + self.storage.incoming.SACK_PREFIX = ( + str(uuid.uuid4()) + self.storage.incoming.SACK_PREFIX) self.storage.upgrade(self.index) -- GitLab From 462f7079b669524cb2eaf04ee19840aac091f05f Mon Sep 17 00:00:00 2001 From: gord chung Date: Tue, 4 Apr 2017 16:09:56 +0000 Subject: [PATCH 0724/1483] ceph: push to multiple sacks enable ceph to push to multiple sacks and handle them accordingly Change-Id: I22bb2840bff9f2a8c571b87c4e2038490c0c3493 --- gnocchi/storage/incoming/ceph.py | 86 ++++++++++++++++++-------------- 1 file changed, 48 insertions(+), 38 deletions(-) diff --git a/gnocchi/storage/incoming/ceph.py b/gnocchi/storage/incoming/ceph.py index 8c5fe7be..906a2b9a 100644 --- a/gnocchi/storage/incoming/ceph.py +++ b/gnocchi/storage/incoming/ceph.py @@ -26,6 +26,9 @@ rados = ceph.rados class CephStorage(_carbonara.CarbonaraBasedStorage): + + Q_LIMIT = 1000 + def __init__(self, conf): super(CephStorage, self).__init__(conf) self.rados, self.ioctx = ceph.create_rados_connection(conf) @@ -55,59 +58,62 @@ class CephStorage(_carbonara.CarbonaraBasedStorage): super(CephStorage, self).stop() def add_measures_batch(self, metrics_and_measures): - names = [] + names_by_sack = defaultdict(list) for metric, measures in six.iteritems(metrics_and_measures): name = "_".join(( self.MEASURE_PREFIX, str(metric.id), str(uuid.uuid4()), datetime.datetime.utcnow().strftime("%Y%m%d_%H:%M:%S"))) - names.append(name) + sack = 
self.get_sack_name(self.sack_for_metric(metric.id)) + names_by_sack[sack].append(name) data = self._encode_measures(measures) self.ioctx.write_full(name, data) - with rados.WriteOpCtx() as op: - # NOTE(sileht): list all objects in a pool is too slow with - # many objects (2min for 20000 objects in 50osds cluster), - # and enforce us to iterrate over all objects - # So we create an object MEASURE_PREFIX, that have as - # omap the list of objects to process (not xattr because - # it doesn't # allow to configure the locking behavior) - self.ioctx.set_omap(op, tuple(names), (b"",) * len(names)) - self.ioctx.operate_write_op(op, self.MEASURE_PREFIX, - flags=self.OMAP_WRITE_FLAGS) + for sack, names in names_by_sack.items(): + with rados.WriteOpCtx() as op: + # NOTE(sileht): list all objects in a pool is too slow with + # many objects (2min for 20000 objects in 50osds cluster), + # and enforce us to iterrate over all objects + # So we create an object MEASURE_PREFIX, that have as + # omap the list of objects to process (not xattr because + # it doesn't # allow to configure the locking behavior) + self.ioctx.set_omap(op, tuple(names), (b"",) * len(names)) + self.ioctx.operate_write_op(op, sack, + flags=self.OMAP_WRITE_FLAGS) def _build_report(self, details): - LIMIT = 1000 metrics = set() count = 0 metric_details = defaultdict(int) - marker = "" - while True: - names = list(self._list_object_names_to_process(marker=marker, - limit=LIMIT)) - if names and names[0] < marker: - raise _carbonara.ReportGenerationError("Unable to cleanly " - "compute backlog.") - for name in names: - count += 1 - metric = name.split("_")[1] - metrics.add(metric) - if details: - metric_details[metric] += 1 - if len(names) < LIMIT: - break - else: - marker = name + for i in six.moves.range(self.NUM_SACKS): + marker = "" + while True: + names = list(self._list_object_names_to_process( + i, marker=marker, limit=self.Q_LIMIT)) + if names and names[0] < marker: + raise 
_carbonara.ReportGenerationError("Unable to cleanly " + "compute backlog.") + for name in names: + count += 1 + metric = name.split("_")[1] + metrics.add(metric) + if details: + metric_details[metric] += 1 + if len(names) < self.Q_LIMIT: + break + else: + marker = name return len(metrics), count, metric_details if details else None - def _list_object_names_to_process(self, prefix="", marker="", limit=-1): + def _list_object_names_to_process(self, sack, prefix="", marker="", + limit=-1): with rados.ReadOpCtx() as op: omaps, ret = self.ioctx.get_omap_vals(op, marker, prefix, limit) try: self.ioctx.operate_read_op( - op, self.MEASURE_PREFIX, flag=self.OMAP_READ_FLAGS) + op, self.get_sack_name(sack), flag=self.OMAP_READ_FLAGS) except rados.ObjectNotFound: # API have still written nothing return () @@ -129,17 +135,19 @@ class CephStorage(_carbonara.CarbonaraBasedStorage): marker = "" while True: obj_names = list(self._list_object_names_to_process( - marker=marker, limit=1000)) + sack, marker=marker, limit=self.Q_LIMIT)) names.update(name.split("_")[1] for name in obj_names) - if len(obj_names) < 1000: + if len(obj_names) < self.Q_LIMIT: break else: marker = obj_names[-1] return names def delete_unprocessed_measures_for_metric_id(self, metric_id): + sack = self.sack_for_metric(metric_id) object_prefix = self.MEASURE_PREFIX + "_" + str(metric_id) - object_names = tuple(self._list_object_names_to_process(object_prefix)) + object_names = tuple(self._list_object_names_to_process( + sack, object_prefix)) if not object_names: return @@ -152,13 +160,15 @@ class CephStorage(_carbonara.CarbonaraBasedStorage): # NOTE(sileht): come on Ceph, no return code # for this operation ?!! 
self.ioctx.remove_omap_keys(op, object_names) - self.ioctx.operate_write_op(op, self.MEASURE_PREFIX, + self.ioctx.operate_write_op(op, self.get_sack_name(sack), flags=self.OMAP_WRITE_FLAGS) @contextlib.contextmanager def process_measure_for_metric(self, metric): + sack = self.sack_for_metric(metric.id) object_prefix = self.MEASURE_PREFIX + "_" + str(metric.id) - object_names = tuple(self._list_object_names_to_process(object_prefix)) + object_names = tuple(self._list_object_names_to_process( + sack, object_prefix)) measures = [] ops = [] @@ -212,5 +222,5 @@ class CephStorage(_carbonara.CarbonaraBasedStorage): # NOTE(sileht): come on Ceph, no return code # for this operation ?!! self.ioctx.remove_omap_keys(op, object_names) - self.ioctx.operate_write_op(op, self.MEASURE_PREFIX, + self.ioctx.operate_write_op(op, self.get_sack_name(sack), flags=self.OMAP_WRITE_FLAGS) -- GitLab From d5059fbef766f01eb8b157e42c5656ca1009d566 Mon Sep 17 00:00:00 2001 From: gord chung Date: Wed, 5 Apr 2017 14:42:51 +0000 Subject: [PATCH 0725/1483] file: push to multiple sacks enable file to push to multiple sacks and handle them accordingly Change-Id: If7a2ff8512df428cf8fe9c5084b7695a1e8e2a11 --- gnocchi/storage/incoming/file.py | 36 +++++++++++++++++++++++--------- 1 file changed, 26 insertions(+), 10 deletions(-) diff --git a/gnocchi/storage/incoming/file.py b/gnocchi/storage/incoming/file.py index 35d3b912..490f7c17 100644 --- a/gnocchi/storage/incoming/file.py +++ b/gnocchi/storage/incoming/file.py @@ -29,14 +29,22 @@ class FileStorage(_carbonara.CarbonaraBasedStorage): super(FileStorage, self).__init__(conf) self.basepath = conf.file_basepath self.basepath_tmp = os.path.join(self.basepath, 'tmp') - self.measure_path = os.path.join(self.basepath, 'measure') - def upgrade(self, indexer): - super(FileStorage, self).upgrade(indexer) - utils.ensure_paths([self.basepath_tmp, self.measure_path]) + def upgrade(self, index): + super(FileStorage, self).upgrade(index) + 
utils.ensure_paths([self._sack_path(i) + for i in six.moves.range(self.NUM_SACKS)]) + utils.ensure_paths([self.basepath_tmp]) + + def _sack_path(self, sack): + return os.path.join(self.basepath, self.get_sack_name(sack)) + + def _measure_path(self, sack, metric_id): + return os.path.join(self._sack_path(sack), six.text_type(metric_id)) def _build_measure_path(self, metric_id, random_id=None): - path = os.path.join(self.measure_path, six.text_type(metric_id)) + sack = self.sack_for_metric(metric_id) + path = self._measure_path(sack, metric_id) if random_id: if random_id is True: now = datetime.datetime.utcnow().strftime("_%Y%m%d_%H:%M:%S") @@ -69,18 +77,26 @@ class FileStorage(_carbonara.CarbonaraBasedStorage): def _build_report(self, details): metric_details = {} - for metric in os.listdir(self.measure_path): - metric_details[metric] = len( - self._list_measures_container_for_metric_id(metric)) + for i in six.moves.range(self.NUM_SACKS): + for metric in self.list_metric_with_measures_to_process(i): + metric_details[metric] = len( + self._list_measures_container_for_metric_id_str(i, metric)) return (len(metric_details.keys()), sum(metric_details.values()), metric_details if details else None) def list_metric_with_measures_to_process(self, sack): - return set(os.listdir(self.measure_path)) + return set(self._list_target(self._sack_path(sack))) + + def _list_measures_container_for_metric_id_str(self, sack, metric_id): + return self._list_target(self._measure_path(sack, metric_id)) def _list_measures_container_for_metric_id(self, metric_id): + return self._list_target(self._build_measure_path(metric_id)) + + @staticmethod + def _list_target(target): try: - return os.listdir(self._build_measure_path(metric_id)) + return os.listdir(target) except OSError as e: # Some other process treated this one, then do nothing if e.errno == errno.ENOENT: -- GitLab From 85b41d699a676f3550e2158e6fefbf20451ed910 Mon Sep 17 00:00:00 2001 From: gord chung Date: Wed, 5 Apr 2017 15:04:21 
+0000 Subject: [PATCH 0726/1483] swift: push to multiple sacks enable swift to push to multiple sacks and handle them accordingly Change-Id: I0550961960805505d85919f30bfa1677dc390345 --- gnocchi/storage/incoming/swift.py | 62 ++++++++++++++++--------------- 1 file changed, 33 insertions(+), 29 deletions(-) diff --git a/gnocchi/storage/incoming/swift.py b/gnocchi/storage/incoming/swift.py index 6a5f7d7e..e344da1d 100644 --- a/gnocchi/storage/incoming/swift.py +++ b/gnocchi/storage/incoming/swift.py @@ -30,61 +30,65 @@ class SwiftStorage(_carbonara.CarbonaraBasedStorage): super(SwiftStorage, self).__init__(conf) self.swift = swift.get_connection(conf) - def upgrade(self, indexer): - super(SwiftStorage, self).upgrade(indexer) - self.swift.put_container(self.MEASURE_PREFIX) + def upgrade(self, index): + super(SwiftStorage, self).upgrade(index) + for i in six.moves.range(self.NUM_SACKS): + self.swift.put_container(self.get_sack_name(i)) def _store_new_measures(self, metric, data): now = datetime.datetime.utcnow().strftime("_%Y%m%d_%H:%M:%S") self.swift.put_object( - self.MEASURE_PREFIX, + self.get_sack_name(self.sack_for_metric(metric.id)), six.text_type(metric.id) + "/" + six.text_type(uuid.uuid4()) + now, data) def _build_report(self, details): metric_details = defaultdict(int) - if details: - headers, files = self.swift.get_container(self.MEASURE_PREFIX, - full_listing=True) - for f in files: - metric, __ = f['name'].split("/", 1) - metric_details[metric] += 1 - nb_metrics = len(metric_details) - else: - headers, files = self.swift.get_container(self.MEASURE_PREFIX, - delimiter='/', - full_listing=True) - nb_metrics = len(files) - measures = int(headers.get('x-container-object-count')) - return nb_metrics, measures, metric_details if details else None + nb_metrics = 0 + measures = 0 + for i in six.moves.range(self.NUM_SACKS): + if details: + headers, files = self.swift.get_container( + self.get_sack_name(i), full_listing=True) + for f in files: + metric, __ = 
f['name'].split("/", 1) + metric_details[metric] += 1 + else: + headers, files = self.swift.get_container( + self.get_sack_name(i), delimiter='/', full_listing=True) + nb_metrics += len(files) + measures += int(headers.get('x-container-object-count')) + return (nb_metrics or len(metric_details), measures, + metric_details if details else None) def list_metric_with_measures_to_process(self, sack): - headers, files = self.swift.get_container(self.MEASURE_PREFIX, - delimiter='/', - full_listing=True) + headers, files = self.swift.get_container( + self.get_sack_name(sack), delimiter='/', full_listing=True) return set(f['subdir'][:-1] for f in files if 'subdir' in f) - def _list_measure_files_for_metric_id(self, metric_id): + def _list_measure_files_for_metric_id(self, sack, metric_id): headers, files = self.swift.get_container( - self.MEASURE_PREFIX, path=six.text_type(metric_id), + self.get_sack_name(sack), path=six.text_type(metric_id), full_listing=True) return files def delete_unprocessed_measures_for_metric_id(self, metric_id): - files = self._list_measure_files_for_metric_id(metric_id) - swift.bulk_delete(self.swift, self.MEASURE_PREFIX, files) + sack = self.sack_for_metric(metric_id) + files = self._list_measure_files_for_metric_id(sack, metric_id) + swift.bulk_delete(self.swift, self.get_sack_name(sack), files) @contextlib.contextmanager def process_measure_for_metric(self, metric): - files = self._list_measure_files_for_metric_id(metric.id) + sack = self.sack_for_metric(metric.id) + sack_name = self.get_sack_name(sack) + files = self._list_measure_files_for_metric_id(sack, metric.id) measures = [] for f in files: - headers, data = self.swift.get_object( - self.MEASURE_PREFIX, f['name']) + headers, data = self.swift.get_object(sack_name, f['name']) measures.extend(self._unserialize_measures(f['name'], data)) yield measures # Now clean objects - swift.bulk_delete(self.swift, self.MEASURE_PREFIX, files) + swift.bulk_delete(self.swift, sack_name, files) -- GitLab 
From 25cc9d7a03e81199379c900b85d16b347a8f2387 Mon Sep 17 00:00:00 2001 From: gord chung Date: Wed, 5 Apr 2017 16:10:23 +0000 Subject: [PATCH 0727/1483] s3: push to multiple sacks enable s3 to push to multiple sacks and handle them accordingly Change-Id: If68b4ef0c172f824b86b120092feb936a1c2fda2 --- gnocchi/storage/incoming/s3.py | 27 +++++++++++++++++---------- 1 file changed, 17 insertions(+), 10 deletions(-) diff --git a/gnocchi/storage/incoming/s3.py b/gnocchi/storage/incoming/s3.py index 862a1f88..bd4fb467 100644 --- a/gnocchi/storage/incoming/s3.py +++ b/gnocchi/storage/incoming/s3.py @@ -29,6 +29,9 @@ botocore = s3.botocore class S3Storage(_carbonara.CarbonaraBasedStorage): + # NOTE(gordc): override to follow s3 partitioning logic + SACK_PREFIX = '%s/' + def __init__(self, conf): super(S3Storage, self).__init__(conf) self.s3, self._region_name, self._bucket_prefix = ( @@ -54,10 +57,9 @@ class S3Storage(_carbonara.CarbonaraBasedStorage): now = datetime.datetime.utcnow().strftime("_%Y%m%d_%H:%M:%S") self.s3.put_object( Bucket=self._bucket_name_measures, - Key=(six.text_type(metric.id) - + "/" - + six.text_type(uuid.uuid4()) - + now), + Key=(self.get_sack_name(self.sack_for_metric(metric.id)) + + six.text_type(metric.id) + "/" + + six.text_type(uuid.uuid4()) + now), Body=data) def _build_report(self, details): @@ -73,8 +75,9 @@ class S3Storage(_carbonara.CarbonaraBasedStorage): response = self.s3.list_objects_v2( Bucket=self._bucket_name_measures, **kwargs) + # FIXME(gordc): this can be streamlined if not details for c in response.get('Contents', ()): - metric, metric_file = c['Key'].split("/", 1) + __, metric, metric_file = c['Key'].split("/", 2) metric_details[metric] += 1 return (len(metric_details), sum(metric_details.values()), metric_details if details else None) @@ -93,14 +96,15 @@ class S3Storage(_carbonara.CarbonaraBasedStorage): kwargs = {} response = self.s3.list_objects_v2( Bucket=self._bucket_name_measures, + Prefix=self.get_sack_name(sack), 
Delimiter="/", MaxKeys=limit, **kwargs) for p in response.get('CommonPrefixes', ()): - metrics.add(p['Prefix'].rstrip('/')) + metrics.add(p['Prefix'].split('/', 2)[1]) return metrics - def _list_measure_files_for_metric_id(self, metric_id): + def _list_measure_files_for_metric_id(self, sack, metric_id): files = set() response = {} while response.get('IsTruncated', True): @@ -112,7 +116,8 @@ class S3Storage(_carbonara.CarbonaraBasedStorage): kwargs = {} response = self.s3.list_objects_v2( Bucket=self._bucket_name_measures, - Prefix=six.text_type(metric_id) + "/", + Prefix=(self.get_sack_name(sack) + + six.text_type(metric_id) + "/"), **kwargs) for c in response.get('Contents', ()): @@ -121,12 +126,14 @@ class S3Storage(_carbonara.CarbonaraBasedStorage): return files def delete_unprocessed_measures_for_metric_id(self, metric_id): - files = self._list_measure_files_for_metric_id(metric_id) + sack = self.sack_for_metric(metric_id) + files = self._list_measure_files_for_metric_id(sack, metric_id) s3.bulk_delete(self.s3, self._bucket_name_measures, files) @contextlib.contextmanager def process_measure_for_metric(self, metric): - files = self._list_measure_files_for_metric_id(metric.id) + sack = self.sack_for_metric(metric.id) + files = self._list_measure_files_for_metric_id(sack, metric.id) measures = [] for f in files: -- GitLab From f8b18df786725a2527210e98149b241c0f188a02 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Sat, 13 May 2017 13:11:34 +0200 Subject: [PATCH 0728/1483] doc: fix typo and URL in collectd page Change-Id: I9743f7f5f7e61173416f29478178567393f38355 --- doc/source/collectd.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/source/collectd.rst b/doc/source/collectd.rst index 835df4ba..0b91b448 100644 --- a/doc/source/collectd.rst +++ b/doc/source/collectd.rst @@ -3,7 +3,7 @@ ================== `Collectd`_ can use Gnocchi to store its data through a plugin called -`collectd-gnocchi`. 
It can be installed with _pip_:: +`collectd-gnocchi`. It can be installed with *pip*:: pip install collectd-gnocchi @@ -11,4 +11,4 @@ .. _`Collectd`: https://www.collectd.org/ -.. _`Sources and documentation`: https://github.com/jd/collectd-gnocchi +.. _`Sources and documentation`: https://github.com/gnocchixyz/collectd-gnocchi -- GitLab From e82460117a97c520d410093644c03bbc22c5d5a2 Mon Sep 17 00:00:00 2001 From: gord chung Date: Wed, 19 Apr 2017 13:46:47 +0000 Subject: [PATCH 0729/1483] implement hashring partitioning support hashring partitioning if user wants to reduce potential locking load by sacrificing potential throughput. if hashring is not supported or does not assign jobs, we just default to entire set of sacks. if fails to set up partitioning, try again. Change-Id: I1439fb3cdb171ce57ce7887857aa4789fe8f0d9c --- gnocchi/cli.py | 49 ++++++++++++++++++++++++++++++++++++++++++++++++- gnocchi/opts.py | 7 +++++++ 2 files changed, 55 insertions(+), 1 deletion(-) diff --git a/gnocchi/cli.py b/gnocchi/cli.py index 853153dd..8d3b9549 100644 --- a/gnocchi/cli.py +++ b/gnocchi/cli.py @@ -19,10 +19,13 @@ import time import cotyledon from cotyledon import oslo_config_glue +from futurist import periodics from oslo_config import cfg from oslo_log import log from oslo_utils import timeutils import six +import tenacity +import tooz from gnocchi import archive_policy from gnocchi import genconfig @@ -139,12 +142,56 @@ class MetricReporting(MetricProcessBase): class MetricProcessor(MetricProcessBase): name = "processing" + GROUP_ID = "gnocchi-processing" def __init__(self, worker_id, conf): super(MetricProcessor, self).__init__( worker_id, conf, conf.metricd.metric_processing_delay) self._coord, self._my_id = utils.get_coordinator_and_start( conf.storage.coordination_url) + self._tasks = [] + self.group_state = None + + @utils.retry + def _configure(self): + super(MetricProcessor, self)._configure() + # create fallback in case paritioning fails or assigned no tasks + 
self.fallback_tasks = list( + six.moves.range(self.store.incoming.NUM_SACKS)) + try: + self.partitioner = self._coord.join_partitioned_group( + self.GROUP_ID, partitions=200) + LOG.info('Joined coordination group: %s', self.GROUP_ID) + + @periodics.periodic(spacing=self.conf.metricd.worker_sync_rate, + run_immediately=True) + def run_watchers(): + self._coord.run_watchers() + + self.periodic = periodics.PeriodicWorker.create([]) + self.periodic.add(run_watchers) + t = threading.Thread(target=self.periodic.start) + t.daemon = True + t.start() + except NotImplementedError: + LOG.warning('Coordinator does not support partitioning. Worker ' + 'will battle against other workers for jobs.') + except tooz.ToozError as e: + LOG.error('Unexpected error configuring coordinator for ' + 'partitioning. Retrying: %s', e) + raise tenacity.TryAgain(e) + + def _get_tasks(self): + try: + if (not self._tasks or + self.group_state != self.partitioner.ring.nodes): + self.group_state = self.partitioner.ring.nodes.copy() + # TODO(gordc): make replicas configurable + self._tasks = [ + i for i in six.moves.range(self.store.incoming.NUM_SACKS) + if self.partitioner.belongs_to_self(i, replicas=3)] + finally: + return self._tasks or self.fallback_tasks def _sack_lock(self, sack): lock_name = b'gnocchi-sack-%s-lock' % str(sack).encode('ascii') @@ -154,7 +201,7 @@ class MetricProcessor(MetricProcessBase): m_count = 0 s_count = 0 in_store = self.store.incoming - for s in six.moves.range(in_store.NUM_SACKS): + for s in self._get_tasks(): # TODO(gordc): support delay release lock so we don't # process a sack right after another process lock = self._sack_lock(s) diff --git a/gnocchi/opts.py b/gnocchi/opts.py index 604cebcc..ca3a7be4 100644 --- a/gnocchi/opts.py +++ b/gnocchi/opts.py @@ -85,6 +85,13 @@ def list_opts(): required=True, help="How many seconds to wait between " "cleaning of expired data"), + cfg.IntOpt('worker_sync_rate', + default=30, + help="Frequency to detect when metricd workers 
join or " + "leave system (in seconds). A shorter rate, may " + "improve rebalancing but create more coordination " + "load"), + )), ("api", ( cfg.StrOpt('paste_config', -- GitLab From b21b878f36f3c5c8e6b037b55c8be95810bd4388 Mon Sep 17 00:00:00 2001 From: gord chung Date: Wed, 19 Apr 2017 14:28:45 +0000 Subject: [PATCH 0730/1483] configurable hashring replicas as a trade off between less load on coordination backend vs higher worker utilisation. Change-Id: I679e79edf857be3660a80e54e039ded60e8431b5 --- gnocchi/cli.py | 4 ++-- gnocchi/opts.py | 8 +++++++- 2 files changed, 9 insertions(+), 3 deletions(-) diff --git a/gnocchi/cli.py b/gnocchi/cli.py index 8d3b9549..c13aca83 100644 --- a/gnocchi/cli.py +++ b/gnocchi/cli.py @@ -186,10 +186,10 @@ class MetricProcessor(MetricProcessBase): if (not self._tasks or self.group_state != self.partitioner.ring.nodes): self.group_state = self.partitioner.ring.nodes.copy() - # TODO(gordc): make replicas configurable self._tasks = [ i for i in six.moves.range(self.store.incoming.NUM_SACKS) - if self.partitioner.belongs_to_self(i, replicas=3)] + if self.partitioner.belongs_to_self( + i, replicas=self.conf.metricd.processing_replicas)] finally: return self._tasks or self.fallback_tasks diff --git a/gnocchi/opts.py b/gnocchi/opts.py index ca3a7be4..f1142fd4 100644 --- a/gnocchi/opts.py +++ b/gnocchi/opts.py @@ -91,7 +91,13 @@ def list_opts(): "leave system (in seconds). A shorter rate, may " "improve rebalancing but create more coordination " "load"), - + cfg.IntOpt('processing_replicas', + default=3, + min=1, + help="Number of workers that share a task. A higher " + "value may improve worker utilization but may also " + "increase load on coordination backend. 
Value is " + "capped by number of workers globally."), )), ("api", ( cfg.StrOpt('paste_config', -- GitLab From 1bacea94162ea1d72dd87e17554af037524abd96 Mon Sep 17 00:00:00 2001 From: gord chung Date: Thu, 20 Apr 2017 21:14:14 +0000 Subject: [PATCH 0731/1483] configurable sacks framework this adds framework to configure sack size without corruption. - default to 128 sacks - add note on how to calculate how many sacks to set actual ability to change value done in subsequent patch. Change-Id: I389c1a7ca9b3fe39b3716782e85073796ad26333 --- doc/source/running.rst | 29 +++++++++++++++++++++ gnocchi/cli.py | 5 +++- gnocchi/storage/__init__.py | 4 +-- gnocchi/storage/incoming/_carbonara.py | 35 +++++++++++++++++++++++--- gnocchi/storage/incoming/ceph.py | 12 +++++++++ gnocchi/storage/incoming/file.py | 22 +++++++++++++--- gnocchi/storage/incoming/redis.py | 6 +++++ gnocchi/storage/incoming/s3.py | 33 ++++++++++++++++++------ gnocchi/storage/incoming/swift.py | 15 +++++++++-- gnocchi/storage/s3.py | 4 +-- gnocchi/tests/base.py | 5 ++-- gnocchi/tests/functional/fixtures.py | 2 +- 12 files changed, 148 insertions(+), 24 deletions(-) diff --git a/doc/source/running.rst b/doc/source/running.rst index 0cbdbb13..f56f6541 100644 --- a/doc/source/running.rst +++ b/doc/source/running.rst @@ -133,6 +133,35 @@ process is continuously increasing, you will need to (maybe temporarily) increase the number of `gnocchi-metricd` daemons. You can run any number of metricd daemon on any number of servers. +How to scale measure processing +=============================== + +Measurement data pushed to Gnocchi is divided into sacks for better +distribution. The number of partitions is controlled by the `sacks` option +under the `[incoming]` section. This value should be set based on the +number of active metrics the system will capture. Additionally, the number of +`sacks`, should be higher than the total number of active metricd workers. +distribution. 
Incoming metrics are pushed to specific sacks and each sack +is assigned to one or more `gnocchi-metricd` daemons for processing. + +How many sacks do we need to create +----------------------------------- + +This number of sacks enabled should be set based on the number of active +metrics the system will capture. Additionally, the number of sacks, should +be higher than the total number of active `gnocchi-metricd` workers. + +In general, use the following equation to determine the appropriate `sacks` +value to set: + +.. math:: + + sacks value = number of **active** metrics / 300 + +If the estimated number of metrics is the absolute maximum, divide the value +by 500 instead. If the estimated number of active metrics is conservative and +expected to grow, divide the value by 100 instead to accommodate growth. + How to monitor Gnocchi ====================== diff --git a/gnocchi/cli.py b/gnocchi/cli.py index c13aca83..ef47dcb9 100644 --- a/gnocchi/cli.py +++ b/gnocchi/cli.py @@ -53,6 +53,9 @@ def upgrade(): help="Skip storage upgrade."), cfg.BoolOpt("skip-archive-policies-creation", default=False, help="Skip default archive policies creation."), + cfg.IntOpt("num-storage-sacks", default=128, + help="Initial number of storage sacks to create."), + ]) conf = service.prepare_service(conf=conf) index = indexer.get_driver(conf) @@ -63,7 +66,7 @@ def upgrade(): if not conf.skip_storage: s = storage.get_driver(conf) LOG.info("Upgrading storage %s", s) - s.upgrade(index) + s.upgrade(index, conf.num_storage_sacks) if (not conf.skip_archive_policies_creation and not index.list_archive_policies() diff --git a/gnocchi/storage/__init__.py b/gnocchi/storage/__init__.py index 3a7a5e06..d06a47cf 100644 --- a/gnocchi/storage/__init__.py +++ b/gnocchi/storage/__init__.py @@ -162,8 +162,8 @@ class StorageDriver(object): def stop(): pass - def upgrade(self, index): - self.incoming.upgrade(index) + def upgrade(self, index, num_sacks): + self.incoming.upgrade(index, num_sacks) def 
process_background_tasks(self, index, metrics, sync=False): """Process background tasks for this storage. diff --git a/gnocchi/storage/incoming/_carbonara.py b/gnocchi/storage/incoming/_carbonara.py index c17ae852..f9a9f47a 100644 --- a/gnocchi/storage/incoming/_carbonara.py +++ b/gnocchi/storage/incoming/_carbonara.py @@ -32,11 +32,40 @@ _NUM_WORKERS = utils.get_default_workers() class CarbonaraBasedStorage(incoming.StorageDriver): MEASURE_PREFIX = "measure" - SACK_PREFIX = "incoming-%s" + SACK_PREFIX = "incoming" + CFG_PREFIX = 'gnocchi-config' + CFG_SACKS = 'sacks' _MEASURE_SERIAL_FORMAT = "Qd" _MEASURE_SERIAL_LEN = struct.calcsize(_MEASURE_SERIAL_FORMAT) - NUM_SACKS = 8 + @property + def NUM_SACKS(self): + if not hasattr(self, '_num_sacks'): + try: + self._num_sacks = int(self.get_storage_sacks()) + except Exception as e: + LOG.error('Unable to detect the number of storage sacks. ' + 'Ensure gnocchi-upgrade has been executed: %s', e) + raise + return self._num_sacks + + def get_sack_prefix(self, num_sacks=None): + sacks = num_sacks if num_sacks else self.NUM_SACKS + return self.SACK_PREFIX + str(sacks) + '-%s' + + def upgrade(self, index, num_sacks): + super(CarbonaraBasedStorage, self).upgrade(index) + if not self.get_storage_sacks(): + self.set_storage_settings(num_sacks) + + @staticmethod + def get_storage_sacks(): + """Return the number of sacks in storage. 
None if not set.""" + raise NotImplementedError + + @staticmethod + def set_storage_settings(num_sacks): + raise NotImplementedError def _unserialize_measures(self, measure_id, data): nb_measures = len(data) // self._MEASURE_SERIAL_LEN @@ -93,4 +122,4 @@ class CarbonaraBasedStorage(incoming.StorageDriver): return metric_id.int % self.NUM_SACKS def get_sack_name(self, sack): - return self.SACK_PREFIX % sack + return self.get_sack_prefix() % sack diff --git a/gnocchi/storage/incoming/ceph.py b/gnocchi/storage/incoming/ceph.py index 906a2b9a..e75fcfca 100644 --- a/gnocchi/storage/incoming/ceph.py +++ b/gnocchi/storage/incoming/ceph.py @@ -15,6 +15,7 @@ from collections import defaultdict import contextlib import datetime import functools +import json import uuid import six @@ -57,6 +58,17 @@ class CephStorage(_carbonara.CarbonaraBasedStorage): ceph.close_rados_connection(self.rados, self.ioctx) super(CephStorage, self).stop() + def get_storage_sacks(self): + try: + return json.loads( + self.ioctx.read(self.CFG_PREFIX).decode())[self.CFG_SACKS] + except rados.ObjectNotFound: + return + + def set_storage_settings(self, num_sacks): + self.ioctx.write_full(self.CFG_PREFIX, + json.dumps({self.CFG_SACKS: num_sacks}).encode()) + def add_measures_batch(self, metrics_and_measures): names_by_sack = defaultdict(list) for metric, measures in six.iteritems(metrics_and_measures): diff --git a/gnocchi/storage/incoming/file.py b/gnocchi/storage/incoming/file.py index 490f7c17..cd32e67f 100644 --- a/gnocchi/storage/incoming/file.py +++ b/gnocchi/storage/incoming/file.py @@ -14,6 +14,7 @@ import contextlib import datetime import errno +import json import os import tempfile import uuid @@ -30,11 +31,26 @@ class FileStorage(_carbonara.CarbonaraBasedStorage): self.basepath = conf.file_basepath self.basepath_tmp = os.path.join(self.basepath, 'tmp') - def upgrade(self, index): - super(FileStorage, self).upgrade(index) + def upgrade(self, index, num_sacks): + super(FileStorage, 
self).upgrade(index, num_sacks) + utils.ensure_paths([self.basepath_tmp]) + + def get_storage_sacks(self): + try: + with open(os.path.join(self.basepath_tmp, self.CFG_PREFIX), + 'r') as f: + return json.load(f)[self.CFG_SACKS] + except IOError as e: + if e.errno == errno.ENOENT: + return + raise + + def set_storage_settings(self, num_sacks): + data = {self.CFG_SACKS: num_sacks} + with open(os.path.join(self.basepath_tmp, self.CFG_PREFIX), 'w') as f: + json.dump(data, f) utils.ensure_paths([self._sack_path(i) for i in six.moves.range(self.NUM_SACKS)]) - utils.ensure_paths([self.basepath_tmp]) def _sack_path(self, sack): return os.path.join(self.basepath, self.get_sack_name(sack)) diff --git a/gnocchi/storage/incoming/redis.py b/gnocchi/storage/incoming/redis.py index f4f70a11..24c74716 100644 --- a/gnocchi/storage/incoming/redis.py +++ b/gnocchi/storage/incoming/redis.py @@ -28,6 +28,12 @@ class RedisStorage(_carbonara.CarbonaraBasedStorage): super(RedisStorage, self).__init__(conf) self._client = redis.get_client(conf) + def get_storage_sacks(self): + return self._client.hget(self.CFG_PREFIX, self.CFG_SACKS) + + def set_storage_settings(self, num_sacks): + self._client.hset(self.CFG_PREFIX, self.CFG_SACKS, num_sacks) + def _build_measure_path(self, metric_id): return redis.SEP.join([ self.get_sack_name(self.sack_for_metric(metric_id)), diff --git a/gnocchi/storage/incoming/s3.py b/gnocchi/storage/incoming/s3.py index bd4fb467..378de439 100644 --- a/gnocchi/storage/incoming/s3.py +++ b/gnocchi/storage/incoming/s3.py @@ -16,6 +16,7 @@ from collections import defaultdict import contextlib import datetime +import json import uuid import six @@ -29,9 +30,6 @@ botocore = s3.botocore class S3Storage(_carbonara.CarbonaraBasedStorage): - # NOTE(gordc): override to follow s3 partitioning logic - SACK_PREFIX = '%s/' - def __init__(self, conf): super(S3Storage, self).__init__(conf) self.s3, self._region_name, self._bucket_prefix = ( @@ -42,8 +40,26 @@ class 
S3Storage(_carbonara.CarbonaraBasedStorage): self._bucket_prefix + "-" + self.MEASURE_PREFIX ) - def upgrade(self, indexer): - super(S3Storage, self).upgrade(indexer) + def get_storage_sacks(self): + try: + response = self.s3.get_object(Bucket=self._bucket_name_measures, + Key=self.CFG_PREFIX) + return json.loads(response['Body'].read().decode())[self.CFG_SACKS] + except botocore.exceptions.ClientError as e: + if e.response['Error'].get('Code') == "NoSuchKey": + return + + def set_storage_settings(self, num_sacks): + data = {self.CFG_SACKS: num_sacks} + self.s3.put_object(Bucket=self._bucket_name_measures, + Key=self.CFG_PREFIX, + Body=json.dumps(data).encode()) + + def get_sack_prefix(self, num_sacks=None): + # NOTE(gordc): override to follow s3 partitioning logic + return '%s-' + ('%s/' % (num_sacks if num_sacks else self.NUM_SACKS)) + + def upgrade(self, indexer, num_sacks): try: s3.create_bucket(self.s3, self._bucket_name_measures, self._region_name) @@ -52,6 +68,8 @@ class S3Storage(_carbonara.CarbonaraBasedStorage): "BucketAlreadyExists", "BucketAlreadyOwnedByYou" ): raise + # need to create bucket first to store storage settings object + super(S3Storage, self).upgrade(indexer, num_sacks) def _store_new_measures(self, metric, data): now = datetime.datetime.utcnow().strftime("_%Y%m%d_%H:%M:%S") @@ -77,8 +95,9 @@ class S3Storage(_carbonara.CarbonaraBasedStorage): **kwargs) # FIXME(gordc): this can be streamlined if not details for c in response.get('Contents', ()): - __, metric, metric_file = c['Key'].split("/", 2) - metric_details[metric] += 1 + if c['Key'] != self.CFG_PREFIX: + __, metric, metric_file = c['Key'].split("/", 2) + metric_details[metric] += 1 return (len(metric_details), sum(metric_details.values()), metric_details if details else None) diff --git a/gnocchi/storage/incoming/swift.py b/gnocchi/storage/incoming/swift.py index e344da1d..5370771e 100644 --- a/gnocchi/storage/incoming/swift.py +++ b/gnocchi/storage/incoming/swift.py @@ -14,6 +14,7 @@ 
from collections import defaultdict import contextlib import datetime +import json import uuid import six @@ -30,8 +31,18 @@ class SwiftStorage(_carbonara.CarbonaraBasedStorage): super(SwiftStorage, self).__init__(conf) self.swift = swift.get_connection(conf) - def upgrade(self, index): - super(SwiftStorage, self).upgrade(index) + def get_storage_sacks(self): + try: + __, data = self.swift.get_object(self.CFG_PREFIX, self.CFG_PREFIX) + return json.loads(data)[self.CFG_SACKS] + except swclient.ClientException as e: + if e.http_status == 404: + return + + def set_storage_settings(self, num_sacks): + self.swift.put_container(self.CFG_PREFIX) + self.swift.put_object(self.CFG_PREFIX, self.CFG_PREFIX, + json.dumps({self.CFG_SACKS: num_sacks})) for i in six.moves.range(self.NUM_SACKS): self.swift.put_container(self.get_sack_name(i)) diff --git a/gnocchi/storage/s3.py b/gnocchi/storage/s3.py index 8de5507e..59c801de 100644 --- a/gnocchi/storage/s3.py +++ b/gnocchi/storage/s3.py @@ -75,8 +75,8 @@ class S3Storage(_carbonara.CarbonaraBasedStorage): else: self._consistency_stop = None - def upgrade(self, index): - super(S3Storage, self).upgrade(index) + def upgrade(self, index, num_sacks): + super(S3Storage, self).upgrade(index, num_sacks) try: s3.create_bucket(self.s3, self._bucket_name, self._region_name) except botocore.exceptions.ClientError as e: diff --git a/gnocchi/tests/base.py b/gnocchi/tests/base.py index 9cc68ddf..3f35b40c 100644 --- a/gnocchi/tests/base.py +++ b/gnocchi/tests/base.py @@ -325,10 +325,9 @@ class TestCase(base.BaseTestCase): if self.conf.storage.driver == 'redis': # Create one prefix per test self.storage.STORAGE_PREFIX = str(uuid.uuid4()) - self.storage.incoming.SACK_PREFIX = ( - str(uuid.uuid4()) + self.storage.incoming.SACK_PREFIX) + self.storage.incoming.SACK_PREFIX = str(uuid.uuid4()) - self.storage.upgrade(self.index) + self.storage.upgrade(self.index, 128) def tearDown(self): self.index.disconnect() diff --git 
a/gnocchi/tests/functional/fixtures.py b/gnocchi/tests/functional/fixtures.py index 7d212073..90004194 100644 --- a/gnocchi/tests/functional/fixtures.py +++ b/gnocchi/tests/functional/fixtures.py @@ -134,7 +134,7 @@ class ConfigFixture(fixture.GabbiFixture): self.index = index s = storage.get_driver(conf) - s.upgrade(index) + s.upgrade(index, 128) LOAD_APP_KWARGS = { 'storage': s, -- GitLab From 5b83fe64ccf8da8a9bbd98fe25005039f088b920 Mon Sep 17 00:00:00 2001 From: gord chung Date: Tue, 25 Apr 2017 21:03:05 +0000 Subject: [PATCH 0732/1483] sack-changer cli support ability to change sacks via command line. will set new sack value and clear any old sacks (if applicable). backlog must be empty to change value. Change-Id: Icf74b081e4cfaaaa607a5b9c684cbad4b8ecc006 --- doc/source/running.rst | 33 ++++++++++++++++++++++++++ gnocchi/cli.py | 20 ++++++++++++++++ gnocchi/storage/incoming/_carbonara.py | 4 ++++ gnocchi/storage/incoming/ceph.py | 8 +++++++ gnocchi/storage/incoming/file.py | 6 +++++ gnocchi/storage/incoming/redis.py | 5 ++++ gnocchi/storage/incoming/s3.py | 5 ++++ gnocchi/storage/incoming/swift.py | 5 ++++ setup.cfg | 1 + 9 files changed, 87 insertions(+) diff --git a/doc/source/running.rst b/doc/source/running.rst index f56f6541..09d901e7 100644 --- a/doc/source/running.rst +++ b/doc/source/running.rst @@ -162,6 +162,39 @@ If the estimated number of metrics is the absolute maximum, divide the value by 500 instead. If the estimated number of active metrics is conservative and expected to grow, divide the value by 100 instead to accommodate growth. +How do we change sack size +-------------------------- + +In the event your system grows to capture signficantly more metrics than +originally anticipated, the number of sacks can be changed to maintain good +distribution. To avoid any loss of data when modifying `sacks` option. The +option should be changed in the following order:: + + 1. Stop all input services (api, statsd) + + 2. 
Stop all metricd services once backlog is cleared + + 3. Run gnocchi-change-sack-size to set new sack size. Note + that sack value can only be changed if the backlog is empty. + + 4. Restart all gnocchi services (api, statsd, metricd) with new configuration + +Alternatively, to minimise API downtime:: + + 1. Run gnocchi-upgrade but use a new incoming storage target such as a new + ceph pool, file path, etc... Additionally, set aggregate storage to a + new target as well. + + 2. Run gnocchi-change-sack-size against new target + + 3. Stop all input services (api, statsd) + + 4. Restart all input services but target newly created incoming storage + + 5. When done clearing backlog from original incoming storage, switch all + metricd datemons to target new incoming storage but maintain original + aggregate storage. + How to monitor Gnocchi ====================== diff --git a/gnocchi/cli.py b/gnocchi/cli.py index ef47dcb9..7c3f6fd5 100644 --- a/gnocchi/cli.py +++ b/gnocchi/cli.py @@ -76,6 +76,26 @@ def upgrade(): index.create_archive_policy_rule("default", "*", "low") +def change_sack_size(): + conf = cfg.ConfigOpts() + conf.register_cli_opts([ + cfg.IntOpt("sack_size", required=True, min=1, + help="Number of sacks."), + ]) + conf = service.prepare_service(conf=conf) + s = storage.get_driver(conf) + report = s.incoming.measures_report(details=False) + remainder = report['summary']['measures'] + if remainder: + LOG.error('Cannot change sack when non-empty backlog. 
Process ' + 'remaining %s measures and try again', remainder) + return + LOG.info("Changing sack size to: %s", conf.sack_size) + old_num_sacks = s.incoming.get_storage_sacks() + s.incoming.set_storage_settings(conf.sack_size) + s.incoming.remove_sack_group(old_num_sacks) + + def statsd(): statsd_service.start() diff --git a/gnocchi/storage/incoming/_carbonara.py b/gnocchi/storage/incoming/_carbonara.py index f9a9f47a..2fe9bbd5 100644 --- a/gnocchi/storage/incoming/_carbonara.py +++ b/gnocchi/storage/incoming/_carbonara.py @@ -67,6 +67,10 @@ class CarbonaraBasedStorage(incoming.StorageDriver): def set_storage_settings(num_sacks): raise NotImplementedError + @staticmethod + def remove_sack_group(num_sacks): + raise NotImplementedError + def _unserialize_measures(self, measure_id, data): nb_measures = len(data) // self._MEASURE_SERIAL_LEN try: diff --git a/gnocchi/storage/incoming/ceph.py b/gnocchi/storage/incoming/ceph.py index e75fcfca..0f6c970a 100644 --- a/gnocchi/storage/incoming/ceph.py +++ b/gnocchi/storage/incoming/ceph.py @@ -69,6 +69,14 @@ class CephStorage(_carbonara.CarbonaraBasedStorage): self.ioctx.write_full(self.CFG_PREFIX, json.dumps({self.CFG_SACKS: num_sacks}).encode()) + def remove_sack_group(self, num_sacks): + prefix = self.get_sack_prefix(num_sacks) + for i in six.moves.xrange(num_sacks): + try: + self.ioctx.remove_object(prefix % i) + except rados.ObjectNotFound: + pass + def add_measures_batch(self, metrics_and_measures): names_by_sack = defaultdict(list) for metric, measures in six.iteritems(metrics_and_measures): diff --git a/gnocchi/storage/incoming/file.py b/gnocchi/storage/incoming/file.py index cd32e67f..070094e7 100644 --- a/gnocchi/storage/incoming/file.py +++ b/gnocchi/storage/incoming/file.py @@ -16,6 +16,7 @@ import datetime import errno import json import os +import shutil import tempfile import uuid @@ -52,6 +53,11 @@ class FileStorage(_carbonara.CarbonaraBasedStorage): utils.ensure_paths([self._sack_path(i) for i in 
six.moves.range(self.NUM_SACKS)]) + def remove_sack_group(self, num_sacks): + prefix = self.get_sack_prefix(num_sacks) + for i in six.moves.xrange(num_sacks): + shutil.rmtree(os.path.join(self.basepath, prefix % i)) + def _sack_path(self, sack): return os.path.join(self.basepath, self.get_sack_name(sack)) diff --git a/gnocchi/storage/incoming/redis.py b/gnocchi/storage/incoming/redis.py index 24c74716..fa5f1e88 100644 --- a/gnocchi/storage/incoming/redis.py +++ b/gnocchi/storage/incoming/redis.py @@ -34,6 +34,11 @@ class RedisStorage(_carbonara.CarbonaraBasedStorage): def set_storage_settings(self, num_sacks): self._client.hset(self.CFG_PREFIX, self.CFG_SACKS, num_sacks) + @staticmethod + def remove_sack_group(num_sacks): + # NOTE(gordc): redis doesn't maintain keys with empty values + pass + def _build_measure_path(self, metric_id): return redis.SEP.join([ self.get_sack_name(self.sack_for_metric(metric_id)), diff --git a/gnocchi/storage/incoming/s3.py b/gnocchi/storage/incoming/s3.py index 378de439..52016d11 100644 --- a/gnocchi/storage/incoming/s3.py +++ b/gnocchi/storage/incoming/s3.py @@ -59,6 +59,11 @@ class S3Storage(_carbonara.CarbonaraBasedStorage): # NOTE(gordc): override to follow s3 partitioning logic return '%s-' + ('%s/' % (num_sacks if num_sacks else self.NUM_SACKS)) + @staticmethod + def remove_sack_group(num_sacks): + # nothing to cleanup since sacks are part of path + pass + def upgrade(self, indexer, num_sacks): try: s3.create_bucket(self.s3, self._bucket_name_measures, diff --git a/gnocchi/storage/incoming/swift.py b/gnocchi/storage/incoming/swift.py index 5370771e..b0549d0f 100644 --- a/gnocchi/storage/incoming/swift.py +++ b/gnocchi/storage/incoming/swift.py @@ -46,6 +46,11 @@ class SwiftStorage(_carbonara.CarbonaraBasedStorage): for i in six.moves.range(self.NUM_SACKS): self.swift.put_container(self.get_sack_name(i)) + def remove_sack_group(self, num_sacks): + prefix = self.get_sack_prefix(num_sacks) + for i in six.moves.xrange(num_sacks): + 
self.swift.delete_container(prefix % i) + def _store_new_measures(self, metric, data): now = datetime.datetime.utcnow().strftime("_%Y%m%d_%H:%M:%S") self.swift.put_object( diff --git a/setup.cfg b/setup.cfg index 386d90f6..a2f544a7 100644 --- a/setup.cfg +++ b/setup.cfg @@ -133,6 +133,7 @@ gnocchi.rest.auth_helper = console_scripts = gnocchi-config-generator = gnocchi.cli:config_generator gnocchi-upgrade = gnocchi.cli:upgrade + gnocchi-change-sack-size = gnocchi.cli:change_sack_size gnocchi-statsd = gnocchi.cli:statsd gnocchi-metricd = gnocchi.cli:metricd -- GitLab From 3099bdbebf1c281c4a8063e6fa6312b993be0f4c Mon Sep 17 00:00:00 2001 From: gord chung Date: Tue, 18 Apr 2017 14:11:54 +0000 Subject: [PATCH 0733/1483] don't lock on delete we don't need to lock metric on delete but rather we only need to check if sack is being processed. either: 1) sack is locked, so there's a chance that metric is being processed. therefore we skip. 2) sack is unlocked, so there's no chance that a concurrent process will start processing metric as the indexer already says it's already deleted and no other process can see it as not-deleted. 
Change-Id: I8df135621dfabc3d17733a3577d0ea60b30e83e4 --- gnocchi/cli.py | 6 +----- gnocchi/storage/_carbonara.py | 14 ++++++++------ gnocchi/storage/incoming/_carbonara.py | 5 +++++ 3 files changed, 14 insertions(+), 11 deletions(-) diff --git a/gnocchi/cli.py b/gnocchi/cli.py index 7c3f6fd5..4599c917 100644 --- a/gnocchi/cli.py +++ b/gnocchi/cli.py @@ -216,10 +216,6 @@ class MetricProcessor(MetricProcessBase): finally: return self._tasks or self.fallback_tasks - def _sack_lock(self, sack): - lock_name = b'gnocchi-sack-%s-lock' % str(sack).encode('ascii') - return self._coord.get_lock(lock_name) - def _run_job(self): m_count = 0 s_count = 0 @@ -227,7 +223,7 @@ class MetricProcessor(MetricProcessBase): for s in self._get_tasks(): # TODO(gordc): support delay release lock so we don't # process a sack right after another process - lock = self._sack_lock(s) + lock = in_store.get_sack_lock(self._coord, s) if not lock.acquire(blocking=False): continue try: diff --git a/gnocchi/storage/_carbonara.py b/gnocchi/storage/_carbonara.py index 6f7c0060..1a6eac6c 100644 --- a/gnocchi/storage/_carbonara.py +++ b/gnocchi/storage/_carbonara.py @@ -339,14 +339,16 @@ class CarbonaraBasedStorage(storage.StorageDriver): def delete_metric(self, metric, sync=False): LOG.debug("Deleting metric %s", metric) - lock = self._lock(metric.id) + lock = self.incoming.get_sack_lock( + self.coord, self.incoming.sack_for_metric(metric.id)) if not lock.acquire(blocking=sync): raise storage.LockedMetric(metric) - try: - self._delete_metric(metric) - self.incoming.delete_unprocessed_measures_for_metric_id(metric.id) - finally: - lock.release() + # NOTE(gordc): no need to hold lock because the metric has been already + # marked as "deleted" in the indexer so no measure worker + # is going to process it anymore. 
+ lock.release() + self._delete_metric(metric) + self.incoming.delete_unprocessed_measures_for_metric_id(metric.id) @staticmethod def _delete_metric_measures(metric, timestamp_key, diff --git a/gnocchi/storage/incoming/_carbonara.py b/gnocchi/storage/incoming/_carbonara.py index 2fe9bbd5..22805ad0 100644 --- a/gnocchi/storage/incoming/_carbonara.py +++ b/gnocchi/storage/incoming/_carbonara.py @@ -71,6 +71,11 @@ class CarbonaraBasedStorage(incoming.StorageDriver): def remove_sack_group(num_sacks): raise NotImplementedError + @staticmethod + def get_sack_lock(coord, sack): + lock_name = b'gnocchi-sack-%s-lock' % str(sack).encode('ascii') + return coord.get_lock(lock_name) + def _unserialize_measures(self, measure_id, data): nb_measures = len(data) // self._MEASURE_SERIAL_LEN try: -- GitLab From 5c18a8d4fe24ac223b45ac576fb3d4e4545c64b7 Mon Sep 17 00:00:00 2001 From: gord chung Date: Tue, 9 May 2017 17:57:24 -0400 Subject: [PATCH 0734/1483] drop metric lock lock sack when refresh param is given. check first if there's anything to be refreshed rather than blindly blocking. 
Change-Id: I945db03e80450d35877427fce9c12675891e016d --- gnocchi/opts.py | 4 ++++ gnocchi/rest/__init__.py | 25 +++++++++++++++------- gnocchi/storage/_carbonara.py | 29 +++++++++++++++----------- gnocchi/storage/incoming/_carbonara.py | 4 ++++ gnocchi/storage/incoming/ceph.py | 5 +++++ gnocchi/storage/incoming/file.py | 3 +++ gnocchi/storage/incoming/redis.py | 3 +++ gnocchi/storage/incoming/s3.py | 4 ++++ gnocchi/storage/incoming/swift.py | 4 ++++ 9 files changed, 62 insertions(+), 19 deletions(-) diff --git a/gnocchi/opts.py b/gnocchi/opts.py index f1142fd4..023138da 100644 --- a/gnocchi/opts.py +++ b/gnocchi/opts.py @@ -114,6 +114,10 @@ def list_opts(): required=True, help=('The maximum number of items returned in a ' 'single response from a collection resource')), + cfg.IntOpt('refresh_timeout', + default=10, min=0, + help='Number of seconds before timeout when attempting ' + 'to force refresh of metric.'), )), ("storage", (_STORAGE_OPTS + gnocchi.storage._carbonara.OPTS)), ("incoming", _INCOMING_OPTS), diff --git a/gnocchi/rest/__init__.py b/gnocchi/rest/__init__.py index ec3207f5..8623b807 100644 --- a/gnocchi/rest/__init__.py +++ b/gnocchi/rest/__init__.py @@ -426,10 +426,14 @@ class MetricController(rest.RestController): except ValueError as e: abort(400, e) - if strtobool("refresh", refresh): - pecan.request.storage.process_new_measures( - pecan.request.indexer, [six.text_type(self.metric.id)], True) - + if (strtobool("refresh", refresh) and + pecan.request.storage.incoming.has_unprocessed(self.metric)): + try: + pecan.request.storage.refresh_metric( + pecan.request.indexer, self.metric, + pecan.request.conf.api.refresh_timeout) + except storage.SackLockTimeoutError as e: + abort(503, e) try: if aggregation in self.custom_agg: measures = self.custom_agg[aggregation].compute( @@ -1616,9 +1620,16 @@ class AggregationController(rest.RestController): try: if strtobool("refresh", refresh): - pecan.request.storage.process_new_measures( - pecan.request.indexer, 
- [six.text_type(m.id) for m in metrics], True) + store = pecan.request.storage + metrics_to_update = [ + m for m in metrics if store.incoming.has_unprocessed(m)] + for m in metrics_to_update: + try: + pecan.request.storage.refresh_metric( + pecan.request.indexer, m, + pecan.request.conf.api.refresh_timeout) + except storage.SackLockTimeoutError as e: + abort(503, e) if number_of_metrics == 1: # NOTE(sileht): don't do the aggregation if we only have one # metric diff --git a/gnocchi/storage/_carbonara.py b/gnocchi/storage/_carbonara.py index 1a6eac6c..76f8b259 100644 --- a/gnocchi/storage/_carbonara.py +++ b/gnocchi/storage/_carbonara.py @@ -54,6 +54,10 @@ class CorruptionError(ValueError): super(CorruptionError, self).__init__(message) +class SackLockTimeoutError(Exception): + pass + + class CarbonaraBasedStorage(storage.StorageDriver): def __init__(self, conf, incoming): @@ -70,10 +74,6 @@ class CarbonaraBasedStorage(storage.StorageDriver): def stop(self): self.coord.stop() - def _lock(self, metric_id): - lock_name = b"gnocchi-" + str(metric_id).encode('ascii') - return self.coord.get_lock(lock_name) - @staticmethod def _get_measures(metric, timestamp_key, aggregation, granularity, version=3): @@ -355,18 +355,25 @@ class CarbonaraBasedStorage(storage.StorageDriver): aggregation, granularity, version=3): raise NotImplementedError + def refresh_metric(self, indexer, metric, timeout): + s = self.incoming.sack_for_metric(metric.id) + lock = self.incoming.get_sack_lock(self.coord, s) + if not lock.acquire(blocking=timeout): + raise SackLockTimeoutError( + 'Unable to refresh metric: %s. Metric is locked. ' + 'Please try again.' % metric.id) + try: + self.process_new_measures(indexer, [six.text_type(metric.id)]) + finally: + lock.release() + def process_new_measures(self, indexer, metrics_to_process, sync=False): # process only active metrics. deleted metrics with unprocessed # measures will be skipped until cleaned by janitor. 
metrics = indexer.list_metrics(ids=metrics_to_process) for metric in metrics: - lock = self._lock(metric.id) - # Do not block if we cannot acquire the lock, that means some other - # worker is doing the job. We'll just ignore this metric and may - # get back later to it if needed. - if not lock.acquire(blocking=sync): - continue + # NOTE(gordc): must lock at sack level try: locksw = timeutils.StopWatch().start() LOG.debug("Processing measures for %s", metric) @@ -381,8 +388,6 @@ class CarbonaraBasedStorage(storage.StorageDriver): if sync: raise LOG.error("Error processing new measures", exc_info=True) - finally: - lock.release() def _compute_and_store_timeseries(self, metric, measures): # NOTE(mnaser): The metric could have been handled by diff --git a/gnocchi/storage/incoming/_carbonara.py b/gnocchi/storage/incoming/_carbonara.py index 22805ad0..e20720d6 100644 --- a/gnocchi/storage/incoming/_carbonara.py +++ b/gnocchi/storage/incoming/_carbonara.py @@ -127,6 +127,10 @@ class CarbonaraBasedStorage(incoming.StorageDriver): def process_measure_for_metric(metric): raise NotImplementedError + @staticmethod + def has_unprocessed(metric): + raise NotImplementedError + def sack_for_metric(self, metric_id): return metric_id.int % self.NUM_SACKS diff --git a/gnocchi/storage/incoming/ceph.py b/gnocchi/storage/incoming/ceph.py index 0f6c970a..677c5233 100644 --- a/gnocchi/storage/incoming/ceph.py +++ b/gnocchi/storage/incoming/ceph.py @@ -183,6 +183,11 @@ class CephStorage(_carbonara.CarbonaraBasedStorage): self.ioctx.operate_write_op(op, self.get_sack_name(sack), flags=self.OMAP_WRITE_FLAGS) + def has_unprocessed(self, metric): + sack = self.sack_for_metric(metric.id) + object_prefix = self.MEASURE_PREFIX + "_" + str(metric.id) + return bool(self._list_object_names_to_process(sack, object_prefix)) + @contextlib.contextmanager def process_measure_for_metric(self, metric): sack = self.sack_for_metric(metric.id) diff --git a/gnocchi/storage/incoming/file.py 
b/gnocchi/storage/incoming/file.py index 070094e7..781d3ec5 100644 --- a/gnocchi/storage/incoming/file.py +++ b/gnocchi/storage/incoming/file.py @@ -148,6 +148,9 @@ class FileStorage(_carbonara.CarbonaraBasedStorage): files = self._list_measures_container_for_metric_id(metric_id) self._delete_measures_files_for_metric_id(metric_id, files) + def has_unprocessed(self, metric): + return os.path.isdir(self._build_measure_path(metric.id)) + @contextlib.contextmanager def process_measure_for_metric(self, metric): files = self._list_measures_container_for_metric_id(metric.id) diff --git a/gnocchi/storage/incoming/redis.py b/gnocchi/storage/incoming/redis.py index fa5f1e88..9e81327c 100644 --- a/gnocchi/storage/incoming/redis.py +++ b/gnocchi/storage/incoming/redis.py @@ -65,6 +65,9 @@ class RedisStorage(_carbonara.CarbonaraBasedStorage): def delete_unprocessed_measures_for_metric_id(self, metric_id): self._client.delete(self._build_measure_path(metric_id)) + def has_unprocessed(self, metric): + return bool(self._client.exists(self._build_measure_path(metric.id))) + @contextlib.contextmanager def process_measure_for_metric(self, metric): key = self._build_measure_path(metric.id) diff --git a/gnocchi/storage/incoming/s3.py b/gnocchi/storage/incoming/s3.py index 52016d11..89de4192 100644 --- a/gnocchi/storage/incoming/s3.py +++ b/gnocchi/storage/incoming/s3.py @@ -154,6 +154,10 @@ class S3Storage(_carbonara.CarbonaraBasedStorage): files = self._list_measure_files_for_metric_id(sack, metric_id) s3.bulk_delete(self.s3, self._bucket_name_measures, files) + def has_unprocessed(self, metric): + sack = self.sack_for_metric(metric.id) + return bool(self._list_measure_files_for_metric_id(sack, metric.id)) + @contextlib.contextmanager def process_measure_for_metric(self, metric): sack = self.sack_for_metric(metric.id) diff --git a/gnocchi/storage/incoming/swift.py b/gnocchi/storage/incoming/swift.py index b0549d0f..304126f9 100644 --- a/gnocchi/storage/incoming/swift.py +++ 
b/gnocchi/storage/incoming/swift.py @@ -93,6 +93,10 @@ class SwiftStorage(_carbonara.CarbonaraBasedStorage): files = self._list_measure_files_for_metric_id(sack, metric_id) swift.bulk_delete(self.swift, self.get_sack_name(sack), files) + def has_unprocessed(self, metric): + sack = self.sack_for_metric(metric.id) + return bool(self._list_measure_files_for_metric_id(sack, metric.id)) + @contextlib.contextmanager def process_measure_for_metric(self, metric): sack = self.sack_for_metric(metric.id) -- GitLab From bca589a557dff0b5541c7cfa69dcba3215f3954d Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Mon, 15 May 2017 18:14:14 +0200 Subject: [PATCH 0735/1483] doc: move storage plan to running makes more sense Change-Id: Id5eb6d15cf521b6ade14d37ebcb648f4489cb63c --- doc/source/architecture.rst | 30 ------------------------------ doc/source/running.rst | 30 ++++++++++++++++++++++++++++++ 2 files changed, 30 insertions(+), 30 deletions(-) diff --git a/doc/source/architecture.rst b/doc/source/architecture.rst index f62a67c7..9b7b4f9c 100755 --- a/doc/source/architecture.rst +++ b/doc/source/architecture.rst @@ -80,33 +80,3 @@ duration computing). .. _PostgreSQL: http://postgresql.org .. _MySQL: http://mysql.org - -How to plan for Gnocchi’s storage ---------------------------------- - -Gnocchi uses a custom file format based on its library *Carbonara*. In Gnocchi, -a time series is a collection of points, where a point is a given measure, or -sample, in the lifespan of a time series. 
The storage format is compressed -using various techniques, therefore the computing of a time series' size can be -estimated based on its **worst** case scenario with the following formula:: - - number of points × 8 bytes = size in bytes - -The number of points you want to keep is usually determined by the following -formula:: - - number of points = timespan ÷ granularity - -For example, if you want to keep a year of data with a one minute resolution:: - - number of points = (365 days × 24 hours × 60 minutes) ÷ 1 minute - number of points = 525 600 - -Then:: - - size in bytes = 525 600 bytes × 6 = 3 159 600 bytes = 3 085 KiB - -This is just for a single aggregated time series. If your archive policy uses -the 6 default aggregation methods (mean, min, max, sum, std, count) with the -same "one year, one minute aggregations" resolution, the space used will go up -to a maximum of 6 × 4.1 MiB = 24.6 MiB. diff --git a/doc/source/running.rst b/doc/source/running.rst index 0cbdbb13..cc2675e8 100644 --- a/doc/source/running.rst +++ b/doc/source/running.rst @@ -119,6 +119,36 @@ values are sent, the maximum pessimistic storage size is taken into account. * maximum optimistic size per metric: 1 539 KiB * maximum pessimistic size per metric: 277 172 KiB +How to plan for Gnocchi’s storage +================================= + +Gnocchi uses a custom file format based on its library *Carbonara*. In Gnocchi, +a time series is a collection of points, where a point is a given measure, or +sample, in the lifespan of a time series. 
The storage format is compressed +using various techniques, therefore the computing of a time series' size can be +estimated based on its **worst** case scenario with the following formula:: + + number of points × 8 bytes = size in bytes + +The number of points you want to keep is usually determined by the following +formula:: + + number of points = timespan ÷ granularity + +For example, if you want to keep a year of data with a one minute resolution:: + + number of points = (365 days × 24 hours × 60 minutes) ÷ 1 minute + number of points = 525 600 + +Then:: + + size in bytes = 525 600 bytes × 6 = 3 159 600 bytes = 3 085 KiB + +This is just for a single aggregated time series. If your archive policy uses +the 6 default aggregation methods (mean, min, max, sum, std, count) with the +same "one year, one minute aggregations" resolution, the space used will go up +to a maximum of 6 × 4.1 MiB = 24.6 MiB. + How many metricd workers do we need to run ========================================== -- GitLab From 63fa106d781943af4965a33bb09f5089d685c0f9 Mon Sep 17 00:00:00 2001 From: gord chung Date: Mon, 15 May 2017 17:12:05 +0000 Subject: [PATCH 0736/1483] ceph: async write new measures we should use async writes since we're not using threads anymore. testing on small ceph cluster and posting 20metrics/POST results in ~2x better write performance. 
Change-Id: Ic451e6ea874a2a3695d3d148b1c05ecc54aa473b --- gnocchi/storage/incoming/ceph.py | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/gnocchi/storage/incoming/ceph.py b/gnocchi/storage/incoming/ceph.py index 677c5233..9d75f7bd 100644 --- a/gnocchi/storage/incoming/ceph.py +++ b/gnocchi/storage/incoming/ceph.py @@ -79,6 +79,7 @@ class CephStorage(_carbonara.CarbonaraBasedStorage): def add_measures_batch(self, metrics_and_measures): names_by_sack = defaultdict(list) + ops = [] for metric, measures in six.iteritems(metrics_and_measures): name = "_".join(( self.MEASURE_PREFIX, @@ -88,8 +89,12 @@ class CephStorage(_carbonara.CarbonaraBasedStorage): sack = self.get_sack_name(self.sack_for_metric(metric.id)) names_by_sack[sack].append(name) data = self._encode_measures(measures) - self.ioctx.write_full(name, data) + ops.append(self.ioctx.aio_write_full(name, data)) + while ops: + op = ops.pop() + op.wait_for_complete() + ops = [] for sack, names in names_by_sack.items(): with rados.WriteOpCtx() as op: # NOTE(sileht): list all objects in a pool is too slow with @@ -99,8 +104,11 @@ class CephStorage(_carbonara.CarbonaraBasedStorage): # omap the list of objects to process (not xattr because # it doesn't # allow to configure the locking behavior) self.ioctx.set_omap(op, tuple(names), (b"",) * len(names)) - self.ioctx.operate_write_op(op, sack, - flags=self.OMAP_WRITE_FLAGS) + ops.append(self.ioctx.operate_aio_write_op( + op, sack, flags=self.OMAP_WRITE_FLAGS)) + while ops: + op = ops.pop() + op.wait_for_complete() def _build_report(self, details): metrics = set() -- GitLab From 60f7e33bd9498f714e5dc368e05bf0c6577e32c4 Mon Sep 17 00:00:00 2001 From: gord chung Date: Mon, 15 May 2017 22:53:33 +0000 Subject: [PATCH 0737/1483] fix workload logging accidentally show total metrics in last sack as total metrics processed. 
Change-Id: I5c95857ab35ee0d2ba0932be9634853075851bb1 --- gnocchi/cli.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/gnocchi/cli.py b/gnocchi/cli.py index 853153dd..91992c67 100644 --- a/gnocchi/cli.py +++ b/gnocchi/cli.py @@ -162,7 +162,7 @@ class MetricProcessor(MetricProcessBase): continue try: metrics = in_store.list_metric_with_measures_to_process(s) - m_count = len(metrics) + m_count += len(metrics) self.store.process_background_tasks(self.index, metrics) s_count += 1 except Exception: -- GitLab From e35e526ad6ddb60913deda84d188aba4e6507fd7 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Mon, 15 May 2017 19:53:01 +0200 Subject: [PATCH 0738/1483] ceph: fix errno_to_exception We are using an internal API of rados. This change removes its usage. Closes-bug: #1690876 Change-Id: Ia1304395748f54fed0995406b9bc7260af8f5e84 --- gnocchi/storage/common/ceph.py | 24 +++++++++++++++++++++++- 1 file changed, 23 insertions(+), 1 deletion(-) diff --git a/gnocchi/storage/common/ceph.py b/gnocchi/storage/common/ceph.py index 764cdf4f..b1c9b673 100644 --- a/gnocchi/storage/common/ceph.py +++ b/gnocchi/storage/common/ceph.py @@ -12,6 +12,8 @@ # License for the specific language governing permissions and limitations # under the License. +import errno + from oslo_log import log LOG = log.getLogger(__name__) @@ -73,6 +75,26 @@ def close_rados_connection(conn, ioctx): conn.shutdown() +# NOTE(sileht): The mapping is not part of the rados Public API So we copy it +# here. 
+EXCEPTION_NAMES = { + errno.EPERM: 'PermissionError', + errno.ENOENT: 'ObjectNotFound', + errno.EIO: 'IOError', + errno.ENOSPC: 'NoSpace', + errno.EEXIST: 'ObjectExists', + errno.EBUSY: 'ObjectBusy', + errno.ENODATA: 'NoData', + errno.EINTR: 'InterruptedOrTimeoutError', + errno.ETIMEDOUT: 'TimedOut', + errno.EACCES: 'PermissionDeniedError' +} + + def errno_to_exception(ret): if ret < 0: - raise rados.errno_to_exception[abs(ret)] + name = EXCEPTION_NAMES.get(abs(ret)) + if name is None: + raise rados.Error("Unhandled error '%s'" % ret) + else: + raise getattr(rados, name) -- GitLab From 45954c26c8ec4a2ff68743df83c27293c7279d1e Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Tue, 16 May 2017 11:21:02 +0200 Subject: [PATCH 0739/1483] Replace oslo_utils.units.M by its value Really? Change-Id: Ibcf02f81c6148a0958a4924a1b322acf3d92959e --- gnocchi/indexer/sqlalchemy_base.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/gnocchi/indexer/sqlalchemy_base.py b/gnocchi/indexer/sqlalchemy_base.py index da36b186..25cecc5d 100644 --- a/gnocchi/indexer/sqlalchemy_base.py +++ b/gnocchi/indexer/sqlalchemy_base.py @@ -22,7 +22,6 @@ import decimal import iso8601 from oslo_db.sqlalchemy import models from oslo_utils import timeutils -from oslo_utils import units import six import sqlalchemy from sqlalchemy.dialects import mysql @@ -58,7 +57,7 @@ class PreciseTimestamp(types.TypeDecorator): return None integer = int(dec) - micro = (dec - decimal.Decimal(integer)) * decimal.Decimal(units.M) + micro = (dec - decimal.Decimal(integer)) * decimal.Decimal(1000000) daittyme = datetime.datetime.utcfromtimestamp(integer) return daittyme.replace(microsecond=int(round(micro))) -- GitLab From 4444130a6aeff2be38d78d9bf5cacc1d0e21c278 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Tue, 16 May 2017 11:23:34 +0200 Subject: [PATCH 0740/1483] Stop relying on dictutils Change-Id: Ida27c6ae1f6b5e5cd9a763c596054ccda2f9a38f --- gnocchi/rest/__init__.py | 18 ++++++++++++++++-- 1 
file changed, 16 insertions(+), 2 deletions(-) diff --git a/gnocchi/rest/__init__.py b/gnocchi/rest/__init__.py index ec3207f5..cd857fa7 100644 --- a/gnocchi/rest/__init__.py +++ b/gnocchi/rest/__init__.py @@ -19,7 +19,6 @@ import itertools import uuid import jsonpatch -from oslo_utils import dictutils import pecan from pecan import rest import pyparsing @@ -57,6 +56,21 @@ def abort(status_code, detail='', headers=None, comment=None, **kw): return pecan.abort(status_code, detail, headers, comment, **kw) +def flatten_dict_to_keypairs(d, separator=':'): + """Generator that produces sequence of keypairs for nested dictionaries. + + :param d: dictionaries which may be nested + :param separator: symbol between names + """ + for name, value in sorted(six.iteritems(d)): + if isinstance(value, dict): + for subname, subvalue in flatten_dict_to_keypairs(value, + separator): + yield ('%s%s%s' % (name, separator, subname), subvalue) + else: + yield name, value + + def enforce(rule, target): """Return the user and project the request should be limited to. @@ -73,7 +87,7 @@ def enforce(rule, target): target = target.__dict__ # Flatten dict - target = dict(dictutils.flatten_dict_to_keypairs(d=target, separator='.')) + target = dict(flatten_dict_to_keypairs(d=target, separator='.')) if not pecan.request.policy_enforcer.enforce(rule, target, creds): abort(403) -- GitLab From 3332206969f5dc6c93d22ac5e8d752199632cf34 Mon Sep 17 00:00:00 2001 From: gord chung Date: Mon, 15 May 2017 21:25:36 +0000 Subject: [PATCH 0741/1483] ceph: store measures in omap values ceph sucks for small objects; POSTs to ceph incoming are always small objects. this changes logic to store new measures as omap values instead of objects. this allows us to write to 'memory' aka leveldb/rocksdb instead of disk. this does not change the durability agreement since we are already storing object keys in omap so if omap fails, we will lose link to objects regardless if on disk or not. 
using local 20OSD ceph cluster, with 18 metricd. this is: - ~2x faster than aio_write patch to POST - ~2x faster than aio_write patch to process - ~3x faster than no aio_write patch to POST - ~3x faster than no aio_write patch to process - ~3.5x faster than 3.1 to POST - something a lot faster than 3.1 to process (no idea why) Change-Id: I4bae365955fdbafe4ad837596490774c42bc5251 --- gnocchi/storage/incoming/ceph.py | 96 +++++++++++--------------------- 1 file changed, 31 insertions(+), 65 deletions(-) diff --git a/gnocchi/storage/incoming/ceph.py b/gnocchi/storage/incoming/ceph.py index 9d75f7bd..15777a52 100644 --- a/gnocchi/storage/incoming/ceph.py +++ b/gnocchi/storage/incoming/ceph.py @@ -14,7 +14,6 @@ from collections import defaultdict import contextlib import datetime -import functools import json import uuid @@ -78,8 +77,7 @@ class CephStorage(_carbonara.CarbonaraBasedStorage): pass def add_measures_batch(self, metrics_and_measures): - names_by_sack = defaultdict(list) - ops = [] + data_by_sack = defaultdict(lambda: defaultdict(list)) for metric, measures in six.iteritems(metrics_and_measures): name = "_".join(( self.MEASURE_PREFIX, @@ -87,15 +85,12 @@ class CephStorage(_carbonara.CarbonaraBasedStorage): str(uuid.uuid4()), datetime.datetime.utcnow().strftime("%Y%m%d_%H:%M:%S"))) sack = self.get_sack_name(self.sack_for_metric(metric.id)) - names_by_sack[sack].append(name) - data = self._encode_measures(measures) - ops.append(self.ioctx.aio_write_full(name, data)) - while ops: - op = ops.pop() - op.wait_for_complete() + data_by_sack[sack]['names'].append(name) + data_by_sack[sack]['measures'].append( + self._encode_measures(measures)) ops = [] - for sack, names in names_by_sack.items(): + for sack, data in data_by_sack.items(): with rados.WriteOpCtx() as op: # NOTE(sileht): list all objects in a pool is too slow with # many objects (2min for 20000 objects in 50osds cluster), @@ -103,7 +98,8 @@ class CephStorage(_carbonara.CarbonaraBasedStorage): # So we 
create an object MEASURE_PREFIX, that have as # omap the list of objects to process (not xattr because # it doesn't # allow to configure the locking behavior) - self.ioctx.set_omap(op, tuple(names), (b"",) * len(names)) + self.ioctx.set_omap(op, tuple(data['names']), + tuple(data['measures'])) ops.append(self.ioctx.operate_aio_write_op( op, sack, flags=self.OMAP_WRITE_FLAGS)) while ops: @@ -117,7 +113,7 @@ class CephStorage(_carbonara.CarbonaraBasedStorage): for i in six.moves.range(self.NUM_SACKS): marker = "" while True: - names = list(self._list_object_names_to_process( + names = list(self._list_keys_to_process( i, marker=marker, limit=self.Q_LIMIT)) if names and names[0] < marker: raise _carbonara.ReportGenerationError("Unable to cleanly " @@ -135,8 +131,7 @@ class CephStorage(_carbonara.CarbonaraBasedStorage): return len(metrics), count, metric_details if details else None - def _list_object_names_to_process(self, sack, prefix="", marker="", - limit=-1): + def _list_keys_to_process(self, sack, prefix="", marker="", limit=-1): with rados.ReadOpCtx() as op: omaps, ret = self.ioctx.get_omap_vals(op, marker, prefix, limit) try: @@ -162,7 +157,7 @@ class CephStorage(_carbonara.CarbonaraBasedStorage): names = set() marker = "" while True: - obj_names = list(self._list_object_names_to_process( + obj_names = list(self._list_keys_to_process( sack, marker=marker, limit=self.Q_LIMIT)) names.update(name.split("_")[1] for name in obj_names) if len(obj_names) < self.Q_LIMIT: @@ -173,87 +168,58 @@ class CephStorage(_carbonara.CarbonaraBasedStorage): def delete_unprocessed_measures_for_metric_id(self, metric_id): sack = self.sack_for_metric(metric_id) - object_prefix = self.MEASURE_PREFIX + "_" + str(metric_id) - object_names = tuple(self._list_object_names_to_process( - sack, object_prefix)) + key_prefix = self.MEASURE_PREFIX + "_" + str(metric_id) + keys = tuple(self._list_keys_to_process(sack, key_prefix)) - if not object_names: + if not keys: return - for op in 
list(map(self.ioctx.aio_remove, object_names)): - op.wait_for_complete_and_cb() - # Now clean objects and omap with rados.WriteOpCtx() as op: # NOTE(sileht): come on Ceph, no return code # for this operation ?!! - self.ioctx.remove_omap_keys(op, object_names) + self.ioctx.remove_omap_keys(op, keys) self.ioctx.operate_write_op(op, self.get_sack_name(sack), flags=self.OMAP_WRITE_FLAGS) def has_unprocessed(self, metric): sack = self.sack_for_metric(metric.id) object_prefix = self.MEASURE_PREFIX + "_" + str(metric.id) - return bool(self._list_object_names_to_process(sack, object_prefix)) + return bool(self._list_keys_to_process(sack, object_prefix)) @contextlib.contextmanager def process_measure_for_metric(self, metric): sack = self.sack_for_metric(metric.id) - object_prefix = self.MEASURE_PREFIX + "_" + str(metric.id) - object_names = tuple(self._list_object_names_to_process( - sack, object_prefix)) + key_prefix = self.MEASURE_PREFIX + "_" + str(metric.id) measures = [] - ops = [] - bufsize = 8192 # Same sa rados_read one - - tmp_measures = {} - - def add_to_measures(name, comp, data): - # Check that the measure file has not been deleted while still - # listed in the OMAP – this can happen after a crash - ret = comp.get_return_value() + processed_keys = [] + with rados.ReadOpCtx() as op: + omaps, ret = self.ioctx.get_omap_vals(op, "", key_prefix, -1) + self.ioctx.operate_read_op(op, self.get_sack_name(sack), + flag=self.OMAP_READ_FLAGS) + # NOTE(sileht): after reading the libradospy, I'm + # not sure that ret will have the correct value + # get_omap_vals transforms the C int to python int + # before operate_read_op is called, I dunno if the int + # content is copied during this transformation or if + # this is a pointer to the C int, I think it's copied... 
try: ceph.errno_to_exception(ret) except rados.ObjectNotFound: # Object has been deleted, so this is just a stalled entry # in the OMAP listing, ignore return - - if name in tmp_measures: - tmp_measures[name] += data - else: - tmp_measures[name] = data - if len(data) < bufsize: - measures.extend(self._unserialize_measures(name, - tmp_measures[name])) - del tmp_measures[name] - else: - ops.append(self.ioctx.aio_read( - name, bufsize, len(tmp_measures[name]), - functools.partial(add_to_measures, name) - )) - - for name in object_names: - ops.append(self.ioctx.aio_read( - name, bufsize, 0, - functools.partial(add_to_measures, name) - )) - - while ops: - op = ops.pop() - op.wait_for_complete_and_cb() + for k, v in omaps: + measures.extend(self._unserialize_measures(k, v)) + processed_keys.append(k) yield measures - # First delete all objects - for op in list(map(self.ioctx.aio_remove, object_names)): - op.wait_for_complete_and_cb() - # Now clean omap with rados.WriteOpCtx() as op: # NOTE(sileht): come on Ceph, no return code # for this operation ?!! - self.ioctx.remove_omap_keys(op, object_names) + self.ioctx.remove_omap_keys(op, tuple(processed_keys)) self.ioctx.operate_write_op(op, self.get_sack_name(sack), flags=self.OMAP_WRITE_FLAGS) -- GitLab From 906951633bada71e902bfb260ccdc508becae8e4 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Wed, 17 May 2017 13:36:13 +0200 Subject: [PATCH 0742/1483] Fix Carbonara compression with Cradox and LZ4 >= 0.9.2 Change-Id: Ifca3c175146887db153b2f5bb780a532e8bc6d48 --- gnocchi/carbonara.py | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/gnocchi/carbonara.py b/gnocchi/carbonara.py index 55b89466..6aae5032 100644 --- a/gnocchi/carbonara.py +++ b/gnocchi/carbonara.py @@ -251,6 +251,13 @@ class TimeSerie(object): return GroupedTimeSeries(self.ts[start:], granularity) + @staticmethod + def _compress(payload): + # FIXME(jd) lz4 > 0.9.2 returns bytearray instead of bytes. 
But Cradox + # does not accept bytearray but only bytes, so make sure that we have a + # byte type returned. + return memoryview(lz4.block.compress(payload)).tobytes() + class BoundTimeSerie(TimeSerie): def __init__(self, ts=None, block_size=None, back_window=0): @@ -341,7 +348,7 @@ class BoundTimeSerie(TimeSerie): timestamps = numpy.array(timestamps, dtype=' Date: Wed, 17 May 2017 23:31:28 +0200 Subject: [PATCH 0743/1483] redis: fix CLIENT_LIST_ARGS parsing The entire list must be taken, not only the last argument. Change-Id: I9b5b9336250cb5292cdd6c7ca4964bfe186575d5 Closes-Bug: #1691542 --- gnocchi/storage/common/redis.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/gnocchi/storage/common/redis.py b/gnocchi/storage/common/redis.py index 91a90d4d..8491c369 100644 --- a/gnocchi/storage/common/redis.py +++ b/gnocchi/storage/common/redis.py @@ -98,7 +98,7 @@ def get_client(conf): if a in CLIENT_BOOL_ARGS: v = utils.strtobool(options[a][-1]) elif a in CLIENT_LIST_ARGS: - v = options[a][-1] + v = options[a] elif a in CLIENT_INT_ARGS: v = int(options[a][-1]) else: -- GitLab From 26f87a457e233278945885dbea6a8b7774210d5b Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Tue, 16 May 2017 11:24:19 +0200 Subject: [PATCH 0744/1483] Stop using oslo_utils.fnmatch This was a work around for old buggy Python 2.7 versions. Everybody will have to upgrade! Change-Id: Id105e2e8c1fab7af49c11b323db64886e57eade6 --- gnocchi/indexer/__init__.py | 2 +- releasenotes/notes/fnmatch-python-2.7-c524ce1e1b238b0a.yaml | 5 +++++ 2 files changed, 6 insertions(+), 1 deletion(-) create mode 100644 releasenotes/notes/fnmatch-python-2.7-c524ce1e1b238b0a.yaml diff --git a/gnocchi/indexer/__init__.py b/gnocchi/indexer/__init__.py index 7d29ba92..e529382c 100644 --- a/gnocchi/indexer/__init__.py +++ b/gnocchi/indexer/__init__.py @@ -13,12 +13,12 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. +import fnmatch import hashlib import os import iso8601 from oslo_config import cfg -from oslo_utils import fnmatch from oslo_utils import netutils import six from stevedore import driver diff --git a/releasenotes/notes/fnmatch-python-2.7-c524ce1e1b238b0a.yaml b/releasenotes/notes/fnmatch-python-2.7-c524ce1e1b238b0a.yaml new file mode 100644 index 00000000..bab5e73a --- /dev/null +++ b/releasenotes/notes/fnmatch-python-2.7-c524ce1e1b238b0a.yaml @@ -0,0 +1,5 @@ +--- +other: + - | + A workaround for a Python 2.7 bug in `fnmatch` has been removed. Makes sure + you use at least Python 2.7.9 to avoid running into it. -- GitLab From bbf27ad09f1736590dfafe217b526ed6cc7c6b59 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Tue, 16 May 2017 11:26:19 +0200 Subject: [PATCH 0745/1483] Stop using oslo_utils.netutils The urlsplit provided does not provide anymore value than the builtin one in this case. 
Change-Id: I4a40c35b7123b7760c354ccfd7e1a5a0c156becc --- gnocchi/indexer/__init__.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/gnocchi/indexer/__init__.py b/gnocchi/indexer/__init__.py index e529382c..1ffc9cb4 100644 --- a/gnocchi/indexer/__init__.py +++ b/gnocchi/indexer/__init__.py @@ -19,8 +19,8 @@ import os import iso8601 from oslo_config import cfg -from oslo_utils import netutils import six +from six.moves.urllib import parse from stevedore import driver from gnocchi import exceptions @@ -72,7 +72,7 @@ class Resource(object): def get_driver(conf): """Return the configured driver.""" - split = netutils.urlsplit(conf.indexer.url) + split = parse.urlsplit(conf.indexer.url) d = driver.DriverManager('gnocchi.indexer', split.scheme).driver return d(conf) -- GitLab From 11e5668e69db32687b807dd47bfb72a91c596bf5 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Thu, 18 May 2017 21:25:23 +0200 Subject: [PATCH 0746/1483] carbonara: rewrite lock debug statement There's no lock anymore so this should not be logged like that. instead, we only LOG.debug if we successfully processed the log. The time is not useful anymore and since log are timestamped, it can be computed if needed anyway. 
Change-Id: I40d38985f7a4f3368aeeec56d2b592f64882d1f7 --- gnocchi/storage/_carbonara.py | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/gnocchi/storage/_carbonara.py b/gnocchi/storage/_carbonara.py index 76f8b259..4d367d1a 100644 --- a/gnocchi/storage/_carbonara.py +++ b/gnocchi/storage/_carbonara.py @@ -375,16 +375,12 @@ class CarbonaraBasedStorage(storage.StorageDriver): for metric in metrics: # NOTE(gordc): must lock at sack level try: - locksw = timeutils.StopWatch().start() LOG.debug("Processing measures for %s", metric) with self.incoming.process_measure_for_metric(metric) \ as measures: self._compute_and_store_timeseries(metric, measures) - LOG.debug("Metric %s locked during %.2f seconds", - metric.id, locksw.elapsed()) + LOG.debug("Measures for metric %s processed", metric) except Exception: - LOG.debug("Metric %s locked during %.2f seconds", - metric.id, locksw.elapsed()) if sync: raise LOG.error("Error processing new measures", exc_info=True) -- GitLab From cf762fbb3dcf423ad6bd7acaeec50b97b446671b Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Tue, 16 May 2017 11:35:35 +0200 Subject: [PATCH 0747/1483] Remove oslo.utils dependency Change-Id: I5d1a027f4e2d550ab8d27ca0cf9bc61c586f2eed --- gnocchi/aggregates/moving_stats.py | 7 +-- gnocchi/cli.py | 6 +- gnocchi/indexer/sqlalchemy_base.py | 7 +-- gnocchi/storage/_carbonara.py | 41 +++++++------- gnocchi/tests/test_carbonara.py | 6 +- gnocchi/tests/test_rest.py | 6 +- gnocchi/tests/test_storage.py | 4 +- gnocchi/tests/test_utils.py | 27 +++++++++ gnocchi/utils.py | 91 ++++++++++++++++++++++++++++-- requirements.txt | 2 +- 10 files changed, 151 insertions(+), 46 deletions(-) diff --git a/gnocchi/aggregates/moving_stats.py b/gnocchi/aggregates/moving_stats.py index cfd04adb..b0ce3b40 100644 --- a/gnocchi/aggregates/moving_stats.py +++ b/gnocchi/aggregates/moving_stats.py @@ -16,7 +16,6 @@ import datetime import numpy -from oslo_utils import timeutils import pandas import six @@ -80,8 
+79,8 @@ class MovingAverage(aggregates.CustomAggregator): msec = datetime.timedelta(milliseconds=1) zero = datetime.timedelta(seconds=0) half_span = datetime.timedelta(seconds=window / 2) - start = timeutils.normalize_time(data.index[0]) - stop = timeutils.normalize_time( + start = utils.normalize_time(data.index[0]) + stop = utils.normalize_time( data.index[-1] + datetime.timedelta(seconds=min_grain)) # min_grain addition necessary since each bin of rolled-up data # is indexed by leftmost timestamp of bin. @@ -90,7 +89,7 @@ class MovingAverage(aggregates.CustomAggregator): right = 2 * half_span - left - msec # msec subtraction is so we don't include right endpoint in slice. - x = timeutils.normalize_time(x) + x = utils.normalize_time(x) if x - left >= start and x + right <= stop: dslice = data[x - left: x + right] diff --git a/gnocchi/cli.py b/gnocchi/cli.py index fdaa5264..06e1fbbc 100644 --- a/gnocchi/cli.py +++ b/gnocchi/cli.py @@ -22,7 +22,6 @@ from cotyledon import oslo_config_glue from futurist import periodics from oslo_config import cfg from oslo_log import log -from oslo_utils import timeutils import six import tenacity import tooz @@ -120,10 +119,9 @@ class MetricProcessBase(cotyledon.Service): time.sleep(self.startup_delay) while not self._shutdown.is_set(): - with timeutils.StopWatch() as timer: + with utils.StopWatch() as timer: self._run_job() - self._shutdown.wait(max(0, self.interval_delay - - timer.elapsed())) + self._shutdown.wait(max(0, self.interval_delay - timer.elapsed())) self._shutdown_done.set() def terminate(self): diff --git a/gnocchi/indexer/sqlalchemy_base.py b/gnocchi/indexer/sqlalchemy_base.py index 25cecc5d..1ebc60a9 100644 --- a/gnocchi/indexer/sqlalchemy_base.py +++ b/gnocchi/indexer/sqlalchemy_base.py @@ -21,7 +21,6 @@ import decimal import iso8601 from oslo_db.sqlalchemy import models -from oslo_utils import timeutils import six import sqlalchemy from sqlalchemy.dialects import mysql @@ -91,7 +90,7 @@ class 
PreciseTimestamp(types.TypeDecorator): def process_bind_param(self, value, dialect): if value is not None: - value = timeutils.normalize_time(value) + value = utils.normalize_time(value) if dialect.name == 'mysql': return self._dt_to_decimal(value) return value @@ -100,7 +99,7 @@ class PreciseTimestamp(types.TypeDecorator): if dialect.name == 'mysql': value = self._decimal_to_dt(value) if value is not None: - return timeutils.normalize_time(value).replace( + return utils.normalize_time(value).replace( tzinfo=iso8601.iso8601.UTC) @@ -116,7 +115,7 @@ class TimestampUTC(types.TypeDecorator): def process_bind_param(self, value, dialect): if value is not None: - return timeutils.normalize_time(value) + return utils.normalize_time(value) def process_result_value(self, value, dialect): if value is not None: diff --git a/gnocchi/storage/_carbonara.py b/gnocchi/storage/_carbonara.py index 4d367d1a..65983ad1 100644 --- a/gnocchi/storage/_carbonara.py +++ b/gnocchi/storage/_carbonara.py @@ -23,7 +23,6 @@ from concurrent import futures import iso8601 from oslo_config import cfg from oslo_log import log -from oslo_utils import timeutils import six import six.moves @@ -91,17 +90,17 @@ class CarbonaraBasedStorage(storage.StorageDriver): be retrieved, returns None. 
""" - with timeutils.StopWatch() as sw: + with utils.StopWatch() as sw: raw_measures = ( self._get_unaggregated_timeserie( metric) ) - if not raw_measures: - return - LOG.debug( - "Retrieve unaggregated measures " - "for %s in %.2fs", - metric.id, sw.elapsed()) + if not raw_measures: + return + LOG.debug( + "Retrieve unaggregated measures " + "for %s in %.2fs", + metric.id, sw.elapsed()) try: return carbonara.BoundTimeSerie.unserialize( raw_measures, block_size, back_window) @@ -447,23 +446,23 @@ class CarbonaraBasedStorage(storage.StorageDriver): new_first_block_timestamp) for aggregation in agg_methods)) - with timeutils.StopWatch() as sw: + with utils.StopWatch() as sw: ts.set_values(measures, before_truncate_callback=_map_add_measures, ignore_too_old_timestamps=True) - elapsed = sw.elapsed() - number_of_operations = (len(agg_methods) * len(definition)) - perf = "" - if elapsed > 0: - perf = " (%d points/s, %d measures/s)" % ( - ((number_of_operations * computed_points['number']) / - elapsed), - ((number_of_operations * len(measures)) / elapsed) - ) - LOG.debug("Computed new metric %s with %d new measures " - "in %.2f seconds%s", - metric.id, len(measures), elapsed, perf) + number_of_operations = (len(agg_methods) * len(definition)) + perf = "" + elapsed = sw.elapsed() + if elapsed > 0: + perf = " (%d points/s, %d measures/s)" % ( + ((number_of_operations * computed_points['number']) / + elapsed), + ((number_of_operations * len(measures)) / elapsed) + ) + LOG.debug("Computed new metric %s with %d new measures " + "in %.2f seconds%s", + metric.id, len(measures), elapsed, perf) self._store_unaggregated_timeserie(metric, ts.serialize()) diff --git a/gnocchi/tests/test_carbonara.py b/gnocchi/tests/test_carbonara.py index 4469eb2a..82ec819a 100644 --- a/gnocchi/tests/test_carbonara.py +++ b/gnocchi/tests/test_carbonara.py @@ -18,7 +18,7 @@ import functools import math import fixtures -from oslo_utils import timeutils +import iso8601 from oslotest import base import 
pandas import six @@ -138,13 +138,13 @@ class TestAggregatedTimeSerie(base.BaseTestCase): [(datetime.datetime(2014, 1, 1, 12, 0, 4), 1, 5), (datetime.datetime(2014, 1, 1, 12, 0, 9), 1, 6)], ts.fetch( - from_timestamp=timeutils.parse_isotime( + from_timestamp=iso8601.parse_date( "2014-01-01 12:00:04"))) self.assertEqual( [(datetime.datetime(2014, 1, 1, 12, 0, 4), 1, 5), (datetime.datetime(2014, 1, 1, 12, 0, 9), 1, 6)], ts.fetch( - from_timestamp=timeutils.parse_isotime( + from_timestamp=iso8601.parse_date( "2014-01-01 13:00:04+01:00"))) def test_before_epoch(self): diff --git a/gnocchi/tests/test_rest.py b/gnocchi/tests/test_rest.py index f5d979a6..9caf9b39 100644 --- a/gnocchi/tests/test_rest.py +++ b/gnocchi/tests/test_rest.py @@ -23,9 +23,9 @@ import hashlib import json import uuid +import iso8601 from keystonemiddleware import fixture as ksm_fixture import mock -from oslo_utils import timeutils import six from stevedore import extension import testscenarios @@ -417,7 +417,7 @@ class MetricTest(RestTest): self.app.get("/v1/metric/%s/measures" % metric['id'], status=403) - @mock.patch.object(timeutils, 'utcnow') + @mock.patch.object(utils, 'utcnow') def test_get_measure_start_relative(self, utcnow): """Make sure the timestamps can be relative to now.""" utcnow.return_value = datetime.datetime(2014, 1, 1, 10, 23) @@ -732,7 +732,7 @@ class ResourceTest(RestTest): @staticmethod def _strtime_to_httpdate(dt): return email_utils.formatdate(calendar.timegm( - timeutils.parse_isotime(dt).timetuple()), usegmt=True) + iso8601.parse_date(dt).timetuple()), usegmt=True) def _check_etag(self, response, resource): lastmodified = self._strtime_to_httpdate(resource['revision_start']) diff --git a/gnocchi/tests/test_storage.py b/gnocchi/tests/test_storage.py index d6a2fde1..7047f44d 100644 --- a/gnocchi/tests/test_storage.py +++ b/gnocchi/tests/test_storage.py @@ -16,8 +16,8 @@ import datetime import uuid +import iso8601 import mock -from oslo_utils import timeutils from oslotest 
import base import six.moves @@ -649,7 +649,7 @@ class TestStorageDriver(tests_base.TestCase): (utils.datetime_utc(2014, 1, 1, 12), 300.0, 69.0), ], self.storage.get_measures( self.metric, - from_timestamp=timeutils.parse_isotime("2014-1-1 13:00:00+01:00"), + from_timestamp=iso8601.parse_date("2014-1-1 13:00:00+01:00"), to_timestamp=datetime.datetime(2014, 1, 1, 12, 0, 2))) self.assertEqual([ diff --git a/gnocchi/tests/test_utils.py b/gnocchi/tests/test_utils.py index cbae6456..d90bc287 100644 --- a/gnocchi/tests/test_utils.py +++ b/gnocchi/tests/test_utils.py @@ -76,3 +76,30 @@ class TestResourceUUID(tests_base.TestCase): self.assertEqual( uuid.UUID('853e5c64-f45e-58b2-999c-96df856fbe3d'), utils.ResourceUUID("foo", "")) + + +class StopWatchTest(tests_base.TestCase): + def test_no_states(self): + watch = utils.StopWatch() + self.assertRaises(RuntimeError, watch.stop) + + def test_start_stop(self): + watch = utils.StopWatch() + watch.start() + watch.stop() + + def test_no_elapsed(self): + watch = utils.StopWatch() + self.assertRaises(RuntimeError, watch.elapsed) + + def test_elapsed(self): + watch = utils.StopWatch() + watch.start() + watch.stop() + elapsed = watch.elapsed() + self.assertAlmostEqual(elapsed, watch.elapsed()) + + def test_context_manager(self): + with utils.StopWatch() as watch: + pass + self.assertGreater(watch.elapsed(), 0) diff --git a/gnocchi/utils.py b/gnocchi/utils.py index 816548de..b7e92263 100644 --- a/gnocchi/utils.py +++ b/gnocchi/utils.py @@ -24,15 +24,14 @@ import os import uuid import iso8601 +import monotonic import numpy from oslo_log import log -from oslo_utils import timeutils import pandas as pd import six import tenacity from tooz import coordination - LOG = log.getLogger(__name__) # uuid5 namespace for id transformation. 
@@ -160,8 +159,16 @@ def to_timespan(value): def utcnow(): - """Better version of utcnow() that returns utcnow with a correct TZ.""" - return timeutils.utcnow(True) + """Version of utcnow() that returns utcnow with a correct TZ.""" + return datetime.datetime.now(tz=iso8601.iso8601.UTC) + + +def normalize_time(timestamp): + """Normalize time in arbitrary timezone to UTC naive object.""" + offset = timestamp.utcoffset() + if offset is None: + return timestamp + return timestamp.replace(tzinfo=None) - offset def datetime_utc(*args): @@ -214,3 +221,79 @@ def strtobool(v): if isinstance(v, bool): return v return bool(distutils.util.strtobool(v)) + + +class StopWatch(object): + """A simple timer/stopwatch helper class. + + Inspired by: apache-commons-lang java stopwatch. + + Not thread-safe (when a single watch is mutated by multiple threads at + the same time). Thread-safe when used by a single thread (not shared) or + when operations are performed in a thread-safe manner on these objects by + wrapping those operations with locks. + + It will use the `monotonic`_ pypi library to find an appropriate + monotonically increasing time providing function (which typically varies + depending on operating system and python version). + + .. _monotonic: https://pypi.python.org/pypi/monotonic/ + """ + _STARTED = object() + _STOPPED = object() + + def __init__(self): + self._started_at = None + self._stopped_at = None + self._state = None + + def start(self): + """Starts the watch (if not already started). + + NOTE(harlowja): resets any splits previously captured (if any). + """ + if self._state == self._STARTED: + return self + self._started_at = monotonic.monotonic() + self._state = self._STARTED + return self + + @staticmethod + def _delta_seconds(earlier, later): + # Uses max to avoid the delta/time going backwards (and thus negative). 
+ return max(0.0, later - earlier) + + def elapsed(self): + """Returns how many seconds have elapsed.""" + if self._state not in (self._STARTED, self._STOPPED): + raise RuntimeError("Can not get the elapsed time of a stopwatch" + " if it has not been started/stopped") + if self._state == self._STOPPED: + elapsed = self._delta_seconds(self._started_at, self._stopped_at) + else: + elapsed = self._delta_seconds( + self._started_at, monotonic.monotonic()) + return elapsed + + def __enter__(self): + """Starts the watch.""" + self.start() + return self + + def __exit__(self, type, value, traceback): + """Stops the watch (ignoring errors if stop fails).""" + try: + self.stop() + except RuntimeError: + pass + + def stop(self): + """Stops the watch.""" + if self._state == self._STOPPED: + return self + if self._state != self._STARTED: + raise RuntimeError("Can not stop a stopwatch that has not been" + " started") + self._stopped_at = monotonic.monotonic() + self._state = self._STOPPED + return self diff --git a/requirements.txt b/requirements.txt index 447aeca0..e06a0ecf 100644 --- a/requirements.txt +++ b/requirements.txt @@ -4,7 +4,6 @@ iso8601 oslo.config>=3.22.0 oslo.log>=2.3.0 oslo.policy>=0.3.0 -oslo.utils>=3.18.0 oslo.middleware>=3.22.0 pandas>=0.18.0 scipy>=0.18.1 # BSD @@ -22,3 +21,4 @@ tenacity>=3.1.0 # Apache-2.0 WebOb>=1.4.1 Paste PasteDeploy +monotonic -- GitLab From 8a2f36cbae79c2db77b3e9815736fe560e709ca3 Mon Sep 17 00:00:00 2001 From: gord chung Date: Fri, 19 May 2017 03:06:04 +0000 Subject: [PATCH 0748/1483] add release note for sacks Change-Id: I0c0f190f556b36579a3afea63dc79fa7b1a9f443 --- .../notes/incoming-sacks-413f4818882ab83d.yaml | 14 ++++++++++++++ 1 file changed, 14 insertions(+) create mode 100644 releasenotes/notes/incoming-sacks-413f4818882ab83d.yaml diff --git a/releasenotes/notes/incoming-sacks-413f4818882ab83d.yaml b/releasenotes/notes/incoming-sacks-413f4818882ab83d.yaml new file mode 100644 index 00000000..c2cf17ff --- /dev/null +++ 
b/releasenotes/notes/incoming-sacks-413f4818882ab83d.yaml @@ -0,0 +1,14 @@ +--- +features: + - | + New measures are now sharded into sacks to better distribute data across + storage driver as well as allow for improved scheduling of aggregation + workload. +upgrade: + - | + The storage driver needs to be upgraded. The number of sacks to distribute + across can be configured on upgrade by passing in ``num-storage-sacks`` + value on upgrade. A default number of sacks will be created if not set. + This can be reconfigured post-upgrade as well by using + ``gnocchi-change-sack-size`` cli. See documentation for hints on the number + of sacks to set for your environment and upgrade notes -- GitLab From 6377e25bdcca68be66fadf65aa16a6f174cfaa99 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Thu, 18 May 2017 07:37:14 +0200 Subject: [PATCH 0749/1483] wsgi: use pbr wsgi binary We don't need to provide the app.wsgi script anymore. pbr provides it within the gnocchi-api binary Change-Id: I6e16607128849a18b9e6eb1bc5558d1c9df64775 --- devstack/plugin.sh | 2 +- doc/source/running.rst | 2 +- gnocchi/rest/app.wsgi | 7 +++++++ .../notes/wsgi-script-deprecation-c6753a844ca0b411.yaml | 7 +++++++ 4 files changed, 16 insertions(+), 2 deletions(-) create mode 100644 releasenotes/notes/wsgi-script-deprecation-c6753a844ca0b411.yaml diff --git a/devstack/plugin.sh b/devstack/plugin.sh index f6e5a6dc..e1ef90b4 100644 --- a/devstack/plugin.sh +++ b/devstack/plugin.sh @@ -281,7 +281,7 @@ function configure_gnocchi { rm -f "$GNOCCHI_UWSGI_FILE" iniset "$GNOCCHI_UWSGI_FILE" uwsgi http $GNOCCHI_SERVICE_HOST:$GNOCCHI_SERVICE_PORT - iniset "$GNOCCHI_UWSGI_FILE" uwsgi wsgi-file "$GNOCCHI_DIR/gnocchi/rest/app.wsgi" + iniset "$GNOCCHI_UWSGI_FILE" uwsgi wsgi-file "/usr/local/bin/gnocchi-api" # This is running standalone iniset "$GNOCCHI_UWSGI_FILE" uwsgi master true # Set die-on-term & exit-on-reload so that uwsgi shuts down diff --git a/doc/source/running.rst b/doc/source/running.rst index 
0cbdbb13..d541715f 100644 --- a/doc/source/running.rst +++ b/doc/source/running.rst @@ -25,7 +25,7 @@ The following uwsgi configuration file can be used:: [uwsgi] http = localhost:8041 # Set the correct path depending on your installation - wsgi-file = /usr/lib/python2.7/dist-packages/gnocchi/rest/app.wsgi + wsgi-file = /usr/local/bin/gnocchi-api master = true die-on-term = true threads = 32 diff --git a/gnocchi/rest/app.wsgi b/gnocchi/rest/app.wsgi index b7fefed1..475d9acb 100644 --- a/gnocchi/rest/app.wsgi +++ b/gnocchi/rest/app.wsgi @@ -17,6 +17,13 @@ See http://pecan.readthedocs.org/en/latest/deployment.html for details. """ + +import debtcollector + from gnocchi.rest import app application = app.build_wsgi_app() +debtcollector.deprecate(prefix="The wsgi script gnocchi/rest/app.wsgi is deprecated", + postfix=", please use gnocchi-api binary as wsgi script instead", + version="4.0", removal_version="4.1", + category=RuntimeWarning) diff --git a/releasenotes/notes/wsgi-script-deprecation-c6753a844ca0b411.yaml b/releasenotes/notes/wsgi-script-deprecation-c6753a844ca0b411.yaml new file mode 100644 index 00000000..d2739ec7 --- /dev/null +++ b/releasenotes/notes/wsgi-script-deprecation-c6753a844ca0b411.yaml @@ -0,0 +1,7 @@ +--- +deprecations: + - | + The custom gnocchi/rest/app.wsgi is now deprecated, the gnocchi-api binary + should be used as wsgi script file. For example, with uwsgi "--wsgi-file + /usr/lib/python2.7/gnocchi/rest/app.wsgi" should be replaced by + "--wsgi-file /usr/bin/gnocchi-api". 
-- GitLab From 86694d656ce8b66be172603537be4dd57983a5e1 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Fri, 19 May 2017 07:35:26 +0200 Subject: [PATCH 0750/1483] travis: fix travis-ci docker image A couple of fix for the docker image used by travis: * locale package is no more installed by default * liberasurecode-dev is required, now * sphinx >= 1.6.0 have broken sphinx-versioning * Don't use sphinx math module Change-Id: Iba06d0c4667e2a11495fb25375de7152b2b02597 --- doc/source/running.rst | 4 +--- setup.cfg | 2 +- tools/travis-ci-setup.dockerfile | 17 +++++++++++------ 3 files changed, 13 insertions(+), 10 deletions(-) diff --git a/doc/source/running.rst b/doc/source/running.rst index 1880fde2..9e96730e 100644 --- a/doc/source/running.rst +++ b/doc/source/running.rst @@ -182,9 +182,7 @@ metrics the system will capture. Additionally, the number of sacks, should be higher than the total number of active `gnocchi-metricd` workers. In general, use the following equation to determine the appropriate `sacks` -value to set: - -.. math:: +value to set:: sacks value = number of **active** metrics / 300 diff --git a/setup.cfg b/setup.cfg index a2f544a7..6675c97b 100644 --- a/setup.cfg +++ b/setup.cfg @@ -58,7 +58,7 @@ file = lz4>=0.9.0 tooz>=1.38 doc = - sphinx + sphinx<1.6.0 sphinx_rtd_theme sphinxcontrib-httpdomain PyYAML diff --git a/tools/travis-ci-setup.dockerfile b/tools/travis-ci-setup.dockerfile index 784c14c8..be2179bc 100644 --- a/tools/travis-ci-setup.dockerfile +++ b/tools/travis-ci-setup.dockerfile @@ -2,12 +2,8 @@ FROM ubuntu:16.04 ENV GNOCCHI_SRC /home/tester/src ENV DEBIAN_FRONTEND noninteractive -#NOTE(sileht): really no utf-8 in 2017 !? 
-ENV LANG en_US.UTF-8 -RUN update-locale -RUN locale-gen $LANG - RUN apt-get update -y && apt-get install -qy \ + locales \ git \ wget \ nodejs \ @@ -17,7 +13,7 @@ RUN apt-get update -y && apt-get install -qy \ python3 \ python-dev \ python3-dev \ - python-tox \ + python-pip \ redis-server \ build-essential \ libffi-dev \ @@ -26,9 +22,18 @@ RUN apt-get update -y && apt-get install -qy \ mysql-client \ mysql-server \ librados-dev \ + liberasurecode-dev \ ceph \ && apt-get clean -y +#NOTE(sileht): really no utf-8 in 2017 !? +ENV LANG en_US.UTF-8 +RUN update-locale +RUN locale-gen $LANG + +#NOTE(sileht): Upgrade python dev tools +RUN pip install -U pip tox virtualenv + RUN useradd -ms /bin/bash tester RUN mkdir $GNOCCHI_SRC RUN chown -R tester: $GNOCCHI_SRC -- GitLab From 7cb2c01a2ea1aa457f5e5c827530124cea2e6f1b Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Fri, 19 May 2017 18:19:57 +0200 Subject: [PATCH 0751/1483] Add status badges in README --- README.rst | 7 +++++++ doc/source/index.rst | 2 +- 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/README.rst b/README.rst index ca172f4d..1bbd8925 100644 --- a/README.rst +++ b/README.rst @@ -2,6 +2,13 @@ Gnocchi - Metric as a Service =============================== +.. image:: https://travis-ci.org/gnocchixyz/gnocchi.png?branch=master + :target: https://travis-ci.org/gnocchixyz/gnocchi + :alt: Build Status + +.. image:: https://badge.fury.io/py/gnocchi.svg + :target: https://badge.fury.io/py/gnocchi + .. image:: doc/source/_static/gnocchi-logo.png Gnocchi is a multi-tenant timeseries, metrics and resources database. It diff --git a/doc/source/index.rst b/doc/source/index.rst index 6525abf7..c3f8d5c5 100644 --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -3,7 +3,7 @@ Gnocchi – Metric as a Service ================================== .. 
include:: ../../README.rst - :start-line: 6 + :start-line: 13 Key Features ------------ -- GitLab From 32a918b92b5dbc048bcab2f5b26b96c9396e746b Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Fri, 19 May 2017 18:40:46 +0200 Subject: [PATCH 0752/1483] doc: stop advertising OpenStack ML and change bugs link --- doc/source/index.rst | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/doc/source/index.rst b/doc/source/index.rst index c3f8d5c5..8a873a94 100644 --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -27,11 +27,8 @@ Community --------- You can join Gnocchi's community via the following channels: -- Bug tracker: https://bugs.launchpad.net/gnocchi +- Bug tracker: https://github.com/gnocchixyz/gnocchi/issues - IRC: #gnocchi on `Freenode `_ -- Mailing list: `openstack-dev@lists.openstack.org - `_ with - *[gnocchi]* in the `Subject` header. Why Gnocchi? ------------ -- GitLab From 108f4a6fa853064d3c8e1af19dc0078083ba608d Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Mon, 22 May 2017 12:40:18 +0200 Subject: [PATCH 0753/1483] Remove requirement comments Does not make any sense --- tox.ini | 1 - 1 file changed, 1 deletion(-) diff --git a/tox.ini b/tox.ini index 415d5e6a..b9fa1711 100644 --- a/tox.ini +++ b/tox.ini @@ -37,7 +37,6 @@ deps = .[test] postgresql: .[postgresql,{env:GNOCCHI_STORAGE_DEPS}] mysql: .[mysql,{env:GNOCCHI_STORAGE_DEPS}] {env:GNOCCHI_TEST_TARBALLS:} -# NOTE(tonyb): This project has chosen to *NOT* consume upper-constraints.txt commands = doc8 --ignore-path doc/source/rest.rst doc/source gnocchi-config-generator -- GitLab From 2da3291a3f0a15a434dcae8420f2e1599c4cb76e Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Mon, 22 May 2017 13:22:21 +0200 Subject: [PATCH 0754/1483] Add Gnocchi logo source files --- logo/gnocchi-bw.eps | 5771 ++++++++++++++++++++++++++++++++++++++++ logo/gnocchi-icon.eps | 5578 +++++++++++++++++++++++++++++++++++++++ logo/gnocchi.eps | 5775 +++++++++++++++++++++++++++++++++++++++++ 3 files 
changed, 17124 insertions(+) create mode 100644 logo/gnocchi-bw.eps create mode 100644 logo/gnocchi-icon.eps create mode 100644 logo/gnocchi.eps diff --git a/logo/gnocchi-bw.eps b/logo/gnocchi-bw.eps new file mode 100644 index 00000000..809af940 --- /dev/null +++ b/logo/gnocchi-bw.eps @@ -0,0 +1,5771 @@ +%!PS-Adobe-3.1 EPSF-3.0 +%ADO_DSC_Encoding: MacOS Roman +%%Title: gnocchi-nb.eps +%%Creator: Adobe Illustrator(R) 13.0 +%%For: Thierry Ung +%%CreationDate: 4/3/17 +%%BoundingBox: 0 0 1096 840 +%%HiResBoundingBox: 0 0 1096 840 +%%CropBox: 0 0 1096 840 +%%LanguageLevel: 2 +%%DocumentData: Clean7Bit +%ADOBeginClientInjection: DocumentHeader "AI11EPS" +%%AI8_CreatorVersion: 13.0.0 %AI9_PrintingDataBegin %AI3_Cropmarks: 36.0000 36.0000 1060.0000 804.0000 +%ADO_BuildNumber: Adobe Illustrator(R) 13.0.0 x409 R agm 4.4378 ct 5.1039 %ADO_ContainsXMP: MainFirst %AI7_Thumbnail: 128 100 8 %%BeginData: 5212 Hex Bytes %0000330000660000990000CC0033000033330033660033990033CC0033FF %0066000066330066660066990066CC0066FF009900009933009966009999 %0099CC0099FF00CC0000CC3300CC6600CC9900CCCC00CCFF00FF3300FF66 %00FF9900FFCC3300003300333300663300993300CC3300FF333300333333 %3333663333993333CC3333FF3366003366333366663366993366CC3366FF %3399003399333399663399993399CC3399FF33CC0033CC3333CC6633CC99 %33CCCC33CCFF33FF0033FF3333FF6633FF9933FFCC33FFFF660000660033 %6600666600996600CC6600FF6633006633336633666633996633CC6633FF %6666006666336666666666996666CC6666FF669900669933669966669999 %6699CC6699FF66CC0066CC3366CC6666CC9966CCCC66CCFF66FF0066FF33 %66FF6666FF9966FFCC66FFFF9900009900339900669900999900CC9900FF %9933009933339933669933999933CC9933FF996600996633996666996699 %9966CC9966FF9999009999339999669999999999CC9999FF99CC0099CC33 %99CC6699CC9999CCCC99CCFF99FF0099FF3399FF6699FF9999FFCC99FFFF %CC0000CC0033CC0066CC0099CC00CCCC00FFCC3300CC3333CC3366CC3399 %CC33CCCC33FFCC6600CC6633CC6666CC6699CC66CCCC66FFCC9900CC9933 %CC9966CC9999CC99CCCC99FFCCCC00CCCC33CCCC66CCCC99CCCCCCCCCCFF 
%CCFF00CCFF33CCFF66CCFF99CCFFCCCCFFFFFF0033FF0066FF0099FF00CC %FF3300FF3333FF3366FF3399FF33CCFF33FFFF6600FF6633FF6666FF6699 %FF66CCFF66FFFF9900FF9933FF9966FF9999FF99CCFF99FFFFCC00FFCC33 %FFCC66FFCC99FFCCCCFFCCFFFFFF33FFFF66FFFF99FFFFCC110000001100 %000011111111220000002200000022222222440000004400000044444444 %550000005500000055555555770000007700000077777777880000008800 %000088888888AA000000AA000000AAAAAAAABB000000BB000000BBBBBBBB %DD000000DD000000DDDDDDDDEE000000EE000000EEEEEEEE0000000000FF %00FF0000FFFFFF0000FF00FFFFFF00FFFFFF %524C45FDFCFFFDFCFFFDFCFFFDFCFFFDFCFFFDFCFFFDFCFFFDFCFFFDFCFF %FDFCFFFDFCFFFDFCFFFDFCFFFDFCFFFDFCFFFDFCFFFDFCFFFD9CFFA87D7D %7DFD08FFA87DA8FD6FFF2727F827F82727FD05FF7D27F827F827A8FD33FF %A85252F827F827277D7DFD11FF7D27F827F8527DFD17FF5227F827275227 %277DFFFFFF7D27F827F827F827A8FD30FFA852F827F827F827F827F827F8 %7DFD0EFF2727F827F827F827F8A8FD15FF52F852FFFFFF52F87DFFFFFF27 %F852A8FF7D27F87DFD2FFF7D27F827277DFD04A87D52F827F8A8FD0CFF52 %27F8FD04A87D2727F8A8FD14FF52277DFFFFFF27277DFFFFFFF8277DFFFF %FF522752FD2EFF7D27F8277DFD09FF272752FFFFFFA8FD07FF2727F8A8A8 %FD05FF2727F8FD07FF7DFD09FFA8A8FF52F87DFFFFFF52F87DFFFFFF27F8 %A8FFFFFF52F852FD2EFF27F852FD0BFF27F8522727F827F827277DA8FF7D %27F8A8FFA87D7D52A8FFFF27277DFFFFFF5227F827F827277DA8FF5227F8 %27F827F8277DFFFFFF2727F827277D272727FFFFFFF82752FD2DFF7DF827 %A8FFA8FFFFFF7DA8A8FFFF7DF827F827F827F827F827F8277D27F8A8A8FD %05FF7D7DA87DF827A87DF827F827F827F827F827F827F827F827F827F87D %FFFFFF52F827F827F827F827F852F827F8A8FD2DFF2727A8FD04FFA8F827 %F827F8522727F8527DFD05A87D27F827F827FFFD06A8FFFFFFA827F827F8 %27277DFD04A87D27F827277D7DFFA8A87D277DFFFFFFFD04A85227F82752 %7D7D7D27277DFD2CFF7D27F8FFA8FFA8A8F827F827F827F827F827FD08FF %A852F827A8FFFD04A87D7D527D7DFF5227F82727A8FD06FF272727FD07FF %277DFD08FFA827F87DFFFFFF52F87DFD2CFF7DF852FD04FFF827F827F827 %F827F827F8FD09FFA827F8FD0DFFF82727FD08FF2727FD08FF277DFD09FF %7D277DFFFFFF52277DFD2CFF522752FFA8FFA827F827F827F827F827F827 
%FFFFFFA8277DA8FFFFFF5227A8FFFD067D527D7DFFFF27F8A8FFFFFFA852 %52527DF8A8FFFFFFA85252527DF87DFFFFFFA852A8FD04FFF87DFFFFFF52 %F87DFD2CFF7DF87DFD04FFF827F827F8A8FFFFFFA8F8FFFFFFA827F852FF %FFFF7DF8A8FD0CFF5227FD04FF2727F827F852FD04FFF827F827F8277DFF %FFFF2727F8A8FFFFFF277DFFFFFF27277DFD2CFF522752FFA8FFA827F827 %F8277DFFA8FF7D27A8FFA8A8F82727FFA8FF7D27A8FFA87D527D52527DA8 %A8A8FF5227FFFFFF5227F827F82727FFFFFF5227F827F827F87DFFFFFF52 %F82752FFFFFF277DFFFFFF52F87DFD2CFF7DF87DFD04FFF827F827F8A8FF %FFFFA8F8FFFFFFA827F827FFFFFFA8F8FFA8A8FD0AFF7D7DFFFFFF52F827 %F827F87DFFFFFF52F827F827F8277DFFFFFF2727F87DFFFFFF527DFFFFFF %52277DFD2CFF7D27F8FFA8FFA87DF827F8277DFFA8FF7D27A8FFA8A8F827 %F8FFA8FF7D27A8FFA8FFFFFF7D7D7DA8A8A8FF7D52FFA8FF7D27F827F827 %52FFFFFF5227F827F827F87DA8FFFF52F8277DFFA8FF52A8A8FFA852F87D %FD2DFFF827A8FFFFFFA852F827F8A8FFFFFFA8F8FFFFFFA827F827FFFFFF %A8F8A8FFA87D7D7DA8A8FD05FF527DFD04FFF827F827F87DFD04FFF827F8 %27F827A8FFFFFF5227F8A8FFFFFF7DA8FFFFFF52277DFD2DFF52F852A8FF %A8FFA8A85252A8FFA8FF7D27A8FFA8A8F827F8FFA8FF7D2727FFA8FD06FF %A87DA8A827F8FFA8FFA8A8522752A852FFA8FFA8A8525252A827A8A8FFA8 %7DF8277DFFA8FF52A8A8FFA87DF87DFD2DFFA827F8A8FD0BFFA8F8FFFFFF %A827F827FFFFFFA8F827FD04FFA8A87D7D7DFFFFA8F8277DFD08FF7D7DFD %08FF7DA8FFFFFF5227F8A8FFFFFF7DA8FFFFFF52277DFD2EFF5227F87DA8 %FFA8FFA8FFA8FFA8FF7D27A8FFA8A8F827F8FFA8FF7D27F8A8A87D7DA8A8 %FFA87D7DFF2727F827A8FFA8FFA8FFA8FF7D27A8FFA8FFA8FFA8FF7DA8A8 %FFA87DF8277DFFA8FF52A8A8FFA87DF87DFD2FFF2727F827A8FD07FFA852 %F8FFA8FF7D27F827A8FFA87DF82727FFFFFFA87D52A8FFFF7D27F827F827 %52FD05FFA87DF8277DFD06FF7D7DFFA8FF5227F87DA8FFA8527DFFA8FF52 %277DFD30FF5227F827F827F827F827F827F827F827F827F827F827F827F8 %27F8277DFFA8A8A8FFA8FF7D27F852A827F827F827F827F827F827F827F8 %27F827F827F827F827F827F827F827F827F827F827F827F87DFD31FFA87D %2727F827F827F82727522727F827F87D7D27F827F827527DF82727A8A8FF %A8A82727F852FFFFFFA82727F827F827F8527DA82727F827F827F827F827 %F82727A87D27F827F827F827F827F87DFD35FFA8A87DFD04A8FD10FF7DF8 
%27F827F827F827F87DFD06FFA8A87DA8A8A8FD04FFA8A87DA87DFD62FF7D %52F82752527DFDFCFFFDFCFFFDFCFFFDFCFFFDFCFFFDFCFFFDFCFFFDFCFF %FDFCFFFDFCFFFDFCFFFDFCFFFDFCFFFDFCFFFDFCFFFDFCFFFDFCFFFDFCFF %FDFCFFFDFCFFFDFCFFFD15FFFF %%EndData +%ADOEndClientInjection: DocumentHeader "AI11EPS" +%%Pages: 1 +%%DocumentNeededResources: +%%DocumentSuppliedResources: procset Adobe_AGM_Image 1.0 0 +%%+ procset Adobe_CoolType_Utility_T42 1.0 0 +%%+ procset Adobe_CoolType_Utility_MAKEOCF 1.23 0 +%%+ procset Adobe_CoolType_Core 2.31 0 +%%+ procset Adobe_AGM_Core 2.0 0 +%%+ procset Adobe_AGM_Utils 1.0 0 +%%DocumentFonts: +%%DocumentNeededFonts: +%%DocumentNeededFeatures: +%%DocumentSuppliedFeatures: +%%DocumentProcessColors: Black +%%DocumentCustomColors: +%%CMYKCustomColor: +%%RGBCustomColor: +%%EndComments + + + + + + +%%BeginDefaults +%%ViewingOrientation: 1 0 0 1 +%%EndDefaults +%%BeginProlog +%%BeginResource: procset Adobe_AGM_Utils 1.0 0 +%%Version: 1.0 0 +%%Copyright: Copyright(C)2000-2006 Adobe Systems, Inc. All Rights Reserved. 
+systemdict/setpacking known +{currentpacking true setpacking}if +userdict/Adobe_AGM_Utils 73 dict dup begin put +/bdf +{bind def}bind def +/nd{null def}bdf +/xdf +{exch def}bdf +/ldf +{load def}bdf +/ddf +{put}bdf +/xddf +{3 -1 roll put}bdf +/xpt +{exch put}bdf +/ndf +{ + exch dup where{ + pop pop pop + }{ + xdf + }ifelse +}def +/cdndf +{ + exch dup currentdict exch known{ + pop pop + }{ + exch def + }ifelse +}def +/gx +{get exec}bdf +/ps_level + /languagelevel where{ + pop systemdict/languagelevel gx + }{ + 1 + }ifelse +def +/level2 + ps_level 2 ge +def +/level3 + ps_level 3 ge +def +/ps_version + {version cvr}stopped{-1}if +def +/set_gvm +{currentglobal exch setglobal}bdf +/reset_gvm +{setglobal}bdf +/makereadonlyarray +{ + /packedarray where{pop packedarray + }{ + array astore readonly}ifelse +}bdf +/map_reserved_ink_name +{ + dup type/stringtype eq{ + dup/Red eq{ + pop(_Red_) + }{ + dup/Green eq{ + pop(_Green_) + }{ + dup/Blue eq{ + pop(_Blue_) + }{ + dup()cvn eq{ + pop(Process) + }if + }ifelse + }ifelse + }ifelse + }if +}bdf +/AGMUTIL_GSTATE 22 dict def +/get_gstate +{ + AGMUTIL_GSTATE begin + /AGMUTIL_GSTATE_clr_spc currentcolorspace def + /AGMUTIL_GSTATE_clr_indx 0 def + /AGMUTIL_GSTATE_clr_comps 12 array def + mark currentcolor counttomark + {AGMUTIL_GSTATE_clr_comps AGMUTIL_GSTATE_clr_indx 3 -1 roll put + /AGMUTIL_GSTATE_clr_indx AGMUTIL_GSTATE_clr_indx 1 add def}repeat pop + /AGMUTIL_GSTATE_fnt rootfont def + /AGMUTIL_GSTATE_lw currentlinewidth def + /AGMUTIL_GSTATE_lc currentlinecap def + /AGMUTIL_GSTATE_lj currentlinejoin def + /AGMUTIL_GSTATE_ml currentmiterlimit def + currentdash/AGMUTIL_GSTATE_do xdf/AGMUTIL_GSTATE_da xdf + /AGMUTIL_GSTATE_sa currentstrokeadjust def + /AGMUTIL_GSTATE_clr_rnd currentcolorrendering def + /AGMUTIL_GSTATE_op currentoverprint def + /AGMUTIL_GSTATE_bg currentblackgeneration cvlit def + /AGMUTIL_GSTATE_ucr currentundercolorremoval cvlit def + currentcolortransfer cvlit/AGMUTIL_GSTATE_gy_xfer xdf cvlit/AGMUTIL_GSTATE_b_xfer 
xdf + cvlit/AGMUTIL_GSTATE_g_xfer xdf cvlit/AGMUTIL_GSTATE_r_xfer xdf + /AGMUTIL_GSTATE_ht currenthalftone def + /AGMUTIL_GSTATE_flt currentflat def + end +}def +/set_gstate +{ + AGMUTIL_GSTATE begin + AGMUTIL_GSTATE_clr_spc setcolorspace + AGMUTIL_GSTATE_clr_indx{AGMUTIL_GSTATE_clr_comps AGMUTIL_GSTATE_clr_indx 1 sub get + /AGMUTIL_GSTATE_clr_indx AGMUTIL_GSTATE_clr_indx 1 sub def}repeat setcolor + AGMUTIL_GSTATE_fnt setfont + AGMUTIL_GSTATE_lw setlinewidth + AGMUTIL_GSTATE_lc setlinecap + AGMUTIL_GSTATE_lj setlinejoin + AGMUTIL_GSTATE_ml setmiterlimit + AGMUTIL_GSTATE_da AGMUTIL_GSTATE_do setdash + AGMUTIL_GSTATE_sa setstrokeadjust + AGMUTIL_GSTATE_clr_rnd setcolorrendering + AGMUTIL_GSTATE_op setoverprint + AGMUTIL_GSTATE_bg cvx setblackgeneration + AGMUTIL_GSTATE_ucr cvx setundercolorremoval + AGMUTIL_GSTATE_r_xfer cvx AGMUTIL_GSTATE_g_xfer cvx AGMUTIL_GSTATE_b_xfer cvx + AGMUTIL_GSTATE_gy_xfer cvx setcolortransfer + AGMUTIL_GSTATE_ht/HalftoneType get dup 9 eq exch 100 eq or + { + currenthalftone/HalftoneType get AGMUTIL_GSTATE_ht/HalftoneType get ne + { + mark AGMUTIL_GSTATE_ht{sethalftone}stopped cleartomark + }if + }{ + AGMUTIL_GSTATE_ht sethalftone + }ifelse + AGMUTIL_GSTATE_flt setflat + end +}def +/get_gstate_and_matrix +{ + AGMUTIL_GSTATE begin + /AGMUTIL_GSTATE_ctm matrix currentmatrix def + end + get_gstate +}def +/set_gstate_and_matrix +{ + set_gstate + AGMUTIL_GSTATE begin + AGMUTIL_GSTATE_ctm setmatrix + end +}def +/AGMUTIL_str256 256 string def +/AGMUTIL_src256 256 string def +/AGMUTIL_dst64 64 string def +/AGMUTIL_srcLen nd +/AGMUTIL_ndx nd +/AGMUTIL_cpd nd +/capture_cpd{ + //Adobe_AGM_Utils/AGMUTIL_cpd currentpagedevice ddf +}def +/thold_halftone +{ + level3 + {sethalftone currenthalftone} + { + dup/HalftoneType get 3 eq + { + sethalftone currenthalftone + }{ + begin + Width Height mul{ + Thresholds read{pop}if + }repeat + end + currenthalftone + }ifelse + }ifelse +}def +/rdcmntline +{ + currentfile AGMUTIL_str256 readline pop + 
(%)anchorsearch{pop}if +}bdf +/filter_cmyk +{ + dup type/filetype ne{ + exch()/SubFileDecode filter + }{ + exch pop + } + ifelse + [ + exch + { + AGMUTIL_src256 readstring pop + dup length/AGMUTIL_srcLen exch def + /AGMUTIL_ndx 0 def + AGMCORE_plate_ndx 4 AGMUTIL_srcLen 1 sub{ + 1 index exch get + AGMUTIL_dst64 AGMUTIL_ndx 3 -1 roll put + /AGMUTIL_ndx AGMUTIL_ndx 1 add def + }for + pop + AGMUTIL_dst64 0 AGMUTIL_ndx getinterval + } + bind + /exec cvx + ]cvx +}bdf +/filter_indexed_devn +{ + cvi Names length mul names_index add Lookup exch get +}bdf +/filter_devn +{ + 4 dict begin + /srcStr xdf + /dstStr xdf + dup type/filetype ne{ + 0()/SubFileDecode filter + }if + [ + exch + [ + /devicen_colorspace_dict/AGMCORE_gget cvx/begin cvx + currentdict/srcStr get/readstring cvx/pop cvx + /dup cvx/length cvx 0/gt cvx[ + Adobe_AGM_Utils/AGMUTIL_ndx 0/ddf cvx + names_index Names length currentdict/srcStr get length 1 sub{ + 1/index cvx/exch cvx/get cvx + currentdict/dstStr get/AGMUTIL_ndx/load cvx 3 -1/roll cvx/put cvx + Adobe_AGM_Utils/AGMUTIL_ndx/AGMUTIL_ndx/load cvx 1/add cvx/ddf cvx + }for + currentdict/dstStr get 0/AGMUTIL_ndx/load cvx/getinterval cvx + ]cvx/if cvx + /end cvx + ]cvx + bind + /exec cvx + ]cvx + end +}bdf +/AGMUTIL_imagefile nd +/read_image_file +{ + AGMUTIL_imagefile 0 setfileposition + 10 dict begin + /imageDict xdf + /imbufLen Width BitsPerComponent mul 7 add 8 idiv def + /imbufIdx 0 def + /origDataSource imageDict/DataSource get def + /origMultipleDataSources imageDict/MultipleDataSources get def + /origDecode imageDict/Decode get def + /dstDataStr imageDict/Width get colorSpaceElemCnt mul string def + imageDict/MultipleDataSources known{MultipleDataSources}{false}ifelse + { + /imbufCnt imageDict/DataSource get length def + /imbufs imbufCnt array def + 0 1 imbufCnt 1 sub{ + /imbufIdx xdf + imbufs imbufIdx imbufLen string put + imageDict/DataSource get imbufIdx[AGMUTIL_imagefile imbufs imbufIdx get/readstring cvx/pop cvx]cvx put + }for + DeviceN_PS2{ + 
imageDict begin + /DataSource[DataSource/devn_sep_datasource cvx]cvx def + /MultipleDataSources false def + /Decode[0 1]def + end + }if + }{ + /imbuf imbufLen string def + Indexed_DeviceN level3 not and DeviceN_NoneName or{ + /srcDataStrs[imageDict begin + currentdict/MultipleDataSources known{MultipleDataSources{DataSource length}{1}ifelse}{1}ifelse + { + Width Decode length 2 div mul cvi string + }repeat + end]def + imageDict begin + /DataSource[AGMUTIL_imagefile Decode BitsPerComponent false 1/filter_indexed_devn load dstDataStr srcDataStrs devn_alt_datasource/exec cvx]cvx def + /Decode[0 1]def + end + }{ + imageDict/DataSource[1 string dup 0 AGMUTIL_imagefile Decode length 2 idiv string/readstring cvx/pop cvx names_index/get cvx/put cvx]cvx put + imageDict/Decode[0 1]put + }ifelse + }ifelse + imageDict exch + load exec + imageDict/DataSource origDataSource put + imageDict/MultipleDataSources origMultipleDataSources put + imageDict/Decode origDecode put + end +}bdf +/write_image_file +{ + begin + {(AGMUTIL_imagefile)(w+)file}stopped{ + false + }{ + Adobe_AGM_Utils/AGMUTIL_imagefile xddf + 2 dict begin + /imbufLen Width BitsPerComponent mul 7 add 8 idiv def + MultipleDataSources{DataSource 0 get}{DataSource}ifelse type/filetype eq{ + /imbuf imbufLen string def + }if + 1 1 Height MultipleDataSources not{Decode length 2 idiv mul}if{ + pop + MultipleDataSources{ + 0 1 DataSource length 1 sub{ + DataSource type dup + /arraytype eq{ + pop DataSource exch gx + }{ + /filetype eq{ + DataSource exch get imbuf readstring pop + }{ + DataSource exch get + }ifelse + }ifelse + AGMUTIL_imagefile exch writestring + }for + }{ + DataSource type dup + /arraytype eq{ + pop DataSource exec + }{ + /filetype eq{ + DataSource imbuf readstring pop + }{ + DataSource + }ifelse + }ifelse + AGMUTIL_imagefile exch writestring + }ifelse + }for + end + true + }ifelse + end +}bdf +/close_image_file +{ + AGMUTIL_imagefile closefile(AGMUTIL_imagefile)deletefile +}def +statusdict/product known 
userdict/AGMP_current_show known not and{ + /pstr statusdict/product get def + pstr(HP LaserJet 2200)eq + pstr(HP LaserJet 4000 Series)eq or + pstr(HP LaserJet 4050 Series )eq or + pstr(HP LaserJet 8000 Series)eq or + pstr(HP LaserJet 8100 Series)eq or + pstr(HP LaserJet 8150 Series)eq or + pstr(HP LaserJet 5000 Series)eq or + pstr(HP LaserJet 5100 Series)eq or + pstr(HP Color LaserJet 4500)eq or + pstr(HP Color LaserJet 4600)eq or + pstr(HP LaserJet 5Si)eq or + pstr(HP LaserJet 1200 Series)eq or + pstr(HP LaserJet 1300 Series)eq or + pstr(HP LaserJet 4100 Series)eq or + { + userdict/AGMP_current_show/show load put + userdict/show{ + currentcolorspace 0 get + /Pattern eq + {false charpath f} + {AGMP_current_show}ifelse + }put + }if + currentdict/pstr undef +}if +/consumeimagedata +{ + begin + AGMIMG_init_common + currentdict/MultipleDataSources known not + {/MultipleDataSources false def}if + MultipleDataSources + { + DataSource 0 get type + dup/filetype eq + { + 1 dict begin + /flushbuffer Width cvi string def + 1 1 Height cvi + { + pop + 0 1 DataSource length 1 sub + { + DataSource exch get + flushbuffer readstring pop pop + }for + }for + end + }if + dup/arraytype eq exch/packedarraytype eq or DataSource 0 get xcheck and + { + Width Height mul cvi + { + 0 1 DataSource length 1 sub + {dup DataSource exch gx length exch 0 ne{pop}if}for + dup 0 eq + {pop exit}if + sub dup 0 le + {exit}if + }loop + pop + }if + } + { + /DataSource load type + dup/filetype eq + { + 1 dict begin + /flushbuffer Width Decode length 2 idiv mul cvi string def + 1 1 Height{pop DataSource flushbuffer readstring pop pop}for + end + }if + dup/arraytype eq exch/packedarraytype eq or/DataSource load xcheck and + { + Height Width BitsPerComponent mul 8 BitsPerComponent sub add 8 idiv Decode length 2 idiv mul mul + { + DataSource length dup 0 eq + {pop exit}if + sub dup 0 le + {exit}if + }loop + pop + }if + }ifelse + end +}bdf +/addprocs +{ + 2{/exec load}repeat + 3 1 roll + [5 1 roll]bind cvx 
+}def +/modify_halftone_xfer +{ + currenthalftone dup length dict copy begin + currentdict 2 index known{ + 1 index load dup length dict copy begin + currentdict/TransferFunction known{ + /TransferFunction load + }{ + currenttransfer + }ifelse + addprocs/TransferFunction xdf + currentdict end def + currentdict end sethalftone + }{ + currentdict/TransferFunction known{ + /TransferFunction load + }{ + currenttransfer + }ifelse + addprocs/TransferFunction xdf + currentdict end sethalftone + pop + }ifelse +}def +/clonearray +{ + dup xcheck exch + dup length array exch + Adobe_AGM_Core/AGMCORE_tmp -1 ddf + { + Adobe_AGM_Core/AGMCORE_tmp 2 copy get 1 add ddf + dup type/dicttype eq + { + Adobe_AGM_Core/AGMCORE_tmp get + exch + clonedict + Adobe_AGM_Core/AGMCORE_tmp 4 -1 roll ddf + }if + dup type/arraytype eq + { + Adobe_AGM_Core/AGMCORE_tmp get exch + clonearray + Adobe_AGM_Core/AGMCORE_tmp 4 -1 roll ddf + }if + exch dup + Adobe_AGM_Core/AGMCORE_tmp get 4 -1 roll put + }forall + exch{cvx}if +}bdf +/clonedict +{ + dup length dict + begin + { + dup type/dicttype eq + {clonedict}if + dup type/arraytype eq + {clonearray}if + def + }forall + currentdict + end +}bdf +/DeviceN_PS2 +{ + /currentcolorspace AGMCORE_gget 0 get/DeviceN eq level3 not and +}bdf +/Indexed_DeviceN +{ + /indexed_colorspace_dict AGMCORE_gget dup null ne{ + dup/CSDBase known{ + /CSDBase get/CSD get_res/Names known + }{ + pop false + }ifelse + }{ + pop false + }ifelse +}bdf +/DeviceN_NoneName +{ + /Names where{ + pop + false Names + { + (None)eq or + }forall + }{ + false + }ifelse +}bdf +/DeviceN_PS2_inRip_seps +{ + /AGMCORE_in_rip_sep where + { + pop dup type dup/arraytype eq exch/packedarraytype eq or + { + dup 0 get/DeviceN eq level3 not and AGMCORE_in_rip_sep and + { + /currentcolorspace exch AGMCORE_gput + false + }{ + true + }ifelse + }{ + true + }ifelse + }{ + true + }ifelse +}bdf +/base_colorspace_type +{ + dup type/arraytype eq{0 get}if +}bdf +/currentdistillerparams where{pop 
currentdistillerparams/CoreDistVersion get 5000 lt}{true}ifelse +{ + /pdfmark_5{cleartomark}bind def +}{ + /pdfmark_5{pdfmark}bind def +}ifelse +/ReadBypdfmark_5 +{ + currentfile exch 0 exch/SubFileDecode filter + /currentdistillerparams where + {pop currentdistillerparams/CoreDistVersion get 5000 lt}{true}ifelse + {flushfile cleartomark} + {/PUT pdfmark}ifelse +}bdf +/xpdfm +{ + { + dup 0 get/Label eq + { + aload length[exch 1 add 1 roll/PAGELABEL + }{ + aload pop + [{ThisPage}<<5 -2 roll>>/PUT + }ifelse + pdfmark_5 + }forall +}bdf +/ds{ + Adobe_AGM_Utils begin +}bdf +/dt{ + currentdict Adobe_AGM_Utils eq{ + end + }if +}bdf +systemdict/setpacking known +{setpacking}if +%%EndResource +%%BeginResource: procset Adobe_AGM_Core 2.0 0 +%%Version: 2.0 0 +%%Copyright: Copyright(C)1997-2007 Adobe Systems, Inc. All Rights Reserved. +systemdict/setpacking known +{ + currentpacking + true setpacking +}if +userdict/Adobe_AGM_Core 209 dict dup begin put +/Adobe_AGM_Core_Id/Adobe_AGM_Core_2.0_0 def +/AGMCORE_str256 256 string def +/AGMCORE_save nd +/AGMCORE_graphicsave nd +/AGMCORE_c 0 def +/AGMCORE_m 0 def +/AGMCORE_y 0 def +/AGMCORE_k 0 def +/AGMCORE_cmykbuf 4 array def +/AGMCORE_screen[currentscreen]cvx def +/AGMCORE_tmp 0 def +/AGMCORE_&setgray nd +/AGMCORE_&setcolor nd +/AGMCORE_&setcolorspace nd +/AGMCORE_&setcmykcolor nd +/AGMCORE_cyan_plate nd +/AGMCORE_magenta_plate nd +/AGMCORE_yellow_plate nd +/AGMCORE_black_plate nd +/AGMCORE_plate_ndx nd +/AGMCORE_get_ink_data nd +/AGMCORE_is_cmyk_sep nd +/AGMCORE_host_sep nd +/AGMCORE_avoid_L2_sep_space nd +/AGMCORE_distilling nd +/AGMCORE_composite_job nd +/AGMCORE_producing_seps nd +/AGMCORE_ps_level -1 def +/AGMCORE_ps_version -1 def +/AGMCORE_environ_ok nd +/AGMCORE_CSD_cache 0 dict def +/AGMCORE_currentoverprint false def +/AGMCORE_deltaX nd +/AGMCORE_deltaY nd +/AGMCORE_name nd +/AGMCORE_sep_special nd +/AGMCORE_err_strings 4 dict def +/AGMCORE_cur_err nd +/AGMCORE_current_spot_alias false def +/AGMCORE_inverting false def 
+/AGMCORE_feature_dictCount nd +/AGMCORE_feature_opCount nd +/AGMCORE_feature_ctm nd +/AGMCORE_ConvertToProcess false def +/AGMCORE_Default_CTM matrix def +/AGMCORE_Default_PageSize nd +/AGMCORE_Default_flatness nd +/AGMCORE_currentbg nd +/AGMCORE_currentucr nd +/AGMCORE_pattern_paint_type 0 def +/knockout_unitsq nd +currentglobal true setglobal +[/CSA/Gradient/Procedure] +{ + /Generic/Category findresource dup length dict copy/Category defineresource pop +}forall +setglobal +/AGMCORE_key_known +{ + where{ + /Adobe_AGM_Core_Id known + }{ + false + }ifelse +}ndf +/flushinput +{ + save + 2 dict begin + /CompareBuffer 3 -1 roll def + /readbuffer 256 string def + mark + { + currentfile readbuffer{readline}stopped + {cleartomark mark} + { + not + {pop exit} + if + CompareBuffer eq + {exit} + if + }ifelse + }loop + cleartomark + end + restore +}bdf +/getspotfunction +{ + AGMCORE_screen exch pop exch pop + dup type/dicttype eq{ + dup/HalftoneType get 1 eq{ + /SpotFunction get + }{ + dup/HalftoneType get 2 eq{ + /GraySpotFunction get + }{ + pop + { + abs exch abs 2 copy add 1 gt{ + 1 sub dup mul exch 1 sub dup mul add 1 sub + }{ + dup mul exch dup mul add 1 exch sub + }ifelse + }bind + }ifelse + }ifelse + }if +}def +/np +{newpath}bdf +/clp_npth +{clip np}def +/eoclp_npth +{eoclip np}def +/npth_clp +{np clip}def +/graphic_setup +{ + /AGMCORE_graphicsave save store + concat + 0 setgray + 0 setlinecap + 0 setlinejoin + 1 setlinewidth + []0 setdash + 10 setmiterlimit + np + false setoverprint + false setstrokeadjust + //Adobe_AGM_Core/spot_alias gx + /Adobe_AGM_Image where{ + pop + Adobe_AGM_Image/spot_alias 2 copy known{ + gx + }{ + pop pop + }ifelse + }if + /sep_colorspace_dict null AGMCORE_gput + 100 dict begin + /dictstackcount countdictstack def + /showpage{}def + mark +}def +/graphic_cleanup +{ + cleartomark + dictstackcount 1 countdictstack 1 sub{end}for + end + AGMCORE_graphicsave restore +}def +/compose_error_msg +{ + grestoreall initgraphics + /Helvetica findfont 10 
scalefont setfont + /AGMCORE_deltaY 100 def + /AGMCORE_deltaX 310 def + clippath pathbbox np pop pop 36 add exch 36 add exch moveto + 0 AGMCORE_deltaY rlineto AGMCORE_deltaX 0 rlineto + 0 AGMCORE_deltaY neg rlineto AGMCORE_deltaX neg 0 rlineto closepath + 0 AGMCORE_&setgray + gsave 1 AGMCORE_&setgray fill grestore + 1 setlinewidth gsave stroke grestore + currentpoint AGMCORE_deltaY 15 sub add exch 8 add exch moveto + /AGMCORE_deltaY 12 def + /AGMCORE_tmp 0 def + AGMCORE_err_strings exch get + { + dup 32 eq + { + pop + AGMCORE_str256 0 AGMCORE_tmp getinterval + stringwidth pop currentpoint pop add AGMCORE_deltaX 28 add gt + { + currentpoint AGMCORE_deltaY sub exch pop + clippath pathbbox pop pop pop 44 add exch moveto + }if + AGMCORE_str256 0 AGMCORE_tmp getinterval show( )show + 0 1 AGMCORE_str256 length 1 sub + { + AGMCORE_str256 exch 0 put + }for + /AGMCORE_tmp 0 def + }{ + AGMCORE_str256 exch AGMCORE_tmp xpt + /AGMCORE_tmp AGMCORE_tmp 1 add def + }ifelse + }forall +}bdf +/AGMCORE_CMYKDeviceNColorspaces[ + [/Separation/None/DeviceCMYK{0 0 0}] + [/Separation(Black)/DeviceCMYK{0 0 0 4 -1 roll}bind] + [/Separation(Yellow)/DeviceCMYK{0 0 3 -1 roll 0}bind] + [/DeviceN[(Yellow)(Black)]/DeviceCMYK{0 0 4 2 roll}bind] + [/Separation(Magenta)/DeviceCMYK{0 exch 0 0}bind] + [/DeviceN[(Magenta)(Black)]/DeviceCMYK{0 3 1 roll 0 exch}bind] + [/DeviceN[(Magenta)(Yellow)]/DeviceCMYK{0 3 1 roll 0}bind] + [/DeviceN[(Magenta)(Yellow)(Black)]/DeviceCMYK{0 4 1 roll}bind] + [/Separation(Cyan)/DeviceCMYK{0 0 0}] + [/DeviceN[(Cyan)(Black)]/DeviceCMYK{0 0 3 -1 roll}bind] + [/DeviceN[(Cyan)(Yellow)]/DeviceCMYK{0 exch 0}bind] + [/DeviceN[(Cyan)(Yellow)(Black)]/DeviceCMYK{0 3 1 roll}bind] + [/DeviceN[(Cyan)(Magenta)]/DeviceCMYK{0 0}] + [/DeviceN[(Cyan)(Magenta)(Black)]/DeviceCMYK{0 exch}bind] + [/DeviceN[(Cyan)(Magenta)(Yellow)]/DeviceCMYK{0}] + [/DeviceCMYK] +]def +/ds{ + Adobe_AGM_Core begin + /currentdistillerparams where + { + pop currentdistillerparams/CoreDistVersion get 5000 lt + 
{<>setdistillerparams}if + }if + /AGMCORE_ps_version xdf + /AGMCORE_ps_level xdf + errordict/AGM_handleerror known not{ + errordict/AGM_handleerror errordict/handleerror get put + errordict/handleerror{ + Adobe_AGM_Core begin + $error/newerror get AGMCORE_cur_err null ne and{ + $error/newerror false put + AGMCORE_cur_err compose_error_msg + }if + $error/newerror true put + end + errordict/AGM_handleerror get exec + }bind put + }if + /AGMCORE_environ_ok + ps_level AGMCORE_ps_level ge + ps_version AGMCORE_ps_version ge and + AGMCORE_ps_level -1 eq or + def + AGMCORE_environ_ok not + {/AGMCORE_cur_err/AGMCORE_bad_environ def}if + /AGMCORE_&setgray systemdict/setgray get def + level2{ + /AGMCORE_&setcolor systemdict/setcolor get def + /AGMCORE_&setcolorspace systemdict/setcolorspace get def + }if + /AGMCORE_currentbg currentblackgeneration def + /AGMCORE_currentucr currentundercolorremoval def + /AGMCORE_Default_flatness currentflat def + /AGMCORE_distilling + /product where{ + pop systemdict/setdistillerparams known product(Adobe PostScript Parser)ne and + }{ + false + }ifelse + def + /AGMCORE_GSTATE AGMCORE_key_known not{ + /AGMCORE_GSTATE 21 dict def + /AGMCORE_tmpmatrix matrix def + /AGMCORE_gstack 32 array def + /AGMCORE_gstackptr 0 def + /AGMCORE_gstacksaveptr 0 def + /AGMCORE_gstackframekeys 14 def + /AGMCORE_&gsave/gsave ldf + /AGMCORE_&grestore/grestore ldf + /AGMCORE_&grestoreall/grestoreall ldf + /AGMCORE_&save/save ldf + /AGMCORE_&setoverprint/setoverprint ldf + /AGMCORE_gdictcopy{ + begin + {def}forall + end + }def + /AGMCORE_gput{ + AGMCORE_gstack AGMCORE_gstackptr get + 3 1 roll + put + }def + /AGMCORE_gget{ + AGMCORE_gstack AGMCORE_gstackptr get + exch + get + }def + /gsave{ + AGMCORE_&gsave + AGMCORE_gstack AGMCORE_gstackptr get + AGMCORE_gstackptr 1 add + dup 32 ge{limitcheck}if + /AGMCORE_gstackptr exch store + AGMCORE_gstack AGMCORE_gstackptr get + AGMCORE_gdictcopy + }def + /grestore{ + AGMCORE_&grestore + AGMCORE_gstackptr 1 sub + dup 
AGMCORE_gstacksaveptr lt{1 add}if + dup AGMCORE_gstack exch get dup/AGMCORE_currentoverprint known + {/AGMCORE_currentoverprint get setoverprint}{pop}ifelse + /AGMCORE_gstackptr exch store + }def + /grestoreall{ + AGMCORE_&grestoreall + /AGMCORE_gstackptr AGMCORE_gstacksaveptr store + }def + /save{ + AGMCORE_&save + AGMCORE_gstack AGMCORE_gstackptr get + AGMCORE_gstackptr 1 add + dup 32 ge{limitcheck}if + /AGMCORE_gstackptr exch store + /AGMCORE_gstacksaveptr AGMCORE_gstackptr store + AGMCORE_gstack AGMCORE_gstackptr get + AGMCORE_gdictcopy + }def + /setoverprint{ + dup/AGMCORE_currentoverprint exch AGMCORE_gput AGMCORE_&setoverprint + }def + 0 1 AGMCORE_gstack length 1 sub{ + AGMCORE_gstack exch AGMCORE_gstackframekeys dict put + }for + }if + level3/AGMCORE_&sysshfill AGMCORE_key_known not and + { + /AGMCORE_&sysshfill systemdict/shfill get def + /AGMCORE_&sysmakepattern systemdict/makepattern get def + /AGMCORE_&usrmakepattern/makepattern load def + }if + /currentcmykcolor[0 0 0 0]AGMCORE_gput + /currentstrokeadjust false AGMCORE_gput + /currentcolorspace[/DeviceGray]AGMCORE_gput + /sep_tint 0 AGMCORE_gput + /devicen_tints[0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0]AGMCORE_gput + /sep_colorspace_dict null AGMCORE_gput + /devicen_colorspace_dict null AGMCORE_gput + /indexed_colorspace_dict null AGMCORE_gput + /currentcolor_intent()AGMCORE_gput + /customcolor_tint 1 AGMCORE_gput + /absolute_colorimetric_crd null AGMCORE_gput + /relative_colorimetric_crd null AGMCORE_gput + /saturation_crd null AGMCORE_gput + /perceptual_crd null AGMCORE_gput + currentcolortransfer cvlit/AGMCore_gray_xfer xdf cvlit/AGMCore_b_xfer xdf + cvlit/AGMCore_g_xfer xdf cvlit/AGMCore_r_xfer xdf + << + /MaxPatternItem currentsystemparams/MaxPatternCache get + >> + setuserparams + end +}def +/ps +{ + /setcmykcolor where{ + pop + Adobe_AGM_Core/AGMCORE_&setcmykcolor/setcmykcolor load put + }if + Adobe_AGM_Core begin + /setcmykcolor + { + 4 copy AGMCORE_cmykbuf 
astore/currentcmykcolor exch AGMCORE_gput + 1 sub 4 1 roll + 3{ + 3 index add neg dup 0 lt{ + pop 0 + }if + 3 1 roll + }repeat + setrgbcolor pop + }ndf + /currentcmykcolor + { + /currentcmykcolor AGMCORE_gget aload pop + }ndf + /setoverprint + {pop}ndf + /currentoverprint + {false}ndf + /AGMCORE_cyan_plate 1 0 0 0 test_cmyk_color_plate def + /AGMCORE_magenta_plate 0 1 0 0 test_cmyk_color_plate def + /AGMCORE_yellow_plate 0 0 1 0 test_cmyk_color_plate def + /AGMCORE_black_plate 0 0 0 1 test_cmyk_color_plate def + /AGMCORE_plate_ndx + AGMCORE_cyan_plate{ + 0 + }{ + AGMCORE_magenta_plate{ + 1 + }{ + AGMCORE_yellow_plate{ + 2 + }{ + AGMCORE_black_plate{ + 3 + }{ + 4 + }ifelse + }ifelse + }ifelse + }ifelse + def + /AGMCORE_have_reported_unsupported_color_space false def + /AGMCORE_report_unsupported_color_space + { + AGMCORE_have_reported_unsupported_color_space false eq + { + (Warning: Job contains content that cannot be separated with on-host methods. This content appears on the black plate, and knocks out all other plates.)== + Adobe_AGM_Core/AGMCORE_have_reported_unsupported_color_space true ddf + }if + }def + /AGMCORE_composite_job + AGMCORE_cyan_plate AGMCORE_magenta_plate and AGMCORE_yellow_plate and AGMCORE_black_plate and def + /AGMCORE_in_rip_sep + /AGMCORE_in_rip_sep where{ + pop AGMCORE_in_rip_sep + }{ + AGMCORE_distilling + { + false + }{ + userdict/Adobe_AGM_OnHost_Seps known{ + false + }{ + level2{ + currentpagedevice/Separations 2 copy known{ + get + }{ + pop pop false + }ifelse + }{ + false + }ifelse + }ifelse + }ifelse + }ifelse + def + /AGMCORE_producing_seps AGMCORE_composite_job not AGMCORE_in_rip_sep or def + /AGMCORE_host_sep AGMCORE_producing_seps AGMCORE_in_rip_sep not and def + /AGM_preserve_spots + /AGM_preserve_spots where{ + pop AGM_preserve_spots + }{ + AGMCORE_distilling AGMCORE_producing_seps or + }ifelse + def + /AGM_is_distiller_preserving_spotimages + { + currentdistillerparams/PreserveOverprintSettings known + { + 
currentdistillerparams/PreserveOverprintSettings get + { + currentdistillerparams/ColorConversionStrategy known + { + currentdistillerparams/ColorConversionStrategy get + /sRGB ne + }{ + true + }ifelse + }{ + false + }ifelse + }{ + false + }ifelse + }def + /convert_spot_to_process where{pop}{ + /convert_spot_to_process + { + //Adobe_AGM_Core begin + dup map_alias{ + /Name get exch pop + }if + dup dup(None)eq exch(All)eq or + { + pop false + }{ + AGMCORE_host_sep + { + gsave + 1 0 0 0 setcmykcolor currentgray 1 exch sub + 0 1 0 0 setcmykcolor currentgray 1 exch sub + 0 0 1 0 setcmykcolor currentgray 1 exch sub + 0 0 0 1 setcmykcolor currentgray 1 exch sub + add add add 0 eq + { + pop false + }{ + false setoverprint + current_spot_alias false set_spot_alias + 1 1 1 1 6 -1 roll findcmykcustomcolor 1 setcustomcolor + set_spot_alias + currentgray 1 ne + }ifelse + grestore + }{ + AGMCORE_distilling + { + pop AGM_is_distiller_preserving_spotimages not + }{ + //Adobe_AGM_Core/AGMCORE_name xddf + false + //Adobe_AGM_Core/AGMCORE_pattern_paint_type get 0 eq + AGMUTIL_cpd/OverrideSeparations known and + { + AGMUTIL_cpd/OverrideSeparations get + { + /HqnSpots/ProcSet resourcestatus + { + pop pop pop true + }if + }if + }if + { + AGMCORE_name/HqnSpots/ProcSet findresource/TestSpot gx not + }{ + gsave + [/Separation AGMCORE_name/DeviceGray{}]AGMCORE_&setcolorspace + false + AGMUTIL_cpd/SeparationColorNames 2 copy known + { + get + {AGMCORE_name eq or}forall + not + }{ + pop pop pop true + }ifelse + grestore + }ifelse + }ifelse + }ifelse + }ifelse + end + }def + }ifelse + /convert_to_process where{pop}{ + /convert_to_process + { + dup length 0 eq + { + pop false + }{ + AGMCORE_host_sep + { + dup true exch + { + dup(Cyan)eq exch + dup(Magenta)eq 3 -1 roll or exch + dup(Yellow)eq 3 -1 roll or exch + dup(Black)eq 3 -1 roll or + {pop} + {convert_spot_to_process and}ifelse + } + forall + { + true exch + { + dup(Cyan)eq exch + dup(Magenta)eq 3 -1 roll or exch + dup(Yellow)eq 3 -1 roll 
or exch + (Black)eq or and + }forall + not + }{pop false}ifelse + }{ + false exch + { + /PhotoshopDuotoneList where{pop false}{true}ifelse + { + dup(Cyan)eq exch + dup(Magenta)eq 3 -1 roll or exch + dup(Yellow)eq 3 -1 roll or exch + dup(Black)eq 3 -1 roll or + {pop} + {convert_spot_to_process or}ifelse + } + { + convert_spot_to_process or + } + ifelse + } + forall + }ifelse + }ifelse + }def + }ifelse + /AGMCORE_avoid_L2_sep_space + version cvr 2012 lt + level2 and + AGMCORE_producing_seps not and + def + /AGMCORE_is_cmyk_sep + AGMCORE_cyan_plate AGMCORE_magenta_plate or AGMCORE_yellow_plate or AGMCORE_black_plate or + def + /AGM_avoid_0_cmyk where{ + pop AGM_avoid_0_cmyk + }{ + AGM_preserve_spots + userdict/Adobe_AGM_OnHost_Seps known + userdict/Adobe_AGM_InRip_Seps known or + not and + }ifelse + { + /setcmykcolor[ + { + 4 copy add add add 0 eq currentoverprint and{ + pop 0.0005 + }if + }/exec cvx + /AGMCORE_&setcmykcolor load dup type/operatortype ne{ + /exec cvx + }if + ]cvx def + }if + /AGMCORE_IsSeparationAProcessColor + { + dup(Cyan)eq exch dup(Magenta)eq exch dup(Yellow)eq exch(Black)eq or or or + }def + AGMCORE_host_sep{ + /setcolortransfer + { + AGMCORE_cyan_plate{ + pop pop pop + }{ + AGMCORE_magenta_plate{ + 4 3 roll pop pop pop + }{ + AGMCORE_yellow_plate{ + 4 2 roll pop pop pop + }{ + 4 1 roll pop pop pop + }ifelse + }ifelse + }ifelse + settransfer + } + def + /AGMCORE_get_ink_data + AGMCORE_cyan_plate{ + {pop pop pop} + }{ + AGMCORE_magenta_plate{ + {4 3 roll pop pop pop} + }{ + AGMCORE_yellow_plate{ + {4 2 roll pop pop pop} + }{ + {4 1 roll pop pop pop} + }ifelse + }ifelse + }ifelse + def + /AGMCORE_RemoveProcessColorNames + { + 1 dict begin + /filtername + { + dup/Cyan eq 1 index(Cyan)eq or + {pop(_cyan_)}if + dup/Magenta eq 1 index(Magenta)eq or + {pop(_magenta_)}if + dup/Yellow eq 1 index(Yellow)eq or + {pop(_yellow_)}if + dup/Black eq 1 index(Black)eq or + {pop(_black_)}if + }def + dup type/arraytype eq + {[exch{filtername}forall]} + 
{filtername}ifelse + end + }def + level3{ + /AGMCORE_IsCurrentColor + { + dup AGMCORE_IsSeparationAProcessColor + { + AGMCORE_plate_ndx 0 eq + {dup(Cyan)eq exch/Cyan eq or}if + AGMCORE_plate_ndx 1 eq + {dup(Magenta)eq exch/Magenta eq or}if + AGMCORE_plate_ndx 2 eq + {dup(Yellow)eq exch/Yellow eq or}if + AGMCORE_plate_ndx 3 eq + {dup(Black)eq exch/Black eq or}if + AGMCORE_plate_ndx 4 eq + {pop false}if + }{ + gsave + false setoverprint + current_spot_alias false set_spot_alias + 1 1 1 1 6 -1 roll findcmykcustomcolor 1 setcustomcolor + set_spot_alias + currentgray 1 ne + grestore + }ifelse + }def + /AGMCORE_filter_functiondatasource + { + 5 dict begin + /data_in xdf + data_in type/stringtype eq + { + /ncomp xdf + /comp xdf + /string_out data_in length ncomp idiv string def + 0 ncomp data_in length 1 sub + { + string_out exch dup ncomp idiv exch data_in exch ncomp getinterval comp get 255 exch sub put + }for + string_out + }{ + string/string_in xdf + /string_out 1 string def + /component xdf + [ + data_in string_in/readstring cvx + [component/get cvx 255/exch cvx/sub cvx string_out/exch cvx 0/exch cvx/put cvx string_out]cvx + [/pop cvx()]cvx/ifelse cvx + ]cvx/ReusableStreamDecode filter + }ifelse + end + }def + /AGMCORE_separateShadingFunction + { + 2 dict begin + /paint? xdf + /channel xdf + dup type/dicttype eq + { + begin + FunctionType 0 eq + { + /DataSource channel Range length 2 idiv DataSource AGMCORE_filter_functiondatasource def + currentdict/Decode known + {/Decode Decode channel 2 mul 2 getinterval def}if + paint? not + {/Decode[1 1]def}if + }if + FunctionType 2 eq + { + paint? + { + /C0[C0 channel get 1 exch sub]def + /C1[C1 channel get 1 exch sub]def + }{ + /C0[1]def + /C1[1]def + }ifelse + }if + FunctionType 3 eq + { + /Functions[Functions{channel paint? AGMCORE_separateShadingFunction}forall]def + }if + currentdict/Range known + {/Range[0 1]def}if + currentdict + end}{ + channel get 0 paint? 
AGMCORE_separateShadingFunction + }ifelse + end + }def + /AGMCORE_separateShading + { + 3 -1 roll begin + currentdict/Function known + { + currentdict/Background known + {[1 index{Background 3 index get 1 exch sub}{1}ifelse]/Background xdf}if + Function 3 1 roll AGMCORE_separateShadingFunction/Function xdf + /ColorSpace[/DeviceGray]def + }{ + ColorSpace dup type/arraytype eq{0 get}if/DeviceCMYK eq + { + /ColorSpace[/DeviceN[/_cyan_/_magenta_/_yellow_/_black_]/DeviceCMYK{}]def + }{ + ColorSpace dup 1 get AGMCORE_RemoveProcessColorNames 1 exch put + }ifelse + ColorSpace 0 get/Separation eq + { + { + [1/exch cvx/sub cvx]cvx + }{ + [/pop cvx 1]cvx + }ifelse + ColorSpace 3 3 -1 roll put + pop + }{ + { + [exch ColorSpace 1 get length 1 sub exch sub/index cvx 1/exch cvx/sub cvx ColorSpace 1 get length 1 add 1/roll cvx ColorSpace 1 get length{/pop cvx}repeat]cvx + }{ + pop[ColorSpace 1 get length{/pop cvx}repeat cvx 1]cvx + }ifelse + ColorSpace 3 3 -1 roll bind put + }ifelse + ColorSpace 2/DeviceGray put + }ifelse + end + }def + /AGMCORE_separateShadingDict + { + dup/ColorSpace get + dup type/arraytype ne + {[exch]}if + dup 0 get/DeviceCMYK eq + { + exch begin + currentdict + AGMCORE_cyan_plate + {0 true}if + AGMCORE_magenta_plate + {1 true}if + AGMCORE_yellow_plate + {2 true}if + AGMCORE_black_plate + {3 true}if + AGMCORE_plate_ndx 4 eq + {0 false}if + dup not currentoverprint and + {/AGMCORE_ignoreshade true def}if + AGMCORE_separateShading + currentdict + end exch + }if + dup 0 get/Separation eq + { + exch begin + ColorSpace 1 get dup/None ne exch/All ne and + { + ColorSpace 1 get AGMCORE_IsCurrentColor AGMCORE_plate_ndx 4 lt and ColorSpace 1 get AGMCORE_IsSeparationAProcessColor not and + { + ColorSpace 2 get dup type/arraytype eq{0 get}if/DeviceCMYK eq + { + /ColorSpace + [ + /Separation + ColorSpace 1 get + /DeviceGray + [ + ColorSpace 3 get/exec cvx + 4 AGMCORE_plate_ndx sub -1/roll cvx + 4 1/roll cvx + 3[/pop cvx]cvx/repeat cvx + 1/exch cvx/sub cvx + ]cvx + ]def + 
}{ + AGMCORE_report_unsupported_color_space + AGMCORE_black_plate not + { + currentdict 0 false AGMCORE_separateShading + }if + }ifelse + }{ + currentdict ColorSpace 1 get AGMCORE_IsCurrentColor + 0 exch + dup not currentoverprint and + {/AGMCORE_ignoreshade true def}if + AGMCORE_separateShading + }ifelse + }if + currentdict + end exch + }if + dup 0 get/DeviceN eq + { + exch begin + ColorSpace 1 get convert_to_process + { + ColorSpace 2 get dup type/arraytype eq{0 get}if/DeviceCMYK eq + { + /ColorSpace + [ + /DeviceN + ColorSpace 1 get + /DeviceGray + [ + ColorSpace 3 get/exec cvx + 4 AGMCORE_plate_ndx sub -1/roll cvx + 4 1/roll cvx + 3[/pop cvx]cvx/repeat cvx + 1/exch cvx/sub cvx + ]cvx + ]def + }{ + AGMCORE_report_unsupported_color_space + AGMCORE_black_plate not + { + currentdict 0 false AGMCORE_separateShading + /ColorSpace[/DeviceGray]def + }if + }ifelse + }{ + currentdict + false -1 ColorSpace 1 get + { + AGMCORE_IsCurrentColor + { + 1 add + exch pop true exch exit + }if + 1 add + }forall + exch + dup not currentoverprint and + {/AGMCORE_ignoreshade true def}if + AGMCORE_separateShading + }ifelse + currentdict + end exch + }if + dup 0 get dup/DeviceCMYK eq exch dup/Separation eq exch/DeviceN eq or or not + { + exch begin + ColorSpace dup type/arraytype eq + {0 get}if + /DeviceGray ne + { + AGMCORE_report_unsupported_color_space + AGMCORE_black_plate not + { + ColorSpace 0 get/CIEBasedA eq + { + /ColorSpace[/Separation/_ciebaseda_/DeviceGray{}]def + }if + ColorSpace 0 get dup/CIEBasedABC eq exch dup/CIEBasedDEF eq exch/DeviceRGB eq or or + { + /ColorSpace[/DeviceN[/_red_/_green_/_blue_]/DeviceRGB{}]def + }if + ColorSpace 0 get/CIEBasedDEFG eq + { + /ColorSpace[/DeviceN[/_cyan_/_magenta_/_yellow_/_black_]/DeviceCMYK{}]def + }if + currentdict 0 false AGMCORE_separateShading + }if + }if + currentdict + end exch + }if + pop + dup/AGMCORE_ignoreshade known + { + begin + /ColorSpace[/Separation(None)/DeviceGray{}]def + currentdict end + }if + }def + /shfill + { + 
AGMCORE_separateShadingDict + dup/AGMCORE_ignoreshade known + {pop} + {AGMCORE_&sysshfill}ifelse + }def + /makepattern + { + exch + dup/PatternType get 2 eq + { + clonedict + begin + /Shading Shading AGMCORE_separateShadingDict def + Shading/AGMCORE_ignoreshade known + currentdict end exch + {pop<>}if + exch AGMCORE_&sysmakepattern + }{ + exch AGMCORE_&usrmakepattern + }ifelse + }def + }if + }if + AGMCORE_in_rip_sep{ + /setcustomcolor + { + exch aload pop + dup 7 1 roll inRip_spot_has_ink not { + 4{4 index mul 4 1 roll} + repeat + /DeviceCMYK setcolorspace + 6 -2 roll pop pop + }{ + //Adobe_AGM_Core begin + /AGMCORE_k xdf/AGMCORE_y xdf/AGMCORE_m xdf/AGMCORE_c xdf + end + [/Separation 4 -1 roll/DeviceCMYK + {dup AGMCORE_c mul exch dup AGMCORE_m mul exch dup AGMCORE_y mul exch AGMCORE_k mul} + ] + setcolorspace + }ifelse + setcolor + }ndf + /setseparationgray + { + [/Separation(All)/DeviceGray{}]setcolorspace_opt + 1 exch sub setcolor + }ndf + }{ + /setseparationgray + { + AGMCORE_&setgray + }ndf + }ifelse + /findcmykcustomcolor + { + 5 makereadonlyarray + }ndf + /setcustomcolor + { + exch aload pop pop + 4{4 index mul 4 1 roll}repeat + setcmykcolor pop + }ndf + /has_color + /colorimage where{ + AGMCORE_producing_seps{ + pop true + }{ + systemdict eq + }ifelse + }{ + false + }ifelse + def + /map_index + { + 1 index mul exch getinterval{255 div}forall + }bdf + /map_indexed_devn + { + Lookup Names length 3 -1 roll cvi map_index + }bdf + /n_color_components + { + base_colorspace_type + dup/DeviceGray eq{ + pop 1 + }{ + /DeviceCMYK eq{ + 4 + }{ + 3 + }ifelse + }ifelse + }bdf + level2{ + /mo/moveto ldf + /li/lineto ldf + /cv/curveto ldf + /knockout_unitsq + { + 1 setgray + 0 0 1 1 rectfill + }def + level2/setcolorspace AGMCORE_key_known not and{ + /AGMCORE_&&&setcolorspace/setcolorspace ldf + /AGMCORE_ReplaceMappedColor + { + dup type dup/arraytype eq exch/packedarraytype eq or + { + /AGMCORE_SpotAliasAry2 where{ + begin + dup 0 get dup/Separation eq + { + pop + dup 
length array copy + dup dup 1 get + current_spot_alias + { + dup map_alias + { + false set_spot_alias + dup 1 exch setsepcolorspace + true set_spot_alias + begin + /sep_colorspace_dict currentdict AGMCORE_gput + pop pop pop + [ + /Separation Name + CSA map_csa + MappedCSA + /sep_colorspace_proc load + ] + dup Name + end + }if + }if + map_reserved_ink_name 1 xpt + }{ + /DeviceN eq + { + dup length array copy + dup dup 1 get[ + exch{ + current_spot_alias{ + dup map_alias{ + /Name get exch pop + }if + }if + map_reserved_ink_name + }forall + ]1 xpt + }if + }ifelse + end + }if + }if + }def + /setcolorspace + { + dup type dup/arraytype eq exch/packedarraytype eq or + { + dup 0 get/Indexed eq + { + AGMCORE_distilling + { + /PhotoshopDuotoneList where + { + pop false + }{ + true + }ifelse + }{ + true + }ifelse + { + aload pop 3 -1 roll + AGMCORE_ReplaceMappedColor + 3 1 roll 4 array astore + }if + }{ + AGMCORE_ReplaceMappedColor + }ifelse + }if + DeviceN_PS2_inRip_seps{AGMCORE_&&&setcolorspace}if + }def + }if + }{ + /adj + { + currentstrokeadjust{ + transform + 0.25 sub round 0.25 add exch + 0.25 sub round 0.25 add exch + itransform + }if + }def + /mo{ + adj moveto + }def + /li{ + adj lineto + }def + /cv{ + 6 2 roll adj + 6 2 roll adj + 6 2 roll adj curveto + }def + /knockout_unitsq + { + 1 setgray + 8 8 1[8 0 0 8 0 0]{}image + }def + /currentstrokeadjust{ + /currentstrokeadjust AGMCORE_gget + }def + /setstrokeadjust{ + /currentstrokeadjust exch AGMCORE_gput + }def + /setcolorspace + { + /currentcolorspace exch AGMCORE_gput + }def + /currentcolorspace + { + /currentcolorspace AGMCORE_gget + }def + /setcolor_devicecolor + { + base_colorspace_type + dup/DeviceGray eq{ + pop setgray + }{ + /DeviceCMYK eq{ + setcmykcolor + }{ + setrgbcolor + }ifelse + }ifelse + }def + /setcolor + { + currentcolorspace 0 get + dup/DeviceGray ne{ + dup/DeviceCMYK ne{ + dup/DeviceRGB ne{ + dup/Separation eq{ + pop + currentcolorspace 3 gx + currentcolorspace 2 get + }{ + dup/Indexed eq{ + pop + 
currentcolorspace 3 get dup type/stringtype eq{ + currentcolorspace 1 get n_color_components + 3 -1 roll map_index + }{ + exec + }ifelse + currentcolorspace 1 get + }{ + /AGMCORE_cur_err/AGMCORE_invalid_color_space def + AGMCORE_invalid_color_space + }ifelse + }ifelse + }if + }if + }if + setcolor_devicecolor + }def + }ifelse + /sop/setoverprint ldf + /lw/setlinewidth ldf + /lc/setlinecap ldf + /lj/setlinejoin ldf + /ml/setmiterlimit ldf + /dsh/setdash ldf + /sadj/setstrokeadjust ldf + /gry/setgray ldf + /rgb/setrgbcolor ldf + /cmyk[ + /currentcolorspace[/DeviceCMYK]/AGMCORE_gput cvx + /setcmykcolor load dup type/operatortype ne{/exec cvx}if + ]cvx bdf + level3 AGMCORE_host_sep not and{ + /nzopmsc{ + 6 dict begin + /kk exch def + /yy exch def + /mm exch def + /cc exch def + /sum 0 def + cc 0 ne{/sum sum 2#1000 or def cc}if + mm 0 ne{/sum sum 2#0100 or def mm}if + yy 0 ne{/sum sum 2#0010 or def yy}if + kk 0 ne{/sum sum 2#0001 or def kk}if + AGMCORE_CMYKDeviceNColorspaces sum get setcolorspace + sum 0 eq{0}if + end + setcolor + }bdf + }{ + /nzopmsc/cmyk ldf + }ifelse + /sep/setsepcolor ldf + /devn/setdevicencolor ldf + /idx/setindexedcolor ldf + /colr/setcolor ldf + /csacrd/set_csa_crd ldf + /sepcs/setsepcolorspace ldf + /devncs/setdevicencolorspace ldf + /idxcs/setindexedcolorspace ldf + /cp/closepath ldf + /clp/clp_npth ldf + /eclp/eoclp_npth ldf + /f/fill ldf + /ef/eofill ldf + /@/stroke ldf + /nclp/npth_clp ldf + /gset/graphic_setup ldf + /gcln/graphic_cleanup ldf + /ct/concat ldf + /cf/currentfile ldf + /fl/filter ldf + /rs/readstring ldf + /AGMCORE_def_ht currenthalftone def + /clonedict Adobe_AGM_Utils begin/clonedict load end def + /clonearray Adobe_AGM_Utils begin/clonearray load end def + currentdict{ + dup xcheck 1 index type dup/arraytype eq exch/packedarraytype eq or and{ + bind + }if + def + }forall + /getrampcolor + { + /indx exch def + 0 1 NumComp 1 sub + { + dup + Samples exch get + dup type/stringtype eq{indx get}if + exch + Scaling exch get aload 
pop + 3 1 roll + mul add + }for + ColorSpaceFamily/Separation eq + {sep} + { + ColorSpaceFamily/DeviceN eq + {devn}{setcolor}ifelse + }ifelse + }bdf + /sssetbackground{ + aload pop + ColorSpaceFamily/Separation eq + {sep} + { + ColorSpaceFamily/DeviceN eq + {devn}{setcolor}ifelse + }ifelse + }bdf + /RadialShade + { + 40 dict begin + /ColorSpaceFamily xdf + /background xdf + /ext1 xdf + /ext0 xdf + /BBox xdf + /r2 xdf + /c2y xdf + /c2x xdf + /r1 xdf + /c1y xdf + /c1x xdf + /rampdict xdf + /setinkoverprint where{pop/setinkoverprint{pop}def}if + gsave + BBox length 0 gt + { + np + BBox 0 get BBox 1 get moveto + BBox 2 get BBox 0 get sub 0 rlineto + 0 BBox 3 get BBox 1 get sub rlineto + BBox 2 get BBox 0 get sub neg 0 rlineto + closepath + clip + np + }if + c1x c2x eq + { + c1y c2y lt{/theta 90 def}{/theta 270 def}ifelse + }{ + /slope c2y c1y sub c2x c1x sub div def + /theta slope 1 atan def + c2x c1x lt c2y c1y ge and{/theta theta 180 sub def}if + c2x c1x lt c2y c1y lt and{/theta theta 180 add def}if + }ifelse + gsave + clippath + c1x c1y translate + theta rotate + -90 rotate + {pathbbox}stopped + {0 0 0 0}if + /yMax xdf + /xMax xdf + /yMin xdf + /xMin xdf + grestore + xMax xMin eq yMax yMin eq or + { + grestore + end + }{ + /max{2 copy gt{pop}{exch pop}ifelse}bdf + /min{2 copy lt{pop}{exch pop}ifelse}bdf + rampdict begin + 40 dict begin + background length 0 gt{background sssetbackground gsave clippath fill grestore}if + gsave + c1x c1y translate + theta rotate + -90 rotate + /c2y c1x c2x sub dup mul c1y c2y sub dup mul add sqrt def + /c1y 0 def + /c1x 0 def + /c2x 0 def + ext0 + { + 0 getrampcolor + c2y r2 add r1 sub 0.0001 lt + { + c1x c1y r1 360 0 arcn + pathbbox + /aymax exch def + /axmax exch def + /aymin exch def + /axmin exch def + /bxMin xMin axmin min def + /byMin yMin aymin min def + /bxMax xMax axmax max def + /byMax yMax aymax max def + bxMin byMin moveto + bxMax byMin lineto + bxMax byMax lineto + bxMin byMax lineto + bxMin byMin lineto + eofill + }{ + 
c2y r1 add r2 le + { + c1x c1y r1 0 360 arc + fill + } + { + c2x c2y r2 0 360 arc fill + r1 r2 eq + { + /p1x r1 neg def + /p1y c1y def + /p2x r1 def + /p2y c1y def + p1x p1y moveto p2x p2y lineto p2x yMin lineto p1x yMin lineto + fill + }{ + /AA r2 r1 sub c2y div def + AA -1 eq + {/theta 89.99 def} + {/theta AA 1 AA dup mul sub sqrt div 1 atan def} + ifelse + /SS1 90 theta add dup sin exch cos div def + /p1x r1 SS1 SS1 mul SS1 SS1 mul 1 add div sqrt mul neg def + /p1y p1x SS1 div neg def + /SS2 90 theta sub dup sin exch cos div def + /p2x r1 SS2 SS2 mul SS2 SS2 mul 1 add div sqrt mul def + /p2y p2x SS2 div neg def + r1 r2 gt + { + /L1maxX p1x yMin p1y sub SS1 div add def + /L2maxX p2x yMin p2y sub SS2 div add def + }{ + /L1maxX 0 def + /L2maxX 0 def + }ifelse + p1x p1y moveto p2x p2y lineto L2maxX L2maxX p2x sub SS2 mul p2y add lineto + L1maxX L1maxX p1x sub SS1 mul p1y add lineto + fill + }ifelse + }ifelse + }ifelse + }if + c1x c2x sub dup mul + c1y c2y sub dup mul + add 0.5 exp + 0 dtransform + dup mul exch dup mul add 0.5 exp 72 div + 0 72 matrix defaultmatrix dtransform dup mul exch dup mul add sqrt + 72 0 matrix defaultmatrix dtransform dup mul exch dup mul add sqrt + 1 index 1 index lt{exch}if pop + /hires xdf + hires mul + /numpix xdf + /numsteps NumSamples def + /rampIndxInc 1 def + /subsampling false def + numpix 0 ne + { + NumSamples numpix div 0.5 gt + { + /numsteps numpix 2 div round cvi dup 1 le{pop 2}if def + /rampIndxInc NumSamples 1 sub numsteps div def + /subsampling true def + }if + }if + /xInc c2x c1x sub numsteps div def + /yInc c2y c1y sub numsteps div def + /rInc r2 r1 sub numsteps div def + /cx c1x def + /cy c1y def + /radius r1 def + np + xInc 0 eq yInc 0 eq rInc 0 eq and and + { + 0 getrampcolor + cx cy radius 0 360 arc + stroke + NumSamples 1 sub getrampcolor + cx cy radius 72 hires div add 0 360 arc + 0 setlinewidth + stroke + }{ + 0 + numsteps + { + dup + subsampling{round cvi}if + getrampcolor + cx cy radius 0 360 arc + /cx cx xInc add 
def + /cy cy yInc add def + /radius radius rInc add def + cx cy radius 360 0 arcn + eofill + rampIndxInc add + }repeat + pop + }ifelse + ext1 + { + c2y r2 add r1 lt + { + c2x c2y r2 0 360 arc + fill + }{ + c2y r1 add r2 sub 0.0001 le + { + c2x c2y r2 360 0 arcn + pathbbox + /aymax exch def + /axmax exch def + /aymin exch def + /axmin exch def + /bxMin xMin axmin min def + /byMin yMin aymin min def + /bxMax xMax axmax max def + /byMax yMax aymax max def + bxMin byMin moveto + bxMax byMin lineto + bxMax byMax lineto + bxMin byMax lineto + bxMin byMin lineto + eofill + }{ + c2x c2y r2 0 360 arc fill + r1 r2 eq + { + /p1x r2 neg def + /p1y c2y def + /p2x r2 def + /p2y c2y def + p1x p1y moveto p2x p2y lineto p2x yMax lineto p1x yMax lineto + fill + }{ + /AA r2 r1 sub c2y div def + AA -1 eq + {/theta 89.99 def} + {/theta AA 1 AA dup mul sub sqrt div 1 atan def} + ifelse + /SS1 90 theta add dup sin exch cos div def + /p1x r2 SS1 SS1 mul SS1 SS1 mul 1 add div sqrt mul neg def + /p1y c2y p1x SS1 div sub def + /SS2 90 theta sub dup sin exch cos div def + /p2x r2 SS2 SS2 mul SS2 SS2 mul 1 add div sqrt mul def + /p2y c2y p2x SS2 div sub def + r1 r2 lt + { + /L1maxX p1x yMax p1y sub SS1 div add def + /L2maxX p2x yMax p2y sub SS2 div add def + }{ + /L1maxX 0 def + /L2maxX 0 def + }ifelse + p1x p1y moveto p2x p2y lineto L2maxX L2maxX p2x sub SS2 mul p2y add lineto + L1maxX L1maxX p1x sub SS1 mul p1y add lineto + fill + }ifelse + }ifelse + }ifelse + }if + grestore + grestore + end + end + end + }ifelse + }bdf + /GenStrips + { + 40 dict begin + /ColorSpaceFamily xdf + /background xdf + /ext1 xdf + /ext0 xdf + /BBox xdf + /y2 xdf + /x2 xdf + /y1 xdf + /x1 xdf + /rampdict xdf + /setinkoverprint where{pop/setinkoverprint{pop}def}if + gsave + BBox length 0 gt + { + np + BBox 0 get BBox 1 get moveto + BBox 2 get BBox 0 get sub 0 rlineto + 0 BBox 3 get BBox 1 get sub rlineto + BBox 2 get BBox 0 get sub neg 0 rlineto + closepath + clip + np + }if + x1 x2 eq + { + y1 y2 lt{/theta 90 
def}{/theta 270 def}ifelse + }{ + /slope y2 y1 sub x2 x1 sub div def + /theta slope 1 atan def + x2 x1 lt y2 y1 ge and{/theta theta 180 sub def}if + x2 x1 lt y2 y1 lt and{/theta theta 180 add def}if + } + ifelse + gsave + clippath + x1 y1 translate + theta rotate + {pathbbox}stopped + {0 0 0 0}if + /yMax exch def + /xMax exch def + /yMin exch def + /xMin exch def + grestore + xMax xMin eq yMax yMin eq or + { + grestore + end + }{ + rampdict begin + 20 dict begin + background length 0 gt{background sssetbackground gsave clippath fill grestore}if + gsave + x1 y1 translate + theta rotate + /xStart 0 def + /xEnd x2 x1 sub dup mul y2 y1 sub dup mul add 0.5 exp def + /ySpan yMax yMin sub def + /numsteps NumSamples def + /rampIndxInc 1 def + /subsampling false def + xStart 0 transform + xEnd 0 transform + 3 -1 roll + sub dup mul + 3 1 roll + sub dup mul + add 0.5 exp 72 div + 0 72 matrix defaultmatrix dtransform dup mul exch dup mul add sqrt + 72 0 matrix defaultmatrix dtransform dup mul exch dup mul add sqrt + 1 index 1 index lt{exch}if pop + mul + /numpix xdf + numpix 0 ne + { + NumSamples numpix div 0.5 gt + { + /numsteps numpix 2 div round cvi dup 1 le{pop 2}if def + /rampIndxInc NumSamples 1 sub numsteps div def + /subsampling true def + }if + }if + ext0 + { + 0 getrampcolor + xMin xStart lt + { + xMin yMin xMin neg ySpan rectfill + }if + }if + /xInc xEnd xStart sub numsteps div def + /x xStart def + 0 + numsteps + { + dup + subsampling{round cvi}if + getrampcolor + x yMin xInc ySpan rectfill + /x x xInc add def + rampIndxInc add + }repeat + pop + ext1{ + xMax xEnd gt + { + xEnd yMin xMax xEnd sub ySpan rectfill + }if + }if + grestore + grestore + end + end + end + }ifelse + }bdf +}def +/pt +{ + end +}def +/dt{ +}def +/pgsv{ + //Adobe_AGM_Core/AGMCORE_save save put +}def +/pgrs{ + //Adobe_AGM_Core/AGMCORE_save get restore +}def +systemdict/findcolorrendering known{ + /findcolorrendering systemdict/findcolorrendering get def +}if +systemdict/setcolorrendering known{ + 
/setcolorrendering systemdict/setcolorrendering get def +}if +/test_cmyk_color_plate +{ + gsave + setcmykcolor currentgray 1 ne + grestore +}def +/inRip_spot_has_ink +{ + dup//Adobe_AGM_Core/AGMCORE_name xddf + convert_spot_to_process not +}def +/map255_to_range +{ + 1 index sub + 3 -1 roll 255 div mul add +}def +/set_csa_crd +{ + /sep_colorspace_dict null AGMCORE_gput + begin + CSA get_csa_by_name setcolorspace_opt + set_crd + end +} +def +/map_csa +{ + currentdict/MappedCSA known{MappedCSA null ne}{false}ifelse + {pop}{get_csa_by_name/MappedCSA xdf}ifelse +}def +/setsepcolor +{ + /sep_colorspace_dict AGMCORE_gget begin + dup/sep_tint exch AGMCORE_gput + TintProc + end +}def +/setdevicencolor +{ + /devicen_colorspace_dict AGMCORE_gget begin + Names length copy + Names length 1 sub -1 0 + { + /devicen_tints AGMCORE_gget 3 1 roll xpt + }for + TintProc + end +}def +/sep_colorspace_proc +{ + /AGMCORE_tmp exch store + /sep_colorspace_dict AGMCORE_gget begin + currentdict/Components known{ + Components aload pop + TintMethod/Lab eq{ + 2{AGMCORE_tmp mul NComponents 1 roll}repeat + LMax sub AGMCORE_tmp mul LMax add NComponents 1 roll + }{ + TintMethod/Subtractive eq{ + NComponents{ + AGMCORE_tmp mul NComponents 1 roll + }repeat + }{ + NComponents{ + 1 sub AGMCORE_tmp mul 1 add NComponents 1 roll + }repeat + }ifelse + }ifelse + }{ + ColorLookup AGMCORE_tmp ColorLookup length 1 sub mul round cvi get + aload pop + }ifelse + end +}def +/sep_colorspace_gray_proc +{ + /AGMCORE_tmp exch store + /sep_colorspace_dict AGMCORE_gget begin + GrayLookup AGMCORE_tmp GrayLookup length 1 sub mul round cvi get + end +}def +/sep_proc_name +{ + dup 0 get + dup/DeviceRGB eq exch/DeviceCMYK eq or level2 not and has_color not and{ + pop[/DeviceGray] + /sep_colorspace_gray_proc + }{ + /sep_colorspace_proc + }ifelse +}def +/setsepcolorspace +{ + current_spot_alias{ + dup begin + Name map_alias{ + exch pop + }if + end + }if + dup/sep_colorspace_dict exch AGMCORE_gput + begin + CSA map_csa + 
/AGMCORE_sep_special Name dup()eq exch(All)eq or store + AGMCORE_avoid_L2_sep_space{ + [/Indexed MappedCSA sep_proc_name 255 exch + {255 div}/exec cvx 3 -1 roll[4 1 roll load/exec cvx]cvx + ]setcolorspace_opt + /TintProc{ + 255 mul round cvi setcolor + }bdf + }{ + MappedCSA 0 get/DeviceCMYK eq + currentdict/Components known and + AGMCORE_sep_special not and{ + /TintProc[ + Components aload pop Name findcmykcustomcolor + /exch cvx/setcustomcolor cvx + ]cvx bdf + }{ + AGMCORE_host_sep Name(All)eq and{ + /TintProc{ + 1 exch sub setseparationgray + }bdf + }{ + AGMCORE_in_rip_sep MappedCSA 0 get/DeviceCMYK eq and + AGMCORE_host_sep or + Name()eq and{ + /TintProc[ + MappedCSA sep_proc_name exch 0 get/DeviceCMYK eq{ + cvx/setcmykcolor cvx + }{ + cvx/setgray cvx + }ifelse + ]cvx bdf + }{ + AGMCORE_producing_seps MappedCSA 0 get dup/DeviceCMYK eq exch/DeviceGray eq or and AGMCORE_sep_special not and{ + /TintProc[ + /dup cvx + MappedCSA sep_proc_name cvx exch + 0 get/DeviceGray eq{ + 1/exch cvx/sub cvx 0 0 0 4 -1/roll cvx + }if + /Name cvx/findcmykcustomcolor cvx/exch cvx + AGMCORE_host_sep{ + AGMCORE_is_cmyk_sep + /Name cvx + /AGMCORE_IsSeparationAProcessColor load/exec cvx + /not cvx/and cvx + }{ + Name inRip_spot_has_ink not + }ifelse + [ + /pop cvx 1 + ]cvx/if cvx + /setcustomcolor cvx + ]cvx bdf + }{ + /TintProc{setcolor}bdf + [/Separation Name MappedCSA sep_proc_name load]setcolorspace_opt + }ifelse + }ifelse + }ifelse + }ifelse + }ifelse + set_crd + setsepcolor + end +}def +/additive_blend +{ + 3 dict begin + /numarrays xdf + /numcolors xdf + 0 1 numcolors 1 sub + { + /c1 xdf + 1 + 0 1 numarrays 1 sub + { + 1 exch add/index cvx + c1/get cvx/mul cvx + }for + numarrays 1 add 1/roll cvx + }for + numarrays[/pop cvx]cvx/repeat cvx + end +}def +/subtractive_blend +{ + 3 dict begin + /numarrays xdf + /numcolors xdf + 0 1 numcolors 1 sub + { + /c1 xdf + 1 1 + 0 1 numarrays 1 sub + { + 1 3 3 -1 roll add/index cvx + c1/get cvx/sub cvx/mul cvx + }for + /sub cvx + numarrays 1 add 
1/roll cvx + }for + numarrays[/pop cvx]cvx/repeat cvx + end +}def +/exec_tint_transform +{ + /TintProc[ + /TintTransform cvx/setcolor cvx + ]cvx bdf + MappedCSA setcolorspace_opt +}bdf +/devn_makecustomcolor +{ + 2 dict begin + /names_index xdf + /Names xdf + 1 1 1 1 Names names_index get findcmykcustomcolor + /devicen_tints AGMCORE_gget names_index get setcustomcolor + Names length{pop}repeat + end +}bdf +/setdevicencolorspace +{ + dup/AliasedColorants known{false}{true}ifelse + current_spot_alias and{ + 7 dict begin + /names_index 0 def + dup/names_len exch/Names get length def + /new_names names_len array def + /new_LookupTables names_len array def + /alias_cnt 0 def + dup/Names get + { + dup map_alias{ + exch pop + dup/ColorLookup known{ + dup begin + new_LookupTables names_index ColorLookup put + end + }{ + dup/Components known{ + dup begin + new_LookupTables names_index Components put + end + }{ + dup begin + new_LookupTables names_index[null null null null]put + end + }ifelse + }ifelse + new_names names_index 3 -1 roll/Name get put + /alias_cnt alias_cnt 1 add def + }{ + /name xdf + new_names names_index name put + dup/LookupTables known{ + dup begin + new_LookupTables names_index LookupTables names_index get put + end + }{ + dup begin + new_LookupTables names_index[null null null null]put + end + }ifelse + }ifelse + /names_index names_index 1 add def + }forall + alias_cnt 0 gt{ + /AliasedColorants true def + /lut_entry_len new_LookupTables 0 get dup length 256 ge{0 get length}{length}ifelse def + 0 1 names_len 1 sub{ + /names_index xdf + new_LookupTables names_index get dup length 256 ge{0 get length}{length}ifelse lut_entry_len ne{ + /AliasedColorants false def + exit + }{ + new_LookupTables names_index get 0 get null eq{ + dup/Names get names_index get/name xdf + name(Cyan)eq name(Magenta)eq name(Yellow)eq name(Black)eq + or or or not{ + /AliasedColorants false def + exit + }if + }if + }ifelse + }for + lut_entry_len 1 eq{ + /AliasedColorants false def + 
}if + AliasedColorants{ + dup begin + /Names new_names def + /LookupTables new_LookupTables def + /AliasedColorants true def + /NComponents lut_entry_len def + /TintMethod NComponents 4 eq{/Subtractive}{/Additive}ifelse def + /MappedCSA TintMethod/Additive eq{/DeviceRGB}{/DeviceCMYK}ifelse def + currentdict/TTTablesIdx known not{ + /TTTablesIdx -1 def + }if + end + }if + }if + end + }if + dup/devicen_colorspace_dict exch AGMCORE_gput + begin + currentdict/AliasedColorants known{ + AliasedColorants + }{ + false + }ifelse + dup not{ + CSA map_csa + }if + /TintTransform load type/nulltype eq or{ + /TintTransform[ + 0 1 Names length 1 sub + { + /TTTablesIdx TTTablesIdx 1 add def + dup LookupTables exch get dup 0 get null eq + { + 1 index + Names exch get + dup(Cyan)eq + { + pop exch + LookupTables length exch sub + /index cvx + 0 0 0 + } + { + dup(Magenta)eq + { + pop exch + LookupTables length exch sub + /index cvx + 0/exch cvx 0 0 + }{ + (Yellow)eq + { + exch + LookupTables length exch sub + /index cvx + 0 0 3 -1/roll cvx 0 + }{ + exch + LookupTables length exch sub + /index cvx + 0 0 0 4 -1/roll cvx + }ifelse + }ifelse + }ifelse + 5 -1/roll cvx/astore cvx + }{ + dup length 1 sub + LookupTables length 4 -1 roll sub 1 add + /index cvx/mul cvx/round cvx/cvi cvx/get cvx + }ifelse + Names length TTTablesIdx add 1 add 1/roll cvx + }for + Names length[/pop cvx]cvx/repeat cvx + NComponents Names length + TintMethod/Subtractive eq + { + subtractive_blend + }{ + additive_blend + }ifelse + ]cvx bdf + }if + AGMCORE_host_sep{ + Names convert_to_process{ + exec_tint_transform + } + { + currentdict/AliasedColorants known{ + AliasedColorants not + }{ + false + }ifelse + 5 dict begin + /AvoidAliasedColorants xdf + /painted? 
false def + /names_index 0 def + /names_len Names length def + AvoidAliasedColorants{ + /currentspotalias current_spot_alias def + false set_spot_alias + }if + Names{ + AGMCORE_is_cmyk_sep{ + dup(Cyan)eq AGMCORE_cyan_plate and exch + dup(Magenta)eq AGMCORE_magenta_plate and exch + dup(Yellow)eq AGMCORE_yellow_plate and exch + (Black)eq AGMCORE_black_plate and or or or{ + /devicen_colorspace_dict AGMCORE_gget/TintProc[ + Names names_index/devn_makecustomcolor cvx + ]cvx ddf + /painted? true def + }if + painted?{exit}if + }{ + 0 0 0 0 5 -1 roll findcmykcustomcolor 1 setcustomcolor currentgray 0 eq{ + /devicen_colorspace_dict AGMCORE_gget/TintProc[ + Names names_index/devn_makecustomcolor cvx + ]cvx ddf + /painted? true def + exit + }if + }ifelse + /names_index names_index 1 add def + }forall + AvoidAliasedColorants{ + currentspotalias set_spot_alias + }if + painted?{ + /devicen_colorspace_dict AGMCORE_gget/names_index names_index put + }{ + /devicen_colorspace_dict AGMCORE_gget/TintProc[ + names_len[/pop cvx]cvx/repeat cvx 1/setseparationgray cvx + 0 0 0 0/setcmykcolor cvx + ]cvx ddf + }ifelse + end + }ifelse + } + { + AGMCORE_in_rip_sep{ + Names convert_to_process not + }{ + level3 + }ifelse + { + [/DeviceN Names MappedCSA/TintTransform load]setcolorspace_opt + /TintProc level3 not AGMCORE_in_rip_sep and{ + [ + Names/length cvx[/pop cvx]cvx/repeat cvx + ]cvx bdf + }{ + {setcolor}bdf + }ifelse + }{ + exec_tint_transform + }ifelse + }ifelse + set_crd + /AliasedColorants false def + end +}def +/setindexedcolorspace +{ + dup/indexed_colorspace_dict exch AGMCORE_gput + begin + currentdict/CSDBase known{ + CSDBase/CSD get_res begin + currentdict/Names known{ + currentdict devncs + }{ + 1 currentdict sepcs + }ifelse + AGMCORE_host_sep{ + 4 dict begin + /compCnt/Names where{pop Names length}{1}ifelse def + /NewLookup HiVal 1 add string def + 0 1 HiVal{ + /tableIndex xdf + Lookup dup type/stringtype eq{ + compCnt tableIndex map_index + }{ + exec + }ifelse + /Names where{ + 
pop setdevicencolor + }{ + setsepcolor + }ifelse + currentgray + tableIndex exch + 255 mul cvi + NewLookup 3 1 roll put + }for + [/Indexed currentcolorspace HiVal NewLookup]setcolorspace_opt + end + }{ + level3 + { + currentdict/Names known{ + [/Indexed[/DeviceN Names MappedCSA/TintTransform load]HiVal Lookup]setcolorspace_opt + }{ + [/Indexed[/Separation Name MappedCSA sep_proc_name load]HiVal Lookup]setcolorspace_opt + }ifelse + }{ + [/Indexed MappedCSA HiVal + [ + currentdict/Names known{ + Lookup dup type/stringtype eq + {/exch cvx CSDBase/CSD get_res/Names get length dup/mul cvx exch/getinterval cvx{255 div}/forall cvx} + {/exec cvx}ifelse + /TintTransform load/exec cvx + }{ + Lookup dup type/stringtype eq + {/exch cvx/get cvx 255/div cvx} + {/exec cvx}ifelse + CSDBase/CSD get_res/MappedCSA get sep_proc_name exch pop/load cvx/exec cvx + }ifelse + ]cvx + ]setcolorspace_opt + }ifelse + }ifelse + end + set_crd + } + { + CSA map_csa + AGMCORE_host_sep level2 not and{ + 0 0 0 0 setcmykcolor + }{ + [/Indexed MappedCSA + level2 not has_color not and{ + dup 0 get dup/DeviceRGB eq exch/DeviceCMYK eq or{ + pop[/DeviceGray] + }if + HiVal GrayLookup + }{ + HiVal + currentdict/RangeArray known{ + { + /indexed_colorspace_dict AGMCORE_gget begin + Lookup exch + dup HiVal gt{ + pop HiVal + }if + NComponents mul NComponents getinterval{}forall + NComponents 1 sub -1 0{ + RangeArray exch 2 mul 2 getinterval aload pop map255_to_range + NComponents 1 roll + }for + end + }bind + }{ + Lookup + }ifelse + }ifelse + ]setcolorspace_opt + set_crd + }ifelse + }ifelse + end +}def +/setindexedcolor +{ + AGMCORE_host_sep{ + /indexed_colorspace_dict AGMCORE_gget + begin + currentdict/CSDBase known{ + CSDBase/CSD get_res begin + currentdict/Names known{ + map_indexed_devn + devn + } + { + Lookup 1 3 -1 roll map_index + sep + }ifelse + end + }{ + Lookup MappedCSA/DeviceCMYK eq{4}{1}ifelse 3 -1 roll + map_index + MappedCSA/DeviceCMYK eq{setcmykcolor}{setgray}ifelse + }ifelse + end + }{ + level3 
not AGMCORE_in_rip_sep and/indexed_colorspace_dict AGMCORE_gget/CSDBase known and{ + /indexed_colorspace_dict AGMCORE_gget/CSDBase get/CSD get_res begin + map_indexed_devn + devn + end + } + { + setcolor + }ifelse + }ifelse +}def +/ignoreimagedata +{ + currentoverprint not{ + gsave + dup clonedict begin + 1 setgray + /Decode[0 1]def + /DataSourcedef + /MultipleDataSources false def + /BitsPerComponent 8 def + currentdict end + systemdict/image gx + grestore + }if + consumeimagedata +}def +/add_res +{ + dup/CSD eq{ + pop + //Adobe_AGM_Core begin + /AGMCORE_CSD_cache load 3 1 roll put + end + }{ + defineresource pop + }ifelse +}def +/del_res +{ + { + aload pop exch + dup/CSD eq{ + pop + {//Adobe_AGM_Core/AGMCORE_CSD_cache get exch undef}forall + }{ + exch + {1 index undefineresource}forall + pop + }ifelse + }forall +}def +/get_res +{ + dup/CSD eq{ + pop + dup type dup/nametype eq exch/stringtype eq or{ + AGMCORE_CSD_cache exch get + }if + }{ + findresource + }ifelse +}def +/get_csa_by_name +{ + dup type dup/nametype eq exch/stringtype eq or{ + /CSA get_res + }if +}def +/paintproc_buf_init +{ + /count get 0 0 put +}def +/paintproc_buf_next +{ + dup/count get dup 0 get + dup 3 1 roll + 1 add 0 xpt + get +}def +/cachepaintproc_compress +{ + 5 dict begin + currentfile exch 0 exch/SubFileDecode filter/ReadFilter exch def + /ppdict 20 dict def + /string_size 16000 def + /readbuffer string_size string def + currentglobal true setglobal + ppdict 1 array dup 0 1 put/count xpt + setglobal + /LZWFilter + { + exch + dup length 0 eq{ + pop + }{ + ppdict dup length 1 sub 3 -1 roll put + }ifelse + {string_size}{0}ifelse string + }/LZWEncode filter def + { + ReadFilter readbuffer readstring + exch LZWFilter exch writestring + not{exit}if + }loop + LZWFilter closefile + ppdict + end +}def +/cachepaintproc +{ + 2 dict begin + currentfile exch 0 exch/SubFileDecode filter/ReadFilter exch def + /ppdict 20 dict def + currentglobal true setglobal + ppdict 1 array dup 0 1 put/count xpt + 
setglobal + { + ReadFilter 16000 string readstring exch + ppdict dup length 1 sub 3 -1 roll put + not{exit}if + }loop + ppdict dup dup length 1 sub()put + end +}def +/make_pattern +{ + exch clonedict exch + dup matrix currentmatrix matrix concatmatrix 0 0 3 2 roll itransform + exch 3 index/XStep get 1 index exch 2 copy div cvi mul sub sub + exch 3 index/YStep get 1 index exch 2 copy div cvi mul sub sub + matrix translate exch matrix concatmatrix + 1 index begin + BBox 0 get XStep div cvi XStep mul/xshift exch neg def + BBox 1 get YStep div cvi YStep mul/yshift exch neg def + BBox 0 get xshift add + BBox 1 get yshift add + BBox 2 get xshift add + BBox 3 get yshift add + 4 array astore + /BBox exch def + [xshift yshift/translate load null/exec load]dup + 3/PaintProc load put cvx/PaintProc exch def + end + gsave 0 setgray + makepattern + grestore +}def +/set_pattern +{ + dup/PatternType get 1 eq{ + dup/PaintType get 1 eq{ + currentoverprint sop[/DeviceGray]setcolorspace 0 setgray + }if + }if + setpattern +}def +/setcolorspace_opt +{ + dup currentcolorspace eq{pop}{setcolorspace}ifelse +}def +/updatecolorrendering +{ + currentcolorrendering/RenderingIntent known{ + currentcolorrendering/RenderingIntent get + } + { + Intent/AbsoluteColorimetric eq + { + /absolute_colorimetric_crd AGMCORE_gget dup null eq + } + { + Intent/RelativeColorimetric eq + { + /relative_colorimetric_crd AGMCORE_gget dup null eq + } + { + Intent/Saturation eq + { + /saturation_crd AGMCORE_gget dup null eq + } + { + /perceptual_crd AGMCORE_gget dup null eq + }ifelse + }ifelse + }ifelse + { + pop null + } + { + /RenderingIntent known{null}{Intent}ifelse + }ifelse + }ifelse + Intent ne{ + Intent/ColorRendering{findresource}stopped + { + pop pop systemdict/findcolorrendering known + { + Intent findcolorrendering + { + /ColorRendering findresource true exch + } + { + /ColorRendering findresource + product(Xerox Phaser 5400)ne + exch + }ifelse + dup Intent/AbsoluteColorimetric eq + { + 
/absolute_colorimetric_crd exch AGMCORE_gput + } + { + Intent/RelativeColorimetric eq + { + /relative_colorimetric_crd exch AGMCORE_gput + } + { + Intent/Saturation eq + { + /saturation_crd exch AGMCORE_gput + } + { + Intent/Perceptual eq + { + /perceptual_crd exch AGMCORE_gput + } + { + pop + }ifelse + }ifelse + }ifelse + }ifelse + 1 index{exch}{pop}ifelse + } + {false}ifelse + } + {true}ifelse + { + dup begin + currentdict/TransformPQR known{ + currentdict/TransformPQR get aload pop + 3{{}eq 3 1 roll}repeat or or + } + {true}ifelse + currentdict/MatrixPQR known{ + currentdict/MatrixPQR get aload pop + 1.0 eq 9 1 roll 0.0 eq 9 1 roll 0.0 eq 9 1 roll + 0.0 eq 9 1 roll 1.0 eq 9 1 roll 0.0 eq 9 1 roll + 0.0 eq 9 1 roll 0.0 eq 9 1 roll 1.0 eq + and and and and and and and and + } + {true}ifelse + end + or + { + clonedict begin + /TransformPQR[ + {4 -1 roll 3 get dup 3 1 roll sub 5 -1 roll 3 get 3 -1 roll sub div + 3 -1 roll 3 get 3 -1 roll 3 get dup 4 1 roll sub mul add}bind + {4 -1 roll 4 get dup 3 1 roll sub 5 -1 roll 4 get 3 -1 roll sub div + 3 -1 roll 4 get 3 -1 roll 4 get dup 4 1 roll sub mul add}bind + {4 -1 roll 5 get dup 3 1 roll sub 5 -1 roll 5 get 3 -1 roll sub div + 3 -1 roll 5 get 3 -1 roll 5 get dup 4 1 roll sub mul add}bind + ]def + /MatrixPQR[0.8951 -0.7502 0.0389 0.2664 1.7135 -0.0685 -0.1614 0.0367 1.0296]def + /RangePQR[-0.3227950745 2.3229645538 -1.5003771057 3.5003465881 -0.1369979095 2.136967392]def + currentdict end + }if + setcolorrendering_opt + }if + }if +}def +/set_crd +{ + AGMCORE_host_sep not level2 and{ + currentdict/ColorRendering known{ + ColorRendering/ColorRendering{findresource}stopped not{setcolorrendering_opt}if + }{ + currentdict/Intent known{ + updatecolorrendering + }if + }ifelse + currentcolorspace dup type/arraytype eq + {0 get}if + /DeviceRGB eq + { + currentdict/UCR known + {/UCR}{/AGMCORE_currentucr}ifelse + load setundercolorremoval + currentdict/BG known + {/BG}{/AGMCORE_currentbg}ifelse + load setblackgeneration + }if + 
}if +}def +/set_ucrbg +{ + dup null eq{pop/AGMCORE_currentbg load}{/Procedure get_res}ifelse setblackgeneration + dup null eq{pop/AGMCORE_currentucr load}{/Procedure get_res}ifelse setundercolorremoval +}def +/setcolorrendering_opt +{ + dup currentcolorrendering eq{ + pop + }{ + clonedict + begin + /Intent Intent def + currentdict + end + setcolorrendering + }ifelse +}def +/cpaint_gcomp +{ + convert_to_process//Adobe_AGM_Core/AGMCORE_ConvertToProcess xddf + //Adobe_AGM_Core/AGMCORE_ConvertToProcess get not + { + (%end_cpaint_gcomp)flushinput + }if +}def +/cpaint_gsep +{ + //Adobe_AGM_Core/AGMCORE_ConvertToProcess get + { + (%end_cpaint_gsep)flushinput + }if +}def +/cpaint_gend +{np}def +/T1_path +{ + currentfile token pop currentfile token pop mo + { + currentfile token pop dup type/stringtype eq + {pop exit}if + 0 exch rlineto + currentfile token pop dup type/stringtype eq + {pop exit}if + 0 rlineto + }loop +}def +/T1_gsave + level3 + {/clipsave} + {/gsave}ifelse + load def +/T1_grestore + level3 + {/cliprestore} + {/grestore}ifelse + load def +/set_spot_alias_ary +{ + dup inherit_aliases + //Adobe_AGM_Core/AGMCORE_SpotAliasAry xddf +}def +/set_spot_normalization_ary +{ + dup inherit_aliases + dup length + /AGMCORE_SpotAliasAry where{pop AGMCORE_SpotAliasAry length add}if + array + //Adobe_AGM_Core/AGMCORE_SpotAliasAry2 xddf + /AGMCORE_SpotAliasAry where{ + pop + AGMCORE_SpotAliasAry2 0 AGMCORE_SpotAliasAry putinterval + AGMCORE_SpotAliasAry length + }{0}ifelse + AGMCORE_SpotAliasAry2 3 1 roll exch putinterval + true set_spot_alias +}def +/inherit_aliases +{ + {dup/Name get map_alias{/CSD put}{pop}ifelse}forall +}def +/set_spot_alias +{ + /AGMCORE_SpotAliasAry2 where{ + /AGMCORE_current_spot_alias 3 -1 roll put + }{ + pop + }ifelse +}def +/current_spot_alias +{ + /AGMCORE_SpotAliasAry2 where{ + /AGMCORE_current_spot_alias get + }{ + false + }ifelse +}def +/map_alias +{ + /AGMCORE_SpotAliasAry2 where{ + begin + /AGMCORE_name xdf + false + AGMCORE_SpotAliasAry2{ + 
dup/Name get AGMCORE_name eq{ + /CSD get/CSD get_res + exch pop true + exit + }{ + pop + }ifelse + }forall + end + }{ + pop false + }ifelse +}bdf +/spot_alias +{ + true set_spot_alias + /AGMCORE_&setcustomcolor AGMCORE_key_known not{ + //Adobe_AGM_Core/AGMCORE_&setcustomcolor/setcustomcolor load put + }if + /customcolor_tint 1 AGMCORE_gput + //Adobe_AGM_Core begin + /setcustomcolor + { + //Adobe_AGM_Core begin + dup/customcolor_tint exch AGMCORE_gput + 1 index aload pop pop 1 eq exch 1 eq and exch 1 eq and exch 1 eq and not + current_spot_alias and{1 index 4 get map_alias}{false}ifelse + { + false set_spot_alias + /sep_colorspace_dict AGMCORE_gget null ne + 3 1 roll 2 index{ + exch pop/sep_tint AGMCORE_gget exch + }if + mark 3 1 roll + setsepcolorspace + counttomark 0 ne{ + setsepcolor + }if + pop + not{/sep_tint 1.0 AGMCORE_gput}if + pop + true set_spot_alias + }{ + AGMCORE_&setcustomcolor + }ifelse + end + }bdf + end +}def +/begin_feature +{ + Adobe_AGM_Core/AGMCORE_feature_dictCount countdictstack put + count Adobe_AGM_Core/AGMCORE_feature_opCount 3 -1 roll put + {Adobe_AGM_Core/AGMCORE_feature_ctm matrix currentmatrix put}if +}def +/end_feature +{ + 2 dict begin + /spd/setpagedevice load def + /setpagedevice{get_gstate spd set_gstate}def + stopped{$error/newerror false put}if + end + count Adobe_AGM_Core/AGMCORE_feature_opCount get sub dup 0 gt{{pop}repeat}{pop}ifelse + countdictstack Adobe_AGM_Core/AGMCORE_feature_dictCount get sub dup 0 gt{{end}repeat}{pop}ifelse + {Adobe_AGM_Core/AGMCORE_feature_ctm get setmatrix}if +}def +/set_negative +{ + //Adobe_AGM_Core begin + /AGMCORE_inverting exch def + level2{ + currentpagedevice/NegativePrint known AGMCORE_distilling not and{ + currentpagedevice/NegativePrint get//Adobe_AGM_Core/AGMCORE_inverting get ne{ + true begin_feature true{ + <>setpagedevice + }end_feature + }if + /AGMCORE_inverting false def + }if + }if + AGMCORE_inverting{ + [{1 exch sub}/exec load dup currenttransfer exch]cvx bind settransfer + 
AGMCORE_distilling{ + erasepage + }{ + gsave np clippath 1/setseparationgray where{pop setseparationgray}{setgray}ifelse + /AGMIRS_&fill where{pop AGMIRS_&fill}{fill}ifelse grestore + }ifelse + }if + end +}def +/lw_save_restore_override{ + /md where{ + pop + md begin + initializepage + /initializepage{}def + /pmSVsetup{}def + /endp{}def + /pse{}def + /psb{}def + /orig_showpage where + {pop} + {/orig_showpage/showpage load def} + ifelse + /showpage{orig_showpage gR}def + end + }if +}def +/pscript_showpage_override{ + /NTPSOct95 where + { + begin + showpage + save + /showpage/restore load def + /restore{exch pop}def + end + }if +}def +/driver_media_override +{ + /md where{ + pop + md/initializepage known{ + md/initializepage{}put + }if + md/rC known{ + md/rC{4{pop}repeat}put + }if + }if + /mysetup where{ + /mysetup[1 0 0 1 0 0]put + }if + Adobe_AGM_Core/AGMCORE_Default_CTM matrix currentmatrix put + level2 + {Adobe_AGM_Core/AGMCORE_Default_PageSize currentpagedevice/PageSize get put}if +}def +/driver_check_media_override +{ + /PrepsDict where + {pop} + { + Adobe_AGM_Core/AGMCORE_Default_CTM get matrix currentmatrix ne + Adobe_AGM_Core/AGMCORE_Default_PageSize get type/arraytype eq + { + Adobe_AGM_Core/AGMCORE_Default_PageSize get 0 get currentpagedevice/PageSize get 0 get eq and + Adobe_AGM_Core/AGMCORE_Default_PageSize get 1 get currentpagedevice/PageSize get 1 get eq and + }if + { + Adobe_AGM_Core/AGMCORE_Default_CTM get setmatrix + }if + }ifelse +}def +AGMCORE_err_strings begin + /AGMCORE_bad_environ(Environment not satisfactory for this job. Ensure that the PPD is correct or that the PostScript level requested is supported by this printer. )def + /AGMCORE_color_space_onhost_seps(This job contains colors that will not separate with on-host methods. )def + /AGMCORE_invalid_color_space(This job contains an invalid color space. 
)def +end +/set_def_ht +{AGMCORE_def_ht sethalftone}def +/set_def_flat +{AGMCORE_Default_flatness setflat}def +end +systemdict/setpacking known +{setpacking}if +%%EndResource +%%BeginResource: procset Adobe_CoolType_Core 2.31 0 %%Copyright: Copyright 1997-2006 Adobe Systems Incorporated. All Rights Reserved. %%Version: 2.31 0 10 dict begin /Adobe_CoolType_Passthru currentdict def /Adobe_CoolType_Core_Defined userdict/Adobe_CoolType_Core known def Adobe_CoolType_Core_Defined {/Adobe_CoolType_Core userdict/Adobe_CoolType_Core get def} if userdict/Adobe_CoolType_Core 70 dict dup begin put /Adobe_CoolType_Version 2.31 def /Level2? systemdict/languagelevel known dup {pop systemdict/languagelevel get 2 ge} if def Level2? not { /currentglobal false def /setglobal/pop load def /gcheck{pop false}bind def /currentpacking false def /setpacking/pop load def /SharedFontDirectory 0 dict def } if currentpacking true setpacking currentglobal false setglobal userdict/Adobe_CoolType_Data 2 copy known not {2 copy 10 dict put} if get begin /@opStackCountByLevel 32 dict def /@opStackLevel 0 def /@dictStackCountByLevel 32 dict def /@dictStackLevel 0 def end setglobal currentglobal true setglobal userdict/Adobe_CoolType_GVMFonts known not {userdict/Adobe_CoolType_GVMFonts 10 dict put} if setglobal currentglobal false setglobal userdict/Adobe_CoolType_LVMFonts known not {userdict/Adobe_CoolType_LVMFonts 10 dict put} if setglobal /ct_VMDictPut { dup gcheck{Adobe_CoolType_GVMFonts}{Adobe_CoolType_LVMFonts}ifelse 3 1 roll put }bind def /ct_VMDictUndef { dup Adobe_CoolType_GVMFonts exch known {Adobe_CoolType_GVMFonts exch undef} { dup Adobe_CoolType_LVMFonts exch known {Adobe_CoolType_LVMFonts exch undef} {pop} ifelse }ifelse }bind def /ct_str1 1 string def /ct_xshow { /_ct_na exch def /_ct_i 0 def currentpoint /_ct_y exch def /_ct_x exch def { pop pop ct_str1 exch 0 exch put ct_str1 show {_ct_na _ct_i get}stopped {pop pop} { _ct_x _ct_y moveto 0 rmoveto } ifelse /_ct_i _ct_i 1 add def 
currentpoint /_ct_y exch def /_ct_x exch def } exch @cshow }bind def /ct_yshow { /_ct_na exch def /_ct_i 0 def currentpoint /_ct_y exch def /_ct_x exch def { pop pop ct_str1 exch 0 exch put ct_str1 show {_ct_na _ct_i get}stopped {pop pop} { _ct_x _ct_y moveto 0 exch rmoveto } ifelse /_ct_i _ct_i 1 add def currentpoint /_ct_y exch def /_ct_x exch def } exch @cshow }bind def /ct_xyshow { /_ct_na exch def /_ct_i 0 def currentpoint /_ct_y exch def /_ct_x exch def { pop pop ct_str1 exch 0 exch put ct_str1 show {_ct_na _ct_i get}stopped {pop pop} { {_ct_na _ct_i 1 add get}stopped {pop pop pop} { _ct_x _ct_y moveto rmoveto } ifelse } ifelse /_ct_i _ct_i 2 add def currentpoint /_ct_y exch def /_ct_x exch def } exch @cshow }bind def /xsh{{@xshow}stopped{Adobe_CoolType_Data begin ct_xshow end}if}bind def /ysh{{@yshow}stopped{Adobe_CoolType_Data begin ct_yshow end}if}bind def /xysh{{@xyshow}stopped{Adobe_CoolType_Data begin ct_xyshow end}if}bind def currentglobal true setglobal /ct_T3Defs { /BuildChar { 1 index/Encoding get exch get 1 index/BuildGlyph get exec }bind def /BuildGlyph { exch begin GlyphProcs exch get exec end }bind def }bind def setglobal /@_SaveStackLevels { Adobe_CoolType_Data begin /@vmState currentglobal def false setglobal @opStackCountByLevel @opStackLevel 2 copy known not { 2 copy 3 dict dup/args 7 index 5 add array put put get } { get dup/args get dup length 3 index lt { dup length 5 add array exch 1 index exch 0 exch putinterval 1 index exch/args exch put } {pop} ifelse } ifelse begin count 1 sub 1 index lt {pop count} if dup/argCount exch def dup 0 gt { args exch 0 exch getinterval astore pop } {pop} ifelse count /restCount exch def end /@opStackLevel @opStackLevel 1 add def countdictstack 1 sub @dictStackCountByLevel exch @dictStackLevel exch put /@dictStackLevel @dictStackLevel 1 add def @vmState setglobal end }bind def /@_RestoreStackLevels { Adobe_CoolType_Data begin /@opStackLevel @opStackLevel 1 sub def @opStackCountByLevel @opStackLevel get 
begin count restCount sub dup 0 gt {{pop}repeat} {pop} ifelse args 0 argCount getinterval{}forall end /@dictStackLevel @dictStackLevel 1 sub def @dictStackCountByLevel @dictStackLevel get end countdictstack exch sub dup 0 gt {{end}repeat} {pop} ifelse }bind def /@_PopStackLevels { Adobe_CoolType_Data begin /@opStackLevel @opStackLevel 1 sub def /@dictStackLevel @dictStackLevel 1 sub def end }bind def /@Raise { exch cvx exch errordict exch get exec stop }bind def /@ReRaise { cvx $error/errorname get errordict exch get exec stop }bind def /@Stopped { 0 @#Stopped }bind def /@#Stopped { @_SaveStackLevels stopped {@_RestoreStackLevels true} {@_PopStackLevels false} ifelse }bind def /@Arg { Adobe_CoolType_Data begin @opStackCountByLevel @opStackLevel 1 sub get begin args exch argCount 1 sub exch sub get end end }bind def currentglobal true setglobal /CTHasResourceForAllBug Level2? { 1 dict dup /@shouldNotDisappearDictValue true def Adobe_CoolType_Data exch/@shouldNotDisappearDict exch put begin count @_SaveStackLevels {(*){pop stop}128 string/Category resourceforall} stopped pop @_RestoreStackLevels currentdict Adobe_CoolType_Data/@shouldNotDisappearDict get dup 3 1 roll ne dup 3 1 roll { /@shouldNotDisappearDictValue known { { end currentdict 1 index eq {pop exit} if } loop } if } { pop end } ifelse } {false} ifelse def true setglobal /CTHasResourceStatusBug Level2? { mark {/steveamerige/Category resourcestatus} stopped {cleartomark true} {cleartomark currentglobal not} ifelse } {false} ifelse def setglobal /CTResourceStatus { mark 3 1 roll /Category findresource begin ({ResourceStatus}stopped)0()/SubFileDecode filter cvx exec {cleartomark false} {{3 2 roll pop true}{cleartomark false}ifelse} ifelse end }bind def /CTWorkAroundBugs { Level2? 
{ /cid_PreLoad/ProcSet resourcestatus { pop pop currentglobal mark { (*) { dup/CMap CTHasResourceStatusBug {CTResourceStatus} {resourcestatus} ifelse { pop dup 0 eq exch 1 eq or { dup/CMap findresource gcheck setglobal /CMap undefineresource } { pop CTHasResourceForAllBug {exit} {stop} ifelse } ifelse } {pop} ifelse } 128 string/CMap resourceforall } stopped {cleartomark} stopped pop setglobal } if } if }bind def /ds { Adobe_CoolType_Core begin CTWorkAroundBugs /mo/moveto load def /nf/newencodedfont load def /msf{makefont setfont}bind def /uf{dup undefinefont ct_VMDictUndef}bind def /ur/undefineresource load def /chp/charpath load def /awsh/awidthshow load def /wsh/widthshow load def /ash/ashow load def /@xshow/xshow load def /@yshow/yshow load def /@xyshow/xyshow load def /@cshow/cshow load def /sh/show load def /rp/repeat load def /.n/.notdef def end currentglobal false setglobal userdict/Adobe_CoolType_Data 2 copy known not {2 copy 10 dict put} if get begin /AddWidths? false def /CC 0 def /charcode 2 string def /@opStackCountByLevel 32 dict def /@opStackLevel 0 def /@dictStackCountByLevel 32 dict def /@dictStackLevel 0 def /InVMFontsByCMap 10 dict def /InVMDeepCopiedFonts 10 dict def end setglobal }bind def /dt { currentdict Adobe_CoolType_Core eq {end} if }bind def /ps { Adobe_CoolType_Core begin Adobe_CoolType_GVMFonts begin Adobe_CoolType_LVMFonts begin SharedFontDirectory begin }bind def /pt { end end end end }bind def /unload { systemdict/languagelevel known { systemdict/languagelevel get 2 ge { userdict/Adobe_CoolType_Core 2 copy known {undef} {pop pop} ifelse } if } if }bind def /ndf { 1 index where {pop pop pop} {dup xcheck{bind}if def} ifelse }def /findfont systemdict begin userdict begin /globaldict where{/globaldict get begin}if dup where pop exch get /globaldict where{pop end}if end end Adobe_CoolType_Core_Defined {/systemfindfont exch def} { /findfont 1 index def /systemfindfont exch def } ifelse /undefinefont {pop}ndf /copyfont { currentglobal 3 1 
roll 1 index gcheck setglobal dup null eq{0}{dup length}ifelse 2 index length add 1 add dict begin exch { 1 index/FID eq {pop pop} {def} ifelse } forall dup null eq {pop} {{def}forall} ifelse currentdict end exch setglobal }bind def /copyarray { currentglobal exch dup gcheck setglobal dup length array copy exch setglobal }bind def /newencodedfont { currentglobal { SharedFontDirectory 3 index known {SharedFontDirectory 3 index get/FontReferenced known} {false} ifelse } { FontDirectory 3 index known {FontDirectory 3 index get/FontReferenced known} { SharedFontDirectory 3 index known {SharedFontDirectory 3 index get/FontReferenced known} {false} ifelse } ifelse } ifelse dup { 3 index findfont/FontReferenced get 2 index dup type/nametype eq {findfont} if ne {pop false} if } if dup { 1 index dup type/nametype eq {findfont} if dup/CharStrings known { /CharStrings get length 4 index findfont/CharStrings get length ne { pop false } if } {pop} ifelse } if { pop 1 index findfont /Encoding get exch 0 1 255 {2 copy get 3 index 3 1 roll put} for pop pop pop } { currentglobal 4 1 roll dup type/nametype eq {findfont} if dup gcheck setglobal dup dup maxlength 2 add dict begin exch { 1 index/FID ne 2 index/Encoding ne and {def} {pop pop} ifelse } forall /FontReferenced exch def /Encoding exch dup length array copy def /FontName 1 index dup type/stringtype eq{cvn}if def dup currentdict end definefont ct_VMDictPut setglobal } ifelse }bind def /SetSubstituteStrategy { $SubstituteFont begin dup type/dicttype ne {0 dict} if currentdict/$Strategies known { exch $Strategies exch 2 copy known { get 2 copy maxlength exch maxlength add dict begin {def}forall {def}forall currentdict dup/$Init known {dup/$Init get exec} if end /$Strategy exch def } {pop pop pop} ifelse } {pop pop} ifelse end }bind def /scff { $SubstituteFont begin dup type/stringtype eq {dup length exch} {null} ifelse /$sname exch def /$slen exch def /$inVMIndex $sname null eq { 1 index $str cvs dup length $slen sub $slen 
getinterval cvn } {$sname} ifelse def end {findfont} @Stopped { dup length 8 add string exch 1 index 0(BadFont:)putinterval 1 index exch 8 exch dup length string cvs putinterval cvn {findfont} @Stopped {pop/Courier findfont} if } if $SubstituteFont begin /$sname null def /$slen 0 def /$inVMIndex null def end }bind def /isWidthsOnlyFont { dup/WidthsOnly known {pop pop true} { dup/FDepVector known {/FDepVector get{isWidthsOnlyFont dup{exit}if}forall} { dup/FDArray known {/FDArray get{isWidthsOnlyFont dup{exit}if}forall} {pop} ifelse } ifelse } ifelse }bind def /ct_StyleDicts 4 dict dup begin /Adobe-Japan1 4 dict dup begin Level2? { /Serif /HeiseiMin-W3-83pv-RKSJ-H/Font resourcestatus {pop pop/HeiseiMin-W3} { /CIDFont/Category resourcestatus { pop pop /HeiseiMin-W3/CIDFont resourcestatus {pop pop/HeiseiMin-W3} {/Ryumin-Light} ifelse } {/Ryumin-Light} ifelse } ifelse def /SansSerif /HeiseiKakuGo-W5-83pv-RKSJ-H/Font resourcestatus {pop pop/HeiseiKakuGo-W5} { /CIDFont/Category resourcestatus { pop pop /HeiseiKakuGo-W5/CIDFont resourcestatus {pop pop/HeiseiKakuGo-W5} {/GothicBBB-Medium} ifelse } {/GothicBBB-Medium} ifelse } ifelse def /HeiseiMaruGo-W4-83pv-RKSJ-H/Font resourcestatus {pop pop/HeiseiMaruGo-W4} { /CIDFont/Category resourcestatus { pop pop /HeiseiMaruGo-W4/CIDFont resourcestatus {pop pop/HeiseiMaruGo-W4} { /Jun101-Light-RKSJ-H/Font resourcestatus {pop pop/Jun101-Light} {SansSerif} ifelse } ifelse } { /Jun101-Light-RKSJ-H/Font resourcestatus {pop pop/Jun101-Light} {SansSerif} ifelse } ifelse } ifelse /RoundSansSerif exch def /Default Serif def } { /Serif/Ryumin-Light def /SansSerif/GothicBBB-Medium def { (fonts/Jun101-Light-83pv-RKSJ-H)status }stopped {pop}{ {pop pop pop pop/Jun101-Light} {SansSerif} ifelse /RoundSansSerif exch def }ifelse /Default Serif def } ifelse end def /Adobe-Korea1 4 dict dup begin /Serif/HYSMyeongJo-Medium def /SansSerif/HYGoThic-Medium def /RoundSansSerif SansSerif def /Default Serif def end def /Adobe-GB1 4 dict dup begin 
/Serif/STSong-Light def /SansSerif/STHeiti-Regular def /RoundSansSerif SansSerif def /Default Serif def end def /Adobe-CNS1 4 dict dup begin /Serif/MKai-Medium def /SansSerif/MHei-Medium def /RoundSansSerif SansSerif def /Default Serif def end def end def Level2?{currentglobal true setglobal}if /ct_BoldRomanWidthProc { stringwidth 1 index 0 ne{exch .03 add exch}if setcharwidth 0 0 }bind def /ct_Type0WidthProc { dup stringwidth 0 0 moveto 2 index true charpath pathbbox 0 -1 7 index 2 div .88 setcachedevice2 pop 0 0 }bind def /ct_Type0WMode1WidthProc { dup stringwidth pop 2 div neg -0.88 2 copy moveto 0 -1 5 -1 roll true charpath pathbbox setcachedevice }bind def /cHexEncoding [/c00/c01/c02/c03/c04/c05/c06/c07/c08/c09/c0A/c0B/c0C/c0D/c0E/c0F/c10/c11/c12 /c13/c14/c15/c16/c17/c18/c19/c1A/c1B/c1C/c1D/c1E/c1F/c20/c21/c22/c23/c24/c25 /c26/c27/c28/c29/c2A/c2B/c2C/c2D/c2E/c2F/c30/c31/c32/c33/c34/c35/c36/c37/c38 /c39/c3A/c3B/c3C/c3D/c3E/c3F/c40/c41/c42/c43/c44/c45/c46/c47/c48/c49/c4A/c4B /c4C/c4D/c4E/c4F/c50/c51/c52/c53/c54/c55/c56/c57/c58/c59/c5A/c5B/c5C/c5D/c5E /c5F/c60/c61/c62/c63/c64/c65/c66/c67/c68/c69/c6A/c6B/c6C/c6D/c6E/c6F/c70/c71 /c72/c73/c74/c75/c76/c77/c78/c79/c7A/c7B/c7C/c7D/c7E/c7F/c80/c81/c82/c83/c84 /c85/c86/c87/c88/c89/c8A/c8B/c8C/c8D/c8E/c8F/c90/c91/c92/c93/c94/c95/c96/c97 /c98/c99/c9A/c9B/c9C/c9D/c9E/c9F/cA0/cA1/cA2/cA3/cA4/cA5/cA6/cA7/cA8/cA9/cAA /cAB/cAC/cAD/cAE/cAF/cB0/cB1/cB2/cB3/cB4/cB5/cB6/cB7/cB8/cB9/cBA/cBB/cBC/cBD /cBE/cBF/cC0/cC1/cC2/cC3/cC4/cC5/cC6/cC7/cC8/cC9/cCA/cCB/cCC/cCD/cCE/cCF/cD0 /cD1/cD2/cD3/cD4/cD5/cD6/cD7/cD8/cD9/cDA/cDB/cDC/cDD/cDE/cDF/cE0/cE1/cE2/cE3 /cE4/cE5/cE6/cE7/cE8/cE9/cEA/cEB/cEC/cED/cEE/cEF/cF0/cF1/cF2/cF3/cF4/cF5/cF6 /cF7/cF8/cF9/cFA/cFB/cFC/cFD/cFE/cFF]def /ct_BoldBaseFont 11 dict begin /FontType 3 def /FontMatrix[1 0 0 1 0 0]def /FontBBox[0 0 1 1]def /Encoding cHexEncoding def /_setwidthProc/ct_BoldRomanWidthProc load def /_bcstr1 1 string def /BuildChar { exch begin _basefont setfont _bcstr1 dup 0 4 -1 roll put dup 
_setwidthProc 3 copy moveto show _basefonto setfont moveto show end }bind def currentdict end def systemdict/composefont known { /ct_DefineIdentity-H { /Identity-H/CMap resourcestatus { pop pop } { /CIDInit/ProcSet findresource begin 12 dict begin begincmap /CIDSystemInfo 3 dict dup begin /Registry(Adobe)def /Ordering(Identity)def /Supplement 0 def end def /CMapName/Identity-H def /CMapVersion 1.000 def /CMapType 1 def 1 begincodespacerange <0000> endcodespacerange 1 begincidrange <0000>0 endcidrange endcmap CMapName currentdict/CMap defineresource pop end end } ifelse } def /ct_BoldBaseCIDFont 11 dict begin /CIDFontType 1 def /CIDFontName/ct_BoldBaseCIDFont def /FontMatrix[1 0 0 1 0 0]def /FontBBox[0 0 1 1]def /_setwidthProc/ct_Type0WidthProc load def /_bcstr2 2 string def /BuildGlyph { exch begin _basefont setfont _bcstr2 1 2 index 256 mod put _bcstr2 0 3 -1 roll 256 idiv put _bcstr2 dup _setwidthProc 3 copy moveto show _basefonto setfont moveto show end }bind def currentdict end def }if Level2?{setglobal}if /ct_CopyFont{ { 1 index/FID ne 2 index/UniqueID ne and {def}{pop pop}ifelse }forall }bind def /ct_Type0CopyFont { exch dup length dict begin ct_CopyFont [ exch FDepVector { dup/FontType get 0 eq { 1 index ct_Type0CopyFont /_ctType0 exch definefont } { /_ctBaseFont exch 2 index exec } ifelse exch } forall pop ] /FDepVector exch def currentdict end }bind def /ct_MakeBoldFont { dup/ct_SyntheticBold known { dup length 3 add dict begin ct_CopyFont /ct_StrokeWidth .03 0 FontMatrix idtransform pop def /ct_SyntheticBold true def currentdict end definefont } { dup dup length 3 add dict begin ct_CopyFont /PaintType 2 def /StrokeWidth .03 0 FontMatrix idtransform pop def /dummybold currentdict end definefont dup/FontType get dup 9 ge exch 11 le and { ct_BoldBaseCIDFont dup length 3 add dict copy begin dup/CIDSystemInfo get/CIDSystemInfo exch def ct_DefineIdentity-H /_Type0Identity/Identity-H 3 -1 roll[exch]composefont /_basefont exch def /_Type0Identity/Identity-H 3 -1 
roll[exch]composefont /_basefonto exch def currentdict end /CIDFont defineresource } { ct_BoldBaseFont dup length 3 add dict copy begin /_basefont exch def /_basefonto exch def currentdict end definefont } ifelse } ifelse }bind def /ct_MakeBold{ 1 index 1 index findfont currentglobal 5 1 roll dup gcheck setglobal dup /FontType get 0 eq { dup/WMode known{dup/WMode get 1 eq}{false}ifelse version length 4 ge and {version 0 4 getinterval cvi 2015 ge} {true} ifelse {/ct_Type0WidthProc} {/ct_Type0WMode1WidthProc} ifelse ct_BoldBaseFont/_setwidthProc 3 -1 roll load put {ct_MakeBoldFont}ct_Type0CopyFont definefont } { dup/_fauxfont known not 1 index/SubstMaster known not and { ct_BoldBaseFont/_setwidthProc /ct_BoldRomanWidthProc load put ct_MakeBoldFont } { 2 index 2 index eq {exch pop } { dup length dict begin ct_CopyFont currentdict end definefont } ifelse } ifelse } ifelse pop pop pop setglobal }bind def /?str1 256 string def /?set { $SubstituteFont begin /$substituteFound false def /$fontname 1 index def /$doSmartSub false def end dup findfont $SubstituteFont begin $substituteFound {false} { dup/FontName known { dup/FontName get $fontname eq 1 index/DistillerFauxFont known not and /currentdistillerparams where {pop false 2 index isWidthsOnlyFont not and} if } {false} ifelse } ifelse exch pop /$doSmartSub true def end { 5 1 roll pop pop pop pop findfont } { 1 index findfont dup/FontType get 3 eq { 6 1 roll pop pop pop pop pop false } {pop true} ifelse { $SubstituteFont begin pop pop /$styleArray 1 index def /$regOrdering 2 index def pop pop 0 1 $styleArray length 1 sub { $styleArray exch get ct_StyleDicts $regOrdering 2 copy known { get exch 2 copy known not {pop/Default} if get dup type/nametype eq { ?str1 cvs length dup 1 add exch ?str1 exch(-)putinterval exch dup length exch ?str1 exch 3 index exch putinterval add ?str1 exch 0 exch getinterval cvn } { pop pop/Unknown } ifelse } { pop pop pop pop/Unknown } ifelse } for end findfont }if } ifelse currentglobal false 
setglobal 3 1 roll null copyfont definefont pop setglobal }bind def setpacking userdict/$SubstituteFont 25 dict put 1 dict begin /SubstituteFont dup $error exch 2 copy known {get} {pop pop{pop/Courier}bind} ifelse def /currentdistillerparams where dup { pop pop currentdistillerparams/CannotEmbedFontPolicy 2 copy known {get/Error eq} {pop pop false} ifelse } if not { countdictstack array dictstack 0 get begin userdict begin $SubstituteFont begin /$str 128 string def /$fontpat 128 string def /$slen 0 def /$sname null def /$match false def /$fontname null def /$substituteFound false def /$inVMIndex null def /$doSmartSub true def /$depth 0 def /$fontname null def /$italicangle 26.5 def /$dstack null def /$Strategies 10 dict dup begin /$Type3Underprint { currentglobal exch false setglobal 11 dict begin /UseFont exch $WMode 0 ne { dup length dict copy dup/WMode $WMode put /UseFont exch definefont } if def /FontName $fontname dup type/stringtype eq{cvn}if def /FontType 3 def /FontMatrix[.001 0 0 .001 0 0]def /Encoding 256 array dup 0 1 255{/.notdef put dup}for pop def /FontBBox[0 0 0 0]def /CCInfo 7 dict dup begin /cc null def /x 0 def /y 0 def end def /BuildChar { exch begin CCInfo begin 1 string dup 0 3 index put exch pop /cc exch def UseFont 1000 scalefont setfont cc stringwidth/y exch def/x exch def x y setcharwidth $SubstituteFont/$Strategy get/$Underprint get exec 0 0 moveto cc show x y moveto end end }bind def currentdict end exch setglobal }bind def /$GetaTint 2 dict dup begin /$BuildFont { dup/WMode known {dup/WMode get} {0} ifelse /$WMode exch def $fontname exch dup/FontName known { dup/FontName get dup type/stringtype eq{cvn}if } {/unnamedfont} ifelse exch Adobe_CoolType_Data/InVMDeepCopiedFonts get 1 index/FontName get known { pop Adobe_CoolType_Data/InVMDeepCopiedFonts get 1 index get null copyfont } {$deepcopyfont} ifelse exch 1 index exch/FontBasedOn exch put dup/FontName $fontname dup type/stringtype eq{cvn}if put definefont 
Adobe_CoolType_Data/InVMDeepCopiedFonts get begin dup/FontBasedOn get 1 index def end }bind def /$Underprint { gsave x abs y abs gt {/y 1000 def} {/x -1000 def 500 120 translate} ifelse Level2? { [/Separation(All)/DeviceCMYK{0 0 0 1 pop}] setcolorspace } {0 setgray} ifelse 10 setlinewidth x .8 mul [7 3] { y mul 8 div 120 sub x 10 div exch moveto 0 y 4 div neg rlineto dup 0 rlineto 0 y 4 div rlineto closepath gsave Level2? {.2 setcolor} {.8 setgray} ifelse fill grestore stroke } forall pop grestore }bind def end def /$Oblique 1 dict dup begin /$BuildFont { currentglobal exch dup gcheck setglobal null copyfont begin /FontBasedOn currentdict/FontName known { FontName dup type/stringtype eq{cvn}if } {/unnamedfont} ifelse def /FontName $fontname dup type/stringtype eq{cvn}if def /currentdistillerparams where {pop} { /FontInfo currentdict/FontInfo known {FontInfo null copyfont} {2 dict} ifelse dup begin /ItalicAngle $italicangle def /FontMatrix FontMatrix [1 0 ItalicAngle dup sin exch cos div 1 0 0] matrix concatmatrix readonly end 4 2 roll def def } ifelse FontName currentdict end definefont exch setglobal }bind def end def /$None 1 dict dup begin /$BuildFont{}bind def end def end def /$Oblique SetSubstituteStrategy /$findfontByEnum { dup type/stringtype eq{cvn}if dup/$fontname exch def $sname null eq {$str cvs dup length $slen sub $slen getinterval} {pop $sname} ifelse $fontpat dup 0(fonts/*)putinterval exch 7 exch putinterval /$match false def $SubstituteFont/$dstack countdictstack array dictstack put mark { $fontpat 0 $slen 7 add getinterval {/$match exch def exit} $str filenameforall } stopped { cleardictstack currentdict true $SubstituteFont/$dstack get { exch { 1 index eq {pop false} {true} ifelse } {begin false} ifelse } forall pop } if cleartomark /$slen 0 def $match false ne {$match(fonts/)anchorsearch pop pop cvn} {/Courier} ifelse }bind def /$ROS 1 dict dup begin /Adobe 4 dict dup begin /Japan1 [/Ryumin-Light/HeiseiMin-W3 /GothicBBB-Medium/HeiseiKakuGo-W5 
/HeiseiMaruGo-W4/Jun101-Light]def /Korea1 [/HYSMyeongJo-Medium/HYGoThic-Medium]def /GB1 [/STSong-Light/STHeiti-Regular]def /CNS1 [/MKai-Medium/MHei-Medium]def end def end def /$cmapname null def /$deepcopyfont { dup/FontType get 0 eq { 1 dict dup/FontName/copied put copyfont begin /FDepVector FDepVector copyarray 0 1 2 index length 1 sub { 2 copy get $deepcopyfont dup/FontName/copied put /copied exch definefont 3 copy put pop pop } for def currentdict end } {$Strategies/$Type3Underprint get exec} ifelse }bind def /$buildfontname { dup/CIDFont findresource/CIDSystemInfo get begin Registry length Ordering length Supplement 8 string cvs 3 copy length 2 add add add string dup 5 1 roll dup 0 Registry putinterval dup 4 index(-)putinterval dup 4 index 1 add Ordering putinterval 4 2 roll add 1 add 2 copy(-)putinterval end 1 add 2 copy 0 exch getinterval $cmapname $fontpat cvs exch anchorsearch {pop pop 3 2 roll putinterval cvn/$cmapname exch def} {pop pop pop pop pop} ifelse length $str 1 index(-)putinterval 1 add $str 1 index $cmapname $fontpat cvs putinterval $cmapname length add $str exch 0 exch getinterval cvn }bind def /$findfontByROS { /$fontname exch def $ROS Registry 2 copy known { get Ordering 2 copy known {get} {pop pop[]} ifelse } {pop pop[]} ifelse false exch { dup/CIDFont resourcestatus { pop pop save 1 index/CIDFont findresource dup/WidthsOnly known {dup/WidthsOnly get} {false} ifelse exch pop exch restore {pop} {exch pop true exit} ifelse } {pop} ifelse } forall {$str cvs $buildfontname} { false(*) { save exch dup/CIDFont findresource dup/WidthsOnly known {dup/WidthsOnly get not} {true} ifelse exch/CIDSystemInfo get dup/Registry get Registry eq exch/Ordering get Ordering eq and and {exch restore exch pop true exit} {pop restore} ifelse } $str/CIDFont resourceforall {$buildfontname} {$fontname $findfontByEnum} ifelse } ifelse }bind def end end currentdict/$error known currentdict/languagelevel known and dup {pop $error/SubstituteFont known} if dup {$error} 
{Adobe_CoolType_Core} ifelse begin { /SubstituteFont /CMap/Category resourcestatus { pop pop { $SubstituteFont begin /$substituteFound true def dup length $slen gt $sname null ne or $slen 0 gt and { $sname null eq {dup $str cvs dup length $slen sub $slen getinterval cvn} {$sname} ifelse Adobe_CoolType_Data/InVMFontsByCMap get 1 index 2 copy known { get false exch { pop currentglobal { GlobalFontDirectory 1 index known {exch pop true exit} {pop} ifelse } { FontDirectory 1 index known {exch pop true exit} { GlobalFontDirectory 1 index known {exch pop true exit} {pop} ifelse } ifelse } ifelse } forall } {pop pop false} ifelse { exch pop exch pop } { dup/CMap resourcestatus { pop pop dup/$cmapname exch def /CMap findresource/CIDSystemInfo get{def}forall $findfontByROS } { 128 string cvs dup(-)search { 3 1 roll search { 3 1 roll pop {dup cvi} stopped {pop pop pop pop pop $findfontByEnum} { 4 2 roll pop pop exch length exch 2 index length 2 index sub exch 1 sub -1 0 { $str cvs dup length 4 index 0 4 index 4 3 roll add getinterval exch 1 index exch 3 index exch putinterval dup/CMap resourcestatus { pop pop 4 1 roll pop pop pop dup/$cmapname exch def /CMap findresource/CIDSystemInfo get{def}forall $findfontByROS true exit } {pop} ifelse } for dup type/booleantype eq {pop} {pop pop pop $findfontByEnum} ifelse } ifelse } {pop pop pop $findfontByEnum} ifelse } {pop pop $findfontByEnum} ifelse } ifelse } ifelse } {//SubstituteFont exec} ifelse /$slen 0 def end } } { { $SubstituteFont begin /$substituteFound true def dup length $slen gt $sname null ne or $slen 0 gt and {$findfontByEnum} {//SubstituteFont exec} ifelse end } } ifelse bind readonly def Adobe_CoolType_Core/scfindfont/systemfindfont load put } { /scfindfont { $SubstituteFont begin dup systemfindfont dup/FontName known {dup/FontName get dup 3 index ne} {/noname true} ifelse dup { /$origfontnamefound 2 index def /$origfontname 4 index def/$substituteFound true def } if exch pop { $slen 0 gt $sname null ne 3 index 
length $slen gt or and { pop dup $findfontByEnum findfont dup maxlength 1 add dict begin {1 index/FID eq{pop pop}{def}ifelse} forall currentdict end definefont dup/FontName known{dup/FontName get}{null}ifelse $origfontnamefound ne { $origfontname $str cvs print ( substitution revised, using )print dup/FontName known {dup/FontName get}{(unspecified font)} ifelse $str cvs print(.\n)print } if } {exch pop} ifelse } {exch pop} ifelse end }bind def } ifelse end end Adobe_CoolType_Core_Defined not { Adobe_CoolType_Core/findfont { $SubstituteFont begin $depth 0 eq { /$fontname 1 index dup type/stringtype ne{$str cvs}if def /$substituteFound false def } if /$depth $depth 1 add def end scfindfont $SubstituteFont begin /$depth $depth 1 sub def $substituteFound $depth 0 eq and { $inVMIndex null ne {dup $inVMIndex $AddInVMFont} if $doSmartSub { currentdict/$Strategy known {$Strategy/$BuildFont get exec} if } if } if end }bind put } if } if end /$AddInVMFont { exch/FontName 2 copy known { get 1 dict dup begin exch 1 index gcheck def end exch Adobe_CoolType_Data/InVMFontsByCMap get exch $DictAdd } {pop pop pop} ifelse }bind def /$DictAdd { 2 copy known not {2 copy 4 index length dict put} if Level2? not { 2 copy get dup maxlength exch length 4 index length add lt 2 copy get dup length 4 index length add exch maxlength 1 index lt { 2 mul dict begin 2 copy get{forall}def 2 copy currentdict put end } {pop} ifelse } if get begin {def} forall end }bind def end end %%EndResource currentglobal true setglobal %%BeginResource: procset Adobe_CoolType_Utility_MAKEOCF 1.23 0 %%Copyright: Copyright 1987-2006 Adobe Systems Incorporated. %%Version: 1.23 0 systemdict/languagelevel known dup {currentglobal false setglobal} {false} ifelse exch userdict/Adobe_CoolType_Utility 2 copy known {2 copy get dup maxlength 27 add dict copy} {27 dict} ifelse put Adobe_CoolType_Utility begin /@eexecStartData def /@recognizeCIDFont null def /ct_Level2? exch def /ct_Clone? 
1183615869 internaldict dup /CCRun known not exch/eCCRun known not ct_Level2? and or def ct_Level2? {globaldict begin currentglobal true setglobal} if /ct_AddStdCIDMap ct_Level2? {{ mark Adobe_CoolType_Utility/@recognizeCIDFont currentdict put { ((Hex)57 StartData 0615 1e27 2c39 1c60 d8a8 cc31 fe2b f6e0 7aa3 e541 e21c 60d8 a8c9 c3d0 6d9e 1c60 d8a8 c9c2 02d7 9a1c 60d8 a849 1c60 d8a8 cc36 74f4 1144 b13b 77)0()/SubFileDecode filter cvx exec } stopped { cleartomark Adobe_CoolType_Utility/@recognizeCIDFont get countdictstack dup array dictstack exch 1 sub -1 0 { 2 copy get 3 index eq {1 index length exch sub 1 sub{end}repeat exit} {pop} ifelse } for pop pop Adobe_CoolType_Utility/@eexecStartData get eexec } {cleartomark} ifelse }} {{ Adobe_CoolType_Utility/@eexecStartData get eexec }} ifelse bind def userdict/cid_extensions known dup{cid_extensions/cid_UpdateDB known and}if { cid_extensions begin /cid_GetCIDSystemInfo { 1 index type/stringtype eq {exch cvn exch} if cid_extensions begin dup load 2 index known { 2 copy cid_GetStatusInfo dup null ne { 1 index load 3 index get dup null eq {pop pop cid_UpdateDB} { exch 1 index/Created get eq {exch pop exch pop} {pop cid_UpdateDB} ifelse } ifelse } {pop cid_UpdateDB} ifelse } {cid_UpdateDB} ifelse end }bind def end } if ct_Level2? {end setglobal} if /ct_UseNativeCapability? systemdict/composefont known def /ct_MakeOCF 35 dict def /ct_Vars 25 dict def /ct_GlyphDirProcs 6 dict def /ct_BuildCharDict 15 dict dup begin /charcode 2 string def /dst_string 1500 string def /nullstring()def /usewidths? 
true def end def ct_Level2?{setglobal}{pop}ifelse ct_GlyphDirProcs begin /GetGlyphDirectory { systemdict/languagelevel known {pop/CIDFont findresource/GlyphDirectory get} { 1 index/CIDFont findresource/GlyphDirectory get dup type/dicttype eq { dup dup maxlength exch length sub 2 index lt { dup length 2 index add dict copy 2 index /CIDFont findresource/GlyphDirectory 2 index put } if } if exch pop exch pop } ifelse + }def /+ { systemdict/languagelevel known { currentglobal false setglobal 3 dict begin /vm exch def } {1 dict begin} ifelse /$ exch def systemdict/languagelevel known { vm setglobal /gvm currentglobal def $ gcheck setglobal } if ?{$ begin}if }def /?{$ type/dicttype eq}def /|{ userdict/Adobe_CoolType_Data known { Adobe_CoolType_Data/AddWidths? known { currentdict Adobe_CoolType_Data begin begin AddWidths? { Adobe_CoolType_Data/CC 3 index put ?{def}{$ 3 1 roll put}ifelse CC charcode exch 1 index 0 2 index 256 idiv put 1 index exch 1 exch 256 mod put stringwidth 2 array astore currentfont/Widths get exch CC exch put } {?{def}{$ 3 1 roll put}ifelse} ifelse end end } {?{def}{$ 3 1 roll put}ifelse} ifelse } {?{def}{$ 3 1 roll put}ifelse} ifelse }def /! 
{ ?{end}if systemdict/languagelevel known {gvm setglobal} if end }def /:{string currentfile exch readstring pop}executeonly def end ct_MakeOCF begin /ct_cHexEncoding [/c00/c01/c02/c03/c04/c05/c06/c07/c08/c09/c0A/c0B/c0C/c0D/c0E/c0F/c10/c11/c12 /c13/c14/c15/c16/c17/c18/c19/c1A/c1B/c1C/c1D/c1E/c1F/c20/c21/c22/c23/c24/c25 /c26/c27/c28/c29/c2A/c2B/c2C/c2D/c2E/c2F/c30/c31/c32/c33/c34/c35/c36/c37/c38 /c39/c3A/c3B/c3C/c3D/c3E/c3F/c40/c41/c42/c43/c44/c45/c46/c47/c48/c49/c4A/c4B /c4C/c4D/c4E/c4F/c50/c51/c52/c53/c54/c55/c56/c57/c58/c59/c5A/c5B/c5C/c5D/c5E /c5F/c60/c61/c62/c63/c64/c65/c66/c67/c68/c69/c6A/c6B/c6C/c6D/c6E/c6F/c70/c71 /c72/c73/c74/c75/c76/c77/c78/c79/c7A/c7B/c7C/c7D/c7E/c7F/c80/c81/c82/c83/c84 /c85/c86/c87/c88/c89/c8A/c8B/c8C/c8D/c8E/c8F/c90/c91/c92/c93/c94/c95/c96/c97 /c98/c99/c9A/c9B/c9C/c9D/c9E/c9F/cA0/cA1/cA2/cA3/cA4/cA5/cA6/cA7/cA8/cA9/cAA /cAB/cAC/cAD/cAE/cAF/cB0/cB1/cB2/cB3/cB4/cB5/cB6/cB7/cB8/cB9/cBA/cBB/cBC/cBD /cBE/cBF/cC0/cC1/cC2/cC3/cC4/cC5/cC6/cC7/cC8/cC9/cCA/cCB/cCC/cCD/cCE/cCF/cD0 /cD1/cD2/cD3/cD4/cD5/cD6/cD7/cD8/cD9/cDA/cDB/cDC/cDD/cDE/cDF/cE0/cE1/cE2/cE3 /cE4/cE5/cE6/cE7/cE8/cE9/cEA/cEB/cEC/cED/cEE/cEF/cF0/cF1/cF2/cF3/cF4/cF5/cF6 /cF7/cF8/cF9/cFA/cFB/cFC/cFD/cFE/cFF]def /ct_CID_STR_SIZE 8000 def /ct_mkocfStr100 100 string def /ct_defaultFontMtx[.001 0 0 .001 0 0]def /ct_1000Mtx[1000 0 0 1000 0 0]def /ct_raise{exch cvx exch errordict exch get exec stop}bind def /ct_reraise {cvx $error/errorname get(Error: )print dup( )cvs print errordict exch get exec stop }bind def /ct_cvnsi { 1 index add 1 sub 1 exch 0 4 1 roll { 2 index exch get exch 8 bitshift add } for exch pop }bind def /ct_GetInterval { Adobe_CoolType_Utility/ct_BuildCharDict get begin /dst_index 0 def dup dst_string length gt {dup string/dst_string exch def} if 1 index ct_CID_STR_SIZE idiv /arrayIndex exch def 2 index arrayIndex get 2 index arrayIndex ct_CID_STR_SIZE mul sub { dup 3 index add 2 index length le { 2 index getinterval dst_string dst_index 2 index putinterval length dst_index 
add/dst_index exch def exit } { 1 index length 1 index sub dup 4 1 roll getinterval dst_string dst_index 2 index putinterval pop dup dst_index add/dst_index exch def sub /arrayIndex arrayIndex 1 add def 2 index dup length arrayIndex gt {arrayIndex get} { pop exit } ifelse 0 } ifelse } loop pop pop pop dst_string 0 dst_index getinterval end }bind def ct_Level2? { /ct_resourcestatus currentglobal mark true setglobal {/unknowninstancename/Category resourcestatus} stopped {cleartomark setglobal true} {cleartomark currentglobal not exch setglobal} ifelse { { mark 3 1 roll/Category findresource begin ct_Vars/vm currentglobal put ({ResourceStatus}stopped)0()/SubFileDecode filter cvx exec {cleartomark false} {{3 2 roll pop true}{cleartomark false}ifelse} ifelse ct_Vars/vm get setglobal end } } {{resourcestatus}} ifelse bind def /CIDFont/Category ct_resourcestatus {pop pop} { currentglobal true setglobal /Generic/Category findresource dup length dict copy dup/InstanceType/dicttype put /CIDFont exch/Category defineresource pop setglobal } ifelse ct_UseNativeCapability? 
{ /CIDInit/ProcSet findresource begin 12 dict begin begincmap /CIDSystemInfo 3 dict dup begin /Registry(Adobe)def /Ordering(Identity)def /Supplement 0 def end def /CMapName/Identity-H def /CMapVersion 1.000 def /CMapType 1 def 1 begincodespacerange <0000> endcodespacerange 1 begincidrange <0000>0 endcidrange endcmap CMapName currentdict/CMap defineresource pop end end } if } { /ct_Category 2 dict begin /CIDFont 10 dict def /ProcSet 2 dict def currentdict end def /defineresource { ct_Category 1 index 2 copy known { get dup dup maxlength exch length eq { dup length 10 add dict copy ct_Category 2 index 2 index put } if 3 index 3 index put pop exch pop } {pop pop/defineresource/undefined ct_raise} ifelse }bind def /findresource { ct_Category 1 index 2 copy known { get 2 index 2 copy known {get 3 1 roll pop pop} {pop pop/findresource/undefinedresource ct_raise} ifelse } {pop pop/findresource/undefined ct_raise} ifelse }bind def /resourcestatus { ct_Category 1 index 2 copy known { get 2 index known exch pop exch pop { 0 -1 true } { false } ifelse } {pop pop/findresource/undefined ct_raise} ifelse }bind def /ct_resourcestatus/resourcestatus load def } ifelse /ct_CIDInit 2 dict begin /ct_cidfont_stream_init { { dup(Binary)eq { pop null currentfile ct_Level2? { {cid_BYTE_COUNT()/SubFileDecode filter} stopped {pop pop pop} if } if /readstring load exit } if dup(Hex)eq { pop currentfile ct_Level2? 
{ {null exch/ASCIIHexDecode filter/readstring} stopped {pop exch pop(>)exch/readhexstring} if } {(>)exch/readhexstring} ifelse load exit } if /StartData/typecheck ct_raise } loop cid_BYTE_COUNT ct_CID_STR_SIZE le { 2 copy cid_BYTE_COUNT string exch exec pop 1 array dup 3 -1 roll 0 exch put } { cid_BYTE_COUNT ct_CID_STR_SIZE div ceiling cvi dup array exch 2 sub 0 exch 1 exch { 2 copy 5 index ct_CID_STR_SIZE string 6 index exec pop put pop } for 2 index cid_BYTE_COUNT ct_CID_STR_SIZE mod string 3 index exec pop 1 index exch 1 index length 1 sub exch put } ifelse cid_CIDFONT exch/GlyphData exch put 2 index null eq { pop pop pop } { pop/readstring load 1 string exch { 3 copy exec pop dup length 0 eq { pop pop pop pop pop true exit } if 4 index eq { pop pop pop pop false exit } if } loop pop } ifelse }bind def /StartData { mark { currentdict dup/FDArray get 0 get/FontMatrix get 0 get 0.001 eq { dup/CDevProc known not { /CDevProc 1183615869 internaldict/stdCDevProc 2 copy known {get} { pop pop {pop pop pop pop pop 0 -1000 7 index 2 div 880} } ifelse def } if } { /CDevProc { pop pop pop pop pop 0 1 cid_temp/cid_CIDFONT get /FDArray get 0 get /FontMatrix get 0 get div 7 index 2 div 1 index 0.88 mul }def } ifelse /cid_temp 15 dict def cid_temp begin /cid_CIDFONT exch def 3 copy pop dup/cid_BYTE_COUNT exch def 0 gt { ct_cidfont_stream_init FDArray { /Private get dup/SubrMapOffset known { begin /Subrs SubrCount array def Subrs SubrMapOffset SubrCount SDBytes ct_Level2? 
{ currentdict dup/SubrMapOffset undef dup/SubrCount undef /SDBytes undef } if end /cid_SD_BYTES exch def /cid_SUBR_COUNT exch def /cid_SUBR_MAP_OFFSET exch def /cid_SUBRS exch def cid_SUBR_COUNT 0 gt { GlyphData cid_SUBR_MAP_OFFSET cid_SD_BYTES ct_GetInterval 0 cid_SD_BYTES ct_cvnsi 0 1 cid_SUBR_COUNT 1 sub { exch 1 index 1 add cid_SD_BYTES mul cid_SUBR_MAP_OFFSET add GlyphData exch cid_SD_BYTES ct_GetInterval 0 cid_SD_BYTES ct_cvnsi cid_SUBRS 4 2 roll GlyphData exch 4 index 1 index sub ct_GetInterval dup length string copy put } for pop } if } {pop} ifelse } forall } if cleartomark pop pop end CIDFontName currentdict/CIDFont defineresource pop end end } stopped {cleartomark/StartData ct_reraise} if }bind def currentdict end def /ct_saveCIDInit { /CIDInit/ProcSet ct_resourcestatus {true} {/CIDInitC/ProcSet ct_resourcestatus} ifelse { pop pop /CIDInit/ProcSet findresource ct_UseNativeCapability? {pop null} {/CIDInit ct_CIDInit/ProcSet defineresource pop} ifelse } {/CIDInit ct_CIDInit/ProcSet defineresource pop null} ifelse ct_Vars exch/ct_oldCIDInit exch put }bind def /ct_restoreCIDInit { ct_Vars/ct_oldCIDInit get dup null ne {/CIDInit exch/ProcSet defineresource pop} {pop} ifelse }bind def /ct_BuildCharSetUp { 1 index begin CIDFont begin Adobe_CoolType_Utility/ct_BuildCharDict get begin /ct_dfCharCode exch def /ct_dfDict exch def CIDFirstByte ct_dfCharCode add dup CIDCount ge {pop 0} if /cid exch def { GlyphDirectory cid 2 copy known {get} {pop pop nullstring} ifelse dup length FDBytes sub 0 gt { dup FDBytes 0 ne {0 FDBytes ct_cvnsi} {pop 0} ifelse /fdIndex exch def dup length FDBytes sub FDBytes exch getinterval /charstring exch def exit } { pop cid 0 eq {/charstring nullstring def exit} if /cid 0 def } ifelse } loop }def /ct_SetCacheDevice { 0 0 moveto dup stringwidth 3 -1 roll true charpath pathbbox 0 -1000 7 index 2 div 880 setcachedevice2 0 0 moveto }def /ct_CloneSetCacheProc { 1 eq { stringwidth pop -2 div -880 0 -1000 setcharwidth moveto } { usewidths? 
{ currentfont/Widths get cid 2 copy known {get exch pop aload pop} {pop pop stringwidth} ifelse } {stringwidth} ifelse setcharwidth 0 0 moveto } ifelse }def /ct_Type3ShowCharString { ct_FDDict fdIndex 2 copy known {get} { currentglobal 3 1 roll 1 index gcheck setglobal ct_Type1FontTemplate dup maxlength dict copy begin FDArray fdIndex get dup/FontMatrix 2 copy known {get} {pop pop ct_defaultFontMtx} ifelse /FontMatrix exch dup length array copy def /Private get /Private exch def /Widths rootfont/Widths get def /CharStrings 1 dict dup/.notdef dup length string copy put def currentdict end /ct_Type1Font exch definefont dup 5 1 roll put setglobal } ifelse dup/CharStrings get 1 index/Encoding get ct_dfCharCode get charstring put rootfont/WMode 2 copy known {get} {pop pop 0} ifelse exch 1000 scalefont setfont ct_str1 0 ct_dfCharCode put ct_str1 exch ct_dfSetCacheProc ct_SyntheticBold { currentpoint ct_str1 show newpath moveto ct_str1 true charpath ct_StrokeWidth setlinewidth stroke } {ct_str1 show} ifelse }def /ct_Type4ShowCharString { ct_dfDict ct_dfCharCode charstring FDArray fdIndex get dup/FontMatrix get dup ct_defaultFontMtx ct_matrixeq not {ct_1000Mtx matrix concatmatrix concat} {pop} ifelse /Private get Adobe_CoolType_Utility/ct_Level2? get not { ct_dfDict/Private 3 -1 roll {put} 1183615869 internaldict/superexec get exec } if 1183615869 internaldict Adobe_CoolType_Utility/ct_Level2? get {1 index} {3 index/Private get mark 6 1 roll} ifelse dup/RunInt known {/RunInt get} {pop/CCRun} ifelse get exec Adobe_CoolType_Utility/ct_Level2? 
get not {cleartomark} if }bind def /ct_BuildCharIncremental { { Adobe_CoolType_Utility/ct_MakeOCF get begin ct_BuildCharSetUp ct_ShowCharString } stopped {stop} if end end end end }bind def /BaseFontNameStr(BF00)def /ct_Type1FontTemplate 14 dict begin /FontType 1 def /FontMatrix [0.001 0 0 0.001 0 0]def /FontBBox [-250 -250 1250 1250]def /Encoding ct_cHexEncoding def /PaintType 0 def currentdict end def /BaseFontTemplate 11 dict begin /FontMatrix [0.001 0 0 0.001 0 0]def /FontBBox [-250 -250 1250 1250]def /Encoding ct_cHexEncoding def /BuildChar/ct_BuildCharIncremental load def ct_Clone? { /FontType 3 def /ct_ShowCharString/ct_Type3ShowCharString load def /ct_dfSetCacheProc/ct_CloneSetCacheProc load def /ct_SyntheticBold false def /ct_StrokeWidth 1 def } { /FontType 4 def /Private 1 dict dup/lenIV 4 put def /CharStrings 1 dict dup/.notdefput def /PaintType 0 def /ct_ShowCharString/ct_Type4ShowCharString load def } ifelse /ct_str1 1 string def currentdict end def /BaseFontDictSize BaseFontTemplate length 5 add def /ct_matrixeq { true 0 1 5 { dup 4 index exch get exch 3 index exch get eq and dup not {exit} if } for exch pop exch pop }bind def /ct_makeocf { 15 dict begin exch/WMode exch def exch/FontName exch def /FontType 0 def /FMapType 2 def dup/FontMatrix known {dup/FontMatrix get/FontMatrix exch def} {/FontMatrix matrix def} ifelse /bfCount 1 index/CIDCount get 256 idiv 1 add dup 256 gt{pop 256}if def /Encoding 256 array 0 1 bfCount 1 sub{2 copy dup put pop}for bfCount 1 255{2 copy bfCount put pop}for def /FDepVector bfCount dup 256 lt{1 add}if array def BaseFontTemplate BaseFontDictSize dict copy begin /CIDFont exch def CIDFont/FontBBox known {CIDFont/FontBBox get/FontBBox exch def} if CIDFont/CDevProc known {CIDFont/CDevProc get/CDevProc exch def} if currentdict end BaseFontNameStr 3(0)putinterval 0 1 bfCount dup 256 eq{1 sub}if { FDepVector exch 2 index BaseFontDictSize dict copy begin dup/CIDFirstByte exch 256 mul def FontType 3 eq {/ct_FDDict 2 dict def} if 
currentdict end 1 index 16 BaseFontNameStr 2 2 getinterval cvrs pop BaseFontNameStr exch definefont put } for ct_Clone? {/Widths 1 index/CIDFont get/GlyphDirectory get length dict def} if FontName currentdict end definefont ct_Clone? { gsave dup 1000 scalefont setfont ct_BuildCharDict begin /usewidths? false def currentfont/Widths get begin exch/CIDFont get/GlyphDirectory get { pop dup charcode exch 1 index 0 2 index 256 idiv put 1 index exch 1 exch 256 mod put stringwidth 2 array astore def } forall end /usewidths? true def end grestore } {exch pop} ifelse }bind def currentglobal true setglobal /ct_ComposeFont { ct_UseNativeCapability? { 2 index/CMap ct_resourcestatus {pop pop exch pop} { /CIDInit/ProcSet findresource begin 12 dict begin begincmap /CMapName 3 index def /CMapVersion 1.000 def /CMapType 1 def exch/WMode exch def /CIDSystemInfo 3 dict dup begin /Registry(Adobe)def /Ordering CMapName ct_mkocfStr100 cvs (Adobe-)search { pop pop (-)search { dup length string copy exch pop exch pop } {pop(Identity)} ifelse } {pop (Identity)} ifelse def /Supplement 0 def end def 1 begincodespacerange <0000> endcodespacerange 1 begincidrange <0000>0 endcidrange endcmap CMapName currentdict/CMap defineresource pop end end } ifelse composefont } { 3 2 roll pop 0 get/CIDFont findresource ct_makeocf } ifelse }bind def setglobal /ct_MakeIdentity { ct_UseNativeCapability? 
{ 1 index/CMap ct_resourcestatus {pop pop} { /CIDInit/ProcSet findresource begin 12 dict begin begincmap /CMapName 2 index def /CMapVersion 1.000 def /CMapType 1 def /CIDSystemInfo 3 dict dup begin /Registry(Adobe)def /Ordering CMapName ct_mkocfStr100 cvs (Adobe-)search { pop pop (-)search {dup length string copy exch pop exch pop} {pop(Identity)} ifelse } {pop(Identity)} ifelse def /Supplement 0 def end def 1 begincodespacerange <0000> endcodespacerange 1 begincidrange <0000>0 endcidrange endcmap CMapName currentdict/CMap defineresource pop end end } ifelse composefont } { exch pop 0 get/CIDFont findresource ct_makeocf } ifelse }bind def currentdict readonly pop end end %%EndResource setglobal %%BeginResource: procset Adobe_CoolType_Utility_T42 1.0 0 %%Copyright: Copyright 1987-2004 Adobe Systems Incorporated. %%Version: 1.0 0 userdict/ct_T42Dict 15 dict put ct_T42Dict begin /Is2015? { version cvi 2015 ge }bind def /AllocGlyphStorage { Is2015? { pop } { {string}forall }ifelse }bind def /Type42DictBegin { 25 dict begin /FontName exch def /CharStrings 256 dict begin /.notdef 0 def currentdict end def /Encoding exch def /PaintType 0 def /FontType 42 def /FontMatrix[1 0 0 1 0 0]def 4 array astore cvx/FontBBox exch def /sfnts }bind def /Type42DictEnd { currentdict dup/FontName get exch definefont end ct_T42Dict exch dup/FontName get exch put }bind def /RD{string currentfile exch readstring pop}executeonly def /PrepFor2015 { Is2015? { /GlyphDirectory 16 dict def sfnts 0 get dup 2 index (glyx) putinterval 2 index (locx) putinterval pop pop } { pop pop }ifelse }bind def /AddT42Char { Is2015? { /GlyphDirectory get begin def end pop pop } { /sfnts get 4 index get 3 index 2 index putinterval pop pop pop pop }ifelse }bind def /T0AddT42Mtx2 { /CIDFont findresource/Metrics2 get begin def end }bind def end %%EndResource currentglobal true setglobal %%BeginFile: MMFauxFont.prc %%Copyright: Copyright 1987-2001 Adobe Systems Incorporated. %%All Rights Reserved. 
userdict /ct_EuroDict 10 dict put ct_EuroDict begin /ct_CopyFont { { 1 index /FID ne {def} {pop pop} ifelse} forall } def /ct_GetGlyphOutline { gsave initmatrix newpath exch findfont dup length 1 add dict begin ct_CopyFont /Encoding Encoding dup length array copy dup 4 -1 roll 0 exch put def currentdict end /ct_EuroFont exch definefont 1000 scalefont setfont 0 0 moveto [ <00> stringwidth <00> false charpath pathbbox [ {/m cvx} {/l cvx} {/c cvx} {/cp cvx} pathforall grestore counttomark 8 add } def /ct_MakeGlyphProc { ] cvx /ct_PSBuildGlyph cvx ] cvx } def /ct_PSBuildGlyph { gsave 8 -1 roll pop 7 1 roll 6 -2 roll ct_FontMatrix transform 6 2 roll 4 -2 roll ct_FontMatrix transform 4 2 roll ct_FontMatrix transform currentdict /PaintType 2 copy known {get 2 eq}{pop pop false} ifelse dup 9 1 roll { currentdict /StrokeWidth 2 copy known { get 2 div 0 ct_FontMatrix dtransform pop 5 1 roll 4 -1 roll 4 index sub 4 1 roll 3 -1 roll 4 index sub 3 1 roll exch 4 index add exch 4 index add 5 -1 roll pop } { pop pop } ifelse } if setcachedevice ct_FontMatrix concat ct_PSPathOps begin exec end { currentdict /StrokeWidth 2 copy known { get } { pop pop 0 } ifelse setlinewidth stroke } { fill } ifelse grestore } def /ct_PSPathOps 4 dict dup begin /m {moveto} def /l {lineto} def /c {curveto} def /cp {closepath} def end def /ct_matrix1000 [1000 0 0 1000 0 0] def /ct_AddGlyphProc { 2 index findfont dup length 4 add dict begin ct_CopyFont /CharStrings CharStrings dup length 1 add dict copy begin 3 1 roll def currentdict end def /ct_FontMatrix ct_matrix1000 FontMatrix matrix concatmatrix def /ct_PSBuildGlyph /ct_PSBuildGlyph load def /ct_PSPathOps /ct_PSPathOps load def currentdict end definefont pop } def systemdict /languagelevel known { /ct_AddGlyphToPrinterFont { 2 copy ct_GetGlyphOutline 3 add -1 roll restore ct_MakeGlyphProc ct_AddGlyphProc } def } { /ct_AddGlyphToPrinterFont { pop pop restore Adobe_CTFauxDict /$$$FONTNAME get /Euro Adobe_CTFauxDict /$$$SUBSTITUTEBASE get ct_EuroDict 
exch get ct_AddGlyphProc } def } ifelse /AdobeSansMM { 556 0 24 -19 541 703 { 541 628 m 510 669 442 703 354 703 c 201 703 117 607 101 444 c 50 444 l 25 372 l 97 372 l 97 301 l 49 301 l 24 229 l 103 229 l 124 67 209 -19 350 -19 c 435 -19 501 25 509 32 c 509 131 l 492 105 417 60 343 60 c 267 60 204 127 197 229 c 406 229 l 430 301 l 191 301 l 191 372 l 455 372 l 479 444 l 194 444 l 201 531 245 624 348 624 c 433 624 484 583 509 534 c cp 556 0 m } ct_PSBuildGlyph } def /AdobeSerifMM { 500 0 10 -12 484 692 { 347 298 m 171 298 l 170 310 170 322 170 335 c 170 362 l 362 362 l 374 403 l 172 403 l 184 580 244 642 308 642 c 380 642 434 574 457 457 c 481 462 l 474 691 l 449 691 l 433 670 429 657 410 657 c 394 657 360 692 299 692 c 204 692 94 604 73 403 c 22 403 l 10 362 l 70 362 l 69 352 69 341 69 330 c 69 319 69 308 70 298 c 22 298 l 10 257 l 73 257 l 97 57 216 -12 295 -12 c 364 -12 427 25 484 123 c 458 142 l 425 101 384 37 316 37 c 256 37 189 84 173 257 c 335 257 l cp 500 0 m } ct_PSBuildGlyph } def end %%EndFile setglobal Adobe_CoolType_Core begin /$Oblique SetSubstituteStrategy end %%BeginResource: procset Adobe_AGM_Image 1.0 0 +%%Version: 1.0 0 +%%Copyright: Copyright(C)2000-2006 Adobe Systems, Inc. All Rights Reserved. 
+systemdict/setpacking known +{ + currentpacking + true setpacking +}if +userdict/Adobe_AGM_Image 71 dict dup begin put +/Adobe_AGM_Image_Id/Adobe_AGM_Image_1.0_0 def +/nd{ + null def +}bind def +/AGMIMG_&image nd +/AGMIMG_&colorimage nd +/AGMIMG_&imagemask nd +/AGMIMG_mbuf()def +/AGMIMG_ybuf()def +/AGMIMG_kbuf()def +/AGMIMG_c 0 def +/AGMIMG_m 0 def +/AGMIMG_y 0 def +/AGMIMG_k 0 def +/AGMIMG_tmp nd +/AGMIMG_imagestring0 nd +/AGMIMG_imagestring1 nd +/AGMIMG_imagestring2 nd +/AGMIMG_imagestring3 nd +/AGMIMG_imagestring4 nd +/AGMIMG_imagestring5 nd +/AGMIMG_cnt nd +/AGMIMG_fsave nd +/AGMIMG_colorAry nd +/AGMIMG_override nd +/AGMIMG_name nd +/AGMIMG_maskSource nd +/AGMIMG_flushfilters nd +/invert_image_samples nd +/knockout_image_samples nd +/img nd +/sepimg nd +/devnimg nd +/idximg nd +/ds +{ + Adobe_AGM_Core begin + Adobe_AGM_Image begin + /AGMIMG_&image systemdict/image get def + /AGMIMG_&imagemask systemdict/imagemask get def + /colorimage where{ + pop + /AGMIMG_&colorimage/colorimage ldf + }if + end + end +}def +/ps +{ + Adobe_AGM_Image begin + /AGMIMG_ccimage_exists{/customcolorimage where + { + pop + /Adobe_AGM_OnHost_Seps where + { + pop false + }{ + /Adobe_AGM_InRip_Seps where + { + pop false + }{ + true + }ifelse + }ifelse + }{ + false + }ifelse + }bdf + level2{ + /invert_image_samples + { + Adobe_AGM_Image/AGMIMG_tmp Decode length ddf + /Decode[Decode 1 get Decode 0 get]def + }def + /knockout_image_samples + { + Operator/imagemask ne{ + /Decode[1 1]def + }if + }def + }{ + /invert_image_samples + { + {1 exch sub}currenttransfer addprocs settransfer + }def + /knockout_image_samples + { + {pop 1}currenttransfer addprocs settransfer + }def + }ifelse + /img/imageormask ldf + /sepimg/sep_imageormask ldf + /devnimg/devn_imageormask ldf + /idximg/indexed_imageormask ldf + /_ctype 7 def + currentdict{ + dup xcheck 1 index type dup/arraytype eq exch/packedarraytype eq or and{ + bind + }if + def + }forall +}def +/pt +{ + end +}def +/dt +{ +}def +/AGMIMG_flushfilters +{ 
+ dup type/arraytype ne + {1 array astore}if + dup 0 get currentfile ne + {dup 0 get flushfile}if + { + dup type/filetype eq + { + dup status 1 index currentfile ne and + {closefile} + {pop} + ifelse + }{pop}ifelse + }forall +}def +/AGMIMG_init_common +{ + currentdict/T known{/ImageType/T ldf currentdict/T undef}if + currentdict/W known{/Width/W ldf currentdict/W undef}if + currentdict/H known{/Height/H ldf currentdict/H undef}if + currentdict/M known{/ImageMatrix/M ldf currentdict/M undef}if + currentdict/BC known{/BitsPerComponent/BC ldf currentdict/BC undef}if + currentdict/D known{/Decode/D ldf currentdict/D undef}if + currentdict/DS known{/DataSource/DS ldf currentdict/DS undef}if + currentdict/O known{ + /Operator/O load 1 eq{ + /imagemask + }{ + /O load 2 eq{ + /image + }{ + /colorimage + }ifelse + }ifelse + def + currentdict/O undef + }if + currentdict/HSCI known{/HostSepColorImage/HSCI ldf currentdict/HSCI undef}if + currentdict/MD known{/MultipleDataSources/MD ldf currentdict/MD undef}if + currentdict/I known{/Interpolate/I ldf currentdict/I undef}if + currentdict/SI known{/SkipImageProc/SI ldf currentdict/SI undef}if + /DataSource load xcheck not{ + DataSource type/arraytype eq{ + DataSource 0 get type/filetype eq{ + /_Filters DataSource def + currentdict/MultipleDataSources known not{ + /DataSource DataSource dup length 1 sub get def + }if + }if + }if + currentdict/MultipleDataSources known not{ + /MultipleDataSources DataSource type/arraytype eq{ + DataSource length 1 gt + } + {false}ifelse def + }if + }if + /NComponents Decode length 2 div def + currentdict/SkipImageProc known not{/SkipImageProc{false}def}if +}bdf +/imageormask_sys +{ + begin + AGMIMG_init_common + save mark + level2{ + currentdict + Operator/imagemask eq{ + AGMIMG_&imagemask + }{ + use_mask{ + process_mask AGMIMG_&image + }{ + AGMIMG_&image + }ifelse + }ifelse + }{ + Width Height + Operator/imagemask eq{ + Decode 0 get 1 eq Decode 1 get 0 eq and + ImageMatrix/DataSource load + 
AGMIMG_&imagemask + }{ + BitsPerComponent ImageMatrix/DataSource load + AGMIMG_&image + }ifelse + }ifelse + currentdict/_Filters known{_Filters AGMIMG_flushfilters}if + cleartomark restore + end +}def +/overprint_plate +{ + currentoverprint{ + 0 get dup type/nametype eq{ + dup/DeviceGray eq{ + pop AGMCORE_black_plate not + }{ + /DeviceCMYK eq{ + AGMCORE_is_cmyk_sep not + }if + }ifelse + }{ + false exch + { + AGMOHS_sepink eq or + }forall + not + }ifelse + }{ + pop false + }ifelse +}def +/process_mask +{ + level3{ + dup begin + /ImageType 1 def + end + 4 dict begin + /DataDict exch def + /ImageType 3 def + /InterleaveType 3 def + /MaskDict 9 dict begin + /ImageType 1 def + /Width DataDict dup/MaskWidth known{/MaskWidth}{/Width}ifelse get def + /Height DataDict dup/MaskHeight known{/MaskHeight}{/Height}ifelse get def + /ImageMatrix[Width 0 0 Height neg 0 Height]def + /NComponents 1 def + /BitsPerComponent 1 def + /Decode DataDict dup/MaskD known{/MaskD}{[1 0]}ifelse get def + /DataSource Adobe_AGM_Core/AGMIMG_maskSource get def + currentdict end def + currentdict end + }if +}def +/use_mask +{ + dup/Mask known {dup/Mask get}{false}ifelse +}def +/imageormask +{ + begin + AGMIMG_init_common + SkipImageProc{ + currentdict consumeimagedata + } + { + save mark + level2 AGMCORE_host_sep not and{ + currentdict + Operator/imagemask eq DeviceN_PS2 not and{ + imagemask + }{ + AGMCORE_in_rip_sep currentoverprint and currentcolorspace 0 get/DeviceGray eq and{ + [/Separation/Black/DeviceGray{}]setcolorspace + /Decode[Decode 1 get Decode 0 get]def + }if + use_mask{ + process_mask image + }{ + DeviceN_NoneName DeviceN_PS2 Indexed_DeviceN level3 not and or or AGMCORE_in_rip_sep and + { + Names convert_to_process not{ + 2 dict begin + /imageDict xdf + /names_index 0 def + gsave + imageDict write_image_file{ + Names{ + dup(None)ne{ + [/Separation 3 -1 roll/DeviceGray{1 exch sub}]setcolorspace + Operator imageDict read_image_file + names_index 0 eq{true setoverprint}if + /names_index 
names_index 1 add def + }{ + pop + }ifelse + }forall + close_image_file + }if + grestore + end + }{ + Operator/imagemask eq{ + imagemask + }{ + image + }ifelse + }ifelse + }{ + Operator/imagemask eq{ + imagemask + }{ + image + }ifelse + }ifelse + }ifelse + }ifelse + }{ + Width Height + Operator/imagemask eq{ + Decode 0 get 1 eq Decode 1 get 0 eq and + ImageMatrix/DataSource load + /Adobe_AGM_OnHost_Seps where{ + pop imagemask + }{ + currentgray 1 ne{ + currentdict imageormask_sys + }{ + currentoverprint not{ + 1 AGMCORE_&setgray + currentdict imageormask_sys + }{ + currentdict ignoreimagedata + }ifelse + }ifelse + }ifelse + }{ + BitsPerComponent ImageMatrix + MultipleDataSources{ + 0 1 NComponents 1 sub{ + DataSource exch get + }for + }{ + /DataSource load + }ifelse + Operator/colorimage eq{ + AGMCORE_host_sep{ + MultipleDataSources level2 or NComponents 4 eq and{ + AGMCORE_is_cmyk_sep{ + MultipleDataSources{ + /DataSource DataSource 0 get xcheck + { + [ + DataSource 0 get/exec cvx + DataSource 1 get/exec cvx + DataSource 2 get/exec cvx + DataSource 3 get/exec cvx + /AGMCORE_get_ink_data cvx + ]cvx + }{ + DataSource aload pop AGMCORE_get_ink_data + }ifelse def + }{ + /DataSource + Width BitsPerComponent mul 7 add 8 idiv Height mul 4 mul + /DataSource load + filter_cmyk 0()/SubFileDecode filter def + }ifelse + /Decode[Decode 0 get Decode 1 get]def + /MultipleDataSources false def + /NComponents 1 def + /Operator/image def + invert_image_samples + 1 AGMCORE_&setgray + currentdict imageormask_sys + }{ + currentoverprint not Operator/imagemask eq and{ + 1 AGMCORE_&setgray + currentdict imageormask_sys + }{ + currentdict ignoreimagedata + }ifelse + }ifelse + }{ + MultipleDataSources NComponents AGMIMG_&colorimage + }ifelse + }{ + true NComponents colorimage + }ifelse + }{ + Operator/image eq{ + AGMCORE_host_sep{ + /DoImage true def + currentdict/HostSepColorImage known{HostSepColorImage not}{false}ifelse + { + AGMCORE_black_plate not Operator/imagemask ne and{ + 
/DoImage false def + currentdict ignoreimagedata + }if + }if + 1 AGMCORE_&setgray + DoImage + {currentdict imageormask_sys}if + }{ + use_mask{ + process_mask image + }{ + image + }ifelse + }ifelse + }{ + Operator/knockout eq{ + pop pop pop pop pop + currentcolorspace overprint_plate not{ + knockout_unitsq + }if + }if + }ifelse + }ifelse + }ifelse + }ifelse + cleartomark restore + }ifelse + currentdict/_Filters known{_Filters AGMIMG_flushfilters}if + end +}def +/sep_imageormask +{ + /sep_colorspace_dict AGMCORE_gget begin + CSA map_csa + begin + AGMIMG_init_common + SkipImageProc{ + currentdict consumeimagedata + }{ + save mark + AGMCORE_avoid_L2_sep_space{ + /Decode[Decode 0 get 255 mul Decode 1 get 255 mul]def + }if + AGMIMG_ccimage_exists + MappedCSA 0 get/DeviceCMYK eq and + currentdict/Components known and + Name()ne and + Name(All)ne and + Operator/image eq and + AGMCORE_producing_seps not and + level2 not and + { + Width Height BitsPerComponent ImageMatrix + [ + /DataSource load/exec cvx + { + 0 1 2 index length 1 sub{ + 1 index exch + 2 copy get 255 xor put + }for + }/exec cvx + ]cvx bind + MappedCSA 0 get/DeviceCMYK eq{ + Components aload pop + }{ + 0 0 0 Components aload pop 1 exch sub + }ifelse + Name findcmykcustomcolor + customcolorimage + }{ + AGMCORE_producing_seps not{ + level2{ + //Adobe_AGM_Core/AGMCORE_pattern_paint_type get 2 ne AGMCORE_avoid_L2_sep_space not and currentcolorspace 0 get/Separation ne and{ + [/Separation Name MappedCSA sep_proc_name exch dup 0 get 15 string cvs(/Device)anchorsearch{pop pop 0 get}{pop}ifelse exch load]setcolorspace_opt + /sep_tint AGMCORE_gget setcolor + }if + currentdict imageormask + }{ + currentdict + Operator/imagemask eq{ + imageormask + }{ + sep_imageormask_lev1 + }ifelse + }ifelse + }{ + AGMCORE_host_sep{ + Operator/knockout eq{ + currentdict/ImageMatrix get concat + knockout_unitsq + }{ + currentgray 1 ne{ + AGMCORE_is_cmyk_sep Name(All)ne and{ + level2{ + Name AGMCORE_IsSeparationAProcessColor + { + 
Operator/imagemask eq{ + //Adobe_AGM_Core/AGMCORE_pattern_paint_type get 2 ne{ + /sep_tint AGMCORE_gget 1 exch sub AGMCORE_&setcolor + }if + }{ + invert_image_samples + }ifelse + }{ + //Adobe_AGM_Core/AGMCORE_pattern_paint_type get 2 ne{ + [/Separation Name[/DeviceGray] + { + sep_colorspace_proc AGMCORE_get_ink_data + 1 exch sub + }bind + ]AGMCORE_&setcolorspace + /sep_tint AGMCORE_gget AGMCORE_&setcolor + }if + }ifelse + currentdict imageormask_sys + }{ + currentdict + Operator/imagemask eq{ + imageormask_sys + }{ + sep_image_lev1_sep + }ifelse + }ifelse + }{ + Operator/imagemask ne{ + invert_image_samples + }if + currentdict imageormask_sys + }ifelse + }{ + currentoverprint not Name(All)eq or Operator/imagemask eq and{ + currentdict imageormask_sys + }{ + currentoverprint not + { + gsave + knockout_unitsq + grestore + }if + currentdict consumeimagedata + }ifelse + }ifelse + }ifelse + }{ + //Adobe_AGM_Core/AGMCORE_pattern_paint_type get 2 ne{ + currentcolorspace 0 get/Separation ne{ + [/Separation Name MappedCSA sep_proc_name exch 0 get exch load]setcolorspace_opt + /sep_tint AGMCORE_gget setcolor + }if + }if + currentoverprint + MappedCSA 0 get/DeviceCMYK eq and + Name AGMCORE_IsSeparationAProcessColor not and + //Adobe_AGM_Core/AGMCORE_pattern_paint_type get 2 ne{Name inRip_spot_has_ink not and}{false}ifelse + Name(All)ne and{ + imageormask_l2_overprint + }{ + currentdict imageormask + }ifelse + }ifelse + }ifelse + }ifelse + cleartomark restore + }ifelse + currentdict/_Filters known{_Filters AGMIMG_flushfilters}if + end + end +}def +/colorSpaceElemCnt +{ + mark currentcolor counttomark dup 2 add 1 roll cleartomark +}bdf +/devn_sep_datasource +{ + 1 dict begin + /dataSource xdf + [ + 0 1 dataSource length 1 sub{ + dup currentdict/dataSource get/exch cvx/get cvx/exec cvx + /exch cvx names_index/ne cvx[/pop cvx]cvx/if cvx + }for + ]cvx bind + end +}bdf +/devn_alt_datasource +{ + 11 dict begin + /convProc xdf + /origcolorSpaceElemCnt xdf + /origMultipleDataSources 
xdf + /origBitsPerComponent xdf + /origDecode xdf + /origDataSource xdf + /dsCnt origMultipleDataSources{origDataSource length}{1}ifelse def + /DataSource origMultipleDataSources + { + [ + BitsPerComponent 8 idiv origDecode length 2 idiv mul string + 0 1 origDecode length 2 idiv 1 sub + { + dup 7 mul 1 add index exch dup BitsPerComponent 8 idiv mul exch + origDataSource exch get 0()/SubFileDecode filter + BitsPerComponent 8 idiv string/readstring cvx/pop cvx/putinterval cvx + }for + ]bind cvx + }{origDataSource}ifelse 0()/SubFileDecode filter def + [ + origcolorSpaceElemCnt string + 0 2 origDecode length 2 sub + { + dup origDecode exch get dup 3 -1 roll 1 add origDecode exch get exch sub 2 BitsPerComponent exp 1 sub div + 1 BitsPerComponent 8 idiv{DataSource/read cvx/not cvx{0}/if cvx/mul cvx}repeat/mul cvx/add cvx + }for + /convProc load/exec cvx + origcolorSpaceElemCnt 1 sub -1 0 + { + /dup cvx 2/add cvx/index cvx + 3 1/roll cvx/exch cvx 255/mul cvx/cvi cvx/put cvx + }for + ]bind cvx 0()/SubFileDecode filter + end +}bdf +/devn_imageormask +{ + /devicen_colorspace_dict AGMCORE_gget begin + CSA map_csa + 2 dict begin + dup + /srcDataStrs[3 -1 roll begin + AGMIMG_init_common + currentdict/MultipleDataSources known{MultipleDataSources{DataSource length}{1}ifelse}{1}ifelse + { + Width Decode length 2 div mul cvi + { + dup 65535 gt{1 add 2 div cvi}{exit}ifelse + }loop + string + }repeat + end]def + /dstDataStr srcDataStrs 0 get length string def + begin + AGMIMG_init_common + SkipImageProc{ + currentdict consumeimagedata + }{ + save mark + AGMCORE_producing_seps not{ + level3 not{ + Operator/imagemask ne{ + /DataSource[[ + DataSource Decode BitsPerComponent currentdict/MultipleDataSources known{MultipleDataSources}{false}ifelse + colorSpaceElemCnt/devicen_colorspace_dict AGMCORE_gget/TintTransform get + devn_alt_datasource 1/string cvx/readstring cvx/pop cvx]cvx colorSpaceElemCnt 1 sub{dup}repeat]def + /MultipleDataSources true def + /Decode colorSpaceElemCnt[exch{0 
1}repeat]def + }if + }if + currentdict imageormask + }{ + AGMCORE_host_sep{ + Names convert_to_process{ + CSA get_csa_by_name 0 get/DeviceCMYK eq{ + /DataSource + Width BitsPerComponent mul 7 add 8 idiv Height mul 4 mul + DataSource Decode BitsPerComponent currentdict/MultipleDataSources known{MultipleDataSources}{false}ifelse + 4/devicen_colorspace_dict AGMCORE_gget/TintTransform get + devn_alt_datasource + filter_cmyk 0()/SubFileDecode filter def + /MultipleDataSources false def + /Decode[1 0]def + /DeviceGray setcolorspace + currentdict imageormask_sys + }{ + AGMCORE_report_unsupported_color_space + AGMCORE_black_plate{ + /DataSource + DataSource Decode BitsPerComponent currentdict/MultipleDataSources known{MultipleDataSources}{false}ifelse + CSA get_csa_by_name 0 get/DeviceRGB eq{3}{1}ifelse/devicen_colorspace_dict AGMCORE_gget/TintTransform get + devn_alt_datasource + /MultipleDataSources false def + /Decode colorSpaceElemCnt[exch{0 1}repeat]def + currentdict imageormask_sys + }{ + gsave + knockout_unitsq + grestore + currentdict consumeimagedata + }ifelse + }ifelse + } + { + /devicen_colorspace_dict AGMCORE_gget/names_index known{ + Operator/imagemask ne{ + MultipleDataSources{ + /DataSource[DataSource devn_sep_datasource/exec cvx]cvx def + /MultipleDataSources false def + }{ + /DataSource/DataSource load dstDataStr srcDataStrs 0 get filter_devn def + }ifelse + invert_image_samples + }if + currentdict imageormask_sys + }{ + currentoverprint not Operator/imagemask eq and{ + currentdict imageormask_sys + }{ + currentoverprint not + { + gsave + knockout_unitsq + grestore + }if + currentdict consumeimagedata + }ifelse + }ifelse + }ifelse + }{ + currentdict imageormask + }ifelse + }ifelse + cleartomark restore + }ifelse + currentdict/_Filters known{_Filters AGMIMG_flushfilters}if + end + end + end +}def +/imageormask_l2_overprint +{ + currentdict + currentcmykcolor add add add 0 eq{ + currentdict consumeimagedata + }{ + level3{ + currentcmykcolor + /AGMIMG_k xdf + 
/AGMIMG_y xdf + /AGMIMG_m xdf + /AGMIMG_c xdf + Operator/imagemask eq{ + [/DeviceN[ + AGMIMG_c 0 ne{/Cyan}if + AGMIMG_m 0 ne{/Magenta}if + AGMIMG_y 0 ne{/Yellow}if + AGMIMG_k 0 ne{/Black}if + ]/DeviceCMYK{}]setcolorspace + AGMIMG_c 0 ne{AGMIMG_c}if + AGMIMG_m 0 ne{AGMIMG_m}if + AGMIMG_y 0 ne{AGMIMG_y}if + AGMIMG_k 0 ne{AGMIMG_k}if + setcolor + }{ + /Decode[Decode 0 get 255 mul Decode 1 get 255 mul]def + [/Indexed + [ + /DeviceN[ + AGMIMG_c 0 ne{/Cyan}if + AGMIMG_m 0 ne{/Magenta}if + AGMIMG_y 0 ne{/Yellow}if + AGMIMG_k 0 ne{/Black}if + ] + /DeviceCMYK{ + AGMIMG_k 0 eq{0}if + AGMIMG_y 0 eq{0 exch}if + AGMIMG_m 0 eq{0 3 1 roll}if + AGMIMG_c 0 eq{0 4 1 roll}if + } + ] + 255 + { + 255 div + mark exch + dup dup dup + AGMIMG_k 0 ne{ + /sep_tint AGMCORE_gget mul MappedCSA sep_proc_name exch pop load exec 4 1 roll pop pop pop + counttomark 1 roll + }{ + pop + }ifelse + AGMIMG_y 0 ne{ + /sep_tint AGMCORE_gget mul MappedCSA sep_proc_name exch pop load exec 4 2 roll pop pop pop + counttomark 1 roll + }{ + pop + }ifelse + AGMIMG_m 0 ne{ + /sep_tint AGMCORE_gget mul MappedCSA sep_proc_name exch pop load exec 4 3 roll pop pop pop + counttomark 1 roll + }{ + pop + }ifelse + AGMIMG_c 0 ne{ + /sep_tint AGMCORE_gget mul MappedCSA sep_proc_name exch pop load exec pop pop pop + counttomark 1 roll + }{ + pop + }ifelse + counttomark 1 add -1 roll pop + } + ]setcolorspace + }ifelse + imageormask_sys + }{ + write_image_file{ + currentcmykcolor + 0 ne{ + [/Separation/Black/DeviceGray{}]setcolorspace + gsave + /Black + [{1 exch sub/sep_tint AGMCORE_gget mul}/exec cvx MappedCSA sep_proc_name cvx exch pop{4 1 roll pop pop pop 1 exch sub}/exec cvx] + cvx modify_halftone_xfer + Operator currentdict read_image_file + grestore + }if + 0 ne{ + [/Separation/Yellow/DeviceGray{}]setcolorspace + gsave + /Yellow + [{1 exch sub/sep_tint AGMCORE_gget mul}/exec cvx MappedCSA sep_proc_name cvx exch pop{4 2 roll pop pop pop 1 exch sub}/exec cvx] + cvx modify_halftone_xfer + Operator currentdict 
read_image_file + grestore + }if + 0 ne{ + [/Separation/Magenta/DeviceGray{}]setcolorspace + gsave + /Magenta + [{1 exch sub/sep_tint AGMCORE_gget mul}/exec cvx MappedCSA sep_proc_name cvx exch pop{4 3 roll pop pop pop 1 exch sub}/exec cvx] + cvx modify_halftone_xfer + Operator currentdict read_image_file + grestore + }if + 0 ne{ + [/Separation/Cyan/DeviceGray{}]setcolorspace + gsave + /Cyan + [{1 exch sub/sep_tint AGMCORE_gget mul}/exec cvx MappedCSA sep_proc_name cvx exch pop{pop pop pop 1 exch sub}/exec cvx] + cvx modify_halftone_xfer + Operator currentdict read_image_file + grestore + }if + close_image_file + }{ + imageormask + }ifelse + }ifelse + }ifelse +}def +/indexed_imageormask +{ + begin + AGMIMG_init_common + save mark + currentdict + AGMCORE_host_sep{ + Operator/knockout eq{ + /indexed_colorspace_dict AGMCORE_gget dup/CSA known{ + /CSA get get_csa_by_name + }{ + /Names get + }ifelse + overprint_plate not{ + knockout_unitsq + }if + }{ + Indexed_DeviceN{ + /devicen_colorspace_dict AGMCORE_gget dup/names_index known exch/Names get convert_to_process or{ + indexed_image_lev2_sep + }{ + currentoverprint not{ + knockout_unitsq + }if + currentdict consumeimagedata + }ifelse + }{ + AGMCORE_is_cmyk_sep{ + Operator/imagemask eq{ + imageormask_sys + }{ + level2{ + indexed_image_lev2_sep + }{ + indexed_image_lev1_sep + }ifelse + }ifelse + }{ + currentoverprint not{ + knockout_unitsq + }if + currentdict consumeimagedata + }ifelse + }ifelse + }ifelse + }{ + level2{ + Indexed_DeviceN{ + /indexed_colorspace_dict AGMCORE_gget begin + }{ + /indexed_colorspace_dict AGMCORE_gget dup null ne + { + begin + currentdict/CSDBase known{CSDBase/CSD get_res/MappedCSA get}{CSA}ifelse + get_csa_by_name 0 get/DeviceCMYK eq ps_level 3 ge and ps_version 3015.007 lt and + AGMCORE_in_rip_sep and{ + [/Indexed[/DeviceN[/Cyan/Magenta/Yellow/Black]/DeviceCMYK{}]HiVal Lookup] + setcolorspace + }if + end + } + {pop}ifelse + }ifelse + imageormask + Indexed_DeviceN{ + end + }if + }{ + 
Operator/imagemask eq{ + imageormask + }{ + indexed_imageormask_lev1 + }ifelse + }ifelse + }ifelse + cleartomark restore + currentdict/_Filters known{_Filters AGMIMG_flushfilters}if + end +}def +/indexed_image_lev2_sep +{ + /indexed_colorspace_dict AGMCORE_gget begin + begin + Indexed_DeviceN not{ + currentcolorspace + dup 1/DeviceGray put + dup 3 + currentcolorspace 2 get 1 add string + 0 1 2 3 AGMCORE_get_ink_data 4 currentcolorspace 3 get length 1 sub + { + dup 4 idiv exch currentcolorspace 3 get exch get 255 exch sub 2 index 3 1 roll put + }for + put setcolorspace + }if + currentdict + Operator/imagemask eq{ + AGMIMG_&imagemask + }{ + use_mask{ + process_mask AGMIMG_&image + }{ + AGMIMG_&image + }ifelse + }ifelse + end end +}def + /OPIimage + { + dup type/dicttype ne{ + 10 dict begin + /DataSource xdf + /ImageMatrix xdf + /BitsPerComponent xdf + /Height xdf + /Width xdf + /ImageType 1 def + /Decode[0 1 def] + currentdict + end + }if + dup begin + /NComponents 1 cdndf + /MultipleDataSources false cdndf + /SkipImageProc{false}cdndf + /Decode[ + 0 + currentcolorspace 0 get/Indexed eq{ + 2 BitsPerComponent exp 1 sub + }{ + 1 + }ifelse + ]cdndf + /Operator/image cdndf + end + /sep_colorspace_dict AGMCORE_gget null eq{ + imageormask + }{ + gsave + dup begin invert_image_samples end + sep_imageormask + grestore + }ifelse + }def +/cachemask_level2 +{ + 3 dict begin + /LZWEncode filter/WriteFilter xdf + /readBuffer 256 string def + /ReadFilter + currentfile + 0(%EndMask)/SubFileDecode filter + /ASCII85Decode filter + /RunLengthDecode filter + def + { + ReadFilter readBuffer readstring exch + WriteFilter exch writestring + not{exit}if + }loop + WriteFilter closefile + end +}def +/spot_alias +{ + /mapto_sep_imageormask + { + dup type/dicttype ne{ + 12 dict begin + /ImageType 1 def + /DataSource xdf + /ImageMatrix xdf + /BitsPerComponent xdf + /Height xdf + /Width xdf + /MultipleDataSources false def + }{ + begin + }ifelse + /Decode[/customcolor_tint AGMCORE_gget 0]def + 
/Operator/image def + /SkipImageProc{false}def + currentdict + end + sep_imageormask + }bdf + /customcolorimage + { + Adobe_AGM_Image/AGMIMG_colorAry xddf + /customcolor_tint AGMCORE_gget + << + /Name AGMIMG_colorAry 4 get + /CSA[/DeviceCMYK] + /TintMethod/Subtractive + /TintProc null + /MappedCSA null + /NComponents 4 + /Components[AGMIMG_colorAry aload pop pop] + >> + setsepcolorspace + mapto_sep_imageormask + }ndf + Adobe_AGM_Image/AGMIMG_&customcolorimage/customcolorimage load put + /customcolorimage + { + Adobe_AGM_Image/AGMIMG_override false put + current_spot_alias{dup 4 get map_alias}{false}ifelse + { + false set_spot_alias + /customcolor_tint AGMCORE_gget exch setsepcolorspace + pop + mapto_sep_imageormask + true set_spot_alias + }{ + //Adobe_AGM_Image/AGMIMG_&customcolorimage get exec + }ifelse + }bdf +}def +/snap_to_device +{ + 6 dict begin + matrix currentmatrix + dup 0 get 0 eq 1 index 3 get 0 eq and + 1 index 1 get 0 eq 2 index 2 get 0 eq and or exch pop + { + 1 1 dtransform 0 gt exch 0 gt/AGMIMG_xSign? exch def/AGMIMG_ySign? 
exch def + 0 0 transform + AGMIMG_ySign?{floor 0.1 sub}{ceiling 0.1 add}ifelse exch + AGMIMG_xSign?{floor 0.1 sub}{ceiling 0.1 add}ifelse exch + itransform/AGMIMG_llY exch def/AGMIMG_llX exch def + 1 1 transform + AGMIMG_ySign?{ceiling 0.1 add}{floor 0.1 sub}ifelse exch + AGMIMG_xSign?{ceiling 0.1 add}{floor 0.1 sub}ifelse exch + itransform/AGMIMG_urY exch def/AGMIMG_urX exch def + [AGMIMG_urX AGMIMG_llX sub 0 0 AGMIMG_urY AGMIMG_llY sub AGMIMG_llX AGMIMG_llY]concat + }{ + }ifelse + end +}def +level2 not{ + /colorbuf + { + 0 1 2 index length 1 sub{ + dup 2 index exch get + 255 exch sub + 2 index + 3 1 roll + put + }for + }def + /tint_image_to_color + { + begin + Width Height BitsPerComponent ImageMatrix + /DataSource load + end + Adobe_AGM_Image begin + /AGMIMG_mbuf 0 string def + /AGMIMG_ybuf 0 string def + /AGMIMG_kbuf 0 string def + { + colorbuf dup length AGMIMG_mbuf length ne + { + dup length dup dup + /AGMIMG_mbuf exch string def + /AGMIMG_ybuf exch string def + /AGMIMG_kbuf exch string def + }if + dup AGMIMG_mbuf copy AGMIMG_ybuf copy AGMIMG_kbuf copy pop + } + addprocs + {AGMIMG_mbuf}{AGMIMG_ybuf}{AGMIMG_kbuf}true 4 colorimage + end + }def + /sep_imageormask_lev1 + { + begin + MappedCSA 0 get dup/DeviceRGB eq exch/DeviceCMYK eq or has_color not and{ + { + 255 mul round cvi GrayLookup exch get + }currenttransfer addprocs settransfer + currentdict imageormask + }{ + /sep_colorspace_dict AGMCORE_gget/Components known{ + MappedCSA 0 get/DeviceCMYK eq{ + Components aload pop + }{ + 0 0 0 Components aload pop 1 exch sub + }ifelse + Adobe_AGM_Image/AGMIMG_k xddf + Adobe_AGM_Image/AGMIMG_y xddf + Adobe_AGM_Image/AGMIMG_m xddf + Adobe_AGM_Image/AGMIMG_c xddf + AGMIMG_y 0.0 eq AGMIMG_m 0.0 eq and AGMIMG_c 0.0 eq and{ + {AGMIMG_k mul 1 exch sub}currenttransfer addprocs settransfer + currentdict imageormask + }{ + currentcolortransfer + {AGMIMG_k mul 1 exch sub}exch addprocs 4 1 roll + {AGMIMG_y mul 1 exch sub}exch addprocs 4 1 roll + {AGMIMG_m mul 1 exch sub}exch 
addprocs 4 1 roll + {AGMIMG_c mul 1 exch sub}exch addprocs 4 1 roll + setcolortransfer + currentdict tint_image_to_color + }ifelse + }{ + MappedCSA 0 get/DeviceGray eq{ + {255 mul round cvi ColorLookup exch get 0 get}currenttransfer addprocs settransfer + currentdict imageormask + }{ + MappedCSA 0 get/DeviceCMYK eq{ + currentcolortransfer + {255 mul round cvi ColorLookup exch get 3 get 1 exch sub}exch addprocs 4 1 roll + {255 mul round cvi ColorLookup exch get 2 get 1 exch sub}exch addprocs 4 1 roll + {255 mul round cvi ColorLookup exch get 1 get 1 exch sub}exch addprocs 4 1 roll + {255 mul round cvi ColorLookup exch get 0 get 1 exch sub}exch addprocs 4 1 roll + setcolortransfer + currentdict tint_image_to_color + }{ + currentcolortransfer + {pop 1}exch addprocs 4 1 roll + {255 mul round cvi ColorLookup exch get 2 get}exch addprocs 4 1 roll + {255 mul round cvi ColorLookup exch get 1 get}exch addprocs 4 1 roll + {255 mul round cvi ColorLookup exch get 0 get}exch addprocs 4 1 roll + setcolortransfer + currentdict tint_image_to_color + }ifelse + }ifelse + }ifelse + }ifelse + end + }def + /sep_image_lev1_sep + { + begin + /sep_colorspace_dict AGMCORE_gget/Components known{ + Components aload pop + Adobe_AGM_Image/AGMIMG_k xddf + Adobe_AGM_Image/AGMIMG_y xddf + Adobe_AGM_Image/AGMIMG_m xddf + Adobe_AGM_Image/AGMIMG_c xddf + {AGMIMG_c mul 1 exch sub} + {AGMIMG_m mul 1 exch sub} + {AGMIMG_y mul 1 exch sub} + {AGMIMG_k mul 1 exch sub} + }{ + {255 mul round cvi ColorLookup exch get 0 get 1 exch sub} + {255 mul round cvi ColorLookup exch get 1 get 1 exch sub} + {255 mul round cvi ColorLookup exch get 2 get 1 exch sub} + {255 mul round cvi ColorLookup exch get 3 get 1 exch sub} + }ifelse + AGMCORE_get_ink_data currenttransfer addprocs settransfer + currentdict imageormask_sys + end + }def + /indexed_imageormask_lev1 + { + /indexed_colorspace_dict AGMCORE_gget begin + begin + currentdict + MappedCSA 0 get dup/DeviceRGB eq exch/DeviceCMYK eq or has_color not and{ + {HiVal mul 
round cvi GrayLookup exch get HiVal div}currenttransfer addprocs settransfer + imageormask + }{ + MappedCSA 0 get/DeviceGray eq{ + {HiVal mul round cvi Lookup exch get HiVal div}currenttransfer addprocs settransfer + imageormask + }{ + MappedCSA 0 get/DeviceCMYK eq{ + currentcolortransfer + {4 mul HiVal mul round cvi 3 add Lookup exch get HiVal div 1 exch sub}exch addprocs 4 1 roll + {4 mul HiVal mul round cvi 2 add Lookup exch get HiVal div 1 exch sub}exch addprocs 4 1 roll + {4 mul HiVal mul round cvi 1 add Lookup exch get HiVal div 1 exch sub}exch addprocs 4 1 roll + {4 mul HiVal mul round cvi Lookup exch get HiVal div 1 exch sub}exch addprocs 4 1 roll + setcolortransfer + tint_image_to_color + }{ + currentcolortransfer + {pop 1}exch addprocs 4 1 roll + {3 mul HiVal mul round cvi 2 add Lookup exch get HiVal div}exch addprocs 4 1 roll + {3 mul HiVal mul round cvi 1 add Lookup exch get HiVal div}exch addprocs 4 1 roll + {3 mul HiVal mul round cvi Lookup exch get HiVal div}exch addprocs 4 1 roll + setcolortransfer + tint_image_to_color + }ifelse + }ifelse + }ifelse + end end + }def + /indexed_image_lev1_sep + { + /indexed_colorspace_dict AGMCORE_gget begin + begin + {4 mul HiVal mul round cvi Lookup exch get HiVal div 1 exch sub} + {4 mul HiVal mul round cvi 1 add Lookup exch get HiVal div 1 exch sub} + {4 mul HiVal mul round cvi 2 add Lookup exch get HiVal div 1 exch sub} + {4 mul HiVal mul round cvi 3 add Lookup exch get HiVal div 1 exch sub} + AGMCORE_get_ink_data currenttransfer addprocs settransfer + currentdict imageormask_sys + end end + }def +}if +end +systemdict/setpacking known +{setpacking}if +%%EndResource +currentdict Adobe_AGM_Utils eq {end} if +%%EndProlog +%%BeginSetup +Adobe_AGM_Utils begin +2 2010 Adobe_AGM_Core/ds gx +Adobe_CoolType_Core/ds get exec Adobe_AGM_Image/ds gx +currentdict Adobe_AGM_Utils eq {end} if +%%EndSetup +%%Page: (Page 1) 1 +%%EndPageComments +%%BeginPageSetup +%ADOBeginClientInjection: PageSetup Start "AI11EPS" 
+%AI12_RMC_Transparency: Balance=75 RasterRes=300 GradRes=150 Text=0 Stroke=1 Clip=1 OP=0 +%ADOEndClientInjection: PageSetup Start "AI11EPS" +Adobe_AGM_Utils begin +Adobe_AGM_Core/ps gx +Adobe_AGM_Utils/capture_cpd gx +Adobe_CoolType_Core/ps get exec Adobe_AGM_Image/ps gx +%ADOBeginClientInjection: PageSetup End "AI11EPS" +/currentdistillerparams where {pop currentdistillerparams /CoreDistVersion get 5000 lt} {true} ifelse { userdict /AI11_PDFMark5 /cleartomark load put userdict /AI11_ReadMetadata_PDFMark5 {flushfile cleartomark } bind put} { userdict /AI11_PDFMark5 /pdfmark load put userdict /AI11_ReadMetadata_PDFMark5 {/PUT pdfmark} bind put } ifelse [/NamespacePush AI11_PDFMark5 [/_objdef {ai_metadata_stream_123} /type /stream /OBJ AI11_PDFMark5 [{ai_metadata_stream_123} currentfile 0 (% &&end XMP packet marker&&) /SubFileDecode filter AI11_ReadMetadata_PDFMark5 + + + + application/postscript + + + Web + + + + + Adobe Illustrator CS3 + 2017-04-03T09:56:19+02:00 + 2017-04-03T10:02:52+02:00 + 2017-04-03T10:02:52+02:00 + + + + 256 + 76 + JPEG + /9j/4AAQSkZJRgABAgEASABIAAD/7QAsUGhvdG9zaG9wIDMuMAA4QklNA+0AAAAAABAASAAAAAEA AQBIAAAAAQAB/+4ADkFkb2JlAGTAAAAAAf/bAIQABgQEBAUEBgUFBgkGBQYJCwgGBggLDAoKCwoK DBAMDAwMDAwQDA4PEA8ODBMTFBQTExwbGxscHx8fHx8fHx8fHwEHBwcNDA0YEBAYGhURFRofHx8f Hx8fHx8fHx8fHx8fHx8fHx8fHx8fHx8fHx8fHx8fHx8fHx8fHx8fHx8fHx8f/8AAEQgATAEAAwER AAIRAQMRAf/EAaIAAAAHAQEBAQEAAAAAAAAAAAQFAwIGAQAHCAkKCwEAAgIDAQEBAQEAAAAAAAAA AQACAwQFBgcICQoLEAACAQMDAgQCBgcDBAIGAnMBAgMRBAAFIRIxQVEGE2EicYEUMpGhBxWxQiPB UtHhMxZi8CRygvElQzRTkqKyY3PCNUQnk6OzNhdUZHTD0uIIJoMJChgZhJRFRqS0VtNVKBry4/PE 1OT0ZXWFlaW1xdXl9WZ2hpamtsbW5vY3R1dnd4eXp7fH1+f3OEhYaHiImKi4yNjo+Ck5SVlpeYmZ qbnJ2en5KjpKWmp6ipqqusra6voRAAICAQIDBQUEBQYECAMDbQEAAhEDBCESMUEFURNhIgZxgZEy obHwFMHR4SNCFVJicvEzJDRDghaSUyWiY7LCB3PSNeJEgxdUkwgJChgZJjZFGidkdFU38qOzwygp 0+PzhJSktMTU5PRldYWVpbXF1eX1RlZmdoaWprbG1ub2R1dnd4eXp7fH1+f3OEhYaHiImKi4yNjo +DlJWWl5iZmpucnZ6fkqOkpaanqKmqq6ytrq+v/aAAwDAQACEQMRAD8A9U4q7FWGebvzi/LjynK9 
vrOtQreps1jAGuJwf5XSIPwP+vTFXnl1/wA5gfl9HKUt9L1SdB/uwpAgPyBmJp88Vdbf85gfl88g W40rVYUP7apbvT5j1lNPlirNfLn5+flTr8iQ22uxWty5AEF8rWpqegDyhYyT4BsVegKyuodCGVgC rA1BB6EHFW8VdirsVdirsVSDzh588peT7EXnmHUYrKN6+jEavNKR2jiQM7e9BQd8VeRX/wDzmF5K imKWOjajcxAkepJ6MNfcAPIfvpiqceX/APnKv8r9TmWG+N5oztt6l3CHir2+OBpSPmVAxV61pmqa ZqllHfaZdw3tlMKxXNvIssbfJkJBxVFYq7FXYq7FXYq7FXYq7FXzR+fv/OQfmXSPM1x5V8pzrYiw CrqGohEkleZ1DGOPmGVFQNQmleXhTdV4ddeefzL8xSmCbW9V1Fnr/oyTzupr1pEh4/hiqBeLzl5S 1S2vZYr/AEPU2Hr2s0qy20zLy+2vIKzKWHyOKvuX8oPOV15x/LzSNdvQBfTo8V5xFFMsEjRM4Hbn w5U7VxVlGp6nY6Xp1zqV/MtvZWcTz3MzdEjjBZjt7DFXxx+Zf/OSnnXzJfzW/l+6l0LQlJWBLZuF 1Io/blmX41J/lQgDpv1xVgkXm78yNKkj1FNY1e0aYhkuWnuFWQj3ZqP+OKvof8hv+cib3zDqMPlX ze6Nqk/w6bqiqsYnYCvpTKoChyPssoAPTr1VfQFzc29rby3NzKkFvAjSTTSMFREUVZmY7AAdTir5 G/OP/nJPWdfuZ9G8ozyaboKExyX0ZKXN1Q0JDDeOI9gPiI+1144qw7yH+RP5iedYlvbKzWz0yTdd Sv2MUbg94wA0knzVae+KvWdP/wCcNIfTRtR80MZDQyR29oAo8QHeU1+fH6MVbvf+cM7coxsvNTq9 TxWazDAjsCyzLT50xV575u/5xj/M3QIpLm0gh1yzjBZnsGJmCjxgcI5PsnLFVT/nHz81/MegectJ 8vTXUt15f1W4jsTYyMXWGSdgkUkNa8KSMOQGxFe9CFX2lirsVdirsVYb+Z/5peXvy/0M32pP6t/O GXTdOQ/vJ5FH/CopI5v29zQFV8XgeefzV88kktqGt6gxJJPCGCFPvEcUYP8Atsd1Xt+j/wDOG9mb NG1fzJIbpgC6WtuojU0FVDSMxbfvRflirH/N3/OInmjT7Z7ny3qkOtcAW+pyp9VnI8EJaSNj82XF Xm3kjz/52/LLzJJ9W9W3MUvDVdFuQyRycTRkkjYfA47OBUfLYqvt/wAk+cdI84+WrPX9Kcm2ul+O JvtxSrtJE4H7SNt79RscVT3FXYq7FXYq7FXYq7FXwP8Anj/5NrzR/wAxrf8AEVxV9w+ULa3t/K+k pBEkKG0gYrGoUVMS1NB3xV8w/wDOYn/KcaL/ANswf9REuKvYP+cYf/JO6V/xmu/+oh8VQ/8AzlNq VzZ/lLcxQEqt/d21tOQafu+RlI+kxAYq8e/5xO8o6LrXm7UtT1OBLp9GgjksoZQGUTTOQJeJ2JQJ tXoTXrTFX1pqmlabq1hNp+p2sV5Y3C8JreZQ6MPcHFX59eZbU+U/zB1O20uU10LVJksZSeTD6rcH 0iT/ADDgK++KvoD/AJyy/MmW2t7byNps3F7pBda0yHf0q/uYCR/MQXYeAXscVYz/AM45/kxY67H/ AIv8x263GmRyMml2Eg5RzvGeLyyKRRo0b4Qvdga7DdV9WQ3IVQhUBVFBxFAAPbFWFeaPz1/K7y1d y2Oo60j38JKy2trHJcOrDqrGNWRW7UZgcVSrTf8AnJn8oL2YRNqstmzGitc20yr9LIrqPpxVHfmr +ZOkaZ+VOr6/ouowXhuIvqWn3NrKsg+sXH7sFWQkco1YyU/ycVfEGi6veaNq9nq1kVF7YTJcWrOo 
dVliYMjcTseLAHfFWWan+eP5takSbjzPex8u1qy2o+j6usXjiqWHzj+Zd1+/Oua1Pz/3b9aunrTb 7XI16YqmGkfnP+a2jTA2/mW/cxmhiu5DdKKbceFx6gHTFXu/5Xf85VWGqTJpfneOLTLgj91q8NRb OQOkqHkYyf5gSpP8uKvBPN3mLX/zN/MR7lFaS61S5S00u0J2ihLcIY/BQAasfGrYq+tPyo/J3QPI liy2rNca1dRKmoao32jTfhEhqqIG3p1O3KtBRVmd3oWqzBvR8w39qT9n047BgP8AkZaufxxVBwWn nywkJ+vWet2xNfTuIjZXAHtLD6sTfL0l+eKvKP8AnJr8t/8AEHlVPONjp0kOu6UAL6EBXkksqnkW ETOG9EnkCDsvKvsq82/5xf8AzNby75p/wvfP/uJ8wSqsLE7RXtOMZHtNtGffj4HFXrv5s/8AOSmg +ULibR9CiTWdei+GZixFpbv/ACyMu8jDuiHbuwO2KvnLzB+ef5r69Mxn8wXVtHIaLbWDfVEAP7I9 Hi7D/WY4qlTeZ/zOijE7atraR7lZTcXYX4dzRuXbFU+8uf8AOQf5saFIpTXJNRhX7VvqQ+tK3zd/ 333OMVei6v8A85h6zLoMEWlaJDa644Iu7qZzLbodxWGMcWJI3+Nvh6fFiryfVvzc/NPXbkm58x6g zyHaC1la3jPfaK39NdvliqEk82/mdZcZ5NZ1u2oA6ytc3cezbAhiw2OKpBqWp6hqd9Nf6hcPdXtw 3Oe4lYs7tSlWY9Tir9FfK/8AyjWk/wDMFb/8mlxV8vf85gqz+e9ERAWZtNAVQKkk3ElABiryLS/O Hn3y3HHBpusalpUAJZLeKeaGImpJPp1CHdvDFWSax+eXnTzD5Pu/K/mV49VtZ+DwXjIsVzDJE4ZD yjAVxsQ3JakH7WKpN+W35ja55B8xLrOlBJQ6GG8s5a+nNCSCVNNwQRVW7H2qMVey63/zmNez6VJD o/l5bLU5E4rdT3HrxxMR9pYxHHzIPSpp4jtirxXyJYW/mT8xNFs9YuCItU1GJbyZ6s0jSygsu37U rHjXxNcVRfnrUL7zp+amqSwMJZ9V1M2tjyJpw9QW9utd9ggUYq+4dD0ey0XRrHSLFeFpYQR28APX jGoUE+JNKk+OKvJ/+cmPzHv/ACz5ctdE0mVoNR1z1BLcoaPFaxcQ/Eg1VpC4UHwDd6Yq+RcVdiq4 SSCMxhiI2IZkrsStQCR4jkcVeh/kB5e0XzB+aGm6ZrNol7YSR3DyW0leLNHCzrWhHRhir7X0ryl5 V0hVGlaPZWAX7P1a3iiOxr1RR33xVNcVSjzD5Q8r+Y7ZrbXNLttQiYUrNGrOvuj/AG0PupBxV8c/ n1+Tg/L7WYLnTWeby7qfL6o0nxPDKm7wO3fY8kY9RUfsklVlf/OK35f2Go3915xvHZpNJm+rafAp IAleKskj060SQBR7mvbFX04+k/WxKxvLmDmOI9CT0+PuKDFUBceSpJVonmLWYDv8UdyhO/8ArxON sVWw+X/Odm1bXzQbuMVpFqdlDNt4c7Y2bfTviqdWX6WdGi1OG248SC8Duweu28ToOII/y2xV8Efm b5buvKH5i6zpkSG0W1vHn00xtQrbyN6tsyMtNxGy9OhxVOvyZ/JvU/zF1aVpJGs9AsmX9IX4ALlm 3EMNdjIw6k7KNz2BVfY3lD8uPJXlG1SDQdKgtpEFGuyoe5fxLzNVzXwrTwAxVkmKvnz/AJy3tvLN l5TsJF021XXtRvQqXyxIs4hiRml/eABj8TIKHxxV57+RP/OP7edI18w+YjJb+W1crbQRnjLdsho1 G/YiBFCw3J2FOuKvrDy95V8t+XLNbPQtNt9Ot1FCsCBS1O7v9pz7sScVTRlVlKsAVIoQdwQcVfJP 
/OW1v5asfMuj2Ol6dbWeoNbSXeozW8SxtIJX4RepwAqR6TnfffFX1J5X/wCUa0n/AJgrf/k0uKo/ 6tb+v9Y9JPrHHh63Ec+NSePLrTfpirri2trmFoLmJJ4XFHikUOrD3Vqg4q+fvz4/5x60GbQ7zzP5 Rs1sNRsUa4vdOgFIZ4VHKRo4xskiLvRNm8K4q81/5xWsbK8/M2WG8t47mL9G3DenMiyLUSRUNGBF cVfXn+F/LX/Vpsv+keL/AJpxV8D+VQF/MrRwuwGs21KbUpdLiqp+VkSXH5meWBNMIwdVtJDI+/Jk mVwu5G7sOP04q+9sVfJP/OWTu35kWKkEKmkwhfA1uLgkjFXiuKsy/Kn8u/8AH3md9D/SH6N4W0lz 9Y9H16+myLx4c4uvPrXFXr3/AEJ3/wB/d/3Lv+zrFWMfkLo/6F/Pq50j1frDaWdRtFuOPDmYC0XM LVuPIDpU4q+vFuZR1ofnir4b82/md+ZVn5v1aGHzVqqpZ31xDCi3cyxhY5mVR6YbgdvEYq+pP+cf /wAwtS86eQEvdZkEmqWNzJY3NxQJ6vBEkWQhaLUpKAadxiqF/wCcm9Kh1D8oNUmI5yadNbXcNKGj essLH/kXM2KpB/ziz5etbD8um1dGL3Os3MjzbmipbM0KJxrQbhmrSpr7DFXrJ8s6NemRrqKSUtQM DPPQgdKgPTFUj1Ty7+VVjIx1K5t7J+jLNqUsFKUH2TOoGKoe3l/KOB/9E80RQM1AqxeYJwtaUFI/ rRT/AIXFWT6PqGhu4jstcS/BHFYvrMM5LePIVkJ/2WKvmH/nMKGzXz3o8sZH1uTTFE6jrwWeX02P zqw+jFX0V+VHlC28p+QNH0eJAk6wJPfMOr3MwDzEnvRjxHsAMVYd/wA5DfnDe+Q9ItNP0Xj+n9VD tFO4Di3hjIDS8GqGZiaJUU2JPShVfLEn5s/mfJctct5r1YSMeRVbydY6+0asEA9gMVRF15o87fmZ rnl3Qtb1Fr6b6wtlYyuiKyfW5ERmYoq8vsgktvQYq+89J0ux0nS7TS7CMQ2VlClvbxD9mONQqj7h irwD/nI7889d8vasPKPle4+p3kcSy6pfqFMq+qvKOGItXgeBDM1K7ihG+KvBLb82/wA0Le4FxH5r 1VpAa8ZLuaVPH+7kZk/DFUq81+bvMHmzV21jXrn63qDRpE03BI6rGvFfhjCqPoGKv0G8r/8AKNaT /wAwVv8A8mlxV89/85UefPOWg+Z9K07RNZutMs57D1pY7SQwlpDLInIulH+yo74qgv8AnGn84PN2 oecR5V1/UptUtL+GV7OS7cyzxzwJ6lBK5LsrRo1VJO9CKb1VfUbojoyOoZGBVlYVBB2IIOKvkr/n HDTo9M/PbWtNj/u7KDULdK9aRXKIP+I4q+tsVfnp5X/8mXpH/bZt/wDqKXFXab6flT8y7X62Cseg 61H9YB6hbO6HP6f3eKvvoEEVG4PQ4q+af+cufK84vNF80RIWhaNtNunrsrIzTQbf5XOT7sVfOuKv ZP8AnFT/AMmbN/2zbj/k5Fir68xV8s/lT/601r3/ADGax/yefFX1Nir8+/PX/KbeYf8AtpXn/J98 VfSP/OI7N/gfV1r8I1NiB7m3ir+rFWd/nd/5KjzL/wAwn/G64qwb/nElbg+RNUke5d4P0m0cNqac IysETM67cvj9QVFafDt3qq9jbyxol9IwvoXu0ap9GeaaWLv/ALrdynfwxVRubf8ALfy0oa4h0jRw BVeSW1saewopPXtiq218/wDkYqRYXqTqT0s4JpgTTr+5jeu3fFU4s9bs7sj0o7pa9DLaXUI8Ossa Yq+cv+cxrSxj1TyjfPHWSVLyG5I2ZoYXgZV+j1np88VfTPrxfzDFXyP/AM5gRzf8rB0meh+rPpMa 
Rv8AsmRLmcuB7gOtfoxV4TirN/ySMY/NjyuZKcfrydRXehp+OKvvnmn8w+/FXwd+fiTp+b/mYT7u bhGX/UaGNo/+EIxVgGKuxV+jvlt1j8u6XG54ulpArKeoIiUEYq+Xv+cwvi86aJIN0Om8QfdZ5Cf+ JDFWHf8AON7hPzo8useg+udP+YGfFX2+bteynFXzJ+TJr/zkr53l6FJNXcD56ii/8bYq+lzdSnwH yGKvgTQkWP8ANXTkQUVNdhVR7C8AGKs4/wCco/JUuh/mJJrMUZGneYEFzG4Hwi4jASdPmTxk/wBl ir27/nH/APMaDzb5Kgs7mWuuaKiWt6jH4njUUhnFSSeSCjH+YHxGKs/8waBpPmDR7rR9Xt1utPvE 4TwtUVoQQQRuGVgCpHQ4q+bvMv8AziRr8V4zeWtXtrmyYsVjv+cMyD9leUSSJIfFqJ8sVZR+R/5G +cfJPm+TW9ZmsmtWtJbYR28sjyc3eNgaNGi8fgP7WKvecVfLP5U/+tNa9/zGax/yefFX1Nir8+/P X/KbeYf+2lef8n3xV9If84j/APKE6x/20j/yYixVnn53f+So8y/8wn/G64q+fv8AnFrzLqln5+/Q Ud0F0zVIZXntHoQ8sEZdGjqQQ4ANadV6jYEKvqy60mG8lrcz3DQ/74jmeBKdDUwmNmB8GYjFUVp/ ljyzp372x0y0tn+000cMauf8pnpyJ9ycVQd75+8qW1ybOO+F9fj/AI8dPR72cf60duJCv+ypiqLs NR1q9kVzph0+0J3a8kT6wR7QwmVRX/KkBH8uKvlX/nLrVzdfmJY6ckvOHTtOj5RbUSaeSR36d2j9 PFX0J+WHm6DzZ5G0nWElWS5eBIr8L+xdRKFmUrUlfiHIV/ZIPfFUo/OT8qLf8wtCggjnW01jT2eT T7pwSnxgB4pAN+D8V+IAkEd9wVXz5N/zi7+aiTNGsVlKgNBKlyAp9xyVW+8Yq7Wfye83flfbaX53 1K4tbptO1S0f6naNI1FVjLyeR0jp8UYSgB+1ir660zUrLVNOtdSsZRNZXkST28o6NHIoZTv7HFXl P53/AJFSeermDWtGuYrTXIIxBLHcclhniViVqyKzK68jQ8TXYbUxV41H/wA4vfmo8oRobKNT/uxr kcR/wKs34Yqx78zvyk1n8vYtJ/Sl5b3U2qLOeNtzKRmApUcnCFqiUfsjFX29p3/HPtf+MMf/ABEY q+Y/+cvP+Un0H/mCk/5OnFWF/wDOOv8A5OTy/wD9Hn/UDPir7YxV82/kv/60f55+erf91KPFX0li r4M0X/ybFh/23ov+owYq+2fzN/L3S/PnlW40S9Iimr6theU5NBcKCFcDuN+LDuDir4nmg89/lX50 HMPpmtWRPBx8UU8RNKivwyxPT/aI2VfQvkj/AJyl8n6pBHB5ojfRNRAAedVea0dthVSgaRKnejLQ D9o4q9KtPzH/AC+vGRLbzNpcskn2IheQcztX7HPl+GKty/mR+XkTvHL5o0lJIyVdGvrYMGBoQRzr UYqxvXP+cg/yo0j1FOsi/nReQhsY3n5eyygCGvzfFXgP5WeePLtv+d+oeaNRuRp2lX8uozxyXAoV Fy7PGr8OfxfFTFX0Z/yu78qP+pltP+H/AOacVfFvm27tr3zXrV5auJba5v7maCQVoySTMysK77g4 q90/5xq/MLyX5a8qapZ67q0On3M1+Zoo5eVWQwovIUBHVTirMfzZ/Nn8udW/LnXtO07Xre5vrm34 QQJz5O3NTQVXFXyr5d1/UvL2uWWtaZJ6V9YSiWFjXiSNirAEVV1JVh3BxV956JqkPmny5putafeS 21vqFuk4EBiYguAWjYuklGRqqadCMVS/VNN8mQyGHU7e61+9AquntJcX7b7gtbu5giU/zOFX3xVH 
aZb+bWjEGlaXpnlfTAfhRgLmenj6FsYYEP8Az1fFUv8AzB/MGw/LjyxPf6vqralq06sNMsJBCjSz duCRIjCJSauzE0HepFVXxhYaZ5z/ADD803P1ZJdY168WW7uGJHJhGvJiSaKooAqjYdFHbFU8/K/8 1vMX5b6xcRrAbjTpn4anpM1YzzjPHkhIrHKvQ7b9CNgQq+ofLP57/lfr1uJE1qHTZgAZLbUmW0dC e3KQ+k3+wc4qyaTzt5MjtRdya9pyWpCsJ2u4BHxanE8y9KGu2KvKfz7/ADN/LnU/y+1XQLTXbe91 S6EL2kdnW5UtFPHIayx1iX4VI3evsemKvKPyb/PnUPI0f6I1SGTUfLbMXjhjI9e1ZjyYwciFZWJq 0ZIFdwRvyVfSmi/nJ+WGr2yz23mOyh5bGK8lW0kB8OE/pk/RtiqcXfnfyXZxiS71/TrdGNFaW7gQ E9aAs43xV4B/zkz5+8g+ZNE02w0XU49R1exvObGAO0awSRMHpLT02q4TZWOKvW7D87PyqSxt1bzJ aqyxIGU8wQQo2+zirwL/AJyX84eWfM3mDR59B1CPUIbe0dJni5UVjISAagdsVYr+R+t6Tof5oaLq mrXKWen2/wBa9a5krxXnaTRrWgPVmAxV9Xf8ru/Kj/qZbT/h/wDmnFXhP5Wed/Kmlfnn5u13UdSi ttIvzqX1O8flwk9a/SWOlAT8SKSMVe7f8ru/Kj/qZbT/AIf/AJpxV8heW5Eu/wA1NKktj6qXGuwN CVB+IPeKVoPeuKv0GxVIPOXkTyr5y0z9HeYLFLuJamGX7M0TH9qKRaMp237HvXFXz15r/wCcPdUj leXyprUVxASStpqIMUijw9aJXVz/ALBcVYHcf84z/nNFKUTQ0nUdJY7yzCn5epKjfhiq2L/nGn86 HcK2gLGD1dryyIH/AAMzH8MVZHov/OIv5hXbK2p32n6bEftDm9xKPkqKEP8AyMxVF69/zh/5yt7h Romr2WoWxAq9yJLWUNTf4FE60r/l4qlf/QpX5p/790z/AKSJP+qWKsm8r/8AOHupSW90/mfWYraZ omWyi08NMFlP2XmaVY6qvdFG/wDMMVY9df8AOI35lxzukF3pc8IPwS+tKhI7VUxbHFVNf+cSfzSL AGfS1BNCxuJaD32hOKpp5p/5xN1/SvJ0d/pV5+l/MUDs99YRLwjaHj0tuVGd0I705A7AEUZV5p5R /Mz8wfIF09np11JbQxS8rnSLxC0XMbMrxPR4yf2uBU++KvVrL/nLx4LdUk8oxGU1aVob30kZ23Zg ht3IqfFjiqWeYP8AnLbzbdxNFoek2uk81KmaV2u5VJ6MlRDGCP8AKRsVeb6bo35ifmf5lkeFLnWt UmI+s3cp/dQoTtzc0jiQV2UU8FHbFX2D+Tv5P6T+XejOgdbzXb0KdS1ClAeO4iiB3WNa/NjuewCq D/M78gPJvnqR7+jaTrrDfUbZQRIe3rxGgkp4gq3virwLXv8AnFH80LCVv0YLTWYK/u2hmWCQj/KS 49NQfk5xVj6/847/AJytN6Q8tycqkVNxaBdv8szcfxxVkug/84m/mXfOp1OSy0iE/b9SX15R8lhD oT/sxirP9T/5w70FtDhi0zXLiPWogTLdXEatbzE9B6SkNEPfm304q8x1f/nFv83LGUra2VrqiDpJ a3USAj5XJgb8MVSu3/5x1/OWd+K+XHTpVpLi0QCv+tKK/RirJdP/AOcSPzLuLN5rm602ynABjtpZ pHYnuGaKN0XbwJ/jiqn/ANClfmn/AL90z/pIk/6pYqjtG/5xC8+T30a6tqOn2VjUetLC8k8vHvwQ pGpPzYYqjvNf/OIPmOHUHfytqdtdaax+CO/ZorhPZmjjZH+fw/LFUj/6FK/NP/fumf8ASRJ/1SxV 
Vk/5xF/M1YUdbzSXkb7UQnnBX6TAF/HFVL/oUr80/wDfumf9JEn/AFSxV6V+Tf8AzjPc+Vtfg8x+ aLyC6vrI87CxtObxJIRQSSPIqFmSuyhaV3rir37FXYq7FXYq7FXYq7FXYq7FXYq7FXYq87/Nz/lT H1Bf+VhfU+VP9H5cvrtP+KvQ/wBI4/L4fHFXzRqP/QsX1pvQ/wAW+n2+rfUfT+j1/wB59+Ksr8k/ 9ClfWI/rf6Q9eq+n+m/V4cv8r6p+5+fP4cVfTflj/C36Hh/wx9S/Q/8Aun9Hel9XrQVp6Pw18cVT XFXYq7FXYq7FXYq7FXYq7FXYq7FXYq7FXYq7FXYq/wD/2Q== + + + + + + uuid:88B45AE7E519E7119A76BA5BC76AA065 + uuid:24AD93F6E619E7119A76BA5BC76AA065 + + uuid:87B45AE7E519E7119A76BA5BC76AA065 + uuid:86B45AE7E519E7119A76BA5BC76AA065 + + + + Web + + + + 14400.000000 + 14400.000000 + Pixels + + 1 + False + False + + + Black + + + + + + Groupe de nuances par défaut + 0 + + + + Blanc + RGB + PROCESS + 255 + 255 + 255 + + + Noir + RGB + PROCESS + 0 + 0 + 0 + + + Rouge RVB + RGB + PROCESS + 255 + 0 + 0 + + + Jaune RVB + RGB + PROCESS + 255 + 255 + 0 + + + Vert RVB + RGB + PROCESS + 0 + 255 + 0 + + + Cyan RVB + RGB + PROCESS + 0 + 255 + 255 + + + Bleu RVB + RGB + PROCESS + 0 + 0 + 255 + + + Magenta RVB + RGB + PROCESS + 255 + 0 + 255 + + + R=193 V=39 B=45 + RGB + PROCESS + 193 + 39 + 45 + + + R=237 V=28 B=36 + RGB + PROCESS + 237 + 28 + 36 + + + R=241 V=90 B=36 + RGB + PROCESS + 241 + 90 + 36 + + + R=247 V=147 B=30 + RGB + PROCESS + 247 + 147 + 30 + + + R=251 V=176 B=59 + RGB + PROCESS + 251 + 176 + 59 + + + R=252 V=238 B=33 + RGB + PROCESS + 252 + 238 + 33 + + + R=217 V=224 B=33 + RGB + PROCESS + 217 + 224 + 33 + + + R=140 V=198 B=63 + RGB + PROCESS + 140 + 198 + 63 + + + R=57 V=181 B=74 + RGB + PROCESS + 57 + 181 + 74 + + + R=0 V=146 B=69 + RGB + PROCESS + 0 + 146 + 69 + + + R=0 V=104 B=55 + RGB + PROCESS + 0 + 104 + 55 + + + R=34 V=181 B=115 + RGB + PROCESS + 34 + 181 + 115 + + + R=0 V=169 B=157 + RGB + PROCESS + 0 + 169 + 157 + + + R=41 V=171 B=226 + RGB + PROCESS + 41 + 171 + 226 + + + R=0 V=113 B=188 + RGB + PROCESS + 0 + 113 + 188 + + + R=46 V=49 B=146 + RGB + PROCESS + 46 + 49 + 146 + + + R=27 V=20 B=100 + RGB + PROCESS + 27 + 20 + 100 + + + R=102 V=45 B=145 + RGB + PROCESS 
+ 102 + 45 + 145 + + + R=147 V=39 B=143 + RGB + PROCESS + 147 + 39 + 143 + + + R=158 V=0 B=93 + RGB + PROCESS + 158 + 0 + 93 + + + R=212 V=20 B=90 + RGB + PROCESS + 212 + 20 + 90 + + + R=237 V=30 B=121 + RGB + PROCESS + 237 + 30 + 121 + + + R=199 V=178 B=153 + RGB + PROCESS + 199 + 178 + 153 + + + R=153 V=134 B=117 + RGB + PROCESS + 153 + 134 + 117 + + + R=115 V=99 B=87 + RGB + PROCESS + 115 + 99 + 87 + + + R=83 V=71 B=65 + RGB + PROCESS + 83 + 71 + 65 + + + R=198 V=156 B=109 + RGB + PROCESS + 198 + 156 + 109 + + + R=166 V=124 B=82 + RGB + PROCESS + 166 + 124 + 82 + + + R=140 V=98 B=57 + RGB + PROCESS + 140 + 98 + 57 + + + R=117 V=76 B=36 + RGB + PROCESS + 117 + 76 + 36 + + + R=96 V=56 B=19 + RGB + PROCESS + 96 + 56 + 19 + + + R=66 V=33 B=11 + RGB + PROCESS + 66 + 33 + 11 + + + + + + Groupe de couleurs Web + 1 + + + + R=236 V=28 B=36 + RGB + PROCESS + 236 + 28 + 36 + + + R=0 V=169 B=157 + RGB + PROCESS + 0 + 169 + 157 + + + R=102 V=45 B=145 + RGB + PROCESS + 102 + 45 + 145 + + + R=139 V=146 B=152 1 + RGB + PROCESS + 139 + 146 + 152 + + + + + + Niveaux de gris + 1 + + + + N=100 + GRAY + PROCESS + 255 + + + N=90 + GRAY + PROCESS + 229 + + + N=80 + GRAY + PROCESS + 204 + + + N=70 + GRAY + PROCESS + 178 + + + N=60 + GRAY + PROCESS + 153 + + + N=50 + GRAY + PROCESS + 127 + + + N=40 + GRAY + PROCESS + 101 + + + N=30 + GRAY + PROCESS + 76 + + + N=20 + GRAY + PROCESS + 50 + + + N=10 + GRAY + PROCESS + 25 + + + N=5 + GRAY + PROCESS + 12 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + % &&end XMP packet marker&& [{ai_metadata_stream_123} <> /PUT AI11_PDFMark5 [/Document 1 dict begin /Metadata {ai_metadata_stream_123} def currentdict end /BDC AI11_PDFMark5 +%ADOEndClientInjection: PageSetup End "AI11EPS" +%%EndPageSetup +1 -1 scale 0 -840 translate +pgsv +[1 0 0 1 0 0 ]ct +gsave +np +gsave +0 0 mo +0 840 li +1096 840 li +1096 0 li +cp +clp +[1 0 0 1 0 0 ]ct +904.575 334.257 mo +904.575 320.361 898.182 311.771 892.811 307.004 cv +885.783 300.775 877.066 297.482 
867.603 297.482 cv +858.135 297.482 849.418 300.775 842.393 307.006 cv +837.025 311.763 830.63 320.354 830.63 334.257 cv +830.63 342.282 832.76 348.533 835.592 353.299 cv +832.309 351 828.712 349.018 824.782 347.397 cv +817.438 344.379 809.047 342.776 799.205 342.523 cv +799.205 314.912 li +799.205 309.227 796.713 303.826 792.384 300.14 cv +788.849 297.129 784.381 295.508 779.801 295.508 cv +778.775 295.508 777.742 295.588 776.715 295.754 cv +747.302 300.492 li +737.896 302.006 730.983 310.123 730.983 319.648 cv +730.983 344.484 li +730.277 344.316 729.57 344.156 728.861 344.004 cv +722.946 342.736 716.463 342.093 709.595 342.093 cv +698.512 342.093 688.313 344.105 679.273 348.078 cv +675.158 349.891 671.305 352.037 667.71 354.475 cv +665.828 352.313 663.445 350.541 660.656 349.357 cv +655.197 347.042 649.498 345.239 643.722 344.003 cv +637.803 342.734 631.32 342.092 624.455 342.092 cv +613.37 342.092 603.172 344.105 594.137 348.078 cv +585.168 352.024 577.379 357.498 570.993 364.338 cv +570.505 364.861 570.043 365.404 569.571 365.941 cv +567.759 359.079 564.273 352.445 559.656 347.231 cv +554.654 334.644 545.437 324.396 532.739 317.38 cv +522.827 311.904 510.623 308.563 500.098 308.44 cv +499.868 308.439 499.64 308.438 499.413 308.438 cv +477.133 308.438 463.671 320.701 457.115 331.523 cv +448.029 337.533 440.207 347.509 437.402 358.122 cv +436.752 358.81 436.147 359.513 435.567 360.225 cv +430.352 354.689 423.794 350.346 416.015 347.283 cv +407.797 344.057 398.276 342.486 386.911 342.486 cv +377.26 342.486 367.872 343.197 359 344.601 cv +353.201 345.52 348.15 346.486 343.704 347.52 cv +344.549 345.173 li +347.646 336.569 344.319 326.972 336.56 322.135 cv +334.878 321.084 332.65 319.895 329.551 318.387 cv +326.144 316.729 322.15 315.226 317.317 313.781 cv +312.746 312.423 307.58 311.275 301.503 310.271 cv +295.283 309.251 288.347 308.732 280.892 308.732 cv +269.104 308.732 257.795 310.749 247.286 314.723 cv +236.409 318.832 226.785 324.981 218.687 332.993 cv 
+210.643 340.949 204.265 350.688 199.728 361.941 cv +195.257 373.034 192.99 385.702 192.99 399.594 cv +192.99 413.145 195.016 425.551 199.007 436.455 cv +203.167 447.837 209.248 457.667 217.078 465.667 cv +224.963 473.729 234.529 479.92 245.506 484.068 cv +256.024 488.043 267.729 490.059 280.299 490.059 cv +293.684 490.059 304.816 489.127 314.333 487.21 cv +321.051 485.856 326.354 484.569 330.603 483.257 cv +333.919 485.905 338.121 487.492 342.695 487.492 cv +372.106 487.492 li +378.378 487.492 383.956 484.516 387.503 479.898 cv +391.05 484.516 396.628 487.492 402.899 487.492 cv +432.311 487.492 li +438.632 487.492 444.246 484.467 447.789 479.788 cv +453.011 488.951 460.835 493.996 468.687 496.009 cv +475.153 500.054 484.721 503.735 498.12 503.737 cv +505.128 503.737 512.654 502.694 520.491 500.64 cv +540.638 495.358 557.324 483.058 564.939 468.345 cv +565.798 467.53 566.584 466.669 567.328 465.78 cv +567.969 466.562 568.627 467.331 569.308 468.082 cv +575.643 475.081 583.667 480.618 593.157 484.54 cv +602.332 488.334 613.062 490.256 625.048 490.256 cv +632.506 490.256 639.527 489.592 645.93 488.281 cv +652.391 486.952 657.784 485.354 662.418 483.393 cv +664.66 482.444 666.634 481.103 668.309 479.489 cv +671.426 481.392 674.758 483.078 678.293 484.538 cv +687.468 488.332 698.197 490.256 710.188 490.256 cv +717.642 490.256 724.662 489.592 731.066 488.281 cv +735.035 487.465 738.597 486.545 741.831 485.505 cv +744.413 486.776 747.316 487.492 750.389 487.492 cv +779.801 487.492 li +786.072 487.492 791.649 484.518 795.196 479.901 cv +798.745 484.518 804.321 487.492 810.593 487.492 cv +840.004 487.492 li +842.283 487.492 844.469 487.096 846.5 486.374 cv +848.531 487.096 850.717 487.492 852.995 487.492 cv +882.407 487.492 li +893.124 487.492 901.811 478.805 901.811 468.089 cv +901.811 364.26 li +901.811 360.595 900.797 357.169 899.031 354.245 cv +902.149 349.367 904.575 342.82 904.575 334.257 cv +cp +false sop +/0 +[/DeviceCMYK] /CSA add_res +0 0 0 0.9 cmyk +f +852.995 
468.089 mo +882.407 468.089 li +882.407 364.26 li +852.995 364.26 li +852.995 468.089 li +cp +855.265 346.988 mo +858.75 350.082 862.865 351.627 867.602 351.627 cv +872.34 351.627 876.451 350.082 879.939 346.988 cv +883.425 343.897 885.17 339.654 885.17 334.256 cv +885.17 328.861 883.425 324.618 879.939 321.524 cv +876.451 318.434 872.34 316.886 867.602 316.886 cv +862.865 316.886 858.75 318.434 855.265 321.524 cv +851.776 324.618 850.034 328.861 850.034 334.256 cv +850.034 339.654 851.776 343.897 855.265 346.988 cv +cp +830.826 375.116 mo +827.471 370.906 822.995 367.65 817.403 365.346 cv +811.809 363.045 804.801 361.891 796.381 361.891 cv +793.485 361.891 790.49 362.188 787.4 362.779 cv +784.306 363.371 781.773 363.998 779.8 364.654 cv +779.8 314.912 li +750.389 319.648 li +750.389 468.089 li +779.8 468.089 li +779.8 389.131 li +781.642 388.607 783.714 388.113 786.019 387.65 cv +788.319 387.191 790.786 386.96 793.42 386.96 cv +799.999 386.96 804.505 388.934 806.941 392.882 cv +809.375 396.83 810.594 403.54 810.594 413.016 cv +810.594 468.089 li +840.005 468.089 li +840.005 409.463 li +840.005 402.356 839.314 395.91 837.932 390.118 cv +836.55 384.329 834.182 379.33 830.826 375.116 cv +cp +724.597 444.6 mo +720.516 445.259 716.7 445.586 713.147 445.586 cv +703.146 445.586 696.138 443.02 692.125 437.887 cv +688.11 432.755 686.104 425.52 686.104 416.174 cv +686.104 407.359 688.242 400.253 692.521 394.855 cv +696.795 389.461 703.278 386.763 711.963 386.763 cv +716.041 386.763 719.727 387.157 723.017 387.947 cv +726.305 388.736 729.334 389.658 732.097 390.711 cv +738.216 367.221 li +733.873 365.379 729.398 363.964 724.794 362.977 cv +720.186 361.99 715.121 361.496 709.595 361.496 cv +701.171 361.496 693.671 362.945 687.091 365.839 cv +680.51 368.735 674.918 372.648 670.313 377.584 cv +665.705 382.52 662.187 388.311 659.753 394.955 cv +657.316 401.601 656.101 408.673 656.101 416.174 cv +656.101 423.94 657.119 431.146 659.161 437.788 cv +661.199 444.435 664.423 450.193 
668.833 455.061 cv +673.24 459.931 678.865 463.779 685.71 466.607 cv +692.551 469.436 700.711 470.852 710.187 470.852 cv +716.37 470.852 722.03 470.324 727.162 469.272 cv +732.294 468.218 736.569 466.972 739.992 465.522 cv +735.848 441.44 li +732.424 442.89 728.673 443.942 724.597 444.6 cv +cp +639.458 444.6 mo +635.378 445.259 631.562 445.586 628.01 445.586 cv +618.008 445.586 610.999 443.02 606.987 437.887 cv +602.972 432.755 600.967 425.52 600.967 416.174 cv +600.967 407.359 603.104 400.253 607.382 394.855 cv +611.656 389.461 618.141 386.763 626.824 386.763 cv +630.902 386.763 634.588 387.157 637.879 387.947 cv +641.166 388.736 644.195 389.658 646.959 390.711 cv +653.078 367.221 li +648.735 365.379 644.26 363.964 639.655 362.977 cv +635.048 361.99 629.983 361.496 624.456 361.496 cv +616.033 361.496 608.532 362.945 601.953 365.839 cv +595.372 368.735 589.78 372.648 585.176 377.584 cv +580.566 382.52 577.048 388.311 574.614 394.955 cv +572.178 401.601 570.963 408.673 570.963 416.174 cv +570.963 423.94 571.98 431.146 574.022 437.788 cv +576.061 444.435 579.284 450.193 583.694 455.061 cv +588.103 459.931 593.728 463.779 600.572 466.607 cv +607.413 469.436 615.573 470.852 625.048 470.852 cv +631.232 470.852 636.892 470.324 642.023 469.272 cv +647.156 468.218 651.431 466.972 654.854 465.522 cv +650.709 441.44 li +647.286 442.89 643.535 443.942 639.458 444.6 cv +cp +422.836 375.116 mo +419.413 370.906 414.773 367.65 408.92 365.346 cv +403.063 363.045 395.725 361.891 386.911 361.891 cv +378.226 361.891 369.935 362.518 362.039 363.766 cv +354.143 365.019 347.695 366.366 342.695 367.813 cv +342.695 468.089 li +372.106 468.089 li +372.106 387.947 li +373.947 387.685 376.054 387.453 378.422 387.256 cv +380.791 387.059 383.027 386.96 385.134 386.96 cv +391.975 386.96 396.647 388.934 399.149 392.882 cv +401.647 396.83 402.899 403.54 402.899 413.016 cv +402.899 468.089 li +432.311 468.089 li +432.311 409.463 li +432.311 402.356 431.586 395.91 430.14 390.118 cv +428.689 384.329 
426.256 379.33 422.836 375.116 cv +cp +297.472 443.414 mo +295.628 443.809 293.49 444.073 291.057 444.203 cv +288.62 444.336 285.693 444.4 282.273 444.4 cv +275.957 444.4 270.429 443.316 265.691 441.145 cv +260.954 438.973 257.007 435.913 253.849 431.966 cv +250.69 428.018 248.322 423.314 246.743 417.852 cv +245.163 412.393 244.374 406.305 244.374 399.594 cv +244.374 385.775 247.563 374.889 253.947 366.924 cv +260.328 358.964 270.692 354.982 285.036 354.982 cv +291.483 354.982 297.438 355.806 302.9 357.449 cv +308.359 359.097 313.196 361.037 317.408 363.272 cv +326.291 338.6 li +325.237 337.941 323.494 337.02 321.06 335.836 cv +318.624 334.65 315.534 333.5 311.783 332.381 cv +308.032 331.265 303.558 330.277 298.36 329.42 cv +293.16 328.566 287.337 328.137 280.891 328.137 cv +271.416 328.137 262.5 329.717 254.145 332.875 cv +245.787 336.033 238.517 340.672 232.333 346.791 cv +226.146 352.91 221.279 360.38 217.726 369.195 cv +214.172 378.012 212.396 388.145 212.396 399.594 cv +212.396 410.912 214.006 420.979 217.232 429.794 cv +220.455 438.612 225.029 446.048 230.951 452.099 cv +236.873 458.153 244.009 462.759 252.368 465.917 cv +260.723 469.075 270.035 470.654 280.299 470.654 cv +292.272 470.654 302.339 469.83 310.5 468.187 cv +318.658 466.543 324.58 464.997 328.265 463.548 cv +328.265 395.843 li +297.472 395.843 li +297.472 443.414 li +cp +0 0 0 0 cmyk +f +499.871 327.844 mo +479.593 327.609 472.617 343.076 471.746 345.664 cv +462.806 348.957 454.521 360.719 455.829 367.776 cv +449.177 372.482 444.763 378.48 449.724 388.479 cv +444.926 393.477 441.001 405.299 449.506 412.943 cv +441.818 426.47 450.486 434.057 454.739 437.174 cv +450.813 449.406 459.539 459.127 461.498 460.463 cv +463.078 470.579 467.977 477.244 476.324 477.636 cv +482.209 482.576 494.498 487.394 515.571 481.87 cv +533.066 477.282 545.821 466.344 549.147 455.993 cv +557.104 451.877 556.777 439.526 556.342 436.938 cv +562.828 423.118 558.739 411.298 556.342 405.886 cv +560.702 397.006 555.143 380.422 
551.546 376.951 cv +551.872 369.836 547.456 361.543 542.825 357.896 cv +536.173 335.078 511.879 327.983 499.871 327.844 cv +cp +f +502.25 467.75 mo +495.838 466.606 492.5 462.25 489 455.25 cv +486.897 453.815 478.75 444.25 477.25 432 cv +474.695 430.128 471.25 418.5 471.5 409.75 cv +469.75 403.75 468.349 397.448 470 388.75 cv +467.75 379.25 467.599 372.865 472.75 367.5 cv +472.75 358 475.359 351.052 482.5 346 cv +481.349 339.791 484.277 333.904 491.679 328.695 cv +477.657 331.937 472.487 343.462 471.746 345.664 cv +462.806 348.957 454.521 360.719 455.829 367.776 cv +449.177 372.482 444.763 378.48 449.724 388.479 cv +444.926 393.477 441.001 405.299 449.506 412.943 cv +441.818 426.47 450.486 434.057 454.739 437.174 cv +450.813 449.406 459.539 459.127 461.498 460.463 cv +463.078 470.579 467.977 477.244 476.324 477.636 cv +482.209 482.576 494.498 487.394 515.571 481.87 cv +522.207 480.13 528.155 477.474 533.171 474.285 cv +516.934 476.368 507.505 472.161 502.25 467.75 cv +cp +0 0 0 0.05 cmyk +f +479.905 346.547 mo +479.905 346.547 498.071 344.899 507.586 346.71 cv +517.031 348.507 533.404 356.603 533.404 356.603 cv +533.404 356.603 508.984 349.163 501.732 348.135 cv +493.03 346.898 479.905 346.547 479.905 346.547 cv +cp +0 0 0 0.75 cmyk +f +464.782 368.029 mo +464.782 368.029 488.936 365.72 503.083 367.014 cv +517.229 368.308 540.275 375.997 540.275 375.997 cv +540.275 375.997 514.27 371.326 499.886 369.709 cv +489.149 368.502 464.782 368.029 464.782 368.029 cv +cp +f +460.468 387.674 mo +460.468 387.674 484.75 385.621 499.593 386.067 cv +514.435 386.512 540.681 391.008 540.681 391.008 cv +540.681 391.008 506.098 388.892 494.801 388.754 cv +483.505 388.617 460.468 387.674 460.468 387.674 cv +cp +f +461.11 412.032 mo +461.11 412.032 487.129 405.443 501.163 404.417 cv +517.788 403.2 544.817 406.357 544.817 406.357 cv +544.817 406.357 509.509 406.268 498.869 407.439 cv +487.606 408.681 461.11 412.032 461.11 412.032 cv +cp +f +464.962 436.38 mo +464.962 436.38 490.357 
427.354 504.871 425.765 cv +519.387 424.175 546.102 424.177 546.102 424.177 cv +546.102 424.177 511.032 427.614 500.03 429.181 cv +489.032 430.748 464.962 436.38 464.962 436.38 cv +cp +f +545.674 439.174 mo +545.674 439.174 524.613 448.131 510.928 451.999 cv +497.242 455.868 469.725 459.093 469.725 459.093 cv +469.725 459.093 501.297 452.146 511.654 448.944 cv +522.01 445.742 545.674 439.174 545.674 439.174 cv +cp +f +484.328 475.342 mo +484.328 475.342 498.696 467.484 507.908 464.136 cv +525.13 457.875 538.541 456.817 538.541 456.817 cv +538.541 456.817 514.27 464.576 505.585 467.402 cv +498.535 469.697 484.328 475.342 484.328 475.342 cv +cp +f +750.389 468.089 mo +779.8 468.089 li +779.8 423.76 li +770.099 424.447 760.291 425.042 750.389 425.543 cv +750.389 468.089 li +cp +724.597 444.6 mo +720.516 445.259 716.7 445.586 713.147 445.586 cv +703.146 445.586 696.138 443.02 692.125 437.887 cv +689.906 435.051 688.324 431.549 687.332 427.428 cv +682.405 427.474 677.462 427.5 672.5 427.5 cv +667.27 427.5 662.06 427.471 656.868 427.419 cv +657.378 431.016 658.142 434.472 659.161 437.788 cv +661.199 444.435 664.423 450.193 668.833 455.061 cv +673.24 459.931 678.865 463.779 685.71 466.607 cv +692.551 469.436 700.711 470.852 710.187 470.852 cv +716.37 470.852 722.03 470.324 727.162 469.272 cv +732.294 468.218 736.569 466.972 739.992 465.522 cv +735.848 441.44 li +732.424 442.89 728.673 443.942 724.597 444.6 cv +cp +852.995 416.62 mo +852.995 468.089 li +882.407 468.089 li +882.407 412.573 li +872.766 414.02 862.957 415.37 852.995 416.62 cv +cp +810.594 468.089 mo +840.005 468.089 li +840.005 418.184 li +830.335 419.297 820.527 420.317 810.594 421.24 cv +810.594 468.089 li +cp +639.458 444.6 mo +635.378 445.259 631.562 445.586 628.01 445.586 cv +618.008 445.586 610.999 443.02 606.987 437.887 cv +604.494 434.701 602.779 430.701 601.835 425.894 cv +591.57 425.423 581.405 424.852 571.351 424.183 cv +571.815 428.952 572.701 433.489 574.022 437.788 cv +576.061 444.435 579.284 
450.193 583.694 455.061 cv +588.103 459.931 593.728 463.779 600.572 466.607 cv +607.413 469.436 615.573 470.852 625.048 470.852 cv +631.232 470.852 636.892 470.324 642.023 469.272 cv +647.156 468.218 651.431 466.972 654.854 465.522 cv +650.709 441.44 li +647.286 442.89 643.535 443.942 639.458 444.6 cv +cp +402.117 401.792 mo +402.637 404.961 402.899 408.698 402.899 413.016 cv +402.899 468.089 li +432.311 468.089 li +432.311 409.463 li +432.311 408.838 432.298 408.226 432.287 407.611 cv +422.005 405.783 411.942 403.842 402.117 401.792 cv +cp +297.472 443.414 mo +295.628 443.809 293.49 444.073 291.057 444.203 cv +288.62 444.336 285.693 444.4 282.273 444.4 cv +275.957 444.4 270.429 443.316 265.691 441.145 cv +260.954 438.973 257.007 435.913 253.849 431.966 cv +250.69 428.018 248.322 423.314 246.743 417.852 cv +245.163 412.393 244.374 406.305 244.374 399.594 cv +244.374 385.775 247.563 374.889 253.947 366.924 cv +256.633 363.573 260.034 360.937 264.132 358.996 cv +253.701 354.222 244.047 349.257 235.23 344.12 cv +234.243 344.98 233.271 345.863 232.333 346.791 cv +226.146 352.91 221.279 360.38 217.726 369.195 cv +214.172 378.012 212.396 388.145 212.396 399.594 cv +212.396 410.912 214.006 420.979 217.232 429.794 cv +220.455 438.612 225.029 446.048 230.951 452.099 cv +236.873 458.153 244.009 462.759 252.368 465.917 cv +260.723 469.075 270.035 470.654 280.299 470.654 cv +292.272 470.654 302.339 469.83 310.5 468.187 cv +318.658 466.543 324.58 464.997 328.265 463.548 cv +328.265 395.843 li +297.472 395.843 li +297.472 443.414 li +cp +342.695 468.089 mo +372.106 468.089 li +372.106 395.013 li +361.997 392.548 352.188 389.961 342.695 387.26 cv +342.695 468.089 li +cp +0 0 0 0.05 cmyk +f +0.5 lw +0 lc +0 lj +4 ml +[] 0 dsh +true sadj +27 804 mo +0 804 li +/0 +<< +/Name (All) +/CSA /0 get_csa_by_name +/MappedCSA /0 /CSA get_res +/TintMethod /Subtractive +/TintProc null +/NComponents 4 +/Components [ 0.858823 0.85098 0.788235 1 ] +>> +/CSD add_res +1 /0 /CSD get_res sepcs +1 sep 
+@ +36 813 mo +36 840 li +@ +27 36 mo +0 36 li +@ +36 27 mo +36 0 li +@ +1069 36 mo +1096 36 li +@ +1060 27 mo +1060 0 li +@ +1069 804 mo +1096 804 li +@ +1060 813 mo +1060 840 li +@ +%ADOBeginClientInjection: EndPageContent "AI11EPS" +userdict /annotatepage 2 copy known {get exec}{pop pop} ifelse +%ADOEndClientInjection: EndPageContent "AI11EPS" +grestore +grestore +pgrs +%%PageTrailer +%ADOBeginClientInjection: PageTrailer Start "AI11EPS" +[/EMC AI11_PDFMark5 [/NamespacePop AI11_PDFMark5 +%ADOEndClientInjection: PageTrailer Start "AI11EPS" +[ +[/CSA [/0 ]] +[/CSD [/0 ]] +] del_res +Adobe_AGM_Image/pt gx +Adobe_CoolType_Core/pt get exec Adobe_AGM_Core/pt gx +currentdict Adobe_AGM_Utils eq {end} if +%%Trailer +Adobe_AGM_Image/dt get exec +Adobe_CoolType_Core/dt get exec Adobe_AGM_Core/dt get exec +%%EOF +%AI9_PrintingDataEnd userdict /AI9_read_buffer 256 string put userdict begin /ai9_skip_data { mark { currentfile AI9_read_buffer { readline } stopped { } { not { exit } if (%AI9_PrivateDataEnd) eq { exit } if } ifelse } loop cleartomark } def end userdict /ai9_skip_data get exec %AI9_PrivateDataBegin %!PS-Adobe-3.0 EPSF-3.0 %%Creator: Adobe Illustrator(R) 13.0 %%AI8_CreatorVersion: 13.0.0 %%For: (Thierry Ung) () %%Title: (gnocchi-nb.eps) %%CreationDate: 4/3/17 10:02 AM %AI9_DataStream %Gb"-6fodW&E?P#[p(-uI1;lPX\@EXJ%N1s2S0W:t"G?bf0u=r^'YCCb2,d"@FZ#6Y,CTlICeRW'F#VF(XA"]U[_jCI,;hU-m8-a1 %>JKsmpu09fhn477h=8Ctr;?!+OR^@%H+9?!ke?iOYM]96lQ71[l29N++5ab\s63T,5CEG+LEB85hL>7p_p@e3DdRqk^YBW\^Ic2P %^3TVPqVgJH=.eoHqZ1)Rr8$n`2fHJ,gE5>Oj6Maj^Pfu2i:&i+p>>o>lFYY4^VfdKrgWXh^]!\lf=reP^Ad85hNa.OJ.JO7iVQus %rq*13YWUF%rg\H-hu@WLQ_3.EpN)ed1#SO0REOIor_Ib;mP"Rump:MDn,N*d2"Ns"r6Mkh+$Y/Qma]AXV`1gZq<)t:GkLa!pRaYIn9Z)OYMOY$ZZc-=l.9kQ^G6<``aPOtoZ?"h %q"FC$^OQ:G;s@WncpD+<`J8A<^Vre&mpFtdqMrTm)Q:F8L>TZ?ptb73AVE@]rUJ@+&+>!S(6h$5lgiT,Isq:c#B"DSh(QNmrVe_t %rYtS+bNQil\snnuLFMle\D/Z//8t"iBdf1a&+fQ]mLcf,/P+>h=fl%rM>u\X2rD;os77mbT'+P]Q[/;h51'H3e]0W+eFfBP]:"-, 
%3::GNZcEZ"\;^C8Vq:B_!!V=V^SpN._u26;9b7$DmgBW>J+X!mDCk1@W@eSW`US*8deVFk=(RB3ZFGD_qHrZNs8!&kDCD\"i-3K9 %HWf;$kp4Uei,8njBaF3B#k[BS$K%_!jU?@p%0,DtGNB6Wja=a6S'(m-/.:7>PS\^o=l?GoJ\0h\r._gY-b/JaJo@5UnB'ntpKXok %l1LK$I/A5Dc`^FH%eZ4c9DtP3d\lS/q)efS"oO+e?qg&/mXd188EhoI&c@B1?psK'`[ru4hhE'.BZ8m_g]c`%'rG--n&"kX2uEoU %U\0E"e8p/UDn$1p#JXsf#tnr9[d5$'3I1a8\)=LKMI7CqS1oY962$#6.EssINb4iR9Ci?NQPGXf;;^=bGri?.hl"fb76RQ>8_?^P:3/cS>k(8X %>B`/Wa*f,C[I,T6Vi(A)h1GA9s5;WHLLd=7_W1rJ,3792I0>69LK3/"E0D@n-"K?n8 %2,3^mrCa>>VZ5g^Siu]2E1g"l?[<`RoRHg40DEH]9T]+;?C)&u?iSPickk)N=?H:PlUq3#J+U4/DiCT]Y3.=Zk7`?3Q$MrpQLao, %:Zq!/^6m3to.\YnL\Z7N73&4F]$BqHI\2C;]h[_tKDUK$*Tf2UZ/,k*?I.K_qpUA*l$?(!g^!<'(N;U`JSe\b0L2gVlg55Wr6k;) %Ic/sE_=QULoE9Cl:P3;t-fmO@1:/p-IULLI6\JU)*^HdD$!MqP?YB2)Sr_3V1rfB^qB"@`pTU"BY0u``][UTR',UM2oGX>^aVYQM %K(Xlk+:*BFhh9>1Og_$>=bAfs'BoP";JG/>mop2cbd[__n44FDBEm42ar9#Q&raQ,A(;H[45kDZ:M1&^GK/%TY[[60_&"#jI+nIk %H,]YQcO#BcHh7dCqtL\\]?]++;i;[C>8RIF>/]:CKPPa3Bg3#_E(=+^qf9b5^4X8n()ra6L+<`f`A5)!ec7B=m\@<]*'XbDd9P\t %oL=fsJbD%ka/eh4et&C=pG7ghe*->V'7?;H4j/(N=NS6bgl&Q)nftK\=M@j$W\9O0TEF.e@D/diD]o]\X5LW<\I$KJR96S!*`Yd^[p#.F08!d?3BQ<5d2e_:07SVf_SQGmE!-[l5gkg7))epA,G;QhLB/khd+N0tjdMGs*2&H)4PT %,k>ce=*aqm.\&mOmmW0!%qDT_.aiBZQ7tbJZ]-DEZVE)?c4[`JlSI9YMfX62'CW_YNTLeVn$&3GTnsN0fS_Qr^.l:XkpYp*S:R_X %+(3GN_2rU"cDogcGa>nW-Q_Xp_R%k(.JoU[!l7Y.7-H;oA`XVrfLqj@TMM)-UYpHV],4)S1U\tW27s]CUn.&F:5DVMo=!'Z>rU:hU %VXO&=?bcU4j7f?\`W(G^[i`cKDr-,\e,:kI1B6cDDgs;[rRJq!5CWVkn%\hZro3FlLOX]"pR@5:Jc>**QiHhF?[r%=B6Sjqho_Q% %+5d"c$FI\9s6j=.q>P";[eK*k61asCmeH=m++A_6Rh$UtQ8X3^iL:qH^:nsm0BJDlYOC[M0FGJ,qrZ/4huc#s6o1CG9:1?"+TdRH%#E=2#[S?\_Q%hrkhG@K8+mOSt:8ofO!IKqs`jp[lV$#+$[I7iR=:rrSqbTqi?3eiT$gT %*k-#g-hU9nJYuRfhnCQ*rr)/0n8J."f<=%[\7&V?laqZEnFk*;n&dT]II0l84l4]a2/J$$ %OY/OthZ)'j[i`?a*ZEZ)GHX6eiK!^L%>TuU*a%$4hn8ud&;+?ipu[d.\.A&Bs6p,t;[77j:OiC3C%8?Tm=mdpp=9%A+7BcT*QbKk %(L$]GRIb@rp&1q8\:WW9(O=q?DuL(:=Lgd@PC?S(\0]R1Iea!:*f%AfHh7&?'l*<:*"5%beE,Tf%;5E?)k5bK60mH!JA,GT9Lh@;4PB)6[HS@gF64 
%*2YEF6TBg!3/:/:7N]Yf@Z2bu35m0bi\@%a3F:]&%EXsC32Dp^RaR^lF@[dGcKULIm.6T3:-%Jdc[!WfI*AL9cBipLL<15Q,#2jp %TG=&aEZM%;F=OB8V/QUUm-u!,Yg1O6:33#MIIfQSKe59MlSP6sbC!U4I>A5Y4LUMY3Y%m*:PGAH7Y8XJ'2*-A!rQk3p'oi"*EseO$-qkW3/=LlWgU9nTqj>b%dKib+-IM %RH@G$bPc!XgQ>R(ep#[[53!Ol\B:LMYgQ\+>#tNPlO>?cS1aWe/39oCk;?&Jl>p(Ob/;pDB'jU2h9PR=K)ir6!XqWOJcgT.Omuh= %#oCY'!9at$6MF=DAUS!0Bmk'/T6ZD:0Cmau@p*AGkD*+7bU`'d=mW`L1]FJbHb89[c"5FFh/VQ"\iX5E>5\'a1F^-4-ZT2(!'_2/#SVkR*#t]* %+9^'u5m]f6J4Lhs+c%;?!@3;RL15ogkQ(rULa&cqO[Wpi+Nt %.Km6fGR7J4KuFd["E];!#=qTBfE\p1/pQOe`he9&9>RoT_(Lf3b(Q\rBQPii'+2V#"\DJ#_*a"'tG#nE[k&M:jP0hroVYp=QiL?L>6bs`9:*Td]!Z> %:^/hE]opR9N.m"GKpiIo[@lfqT^B!,n5:TB.asLeP)"YYIMp:W\00TiQ!JELA_uQ)Hb!sh=Jg*i)f-qppJSHUWGKCZY$.pQFhLbNb\2Y'!`Y %;;H:o[EtJt:X<>$mcsefp[,<=pN-&qK.H;%M-k.SA`#[]kE`N8G@1:9pot)lm'R,5Ig,CYpVlS0jS4B!oUgT4s-M&lB9hXJ/A#1c %qsNL6+*N;pr2;_e&p^T!NGpSK*N?+qE*F*kZ>C8\C`mJ_>ijV&@1u]/+h?s<9KB?b=\%oqfr<)Z+LJHNPrhE(XhgL@Du$]#/JLNYT7(cGKZ[O,,joHNRBTGg..k+ZOOF %XH/;43iiRk?4.a-CR1_JLsj\@`oHX@>CoXf)N2U;+6CEj5-DiKSAYKMZ>perZ %\X*+U[UVL-n:o4?m0tFi_f`=R_\6a2*q`:'H5;JLVa8NO+$ct;DgVKAl%;.J1T^-ra%%#oj8NKLE(u/(1jk3/]#Du-NX1*2cBIJW %+.ES/` %-M*bQPNY;-T@>t2d#suEla*JE;44<_05/@:T6=rBi$2]S=:*V*@Y91?8mOKj%]FG2TmHIm?j#D4jno1O@R='DSR8*33i`a0jRZ#( %N(-7s=@WMWjTE`MN'FYt.0ILWK=.bS^,Mnu?!>5S@/8N76RLrk5tN_tLl7:lYT2W/k*2FaH#Ni8nU/6pBu %S/cTjGRWSJ%#d`.Gp"gc<%dCaR?<.jX3?Au%edh^;@lLr*;d>2Flr"C(S6T;"'u@mF#/\q+Ojs:lp.o[i@[)8N8ALrEemp`":fH5 %b,rl8r.l?U.f%2C?MQ'$s#kb*dt)E3'%c0H6th!r0"/*kk\Nr;Cd\LA;WkseA2579c%tq(:*uen-c0_,]=ci-\)qp=)Qr[t\_+fY %?/0,V6>#9KK\8V6Y@<=*o>^?KCW=nDhi&eGGMUJu1Rt)_LHSIq3WKP2&f(nt5XKLJ@3$Q"ai=eG1?hN.]hL[sR(t$o2!o-2M%h^A %-4bqrW9cg"%L/"Ms5j$f?A7RWl*djV>IKp`riI=j5[F@oLrpW.a06!MV3KqM-0A^0rr %jB49Jb)DkVK(pSWm2DJl/-6GmMSQ_Ag#,_j`Up %Bt52Ok36dRpLsu7=*]@oN^&aL%FWHc2j?7TDdO\HZU>;p2/edh/?L1I+-I.YEOh-=*c&5kW'0L:Q]V_uBpIP6jntj7C82mM<%7+4?EjW=bdL6\DjV4gHIq!#LH4]Cas$a^(cd^Wht`k*5ZJWGE(7*H%d,)LN:g)QEC9^>kR[4eQX8AKfFLHepo%47_X&J0]1=t1^YQnp?:FpHDT/W=4agjk06n.$AX"_>_gjP 
%>[8_fW%Yj=:q@1&'P"Lc1f$Wl)j-0c>poST@;"Y;0;LX;5$-XHcM-U+mkh9BM(Xj2 %L9@1NA@>Y5%/$1EViFagF]tZf7a5OSB4g5`^I@&+Z<,jo_sZp/Ft.][Ut_t@qnLskRggoQfNrG'>&EZ`"t)!?pCkSs:e/3T6EW%r %OY3e_jAg-2#n=T/N-.T3RR*M6nh`B#^]To]EQK3R_2i %K;BV@Ek%24=,n+pidr(-ngQpRNLVl-3HfZ.SFIl=if7\@2*-1Sg9\)V==_Zsjo]nQOl2!]Z!;i5P()L:?6nho24*kd9S\HJ&<\C6 %8>>BY>X_rR`qrPR];"7AhOY5LP:N!G`[[tQhWbCacj(\&`WsrhZ3r?oRj7#JUYNpC>;LRB/F3@oAnONEJR,e,mr([&K`esaZ(mpI %Nh>Ce-XM3;n:O!e.ni/gH;+S9Nei)DfBF1A!;""gLE=14+<@QAJCH_+\r#l.U"U60_I-Q`7lR3jj+,up-X:X6T:kEL*E^-*LWBTj %+ctB..KJU]LlY;dLbFRHLa#57#TX0[AAK(rNdP=83VaLUWBl$oePar%\tWH4^,7D5AZ8>+rJ.fO]kc<7%dh6(I,^Fr;-KPjFXcV!K1Z`6aqEfVXOE\AZRJu(/*[lA)YLCk,B=*NQ(dlN?*g_u?:T&WbdGgZ=HQdja^!M(WOZJZKmGc/9&$C0mt^Vj %n?SD2F`s\Sk3BX?]@Wai*_>1KeiJ;jTfC-p'LKi`@qM30o[]?d/=TcmK&S %E\SfV^:9T9Ml9Q94^%'lK"a(qNN1b&BW6Br&DX$&A:(B"1 %hts"^[,W=`V/1BLkCq&'KfAn!p,MF0QFMj>0^N]4fa]=M9%@WOKBEL+B1pI*OMagW0d.q+8XWP=PBe]X30h#^Km[4MI*l1nAY*:q %#+V;1c:l5LJI)?,85h70WITi@,7eAek$V59kL%LFK,6Qr1i<#Ktr,T:%[(.6&\VEA]l#ghr[g"thEbsq;X@pdg %U9a,Up$G7e/h?C"o[e+QMVgno4VkTm&PL!p;1'D$9@8&DB%$tS%N;Gu=sK$U"Kdg(-p4\!G?&W^_sB"U2&[bb;r_s12B3D&7]qTf %XMtN#7u0Gt9G4-]@bjn^R)#45." %G[VPoruSG:hce`@cr[R\8g>JkH`OLqIu/QSM#uGQbWt'`*+-X_(XsRGuPm9 %LRV623a>)hWVSc6E* %gW7E4%@rs_:_H:Q1ET:s,5_b>jb,Z3^>L9^g_b#oWck"p.JPnGM[P/5TnNo)d8 %r"B9S_f:/mL#<\frMW;m)''t^?Sqap1fJa_N#Z_ebidQ/B@`LCrjW1q)OgeDs/l9mYb0FFJH(/uN;;!@0V^p!;Mg<'^pF"pM.*&^ %XQ!b9(R-GKm23aAEj[B(K/d76SL`T:V>fRWJ9^Z#qNore`[>#,gF6s;*`0$f1YA>Cn3:,&H%Ps8MpV)13UQI3==<,&m3.-G(Zq": %dV[g,DbsN=_juc*_kA.3(1_-a)Z7gZ94r@5CW3^3DeuhJXn#WsU5oet=[NN1lH`5!oZJmiH4aMp'B2NIF69Q]?k;qg6! 
%G=GBRW4'RMCfF([XpMNW/[dZ/[q[cT3qFuDD/l_p3_'?c'_*[<BV_]ATUKFuR\ajLQ\Gtj=4'+B5"WX!N\(g-F %PjcW(EP)P=A!JP%)3(<0E8fO!Lp`/,&\OmT$+_hco_kjdal6Ijag2m+Wg@Zh(.>/d__9@P40c[2g6"$An2#7"L0o %3I<;eF*YD/"jO,%qb&e9F!XrI %fjc,i)3Ms'OH+<9btC#^);0+=#'`APaY.7t?D+RLo1TDAX1*:_j-$Q1N"Gb5]AZQ=%=cNe)4$T%=cND3@C>TM!q71('o]0`oKe38#$DdDe!K %9bFBLF@@U9d$@:0KYCCmZV$5?1g.u3WrcVjNToUt&XlXsNn`X9l`#N;\Y7HD)QpUpS=b!V)UdjkeS.71FLqF(Mmj8C3lm4Kc:]Fo %%(AqDF@1L"f/U_"7;e=KLf8,4Met91e2?F)$uqAoV6@Mt#5/ %(>2rG0Iq[)i7,>NZjn!&?%EHc9&nHoNF_O[rbU.Kj.aE"\?^roq?0hWEhaPp,)oNbcj^)@f-(@fBVnhKj?]c_#nP7\I$l^tDIUqU %Kb[JMW[ACM6!a.q1qQiVdtejp]'14ST+fX#AB2gq:a_nk6+t.RYCo#1-qaR%i1mMNoG9e@&P_)G8\A`=_OnNB)8#r6`?_;3#!49]bfm@1F6E0^lP*BJ7La,0-5+4r_CEr/U& %h9imYWJZfCi)IJ6`^n%70J`r&cAJ!;Uh"h]i+0gJGm>L@Z_eK]8H?XcFkoKgbIYjEDKA)iYSL^!WL[Qso`e$>-)O+&5t:[M`$Ccs %X&WtF]_13UOt2MAb@6YEfXWc8fVZfOmmS"k`i<7#0ON*hrs%.]lGP1rXAN.?=G:GP@j]U>T]1@6kYAR_J14N(X=XU6A^YB_bMPR@:UJLSWdBC:Ht(>]k-YZ%/u-t %G<>_N0:Vfi>BoHX_[Nb&d4Fn4XTGNEGl4$b.Tr!j&M;-d^22B#=DT=oL]fX %B4N<(T]tO+:0Vk$6X[CFt)UuTV.IMIZ,&HfJk_c>NrgStu/UEb(; %"f&ik+Zg[M*OQos2)XjC"Z=k#f?D';C_4o\Rj6G+iDUC'M@7KJD^OuS)C'oO`NP2N@*Ut1l4"i %c<['o^bZMJfKVX]f#`VP'!P>0P%,.,4dj+K)W+f[S2k@Uc22p"!_B+`+r)AA(M,uV\WG"ejQR;_=tn3@@em,$[Ur4"?;kZ3@P+'- %bN?L4'3u8Q3KpP)*;dh)DrOaU'?<7Lm":U+3O$X*aI\(GU&#:lOI1pNa1cB,uoKEn)/Y>Q=)#qpkK/SUTR<"\LtP!.eB=2 %R)ofJY!-<9,+"M9Y+)r!`0In5crg1k;FH&E8i9jE37$TC5>kMQYFS[0Z:2UcXj[WK+O1rMgH%X61K-V`eeK$trm>UsBg8iod[ZnK %Fah2q+gGa?-OY^j>YWeY#o0"]*#DRoP'W<+RqH+R;>V0O,Jg)1n%pIQl:Z4l^t>&sT)ZG<;_QYY_`9T9FLb.UNSHZq(ul%T8].'K8cscN=*m[=0F8Ep&ndROJ;Vk%iXVA96O%[F4-Hfd(EJ %o;nH.lb6<^W\(&[j=F6hY3&4iBkuktaZ6Q8/FNo,/uniZT^5[Z2dRbVK'E]D[Lb1;4d9!=&GQ;#oM%^Ujh&TY\4DXQ"V7+: %#5o>m`+bSHAc3'_80seI)1A>o4MZt*TH*Jm]3m8L.irdWruU^oK@gs0h?e>h1\r]IHqGI-If\]7S=e$u&LU,>iG:7OS8)@1=/MF7 %C2f>m,@t2(?SLb]64%$GNR7V$'?d?rQA %r;]d&)qWs*CQu/F&*E'0T1b0q*@[(Qd^Z2,@jrI"Z0g^:i>?Nr^uIMkQI#acj6e's0B1O>9/*:!T`NOQD4Cr/gET %_@d/RUrIQ%`nKK_4gLKh^>d]QmIR[T8\-_A+7oaJ;Wbk/3$)Mel 
%P.F[%OWjjd0gP"N=lhZ`YaWqh_PS$m&+@?/7V`sY1YSLh=,>uB%Pu:t`)[%T5iQCN<)3i8P`4S6Kcsa;>$n/Xed-Vr"tA\=@Z2+3 %QF2O8RcoqNrt6gfLJUpZp:?"3b;K)X[QF,@(SP?OW"HUV=(pRQp*4[Wf)ODdOB-:(NW@7-Zk8(b9dU1m[Aq_>WoKQgCfR0u[+'p* %dkq0'Ib!PGnRO5#HPT7]Jr)&er'U"9fpHjCbUD]6(ipN5LkX,^k?de)WeV8VX%G>I_Ji4elt %T_Ei,Ri89,N52;;W4U.RqQ)4QG_\LfbTCuBX)H>f1_Y_t,T-e5q/B),goe&?5.@*G;3o1F.8']r3co`fgFO^hL6Fo+Nh-<,UI)GF_Y>Q-Q#4o6$q,k,S+AW<5+N@.+!oJ-alI"Y/+u`n>a1N3lqZ86p&e %;iHjY(:deGQ2T/*b2XD^^.l/b:WZ^#?U]?H_>`HfMV)_+,MW0FA#bFVNt.h+^-D4uG98[VXu#t@14`5gInEbN5CbXbs'@1@I#K1M'dPhbMnWed1C,kc%?M!:bYJ;%LsC-FeT9H_tgDUOYqs@iPQ]ka7Kcq%ojCD4P@<;aE!fAHC[P>1$qRhjKIo_GG+SYYi+SSC,*=+["QULZZ=S28\cCUGgh-*8TKMUGL"(-hdEj"0c=68W%2Oq+Z6np#NT2r)Hs3Pl.]3 %o!^(7Hq1/CeO;RB,0,"Xd1f1&#NFG19kB!LSH.^o%FUJ,`g5&FU'Ao%G! %)AfAFLDZf1%)WBaZJGcu:0)5c_?ND92gHFdj!cT3PT(RKeh2]YuQk[[,`V5'"?Y,:(2$2/?XeYGlM %)su*Lk1JlB:JK23K9N)$ZneUt3G)3OGr;^b9Pis80&('252W?7r)m=dX-kdp:Yg?95MuqL-g4Aq3oDC] %`]``omQ,f]2u&\W8mJ=RcS]m\c`OKib^IK]h3ueik#p.,8]h%5T1g#=;[m@rH2IrW!E!Zu9YE'ZCQg-;Ml:nqMBGu>R]nDYdQ5K!E%kDuC4$>c %1Gi*F7Ws4]ACK`]_E!NuRb3]+13Tk*F1%#E2B)e^KQ*@o@`AjEM'7m_=m'VaAi1C@9S-#jHR,crlnU.os4C>Pe"%QL^W"A6>K1>k %9k5i2^@G#hK>tDUfLmp8`AP3kW.K[JWuEKj*V"Eg4Yk9m\WU:Ym1PY_*_#-4IaK%!;4'hr2V*]R)AE*X34jMc0Uu66)G'>)5HK#n %FuY`+F`<*4>n:Os."4[,R&c%^^mErYPt,1]!d %/8BW^@clPpnkQ_54al0:tQtls!J$fgfR8)]khAur9Qh2J#Y4nS!QQT(6_^_/[\nAb:[WUR<=E^0&9j@^sGEU4\qC9O(Uh+Jlkj/*LA9$Na^P3e^'Par<;1R=fJ %BPR6,ds$tZ."WQKJ.i;I9TcGK(CNK,`$?h^,7Cli.HZIZh=ON]j*&N()>T=NT'A!E8RN.CLn:kPr %(VH?gi3^&ARX4`hpc?1tqH;CnpTTAd,RtJG/C[>A;G"(\Q'?MlnU5fNHfJZOi4SOf>Ol1'MoAK*aaA)-mdY2,Gn %GK8P*d(PaiKh?U1Z18MZ?a":KA?Mk44]+%_a2puGa*"9"F]A@/`VdgN7e]eDRE2P,<6Vq!fds@oS&/XO'6^S7Ff&d184t#?.,(;W7[=aJdOpuq(9YeCGHqQ*P=].,kkk,0r %E.$fE26NK!GJ1_FrCb;LY:bd%?.V\T^X`Y;Z+1A,>,[P1@WK'4bUtcb9?lrM3F9.>?6n!XGNYZ`mcDleMCmed1h3$B,uT8.S*Hip %4l#+Bd4N&W3c'frf?$&.DW'1ra_G,;Z->.2h%O(X%b+HsDM&Eh^Z8hCKRfTZG2V_7&;VO78O;+JK %-2a%L4-!Ub7PO:bj9n^^4**'cn)o-!Gri=+X$dnqr#)Ns8XdAM+u%`pIl?J^AH"Zqd7#\3roZ-:*uSK4h]J+Q9:YH(F!fgJoD9P& 
%IQpEgLS"'3*O0Ckc"QZ/J;2uN%4WL%7EClsND\\T:2(HZj^dkF826e;J)n.DbflSs,!6e"+o`Vgi(dMs:(#][_:'BZKk;_?f5;&= %A92#p,;Qf,p(G#B,;Pcp^f$H8$]Zj#)7A%%\g,6oSb^@$(b.ML/f^gSdEuo@8&GH)4bb_j.sa2WIZX$c)''$W(kEJFpo-jMeP5A; %PL\u.U@5GnM:5EO[LPm[**tK8Y?-]o?&o,@)f800lBgq(6ut'BdUXinjXR;uWWR!7PaQPArII'nN-5an">YeCm/St$JDKP5rH[?A %)@%$G:,\:?X!QN3Z*cL3VEF'^E5okt3dFjeYI1.WNiSMb-b79GkcCu'+1Dj?72fF!#csB2(lFP^"L\7eHf*NWR:YcCB(6c\f %:snGd>;Lu%.suD@-*G%`TjlfWFPS9@cXa2h\X&3]_jW=$$/k0;mCO3^#%+;4((YsnFhn[UI?Gp:-$H@W_ZlPgSD#kr)]++"EJ"o` %[u%;Bou#e@7U"dqb`GKCKfrDRcJ_Jhg;#M+pi+?&e;157!(!oY%+,Lr46;ihmBN0[GD+FoWEuH.na+hWU%ae0\bJ7 %nugC5['I6!`N@iDWgs2C's=gQc,T$lik4>b@s9/U)d+c6),gl^WMcS&KO!-eWm6lH[AkGu.6_U+@quB'f@IsmI'hnFh"+j\rl`lF %)%Sa:lbRCs)NkGb1A&%G>"r92LN.F,E7Gl.9)81!n6@<\jZbeI8Cs0t0K8tK7ZsnH@bo6qYDuJuF`2NElVH0cn5FtK2>3qUPBi8p %=%]dE2=g+&I,*tpf?GfALD9;T;T+K^brr(aDjBisKm[]4I>4^du" %9M*6kjE=4h%i[abAakXUaZWh!ES3poBPTCBqkrc22?]VYX__Zf9\.@na[?$2V4<84_2-Mo]k)2g^Q`S>%cc%3u0:Jh1N_Kess\`/\S_2/_:iIW`eX2_j6b"`3Q5JX_VK:=q745Xgb3Jf>pr")RsoYK2n&Y %f$nhEd@;/6Cp'nkqp]2daQ@q$=0Yp*h+SNGclU%LgUs&DR#Od0S/W*A=eXfQ]Fe'T(=ASeXa=k^EdlTkabBY\[^7%CLDZAE1X8RE %jL3>9]3crm4dYf*ia)^:+i)$N`jS#Dh6U:1QD(HcD_jXZ]Lg@iP%q!=q6lK'Wgd]0[Bsfbi`5:OD<&6K)uldt>pf4!@=@fCr#;Uc %Uo%%L?aCK4[UEEW95K4V,aPJ>cKXOKa#&"QfuG>S<`)6/AeR7*dB^;]WFA(mmQD'U#8c'FT`D#VU)'X&EFc^\@]B!3jPcGZM1 %AuY'7C0[2QSW^A5]t.%s2'\8Q4GX;=_JK("2@n>"h8\,Q5Sj:a(o)P9Om<8+OP-aJik-^1BQjGg#k#(EeoY[.PO:,05@TF0he($2 %ZbJGLFp6SW#[eONiD/]hVtE*P#I.Tn:8hB&&7]$^Op.CWfp6<=X5l1Qbo9'[,d,cM+#QWDUt.u(`*oJJ,p%0!j.UP;3XJ3DFjUU, %RHlG?h9/u5)!`OE&g^lD9L2F[MJdHl;Au0E[\:a)2]Xp'YVaq,^iGmZl*AHc`dVR;Bb![kCT]Y=grRENS9=,?t(:R,Uags(0"\f;'RPfg)tTGTNH9d3KijQn>\NE%V*2E]*NG %AlBM$0Zb>-F4'+\?-c')%&-KWfFuS&6*-MmZbA&c&Xn*K3hJ+uQQo68Io2R)/C %C?NiV[d]m"'cd/iNmt)_#9)hJa23IijgaGB$o8jb;E`MMt`6"nZR%L2+k]*_TmEgLW:.kf;F-*/-:/FeRWd)*9i@s'U1uYep0kOn:aJX/oE&kqT2qd@EkDW@!qh(XY9>oK4\GpO5G"@?K"0O[,AqjWAon=CFQh$@++= 
%1t2[I0UW03)4D/dD(Bc!TA.f#FHYH05d;oS8M=:CIF`4?8*-LoU[4J/ZEJ#bO?I]?W!Ie0Ylj@IlT"!41gc$G0m%3D%<_NEcs5&[mXINUMr7*PC751H##?))4j0`Y?nO-8A6.e %fHWD'`f3$!RmETF>#(KUF'#EM9`s;5DbML#\']%m'j]@-Qk^g4"M!Z,m041ro""<^1ag3m(PTrj7n*Um>F0&OSDp0SYOb %.kRb*8MQU'.0tQ01tA9k#oMu"9=cq-Ro-j>aqQ&fZ\=H_aMqC1QMb>d/i[$gaZ9aJhTKioq=[&)rL!T86lcBq@K]Fe)(?4XE1scI %^E%e,7h(+C_o,',rQ4nR18RiA,rs"\S.c]g\l9=YS6UP=M$USoS%P:cmO56d&45^$\B0J$7^+V=1RdVpTRIW`;Q4Mpto`.EAleStf7hLLa=_UTs0a %?,l7KAu#P4H6r9jJQSPhl42FA<-X9n8hZ/jl]T[d9&l(TC+s\b5tKn7WC9?V,fOmB[4^ctoa^Gg"f92oU5*akM6:*Z!Gl\7WDVZC98PQ8$%)2P58Kuprg)@4Gb_67D@8NB/J_-9sJ00<@jW8DNf^PgJ$0kDuC*8 %YpbNj9^;OMGef$i&P[Xjhgb(D\W/bOs&f`UQbT[(p7crp.`rRXMH_t0hWnXRD==9=pdiG$[6`6t.%5]Q[ir\k@)fo/J[-'O[SgX? %l_e+SB1B%u:fl6t#La(5l)i>@e:VG9W:'87ClitOL5`"[PulSE]&q6@Ku>u)K$BN\1@A2M'C;kGS@&pLVY"@,J5%cdgPi'kf6A\VP%JpHc"(`Y#I&f6:i!3]8hHBph"$24KGnfj#c4 %9?a98)g?5,Y4pi`447usU]'(ffZaVd/Sm)L"g,4VDhF+J\F/YDp^_Zp/`Ls$."0.3A#5ie.eZpYpN?s %e@Iu\P?nr4F/@"gYcb&TYo+s+rWtVYi.k'kROQk.k]2eg[p.hCeG)Hi<%[Y[q35T'GM,4%iK7&Sc42@W*$%7Qe$BfQnOYfDb8r&_ %*7ra6=7s\8fa^iB+W'YmVJ&bH.IY,qQl#lJBp0n!WO3;_9go3"n_i*O7CY>(9r!0kZA8nK;kl""@8t+Dc,!GTTe_ %$]K08rZ#'hG:rInLc8_OGiC9`1D_/XeJ9OBFc:h_2-Mf2+!BJFj;UM$_FPL(RlMqh!O[!A`q#(4J9d?SAp09()&72neT)7eEh\kp %E:=6Mj[Rb@ATVbkOUYX$i]o[n5n$pn%(/'f*p\KF=eH*9#+gsJRh9!UGQL]pkQIR]Tn+c),c3TmM>?>:ds7V&>N=1:WZ"p)VHn.8 %(5B3,nkI_805(=4\es1;ftCG:U=`h2\>@%&)Ns/QY6ZoL%RaiZ#Kc)*s0 %/bSC2+'G/B7DVJD52Xl74gLO&Q@'EY+05:/H=P]-\kAgSW4EG!%Z.S+XOMh:*QJ5Yh+>fV-5=*;QeUWUYUD8.^9BAZ%<&B^jim8s %/bh^(a($8/SRm"&Bk(XJHu.bY)>2R@U/"F'$TVn^K,>' %+un.qF`;nS?5$sd&p/p\I^cu$BN)&b8)3&$rJZC"O2&Nc%dj8K]KUVoah*&np.j1KHYFJJBDV^J4Fs>V;r>d)3Y&68]\W00;X\_V %f56#sD82e;-:R6hd*n$YSe$@$V"e %T-MWT`^t^f!@i>)Rn:VV^`sA%!TmcjH7)`F(qe3UTlC/?(H?@XDEuibd,QrBA$$A:ePldUKWG@uM308r\uhDeRS?dWD,gs@0L!?I %8ngZ,%d4c`29b7G%-(OR:;[$nJmC4u:]cUl-=G;WHj/"7U;,tl1=gF^"aOqgrlr]mq5qcSD8FXD(?Y# %aum+$*ik3%E1I>k\!;4QZ$fW/g3$%c5?*TQ?fG:t7a@6nhPPC!Q_\MV;E)S/eILQ"=9`oQ&S&a?DlK*L89;_\UK4MFS+N2I^"1bf 
%=a.L,re--;-*:FM$M\I:5BlDoND?DN[Xa+DG4:\9[Y?12AY#'H!QK3qREnZD.QsF1MnGA6Zq8G:!87?ZVdu/G=:bU32*Kr]3ts#Yb^`Xan %.n/^RS!IaM8_!.;Bgpo#gihkdd_/-1P2`#bb*j2M!Mr[]e2*9K#TnqiV%S62%Vo+H!e5rY"VfGf[^=c@Ap %FS;V<#,JY"%\h4WQSJ6]!H+Y\CHZ0s$=dARI:-.IXdO>E[rOc54G6h75HrZrno7VBA5%A!8N>h"%jm>\VM`]?66-JAE0?;gor!it %;94DJm>cQr?0Je=;Z<&)_qKF`#BeB.kM2`?%gcY\`.%6u4c>?XZeoNWF)*Pm(XJbZ^car;VMLg=R@RBZ`rD!0C/9,lU:TD\kdc"- %&aI5[^n\.`YkHu>rsAQoX\*;9]jQ:H#0^`l=BOl:-3;Hha!?B?p2PeEOBbVbTXJ>=YKBk7!,Ij@hd6,5/+kR)!lUS3Fk1HEg]fT! %iEFu2@70^a')d.qbc8T^'rG4"0PeOd[2/@"4_@](\rA%X;,tD8[GB;C%S"Qg--qCTGD^CDOJFLWbk`mG3Xr6%$<^fh %-Gmd&a>WhT<)9r=WYg#6@C>n+@T0coL67I0A8N=N@@??#[nb\2BAOLh3MY:8e$;h*[C@R9G-ooa(u;4B`9>FW(q7Jp#4#M"%H:C; %X2oMIPbUaf5J]Uc:M2e37^6i.Eoki+rl6m.F0<@!>_gE(=-AL/GW1ZW^jIt6]$G,E1?a]/9FBZ#G@-E=TD8Di6uJiJ"92prh1k);M([SafGr,g)M7;R*bphM6\p2IcKYR\UL%%P>C:_;3eg0WbUB78KGXYIoF"f[R^/)I:JVQMT3.=&7XW+"F'!1RBQRs'_mnAm7BjQD2bK-RX1;NcA:>Y&EI#5k7jc,jZcPsP_9,C(f7VB;ObI-\C %LO-M.'Nm"Y-'oEVUfi9MbJthD_\Eu?G?B&;=nK*\iV/]kOSsO5j8rm!,LqOC4#A^h<4a;c-U2u1:-p"WsoK"Gt %<0X*JXpX=JnY+V0WLiTA'uS<':YQ7%a]521;=eNk132O^23&D/(bs$i@_&M)O'9tcePhJknu@_\eMC-#D3`mZft@$gJ*^A94*Q,E %?.V5hjd*ZmWfsX]=e$4\:4R?$S0G^?]IS'/Jcp`.PdW'1[@^S,6`Z8JrL>K'AbS5/F+\''O@DJFK0e*N+Es%Z=dY/OjbZS4ToDj7 %3Zpd:jf9`Q@O`6*E2?:RWMfrM2)C8X1FK:UFYu`$!a??OLJlI(f)NMX/,UW+YicOJ7)Wg9%<6lZ?soJ6FrgQI3?k)gES,aYpGTMN %h!=h)23*$!QbCC9c`)tsFiiARd&l/I8!?;Jg#Na\ncqlE!E1\]Ae;/>RVeF(1engVAAT2hX.q,)S(6OC1o52oKAG,M_RHr(O,]3k %\lp!O$h^gR/5J322m'IQ<_`"cmS.K_)B[1-=17rser9.3Y0Ni#hd>"#0s`K`>RMZql4?P&d?Zu!8qL(Q-&R<>6$G.Whf33RBY()i %W/@Oub!*5s_@>V8CVfE:EnWZ5=Kf>>EG(Tl0"(A[?*A9Sj*$,rIGK4#G?21YgYSO`*U?C"Qr^J:1'OeXXnns]&;u\K&,F-r)kU5= %dl+%+&tq9O7ONR.$1Tn'=-SC0;'@,q5qX`.6MrU`O\UGDT8h?7ap-''A>%@];^O0t'T.:qKQTMoV<>s]h#pa^q?6Wa;0d.-/@,,Z %PnbC)']mj['3uh1hCKu@$].X%PZfM5jM[!n@Oigt85@rrM.EraSJ66p5Z/bL#@=B6i6`Gh-F"LsN %McBMN'Y+i"0CD+4E_uR/eDi#A)W>\A_P8p8itGtDrGZ#>$AL#l*ZO1N)RT$/H!DgA.eR[^Zf&FcUNeXiVD.4**L&sD*7RR%LlbAC 
%80^WfB=O'JXhRtt0bifIZQq9:7Flic[Tj3+2H[Hbj<+'`Y"Yf=-2[Lr8Ej*NI$-mS(ss'HSYEuOCA!$T[bu^.RCl`'aQ4X+?QS=Q %PM-jD,St)FO?lL8O]T;`#$FV84^EQ8P%A$J7H@Xk1L&*>/K;PF"QKLoZkP6kk5%XNr=<[_=5*!IRr0Njg2QpIi4c>a@_Gjnc0?(T %oMIVj/Yon:SFW$MqOir0)Cac[l#U3&EApK=`l[*`3OIcnZa7ru.=3&:q#52Bd"\N2;8oX>ITAr0)X(c"jb*)[2\kauO]HC)qaUJP %Y[i('H8jC!>[?isaJ?F7Mg$20FM6O07k1oIlXmh9="\@AHVZEag9BCn*7INX]bOn@2*A.hn?_3,#HI%!#GorUA6$U!G5[Cqi^oJ.<%^%Lf,QYM.g7PZo?-/4c#mTG])f\>GE %8@I7#o-E(e/(lO>_t7Y$2mYH"Mecqe'.5F+S;X5gbC&BPpLdr?q2!%jAa<`ZRPhs.Q3tBP1mMDP'i.ciik5!0VGsumibVW&R\e&K %R"e@SZMM&%N,PaV[8-ZSQSjig#FiZ45iW*;](ZTa?SbfGR %*S[)-L3$D<0m>R_6^YOUqJV]Z:WH9RJ6oU=SNB_@n27]VH&@7i!505cHRps%JZ" %96-ZXcEh1g*dY2q5g*9iMRE\S6aED%(Str6WX8>XR?4sO6*b:L=XbX?eOoM$-d/TVTLF=4.G@X?i\:5(B6CC9pIY$(.P2Sqm&2Bl %FH^p9bZ^`.34d;3g:!u^o5=,3e=8K,GSlAD:(gf%f#/kp/"tj'o0UdCQ/fX\H:KB?9m-uneQp86K=sCG6B'uek1$XT;gkEPFo:DU %qhNk1ah_>bMY<&>>KVmtE43e)Q_*2U$%%J%HB>qgfsk@Eirsekf\jlRdm[9(M9?'V85ME&/H5n/3-"c-;ghe()9itM"@icc0_rop %-M=:d8/+%G"U1FU_27c*E?k]2_K;Wu_MSP-K*ZR]r's=d(WVK0_#]Q[3cPRk,W)Y0EF.bY]\)R/NrPIMYDZ`]HK>rrB/h^SXtNBL %.T25N'VJ!ukf`>LoEdHO]6hMHZ#_)pl'3P%c' %LV=u87];0dO"bccQeoNZfuWq.<#lt.hn8l1Cf[Ls"nE1qpr_rH2"UaG:XDl@1=s_>T:aVZcZuXuRW%d%G-bE?VRA8E1Yba"WbkA. 
%DL/gb<$NJc0;/=#S/h;Ld0eIUgokI];YGZ0% %Z^5gJC1V_%oPHL7>W&EPQIm-)RPiO$1$`dj?/:?5IEJuoE@2,/4./)te&6'47["3cQ$*CRNN:5>boHcZbFXr%(VMHu?8\Feaegk7 %4X"#rdd2+ggJ(D9VQ/"bW9"m?SC,^W<-=8$AiIk8\'!8$:q$?\,uEAOV'HV)#_r\D/6O*3Rb7h8'rr5c@Ho]mQ9"'?/&;dmX%Z^1 %:7LS%?B5&Q9FU]tBMA')1!h#I>R[%a076ICHl[(,f7cP1*Cli>nd^\uV-)Ng`=iNj(QLt?U,(dqV28e?kP75[NmEjJh5*Ar'W4N- %DrgF7bcu7([7Oq+.&p&%2^u1q4Ce+[=#DpF/N_B[OLL(:[,-UEH7h2*.lXc92M/VV2ViOT %!,u-WLDRq?a&as'@gr/jVHAb"M^/,6_/J[ %g?e$sLcL_T&>K@n7Y)hB"%7!@b4eh;C#e@O@SK`T)lYI5JVL,]`Ag5aKp0TrL.,^?6Bpp#"C[ChEpba;^cl;.[@5'XVtFW7$rSVM %"L5R-.a2)NCi+n[fB0E(flQR1ZO=-[6']AsOufZk&$**59oTUB%]\B52B$D)PQ7GuM*(^0e\+aXPpb-aT@H*s,0EPk/7VE6H(08s %F-^XF[MB/28TERL+PS@fgM@gU2*Xe!&%9X)R\$kJ\X!ZV27JI?pQUU[jUjB+#c]-i^Em8^Dc62)<2$`m3+@lJ-%dJ&8oT#2fql9F %8Ij8`]mbX>?j'B)\FmL+&pjB<>&.%WsouZhGiZd+NK@gnD %EJWi?2L'e]aP0'1r?<$#Kr\kg:u%m&Ki>SS^@3U>3L(s_;_^sWHp8fFj-ch@mD%Yqs&XLC57?tQn)E.Vmf)0p%!USOr3q/W %riFh$B0b8=h,acgIT?j;07UBH%m]URBtoUqT^l41tpY/d%]>&\2 %+8":iNX!KX/Z$^'J^+9f^tp*T:S.!Pfq[#[#1[\,VL4N4iMajq0.9;SEG^$3Sp[1C^:,HI!PFIn=*uC('IE(KsVtAo2l"l %@t12GS'g-j+^cP%q!c%r7i)Ml?VdnaQPMF6(Q,LT!s.'5C+R]4=']to]JF7\SYDu[KCuGbN)??g9AP: %W,?"f;sYJi^6c'S0:9541b>J3R#%8FK]AjR:U&97c:Yg/&Y1L'BSMHj^)Z*S3X[T(h>:>u%6OV=i#62K.\4fDr>Z2m*"/HFB'nZ* %8`&j9h"L!'4ouWs0G=X;L^IKpI,%*G*fg!A"idk/q`$s,ALrQ,dBTdWrEXnK+D@44%`()&cIhYD#(nFA(uYgN2h`j_`!r0;[[dG! 
%UkU65j=07`K@>@%d`Zk#VgJ<>Z5`/#.hH]8R8#mJ@`G4N(`j?##FO6lds6_<;`Xisd*+Sq*^$Rk[6!hlC)TQDR-ueV%f&0WKf6gE %04Wc6#"G0(YK#sS@bqS77,Y70h.sG%F>;Utb7`P];>9,=[KTcq'qn:\A2'`:PeR<=@n&Oq`2Ki3"cL)CYHT#DXmXZG>GeS=C0*r$ZcL!?*a>Wd#*U_+&^9n3RRBZ,qY[pe2>gKGkYQ.< %M>Y(D0OUkFc!e_/%jp34WmdP%kO75\e/XhLhlr:(Zh,M!I<,8\it&nkG#U?fkF][*5`CiGOb'XrLW*-%#)M1(_$R_S3;Kl412)'j3rojXAdAl1!p42l!\>R-TWV/VpPHXk:2$'e %0kJ)*:?3ZPY+Eqp)ZBWEA4Fms0f3A?/ruP/%*E9hU;Bjd7t06<:Q&)1rVH_1);glQ"O8,gn;SYUO27J\,quQK?h#NGY`@MW86lFs %DbU)1O=(0kE#^TO8EC8XG0<*)ngK54PMDXp*8djK[ApP$$?P59t_2<7\Sg`7,YeUhp?r1)_E1!I(K22R0@?ssS\8VP(=:DK"jopmGd&d<*+rFEi!>s#*>m)lJ32Q'[ %n/sf$MW;ePL,mGjMnl&/GiP*,P5+p.11lW$OXd2a3`%LdbX(r-QL0r9aF`k8li'T$-lY9\Tb=\-UB#_n"r8A4H19SkN'nDDPXjMK %"^#Z-@3l,DZTK](!p]CHnS$QQt/[YoV@C:8`i-*+Tucs8ds31l\RGBd25fu %S3cjgJKbZNl6mkW/4PL5b+;6nBH%5fZmr\_8D6&W!9?QY^I#VZA,t2O1l"qGjEuk[2_!aeO7V+*rREgct`,8mu\MsYr %8-_Cg5Rd]qQY&]n)o/GC;+_9Bng7q-m`/02(UokGmOc5kH$c1F3!+c0(at)NtF, %(@e,`rVVh'3VQ\!a.aS-\Leqf+8>b5+o)R`JL#T %:ag@Q%#,@u*14SFc+"VuO'YjoR0pbJ\9DAI[Xe>C\_RNL)O^*!bhPT-1UA2m+@lnlo,nI*cn5=E;5P[be9q?WJ`99=VV:H#/]u4) %)FV+l#i9mCi"J%'#,8jp]t:B^6!*iiqZE#^#B49obLq=^Y+UL.[-n<$RCfG*kc#nAa %B=@G@%'mjIYg]QF-=W(VadYAk!O!-o80SHBMi\s5T?i/9$sY,^YJU$mG[07A?8bJ0qr!L5&\*rWO=<5P.,?Su[jpKCKV7F\`3KFq %s"Y-_Rt4GFiT/$OqSn(WN-s4fK+'3k^go5QK7(.u=T$tVp-TH"]?n5)/Np5Y7'&r\QIA`.?kt@-^g?5:N^MrU7t"Jd,!pLrWhA]N %%=j6^mBX).KKn[o3feY,6s9sfQ@WV+,lks,_am0HVb,S$4Om(:+W2+eq0kVX5!0Y>Sn@4'9I'Z;/FsAnBa?8+s6,[%.Gl3U$C2R( %4cecr&np>d!B;%OC[s;:"W.T]_D=9rM-.QB'#^4DRY,l0[ZDD8G,51;&WUF3!`)JjM_`45$uC`X2LqGenc`"oJ*G-fF!i,6Ku_&5 %REGq*3P3a.Et,o.b5CA>h"_@@6Z.*&YS=EY&(8OH`^F\65i\Y&8KQsN_kZrV"p[BF9`RFd_?>kY1a`K>jV.b9maQ+Y4C*UWcdG(= %Jq\b%h'j<=*J)YS\B&bJ2$_.$i46-C$5BIa,RK;G!(KpZGJ)Z6Tbtq94Tt_hT7lL(XWE"HNi&o!7eEPXWs0qG_Lqtmknrt11p=ab %Yt5cT"KPuYQT]?/^qr.S)JC3B`oJF6j\63C@3nCg`/i:g7M:q@3;A>B"LW`t1l!j\Ece*")CJp]=V\qN-s/EG7+)UMaij$Y_[McJ %c_<;?hXJa:EWF]t2_D;#!#nnH"i9lSi"*C4n]5@G5:=8K:p';Z;=:NdS"L=Ti[gd3Wk)(Nh)R5EBC/YhbfBlg&+u<'gB,_0Z[t!S 
%^f_6b%Uj)p6].pX"pVjG>$W.#WGIGKjCd.9'tKN04UMHNFE5>hV)KY-R7/c+I&9]5;f/S!eqBpHRlM3UM9/gOSsSRLOq.V7Qq,iGEP/:&(."*;"s;Y!,02hDQplj$>00P5BbA#&[E@-Mh"%C-CP2V\uR]O.BsARNadi#NE;0cJ/5,rG[(hK`(; %6ij%mU10^GFF;Bt:uikoqPaN?$S4!UBcB %1'7Upl#3/koSD-U$G40F*B'Rg%("]Z*e<)1n(i$Y(I,P/mJkYJ6[RkTrAT_DnM)766LP9VDJZD:2qoOS,!(qt?Ub-KW.hT+Y\8Mh %a>g,O$K8&DY?'RJpWELA[Z0X([?:2:A;S7UIE'VfE5.i>`1@&j&SqYY&V/)u/;X?8jHSHnXkYoMgJ6Zl",',Oo1-U$oRq`487)&\ %Lj*G"E/%,8G8XrP)-r')`a2?Se.j)5"Jn8m2o*OAJ0XE*:No9h2E_+O9HJc6kiLY4'9 %GR,."_MZ]g&&)%!2f[sa$@k"2Hb=Jbt-`4T]9bV6a!`nDj]lAAV0E66HW6"<=^(:dbb\Eu$^6JMcTm-^ob" %OK:H5"1&!N(o7I*C_3&P+L_CccG2IIBHBVc*nJgAQu?[=h&NDFZMf&\!,qGF1CbcQ!LIe6$sU`)-jliDOEnsq-AU4Z?l1r,,VZt9 %6rc5J_*^n\Lsr!%2?%u-1D;i0`W!i5Ah@H$)H?=>+TSm^gslN#u9l"3io9po-!F4@SWoYAY9(e_FdIN8PP&W %S7H>3OW1[`*+_@tZ<#]I%"8mY4MVDuJ>cqM-BNETdL##7AXAI`O9O?%m4/C9cD=8S!G8JBB5WcV4&.aA*]WdXAfQUg"(rLgQq[Z;aH@\WKJ:p$&.Esq&.&(Mpdkk^I0:?1 %bM\em`WresZji7j0aa8i%-9^Vp%+pa-Zp]:<#oM4hj=Enb$!'Rol!H=k:+:-iQ,SR5o %(hJUNSH?7?2e8W_bA>=Cd?tq9aahQ=K@gq>Nj<"G0''e-]b<*eZKha5(EDh;+tRkE/r1sO %6:sVkRYe*.B)2<[\O8BSO1-M#R/NfiT`5:Y@AJYn(24bROboYKQmX%1EtB$67qIblTL0t*M]B6q_@^o9_2put?j>fQ9'GNrER;B. 
%d5#h,1,S/12D>2V.B>/@Km?dF*,'/E%9R6,(o/Ml%a/Y"PB&3\2@$VTBAjfC#0%dD$Z6I(!#0(s;[rHpO+"`rK5Q*bf@5Z9e[%K+R[s:Q.+TDIT$%.EhJOd@MR'MU6%`"153?;F/tp50CPM\Le7!!hAoTafM*WYbF*\]=qbD %1ARoT@fVDuk8_\k'$u]FjQPMpbsq9?C6tgQ3sf\!L+P1`EUpt?q,nY%#6UD_jt/j]!.H=%/-JPUKcYpV'LnsM1DgA,!M\fY*.=,j %%5NSn$^lV$[=KXPDMZHtNeO#3FK0n8MMI;]0l&rS"+,;*37/_1-]G]V&p\@l[fVbM6u"T*<7$&WJQD=cbc&3FTUZ`o=mBu.h&ppb %O+Z$gckV'cOXMp=b@4^UD@h=^nt!T)s'EL^W6fKL!Q.0aJuMahjre&746]%u->ak0iOX[a*@GEu,C'Y/E*?_8R2+k[JEPTYDhI`` %P!3phbhtdfM\fb8#(2(Ph96>d?iW/:Z&U*=!,tf$LLYA'JOPubLiEY&S)VAqjpFLV*s'DR^Ffb'q>*GO*X&6(RChRcKi!ef3:aba %49/KLmuO5#)[7#p$ZbdtdHd@#H=k%'OM7@$4m8:6Wo/T'Nn?TkU,%atb_5Hc*!M0hV4>m_VQrqj+Un]9^s"cO>>o"L#]']4E1SjE %&F*@*D]hs*9^E3HT]i8s;=J/'bH_BhJe#9^]Hn,.B?Z6N9@="Go@VuH)u*51E00IL#%IH2*,Lh0"Tg%,1WkU[%NMK1$%O7cLEtAW %.AP\//:p9"b[)5jPf6oro*%kH5u6Eqn2QB6W9l@!8uJHDuWoGjZX]&sWgW.CU> %&c0Jp/:b@/TVBM2T]=We2bq5c@Jl^5nOP,E!.qUZ*]?FkU#0Cu#4^8!eNsd.]=6qfoJNm`!h+ %-HJ;&i:N5oW/Fus?RW,CUkmJQ5YS#icf/8rF(Q2.i`.??+;@M3$`M9N$n_XKo4*muV@Wl?""@.NS.f$P.kOVf7;G"-$&kC\E]9-U %;$%66:c&RgVZpaRM.o"IaDVbJ3O,RkS4:2.g3Q^"GgYJ&7/Z$E@^7*rMi//uY=^mi*)mM@[@g@um3]<2-Q]C^kbQ':#`":$PdLa& %Oe,MaOXb)7]W`4_5c+)boSq9$8GXT&5>,o^<.`25#DOpl6I`@2>[Y%27;79T*sd\jP!hF[$*sWFl7sQoM,4;EIY2u4gB2s\8.Sf; %&O"7YN!:G>")-;aLB4OPMSY3kSuC6`i&!rtIB9K1?5`$!8#N`+9S+8e+0LanYfrjT0,HoK,X&(]:^$0tEn$kO39"V3/(o2f<8Ckp %Qc4gfIRpb_]Lr"SH_k#ak!0hr!g_kF`^Y:BZ<9K^0+EGR-gask;\"Q':,hjij[Va"DX^2_"sADcd-=27CtA2-Z>bO]cj^lYpQlkR %nRLE5Ekm&KC'56I'([\[.E- %]%\ZmW]4BCP_WZ`)Zj2nEuM0"7mPH27T/qV"0(W0+UB:&(LU5J5[leZL1SGXgqo6V3*5`Y(*6T.,T9?-i["jZe1of$WUrho:Md9&.*-^@9%C6mm(!eVV;3ARQcB7:J!,SETK:8:E#apo2k&2)?q`CUefeVTh%l7#(0J7_Wdr5<5_ %$AOeLnJHe!@aR"`%Eqh^N@\815`"-1KERNDpA>UB3J,X^QtE,P]ec\s;`*,U)Yk(Ilc]2D23>mNS]2f>*G(CpR%ABD`=CmH!lJOKWESf?9^6AammXsNX1TK=gkeCk$[>MNV:hY#);9nl0rLYjQ_1[Y %JqM>Xr(Nfj0BTeKVjXfuLcuY"'DnHcAIaqULc1=\P<^KQY$4t+iq7N5][MuP$f-!#\[YrE84H";h %A9KkE31nF5S58LJ[TghTCs%abPj&lEiJ557#j,-Xs/",WD/E&be_6)1I@>ssFh8WbX*JIWT((3rq+[N$O$Ke+&DLH>WM6pk+-Q%*ue_dGSCRAu?$u%9pj10e2iYHqBQg 
%9mqjb^d)$qkGQtV/He=C7-WAU,S>Qe#gDk%\:Hh9cLjm]N_L2)M)a1/So)pA&7QMkI8bBEHn/e9-&6'P7%iZ5XSGE,mjts$)1m\G %G+9`61Hqm?IN/OtW8)@4Z9%0r`%/qj@u&VrcNn>q>4;_QNOsn#I`Yddr4FQt&CJp[lTq'+ %@TfBOE(9qp"5KHIMkCj0JS+BX=*9OgV/T>`4XpH9/G(l\0l&o=2MFMPg>:JG_/3h.]okW$7FZO'5$L.:r/4LeL*T<`*nfE=K8AjGNWX'+RJe_)Td*jF:VRP^dZH&-mjXE[c>Z8(ARLqI!jW34ZMSm)1 %3**:=86StV!lNuOYeo0[!0eKk3)Mi3@"mI2#gSAPMaI^'&C\UNWjo(rd$&F<#$S!4IGG[4i%3"VZ*JX3W\@Ko+fF#\$/PY`"hami %CqpEg?nO\Il9($Aa+kWqEtk^tI(ad3Ql!)\2bo&:704.t,*6oZ<)2/3Bd$:i=58J5d#&IETa6ka0M/:e(4kau(<^_9+Cn-=f@Hm^ %ipt;kI"Ieh5mUHgIX?uF'2/#?*R>+..DF8%RW*fX/O]q*RpDgeKNek%#4<8E#$;/!+c`D'XGGBc2CehLiQdQsNe&ZTF:B]d(q-kO %`lrF-_@o\s[_T]:r[Y]hIZG]a$UCB%6X%TO01WhI,StNL=K4$!P]IZm.>qCm+u4aT&MXD/;UK-&[Y"iR,[l;]TfTYcZ'NB38o8ao %_=q\0XWkb"\,&3o1ss+l%[ZDJjN=\Y_@7qjeThRt_*of`,VEPhREL.6YR#_43)F7W72&bf$(,Li)J)]3%.$!7Ck],Q'.I#$,h(!I %5fF>5dG/r$74jFQC5],3=BOb"))cE`$X`$de-Io,\;7&62sUXG)Os0ao[o5!`DeTDWUm?9P&#S,TNceK$9Out9Dp?$a!H<$'^1AJ %;]s:C+.=fQ(4uV_XY!`[ %erfUcr4\-6i\7#304nECq8g>Y&pH0SgM9hX\akheb7\\M0?b212HW)MO[Lnp"ur6pn8>ZLrR8jVA!#3QS=I]\(#FHW+X;&WBSgdR %NF#e#(`8L.WRNE[%Ig#`Priam!@E7pl<\'f;\90Ra/dMf<)e0k_5P8ENaQt9QjNrdPBYR?5m":-9ECEYXGdu2IUA4^5<+AfM%qu? 
%#h\':(e&)7\2U:B.k.tGf_)0C#'^Z9T7ZUS$=F/;NgWMC9JE'TXqZcJ8sVb!qq:t9HN74Zh_b`Kn@)L0,J)MOC)+qGr"t[NZj^X= %I7kbE)3\MWCKOL)0[Nf@.P9^7g:lPrJF5-aKAR]V`6C6XIs';NZugNUo\q-5K(DAp:f'(Y=eJ$R1%?B'3anQ-Bd,au#?7[QUd1XC %ONX[%=sPi1LU[I$=!5'L8]7XOD`]^!%=k=ccTi!'\-VfQK<>R#Ppu_qV[3BeG9CQj=n>TQfhjS5]A;!1:dbDGB5:_A&9VssKpJUP %n'YZ1))H=bRO9?4Y!o>/p>*?W[P#>lCQ#u&e%bMLko,r_hX!!HKUr>oK2m4:LAM@gUKeN=ETdU8V_o:mWc0*43)loc#oAZI2[Q>] %'#$#>6.s7Nf+&676._IB)$12^0\J<$D[s!A;M-Wtbb>OT<4Ol0+("_T %%.pr0$jGYG(h-ljLoMZA&s%]_?/q-aNbMAR;L#ObG@lpU2&7,7aU(:aS7iV1l/>4A9XfgE0>I)lE#uVZOqlH@1f[cc:rHcf,&":X %o%:Oidd9rF*,6O)^k'?7i;+f-9qZg<6DG*:"8m%r6;K4h(mk`&NL(Z)+jah#_lWuVQT7B3s1'5"LlIC\"Vq3egG]I(c'-+828qZ: %clHkOe,%:Ke8=0an.oY[2?H$FXcZ@f3$kA-&M4..i6="Y/lEUEWV.ON8;5b1q5NNXO_H_V#eiaTJC\@o$Q_;\;sM2S]WO5su&4:-<+sU4O,2 %WldiIoN^92.)kAD;"60?_ojgoU"\Lp+Ns:FJ_h=?m^Lp00LGP8)W5U@.R2e.gKhZmi:5[(,ncKilq"Lp %72+=!lG=)7o0WV6T"9'.MfYHn`-$LY&>Aa1?*BFgZSlG7<:@bP^CI/9U:bW4ld5P4;"$U0j#\\CKfi>=E2LN^Wi=s03)MCI%S7A)7Q;[R`e@aZ9;7`jeY#o1\]_d*BPO57S[6*Nb98Z=7.:a"d2i=Vjdbh+@d %B4di=aZOZuUa/;,Mrf"$W%(H*A5cLIp>spMmN-Q?YKVS%HEJM0Vb=9:&-Mp-]%@'%,b.ncWN"JT/,!LULE`!tR6D1r\ %F4/3SLM2J(3(?:gTk?-u^oGOE.TD%mOf1s")tXV=2`i?a9b'"mK.a=Y)<)DI3RP*V#Ru]6X$iUf@)e0](72lmo<'HC!XW!4o\,VU %r;_i`fCNn?d:4VSaM_fR6VGTVRnJ2)[%b;2bgchfqK$V#pH.kf0eVSqKJPE/^j7N]O7\(V0 %@kd.OZrQ8i)94R)QF:@$YCj"*aL1S!eU3LTG]lZ>kB)$p69oC.blVhGab%kZHBelAeM.H+E`(!Bb.iO.L08k$4Kuh#Vh_:5O3A?9 %o>0ap`1O!d)lYeOGe"Qeas71mN!;BU-(-gu"f[lpZ8F2*1B2t'#t%N3NG6CllgV5,SJn$tc)juY"IS85\CCFfpb\Bh$n?,q>+UPups29B607i,7OA;<[Zec&7\UJ,t\pJIP+O?s]n(oW$>LJiW4#kbkU8 %\l7jYC1""_0S"]Zp0m9pl5Ngd*$dTA4Rlu<2Ls<$,aj=76HUQ)`cE_:*Rb['3f/!V?!6gj(W`qN+,`'iFF$%S(pM@'[.VAF5a$VS*"=sa4^l%).%)_"[qUj6UN=YI''i]]2_($f7O'qH<%1='C8.4kg0pCIh6?;V:i=&%LXJlPumEmgu>11sZ2SddMPJt31 %[LB]d797gF\QD\8D\nQU"mej>DX#sBBe%n\X]m\JJg0qT$#P]G65?Fa.UAUu=Zn3'F4"RbS7UM9]3:F7G4[2"S`+6@WSfRc]g^)l3A47K(1TCSq*)YIFG!%'HTF;Y6FVWiWK(i %oRhRf;1UTqJEA7I-O1KsJ%@)VUAr1haqi#f\,N5r+]o-=DktCCi*5?0>Xj/H5!d4E_c>AQ5+!o3P.fRL7`X>%62JpWU@\Osa4XFI 
%,ZKGc$W.tU7B#4))FBA#cj2sdFH13j?`rDuB7`MBc\$_c.ZKam3>",\ZEc\QlH0^iAd0=#LP66Yj9E-%aI-]N-K.Ij3IYffE:cCJ %4.K)XMlH&2G;f[=2X^U>j1l@XUWE).OX#2W;e]pBUIY51D=-OI[X[L+0'VAV"0"[CQ-72=BBsuN:$Od'8S@"_DEo_E)ClQY_FA64 %7o+%e+"o.,QqX??GrLZ=X&@:#Csi+is!jV2Eq-ao\T=OqF&t8b!8n-iR[j^"4;CYYBZ[UR$_l_!Ee))2JlSk3j%SZVS6:A^.!#*F %S5(AgPG(La9"4GK-Jd1iA23_qcSGHKcNtG/X4]B[jEalmpf85%ObQ(*)%(,Vqge&l`W)D&hYLM@E18A0=ZX[pLFRY2S@ftX#9>j %biVBPWFDTQ8Fo;R+JfUHdS29$TM?=6<+@%CSUjaQoKLRb*qlq(qA@geKm@!rO^'?-E;/GR;j41KBXsH=]el^)MmL2u[K1FU$q=NQ %d57;/D_/FJ2]j^8nt?5CIY>MoFW?2AV4iV^sleY6cCg,E&`Y&'Fq!\OS$m]g!Um#`.i][^Q(m9s&J-c!V]\>D\%t %#0Y&NWD`N!ji-D28>4Y4>msf7?PCK.@aV?BWH>ciRT[(o::`ZVT:4!#Et`3@aH?pLnO.2qA_3ebfNQ5;,%`a2)lC0-],'O@h4\-+ %amd;qEZqOUn8$H)>c\5Z@?Xr4JbJrVnaV38CnlEm=O5pF6E;fBcG.5WUht:'GhLO)b"LqhdIoX:%8CumWG&Cg7]3!0B+`j,@Gda` %;FnaTiPd8s4o`/VoQVQoe@M)8l^t^0$@NE;58hdZh=QMmFq`,F"tD.Ks7oa[;rd*PZEiJAae=8b@4E9n\/@50g`N/',@Qgn6W9\> %o%LrAC+dH5KuL)DnNTpjO%S?NYTkJZKidhinJ5HK[ZSMeQ+f.r#PI9.\bpp%52(_JNJ09+>Fdj/+%b5ROmnmQ`O47CC3QA=*,n]M %ieUDE[^X"L4%72Kk?[hUd(\0&<'&i/`)(a\'LA?)^"PJ!#\,U5mD,Lqaf28X;c(# %K=iocip"C1J:nu8=(-YRP'AmMM(dL]%sLg^<%+R%ftMR-nWf0d0o-I#-Q'j0/AX%!f;*7W]>`q2Lk9&=`8-nb1r1m)8('$%^'WK@ %l40B30)#!sL5Brf"\0Ga_osct=>\tjU%Qb*#_r`$0TeJ4?!ST\LQSOM2L?\VLRFoVR$Kr9BqK(1-C@;m0_,*ZI_s&ACa0_P6ptOl$?NW_F-j5-%2NC?&6PUNgHTUdn(uFmbW\@hib3BcekkX;qhV6@ %DTk"1pp2Btl:/=),-KKB=#44/>V5U+,H_f%[>:lk3*4k*cP7Wm/RiF2^s_"cY#//C`6fch`]74uUdZ7Tib/ooCO_@&R*Lu\+ci+` %Mg,*QLTQ7Or6+:d0A8K3CaG,pH3[F)2QEY%oH(iE5[$5cu9g)b5/tB'n8[MS:>V`tWSij?0bcL3;FsQQXa^X4rr`VTj??>\O()>^pMTp-X[-esjHP5PM5[I-ap[N#*bfg7Eb %%DU4=lF$$k.8+>lq#Y]maam3qeY5X3W+_lKC&FZ(VUWOmSL?_kK/,gW20%+792j,J?]Bc`)MBYML`J<)7-"@+5hONs'/FF#m1lMR %6n'/&+"W->H:KG_U`r5W;U_ot1`PR]Eqc!$>h3aY8ZJlE\lo`e[:r@>Z=;SQ99He"WCnm[F_QNriCI%F.:E*(`%'NfJ68Ls^(_JbSN$aE2gl_,[EPk5JSC$?@H&-Ll5LqWik8Z.H`nPg`%^\stjU'sn9p'e1eAS+&s.SaU2@e_7, %A7qr_l5#_Wi,0='=9RS2DsugI2oqcNi&;o3K$uW&d?r5$E.\FbeQH>!4CU 
%_*rj5G?u7go1a^3HAmPi!U/l=dR#NuZ#,o+uTdJ91?]B5<[!=To3u+4#_B9dgWSi`2(LWdK8_Ku`P%)5aZ3:2GK:h,[D=Bu-52DiI1-PE7L.33>k=jN_,b#`02a-LqIddcf=RIOfC>'>38XB1 %Jq)Db#&>g+"jqEA4FJR9l,Mtj/6_!Uf4#[$_E(.Ik*?sSObq3:q:?6M$8>dEg7b]9\Y++5OCt@DHL.c8*\*[-EQZe;AnnrJP %/g&D7KH>9jGiOb()]*e[,&^`IN=4dD$Eo.P0"K%L(V/UqP/MP-UV?LjWD';Di"Zh'B&_q8S_205@rg&f92dGb?]>6\pC*jRBek@3 %EM9+8#,mqH`40k-Oo.&mMt.?sPWtqnQpUQn#;)N3,Q'f14F]iq6m,A+ %N;:nLMo5'a,PPpL8^1rrBkhLqfkcIgbKJ*XDapeMc7&EdX3&X8pJe39c:+(DW`c0ke`Gr#9^##j/fNEr50*OpZ]llq<&4e2;!3`\ %hpb1O?;-n@Kss7?KN$VT&l,hF-Q:=O'?,4m5VY-'O0'.qoIGoNBXpR\MGdjKX[uu_No(O,RrP5^6+pg#`"E] %IO(giaX1K^=m;4ndA5OlJqS&5]_K.#WH[gpiEUO[QVEusFJ.-M._,"H0g=QVL3G2'5.,&GVbNC\[k2]m#15&#EnpoS6D/`lAI)Z#:4pKZ01X,s8_lf:%k8(3&"M,\5Lr)a8N(HL'h;-JU/+in7r7b0\QE&eqe\YKJLE2R'\))E")NV_&Q#2*8mG=[&Tu %7>B,4nTGaD%&LAf*SkHdbOSKnYroCD3ll7m'j\&)["XnVX47r>kdY/#+.'>A1PdXgef./%3AA#a(gX4]/%O??)URb)1V+PC>'@?8 %Xk^SXIPK[hCmEPr]`23oZ,C4Vi`;f'HFAN5e,` %XNLHcW7kWF,3u>lXKuf`kZfCL%S_nX[::3miRJETEF8)MD(^T>jO]4c)QdM_/4.++)>Q-nZQTPHI5YF?Y[27at0s%?Y"4ZpCd8jOD7&%];eDT2I5ioX$tlD&h)81WVm%1$t=G=cTdIkCT;)H-"u--]2\D %AghJM>I:uB,J&!+W%SWK#%(p170[+7O/P2N^\P=)3kMXaQ?qNiA#DC\nB\o\3G,4I@60Pkdqi7=%/`D6 %F@Cm*A4<3OT;/*.okCaYlgEVJ(i=A]he\bd:>> %6D+s4A5bi<823V8#uHPZLmeS8LB];H;^3Qa-.qg3/P'U2jOZ4/PFn0Cmu8'S%GV*&u@/1bmdUEFM-W*Mk%G7&-C0=Hb`!,br>h7/a_djtqY2*^1n/;E2H.3F=?qtbg` %/#I4+=Fi/.*Ij/Z*XFhc@EsrT>Pp"*EF[qb2Gb*,A"\7pMCFR>SC9fr`&>6(,Gc9,n5'[4"8C/&fP@8efP>#fI%eN$h*KV%Be`?6 %=Jrn($s^-,?m@PEU64tR1F=ZEUK*8;K?H_f$^^-dYEA@8]/huC6"_VN<3skIXsJVneV6B-EV%jN+G:-A4:,CZbS5fW;73\Pk,HTA %]csc3e!J7SK4V@YZ*_-@PY+i]WH/dGL!^B#lImX,46f`bW6h`N#3=T4ko\%8i)+;sFR$):WmK%gXd$6@8JpYiI.GLN(WPWBfYfac0r!ZG`RD>O8p'j@=Z0cRT)R=9\2De-*I-btK<$/l8j/G4g'<7*n:%bck %X$0R,e]/$!qWKe9+$Y#,FK=Z+V(%"62;8.+TEa3R3l$Poa)SUCeS5Ch`s2ME'mt+][@J-GLYA[bOB9Y*TbjI%FMjoATQi<] %du%iLX:!Jq5jAGQYo;6B!@Om(:ea:Zorh4=gAo6)>@h"@3%_4od]g0Va)&ZOLoOkTflH+%"_q%'Z1.K*IS,`B>IYg4Fp+KP"2UuU %\:Yg%]AT:ud:FlUHk\ChThEmE,@tK4Po6_i['*Ys.i!+kY=8hC:[]!=DLbD;<7I"\[4-ia$jHusC!A!VVbrr7aB,\,QqEQK%A7c@ 
%-+9[^_EpUCR3:-=ONEX@!Rn6';uiS"`ea1og:2K'&!XlP+G;=8Fo:>@W9PdCYWWN$rB(5ORi.kJYW;W:L@7C %7(u\P7o-V+bSWD?+^tD(C(eFR %a=QnYO)ZRa2(b1<'PBh=p/[B-'oT(ocoKLc5YGAtX`) %`a\en71XHY2>ZK_Sm#MrXOIq39fUG%ck)4,_9T&J?;c?UDHqbaa*_7#BUp/:f %G/.H2ni]MHr5Z:q3_,#KI5$q0\+3m^CcN1_&Eip=80M*S4bsp274-Wjc:Z+e!8q1gb.%\tN*4SK8HhNuM;XV6%- %!aa]TqORZZaaa0L^OQ:\s8L8mqW+9_cL"nsnF`tIr'rpLs+X%2&fG&2[RFL/^%rO(7jkq-h!j):N8?r4Jg2mBH%KDUCX %p(#a@V`-=Hi%0M(JTHdgEX/KDJ-d,R:fB10g5I6cQ6(A9XG!D8;dqdX;5k2.bil=Vn/%>8(MP&?_GD_/JeRK/dbneA;q+`%6WR/V %LERt,*MuX;Z!lDZlT_\i5`)a8gM>W[Q-)#uhaVDq*SSttHcWr`//bmI.EeqW5VY'*\q=a^5S,M9s0a6GSL[`["8TEeV&:3>T3k=e %;kK#Gh<_4Q1=,/YCX^NC@P^DaFqL34A7c"M3PoF_=Nr9Ye%f]>n7`61H,D!T%il;;-IXTfVb!3>%g;* %T*V8IT2rkmJsJeRqF$t>'7HTNoU2gWX,4C<<)+X%)LbjarGH0SV+%V'I;=&CZ)W]-!b,.7T*<'2+[5LVDsHc& %-i_q^r;!lVIdPnIrO7:Rf5H0;bFe[]8)O-RPBZbDhfRuN5Rf9Z7bR+AqAk(ZR=G5(P<]!01I(ci1I(j.;ucj!-dM\D4'q=Be1qd+b]86p;5LXhQaQ`Rhk)9Vm67iq]MP %nb>V'0/KEZ[.4hn=%b3\r;,ip-[tMoTE!3ccXLlhB3==5cT_;uc[Yd_O.J!X:MN#lU[L>E^>Cn^gpH)#-+&;?2XNU%NkbJ2e'G^0?CXu?n[YR4ri&-+%W8e&)3/1J`@Gi3 %NrR\N^@\&D^A(UV/b@DKci3)lIJ>Zie^^#F.$cKtWcKlgN_b/f-blUqLL^#K++NmGDk5N0XTY-oUMIIP^$:5WI.G%P,".Q>p>8+/ %-`?Z\!DZJMZISf"^[q!!rp]+$&]+nio*[^qkl*sl2X9=n<;>.]IJW=EgXe8\iSZf %hYa@alO]Fa>j::Ps6HlgkSH*X)A_a2hSNQCkpp4u?i?*?s5:HZ2/2=QF$KHj7p^U'L84k"HM*#@V>#B2HU"Qfdk>)'g%R]-T$fgX %&@`f_hQk%Ip_.$3iU\q/f08`/]AjYPD^PkDh/Y0^ %pg;qYb$R:odC\frZ:-EsM-V\DT75k+Da%IC1aq+'qOS,^[p.Q^\7G'kt97CQ<\Or2-g=E %YQ)],^.fI_YJV41[m0VLf8gGa?#qH)A11&NXH)[0n\u)< %XHANH[3jNbK:",'d-?@55Dd6Ie6R2qsr@RrNH5>'I[$6Y7O!!A0!o1g:.('8,jK<:f*H2X5i %ejq2EZ$X`a%I25eLQ@M,j)+t(bOg=FX&hq5@OTnK$?fH]fju\;IKepF2`4VV8jTn*2#De&]kmiBq)CX %*L@1/19*ehqoU%;QD8qbLe(Q`e!PSTnq)<;htW4%h?0B?hXQ=*6e6MR#JTd$F"b1Nrqq[-4SSJ4J*9T?%BDVda'X;l]O->_J*O'V %H;!)n-Jn:=5HU0OkC<-#5H`gJrp_(1f+OuF8 %`[WNL-*iJBBGo98EbH9lDH_,)/Wt5!c"h4.!QoRj[]HRoP)G-GnV3NNQ>Tp=%R+ZH %PA\*k3Z9XYH=8IXK%Jh'Za=o3q@-a"fs#q]D:4jlY(NGL#O0Zp0C?Q,\#rrm[)='YEU)fEBl 
%F2R,lYo_\04g?T.2F,Y/??+L&?iNbP^?g_m=0K0BZYOW`7o;/=W(Kk[Agk6&osIk+.N68QYJ0EOC#+Ng0j3%gTbtARl@E;6q'ZeOMBl%SYW'T %!hsIK^;3l+I%,ZL%3b9.\9`;BZ*1_;C,'n!V5j+nBQfO6]UUW&0)I.!d2HeBmE/'Tk,SgEYIZp-mY84==0G^Yc8[F]G57DVk^\t0 %5teK-?(HY7I'ehRIN[]UY\5tn:E[6ogOPNlJgLXMj6!iQqS2.#FW34NRd]+=\SK*ogMR%\#:t;XA*HG*08H:+4k=dokh603Xppq; %@<5\glAEae>7OFG@>?7_<&LM9T!/]l4p@nXE5t?Ug)F.B`TYbIPu6:o#k*@Ke*AQ'Q1MPL?l$IFVX?< %$J#?^/d$9<'l^#8Y.stIl`Pq^^mM.l0Z**->I!aV/$HP\lDq)#U16VdAd9/XFR_*u5-]Lg`>;P5FKF\hV,6rMAX(NlkK5oEj2)#> %Gi>mC+Z'/*O&C>/fo-2&%8<=d#"Gtu@$1[d&#V&WniYPU$&"X2/I%d#j1s?m[B[m>Er#3!KG+`':cAP^TQ);2#VWq$,H#Pl^I:[ienli/cK>\tW6US.u)5+9M@(j\4o?`:K#bpGE %h]^KQ;q>PVX1e@q:(RtBS^Q%'?X5+'n(]jBo^9>P^2i,V/[o'Js)+ZDmQ$H#3qKB/pl/@GR7\ORA),Mk)moi#l]lEA#b\Q)3hEUnuBE$it?hfE&r9_P]qj(<]mrE:k?XC0T %:]/EU*r4Hd55V1+qc\!M7)^;';%J,H!9I-(f4I,ukSI,Ql2o>,YKG3T"^?$)BNY_FP)gDf&C_2nWPB-)R#1Oo/65Oc_.qeHk_ %jpc,plaFNJilp`FrgjXJlnA@t@iosigOAHfEd6p('n!g+)#K>+IdK)Oq"?rM_mdDF2p\)*pC@.6DmD"*C7V^"BHgM=F1.<+*4`sj %(RSmOr73G]pC/3.4FG2X/[nc`dKLGg\n0^op$6L%eR55kIbIGXb`!o.PE*Zia:+uTMZlU:(ebC29%b$0nZ%ZtB'*0g:!CP^6H:eo %hb*'F[KbpX&!?qImG0;cQAR\Z&msoIh_QIH30Lg,G8*X0i)mh?T5p-jGIhEWBd&ejZDY?jo&%&"6NaVkA=I/X4IhIF$fedFAh3e@ %b[mT%lQ\r#XjM$Mm?k6/N&"W)aKs?UVGqJhY*C7^qjqBX1Kp]4fs$,BU57U/#fg8uf%t5:(rph&OXlI[*<9D&[BYnmG+Waf,ogg, %JraUp:ct5j8rDTW9DLW^"KYLpPWtF+T6mpr_n8cr77X,n[Mn4*PmrmKh6LLi$6)&?"Ij!sa,GZWZt#D33K+V31oV_6jBrq<&'2H# %R\^n6;kI+$$=Eg(u/+*9j?p?fT@YVIh7*akR\=kA7Tg=[ulf:mq\#Z\Zi,TJ:M8Af$G@,;U-L9EU]n6_c)@jr05@aNP/5%?[&S %2!Q2>s(GA#AtdRb.82@-V>M%0K?4;!PY%Z>d(#a60Sr!@-i-6'kjrii[$3,%0Icn+Lt^F3)Ke^+3t$a6 %+WBs)9Lq#&)O[JLQoeiUJ]=eO1D:a?&Xccf@j^up/csM;C3J<9Z=Ks01l0giZLl#q.p$#o5-%^:YELI=1QbqO@E4oDLWS`;SNA`7 %GK$lY8:'u4\nkC0cB6>gn,Fg5]`6s7U$]=lW#E6niJQCd;\+BQZ*4/I;sa)t?[_9.Dn*:RROZQh\>&;\LHk=Z-\(^bq3Tasr:PjXC#P[a]3b9Uo?[?XHNM1P'ZT9b#0"2T %n=_I?d8d(u.K7CO[k2)MCR3e"cQdQ>6h+f\Gdr5<`T=s:r=;-_9\h4]kF_T=CAi19EG&Eel4ZtD^VBZhkJ-`ah5'S;=6t>0X3]DC %GsiU$q@fi'`;IL<7R$HTUT;5GI/^/Wn%<,o"\Y/"a?90[L)2kr5P_"8lRohXaiYW`&V4Zla^^_JbEjbcFm&Z%U[]@96tOE],tkEZ 
%rlA"]DNpamO8%PBC+5DHBUI4$4nXrrCRQ[Md?Z6+n8;i5R[W],C\c/GIQ@jP)3Ra$HMT:TY.cS8%3jLnX-SL9Dh^1)4&7eK*4Hj> %%BqL0^\qs@)h7*b@N-t'^:qG'r8l8>p"k$UMeKWbcTh+&B"L)#BuEi0SIAaJ=;tgP#IH)A3Dh!Gl?NKlY#B2 %Z?Oi,i4&Alhb2Z$06tt!_I;]7CV&s<;=^>Ts-1Y=./IIk_6\fPWV?G3Vl?0f)>2rIf+r*h"R[*fR\_bNjPS&(3WbKjiEC@eQ*4/q %ee?'qq?E/>p#WWs7STGHIKlf%2f3gMWlf"C9FF83/:>B4j*2T1K1^:U_/L.#[pkB#84q*L$r-#adgZ7)-?`e\V?9&_)K]b.$Ft\f %?r/Ps<,)-=VJ^`FLi6[C2W1R>Qb,sp0`sG8)SF\YeZE8mZ/KB1OM\`GhF+*&3Ku%W8)",cb]6C\I052CkuJspZ7i(B1,OT,7RTV5 %QkA$@@-/=<:jGJ`J6nY)7lGKeEO]KdO2kJkJLE=a&M"^i)]mCUD*ldDAqGifJ;8@IQGW+rLHMoXQAj$>Z(kHGE %iYgD^Nt7KWYJlj7/^&ZQWs6huAm61#6ed7!?%YBmVCB)B+2N[h1.>`DiWmod91mF8c*FX``no)9g(Y[`3N^J4NWT0a)bQhb7ucc! %D$7K55^I9=T]n*[#39R-@dJ>TWNFXuiB-'^DI5FW@Ba87b=]Tk"AHtd^VejEbh&]\YIJQK0QD\:]9!s4*e#a5B!*?mDElA*tN8,T=+Q'onGF'pBPg%\JJ8+\bX7T[;CK&1M](1D),.GU9@`*DgA>6HU=ZN#T9\ikL6]ZRg8a:Y%e=e5-0X5EOWH %VQ!`[.eolM^q=$h/?0dA\83%+6eSn!e3%R_6i@I.W!mL^A/gao$3mn5;3X+L4XS@\c+ljW.VhFN23ic%V%lAJI$bI! %VRb6f)I3egl7jD*Mh..\UT%jBYU.=(1k(1H6=X2V;]?]i`,oISD(;s0!A0G[Lts %HduMpO[NCR1gjg/%[/r^ANLU)is9?$Jri %%T^7N\ghY06H[qEAsB/XgE72g\7h0SN$_8"fYf1GUbcHC33l/ScuBJoD#oBKm0D&D$r$DtQ'F3m88A.LJj@+?1VAAU*"8mLd]@(@ %0&UGCChl(KJC'VBpjd`$'LU?lb/,@F(6eX$U/7KmX?oeK.QEm]93B#P"=W1^*skW86Nun,4:Cf.8P`$BSe0WJ0n:EJ/5K$8ttXW:-jbOQ,.u)<5Yc\TKiS*4YOs\M9E+n(gR-:;(8PW %5XC(#?=)2m)PH..L/A-XY)W@Y^H.!=;\fjADrB?3)siRc].R"d"s*e\a0q@FlSo=%L?:%@@8+bg"7dUNF>N^7iYGT)Q+!]nCr/=X %@%d'QL-@:B.Gu5:1D@9T62ahDB>Cp&5X&hd:1Tm0X>_c9Bb]*\Wm1XV)`HM@P7N>NrY0NK)oRXm*%uo0QotR=VGPJM_/f>;PY79, %[Be`Y1K1i3d46DfG)'-GMmhT9;C^r-[E0c6gqi>Fg+ATs\3R>?4f#0b'\FoOdE5.:ErSfEN!rL%W%R-9_q`raY(ELK1iEl/%hr;. 
%#S9BS](nH2Tj6E.-.*DKR&C6NE$*h3!_o&n:-$)(1I$sE7b\Yc5)c3#UDlNiH.>TQ%Ogj,AI__ZC8@G#6'8Eh9d6Z9i=Dr@H@S[G!tug^cQ$G6 %:"jP[)>H*E?uTCNAfW>i%C7@.@*flM?+/EEot[6Q;Y:8LQ8L]B%N"[iJs_E&\Heqb+&iG=d='pr`YAJ&-d[OGj'l^tTJJfo+\(3$ %?p9OClD(+$<5_Kk2l/Or0S9o`c00=Ag)lt2c*#;p]7IG+)Qls!7`UePR"?erKD$#;Xkgc<<0atOl;]_W=E4TA+RE)l\]@fd`'QkP>e+r]G %b!\Xb:/$BhQl?ElA^.Y#;59#e]mHO`2'og-78jq\JlGJ';%8Oo1[C5!TXeKA?p62P21i@!G)^,!)E/Uh+f]>%B[e'T1ka>PVbo1: %\R],^#59;kNi>T$dA+Rb5B:\>!>qgT"0)hSE"`Kn2^8c$0nI%YAt)'k1lW>#94/rB.ulUJW_QWFXdkQ[7olp[aCV5hV/0$Z9ZEu8 %.?dE95+63Lfi,bsk&0<3BH %Q+^&"Mu%3`2RZkCOLd[p0L:>u)?lE`5pbXUG9Tmu0DQP(;:RJTlF_f"37NJ;Wc8`%Lc'%@_A@Op>_TU_Ks1!9K/;F:I/D-<\(!O9 %CP'0_YB:WeYfZE72X0A#O]&4CDIeZQCj6`*ceM,DQ7GlG8X%7nKd$tJE+bQ`<"-e]hE9O9c@'d7ZclRFXJcJM_NV=1DAjASaNT:< %_?,OP`J-qo=Y3kolG[r](o492@^L5OAZ_C>,t&Jr&Xr2tlOH)=_B)C"cqEpD;ceOP1:J[,DXRi3#Xu/S!?#m0dN&EGVc'.+/Xlk)kF=]g/P&a)%S^FHJ-(OeG[OOL,u`M>&Te1B(F"I6 %:XFgK-0Vs%(^I4H<,SG!jaa72`m=a`&t0&P:t6kTlql80e.S4jAsg;88R"kcQo9),2.PA4`Ii2CYe=_'nVf@CeH9An/Z#@u/: %=F&XW+BP6]:S2$?&Ch2t/Y0%JFMEpKF'sJSWCBs0r`12f"l>s+^tu"6#A/[c(AFQNQG4-;&)MP=lM0HZXcoGubMWm3oT[q<4mfmmP2:8gfih'<%meRdl0YUiKbTnXNqS=F!m>YNZg(.:Ne %YF>Y!Z$7W+jFKU_k%J=;9b7;XS,$:]J.)O_b]BupXA7ooE3*'sUj3d[-J65e?e-0_YLSLj^^1d*o'*ChM?/QB,Q(n=Q9gAZNa_.d %%m7c`*GY3X6c#gN4QmMK/<+>\_[M3Y_"QLf)s0H;RUbm/-JK<@UP3rR+S6I=m_>VGp!]q4PPN"8S\h$&?ljrA(;\K&3^@Ea:cQi+dYc_/8L+q't#g/HMA)rO/]?NIM37:16l82L)iMeYc6Ql3E2PmH`horZRZHD.$j8dEi;#GFAs#'=bdMQk,_IHE@>ABaK%cc?8KB4 %d(ik*$4I]$?V)Pdeu7m+1,+W&XptZCAOSOn'./aZ7LoXp4K(C?7Y1N8P'bsuBc4feae%#H!9kp$0V1c!Q7l7gl"kNi=YgQ7=RLM2OB\"6hb<+H6;?ipYg,K?n,Bqk$Y/"LGBgF(aTig %WrS:+19oAs:`q6KB]'[1IXjY["nmM%:Z7em+INaXA?*t %`&JK7-NEuBbY>8Vn(!"S:%gW;1gb+[!FB;pT6idX"S`EoLW"foBsD)OX<*5u1$Zj'*m0A](?,9hIp*fNX4G3j$f\4m:go9]Pnki& %-@ADTSPsVJ9Dl9VGRH:C15lt&6PTFdO]Vn:*$I\[Cs?ZmX()fA_r9'$Yr_LrSjfL&d;rqYG[(G+H.$)<\t!=GY^.XG3Z/_)>1']Y#`odf^S$`Sfo7Qu%5X]n4?5Go%S=]oF6?Ab0m>)Wnb$6QiF9_+bUG?iiKF\#fP 
%Al+'5-BVW2D&lO#E'.L/17!CI:o.-r`\@<*V`!:A^#>L2Y>B/'K[+mF_;0O^boTQA/-od`]r=(1gBpL$%Fi2'qN#\[Nc<1me0mA(`4MW%$W=GPT90gnBa%g!)r#0`0rR)T0m/L7b0,FHQ:3hna_Y19B?heH %[.Pi9i0\anADoQ2,%c:^PCq)6Fr((+?YcWUF.hO",n\ArNpjX&SVicrQ,%K`M/S1`?HV0'.cqF%Q#^C)e.aT-C*mGZ4iStpW^)]a %WFEF^)\R3rjOK[USiH%Yk4>3;AVY>7AT+A%0XIb.Q,b!QGOuG_SiA;S)M>J]\I;%s[\X)0J?UMW6p0iX)'*nl$+I"D9^\-la67Is %fFY#9PfhF\"!q!qe6kC9biOJJ]AmTa;7rA!aT/WC9Rgq0jVFk]4@XiUduVW`NX@uI*U*))?_9s0GL_Gf->L>eaL`.=9Mni>pI:e6S$,mtmTE-4es,Z.e9Kk;n:mWI`_XR<)gW&]A?0QKj05b1<@D+qp%?*e;V]U\a$Sl*bl)Tm4SIT1)k'Go6)BAom;B'b^q5")9e1I*Aj4m+:f6Ci=cAGuS^Hi@[X"gj7fi)cL2`9\h8?BnH"Db#/ZNGbf9a_q]Di>f7,VCHSMVoMcI#82hY+tObe5iP- %T:#7V4W?p,VUG"@o[b!U*/URdY+I4BYa@2VX>gkcOYd>7"*M'MeJKPk=7SW(WsStl"0!^_._](K,!fR\Oap]_2C#1T7qRX`;=^GL %>4sXNR9R@4Z%qML&V\8)Sj^Vm(\8@@,uS`@=m"]0$L11R)r:.,'&%2Cb$=7kfr/E%Q%.ClB?[SE(A=+5T/DUgR;4UHGs#\F*4e]> %-`:USS1HS5WA%07q+WBPVOf$?rVWP!IgoltN]rn?T.P&8+_%(%?i+bQrrQeTZ_lo0??"\"Hp,Sb"UE.9.=uuIJf?@7a^"((`69!Y %RFOa;c2@[LEWa-S8Bd3rU*!7[nE!MM_k8643Z)J%=gf;S;Sn-RbVhK.E??/B0>Qf@2LEEYaWV1^g@:$R.^ZBQf^Xj/_VBC0aS[+' %)3@U)=HEP\EZ`L@Va@Oq>7n>>OmCR)?)odTjM%^)k5.bbCa,$K/YS%TB#3!4rINQYjteWuoqNTR_OtO`g;8LU^0D.-U)::M!tlg# %LaXKWc@e4<.9Kfm:WP9H#W;7n+%.@5.7K!`^ahQm\j^&Sb2/T`D;9^>MI00b]dXqJu4\+@/[b=a$oZPO$JhUL%iQ`bC\hQkA'?2(RXs1nd %b8rdTT2K/ukVQmF[k#uIbG`d'eX2_>^.J,cn>-Hd^)7i^5%$4`<]Y4YDlH\q%sk*G+/hiC)cQ %_*Z,I$2RpE*PLc(%e_)^b.HNmi0,H2Kg.#2#Xu(AkXZe2>_^p5Cgs_2lt%(^;6fc-GU_r3D),(<+r?0r>qKD4T"GJ5N416oU%P%; %B75JNDCKUh&9C/#_(GHiC]*]Bb=9*A+@-D(LFEE*_cL/t]K?&Y)4SCJ,LcCQO9//!,K-oF$@Pa,9\0cndA/!K1e6ic!Et7;`OnA( %MGMITl.i;7q149"c%:[L)]I-ud?'(Hh=S"-q"e\+UHh0H3jJiET0PaJJ"O_h^#P0$QPo^X,__1O1kZ\pn2I^"Mj?2+fS3ZZ1YNE/ %'(NZ6;QHFtA0=L#@c%[5iM:&DFsY[!$'UZWp,c#e0nmmlDb7oNd`=7ZX6kV3lJ>e\4D$l`Be5rlRu0b&`,l)_103PnPN:.R*>'N*-b<'FBD %a6CMlH=G%;`VGkmnIIhtf>sJ_$>3/)K#K,eKAo3)QmVfc57!tB&?<)DIIhIDo[u:$6c+BCniEg"U_UGAc6"+&&#AfB6TLuV3n\k- %b8?`h]H784&\W0u,P%#oV%)d$PW$%W2K..,/>VhAi=+9?cXQ4VhX7/@g5%<5PU<:hQ&`l)TElLtg7\2_+,B'[Dde_n_se,!lit`H 
%K`F"*b4&L,4RLR-_q4^rDAl7!b1\^mdjdbEfICg*-=mp=$/`L-&='S`MAQ4jn\glX.R^3@p82p,@IcG>bf+?ckED]^b#!\JL#bhD %0NS]GW\UQR8#Cpf[ZL0^fMT@SF`?\h9)4G2I/Y\brce,W>N1?!NhhbO14-0eZq=q0,Q*eo+R8%X4A"d:p=uEN>e6(rs'2Kl*S+il %d)deq:*_M?e^//k$MBNm?E^4gW'$O?=/'"V($n.XK4ruHXInl>RZuk %j@Z*=k6o^8G7;/(_\5>B- %n_\Kc]?,O]kjJ`Uh5%gO66W&;F8I.ne)+1)]>G6?^[hZOn&'Rj0^&.R1Cl#GV$b1^DZu2&XR!L$kEY4:j'_)"KhI!h4e5*;JhI=< %1D`;X\Pb/;%p@:%Ud?M[.[N?n-t8C2+FC>n0:o(T_/T/38d/97aIU=3m*4Ll4)u5:IW&8WMrA?^a5=K=\VWm:&-$Q!J,GJ(;0t9= %iWjL\;?i*LJTro0DgM!3V6/f;T5+ga5IQ(*5VD;%rGnCWFXEq=D*8V[E*+qZ.F0"TDCrfsfE5\+CpC5*39U0J"P*=66"Me;d_beD(7)rJ %]F>54<2/2oYpNCf*^=SG2:f*B-C18q,3u91Z %R.PWb8?DoPjbf`#%(YE;KD?2uNa[ZC=f2G!5@Jm`\PbWMp0m=9Jr>*i;@AIN`\#2oTFK-[%qgWAXA(JE6Q#+Z'h4"8"RFmE-kcPT %8elC2GgVer0Tq/:]1#IF@\.o?3mrJd0I\$"(Q*)ooahR853,&L&e.:4e,5%RR%_&g;49agW";T#`XeAm;c'-9o;5r0#+IcW'[IT9 %Eq+NT9'"F2]VGQ,I$R`=0bX.A'o)r`S4.'l;C\oLjUb-q5]>jXY^<5qm7sO9 %nZc%S.s[W\OZH68t"I;2k+qE[-Ff1.KQ?fagYI" %XCX=dd4\+`Q!_i/2%`G>'i-1"823K7Ym*IIWEf7J!7Vm5[J4hhVc'eXJ74o<@V>B%Q$Q*BS01tD@LG0D<.#&B0;LA"FPJ*NVW!8XPX%!e2ZiY\Ik0(@#`,grY@^_lIV1I-$q`91=5`Y9f:gaP0HKhg2cosG3# %[h1QI8%]+l0pG>LdmsKCnpNAe.*BCLh>H+1InB[lLXC:_?7QrYo$2Cmba/;OaYGFqB!@nEg#>6NQE8'pHiU0DJ70>^991g6^AIN4 %WSY9,pL1un/VpT$N&J_b`toe"2FllVPCY`G@O[QgX->-1MQYTo(`-Cn:\luE.KO\Dfms'%->!(:+SlsA*:abrTB\sE\mKcD %:ibk0OIKV8d*d8cE-sd;A.kh7."iP%,4-EmWAgW'4JI8aPn^2V:\uK?Zb5`LS&HF`:IJdIK5?m7eCpb+=Qg'd`>d-*E2^-Ej0:Kb %6;GiiT"!De90X7_T'6`"rL5:4qCLD1rd %$,a28D@bmLK"+f=R*N`-:qIcXq.)%CT8;#dSs<`U8Dej0nl"O@'Q:$\\PmW)-\\GGM6jT]KpG;@$h^A2aqR.NiAtdTZCK"+L_Y0I %Gb$T4-er6b1'umr[8A!k(1YI.?qIAa.?qjIW3g;&'k1lQX/_%M+Z>Y']ihg8et%VD@\uVg"=ETYeh<4f:p.`p'b$q?>[]CZFRuZU %!iqVR2'M9$auQ-0e=!:<\RnaA$n18Pr4=eA7]=;96!nc3pjNQW&rRO,aCLdBWbut%VuiG0Q,d=N-[YLdWj.Q %efpcc<:j?BVRaVg>*>?c2:YfgQ1n<59V&..c>,H[)QB]Hkf2k]MAk$S+UFK8##hS!h=4sQJjmu6l3Lm'1@IfE3[0uXV4OiEOu5`B %`ANm^A]6-2'W#Bh/Yj`!51?PN'k0J7Y[bo+-_3hB_+Y\M]$ %R^kO1NU[2Y5f%GJZ2>AAk@c_ZNMVHUiGYcOQ2rtBj&:rF&QZ&p#!o&iRml?ih#=#,Rqs3c4i>d#74[Ag6ACEeae=TdYbb49%r.$Q 
%;4iFlP0/g:O\V\h>2*H92cqIh2KUC,`Uee0HX>nrYb)ghLd5Cb,XRa?#8H,dV>Z@iXB@Y!c^uTgYmsX`@r74KqQLH,\r=>Qcip?< %j10!>X\1LTbS3X<*KO$Hr5%/;,:-n[3r2*X"'$,4(;6k7l`#]tCl9\W*&#R`[M4s2,RMkS5OfQ*\'/.t+K<"N69@WPnj`LQm8#.c0(5[\#Ja/b!\ %]B\*NQ!_0V:!Gd+Q$`,Ae%,C5U&#D0B"^P@"D5o9L[.RoRaq7^,'Q*!3,dG2`M.h?Wc$d'`K3TU4qDLUr=J.kcDI)@`MhN7f3` %-bPG7::3j^F\sKb_IA8M_B[&^kfq_!RIJ(Ko;Tr_1i"!G4lf@k/4jHZcYnfoAeQ<\p=;*T4bc9CEeqS@&OKF>+tk%p;7D*Vat*+X %@G9>SHPIrW\?9fk.!hg0/;F'+;7K2);Va.M3#d5=^E_4-jn`O_kK\$j5Zn"13 %_CPn;cBX!@-,D]i84mGI<>C`OYDOcDi_rbuEi<`'4T^?t!&W05^nLl4QVNi)m*-"O2s1tC@OqZd!rZ["l=h#9C&u@6'o+N_TsXsS %/@=MYp]4PjQ8erNE+0-":#o5O^6A?=om#Be*VED^KGE;f`HX9Yc\HQEer\IBAKkrUWMu!97UG6h-.B2O4%o&Hhn+12d2a17`K+lhVl`29h7"oL-Sr;U,(&Xi %_-=BjDNBq=[bW%[ZtoQ)WPYgTSSCqjkH8Yh\)qt0 %IDrpW4'jqSCV0"poVZm!9FSjK)^[&G)G8iqrE05TSQi4Cp(R`Q4Pm!ne%MS:P8c"DmN!`ODVf*1/I`3Rl]1[%R,2ehUN9gJhD\KC %Z#u?gDB4#a^%_1@mZ/Bfh5S0Ef&OBNT,9])CMP)ZffZL8ZS^=B-TU&28LA6B!?e%dn`*-A^@\T#Z*!&7PK!MmlQ.?+*=o4n816hQ %GAY*C_bo+tTNp]qJ)^GIr3ZIa^u"//>Pl#!#M8X/FK`qe*0/"%V2e=6_62%>h$N,,?2]u`kdKllS_EqbI0[2CY^'=ML:[Zeh9r0k %8+s'Ugm1LL:Z,$Jc+D#O->Ce`g_70fWHA_f:#3Q;fpD->I7&4-o6mIlToo!q&c[j&[dBR63]bN&7um''bpG>Z2Xf;5XiJT>[j,He %4b[mX2kSa'Cd,+(&M5=6p(`V'Zrn>Ql_l-X\#0"Kiip/>rZ,;#>MQs"24U?b\9k/q=9%.ECQIo?iYVi$QbE-^CNit\\be=(5E`[' %X#d]_3,*PK[M&kPe(h;GQfMKEW;4RecI:(Q'-#267[ci16aC`[mJ_[.q"FB5[r/o-O1gqU`VlSI;P^'d*kQ9m:4D'4F)X5BW4r)cQ3+pB'S#P;hsc\U05qE;RQ`f$;Y`gUAIN4E*Mf8V&S&GHfD$]g/:*I0+u[*WescN^HpieHsI0[;TH;9JnN2gNE*/ %o>*i`iIM4m%ug83Hj")S`.k(GP*$Dmi*;b+%`$YFn5=YM2okFrU.RuF>E;>PP=Db9kp)Y4-FQ_St`oN\4DYQUug5"33:;pPMq=Z''PB9&O9fp()gYq./]nc=s99hjpRp"W2n:`if %^Qg`8!^#RQJ$GbipH?XD$2&l:[h$Q4p[-B7$#G*!qqqHNa/1GXqf:!\j_GU$=oZA0KqOZOqdcBgAdQK:T;R*)H[83V.>20=Dk>\V %^gPdhSZuLHqr)nONU-#i:([e-LKQ\!\:\)QV`+6ERYOZ)hE2JkoI(G4^th$30I+2n8>3Y';e=Otd+3+$T:"D>YdPkG3n.s$UE2!o %KAeo?7fmPb[V'gYQ8,;kOuOiE48V*#HOoR@D?^TWdi%?g])+lQCe73*\NP.@j036@X0T<,hd8r8`SnXm_s)#j"3*Lhk+-i?kjI[, 
%`W`,0N">=t0W[IEqqr('E+6s18%+LS#C7GYg=5k)0q%9!B92,"%o46N#>O!a=Yqh?HVn];[+bP8P?qd1Tg3Zn6U(Alb^A$"5Hdh\@l0dqlUNqFkHL\_qm?>$RS`lM7%;t'uFSU1Y %4bkr7rGr%G`5eqoH*"BbCWFPlStmh4(+?_oBMAJQmRY=kp&genm4F8!a%i]nqNOnShVOA"p@Yb1S_aTpD82i1#"9Rm!aK,HI@kOU %Bs`)[FSPlSmlck=B9ks%'lX\X,YI]^2mW(O,g'?643AOgRdTF-R(&\ja)#QIZeu.?TR$)(8&pU95,nUIa\$KO,l9%(%"&I%^5CaE %PpNkC:APZBjkkZ=WN&#j$f#oTIuW2S?>ORBr'Sm,Q!-91>$'MnBR;>#&"`=CF,AXhYNt]DC'-r15@gn`] %A\3A$p6aPrEXn3/=+u-QlYJ!I$qIRpj34#8qK,BjiD\b;9a#f:hI8-F2=o/WZ9<=IAc(L*`_Pb'`]n:r`[mY,Tg)K;A(STB.G;%9 %YL&qY8o'1bZ/NfCJ$dd3hct$(pXiF"U$[oi:&*lY(4!I'B0Ng_-V"Q==i2nE(s\V*'(gTn;RaBp36CU+:cO=9"9VE.S&SX6am\_+ %D=g)HU>AR:EB2KL8`o[i;j#r4eN7lGM9U8_qu[-*E2g7>5MbPXL1abLXAr?*03?VqeW?Z=ralGm$QSt%aaW6g]Ip< %lkM1f&5j[EBs/LD0rjlIHFCV`\Isp2X.rmNcB\3dM=]jX(@9]7EQT?&o\bgt?@9lJV@pn@n@d"ibBuqBd21BR3e2,["^s1U-NaOV %Dkq*:ZA`Am1S(Q[/LVN50,Wl]RC!L.EUX\jdpR#[aS5&`@uaE1-=8B4]/B&! %l/Z[D]3\b1S:I=++un\l?MXk+jgA+'hefRHj6?I\f6LWK9N&#[M)I,(h?R8aCKnbs)8"ZjnSC4b=5F-OQdqXt^TqoHjh":Pke"E54ML2HJkc*\]19u)i>,(fluAYACUs?=O8!p.5'=:0E,kSU %Hchoob">^U3QuZ\r:jI_Uh`\[C4!6Lcu$!(.e'hMSDUSPV[ga!P&!A`XT,p9FE&J?B=XYOma6.`kEQ-J8\Xi'$rOKmAfFKT)&dgA %"P[m3bE/%`B\Hojf>I!2D'3Scq/4[6K@@boTq5S,QcY,(_hiLOB\&kg0f-^"d&[=4H$4o_Gqi!&[Wu07.4`TgIHhD\jMTtW\pTmT %l'"!AD)Z^i#>S#mOO/GDfu[=)ri$DLYSk50I5f]_cdk[%s)b.F(1.=1dlWg;RuY=+6W)8I!Aqrkb3P>5q!q)lmJVsgI^S^.kQ#lK %Y+\YU$%?I_NSjP%?-fTEHgYcU!B/rk1(@DJr[_[>gXe[hl"G-F.%\G$ZBER3E0BVhYWeip4r<2KA"iI:*FF@Hhqa^4J!5ZLk'+ri %],@gS%pgN>4BBfY]7mY?#jZL-ObN5Yb4F44LYK3'E`"7HqHE8IDtTmVDf=&3p"K/u?:a9e)T/ZC@c9pXT3C"2_h0/S8g2aag+t': %BI?E-HcKd_l8UI=SGs63./ti,G59%<7:,hIZN5Kn\SoeKV+m[*bp[(p`ffe5gmUM)RDKGIlNU$rGU0"cCIII3%Rq6;1 %k4n:W0CIPHX4rfO\s'HAS4J/*pC]TjAW,C?m:JMPb"BINcG@$75(CTc+J"PXot+3$%G*'Ff,ekFMs#k`V3SO6nN2Okr?("/K0'V> %O.*WEFuOjtqk.tl54JT*Ycbhq'-rl4oVn? 
%Ng7:VmA>&NS%+'L^W)1]jkF;s@7dOZ$hJo7.Z4U`KYE-qkI("c\9rMd0Z:6"aE:t5lddSZeF)e1>B24u-ECokX])jfJ%Pm>A6"Bc %?;e2/%!Q!?P56=U^iPM"!lj`h6?-l6FK)kEomTo,cS"db_t:c\l0'BkD^65[CAEL3mQ)'tdg*KkW,>nKd\q%'d7_9Gf8feVYFR3r %*nTirQJ0k*J62PF>Kg-WPGt@[g_b@UCZ5FM6D%V5=Zf0GP'^:Nm0/b)932iElK>Q#DRnMuC@IEXOf$26QRUoqSg?P0^Z[ %/B><'X2^U%_`6^-3QQFE]tCGZ0-BV68s_-[X6jNQDeZBKjNQo6cdfG\/"lPQ0:eri]r0D/cV(=0gXHJ=0j"'T=5RfHT1^4bUWf'b %jn_^3X.N_I;@!k%jm#WB6gkKj]ucF>uep\^AP:Hgk%V^TlOU9)uHRVUI"Q>A(f %`\u[+`2bQ3pHOr;q*c/4>M:`YDf#CIFB2ct\,Z'lX7*b:psT.6D5XmIrWT=? %b9nG,Dqh+\\,EZ=<@i9l`HM:8Sif\]F3B+^)s`CFLEQF`WHM0;oqPSf)t0d[LV4_J.#b\p([@:6c"k!jHp*?5RZ$M0<_4rlPl+l< %V>h:V::;5/CXp./2Sb.ck)1bQS_'V0gV%[7Gh&K#)]LG=rZcn.'< %InRbeQ]luqmgWiT:sC(FkDCd:s#qFF3W*rmbh'T[7c_`:f>!Kq.Y!$9ErgddZgcPV]oOI5:j40431B_sC@emcX(EI>[C+LniY6@: %rE'!PhiNW`j_u7DfZ.S!A!KgkQ]i&TW1=X&0.5T#i[X]nAWHm,\CJ4%Dh*Z#.X#;jK*DeJ4Kt5!kc7XHE\#]2_f(1m?Qe0;'/ukJM>DrRl10u"NVNWH'm]N.6:=T&?g?6X'(tkB#B^os;/i[Rm %<.j,\F2?6-f?7JM)lHT9,j0=\O\tW);3s#NooTAilUBL %8^O*>f9P0N#dX)5.3==DUS0i/=dZH)Xq*pJnm17.c2&S+eNHu\C3ScSZ2+90DJf(Pi-;PROqUpr!j(Ras`bA@K#5j$jd5Lna; %B]Ebu(ia'i@lnFOJGeD?-24^r[8tG?&Bk<_gIuVKcd1KqY5pj=Zla(EgtFOIQ4?(3kY\$Dp!W"j#D5>N(b=W=7mF,)27rn_ChcQa %")Hc%I%aE'C/=HD%9PCUDS;nfJM$!r.-b12W'uI3lH@f-&fC':'Xo^Ud/\?@A(U9;pqeipi;Dm5l?Otf^RP<9hW=msK_IBJJIH`< %@H;_uFQE&Qg1NQ[CZ#[BZT:/XTfXCU^WKkW^LmUZUN`]lm#`'jSif]Sd6?56]J$@fO[A@N7BGBp()5\hb%FSbGcd7<7^%l@#J#/$ %4*O`a:VV4VSX$$)=S#D43V(2IQ_?oe5'>Z9E_t1*ImHkR#R+\IQ\Q)Gd.MjteX&S"lXdaol"\')L#m_+Uf%,9r>;p/j*JE6$7L?c %3-ck;N?pp]=!=o:+G"^O/g/NO!;adjDWs,bKRJ,?IsY"up,&/O.quQl4K=#@+d\D1J2mDnPbjH(BBALPk"J]4/'Z5keakoW=S'M> %6h.UrZa2Q.`Tk./Xk"S[C;n_m;c#O*_[Io's.4b]?NR\>RJZPZ3r5)e>tk%PU=`+EbZB"s76pd)nul8t(^+:d!sG\dYpa2D0>1Y> %W]YZ.e*hueT7ePs;GG?MR[HCCYQ]E3PR00@cX/TZY<=dWtLc](pl9l5(I98bUU3 %nkC^6W:.OQddtZf@^<&gb4G5Rai`K-j.LjN1O-fbQt>>\f:ZNZ(WmiMjaX\]VoMFd>4(VF#Jf6@eOrF&Y*$T\l5"[B$*_Xn[[Tc* %:BB-M*O'n'k^]O0rqC?+ff6MEZe%$u$-F'lL\os;CtR'kH?u&@2BA4hXE2,RRH%']QeKP11:V\tYm,AbqQIV6J*M=;%MIG(H#%JI 
%`ZbqWI7/fIjn.KA!&T%kFkc=,"*4&;\Ff5ofm,(iQ55akeGE9G %p#Pl#a(qdoS0+`oVYX3Y&'/\Th#5.VKmId;XHiL'dc["DoF;/6h"Qb?e)JMC\\%2?e^Ou.L%TN!;l`tA086&O*fI'\qk-)lfo,-= %r.T4&cOWVED.Rao#efZkkDX;%;tcBYCMTM"nCG4CIsPb%D<)0f#QX6j96;r#Q%DRZis %r?ig'hNiK9%k*W#kq('@0F&!#*rrU]AeMa-?GSl&?)G'"d6[4Rrr];!*Io8'Sc,`$X*Ds]F]bIr'/AfLj&@h0IH[fl1j5s_nJ#%' %5(&iTTWAXTmF`:`\h@m8CgqRM"-s4]cXN,Mltl4:L@1:uepk$)32?\5Y.\0>)e%]s+1?rbuEl'F]HBRDWQNn,'''n?iR[$c>)2$9+-7.de9?=KKX-i3P1ag(fYrW4tO0Yre!i1$a.dOGC9^eYG5''">t)C %Dlm:CjLcBbW3VLE3%gJJ#/01<$^[dbTThTU%GSr=Qa\%-(VTm`rqE'&K,uQsHb_/7p-50`:r(&jRX7ibJB9aep)X^5)W'$BQ*e80 %ahXRtcO_S^Y13UZPs>$+qt?[@-F7,&]!k=hZ[b@-:WiF;,5Mf'/)?"IK7XOWrEQa[r7AMapHri@9n#[p#$>7D.K7Y/na:@nFeuC8PG@$4)!@s-G]"R!R73"-I"Q%%BH;cE8G@=>@2jWs9[AB2D %`h;^(dPIhBh"j!?K@[1ilE6CY@_u#;p)>iSkfl'dHKrPfTgHYC#Mi*uhZF*8Ib[pq!J=^T\DriLNs8#mo)O"_GJ3V'anC,f4hfZU %%30KY]"),?)\baO))!d+6NG02plGb %#No/$%[n7ZRf.rjeEu2[%]JrDBt`hJ#e"FSK7^D8"1uI]6DD0q@3I=R>`F'7ZI/J:VWPbCJh@M3MrmBDl %S&njb"5\:%^;%kr^$2_:]>0msc)`hT[p]sf[P5o4:\gO7>5AJVed(%t]C240GDjmupWO;WJIUD+o/lhqT\!H$mrU\`O^si;JcIV\Uci<5>G8!WV1)Y?Kn6ENY.rGc]".nqn*3?)O3T$aghBd)4 %;Bb?.Ynjf^m:C21WNpXBE)kWuGe02'X/qj.4\mYc[E@b*`U8(5i9P"KY+hFkq`imgec2bL`0C;N]d&W[s*Ams9Yg7XIpZZg+9Xh* %qYSB_F>X#qm@+4>kTS!\omtaiqT?\CiNM_Lc:i]NoCV!.=J*p>ocT'50)"T/q>H?Rf.Yq(g[`+]H/GNVo>BatA:/=EmqK8CIWlW% %LrI8B&te?BT*sb_rPS1B4rn*!h9t+\07L64omJm2?V.+.C[lat3Wq0^^7Iq5cdNEuf,MK([9q,:q^!SsTf3ef&bk:RpBMGV1fO?H %1T+dr:[bCUnSIm^4J"^J"hX?nq';QE8e(:A!SX02G])[SFJ4U'Q$p*Grpp/s3A/a)p;hqnm<]_pd>J#C4(KYNXBEFn'].F`;kW*o %f>-/j\G9`\?bKk"5e\aZL*Y0B6OT5FYd3!Y_/(S0JI/06eUI9p)\YQokZ<^0CSlsW$W7Vqtp+&m!aK*RsN^N(s,F:V$Q6bo@&9nMd%(c:GD %i1R$.2n5-g[?Gktuf`89E4dX1j!*=qNB]K*aA*foesGB!;=E,Z1iZc+Nb[l6IG$X[P1 %RIX-lO't9NWhB@#7=[*9CJfIhrs3#FRl12uj;dS,ndhF".:nR1oQ7;?luDj$@a+`.W!j>Fq:YO;5+d)57PK5#nCFQcn=Kl"J@*AA %AU&Q05$(`J?2dpGQUKDYc1X%RC'hpF^_q!;gMg[`?9VKf1LDl&C\]Sjb1sFq8C0SW$naobR+]3]JneoX;/A8nk^\$?oDnkq6HM)U %ds+86_3=/0-KUX.7e3kW%tc8$f2++YWkcecMks(K_.]Lj5=\J?('Z<#lY1"H]r\,T?28UQ 
%@'/;1H=D_n]5_'8CH&q]eV32t^rFZ_Tp5@NAaOfY+$(M'm90AMb3=!gJmAg;kT)mr*DD`t^nlmGX#mGgQKFKB%+Nl:UHuT3BIIO< %>j()6e6\CRa'>Z;/^^Trfk,"VG0mT55SsLO6iU_pQU38H;mKf;K.je#^"l]CSMmTn')*lEj %Br?WOS"krP0uL>/+IZ-@B]n0*CD0:92SO?Nc^"G+31p2i#gVC*B`7bZCGSf#N[9DoCk#`al*\I-TZYl`E&/p2N+"-@B^Y@bp,+JrbJikk %+h:sR+Sh/K[@ODm`u?/:WNjDd]T"Yu7f\*j))`QrV-%3.8PR8+/0&_mgqTs/0`Y>5d^]""i;,?5%X5fV*fC[SW,5F$"R_7,$ %-f"@Npt6c)&7jPSDIE#]gt*ESPQ0Cs?MO>>"T441i\jq+6[_2ebj<4j$-%MT" %(d=ESrI)aS+tg$kUL9<`*5Nr5D.mYsa+<,3jcg#8)?*Yn:;'EDcb"U&lYQ/2YLT %G8q\qOM;!m'BH?:A$U`*\i^F><>nf"3O^KOj&Mr3^lKcc.+?0WX;iid#Wks,A+L"e8WD/O?*O4^EJm-1D2XB"#ZhLK5VG3*))qYg %Y8kRH.>q8&lAZQ<:D72j"i\>5IE*T)-=Qti4"uO^hG"3AE+`Tq=O[N;d$][?$.)SAjY_*K/A<^r#j %@;!ILRSj;_K]DlBC&c\DUJ4#NVrMhT5MlY;1He5iSPAI9qRHtBSJA\1s0P&T'+=GKmCP8mPmasl?(jW8*Q?GYNN"I]!PFQg,]uOM %I#*l*F`A1184O34_fkl2=j$n3*h.l/`aQ/@qgiRlrB#K%K9qs4QN*a_@VQ&M;[tS136]Hm]SbKHZ[oNOrCia`6,!&.K#<`FOo5D] %:7t5I)bm!N`q9+]ahk.<]5UE\o4diD(Qr,)OY!$Ji^4)hKE/@RfP"t)SOG7T"(e1LZ@L5tp:4.1LH:@B9UC\roR/'gd2J)e.]]W&r,2AY;J6&WtAVU8L6`kTB,=S-O$/dHPc0.JXW+_rM*ajUk13+0j<*'MF#Vj?UQRfF'F^ %!]&AZmJXIPQTb\>&VpfGnaaXh\8;p;jfJSirHUhi@pd65hI(!pa@JE+@T!'F6'23B:*M$sdVhK7$W&Q-q*IXC/RS]nW#@WmLbPe^ %[r;Um=VBEW!SIe^9+(D2cZqEAnn:m[BA`\/@qAmqT$q_(IMHt0`,'2P_COi).Y]:OTaS&sA(Ku<20@dlFCE^Oj4)G0=4"LY"XgLI %$a64]-"^(C(LBDjW>;#6L+^,j/s_l!%:+2^*G'H=TS>)s4fWt"5n.+E"eF*M2ba`eGQjY3O,^<9F!i4<=.&s_C"o=AnA1AT`P8[T %']eLA=R(o=gg5J(7];YoBRNfkefHB%a9[dnklj$eN0?mU*90+_%aHmM$VnT>2T(qpl%m;dR0ZP%GDn2NZg?3-,7Sg":$3MWJd>kp@r%2L^qiK?4nNfI7!(QI%RZOX9ajn=X75W)"\#c%?kA<>R"CgqH:GLX*hBY:0qWpWp=c,a?QG:)BqN^6UX9^=Dk %Mr"=lT.bhO_+lBAQ:N?\Na\=mM%4V=W86&`2;t-aaGW\,P%pkq2=_V#D^\GF.H`![%[0.,1Eeh9T;*I %W$RCtD>UnO/DHkZH!P'X*J^%k2p$-YYCL;lLt.7r>hJLH"\-$0JTY@%rGZmgS3"CRUC/:/ff_7q1Q7kb=16R-\28&t^UY@=V%?Y\ %cN=]A$I443[P$#-l&2r2SSH %7&U#Rc_HOqBL'3g7i!csU`Sa/%&oTbEf6S#Ck`U*q09c_In!7YIrd>r&JL(3lj^LK6]A^/1EqKmH[][t0!Gc-f4o;/YP(]r^r0A* %2rP5U7n/)IT&:6jdBl+V*5Ndf&)%6i_f;ir-uaiR0$Wl1:TW#?U?RfW:%'h*eg%XnUR$* 
%b&7M3d`5`fM5Y;"9-4Op;a2.b>f4bOQ=D-d'bsL#.#o5MbR1sQ-nqYV/oXFSH=SR>)p9H>1*pp&.2Grm(X40\.Q]d[;-_2,]OuG) %o%QKrrDhQg5[69L>`dQG=^U$'$dX4/4G'6CJ1:6R71-X6?qVS(T_Toh5W^r\^5&VF%9Rr`W %!Ac4S5%8LA^da06ajIC>MM%Tho9I5gke-F6b4>#H/8>eV:\jL=JLH6Wo>9n=`i2ni>Gcld)AJGml?;P30k(2_C[ro4kq=GO,[KWkFVo@ %l[$(?1/oLM_i;`6SP_Nin`6YuE)\ujmtm]gq14a\_g/S^bjaM+lf!6:*Q?<9>RqY0P^jY$_`@G0RF4%SH8m!N/0M$HSs2eKFImf` %H8kspcAD050c[9!7`gK,b*&-Np.qS>Y=BoR0b\S<5@KtrOX%NkO2lJdB/Z&>7f?"bA']H=[[[5:i(%jqdnkge2rE;]rD59A$0>S% %>BhJ,Ch1Al95KhiCL8d$pSs"]J.r*t'@3g3%aoE`.rV%e-%j)7G$-EZ-Y)'1R`(XB4\"$8Q_s!^o$0=Z]('E5#Y4R?tboq8X]5B-9F6YQE/HaWZ@6mKAC::5&B,A*o[)W.kIgVY`@Nm(ir;AF.Ng&G[#$b[\tIaRFGj"a;hM-!uHB_?ie> %^ad)B+.[J;#fY&[.7M/\LNh6<4;]-46[!78005T&+'ju-^0J@Me][gBEO!kD[qaaRot0m\l3AD(<6Bp^oBJYTqma5*3BgUG\^)\n %X)S,X/m%C,Jn%W$#oHr*D%5b^G/F\C5D(HPUf9m=h0f/E+#kZMhcW#H(_lH!91XE[qk.E-]BI`NgQGVM %[B669++[T%bIJ^_EpM7`!N^=a*@%RR88sGeg9^3-RgKB %95l-$&U3F2KjGr^,-PoSQcK?50At]rLq1)AV_mtIJ]L*;O@nau95X7sj.b#Gc`$I+rR+uH>sL?kdlW"9h5#LenZ(jOli>cJ2lg[S %^KN\uo3nMqVXsBcqU!EMeT?8JFAl.&NY08@bd="PRa)!NDf,0+Wp0Gm)L4Zjf[Y(m7X[+M4AIc)?_#+Z.q_gUk>t=>M2]ZORa07* %i]\/r(=dMU^387WSQ.OQa>m(K(/2?-omLt3;[V&--G$B5<4dqjVi`pm\rbEn %P.+26.tV4B%0`b[]0IODlIgYb_c)H2$6n:Y2\nUQ9o3G*Cdk;#Ns[8L7#EZ.'*>(,<9eU+Ek*bGT@ibe[Tb$PUfiItjr@8MpnGAT %C=`*Xhm;.btZ %[M.m$o3ua))n(Pm%bD@7Hj'jGelF*Fs]W"%!Tdq5Z%IGUckB0E8PGe&qSb?b*t[+D;IpS %7TeZ$3Rjs:WU:ogjj!,r6^3ilX4TQ3#qTU(L\?':OY.Oj?2;"!SlVD\%%gE[;7)eg)6]Vfi#\qm,DmjSQ1plg0e_PV&4h$h_jD$B %e@j9tKac.S,:c\:P2TA!gtD@crcd-l7#"&fgeE69of+ncKkTLga5&9r+01eIh39jMHm7I42j:hKTKDS]?Bt!,hEuEW;(e:WqhD]p %pUt0oI9<7_\;P[53@:`LUp/$5f*JfkUDMKFbhnDYKoJMSXF6IcoQ;$>0LB'1O#&2c^&IW(DBJVC/(W%VhCQDt,+tPG'\6_HAti56 %/N124i,dkIWAT\l+TooTcGur'pS$eXG?"2uQ1H0:MlHYYNdKZ8&!h47!>13Vm^aHDb:U\&Q8&hKKLI]O*W$I_\6hq#N9-_TMb%aN %-q-gR?'"A:\Qt&GSl(E5X?2,k)&GR1/=8/QV&6Lqe9ob$VFZSQ-(jcWL,_dO5=1Z(<".N4MBV(eN*G=G^3C#JWPS>QH?MW%?q&]R %ZRBS,L-a%18)o74AF.Ij3B;p+1R,X/1:RmA\)?o14[/R1j@-(p*mN*dTdNNZWh.+C>]&cf$MD$$hfrb$=f9^8DXn*7U3EoB[UY0Z 
%A\ELi]rZ@7V5A:=>q+s/.[>$_#LbYnIndbCpiKSem4.1k]>Km9-Ru[liA.H0qa!Yqnln/[(B8CFA9Yjj %E:%"N^A_@C*0dLrPRm]sN@!4?CsFLbn"BD`fA,J0a.ViMZ#4J+drY::]GKBq(N6G+JD.d6A70+TGAtn*(?#;4#f(5V?]I524Crc/ %g\'>P]aChAlM4-XE-C_3=m^Ri[_*\i?P4TQ58ur(VND*&$(lZ/a>msP:N`1lbu%D[US\R86j@u0/1pE?LQqoFqh`@h %Q]O>3,+HntDQK=5jaW#:fBo["^.eQ!Y%[1e[MC`J)<-:FF5pkQC0V3lIn%sZ\$[)AH!k@98.q75ht@U[m2d)cj%"G4YW %;]!+C8>%:.g^"LPit[_krR#hd6nTasoq$54n*\gc-r6T%nqi0`Xr4bo"Z5o!esPh`tc:V;k8B4t9dm:ko[ %q8U".D;n4agp^knn%+b0c`8bs9DM'7a>SrLq3[/S4<$6`?HPd#`:7`\S)@1.5;1gY0BEST+*hIbnjU7,Ef*Nc>[@T(<(je4R+2+L %Use1O>Q&/R2AG$62j6k:R@OdEh7\:*V0NH<-HNXg^"4pm/*'W=2:9*d*j#"hl8Rt\:u6Qt_S&9eQM?jAq??5IENXq7\XMei^Bg*01Uu%tdQNnZ+\Jn+@&!&I2t'Nd%[6\0fO:hP\-?0Dnkcfi4f-b0PkiAok._GeD_!753u>Ic7d4 %9lV94(eGWSp[Lt+R#ZlTAg5EgHm %Al8:`qHgpMFg'9NlY#l'nS\!@me%MPkK;so^WDDmX]ta/pEMbp8n6I8L#)#1Em!';p\G'_0gcBeP%#*]lPH/V1uqT(n6Wl&ni1=t %`1kjGXX'Z=jja&$NFu0XJ:@0<,L< %h7\:*Sb1e((Kt*+`k?i[_I5hIBB4F<_M*W9.:K/159q:R;TP)j>Stc]n=2!T0'XM^-MJO`?&E(qP&km-]t8+._oJS>OZRlqj=VB&)jcZ]UH>d#AT>AEcZ_QdLc)\h,q=muT %'ee*u$M3XhAS(E,I%d#(pc`RIGM[diZ%R/QqR]ZjSm/D;O8HW"S^+LCmp.mPcAq_XSX8BG_tVL+7U^L[G1T82n*VZneA&HiDu],G %Zhj6^\LdHdMIXLDRHk@>2O8^5ftBm\Ghf7)h"[p]f!W1AglaX^]lCG9ord%rh-N@Hh!b"n,\(2[^3$.%Y?odQXnm?XrQ2bTP2.@m>lHhU#M(7U %*P\-tJtCaLIOfd8o=^]Rhce]*#P\&5oMX].?r:/,/11pf)r&R1a'7km^9jqOXQ!(/VX%D\D(Y\g[)gaDoC]4j]_d-[R#D3=7j)"/ %]5Dqp36C>In`fVHU,l`X>Kc`gD!Bu5_kt!b9<;I.^,Ht&B,B0rVYG=:b5QG#N%TVI(CU2Lm:)d`]NWpN];aM0\\2)1?EN&2D49tW %h\j(iY0rbmE][30"A\KPNQ-5VIjHo$CgmrKe!ZK::]h:&d3&9hbcsBNHCZVc&f90Nmd#(rWu^Wi[eO$2D8j+@mi=;`2U*fII;_8>4!.E7O0OB2 %.-l8&b!0)@o$.KDlS$fkcLE$d\FjGtfLsUaq*o0!J_GoamU[BA2fA4_f'>?ZKARCRgBr[SOS:^c?2EQM8k[#c"kNG.D;rCui%RO% %J=c]6bN"F"LH1:g/-=NNnYV\k4R;0%W'J?elEPgmYU6a>. 
%\&DA8Cse*0^cWKM-!PHCN\t>IoN/9oKAE.gO.9^7Hsr_XQ8r7<.N>;6?:hhU:,6%T/>OnZis,LkYCt?U:TIRN3Bt9fd7[">Sj1GQ %T0#kFYg]fQT(nbH.cOE7L+I];Se8'1Fp\qH%-rUsUkR22b?S[d0%c>Og@3tR %6URZQG;E"ELH8Pl4O10I.+ZAgFZ/^Vibd`J/reQ(7t"?I:0e%a9Y";V2Jp6oTVY:igEGc_WVkI6lMrYBkT(.W0lV9_>\`!8*Q!J0 %f&I5@s39s<+@\8>)ePQWr0=bF$IjejnCJV`A_CQ)/cB4H+W?]k?TK+!VUE*pIoiTfF*48q!?3Uh,Ac#h-RoBq>r#<0R2Xofg/6,S=:?\EQo1P5(/^.s:4/[2LoH&5Eah.'/9;h6)rY]q)kNpsrBD]aBk@<-:kF8NQK9(S1?eT_=Y!4$.>^pp%S[I@$Y!5-UICHrr/jKA*]Bbo0 %HSFpdo5\9Uq<-e5CEef[f0g\;>SDD\kd_l1q!FNmj55@nPRY0k`1iubqbRqQ%Cf0JW"H):oYashp@,<]F\Nc!SN>M;)Ba?GaC*+- %.Yso9)*1^F;R(1OPi81i#;YUWRaarOm,Nqic)gGJ-''U^RC?pVjJG;`ZF(3>:o'mEd2VL6R9VHgXJG4g:1J1:R'e"AbF>Vn=MfGb %cT<3($uV!cK,/)aE"3DY%nUS&1APLZT41D1*JCp>hh_0Io$^r:Z_,AT] %B3r'^Xl6[K$D&=;qSV-MguBhrpSaUbY>e,l9<7fP2g+;mFAhs!7qC5*nl>P;Y]u`lhJMTO7't>,hRUC^/K7h65dejhC%cd>#(@=` %,tP:J&+aG04!Vh-e*L03;J`)Hdfr3DgG2P.7!WZs6V+Cjb&Y(h/K;hQ_t\j+""c,h1+&FF_Z"u-k6pbM#>9Rn?[Yo;HDV.6?O!M] %?aOkg3[KO`ZU)ss5jegt%''h,BGNJA(M=^b)gN;UD`ggH@C5#I)`NBhj]H7-c>cW1[n`fu#iRE<9- %+>3N.ZdX8^+6CiEmlI*>X`2hX[3#lbg?3;k@PN+^!fcl[\\>`cYr18NotNCoWhSd(GtsQ')?*Po)LQ_>qq_"up[>Y/Cc,6AD=RPg %p[_1ogZb5Mri^eDk@sG9q/F6FB7^6 %?ZL7]*B>3uk6u^\>C)r@Qn9_7df>Vhk=X_icFXs$XD/"1bZ!p`SiNE['GD6OM%$UZm)4)0<`BWUl#X$IQWkgG`jNLDh[!;La]_(]KcJpg/etaSf7$)$LqXp;]Wu5H/#(PfC %#9@@"'Q['_/H=+in5&p/+%mFlqdTKCE6MDS=ZYVt^fG]pki\?ZI*\s+aQ"^%Y1C&[EZecUp3'+oTCEFU32KL!Mn#IX*,L2Rd)LCO %SFTG+OJo()hT(mg0I:=hZ^c9QrMC8fPgZN-(#ClECS]c0Jnu!I %RVc\ee4(!bG73m:h\k0;:WVPD*%,`IfR1gMEdCtpjF4u(*qM/G6nS/^V^!aOHKAgq>foS@AR7Om.(*\/B)9Z\4BQtf:WS=blSn-3 %_X!@P/_Kp\l %mYsJlbU8*bS)9s0<,:iR:aGcE"33(gNk2T$Sie7!.K2V.!h_."dXB\Zj-YGL9qid:(`q#K4s-bPGR:?/C' %/"5[E+uobiS#V/lk;acUITSm65;/CZQ,7J'qsc"e^N`\04fA4]+8dFj?Y5anC^/=;E.*EdQGm<\[ejoLr-g$J[1ph:CgkF`(d9GkhhU?Caa+I(27_*M0+S %6ZiWT]SgcN-2@MZJ)ZMrQpVU;m)QUu5PH)VN7l#q %6CueRAPU[-]HCnOBiFR@ccOP.jE4bekFQ.O"C5]fL!5DCQDD`=&te>$7=l:$0Cp[#LjWOf"PI[Dmf_i[PVgEb]U5%1c-[+c&TpAo 
%L6H:1%QYD:M+pTA70'ra?DBs;maTE_'_-,NIsT9hq\THrhW-I:#;WgMsW1,QK`Lru6f^`#5$V\Vj('HG81e+>c#!BRV.D(pJ'#"DR"DS2VEQseBC&Z-") %Mjh]eV^\-hF-)*g-Gd&$\seLtO>M3?_sarp!)J+\Gb*kO]*5p4@"kU33Ssm_8V4`]GQE0.)+a*UfI%a'cLV#_A+O4@^^G4=XjV:p %GF`C$Vs"CE&N:5T<`MYB-KR[a7kiS4!'Zg;($H"Z"B[hiWm"2#hpL`-Mf/[2A(3E;GF %?uoja;XG7p%T$NpGt4>*!#9d1D5sI+nsfd>'3(EuSC[q[$M[BGo=V.2KHNCT965t="(8?W)1'/N'bM$s:'/"h&WpM@!d#l)4M@M' %J2rL[js@r*pjX>SKEa\K)&E>p/!>DsV!p(*:-aA1aoDGf+@9a,8qho:Eb\nqE#AckPsBr5_?m4C&cf,:_\<8L1_pJ*QN=-qa#9hGR<<5b\Gb,VfePMi-d&M%&_2'of>&`Sgfj6relc,TqIEq@R/RFeDV=TEgS!=^mlE8NO9LaGmEXP@0Otn0qY_#/gM5 %[;`F+\0E57@J8&,oI1ok$_"V!HsW+I,@j(s5m[rb@GgSnZsiF=0gqFSQ]=Hr-`JdI:1PmdW"$("j## %/79qO+MX_f]QI5N(0T4.k5Z_-<=qs09gON6+OejUUIAQB5]2Stp^TR0M]cNr[fB8>Q"/$6cuF@5]Ybu2WIi5pTH@R13\F3:XA7c* %TK&G_!(#%aeOb9-n:04/)/3$imsr@W-]t?ef7C89$ZusKPr-9>&UC7*X:<7%3(G8Al38sfNf,;=?j$#9JL/0G!]7-U4NA\(i@Z7) %4N0]'G#)0Wjm*OqRL"!`W3@:uF>=C(+TcH1GttVKi&_6"fg&s=(>o444a`/tfIk%DEu`6a(FG,8;WLgX!j3R5r@E3qB@,`kcpn9J]YG0AIQ@Mr6'9?0klGIfG1BD&L8Kja! %!P'?r(a1(7!.4tVU$94!\%lWATff(jo.9ue=CqV"JDCIp#X'\3jq3B(cN$]Qa?Ni^@Kp3]#[(,$J/CSn.!%L9$Rcg;LA>m1@1TO[ %S.R3*"5,L9h]dj9$1!Cmo*VW,75N"FiYh5+/l"m)&-90Qi1iF*;]Rh2BHocAW7P0(5W9=RM"I7c-GoC3)0Q@6i_X"`HNWuD$T&pF %+di=6aO#Ut+kos-P@;X"(sIOB#;N7'M80WjfNuq8nfU5D^g`9Ai#C=(l-XnqlMV&CVL%Ge6m^O!rEB6&lb;=Fp'dYlN1$o?jEbbuc]Q?nau"7gnJ:U'&Al_aJ9>V< %0"L+pas$:6HO^\4?3+qR[n3HcKY<#f6G.nU!,A2f#cLQDNs7MY,NF!a4LUnH41LNMa4]L]NA;V]&(pjbUibs\Wi0/_h@3#DdWXb4>RB)7%A>&iK18loM@6qq2Od'hB %?uV@f^^bqeM'=>`^cppXG`.0)pRjXIL-X?*`(gp?.W?UM@=Sb,q>j0bP9'](;.qf9hGJXH)(Q8<-9T!FGb?HY%#>heWIX!@$lk57 %*,,SP=<9C+iNIda7"pCpGXa0u;.5'TQV2uG7LOU?j"AN82?bsG!GS,q?3-]sJ228_F6Zq?XB=H/[NQnt=f2Vt7Le=4ap?]UMP-%s %KRa5+%`,?aJeSBVNe$C'Cm"j-;ugCS!rB*\as00-9GYQ!),U`?9Rr*JaQtL4dnhb&1m(eh46^J[YrjKB$mJB;"_%LhVAVW"'baG9 %Vo>(I"h)ULcKYCeU@AXNm'eg2WHL0r5UMSXkQD5@'-Jq27Q^B4"_n)MWrsbI8V-[<\mP-\.J`$SFW?!C7<;46D'J0?IG!9u@\Y*C,OCH8+"1 %H'oEVBS-25l2oHZ3o?$dAnC^gO*$hc4[)CWM,OT0pdcBV"*br'RGfe+W!EGngFYg94Dof%L.hoT6ijp*g&T\:7pj-?FG8uZ[$)D# 
%8Xqj,MkEE%e/,?5"?ID_W:N'?(80Uf0:'@1c+;Bo"SKCo:h'"#JcOI)X"7!ak!CBiJs'b#8W/:8'% %Hk%DDIKqs;TUIL51@n9IJ/*Te)X6"6L?^%FU)+[qWf4aQ8`S6e.O+.?j"pgPY:>ER&C1]$\CKS8Ja>9oPXp-(\g,r:A8Gd)_k[uUL]ZkBBLebA.3Tb2U^EdE75BSuR1g&&=^\qU0l9ZR %f+S!X;%F47M3MH%P)=!*llcXP-CE3KPVk3g&Z'ae&X&"u.&%PtE>V?C(*GN_eCJoW"d'O+7g7aq$,mFZ$gO87O[,?Ue%0h18.foB %TN/:EFHd=s55sZ5-``Eb4!oc/k<.G*s17LRc0HJqb %E%(!@P6a&O\^do'Nat$bc('=<1^jSg0smli(\=GD92+=f&W5eSi_f]=KTo?7'Q@bWW2Ll5GCsDJ1FQ4kMTQ/BdJt(-*KJ&+cqjmU %)up:D;H/o\*DR7WU&_HNg(1bI5hST#@+#TD*!s#s934&VE4^tq3*mdl7an=]*^hJcQ]u-A>2r')6qWaW!qh-TUS5Hn.u#"YS/a50J<36t'DXZ)J7,1)r!cb\:4=6^:r.0R:cB&E>'8OD %ACbEj,"7^Uc#seg/Pfn!#_Z.->;334dW[g;g*0ofLh!%QL+Qen!jZKaRY%[cZUT!X/J;T09`6_`'V%-37-H$Qm1rVk[N'IK:E.Y& %YSA9sCdZ4!hEAdl*mF\P&-u*t>%D_&j*Dt:@,Dj5(X*`7(/IY8\tKEu15%1er#7e6P4eP_R6R?(o%"a.Rj>A(GuI38=pA-'PNB[Q %j:F_b?uL$7__]J(68&oV"5@>fHIpQG!g$N$NoaJWP[>Ak(bFCkL;#,@/PHcT?lB(V8IQIC^:m<&=*2iIh=\ %Va0augkW*IFU?RZ-.J@a]gr9RJ\nT02C<`QOdK^#`hB)VDBp84"`sui3#f1-$j@*p<(EFZU>M.`,:#9['n;=S!:YPbHI+.h2`nG:j:]`[#G:eU*RmX#W`KB`BSM1ZB)DE5Zoh:ra'@"oFNBSE'^mTfrq$8Gi2usW9+,*3/f4d_Y[$nh$5t"-X#ne1nL57Pch@Pmh-SXn=>mV5(6-;K)!r%le %;C$IR/QoHl@F^K9#^8uI6,=`Vcm#6&c5bgNDn"YZ@](W\Xg_8/_3#OCn)S7bd-Y9r%US*#&^UQX$J6[]q]$pc659BbBE=mgPapkc %.O\g/P'ICV/L)jAK9pF:N^=CLisbH^`g--dg)R8PgeBQp()?eo+IL:@8cEmR-B[=1&]&4%+HYE6C^2@\7d;6PWJ[.E`5^g?o]H4? 
%OrjehaeAQ@ke%7WOt]^!L96Es(7NffNMF'38qs5D_1F3UqgsNB$>_/[kbO(ZitKe7JOhAVD2(4VSY@6_`6@qmOF`"i6JY6KR7JS4 %f/DF/(>O(`^*lNqRt?52?'s9=+S6E,hh3<.2baGbGMn!B+NUW4oEV11\9p`_e#S#3+98*[QW0+O$U&iuD %J.:2Fa:gO4onGF;YpC+9A/3jH8^Se!/[P3bL:U?EC`A#e.&ckabBEA$"k`tB@eO#>*/UU7R#FU&=/E1H#%,5l(er]C9*tr0i\)'S %ZVJ`^dlq'^>^VuMcBs4:acXoh[T0+$.(0eXj(EPZB(lfWJ[+UTD/;L-ZOS+oU`e[RAHG %76.l@6@K;JYdkBmdK&TTRk(2i9G:]*#rWiD'1p8U9'@OqfT!k7'QdOJ:6n=Oc_ZaW9 %pQR:]i$+irBWkL_0BI5#SJT,Z6`J@ %kX0!;(@rWR2FkcAl"@A$)B\61cnIXLNX=/C^bd3lJN:pS%>f]-,>9mY %"Vm;6314Bo0G5UFC[_@_UrXr65TB`tr$BMU&W')r7uT)l#^9\N/E[?:2B#s)M"2`9BGg3"D+t%EUDHL4XN)4b'op,V&Wuh_.NmZ5N:D&%k:;"OCRJ+:q %(<>_,2A69q:DTIT[Z,.$kl`rmm*">jGG(P\k4E9W(AA_bF+!it9:u&.qf-\2Xm\GHm[JS"q>1%-Hp>sMhB%,-F'h/X*Cm!a+N@9% %+(o+$ir:VPN$LW=-2CCbM+WdjKrLnU0m')iD2Hb30dbth?nr#k)BH@M$@1P\$DfG4B_PP:.+LTp$C&4kWA_D$k[C:'UP#/Y9I66L %D[NjZi)2rF6QeOC')@UrJX$N%VNZ9\]M[o;2%3YR6_`WS&@34:Ot`&M8rj+`+G\qeaHG!?n1thbds:)?a3W+lKlKHpF_-RNML]1b %ZAc.&S@C'6b`F/l`bA&hr<4OB_4Enf+AEK&4)&@^>tN(-,!lj/%0?\b9pHS"'"T`9R6'K\TVk>Gd4b`]0d<:SerqfEM>h.4>GaY+ %#ujMPU#(&g'f8`.Cjm.aj"WRa.0a\jX2QApn!L#/R7Nd^Yn/=Ojt]8ne9FPp;#uWZngrBZ7)^->,T/u9.QJ.B4K>>V$YccfHUaJI %j.92n'm'uGP;3b&_%&7f37'MKmuUiG&e!kpllg'3/@SEpCc"c!5d6u"0&p82jAJiX6lQfA0Y*!u?\pe`AWS1'BoR4.)2ePF[0g+N %Hq(Z,PdREo?mU)tbK5PE5Fd[KE3^,h%YA=Kd_RN'N-O:($-=Q7 %2^&4\LQdi3IQ88X_3^@J&UZP]+XQQj@m!;an"9&/?K-g7:c%hmSRW?B*_R(]E"YrS%l^AG'cufkt %b1uV=o-@2Y(/K@Z/FQ#;ECnNP9Mih&(pshLKDCN2l<4n$#L%=Q.?-g8cmcgPN!lG"M-9]AAMuL&*LW$ %A65E#Nu2K;E[#/G<)?=K/5@DUPE5eFT`C:lLn5s!1(#]fF:,:Rk`!LLaG>A\:h_;1^jn<\)agt.03C&7C;;8u+p&dU%EN^#"CsEX %8Of:ojOfp:BEtJ=1a\+j+T,!^0lIMbU+q?>];-i'c4tCkFiH6P/@QbqW$/F'''0AV<(/$7"_6b0-k4FXbuA0*<+MjN*i$$$hHHJH %:p@&XHEMC'%$bFO09fJrhF@>KE6/<-.gAC;)Mm#R":8=&Zdk;0V+UE;G]d@HBFh-3#pWh %qHsdBAOp96*_%=PK*EUt,f_*S7#%QR[U*8@(99o00;[o*U``JL"0NHr5WEc-YEYt0!->k7Q6n.j`!R<>H&63bWlU?S!+_\&=Pd^' %f`oVN,m=)X#m[`HJ/j4lnj?\>Y`TSJA;Eh.ba7:[er81<\eWTqW^rS0d$X'QF`8Oe1("hVEsh,CPQ_[*0L^[.+]K*R4L$i0d"9KO %2c\Y+1*L0(29XZ!f<.Nh8IQS9Q50JY5D[:!4G#ia#e":"ZG/4$+K$4R.E 
%c;J:1!OKFa.YSO$Z,8O=cm,_3#]277r/aJf"\F;#Go93N5A3/^`&qaUa&=;1d&b+X(Dm@A4h:ikPj$Bb`<$POS>(3T6$/3Lm$O8Ha.OFPhXC-`'iT9MU+0JK/rlYqMA0+bqcf %N:g$tQ)(^m:eLQEF\Kn5$@BRM6.Hd0,6V8W6kpS#Q:kViO=,V#UBU0P8SS3gP_MbD'[]Bc#:$^JL,LQj.9OP;;EDD73tn%//>Gu> %2l/Fk#eYa\#Sbce=dB7lFAWG.>11S2:9%,g*FOjpg6=kEhT-#-$A1tD:`'VEZ1?ct[7Ol-ZY8P7#%=eN7EnDF@4dl6cZ0@`HrOQ? %:os4(gSfo;1]s%INiE^bpqWn6R8b`.WJt"PJuC"P?:ifmQl8>HWJF-U==E](#qc0i&mC%-JR9Y=JJrF'JiVL$ZD0`C%Hdh4;_\r? %$?5K"!7\+:mPTRGr;/u,S\ZID`TT(?KjT?X1gku'Q)%ZC6@LC"=Te-9lRY_cNZJe?ACc!(V:*kf;4S">Nt@p-7tQE9*13LWR#_"u@I4"> %Jc$T]!ZB.&p^5>h5W)PLJQ,W+MQ)\ObF"']$I$D>S0#j2m,$,3_GW(8;Cc>lI;P8&JM4G"B0,H,Qc$R,FoY44qt&R#%=4T51CPs= %^^&a]fXgt,rTMLSI/j--pY>L=J+!?F%r<+c&`'3!+`*g:d"7j+6\lRNNmP49^o%/cDI^Z=bMBSu0.cn+Qks3KKt %$el0D*jurC_uYDRV#0Z\g$-L_HOp/(R="`.7\dE`$W#nk]:E:.VqZdirqT?+JPXMA'8bFl4nemXPQ5@\~> %AI9_PrivateDataEnd \ No newline at end of file diff --git a/logo/gnocchi-icon.eps b/logo/gnocchi-icon.eps new file mode 100644 index 00000000..2c722cd9 --- /dev/null +++ b/logo/gnocchi-icon.eps @@ -0,0 +1,5578 @@ +%!PS-Adobe-3.1 EPSF-3.0 +%ADO_DSC_Encoding: MacOS Roman +%%Title: gnocchi-icon.eps +%%Creator: Adobe Illustrator(R) 13.0 +%%For: Thierry Ung +%%CreationDate: 4/3/17 +%%BoundingBox: 0 0 1096 840 +%%HiResBoundingBox: 0 0 1096 840 +%%CropBox: 0 0 1096 840 +%%LanguageLevel: 2 +%%DocumentData: Clean7Bit +%ADOBeginClientInjection: DocumentHeader "AI11EPS" +%%AI8_CreatorVersion: 13.0.0 %AI9_PrintingDataBegin %AI3_Cropmarks: 36.0000 36.0000 1060.0000 804.0000 +%ADO_BuildNumber: Adobe Illustrator(R) 13.0.0 x409 R agm 4.4378 ct 5.1039 %ADO_ContainsXMP: MainFirst %AI7_Thumbnail: 128 100 8 %%BeginData: 3232 Hex Bytes %0000330000660000990000CC0033000033330033660033990033CC0033FF %0066000066330066660066990066CC0066FF009900009933009966009999 %0099CC0099FF00CC0000CC3300CC6600CC9900CCCC00CCFF00FF3300FF66 %00FF9900FFCC3300003300333300663300993300CC3300FF333300333333 %3333663333993333CC3333FF3366003366333366663366993366CC3366FF 
%3399003399333399663399993399CC3399FF33CC0033CC3333CC6633CC99 %33CCCC33CCFF33FF0033FF3333FF6633FF9933FFCC33FFFF660000660033 %6600666600996600CC6600FF6633006633336633666633996633CC6633FF %6666006666336666666666996666CC6666FF669900669933669966669999 %6699CC6699FF66CC0066CC3366CC6666CC9966CCCC66CCFF66FF0066FF33 %66FF6666FF9966FFCC66FFFF9900009900339900669900999900CC9900FF %9933009933339933669933999933CC9933FF996600996633996666996699 %9966CC9966FF9999009999339999669999999999CC9999FF99CC0099CC33 %99CC6699CC9999CCCC99CCFF99FF0099FF3399FF6699FF9999FFCC99FFFF %CC0000CC0033CC0066CC0099CC00CCCC00FFCC3300CC3333CC3366CC3399 %CC33CCCC33FFCC6600CC6633CC6666CC6699CC66CCCC66FFCC9900CC9933 %CC9966CC9999CC99CCCC99FFCCCC00CCCC33CCCC66CCCC99CCCCCCCCCCFF %CCFF00CCFF33CCFF66CCFF99CCFFCCCCFFFFFF0033FF0066FF0099FF00CC %FF3300FF3333FF3366FF3399FF33CCFF33FFFF6600FF6633FF6666FF6699 %FF66CCFF66FFFF9900FF9933FF9966FF9999FF99CCFF99FFFFCC00FFCC33 %FFCC66FFCC99FFCCCCFFCCFFFFFF33FFFF66FFFF99FFFFCC110000001100 %000011111111220000002200000022222222440000004400000044444444 %550000005500000055555555770000007700000077777777880000008800 %000088888888AA000000AA000000AAAAAAAABB000000BB000000BBBBBBBB %DD000000DD000000DDDDDDDDEE000000EE000000EEEEEEEE0000000000FF %00FF0000FFFFFF0000FF00FFFFFF00FFFFFF %524C45FDFCFFFDFCFFFDFCFFFDFCFFFDFCFFFDFCFFFDFCFFFDFCFFFDFCFF %FDFCFFFDFCFFFDFCFFFDFCFFFDFCFFFDFCFFFDFCFFFDFCFFFDFCFFFDE6FF %7DA8A8FD7AFFA87DF827F827F852A8FD76FFA827F8274A4B2627F8277DFD %25FFC9CFCAFD4DFF52F851C7C8C7C8C775F8277DFD23FFC7C7C7C8C7CEFD %4AFF52F8279FFD04C7C8C77BF827A8FD21FFA5C7C7C7C1C8C7C8CFFD47FF %A8F851C7C7C7C8C7C8C1C8C751F87DFD20FFC8C7C7C8C7C8C1C8C7CEFD47 %FF27279FC7C1FD05C79FC77B27F8FD1FFFA6C79FFD08C7FD46FF7D274BC8 %C7C8C7C8C7C8C7C8C7C826277DFD1EFFC8C7C8C7C8C7C8C7C8C7C8C9FD45 %FF52F8C79FFD04C7C8C7C7C1C8C79FF827FD1DFFC89FFD04C7C8C7C7C1C8 %C7C8CFFD44FF27279FC7C1C8C7C8C1C8C7C8C7C8C727F8FD1DFFC8C7C1C8 %C7C8C1C8C7C8C7C8C7CFFD44FF27F89F9FFD05C7C1C7C1C7C1C726277DFD 
%1CFFCE9FC79FFD05C7C1C79FC7C8FD44FFF8279FC7C7C8C7C8C7C8C7C8C7 %C8C751F8A8FD1CFFC9C7C7C8C7C8C7C8C7C8C7C8C7CEFD44FF27F89FC1C7 %C1C8C7C7C1C8C7C7C7C850277DFD1CFFCEC1C79FC8C1C7C1C8C7C7C7C8C8 %FD44FF2727C1FD04C7C8C7C8C7C8C7C8C77BF87DFD1CFFC8FD04C7C8C7C8 %C7C8C7C8C7CEFD44FF27F8C79FC7C1C79FC8C1C79FC7C1C7502752FD1CFF %C89FC79FC79FC8C1C79FC7C1C7C8FD44FF52274BFD04C7C8C7C8C7C8C7C8 %C751F8A8FD1CFFCFC8C7C7C7C8C7C8C7C8C7C8C7CEFD45FFF8279FC7C1C7 %C7C8C7C7C1C8C7C7F827A8FD1DFF9FC7C1C7C7C8C7C7C1C8C7C7CAFD45FF %27F8C7C1C7C7C8C1C8C7C8C7C8512727FD1EFFC8C1C7C7C8C1C8C7C8C7C8 %C8FD46FF522750C79FC79FC79FC8C1C79F27F87DFD1EFFCFC79FC79FC79F %C8C1C7C7CFFD47FF2727C1C7C7C8C1C8C7C8C751F852FD20FFC8C7C7C8C1 %C8C7C8C7CFFD48FF7DF8277BC7C1C79FC77B27F852A8FD21FFC8C7C1C79F %C7C7CFFD4AFF52F827F8515151F827F852A8FD24FFCFC9CFFD4EFFA85227 %F827F82752A8FD7AFFA8A8FDFCFFFDFCFFFDFCFFFDFCFFFDFCFFFDFCFFFD %FCFFFDFCFFFDFCFFFDFCFFFDFCFFFDFCFFFDFCFFFDFCFFFDFCFFFDFCFFFD %FCFFFDFCFFFDFCFFFDFCFFFD2FFFFF %%EndData +%ADOEndClientInjection: DocumentHeader "AI11EPS" +%%Pages: 1 +%%DocumentNeededResources: +%%DocumentSuppliedResources: procset Adobe_AGM_Image 1.0 0 +%%+ procset Adobe_CoolType_Utility_T42 1.0 0 +%%+ procset Adobe_CoolType_Utility_MAKEOCF 1.23 0 +%%+ procset Adobe_CoolType_Core 2.31 0 +%%+ procset Adobe_AGM_Core 2.0 0 +%%+ procset Adobe_AGM_Utils 1.0 0 +%%DocumentFonts: +%%DocumentNeededFonts: +%%DocumentNeededFeatures: +%%DocumentSuppliedFeatures: +%%DocumentProcessColors: Cyan Magenta Yellow Black +%%DocumentCustomColors: +%%CMYKCustomColor: +%%RGBCustomColor: +%%EndComments + + + + + + +%%BeginDefaults +%%ViewingOrientation: 1 0 0 1 +%%EndDefaults +%%BeginProlog +%%BeginResource: procset Adobe_AGM_Utils 1.0 0 +%%Version: 1.0 0 +%%Copyright: Copyright(C)2000-2006 Adobe Systems, Inc. All Rights Reserved. 
+systemdict/setpacking known +{currentpacking true setpacking}if +userdict/Adobe_AGM_Utils 73 dict dup begin put +/bdf +{bind def}bind def +/nd{null def}bdf +/xdf +{exch def}bdf +/ldf +{load def}bdf +/ddf +{put}bdf +/xddf +{3 -1 roll put}bdf +/xpt +{exch put}bdf +/ndf +{ + exch dup where{ + pop pop pop + }{ + xdf + }ifelse +}def +/cdndf +{ + exch dup currentdict exch known{ + pop pop + }{ + exch def + }ifelse +}def +/gx +{get exec}bdf +/ps_level + /languagelevel where{ + pop systemdict/languagelevel gx + }{ + 1 + }ifelse +def +/level2 + ps_level 2 ge +def +/level3 + ps_level 3 ge +def +/ps_version + {version cvr}stopped{-1}if +def +/set_gvm +{currentglobal exch setglobal}bdf +/reset_gvm +{setglobal}bdf +/makereadonlyarray +{ + /packedarray where{pop packedarray + }{ + array astore readonly}ifelse +}bdf +/map_reserved_ink_name +{ + dup type/stringtype eq{ + dup/Red eq{ + pop(_Red_) + }{ + dup/Green eq{ + pop(_Green_) + }{ + dup/Blue eq{ + pop(_Blue_) + }{ + dup()cvn eq{ + pop(Process) + }if + }ifelse + }ifelse + }ifelse + }if +}bdf +/AGMUTIL_GSTATE 22 dict def +/get_gstate +{ + AGMUTIL_GSTATE begin + /AGMUTIL_GSTATE_clr_spc currentcolorspace def + /AGMUTIL_GSTATE_clr_indx 0 def + /AGMUTIL_GSTATE_clr_comps 12 array def + mark currentcolor counttomark + {AGMUTIL_GSTATE_clr_comps AGMUTIL_GSTATE_clr_indx 3 -1 roll put + /AGMUTIL_GSTATE_clr_indx AGMUTIL_GSTATE_clr_indx 1 add def}repeat pop + /AGMUTIL_GSTATE_fnt rootfont def + /AGMUTIL_GSTATE_lw currentlinewidth def + /AGMUTIL_GSTATE_lc currentlinecap def + /AGMUTIL_GSTATE_lj currentlinejoin def + /AGMUTIL_GSTATE_ml currentmiterlimit def + currentdash/AGMUTIL_GSTATE_do xdf/AGMUTIL_GSTATE_da xdf + /AGMUTIL_GSTATE_sa currentstrokeadjust def + /AGMUTIL_GSTATE_clr_rnd currentcolorrendering def + /AGMUTIL_GSTATE_op currentoverprint def + /AGMUTIL_GSTATE_bg currentblackgeneration cvlit def + /AGMUTIL_GSTATE_ucr currentundercolorremoval cvlit def + currentcolortransfer cvlit/AGMUTIL_GSTATE_gy_xfer xdf cvlit/AGMUTIL_GSTATE_b_xfer 
xdf + cvlit/AGMUTIL_GSTATE_g_xfer xdf cvlit/AGMUTIL_GSTATE_r_xfer xdf + /AGMUTIL_GSTATE_ht currenthalftone def + /AGMUTIL_GSTATE_flt currentflat def + end +}def +/set_gstate +{ + AGMUTIL_GSTATE begin + AGMUTIL_GSTATE_clr_spc setcolorspace + AGMUTIL_GSTATE_clr_indx{AGMUTIL_GSTATE_clr_comps AGMUTIL_GSTATE_clr_indx 1 sub get + /AGMUTIL_GSTATE_clr_indx AGMUTIL_GSTATE_clr_indx 1 sub def}repeat setcolor + AGMUTIL_GSTATE_fnt setfont + AGMUTIL_GSTATE_lw setlinewidth + AGMUTIL_GSTATE_lc setlinecap + AGMUTIL_GSTATE_lj setlinejoin + AGMUTIL_GSTATE_ml setmiterlimit + AGMUTIL_GSTATE_da AGMUTIL_GSTATE_do setdash + AGMUTIL_GSTATE_sa setstrokeadjust + AGMUTIL_GSTATE_clr_rnd setcolorrendering + AGMUTIL_GSTATE_op setoverprint + AGMUTIL_GSTATE_bg cvx setblackgeneration + AGMUTIL_GSTATE_ucr cvx setundercolorremoval + AGMUTIL_GSTATE_r_xfer cvx AGMUTIL_GSTATE_g_xfer cvx AGMUTIL_GSTATE_b_xfer cvx + AGMUTIL_GSTATE_gy_xfer cvx setcolortransfer + AGMUTIL_GSTATE_ht/HalftoneType get dup 9 eq exch 100 eq or + { + currenthalftone/HalftoneType get AGMUTIL_GSTATE_ht/HalftoneType get ne + { + mark AGMUTIL_GSTATE_ht{sethalftone}stopped cleartomark + }if + }{ + AGMUTIL_GSTATE_ht sethalftone + }ifelse + AGMUTIL_GSTATE_flt setflat + end +}def +/get_gstate_and_matrix +{ + AGMUTIL_GSTATE begin + /AGMUTIL_GSTATE_ctm matrix currentmatrix def + end + get_gstate +}def +/set_gstate_and_matrix +{ + set_gstate + AGMUTIL_GSTATE begin + AGMUTIL_GSTATE_ctm setmatrix + end +}def +/AGMUTIL_str256 256 string def +/AGMUTIL_src256 256 string def +/AGMUTIL_dst64 64 string def +/AGMUTIL_srcLen nd +/AGMUTIL_ndx nd +/AGMUTIL_cpd nd +/capture_cpd{ + //Adobe_AGM_Utils/AGMUTIL_cpd currentpagedevice ddf +}def +/thold_halftone +{ + level3 + {sethalftone currenthalftone} + { + dup/HalftoneType get 3 eq + { + sethalftone currenthalftone + }{ + begin + Width Height mul{ + Thresholds read{pop}if + }repeat + end + currenthalftone + }ifelse + }ifelse +}def +/rdcmntline +{ + currentfile AGMUTIL_str256 readline pop + 
(%)anchorsearch{pop}if +}bdf +/filter_cmyk +{ + dup type/filetype ne{ + exch()/SubFileDecode filter + }{ + exch pop + } + ifelse + [ + exch + { + AGMUTIL_src256 readstring pop + dup length/AGMUTIL_srcLen exch def + /AGMUTIL_ndx 0 def + AGMCORE_plate_ndx 4 AGMUTIL_srcLen 1 sub{ + 1 index exch get + AGMUTIL_dst64 AGMUTIL_ndx 3 -1 roll put + /AGMUTIL_ndx AGMUTIL_ndx 1 add def + }for + pop + AGMUTIL_dst64 0 AGMUTIL_ndx getinterval + } + bind + /exec cvx + ]cvx +}bdf +/filter_indexed_devn +{ + cvi Names length mul names_index add Lookup exch get +}bdf +/filter_devn +{ + 4 dict begin + /srcStr xdf + /dstStr xdf + dup type/filetype ne{ + 0()/SubFileDecode filter + }if + [ + exch + [ + /devicen_colorspace_dict/AGMCORE_gget cvx/begin cvx + currentdict/srcStr get/readstring cvx/pop cvx + /dup cvx/length cvx 0/gt cvx[ + Adobe_AGM_Utils/AGMUTIL_ndx 0/ddf cvx + names_index Names length currentdict/srcStr get length 1 sub{ + 1/index cvx/exch cvx/get cvx + currentdict/dstStr get/AGMUTIL_ndx/load cvx 3 -1/roll cvx/put cvx + Adobe_AGM_Utils/AGMUTIL_ndx/AGMUTIL_ndx/load cvx 1/add cvx/ddf cvx + }for + currentdict/dstStr get 0/AGMUTIL_ndx/load cvx/getinterval cvx + ]cvx/if cvx + /end cvx + ]cvx + bind + /exec cvx + ]cvx + end +}bdf +/AGMUTIL_imagefile nd +/read_image_file +{ + AGMUTIL_imagefile 0 setfileposition + 10 dict begin + /imageDict xdf + /imbufLen Width BitsPerComponent mul 7 add 8 idiv def + /imbufIdx 0 def + /origDataSource imageDict/DataSource get def + /origMultipleDataSources imageDict/MultipleDataSources get def + /origDecode imageDict/Decode get def + /dstDataStr imageDict/Width get colorSpaceElemCnt mul string def + imageDict/MultipleDataSources known{MultipleDataSources}{false}ifelse + { + /imbufCnt imageDict/DataSource get length def + /imbufs imbufCnt array def + 0 1 imbufCnt 1 sub{ + /imbufIdx xdf + imbufs imbufIdx imbufLen string put + imageDict/DataSource get imbufIdx[AGMUTIL_imagefile imbufs imbufIdx get/readstring cvx/pop cvx]cvx put + }for + DeviceN_PS2{ + 
imageDict begin + /DataSource[DataSource/devn_sep_datasource cvx]cvx def + /MultipleDataSources false def + /Decode[0 1]def + end + }if + }{ + /imbuf imbufLen string def + Indexed_DeviceN level3 not and DeviceN_NoneName or{ + /srcDataStrs[imageDict begin + currentdict/MultipleDataSources known{MultipleDataSources{DataSource length}{1}ifelse}{1}ifelse + { + Width Decode length 2 div mul cvi string + }repeat + end]def + imageDict begin + /DataSource[AGMUTIL_imagefile Decode BitsPerComponent false 1/filter_indexed_devn load dstDataStr srcDataStrs devn_alt_datasource/exec cvx]cvx def + /Decode[0 1]def + end + }{ + imageDict/DataSource[1 string dup 0 AGMUTIL_imagefile Decode length 2 idiv string/readstring cvx/pop cvx names_index/get cvx/put cvx]cvx put + imageDict/Decode[0 1]put + }ifelse + }ifelse + imageDict exch + load exec + imageDict/DataSource origDataSource put + imageDict/MultipleDataSources origMultipleDataSources put + imageDict/Decode origDecode put + end +}bdf +/write_image_file +{ + begin + {(AGMUTIL_imagefile)(w+)file}stopped{ + false + }{ + Adobe_AGM_Utils/AGMUTIL_imagefile xddf + 2 dict begin + /imbufLen Width BitsPerComponent mul 7 add 8 idiv def + MultipleDataSources{DataSource 0 get}{DataSource}ifelse type/filetype eq{ + /imbuf imbufLen string def + }if + 1 1 Height MultipleDataSources not{Decode length 2 idiv mul}if{ + pop + MultipleDataSources{ + 0 1 DataSource length 1 sub{ + DataSource type dup + /arraytype eq{ + pop DataSource exch gx + }{ + /filetype eq{ + DataSource exch get imbuf readstring pop + }{ + DataSource exch get + }ifelse + }ifelse + AGMUTIL_imagefile exch writestring + }for + }{ + DataSource type dup + /arraytype eq{ + pop DataSource exec + }{ + /filetype eq{ + DataSource imbuf readstring pop + }{ + DataSource + }ifelse + }ifelse + AGMUTIL_imagefile exch writestring + }ifelse + }for + end + true + }ifelse + end +}bdf +/close_image_file +{ + AGMUTIL_imagefile closefile(AGMUTIL_imagefile)deletefile +}def +statusdict/product known 
userdict/AGMP_current_show known not and{ + /pstr statusdict/product get def + pstr(HP LaserJet 2200)eq + pstr(HP LaserJet 4000 Series)eq or + pstr(HP LaserJet 4050 Series )eq or + pstr(HP LaserJet 8000 Series)eq or + pstr(HP LaserJet 8100 Series)eq or + pstr(HP LaserJet 8150 Series)eq or + pstr(HP LaserJet 5000 Series)eq or + pstr(HP LaserJet 5100 Series)eq or + pstr(HP Color LaserJet 4500)eq or + pstr(HP Color LaserJet 4600)eq or + pstr(HP LaserJet 5Si)eq or + pstr(HP LaserJet 1200 Series)eq or + pstr(HP LaserJet 1300 Series)eq or + pstr(HP LaserJet 4100 Series)eq or + { + userdict/AGMP_current_show/show load put + userdict/show{ + currentcolorspace 0 get + /Pattern eq + {false charpath f} + {AGMP_current_show}ifelse + }put + }if + currentdict/pstr undef +}if +/consumeimagedata +{ + begin + AGMIMG_init_common + currentdict/MultipleDataSources known not + {/MultipleDataSources false def}if + MultipleDataSources + { + DataSource 0 get type + dup/filetype eq + { + 1 dict begin + /flushbuffer Width cvi string def + 1 1 Height cvi + { + pop + 0 1 DataSource length 1 sub + { + DataSource exch get + flushbuffer readstring pop pop + }for + }for + end + }if + dup/arraytype eq exch/packedarraytype eq or DataSource 0 get xcheck and + { + Width Height mul cvi + { + 0 1 DataSource length 1 sub + {dup DataSource exch gx length exch 0 ne{pop}if}for + dup 0 eq + {pop exit}if + sub dup 0 le + {exit}if + }loop + pop + }if + } + { + /DataSource load type + dup/filetype eq + { + 1 dict begin + /flushbuffer Width Decode length 2 idiv mul cvi string def + 1 1 Height{pop DataSource flushbuffer readstring pop pop}for + end + }if + dup/arraytype eq exch/packedarraytype eq or/DataSource load xcheck and + { + Height Width BitsPerComponent mul 8 BitsPerComponent sub add 8 idiv Decode length 2 idiv mul mul + { + DataSource length dup 0 eq + {pop exit}if + sub dup 0 le + {exit}if + }loop + pop + }if + }ifelse + end +}bdf +/addprocs +{ + 2{/exec load}repeat + 3 1 roll + [5 1 roll]bind cvx 
+}def +/modify_halftone_xfer +{ + currenthalftone dup length dict copy begin + currentdict 2 index known{ + 1 index load dup length dict copy begin + currentdict/TransferFunction known{ + /TransferFunction load + }{ + currenttransfer + }ifelse + addprocs/TransferFunction xdf + currentdict end def + currentdict end sethalftone + }{ + currentdict/TransferFunction known{ + /TransferFunction load + }{ + currenttransfer + }ifelse + addprocs/TransferFunction xdf + currentdict end sethalftone + pop + }ifelse +}def +/clonearray +{ + dup xcheck exch + dup length array exch + Adobe_AGM_Core/AGMCORE_tmp -1 ddf + { + Adobe_AGM_Core/AGMCORE_tmp 2 copy get 1 add ddf + dup type/dicttype eq + { + Adobe_AGM_Core/AGMCORE_tmp get + exch + clonedict + Adobe_AGM_Core/AGMCORE_tmp 4 -1 roll ddf + }if + dup type/arraytype eq + { + Adobe_AGM_Core/AGMCORE_tmp get exch + clonearray + Adobe_AGM_Core/AGMCORE_tmp 4 -1 roll ddf + }if + exch dup + Adobe_AGM_Core/AGMCORE_tmp get 4 -1 roll put + }forall + exch{cvx}if +}bdf +/clonedict +{ + dup length dict + begin + { + dup type/dicttype eq + {clonedict}if + dup type/arraytype eq + {clonearray}if + def + }forall + currentdict + end +}bdf +/DeviceN_PS2 +{ + /currentcolorspace AGMCORE_gget 0 get/DeviceN eq level3 not and +}bdf +/Indexed_DeviceN +{ + /indexed_colorspace_dict AGMCORE_gget dup null ne{ + dup/CSDBase known{ + /CSDBase get/CSD get_res/Names known + }{ + pop false + }ifelse + }{ + pop false + }ifelse +}bdf +/DeviceN_NoneName +{ + /Names where{ + pop + false Names + { + (None)eq or + }forall + }{ + false + }ifelse +}bdf +/DeviceN_PS2_inRip_seps +{ + /AGMCORE_in_rip_sep where + { + pop dup type dup/arraytype eq exch/packedarraytype eq or + { + dup 0 get/DeviceN eq level3 not and AGMCORE_in_rip_sep and + { + /currentcolorspace exch AGMCORE_gput + false + }{ + true + }ifelse + }{ + true + }ifelse + }{ + true + }ifelse +}bdf +/base_colorspace_type +{ + dup type/arraytype eq{0 get}if +}bdf +/currentdistillerparams where{pop 
currentdistillerparams/CoreDistVersion get 5000 lt}{true}ifelse +{ + /pdfmark_5{cleartomark}bind def +}{ + /pdfmark_5{pdfmark}bind def +}ifelse +/ReadBypdfmark_5 +{ + currentfile exch 0 exch/SubFileDecode filter + /currentdistillerparams where + {pop currentdistillerparams/CoreDistVersion get 5000 lt}{true}ifelse + {flushfile cleartomark} + {/PUT pdfmark}ifelse +}bdf +/xpdfm +{ + { + dup 0 get/Label eq + { + aload length[exch 1 add 1 roll/PAGELABEL + }{ + aload pop + [{ThisPage}<<5 -2 roll>>/PUT + }ifelse + pdfmark_5 + }forall +}bdf +/ds{ + Adobe_AGM_Utils begin +}bdf +/dt{ + currentdict Adobe_AGM_Utils eq{ + end + }if +}bdf +systemdict/setpacking known +{setpacking}if +%%EndResource +%%BeginResource: procset Adobe_AGM_Core 2.0 0 +%%Version: 2.0 0 +%%Copyright: Copyright(C)1997-2007 Adobe Systems, Inc. All Rights Reserved. +systemdict/setpacking known +{ + currentpacking + true setpacking +}if +userdict/Adobe_AGM_Core 209 dict dup begin put +/Adobe_AGM_Core_Id/Adobe_AGM_Core_2.0_0 def +/AGMCORE_str256 256 string def +/AGMCORE_save nd +/AGMCORE_graphicsave nd +/AGMCORE_c 0 def +/AGMCORE_m 0 def +/AGMCORE_y 0 def +/AGMCORE_k 0 def +/AGMCORE_cmykbuf 4 array def +/AGMCORE_screen[currentscreen]cvx def +/AGMCORE_tmp 0 def +/AGMCORE_&setgray nd +/AGMCORE_&setcolor nd +/AGMCORE_&setcolorspace nd +/AGMCORE_&setcmykcolor nd +/AGMCORE_cyan_plate nd +/AGMCORE_magenta_plate nd +/AGMCORE_yellow_plate nd +/AGMCORE_black_plate nd +/AGMCORE_plate_ndx nd +/AGMCORE_get_ink_data nd +/AGMCORE_is_cmyk_sep nd +/AGMCORE_host_sep nd +/AGMCORE_avoid_L2_sep_space nd +/AGMCORE_distilling nd +/AGMCORE_composite_job nd +/AGMCORE_producing_seps nd +/AGMCORE_ps_level -1 def +/AGMCORE_ps_version -1 def +/AGMCORE_environ_ok nd +/AGMCORE_CSD_cache 0 dict def +/AGMCORE_currentoverprint false def +/AGMCORE_deltaX nd +/AGMCORE_deltaY nd +/AGMCORE_name nd +/AGMCORE_sep_special nd +/AGMCORE_err_strings 4 dict def +/AGMCORE_cur_err nd +/AGMCORE_current_spot_alias false def +/AGMCORE_inverting false def 
+/AGMCORE_feature_dictCount nd +/AGMCORE_feature_opCount nd +/AGMCORE_feature_ctm nd +/AGMCORE_ConvertToProcess false def +/AGMCORE_Default_CTM matrix def +/AGMCORE_Default_PageSize nd +/AGMCORE_Default_flatness nd +/AGMCORE_currentbg nd +/AGMCORE_currentucr nd +/AGMCORE_pattern_paint_type 0 def +/knockout_unitsq nd +currentglobal true setglobal +[/CSA/Gradient/Procedure] +{ + /Generic/Category findresource dup length dict copy/Category defineresource pop +}forall +setglobal +/AGMCORE_key_known +{ + where{ + /Adobe_AGM_Core_Id known + }{ + false + }ifelse +}ndf +/flushinput +{ + save + 2 dict begin + /CompareBuffer 3 -1 roll def + /readbuffer 256 string def + mark + { + currentfile readbuffer{readline}stopped + {cleartomark mark} + { + not + {pop exit} + if + CompareBuffer eq + {exit} + if + }ifelse + }loop + cleartomark + end + restore +}bdf +/getspotfunction +{ + AGMCORE_screen exch pop exch pop + dup type/dicttype eq{ + dup/HalftoneType get 1 eq{ + /SpotFunction get + }{ + dup/HalftoneType get 2 eq{ + /GraySpotFunction get + }{ + pop + { + abs exch abs 2 copy add 1 gt{ + 1 sub dup mul exch 1 sub dup mul add 1 sub + }{ + dup mul exch dup mul add 1 exch sub + }ifelse + }bind + }ifelse + }ifelse + }if +}def +/np +{newpath}bdf +/clp_npth +{clip np}def +/eoclp_npth +{eoclip np}def +/npth_clp +{np clip}def +/graphic_setup +{ + /AGMCORE_graphicsave save store + concat + 0 setgray + 0 setlinecap + 0 setlinejoin + 1 setlinewidth + []0 setdash + 10 setmiterlimit + np + false setoverprint + false setstrokeadjust + //Adobe_AGM_Core/spot_alias gx + /Adobe_AGM_Image where{ + pop + Adobe_AGM_Image/spot_alias 2 copy known{ + gx + }{ + pop pop + }ifelse + }if + /sep_colorspace_dict null AGMCORE_gput + 100 dict begin + /dictstackcount countdictstack def + /showpage{}def + mark +}def +/graphic_cleanup +{ + cleartomark + dictstackcount 1 countdictstack 1 sub{end}for + end + AGMCORE_graphicsave restore +}def +/compose_error_msg +{ + grestoreall initgraphics + /Helvetica findfont 10 
scalefont setfont + /AGMCORE_deltaY 100 def + /AGMCORE_deltaX 310 def + clippath pathbbox np pop pop 36 add exch 36 add exch moveto + 0 AGMCORE_deltaY rlineto AGMCORE_deltaX 0 rlineto + 0 AGMCORE_deltaY neg rlineto AGMCORE_deltaX neg 0 rlineto closepath + 0 AGMCORE_&setgray + gsave 1 AGMCORE_&setgray fill grestore + 1 setlinewidth gsave stroke grestore + currentpoint AGMCORE_deltaY 15 sub add exch 8 add exch moveto + /AGMCORE_deltaY 12 def + /AGMCORE_tmp 0 def + AGMCORE_err_strings exch get + { + dup 32 eq + { + pop + AGMCORE_str256 0 AGMCORE_tmp getinterval + stringwidth pop currentpoint pop add AGMCORE_deltaX 28 add gt + { + currentpoint AGMCORE_deltaY sub exch pop + clippath pathbbox pop pop pop 44 add exch moveto + }if + AGMCORE_str256 0 AGMCORE_tmp getinterval show( )show + 0 1 AGMCORE_str256 length 1 sub + { + AGMCORE_str256 exch 0 put + }for + /AGMCORE_tmp 0 def + }{ + AGMCORE_str256 exch AGMCORE_tmp xpt + /AGMCORE_tmp AGMCORE_tmp 1 add def + }ifelse + }forall +}bdf +/AGMCORE_CMYKDeviceNColorspaces[ + [/Separation/None/DeviceCMYK{0 0 0}] + [/Separation(Black)/DeviceCMYK{0 0 0 4 -1 roll}bind] + [/Separation(Yellow)/DeviceCMYK{0 0 3 -1 roll 0}bind] + [/DeviceN[(Yellow)(Black)]/DeviceCMYK{0 0 4 2 roll}bind] + [/Separation(Magenta)/DeviceCMYK{0 exch 0 0}bind] + [/DeviceN[(Magenta)(Black)]/DeviceCMYK{0 3 1 roll 0 exch}bind] + [/DeviceN[(Magenta)(Yellow)]/DeviceCMYK{0 3 1 roll 0}bind] + [/DeviceN[(Magenta)(Yellow)(Black)]/DeviceCMYK{0 4 1 roll}bind] + [/Separation(Cyan)/DeviceCMYK{0 0 0}] + [/DeviceN[(Cyan)(Black)]/DeviceCMYK{0 0 3 -1 roll}bind] + [/DeviceN[(Cyan)(Yellow)]/DeviceCMYK{0 exch 0}bind] + [/DeviceN[(Cyan)(Yellow)(Black)]/DeviceCMYK{0 3 1 roll}bind] + [/DeviceN[(Cyan)(Magenta)]/DeviceCMYK{0 0}] + [/DeviceN[(Cyan)(Magenta)(Black)]/DeviceCMYK{0 exch}bind] + [/DeviceN[(Cyan)(Magenta)(Yellow)]/DeviceCMYK{0}] + [/DeviceCMYK] +]def +/ds{ + Adobe_AGM_Core begin + /currentdistillerparams where + { + pop currentdistillerparams/CoreDistVersion get 5000 lt + 
{<>setdistillerparams}if + }if + /AGMCORE_ps_version xdf + /AGMCORE_ps_level xdf + errordict/AGM_handleerror known not{ + errordict/AGM_handleerror errordict/handleerror get put + errordict/handleerror{ + Adobe_AGM_Core begin + $error/newerror get AGMCORE_cur_err null ne and{ + $error/newerror false put + AGMCORE_cur_err compose_error_msg + }if + $error/newerror true put + end + errordict/AGM_handleerror get exec + }bind put + }if + /AGMCORE_environ_ok + ps_level AGMCORE_ps_level ge + ps_version AGMCORE_ps_version ge and + AGMCORE_ps_level -1 eq or + def + AGMCORE_environ_ok not + {/AGMCORE_cur_err/AGMCORE_bad_environ def}if + /AGMCORE_&setgray systemdict/setgray get def + level2{ + /AGMCORE_&setcolor systemdict/setcolor get def + /AGMCORE_&setcolorspace systemdict/setcolorspace get def + }if + /AGMCORE_currentbg currentblackgeneration def + /AGMCORE_currentucr currentundercolorremoval def + /AGMCORE_Default_flatness currentflat def + /AGMCORE_distilling + /product where{ + pop systemdict/setdistillerparams known product(Adobe PostScript Parser)ne and + }{ + false + }ifelse + def + /AGMCORE_GSTATE AGMCORE_key_known not{ + /AGMCORE_GSTATE 21 dict def + /AGMCORE_tmpmatrix matrix def + /AGMCORE_gstack 32 array def + /AGMCORE_gstackptr 0 def + /AGMCORE_gstacksaveptr 0 def + /AGMCORE_gstackframekeys 14 def + /AGMCORE_&gsave/gsave ldf + /AGMCORE_&grestore/grestore ldf + /AGMCORE_&grestoreall/grestoreall ldf + /AGMCORE_&save/save ldf + /AGMCORE_&setoverprint/setoverprint ldf + /AGMCORE_gdictcopy{ + begin + {def}forall + end + }def + /AGMCORE_gput{ + AGMCORE_gstack AGMCORE_gstackptr get + 3 1 roll + put + }def + /AGMCORE_gget{ + AGMCORE_gstack AGMCORE_gstackptr get + exch + get + }def + /gsave{ + AGMCORE_&gsave + AGMCORE_gstack AGMCORE_gstackptr get + AGMCORE_gstackptr 1 add + dup 32 ge{limitcheck}if + /AGMCORE_gstackptr exch store + AGMCORE_gstack AGMCORE_gstackptr get + AGMCORE_gdictcopy + }def + /grestore{ + AGMCORE_&grestore + AGMCORE_gstackptr 1 sub + dup 
AGMCORE_gstacksaveptr lt{1 add}if + dup AGMCORE_gstack exch get dup/AGMCORE_currentoverprint known + {/AGMCORE_currentoverprint get setoverprint}{pop}ifelse + /AGMCORE_gstackptr exch store + }def + /grestoreall{ + AGMCORE_&grestoreall + /AGMCORE_gstackptr AGMCORE_gstacksaveptr store + }def + /save{ + AGMCORE_&save + AGMCORE_gstack AGMCORE_gstackptr get + AGMCORE_gstackptr 1 add + dup 32 ge{limitcheck}if + /AGMCORE_gstackptr exch store + /AGMCORE_gstacksaveptr AGMCORE_gstackptr store + AGMCORE_gstack AGMCORE_gstackptr get + AGMCORE_gdictcopy + }def + /setoverprint{ + dup/AGMCORE_currentoverprint exch AGMCORE_gput AGMCORE_&setoverprint + }def + 0 1 AGMCORE_gstack length 1 sub{ + AGMCORE_gstack exch AGMCORE_gstackframekeys dict put + }for + }if + level3/AGMCORE_&sysshfill AGMCORE_key_known not and + { + /AGMCORE_&sysshfill systemdict/shfill get def + /AGMCORE_&sysmakepattern systemdict/makepattern get def + /AGMCORE_&usrmakepattern/makepattern load def + }if + /currentcmykcolor[0 0 0 0]AGMCORE_gput + /currentstrokeadjust false AGMCORE_gput + /currentcolorspace[/DeviceGray]AGMCORE_gput + /sep_tint 0 AGMCORE_gput + /devicen_tints[0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0]AGMCORE_gput + /sep_colorspace_dict null AGMCORE_gput + /devicen_colorspace_dict null AGMCORE_gput + /indexed_colorspace_dict null AGMCORE_gput + /currentcolor_intent()AGMCORE_gput + /customcolor_tint 1 AGMCORE_gput + /absolute_colorimetric_crd null AGMCORE_gput + /relative_colorimetric_crd null AGMCORE_gput + /saturation_crd null AGMCORE_gput + /perceptual_crd null AGMCORE_gput + currentcolortransfer cvlit/AGMCore_gray_xfer xdf cvlit/AGMCore_b_xfer xdf + cvlit/AGMCore_g_xfer xdf cvlit/AGMCore_r_xfer xdf + << + /MaxPatternItem currentsystemparams/MaxPatternCache get + >> + setuserparams + end +}def +/ps +{ + /setcmykcolor where{ + pop + Adobe_AGM_Core/AGMCORE_&setcmykcolor/setcmykcolor load put + }if + Adobe_AGM_Core begin + /setcmykcolor + { + 4 copy AGMCORE_cmykbuf 
astore/currentcmykcolor exch AGMCORE_gput + 1 sub 4 1 roll + 3{ + 3 index add neg dup 0 lt{ + pop 0 + }if + 3 1 roll + }repeat + setrgbcolor pop + }ndf + /currentcmykcolor + { + /currentcmykcolor AGMCORE_gget aload pop + }ndf + /setoverprint + {pop}ndf + /currentoverprint + {false}ndf + /AGMCORE_cyan_plate 1 0 0 0 test_cmyk_color_plate def + /AGMCORE_magenta_plate 0 1 0 0 test_cmyk_color_plate def + /AGMCORE_yellow_plate 0 0 1 0 test_cmyk_color_plate def + /AGMCORE_black_plate 0 0 0 1 test_cmyk_color_plate def + /AGMCORE_plate_ndx + AGMCORE_cyan_plate{ + 0 + }{ + AGMCORE_magenta_plate{ + 1 + }{ + AGMCORE_yellow_plate{ + 2 + }{ + AGMCORE_black_plate{ + 3 + }{ + 4 + }ifelse + }ifelse + }ifelse + }ifelse + def + /AGMCORE_have_reported_unsupported_color_space false def + /AGMCORE_report_unsupported_color_space + { + AGMCORE_have_reported_unsupported_color_space false eq + { + (Warning: Job contains content that cannot be separated with on-host methods. This content appears on the black plate, and knocks out all other plates.)== + Adobe_AGM_Core/AGMCORE_have_reported_unsupported_color_space true ddf + }if + }def + /AGMCORE_composite_job + AGMCORE_cyan_plate AGMCORE_magenta_plate and AGMCORE_yellow_plate and AGMCORE_black_plate and def + /AGMCORE_in_rip_sep + /AGMCORE_in_rip_sep where{ + pop AGMCORE_in_rip_sep + }{ + AGMCORE_distilling + { + false + }{ + userdict/Adobe_AGM_OnHost_Seps known{ + false + }{ + level2{ + currentpagedevice/Separations 2 copy known{ + get + }{ + pop pop false + }ifelse + }{ + false + }ifelse + }ifelse + }ifelse + }ifelse + def + /AGMCORE_producing_seps AGMCORE_composite_job not AGMCORE_in_rip_sep or def + /AGMCORE_host_sep AGMCORE_producing_seps AGMCORE_in_rip_sep not and def + /AGM_preserve_spots + /AGM_preserve_spots where{ + pop AGM_preserve_spots + }{ + AGMCORE_distilling AGMCORE_producing_seps or + }ifelse + def + /AGM_is_distiller_preserving_spotimages + { + currentdistillerparams/PreserveOverprintSettings known + { + 
currentdistillerparams/PreserveOverprintSettings get + { + currentdistillerparams/ColorConversionStrategy known + { + currentdistillerparams/ColorConversionStrategy get + /sRGB ne + }{ + true + }ifelse + }{ + false + }ifelse + }{ + false + }ifelse + }def + /convert_spot_to_process where{pop}{ + /convert_spot_to_process + { + //Adobe_AGM_Core begin + dup map_alias{ + /Name get exch pop + }if + dup dup(None)eq exch(All)eq or + { + pop false + }{ + AGMCORE_host_sep + { + gsave + 1 0 0 0 setcmykcolor currentgray 1 exch sub + 0 1 0 0 setcmykcolor currentgray 1 exch sub + 0 0 1 0 setcmykcolor currentgray 1 exch sub + 0 0 0 1 setcmykcolor currentgray 1 exch sub + add add add 0 eq + { + pop false + }{ + false setoverprint + current_spot_alias false set_spot_alias + 1 1 1 1 6 -1 roll findcmykcustomcolor 1 setcustomcolor + set_spot_alias + currentgray 1 ne + }ifelse + grestore + }{ + AGMCORE_distilling + { + pop AGM_is_distiller_preserving_spotimages not + }{ + //Adobe_AGM_Core/AGMCORE_name xddf + false + //Adobe_AGM_Core/AGMCORE_pattern_paint_type get 0 eq + AGMUTIL_cpd/OverrideSeparations known and + { + AGMUTIL_cpd/OverrideSeparations get + { + /HqnSpots/ProcSet resourcestatus + { + pop pop pop true + }if + }if + }if + { + AGMCORE_name/HqnSpots/ProcSet findresource/TestSpot gx not + }{ + gsave + [/Separation AGMCORE_name/DeviceGray{}]AGMCORE_&setcolorspace + false + AGMUTIL_cpd/SeparationColorNames 2 copy known + { + get + {AGMCORE_name eq or}forall + not + }{ + pop pop pop true + }ifelse + grestore + }ifelse + }ifelse + }ifelse + }ifelse + end + }def + }ifelse + /convert_to_process where{pop}{ + /convert_to_process + { + dup length 0 eq + { + pop false + }{ + AGMCORE_host_sep + { + dup true exch + { + dup(Cyan)eq exch + dup(Magenta)eq 3 -1 roll or exch + dup(Yellow)eq 3 -1 roll or exch + dup(Black)eq 3 -1 roll or + {pop} + {convert_spot_to_process and}ifelse + } + forall + { + true exch + { + dup(Cyan)eq exch + dup(Magenta)eq 3 -1 roll or exch + dup(Yellow)eq 3 -1 roll 
or exch + (Black)eq or and + }forall + not + }{pop false}ifelse + }{ + false exch + { + /PhotoshopDuotoneList where{pop false}{true}ifelse + { + dup(Cyan)eq exch + dup(Magenta)eq 3 -1 roll or exch + dup(Yellow)eq 3 -1 roll or exch + dup(Black)eq 3 -1 roll or + {pop} + {convert_spot_to_process or}ifelse + } + { + convert_spot_to_process or + } + ifelse + } + forall + }ifelse + }ifelse + }def + }ifelse + /AGMCORE_avoid_L2_sep_space + version cvr 2012 lt + level2 and + AGMCORE_producing_seps not and + def + /AGMCORE_is_cmyk_sep + AGMCORE_cyan_plate AGMCORE_magenta_plate or AGMCORE_yellow_plate or AGMCORE_black_plate or + def + /AGM_avoid_0_cmyk where{ + pop AGM_avoid_0_cmyk + }{ + AGM_preserve_spots + userdict/Adobe_AGM_OnHost_Seps known + userdict/Adobe_AGM_InRip_Seps known or + not and + }ifelse + { + /setcmykcolor[ + { + 4 copy add add add 0 eq currentoverprint and{ + pop 0.0005 + }if + }/exec cvx + /AGMCORE_&setcmykcolor load dup type/operatortype ne{ + /exec cvx + }if + ]cvx def + }if + /AGMCORE_IsSeparationAProcessColor + { + dup(Cyan)eq exch dup(Magenta)eq exch dup(Yellow)eq exch(Black)eq or or or + }def + AGMCORE_host_sep{ + /setcolortransfer + { + AGMCORE_cyan_plate{ + pop pop pop + }{ + AGMCORE_magenta_plate{ + 4 3 roll pop pop pop + }{ + AGMCORE_yellow_plate{ + 4 2 roll pop pop pop + }{ + 4 1 roll pop pop pop + }ifelse + }ifelse + }ifelse + settransfer + } + def + /AGMCORE_get_ink_data + AGMCORE_cyan_plate{ + {pop pop pop} + }{ + AGMCORE_magenta_plate{ + {4 3 roll pop pop pop} + }{ + AGMCORE_yellow_plate{ + {4 2 roll pop pop pop} + }{ + {4 1 roll pop pop pop} + }ifelse + }ifelse + }ifelse + def + /AGMCORE_RemoveProcessColorNames + { + 1 dict begin + /filtername + { + dup/Cyan eq 1 index(Cyan)eq or + {pop(_cyan_)}if + dup/Magenta eq 1 index(Magenta)eq or + {pop(_magenta_)}if + dup/Yellow eq 1 index(Yellow)eq or + {pop(_yellow_)}if + dup/Black eq 1 index(Black)eq or + {pop(_black_)}if + }def + dup type/arraytype eq + {[exch{filtername}forall]} + 
{filtername}ifelse + end + }def + level3{ + /AGMCORE_IsCurrentColor + { + dup AGMCORE_IsSeparationAProcessColor + { + AGMCORE_plate_ndx 0 eq + {dup(Cyan)eq exch/Cyan eq or}if + AGMCORE_plate_ndx 1 eq + {dup(Magenta)eq exch/Magenta eq or}if + AGMCORE_plate_ndx 2 eq + {dup(Yellow)eq exch/Yellow eq or}if + AGMCORE_plate_ndx 3 eq + {dup(Black)eq exch/Black eq or}if + AGMCORE_plate_ndx 4 eq + {pop false}if + }{ + gsave + false setoverprint + current_spot_alias false set_spot_alias + 1 1 1 1 6 -1 roll findcmykcustomcolor 1 setcustomcolor + set_spot_alias + currentgray 1 ne + grestore + }ifelse + }def + /AGMCORE_filter_functiondatasource + { + 5 dict begin + /data_in xdf + data_in type/stringtype eq + { + /ncomp xdf + /comp xdf + /string_out data_in length ncomp idiv string def + 0 ncomp data_in length 1 sub + { + string_out exch dup ncomp idiv exch data_in exch ncomp getinterval comp get 255 exch sub put + }for + string_out + }{ + string/string_in xdf + /string_out 1 string def + /component xdf + [ + data_in string_in/readstring cvx + [component/get cvx 255/exch cvx/sub cvx string_out/exch cvx 0/exch cvx/put cvx string_out]cvx + [/pop cvx()]cvx/ifelse cvx + ]cvx/ReusableStreamDecode filter + }ifelse + end + }def + /AGMCORE_separateShadingFunction + { + 2 dict begin + /paint? xdf + /channel xdf + dup type/dicttype eq + { + begin + FunctionType 0 eq + { + /DataSource channel Range length 2 idiv DataSource AGMCORE_filter_functiondatasource def + currentdict/Decode known + {/Decode Decode channel 2 mul 2 getinterval def}if + paint? not + {/Decode[1 1]def}if + }if + FunctionType 2 eq + { + paint? + { + /C0[C0 channel get 1 exch sub]def + /C1[C1 channel get 1 exch sub]def + }{ + /C0[1]def + /C1[1]def + }ifelse + }if + FunctionType 3 eq + { + /Functions[Functions{channel paint? AGMCORE_separateShadingFunction}forall]def + }if + currentdict/Range known + {/Range[0 1]def}if + currentdict + end}{ + channel get 0 paint? 
AGMCORE_separateShadingFunction + }ifelse + end + }def + /AGMCORE_separateShading + { + 3 -1 roll begin + currentdict/Function known + { + currentdict/Background known + {[1 index{Background 3 index get 1 exch sub}{1}ifelse]/Background xdf}if + Function 3 1 roll AGMCORE_separateShadingFunction/Function xdf + /ColorSpace[/DeviceGray]def + }{ + ColorSpace dup type/arraytype eq{0 get}if/DeviceCMYK eq + { + /ColorSpace[/DeviceN[/_cyan_/_magenta_/_yellow_/_black_]/DeviceCMYK{}]def + }{ + ColorSpace dup 1 get AGMCORE_RemoveProcessColorNames 1 exch put + }ifelse + ColorSpace 0 get/Separation eq + { + { + [1/exch cvx/sub cvx]cvx + }{ + [/pop cvx 1]cvx + }ifelse + ColorSpace 3 3 -1 roll put + pop + }{ + { + [exch ColorSpace 1 get length 1 sub exch sub/index cvx 1/exch cvx/sub cvx ColorSpace 1 get length 1 add 1/roll cvx ColorSpace 1 get length{/pop cvx}repeat]cvx + }{ + pop[ColorSpace 1 get length{/pop cvx}repeat cvx 1]cvx + }ifelse + ColorSpace 3 3 -1 roll bind put + }ifelse + ColorSpace 2/DeviceGray put + }ifelse + end + }def + /AGMCORE_separateShadingDict + { + dup/ColorSpace get + dup type/arraytype ne + {[exch]}if + dup 0 get/DeviceCMYK eq + { + exch begin + currentdict + AGMCORE_cyan_plate + {0 true}if + AGMCORE_magenta_plate + {1 true}if + AGMCORE_yellow_plate + {2 true}if + AGMCORE_black_plate + {3 true}if + AGMCORE_plate_ndx 4 eq + {0 false}if + dup not currentoverprint and + {/AGMCORE_ignoreshade true def}if + AGMCORE_separateShading + currentdict + end exch + }if + dup 0 get/Separation eq + { + exch begin + ColorSpace 1 get dup/None ne exch/All ne and + { + ColorSpace 1 get AGMCORE_IsCurrentColor AGMCORE_plate_ndx 4 lt and ColorSpace 1 get AGMCORE_IsSeparationAProcessColor not and + { + ColorSpace 2 get dup type/arraytype eq{0 get}if/DeviceCMYK eq + { + /ColorSpace + [ + /Separation + ColorSpace 1 get + /DeviceGray + [ + ColorSpace 3 get/exec cvx + 4 AGMCORE_plate_ndx sub -1/roll cvx + 4 1/roll cvx + 3[/pop cvx]cvx/repeat cvx + 1/exch cvx/sub cvx + ]cvx + ]def + 
}{ + AGMCORE_report_unsupported_color_space + AGMCORE_black_plate not + { + currentdict 0 false AGMCORE_separateShading + }if + }ifelse + }{ + currentdict ColorSpace 1 get AGMCORE_IsCurrentColor + 0 exch + dup not currentoverprint and + {/AGMCORE_ignoreshade true def}if + AGMCORE_separateShading + }ifelse + }if + currentdict + end exch + }if + dup 0 get/DeviceN eq + { + exch begin + ColorSpace 1 get convert_to_process + { + ColorSpace 2 get dup type/arraytype eq{0 get}if/DeviceCMYK eq + { + /ColorSpace + [ + /DeviceN + ColorSpace 1 get + /DeviceGray + [ + ColorSpace 3 get/exec cvx + 4 AGMCORE_plate_ndx sub -1/roll cvx + 4 1/roll cvx + 3[/pop cvx]cvx/repeat cvx + 1/exch cvx/sub cvx + ]cvx + ]def + }{ + AGMCORE_report_unsupported_color_space + AGMCORE_black_plate not + { + currentdict 0 false AGMCORE_separateShading + /ColorSpace[/DeviceGray]def + }if + }ifelse + }{ + currentdict + false -1 ColorSpace 1 get + { + AGMCORE_IsCurrentColor + { + 1 add + exch pop true exch exit + }if + 1 add + }forall + exch + dup not currentoverprint and + {/AGMCORE_ignoreshade true def}if + AGMCORE_separateShading + }ifelse + currentdict + end exch + }if + dup 0 get dup/DeviceCMYK eq exch dup/Separation eq exch/DeviceN eq or or not + { + exch begin + ColorSpace dup type/arraytype eq + {0 get}if + /DeviceGray ne + { + AGMCORE_report_unsupported_color_space + AGMCORE_black_plate not + { + ColorSpace 0 get/CIEBasedA eq + { + /ColorSpace[/Separation/_ciebaseda_/DeviceGray{}]def + }if + ColorSpace 0 get dup/CIEBasedABC eq exch dup/CIEBasedDEF eq exch/DeviceRGB eq or or + { + /ColorSpace[/DeviceN[/_red_/_green_/_blue_]/DeviceRGB{}]def + }if + ColorSpace 0 get/CIEBasedDEFG eq + { + /ColorSpace[/DeviceN[/_cyan_/_magenta_/_yellow_/_black_]/DeviceCMYK{}]def + }if + currentdict 0 false AGMCORE_separateShading + }if + }if + currentdict + end exch + }if + pop + dup/AGMCORE_ignoreshade known + { + begin + /ColorSpace[/Separation(None)/DeviceGray{}]def + currentdict end + }if + }def + /shfill + { + 
AGMCORE_separateShadingDict + dup/AGMCORE_ignoreshade known + {pop} + {AGMCORE_&sysshfill}ifelse + }def + /makepattern + { + exch + dup/PatternType get 2 eq + { + clonedict + begin + /Shading Shading AGMCORE_separateShadingDict def + Shading/AGMCORE_ignoreshade known + currentdict end exch + {pop<>}if + exch AGMCORE_&sysmakepattern + }{ + exch AGMCORE_&usrmakepattern + }ifelse + }def + }if + }if + AGMCORE_in_rip_sep{ + /setcustomcolor + { + exch aload pop + dup 7 1 roll inRip_spot_has_ink not { + 4{4 index mul 4 1 roll} + repeat + /DeviceCMYK setcolorspace + 6 -2 roll pop pop + }{ + //Adobe_AGM_Core begin + /AGMCORE_k xdf/AGMCORE_y xdf/AGMCORE_m xdf/AGMCORE_c xdf + end + [/Separation 4 -1 roll/DeviceCMYK + {dup AGMCORE_c mul exch dup AGMCORE_m mul exch dup AGMCORE_y mul exch AGMCORE_k mul} + ] + setcolorspace + }ifelse + setcolor + }ndf + /setseparationgray + { + [/Separation(All)/DeviceGray{}]setcolorspace_opt + 1 exch sub setcolor + }ndf + }{ + /setseparationgray + { + AGMCORE_&setgray + }ndf + }ifelse + /findcmykcustomcolor + { + 5 makereadonlyarray + }ndf + /setcustomcolor + { + exch aload pop pop + 4{4 index mul 4 1 roll}repeat + setcmykcolor pop + }ndf + /has_color + /colorimage where{ + AGMCORE_producing_seps{ + pop true + }{ + systemdict eq + }ifelse + }{ + false + }ifelse + def + /map_index + { + 1 index mul exch getinterval{255 div}forall + }bdf + /map_indexed_devn + { + Lookup Names length 3 -1 roll cvi map_index + }bdf + /n_color_components + { + base_colorspace_type + dup/DeviceGray eq{ + pop 1 + }{ + /DeviceCMYK eq{ + 4 + }{ + 3 + }ifelse + }ifelse + }bdf + level2{ + /mo/moveto ldf + /li/lineto ldf + /cv/curveto ldf + /knockout_unitsq + { + 1 setgray + 0 0 1 1 rectfill + }def + level2/setcolorspace AGMCORE_key_known not and{ + /AGMCORE_&&&setcolorspace/setcolorspace ldf + /AGMCORE_ReplaceMappedColor + { + dup type dup/arraytype eq exch/packedarraytype eq or + { + /AGMCORE_SpotAliasAry2 where{ + begin + dup 0 get dup/Separation eq + { + pop + dup 
length array copy + dup dup 1 get + current_spot_alias + { + dup map_alias + { + false set_spot_alias + dup 1 exch setsepcolorspace + true set_spot_alias + begin + /sep_colorspace_dict currentdict AGMCORE_gput + pop pop pop + [ + /Separation Name + CSA map_csa + MappedCSA + /sep_colorspace_proc load + ] + dup Name + end + }if + }if + map_reserved_ink_name 1 xpt + }{ + /DeviceN eq + { + dup length array copy + dup dup 1 get[ + exch{ + current_spot_alias{ + dup map_alias{ + /Name get exch pop + }if + }if + map_reserved_ink_name + }forall + ]1 xpt + }if + }ifelse + end + }if + }if + }def + /setcolorspace + { + dup type dup/arraytype eq exch/packedarraytype eq or + { + dup 0 get/Indexed eq + { + AGMCORE_distilling + { + /PhotoshopDuotoneList where + { + pop false + }{ + true + }ifelse + }{ + true + }ifelse + { + aload pop 3 -1 roll + AGMCORE_ReplaceMappedColor + 3 1 roll 4 array astore + }if + }{ + AGMCORE_ReplaceMappedColor + }ifelse + }if + DeviceN_PS2_inRip_seps{AGMCORE_&&&setcolorspace}if + }def + }if + }{ + /adj + { + currentstrokeadjust{ + transform + 0.25 sub round 0.25 add exch + 0.25 sub round 0.25 add exch + itransform + }if + }def + /mo{ + adj moveto + }def + /li{ + adj lineto + }def + /cv{ + 6 2 roll adj + 6 2 roll adj + 6 2 roll adj curveto + }def + /knockout_unitsq + { + 1 setgray + 8 8 1[8 0 0 8 0 0]{}image + }def + /currentstrokeadjust{ + /currentstrokeadjust AGMCORE_gget + }def + /setstrokeadjust{ + /currentstrokeadjust exch AGMCORE_gput + }def + /setcolorspace + { + /currentcolorspace exch AGMCORE_gput + }def + /currentcolorspace + { + /currentcolorspace AGMCORE_gget + }def + /setcolor_devicecolor + { + base_colorspace_type + dup/DeviceGray eq{ + pop setgray + }{ + /DeviceCMYK eq{ + setcmykcolor + }{ + setrgbcolor + }ifelse + }ifelse + }def + /setcolor + { + currentcolorspace 0 get + dup/DeviceGray ne{ + dup/DeviceCMYK ne{ + dup/DeviceRGB ne{ + dup/Separation eq{ + pop + currentcolorspace 3 gx + currentcolorspace 2 get + }{ + dup/Indexed eq{ + pop + 
currentcolorspace 3 get dup type/stringtype eq{ + currentcolorspace 1 get n_color_components + 3 -1 roll map_index + }{ + exec + }ifelse + currentcolorspace 1 get + }{ + /AGMCORE_cur_err/AGMCORE_invalid_color_space def + AGMCORE_invalid_color_space + }ifelse + }ifelse + }if + }if + }if + setcolor_devicecolor + }def + }ifelse + /sop/setoverprint ldf + /lw/setlinewidth ldf + /lc/setlinecap ldf + /lj/setlinejoin ldf + /ml/setmiterlimit ldf + /dsh/setdash ldf + /sadj/setstrokeadjust ldf + /gry/setgray ldf + /rgb/setrgbcolor ldf + /cmyk[ + /currentcolorspace[/DeviceCMYK]/AGMCORE_gput cvx + /setcmykcolor load dup type/operatortype ne{/exec cvx}if + ]cvx bdf + level3 AGMCORE_host_sep not and{ + /nzopmsc{ + 6 dict begin + /kk exch def + /yy exch def + /mm exch def + /cc exch def + /sum 0 def + cc 0 ne{/sum sum 2#1000 or def cc}if + mm 0 ne{/sum sum 2#0100 or def mm}if + yy 0 ne{/sum sum 2#0010 or def yy}if + kk 0 ne{/sum sum 2#0001 or def kk}if + AGMCORE_CMYKDeviceNColorspaces sum get setcolorspace + sum 0 eq{0}if + end + setcolor + }bdf + }{ + /nzopmsc/cmyk ldf + }ifelse + /sep/setsepcolor ldf + /devn/setdevicencolor ldf + /idx/setindexedcolor ldf + /colr/setcolor ldf + /csacrd/set_csa_crd ldf + /sepcs/setsepcolorspace ldf + /devncs/setdevicencolorspace ldf + /idxcs/setindexedcolorspace ldf + /cp/closepath ldf + /clp/clp_npth ldf + /eclp/eoclp_npth ldf + /f/fill ldf + /ef/eofill ldf + /@/stroke ldf + /nclp/npth_clp ldf + /gset/graphic_setup ldf + /gcln/graphic_cleanup ldf + /ct/concat ldf + /cf/currentfile ldf + /fl/filter ldf + /rs/readstring ldf + /AGMCORE_def_ht currenthalftone def + /clonedict Adobe_AGM_Utils begin/clonedict load end def + /clonearray Adobe_AGM_Utils begin/clonearray load end def + currentdict{ + dup xcheck 1 index type dup/arraytype eq exch/packedarraytype eq or and{ + bind + }if + def + }forall + /getrampcolor + { + /indx exch def + 0 1 NumComp 1 sub + { + dup + Samples exch get + dup type/stringtype eq{indx get}if + exch + Scaling exch get aload 
pop + 3 1 roll + mul add + }for + ColorSpaceFamily/Separation eq + {sep} + { + ColorSpaceFamily/DeviceN eq + {devn}{setcolor}ifelse + }ifelse + }bdf + /sssetbackground{ + aload pop + ColorSpaceFamily/Separation eq + {sep} + { + ColorSpaceFamily/DeviceN eq + {devn}{setcolor}ifelse + }ifelse + }bdf + /RadialShade + { + 40 dict begin + /ColorSpaceFamily xdf + /background xdf + /ext1 xdf + /ext0 xdf + /BBox xdf + /r2 xdf + /c2y xdf + /c2x xdf + /r1 xdf + /c1y xdf + /c1x xdf + /rampdict xdf + /setinkoverprint where{pop/setinkoverprint{pop}def}if + gsave + BBox length 0 gt + { + np + BBox 0 get BBox 1 get moveto + BBox 2 get BBox 0 get sub 0 rlineto + 0 BBox 3 get BBox 1 get sub rlineto + BBox 2 get BBox 0 get sub neg 0 rlineto + closepath + clip + np + }if + c1x c2x eq + { + c1y c2y lt{/theta 90 def}{/theta 270 def}ifelse + }{ + /slope c2y c1y sub c2x c1x sub div def + /theta slope 1 atan def + c2x c1x lt c2y c1y ge and{/theta theta 180 sub def}if + c2x c1x lt c2y c1y lt and{/theta theta 180 add def}if + }ifelse + gsave + clippath + c1x c1y translate + theta rotate + -90 rotate + {pathbbox}stopped + {0 0 0 0}if + /yMax xdf + /xMax xdf + /yMin xdf + /xMin xdf + grestore + xMax xMin eq yMax yMin eq or + { + grestore + end + }{ + /max{2 copy gt{pop}{exch pop}ifelse}bdf + /min{2 copy lt{pop}{exch pop}ifelse}bdf + rampdict begin + 40 dict begin + background length 0 gt{background sssetbackground gsave clippath fill grestore}if + gsave + c1x c1y translate + theta rotate + -90 rotate + /c2y c1x c2x sub dup mul c1y c2y sub dup mul add sqrt def + /c1y 0 def + /c1x 0 def + /c2x 0 def + ext0 + { + 0 getrampcolor + c2y r2 add r1 sub 0.0001 lt + { + c1x c1y r1 360 0 arcn + pathbbox + /aymax exch def + /axmax exch def + /aymin exch def + /axmin exch def + /bxMin xMin axmin min def + /byMin yMin aymin min def + /bxMax xMax axmax max def + /byMax yMax aymax max def + bxMin byMin moveto + bxMax byMin lineto + bxMax byMax lineto + bxMin byMax lineto + bxMin byMin lineto + eofill + }{ + 
c2y r1 add r2 le + { + c1x c1y r1 0 360 arc + fill + } + { + c2x c2y r2 0 360 arc fill + r1 r2 eq + { + /p1x r1 neg def + /p1y c1y def + /p2x r1 def + /p2y c1y def + p1x p1y moveto p2x p2y lineto p2x yMin lineto p1x yMin lineto + fill + }{ + /AA r2 r1 sub c2y div def + AA -1 eq + {/theta 89.99 def} + {/theta AA 1 AA dup mul sub sqrt div 1 atan def} + ifelse + /SS1 90 theta add dup sin exch cos div def + /p1x r1 SS1 SS1 mul SS1 SS1 mul 1 add div sqrt mul neg def + /p1y p1x SS1 div neg def + /SS2 90 theta sub dup sin exch cos div def + /p2x r1 SS2 SS2 mul SS2 SS2 mul 1 add div sqrt mul def + /p2y p2x SS2 div neg def + r1 r2 gt + { + /L1maxX p1x yMin p1y sub SS1 div add def + /L2maxX p2x yMin p2y sub SS2 div add def + }{ + /L1maxX 0 def + /L2maxX 0 def + }ifelse + p1x p1y moveto p2x p2y lineto L2maxX L2maxX p2x sub SS2 mul p2y add lineto + L1maxX L1maxX p1x sub SS1 mul p1y add lineto + fill + }ifelse + }ifelse + }ifelse + }if + c1x c2x sub dup mul + c1y c2y sub dup mul + add 0.5 exp + 0 dtransform + dup mul exch dup mul add 0.5 exp 72 div + 0 72 matrix defaultmatrix dtransform dup mul exch dup mul add sqrt + 72 0 matrix defaultmatrix dtransform dup mul exch dup mul add sqrt + 1 index 1 index lt{exch}if pop + /hires xdf + hires mul + /numpix xdf + /numsteps NumSamples def + /rampIndxInc 1 def + /subsampling false def + numpix 0 ne + { + NumSamples numpix div 0.5 gt + { + /numsteps numpix 2 div round cvi dup 1 le{pop 2}if def + /rampIndxInc NumSamples 1 sub numsteps div def + /subsampling true def + }if + }if + /xInc c2x c1x sub numsteps div def + /yInc c2y c1y sub numsteps div def + /rInc r2 r1 sub numsteps div def + /cx c1x def + /cy c1y def + /radius r1 def + np + xInc 0 eq yInc 0 eq rInc 0 eq and and + { + 0 getrampcolor + cx cy radius 0 360 arc + stroke + NumSamples 1 sub getrampcolor + cx cy radius 72 hires div add 0 360 arc + 0 setlinewidth + stroke + }{ + 0 + numsteps + { + dup + subsampling{round cvi}if + getrampcolor + cx cy radius 0 360 arc + /cx cx xInc add 
def + /cy cy yInc add def + /radius radius rInc add def + cx cy radius 360 0 arcn + eofill + rampIndxInc add + }repeat + pop + }ifelse + ext1 + { + c2y r2 add r1 lt + { + c2x c2y r2 0 360 arc + fill + }{ + c2y r1 add r2 sub 0.0001 le + { + c2x c2y r2 360 0 arcn + pathbbox + /aymax exch def + /axmax exch def + /aymin exch def + /axmin exch def + /bxMin xMin axmin min def + /byMin yMin aymin min def + /bxMax xMax axmax max def + /byMax yMax aymax max def + bxMin byMin moveto + bxMax byMin lineto + bxMax byMax lineto + bxMin byMax lineto + bxMin byMin lineto + eofill + }{ + c2x c2y r2 0 360 arc fill + r1 r2 eq + { + /p1x r2 neg def + /p1y c2y def + /p2x r2 def + /p2y c2y def + p1x p1y moveto p2x p2y lineto p2x yMax lineto p1x yMax lineto + fill + }{ + /AA r2 r1 sub c2y div def + AA -1 eq + {/theta 89.99 def} + {/theta AA 1 AA dup mul sub sqrt div 1 atan def} + ifelse + /SS1 90 theta add dup sin exch cos div def + /p1x r2 SS1 SS1 mul SS1 SS1 mul 1 add div sqrt mul neg def + /p1y c2y p1x SS1 div sub def + /SS2 90 theta sub dup sin exch cos div def + /p2x r2 SS2 SS2 mul SS2 SS2 mul 1 add div sqrt mul def + /p2y c2y p2x SS2 div sub def + r1 r2 lt + { + /L1maxX p1x yMax p1y sub SS1 div add def + /L2maxX p2x yMax p2y sub SS2 div add def + }{ + /L1maxX 0 def + /L2maxX 0 def + }ifelse + p1x p1y moveto p2x p2y lineto L2maxX L2maxX p2x sub SS2 mul p2y add lineto + L1maxX L1maxX p1x sub SS1 mul p1y add lineto + fill + }ifelse + }ifelse + }ifelse + }if + grestore + grestore + end + end + end + }ifelse + }bdf + /GenStrips + { + 40 dict begin + /ColorSpaceFamily xdf + /background xdf + /ext1 xdf + /ext0 xdf + /BBox xdf + /y2 xdf + /x2 xdf + /y1 xdf + /x1 xdf + /rampdict xdf + /setinkoverprint where{pop/setinkoverprint{pop}def}if + gsave + BBox length 0 gt + { + np + BBox 0 get BBox 1 get moveto + BBox 2 get BBox 0 get sub 0 rlineto + 0 BBox 3 get BBox 1 get sub rlineto + BBox 2 get BBox 0 get sub neg 0 rlineto + closepath + clip + np + }if + x1 x2 eq + { + y1 y2 lt{/theta 90 
def}{/theta 270 def}ifelse + }{ + /slope y2 y1 sub x2 x1 sub div def + /theta slope 1 atan def + x2 x1 lt y2 y1 ge and{/theta theta 180 sub def}if + x2 x1 lt y2 y1 lt and{/theta theta 180 add def}if + } + ifelse + gsave + clippath + x1 y1 translate + theta rotate + {pathbbox}stopped + {0 0 0 0}if + /yMax exch def + /xMax exch def + /yMin exch def + /xMin exch def + grestore + xMax xMin eq yMax yMin eq or + { + grestore + end + }{ + rampdict begin + 20 dict begin + background length 0 gt{background sssetbackground gsave clippath fill grestore}if + gsave + x1 y1 translate + theta rotate + /xStart 0 def + /xEnd x2 x1 sub dup mul y2 y1 sub dup mul add 0.5 exp def + /ySpan yMax yMin sub def + /numsteps NumSamples def + /rampIndxInc 1 def + /subsampling false def + xStart 0 transform + xEnd 0 transform + 3 -1 roll + sub dup mul + 3 1 roll + sub dup mul + add 0.5 exp 72 div + 0 72 matrix defaultmatrix dtransform dup mul exch dup mul add sqrt + 72 0 matrix defaultmatrix dtransform dup mul exch dup mul add sqrt + 1 index 1 index lt{exch}if pop + mul + /numpix xdf + numpix 0 ne + { + NumSamples numpix div 0.5 gt + { + /numsteps numpix 2 div round cvi dup 1 le{pop 2}if def + /rampIndxInc NumSamples 1 sub numsteps div def + /subsampling true def + }if + }if + ext0 + { + 0 getrampcolor + xMin xStart lt + { + xMin yMin xMin neg ySpan rectfill + }if + }if + /xInc xEnd xStart sub numsteps div def + /x xStart def + 0 + numsteps + { + dup + subsampling{round cvi}if + getrampcolor + x yMin xInc ySpan rectfill + /x x xInc add def + rampIndxInc add + }repeat + pop + ext1{ + xMax xEnd gt + { + xEnd yMin xMax xEnd sub ySpan rectfill + }if + }if + grestore + grestore + end + end + end + }ifelse + }bdf +}def +/pt +{ + end +}def +/dt{ +}def +/pgsv{ + //Adobe_AGM_Core/AGMCORE_save save put +}def +/pgrs{ + //Adobe_AGM_Core/AGMCORE_save get restore +}def +systemdict/findcolorrendering known{ + /findcolorrendering systemdict/findcolorrendering get def +}if +systemdict/setcolorrendering known{ + 
/setcolorrendering systemdict/setcolorrendering get def +}if +/test_cmyk_color_plate +{ + gsave + setcmykcolor currentgray 1 ne + grestore +}def +/inRip_spot_has_ink +{ + dup//Adobe_AGM_Core/AGMCORE_name xddf + convert_spot_to_process not +}def +/map255_to_range +{ + 1 index sub + 3 -1 roll 255 div mul add +}def +/set_csa_crd +{ + /sep_colorspace_dict null AGMCORE_gput + begin + CSA get_csa_by_name setcolorspace_opt + set_crd + end +} +def +/map_csa +{ + currentdict/MappedCSA known{MappedCSA null ne}{false}ifelse + {pop}{get_csa_by_name/MappedCSA xdf}ifelse +}def +/setsepcolor +{ + /sep_colorspace_dict AGMCORE_gget begin + dup/sep_tint exch AGMCORE_gput + TintProc + end +}def +/setdevicencolor +{ + /devicen_colorspace_dict AGMCORE_gget begin + Names length copy + Names length 1 sub -1 0 + { + /devicen_tints AGMCORE_gget 3 1 roll xpt + }for + TintProc + end +}def +/sep_colorspace_proc +{ + /AGMCORE_tmp exch store + /sep_colorspace_dict AGMCORE_gget begin + currentdict/Components known{ + Components aload pop + TintMethod/Lab eq{ + 2{AGMCORE_tmp mul NComponents 1 roll}repeat + LMax sub AGMCORE_tmp mul LMax add NComponents 1 roll + }{ + TintMethod/Subtractive eq{ + NComponents{ + AGMCORE_tmp mul NComponents 1 roll + }repeat + }{ + NComponents{ + 1 sub AGMCORE_tmp mul 1 add NComponents 1 roll + }repeat + }ifelse + }ifelse + }{ + ColorLookup AGMCORE_tmp ColorLookup length 1 sub mul round cvi get + aload pop + }ifelse + end +}def +/sep_colorspace_gray_proc +{ + /AGMCORE_tmp exch store + /sep_colorspace_dict AGMCORE_gget begin + GrayLookup AGMCORE_tmp GrayLookup length 1 sub mul round cvi get + end +}def +/sep_proc_name +{ + dup 0 get + dup/DeviceRGB eq exch/DeviceCMYK eq or level2 not and has_color not and{ + pop[/DeviceGray] + /sep_colorspace_gray_proc + }{ + /sep_colorspace_proc + }ifelse +}def +/setsepcolorspace +{ + current_spot_alias{ + dup begin + Name map_alias{ + exch pop + }if + end + }if + dup/sep_colorspace_dict exch AGMCORE_gput + begin + CSA map_csa + 
/AGMCORE_sep_special Name dup()eq exch(All)eq or store + AGMCORE_avoid_L2_sep_space{ + [/Indexed MappedCSA sep_proc_name 255 exch + {255 div}/exec cvx 3 -1 roll[4 1 roll load/exec cvx]cvx + ]setcolorspace_opt + /TintProc{ + 255 mul round cvi setcolor + }bdf + }{ + MappedCSA 0 get/DeviceCMYK eq + currentdict/Components known and + AGMCORE_sep_special not and{ + /TintProc[ + Components aload pop Name findcmykcustomcolor + /exch cvx/setcustomcolor cvx + ]cvx bdf + }{ + AGMCORE_host_sep Name(All)eq and{ + /TintProc{ + 1 exch sub setseparationgray + }bdf + }{ + AGMCORE_in_rip_sep MappedCSA 0 get/DeviceCMYK eq and + AGMCORE_host_sep or + Name()eq and{ + /TintProc[ + MappedCSA sep_proc_name exch 0 get/DeviceCMYK eq{ + cvx/setcmykcolor cvx + }{ + cvx/setgray cvx + }ifelse + ]cvx bdf + }{ + AGMCORE_producing_seps MappedCSA 0 get dup/DeviceCMYK eq exch/DeviceGray eq or and AGMCORE_sep_special not and{ + /TintProc[ + /dup cvx + MappedCSA sep_proc_name cvx exch + 0 get/DeviceGray eq{ + 1/exch cvx/sub cvx 0 0 0 4 -1/roll cvx + }if + /Name cvx/findcmykcustomcolor cvx/exch cvx + AGMCORE_host_sep{ + AGMCORE_is_cmyk_sep + /Name cvx + /AGMCORE_IsSeparationAProcessColor load/exec cvx + /not cvx/and cvx + }{ + Name inRip_spot_has_ink not + }ifelse + [ + /pop cvx 1 + ]cvx/if cvx + /setcustomcolor cvx + ]cvx bdf + }{ + /TintProc{setcolor}bdf + [/Separation Name MappedCSA sep_proc_name load]setcolorspace_opt + }ifelse + }ifelse + }ifelse + }ifelse + }ifelse + set_crd + setsepcolor + end +}def +/additive_blend +{ + 3 dict begin + /numarrays xdf + /numcolors xdf + 0 1 numcolors 1 sub + { + /c1 xdf + 1 + 0 1 numarrays 1 sub + { + 1 exch add/index cvx + c1/get cvx/mul cvx + }for + numarrays 1 add 1/roll cvx + }for + numarrays[/pop cvx]cvx/repeat cvx + end +}def +/subtractive_blend +{ + 3 dict begin + /numarrays xdf + /numcolors xdf + 0 1 numcolors 1 sub + { + /c1 xdf + 1 1 + 0 1 numarrays 1 sub + { + 1 3 3 -1 roll add/index cvx + c1/get cvx/sub cvx/mul cvx + }for + /sub cvx + numarrays 1 add 
1/roll cvx + }for + numarrays[/pop cvx]cvx/repeat cvx + end +}def +/exec_tint_transform +{ + /TintProc[ + /TintTransform cvx/setcolor cvx + ]cvx bdf + MappedCSA setcolorspace_opt +}bdf +/devn_makecustomcolor +{ + 2 dict begin + /names_index xdf + /Names xdf + 1 1 1 1 Names names_index get findcmykcustomcolor + /devicen_tints AGMCORE_gget names_index get setcustomcolor + Names length{pop}repeat + end +}bdf +/setdevicencolorspace +{ + dup/AliasedColorants known{false}{true}ifelse + current_spot_alias and{ + 7 dict begin + /names_index 0 def + dup/names_len exch/Names get length def + /new_names names_len array def + /new_LookupTables names_len array def + /alias_cnt 0 def + dup/Names get + { + dup map_alias{ + exch pop + dup/ColorLookup known{ + dup begin + new_LookupTables names_index ColorLookup put + end + }{ + dup/Components known{ + dup begin + new_LookupTables names_index Components put + end + }{ + dup begin + new_LookupTables names_index[null null null null]put + end + }ifelse + }ifelse + new_names names_index 3 -1 roll/Name get put + /alias_cnt alias_cnt 1 add def + }{ + /name xdf + new_names names_index name put + dup/LookupTables known{ + dup begin + new_LookupTables names_index LookupTables names_index get put + end + }{ + dup begin + new_LookupTables names_index[null null null null]put + end + }ifelse + }ifelse + /names_index names_index 1 add def + }forall + alias_cnt 0 gt{ + /AliasedColorants true def + /lut_entry_len new_LookupTables 0 get dup length 256 ge{0 get length}{length}ifelse def + 0 1 names_len 1 sub{ + /names_index xdf + new_LookupTables names_index get dup length 256 ge{0 get length}{length}ifelse lut_entry_len ne{ + /AliasedColorants false def + exit + }{ + new_LookupTables names_index get 0 get null eq{ + dup/Names get names_index get/name xdf + name(Cyan)eq name(Magenta)eq name(Yellow)eq name(Black)eq + or or or not{ + /AliasedColorants false def + exit + }if + }if + }ifelse + }for + lut_entry_len 1 eq{ + /AliasedColorants false def + 
}if + AliasedColorants{ + dup begin + /Names new_names def + /LookupTables new_LookupTables def + /AliasedColorants true def + /NComponents lut_entry_len def + /TintMethod NComponents 4 eq{/Subtractive}{/Additive}ifelse def + /MappedCSA TintMethod/Additive eq{/DeviceRGB}{/DeviceCMYK}ifelse def + currentdict/TTTablesIdx known not{ + /TTTablesIdx -1 def + }if + end + }if + }if + end + }if + dup/devicen_colorspace_dict exch AGMCORE_gput + begin + currentdict/AliasedColorants known{ + AliasedColorants + }{ + false + }ifelse + dup not{ + CSA map_csa + }if + /TintTransform load type/nulltype eq or{ + /TintTransform[ + 0 1 Names length 1 sub + { + /TTTablesIdx TTTablesIdx 1 add def + dup LookupTables exch get dup 0 get null eq + { + 1 index + Names exch get + dup(Cyan)eq + { + pop exch + LookupTables length exch sub + /index cvx + 0 0 0 + } + { + dup(Magenta)eq + { + pop exch + LookupTables length exch sub + /index cvx + 0/exch cvx 0 0 + }{ + (Yellow)eq + { + exch + LookupTables length exch sub + /index cvx + 0 0 3 -1/roll cvx 0 + }{ + exch + LookupTables length exch sub + /index cvx + 0 0 0 4 -1/roll cvx + }ifelse + }ifelse + }ifelse + 5 -1/roll cvx/astore cvx + }{ + dup length 1 sub + LookupTables length 4 -1 roll sub 1 add + /index cvx/mul cvx/round cvx/cvi cvx/get cvx + }ifelse + Names length TTTablesIdx add 1 add 1/roll cvx + }for + Names length[/pop cvx]cvx/repeat cvx + NComponents Names length + TintMethod/Subtractive eq + { + subtractive_blend + }{ + additive_blend + }ifelse + ]cvx bdf + }if + AGMCORE_host_sep{ + Names convert_to_process{ + exec_tint_transform + } + { + currentdict/AliasedColorants known{ + AliasedColorants not + }{ + false + }ifelse + 5 dict begin + /AvoidAliasedColorants xdf + /painted? 
false def + /names_index 0 def + /names_len Names length def + AvoidAliasedColorants{ + /currentspotalias current_spot_alias def + false set_spot_alias + }if + Names{ + AGMCORE_is_cmyk_sep{ + dup(Cyan)eq AGMCORE_cyan_plate and exch + dup(Magenta)eq AGMCORE_magenta_plate and exch + dup(Yellow)eq AGMCORE_yellow_plate and exch + (Black)eq AGMCORE_black_plate and or or or{ + /devicen_colorspace_dict AGMCORE_gget/TintProc[ + Names names_index/devn_makecustomcolor cvx + ]cvx ddf + /painted? true def + }if + painted?{exit}if + }{ + 0 0 0 0 5 -1 roll findcmykcustomcolor 1 setcustomcolor currentgray 0 eq{ + /devicen_colorspace_dict AGMCORE_gget/TintProc[ + Names names_index/devn_makecustomcolor cvx + ]cvx ddf + /painted? true def + exit + }if + }ifelse + /names_index names_index 1 add def + }forall + AvoidAliasedColorants{ + currentspotalias set_spot_alias + }if + painted?{ + /devicen_colorspace_dict AGMCORE_gget/names_index names_index put + }{ + /devicen_colorspace_dict AGMCORE_gget/TintProc[ + names_len[/pop cvx]cvx/repeat cvx 1/setseparationgray cvx + 0 0 0 0/setcmykcolor cvx + ]cvx ddf + }ifelse + end + }ifelse + } + { + AGMCORE_in_rip_sep{ + Names convert_to_process not + }{ + level3 + }ifelse + { + [/DeviceN Names MappedCSA/TintTransform load]setcolorspace_opt + /TintProc level3 not AGMCORE_in_rip_sep and{ + [ + Names/length cvx[/pop cvx]cvx/repeat cvx + ]cvx bdf + }{ + {setcolor}bdf + }ifelse + }{ + exec_tint_transform + }ifelse + }ifelse + set_crd + /AliasedColorants false def + end +}def +/setindexedcolorspace +{ + dup/indexed_colorspace_dict exch AGMCORE_gput + begin + currentdict/CSDBase known{ + CSDBase/CSD get_res begin + currentdict/Names known{ + currentdict devncs + }{ + 1 currentdict sepcs + }ifelse + AGMCORE_host_sep{ + 4 dict begin + /compCnt/Names where{pop Names length}{1}ifelse def + /NewLookup HiVal 1 add string def + 0 1 HiVal{ + /tableIndex xdf + Lookup dup type/stringtype eq{ + compCnt tableIndex map_index + }{ + exec + }ifelse + /Names where{ + 
pop setdevicencolor + }{ + setsepcolor + }ifelse + currentgray + tableIndex exch + 255 mul cvi + NewLookup 3 1 roll put + }for + [/Indexed currentcolorspace HiVal NewLookup]setcolorspace_opt + end + }{ + level3 + { + currentdict/Names known{ + [/Indexed[/DeviceN Names MappedCSA/TintTransform load]HiVal Lookup]setcolorspace_opt + }{ + [/Indexed[/Separation Name MappedCSA sep_proc_name load]HiVal Lookup]setcolorspace_opt + }ifelse + }{ + [/Indexed MappedCSA HiVal + [ + currentdict/Names known{ + Lookup dup type/stringtype eq + {/exch cvx CSDBase/CSD get_res/Names get length dup/mul cvx exch/getinterval cvx{255 div}/forall cvx} + {/exec cvx}ifelse + /TintTransform load/exec cvx + }{ + Lookup dup type/stringtype eq + {/exch cvx/get cvx 255/div cvx} + {/exec cvx}ifelse + CSDBase/CSD get_res/MappedCSA get sep_proc_name exch pop/load cvx/exec cvx + }ifelse + ]cvx + ]setcolorspace_opt + }ifelse + }ifelse + end + set_crd + } + { + CSA map_csa + AGMCORE_host_sep level2 not and{ + 0 0 0 0 setcmykcolor + }{ + [/Indexed MappedCSA + level2 not has_color not and{ + dup 0 get dup/DeviceRGB eq exch/DeviceCMYK eq or{ + pop[/DeviceGray] + }if + HiVal GrayLookup + }{ + HiVal + currentdict/RangeArray known{ + { + /indexed_colorspace_dict AGMCORE_gget begin + Lookup exch + dup HiVal gt{ + pop HiVal + }if + NComponents mul NComponents getinterval{}forall + NComponents 1 sub -1 0{ + RangeArray exch 2 mul 2 getinterval aload pop map255_to_range + NComponents 1 roll + }for + end + }bind + }{ + Lookup + }ifelse + }ifelse + ]setcolorspace_opt + set_crd + }ifelse + }ifelse + end +}def +/setindexedcolor +{ + AGMCORE_host_sep{ + /indexed_colorspace_dict AGMCORE_gget + begin + currentdict/CSDBase known{ + CSDBase/CSD get_res begin + currentdict/Names known{ + map_indexed_devn + devn + } + { + Lookup 1 3 -1 roll map_index + sep + }ifelse + end + }{ + Lookup MappedCSA/DeviceCMYK eq{4}{1}ifelse 3 -1 roll + map_index + MappedCSA/DeviceCMYK eq{setcmykcolor}{setgray}ifelse + }ifelse + end + }{ + level3 
not AGMCORE_in_rip_sep and/indexed_colorspace_dict AGMCORE_gget/CSDBase known and{ + /indexed_colorspace_dict AGMCORE_gget/CSDBase get/CSD get_res begin + map_indexed_devn + devn + end + } + { + setcolor + }ifelse + }ifelse +}def +/ignoreimagedata +{ + currentoverprint not{ + gsave + dup clonedict begin + 1 setgray + /Decode[0 1]def + /DataSourcedef + /MultipleDataSources false def + /BitsPerComponent 8 def + currentdict end + systemdict/image gx + grestore + }if + consumeimagedata +}def +/add_res +{ + dup/CSD eq{ + pop + //Adobe_AGM_Core begin + /AGMCORE_CSD_cache load 3 1 roll put + end + }{ + defineresource pop + }ifelse +}def +/del_res +{ + { + aload pop exch + dup/CSD eq{ + pop + {//Adobe_AGM_Core/AGMCORE_CSD_cache get exch undef}forall + }{ + exch + {1 index undefineresource}forall + pop + }ifelse + }forall +}def +/get_res +{ + dup/CSD eq{ + pop + dup type dup/nametype eq exch/stringtype eq or{ + AGMCORE_CSD_cache exch get + }if + }{ + findresource + }ifelse +}def +/get_csa_by_name +{ + dup type dup/nametype eq exch/stringtype eq or{ + /CSA get_res + }if +}def +/paintproc_buf_init +{ + /count get 0 0 put +}def +/paintproc_buf_next +{ + dup/count get dup 0 get + dup 3 1 roll + 1 add 0 xpt + get +}def +/cachepaintproc_compress +{ + 5 dict begin + currentfile exch 0 exch/SubFileDecode filter/ReadFilter exch def + /ppdict 20 dict def + /string_size 16000 def + /readbuffer string_size string def + currentglobal true setglobal + ppdict 1 array dup 0 1 put/count xpt + setglobal + /LZWFilter + { + exch + dup length 0 eq{ + pop + }{ + ppdict dup length 1 sub 3 -1 roll put + }ifelse + {string_size}{0}ifelse string + }/LZWEncode filter def + { + ReadFilter readbuffer readstring + exch LZWFilter exch writestring + not{exit}if + }loop + LZWFilter closefile + ppdict + end +}def +/cachepaintproc +{ + 2 dict begin + currentfile exch 0 exch/SubFileDecode filter/ReadFilter exch def + /ppdict 20 dict def + currentglobal true setglobal + ppdict 1 array dup 0 1 put/count xpt + 
setglobal + { + ReadFilter 16000 string readstring exch + ppdict dup length 1 sub 3 -1 roll put + not{exit}if + }loop + ppdict dup dup length 1 sub()put + end +}def +/make_pattern +{ + exch clonedict exch + dup matrix currentmatrix matrix concatmatrix 0 0 3 2 roll itransform + exch 3 index/XStep get 1 index exch 2 copy div cvi mul sub sub + exch 3 index/YStep get 1 index exch 2 copy div cvi mul sub sub + matrix translate exch matrix concatmatrix + 1 index begin + BBox 0 get XStep div cvi XStep mul/xshift exch neg def + BBox 1 get YStep div cvi YStep mul/yshift exch neg def + BBox 0 get xshift add + BBox 1 get yshift add + BBox 2 get xshift add + BBox 3 get yshift add + 4 array astore + /BBox exch def + [xshift yshift/translate load null/exec load]dup + 3/PaintProc load put cvx/PaintProc exch def + end + gsave 0 setgray + makepattern + grestore +}def +/set_pattern +{ + dup/PatternType get 1 eq{ + dup/PaintType get 1 eq{ + currentoverprint sop[/DeviceGray]setcolorspace 0 setgray + }if + }if + setpattern +}def +/setcolorspace_opt +{ + dup currentcolorspace eq{pop}{setcolorspace}ifelse +}def +/updatecolorrendering +{ + currentcolorrendering/RenderingIntent known{ + currentcolorrendering/RenderingIntent get + } + { + Intent/AbsoluteColorimetric eq + { + /absolute_colorimetric_crd AGMCORE_gget dup null eq + } + { + Intent/RelativeColorimetric eq + { + /relative_colorimetric_crd AGMCORE_gget dup null eq + } + { + Intent/Saturation eq + { + /saturation_crd AGMCORE_gget dup null eq + } + { + /perceptual_crd AGMCORE_gget dup null eq + }ifelse + }ifelse + }ifelse + { + pop null + } + { + /RenderingIntent known{null}{Intent}ifelse + }ifelse + }ifelse + Intent ne{ + Intent/ColorRendering{findresource}stopped + { + pop pop systemdict/findcolorrendering known + { + Intent findcolorrendering + { + /ColorRendering findresource true exch + } + { + /ColorRendering findresource + product(Xerox Phaser 5400)ne + exch + }ifelse + dup Intent/AbsoluteColorimetric eq + { + 
/absolute_colorimetric_crd exch AGMCORE_gput + } + { + Intent/RelativeColorimetric eq + { + /relative_colorimetric_crd exch AGMCORE_gput + } + { + Intent/Saturation eq + { + /saturation_crd exch AGMCORE_gput + } + { + Intent/Perceptual eq + { + /perceptual_crd exch AGMCORE_gput + } + { + pop + }ifelse + }ifelse + }ifelse + }ifelse + 1 index{exch}{pop}ifelse + } + {false}ifelse + } + {true}ifelse + { + dup begin + currentdict/TransformPQR known{ + currentdict/TransformPQR get aload pop + 3{{}eq 3 1 roll}repeat or or + } + {true}ifelse + currentdict/MatrixPQR known{ + currentdict/MatrixPQR get aload pop + 1.0 eq 9 1 roll 0.0 eq 9 1 roll 0.0 eq 9 1 roll + 0.0 eq 9 1 roll 1.0 eq 9 1 roll 0.0 eq 9 1 roll + 0.0 eq 9 1 roll 0.0 eq 9 1 roll 1.0 eq + and and and and and and and and + } + {true}ifelse + end + or + { + clonedict begin + /TransformPQR[ + {4 -1 roll 3 get dup 3 1 roll sub 5 -1 roll 3 get 3 -1 roll sub div + 3 -1 roll 3 get 3 -1 roll 3 get dup 4 1 roll sub mul add}bind + {4 -1 roll 4 get dup 3 1 roll sub 5 -1 roll 4 get 3 -1 roll sub div + 3 -1 roll 4 get 3 -1 roll 4 get dup 4 1 roll sub mul add}bind + {4 -1 roll 5 get dup 3 1 roll sub 5 -1 roll 5 get 3 -1 roll sub div + 3 -1 roll 5 get 3 -1 roll 5 get dup 4 1 roll sub mul add}bind + ]def + /MatrixPQR[0.8951 -0.7502 0.0389 0.2664 1.7135 -0.0685 -0.1614 0.0367 1.0296]def + /RangePQR[-0.3227950745 2.3229645538 -1.5003771057 3.5003465881 -0.1369979095 2.136967392]def + currentdict end + }if + setcolorrendering_opt + }if + }if +}def +/set_crd +{ + AGMCORE_host_sep not level2 and{ + currentdict/ColorRendering known{ + ColorRendering/ColorRendering{findresource}stopped not{setcolorrendering_opt}if + }{ + currentdict/Intent known{ + updatecolorrendering + }if + }ifelse + currentcolorspace dup type/arraytype eq + {0 get}if + /DeviceRGB eq + { + currentdict/UCR known + {/UCR}{/AGMCORE_currentucr}ifelse + load setundercolorremoval + currentdict/BG known + {/BG}{/AGMCORE_currentbg}ifelse + load setblackgeneration + }if + 
}if +}def +/set_ucrbg +{ + dup null eq{pop/AGMCORE_currentbg load}{/Procedure get_res}ifelse setblackgeneration + dup null eq{pop/AGMCORE_currentucr load}{/Procedure get_res}ifelse setundercolorremoval +}def +/setcolorrendering_opt +{ + dup currentcolorrendering eq{ + pop + }{ + clonedict + begin + /Intent Intent def + currentdict + end + setcolorrendering + }ifelse +}def +/cpaint_gcomp +{ + convert_to_process//Adobe_AGM_Core/AGMCORE_ConvertToProcess xddf + //Adobe_AGM_Core/AGMCORE_ConvertToProcess get not + { + (%end_cpaint_gcomp)flushinput + }if +}def +/cpaint_gsep +{ + //Adobe_AGM_Core/AGMCORE_ConvertToProcess get + { + (%end_cpaint_gsep)flushinput + }if +}def +/cpaint_gend +{np}def +/T1_path +{ + currentfile token pop currentfile token pop mo + { + currentfile token pop dup type/stringtype eq + {pop exit}if + 0 exch rlineto + currentfile token pop dup type/stringtype eq + {pop exit}if + 0 rlineto + }loop +}def +/T1_gsave + level3 + {/clipsave} + {/gsave}ifelse + load def +/T1_grestore + level3 + {/cliprestore} + {/grestore}ifelse + load def +/set_spot_alias_ary +{ + dup inherit_aliases + //Adobe_AGM_Core/AGMCORE_SpotAliasAry xddf +}def +/set_spot_normalization_ary +{ + dup inherit_aliases + dup length + /AGMCORE_SpotAliasAry where{pop AGMCORE_SpotAliasAry length add}if + array + //Adobe_AGM_Core/AGMCORE_SpotAliasAry2 xddf + /AGMCORE_SpotAliasAry where{ + pop + AGMCORE_SpotAliasAry2 0 AGMCORE_SpotAliasAry putinterval + AGMCORE_SpotAliasAry length + }{0}ifelse + AGMCORE_SpotAliasAry2 3 1 roll exch putinterval + true set_spot_alias +}def +/inherit_aliases +{ + {dup/Name get map_alias{/CSD put}{pop}ifelse}forall +}def +/set_spot_alias +{ + /AGMCORE_SpotAliasAry2 where{ + /AGMCORE_current_spot_alias 3 -1 roll put + }{ + pop + }ifelse +}def +/current_spot_alias +{ + /AGMCORE_SpotAliasAry2 where{ + /AGMCORE_current_spot_alias get + }{ + false + }ifelse +}def +/map_alias +{ + /AGMCORE_SpotAliasAry2 where{ + begin + /AGMCORE_name xdf + false + AGMCORE_SpotAliasAry2{ + 
dup/Name get AGMCORE_name eq{ + /CSD get/CSD get_res + exch pop true + exit + }{ + pop + }ifelse + }forall + end + }{ + pop false + }ifelse +}bdf +/spot_alias +{ + true set_spot_alias + /AGMCORE_&setcustomcolor AGMCORE_key_known not{ + //Adobe_AGM_Core/AGMCORE_&setcustomcolor/setcustomcolor load put + }if + /customcolor_tint 1 AGMCORE_gput + //Adobe_AGM_Core begin + /setcustomcolor + { + //Adobe_AGM_Core begin + dup/customcolor_tint exch AGMCORE_gput + 1 index aload pop pop 1 eq exch 1 eq and exch 1 eq and exch 1 eq and not + current_spot_alias and{1 index 4 get map_alias}{false}ifelse + { + false set_spot_alias + /sep_colorspace_dict AGMCORE_gget null ne + 3 1 roll 2 index{ + exch pop/sep_tint AGMCORE_gget exch + }if + mark 3 1 roll + setsepcolorspace + counttomark 0 ne{ + setsepcolor + }if + pop + not{/sep_tint 1.0 AGMCORE_gput}if + pop + true set_spot_alias + }{ + AGMCORE_&setcustomcolor + }ifelse + end + }bdf + end +}def +/begin_feature +{ + Adobe_AGM_Core/AGMCORE_feature_dictCount countdictstack put + count Adobe_AGM_Core/AGMCORE_feature_opCount 3 -1 roll put + {Adobe_AGM_Core/AGMCORE_feature_ctm matrix currentmatrix put}if +}def +/end_feature +{ + 2 dict begin + /spd/setpagedevice load def + /setpagedevice{get_gstate spd set_gstate}def + stopped{$error/newerror false put}if + end + count Adobe_AGM_Core/AGMCORE_feature_opCount get sub dup 0 gt{{pop}repeat}{pop}ifelse + countdictstack Adobe_AGM_Core/AGMCORE_feature_dictCount get sub dup 0 gt{{end}repeat}{pop}ifelse + {Adobe_AGM_Core/AGMCORE_feature_ctm get setmatrix}if +}def +/set_negative +{ + //Adobe_AGM_Core begin + /AGMCORE_inverting exch def + level2{ + currentpagedevice/NegativePrint known AGMCORE_distilling not and{ + currentpagedevice/NegativePrint get//Adobe_AGM_Core/AGMCORE_inverting get ne{ + true begin_feature true{ + <>setpagedevice + }end_feature + }if + /AGMCORE_inverting false def + }if + }if + AGMCORE_inverting{ + [{1 exch sub}/exec load dup currenttransfer exch]cvx bind settransfer + 
AGMCORE_distilling{ + erasepage + }{ + gsave np clippath 1/setseparationgray where{pop setseparationgray}{setgray}ifelse + /AGMIRS_&fill where{pop AGMIRS_&fill}{fill}ifelse grestore + }ifelse + }if + end +}def +/lw_save_restore_override{ + /md where{ + pop + md begin + initializepage + /initializepage{}def + /pmSVsetup{}def + /endp{}def + /pse{}def + /psb{}def + /orig_showpage where + {pop} + {/orig_showpage/showpage load def} + ifelse + /showpage{orig_showpage gR}def + end + }if +}def +/pscript_showpage_override{ + /NTPSOct95 where + { + begin + showpage + save + /showpage/restore load def + /restore{exch pop}def + end + }if +}def +/driver_media_override +{ + /md where{ + pop + md/initializepage known{ + md/initializepage{}put + }if + md/rC known{ + md/rC{4{pop}repeat}put + }if + }if + /mysetup where{ + /mysetup[1 0 0 1 0 0]put + }if + Adobe_AGM_Core/AGMCORE_Default_CTM matrix currentmatrix put + level2 + {Adobe_AGM_Core/AGMCORE_Default_PageSize currentpagedevice/PageSize get put}if +}def +/driver_check_media_override +{ + /PrepsDict where + {pop} + { + Adobe_AGM_Core/AGMCORE_Default_CTM get matrix currentmatrix ne + Adobe_AGM_Core/AGMCORE_Default_PageSize get type/arraytype eq + { + Adobe_AGM_Core/AGMCORE_Default_PageSize get 0 get currentpagedevice/PageSize get 0 get eq and + Adobe_AGM_Core/AGMCORE_Default_PageSize get 1 get currentpagedevice/PageSize get 1 get eq and + }if + { + Adobe_AGM_Core/AGMCORE_Default_CTM get setmatrix + }if + }ifelse +}def +AGMCORE_err_strings begin + /AGMCORE_bad_environ(Environment not satisfactory for this job. Ensure that the PPD is correct or that the PostScript level requested is supported by this printer. )def + /AGMCORE_color_space_onhost_seps(This job contains colors that will not separate with on-host methods. )def + /AGMCORE_invalid_color_space(This job contains an invalid color space. 
)def +end +/set_def_ht +{AGMCORE_def_ht sethalftone}def +/set_def_flat +{AGMCORE_Default_flatness setflat}def +end +systemdict/setpacking known +{setpacking}if +%%EndResource +%%BeginResource: procset Adobe_CoolType_Core 2.31 0 %%Copyright: Copyright 1997-2006 Adobe Systems Incorporated. All Rights Reserved. %%Version: 2.31 0 10 dict begin /Adobe_CoolType_Passthru currentdict def /Adobe_CoolType_Core_Defined userdict/Adobe_CoolType_Core known def Adobe_CoolType_Core_Defined {/Adobe_CoolType_Core userdict/Adobe_CoolType_Core get def} if userdict/Adobe_CoolType_Core 70 dict dup begin put /Adobe_CoolType_Version 2.31 def /Level2? systemdict/languagelevel known dup {pop systemdict/languagelevel get 2 ge} if def Level2? not { /currentglobal false def /setglobal/pop load def /gcheck{pop false}bind def /currentpacking false def /setpacking/pop load def /SharedFontDirectory 0 dict def } if currentpacking true setpacking currentglobal false setglobal userdict/Adobe_CoolType_Data 2 copy known not {2 copy 10 dict put} if get begin /@opStackCountByLevel 32 dict def /@opStackLevel 0 def /@dictStackCountByLevel 32 dict def /@dictStackLevel 0 def end setglobal currentglobal true setglobal userdict/Adobe_CoolType_GVMFonts known not {userdict/Adobe_CoolType_GVMFonts 10 dict put} if setglobal currentglobal false setglobal userdict/Adobe_CoolType_LVMFonts known not {userdict/Adobe_CoolType_LVMFonts 10 dict put} if setglobal /ct_VMDictPut { dup gcheck{Adobe_CoolType_GVMFonts}{Adobe_CoolType_LVMFonts}ifelse 3 1 roll put }bind def /ct_VMDictUndef { dup Adobe_CoolType_GVMFonts exch known {Adobe_CoolType_GVMFonts exch undef} { dup Adobe_CoolType_LVMFonts exch known {Adobe_CoolType_LVMFonts exch undef} {pop} ifelse }ifelse }bind def /ct_str1 1 string def /ct_xshow { /_ct_na exch def /_ct_i 0 def currentpoint /_ct_y exch def /_ct_x exch def { pop pop ct_str1 exch 0 exch put ct_str1 show {_ct_na _ct_i get}stopped {pop pop} { _ct_x _ct_y moveto 0 rmoveto } ifelse /_ct_i _ct_i 1 add def 
currentpoint /_ct_y exch def /_ct_x exch def } exch @cshow }bind def /ct_yshow { /_ct_na exch def /_ct_i 0 def currentpoint /_ct_y exch def /_ct_x exch def { pop pop ct_str1 exch 0 exch put ct_str1 show {_ct_na _ct_i get}stopped {pop pop} { _ct_x _ct_y moveto 0 exch rmoveto } ifelse /_ct_i _ct_i 1 add def currentpoint /_ct_y exch def /_ct_x exch def } exch @cshow }bind def /ct_xyshow { /_ct_na exch def /_ct_i 0 def currentpoint /_ct_y exch def /_ct_x exch def { pop pop ct_str1 exch 0 exch put ct_str1 show {_ct_na _ct_i get}stopped {pop pop} { {_ct_na _ct_i 1 add get}stopped {pop pop pop} { _ct_x _ct_y moveto rmoveto } ifelse } ifelse /_ct_i _ct_i 2 add def currentpoint /_ct_y exch def /_ct_x exch def } exch @cshow }bind def /xsh{{@xshow}stopped{Adobe_CoolType_Data begin ct_xshow end}if}bind def /ysh{{@yshow}stopped{Adobe_CoolType_Data begin ct_yshow end}if}bind def /xysh{{@xyshow}stopped{Adobe_CoolType_Data begin ct_xyshow end}if}bind def currentglobal true setglobal /ct_T3Defs { /BuildChar { 1 index/Encoding get exch get 1 index/BuildGlyph get exec }bind def /BuildGlyph { exch begin GlyphProcs exch get exec end }bind def }bind def setglobal /@_SaveStackLevels { Adobe_CoolType_Data begin /@vmState currentglobal def false setglobal @opStackCountByLevel @opStackLevel 2 copy known not { 2 copy 3 dict dup/args 7 index 5 add array put put get } { get dup/args get dup length 3 index lt { dup length 5 add array exch 1 index exch 0 exch putinterval 1 index exch/args exch put } {pop} ifelse } ifelse begin count 1 sub 1 index lt {pop count} if dup/argCount exch def dup 0 gt { args exch 0 exch getinterval astore pop } {pop} ifelse count /restCount exch def end /@opStackLevel @opStackLevel 1 add def countdictstack 1 sub @dictStackCountByLevel exch @dictStackLevel exch put /@dictStackLevel @dictStackLevel 1 add def @vmState setglobal end }bind def /@_RestoreStackLevels { Adobe_CoolType_Data begin /@opStackLevel @opStackLevel 1 sub def @opStackCountByLevel @opStackLevel get 
begin count restCount sub dup 0 gt {{pop}repeat} {pop} ifelse args 0 argCount getinterval{}forall end /@dictStackLevel @dictStackLevel 1 sub def @dictStackCountByLevel @dictStackLevel get end countdictstack exch sub dup 0 gt {{end}repeat} {pop} ifelse }bind def /@_PopStackLevels { Adobe_CoolType_Data begin /@opStackLevel @opStackLevel 1 sub def /@dictStackLevel @dictStackLevel 1 sub def end }bind def /@Raise { exch cvx exch errordict exch get exec stop }bind def /@ReRaise { cvx $error/errorname get errordict exch get exec stop }bind def /@Stopped { 0 @#Stopped }bind def /@#Stopped { @_SaveStackLevels stopped {@_RestoreStackLevels true} {@_PopStackLevels false} ifelse }bind def /@Arg { Adobe_CoolType_Data begin @opStackCountByLevel @opStackLevel 1 sub get begin args exch argCount 1 sub exch sub get end end }bind def currentglobal true setglobal /CTHasResourceForAllBug Level2? { 1 dict dup /@shouldNotDisappearDictValue true def Adobe_CoolType_Data exch/@shouldNotDisappearDict exch put begin count @_SaveStackLevels {(*){pop stop}128 string/Category resourceforall} stopped pop @_RestoreStackLevels currentdict Adobe_CoolType_Data/@shouldNotDisappearDict get dup 3 1 roll ne dup 3 1 roll { /@shouldNotDisappearDictValue known { { end currentdict 1 index eq {pop exit} if } loop } if } { pop end } ifelse } {false} ifelse def true setglobal /CTHasResourceStatusBug Level2? { mark {/steveamerige/Category resourcestatus} stopped {cleartomark true} {cleartomark currentglobal not} ifelse } {false} ifelse def setglobal /CTResourceStatus { mark 3 1 roll /Category findresource begin ({ResourceStatus}stopped)0()/SubFileDecode filter cvx exec {cleartomark false} {{3 2 roll pop true}{cleartomark false}ifelse} ifelse end }bind def /CTWorkAroundBugs { Level2? 
{ /cid_PreLoad/ProcSet resourcestatus { pop pop currentglobal mark { (*) { dup/CMap CTHasResourceStatusBug {CTResourceStatus} {resourcestatus} ifelse { pop dup 0 eq exch 1 eq or { dup/CMap findresource gcheck setglobal /CMap undefineresource } { pop CTHasResourceForAllBug {exit} {stop} ifelse } ifelse } {pop} ifelse } 128 string/CMap resourceforall } stopped {cleartomark} stopped pop setglobal } if } if }bind def /ds { Adobe_CoolType_Core begin CTWorkAroundBugs /mo/moveto load def /nf/newencodedfont load def /msf{makefont setfont}bind def /uf{dup undefinefont ct_VMDictUndef}bind def /ur/undefineresource load def /chp/charpath load def /awsh/awidthshow load def /wsh/widthshow load def /ash/ashow load def /@xshow/xshow load def /@yshow/yshow load def /@xyshow/xyshow load def /@cshow/cshow load def /sh/show load def /rp/repeat load def /.n/.notdef def end currentglobal false setglobal userdict/Adobe_CoolType_Data 2 copy known not {2 copy 10 dict put} if get begin /AddWidths? false def /CC 0 def /charcode 2 string def /@opStackCountByLevel 32 dict def /@opStackLevel 0 def /@dictStackCountByLevel 32 dict def /@dictStackLevel 0 def /InVMFontsByCMap 10 dict def /InVMDeepCopiedFonts 10 dict def end setglobal }bind def /dt { currentdict Adobe_CoolType_Core eq {end} if }bind def /ps { Adobe_CoolType_Core begin Adobe_CoolType_GVMFonts begin Adobe_CoolType_LVMFonts begin SharedFontDirectory begin }bind def /pt { end end end end }bind def /unload { systemdict/languagelevel known { systemdict/languagelevel get 2 ge { userdict/Adobe_CoolType_Core 2 copy known {undef} {pop pop} ifelse } if } if }bind def /ndf { 1 index where {pop pop pop} {dup xcheck{bind}if def} ifelse }def /findfont systemdict begin userdict begin /globaldict where{/globaldict get begin}if dup where pop exch get /globaldict where{pop end}if end end Adobe_CoolType_Core_Defined {/systemfindfont exch def} { /findfont 1 index def /systemfindfont exch def } ifelse /undefinefont {pop}ndf /copyfont { currentglobal 3 1 
roll 1 index gcheck setglobal dup null eq{0}{dup length}ifelse 2 index length add 1 add dict begin exch { 1 index/FID eq {pop pop} {def} ifelse } forall dup null eq {pop} {{def}forall} ifelse currentdict end exch setglobal }bind def /copyarray { currentglobal exch dup gcheck setglobal dup length array copy exch setglobal }bind def /newencodedfont { currentglobal { SharedFontDirectory 3 index known {SharedFontDirectory 3 index get/FontReferenced known} {false} ifelse } { FontDirectory 3 index known {FontDirectory 3 index get/FontReferenced known} { SharedFontDirectory 3 index known {SharedFontDirectory 3 index get/FontReferenced known} {false} ifelse } ifelse } ifelse dup { 3 index findfont/FontReferenced get 2 index dup type/nametype eq {findfont} if ne {pop false} if } if dup { 1 index dup type/nametype eq {findfont} if dup/CharStrings known { /CharStrings get length 4 index findfont/CharStrings get length ne { pop false } if } {pop} ifelse } if { pop 1 index findfont /Encoding get exch 0 1 255 {2 copy get 3 index 3 1 roll put} for pop pop pop } { currentglobal 4 1 roll dup type/nametype eq {findfont} if dup gcheck setglobal dup dup maxlength 2 add dict begin exch { 1 index/FID ne 2 index/Encoding ne and {def} {pop pop} ifelse } forall /FontReferenced exch def /Encoding exch dup length array copy def /FontName 1 index dup type/stringtype eq{cvn}if def dup currentdict end definefont ct_VMDictPut setglobal } ifelse }bind def /SetSubstituteStrategy { $SubstituteFont begin dup type/dicttype ne {0 dict} if currentdict/$Strategies known { exch $Strategies exch 2 copy known { get 2 copy maxlength exch maxlength add dict begin {def}forall {def}forall currentdict dup/$Init known {dup/$Init get exec} if end /$Strategy exch def } {pop pop pop} ifelse } {pop pop} ifelse end }bind def /scff { $SubstituteFont begin dup type/stringtype eq {dup length exch} {null} ifelse /$sname exch def /$slen exch def /$inVMIndex $sname null eq { 1 index $str cvs dup length $slen sub $slen 
getinterval cvn } {$sname} ifelse def end {findfont} @Stopped { dup length 8 add string exch 1 index 0(BadFont:)putinterval 1 index exch 8 exch dup length string cvs putinterval cvn {findfont} @Stopped {pop/Courier findfont} if } if $SubstituteFont begin /$sname null def /$slen 0 def /$inVMIndex null def end }bind def /isWidthsOnlyFont { dup/WidthsOnly known {pop pop true} { dup/FDepVector known {/FDepVector get{isWidthsOnlyFont dup{exit}if}forall} { dup/FDArray known {/FDArray get{isWidthsOnlyFont dup{exit}if}forall} {pop} ifelse } ifelse } ifelse }bind def /ct_StyleDicts 4 dict dup begin /Adobe-Japan1 4 dict dup begin Level2? { /Serif /HeiseiMin-W3-83pv-RKSJ-H/Font resourcestatus {pop pop/HeiseiMin-W3} { /CIDFont/Category resourcestatus { pop pop /HeiseiMin-W3/CIDFont resourcestatus {pop pop/HeiseiMin-W3} {/Ryumin-Light} ifelse } {/Ryumin-Light} ifelse } ifelse def /SansSerif /HeiseiKakuGo-W5-83pv-RKSJ-H/Font resourcestatus {pop pop/HeiseiKakuGo-W5} { /CIDFont/Category resourcestatus { pop pop /HeiseiKakuGo-W5/CIDFont resourcestatus {pop pop/HeiseiKakuGo-W5} {/GothicBBB-Medium} ifelse } {/GothicBBB-Medium} ifelse } ifelse def /HeiseiMaruGo-W4-83pv-RKSJ-H/Font resourcestatus {pop pop/HeiseiMaruGo-W4} { /CIDFont/Category resourcestatus { pop pop /HeiseiMaruGo-W4/CIDFont resourcestatus {pop pop/HeiseiMaruGo-W4} { /Jun101-Light-RKSJ-H/Font resourcestatus {pop pop/Jun101-Light} {SansSerif} ifelse } ifelse } { /Jun101-Light-RKSJ-H/Font resourcestatus {pop pop/Jun101-Light} {SansSerif} ifelse } ifelse } ifelse /RoundSansSerif exch def /Default Serif def } { /Serif/Ryumin-Light def /SansSerif/GothicBBB-Medium def { (fonts/Jun101-Light-83pv-RKSJ-H)status }stopped {pop}{ {pop pop pop pop/Jun101-Light} {SansSerif} ifelse /RoundSansSerif exch def }ifelse /Default Serif def } ifelse end def /Adobe-Korea1 4 dict dup begin /Serif/HYSMyeongJo-Medium def /SansSerif/HYGoThic-Medium def /RoundSansSerif SansSerif def /Default Serif def end def /Adobe-GB1 4 dict dup begin 
/Serif/STSong-Light def /SansSerif/STHeiti-Regular def /RoundSansSerif SansSerif def /Default Serif def end def /Adobe-CNS1 4 dict dup begin /Serif/MKai-Medium def /SansSerif/MHei-Medium def /RoundSansSerif SansSerif def /Default Serif def end def end def Level2?{currentglobal true setglobal}if /ct_BoldRomanWidthProc { stringwidth 1 index 0 ne{exch .03 add exch}if setcharwidth 0 0 }bind def /ct_Type0WidthProc { dup stringwidth 0 0 moveto 2 index true charpath pathbbox 0 -1 7 index 2 div .88 setcachedevice2 pop 0 0 }bind def /ct_Type0WMode1WidthProc { dup stringwidth pop 2 div neg -0.88 2 copy moveto 0 -1 5 -1 roll true charpath pathbbox setcachedevice }bind def /cHexEncoding [/c00/c01/c02/c03/c04/c05/c06/c07/c08/c09/c0A/c0B/c0C/c0D/c0E/c0F/c10/c11/c12 /c13/c14/c15/c16/c17/c18/c19/c1A/c1B/c1C/c1D/c1E/c1F/c20/c21/c22/c23/c24/c25 /c26/c27/c28/c29/c2A/c2B/c2C/c2D/c2E/c2F/c30/c31/c32/c33/c34/c35/c36/c37/c38 /c39/c3A/c3B/c3C/c3D/c3E/c3F/c40/c41/c42/c43/c44/c45/c46/c47/c48/c49/c4A/c4B /c4C/c4D/c4E/c4F/c50/c51/c52/c53/c54/c55/c56/c57/c58/c59/c5A/c5B/c5C/c5D/c5E /c5F/c60/c61/c62/c63/c64/c65/c66/c67/c68/c69/c6A/c6B/c6C/c6D/c6E/c6F/c70/c71 /c72/c73/c74/c75/c76/c77/c78/c79/c7A/c7B/c7C/c7D/c7E/c7F/c80/c81/c82/c83/c84 /c85/c86/c87/c88/c89/c8A/c8B/c8C/c8D/c8E/c8F/c90/c91/c92/c93/c94/c95/c96/c97 /c98/c99/c9A/c9B/c9C/c9D/c9E/c9F/cA0/cA1/cA2/cA3/cA4/cA5/cA6/cA7/cA8/cA9/cAA /cAB/cAC/cAD/cAE/cAF/cB0/cB1/cB2/cB3/cB4/cB5/cB6/cB7/cB8/cB9/cBA/cBB/cBC/cBD /cBE/cBF/cC0/cC1/cC2/cC3/cC4/cC5/cC6/cC7/cC8/cC9/cCA/cCB/cCC/cCD/cCE/cCF/cD0 /cD1/cD2/cD3/cD4/cD5/cD6/cD7/cD8/cD9/cDA/cDB/cDC/cDD/cDE/cDF/cE0/cE1/cE2/cE3 /cE4/cE5/cE6/cE7/cE8/cE9/cEA/cEB/cEC/cED/cEE/cEF/cF0/cF1/cF2/cF3/cF4/cF5/cF6 /cF7/cF8/cF9/cFA/cFB/cFC/cFD/cFE/cFF]def /ct_BoldBaseFont 11 dict begin /FontType 3 def /FontMatrix[1 0 0 1 0 0]def /FontBBox[0 0 1 1]def /Encoding cHexEncoding def /_setwidthProc/ct_BoldRomanWidthProc load def /_bcstr1 1 string def /BuildChar { exch begin _basefont setfont _bcstr1 dup 0 4 -1 roll put dup 
_setwidthProc 3 copy moveto show _basefonto setfont moveto show end }bind def currentdict end def systemdict/composefont known { /ct_DefineIdentity-H { /Identity-H/CMap resourcestatus { pop pop } { /CIDInit/ProcSet findresource begin 12 dict begin begincmap /CIDSystemInfo 3 dict dup begin /Registry(Adobe)def /Ordering(Identity)def /Supplement 0 def end def /CMapName/Identity-H def /CMapVersion 1.000 def /CMapType 1 def 1 begincodespacerange <0000> endcodespacerange 1 begincidrange <0000>0 endcidrange endcmap CMapName currentdict/CMap defineresource pop end end } ifelse } def /ct_BoldBaseCIDFont 11 dict begin /CIDFontType 1 def /CIDFontName/ct_BoldBaseCIDFont def /FontMatrix[1 0 0 1 0 0]def /FontBBox[0 0 1 1]def /_setwidthProc/ct_Type0WidthProc load def /_bcstr2 2 string def /BuildGlyph { exch begin _basefont setfont _bcstr2 1 2 index 256 mod put _bcstr2 0 3 -1 roll 256 idiv put _bcstr2 dup _setwidthProc 3 copy moveto show _basefonto setfont moveto show end }bind def currentdict end def }if Level2?{setglobal}if /ct_CopyFont{ { 1 index/FID ne 2 index/UniqueID ne and {def}{pop pop}ifelse }forall }bind def /ct_Type0CopyFont { exch dup length dict begin ct_CopyFont [ exch FDepVector { dup/FontType get 0 eq { 1 index ct_Type0CopyFont /_ctType0 exch definefont } { /_ctBaseFont exch 2 index exec } ifelse exch } forall pop ] /FDepVector exch def currentdict end }bind def /ct_MakeBoldFont { dup/ct_SyntheticBold known { dup length 3 add dict begin ct_CopyFont /ct_StrokeWidth .03 0 FontMatrix idtransform pop def /ct_SyntheticBold true def currentdict end definefont } { dup dup length 3 add dict begin ct_CopyFont /PaintType 2 def /StrokeWidth .03 0 FontMatrix idtransform pop def /dummybold currentdict end definefont dup/FontType get dup 9 ge exch 11 le and { ct_BoldBaseCIDFont dup length 3 add dict copy begin dup/CIDSystemInfo get/CIDSystemInfo exch def ct_DefineIdentity-H /_Type0Identity/Identity-H 3 -1 roll[exch]composefont /_basefont exch def /_Type0Identity/Identity-H 3 -1 
roll[exch]composefont /_basefonto exch def currentdict end /CIDFont defineresource } { ct_BoldBaseFont dup length 3 add dict copy begin /_basefont exch def /_basefonto exch def currentdict end definefont } ifelse } ifelse }bind def /ct_MakeBold{ 1 index 1 index findfont currentglobal 5 1 roll dup gcheck setglobal dup /FontType get 0 eq { dup/WMode known{dup/WMode get 1 eq}{false}ifelse version length 4 ge and {version 0 4 getinterval cvi 2015 ge} {true} ifelse {/ct_Type0WidthProc} {/ct_Type0WMode1WidthProc} ifelse ct_BoldBaseFont/_setwidthProc 3 -1 roll load put {ct_MakeBoldFont}ct_Type0CopyFont definefont } { dup/_fauxfont known not 1 index/SubstMaster known not and { ct_BoldBaseFont/_setwidthProc /ct_BoldRomanWidthProc load put ct_MakeBoldFont } { 2 index 2 index eq {exch pop } { dup length dict begin ct_CopyFont currentdict end definefont } ifelse } ifelse } ifelse pop pop pop setglobal }bind def /?str1 256 string def /?set { $SubstituteFont begin /$substituteFound false def /$fontname 1 index def /$doSmartSub false def end dup findfont $SubstituteFont begin $substituteFound {false} { dup/FontName known { dup/FontName get $fontname eq 1 index/DistillerFauxFont known not and /currentdistillerparams where {pop false 2 index isWidthsOnlyFont not and} if } {false} ifelse } ifelse exch pop /$doSmartSub true def end { 5 1 roll pop pop pop pop findfont } { 1 index findfont dup/FontType get 3 eq { 6 1 roll pop pop pop pop pop false } {pop true} ifelse { $SubstituteFont begin pop pop /$styleArray 1 index def /$regOrdering 2 index def pop pop 0 1 $styleArray length 1 sub { $styleArray exch get ct_StyleDicts $regOrdering 2 copy known { get exch 2 copy known not {pop/Default} if get dup type/nametype eq { ?str1 cvs length dup 1 add exch ?str1 exch(-)putinterval exch dup length exch ?str1 exch 3 index exch putinterval add ?str1 exch 0 exch getinterval cvn } { pop pop/Unknown } ifelse } { pop pop pop pop/Unknown } ifelse } for end findfont }if } ifelse currentglobal false 
setglobal 3 1 roll null copyfont definefont pop setglobal }bind def setpacking userdict/$SubstituteFont 25 dict put 1 dict begin /SubstituteFont dup $error exch 2 copy known {get} {pop pop{pop/Courier}bind} ifelse def /currentdistillerparams where dup { pop pop currentdistillerparams/CannotEmbedFontPolicy 2 copy known {get/Error eq} {pop pop false} ifelse } if not { countdictstack array dictstack 0 get begin userdict begin $SubstituteFont begin /$str 128 string def /$fontpat 128 string def /$slen 0 def /$sname null def /$match false def /$fontname null def /$substituteFound false def /$inVMIndex null def /$doSmartSub true def /$depth 0 def /$fontname null def /$italicangle 26.5 def /$dstack null def /$Strategies 10 dict dup begin /$Type3Underprint { currentglobal exch false setglobal 11 dict begin /UseFont exch $WMode 0 ne { dup length dict copy dup/WMode $WMode put /UseFont exch definefont } if def /FontName $fontname dup type/stringtype eq{cvn}if def /FontType 3 def /FontMatrix[.001 0 0 .001 0 0]def /Encoding 256 array dup 0 1 255{/.notdef put dup}for pop def /FontBBox[0 0 0 0]def /CCInfo 7 dict dup begin /cc null def /x 0 def /y 0 def end def /BuildChar { exch begin CCInfo begin 1 string dup 0 3 index put exch pop /cc exch def UseFont 1000 scalefont setfont cc stringwidth/y exch def/x exch def x y setcharwidth $SubstituteFont/$Strategy get/$Underprint get exec 0 0 moveto cc show x y moveto end end }bind def currentdict end exch setglobal }bind def /$GetaTint 2 dict dup begin /$BuildFont { dup/WMode known {dup/WMode get} {0} ifelse /$WMode exch def $fontname exch dup/FontName known { dup/FontName get dup type/stringtype eq{cvn}if } {/unnamedfont} ifelse exch Adobe_CoolType_Data/InVMDeepCopiedFonts get 1 index/FontName get known { pop Adobe_CoolType_Data/InVMDeepCopiedFonts get 1 index get null copyfont } {$deepcopyfont} ifelse exch 1 index exch/FontBasedOn exch put dup/FontName $fontname dup type/stringtype eq{cvn}if put definefont 
Adobe_CoolType_Data/InVMDeepCopiedFonts get begin dup/FontBasedOn get 1 index def end }bind def /$Underprint { gsave x abs y abs gt {/y 1000 def} {/x -1000 def 500 120 translate} ifelse Level2? { [/Separation(All)/DeviceCMYK{0 0 0 1 pop}] setcolorspace } {0 setgray} ifelse 10 setlinewidth x .8 mul [7 3] { y mul 8 div 120 sub x 10 div exch moveto 0 y 4 div neg rlineto dup 0 rlineto 0 y 4 div rlineto closepath gsave Level2? {.2 setcolor} {.8 setgray} ifelse fill grestore stroke } forall pop grestore }bind def end def /$Oblique 1 dict dup begin /$BuildFont { currentglobal exch dup gcheck setglobal null copyfont begin /FontBasedOn currentdict/FontName known { FontName dup type/stringtype eq{cvn}if } {/unnamedfont} ifelse def /FontName $fontname dup type/stringtype eq{cvn}if def /currentdistillerparams where {pop} { /FontInfo currentdict/FontInfo known {FontInfo null copyfont} {2 dict} ifelse dup begin /ItalicAngle $italicangle def /FontMatrix FontMatrix [1 0 ItalicAngle dup sin exch cos div 1 0 0] matrix concatmatrix readonly end 4 2 roll def def } ifelse FontName currentdict end definefont exch setglobal }bind def end def /$None 1 dict dup begin /$BuildFont{}bind def end def end def /$Oblique SetSubstituteStrategy /$findfontByEnum { dup type/stringtype eq{cvn}if dup/$fontname exch def $sname null eq {$str cvs dup length $slen sub $slen getinterval} {pop $sname} ifelse $fontpat dup 0(fonts/*)putinterval exch 7 exch putinterval /$match false def $SubstituteFont/$dstack countdictstack array dictstack put mark { $fontpat 0 $slen 7 add getinterval {/$match exch def exit} $str filenameforall } stopped { cleardictstack currentdict true $SubstituteFont/$dstack get { exch { 1 index eq {pop false} {true} ifelse } {begin false} ifelse } forall pop } if cleartomark /$slen 0 def $match false ne {$match(fonts/)anchorsearch pop pop cvn} {/Courier} ifelse }bind def /$ROS 1 dict dup begin /Adobe 4 dict dup begin /Japan1 [/Ryumin-Light/HeiseiMin-W3 /GothicBBB-Medium/HeiseiKakuGo-W5 
/HeiseiMaruGo-W4/Jun101-Light]def /Korea1 [/HYSMyeongJo-Medium/HYGoThic-Medium]def /GB1 [/STSong-Light/STHeiti-Regular]def /CNS1 [/MKai-Medium/MHei-Medium]def end def end def /$cmapname null def /$deepcopyfont { dup/FontType get 0 eq { 1 dict dup/FontName/copied put copyfont begin /FDepVector FDepVector copyarray 0 1 2 index length 1 sub { 2 copy get $deepcopyfont dup/FontName/copied put /copied exch definefont 3 copy put pop pop } for def currentdict end } {$Strategies/$Type3Underprint get exec} ifelse }bind def /$buildfontname { dup/CIDFont findresource/CIDSystemInfo get begin Registry length Ordering length Supplement 8 string cvs 3 copy length 2 add add add string dup 5 1 roll dup 0 Registry putinterval dup 4 index(-)putinterval dup 4 index 1 add Ordering putinterval 4 2 roll add 1 add 2 copy(-)putinterval end 1 add 2 copy 0 exch getinterval $cmapname $fontpat cvs exch anchorsearch {pop pop 3 2 roll putinterval cvn/$cmapname exch def} {pop pop pop pop pop} ifelse length $str 1 index(-)putinterval 1 add $str 1 index $cmapname $fontpat cvs putinterval $cmapname length add $str exch 0 exch getinterval cvn }bind def /$findfontByROS { /$fontname exch def $ROS Registry 2 copy known { get Ordering 2 copy known {get} {pop pop[]} ifelse } {pop pop[]} ifelse false exch { dup/CIDFont resourcestatus { pop pop save 1 index/CIDFont findresource dup/WidthsOnly known {dup/WidthsOnly get} {false} ifelse exch pop exch restore {pop} {exch pop true exit} ifelse } {pop} ifelse } forall {$str cvs $buildfontname} { false(*) { save exch dup/CIDFont findresource dup/WidthsOnly known {dup/WidthsOnly get not} {true} ifelse exch/CIDSystemInfo get dup/Registry get Registry eq exch/Ordering get Ordering eq and and {exch restore exch pop true exit} {pop restore} ifelse } $str/CIDFont resourceforall {$buildfontname} {$fontname $findfontByEnum} ifelse } ifelse }bind def end end currentdict/$error known currentdict/languagelevel known and dup {pop $error/SubstituteFont known} if dup {$error} 
{Adobe_CoolType_Core} ifelse begin { /SubstituteFont /CMap/Category resourcestatus { pop pop { $SubstituteFont begin /$substituteFound true def dup length $slen gt $sname null ne or $slen 0 gt and { $sname null eq {dup $str cvs dup length $slen sub $slen getinterval cvn} {$sname} ifelse Adobe_CoolType_Data/InVMFontsByCMap get 1 index 2 copy known { get false exch { pop currentglobal { GlobalFontDirectory 1 index known {exch pop true exit} {pop} ifelse } { FontDirectory 1 index known {exch pop true exit} { GlobalFontDirectory 1 index known {exch pop true exit} {pop} ifelse } ifelse } ifelse } forall } {pop pop false} ifelse { exch pop exch pop } { dup/CMap resourcestatus { pop pop dup/$cmapname exch def /CMap findresource/CIDSystemInfo get{def}forall $findfontByROS } { 128 string cvs dup(-)search { 3 1 roll search { 3 1 roll pop {dup cvi} stopped {pop pop pop pop pop $findfontByEnum} { 4 2 roll pop pop exch length exch 2 index length 2 index sub exch 1 sub -1 0 { $str cvs dup length 4 index 0 4 index 4 3 roll add getinterval exch 1 index exch 3 index exch putinterval dup/CMap resourcestatus { pop pop 4 1 roll pop pop pop dup/$cmapname exch def /CMap findresource/CIDSystemInfo get{def}forall $findfontByROS true exit } {pop} ifelse } for dup type/booleantype eq {pop} {pop pop pop $findfontByEnum} ifelse } ifelse } {pop pop pop $findfontByEnum} ifelse } {pop pop $findfontByEnum} ifelse } ifelse } ifelse } {//SubstituteFont exec} ifelse /$slen 0 def end } } { { $SubstituteFont begin /$substituteFound true def dup length $slen gt $sname null ne or $slen 0 gt and {$findfontByEnum} {//SubstituteFont exec} ifelse end } } ifelse bind readonly def Adobe_CoolType_Core/scfindfont/systemfindfont load put } { /scfindfont { $SubstituteFont begin dup systemfindfont dup/FontName known {dup/FontName get dup 3 index ne} {/noname true} ifelse dup { /$origfontnamefound 2 index def /$origfontname 4 index def/$substituteFound true def } if exch pop { $slen 0 gt $sname null ne 3 index 
length $slen gt or and { pop dup $findfontByEnum findfont dup maxlength 1 add dict begin {1 index/FID eq{pop pop}{def}ifelse} forall currentdict end definefont dup/FontName known{dup/FontName get}{null}ifelse $origfontnamefound ne { $origfontname $str cvs print ( substitution revised, using )print dup/FontName known {dup/FontName get}{(unspecified font)} ifelse $str cvs print(.\n)print } if } {exch pop} ifelse } {exch pop} ifelse end }bind def } ifelse end end Adobe_CoolType_Core_Defined not { Adobe_CoolType_Core/findfont { $SubstituteFont begin $depth 0 eq { /$fontname 1 index dup type/stringtype ne{$str cvs}if def /$substituteFound false def } if /$depth $depth 1 add def end scfindfont $SubstituteFont begin /$depth $depth 1 sub def $substituteFound $depth 0 eq and { $inVMIndex null ne {dup $inVMIndex $AddInVMFont} if $doSmartSub { currentdict/$Strategy known {$Strategy/$BuildFont get exec} if } if } if end }bind put } if } if end /$AddInVMFont { exch/FontName 2 copy known { get 1 dict dup begin exch 1 index gcheck def end exch Adobe_CoolType_Data/InVMFontsByCMap get exch $DictAdd } {pop pop pop} ifelse }bind def /$DictAdd { 2 copy known not {2 copy 4 index length dict put} if Level2? not { 2 copy get dup maxlength exch length 4 index length add lt 2 copy get dup length 4 index length add exch maxlength 1 index lt { 2 mul dict begin 2 copy get{forall}def 2 copy currentdict put end } {pop} ifelse } if get begin {def} forall end }bind def end end %%EndResource currentglobal true setglobal %%BeginResource: procset Adobe_CoolType_Utility_MAKEOCF 1.23 0 %%Copyright: Copyright 1987-2006 Adobe Systems Incorporated. %%Version: 1.23 0 systemdict/languagelevel known dup {currentglobal false setglobal} {false} ifelse exch userdict/Adobe_CoolType_Utility 2 copy known {2 copy get dup maxlength 27 add dict copy} {27 dict} ifelse put Adobe_CoolType_Utility begin /@eexecStartData def /@recognizeCIDFont null def /ct_Level2? exch def /ct_Clone? 
1183615869 internaldict dup /CCRun known not exch/eCCRun known not ct_Level2? and or def ct_Level2? {globaldict begin currentglobal true setglobal} if /ct_AddStdCIDMap ct_Level2? {{ mark Adobe_CoolType_Utility/@recognizeCIDFont currentdict put { ((Hex)57 StartData 0615 1e27 2c39 1c60 d8a8 cc31 fe2b f6e0 7aa3 e541 e21c 60d8 a8c9 c3d0 6d9e 1c60 d8a8 c9c2 02d7 9a1c 60d8 a849 1c60 d8a8 cc36 74f4 1144 b13b 77)0()/SubFileDecode filter cvx exec } stopped { cleartomark Adobe_CoolType_Utility/@recognizeCIDFont get countdictstack dup array dictstack exch 1 sub -1 0 { 2 copy get 3 index eq {1 index length exch sub 1 sub{end}repeat exit} {pop} ifelse } for pop pop Adobe_CoolType_Utility/@eexecStartData get eexec } {cleartomark} ifelse }} {{ Adobe_CoolType_Utility/@eexecStartData get eexec }} ifelse bind def userdict/cid_extensions known dup{cid_extensions/cid_UpdateDB known and}if { cid_extensions begin /cid_GetCIDSystemInfo { 1 index type/stringtype eq {exch cvn exch} if cid_extensions begin dup load 2 index known { 2 copy cid_GetStatusInfo dup null ne { 1 index load 3 index get dup null eq {pop pop cid_UpdateDB} { exch 1 index/Created get eq {exch pop exch pop} {pop cid_UpdateDB} ifelse } ifelse } {pop cid_UpdateDB} ifelse } {cid_UpdateDB} ifelse end }bind def end } if ct_Level2? {end setglobal} if /ct_UseNativeCapability? systemdict/composefont known def /ct_MakeOCF 35 dict def /ct_Vars 25 dict def /ct_GlyphDirProcs 6 dict def /ct_BuildCharDict 15 dict dup begin /charcode 2 string def /dst_string 1500 string def /nullstring()def /usewidths? 
true def end def ct_Level2?{setglobal}{pop}ifelse ct_GlyphDirProcs begin /GetGlyphDirectory { systemdict/languagelevel known {pop/CIDFont findresource/GlyphDirectory get} { 1 index/CIDFont findresource/GlyphDirectory get dup type/dicttype eq { dup dup maxlength exch length sub 2 index lt { dup length 2 index add dict copy 2 index /CIDFont findresource/GlyphDirectory 2 index put } if } if exch pop exch pop } ifelse + }def /+ { systemdict/languagelevel known { currentglobal false setglobal 3 dict begin /vm exch def } {1 dict begin} ifelse /$ exch def systemdict/languagelevel known { vm setglobal /gvm currentglobal def $ gcheck setglobal } if ?{$ begin}if }def /?{$ type/dicttype eq}def /|{ userdict/Adobe_CoolType_Data known { Adobe_CoolType_Data/AddWidths? known { currentdict Adobe_CoolType_Data begin begin AddWidths? { Adobe_CoolType_Data/CC 3 index put ?{def}{$ 3 1 roll put}ifelse CC charcode exch 1 index 0 2 index 256 idiv put 1 index exch 1 exch 256 mod put stringwidth 2 array astore currentfont/Widths get exch CC exch put } {?{def}{$ 3 1 roll put}ifelse} ifelse end end } {?{def}{$ 3 1 roll put}ifelse} ifelse } {?{def}{$ 3 1 roll put}ifelse} ifelse }def /! 
{ ?{end}if systemdict/languagelevel known {gvm setglobal} if end }def /:{string currentfile exch readstring pop}executeonly def end ct_MakeOCF begin /ct_cHexEncoding [/c00/c01/c02/c03/c04/c05/c06/c07/c08/c09/c0A/c0B/c0C/c0D/c0E/c0F/c10/c11/c12 /c13/c14/c15/c16/c17/c18/c19/c1A/c1B/c1C/c1D/c1E/c1F/c20/c21/c22/c23/c24/c25 /c26/c27/c28/c29/c2A/c2B/c2C/c2D/c2E/c2F/c30/c31/c32/c33/c34/c35/c36/c37/c38 /c39/c3A/c3B/c3C/c3D/c3E/c3F/c40/c41/c42/c43/c44/c45/c46/c47/c48/c49/c4A/c4B /c4C/c4D/c4E/c4F/c50/c51/c52/c53/c54/c55/c56/c57/c58/c59/c5A/c5B/c5C/c5D/c5E /c5F/c60/c61/c62/c63/c64/c65/c66/c67/c68/c69/c6A/c6B/c6C/c6D/c6E/c6F/c70/c71 /c72/c73/c74/c75/c76/c77/c78/c79/c7A/c7B/c7C/c7D/c7E/c7F/c80/c81/c82/c83/c84 /c85/c86/c87/c88/c89/c8A/c8B/c8C/c8D/c8E/c8F/c90/c91/c92/c93/c94/c95/c96/c97 /c98/c99/c9A/c9B/c9C/c9D/c9E/c9F/cA0/cA1/cA2/cA3/cA4/cA5/cA6/cA7/cA8/cA9/cAA /cAB/cAC/cAD/cAE/cAF/cB0/cB1/cB2/cB3/cB4/cB5/cB6/cB7/cB8/cB9/cBA/cBB/cBC/cBD /cBE/cBF/cC0/cC1/cC2/cC3/cC4/cC5/cC6/cC7/cC8/cC9/cCA/cCB/cCC/cCD/cCE/cCF/cD0 /cD1/cD2/cD3/cD4/cD5/cD6/cD7/cD8/cD9/cDA/cDB/cDC/cDD/cDE/cDF/cE0/cE1/cE2/cE3 /cE4/cE5/cE6/cE7/cE8/cE9/cEA/cEB/cEC/cED/cEE/cEF/cF0/cF1/cF2/cF3/cF4/cF5/cF6 /cF7/cF8/cF9/cFA/cFB/cFC/cFD/cFE/cFF]def /ct_CID_STR_SIZE 8000 def /ct_mkocfStr100 100 string def /ct_defaultFontMtx[.001 0 0 .001 0 0]def /ct_1000Mtx[1000 0 0 1000 0 0]def /ct_raise{exch cvx exch errordict exch get exec stop}bind def /ct_reraise {cvx $error/errorname get(Error: )print dup( )cvs print errordict exch get exec stop }bind def /ct_cvnsi { 1 index add 1 sub 1 exch 0 4 1 roll { 2 index exch get exch 8 bitshift add } for exch pop }bind def /ct_GetInterval { Adobe_CoolType_Utility/ct_BuildCharDict get begin /dst_index 0 def dup dst_string length gt {dup string/dst_string exch def} if 1 index ct_CID_STR_SIZE idiv /arrayIndex exch def 2 index arrayIndex get 2 index arrayIndex ct_CID_STR_SIZE mul sub { dup 3 index add 2 index length le { 2 index getinterval dst_string dst_index 2 index putinterval length dst_index 
add/dst_index exch def exit } { 1 index length 1 index sub dup 4 1 roll getinterval dst_string dst_index 2 index putinterval pop dup dst_index add/dst_index exch def sub /arrayIndex arrayIndex 1 add def 2 index dup length arrayIndex gt {arrayIndex get} { pop exit } ifelse 0 } ifelse } loop pop pop pop dst_string 0 dst_index getinterval end }bind def ct_Level2? { /ct_resourcestatus currentglobal mark true setglobal {/unknowninstancename/Category resourcestatus} stopped {cleartomark setglobal true} {cleartomark currentglobal not exch setglobal} ifelse { { mark 3 1 roll/Category findresource begin ct_Vars/vm currentglobal put ({ResourceStatus}stopped)0()/SubFileDecode filter cvx exec {cleartomark false} {{3 2 roll pop true}{cleartomark false}ifelse} ifelse ct_Vars/vm get setglobal end } } {{resourcestatus}} ifelse bind def /CIDFont/Category ct_resourcestatus {pop pop} { currentglobal true setglobal /Generic/Category findresource dup length dict copy dup/InstanceType/dicttype put /CIDFont exch/Category defineresource pop setglobal } ifelse ct_UseNativeCapability? 
{ /CIDInit/ProcSet findresource begin 12 dict begin begincmap /CIDSystemInfo 3 dict dup begin /Registry(Adobe)def /Ordering(Identity)def /Supplement 0 def end def /CMapName/Identity-H def /CMapVersion 1.000 def /CMapType 1 def 1 begincodespacerange <0000> endcodespacerange 1 begincidrange <0000>0 endcidrange endcmap CMapName currentdict/CMap defineresource pop end end } if } { /ct_Category 2 dict begin /CIDFont 10 dict def /ProcSet 2 dict def currentdict end def /defineresource { ct_Category 1 index 2 copy known { get dup dup maxlength exch length eq { dup length 10 add dict copy ct_Category 2 index 2 index put } if 3 index 3 index put pop exch pop } {pop pop/defineresource/undefined ct_raise} ifelse }bind def /findresource { ct_Category 1 index 2 copy known { get 2 index 2 copy known {get 3 1 roll pop pop} {pop pop/findresource/undefinedresource ct_raise} ifelse } {pop pop/findresource/undefined ct_raise} ifelse }bind def /resourcestatus { ct_Category 1 index 2 copy known { get 2 index known exch pop exch pop { 0 -1 true } { false } ifelse } {pop pop/findresource/undefined ct_raise} ifelse }bind def /ct_resourcestatus/resourcestatus load def } ifelse /ct_CIDInit 2 dict begin /ct_cidfont_stream_init { { dup(Binary)eq { pop null currentfile ct_Level2? { {cid_BYTE_COUNT()/SubFileDecode filter} stopped {pop pop pop} if } if /readstring load exit } if dup(Hex)eq { pop currentfile ct_Level2? 
{ {null exch/ASCIIHexDecode filter/readstring} stopped {pop exch pop(>)exch/readhexstring} if } {(>)exch/readhexstring} ifelse load exit } if /StartData/typecheck ct_raise } loop cid_BYTE_COUNT ct_CID_STR_SIZE le { 2 copy cid_BYTE_COUNT string exch exec pop 1 array dup 3 -1 roll 0 exch put } { cid_BYTE_COUNT ct_CID_STR_SIZE div ceiling cvi dup array exch 2 sub 0 exch 1 exch { 2 copy 5 index ct_CID_STR_SIZE string 6 index exec pop put pop } for 2 index cid_BYTE_COUNT ct_CID_STR_SIZE mod string 3 index exec pop 1 index exch 1 index length 1 sub exch put } ifelse cid_CIDFONT exch/GlyphData exch put 2 index null eq { pop pop pop } { pop/readstring load 1 string exch { 3 copy exec pop dup length 0 eq { pop pop pop pop pop true exit } if 4 index eq { pop pop pop pop false exit } if } loop pop } ifelse }bind def /StartData { mark { currentdict dup/FDArray get 0 get/FontMatrix get 0 get 0.001 eq { dup/CDevProc known not { /CDevProc 1183615869 internaldict/stdCDevProc 2 copy known {get} { pop pop {pop pop pop pop pop 0 -1000 7 index 2 div 880} } ifelse def } if } { /CDevProc { pop pop pop pop pop 0 1 cid_temp/cid_CIDFONT get /FDArray get 0 get /FontMatrix get 0 get div 7 index 2 div 1 index 0.88 mul }def } ifelse /cid_temp 15 dict def cid_temp begin /cid_CIDFONT exch def 3 copy pop dup/cid_BYTE_COUNT exch def 0 gt { ct_cidfont_stream_init FDArray { /Private get dup/SubrMapOffset known { begin /Subrs SubrCount array def Subrs SubrMapOffset SubrCount SDBytes ct_Level2? 
{ currentdict dup/SubrMapOffset undef dup/SubrCount undef /SDBytes undef } if end /cid_SD_BYTES exch def /cid_SUBR_COUNT exch def /cid_SUBR_MAP_OFFSET exch def /cid_SUBRS exch def cid_SUBR_COUNT 0 gt { GlyphData cid_SUBR_MAP_OFFSET cid_SD_BYTES ct_GetInterval 0 cid_SD_BYTES ct_cvnsi 0 1 cid_SUBR_COUNT 1 sub { exch 1 index 1 add cid_SD_BYTES mul cid_SUBR_MAP_OFFSET add GlyphData exch cid_SD_BYTES ct_GetInterval 0 cid_SD_BYTES ct_cvnsi cid_SUBRS 4 2 roll GlyphData exch 4 index 1 index sub ct_GetInterval dup length string copy put } for pop } if } {pop} ifelse } forall } if cleartomark pop pop end CIDFontName currentdict/CIDFont defineresource pop end end } stopped {cleartomark/StartData ct_reraise} if }bind def currentdict end def /ct_saveCIDInit { /CIDInit/ProcSet ct_resourcestatus {true} {/CIDInitC/ProcSet ct_resourcestatus} ifelse { pop pop /CIDInit/ProcSet findresource ct_UseNativeCapability? {pop null} {/CIDInit ct_CIDInit/ProcSet defineresource pop} ifelse } {/CIDInit ct_CIDInit/ProcSet defineresource pop null} ifelse ct_Vars exch/ct_oldCIDInit exch put }bind def /ct_restoreCIDInit { ct_Vars/ct_oldCIDInit get dup null ne {/CIDInit exch/ProcSet defineresource pop} {pop} ifelse }bind def /ct_BuildCharSetUp { 1 index begin CIDFont begin Adobe_CoolType_Utility/ct_BuildCharDict get begin /ct_dfCharCode exch def /ct_dfDict exch def CIDFirstByte ct_dfCharCode add dup CIDCount ge {pop 0} if /cid exch def { GlyphDirectory cid 2 copy known {get} {pop pop nullstring} ifelse dup length FDBytes sub 0 gt { dup FDBytes 0 ne {0 FDBytes ct_cvnsi} {pop 0} ifelse /fdIndex exch def dup length FDBytes sub FDBytes exch getinterval /charstring exch def exit } { pop cid 0 eq {/charstring nullstring def exit} if /cid 0 def } ifelse } loop }def /ct_SetCacheDevice { 0 0 moveto dup stringwidth 3 -1 roll true charpath pathbbox 0 -1000 7 index 2 div 880 setcachedevice2 0 0 moveto }def /ct_CloneSetCacheProc { 1 eq { stringwidth pop -2 div -880 0 -1000 setcharwidth moveto } { usewidths? 
{ currentfont/Widths get cid 2 copy known {get exch pop aload pop} {pop pop stringwidth} ifelse } {stringwidth} ifelse setcharwidth 0 0 moveto } ifelse }def /ct_Type3ShowCharString { ct_FDDict fdIndex 2 copy known {get} { currentglobal 3 1 roll 1 index gcheck setglobal ct_Type1FontTemplate dup maxlength dict copy begin FDArray fdIndex get dup/FontMatrix 2 copy known {get} {pop pop ct_defaultFontMtx} ifelse /FontMatrix exch dup length array copy def /Private get /Private exch def /Widths rootfont/Widths get def /CharStrings 1 dict dup/.notdef dup length string copy put def currentdict end /ct_Type1Font exch definefont dup 5 1 roll put setglobal } ifelse dup/CharStrings get 1 index/Encoding get ct_dfCharCode get charstring put rootfont/WMode 2 copy known {get} {pop pop 0} ifelse exch 1000 scalefont setfont ct_str1 0 ct_dfCharCode put ct_str1 exch ct_dfSetCacheProc ct_SyntheticBold { currentpoint ct_str1 show newpath moveto ct_str1 true charpath ct_StrokeWidth setlinewidth stroke } {ct_str1 show} ifelse }def /ct_Type4ShowCharString { ct_dfDict ct_dfCharCode charstring FDArray fdIndex get dup/FontMatrix get dup ct_defaultFontMtx ct_matrixeq not {ct_1000Mtx matrix concatmatrix concat} {pop} ifelse /Private get Adobe_CoolType_Utility/ct_Level2? get not { ct_dfDict/Private 3 -1 roll {put} 1183615869 internaldict/superexec get exec } if 1183615869 internaldict Adobe_CoolType_Utility/ct_Level2? get {1 index} {3 index/Private get mark 6 1 roll} ifelse dup/RunInt known {/RunInt get} {pop/CCRun} ifelse get exec Adobe_CoolType_Utility/ct_Level2? 
get not {cleartomark} if }bind def /ct_BuildCharIncremental { { Adobe_CoolType_Utility/ct_MakeOCF get begin ct_BuildCharSetUp ct_ShowCharString } stopped {stop} if end end end end }bind def /BaseFontNameStr(BF00)def /ct_Type1FontTemplate 14 dict begin /FontType 1 def /FontMatrix [0.001 0 0 0.001 0 0]def /FontBBox [-250 -250 1250 1250]def /Encoding ct_cHexEncoding def /PaintType 0 def currentdict end def /BaseFontTemplate 11 dict begin /FontMatrix [0.001 0 0 0.001 0 0]def /FontBBox [-250 -250 1250 1250]def /Encoding ct_cHexEncoding def /BuildChar/ct_BuildCharIncremental load def ct_Clone? { /FontType 3 def /ct_ShowCharString/ct_Type3ShowCharString load def /ct_dfSetCacheProc/ct_CloneSetCacheProc load def /ct_SyntheticBold false def /ct_StrokeWidth 1 def } { /FontType 4 def /Private 1 dict dup/lenIV 4 put def /CharStrings 1 dict dup/.notdefput def /PaintType 0 def /ct_ShowCharString/ct_Type4ShowCharString load def } ifelse /ct_str1 1 string def currentdict end def /BaseFontDictSize BaseFontTemplate length 5 add def /ct_matrixeq { true 0 1 5 { dup 4 index exch get exch 3 index exch get eq and dup not {exit} if } for exch pop exch pop }bind def /ct_makeocf { 15 dict begin exch/WMode exch def exch/FontName exch def /FontType 0 def /FMapType 2 def dup/FontMatrix known {dup/FontMatrix get/FontMatrix exch def} {/FontMatrix matrix def} ifelse /bfCount 1 index/CIDCount get 256 idiv 1 add dup 256 gt{pop 256}if def /Encoding 256 array 0 1 bfCount 1 sub{2 copy dup put pop}for bfCount 1 255{2 copy bfCount put pop}for def /FDepVector bfCount dup 256 lt{1 add}if array def BaseFontTemplate BaseFontDictSize dict copy begin /CIDFont exch def CIDFont/FontBBox known {CIDFont/FontBBox get/FontBBox exch def} if CIDFont/CDevProc known {CIDFont/CDevProc get/CDevProc exch def} if currentdict end BaseFontNameStr 3(0)putinterval 0 1 bfCount dup 256 eq{1 sub}if { FDepVector exch 2 index BaseFontDictSize dict copy begin dup/CIDFirstByte exch 256 mul def FontType 3 eq {/ct_FDDict 2 dict def} if 
currentdict end 1 index 16 BaseFontNameStr 2 2 getinterval cvrs pop BaseFontNameStr exch definefont put } for ct_Clone? {/Widths 1 index/CIDFont get/GlyphDirectory get length dict def} if FontName currentdict end definefont ct_Clone? { gsave dup 1000 scalefont setfont ct_BuildCharDict begin /usewidths? false def currentfont/Widths get begin exch/CIDFont get/GlyphDirectory get { pop dup charcode exch 1 index 0 2 index 256 idiv put 1 index exch 1 exch 256 mod put stringwidth 2 array astore def } forall end /usewidths? true def end grestore } {exch pop} ifelse }bind def currentglobal true setglobal /ct_ComposeFont { ct_UseNativeCapability? { 2 index/CMap ct_resourcestatus {pop pop exch pop} { /CIDInit/ProcSet findresource begin 12 dict begin begincmap /CMapName 3 index def /CMapVersion 1.000 def /CMapType 1 def exch/WMode exch def /CIDSystemInfo 3 dict dup begin /Registry(Adobe)def /Ordering CMapName ct_mkocfStr100 cvs (Adobe-)search { pop pop (-)search { dup length string copy exch pop exch pop } {pop(Identity)} ifelse } {pop (Identity)} ifelse def /Supplement 0 def end def 1 begincodespacerange <0000> endcodespacerange 1 begincidrange <0000>0 endcidrange endcmap CMapName currentdict/CMap defineresource pop end end } ifelse composefont } { 3 2 roll pop 0 get/CIDFont findresource ct_makeocf } ifelse }bind def setglobal /ct_MakeIdentity { ct_UseNativeCapability? 
{ 1 index/CMap ct_resourcestatus {pop pop} { /CIDInit/ProcSet findresource begin 12 dict begin begincmap /CMapName 2 index def /CMapVersion 1.000 def /CMapType 1 def /CIDSystemInfo 3 dict dup begin /Registry(Adobe)def /Ordering CMapName ct_mkocfStr100 cvs (Adobe-)search { pop pop (-)search {dup length string copy exch pop exch pop} {pop(Identity)} ifelse } {pop(Identity)} ifelse def /Supplement 0 def end def 1 begincodespacerange <0000> endcodespacerange 1 begincidrange <0000>0 endcidrange endcmap CMapName currentdict/CMap defineresource pop end end } ifelse composefont } { exch pop 0 get/CIDFont findresource ct_makeocf } ifelse }bind def currentdict readonly pop end end %%EndResource setglobal %%BeginResource: procset Adobe_CoolType_Utility_T42 1.0 0 %%Copyright: Copyright 1987-2004 Adobe Systems Incorporated. %%Version: 1.0 0 userdict/ct_T42Dict 15 dict put ct_T42Dict begin /Is2015? { version cvi 2015 ge }bind def /AllocGlyphStorage { Is2015? { pop } { {string}forall }ifelse }bind def /Type42DictBegin { 25 dict begin /FontName exch def /CharStrings 256 dict begin /.notdef 0 def currentdict end def /Encoding exch def /PaintType 0 def /FontType 42 def /FontMatrix[1 0 0 1 0 0]def 4 array astore cvx/FontBBox exch def /sfnts }bind def /Type42DictEnd { currentdict dup/FontName get exch definefont end ct_T42Dict exch dup/FontName get exch put }bind def /RD{string currentfile exch readstring pop}executeonly def /PrepFor2015 { Is2015? { /GlyphDirectory 16 dict def sfnts 0 get dup 2 index (glyx) putinterval 2 index (locx) putinterval pop pop } { pop pop }ifelse }bind def /AddT42Char { Is2015? { /GlyphDirectory get begin def end pop pop } { /sfnts get 4 index get 3 index 2 index putinterval pop pop pop pop }ifelse }bind def /T0AddT42Mtx2 { /CIDFont findresource/Metrics2 get begin def end }bind def end %%EndResource currentglobal true setglobal %%BeginFile: MMFauxFont.prc %%Copyright: Copyright 1987-2001 Adobe Systems Incorporated. %%All Rights Reserved. 
userdict /ct_EuroDict 10 dict put ct_EuroDict begin /ct_CopyFont { { 1 index /FID ne {def} {pop pop} ifelse} forall } def /ct_GetGlyphOutline { gsave initmatrix newpath exch findfont dup length 1 add dict begin ct_CopyFont /Encoding Encoding dup length array copy dup 4 -1 roll 0 exch put def currentdict end /ct_EuroFont exch definefont 1000 scalefont setfont 0 0 moveto [ <00> stringwidth <00> false charpath pathbbox [ {/m cvx} {/l cvx} {/c cvx} {/cp cvx} pathforall grestore counttomark 8 add } def /ct_MakeGlyphProc { ] cvx /ct_PSBuildGlyph cvx ] cvx } def /ct_PSBuildGlyph { gsave 8 -1 roll pop 7 1 roll 6 -2 roll ct_FontMatrix transform 6 2 roll 4 -2 roll ct_FontMatrix transform 4 2 roll ct_FontMatrix transform currentdict /PaintType 2 copy known {get 2 eq}{pop pop false} ifelse dup 9 1 roll { currentdict /StrokeWidth 2 copy known { get 2 div 0 ct_FontMatrix dtransform pop 5 1 roll 4 -1 roll 4 index sub 4 1 roll 3 -1 roll 4 index sub 3 1 roll exch 4 index add exch 4 index add 5 -1 roll pop } { pop pop } ifelse } if setcachedevice ct_FontMatrix concat ct_PSPathOps begin exec end { currentdict /StrokeWidth 2 copy known { get } { pop pop 0 } ifelse setlinewidth stroke } { fill } ifelse grestore } def /ct_PSPathOps 4 dict dup begin /m {moveto} def /l {lineto} def /c {curveto} def /cp {closepath} def end def /ct_matrix1000 [1000 0 0 1000 0 0] def /ct_AddGlyphProc { 2 index findfont dup length 4 add dict begin ct_CopyFont /CharStrings CharStrings dup length 1 add dict copy begin 3 1 roll def currentdict end def /ct_FontMatrix ct_matrix1000 FontMatrix matrix concatmatrix def /ct_PSBuildGlyph /ct_PSBuildGlyph load def /ct_PSPathOps /ct_PSPathOps load def currentdict end definefont pop } def systemdict /languagelevel known { /ct_AddGlyphToPrinterFont { 2 copy ct_GetGlyphOutline 3 add -1 roll restore ct_MakeGlyphProc ct_AddGlyphProc } def } { /ct_AddGlyphToPrinterFont { pop pop restore Adobe_CTFauxDict /$$$FONTNAME get /Euro Adobe_CTFauxDict /$$$SUBSTITUTEBASE get ct_EuroDict 
exch get ct_AddGlyphProc } def } ifelse /AdobeSansMM { 556 0 24 -19 541 703 { 541 628 m 510 669 442 703 354 703 c 201 703 117 607 101 444 c 50 444 l 25 372 l 97 372 l 97 301 l 49 301 l 24 229 l 103 229 l 124 67 209 -19 350 -19 c 435 -19 501 25 509 32 c 509 131 l 492 105 417 60 343 60 c 267 60 204 127 197 229 c 406 229 l 430 301 l 191 301 l 191 372 l 455 372 l 479 444 l 194 444 l 201 531 245 624 348 624 c 433 624 484 583 509 534 c cp 556 0 m } ct_PSBuildGlyph } def /AdobeSerifMM { 500 0 10 -12 484 692 { 347 298 m 171 298 l 170 310 170 322 170 335 c 170 362 l 362 362 l 374 403 l 172 403 l 184 580 244 642 308 642 c 380 642 434 574 457 457 c 481 462 l 474 691 l 449 691 l 433 670 429 657 410 657 c 394 657 360 692 299 692 c 204 692 94 604 73 403 c 22 403 l 10 362 l 70 362 l 69 352 69 341 69 330 c 69 319 69 308 70 298 c 22 298 l 10 257 l 73 257 l 97 57 216 -12 295 -12 c 364 -12 427 25 484 123 c 458 142 l 425 101 384 37 316 37 c 256 37 189 84 173 257 c 335 257 l cp 500 0 m } ct_PSBuildGlyph } def end %%EndFile setglobal Adobe_CoolType_Core begin /$Oblique SetSubstituteStrategy end %%BeginResource: procset Adobe_AGM_Image 1.0 0 +%%Version: 1.0 0 +%%Copyright: Copyright(C)2000-2006 Adobe Systems, Inc. All Rights Reserved. 
+systemdict/setpacking known +{ + currentpacking + true setpacking +}if +userdict/Adobe_AGM_Image 71 dict dup begin put +/Adobe_AGM_Image_Id/Adobe_AGM_Image_1.0_0 def +/nd{ + null def +}bind def +/AGMIMG_&image nd +/AGMIMG_&colorimage nd +/AGMIMG_&imagemask nd +/AGMIMG_mbuf()def +/AGMIMG_ybuf()def +/AGMIMG_kbuf()def +/AGMIMG_c 0 def +/AGMIMG_m 0 def +/AGMIMG_y 0 def +/AGMIMG_k 0 def +/AGMIMG_tmp nd +/AGMIMG_imagestring0 nd +/AGMIMG_imagestring1 nd +/AGMIMG_imagestring2 nd +/AGMIMG_imagestring3 nd +/AGMIMG_imagestring4 nd +/AGMIMG_imagestring5 nd +/AGMIMG_cnt nd +/AGMIMG_fsave nd +/AGMIMG_colorAry nd +/AGMIMG_override nd +/AGMIMG_name nd +/AGMIMG_maskSource nd +/AGMIMG_flushfilters nd +/invert_image_samples nd +/knockout_image_samples nd +/img nd +/sepimg nd +/devnimg nd +/idximg nd +/ds +{ + Adobe_AGM_Core begin + Adobe_AGM_Image begin + /AGMIMG_&image systemdict/image get def + /AGMIMG_&imagemask systemdict/imagemask get def + /colorimage where{ + pop + /AGMIMG_&colorimage/colorimage ldf + }if + end + end +}def +/ps +{ + Adobe_AGM_Image begin + /AGMIMG_ccimage_exists{/customcolorimage where + { + pop + /Adobe_AGM_OnHost_Seps where + { + pop false + }{ + /Adobe_AGM_InRip_Seps where + { + pop false + }{ + true + }ifelse + }ifelse + }{ + false + }ifelse + }bdf + level2{ + /invert_image_samples + { + Adobe_AGM_Image/AGMIMG_tmp Decode length ddf + /Decode[Decode 1 get Decode 0 get]def + }def + /knockout_image_samples + { + Operator/imagemask ne{ + /Decode[1 1]def + }if + }def + }{ + /invert_image_samples + { + {1 exch sub}currenttransfer addprocs settransfer + }def + /knockout_image_samples + { + {pop 1}currenttransfer addprocs settransfer + }def + }ifelse + /img/imageormask ldf + /sepimg/sep_imageormask ldf + /devnimg/devn_imageormask ldf + /idximg/indexed_imageormask ldf + /_ctype 7 def + currentdict{ + dup xcheck 1 index type dup/arraytype eq exch/packedarraytype eq or and{ + bind + }if + def + }forall +}def +/pt +{ + end +}def +/dt +{ +}def +/AGMIMG_flushfilters +{ 
+ dup type/arraytype ne + {1 array astore}if + dup 0 get currentfile ne + {dup 0 get flushfile}if + { + dup type/filetype eq + { + dup status 1 index currentfile ne and + {closefile} + {pop} + ifelse + }{pop}ifelse + }forall +}def +/AGMIMG_init_common +{ + currentdict/T known{/ImageType/T ldf currentdict/T undef}if + currentdict/W known{/Width/W ldf currentdict/W undef}if + currentdict/H known{/Height/H ldf currentdict/H undef}if + currentdict/M known{/ImageMatrix/M ldf currentdict/M undef}if + currentdict/BC known{/BitsPerComponent/BC ldf currentdict/BC undef}if + currentdict/D known{/Decode/D ldf currentdict/D undef}if + currentdict/DS known{/DataSource/DS ldf currentdict/DS undef}if + currentdict/O known{ + /Operator/O load 1 eq{ + /imagemask + }{ + /O load 2 eq{ + /image + }{ + /colorimage + }ifelse + }ifelse + def + currentdict/O undef + }if + currentdict/HSCI known{/HostSepColorImage/HSCI ldf currentdict/HSCI undef}if + currentdict/MD known{/MultipleDataSources/MD ldf currentdict/MD undef}if + currentdict/I known{/Interpolate/I ldf currentdict/I undef}if + currentdict/SI known{/SkipImageProc/SI ldf currentdict/SI undef}if + /DataSource load xcheck not{ + DataSource type/arraytype eq{ + DataSource 0 get type/filetype eq{ + /_Filters DataSource def + currentdict/MultipleDataSources known not{ + /DataSource DataSource dup length 1 sub get def + }if + }if + }if + currentdict/MultipleDataSources known not{ + /MultipleDataSources DataSource type/arraytype eq{ + DataSource length 1 gt + } + {false}ifelse def + }if + }if + /NComponents Decode length 2 div def + currentdict/SkipImageProc known not{/SkipImageProc{false}def}if +}bdf +/imageormask_sys +{ + begin + AGMIMG_init_common + save mark + level2{ + currentdict + Operator/imagemask eq{ + AGMIMG_&imagemask + }{ + use_mask{ + process_mask AGMIMG_&image + }{ + AGMIMG_&image + }ifelse + }ifelse + }{ + Width Height + Operator/imagemask eq{ + Decode 0 get 1 eq Decode 1 get 0 eq and + ImageMatrix/DataSource load + 
AGMIMG_&imagemask + }{ + BitsPerComponent ImageMatrix/DataSource load + AGMIMG_&image + }ifelse + }ifelse + currentdict/_Filters known{_Filters AGMIMG_flushfilters}if + cleartomark restore + end +}def +/overprint_plate +{ + currentoverprint{ + 0 get dup type/nametype eq{ + dup/DeviceGray eq{ + pop AGMCORE_black_plate not + }{ + /DeviceCMYK eq{ + AGMCORE_is_cmyk_sep not + }if + }ifelse + }{ + false exch + { + AGMOHS_sepink eq or + }forall + not + }ifelse + }{ + pop false + }ifelse +}def +/process_mask +{ + level3{ + dup begin + /ImageType 1 def + end + 4 dict begin + /DataDict exch def + /ImageType 3 def + /InterleaveType 3 def + /MaskDict 9 dict begin + /ImageType 1 def + /Width DataDict dup/MaskWidth known{/MaskWidth}{/Width}ifelse get def + /Height DataDict dup/MaskHeight known{/MaskHeight}{/Height}ifelse get def + /ImageMatrix[Width 0 0 Height neg 0 Height]def + /NComponents 1 def + /BitsPerComponent 1 def + /Decode DataDict dup/MaskD known{/MaskD}{[1 0]}ifelse get def + /DataSource Adobe_AGM_Core/AGMIMG_maskSource get def + currentdict end def + currentdict end + }if +}def +/use_mask +{ + dup/Mask known {dup/Mask get}{false}ifelse +}def +/imageormask +{ + begin + AGMIMG_init_common + SkipImageProc{ + currentdict consumeimagedata + } + { + save mark + level2 AGMCORE_host_sep not and{ + currentdict + Operator/imagemask eq DeviceN_PS2 not and{ + imagemask + }{ + AGMCORE_in_rip_sep currentoverprint and currentcolorspace 0 get/DeviceGray eq and{ + [/Separation/Black/DeviceGray{}]setcolorspace + /Decode[Decode 1 get Decode 0 get]def + }if + use_mask{ + process_mask image + }{ + DeviceN_NoneName DeviceN_PS2 Indexed_DeviceN level3 not and or or AGMCORE_in_rip_sep and + { + Names convert_to_process not{ + 2 dict begin + /imageDict xdf + /names_index 0 def + gsave + imageDict write_image_file{ + Names{ + dup(None)ne{ + [/Separation 3 -1 roll/DeviceGray{1 exch sub}]setcolorspace + Operator imageDict read_image_file + names_index 0 eq{true setoverprint}if + /names_index 
names_index 1 add def + }{ + pop + }ifelse + }forall + close_image_file + }if + grestore + end + }{ + Operator/imagemask eq{ + imagemask + }{ + image + }ifelse + }ifelse + }{ + Operator/imagemask eq{ + imagemask + }{ + image + }ifelse + }ifelse + }ifelse + }ifelse + }{ + Width Height + Operator/imagemask eq{ + Decode 0 get 1 eq Decode 1 get 0 eq and + ImageMatrix/DataSource load + /Adobe_AGM_OnHost_Seps where{ + pop imagemask + }{ + currentgray 1 ne{ + currentdict imageormask_sys + }{ + currentoverprint not{ + 1 AGMCORE_&setgray + currentdict imageormask_sys + }{ + currentdict ignoreimagedata + }ifelse + }ifelse + }ifelse + }{ + BitsPerComponent ImageMatrix + MultipleDataSources{ + 0 1 NComponents 1 sub{ + DataSource exch get + }for + }{ + /DataSource load + }ifelse + Operator/colorimage eq{ + AGMCORE_host_sep{ + MultipleDataSources level2 or NComponents 4 eq and{ + AGMCORE_is_cmyk_sep{ + MultipleDataSources{ + /DataSource DataSource 0 get xcheck + { + [ + DataSource 0 get/exec cvx + DataSource 1 get/exec cvx + DataSource 2 get/exec cvx + DataSource 3 get/exec cvx + /AGMCORE_get_ink_data cvx + ]cvx + }{ + DataSource aload pop AGMCORE_get_ink_data + }ifelse def + }{ + /DataSource + Width BitsPerComponent mul 7 add 8 idiv Height mul 4 mul + /DataSource load + filter_cmyk 0()/SubFileDecode filter def + }ifelse + /Decode[Decode 0 get Decode 1 get]def + /MultipleDataSources false def + /NComponents 1 def + /Operator/image def + invert_image_samples + 1 AGMCORE_&setgray + currentdict imageormask_sys + }{ + currentoverprint not Operator/imagemask eq and{ + 1 AGMCORE_&setgray + currentdict imageormask_sys + }{ + currentdict ignoreimagedata + }ifelse + }ifelse + }{ + MultipleDataSources NComponents AGMIMG_&colorimage + }ifelse + }{ + true NComponents colorimage + }ifelse + }{ + Operator/image eq{ + AGMCORE_host_sep{ + /DoImage true def + currentdict/HostSepColorImage known{HostSepColorImage not}{false}ifelse + { + AGMCORE_black_plate not Operator/imagemask ne and{ + 
/DoImage false def + currentdict ignoreimagedata + }if + }if + 1 AGMCORE_&setgray + DoImage + {currentdict imageormask_sys}if + }{ + use_mask{ + process_mask image + }{ + image + }ifelse + }ifelse + }{ + Operator/knockout eq{ + pop pop pop pop pop + currentcolorspace overprint_plate not{ + knockout_unitsq + }if + }if + }ifelse + }ifelse + }ifelse + }ifelse + cleartomark restore + }ifelse + currentdict/_Filters known{_Filters AGMIMG_flushfilters}if + end +}def +/sep_imageormask +{ + /sep_colorspace_dict AGMCORE_gget begin + CSA map_csa + begin + AGMIMG_init_common + SkipImageProc{ + currentdict consumeimagedata + }{ + save mark + AGMCORE_avoid_L2_sep_space{ + /Decode[Decode 0 get 255 mul Decode 1 get 255 mul]def + }if + AGMIMG_ccimage_exists + MappedCSA 0 get/DeviceCMYK eq and + currentdict/Components known and + Name()ne and + Name(All)ne and + Operator/image eq and + AGMCORE_producing_seps not and + level2 not and + { + Width Height BitsPerComponent ImageMatrix + [ + /DataSource load/exec cvx + { + 0 1 2 index length 1 sub{ + 1 index exch + 2 copy get 255 xor put + }for + }/exec cvx + ]cvx bind + MappedCSA 0 get/DeviceCMYK eq{ + Components aload pop + }{ + 0 0 0 Components aload pop 1 exch sub + }ifelse + Name findcmykcustomcolor + customcolorimage + }{ + AGMCORE_producing_seps not{ + level2{ + //Adobe_AGM_Core/AGMCORE_pattern_paint_type get 2 ne AGMCORE_avoid_L2_sep_space not and currentcolorspace 0 get/Separation ne and{ + [/Separation Name MappedCSA sep_proc_name exch dup 0 get 15 string cvs(/Device)anchorsearch{pop pop 0 get}{pop}ifelse exch load]setcolorspace_opt + /sep_tint AGMCORE_gget setcolor + }if + currentdict imageormask + }{ + currentdict + Operator/imagemask eq{ + imageormask + }{ + sep_imageormask_lev1 + }ifelse + }ifelse + }{ + AGMCORE_host_sep{ + Operator/knockout eq{ + currentdict/ImageMatrix get concat + knockout_unitsq + }{ + currentgray 1 ne{ + AGMCORE_is_cmyk_sep Name(All)ne and{ + level2{ + Name AGMCORE_IsSeparationAProcessColor + { + 
Operator/imagemask eq{ + //Adobe_AGM_Core/AGMCORE_pattern_paint_type get 2 ne{ + /sep_tint AGMCORE_gget 1 exch sub AGMCORE_&setcolor + }if + }{ + invert_image_samples + }ifelse + }{ + //Adobe_AGM_Core/AGMCORE_pattern_paint_type get 2 ne{ + [/Separation Name[/DeviceGray] + { + sep_colorspace_proc AGMCORE_get_ink_data + 1 exch sub + }bind + ]AGMCORE_&setcolorspace + /sep_tint AGMCORE_gget AGMCORE_&setcolor + }if + }ifelse + currentdict imageormask_sys + }{ + currentdict + Operator/imagemask eq{ + imageormask_sys + }{ + sep_image_lev1_sep + }ifelse + }ifelse + }{ + Operator/imagemask ne{ + invert_image_samples + }if + currentdict imageormask_sys + }ifelse + }{ + currentoverprint not Name(All)eq or Operator/imagemask eq and{ + currentdict imageormask_sys + }{ + currentoverprint not + { + gsave + knockout_unitsq + grestore + }if + currentdict consumeimagedata + }ifelse + }ifelse + }ifelse + }{ + //Adobe_AGM_Core/AGMCORE_pattern_paint_type get 2 ne{ + currentcolorspace 0 get/Separation ne{ + [/Separation Name MappedCSA sep_proc_name exch 0 get exch load]setcolorspace_opt + /sep_tint AGMCORE_gget setcolor + }if + }if + currentoverprint + MappedCSA 0 get/DeviceCMYK eq and + Name AGMCORE_IsSeparationAProcessColor not and + //Adobe_AGM_Core/AGMCORE_pattern_paint_type get 2 ne{Name inRip_spot_has_ink not and}{false}ifelse + Name(All)ne and{ + imageormask_l2_overprint + }{ + currentdict imageormask + }ifelse + }ifelse + }ifelse + }ifelse + cleartomark restore + }ifelse + currentdict/_Filters known{_Filters AGMIMG_flushfilters}if + end + end +}def +/colorSpaceElemCnt +{ + mark currentcolor counttomark dup 2 add 1 roll cleartomark +}bdf +/devn_sep_datasource +{ + 1 dict begin + /dataSource xdf + [ + 0 1 dataSource length 1 sub{ + dup currentdict/dataSource get/exch cvx/get cvx/exec cvx + /exch cvx names_index/ne cvx[/pop cvx]cvx/if cvx + }for + ]cvx bind + end +}bdf +/devn_alt_datasource +{ + 11 dict begin + /convProc xdf + /origcolorSpaceElemCnt xdf + /origMultipleDataSources 
xdf + /origBitsPerComponent xdf + /origDecode xdf + /origDataSource xdf + /dsCnt origMultipleDataSources{origDataSource length}{1}ifelse def + /DataSource origMultipleDataSources + { + [ + BitsPerComponent 8 idiv origDecode length 2 idiv mul string + 0 1 origDecode length 2 idiv 1 sub + { + dup 7 mul 1 add index exch dup BitsPerComponent 8 idiv mul exch + origDataSource exch get 0()/SubFileDecode filter + BitsPerComponent 8 idiv string/readstring cvx/pop cvx/putinterval cvx + }for + ]bind cvx + }{origDataSource}ifelse 0()/SubFileDecode filter def + [ + origcolorSpaceElemCnt string + 0 2 origDecode length 2 sub + { + dup origDecode exch get dup 3 -1 roll 1 add origDecode exch get exch sub 2 BitsPerComponent exp 1 sub div + 1 BitsPerComponent 8 idiv{DataSource/read cvx/not cvx{0}/if cvx/mul cvx}repeat/mul cvx/add cvx + }for + /convProc load/exec cvx + origcolorSpaceElemCnt 1 sub -1 0 + { + /dup cvx 2/add cvx/index cvx + 3 1/roll cvx/exch cvx 255/mul cvx/cvi cvx/put cvx + }for + ]bind cvx 0()/SubFileDecode filter + end +}bdf +/devn_imageormask +{ + /devicen_colorspace_dict AGMCORE_gget begin + CSA map_csa + 2 dict begin + dup + /srcDataStrs[3 -1 roll begin + AGMIMG_init_common + currentdict/MultipleDataSources known{MultipleDataSources{DataSource length}{1}ifelse}{1}ifelse + { + Width Decode length 2 div mul cvi + { + dup 65535 gt{1 add 2 div cvi}{exit}ifelse + }loop + string + }repeat + end]def + /dstDataStr srcDataStrs 0 get length string def + begin + AGMIMG_init_common + SkipImageProc{ + currentdict consumeimagedata + }{ + save mark + AGMCORE_producing_seps not{ + level3 not{ + Operator/imagemask ne{ + /DataSource[[ + DataSource Decode BitsPerComponent currentdict/MultipleDataSources known{MultipleDataSources}{false}ifelse + colorSpaceElemCnt/devicen_colorspace_dict AGMCORE_gget/TintTransform get + devn_alt_datasource 1/string cvx/readstring cvx/pop cvx]cvx colorSpaceElemCnt 1 sub{dup}repeat]def + /MultipleDataSources true def + /Decode colorSpaceElemCnt[exch{0 
1}repeat]def + }if + }if + currentdict imageormask + }{ + AGMCORE_host_sep{ + Names convert_to_process{ + CSA get_csa_by_name 0 get/DeviceCMYK eq{ + /DataSource + Width BitsPerComponent mul 7 add 8 idiv Height mul 4 mul + DataSource Decode BitsPerComponent currentdict/MultipleDataSources known{MultipleDataSources}{false}ifelse + 4/devicen_colorspace_dict AGMCORE_gget/TintTransform get + devn_alt_datasource + filter_cmyk 0()/SubFileDecode filter def + /MultipleDataSources false def + /Decode[1 0]def + /DeviceGray setcolorspace + currentdict imageormask_sys + }{ + AGMCORE_report_unsupported_color_space + AGMCORE_black_plate{ + /DataSource + DataSource Decode BitsPerComponent currentdict/MultipleDataSources known{MultipleDataSources}{false}ifelse + CSA get_csa_by_name 0 get/DeviceRGB eq{3}{1}ifelse/devicen_colorspace_dict AGMCORE_gget/TintTransform get + devn_alt_datasource + /MultipleDataSources false def + /Decode colorSpaceElemCnt[exch{0 1}repeat]def + currentdict imageormask_sys + }{ + gsave + knockout_unitsq + grestore + currentdict consumeimagedata + }ifelse + }ifelse + } + { + /devicen_colorspace_dict AGMCORE_gget/names_index known{ + Operator/imagemask ne{ + MultipleDataSources{ + /DataSource[DataSource devn_sep_datasource/exec cvx]cvx def + /MultipleDataSources false def + }{ + /DataSource/DataSource load dstDataStr srcDataStrs 0 get filter_devn def + }ifelse + invert_image_samples + }if + currentdict imageormask_sys + }{ + currentoverprint not Operator/imagemask eq and{ + currentdict imageormask_sys + }{ + currentoverprint not + { + gsave + knockout_unitsq + grestore + }if + currentdict consumeimagedata + }ifelse + }ifelse + }ifelse + }{ + currentdict imageormask + }ifelse + }ifelse + cleartomark restore + }ifelse + currentdict/_Filters known{_Filters AGMIMG_flushfilters}if + end + end + end +}def +/imageormask_l2_overprint +{ + currentdict + currentcmykcolor add add add 0 eq{ + currentdict consumeimagedata + }{ + level3{ + currentcmykcolor + /AGMIMG_k xdf + 
/AGMIMG_y xdf + /AGMIMG_m xdf + /AGMIMG_c xdf + Operator/imagemask eq{ + [/DeviceN[ + AGMIMG_c 0 ne{/Cyan}if + AGMIMG_m 0 ne{/Magenta}if + AGMIMG_y 0 ne{/Yellow}if + AGMIMG_k 0 ne{/Black}if + ]/DeviceCMYK{}]setcolorspace + AGMIMG_c 0 ne{AGMIMG_c}if + AGMIMG_m 0 ne{AGMIMG_m}if + AGMIMG_y 0 ne{AGMIMG_y}if + AGMIMG_k 0 ne{AGMIMG_k}if + setcolor + }{ + /Decode[Decode 0 get 255 mul Decode 1 get 255 mul]def + [/Indexed + [ + /DeviceN[ + AGMIMG_c 0 ne{/Cyan}if + AGMIMG_m 0 ne{/Magenta}if + AGMIMG_y 0 ne{/Yellow}if + AGMIMG_k 0 ne{/Black}if + ] + /DeviceCMYK{ + AGMIMG_k 0 eq{0}if + AGMIMG_y 0 eq{0 exch}if + AGMIMG_m 0 eq{0 3 1 roll}if + AGMIMG_c 0 eq{0 4 1 roll}if + } + ] + 255 + { + 255 div + mark exch + dup dup dup + AGMIMG_k 0 ne{ + /sep_tint AGMCORE_gget mul MappedCSA sep_proc_name exch pop load exec 4 1 roll pop pop pop + counttomark 1 roll + }{ + pop + }ifelse + AGMIMG_y 0 ne{ + /sep_tint AGMCORE_gget mul MappedCSA sep_proc_name exch pop load exec 4 2 roll pop pop pop + counttomark 1 roll + }{ + pop + }ifelse + AGMIMG_m 0 ne{ + /sep_tint AGMCORE_gget mul MappedCSA sep_proc_name exch pop load exec 4 3 roll pop pop pop + counttomark 1 roll + }{ + pop + }ifelse + AGMIMG_c 0 ne{ + /sep_tint AGMCORE_gget mul MappedCSA sep_proc_name exch pop load exec pop pop pop + counttomark 1 roll + }{ + pop + }ifelse + counttomark 1 add -1 roll pop + } + ]setcolorspace + }ifelse + imageormask_sys + }{ + write_image_file{ + currentcmykcolor + 0 ne{ + [/Separation/Black/DeviceGray{}]setcolorspace + gsave + /Black + [{1 exch sub/sep_tint AGMCORE_gget mul}/exec cvx MappedCSA sep_proc_name cvx exch pop{4 1 roll pop pop pop 1 exch sub}/exec cvx] + cvx modify_halftone_xfer + Operator currentdict read_image_file + grestore + }if + 0 ne{ + [/Separation/Yellow/DeviceGray{}]setcolorspace + gsave + /Yellow + [{1 exch sub/sep_tint AGMCORE_gget mul}/exec cvx MappedCSA sep_proc_name cvx exch pop{4 2 roll pop pop pop 1 exch sub}/exec cvx] + cvx modify_halftone_xfer + Operator currentdict 
read_image_file + grestore + }if + 0 ne{ + [/Separation/Magenta/DeviceGray{}]setcolorspace + gsave + /Magenta + [{1 exch sub/sep_tint AGMCORE_gget mul}/exec cvx MappedCSA sep_proc_name cvx exch pop{4 3 roll pop pop pop 1 exch sub}/exec cvx] + cvx modify_halftone_xfer + Operator currentdict read_image_file + grestore + }if + 0 ne{ + [/Separation/Cyan/DeviceGray{}]setcolorspace + gsave + /Cyan + [{1 exch sub/sep_tint AGMCORE_gget mul}/exec cvx MappedCSA sep_proc_name cvx exch pop{pop pop pop 1 exch sub}/exec cvx] + cvx modify_halftone_xfer + Operator currentdict read_image_file + grestore + }if + close_image_file + }{ + imageormask + }ifelse + }ifelse + }ifelse +}def +/indexed_imageormask +{ + begin + AGMIMG_init_common + save mark + currentdict + AGMCORE_host_sep{ + Operator/knockout eq{ + /indexed_colorspace_dict AGMCORE_gget dup/CSA known{ + /CSA get get_csa_by_name + }{ + /Names get + }ifelse + overprint_plate not{ + knockout_unitsq + }if + }{ + Indexed_DeviceN{ + /devicen_colorspace_dict AGMCORE_gget dup/names_index known exch/Names get convert_to_process or{ + indexed_image_lev2_sep + }{ + currentoverprint not{ + knockout_unitsq + }if + currentdict consumeimagedata + }ifelse + }{ + AGMCORE_is_cmyk_sep{ + Operator/imagemask eq{ + imageormask_sys + }{ + level2{ + indexed_image_lev2_sep + }{ + indexed_image_lev1_sep + }ifelse + }ifelse + }{ + currentoverprint not{ + knockout_unitsq + }if + currentdict consumeimagedata + }ifelse + }ifelse + }ifelse + }{ + level2{ + Indexed_DeviceN{ + /indexed_colorspace_dict AGMCORE_gget begin + }{ + /indexed_colorspace_dict AGMCORE_gget dup null ne + { + begin + currentdict/CSDBase known{CSDBase/CSD get_res/MappedCSA get}{CSA}ifelse + get_csa_by_name 0 get/DeviceCMYK eq ps_level 3 ge and ps_version 3015.007 lt and + AGMCORE_in_rip_sep and{ + [/Indexed[/DeviceN[/Cyan/Magenta/Yellow/Black]/DeviceCMYK{}]HiVal Lookup] + setcolorspace + }if + end + } + {pop}ifelse + }ifelse + imageormask + Indexed_DeviceN{ + end + }if + }{ + 
Operator/imagemask eq{ + imageormask + }{ + indexed_imageormask_lev1 + }ifelse + }ifelse + }ifelse + cleartomark restore + currentdict/_Filters known{_Filters AGMIMG_flushfilters}if + end +}def +/indexed_image_lev2_sep +{ + /indexed_colorspace_dict AGMCORE_gget begin + begin + Indexed_DeviceN not{ + currentcolorspace + dup 1/DeviceGray put + dup 3 + currentcolorspace 2 get 1 add string + 0 1 2 3 AGMCORE_get_ink_data 4 currentcolorspace 3 get length 1 sub + { + dup 4 idiv exch currentcolorspace 3 get exch get 255 exch sub 2 index 3 1 roll put + }for + put setcolorspace + }if + currentdict + Operator/imagemask eq{ + AGMIMG_&imagemask + }{ + use_mask{ + process_mask AGMIMG_&image + }{ + AGMIMG_&image + }ifelse + }ifelse + end end +}def + /OPIimage + { + dup type/dicttype ne{ + 10 dict begin + /DataSource xdf + /ImageMatrix xdf + /BitsPerComponent xdf + /Height xdf + /Width xdf + /ImageType 1 def + /Decode[0 1 def] + currentdict + end + }if + dup begin + /NComponents 1 cdndf + /MultipleDataSources false cdndf + /SkipImageProc{false}cdndf + /Decode[ + 0 + currentcolorspace 0 get/Indexed eq{ + 2 BitsPerComponent exp 1 sub + }{ + 1 + }ifelse + ]cdndf + /Operator/image cdndf + end + /sep_colorspace_dict AGMCORE_gget null eq{ + imageormask + }{ + gsave + dup begin invert_image_samples end + sep_imageormask + grestore + }ifelse + }def +/cachemask_level2 +{ + 3 dict begin + /LZWEncode filter/WriteFilter xdf + /readBuffer 256 string def + /ReadFilter + currentfile + 0(%EndMask)/SubFileDecode filter + /ASCII85Decode filter + /RunLengthDecode filter + def + { + ReadFilter readBuffer readstring exch + WriteFilter exch writestring + not{exit}if + }loop + WriteFilter closefile + end +}def +/spot_alias +{ + /mapto_sep_imageormask + { + dup type/dicttype ne{ + 12 dict begin + /ImageType 1 def + /DataSource xdf + /ImageMatrix xdf + /BitsPerComponent xdf + /Height xdf + /Width xdf + /MultipleDataSources false def + }{ + begin + }ifelse + /Decode[/customcolor_tint AGMCORE_gget 0]def + 
/Operator/image def + /SkipImageProc{false}def + currentdict + end + sep_imageormask + }bdf + /customcolorimage + { + Adobe_AGM_Image/AGMIMG_colorAry xddf + /customcolor_tint AGMCORE_gget + << + /Name AGMIMG_colorAry 4 get + /CSA[/DeviceCMYK] + /TintMethod/Subtractive + /TintProc null + /MappedCSA null + /NComponents 4 + /Components[AGMIMG_colorAry aload pop pop] + >> + setsepcolorspace + mapto_sep_imageormask + }ndf + Adobe_AGM_Image/AGMIMG_&customcolorimage/customcolorimage load put + /customcolorimage + { + Adobe_AGM_Image/AGMIMG_override false put + current_spot_alias{dup 4 get map_alias}{false}ifelse + { + false set_spot_alias + /customcolor_tint AGMCORE_gget exch setsepcolorspace + pop + mapto_sep_imageormask + true set_spot_alias + }{ + //Adobe_AGM_Image/AGMIMG_&customcolorimage get exec + }ifelse + }bdf +}def +/snap_to_device +{ + 6 dict begin + matrix currentmatrix + dup 0 get 0 eq 1 index 3 get 0 eq and + 1 index 1 get 0 eq 2 index 2 get 0 eq and or exch pop + { + 1 1 dtransform 0 gt exch 0 gt/AGMIMG_xSign? exch def/AGMIMG_ySign? 
exch def + 0 0 transform + AGMIMG_ySign?{floor 0.1 sub}{ceiling 0.1 add}ifelse exch + AGMIMG_xSign?{floor 0.1 sub}{ceiling 0.1 add}ifelse exch + itransform/AGMIMG_llY exch def/AGMIMG_llX exch def + 1 1 transform + AGMIMG_ySign?{ceiling 0.1 add}{floor 0.1 sub}ifelse exch + AGMIMG_xSign?{ceiling 0.1 add}{floor 0.1 sub}ifelse exch + itransform/AGMIMG_urY exch def/AGMIMG_urX exch def + [AGMIMG_urX AGMIMG_llX sub 0 0 AGMIMG_urY AGMIMG_llY sub AGMIMG_llX AGMIMG_llY]concat + }{ + }ifelse + end +}def +level2 not{ + /colorbuf + { + 0 1 2 index length 1 sub{ + dup 2 index exch get + 255 exch sub + 2 index + 3 1 roll + put + }for + }def + /tint_image_to_color + { + begin + Width Height BitsPerComponent ImageMatrix + /DataSource load + end + Adobe_AGM_Image begin + /AGMIMG_mbuf 0 string def + /AGMIMG_ybuf 0 string def + /AGMIMG_kbuf 0 string def + { + colorbuf dup length AGMIMG_mbuf length ne + { + dup length dup dup + /AGMIMG_mbuf exch string def + /AGMIMG_ybuf exch string def + /AGMIMG_kbuf exch string def + }if + dup AGMIMG_mbuf copy AGMIMG_ybuf copy AGMIMG_kbuf copy pop + } + addprocs + {AGMIMG_mbuf}{AGMIMG_ybuf}{AGMIMG_kbuf}true 4 colorimage + end + }def + /sep_imageormask_lev1 + { + begin + MappedCSA 0 get dup/DeviceRGB eq exch/DeviceCMYK eq or has_color not and{ + { + 255 mul round cvi GrayLookup exch get + }currenttransfer addprocs settransfer + currentdict imageormask + }{ + /sep_colorspace_dict AGMCORE_gget/Components known{ + MappedCSA 0 get/DeviceCMYK eq{ + Components aload pop + }{ + 0 0 0 Components aload pop 1 exch sub + }ifelse + Adobe_AGM_Image/AGMIMG_k xddf + Adobe_AGM_Image/AGMIMG_y xddf + Adobe_AGM_Image/AGMIMG_m xddf + Adobe_AGM_Image/AGMIMG_c xddf + AGMIMG_y 0.0 eq AGMIMG_m 0.0 eq and AGMIMG_c 0.0 eq and{ + {AGMIMG_k mul 1 exch sub}currenttransfer addprocs settransfer + currentdict imageormask + }{ + currentcolortransfer + {AGMIMG_k mul 1 exch sub}exch addprocs 4 1 roll + {AGMIMG_y mul 1 exch sub}exch addprocs 4 1 roll + {AGMIMG_m mul 1 exch sub}exch 
addprocs 4 1 roll + {AGMIMG_c mul 1 exch sub}exch addprocs 4 1 roll + setcolortransfer + currentdict tint_image_to_color + }ifelse + }{ + MappedCSA 0 get/DeviceGray eq{ + {255 mul round cvi ColorLookup exch get 0 get}currenttransfer addprocs settransfer + currentdict imageormask + }{ + MappedCSA 0 get/DeviceCMYK eq{ + currentcolortransfer + {255 mul round cvi ColorLookup exch get 3 get 1 exch sub}exch addprocs 4 1 roll + {255 mul round cvi ColorLookup exch get 2 get 1 exch sub}exch addprocs 4 1 roll + {255 mul round cvi ColorLookup exch get 1 get 1 exch sub}exch addprocs 4 1 roll + {255 mul round cvi ColorLookup exch get 0 get 1 exch sub}exch addprocs 4 1 roll + setcolortransfer + currentdict tint_image_to_color + }{ + currentcolortransfer + {pop 1}exch addprocs 4 1 roll + {255 mul round cvi ColorLookup exch get 2 get}exch addprocs 4 1 roll + {255 mul round cvi ColorLookup exch get 1 get}exch addprocs 4 1 roll + {255 mul round cvi ColorLookup exch get 0 get}exch addprocs 4 1 roll + setcolortransfer + currentdict tint_image_to_color + }ifelse + }ifelse + }ifelse + }ifelse + end + }def + /sep_image_lev1_sep + { + begin + /sep_colorspace_dict AGMCORE_gget/Components known{ + Components aload pop + Adobe_AGM_Image/AGMIMG_k xddf + Adobe_AGM_Image/AGMIMG_y xddf + Adobe_AGM_Image/AGMIMG_m xddf + Adobe_AGM_Image/AGMIMG_c xddf + {AGMIMG_c mul 1 exch sub} + {AGMIMG_m mul 1 exch sub} + {AGMIMG_y mul 1 exch sub} + {AGMIMG_k mul 1 exch sub} + }{ + {255 mul round cvi ColorLookup exch get 0 get 1 exch sub} + {255 mul round cvi ColorLookup exch get 1 get 1 exch sub} + {255 mul round cvi ColorLookup exch get 2 get 1 exch sub} + {255 mul round cvi ColorLookup exch get 3 get 1 exch sub} + }ifelse + AGMCORE_get_ink_data currenttransfer addprocs settransfer + currentdict imageormask_sys + end + }def + /indexed_imageormask_lev1 + { + /indexed_colorspace_dict AGMCORE_gget begin + begin + currentdict + MappedCSA 0 get dup/DeviceRGB eq exch/DeviceCMYK eq or has_color not and{ + {HiVal mul 
round cvi GrayLookup exch get HiVal div}currenttransfer addprocs settransfer + imageormask + }{ + MappedCSA 0 get/DeviceGray eq{ + {HiVal mul round cvi Lookup exch get HiVal div}currenttransfer addprocs settransfer + imageormask + }{ + MappedCSA 0 get/DeviceCMYK eq{ + currentcolortransfer + {4 mul HiVal mul round cvi 3 add Lookup exch get HiVal div 1 exch sub}exch addprocs 4 1 roll + {4 mul HiVal mul round cvi 2 add Lookup exch get HiVal div 1 exch sub}exch addprocs 4 1 roll + {4 mul HiVal mul round cvi 1 add Lookup exch get HiVal div 1 exch sub}exch addprocs 4 1 roll + {4 mul HiVal mul round cvi Lookup exch get HiVal div 1 exch sub}exch addprocs 4 1 roll + setcolortransfer + tint_image_to_color + }{ + currentcolortransfer + {pop 1}exch addprocs 4 1 roll + {3 mul HiVal mul round cvi 2 add Lookup exch get HiVal div}exch addprocs 4 1 roll + {3 mul HiVal mul round cvi 1 add Lookup exch get HiVal div}exch addprocs 4 1 roll + {3 mul HiVal mul round cvi Lookup exch get HiVal div}exch addprocs 4 1 roll + setcolortransfer + tint_image_to_color + }ifelse + }ifelse + }ifelse + end end + }def + /indexed_image_lev1_sep + { + /indexed_colorspace_dict AGMCORE_gget begin + begin + {4 mul HiVal mul round cvi Lookup exch get HiVal div 1 exch sub} + {4 mul HiVal mul round cvi 1 add Lookup exch get HiVal div 1 exch sub} + {4 mul HiVal mul round cvi 2 add Lookup exch get HiVal div 1 exch sub} + {4 mul HiVal mul round cvi 3 add Lookup exch get HiVal div 1 exch sub} + AGMCORE_get_ink_data currenttransfer addprocs settransfer + currentdict imageormask_sys + end end + }def +}if +end +systemdict/setpacking known +{setpacking}if +%%EndResource +currentdict Adobe_AGM_Utils eq {end} if +%%EndProlog +%%BeginSetup +Adobe_AGM_Utils begin +2 2010 Adobe_AGM_Core/ds gx +Adobe_CoolType_Core/ds get exec Adobe_AGM_Image/ds gx +currentdict Adobe_AGM_Utils eq {end} if +%%EndSetup +%%Page: (Page 1) 1 +%%EndPageComments +%%BeginPageSetup +%ADOBeginClientInjection: PageSetup Start "AI11EPS" 
+%AI12_RMC_Transparency: Balance=75 RasterRes=300 GradRes=150 Text=0 Stroke=1 Clip=1 OP=0 +%ADOEndClientInjection: PageSetup Start "AI11EPS" +Adobe_AGM_Utils begin +Adobe_AGM_Core/ps gx +Adobe_AGM_Utils/capture_cpd gx +Adobe_CoolType_Core/ps get exec Adobe_AGM_Image/ps gx +%ADOBeginClientInjection: PageSetup End "AI11EPS" +/currentdistillerparams where {pop currentdistillerparams /CoreDistVersion get 5000 lt} {true} ifelse { userdict /AI11_PDFMark5 /cleartomark load put userdict /AI11_ReadMetadata_PDFMark5 {flushfile cleartomark } bind put} { userdict /AI11_PDFMark5 /pdfmark load put userdict /AI11_ReadMetadata_PDFMark5 {/PUT pdfmark} bind put } ifelse [/NamespacePush AI11_PDFMark5 [/_objdef {ai_metadata_stream_123} /type /stream /OBJ AI11_PDFMark5 [{ai_metadata_stream_123} currentfile 0 (% &&end XMP packet marker&&) /SubFileDecode filter AI11_ReadMetadata_PDFMark5 + + + + application/postscript + + + Web + + + + + Adobe Illustrator CS3 + 2017-04-03T09:54:57+02:00 + 2017-04-03T10:03:08+02:00 + 2017-04-03T10:03:08+02:00 + + + + 256 + 96 + JPEG + /9j/4AAQSkZJRgABAgEASABIAAD/7QAsUGhvdG9zaG9wIDMuMAA4QklNA+0AAAAAABAASAAAAAEA AQBIAAAAAQAB/+4ADkFkb2JlAGTAAAAAAf/bAIQABgQEBAUEBgUFBgkGBQYJCwgGBggLDAoKCwoK DBAMDAwMDAwQDA4PEA8ODBMTFBQTExwbGxscHx8fHx8fHx8fHwEHBwcNDA0YEBAYGhURFRofHx8f Hx8fHx8fHx8fHx8fHx8fHx8fHx8fHx8fHx8fHx8fHx8fHx8fHx8fHx8fHx8f/8AAEQgAYAEAAwER AAIRAQMRAf/EAaIAAAAHAQEBAQEAAAAAAAAAAAQFAwIGAQAHCAkKCwEAAgIDAQEBAQEAAAAAAAAA AQACAwQFBgcICQoLEAACAQMDAgQCBgcDBAIGAnMBAgMRBAAFIRIxQVEGE2EicYEUMpGhBxWxQiPB UtHhMxZi8CRygvElQzRTkqKyY3PCNUQnk6OzNhdUZHTD0uIIJoMJChgZhJRFRqS0VtNVKBry4/PE 1OT0ZXWFlaW1xdXl9WZ2hpamtsbW5vY3R1dnd4eXp7fH1+f3OEhYaHiImKi4yNjo+Ck5SVlpeYmZ qbnJ2en5KjpKWmp6ipqqusra6voRAAICAQIDBQUEBQYECAMDbQEAAhEDBCESMUEFURNhIgZxgZEy obHwFMHR4SNCFVJicvEzJDRDghaSUyWiY7LCB3PSNeJEgxdUkwgJChgZJjZFGidkdFU38qOzwygp 0+PzhJSktMTU5PRldYWVpbXF1eX1RlZmdoaWprbG1ub2R1dnd4eXp7fH1+f3OEhYaHiImKi4yNjo +DlJWWl5iZmpucnZ6fkqOkpaanqKmqq6ytrq+v/aAAwDAQACEQMRAD8A9U4q7FXlf5yfnzo3kBP0 
bZxrqXmaVA6WRJEUCsPhknYePUINz7ChxV8pebPzc/MPzTPI+q63ceg5NLK3cwWyjsBFGVU08Wqf fFWIVNa1361xVk/l380PzB8uyo+ka/eQIhBEDSmWA08YZecZ/wCBxV71+W//ADlnbXUsWneebZLR 2IVdYtFYxV/4uh+Jl/1kJ/1QMVfRVrdWt3bRXVrMk9tMoeGaJg6OrCoZWFQQcVVcVdirsVdirsVd irsVdirsVdirsVdirsVdirsVdirsVdirsVdirsVdirsVdirsVeW/nN+emkfl/bjT7SNdQ8zXEfOC zJ/dwqdlkuCN6H9lBu3sN8VfGM02teZNeaWVnvtY1W4qzEjlJNM30AVJ+Q+WQyZIwiZSNAJjEk0O b6M8gfkl5Y0a2SfV4I9W1QrWZ515wITvxjib4SB/Mwr32rTOF1/bmXNKoEwh5c/if0fe73DoYQHq 3kz/APw75f4cP0ZacKcePoR0p0pTjmq/MZP50vmXJ8OPcGLeZPyX8ga3E9NPXTboj4LmxpDQ+8YH pN9K/Tmfpu2tRiP1cQ7pb/bzcfJo8cule58//mH+V2u+S7hXnIu9KmbjbahGCAT14SLvwenapB7H rTsOzu1MepG20xzH6u91Oo00sZ7wzj/nHX86LryvrMHljWZy/lrUJPTgZz/vHcSNRXUnpE7H4x0H 2v5q7Nxn2NirsVdirsVdirsVdirsVdirsVdirsVdirsVdirsVdirsVQWrapBptoZ5fiY/DHGOrN/ TxzC1+uhpsfHLc9B3lv0+A5ZUGIS6xqd2xllnZQ32YkJVAPkOv05wmp7Uz5jZkQO4bD8e93kNNjh sAsS8u0PJJpFPiGI/jmNHVZYmxKQ+JZnFE8wEzsfM15CwW4/fx9ydnHyPf6c3Gj9oM2M1k9cft/H vcTLoIS+nYsmtLuC6hE0LckP3g+BGdjptTDNATgbDqMmOUDRVsyGtJPO/mm18qeU9U8w3Q5x6dA0 ix1pzkJCxR1/y5GVfpxV+fepajrXmXX5r27d73V9Unqx6s8srUVVH3KoHQbZGUhEEnkEgEmg+i/y 3/KbSfK8MF/doLvzAVq9wd0hLijJCvTYGnM7n2BpnBdp9sT1BMY7Y+7v9/6nf6XRxxizvJ6XHCFW lTU9SDmnBcu2zET0dh9P9cPF5LbQE6/tBx77H8MNxPkuyF1fSbDWtLudL1GH1LS6QxzRnwPQg9iD uD2OTxZZYpicTuGE4CQo8nxt5p0C58v+Yb/Rrg1kspmjDkU5p1R6f5SENnpWl1AzY4zH8Qeby4zC Riej7F/5x4/NNPOnlJbC9YDXtESOC7BO80QXjFcCu9W40f8AyvmMyGt6virsVdirsVdirsVdirsV dirsVdirsVdirsVdirsVQmqalDp1m1xLuRtGndmPQZh67WR0+Mzl8B3luwYTklwhgV7qN7qlyGuH qBXgg2VQetBnn2s12TUS4pn3DoHocWCOIVFsRqAACQB7nMW2drTE/VZGB96EZLiHcm/JwMy/aAce K7H7jjsV2TXQtRa1vUqSIZSEkB9+h+jNn2RrTgzCz6JbH9fwcPWYOOHmGaZ6E6B8nf8AOV35j3l9 5jXyVZTMmm6YqS6iqmgmupFEiK1OqxIy0H8xPgMVYf8AkX5M1LUPMcHmJ41Glaa7gtJ1kmMTKqoP 8guGJzn+39dCGI4v45fYL/S7Ds/AZT4+gfR0Ks0goK0339s4cO9KLJn7Kv8AwR/pkqDHZbzuB/us H5N/UYaj3pod7hOR9qNl+QqPwx4O4o4fNeskbbBhXw7/AHZExIQQXkv/ADkR5Rtb3y0vmOKMLfaY 6JNIBu9vKwTi3jxdgR4b50Ps7qzHL4R+mX3j9jru0MQMeLqHnP8Azjx5oPl/81dJaSX0rPUi2nXV 
TRSJx+6r/wA9lQ52zpn3PirsVdirsVdirsVdirsVdirsVdirsVdirsVdirsVYR5u1SG7uo4IGLJb cg7dixpWnjSmcN2/ro5sghDcQv5u97PwGEST/Ek9sr7soB7bmn8DmhFOeaVi0/ZF/wCCP9MlUUbN epOOsVfkwx4Y96aHe2Jx+0rL8xt+GPAjhXq6t9kg/LIkUghl/lq9e4sjFIavAQoP+Sfs/qzuewNW cuExlzht8Ojo9fiEZ2Or4b/Oq5huPzX80SQyCWP69InNdxyjojD/AGLKRm+cF7Z+STo35a6UFBBR rlWJBFT9ZkO3jsc8+7dH+Fy+H+5D0Og/uh8fvZ/bOFc1r07AnNSA5ZCuZx/K5/2JyXB7kcLvrC91 cf7E48C8LhcReJHzBGPAV4Su5xNtyU+1RgohFFBa/otnrei3mk3i1tryJonp1FfssPdWoRlmnzSx TE484lhkgJRIPV8WX1pdaZqdxaSEpdWUzxOVJBWSJypoeuxXPT8cxOIkORFvMyjRp+g/5f8AmE+Y vJGh625rNfWUMtx/xm4ASgfKQNk0MgxV2KuxV2KuxV2KuxV2KuxV2KuxV2KuxV2KrZATGwU8SQQG 8DTrkZgmJpI5vLGBBIPUfTnlJD1iIt5AI6UJoewJwiKCFT1x/I//AAJyXB5hHC76wncMPmpx4CvC WxPEf2qfOo/Xg4CvCVwaNjUEE+I3wUQjdN/Ld40GoLET+7n+Fh7jdT/DN12DqjjziP8ADPb49Px5 uFrsXFC+ofAV1czXVzNcztzmndpJXPdnPJj95zvXRPsfy/YWun6Fp9laKFtre3jSIDuAo3NOpPUn PLdTklPJKUuZJeqxREYgDuTW1P736MpZSRZIHU0xYrDPCOrj78lwHuTwloXEH84w8Eu5eEtiaE/t r94wcJ7l4SvBBGxqPbIofG35jywS+ffMDwbxm/uBUGoJEhDHoOrA56X2aCNPC/5oeb1B/eS977J/ 5x5WVfyb8tiU8m9KcggU+E3UpUfQtMzWl6LirsVdirsVdirsVdirsVdirsVdirsVdirsVS7zDNJF ot08Zo3ELX2Zgp/A5re18hhpZkc6+805OjiDliC86zzh6RF2h/dn54WMlVnRftMB8zTCASilhuIB +2MlwS7k8JcLiA/tj78eA9y8JXCSI9HU/SMHCUUUXpqs2oWwT7XqoR9DA5k6CJOeFc+Ife05yBCV 9xfE/n2wstP88eYbCxr9TtNSu4LetPsRzso6bU22z015t9KfladTPkDRTqLh5zbgxEChEFT6ANOp 9Lj/AJ755v2twfmZ8HK/t6/a9JpL8KNsthAMq1JodtjT9WYALkkov0Ia14An33/Xh4yx4iuCIOig fRgsotvAh2KuxV8lfnTpsNh+ZOsJCKRzvHckf5c8Su/3uSc9D7FyGelhfTb5F0GsjWUvpv8A5xVv 5rr8pYIZCStle3MEVTX4Swm/4lMc2rivX8VdirsVdirsVdirsVdirsVdirsVdirsVdiqhfCE2U/r ryhEbGRfFQKnKNVw+FLjFx4TfubMV8QrnbzE+2eXPUq1qityDVPTapH6sIkglXEEI6IPuw8Z70cR XBVHQDBaLbwIdiqpDK0UySr9qNgw+YNcsxZDCYkOYNsZx4gR3vkf87PL1zoX5peYrWZCiXN5Je2x pRWiu2My8fYc+PzGeqPMPXfyU87WWs+WbfRpJAuraVEIngOxe3Q8Y5Er1AWit4HwqM4Pt3QSxZTk A9Ez9vUfpd/oM4lDh/iD0apBqOozRgOcjVaZ1BUKoPc1P9Mlsx2d6Uh+1K3+xAGHiHcvEO5wgXuz n5sf4Y8a8S4RIOx+kk/rwcRRZXAAdBTI2r5D/NzWI9W/MTWbmI1iimFsn/RughY/SyE56N2RhOPT 
QB51fz3ed1c+LIS+qP8AnF7SZrD8pLKaUEHUbm4u1U9lL+iv3iGv05snHes4q7FXYq7FXYq7FXYq 7FXYq7FXYq7FXYq7FVlxCs8EkLGiyqyMR1owplebGJwMTykCPmyhLhIPc81v7C4sblredaMu6nsy 9mHsc8z1WlngmYTG/wB/mHp8WWOSPEFGGQpICBWu3hlADYQiiJz3VB7fEf4ZL0sdmvRY/alc/Kg/ Vjx+S8Xk2IE7lj82P9cHGV4lwjTwr89/14OIotXtIDPcxQL/ALsYL9BOXabCcuSMB/EWvJPhiT3P Nv8AnMXQbd9C0LX1gX6zDctYy3Iry9OWNpUQ06gNGxFeldupz1F5l5H/AM4+3lvb+e5IpXCvd2M0 MAP7Th45aD/YRsc0HtHAy01jpIE/aP0uf2bIDJ7w+j84V3yJtpRTgT7jCxIVWniBpyqfAbn8MkIF eErfVlb7EZA8X2/DDwgcymh3rgjn7b/Quw/rgsdEWFHUL+y0zT7i/vJBDaWsbSzyHoFQVJyWOEpy ERuSwlIAWXxLqd69/qV3fOKPdTSTsPeRix/XnqGKHBER7hTzMpWSX6D/AJeaTLpHkPy9pkylJ7TT rWKdT1EghX1B/wAFXLGLIcVdirsVdirsVdirsVdirsVdirsVdirsVdirsVYV51ikXU4pTUpJEAp7 VUmoH31ziPaTGRnEuhj934+13nZkh4ZHmx7OddijYplZKkgEda4QLYEONxF0WrnwUVyfAU8JcGnb ooQeLbn7hjUQtBcI/wCZix+4fcMHF3Laa+XbZ5dTjYD4Iau58NqD8c23YeAz1MT0jufx73C1sxHG fNE/md5Jh86+SdS8vuVSe4j52UzdI7mI84mPtyFG/wAknPQHQvgdl1ny7rrIedjq+l3BVh0eKaFq EGtRsR8jkMmOM4mMhYLKMjE2Ob6z8n66Ne8r6Zq3JTJdwI0/pghRMBxlVQSTRZAw655nrcHg5pQ7 jt7un2PTYMnHAS704oMx+It1opLmPiAB8XTiowgE82PCV/8ApDdhGP8Agj/TD6R5o2XBKbs5NO5N B+FMBKCXzh+e/wCZNtrt7H5f0ib1dMsXL3VwjVjnn6AKRsyR+Pcn2BztOwezTij4kx6pcvIftdLr tQJHhHINf849flNP5z8zJq19HTy3o8qvdluk860dLdfwZ/8AJ2/aGdE699r4q7FXYq7FXYq7FXYq 7FXYq7FXYq7FXYq7FXYq7FUv17TY77TpUK8po1LwEdeYHQf63TNb2roxnwkV6hvH3/tcnS5jjmD0 PN5zTPObekbjKowYgEd64eIpNosXCttEpc/cPxyXD3seHvbCzt9pgg8FFT95xuIRsrW9tJLKsUQa SRzQCtclixyySEYiyWE5iIs8mcaZYLZWiQihfrIw7sc9F7P0Q0+IQ69T5vO6jMckrReZzS+Sv+cq Pyyu9M8xHzpYQl9K1XiuolBtBdqAoZqdFmUDf+ateoqq818h/mlr3lBXtoES802Vub2cxIoxFC0b jdSaCuxHtmr7Q7Jx6nc+mfeP0uVp9XLFsNw9q8tfnZ5J1iGNby5/RN6R+8t7raOtBXjMBwK16cuJ 9s5TVdh6jEfSOOPeP1c/vdti1+OXP0lkZ89eSAK/4g02ntdwfq55g/kNQf8AJz/0pb/zGP8AnD5p HrH54eRNKRhHfvqEyUHoWaGStfCR+EX/AA+ZmDsLU5P4eEef6uf2NGTXYo9b9zyTz3+d/mPzLBJp 9mv6L0p9pI42JmlXpSSQcfhP8qj5k50ug7Dx4DxS9c/sHuDrNRrZT2GwST8uPyy8zefdZXT9IhK2 0ZBvtRkB9C3Qnqzd2P7KDc/KpG8cJ90+TPJ+jeUPLlpoOkRlLS1Xd2oZJZG3eWQgCrMf6DbFU7xV 
2KuxV2KuxV2KuxV2KuxV2KuxV2KuxV2KuxV2KuxVj+p+ULW5kea2kMEr1JSlULH8RXOd1vs9jyky geGR6dP2OxwdoyiAJCwxu58uaxA9GtnkHZoxzB/4H+Oc3m7J1OM1wE/1d/udnDWY5DnXv2Uk0bV2 YBbOYHsSjL+JAymPZ2oP+Tn8izOpxj+IfNNrLyprEpBuJFt071IdvoA2/HNnp/Z3PM+uoD5n7P1u Jk7Qxj6d2T6bpVrYR8YgWkP25WpyP9mdVoezsWmFRG/U9XVZ9RLId+SMzPaHYqo3tjZ39pNZ3sEd zaXCmOe3lUPG6NsVZWqCMVeB+ef+cR/L996935Rvn0u6Ylk0+6rLa1/lVwPVjHz54q8J8wfkj+ae hSsl35du541rSeyQ3cZA/a5Qc+I/1qYqx5fJ3m55TCuh6g0y1rGLWYsKdduNcVZJof5FfmzrLKLf y3dW6HrJegWYA8aXBjb7hir2DyP/AM4gxxyR3XnPUxKFIY6Zp5YKe/GS4cK1OxCKPZsVfQ2haBou gaZFpejWcVjYQCkcEK8V9ye7Me7Hc98VR+KuxV2KuxV2KuxV2KuxV2KuxV2KuxV2KuxV2KuxV2Ku xV2KuxV2KuxV2KuxV//Z + + + + + + uuid:82B45AE7E519E7119A76BA5BC76AA065 + uuid:26AD93F6E619E7119A76BA5BC76AA065 + + uuid:81B45AE7E519E7119A76BA5BC76AA065 + uuid:80B45AE7E519E7119A76BA5BC76AA065 + + + + Web + + + + 14400.000000 + 14400.000000 + Pixels + + 1 + False + False + + + Cyan + Magenta + Yellow + Black + + + + + + Groupe de nuances par défaut + 0 + + + + Blanc + RGB + PROCESS + 255 + 255 + 255 + + + Noir + RGB + PROCESS + 0 + 0 + 0 + + + Rouge RVB + RGB + PROCESS + 255 + 0 + 0 + + + Jaune RVB + RGB + PROCESS + 255 + 255 + 0 + + + Vert RVB + RGB + PROCESS + 0 + 255 + 0 + + + Cyan RVB + RGB + PROCESS + 0 + 255 + 255 + + + Bleu RVB + RGB + PROCESS + 0 + 0 + 255 + + + Magenta RVB + RGB + PROCESS + 255 + 0 + 255 + + + R=193 V=39 B=45 + RGB + PROCESS + 193 + 39 + 45 + + + R=237 V=28 B=36 + RGB + PROCESS + 237 + 28 + 36 + + + R=241 V=90 B=36 + RGB + PROCESS + 241 + 90 + 36 + + + R=247 V=147 B=30 + RGB + PROCESS + 247 + 147 + 30 + + + R=251 V=176 B=59 + RGB + PROCESS + 251 + 176 + 59 + + + R=252 V=238 B=33 + RGB + PROCESS + 252 + 238 + 33 + + + R=217 V=224 B=33 + RGB + PROCESS + 217 + 224 + 33 + + + R=140 V=198 B=63 + RGB + PROCESS + 140 + 198 + 63 + + + R=57 V=181 B=74 + RGB + PROCESS + 57 + 181 + 74 + + + R=0 V=146 B=69 + RGB + PROCESS + 0 + 146 + 69 + + + R=0 V=104 B=55 + RGB + PROCESS + 0 + 104 + 55 + + + R=34 V=181 B=115 + RGB + PROCESS + 34 + 181 + 115 + + 
+ R=0 V=169 B=157 + RGB + PROCESS + 0 + 169 + 157 + + + R=41 V=171 B=226 + RGB + PROCESS + 41 + 171 + 226 + + + R=0 V=113 B=188 + RGB + PROCESS + 0 + 113 + 188 + + + R=46 V=49 B=146 + RGB + PROCESS + 46 + 49 + 146 + + + R=27 V=20 B=100 + RGB + PROCESS + 27 + 20 + 100 + + + R=102 V=45 B=145 + RGB + PROCESS + 102 + 45 + 145 + + + R=147 V=39 B=143 + RGB + PROCESS + 147 + 39 + 143 + + + R=158 V=0 B=93 + RGB + PROCESS + 158 + 0 + 93 + + + R=212 V=20 B=90 + RGB + PROCESS + 212 + 20 + 90 + + + R=237 V=30 B=121 + RGB + PROCESS + 237 + 30 + 121 + + + R=199 V=178 B=153 + RGB + PROCESS + 199 + 178 + 153 + + + R=153 V=134 B=117 + RGB + PROCESS + 153 + 134 + 117 + + + R=115 V=99 B=87 + RGB + PROCESS + 115 + 99 + 87 + + + R=83 V=71 B=65 + RGB + PROCESS + 83 + 71 + 65 + + + R=198 V=156 B=109 + RGB + PROCESS + 198 + 156 + 109 + + + R=166 V=124 B=82 + RGB + PROCESS + 166 + 124 + 82 + + + R=140 V=98 B=57 + RGB + PROCESS + 140 + 98 + 57 + + + R=117 V=76 B=36 + RGB + PROCESS + 117 + 76 + 36 + + + R=96 V=56 B=19 + RGB + PROCESS + 96 + 56 + 19 + + + R=66 V=33 B=11 + RGB + PROCESS + 66 + 33 + 11 + + + + + + Groupe de couleurs Web + 1 + + + + R=236 V=28 B=36 + RGB + PROCESS + 236 + 28 + 36 + + + R=0 V=169 B=157 + RGB + PROCESS + 0 + 169 + 157 + + + R=102 V=45 B=145 + RGB + PROCESS + 102 + 45 + 145 + + + R=139 V=146 B=152 1 + RGB + PROCESS + 139 + 146 + 152 + + + + + + Niveaux de gris + 1 + + + + N=100 + GRAY + PROCESS + 255 + + + N=90 + GRAY + PROCESS + 229 + + + N=80 + GRAY + PROCESS + 204 + + + N=70 + GRAY + PROCESS + 178 + + + N=60 + GRAY + PROCESS + 153 + + + N=50 + GRAY + PROCESS + 127 + + + N=40 + GRAY + PROCESS + 101 + + + N=30 + GRAY + PROCESS + 76 + + + N=20 + GRAY + PROCESS + 50 + + + N=10 + GRAY + PROCESS + 25 + + + N=5 + GRAY + PROCESS + 12 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + % &&end XMP packet marker&& [{ai_metadata_stream_123} <> /PUT AI11_PDFMark5 [/Document 1 dict begin /Metadata {ai_metadata_stream_123} def currentdict end /BDC AI11_PDFMark5 
+%ADOEndClientInjection: PageSetup End "AI11EPS" +%%EndPageSetup +1 -1 scale 0 -840 translate +pgsv +[1 0 0 1 0 0 ]ct +gsave +np +gsave +0 0 mo +0 840 li +1096 840 li +1096 0 li +cp +clp +[1 0 0 1 0 0 ]ct +267.457 514.331 mo +254.36 514.331 245.123 510.693 238.965 506.761 cv +229.159 504.354 219.315 496.853 215.231 481.76 cv +210.479 475.936 205.932 466.968 206.03 456.302 cv +199.512 448.136 197.32 438.278 199.767 428.182 cv +198.019 424.397 197.024 420.269 196.866 415.971 cv +196.654 410.24 197.967 404.288 200.472 398.989 cv +199.101 391.374 199.994 381.547 208.857 372.399 cv +211.362 361.883 219.241 351.91 228.341 346.145 cv +234.454 336.039 247.396 323.841 268.749 323.841 cv +269.398 323.845 li +287.271 324.053 316.313 334.345 326.901 361.497 cv +332.496 367.651 336.394 376.008 337.542 384.271 cv +342.521 393.116 345.776 406.585 343.587 418.229 cv +347.276 429.814 347.05 441.576 342.909 452.764 cv +342.979 461.48 340.28 472.554 332.324 479.874 cv +325.125 494.155 308.89 506.154 289.214 511.314 cv +281.577 513.315 274.258 514.331 267.457 514.331 cv +267.457 514.331 li +cp +false sop +/0 +[/DeviceCMYK] /CSA add_res +0 0 0 0.9 cmyk +f +269.204 340.844 mo +248.926 340.609 241.95 356.076 241.079 358.664 cv +232.139 361.957 223.854 373.719 225.162 380.776 cv +218.51 385.482 214.096 391.48 219.057 401.479 cv +214.259 406.477 210.334 418.299 218.839 425.943 cv +211.151 439.47 219.819 447.057 224.072 450.174 cv +220.146 462.406 228.872 472.127 230.831 473.463 cv +232.411 483.579 237.31 490.244 245.657 490.636 cv +251.542 495.576 263.831 500.394 284.903 494.87 cv +302.399 490.282 315.153 479.344 318.48 468.993 cv +326.437 464.877 326.11 452.526 325.675 449.938 cv +332.161 436.118 328.072 424.298 325.675 418.886 cv +330.035 410.006 324.476 393.422 320.879 389.951 cv +321.205 382.836 316.789 374.543 312.157 370.896 cv +305.506 348.078 281.212 340.983 269.204 340.844 cv +cp +0 0.203922 0.847059 0 cmyk +f +271.583 480.75 mo +265.171 479.606 261.833 475.25 258.333 468.25 cv 
+256.23 466.815 248.083 457.25 246.583 445 cv +244.028 443.128 240.583 431.5 240.833 422.75 cv +239.083 416.75 237.682 410.448 239.333 401.75 cv +237.083 392.25 236.932 385.865 242.083 380.5 cv +242.083 371 244.692 364.052 251.833 359 cv +250.682 352.791 253.61 346.904 261.012 341.695 cv +246.99 344.937 241.82 356.462 241.079 358.664 cv +232.139 361.957 223.854 373.719 225.162 380.776 cv +218.51 385.482 214.096 391.48 219.057 401.479 cv +214.259 406.477 210.334 418.299 218.839 425.943 cv +211.151 439.47 219.819 447.057 224.072 450.174 cv +220.146 462.406 228.872 472.127 230.831 473.463 cv +232.411 483.579 237.31 490.244 245.657 490.636 cv +251.542 495.576 263.831 500.394 284.903 494.87 cv +291.54 493.13 297.488 490.474 302.505 487.285 cv +286.267 489.368 276.838 485.161 271.583 480.75 cv +cp +0.027451 0.278431 0.905882 0 cmyk +f +249.238 359.547 mo +249.238 359.547 267.404 357.899 276.919 359.71 cv +286.364 361.507 302.737 369.603 302.737 369.603 cv +302.737 369.603 278.317 362.163 271.065 361.135 cv +262.363 359.898 249.238 359.547 249.238 359.547 cv +cp +0 0.376471 0.819608 0 cmyk +f +234.115 381.029 mo +234.115 381.029 258.269 378.72 272.415 380.014 cv +286.562 381.308 309.608 388.997 309.608 388.997 cv +309.608 388.997 283.603 384.326 269.219 382.709 cv +258.482 381.502 234.115 381.029 234.115 381.029 cv +cp +f +229.801 400.674 mo +229.801 400.674 254.083 398.621 268.926 399.067 cv +283.769 399.512 310.014 404.008 310.014 404.008 cv +310.014 404.008 275.431 401.892 264.134 401.754 cv +252.838 401.617 229.801 400.674 229.801 400.674 cv +cp +f +230.443 425.032 mo +230.443 425.032 256.462 418.443 270.496 417.417 cv +287.121 416.2 314.15 419.357 314.15 419.357 cv +314.15 419.357 278.842 419.268 268.202 420.439 cv +256.939 421.681 230.443 425.032 230.443 425.032 cv +cp +f +234.295 449.38 mo +234.295 449.38 259.69 440.354 274.204 438.765 cv +288.72 437.175 315.435 437.177 315.435 437.177 cv +315.435 437.177 280.365 440.614 269.363 442.181 cv +258.365 443.748 234.295 
449.38 234.295 449.38 cv +cp +f +315.007 452.174 mo +315.007 452.174 293.946 461.131 280.261 464.999 cv +266.575 468.868 239.058 472.093 239.058 472.093 cv +239.058 472.093 270.63 465.146 280.987 461.944 cv +291.343 458.742 315.007 452.174 315.007 452.174 cv +cp +f +253.661 488.342 mo +253.661 488.342 268.029 480.484 277.241 477.136 cv +294.463 470.875 307.874 469.817 307.874 469.817 cv +307.874 469.817 283.603 477.576 274.918 480.402 cv +267.868 482.697 253.661 488.342 253.661 488.342 cv +cp +f +648.537 340.844 mo +628.26 340.609 621.283 356.076 620.412 358.664 cv +611.473 361.957 603.188 373.719 604.496 380.776 cv +597.844 385.482 593.43 391.48 598.391 401.479 cv +593.592 406.477 589.668 418.299 598.172 425.943 cv +590.484 439.47 599.152 447.057 603.406 450.174 cv +599.48 462.406 608.205 472.127 610.164 473.463 cv +611.744 483.579 616.643 490.244 624.99 490.636 cv +630.876 495.576 643.164 500.394 664.236 494.87 cv +681.732 490.282 694.486 479.344 697.814 468.993 cv +705.77 464.877 705.443 452.526 705.008 449.938 cv +711.494 436.118 707.406 424.298 705.008 418.886 cv +709.369 410.006 703.809 393.422 700.213 389.951 cv +700.539 382.836 696.123 374.543 691.49 370.896 cv +684.84 348.078 660.545 340.983 648.537 340.844 cv +cp +0 0.203922 0.847059 0 cmyk +f +650.916 480.75 mo +644.504 479.606 641.166 475.25 637.666 468.25 cv +635.564 466.815 627.416 457.25 625.916 445 cv +623.361 443.128 619.916 431.5 620.166 422.75 cv +618.416 416.75 617.016 410.448 618.666 401.75 cv +616.416 392.25 616.266 385.865 621.416 380.5 cv +621.416 371 624.025 364.052 631.166 359 cv +630.016 352.791 632.943 346.904 640.346 341.695 cv +626.324 344.937 621.154 356.462 620.412 358.664 cv +611.473 361.957 603.188 373.719 604.496 380.776 cv +597.844 385.482 593.43 391.48 598.391 401.479 cv +593.592 406.477 589.668 418.299 598.172 425.943 cv +590.484 439.47 599.152 447.057 603.406 450.174 cv +599.48 462.406 608.205 472.127 610.164 473.463 cv +611.744 483.579 616.643 490.244 624.99 490.636 cv 
+630.876 495.576 643.164 500.394 664.236 494.87 cv +670.873 493.13 676.822 490.474 681.838 487.285 cv +665.6 489.368 656.172 485.161 650.916 480.75 cv +cp +0.027451 0.278431 0.905882 0 cmyk +f +628.571 359.547 mo +628.571 359.547 646.738 357.899 656.252 359.71 cv +665.697 361.507 682.07 369.603 682.07 369.603 cv +682.07 369.603 657.65 362.163 650.398 361.135 cv +641.697 359.898 628.571 359.547 628.571 359.547 cv +cp +0 0.376471 0.819608 0 cmyk +f +613.448 381.029 mo +613.448 381.029 637.603 378.72 651.748 380.014 cv +665.895 381.308 688.941 388.997 688.941 388.997 cv +688.941 388.997 662.936 384.326 648.553 382.709 cv +637.816 381.502 613.448 381.029 613.448 381.029 cv +cp +f +609.135 400.674 mo +609.135 400.674 633.416 398.621 648.26 399.067 cv +663.102 399.512 689.348 404.008 689.348 404.008 cv +689.348 404.008 654.764 401.892 643.467 401.754 cv +632.172 401.617 609.135 400.674 609.135 400.674 cv +cp +f +609.776 425.032 mo +609.776 425.032 635.795 418.443 649.829 417.417 cv +666.455 416.2 693.484 419.357 693.484 419.357 cv +693.484 419.357 658.176 419.268 647.535 420.439 cv +636.273 421.681 609.776 425.032 609.776 425.032 cv +cp +f +613.629 449.38 mo +613.629 449.38 639.023 440.354 653.537 438.765 cv +668.053 437.175 694.768 437.177 694.768 437.177 cv +694.768 437.177 659.699 440.614 648.697 442.181 cv +637.698 443.748 613.629 449.38 613.629 449.38 cv +cp +f +694.34 452.174 mo +694.34 452.174 673.279 461.131 659.594 464.999 cv +645.908 468.868 618.392 472.093 618.392 472.093 cv +618.392 472.093 649.964 465.146 660.32 461.944 cv +670.676 458.742 694.34 452.174 694.34 452.174 cv +cp +f +632.994 488.342 mo +632.994 488.342 647.363 480.484 656.574 477.136 cv +673.797 470.875 687.207 469.817 687.207 469.817 cv +687.207 469.817 662.936 477.576 654.252 480.402 cv +647.201 482.697 632.994 488.342 632.994 488.342 cv +cp +f +0.5 lw +0 lc +0 lj +4 ml +[] 0 dsh +true sadj +27 804 mo +0 804 li +/0 +<< +/Name (All) +/CSA /0 get_csa_by_name +/MappedCSA /0 /CSA get_res 
+/TintMethod /Subtractive +/TintProc null +/NComponents 4 +/Components [ 0.858823 0.85098 0.788235 1 ] +>> +/CSD add_res +1 /0 /CSD get_res sepcs +1 sep +@ +36 813 mo +36 840 li +@ +27 36 mo +0 36 li +@ +36 27 mo +36 0 li +@ +1069 36 mo +1096 36 li +@ +1060 27 mo +1060 0 li +@ +1069 804 mo +1096 804 li +@ +1060 813 mo +1060 840 li +@ +%ADOBeginClientInjection: EndPageContent "AI11EPS" +userdict /annotatepage 2 copy known {get exec}{pop pop} ifelse +%ADOEndClientInjection: EndPageContent "AI11EPS" +grestore +grestore +pgrs +%%PageTrailer +%ADOBeginClientInjection: PageTrailer Start "AI11EPS" +[/EMC AI11_PDFMark5 [/NamespacePop AI11_PDFMark5 +%ADOEndClientInjection: PageTrailer Start "AI11EPS" +[ +[/CSA [/0 ]] +[/CSD [/0 ]] +] del_res +Adobe_AGM_Image/pt gx +Adobe_CoolType_Core/pt get exec Adobe_AGM_Core/pt gx +currentdict Adobe_AGM_Utils eq {end} if +%%Trailer +Adobe_AGM_Image/dt get exec +Adobe_CoolType_Core/dt get exec Adobe_AGM_Core/dt get exec +%%EOF +%AI9_PrintingDataEnd userdict /AI9_read_buffer 256 string put userdict begin /ai9_skip_data { mark { currentfile AI9_read_buffer { readline } stopped { } { not { exit } if (%AI9_PrivateDataEnd) eq { exit } if } ifelse } loop cleartomark } def end userdict /ai9_skip_data get exec %AI9_PrivateDataBegin %!PS-Adobe-3.0 EPSF-3.0 %%Creator: Adobe Illustrator(R) 13.0 %%AI8_CreatorVersion: 13.0.0 %%For: (Thierry Ung) () %%Title: (gnocchi-icon.eps) %%CreationDate: 4/3/17 10:03 AM %AI9_DataStream %Gb"-6fs9_\E?P#[nIOfZXMot+"p?+O!3Jt-B9b;P>YLr>.+>PgV5ghYfptcYAsO^f+6blZA*nVf!''_E]8d9*kLV.iL:qH]mg"Wf,m5)oD6q6j+k)c5(N-X:tC*?f&Hpf:Ts^IIlV"k7=26cU7a*RrW"Fo7Ffq>28k^AdmEVH2HlR)$;]lJ+=&`IJ*.-f:QKr5J6sbo:$9ZIh#+&+2Ot)p$*'k5CSdH %k-':-_s@'3rUA^Ca'[-P?/CQh%mT<*MXt_W'Dt/ba;rKj]WJ(j8scaoCdQkt@Bk7joDYeODn^_mro[XS1/6-PQY#IR?0:hYRu_L% %XiifGh8DNk+Es%HT(SS"%"nTXq=T(P!T;`&(U]g:]DKQcp]\G24eUa1=)W'?,.%`!%4hCriAG`Ao]l%KTC7Lo`80=brjU_B&"aI$ %3qsTtr)jC+S7hbF4=YFinFnWpVo+%W%Kh3,\_ts7<%3&n.JR7!GZQ>=="0Jo[Q2ZYcTL5N1OS01o8In<1Z!h8iD7,PGf74S7sslP 
%`(uC`]]X=O:G8&4nEJ+"5JLIu)=B6RQi"[=jA5;;rHoI!(\e[4_D1"F]F2cU,N[6j7/.Ga_%GX%*4O2r5Ds$gTD0Lg#_-;V4AX`f %NX1-o\FDLEED6.&SkXYuhM(",T6S]DJhUIPloEBHcQ^[Jro>GPjP5+M484`lN]kuOdQ@43n&"VR:MbkhO@RQfnVZ:D]F2cX/'P?: %cf4`h-iSmrj7UHbgiJ/,:V7(ULHg&!),#3\q>L(X`<#>9ifC5[pQ&r-n)AWAH/Xm,n$2'207GR`XR!iLkSN*.UJoCTdG`P:CAnpf %LHk;]`buh=cNugo5Ofd7?2-`@>PAWR*5@'0>B*bb7Bm;KcBuJYT_h(-PinTaa0X([ja"b8l\aoY\F4[/^7AKkkP92uih#beA8LBc %kIKOe0(d-X>B`.n'>DiL>'H:X;oO1)DT2L1J,"-)+l2C*Q$&DY3ae"j-Lp=])qnB\YJ-@XjX9r>UG)Pi<3_dn)#9BlS%nG$_lrp+ %^VUdI1Sk$4YArY'lYlck-N8L`o\G^YmJ$NMrQS])"92_Xo2k_HFCIf>c!TWis'L9I=(bl6cZ/@A-a,'#YQ+-2(gftWPV8pBcK?F% %_TG"*\]a52P]o7WVu:]tNVgn15E2Sa'M&/JOl-E%$qpJ>LkYk`4&=gnQsm,[j5mecSC6YBaUt-1O5k6>mkO.A&U2u#(OgPIVph60 %La+%eWCug+!?q7+fYb>-4Om)oE+^)okAU[@CUEi`4h8=$oL?dTC&'3l3K8j@EQ%T\bOBCo$iW:1E-.E[]_Jcc8+%0U"jatHN$k!F %_\"kY3l:`Q1P>/`cgorcpC-G"HoI)^fSj!(#V_e9;h)Ybep#3IE>F]Z6Sh4-QEcsU5P=:'Y1hG1hFDM.FLQ\aaMJ`VmeS`RdGIGW %R(*l;9fMD3%I]V5_6p,*k,nMsQ)mdV%X>SZo>FGe:?J)C-%22Whf]fr]?V4pG?V`pn]ojWr&r;N]:L$cmd7.Q5P>K9R+fDso8Kg? %dojj+0l*gsq!Mb>/oFq[3Ql:DGh0lXeoYgR<`h!jpX!g'fQ/+1h;du/KUS%mPd,Ys%NIFr)]W/OC^/ZG@/IBF0)c?t(2d,q4KmT3 %4ZGA[e'2e!):o;/-eu=%M%181H*9#h6V8SMK\rp#2%#&E"trakA"m;UFQXP+,`WRX:$AEM:nk&df.Br>Sph;0J*?,FB?_7UpYV_+ %_LERj]L-u9PKG^))8H.>4b%'ndjH7ZrV-IfC@qPX^\e(UK5_/Q;tqYe14B,e[m.`"J+N3JO$EQuqsQ_%^Y427iLaKGIes_rf^F%! 
%qWInKqO>eqg\oZPK3o]a^A7C.7EafmGH]m$n?[Jrqoq&J]m\ZV55t?ApUf=3q!]'Q!!*m(hqdUJK2:@=hXR/8=*VFUp<@g=rV5V7 %gsl9%-hO-?^\[m*h;RbepYUF'^Z"hb`kHj#n^Y,DHhZpnh07NH-[ml,hL,=eVO(`/^O"4E]le.pGOI_CIe0*>ma]A(Da&Suo=oae %D1)A]?b-43q1dKQgU0WO1O.mKe_@QO@,6>-rL7_CGiq$Yp@Wde%j-8pjmB+0YcRI?2M1tGn%X;Dp;7?ol4<@_q8:rGTAIBtp35n$ %"[1Yjkr!@5g6#*G3kt]]f5EnOn,kRdJ+$]nhU.AZNc^*.n(t_W(5TeFIXLb?:#(MJ5CE6ZmF20,iU-(u6&WAOfF<2gmAoub %54/]j*m(Tm"0MM6k5B6Bhn=I0r]gD\ %eJm^>s0HYJ^?]r^GPF)o;u!PIjhG]s;O>#DNSWCW]0gf/cJ8Zc^coS[3qcK:p\Xs:++%D-@i%ErN'g8n"5>iT3UYoN*oe]@69N<3 %cj^T*h_^R+*2[./*/LN4rD!YWr6 %NBQbOC!:'Qa`[(_ %0O%rr;mV6.fFN7lX_>u"FO:UFQA&-A1kd:Y1K1+Q@>UR&,LWeXsK[g-q %W'A9H9_1!t(&keID3WT4opooQ:UJ+^XmuH.Y&pqVf$ut5HK0kn_3TIb(7eO>QSm*plI.H!Q,g<$gtL6BLF^Q?*G\0XMsu&=b/%'9!Z^(WQMh6&;e&)OXESO^h2.E?sU)=6'qJ(^N�*7ie6qG-BE?^WTKMWBPX$>(% %+7!Z+['oAelM;jeN,a8=Z_(fgK.M&u=CYWJ?0D)cqBRTI#?^?(e.[6`[sPaHFj5'ZC#*Lb@GQPe4YQeTq1TO0qD1,s%)D=h#E/s] %*qk035nS_5KVASn`BZtfBVC6UW.?4HM2#G/!;&MZ@73slWQ:g@";0c'4fN5oJm\OL%JFrWD?6UYXsV%f``U143=+%cM,IM"le)c? 
%6s,=+b#AXd'Cc^?EZF3A#`7c$#7coe!n`:T!j5&aoN&]V?O`MaptY9'!)QHojcBKZ%P %%cm-Nl#\rfp"q"F"JAQNWPDh3Ykd:X5BTKGZCKjD#C,i6&p&VQ,j7sUrS@Pi3U1;eP_ %JY$GgTE4P\+1c[tX?$B"*<=,l^bjQGA\bnYn?7mYIfXpZ!EL9ub4\n=3J^\hj$> %F4YSVFoU\&p=\d/P^RoI$3snsQWmnS\'N^;YdS9?`Rfm++:O+i"8WN.j>Z%UY8u]n.J3P1I;8f"B8;\mF"3L3/ua:!n]ds^_A[h* %Y8C8GK;0"h`dC3)WsmdP7B^O^81Y^;)TPZ72IkC(cO5;&\Pj>MYgKE] %Z4oYUH[7L\P_Z8jW",0hd_OmK$7"@%QFDTe)2-..3/"+2V)$P',Uj`0a5lAgTe*-WEc/g;?2*>1%P %;VPaIl63N+V1;qE3ImNTsX\)bu$R7lc)$$u^Rnd-3n\ZSS]R.>J-Gd)5Oo/C9h>JSFnFT';QR7l_dk*]MrofClJ;l!8l@U#D*g %g\NHOa9iARB2iH^6Bsi/X%3Su%!Xi7%dsFbO+>\%/=ei;nd+HN:,r=JO0B"(fFJ5&q%N+Ub8KO43RS0lhJqp7UO1>,S+oFghUK1G %d?=kEPk.O.N(-5M6q;pnkMNAMq5ru7\*+t'aiLS?&A.4C]e+gSJA2=MW&nk(4h8Bbu %[E,_O=D;(Z["Hi%Fq(4DBDt(K^IHi.P\`;&7H`/R/UegsA=bo/1)JOrj!nrRF.op[I`-%6 %]2p^4d!=Ys^*Vh)\8T1qcb$@B,<3upk//8h6lh?m4":V-A0;Pr,hGkXg@XEnDAhi=k\nq2B-FLI#YfZ880U;'@.K-+UWOYNk@ajH %q1n#!E`&G.VVC+M:!;;VO>"U*SVU)aLhJ"D1D::o9PgeX/`2Ik9]#TM.\Dn.2-^(1Z=D# %[jT$FjC!thm>4bL?6S"^PlmopnufSD#%2-7O#V@^9T^NCV&SP6nIN:3a$8#)j`qYde$tnE]0$S# %=*]@o%\KCaSMnse%IaVlm;o"6js6'UmZ4o:cq<`pL>37:FZ8qORarjp?d%NHMjA\Ed)X %CVO2K/aCe=_G&sB`_9Zn58)o8aO<=Jn..uhiC`S]cqr_FqXG&_T!A"R"kpgW=`>$\EECR/ne!ju/pfb:k"X1>Jo]J("9B8e59C3= %3Ec@6kCO6=4E13#6eHY*G_'i$GT#rs_K=[-?2WlooN_@U`t(C`s8%V)P!=Sc'\-$K8f!iH6Ahg3P3,#=J]bB#dUZM0bI(?tJ>O'Z %CJa;QbkYQT)EY#92G-"M:7KjTj#UFR$Mfkb6N_/8,f_8ek?+.)2$7mdlg1csn$I$*arkWaGKJSZ$\D'nhXjL*Z6%L3POZ==@Dh_G %a&\tY0j\94d/d#=+O\d_&m)dCk1Wu`EJ6A98Sc*SFsC@LO2r\>Id[/4C\q6Eg@I3\7G/?JLmLgQ'O$ %C9FnT0)jjq#dP_(F1#A-]+SJ1(9mHA@@:uipkPg75<,dQ;oJ9&n@OQt>'JkM?Z,:>DJ&2X^O[I(@_R!L0iS$^<1!&:U&5U\\4Y$$ %TF-06&^IF`nRm\e9+5'7W*h5:UPnT'U.#lZ805_Qm2F6u_=U4`UVYR0ITP0Y3866i:5$s$^I773$)2Y/7/Oo(:NV>p_6,&"Zg&Fn %?Z)J?;*i]G))jHgE>(D?,EMlLp@kA0268_&[9_E[jjb@hC1.XtbU70!9N]OYRNssjoLq8F-d!P:90HJNE+&pQ %*\j\575\tG;he'd4_mH,*9bI(d7J_Z*RTaZI/KqMk@HfafOA]*G1$k8Xe?XRB!<-Q#q!r %mm#@iWeT,NHNqtq/mLRZX/)_AX!cgj4ogG67To110#39[JX+!%E`$)M-LrjX`;u4Ic@gG5\8\_1haG0cHZAcn8&*t%5M>KH$5-h"8aXZ2J>eF]CD%.G\W18.SOATjjVLcHmC 
%PLC]6$XkB.#4.ddh(O %93#?e9o><4Q@LKA:3.,oVI%rYACk3j[*'PO`$GO`L=WDKc$pc$3k/toY*lI9MUZs3mOIYIc'=`5W)k+D[(k>d^2'ne07.#1Vf4c# %Y-"nFeSF&K6%0D5q@p,lHb9H5q.3fP+BuG`mT2S_cAO4b3VJJDf3HFt1KeiJ1RCE!-p%Sei`@qM3gR$*?d8C]ctFtmLE^:K_h %MeFe+4^74)Jj)-INN1asa]-fY*W8A2keV?\F.<<&Y_@bu9S.I4=\IF'(7?[[=II?e2`HS6&p]$Er''r:+_R=)AIl09CVR]@(>c\k %C#PW[?t_Y.rR+b#_WiLpIIYipe3&<4UTe$Qp>\uq6Ls8P&:f0ifZ0(:4%^V(W&VSq)V%uc7=rW$.@)G96\HSNXpnVISJS%g0m#r1 %I^cu$C/Z`/8)3#=n;N"j:VXa#%diYiH$StR:UUh(@"gRY_ku+jqW[tWSqQ<>+ %c3s)Mo%+4DMI/mE4VYH[':'&"8+C-r01Y@(Qq\[@gKCn\C5jc>'=C;\U@%MKtDjkj=BW8-")-kDoJ,RmT+1X\oj2Jl3gTV-Zr1LX7Ar9"t/\jibZ-poX=EMeC^..iLmTr[FCM`T9+fdJj))lC> %0!p&qig)5l4F8CbDem76[UE=r3<_uh:_ObSdHe[oImKp!/rEpi(=:&1l^]OupZe[$MKcd\;SER6JBst/;&0"Vg9Xrc7-B5/=j]D)Q1*&!AtUOHc2iU.C;;1G.i4e66Xmr1_[/`+ke&33&ZN %UDunkBR>npWRhE*`b.H.p5UdcW\ZS&>Y!92o.bsVHX_!bYWDAq'jQV$4L'9ms'@P[B8c7$%/2R9r_1soTL0+?Dpe+L9cX1(6/6_? %+%[W#6!Z]h;LCq=%F/%7s,"tL4\A1p2F3IHCSioR`-Qpe"$INk#19okFcNq\ER.Xp7r6Fc-o]MMge[JTM( %lKVrjZVt?:dUV(,\b&M[)Z!]kT1*l?SF1Es\G#cY[J$n+?>Dgu+ct'cemE_,2N.Q=)U,5"2AKh+kV-d#fM[WQY9($M0uHVI3L_L+ %WS64WATghjI::HXV0@fUTI_jU@8"MZau$C\Nbi-llH!,+[)TF93'FgF %g&f`\;_b\iVV$(]fG4VGWn#&PbVq7oNH5Q["\qs6Lf[HW+s1UJ)>b9N'Ec$4?19O-`O\R]Ki]54rE!poWOeeLkh+\#h57n.b:`3l %]L1gkOXf9,NKD17E,@U5rI[,)G;M?H","qG(NW2Lcg%)bX3%&]*pt3p;Y:oVJ[WLR*P\g!I<`b%&iiD,R-Luh)u13K;?(_-=Jk4Q %]Ggjq]J'.T29F/$icKVcd1+sW0Wtc^GrMIaFKAJ0ibk.P&/f[Z\snH2]77RlKC%Cm8>Jfo+@QS6Phki,m"R`92uG,:iD8,9J')[(a(b%@m"R`92uG,:iD8,9J')[( %;g-&CL[9ciAM8EZoAS/0["!AmiU,sW>Cr;k!n,fL88Fc8I4VWt-F4^D+-.J7b0_m0@?rZ&\=>Y'nK$sYm7`Ppiq;$\HBGNK"VL'b %dlS5@a'>5,GnZa^bi9]qBSDDTQ2QOdP.!]8EX,TL;.6j4JgF&MD5r,QH"4;SUaXIgWY8Dno![&ae_F!c:CYo\&C.0oMiMn6#Bea],b%Mai_Rol' %JlD!,d5ks&FG2WS.ZIMZ2O;1;q6PA@N3`iJlH5u9kp'BbfSqAd7@L)#F%&!/_]TALd>Y2`.>'C2',anjnn"NI\ktRcAl1saX.De* %%d'cMOkIE_T-@]Wl_\l>Hqme9:kC)/:n9ko"S2t!^Cp5gi@G#"K9*o.a^9^k6o[3B5V[Fl+9rFL$tl4M9?o3?`6bne?e6=6qUJr$ %qd8"RR"+G(qKFbU$_KTXT-8<1=6sS?GOZU04g+_0Smg 
%^un44VB9tK^72I8'0`lKlAZZBWt!R;ZXfV\56eMUp6^O@RnT@BM\;(ZIa^c,Hc\/f,Q%GWqtI;N^&E3L]mt8K`dm!V$2-9gC@,s1 %&23)+eD=Q=7<)Mm,"^aWL/A.t1I6:XS4ns/m8%eOE'=h-.)H:kOaNcI]@;rg1+f5+uQam9RP7h##V&j\3(dcVmqJ>O>@Fh6hp`J4qVNAC[^Z %eS"&HXWH"V-`&(K$.gt:8n+!QbXBmGh\%&12@XVp#1p'bNYrVXouhl]letBHEVIjOLP9gJ+qTSU#(3D*Mn*3(<@]tdNo[`-3;ngF %3RF/lW;X6>WVLjF:h+>NboBuH%?d,mF+j75psd68l2oDu+/=,^78n7[J'N\\smDkH:^? %nB:tkD',EbmGu,k)R*18P-G=E*aR]FNGkt>cB:&\k5@IL!G#*,OU@PdM_h(t.HD,24C(Q[!RlWGQ7nPBCSGq828tk.f(*O]I,6A; %d4K?1pNa1_3)[kKEn(oY>Q=)M-oU9ZCMeP%b5oZNk.,'jKe9lU^9RC(["s6KQ"mhUE1T'LJ#'+,?TP7_A;c>WCSqn.6%V0beocSQc:e]#=]SqWqF=US9=G!rcct^B(?[': %)&etkGVVo_cGa@C)bJG6aDiOA-`]1/Z$AYoP-f^q4V@h\rQ]urW4jOd"H&blVs,j=1%djBhOd`!86=G7XALn%Ph@g'pmdI!1F*dd %#`F(krp2&`33A$WDlHsFcH0:#P(m/f+o:,h7=:,VNq%t1Hibjcq'oM#/"c7PVn%tA2gMNQ2QJ`k=tCpZY.!>s3rtJ4u`7"t%NB^Cmq^@,jZ6"rme4%?:>B$q/oCD_K]&1Sq%_/OedPq.)BdBQ(I'L3Ut(*GK0/"am"r0$;L27j#aZd.E?o9:s:D@?OR[5lM-NGAr!jC(F,#n%$ui:n!7t$RO_amM2)ZZbVnX7$ZO"ii+u@Y6fTf/K9ZBe$iosT(hc]RkXI %2-8hl2u"?H6%BLh[_u*F'k4 %XVRX""<(d#ekP_XF,FCKTrYu+!9ZlH:f4!a%H&+@E6Kh%o-/rmoT(P("`-HaAs67k)E9.5_Kg\US^CUO;sS4P]hj>$o;saeZSK^) %`61jHfnt9Y'gtBe3F$o6_sHKRHlh$*C`>$^%HBm:cs=4L.j&su6?d8ET>Ng+p"&41W6@[LES-2k,h';)Q+@3AI+*_M.Wi"^)R4aG %-GZuqFLbsF7>p&p`S-Zp`Kgu(c1-n7R<,5oDtWu=XQ'Q5_OXoEZj]D/"Q-VKl>tK7NS?gF]^hq"8$kEnqJrc;@W@!T7sJN7hmU4k %YPCFB2,Y5.JR)!sE0CP!a]K&F%Y(pS+)Sc^DpX:sPDIhh)$WS/E6;A>Y-j$K\"2\ZQFYnK/-,Bdki=So=\dZZ-_7L^>7)3EU"/M? 
%JPKB4@86^KYSU$9GDjGF6?ZDM%ZR;*<^F#(TWY)*/Z=!go[e3W7o9O0rrZ>6J*Jb*ZhZ?ol#mLZF0Q%4 %0h+3.jRik^ig5+r4Kan7gQW*#.2cpaX,-3pfUp'h,5,(5aERZq1"F5;#7"'e^HM1tGFp]+@:T"tAHEr.s.$1Yq>f5#rRXr0FCO]G %7"^\Fm.gj`X9u,gSsEl$>bk'dj?87I7;?Q%NHkem7p$-PZ+9VUE_"lp2R`3o<.8MGE?d"oU*;]s>Q9+SEN0jOVrcg\74POic,Rsf %clYdm_Ff_tkG+8aPp]ee':BA00X(*/,3J`+ %Ce5/BOfRo*-KK(_n95KiV_XL;!N_M=qr%*"D@#u\rk>r#ef%E[K2W^pX %B_gBsG5[.n^T*3p.#W]@oi4qdN'PK'od*q^-NqLpQX%NkG$p)5cD/h_`K_d-/rE`Ga?$ %O0rtiX(9kAKr3MI6e:4CN/O\Y$:3fo>DdEbD#qOQ% %:1YC3dnXLkoT@0@kbl8=BKWClC0Q_O'kCWElN7e.SM&8phC`%C7!FM\1>F<3_pj/151'1Z3Qdu<7D>3^Mt,3\gWn"Z9Eo``SaD_g#/F;VY-!,Z,UoXe9cP- %`W,UHQ>>A_RYC.-[^gIKODU,6NG#`_FOYTeoR3/`JEnSh`S4D1M(Xi2j#/*=YEEVOH#]tW`(L]MhNB)H2$?QSQRupBjqQI[X9(8n %7+)sa.Pt"m?P(mAqM'"8j..JSjFP7oCUeDmNYakuK]IdlQ5!f&%E"#G%TZ`qjQC,0(PA1[RtC,p?3T+qT)lc(uZI'>]j:;6Bfqg:0,D_hUW_cVp\i'0(jB*N2_geE\P6F*6Y9SULtN]7!O[t^GhZGg_UgQX:!_VU'X9t'\*BO2dVVr"C %ND%p=ocZGOmA.B6LV%F@7c1e0ZL\(2EL@*/"k0"Z*^+[j'=nnN_]<\!/hUPt6g;Y!"I2c,(5Q1t!c=&".>I_/f?6'Cp7_:Rf>5*% %pMq=s'b#J+Y%iVN%]@9G9iLFJmOgfkFD5-U?+mjt!L@03"l;8S#8=h;:@%WZ80,`02Z7IA(bYP/sV)PlUK[8IBD\7@ZK-Y;I. 
%p-e$+cBap4Zh0e>/?0g`!oPKDhqj]!>pO0gU7Psmi$+-nbTTk9/9IPPM/^.S.+BMU5r)p=\6=b*HjT %*T"jGq1@4>)B;fp3V-RBcGb1BI>oFnlIG7ErPiu!f5aZoHXIfYff+\6$^]MZ`9`B2UEl"n`C6=/3f%XEds5>%f!?'gZK#^%RrI]XgW?+!alnUZLfAgm-]\m&DFjAa_VPRce[Yl#@S@J$KR9S<'-3Oe#S+s'lJu_rB-0W]Q6,?tsaJegunanj!E& %;4?3PUK)ck7<[*J`0$lC%@`1sX!^JBH'3"@P$#-:E%MEd'5:c[qBoRpBlFa;_A*ctNf#?oCp1'oSkBd2Ni!bo)OfFlL8p;&tu3.fqi;)W>T#!a3-NiMe7;"_l;B]pMt.H`Kp!mR^(03 %OuP_7:%crs*cCP\fToiU*HjqJfZ6).DW&Vca_G,;f]F*TmYRW/#O3cu[m>eST1_+?69n+cgIZQP$q*tFW$-eV;)%F8r[n^[hY$\^ %*4iuN[pH"!:AClPG7qMYNDSI>bnln\G363n^/'+TGric?-2ASG0tM7K-XR!'46Nj=:VMQM76KEA6FtHJfB,0P3SCj)i9kg`js2j[[!np%]Kr:9i:&qWGE++raE0dYT^5:,ch5Y %YlWijcgK,cjNuP;+EI+B1diqKer?f>8QOq=%pa-,?Id0@]6PtK]rnkkBm`g'fS@cL$N+TW!FDS;f@4'sb(S`u)!hBE$GuM-SNT=mdK6J)5VubcU3`H5$-lZnlc?:rk)EcW %NT808TPN?@eBMipCDQlQqR*So(k`cMaZ597MV:(:=-@`g)9?j.'KKpXf5IAc#`I43WECE'/JDR15k4>d_d:&k>IeO[%1ML=(@Eh, %ESF#i^HH[1J[M=p@:6AIYg->hN/Y3`)"!j/P)IQ/E!dD/@Bc %X1lb]hTOc*fm>F%+W%;-0RKRLVLme9rU#_J;]"J&SAIb*C(45*I/>jI0!jU/T:@oPe!=u<2uWq)akD'GD!&I8%AQJH]\s`8J&]K_ %T,PAEYs/J_b_n!7"F&BF`D"4j5UJYsGH %g>ZaI+D5I[]#X0jR$*so.9nTUhJ`FKHO6f#b=a5FhQ26L(lRXSU;No1F*,uG8V$/N1&PEPN/O0]5i %1MphPh/t0k05$g(#2*j!#'MqH&pc"kWPuP3;F7%5NJq`+9Q\Tqj]R>Lc$Gi]B#P_69W+ke[Nj\4,b*N31dtU%YR9p#FbSE:?)Kt*3;i]5L!Os;+j#UDn %XT_BVLCps>L:HhlVQ;iUf*9U)5e@4&>/kUe?(?Z1Tr[XJrYb*90CEeEgsef)oT3rWS"6n2+`M/ejZ(H@A*s< %*d!h1@H3Yb&r8I3U)P"*f5&r`.r1D[XIJ9DS9RU\"U3(!]#lJEBmeLbL)rtDFHA>sg/sO?.p)kNo-,!:'OYdAXlK/cnYV5fD7"[h %[f#D24*K+427\]W"eV%NA!QSZ-N"/$NPLu-F1_"^rL$)4hP;;#qPI81&UrW>n(PimZB]_lpi3^09NL-AlRu7@02"BGEC=h"!pF=6SVI:2*j6NR85D%4Bc&[rNE?Lt\E53NU*>J`4,bai=WS7%'5e.BKi"9MN,^9-22(\<;&@ %3L(V41rXW4Yap_]dHGX8mjHcs_R]lPK'MW27,_E.0nIXn?A%d[L/G:%o\cJ@cH=2_1)F[I=lW#L'OP&>Y:-9$-4PgW#-i$do>k`9 %Q[g2U[a<16[J\?*eC5_/9jJdV1j:@o7@QM`3?qZ_L^)mIO"r)5=>eoHrAr]06M5N`_']Y3\[Y[qEEo;.LF]aBd2c"p%S\n,M",/pS=%lKZHd63F]Re?KUUK!E]U\R`APlLMAtqfe/\1kcULVC %J*+E;`s'euA6^/L/NDa8f@>6Oa8+D9p]H5K""26mGobM)1>U!W9=MT.:M:irQ9?p=k2_h+nQPeR>2naMDi5Pn>tsZsV."aW(p+'" 
%MT75g[Dpf>/MXeJ!rcH!W+>1718;g_pB^"G/@O+]5DnZJ.jPmU]brJ?rthIYqX-G"(%+58$[@+8_$pS:GteP6DY %PG%C8TME^.l2Z//#i.o6,H_e!GLO'kqn"sOTBt`"`Pl8mREd8q9">g=he3l0pP2^&kjO8]&%f3M^%+MWECE?td=Q[%re#"/TH6a7 %GT4H]\[s^`Nl`i:++ShJ:V54X4%GiNa!S?,A'Oeu'MM-.5BQ=#iVn%<5m!=m2V,,AV@@Bh#7oNCt/I;kq*d`plf4Tq1R%l!Bj3CTX*I?2?,fQXCND#3)q, %XYOg#9i+t#Dt,b=PBp?q[]54fC:>ZO>h*qli7^dooX%:cMnJa:RZcm623K7/ %d<DFD+PD>=FFION&`*apCpZY_\1!->YlOhV>cP]dN2jIaZm0!m-bgG42IR#9+[r %XAsaR:('tCYXZK+b6CW`,N_TkW^0YoQJ`+eO;?)_BNbJWHd]4fN:gdfDhg-&W;A.8Z%:P0cc!-H/B\2%Y:s*+_793hYQ!hAs2*p= %r4=<@kX65^E7W-ERCH!!`?kF"j^>$_kX$9(Ag#2.>Icop:o!E;apC\$NhH1TsUaB'74Cl,5K]XT)Lb %72E6OX?9#G!JjH!f@)4]6?OpOmR:$=4"O:8[\88qYe\LO\P6dl(sM#2I?&]+$;P)E*T*e_.o$LWh+p66hRoVVn"H_YZ/m*.X,^F: %;X;#lK%P+P*2V@M,cdXXNFg/s1?"+!7S?".=5m7=&FZr.h;Ofe$BK0ufh/$d\tj-eV/o3g2AeG[6--cC;id$C84qN>e@MaUeiUM` %B9F%6(-U.d=EKfPXen-e3E'LsXO?C99WkO&Yq@-1u&Ii@igFo-D&_.IuSt3hn3, %m"uN^X^^*3Fdq`'h.F*%p>4+L3/b*-aL'7M8-dqSF[S$X]Bqh,gpdhq<<&-sSql3s!H(@%VF?_&W>4$"k%e=:GHH4iIA9jJH)hI= %m[^'i)0?re/.-"SM\.@U0lAL1[^CRm/,`/[<<..N)s(J9!d;$'kQU6;1fi,qZm$HjSuj02H#]+g2(l%GF^ZG5ac#)`,Ll%`\BL#& %qm(?K','49/#Yh"?5J+]8d&:[^:Dt$_-I[*g\?*%)7(\`b])'2$teH=S8XR3\)Nt:G89/Fm_8A.gt]a$bgo.1"$QEZb:]mFVu,Q> %5&n0+!n#Kd&c]'Xs+%]pYBn0[-&+!+E/C9?\_N,r^.4%r>s'Iu.ga-.F+RA1rs@`-&ENa61jW;'C7u#Q?[/^"4]@t'3L&Db;TZ,L=g>L?nf3Nb-q*glGZasFYPW5W7KrP2o %,ds1BLaY^eg-T?jK9\DU=GkM7=;*2!C0dN^,J>MFk%^/#A6hW"i\OF+r"FIDi&@8T2,gZ=d?Po^iG+'R=6#Xs:pnA_P#J6;h%fs_ %5Y"SO3SQ+oEd[e`:![&9e0jW_/UOG##Z=a5^R.etc#u@@%)&#EECG5CbBOhs_orZ?8K08?:9B4hC+:G".hA4o?%d8ah)+AoI?e"! 
%HYQ;=B?TM_)%Vm@4O@QFHcEL%MYBY>h&o6Ie+Z/XG:/bR7(ePs`Ns*cSYQ5tX0s:iD%HhDUsYV6r=B"U.,PU,!@&&p2b?p04_a(U %NR\En+m+cQRQ4bfAl?h^Um=ZLPZ+t0LP)3j.i1c%=3.cY75k(\'h@hO$=_M:;Q.R+oN4VhbT^'F0^qEqgdLqS_%VVK9ICYt(_(T' %2)3?A:<^IhS^'A-hAW$C;Uo2q=k\SVA7*Set0t5ZC>XOrBWQGQ*NKU=7eOa=j;T*CW"C+7BrM*#k %U`#&qjXEf6#Zq:6XWjIU.ctG,[V7gjBd8Qc>`W@*)T:,01AP<)1J5TF\>D]:diufR@\&f>e6>f!Nsg!bc8s)Vku2uumkH9qZRT62 %FKr">Td_lY'5WD'L/":MF.KCke(1U87+G2=61g;i=^)VgXQ&Q8]"&&CoD+QQWpCCA?SHD7?V5N[Zl<#Sd??q,XTY]4?rb6%UM4qZ %1:rqWmj." %/.UpdI_[I0?nL9CmH%Ik#F7I.n+d-d&%:`g[QhpC?dX1dU30&kH,iRtDboP]atEkPX5U$(mbV]rM7lKDXAiKk`?1oTGpVV'>&tBE %5&1:I:Qp'S.3)da"sBNSQnP&3&`sC4WMB8Ld**2m'V#`1Jd8QK'Uf:gno,@`%ORrpC=U2;/"KTWf:d.L")$VJKM;[la"B>eQ!13- %3bTaK2<[Sj<120OeI9l8WX<@=R[JcL&`ut`47]d,cZlR#4uHu?e1qOQr_F6pCf,s+BJ^(t)cu*9+C-/dX$.kob[rDQH\G[A>$^lM %L&4'Hl0Hc(@0q2H'uZU98XDIc[iT/I8r$V^*nqm"e\u>1)\ij$/+]]Kqk=^GfITI>?SUj5'&TQD$EbBA3i1T:,^!%M\gd>iG)/E% %E\J:K2+B.gQE!N.2+OB&C=Oe<]`?V7,F+2`"8a9@,`&mb7;KI,3S^8UBf+ %:*;.f9,g@%I\d#UGZ\(TeCO:$Y3(GQIVSA5g`Z(U+rS!M<)<72EJ9']E+>1X@@BYO\$=@Vb3O@UeAp\,lRV"ARcu>JmAoiMCg&9` %?CZnd\g)N6`0#/4lODV%L1Per`3)nm*V!Tn>:iIVgpBa2O/d)q9rn-a+fomj.,AYq/h>fSiiJQ#.,_FFEE!*+UXSU:oKl&F_k/c? 
%T,jaZZV`^DFZ8/EbUJatK>_ %@mPHl%8D[ASVfD/MuGH3@6f3FnRHY13`CF0+gF7$=/YHC1B!@oMn3:,&H%c*:MpV*pN1]j%FXhjtSg\2$:j&J?%-o%d)q8k_c`&8?B=QhGS_N1N/o3ORl`Qr1h_Q>POX@sj3j2`"%WK)An[*>IBnZSbXVPX`:M[\hFO%i`4L/aENf3UBe\$`B^V&(H`+$Z0kJ0iRLEsa#.IMWkl\QLWK^$ZAWST`H[0^8)pEB %X@>1%?Gp5LI"pI*H`I#ic^P`VVW`XJ.=RT"B'8,*![>29H-^D%6C%)!;ZFkVeHHiHV9@/0-[-J?/9.Z=>`!Y,TUCjCI]4YS`)f9` %mio>)],B`G&Z=hi')'-`(_$l?%.bJ`j).D3e'Rt7h\U$h6^SsP'WKmL*i>UN5j8uTNa9h_Gch`2HAThrN.j?[jADDuTQ(h%GR">` %MeVN`I`@^flri3/\_lsI#:O[;W].N9Kj1_UlF!fsBM+E;Jn01-kCDj`T[$a+e#4n-oG*f_ITeJ^Y>oM3P<7DJtLb$Gb<@o;o %Ul21LJp??V*HS8ok+LEX\Vmb=l#HaM[Ie\"oZ[o9meI7"r5D[,I+?R&A12OfN)Sl'pf6H^Kp,L^?khP+.e?.e!co3#[b_++8&^UEdJ6re\O7"^phZo7@`Yp(eS&)Z28]Hs[&Z3PMu\ZG-AZ^)BU,DH0'U1p(54<5e<@'.s)7iCs$Rd\ %nqS>3&hO!9\j'j^YYSN9%"I1TOcRjK>lSj4D.2P:\*:NV3:8A(p)6H7?p8nn-#['S[sHXH>Rl=1Pt-AI+#M(B>f#3a6+hqe[UGWH %3E\n9M;TAT%^%p30oq6d.?3X6m+.aEI9ULcd.l7]X-N:k0J=s"noWo7dn5D?Wla"rHdg]'`Wu+dQ*#Lu2.O;#CN-94YHMa7Qs#_? %Xp'n5FbhC&BM0FK,seSd;FNnETKBZZn$KA=1qf_6W/@P(b!*8t_@>V8CVfE(jg;MD@(G]C`IhB4^XV,lG.Gb2FoaT(J^9Y?; %g2?:\fs/A)KkV9[7p"&`>cW?4*F^I\s$!(-H0#\iCYGXbb:42RbihHEBL7)>op%AmHnpM8=dVk*LCh3oF0$&5n7O+k]!;@cL`*-B %fp'\'57Sgn2dtOgDs;QtALqP!(Xl;pRB![a3&Ij?`!#VdO %0rO118HSiTf&0PBF%b-o$[L3VgU@4XD3Yq+]kVFc\&bBVb+VHO-#CXO%nWGg$7V[2;JqdaY6uf<7Oc;:q7@o-?MkL\2E3_Y)FpjB %s!nto@;851$-RjkL!dO;H?jj_kBu8pKtCQbe_AZhd6?>=RGP7+9m*d=(]6!CkuFFR%i[:gEkTL&WRP.SS+R8\EbW/Bf-Jg>)g8_8 %a0a:Zequ-()BM=[pa!ECSn`T1?>9'R=k,4!?%8c9Ga,"M>u2!R/\\LL>O/tr?/jBEX6j1.dd#7$k-3Ro?3tTXpI_OA)CE5IbrW=s %D*e%K8pshu5UR<>HF4\%>1UroeAULh[:K.ibrn_*7=0B-&]rU-HCeY/Db@J0cR=41cct7W`5OSio2-,:pQCH2c-(KrXS4)S>igd= %Wb'@]LRL*;H^6.rH#T$U3YP;1jl1UPRnp$MMo>FkC,JX[^VMaCoVS"*ld>O:W`/5n*TjZg1&K6DnZ:t!UO;sR3qm"[XOhtlhaD9u %*TYuTf5LUh9:]a>hdrBkc0/Bt;I%DnX>,SQHBk(mj&L6eo<)/!V8p7G'ffZ=V-&1=K&t^S#npm_:ib[bgrMZ$h#3Y'eO@qWIa4Y/ %Mqf*DkT?!)q>Qn@pSCNi)P'KF##-)@kl]=q"\Bt;.SIuM0kFXSgh4(e/D=.9/_UD*(i*=%[R&u9i[ER7.%@,L8j/1eA?Jgl'&%A, 
%7S-^i;Z5_11@$6"Q(MiLG1)l`MZ286\lYBuB_i@*QCSmORYm*[j9$^7f'![%mW,>9FXl]t2>K%B:'(S]FA;q9$*KJ*ij-LfnBYQh %TdT$GZ'PI(".$l;]8#V9cKsL5?63>p7WV*nlI_B$^__-a7#DB0\1PaRcoCl^A$4XK&"rX]t7+MmN1F1kDj$h;C0 %_38Z4MlJI0QO=:ci-8X7r\M-*ddm.KoOZ]e1nPd$*&M'?jYIu,WB[:MikUDF-kHdtNfp;5+t4?@`X?Dp2Dg4!,4NSB+")W1K.r;q %#:000-o"8]d't:Q![@Z@G`??4rXs08.gJ)5`nL_=2\DMkuOqC;1*B246RkK\ld'E^*=XeW2[q#0t.g_hEb8M./5ol3d[8'<$R@\. %Z/DWid#&%mGq`P:;qc8L&GR'a(G-*rCs]&jDV/Cu[7bINm\]kV,eEp^CZF"),eCR^SGJamTAr[Y %/p9!AMPa8'`^2!g^+/Lkg_CWl1R\LCp"*DGC0gJCm+86D9hdO:..r7@#t_VXrGHg:Kpo#.8#MXhKEt=?];;NS82\"WcA"cjrqj"o %0.DJ2N[lDA(rSDP=QIR*A()\Mn7#MMO9epkJZp[Bmp-6S,$ed.l?-?;L\;)Gb@P.UG9pmrT:CK35ilJ-7":M4U?:cTF1oj=Jf>sJ %h3)tQ,VI-s%T5E?d/Jd=-\N`lVKa5YYgKos)@sWM4sR,`220UXYiCu-5ImScmB11 %_F?OW--AeXjeG]t=C"ZS[s:(0n^fYgo>/^kU[!$ug_n9T)9_[@rKCA&Y$'6eX:92][*$iH:S0*oK!#@#1O"sXs-(CLERZ>%6/eb' %NSigL3qeNGDRujY$#A4VVLlHVjCIpQr6e6SOf`XKlY7ZoRqC-O2eA8`QIgEZ$Hka@Xe`\@S;ckp\gqQjA5\>TkZ)u30pp]ag?<>^ %X'eV%((3oW>TFeEB'WqH&.,7DbVH>&tLX8FcMNQV!7@V`hVoK4!f1@T'qC %Q=S#"NFG8g=g?\>PSS6X>-fOCp\U2Z_f3YiNRp"lo`e=7Ads:7=N.^FaJ"j`Zlm;_!K]krRH"9=S!qOV#V5;t4C>0]/nUm/AL5]b %'YG,mW3pS"S(O[\bR?mR2ed0B9;BW]gZ[QCNCgZouaEmLD%\&KUc42D1qs3)!8+e`:/Heu>^T %+jiU+S'OTWFds(0of22n5A53.;cVpVo[P=1%m?ua1-!HZPBbr^TAMV$<``K5K^tplc&s48.Ndtm)qShR\C)I.Q'j0dB?bM")6dmo %'he"p)+3f=+LSA(t[Bcrh`BC8m6eSdM!?ZAJj_S5R1j>0]h?T."sU+)V&\Q8\!EJN.<\SRLLECE[e$LB^qSS]#\B`:_ul(I0! %W7?:HjZe&m7q?,b>@V!#G?te;Li[Cmo-&iT,d=W*-aRkbMJ1miC00cO+^ut2_$dWhhB#;RB=1je;E_XX=#4>uYEANZq[JSY#ff4! 
%C=g4&Mt\5A!;QDW+5[%oj.#e8hn9!gb:h[Sj$*gaquK#"plf("qY^0^a7jNYS\P"!S,`0AS(Ab@kc'&A_>khq]*qUtk$?jHYL:7d %4CXb:DM7QY8/t>W4O*?o5l^gjnE9fOi4n^.mMEKbqX<8.>CunCn);lmc$'XCc0i:If^`<":`fUNh&_1)e(]>*&+%#cT>U)(iFgA^ %A)Gls&,kePMn:jMn'n9.<`hIqY<$ee9(GJV.^79omXFnfHM@/pj+DfiVpXE^`YmL#BC$V$"R=eD^<:a[:-CU#ouQ:E#7Bg+Mj1cM %DNF#l8cGk7Jb+g'RaPK5@7*$$gr$b99M`N54:'i+J[>u-oi_LtoCbL\2cG4XG69_>Mi[o4r+_nOoQ\A]4L_!R%OfKT$j:"DTA*HE %p9cE02_b'N341*lc#qK&#"WGP^f^&rdB_;pQ(g8IG"<*-L#%)lpE!^S=eP;ET2^O!p#_[5iXHUgRH$sosc+N*'iS.bDeVVU^dkJUmBiiAsW37ufZk:o>Nc"5((jDPm:$_>FZ5tu_^gCr#YLsQ@`I'VhF)&0Ph2bXH/]'bR %`%K$V@BB*KP-%*@!r>SE:VC0`BfMZ9Q$.Rt6htl[+;[M$Eh]>;08g8L>oLaF'F/kB)r9!eqV`hW+]sR9#)e<)`S?DU?_Lc"6Or'q %qD\2$>QNWP"ckS25Z,UtMb]G-\?-'<(j0Of%BAZ.gHFc1P]d`*1A/?8KP_5lIkCM5=E=#jEciUDb$]e[/YAmeQCH/(`%e)Q(Q8Pl"ihL/e;qZh@11@4b/IlQGIQrKm\$4g+V[LUK9%a\d6%2QH0j'C;$1Rb(U!S]YR %Q5atg3-]&pl:JUqCT0lla"nm(Fl\?e<4DIa3k;\Ig#c&p-:3sS-=$Ip,fXDYlHfrmD+5pu,+PU'2dXcd+9O.[H"7h_s%QVW3=8,?1I1mT]ZNkuf!2K7]Ns,09>QJ%Rn#:ZK6L,(1a!aHS %"sF`3N`@ZqSmB%@o!;X0NiH)t(c*$LHYBtW%>"To"H4\qXW]D[IRbI[_h+gS+2EB$Zju"(mNV.V>`\r\^(P*%[l_odj:$XS+6@?R %!`k!32TF-*0j9p!/re9\#`&Qb-Q)VrZWH$dGP_^/MEaA6N-6"ciW\*Ii%8lkQr*D1PT:^mDr[7:=BgV:OSSjFn(J. 
%"-3>lN+\R6=s3Z0*1DZ5-d).0%cGnP,P`$GbJri$$dej"P5/H>Ti-U:qo(kK2f/PEPN2pVKUJMYD1X"u'.Vn"^VN)X(cq655B9.fjgWG+e0- %!bIbGC5T>S!5tV\r@uQM6pS3cI#;?GJONsuSNA?2U2\9$"1ZgN0W#]KhI.m[n#WbCGHu3sRq-dc-t>P7R&En(\H@AM__gSK/%WDd %*fAs(.^hS5*-Hn3U/YUeF&ZOSXFXL\@-N"%!C`_48LfjG*!"qu*EEpuSE8F;%jI)n:ao[>,Tt<#8qpMNDKj7S6lD^<"9V68jE@:N %/Mrc*SJa_0C_V+oI2=+Y5+cqP@uujJ/K>[]:2Ss6_#`BVI$4UAJ\tb_e[WhIYIH=&MK[]+l7$7\NV!s"1r49uUc7T,!:Vu6%-0,l %*mA,^((6^B'9@QYPb0%+"t#nnOce+0"@kdkq2bi6`PME98l$ %+!%op:1U&cWR#b7V/-ieRZc*M1b0_=@U8=cMF970#/obJ)Zu=TGZ6%2!?s61]E,HGmY80$Tg5Zi3C(rgd1/]Qo]u\>/L*WeD5:m% %HE3/5)?U'hgRD+H`.=LUaGfCZSdHPY$#/5&fPr>aZ=jJ>?bd*nfTp+nGm;9=BH\;5%X\kVbR8'qkdU0n)1DBKnd*16oN$!_$V<(_Na+Y)otV%HlD?J',#j]^jJ/?nIXfmSRuBdn=\D#1e61OG0J7ERHKMkRCk(l#EV_%46]a0GI$@%]r+/6 %9m!7,n5_MJ"i"XpTa6UUT[lkt6;/_TJKdTZU[[IL&EGr7$tCEMQc]A(Zl+B_LXe>T6?*4uSVBL#=EEe-nL>^(?f3Dl*A0D0Hn2j5^g`!q%$BDgTF\UGNMeVS=n"='Ghp=^+=G=;sO*=..r*NB7nu@]-7_0`8dUK!*-$-BcZi8N%4',77Bs\EMnnYFqsWP+]:j>cl$:2Z^`!ALe^K0YO'PHseVl\bc+;:c3NSk-mJh:q7M %.Bj>+9T@DW7"@VOO[RlW+h#oLf;o+WbUf"[&H3CeTsarbDSr`?Q1W%U8W5jfGF]3mPsNK'Si_?9:+UZ+^jVQ7_TmPta@jj[1,1"X`=T&_nnm/?(.?DmYiG-6di8XpU#gg]0rkg9(_98GEEO<2V.B6/o9`AEcM#Qj:PhI2P\6E14QXQ`6QBJeVUMqE1Fb*1es, %:QtJ[-^'&ZV"9WWB`nRA5PA7KeE8oKJhTR)kNSKm!_kd)![f@ANBL&G!!hF`A7hU.L]Hj57BEKK3LE9J$b6:HP? 
%\e+Tk1a1/q!.f[]/#jfmo#%]BZ]ZltZ"R/b=Fm8\G92%4&_JD#^QO:4T9*\6m4Lko@P[%2Nl#Ffgm/@*NQ.0H:(KNLHG:*^d!'pP %+:sqp7H64"=b+INK`M^.DRUa[c:X$CD4i/n_Ee=\YrM9EJD*8YFh39q,@^n%DQJ]r<@EkY+`RV*L`5MQ];m';6=mI7_&i#7V7L3; %7j-1+>R];Qh>jc0E:Tr$."[G=&\l0R3]K;].g&1Ie&rHX'0?3rl_J<(IAdu]jrkU\l3NI70M5c&N!Bqt:b(\dVhu'ar\;ka^te8' %OU"lG+k&!20PSD>[W)tCIY<]oc42]_5^drVV-3%'cR^FKEp\"u12D`Nc;\!b>Ajq!B0269I,Xop<\-M\Wna$7cFUS)lOXrZ1)f;T %.RhckOoS.[:(:>CGm)lqShb4g"SU!>cjU8#15*V6Ql@H@i@;Bh+Y]Zhf1*q%)j#gN8bZ(NmbF9M"bdgX\^G[87n+k %&3T3!`@8rUVh1G:b%/_^N@CQA7X6CEeQD`JD#nCrFD]Cg$lpjW(sJe(^]`bFZuqS4*Sf\ROeYqhli]uD=qI&q-]=lG")$Y+ag$@8R,HiKa9Nqr6l-9f%m;R5/FF@Q7eq=?l##a/\dYi' %DMeSR01heaM<4m0Nepq5:,4[u%DenPAB)R*.0f^4Y>T\8NN_S#cHTC%+%s"o4f\afUsgW%n@O:@AG %3rn_Y@uS(R'KLd/!#ltKK5mhTg+sYS^B?hP=^e=&]$GK?:e[V)+^cJ#,6I5!9_k@""Ro!;9WYg'nG<+a\+hDgus9q=b*5r-0T"E8;ZJjJ:i"p,3$f;L0VT; %0bfUbK!IO(&'W%pca0uJ^kaP!9u2E!S1ipE1U\CP$j/4OR5saAZfH!U]CJi[KQQQF:>tcOOqb)RNTa)UQsM%#u#!$Pi1>7 %iPUQM4*B7p@HNg/SN@m3!END$bA'WNQE3`C6#NoM9@EcD@'-Nd$/I"sGb>j:"%gJI9b?cNU_Bf.l7S./O8s.Tp6C/"k>E[eJWl22 %JZ6'X;:!3%?u!_ifS.c0geZWh19;du)?WX4/"(Ll)#9g4;0ps-V5NhC=cnh:b8,.>68#PT]l8>RgGmV,oZ3_M3o`cd"ZY`X@W;jQ %6'mOT+G,`!L10.P5X8#p+E*][mh6OoXFM\SH+OQ]?gY)YIAP*L.&0YFZ`#m?3 %bO1T97,Yb\!HRD`#1/P@JLGc(UJ@lk;pm7\D$n3$hiJ-E_JJLZnX56<2!D==MF@Ll<9I1HP %L&<1O4!N.V]=sJPdEauo_K#l.5YN15;&Xo8`*Bu4/'h#X&`&s@]g"emksoN>gdbl`O^A;D;N/W:Ug=t,7O9_:TI9e1je*V:In0cq %*4VBQ832pMO[oTMi\o7G@ %pL<'h2UEml[*tq/IY,CjiahHQ9#1&NO-L_Q=MccT_+c*Uql!._rk7*'@Mc+hC#IiF=T]MLc4#HL.l5^Oec7]In0A?ZJN=&.6QmQ` %,jc@-'FIYS)#tI0%S-[qdh*GoU>>*V#^K<'Kst7gF5l#;N6R?tc4,0$Tls%_ORFT34okn.nmZS#3Ij%Vff,<>\%#XpOGV1r21F(` %qkbLDRjjS+NO_RoG/1=:NH/g65Z7800TPk: %*glHlbk<(ZP9.dk..nV1Lt>f':Gu,X;uG.[AKfJ7[7P%+k0LhW@GM3%hlpEM( %@q&Y$"3JreSeM$L@:Ppg[ckC=RP+eHjoq<)Z6".f\*&>5RSJ`#I/m.l"aQI,$I5&2a[pD3T`HCS%(VJBHmN/;\=OeK[P/2D@dLnQ %e,dmPU6Cl*ABg8p?B,.ScH3Sc`;D"c]1FFi7>5db'>)1>*<>Z"PY@+3W^ORS,ubOl %&#p&&;#>,MF]9#EMXJ"&SX6)8p7P\'.h_\%l+ZCVb$X&,m4(")XANRP*1o8MeoS`@7D6GoJC:!s>Bs"-(m6l7e2(04Q1VS[,?Yu$ 
%oPmL(8"Hp,cDr5-)>1d'A?*u%,q7k%]PE[Tq+B-RW(JAp7CWA_is/kGC"@J/&\"N0epLjY>m=n(D$[4U5SD1#iM!E$08hP5U%UR- %.qF=>YtTV=8um$a.1erKJ\iE5!OW..DI-^!2DEej0O)#\''lQc_e@]5BZC=JY0)nD;9L4q09,]Fk2)]um1<\:-3i0Q9P:-5Bi+4R %)GR7>]0qDAY7t_f*mki-f/$JhRlI4US\eop"$o;&)0N2-TE(L&oYMUNmC_Wf&.^aM9O!Zsi;DJ'5)Fn:!)Y\RX&V/'"<2iR[0I@l?-,p@=b)d8-OV.X0V_.HcM#KI61P=[A\]j3gYA2;/=BEKJb9FN %m3LOk-JaG4JI!5i59O:L">3&S7i6<]']btA\lh^5)81='Vma,Ig80K;gt\?2Aqr07RjPn/b)BeWF]:t"3>`=[4*KMI"NE"<*Hc9T0[?PS0bU$'r6jHpPs?@%i+]CQS1ANnL._RtJJ!>n7o$2%XVTaK#bLZh-(Gj4 %]1,MA5-)"L&(FO4)aMZ@DY[/Te3#]g,0"*qeqk^%YVn18iAQO+R0Q42Ag:i^gq)[h,fh]"XbCOK3SqB(\hGLeSZ7guagAe!T5at$ %S^_on\rOb2E1,8(#TWeVEMmaX!&gfgMNm+S_;l7_pr1/jJ4XEB(,$^1;T'ii40!91)^m0N9VR!Y,KG,\h&m.876*cd#%5<%@TueK %g,s@'%Ss3/Q2hb!nu1>fE%:92#I5ZP<0'5ISQGO?jk %ciBZ^5j:2k`.g1IF2I0So&UZ>B[tr!lXi#pOt4ORqRU,?3i-P(>nW7OPM+>_c',+&5@/gZU[M3.5PM_l`JEGQ."0-CDL`-adhi5sZFjl,0BN[-n#D*["S_37(e(cb5g`JB,sE/o+GP0;lKT@>d0Eqi@R&Lj=U*.d`1'9kahT:3X?C@=!(c)+ %EaDMog.b#$CR?$pg*LA6R70:nMDuP3S50`+/gp?tgBDVuS$HNM;F*a]NOMI!'"_8gaHT+BU4hHeTk)-X^9:%5(78hBZEmUH0d6t4 %Z%42KS#BAZhDHkZ]TP6nW1M\rDIq>In":s"#aKl'\;L&]AZ"hj4SgrB,)B.2+GlJkE]D:i@AB^\SQ1CnfoRhSJIWWiB73ij[[\PN %Pq4lta#auH<'pu-b!:kh.0@U4cU3\gp:s8e7?.0Jh5!cX7?*%Re]f]ao7eilo,+*G[D1e!26ed]%N)^"9PVN^[1E()Y>4&(Fr<$:j$M.V\VsWQ?C&S %dU%pHU2m89!8u/t[m>g->.]\A<*8k62eA8F-+oD6F)TCN8S:]0)68=i+CB&WK@qd5??H9@Cge)edR-]5:C(!c6C9V@Rh_DiB!qP< %!q&\1V3QjK!ip[Lkd[DpYp*hh5m-1tZhGeSEa7.gYU(rcD2l3L`g1JpAP48-QKbAsd:UXtOX6!_Yst$ibHfpP>r0r-0B6BG3%qoE %s%>lg"LDY_@!Eh*bb!pYKKkRULd1%<7fk$uB87a>&qeG@*#i]3rc@L.s4D5"$4P<(tV_"7mReOuH9;\&R(pS-Vfog&9fH?N)Hj;&0 %o4m5d`Ps$';>CJ+LE");F8l5:Fq!=tomhW/HA<\(1PRFj)/MZ6(D!P*WjY8j_JL3)V0_fW>D.U7-mQ[Qk0^-E>]GFeB"'.4"'cT5 %!l2#@08tIm!"l1$0ErU@Ep1_EYU8u7imD4AbR32d'fm$89]1goZLLgQBiCm_5[do;'>d'd9%\kB:^Xu`V^bmRWq0@Ea^p!a/VRM8 %UI'j]1J@6EC17>i;"a_Q/LjIGeL,Q'0;t`B:*XJi"Tl%F+lmH_O[]q#0shh.>7;HY+_6W]8B`W!Hha %g.iMAa)MD]3+:PC",$KSpj2a!N]\imJI2p%--*\=!Q1++bjf,@^>Ml6@BcOYDXiP(#]`AP&9LH3TB4`9+qTli$$qBlgh+N/)@n7( 
%fdR0`Wr]Vc(g#MXo6eqR"(s]#U;+g8@9\C(;P:&](ds"7;K>?6`$@.*"'o:Hra9]P#Emgb7Z((\q2=#[@fc?h?`hPqC+b`A\7B_$-T:BPOd$W&]?LGnD%+%;*eA6;am@Hf&h!,k-3pp_#]7"L^$/F?'$TA!V0H! %[KrrZfua&<)4AHP5SIJJ[USfiJJBHuB]unVXGCoY+hYK4ah`rA"$HZlHeq'GfVFYoH5TYsm.lQF6NC.'[J( %d,b(i6uk%0`D5!gUuGgUX#s?baBu<&`DlooA!@244(sJ?DhS/UWc)V9ma14;8_O=1M12+jDAKNCJO3l+6/$Ft32b4nI(g,i;?9@? %6K')SKN);qjYO%60r]D/hnl`>K@n %RKgN"MZID5bctiQN*s75V&%g0n!;\=r9ga?!]NjRcJtg<@+ot6bG5\q(75M2l"q:*0Aqk2u(B %Qi]hR^;pIh)_YJY1lMQC?P[8[/-Nk$f4;NSj=WSgFN-[8cmL5G3oAsUG80=s`]'m,\j0MqHl[L.Xf9],)GqWdlS-A;ai>\oT?u]C %]KW@Y\l-IP1t:'CCP$+7AZbN590@[Ed %Kn%*-?]2m"+O5ri#3LZ=.\\Ka8D)KN4pgSC(3M6`2_08g#.[T@Yu-UQ+9]cB7M2(1SlEmTM^\5g4$Nc/YtH?D0g/--`H;/L*(D-( %-CZZLXW&hG0#c#-jtR&[-l`8jJ!/.]W[B;R$@t.On(&oZjDp'-ajA^cf`;ljD+6D<._p4q,=M3sqpG/BZ15-8@K:`O7cC7jZFHj1 %GXiab6Oe!mj'PEuOeib!b.&=A&\53!tG %q0\l#_$4FU:`&;9,g(pA4ZNYDe#K;8lNI?@@t"$4W#i5jlJ]BN*C!FVQQ82@[8WpmrWVIhZQ6Hj]YBYbm>,Us_?6]E,q.RZjbUq]!,Va/%'Q:C:&X %0(;:DKVm5jrs[$oo\%&F,@c9)5J4rd^RXb+Wjm/NHFHlgHJsEM(CRLaRl)6f6<:(GDqdA5\dZ?4N4HeFILD")^p+k:]KX(DNu:kT7L=ZhRB8&$s579Xk3F/\FJ&`(j/]Ji7p-L<#Dum(Lqh7sFbYXFFAW@UD5+QF*n&YoHO %fI:oTAt`;J4EQsd,XVNhj`;KA.SH%;;cUg1jk(aE*d7!QV>Q+6'^g/uXd[7\e9XEL)QM"+uWW$079I$6eiPf %1$Z^T&`LhO27+npck(u6S0_)fG=m5=>;)3KJL1mT1dm.SjkX(k4\$1m8)\IJ%X4t?NIVO6p4]Xg.XA3G"'b[W7NeC\p]u&c7=j5% %JaRR(8LrO9U)fEW(_O<3T#hBCX""XZ:^qkCU,(tMcoXqWm$L=$3RI7*]!u'ZLg+!B7*":11b@(W&qPL_))o]?>RJNq]:NC:,\.M6 %CKi^k"#qK0cb#0,O/;Up"$aYE7%P2>-B$oZZMljn#/3BRO,^ljOb"7$7r-4/8AW^h&J\bp+HL"XkG?sAkNA5HQjf0bp987IO3V5j %b.?b\H/U4W6OAUmi^^eiYkL`7BF89"1PX6n?:6R#+OE%i6<.RY9/bJ"$Ap;lpq;%Rk^!eqrdL@SacW'Fr/?9A>F?tSZ3^@S7st5t %TIds*M0E@+H6]M6<>UM*FK.Q:(t^n_k-m6pn[(Wkl@5oPJ+D4.:Nr\H8_iS(tV-)A,L7U1)H?QjJA,;UL8fAp,c@%B[u" %RPM+hZ9>$G7\H+)8Xc=aYp/p=;+7_ZhE(b#S:l\A:Kn+cMLP/lEL^:r$"Z'7YmUspa99>6,#Xg@LDnipL+_o[+Aj%FmGYh`o;T(. 
%4oVa1=bF/d?WrFS<"\-J25g%^ZEu39H6%*e7]r$&WTK*a`Nr3`4K$YD('867CDM.N:DSWGd&UDrXc?NqQ&AO(=8ir91tN)bQ2K)?lH@@"@%5&5R.@o>(,&Bhn_0N-Qh5#+3Q'l"7kN?D92h>)>+:Z:$"og6G)[Dim`/R&=+6joN&UE@1jM`5iK_ %!)nlF/-ho.D[%mDO;uhLQPCRo=_qf^G=l:haU"!k7`!l5FB`.=/RZDBAQ\"XlF/O.1f8qt!&JOR4A?hX5#@A+Z#rA"Ni#oDQQ-GI %62]s>%ErX+ZUAI2O8W$4j9:93l^L"2"X_<"D?n_Q=67gSa>;gYe-mWgilXqK:S^;4_3f>ed_,3DA-]0DQO*mKBTiIF/p3.#X+qEd %72do)bX,#IbnaBQ4q"O,+V %S6;`i%YD(ZS1b?0dg$O.aRB:dWoEU%&?V8CCtFYrX33Q\jUUB4O9#9"e4i6mb^t:k]GPcW2*3 %A%K)j:)YkbOKc^K=/oa*@$hF^#ElEnL %)Y&CW?2DB$d\ssM:-`lbTp#QegHXMYB[Qih)J+ieUKdarq@K_3p,>lZ:@M9GQGSF.R<02cFTYhCe7biF %(=l-J3dQ,Dq06&!`'c)Gbe%#*Y"CjhZUO=#r[FG.FR`M#gppbV\L3k=!,r'Ec&N#:SrT^A[,edo"]9;K3C)RD_5^MUEAmGKcHapi=CbQh[1Zq-INLk5*`P,S+RfqOCEXfI42ki%Db+NFQ&:2.o %FZ+pj;6-qC@OXNF9"Wc0)g6O@eWBC>qjE4:KcNtG/X4]B[jEalk@7E-%ObQ(*(Ue(Vqpk'l`W)<&hYLM@E18E0=ZX[pRr3rCj-:1(2&2tC]if4 %(lUKdUM2R>hH4f^DE_CNj[0R"X"\J(VE^E8bS=$p0UlZ4&0T+g_nLI%gbEM9AF6eE3$N.AF[sr)et*pcH`m]jD_)t82*\c=QRYDs %?AmQbU%`*pfmBE`99Hb$RiY7M]?<*gH\uo8em's$/cT&J3WDD/K$dRp,!:R_aA& %[h9's%$lr%;l/u!bDX^BO@$-F],7\M^F"u;Kq_j#;sAN]1p_!gSTJ3k5<4j!kJkiUOX1h#iec;kbH=ROYdK:T7EfRK2b\99G:h%` %\jO*4PMqMmjEt(miRal2\6'D>_BuhG4RMeD@q,\OfLGacTq9)[L/q_dSUZ>88_`V.n?Sn0Q']j[U@!.S)Ofrc;pecYN(ug>c6BWL %_S/AJUl^D1_i%MqHL?'&kj_'hW-'tOej"4?)#>5XIj1K/]'0ecm3\:l$rg8tqt%#g3Y=d_gfFaMn-_Sj\$#V6Qj^+Fk2o#=$r(0*Qn0YuQ]dZ+ %qpYN9RJ=>>@fg(`lG^K:PKF(OMP+A514O& %lc'mb+E*3qGWN)?+JFSR.t,b@_Aiq%?D_qc%\'agpb[P(p$I-%,G%&1\W$&5]$8h`s62/+#YDP;>?U0s?(EVCdq9JB08OgN<7 %DaLn>\jIjPMnT(`@A992_,3\\0QYD_(/-\]B`58#_I:;79I,[&='(3p@AG:kArbtL'Fn&QIHJ^:>.ENFat"bX-N"djN?jP$@9S9? 
%,o0069_aaWboV_Y@:gr.VPVUgg*`>e$0KV^9RNJ%+*V]/gPCPB&^_Gt_Q9lVm,b<^K12.j"Ql]TpV5U+,H_f%[>:lk3*4k&cP7Wm/RiF2^s_"cY#//C`6fch`]74uUdZ7Tib/ooCO_@&R*Lu\ %+ci+`NVE4U%pKE)pfGgf?F4RdC`D]*]PTtG52%oun86Y1`%.E_r2j0tN24F"]4EP'f5bf'Z4h*Y@u*;PE\U7'GE5m+V0@ %>GLFPflc!rP-LF[ZqDt1K(L!MAl,O7ObM&)o>9sZ2MiFQS %\R[W:dcSr!4E5+o6p\B*GTp-M[on_K+]?WDF+p'KT)1#Xbq.b:3SZ`6VEnJ!2VLfXM0Le"VLKcVW.cm"Rq$t#/l/CZ>ciL6^X:iG %pBbSa/i/nGUdAW/C__Dc9T>$s_C,4qF^E\\E8$#^/L5]%bX! %kH]eN4<&aA2F:iC?hlAm^PnbSl-c@>Gp\c:?pqi'>rD&/-g9 %V!SmDh*`\4cQ=Qhp3K4Y;PYL*.T4a$,r?S'>B8o[a^!D66:n2YfXb6!a0fVUYd.`mkt@/M'WFa/BQ\$BmGUTCC*e:B$:ATO[>Dm6 %75"2QANm.F&mAh9C2d8-[hMqdc:KPa5f.H&;tI7a6L1>C)O0k)6ttm#Bb1C*+,E)k"%fDJ_sp\N7DJesk(4"=D>"CBj^i$dT.3TH %gg^%)MI^A#EU"=+S`'h%ij(AQaGFG?,Wk10>cfV:Vdr(`U3"/,cM$cq:FDsBaZVUZ$egWoTP'f;[_Z.k*Wg1H#Rg=-X+<2=/gs_3 %8tP1ren,]cb=eUSQ^3UdH?\VD/l-)q]e3%l03P@L9gHQ*R=#B_%][(:^F27bI'Wfpp*+4EQF"rH9[bJ*X7)CSF1Kjd>DY]B>+%7$^=V!i]O<:;YLVB+&2m %hB^K0oT$F[0Dn/de#&jl[^t!F`EO^P5:#fT1q+YSEiq?lSoppW5.8bd\7N:BO1.ibk%o.rri^k;h0i`lFk09Z#T[[o]tsuY.slup %>\?6`,pTdK:/O1,KR3'ed8#WYmq-U<6Shg]A-47qS<?"a'%/;>GnGb*0*WEYS*ea4hZqGA.+u_d8GLG:q;k*fc]/1V? 
%ahj*Me/`Hof!EeGJ#b)J-@RT>Qp=@r`q)p(FgV'/6p>6NF,Y>f;9G-OnOAr>r->8hm,nU/tUdID3uHlO.K8&r[!)C?LHa$]a6UZ:BTXqD`s2nL]inSr>kn"Sk&kkKB4f;@6q %_R)9mBQ?*"_A!d6623H!S^*c44.>BXa&REA4MU\OZl)ZKA(.UaY,Ks`6_li<\`F4KM4AJTl#`]UBZkYWFgG;&c+7,pE(Q&P[[^+S %dVa%thG%n0jl>%ad%bItDk%Dbj!&DHjGf)1EFIX%iO,o2,o=,Q,*uP5P1U7me/*ToPpmFe]EQ6gR<\Nf-T3dQR+sOrBZfGsX"ePl %Sq.S19nn`cc;ujH!&]sp(jBjBP+TU<'dI)1I&Jk2r-du$W)4>=V^D?*fT+a4Q6na&5Ji;8o+5$PlmS@s86&]Lr&402[p`S]lF11s %*9+\hr&0\p-M#p8oXH*[:#rd?r&0\p-M#oUn#h?W3>"9F+t(m)E#<#L;<3G2Y-H64(MX:ECNX:>K"M1*jj.TtV@Yr56&XpP#X=Cd %c^te5&P/-NA$.ZGic%$1aF2ck;KI^eRT`p52B1Z3nhN#DS'e7A1Whl2C9fY<4k6R@OR\/c;QMCiM&IH54H7N^Hdg_lDE_V#s#k(k(I;7.n:7EZBDHlt5fg*f2LP#.p#\%D9t4=5`H0;bQ(b&;,q*X3?C+keCg^[!1ho)%:Ko/& %C3G]BY^AprM@?*\ipAFg(eSSU41`sSQK@)P@sN;?FcbQe._9"0Ba`B-c7)EaWuL0Y1BDIf%:rBe.;fIEpVl^+F%?nGt$JLGbV#!sGE,O?SbXP&2g+c>c&$T0_^=Dei3&OgqS %XP'_a&<26A=d?Pf:pn,k7+PMa=_HQJdCDMo*L[g:CWAIe^oJL0_S4`DU:D7-O^B@(CUB,pZ2jhAB&qQW=`)fOr#4t>)&ABB,CXm4M?-j_C>9Y(RV_>,pV(fo/g3>_<] %bs<-.=]&GY1.,J+gnN>)*RU^q"atA51AE&G0/E<+HUR78n:aoZ,(m)$%DIPm.]%i4nqE+ED*O4;o["e?0=(?sM_DC2,)^B2/ABEd %h26ShB/V1i56Mi03O(f8*JPn3DTZi(mW6EI=oX,7rKF;lnn@)hAi,kBkj")hV1#mP_3G4FBpCEJ+QE)$3#"=6]N%a@[N0$ %a#)OO2$:UlNio>M791,G7#7NUZ=Do29/UfBCcJ;.g!\8-[%^)Hq02HNr7D*Y:'0X\I]kdF/RN^4J]A/hB];`)IZ %11ai>Am22Z+]+"UZDeuYUmNC-"K9:=7*Pi-6N`Wl`%8g7IGc7M[UQP;>Ce+Ph2]=Elp,34+@D*r/&8S.ZV?pul[^(D/5)*,SN/Ac %3*QF%33^/M%`?dIJ(YtHp-`FA48bjSBK(i*:*2$Y5eYg-_;4g5]c/Oa/jXN79!5^4j7'FR($/%4I? 
%dL"?3L(-!"JVVm(fT'PJR]plijqtN=kfaQJLs^ALfJ[6:@e,BM^fJZVD+b4XL(iXHFHm#=*(VGJ%"j1f3W_\BRda%3f9m`%&F6M` %^I'#iUVK"'m`FHX:J0[CGV\@^-dgN*`mTCCAZ=F8D%d#O0iX/",5Sce]V?ER&4Z#;dXeSY1ZLQOW%WTaB5Ya',JpA.RqRsAL"=296IJjaB6JG*M"QCB.4$aZd'4h9*"L_%>0Dqk-G:Y#T&AD'#8mN.1Dap %eB3B1V`%oSMTA[k?S7ZTQH@-:e*aA:.S80fc4cj(SEOpfY4^5j,HfWV(f>fBWk_SK %9+amYR>ulH:fh+p5r:`c.EOL:*q4@^>q(s]\-G`iZ41Q<.]ep:WsgQ(cOfc:#qW)bh]Pl=gFd-:+o=`?6dlaga%g8Bp$q?-T'E(0 %U&%mA<$/0Pl=3gqIB:eqO'Y3HnUp"0p!F48\!jI'RNgrrZQ7BqC="Q*",p?+*/=o#Xd1Y`Cu?sC_+1t:gPGrNW/YNoH,WGlH6M!< %2b])8MFZ\jXdJ7dW_f5eUcr`4X)T^3E2#r,eMK*Zru+nC%:EGL=0[Q\,JDu-7PeVDP %,Ro!CqT=+/o!MX]HFhpD`mF5tfubZ*Ckcge(%W]d-AjTLBCfXa1:Cq"TPnOffh=+CT.+XKS]a/?dID)8Nd\r1k+Q;iQ&(SUqMThaCK;6;^"_nAm5T:FM%1U.a!9_1$>s.DKCVJZSI=PsfF!i!JT^AVN58 %fJ%NJ8ITM]ZcGKUj&"BC=ti5jcA5rsXX4'!(0.W4F/!l0P\.?-8:,5B=:U].<:baEBZk4I_@KYE9!^f`D?N!S1=i>>f#",5p%kT: %&M+T-TRnW3#fiSf'NlPOVok[a56;Ru6Q.)W)AWLG[R*%H%9CHi;5L9Kcjs&[R'3,ph!+HS;sAVYIJ74>4YP+X;f),'Bq41%4p9qHCiO+AL>Ae*348,OYs6.N'pA>S"I.gJ\ %J+U3Fm?S$&AUiuhrn"(InQZ%mrU9d`\,PnrJ,4K#G9t,XhL4MsrpgHSIce0W?[V:0Dh%c4hbbB]7/350e%bqGs6!TNGjZB_Gjag7 %54Pne(&"gspl0\8^\-Huq>AkEmA[#_mH^DE`4K>7HKRoSmk4@dhu:I,H2R.2rpdaLm&fG?#<5&ZlFYhYHi9GkqURV(k33*U[9J%] %le4,(B4tuQJA$,@'6=c212%I`a%.b*E@LsUTRm&!2']9K37;4!iJ5`N8@gu2VXjp?5)s3nr:Ss&_f\YGP0o9o'[&]L2C>@1@,KIbRBp-TGf*f6qbqB[Y2A9`e\'g=#HN-gV\W>)NDV].o8O7W %Znb8FNC_t@TbQY1"Vl:8VC!Z:TLJPX_a]<#Ze'GmERdocN6c;_2LT+J&@%WlAt"P#*\pH`2oElm&t^k!Y\;GDM_DetHnN-XEPkCFqU3QQ??,'^pfr*`j\%5>A9#:4=FBA=CT_#i %o.@"Dh/`,0='`H9MC"TU5IHDV^APQH3kKN]o9LttD*>=&>'Xu*dMc+$/\Z=@a7ZoZYQbWp5O+8iO,Y2FZtI"^DUil+V+Q_9(LM@I %MH(Uc]uP2trgcR9rf238r@Mr4mHO-cGM*p9Xgmr-Mp#le\p2?f)qS8?:)&3TO$E8 %YAnb:Wrb-YT;u%m6`8f1rr;0'n,;1Bs7Pt"?\6U^^Ne_J++O=&:]B2Ihf*=$?$3Q1eY:$L/\a^lb5C*dTD7PI5C*5E(LI(eq7M+6 %V-c@bPGc.j`(,;11Sig3r%^H;M_Dd7`pVZ\Idc<(q!Kra@<#%T=Q^LH4nIJ_I/N_6mI%^Yb"EN?!N]DXkJ#oM?hiK=Sq"j4:@#Rj %ddo^"hA8BP#.JO<$9!j^QZfI9mX*4UTD7i8?77I&C:s&2mlp:@h`,B+ZL$".["I[&m!7fcJ+q3#Sq!`P32uJso?VgEdsm`*>^,QJ 
%*6"XFQec*ks/h?3q*1Kul/epLk2hr$qp0ciaD6j8/$H`c/7*?kk@!_KTD@nNISkY,a6MhPrmn"(2SNI#%QFJYf68n+ %Dh%\3cdp#?\p3bChtcnK')'SSG#2Vtf(!s3k$NU=YJNrYrla>?+5:hZ10.iCpWkdsn?[WCbWEhRY%jiNq;7g37YSil#bZ:_(7:qJ %WB3b(6/t#=jjSA(+Y^%.d`:87fAHAN05@XH/DQ@^O5Qa %s7X"tQM06k_#:M8[VbFWTD\Nr?`;?Ud,#;JrlI[cD7:[6,IuYZO8SkO5Q>ZJ^0#MFr:8qcTDc7F^]2nUj;Y;<9t^gUd$ih]8,heS %huBm[>OLU>rp0GgrMrH.He2Li[cg.6Yq+qp1QOa!X_0,N#%\b"I-^;mcO@uI)4ppnG."^Qq!1-5\aTO%.\$7mG#O^>UY^D^6$bfY)1Q3d(=k6cV^Q0t2_dAU6V^[4A(r0pM#6"ZrcjjGN'N^'\t;>8jOPUroFNQX%,_Eh]+X:ek[=U&TdCImg-e %$C2JcLbXOuLr'&(gBUS\pC,#`lg2>1?SSpV.ZGp*4psj#KJ+9?C(LDFXq.K]k_f$T7=QSTcJ*!(+ %IM$b^l*#O1X`aL[J*0&pZr429psGH.\9<)l-[o$#6ho2mcBi:q.=C[))KgN%X-^P"]iO)b[C%$:ra.WhXj0/\Wrli-bE[M+mt^UZ %VTF9XXY1E@oCe6)lbI?7]mo1bs8Ji+Faj"aBAVmRo^5oK;uZUUS>\76jcFV6p"[/IV76@VCo(Keg#\J_FJ3\gpA)Oo3-JX9B %I"YLGdkfr5rTaDmZi885S#GuO)el/Ce0aV+cguWS/%mmsb0nUoO4$Z#1]6;7.D9p4PqGb0df5iD9(/=fm.#pW[$c'r;r/3WnATfc %1UQlb,P.-!e3p,\3Lfe?pmLpQ@ses(q$3TD4nT)aPRNW15GeRIGkB[NFe8CrO+h/^DYPksI#RM)T+4la.6k/O./CD-D#*8W;uTB* %SZ*j\pufNWK)u$A9qsa&J+QYEK8`Z!s_a\IdDC]kenRZEG^`S-:BS5OpAbKl^\SOqiE`='+pAE$8=T`>37,:ud`[q/?$c>.9:%9A$ls/lb:UF*b)YRb?$4]gjiag>HteSGM_;_cknVCE]&q]]f>$VIlW7>< %p$u[\0=eHIdrFn(QX.>lWb\fQ@HC3Bh0efg.rW)ZqJ36;G55^9/?UD2jGJt5Y.sM(>?9R'Rj?g5l`+0,K!HCu/1E*\kRul^]]k_d %V)q_]J^b<)rdm;(9rD;dn]OX9Z4::MeL7F\q_-='`/VfDji'YINloQ)efkm+N-i*V9NF#].#+n( %EX[gt\(gtCE_I9THMLRm6f2Z*a#0QD(@"pP-)"gq%OFEo@a?I+ae:@$=lcHE\MG7)uPa;ce9'6)-BRlKTlP>KisXag,]# %>QmjA'[^81M3hRd%cr+[i=!s[f+"a7C*sh[#/DAA5'8Wg_%NskKTLhGl\2g^DrboDYc+4@;k.D=FA6=/"nA-O4Bfe %\i.@%CL-+!NF\T8d*eoZH:I+:H"AKj/H/ %l,FZC>_BZ%Ji,ip97Yi'"'k)F)fNCJL?kNQ0W&SIN3KlDQ7]l:l>*Q9p6r%cl<%)Nf`;;D>e3=V:Fodlkp9=h*@n^70bh?<$0AMd %\MB6*\@=gZ?8ub?aRh^aUXU>u2BtOb"&l+sJIsI`Qt4%]_l:hn4:a>C!o[]eMa?Ma3=/Q)9\YH:^AcDP)t@"i4m73,3_ %.@.f7Bp.s3O=5kdBc7jFAo"gr/F05h%J,;Z>4hn*^P;-,*U&fgo:f5XV($>7WHqNN*ZP\p?Wl9r]N-QG+L/`Qd#]e`#,nUA=Dr6Q %4>`c@JnR1@2hY@Mb1b'R[Fa9d#m-j%;nBEsdFD.hp,sWjm_/A9h0ZSK])bM)51l 
%O["*bHf"@s5Buf"F_9X)*?0:sAR59/j^.Q"]0H('J+^oZIcAZpHe0LWOhJ6=CDhPHoB*rYf=pX#J+fkfrgtimnc+"1Da468\%X2; %O)m+&Gf>DCn[!.]o])SCgmA':CL?u(Q_+!%`ub38Zgh[jVf."VplIg3G-*mfrq]ju %oIiH#`NOaNgY_kI5CMM\^NeX?IbeR2YNX^95j`TjS(NPJCqb8om-ER!h@G)50E0tk++2DhJ*V2sGk:33I)*3/H]D`fm5NqpN^*HZGJ*`1c^d+?Q[Yto=VkAM`n>DYOXltc %=3;g!hdrnr9Jg.+qkib-L(>:C^2(ohgFH@iU5N(-[V"p\27K`%YgQrEm3P?$a-2uaa5[;5L/ZjGJtjFsVM7mCm(lZ6.\qpcq,M)A %e+'NdED4E^dWm$@KCe[NOoX8(h(A;cME-M+?g;(WCIC.%,]faK#pbW8UBNT+g.e0kl^g1($aGq^4lnhks6b%=%EM#$D3dW^:d1I& %N90Vuc1!$Ib=Dfg8!'`SUYR9O0k[@$AZ"1DB;Q\DWSS+j.nN_6]p=AuBUp>Z_\,Rt,'(RF*.'tU]k,c#!T6D1lRU(q %k?0`@_Vk\Lpm@?K`O!+VU_lk?Pcn"Y^a^'kq9unHq8^W5bki)VF>S;[X$JpLfc[9D/M!]uYd#uMY\D+!bXdOT4(^L=_`3cGP<.]m %*,J":!P(-kZSWn/ecG_GEKjk3*fsmIZ`5c7GqXk`8V'n_1M.LKB-YIVm!m]f?h>@9mM6,r@d-7Q9 %RETaW1\Re&166SeD$_@XC51@>V#*G;c1o>oObT\_EXCe?S7>hq>:P5%Z7>ig*\pVJ>m'C0sdQ4oo0GDP;laH1D %0MFY8p24)Y_c9@P,O*lonWFWXgIF#[I,JN$o79Ce#IiR(XD6ta`QR]5:He92a1q%J,YdA^AQF*^AYO%gt,P,*"0OUT:`5ZH1UUQT76@Y#M'-;e&">MG.mC3(S:TgpBHN, %.?)OO%#T6\j$R+bXK,*);Z+r>mT@>Heh"Al^.7K'L>pNCq+1MmMT`shqB("Rft(6/00K3TeCp`Ej3Eh5e0glkIt.3Z07<^=4%\Ij %YM/usf3YDHo8#=(bU6>[M##"KN-tE"7TY4lq81`)fm_K2M\[mMQ[/KV%52a.Gj<0k2dCaXU?1,J,6H?bPK,KFQ7c@R@daR$U[]@9 %6tP-D7>n>grohfSDNnK&Nt:S_Yo$)_/55;pTDn'DF\S,"=Ncq@GW.E+:!In'[J],^5Egf/$qVg"5P#[]Us]fns.AKHGZ%UhdEb%^>/G6W2UH[cTkLlVCg+[<>dBPBn9rcq2ZA-*3[8#:oOr=0O0>"XdGk6OqP)A6^,+G1rFeW'0@midt+^& %#2L2d68%1G(_ho`<>rSW;n'FniX(Rh#!=d:k&+*ubWnq'#4H?rlIIZGfPiZS87>k4D^&'$*6]/>9A9Pgb]6C\I052CkuJspZ3O;O %)&g=Q/Y\q59T*_30_7+,Ve.&kppCAqW_,GOF2`cL,*u(,p#4q#Ut'P5NMlNiTnfs"U:nB-0.AX)2Ts7s)LI7Jf;8Pqc=pPgEtmi! 
%h,"a/3GM6sCa7Ri#L[B\!_n/I0d'i('k>9g95U5k%c9(fj7]>0&dCEu^T8,#8&1oOr>Xl?^=qM^dO-_OatJ8@$2jd4We:3d^GC$" %!e+dM[@S`,8W'qurXH".a_-(paa,/DI4hm#1#L"k'B-%JRVNpQ>c%2_\uX.ZL#:F--\(Nu4`jB8n!b\2pW-S_l"VP8/"o;.(&\)6jasNFG.dV?AU.1#F(XhOE+DTis'Cpq!Xop/;r?1P;Q/g %2,o+Z(NOlK1f[1="lE8fR.gqfUqt"b.q\+57J0"OQS$*m.,1&AR]!8sW4sJbQ%@2ICkGVU_4]@.MGa\\a(3l3)&Pq2AW:B'VG]+L %Zsou&%hf#a1q,MIUT8r?4U+;b0sD:31tAZ:[cEg-`m52pBM;'=C4cVJQqkl29r0a-J7.KW%Y5,.N8-2=&M[6pQ>VnEc:Uc324Lb$ %NCJZjpqT7.?mDPk@+,RM2,XN-)B%QCUa]o$Vg'X2J!>*@"8cXMouEWm-_bW%g`iD9>5]8u?0"iG_&:j9VDp %BG@C[L+XD&@+Dl^Wri#Vo'Vq*s)%6d<28%'?$7#W-o:kT?\.!_; %d023BQ*OQt'LZX"j^gTP!D8A\(hGtjg;oo_X#$,"A`5?9#-JuW_6+gmKnKMY'fn1DX$H5+> %/;MMF\l8u8.gtc-4.48.]csapGBd])BH-@hV37ugRqtF8*QX8=h-ue&$RFKS6cplOJ\>M)Xis`mpWiuYaRBn!l9]qRbcS#+.ThR# %oB$V]5ga_,!(!udS'M0Lb11?2&l%nIM0bfspTY_bd"JCaPk4c6o)fk>Zs<^p_\fC;\fl0[=3X.tJ.doAk"o'Z7dWdl'jJa\^N?p9-<nS@[j[ZCaB7B3XmTj8`SH0-.O.s9AMY%Cah\CFMg9DP=ogr$!&$^i?1>p3S14.=VfWj6&lp>iL3GB!n4`o("7eQ@`$C- %s3#N5\b9IS6q!D;9SOm])[hU9c*eHdUPd"'H__'--+Wb=;\b:D<))=K%?=jQl]V%9\RB#V4t^;%K"0lbNFqGo2]IGoWb]gle/d0u %K[ZZn;)+KLA79-UpED)IWC\YgUVa?%Ec7fU(oeJVWX^1k'*oC5,7R*sJ\E+9Rm@99N>O':?m8N"N]?!t:02qISP=rJNRVD!=;o6"D_)"^?@0k9pr[i2bd`@:qKKpVYSVO4$<\_5bU@#5k6fLCs7B3edPtTHWXs=b5G#c,R8q7Ypl8;l@ %?q;Pb6ii7&@TX0/0-#B<]p3$)qpK!?"nt./XWlaukA2o[LDYD4q;DiAh.@_--R, %R0IT/Gt?NiG:?nb/eDD(A#X:mbtB:X<7,!t(A[1`$uhilPuoS=VBV0MPG8IR)X-,)0V,0[N[_Vd[R6S^kQ#4_ZTqYj(%-`&B49UQ %0[,ZI#?c`a&/gB8P8pl3\1H-fmjKk\>GK,Ml'fau2)S!^.f(>GiPX4eHH:e'R/EVDj=-t/!aB5R;+BBu&HWN$C9n!tiWc5@#%gW, %4p)0uRoKYgeOVkW$79sAgPHn)B%Xp@)K6*jmbKh#5tCEb<0dq2O!i"/"RIfUZS1n]9O66%2Utfb*6aAC=Ti6eYVJ;IE2XkC@'&\2 %9khQTfK]jXMk-.!+s9^CR4Kd-<$Ru:lJ4BU6TF6LS*Wq@:Q1lUgr%Lk)3:aVgf7Q:#L(LSF0]#3U+@=T_Bo6d&0:;VWm4/4ZnEWe %$A'lu"C7Xm$StH&pN/)!>[=>a5W-^c:U,kP"&(!t&,)Zr9-i2aXJ)R=1lom8IC#*FR%c9GlJcQuPj+)2j\002d0ScHD(qspb%+hZ 
%Ot%!`SeeU%[\lEo6:[V._=A_KBl%!7?+&mF5'?&(rb,2hY2oM_6CdY[6aM?W;+1u=#Y?V8m?Of[2eL0bR(a&`*f+p5F@W>l#;tBrdm_Od'MER8?/"Qj&OCe!$L(@hl6ol(iX&XK2#`r1(T+LXnjBh_]UYL*k)'RhRIs8YjdiPNHJ&+8.6Hrs#RFo %9"]JuCch\ls`@UQ9=omp4aG&F?Z,3Iue$l<'SHV[90h1.2a9?jcr_n#>',"d-h#p5>]*7Q->.JSp'K:"q!65W"S5^j'N9T!@`;fo";Q.a^*O'mE1KcT`RG\EDB"@G-7!*XnI %"kV"u!nQjkNR4naEI*l$Q%9glf)TkcL8lIX7'(Qr5r]6-:5DC>9/`W#d0M]\-@;Rabf1MG+patRM^hQV8L;A-b01=C"9s"G#T/pt %GSP.DW-XdIYlJQsY?+-3!aA(SmrF>CES3D@:3^O42_VVW&[lr7 %L.N'?`=M+HC=L*WN$TngQEN90c@IP#PC^#fnSC++5Nk*$X+m-g-)J76`MIGm[=/qjq=qL*<[&"/YNb?NTYoTHa*E<:D#KfTV*Z0E %XI!C\9Rs8I6o)Zuf'?hBi+4Dn.iAlm8%Gn2frtH(Qg^W%)1K\J0bkFa:RbE9OY)%]"3;&3(PiQX#?0&[_mif3')uUoOdA4q'p1)bs$-0jD2>B!'43>IYKB1c %/Z)QPW4\MeOQ&@&kVAJ!YJ9U.A:eA0/%13]p>7+"&>E;oL2tt9e#$0d-DlO8VeKU_kS'OVV9mVr2k)mm,sOZT@=dB#qZ0Df7s]JI %)`mBWV`k"&^H.`SKD'oYq^5cn&hQABoL;7[/$c-XV3,4LEN4Ja^3CfdnCPbE::YMJW8r=%5b=6p7d?hKl(Q)Kd%p0))oO#j)0!Wl %/Q.ARj_G&8\1J?XB^#7aa+'l+5q?ZA]e?.Kt?8c9"hAkD3fpCuDRVqp]j,ftX'B%i`c"/-a;hCHaL^6cLhI2iko %m*3/X+QTWuV-OW@;kAdd;VrF6f]uQZ_Hm=%0WtCtQH'STfpWgJ7uqnu-[@uBU3kGAWS0eQ/f1)$*\pq)$XV8gc3qU:`u0^\bPF!c)\hEnCKVP?dH+V&fIJ%r"N %PL*:ONQ"h[^`]T_N@FW>X"::Q71Q/Ol%X"Fb\`JDB0Lb'aOf*j^gnoVS-'.H4fs9<:Sk3V#"Z6O%`7@be)I)VBIfLoO;)8q3q.G$ %X&;L,e*c0._eqZsQF3%#STaH_0jG,F6n/VAZLhBfM,cAi(f(ieq-`CR=P?I`mq"a>J8?[`T:8NbKk_op&n&rUUN2`sK!,:foQce, %:Sm&R[@sj^)Kd1=&=;)_7;mnaWmrP4'Z'XB8EO^%r.DTHFKVJjlmTt;Nn"[4IS#u %Y>-/<*3NlFZ#aa*/P8T9)GB6c2+[jmFYQt?99]/koNirA`kmmmODq_@V%AfgXnY5h('$Qhl^Oj@``8rtJC-)l9Q`Uk7#,YR`FN:2 %CC\"?p]dsAf=sQM.N2Q5qehEQ<-He;h17fj"dT$bo9p]OaR*@q-a`rDhKQT>m@/_2^"$LKBI6a>@ %@c=5Yd2!AZHmFq#U"&3kC&V\,(-,Y`HX,_M"uK8fA7gmD)kC:)On#G%5+HtP*3*(M?ot]mh/mOTdZonrOaRPe<0Lo!!l^TD393gP %6(UVP:<"!a]5m]]&K4qFiq^^@;K"`RiObC6O=Er$j6,XP)GM33Vb=1b.oNU=1:aJG'2"BD#Y5&?VYns]5WE!=5gcV>"?Zdc.2+0P %/KBnGcbE"9Y%O5d<1"sf`E:GYZuf!41:qkKI#3)&nWg9`S5K+<7MHnBH%I&l?J8?U(JMdWB:4Od/#X];o(9VIKm&rU1=nQs2X2-" %jj2ZHN#D?0r#RZS.o2pMk1WL@%Zd"oL+ZC(Fk"l=:)9;U<#DS,r2(38;pUT[IX]b6^S3Fj7j$![:F0JBYAEnm=5&-Sq\LsHRl*(< 
%o=X+digZ3lD;d9#s2H*=-spW'Udj23j@S#&[e#7"t=J*p4GZ<3c_M'=aL=)ba7p3i;@(5$-tg_9jj2C(kaS=j7uZjk>3$fAL>mCU!el2Y*2UMU3U:L %b?VcPLI(nRRm]H4.uh0IM7mfN&Gh'b7ujXkERh?"cg@X[(_o)k):RtF[DjsOoGTTaR,s3b+3$/WV6OXM@U2u].iKc-,B9kAr]2ne %.a9J9A0$<^&hs9hGc>r#!McjN.-)CBjNT*ODt0Q4=u(@u\Yco869/22AlML.P;p %]dI./qFd:d'q`"T@X(\l=,J@X_q!a`b$o&_kMfWO@@LJXS/fIPd!9S'^;:e*f-NU?V1_qCr+#'Kp:-<7^oOgc[tA/PRn,U5Z]FkX'<6[OH]V._BJ0#i\`U0'KO^JL*RA)aXi%#[&D[]a@m %L$p.J/LdM.K/KH'b!_IneJ'R"V-%8tL,Gh!QRH!Y!e%!q)PU]qX9"+E,QDXK<#A\o.H8Ti_W-?KR*_!k`h[ZL*/Zm^28MM%D2#_@ %:0c4UYDcs::+.+XJ0N0aNl9@e&sZ!#FjAM+RPS@ZN4NTUDD;$FP7cRrJL3Jh_Kh;11$?0ei`oC/q(C'JS;M#INXMbXa':H@nJYUe %ChO+[jm[H31^JQ-Zc^/@#Wml)+N7;W6od2&q-5p$V\?,pM('M_k7?fM62XD],BL";S3:j!(8IZ6M@1k@ReR?,#bg5hWjE(\o5X'f %F`p<22#VMik7r]X+rAf*$d%Uje+pYMS5M27loi'o$r.;h(Vrd5Dd;"PS>:UJ"8`q3%BceW>;\DKWVdq[_4':bCd$E4rFi5i,"jhj %af/%3U[^&e_?ROtke6qU6Zgkf0Z]nqpkD@`")!EVJ/WfHptPR+c-g.?fD5=#1n(5*[;4d!eEOJ5"R6%$miDc8=i3XF4J($.[-lYA3)\e$X'Zj*T`bi#KKT;Pg?;RkKi.jItu@T-/P04OVC\fB!8Hb %XcaRP#X>.ILcTcC02air4oZ?9QMBG\T4V?mi2KS/[2D)Gnqs.D#n0B_FPENVI:SbdX(d6$^%uSE=('KOPA9'Rj)g0kYdDM4@C<=t %Y+:E+e3g&H2eTgn`q]"iYJ&uEUq\`eXen2J84bMJ0N(D[ejb(Qr2+K'H+Ia8E %_gLlJSTa%iZT+YCHItWGK=0oA(_d>8;DG&sh_tM:dGG`>Xjc/Ss)$-?#6/PghmuWTk\k?G`,,9SjhDgr&HBuo,eo-3KMg$3mq,n= %1mBI(b<6S&;R*C^\t2E`H]3Scq\AN%de?^I*Yj'd0K*taFHn?kF81>;qF)lunu$Qno8b%Qfl=;&]DCN4MA?VE?`m@IV1ID>XM`m` %giZ[NK?naS`L.R'`d9.^f0M>MiTB,!lcWXm8^XJ"*&d;!Q=oW:?eT]tRn\^Yu3Ph7WToA/5Cj9&M#ieCZLTLU^]gOiMOUB_XMVDEh:=j,=H8UBJ`cnNG$[)tE]+T;]))p+B9$+CEIQf_TQK9VgC %'JaP\Scj1L\p-3``+*qQq&dIMQ`i)06l9%pDFK]_1FZ>FV;k!G)T=BKQK]/3bR8[\hZPYLJF&+]Z!)hq>>HMPhZC@cU(F@DO05(-MLngGu-^/,;Qm$ZU"/*MAY'`G6lAWD[HodFSWE=,T %0[4mJZ[-gg]qo15OK;Zk"WV534ph!#;OLEDmZFbPEsX@@OXuKKH%6p":1[_1#ch(_KdnemaEf.]1\o=N7'0l %G\'kfl-ptdN!A9E+M)RPFKJbWP*9;A[%![U05I9X=pChXLmu+/?3JCfasH%]UjrMl?;2&iZ7ng.^O*b'df9(R)K48D73c*piJrN3 %'&Y!;VWG`)p>)#drnY"n/VXK5:lPMbb?tiD`m";IcfZX%o5Nbsh12R7.Zb!RNYF7)TPS2\Db.SEek=%F'3Yp1tO 
%[b@>FD+7M,lE7GorMrW7p*2lq1P.UM-O;reOeQs4oGjahee]Gf^VHP>GO`cLGW*Yh:mk,c2^1]E3BU2X8-+!(qo8L];K&0#9iu%R %ha'/Y4k-'Zb-D9)/fgq!951ZX'1Rm=T!@3MBs#BbD*WcESpNSB[:aM8+/;TH+QFt*PB=k)V)6qfVBYA]no?qS`c>g[o%OLri\d%E %VLT<^%n`[>\.?`Y:O*&6![Uq6,>j1fm6Z7qWm)&'7'mV^D1127k!k!5eO?'p@BN/YA\R_5j?=8rX'rQf %FYG1s-I[fm[7Q(<8Rr5%VA./e6+>f6CfRb\c^]k-Gd)F`1*@t"A:lWDi8:q>*m5=/a`Lo^Wg5)u7es!iC-<;@Bhte0MG#*NR_la@ %[n,'jO*b?57\Krsm`?=V6!p1,+`oJY/^UEQ9mGPlP@W'TYK"0a]Q)c;QU?W-FFG@!kHE@Y!IV*SY;5Ka\m36D-6u6 %[r=O!LC#D@';8M0.36`H\N]?$]S]UH(i^b[@i\kR3NraPdbo[^#CBHD]"`%Ta8nk0:#5)82lj6chnQlhB$f=D4u\d"QPj=FAO&n. %;qsG$3d/F^m5^\/Spr//A.n&_SILS.A288eFAYJdlIul' %9Ib[?e^Ekf0iYt5k@o'l0eJgf,[;@P9Zp=0=$t)u,?a"Eb1ok38nR7\ZQ:XD@K&b/Fa`WF@2\ %LEg5)=sdV@8uX;sJNMn'0f3;Hoh,WJ?o/m"0G@QrC'i-i*gO`i!sG:T(pI+kKrsr72-u9=Ls=msX+lo@32Y`"NO9@!Y)W]PUp6CX %j"B-dU#MPX+E'E\e:C%=ie9^RMsl']'h)P(UP^/C(9=2o$!S[?*NLEU827K[_5qRZ:ipO;;AbDV$ha5+cuWSZ2.R8L_Lb@4.%:_q %_Mq1b)225"GUB"H<=!%SU9M\l)OL,**MRWnH`NhJ:?KT;fm6khBOI6"Ou'6.&qbAh(5&V#;8fI`/bsG\67g/l--*nW,1NtJ*t%e; %lBJ'8W!7!4@/!]%(9)JqXS>+&?;8o[l>fL-cCLuQd_(1Bie6Xlmh*kp\l!d!EQN8@2=!)aCRY9Y.]1QC;W]O2n@MYB+Z-FQX6YR0 %BE?CdNK2W<;m9B_<,*%V9"*$H@^/$t/GXmU*TuaT7*R;iFK8.7T%k_`G5O"`HN3TH@d&,USU8la3CQ6KK;8;B:c#LqPCfIh"j"PgYfjsLCY#UB?qniha=$D(NWoFN=/K\RfKX#,` %I-];:QcnM('DYnQp%n?YW&fR5=1]#g@G(`c;UdT6CKsLBiM_48PZ&'/W@ZL28#nQ7/C*2ZI7>bU+]3`XL5aK)Ws %]<_h%feC4Gobc"BrM@/1"#D*V39H&@`mF-!$m7En$L)PD0RQXh5#F.2Db1D`7$+pVi"CloaQHg1k,S%V#5SR'rc@55ll$0;FFZ^l %V9EU2Sk@V^ON"@+b!4mG*_s7Y640r3/6t+O8kU#-4!IFE)bHT8S^cu"9N$S`%',(PN-EX@tQmuT2+VD2?_eX@5TWum=c %7T"VerHf+;?/sN#OAjr_kSfKEj-fK\-it^l=e1d^k@U,l`$KS/Ea:+ENMR7$^hY5\K64G+=;&k<"oZB8Hgo7Xi'&\"Y)?!jQ]R$$o>,;j4qZpsA\Yht8#_<@WY:h'9m`2AshrEs1'U %er=5bCmE,aOPWH5k9))"F+HtWn:3![rB`"2.K4crZ&4psN8t4m1D$SdCDhppTZ92!t8l-!.EphT^#0ilaM)6J+mj8(iU(. 
%I&PqJ@-t4Y>Edh.NU+A1%2YgVn%-FNdGqdhl#ABH0G6Mqf_$n$#=LX]$KeDFFCPJ %(C!VEN,Om2m+CkoCr;:8RjJM)$@C3nN*1r)5'DGEU&N^'g7A)`D*G[Dl`%_\4#RmaR>h<<3_P*2]96YESRf#1 %*s;W`(d!2bl:^S:+5K'12c8q6YjPEke+)AMJV5fNd#P5PXueu)!L*uj%u``7K5YNI)VCV9I=#>1I<&'N8GDC#`SkJm=mG3m:3LJJ %lqA7?GP+bq-amjYmF9^)m+];>_2UPQfA/78n!5kJ5J1Y%`0Ne8ILc?7t=NVAccLITiY %K_Vb.Z";77f$S2L3IH3ED`4jQZZ4a6/Nm.srbX+-l7%[+^%oTaA`Kc7MJ+_!YbuFWVTi<->":D0\N/3t'8Q6\p20-\hW4C,Xl0[/ %_7`4,48O/>?%$8H-H@OM]'MUmY24'hk8r9col0KSAp&4gERO4ICaTX)]=D?-0$,j>Lj=1[k@o;#WV'j^,L+-J+^$h>HJ^t`I-]M& %gGduUa'MGen,R0V'^=hG8+5*4POBI"%rPt9[N=,4QPT&I[.?T;R!PsI)bB_%]Q*iBSX\,[43+h8G)gCn\g;WBrrZdX"iAXCJNe_Z %!epLE#R$07#Uj!5%UpS_-SXTS^4?r.We$l"WLNXM-PO1K##9%U)!flPd*?l`h3pNKoU`[ciQe4moH*5b)]fh88NjH@U#VW_4$*92 %HOl[?PBidg]Pls>A`M`5rFkYN_1@e?:@tGZRQXlp17dHXrF6c\R)&)">1UN#cKidtGpp#.pV>.1]n6FQPC)QLn;4$L&$''-N8;2; %cGT1-k^^+2G'q>r6cD-5&+(g3.Yh%_ZbC_C$ZS"Q4)\j:G0Y[<@RoV,qi[N1l@W=0*0:Eohk:Vpf_XOW_#^3h%@]9LjdZRuf.&bd %re9GA5q2!BM*d'C&rado:O@h>oq'g2&;o/PEH96TK3_u6A%hHa6hdSGf0$:&+qgCUZZOQ6#J8P^Y4%u:&"_LcE@6[-MD*B=9mB:YO+,)%S]c;\UIq&h-D@8S"$'UE9H2TGB"B.*UGq<8/p>=^?m=(p]h$nonap,pN._h_/a=<%j!ct %[SNd4[h=_`#m0>0H_/t:Ds]X:245B<\kD;Ynb2hiDM\2Q"D!@M=Y'&VBkQ&+LQ6VBVX,a.E`^X,#P?f6MWa\c)n5/dMugsoX>]p> %;YjMaaFej9fi+gC_&ZbKlAn5n#7gnQ(g>[lgfHPa4&S,8HV[4Ai*uc.m)UP4YGJ;]hk009`ak-OT4RDLYEnns%r@O*FrfBkmoA@/ %fXLNXn!8`hjUVB.]T59JijWI%]]\-g%X^@H*I@6*;J9.E(Fka=JT'TIr9;=Pg5]6Fcb0R/MsKgokjP21j8f+G*2HkK2=/;/NgTVm %iNMi^[_8mCa21KKA1Sl8!\"*KX[8^#MZ\3bO; %h/atA3M.5h0G9b3D>qs*oCQ]p6m04ST&lK.'9Fc2e[%TO:#R`InO2.S,r[3I>r+B:i[@ %Co_I:VRpQZS25ZmcM)_b1`^J4>PV=U_Hq=,W,R(8=6HRo^-Vd`[J)M&RX2tl'mqam@tHRJkdW5L7#bdb*[nfJ&'0>@.rAVHfl%Jt %VBPo&ol\Xo)IM2sUBB8f/*oP,V(p1O:o,2d#ZMh%lI0O&o,CBW %J_-:aMOA0,pL]NUe]Pp+]st8BNaCH*ZP\K=R)%jCOB,&hM;25kms*gNZLM/D7sYtZBY?-4GqBh]5ui2KrB/epAj0*-f:pJb* %SGeaLo&gOYBdFhe+a-?^)M8+&9$rOM+B,e!_)6/"g&)2&3bE,Eb=gQ)kY(qeH@3A2"ktt_lK6Fc.Wi+I(7:J\fiM8hgKtKc!RjkgY%Y+q'Ut_q2=1AM*'&YO6W_8i@+]hN%`">:@l?eoP80b9Y^#Fb6Wf=<="A+)YLCXXjm+mY@M>rG[-5N:+(`bLON5=ENaB[>(6F4PD4IADO00(qc(+* 
%hbk_(C(SUc'gIX$U9=4bD!uAUVo#E^RON]`cY."cr7=b,ZH$hH^=%E8hnmatKDE:A6SoA*-.[j3H,+QV:`(;u+/$i'Y<6H85-6jS %iRkWXFe[%Eju]6cdI>p@\HN_3`Efq#X#un7G1KNPhuII`G[2i/0@>k;+%lkon`?]+#$% %&olmT/Db?JL>CiVr\?4+fuI %s-^5][4s-?i4U%lh'[sm8<[>NL*5h_s6@5Xmj;s$_C^&"#kDt8Dk@TMT`9'f%^U\uJ6Xpu;j\?"56i#pt6PiM'Pt$5#3!?6< %C#8-rG,+k18tI5&c54FrfAlFkr0XJ=oHr#aSMQSQhQ.i5":rcbsWg4"dn't>8r@AM_L\BqaJt2'AS6mba^[d^-#>`n+o#*@'i5AihqPA.T,0HQREcdg0U//2t(/p@5O5:8I?(1177+Mdh4`Vp>f"YRiL5uG88Pi")_qtV8tmDnDo..CVls&]@8h8kYc %*[^!X_'n]$YVJbopD@eM>us[;qs5m0gl(A"F %AE2()`/f)E5UWR['i*s:$V'i=+"b4^E6Cq5RMl<1Y8L7Pe2V^ujP!k8^o&)&A/)Da(\.nNk!PEE0K>[B@^)lWhoZ?,gX#>cdP1u7 %71/&M;&.ZTF'9CphIcOGp&]2O8@A_ESp[p^"oX:(cQ4B7OJNcJ6!olA&+,WF$Z5?bRK*q,r"^\m^Y/abHDE#DSIo5Sf8"k0dO#3t %dKp71eN3MEZ>WCCMo=K2c(PG%+EQt/hdSbPg,PN6l9;M(HTP\aMYuHbRsNg0kqc>0&;gkTW#h^khqu=A])b,H[%Akpe8>D299UF! %[O@5-e\_&Z%l]3NE3G3MS.1pNEq8;[='de=Ha%%4UM<4Y"'`tr:rL-F=d']F4)[@kRL/YYB4$3*u0?i)rJ^@HR'9YMmd(8Zql]&qB3FhnPpV%37&cQJ>*45,)C,Q#$."4@nGU:u]#^bR75g3O9$+'ep!.h9icpr8?$KoMnXNPtgf7o`ZQg&+k+JmMUW"'.;\]c#t %`6iM,aRrnIL!02gV'PJqlD9Qrf1iWW2Y4O4p7^8Tlu)*8p%H^TW__B7r6B^0PYSKKTU=ft$h3p1i0GL,hhHikDuRnrKV,F=j4O$: %\T,[liboQeqq$ZZLAeF"3_Ph-"2A:2e(/a/orB>k.a;"8+)?;<-!N=$A4(gk!]6 %G%.XOGnkiZT.^OQj`g.,%gt8:S",Ya9.,l!8kKXZJZZmM'd&7OXt&,Vf9_E^n\at*qKP7'^j)4PEi_?!YW?"-IQ?t!\TrJ7d<;QB %/IhdYoi/><(*':s/='n1[pK-YX;tJiV"tdeIE)K&kNa1&9#\!;>eP=."g_8dPR3SX.o25D">&1J/%-N4kF+c%!:,LpI])Cfg5U4% %7L&UC/QTXsNW3"&X"Rs%gd*,g+g3QE=IitQ3^IK>s8\2;&8nO$WNCdC'5rh?B\+o._IZdMfqt>IQf%7$Q%oEggghE'`12^2/_e!W3=:JMQNU4;":!1l2sFBf[LDWuDK8p0`jo=WUZ0he7>DEVaG0Wn %qG@Afj/Q&]?5+GP-TAjYp!uhXAWH3XG\"[HgmB%213js;_J!+%R/#\UOSqh&WCXg>\gZjLjM;daf^_FH<"f(Xcq9E_k!0Tk\i#%P\4E\R=Z$'-.bd_WjYHQGj%?'g0mlA2,hH^uSLSe%,r %/T/dk6)O265.?%4Qt)$e.tNHQF$lI,,TP-T9%CO:OStqODXY,t*$-3a)tGs9eT\4Fe\M]==W-&O;!"0]kG#OhfR';Xq=ms:@7..b %.2>("rgJGnFHm0/fqo2Sjm5XbnS#9c^48tA#?6F>=/KDbCb/R6Qd!@?^J2CZe"l.?UY4$X2;X%qV.h,T$Z;/bPBA>Ke![I?j.sG\ %0DJ]IL9E[MCSuBOqkLchF/LncJ^5B"U`DOF#TX"S[WQA-.XX-h@HUC0K@/)6%Zd,A4f.VW9dUf<3Xpb%DT@n#;9G_6rZh7Rku=07 
%jZ;!mQ&6YDG.LgFW>kssmau6V<;#7Nm*_l;'g=c6'\k`tUhX*mD51#B4fij39B@B'cfo48!8i2f$#6B9ObaIZm*=\hQ#mACpft), %Ba6?fSiJpQDI^#RR_InWN_0WAPT.i>d_ftQ[8[)%A924%C1+?qmF19-4+-;@C%XpiNt&l^V+jVPcDlkSC&eX@U7O2q;l;1\PoYP:K?lLB9"e(XH%MArQOB6;l?;/e<89jQamH-OktHa`+>g^b\0+$%:Q1))=Y)7 %nU3,64G$>I@3BBYCJud-e4r*)W3'"W]\1&!3j%31X-T11@),ZoGNq!r*cKX0`aXFu-'3$3SK>a?T<=aIqqY/dV:409Fe7u[8Z.AS %2?5[6GE(3\EoPa9H$=bY[F=0JqI.dN;rml3D2Gi_>YC(WhJ@((cek@%--&b6S+OM(drm<>Q?`p\[efP/Ll'REUo6o,jr %gCgO5Bmr1^$L\2)dcl4fP-fd!=K%fZPYUZpfg?-lA*q30II@_nJSm9MXKYEs`9-b0FBY6h3urb;a4*kjXgE5Jol`(&]h:CtpY3q> %knJJAk.5[sk+?QfYu>BA^rQ$>k4PNm!1bl$gj)kDrbX%"m&os/U'*Thdr+"_3^00""a?qu]>VYLBd?25NDI$RT#(h6PJM2m<5th2 %*2`]D7U.f=j7=.qfrTZ32,WN.oQMRPgt-i+;gn?)PDM]jDFEZk&%7,dPH.=!jlpr-hs8[7ObZ/5.nFg?;@F8l#q])RS6*Gdc#%:?ML0;HAD %IAa!-1lBW.Zd5'!_+C^&K26-Id&>=i)$fUTA]L^ta)!RRh@2Z3V_\m:kBjIYr[RnXI%;4rG)*ZJ2^D'H-eC^mKR6Sh %a4$2hRYae:RG"kO`Z%!]0NgTO$77DJ^om%iK%I74GV5YB";jJQAaS0GIgq*8@`n9)g1dJ=k#&th+I@B5@Jl9F^"[N!r1X-.XD$WUBB-+8G!VdN5N %mI9Jnl'#n\W/]E-XQs,J7cN,Nl6C.)n0X,i(%&k!f7RqpWmXJc2+[K&8mR85l)V#^::>I,g37g`o40Ntj(c$6TLVr0`,(mk]:e,]'c_-fL4gT8E!)7F9[85cE %2G1,dSFf>8`I1QmgRr@M_O\=bq4k`(n^ik'$rheE_%qqFRa_'t-\'Fc2S$a'p?>&qBmtW;H%tD+drj>X"c`1bpFe1[k,i"]8AuCP %LChL8?&aUH?+D'/oHm[Q!7i(CW`%tMQ'JtFMI2e$#'L`gTGpXBgRTg(Y#Agt4,rAh;mT(N"k7a#o0L'l %d#d!IW8:I^D9U.]A!U9:[`PV@fk;`n[#0]#'6<%cQ+M(j_I7OA[,%,KGr`[QAYV2%;S&\L:""^:$'3(u %l!#6h3B%+@BCWh#Ssb/#m[c>+Im3)AS*C8+5?7BC>h1Z58Lf5>[A&/AdGgSMFBVGhPcP7MIoJ$kSCe=]n[*R0]Ac-i6LP?80F(L5k31gO@5Ei+7%r,^4(=?+(> %g`ae/*jX32Nt7KcqYSoc)cC"=ID7;rWm[k?[QBInlMQq_S>Xg2?b_^UW`T^o=)%"Ynl5-#=!6k=Z$CMJf=3AYihS:fS6pZ,L/tbf %Lflb*M@d+7["T[#HF?d@@uAm/Pgs`T,H4WXf8biq&jJU@B9Jb%3D9d^1EUtI];jJ6n;MtdX=MffQM:edq-I_%Vo?XWL9e+S41gZ> %rNH;AeIl4/3r[AfJ;pR5c9S:NqW$1ei%gYSfO,&(o\sAK]M(HB5SQr7E@CFgJ=hDFFj$Uh16:e0JS6qsBMpQT&QQL3Ef8W3b*Jpm %ijQde4D)\^q3fpD*A(&&lbLK=EM2[feeVX8/d)5ffV[tr!o\h;[3Pk$EQggk?O(@U!&21T7I>NTq<_g!8)>@!p]Png@d"05r#$8E 
%hX'QZ=@o6..Y@3fBHn2IiV@(oOc,KIKnr<@!Uau`*LV/eOi/9nLFTTBP&-CHP7?L,9NZkV`q3Mq;.r84+Y"h8A'>)h.e&R^C\F3#6dtB,T/_a*pXD$"?a`s[pXD:?ORO3^OW\A%G=8sB)h#Wp`&3_,)iE4#>u*6l_q!jo3:N]-=[GH:[Xn.(GgDu+.Q %dcV)Q2TDi$.@%\:38[u+p=i=^D$ep70@%t8Hl&TLeN$f[4aXB.SZ_;0=e=nQg)5K+!E[UuM.OMH-s$f1Z?>B4UZf:c3g9me^q$s, %W69,;K9)D#lUt;HgAdI$/>j/FeNND[YutaV&LL.ahj)H0L5SPdfSTDUo#CpN.k<"bEffe2@HP".=6(,8%Eq7VBB9[1dYaeH34ncG %[iK\8EE1+$G8!MW$[VcaA78C/?!*r^eLLF/"\J1^Lb)fUi^4)hNW@Q(fR.B=NVP:N?"K_KZ@Kqqp:6DqKmKHCV:a>X/hKRBG.!3V %DeVY*Ke#a".jt;PWKf_nDEGS.TslT&36]k6&[R?O[)kWC#u;JF;!hg@p1:4B,b4h@P:SP$V)`eC#fCW3"^kRGY3)YdP_:O"4Vh]^ %bg@heBY#*XL;nTQU^p09RcBhZ6ZOMO"d=@rQVB01*Q1jr<@k]WP?e@@ %o@UrOF,=.C\+_uX=F)o&qB.Dp`/E5qDqi?OaBJt>qQl(qe%.*;H\7;PHmA/dM#$Qg]Bp#/LjPrN6?&Cc^aSk %^[sX7nI9TpB$U;t\S"+OEJ&NFBf9N4]qYr^c2$BWSYV+D/[7Hs,u?0)$DPn]>;4,cANbB]9'iKd1=9Q0o2OQ=W11?>DUqA"`>jN! %Hs,!0X'!:Y>&MB\0=`J(4(Wu*=9[;=YhP#D3jD3sTD-_:^>nQdCT\8hk*R[:C#tqTQCI4_ %\dZ\JQ$2`6_k>dpi:Wr<[=3.0?4?`5H$mB`B+8Z/ON0:=-cR/E/31@XA*:Vm_(9*Z_`]eeOiFJ#AOVF2c*:FfJscF9aMO-?3SLbM %ZTXT=(5\7Rp2d)qt@e32Iqq(Q7ZIZ1rXf<0n6kAj#p;s\=TYQHkB@NI`JgdlBhFs' %EXl?p4X8*`"d[TW?q=_@D@9_lF+r'glBbbhe*J903J6l1-i5NV'a:rQ4'sBi0/Lmr8@EnV9P7SN,NCL(FtkH.Q!rk%,;Uu6h+(A5 %a,oY#GO3e6A",uC%EZLJ2OtqNbWak8>Wk^Hd"b+\hrC292Z+PtifULs)PA%3eYhQfOYNRlr265?mZDM1(6nib%1kG$eP&HtEqt!= %G[t(Wr!8[8OnLu&Er\+M022'U-'=hF,ar@M_>/*2I[&r&@i[Rdqeb%K@_e%uTm5N7lZ)3OWB!Dm=s^JH1pjsKi^%TCq(cd]$=0$g %5%Y1.E6B#eQWLXu/>H^.C;Xu&Sp+M3+lb[@L$&\sOQ8Is6`S@tln;_Wm %LNAK-??\Tg8eJ);oASh"W3.QkNpu+p.<&TWSE#I!Y47(dk/5lITsX4%p#a.PS3)UfXZ7KKa!h%)aG2RRM`ISOc1u;g[[>7H%XM$= %h4V+9McIG&aEL=Rm>,1ph9o(mj@kfVEjX_14hVo)OO$J=\a=W1#5d99`HH(;o-0sWoeFn?W]BcF/$jE*Et%toNn3oGLtUgZ&lZf.^uVb^.4O][P6O:!K3ZLbh`WHrtW3Xt&gM:n'4DD+*[SL(nX#@FmB"5b:&/84 %:)00Y)FsC5d>V')a$?+g@ct6b66I>)VCictKI?%qUWFB/bXUbHKKm5;RL86#a4?[dL\fZYL`Yk-QUlgO>F5#[_QDn.oUQ>uQsI7= %.\,l7U@.50Op^C9#PVgSd?dgmt5SY,82pgahq=R8G#]0=eKgOX+,E2L1,^!@bliBYE&X9XlkTYimQXbR<<[^fej7PCSo 
%^:%uC65M?!DoP:[m_q\:ZY`5s0uDulFV#(`#H4IuNH7L#:%]_71<$`W2mIa/]faVR-H0S.oWZs'].!F<5e'e;*+"o>HAC\FrP\NN %hV^+3^fBb9hMd]!f6>bH-Bd+7.lnhnpR;$%QO_,Ta8>"(/54\>4,I.HgX]s!eEjoTUk%R4V;%#7ZJ2d&eeqNNUHLC!86Ker:5(8N %WdX?%lo$6/DTKD5MIj2$42mrJmr#[<0l)N7/_TNUB2"k";ntr'Bt[GlZ5XS#r-$8fj'j&IG+7$[ZtlR=[BY)DdOEc-$ZjAM8%3$< %Y#ho589"%)f%R\i3h<7j(I%XWH?/?DbQGMuND%kuJWEe\mAb;/6csr`g6GjdDAqEUpj.uQ;7?35HEk4V4iXf3+"\hFK1X#nSM94: %B3.J1.k0I#:eP+LpN&]>m2@O+Q&f)BLrqm_SG]5Wm+G.a=Jb5JpJKp+0j!dq.aj(9JpW[?PmbAu1XYo#UkWV<^/SD%7?U)h#]aX%oQgYXVlE:3D4AYG*flYF6@2@%`F0YlV0AO.E %l'ueYSBlsn-(E3%-g3fK.q[6jjjL8mq7+3u[VM];db/_#Yh4#tf9B"meC<7e65IkDF#*%Z8`W=Gl(%H]9X8\5flU!"FLIa&2.Y/r %q,sf^_Q>&PcH4[U=Kh&5VXle1C5png::YJtf[4hPdl@_OZMJ@]VK3Z=rFFs=Zg""20+_G\4nXq3kHo\ud82?)e'0d.\#/X-flZYn %jZ9a4(4t2eflXPpiE9p]cDFe]@Z+gH3E]I8[:drK2*c2_f:#Fse3IF/flU!$,r7:F.qdAT(R9XOKVK9nVo;K^=\cK;_/WY\gBWEf[5"ET]R339P_;1eCLVLbThqUQG0%O=3c=oh@g;Qt_l9)H^IrLi\g"(bN %3gAuViWhX]$\'R$Seqb@Y$')rK:3gD037#3]%fXIj>8,.]&URr5.t;_3<`<*E>T3Gp&%^k-'QMe&NT2@ru>32JjYp5LM+mX`S:jW %/Wd=$>is=XcBUVZI.8t9mMtB9a8W=#13Xe->lGB#I%:T3*M9DK\m'/@LJr`UWlZhVb1ug_-lt!Im`3kQkY^Qh:TdU;X3@-#qmK$8 %YcCi]h/=[LSE@K23EO,`Q-hB($nl?$1`Uu4[5^9PWQ4nNZ.uCX> %.Ktu9CqSLS;g$HfPYg]g@moj0eG#h7F7p'>ECB.q[i %`tETUTl)NZ\pIE!>?_7uPa-%-fXr36-8I,=qr>Do6::?#+*Z_FWY;5^;i6pqXX@H!1ErK_2'ga^.pAU0[k0MQ %*VXX6bb6nk1Eg&QR'%4IDg.l,JlLmK^p;e,UR^9\kiUf!7oQ\S4_n).ENgG14%4)Xc3-#@j1#o%KG"g.r>[ %G@\&9Qa\J"dF3"HP?q14Q>lpn0ARN;6AhJ"Odc!5`oKcI/>Z9'`=RAFV<>o4T8I'tb=(Q7Fc(9]r-:MOi2ZX,Ych)-EnN`kFe %a$tWI]gh%F%V0Cud5ear%Pnd50m@]TZaSfWjW:&77-+0dHCiI13h@]mfs73_Ea0(=V)$S=b,ITWc)atbNV;V@P+m0*06REUnkHN5 %;80#gnqFYoEEF]f?V7/(VL`0GkTf!IBt#.;mTS:B1FcPui=\4*-Z2M#r'rl^)]Y5jZ8H(64nim- %=aeQ)G"PafMCk`$"(A#9RlO-chW^e:RCe]"lI;7K2B$3AA!?Au?KdE]rb1`0jSj!c>uT3Admr+H#nJ.J!5,r8A#_P%9>e)9f<`cp %egp@\LU$V^mC@_!M[S-3\!t'l=1:qO:7Z-Bhk];`[4?d,b40>nV,O@2.:-]VZpd#58`VHiRd'moQ(!AoSQUC(pNBg!;e)r;^@.N) 
%>3'.jb'U.UC^LYEVae2Hl1[m=-VlsHhFZFHKhWnYe)bCGbdIb*j2\fCo530lYJBr&OLV26rV7IsOoaMrW.dQHg_-*c$7dM:35*jKn1K_pPKRU8c&9qJj'[;oe%g^.bJs7?(CAcb2^Bng3m`e'T_jeu,5u,V<'YXlnUgDm&Z:mq %`\SPQCLpT3_9D%a-Y]TIl(#lWO'OnJr,,kfpgD4SEGIQlY(E^hVJN&`ar5GK?tsndqo9a-CH/d8f%au_HGO[)Qh2i`@n/'83.r&g %!<)& %Y*SiubsOHCQ:N>sk1<;sSOiH^5Nfgah1]pOjsb3aDlbjmg&.2X?I/;7"!?7,Gt9`28h6\=>5>jA.];[(/,*#YQ.F9EB+`h/9W\aA %0%2_h^J\PZ+60Y=f6T83\+oD9&=Zn]a8[EQF[D$m-cA!r`b*YQB=1*Z?C&\7oU>qrO6Bmo6-P,KWL^'Jf#6@N@p_6H\GFB]!SZ/O %/I0W:Tt3W-TQ^mdbeD_m=b&D0EO@K5j'^\DBU+I$$H/7T!jjM87h4HNNg2+SiLt1FnsgZuAmVKdsA[ %?'NZ=/aGW$Pg@IPYl:AEo..,KUF;aD,=g;e-eG)?A1)B%$@#2Xj`C"0XhQ9VB78jK($5$k^N,%&Hkrme%][Y,\I@c!m41'HV5iE6 %cA'3IIV['d>2._,X4?mL:fG[&SS`l3p>; %opZDC1KX7uifoa(:JpHSGr)dW9m/*mf?\H/7TU`I.r"[fIbk+Wer8,?lhXcT[q;Z:c(&j_A6hE0nZADWi*=fNnSA]u\piL?`9u$]Mq.Llf&W*dpXSU+GusZC:KH2^pl%I_4a$_9pRCPI"O#Bho<.Af69W$dJi(.*nZ@2+ %NaC7S/bSo$gsB@C*38ofPEiSdRcB98$eG"B;KM:*^trEk?,(OC^!$/G5.M]1NIo4F's"o/PiJTt5\*Hq[NBAu+(BFoArTSS!7A9@icW998,q6-Oi(;`'\piL9r>bI7V<[o&.7&h$Z];VGIW_%.lM=XbdU(JM1V7g]\piMQ %@87T9n%*u)_El_:E2t`W,9E;Da,h%H0H9Oj4'.:Kao./BpJ%erZA[s>cJF-Hf!Ijb+6/Msf%lg4$e\%"bUtQcR.Z%R,Y^NsJWe7& %DL2KuAqU>_\$9X$Q&-KWfqob;?>>Kj,A(0d`ns:g9IcRgB!76i!\Yh=*s\4MoI3],8TK4Y2KZ8C4IF'=L$DfQZh#;M6g%NTj`(7o %]G]s^$r__]E8LNYMZ)/hHbTN)r6a`!bOY1/rU9U>9]9&c:k_?RJ(9M4M+ai[-l-L!QNVd>/582[@#^ %-;9+WG%*DTQck&P3/Zkt69e2?rU=`N1tp9I#35HcUEEJtq+g:hE-\HUmH:G5`Dr2'D%2N)g<&m*B_8j-)GNG:O`;Xlr/;:"WgT0` %^Ihs23kiQZ$'MYSWcBP38)s?]/YpOQSg-U#f7i:Pk/NVU9sr?LmM5G%N"V4E*Pk;:n&uYOh_]QVd11`!4Nd[2a2+h:7`&lBb"S^o %ktlahI!lorG.rmne+;:%jRN.UO-@&6KS5p:$HmNm8Vt^Z00kF`FhCj?8-MMUpOLAQ^j--Yj^_nuVimHq*lHkNh^YW>WkYrT9[/Y_ %AiT.8O[+0SVYQ^^I:$=<($b-UX,n`AR`f]BA=>)(?M %a:09YZTn5Q:FSNsa1Wb/@RIc>O4A.7[R%SijPTPjc/8lIh,"Tul7h7VVbq+oJ,ncq9C$5YlhMdWf4k/#>%o&>2`3/&emD8in[m %M8>eu3C'0bWLo9EC9oQP[kogAW(a+Xa;gcF=a57T,YshqOF8u/G*,gOTjf1%@_U1+Q)#CHaR;rl+Z"-?o2JgBYr-1]=B,PjHPrSA %=T*BL8=;qglM>#4caL/S=Jp@;6$78Wqb>ZAI:YSk.&dJ\p?3[Mht/8qqAul9W5gml[,QD?7FOOP]%%F"msJd-TnpH 
%KUMCTRo/WLD]]8<1R6TkG=De2A2@fAmL@&\#,"nk'*H%?G@1Hs$p`I[dhol>+,KmZYWR=p4X%!g1&tN)>nn/1)dP0sJ6g4:KRfg< %&unZ>04"PX3@E:c3)THr2o_b`7-4!NN^19VA".a)8d]I!g6hA.N%;:8[!BqGqdL_UFT0d4!SSo8f;N!1J;h5_.;7Es'mCBNqUWsK %3k@n51Yet.UAn),0fg3Ja<2eLbTnf:d/2QgkX3(>5om`XF!\\QU0qE=pJoM*4317rF(8S9#5Tu;!0CqW>0N)AJl>?)06^!UZ=SK8#MY+SZ_QDTt'o[5:bt2KJ0%UNX5b*jX"b?h,?9*'1\6"Q=+h@h4Ys&O-]@KSr!\oUan0pfpaC@NBj#V(+mjm:2'S\c"nH %76*KIW!%PB_`V%9f>:ePl8])+S%9Ub4bF?M>)lQ8C %ip&$J0o\9^12bO>\e%t"qVGd,88OT;6Ma7fIMW'0XL'>BbU^6qF/Cm8D/6rHlQZo1Q&l#B$Zd-eY)Cm[/5:4VMbh5'"&FrR=`#'! %p=1k:@"m7tSIhRJWj\fN56,s(bi %7*KsTrE2`;9<>&a>TUUs;R$Yt!u)G%WXn`nHAtD#g4O4sG[NGlEquO$J;NVOF+LRhW1#V-\t;1I[0KFKG[Tq\<$*s.8LIPRNaQ<:'cE_bqs\P3k,BSE-b!ILn`kS/,m1^ %RUu4H5dHLEMhTS6bq(mu.g`nRkT.([c:OJs_A4Fug?Cc=h>h",6M7/L/T"(VBEVU(;]F-a)=kgUZV^@)ilPgQ5PhTM2l %mHpIbdYHHl(+os/=,?2u;nS)SW#!_&oqu63?EcJ:4dt!s\S320'<&?R0[638Lk22:;[HpUe^NY,d@Cm1kAfU`Gf/+[iopFem2fr= %!hqe9]HK\H;j?Bg6&MV`SV$C0!u3Djq[&8+*ansCS*A/[k34d3aj1))]!5j7Ua&-i`Tp9VCn22oK49J+_h#<:BJ?N.$DH_6"CGD3 %K\D6K&`^m[A7HIYp`"qZXAKT24\h_HWkt"mE//;t!SU#94k[L^`!B/)_F&Ie5`:NATm/gRoS7(WO.ZSK*roo&9"eJsT5G %K8,URGQe7cHZVgC<(FXY7IP_#b(H&F:[`M5Okn(mc:\BI %&mt(?$sn9",U&Y,'MWnO1'/21=Z8"u%MmJAQJ^b,^V4j!pJcU:3]RlP&C!GZf([OD$Hb[hOmDH?A&gNK0_t5FNum3,_8J0W*_[V\ %Q\-aPL]MA!j1#V.U4sWGk@Q[G=<33!/G8:9=QfCL_3-uGbW"U&:9*bhV5o4fm]nUZ]:m;sG/X2IGgiiD4*J%YRiCI6MnZ3p'*" %,WXq>J1^A<4\7/en2$J,&TDAd7Zq2q77EgTC'3e:H.(>PiL>5>"'"(H3*F/CX9k$"0h2+W@Y5hSRMLU6; %T`^]#-Sm"V!kPrnR&,\\DN98o`L-uRUT?(Z/#5NX6`BRklV9;BO4ZF'p5$CQ%@^9J(n="7-E#S^9G:[DQKIId+3j[ALsLDI;FcK= %OGV+#f&HQ?[Y!IIW(\,F?M7^6B::5\9MNmalPaWM[8etM.?1oGjtkVl&FBr>,Y.?&]6F'@mLb7t7[_Sdhh9s_-cfcE*1(Yd(jns1 %jKuJ+M#.K;pCo7o%TIX:I@I[!;6QMYOGg*3TScfH4Gt4qLRGkT((V2JcP+`nL/+KRK^CBPe:Pqo)q?6YB,t;n+U9Esrf$#WDmGP4 %BKpi)Yt[c/6Mk'p[AoD<7$]a5AH"(XUc"6/fbuOL2lRO#Q:D-:R!!Q\YV)NBQjL]8F;i_<%"97ItkC3i-j6Dc\nQK\TaYnjBpdciL8srckbM0,e+>SGl3#JoHN[hYAhYGRq8^*n?5iSrPSF%XgU6>4#LTaE4.0ifV!H#>K8" 
%qUO\1f?dqkdXE]MMPcP\eGV%+8uj]eCg?@bNY7bSl^"D"2VcR:pQRg_?j4$L+'2'M+'(2/a>\$W-R0^=h,LfLQp$iqd`q[Ea_!ls %kAH[]\W[P5:OeH\58]]FI=0b]1LC!;.i[a8:&J0VqQ+V2S%%m&n!q+GJ".i**K8lkh>hh-Ai%5d7ZGAm\_YdOZI;]XpTHEBNqVXDJ?c;B->9KaBG,Cd5\0S&/gWn+:t>E;K.V/2n&jk=.kQ %XtGs?aVhPRa?iLY#`cA>I2/6AC@mCo'C,lT-H'N+40Qn48II)#lZ6!*^,m7GZ,TX4^Je#`%BaH#GGYiX:.CfR*YrD*Ip!2[[nNb't-'LOVaJS=<4 %qet3CTOSqcha"]@.?#V8c^2c:i:gsXYDk*i\isl;&'>jE%JAg8]Y(Q_L%.uFGTEBpcpQ?;0m3ZK7KNM'rb#VrQUCOh&tm\F`d<56 %`r]':"[pkZN.nkBoWug(eII1Jg>:4";k<7ZNA)`pD?LtdgWk)aD@Q=IItn"_`-O %o.gFBi%kii:.t(;c#XgT-)PBOKKGn:YL@HG@F>QS1&.<\V)\Mlb=H=S^>#:a3A%QG]tq!`RY!KW1C>`_G(\npZli$&1P&9l1d#;o %4tR'\$fB?GQGL$54tSOs\:O+)Jd0^F^b!8\B6$WnW/Bo*Ye%1X.@^ipA?4=gOcD/^JYVVqsc9`UTk_=92]#mVS/ %BpPOP*F=C'!91Kq&i>KJ$R1OiK#OJc/@*'0E"]!"&Bgk<:,[!S!"E>8A5TtZZQ%-h_p`1(bdWl)&J:,dl[kcU&f@pN$ASG9(NH#NRIlU#pBak-].sV$t'qX:CrS3#lqq:YehSY5,3+^ %p_)Ks%8b]m3tqqh@:?,qL,:0k-tU=3GaT:K[6%NUi,p#"/S"*O)J09@M)<$T3'3K-"2fdV5QJ;H8/!htPHd\!#D@_f-clE5"c812ieP/P.&>sG$\\:@!r8:_J+!0\[[%6VN0RDOif/>lkX73dZ)>-IuR(0fi'NK@fp=UkujO%]Zq#A*rFnULZ\2M>Tn!eXukkp/_@ %r'c2`k6/^65ZO4PaA!9R?_ZK\)Nm,E%cG=CI!HK^WrN;7)"'*m+&`EfJTg3o+1uTO, %-cdtQAK^bf9qX!XAJU4CZpL1^&lC4t##m^jC_EBJqukg]J&7Nr@0qH`$qjV5PYiWUC"/nSW>*Di#X\G\W0O&Qo>@3!$R5/Y'U@i( %n9OS_F=5Hq,5csk=p(4*G_3MY6jmP-GTKp(?GI-rIB$Z$j9lIi($m$,,akJ46KBt]au-+Gj9:pYnq$WV>/B_81+iZear.u+M#^$K %/;M:[:?eB3\DdoHWLtSZTI5<],EgA\&2LOH?3ZX*H,@]Le>eYAJc8=BO"Tp/WseFl*OLJ:9>NrGJH?gA-#N5^ %IKf(u#Mg6b!7'%5&[=GLO=^p3rB+l1Qt^fXZMp7kc/-X/O[VBMX1N.Oe%/&K<_o"+:(iUcVruS76^Uqp@SSAYakN'O$dCY`Ji!QN/=?q`t%[82(+Q+#l7A/c:5AO4*139FbG[P=9R,%fJ7Aj)5XiL6$2>IM-aL76!'B*A,&oU3 %!m%0eO:Gh;X\]>O:0'?3F]_uAV9q4FqOB2m:Dk#RW$MKRKZocOE^S+X(WbltI>UoC]d\L-*18J_>XO6s"2+[q0LT+HRM"%2/6QAr %HPWR_gB./,";qN.YeZAHS=G1j5_L8:be/5KE`SJUP[;0;?m!5;)89H-0fOc7;rf@\VNTU_P^O:L4dL?Y^ID- %"Mn_]Rg;'*R)FXL`MR/[$J7>W!s&n#.)t.t;:Gt%/V$1o]L4(T3(H)6aMN5LdU8!C8Tu$kmNY3(0,,Q3aFA 
%TI`#>J@pj+)pl06rpcpl9?BA.,R@teo822+T7SQ5OHigMS&aC8IO89fU`2"R8>,3_g'$hfck"H[+:SpnZ+]UnHJTI&`d?*iL`CA94,g^6>P=*f\GkB=Ul!`#%&1A`XpG<>9#J5XC:0g3g2,@(@ %9c=Y*g6O-($Bfo\B%"2d+:0/iLo\jt+:57lKELPkSq%*-P#f'c"N$lmMmXp_'dO-l@I)k]%Ul'cH?b2HXk7IU$+T%OB+V*$u"T8[n9oCk.[BG+J^"!sX_'c4e9faX/MK#`ajF1\(Yi7mpL=4FojG%hVu$nH=@GcZ(9FDjCt$^(Mek$$V^^(AMo1 %Ee+af,T-;s3Kh2hQQmCglr%AhJ7fb:_'N6AFC9fh,Tc`$3V%_3QR!^OE]FcL_(J)C#bL/!B[*+]!TkT$gHkFqJ>iMq+)CG3JAR_o %dAWWL!Bh1m;tH7CTW@YcBC_8e<<.j_R\tfD9:kdL#W>M.6*E(i8jGI=SeFm+dPpDco9Ab.:W(51VZbKuJ.?MgPi)p/,2H1L\BPW3 %#d*`[(+X=oPTX$N0;tHTrAWr14P@V>X%i2Mn?N)q`D_?k62K1q-+p>-XR7ML^n+tSktR`^?D`t#+l-(KS^KW5ns''5*q3O:#I#0c %MXDo*d,Vqr-;Oqg44OS1HV-OXL,m@&:C3C;.d/]bM2hN690=LKLO#"oc(5OK-82Zfa]!et5tamqPi.2u=Yr-2^be_`,%YbY#f%hl %1jX:7#m(-0TuBl\>XrX(Fd1)[$7u2*"87M^%Qubu4ZV,)uSAPG\Ng/'4=';Z[sAVU?5UM#^#'bjm)V$":t=&u=qE_>Y]F %,Mc9R4"+[=XMTE[Se@W2<==PV"IiIl+N%%Xgfjf1mO/*O(-%7dEKg`Z;kh'r*$pEs*7D"3;1dS\TV1^$R2skI*$s7AZ6+!E8l/WG %11tWlJ1(D$6IDhq9#*QD"m/O>&cnC:i@fq\6su)enKpb_!;rE9)]ja/7%d@)&l5^r.%)+,RN)RBlG,SoZ_KoR%fin4h,,BCU;FoD %NY2ApD8[mZ;1cBB.#8X/M*RR`ZH5sqa?k^$j`uG!7_*jQPp[LlO652lZ$*);E:MANf %;*L^'J^V<-.H!kALZHnV>>"dU1;4u#<3Bh&BjCV/NR'Op+^e`CAI+&e!DjT-VpFhg..BELqF>Jal1Z"G\2T'g)t0QYGVU-/5_BpO %'CW^ag?[^:jeNgis4\X(g]~> %AI9_PrivateDataEnd \ No newline at end of file diff --git a/logo/gnocchi.eps b/logo/gnocchi.eps new file mode 100644 index 00000000..cf707f09 --- /dev/null +++ b/logo/gnocchi.eps @@ -0,0 +1,5775 @@ +%!PS-Adobe-3.1 EPSF-3.0 +%ADO_DSC_Encoding: MacOS Roman +%%Title: gnocchi.eps +%%Creator: Adobe Illustrator(R) 13.0 +%%For: Thierry Ung +%%CreationDate: 4/3/17 +%%BoundingBox: 0 0 1096 840 +%%HiResBoundingBox: 0 0 1096 840 +%%CropBox: 0 0 1096 840 +%%LanguageLevel: 2 +%%DocumentData: Clean7Bit +%ADOBeginClientInjection: DocumentHeader "AI11EPS" +%%AI8_CreatorVersion: 13.0.0 %AI9_PrintingDataBegin %AI3_Cropmarks: 36.0000 36.0000 1060.0000 804.0000 +%ADO_BuildNumber: Adobe Illustrator(R) 13.0.0 x409 R agm 4.4378 ct 5.1039 %ADO_ContainsXMP: MainFirst 
%AI7_Thumbnail: 128 100 8 %%BeginData: 5284 Hex Bytes %0000330000660000990000CC0033000033330033660033990033CC0033FF %0066000066330066660066990066CC0066FF009900009933009966009999 %0099CC0099FF00CC0000CC3300CC6600CC9900CCCC00CCFF00FF3300FF66 %00FF9900FFCC3300003300333300663300993300CC3300FF333300333333 %3333663333993333CC3333FF3366003366333366663366993366CC3366FF %3399003399333399663399993399CC3399FF33CC0033CC3333CC6633CC99 %33CCCC33CCFF33FF0033FF3333FF6633FF9933FFCC33FFFF660000660033 %6600666600996600CC6600FF6633006633336633666633996633CC6633FF %6666006666336666666666996666CC6666FF669900669933669966669999 %6699CC6699FF66CC0066CC3366CC6666CC9966CCCC66CCFF66FF0066FF33 %66FF6666FF9966FFCC66FFFF9900009900339900669900999900CC9900FF %9933009933339933669933999933CC9933FF996600996633996666996699 %9966CC9966FF9999009999339999669999999999CC9999FF99CC0099CC33 %99CC6699CC9999CCCC99CCFF99FF0099FF3399FF6699FF9999FFCC99FFFF %CC0000CC0033CC0066CC0099CC00CCCC00FFCC3300CC3333CC3366CC3399 %CC33CCCC33FFCC6600CC6633CC6666CC6699CC66CCCC66FFCC9900CC9933 %CC9966CC9999CC99CCCC99FFCCCC00CCCC33CCCC66CCCC99CCCCCCCCCCFF %CCFF00CCFF33CCFF66CCFF99CCFFCCCCFFFFFF0033FF0066FF0099FF00CC %FF3300FF3333FF3366FF3399FF33CCFF33FFFF6600FF6633FF6666FF6699 %FF66CCFF66FFFF9900FF9933FF9966FF9999FF99CCFF99FFFFCC00FFCC33 %FFCC66FFCC99FFCCCCFFCCFFFFFF33FFFF66FFFF99FFFFCC110000001100 %000011111111220000002200000022222222440000004400000044444444 %550000005500000055555555770000007700000077777777880000008800 %000088888888AA000000AA000000AAAAAAAABB000000BB000000BBBBBBBB %DD000000DD000000DDDDDDDDEE000000EE000000EEEEEEEE0000000000FF %00FF0000FFFFFF0000FF00FFFFFF00FFFFFF %524C45FDFCFFFDFCFFFDFCFFFDFCFFFDFCFFFDFCFFFDFCFFFDFCFFFDFCFF %FDFCFFFDFCFFFDFCFFFDFCFFFDFCFFFDFCFFFDFCFFFDFCFFFD9CFFA87D7D %7DFD08FFA87DA8FD6FFF2727F827F82727FD05FF7D27F827F827A8FD33FF %A85252F827F827277D7DFD11FF7D27F827F8527DFD17FF5227F827275227 %277DFFFFFF7D27F827F827F827A8FD30FFA852F827F827F827F827F827F8 
%7DFD0EFF2727F827F827F827F8A8FD15FF52F852FFFFFF52F87DFFFFFF27 %F852A8FF7D27F87DFD2FFF7D27F827277DFD04A87D52F827F8A8FD0CFF52 %27F8FD049F752627F8A8FD14FF52277DFFFFFF27277DFFFFFFF8277DFFFF %FF522752FD2EFF7D27F8277DFD09FF272752FFFFFFA8FD07FF2727F89F9F %FD05C72627F8FD07FF7DFD09FFA8A8FF52F87DFFFFFF52F87DFFFFFF27F8 %A8FFFFFF52F852FD2EFF27F852FD0BFF27F8522727F827F827277DA8FF7D %27F89FC7C8C7C8C1C8C7C826277DFFFFFF5227F827F827277DA8FF5227F8 %27F827F8277DFFFFFF2727F827277D272727FFFFFFF82752FD2DFF7DF827 %A8FFA8FFFFFF7DA8A8FFFF7DF827F827F827F827F827F8277D27F89FC1C7 %C7C8C7C7C1C8C775F827A87DF827F827F827F827F827F827F827F827F827 %F87DFFFFFF52F827F827F827F827F852F827F8A8FD2DFF2727A8FD04FFA8 %F827F827F8522727F8527DFD05A87D27F827F827C1C7C7C8C7C8C7C8C7C8 %9F27F827F827277DFD04A87D27F827277D7DFFA8A87D277DFFFFFFFD04A8 %5227F827527D7D7D27277DFD2CFF7D27F8FFA8FFA8A8F827F827F827F827 %F827FD08FFA852F8279FC79FC7C1C79FC8C1C79FC74A27F82727A8FD06FF %272727FD07FF277DFD08FFA827F87DFFFFFF52F87DFD2CFF7DF852FD04FF %F827F827F827F827F827F8FD09FFA827F8C7C7C8C7C8C7C8C7C8C7C8C7C8 %F82727FD08FF2727FD08FF277DFD09FF7D277DFFFFFF52277DFD2CFF5227 %52FFA8FFA827F827F827F827F827F827FFFFFFA8277DA8FFFFFF52279FC7 %9FC8C1C7C1C8C1C7C1C8C727F8A8FFFFFFA85252527DF8A8FFFFFFA85252 %527DF87DFFFFFFA852A8FD04FFF87DFFFFFF52F87DFD2CFF7DF87DFD04FF %F827F827F8A8FFFFFFA8F8FFFFFFA827F852FFFFFF7DF89FC7C7C7C8C7C8 %C7C8C7C8C7C84B27FD04FF2727F827F852FD04FFF827F827F8277DFFFFFF %2727F8A8FFFFFF277DFFFFFF27277DFD2CFF522752FFA8FFA827F827F827 %7DFFA8FF7D27A8FFA8A8F82727FFA8FF7D279EC79FC79FC8C1C79FC7C1C7 %C75127FFFFFF5227F827F82727FFFFFF5227F827F827F87DFFFFFF52F827 %52FFFFFF277DFFFFFF52F87DFD2CFF7DF87DFD04FFF827F827F8A8FFFFFF %A8F8FFFFFFA827F827FFFFFFA8F8C8C7C7C7C8C7C8C7C8C7C8C7C8757DFF %FFFF52F827F827F87DFFFFFF52F827F827F8277DFFFFFF2727F87DFFFFFF %527DFFFFFF52277DFD2CFF7D27F8FFA8FFA87DF827F8277DFFA8FF7D27A8 %FFA8A8F827F8FFA8FF7D279FC7C1C7C7C8C1C7C1C8C7C7C77552FFA8FF7D %27F827F82752FFFFFF5227F827F827F87DA8FFFF52F8277DFFA8FF52A8A8 
%FFA852F87DFD2DFFF827A8FFFFFFA852F827F8A8FFFFFFA8F8FFFFFFA827 %F827FFFFFFA8F8C7C1C7C7C8C1C8C7C8C7C8C7C8517DFD04FFF827F827F8 %7DFD04FFF827F827F827A8FFFFFF5227F8A8FFFFFF7DA8FFFFFF52277DFD %2DFF52F852A8FFA8FFA8A85252A8FFA8FF7D27A8FFA8A8F827F8FFA8FF7D %2726C79FFD07C79FC79F27F8FFA8FFA8A8522752A852FFA8FFA8A8525252 %A827A8A8FFA87DF8277DFFA8FF52A8A8FFA87DF87DFD2DFFA827F8A8FD0B %FFA8F8FFFFFFA827F827FFFFFFA8F827FD05C7C8C7C8C7C8C79FF8277DFD %08FF7D7DFD08FF7DA8FFFFFF5227F8A8FFFFFF7DA8FFFFFF52277DFD2EFF %5227F87DA8FFA8FFA8FFA8FFA8FF7D27A8FFA8A8F827F8FFA8FF7D27F89F %9FC7C1C7C1C8C7C7C1C82627F827A8FFA8FFA8FFA8FF7D27A8FFA8FFA8FF %A8FF7DA8A8FFA87DF8277DFFA8FF52A8A8FFA87DF87DFD2FFF2727F827A8 %FD07FFA852F8FFA8FF7D27F827A8FFA87DF82726C7C1C7C7C8C1C8C7C875 %27F827F82752FD05FFA87DF8277DFD06FF7D7DFFA8FF5227F87DA8FFA852 %7DFFA8FF52277DFD30FF5227F827F827F827F827F827F827F827F827F827 %F827F827F827F82774C79FC79FC79FC77427F852A827F827F827F827F827 %F827F827F827F827F827F827F827F827F827F827F827F827F827F827F87D %FD31FFA87D2727F827F827F82727522727F827F87D7D27F827F827527DF8 %27269F9FC79F9F2627F852FFFFFFA82727F827F827F8527DA82727F827F8 %27F827F827F82727A87D27F827F827F827F827F87DFD35FFA8A87DFD04A8 %FD10FF7DF827F827F827F827F87DFD06FFA8A87DA8A8A8FD04FFA8A87DA8 %7DFD62FF7D52F82752527DFDFCFFFDFCFFFDFCFFFDFCFFFDFCFFFDFCFFFD %FCFFFDFCFFFDFCFFFDFCFFFDFCFFFDFCFFFDFCFFFDFCFFFDFCFFFDFCFFFD %FCFFFDFCFFFDFCFFFDFCFFFDFCFFFD15FFFF %%EndData +%ADOEndClientInjection: DocumentHeader "AI11EPS" +%%Pages: 1 +%%DocumentNeededResources: +%%DocumentSuppliedResources: procset Adobe_AGM_Image 1.0 0 +%%+ procset Adobe_CoolType_Utility_T42 1.0 0 +%%+ procset Adobe_CoolType_Utility_MAKEOCF 1.23 0 +%%+ procset Adobe_CoolType_Core 2.31 0 +%%+ procset Adobe_AGM_Core 2.0 0 +%%+ procset Adobe_AGM_Utils 1.0 0 +%%DocumentFonts: +%%DocumentNeededFonts: +%%DocumentNeededFeatures: +%%DocumentSuppliedFeatures: +%%DocumentProcessColors: Cyan Magenta Yellow Black +%%DocumentCustomColors: +%%CMYKCustomColor: +%%RGBCustomColor: +%%EndComments + 
+ + + + + +%%BeginDefaults +%%ViewingOrientation: 1 0 0 1 +%%EndDefaults +%%BeginProlog +%%BeginResource: procset Adobe_AGM_Utils 1.0 0 +%%Version: 1.0 0 +%%Copyright: Copyright(C)2000-2006 Adobe Systems, Inc. All Rights Reserved. +systemdict/setpacking known +{currentpacking true setpacking}if +userdict/Adobe_AGM_Utils 73 dict dup begin put +/bdf +{bind def}bind def +/nd{null def}bdf +/xdf +{exch def}bdf +/ldf +{load def}bdf +/ddf +{put}bdf +/xddf +{3 -1 roll put}bdf +/xpt +{exch put}bdf +/ndf +{ + exch dup where{ + pop pop pop + }{ + xdf + }ifelse +}def +/cdndf +{ + exch dup currentdict exch known{ + pop pop + }{ + exch def + }ifelse +}def +/gx +{get exec}bdf +/ps_level + /languagelevel where{ + pop systemdict/languagelevel gx + }{ + 1 + }ifelse +def +/level2 + ps_level 2 ge +def +/level3 + ps_level 3 ge +def +/ps_version + {version cvr}stopped{-1}if +def +/set_gvm +{currentglobal exch setglobal}bdf +/reset_gvm +{setglobal}bdf +/makereadonlyarray +{ + /packedarray where{pop packedarray + }{ + array astore readonly}ifelse +}bdf +/map_reserved_ink_name +{ + dup type/stringtype eq{ + dup/Red eq{ + pop(_Red_) + }{ + dup/Green eq{ + pop(_Green_) + }{ + dup/Blue eq{ + pop(_Blue_) + }{ + dup()cvn eq{ + pop(Process) + }if + }ifelse + }ifelse + }ifelse + }if +}bdf +/AGMUTIL_GSTATE 22 dict def +/get_gstate +{ + AGMUTIL_GSTATE begin + /AGMUTIL_GSTATE_clr_spc currentcolorspace def + /AGMUTIL_GSTATE_clr_indx 0 def + /AGMUTIL_GSTATE_clr_comps 12 array def + mark currentcolor counttomark + {AGMUTIL_GSTATE_clr_comps AGMUTIL_GSTATE_clr_indx 3 -1 roll put + /AGMUTIL_GSTATE_clr_indx AGMUTIL_GSTATE_clr_indx 1 add def}repeat pop + /AGMUTIL_GSTATE_fnt rootfont def + /AGMUTIL_GSTATE_lw currentlinewidth def + /AGMUTIL_GSTATE_lc currentlinecap def + /AGMUTIL_GSTATE_lj currentlinejoin def + /AGMUTIL_GSTATE_ml currentmiterlimit def + currentdash/AGMUTIL_GSTATE_do xdf/AGMUTIL_GSTATE_da xdf + /AGMUTIL_GSTATE_sa currentstrokeadjust def + /AGMUTIL_GSTATE_clr_rnd currentcolorrendering def + 
/AGMUTIL_GSTATE_op currentoverprint def + /AGMUTIL_GSTATE_bg currentblackgeneration cvlit def + /AGMUTIL_GSTATE_ucr currentundercolorremoval cvlit def + currentcolortransfer cvlit/AGMUTIL_GSTATE_gy_xfer xdf cvlit/AGMUTIL_GSTATE_b_xfer xdf + cvlit/AGMUTIL_GSTATE_g_xfer xdf cvlit/AGMUTIL_GSTATE_r_xfer xdf + /AGMUTIL_GSTATE_ht currenthalftone def + /AGMUTIL_GSTATE_flt currentflat def + end +}def +/set_gstate +{ + AGMUTIL_GSTATE begin + AGMUTIL_GSTATE_clr_spc setcolorspace + AGMUTIL_GSTATE_clr_indx{AGMUTIL_GSTATE_clr_comps AGMUTIL_GSTATE_clr_indx 1 sub get + /AGMUTIL_GSTATE_clr_indx AGMUTIL_GSTATE_clr_indx 1 sub def}repeat setcolor + AGMUTIL_GSTATE_fnt setfont + AGMUTIL_GSTATE_lw setlinewidth + AGMUTIL_GSTATE_lc setlinecap + AGMUTIL_GSTATE_lj setlinejoin + AGMUTIL_GSTATE_ml setmiterlimit + AGMUTIL_GSTATE_da AGMUTIL_GSTATE_do setdash + AGMUTIL_GSTATE_sa setstrokeadjust + AGMUTIL_GSTATE_clr_rnd setcolorrendering + AGMUTIL_GSTATE_op setoverprint + AGMUTIL_GSTATE_bg cvx setblackgeneration + AGMUTIL_GSTATE_ucr cvx setundercolorremoval + AGMUTIL_GSTATE_r_xfer cvx AGMUTIL_GSTATE_g_xfer cvx AGMUTIL_GSTATE_b_xfer cvx + AGMUTIL_GSTATE_gy_xfer cvx setcolortransfer + AGMUTIL_GSTATE_ht/HalftoneType get dup 9 eq exch 100 eq or + { + currenthalftone/HalftoneType get AGMUTIL_GSTATE_ht/HalftoneType get ne + { + mark AGMUTIL_GSTATE_ht{sethalftone}stopped cleartomark + }if + }{ + AGMUTIL_GSTATE_ht sethalftone + }ifelse + AGMUTIL_GSTATE_flt setflat + end +}def +/get_gstate_and_matrix +{ + AGMUTIL_GSTATE begin + /AGMUTIL_GSTATE_ctm matrix currentmatrix def + end + get_gstate +}def +/set_gstate_and_matrix +{ + set_gstate + AGMUTIL_GSTATE begin + AGMUTIL_GSTATE_ctm setmatrix + end +}def +/AGMUTIL_str256 256 string def +/AGMUTIL_src256 256 string def +/AGMUTIL_dst64 64 string def +/AGMUTIL_srcLen nd +/AGMUTIL_ndx nd +/AGMUTIL_cpd nd +/capture_cpd{ + //Adobe_AGM_Utils/AGMUTIL_cpd currentpagedevice ddf +}def +/thold_halftone +{ + level3 + {sethalftone currenthalftone} + { + dup/HalftoneType get 
3 eq + { + sethalftone currenthalftone + }{ + begin + Width Height mul{ + Thresholds read{pop}if + }repeat + end + currenthalftone + }ifelse + }ifelse +}def +/rdcmntline +{ + currentfile AGMUTIL_str256 readline pop + (%)anchorsearch{pop}if +}bdf +/filter_cmyk +{ + dup type/filetype ne{ + exch()/SubFileDecode filter + }{ + exch pop + } + ifelse + [ + exch + { + AGMUTIL_src256 readstring pop + dup length/AGMUTIL_srcLen exch def + /AGMUTIL_ndx 0 def + AGMCORE_plate_ndx 4 AGMUTIL_srcLen 1 sub{ + 1 index exch get + AGMUTIL_dst64 AGMUTIL_ndx 3 -1 roll put + /AGMUTIL_ndx AGMUTIL_ndx 1 add def + }for + pop + AGMUTIL_dst64 0 AGMUTIL_ndx getinterval + } + bind + /exec cvx + ]cvx +}bdf +/filter_indexed_devn +{ + cvi Names length mul names_index add Lookup exch get +}bdf +/filter_devn +{ + 4 dict begin + /srcStr xdf + /dstStr xdf + dup type/filetype ne{ + 0()/SubFileDecode filter + }if + [ + exch + [ + /devicen_colorspace_dict/AGMCORE_gget cvx/begin cvx + currentdict/srcStr get/readstring cvx/pop cvx + /dup cvx/length cvx 0/gt cvx[ + Adobe_AGM_Utils/AGMUTIL_ndx 0/ddf cvx + names_index Names length currentdict/srcStr get length 1 sub{ + 1/index cvx/exch cvx/get cvx + currentdict/dstStr get/AGMUTIL_ndx/load cvx 3 -1/roll cvx/put cvx + Adobe_AGM_Utils/AGMUTIL_ndx/AGMUTIL_ndx/load cvx 1/add cvx/ddf cvx + }for + currentdict/dstStr get 0/AGMUTIL_ndx/load cvx/getinterval cvx + ]cvx/if cvx + /end cvx + ]cvx + bind + /exec cvx + ]cvx + end +}bdf +/AGMUTIL_imagefile nd +/read_image_file +{ + AGMUTIL_imagefile 0 setfileposition + 10 dict begin + /imageDict xdf + /imbufLen Width BitsPerComponent mul 7 add 8 idiv def + /imbufIdx 0 def + /origDataSource imageDict/DataSource get def + /origMultipleDataSources imageDict/MultipleDataSources get def + /origDecode imageDict/Decode get def + /dstDataStr imageDict/Width get colorSpaceElemCnt mul string def + imageDict/MultipleDataSources known{MultipleDataSources}{false}ifelse + { + /imbufCnt imageDict/DataSource get length def + /imbufs imbufCnt 
array def + 0 1 imbufCnt 1 sub{ + /imbufIdx xdf + imbufs imbufIdx imbufLen string put + imageDict/DataSource get imbufIdx[AGMUTIL_imagefile imbufs imbufIdx get/readstring cvx/pop cvx]cvx put + }for + DeviceN_PS2{ + imageDict begin + /DataSource[DataSource/devn_sep_datasource cvx]cvx def + /MultipleDataSources false def + /Decode[0 1]def + end + }if + }{ + /imbuf imbufLen string def + Indexed_DeviceN level3 not and DeviceN_NoneName or{ + /srcDataStrs[imageDict begin + currentdict/MultipleDataSources known{MultipleDataSources{DataSource length}{1}ifelse}{1}ifelse + { + Width Decode length 2 div mul cvi string + }repeat + end]def + imageDict begin + /DataSource[AGMUTIL_imagefile Decode BitsPerComponent false 1/filter_indexed_devn load dstDataStr srcDataStrs devn_alt_datasource/exec cvx]cvx def + /Decode[0 1]def + end + }{ + imageDict/DataSource[1 string dup 0 AGMUTIL_imagefile Decode length 2 idiv string/readstring cvx/pop cvx names_index/get cvx/put cvx]cvx put + imageDict/Decode[0 1]put + }ifelse + }ifelse + imageDict exch + load exec + imageDict/DataSource origDataSource put + imageDict/MultipleDataSources origMultipleDataSources put + imageDict/Decode origDecode put + end +}bdf +/write_image_file +{ + begin + {(AGMUTIL_imagefile)(w+)file}stopped{ + false + }{ + Adobe_AGM_Utils/AGMUTIL_imagefile xddf + 2 dict begin + /imbufLen Width BitsPerComponent mul 7 add 8 idiv def + MultipleDataSources{DataSource 0 get}{DataSource}ifelse type/filetype eq{ + /imbuf imbufLen string def + }if + 1 1 Height MultipleDataSources not{Decode length 2 idiv mul}if{ + pop + MultipleDataSources{ + 0 1 DataSource length 1 sub{ + DataSource type dup + /arraytype eq{ + pop DataSource exch gx + }{ + /filetype eq{ + DataSource exch get imbuf readstring pop + }{ + DataSource exch get + }ifelse + }ifelse + AGMUTIL_imagefile exch writestring + }for + }{ + DataSource type dup + /arraytype eq{ + pop DataSource exec + }{ + /filetype eq{ + DataSource imbuf readstring pop + }{ + DataSource + }ifelse + 
}ifelse + AGMUTIL_imagefile exch writestring + }ifelse + }for + end + true + }ifelse + end +}bdf +/close_image_file +{ + AGMUTIL_imagefile closefile(AGMUTIL_imagefile)deletefile +}def +statusdict/product known userdict/AGMP_current_show known not and{ + /pstr statusdict/product get def + pstr(HP LaserJet 2200)eq + pstr(HP LaserJet 4000 Series)eq or + pstr(HP LaserJet 4050 Series )eq or + pstr(HP LaserJet 8000 Series)eq or + pstr(HP LaserJet 8100 Series)eq or + pstr(HP LaserJet 8150 Series)eq or + pstr(HP LaserJet 5000 Series)eq or + pstr(HP LaserJet 5100 Series)eq or + pstr(HP Color LaserJet 4500)eq or + pstr(HP Color LaserJet 4600)eq or + pstr(HP LaserJet 5Si)eq or + pstr(HP LaserJet 1200 Series)eq or + pstr(HP LaserJet 1300 Series)eq or + pstr(HP LaserJet 4100 Series)eq or + { + userdict/AGMP_current_show/show load put + userdict/show{ + currentcolorspace 0 get + /Pattern eq + {false charpath f} + {AGMP_current_show}ifelse + }put + }if + currentdict/pstr undef +}if +/consumeimagedata +{ + begin + AGMIMG_init_common + currentdict/MultipleDataSources known not + {/MultipleDataSources false def}if + MultipleDataSources + { + DataSource 0 get type + dup/filetype eq + { + 1 dict begin + /flushbuffer Width cvi string def + 1 1 Height cvi + { + pop + 0 1 DataSource length 1 sub + { + DataSource exch get + flushbuffer readstring pop pop + }for + }for + end + }if + dup/arraytype eq exch/packedarraytype eq or DataSource 0 get xcheck and + { + Width Height mul cvi + { + 0 1 DataSource length 1 sub + {dup DataSource exch gx length exch 0 ne{pop}if}for + dup 0 eq + {pop exit}if + sub dup 0 le + {exit}if + }loop + pop + }if + } + { + /DataSource load type + dup/filetype eq + { + 1 dict begin + /flushbuffer Width Decode length 2 idiv mul cvi string def + 1 1 Height{pop DataSource flushbuffer readstring pop pop}for + end + }if + dup/arraytype eq exch/packedarraytype eq or/DataSource load xcheck and + { + Height Width BitsPerComponent mul 8 BitsPerComponent sub add 8 idiv Decode 
length 2 idiv mul mul + { + DataSource length dup 0 eq + {pop exit}if + sub dup 0 le + {exit}if + }loop + pop + }if + }ifelse + end +}bdf +/addprocs +{ + 2{/exec load}repeat + 3 1 roll + [5 1 roll]bind cvx +}def +/modify_halftone_xfer +{ + currenthalftone dup length dict copy begin + currentdict 2 index known{ + 1 index load dup length dict copy begin + currentdict/TransferFunction known{ + /TransferFunction load + }{ + currenttransfer + }ifelse + addprocs/TransferFunction xdf + currentdict end def + currentdict end sethalftone + }{ + currentdict/TransferFunction known{ + /TransferFunction load + }{ + currenttransfer + }ifelse + addprocs/TransferFunction xdf + currentdict end sethalftone + pop + }ifelse +}def +/clonearray +{ + dup xcheck exch + dup length array exch + Adobe_AGM_Core/AGMCORE_tmp -1 ddf + { + Adobe_AGM_Core/AGMCORE_tmp 2 copy get 1 add ddf + dup type/dicttype eq + { + Adobe_AGM_Core/AGMCORE_tmp get + exch + clonedict + Adobe_AGM_Core/AGMCORE_tmp 4 -1 roll ddf + }if + dup type/arraytype eq + { + Adobe_AGM_Core/AGMCORE_tmp get exch + clonearray + Adobe_AGM_Core/AGMCORE_tmp 4 -1 roll ddf + }if + exch dup + Adobe_AGM_Core/AGMCORE_tmp get 4 -1 roll put + }forall + exch{cvx}if +}bdf +/clonedict +{ + dup length dict + begin + { + dup type/dicttype eq + {clonedict}if + dup type/arraytype eq + {clonearray}if + def + }forall + currentdict + end +}bdf +/DeviceN_PS2 +{ + /currentcolorspace AGMCORE_gget 0 get/DeviceN eq level3 not and +}bdf +/Indexed_DeviceN +{ + /indexed_colorspace_dict AGMCORE_gget dup null ne{ + dup/CSDBase known{ + /CSDBase get/CSD get_res/Names known + }{ + pop false + }ifelse + }{ + pop false + }ifelse +}bdf +/DeviceN_NoneName +{ + /Names where{ + pop + false Names + { + (None)eq or + }forall + }{ + false + }ifelse +}bdf +/DeviceN_PS2_inRip_seps +{ + /AGMCORE_in_rip_sep where + { + pop dup type dup/arraytype eq exch/packedarraytype eq or + { + dup 0 get/DeviceN eq level3 not and AGMCORE_in_rip_sep and + { + /currentcolorspace exch 
AGMCORE_gput + false + }{ + true + }ifelse + }{ + true + }ifelse + }{ + true + }ifelse +}bdf +/base_colorspace_type +{ + dup type/arraytype eq{0 get}if +}bdf +/currentdistillerparams where{pop currentdistillerparams/CoreDistVersion get 5000 lt}{true}ifelse +{ + /pdfmark_5{cleartomark}bind def +}{ + /pdfmark_5{pdfmark}bind def +}ifelse +/ReadBypdfmark_5 +{ + currentfile exch 0 exch/SubFileDecode filter + /currentdistillerparams where + {pop currentdistillerparams/CoreDistVersion get 5000 lt}{true}ifelse + {flushfile cleartomark} + {/PUT pdfmark}ifelse +}bdf +/xpdfm +{ + { + dup 0 get/Label eq + { + aload length[exch 1 add 1 roll/PAGELABEL + }{ + aload pop + [{ThisPage}<<5 -2 roll>>/PUT + }ifelse + pdfmark_5 + }forall +}bdf +/ds{ + Adobe_AGM_Utils begin +}bdf +/dt{ + currentdict Adobe_AGM_Utils eq{ + end + }if +}bdf +systemdict/setpacking known +{setpacking}if +%%EndResource +%%BeginResource: procset Adobe_AGM_Core 2.0 0 +%%Version: 2.0 0 +%%Copyright: Copyright(C)1997-2007 Adobe Systems, Inc. All Rights Reserved. 
+systemdict/setpacking known +{ + currentpacking + true setpacking +}if +userdict/Adobe_AGM_Core 209 dict dup begin put +/Adobe_AGM_Core_Id/Adobe_AGM_Core_2.0_0 def +/AGMCORE_str256 256 string def +/AGMCORE_save nd +/AGMCORE_graphicsave nd +/AGMCORE_c 0 def +/AGMCORE_m 0 def +/AGMCORE_y 0 def +/AGMCORE_k 0 def +/AGMCORE_cmykbuf 4 array def +/AGMCORE_screen[currentscreen]cvx def +/AGMCORE_tmp 0 def +/AGMCORE_&setgray nd +/AGMCORE_&setcolor nd +/AGMCORE_&setcolorspace nd +/AGMCORE_&setcmykcolor nd +/AGMCORE_cyan_plate nd +/AGMCORE_magenta_plate nd +/AGMCORE_yellow_plate nd +/AGMCORE_black_plate nd +/AGMCORE_plate_ndx nd +/AGMCORE_get_ink_data nd +/AGMCORE_is_cmyk_sep nd +/AGMCORE_host_sep nd +/AGMCORE_avoid_L2_sep_space nd +/AGMCORE_distilling nd +/AGMCORE_composite_job nd +/AGMCORE_producing_seps nd +/AGMCORE_ps_level -1 def +/AGMCORE_ps_version -1 def +/AGMCORE_environ_ok nd +/AGMCORE_CSD_cache 0 dict def +/AGMCORE_currentoverprint false def +/AGMCORE_deltaX nd +/AGMCORE_deltaY nd +/AGMCORE_name nd +/AGMCORE_sep_special nd +/AGMCORE_err_strings 4 dict def +/AGMCORE_cur_err nd +/AGMCORE_current_spot_alias false def +/AGMCORE_inverting false def +/AGMCORE_feature_dictCount nd +/AGMCORE_feature_opCount nd +/AGMCORE_feature_ctm nd +/AGMCORE_ConvertToProcess false def +/AGMCORE_Default_CTM matrix def +/AGMCORE_Default_PageSize nd +/AGMCORE_Default_flatness nd +/AGMCORE_currentbg nd +/AGMCORE_currentucr nd +/AGMCORE_pattern_paint_type 0 def +/knockout_unitsq nd +currentglobal true setglobal +[/CSA/Gradient/Procedure] +{ + /Generic/Category findresource dup length dict copy/Category defineresource pop +}forall +setglobal +/AGMCORE_key_known +{ + where{ + /Adobe_AGM_Core_Id known + }{ + false + }ifelse +}ndf +/flushinput +{ + save + 2 dict begin + /CompareBuffer 3 -1 roll def + /readbuffer 256 string def + mark + { + currentfile readbuffer{readline}stopped + {cleartomark mark} + { + not + {pop exit} + if + CompareBuffer eq + {exit} + if + }ifelse + }loop + cleartomark + 
end + restore +}bdf +/getspotfunction +{ + AGMCORE_screen exch pop exch pop + dup type/dicttype eq{ + dup/HalftoneType get 1 eq{ + /SpotFunction get + }{ + dup/HalftoneType get 2 eq{ + /GraySpotFunction get + }{ + pop + { + abs exch abs 2 copy add 1 gt{ + 1 sub dup mul exch 1 sub dup mul add 1 sub + }{ + dup mul exch dup mul add 1 exch sub + }ifelse + }bind + }ifelse + }ifelse + }if +}def +/np +{newpath}bdf +/clp_npth +{clip np}def +/eoclp_npth +{eoclip np}def +/npth_clp +{np clip}def +/graphic_setup +{ + /AGMCORE_graphicsave save store + concat + 0 setgray + 0 setlinecap + 0 setlinejoin + 1 setlinewidth + []0 setdash + 10 setmiterlimit + np + false setoverprint + false setstrokeadjust + //Adobe_AGM_Core/spot_alias gx + /Adobe_AGM_Image where{ + pop + Adobe_AGM_Image/spot_alias 2 copy known{ + gx + }{ + pop pop + }ifelse + }if + /sep_colorspace_dict null AGMCORE_gput + 100 dict begin + /dictstackcount countdictstack def + /showpage{}def + mark +}def +/graphic_cleanup +{ + cleartomark + dictstackcount 1 countdictstack 1 sub{end}for + end + AGMCORE_graphicsave restore +}def +/compose_error_msg +{ + grestoreall initgraphics + /Helvetica findfont 10 scalefont setfont + /AGMCORE_deltaY 100 def + /AGMCORE_deltaX 310 def + clippath pathbbox np pop pop 36 add exch 36 add exch moveto + 0 AGMCORE_deltaY rlineto AGMCORE_deltaX 0 rlineto + 0 AGMCORE_deltaY neg rlineto AGMCORE_deltaX neg 0 rlineto closepath + 0 AGMCORE_&setgray + gsave 1 AGMCORE_&setgray fill grestore + 1 setlinewidth gsave stroke grestore + currentpoint AGMCORE_deltaY 15 sub add exch 8 add exch moveto + /AGMCORE_deltaY 12 def + /AGMCORE_tmp 0 def + AGMCORE_err_strings exch get + { + dup 32 eq + { + pop + AGMCORE_str256 0 AGMCORE_tmp getinterval + stringwidth pop currentpoint pop add AGMCORE_deltaX 28 add gt + { + currentpoint AGMCORE_deltaY sub exch pop + clippath pathbbox pop pop pop 44 add exch moveto + }if + AGMCORE_str256 0 AGMCORE_tmp getinterval show( )show + 0 1 AGMCORE_str256 length 1 sub + { + 
AGMCORE_str256 exch 0 put + }for + /AGMCORE_tmp 0 def + }{ + AGMCORE_str256 exch AGMCORE_tmp xpt + /AGMCORE_tmp AGMCORE_tmp 1 add def + }ifelse + }forall +}bdf +/AGMCORE_CMYKDeviceNColorspaces[ + [/Separation/None/DeviceCMYK{0 0 0}] + [/Separation(Black)/DeviceCMYK{0 0 0 4 -1 roll}bind] + [/Separation(Yellow)/DeviceCMYK{0 0 3 -1 roll 0}bind] + [/DeviceN[(Yellow)(Black)]/DeviceCMYK{0 0 4 2 roll}bind] + [/Separation(Magenta)/DeviceCMYK{0 exch 0 0}bind] + [/DeviceN[(Magenta)(Black)]/DeviceCMYK{0 3 1 roll 0 exch}bind] + [/DeviceN[(Magenta)(Yellow)]/DeviceCMYK{0 3 1 roll 0}bind] + [/DeviceN[(Magenta)(Yellow)(Black)]/DeviceCMYK{0 4 1 roll}bind] + [/Separation(Cyan)/DeviceCMYK{0 0 0}] + [/DeviceN[(Cyan)(Black)]/DeviceCMYK{0 0 3 -1 roll}bind] + [/DeviceN[(Cyan)(Yellow)]/DeviceCMYK{0 exch 0}bind] + [/DeviceN[(Cyan)(Yellow)(Black)]/DeviceCMYK{0 3 1 roll}bind] + [/DeviceN[(Cyan)(Magenta)]/DeviceCMYK{0 0}] + [/DeviceN[(Cyan)(Magenta)(Black)]/DeviceCMYK{0 exch}bind] + [/DeviceN[(Cyan)(Magenta)(Yellow)]/DeviceCMYK{0}] + [/DeviceCMYK] +]def +/ds{ + Adobe_AGM_Core begin + /currentdistillerparams where + { + pop currentdistillerparams/CoreDistVersion get 5000 lt + {<>setdistillerparams}if + }if + /AGMCORE_ps_version xdf + /AGMCORE_ps_level xdf + errordict/AGM_handleerror known not{ + errordict/AGM_handleerror errordict/handleerror get put + errordict/handleerror{ + Adobe_AGM_Core begin + $error/newerror get AGMCORE_cur_err null ne and{ + $error/newerror false put + AGMCORE_cur_err compose_error_msg + }if + $error/newerror true put + end + errordict/AGM_handleerror get exec + }bind put + }if + /AGMCORE_environ_ok + ps_level AGMCORE_ps_level ge + ps_version AGMCORE_ps_version ge and + AGMCORE_ps_level -1 eq or + def + AGMCORE_environ_ok not + {/AGMCORE_cur_err/AGMCORE_bad_environ def}if + /AGMCORE_&setgray systemdict/setgray get def + level2{ + /AGMCORE_&setcolor systemdict/setcolor get def + /AGMCORE_&setcolorspace systemdict/setcolorspace get def + }if + /AGMCORE_currentbg 
currentblackgeneration def + /AGMCORE_currentucr currentundercolorremoval def + /AGMCORE_Default_flatness currentflat def + /AGMCORE_distilling + /product where{ + pop systemdict/setdistillerparams known product(Adobe PostScript Parser)ne and + }{ + false + }ifelse + def + /AGMCORE_GSTATE AGMCORE_key_known not{ + /AGMCORE_GSTATE 21 dict def + /AGMCORE_tmpmatrix matrix def + /AGMCORE_gstack 32 array def + /AGMCORE_gstackptr 0 def + /AGMCORE_gstacksaveptr 0 def + /AGMCORE_gstackframekeys 14 def + /AGMCORE_&gsave/gsave ldf + /AGMCORE_&grestore/grestore ldf + /AGMCORE_&grestoreall/grestoreall ldf + /AGMCORE_&save/save ldf + /AGMCORE_&setoverprint/setoverprint ldf + /AGMCORE_gdictcopy{ + begin + {def}forall + end + }def + /AGMCORE_gput{ + AGMCORE_gstack AGMCORE_gstackptr get + 3 1 roll + put + }def + /AGMCORE_gget{ + AGMCORE_gstack AGMCORE_gstackptr get + exch + get + }def + /gsave{ + AGMCORE_&gsave + AGMCORE_gstack AGMCORE_gstackptr get + AGMCORE_gstackptr 1 add + dup 32 ge{limitcheck}if + /AGMCORE_gstackptr exch store + AGMCORE_gstack AGMCORE_gstackptr get + AGMCORE_gdictcopy + }def + /grestore{ + AGMCORE_&grestore + AGMCORE_gstackptr 1 sub + dup AGMCORE_gstacksaveptr lt{1 add}if + dup AGMCORE_gstack exch get dup/AGMCORE_currentoverprint known + {/AGMCORE_currentoverprint get setoverprint}{pop}ifelse + /AGMCORE_gstackptr exch store + }def + /grestoreall{ + AGMCORE_&grestoreall + /AGMCORE_gstackptr AGMCORE_gstacksaveptr store + }def + /save{ + AGMCORE_&save + AGMCORE_gstack AGMCORE_gstackptr get + AGMCORE_gstackptr 1 add + dup 32 ge{limitcheck}if + /AGMCORE_gstackptr exch store + /AGMCORE_gstacksaveptr AGMCORE_gstackptr store + AGMCORE_gstack AGMCORE_gstackptr get + AGMCORE_gdictcopy + }def + /setoverprint{ + dup/AGMCORE_currentoverprint exch AGMCORE_gput AGMCORE_&setoverprint + }def + 0 1 AGMCORE_gstack length 1 sub{ + AGMCORE_gstack exch AGMCORE_gstackframekeys dict put + }for + }if + level3/AGMCORE_&sysshfill AGMCORE_key_known not and + { + /AGMCORE_&sysshfill 
systemdict/shfill get def + /AGMCORE_&sysmakepattern systemdict/makepattern get def + /AGMCORE_&usrmakepattern/makepattern load def + }if + /currentcmykcolor[0 0 0 0]AGMCORE_gput + /currentstrokeadjust false AGMCORE_gput + /currentcolorspace[/DeviceGray]AGMCORE_gput + /sep_tint 0 AGMCORE_gput + /devicen_tints[0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0]AGMCORE_gput + /sep_colorspace_dict null AGMCORE_gput + /devicen_colorspace_dict null AGMCORE_gput + /indexed_colorspace_dict null AGMCORE_gput + /currentcolor_intent()AGMCORE_gput + /customcolor_tint 1 AGMCORE_gput + /absolute_colorimetric_crd null AGMCORE_gput + /relative_colorimetric_crd null AGMCORE_gput + /saturation_crd null AGMCORE_gput + /perceptual_crd null AGMCORE_gput + currentcolortransfer cvlit/AGMCore_gray_xfer xdf cvlit/AGMCore_b_xfer xdf + cvlit/AGMCore_g_xfer xdf cvlit/AGMCore_r_xfer xdf + << + /MaxPatternItem currentsystemparams/MaxPatternCache get + >> + setuserparams + end +}def +/ps +{ + /setcmykcolor where{ + pop + Adobe_AGM_Core/AGMCORE_&setcmykcolor/setcmykcolor load put + }if + Adobe_AGM_Core begin + /setcmykcolor + { + 4 copy AGMCORE_cmykbuf astore/currentcmykcolor exch AGMCORE_gput + 1 sub 4 1 roll + 3{ + 3 index add neg dup 0 lt{ + pop 0 + }if + 3 1 roll + }repeat + setrgbcolor pop + }ndf + /currentcmykcolor + { + /currentcmykcolor AGMCORE_gget aload pop + }ndf + /setoverprint + {pop}ndf + /currentoverprint + {false}ndf + /AGMCORE_cyan_plate 1 0 0 0 test_cmyk_color_plate def + /AGMCORE_magenta_plate 0 1 0 0 test_cmyk_color_plate def + /AGMCORE_yellow_plate 0 0 1 0 test_cmyk_color_plate def + /AGMCORE_black_plate 0 0 0 1 test_cmyk_color_plate def + /AGMCORE_plate_ndx + AGMCORE_cyan_plate{ + 0 + }{ + AGMCORE_magenta_plate{ + 1 + }{ + AGMCORE_yellow_plate{ + 2 + }{ + AGMCORE_black_plate{ + 3 + }{ + 4 + }ifelse + }ifelse + }ifelse + }ifelse + def + /AGMCORE_have_reported_unsupported_color_space false def + /AGMCORE_report_unsupported_color_space + { + 
AGMCORE_have_reported_unsupported_color_space false eq + { + (Warning: Job contains content that cannot be separated with on-host methods. This content appears on the black plate, and knocks out all other plates.)== + Adobe_AGM_Core/AGMCORE_have_reported_unsupported_color_space true ddf + }if + }def + /AGMCORE_composite_job + AGMCORE_cyan_plate AGMCORE_magenta_plate and AGMCORE_yellow_plate and AGMCORE_black_plate and def + /AGMCORE_in_rip_sep + /AGMCORE_in_rip_sep where{ + pop AGMCORE_in_rip_sep + }{ + AGMCORE_distilling + { + false + }{ + userdict/Adobe_AGM_OnHost_Seps known{ + false + }{ + level2{ + currentpagedevice/Separations 2 copy known{ + get + }{ + pop pop false + }ifelse + }{ + false + }ifelse + }ifelse + }ifelse + }ifelse + def + /AGMCORE_producing_seps AGMCORE_composite_job not AGMCORE_in_rip_sep or def + /AGMCORE_host_sep AGMCORE_producing_seps AGMCORE_in_rip_sep not and def + /AGM_preserve_spots + /AGM_preserve_spots where{ + pop AGM_preserve_spots + }{ + AGMCORE_distilling AGMCORE_producing_seps or + }ifelse + def + /AGM_is_distiller_preserving_spotimages + { + currentdistillerparams/PreserveOverprintSettings known + { + currentdistillerparams/PreserveOverprintSettings get + { + currentdistillerparams/ColorConversionStrategy known + { + currentdistillerparams/ColorConversionStrategy get + /sRGB ne + }{ + true + }ifelse + }{ + false + }ifelse + }{ + false + }ifelse + }def + /convert_spot_to_process where{pop}{ + /convert_spot_to_process + { + //Adobe_AGM_Core begin + dup map_alias{ + /Name get exch pop + }if + dup dup(None)eq exch(All)eq or + { + pop false + }{ + AGMCORE_host_sep + { + gsave + 1 0 0 0 setcmykcolor currentgray 1 exch sub + 0 1 0 0 setcmykcolor currentgray 1 exch sub + 0 0 1 0 setcmykcolor currentgray 1 exch sub + 0 0 0 1 setcmykcolor currentgray 1 exch sub + add add add 0 eq + { + pop false + }{ + false setoverprint + current_spot_alias false set_spot_alias + 1 1 1 1 6 -1 roll findcmykcustomcolor 1 setcustomcolor + set_spot_alias + 
currentgray 1 ne + }ifelse + grestore + }{ + AGMCORE_distilling + { + pop AGM_is_distiller_preserving_spotimages not + }{ + //Adobe_AGM_Core/AGMCORE_name xddf + false + //Adobe_AGM_Core/AGMCORE_pattern_paint_type get 0 eq + AGMUTIL_cpd/OverrideSeparations known and + { + AGMUTIL_cpd/OverrideSeparations get + { + /HqnSpots/ProcSet resourcestatus + { + pop pop pop true + }if + }if + }if + { + AGMCORE_name/HqnSpots/ProcSet findresource/TestSpot gx not + }{ + gsave + [/Separation AGMCORE_name/DeviceGray{}]AGMCORE_&setcolorspace + false + AGMUTIL_cpd/SeparationColorNames 2 copy known + { + get + {AGMCORE_name eq or}forall + not + }{ + pop pop pop true + }ifelse + grestore + }ifelse + }ifelse + }ifelse + }ifelse + end + }def + }ifelse + /convert_to_process where{pop}{ + /convert_to_process + { + dup length 0 eq + { + pop false + }{ + AGMCORE_host_sep + { + dup true exch + { + dup(Cyan)eq exch + dup(Magenta)eq 3 -1 roll or exch + dup(Yellow)eq 3 -1 roll or exch + dup(Black)eq 3 -1 roll or + {pop} + {convert_spot_to_process and}ifelse + } + forall + { + true exch + { + dup(Cyan)eq exch + dup(Magenta)eq 3 -1 roll or exch + dup(Yellow)eq 3 -1 roll or exch + (Black)eq or and + }forall + not + }{pop false}ifelse + }{ + false exch + { + /PhotoshopDuotoneList where{pop false}{true}ifelse + { + dup(Cyan)eq exch + dup(Magenta)eq 3 -1 roll or exch + dup(Yellow)eq 3 -1 roll or exch + dup(Black)eq 3 -1 roll or + {pop} + {convert_spot_to_process or}ifelse + } + { + convert_spot_to_process or + } + ifelse + } + forall + }ifelse + }ifelse + }def + }ifelse + /AGMCORE_avoid_L2_sep_space + version cvr 2012 lt + level2 and + AGMCORE_producing_seps not and + def + /AGMCORE_is_cmyk_sep + AGMCORE_cyan_plate AGMCORE_magenta_plate or AGMCORE_yellow_plate or AGMCORE_black_plate or + def + /AGM_avoid_0_cmyk where{ + pop AGM_avoid_0_cmyk + }{ + AGM_preserve_spots + userdict/Adobe_AGM_OnHost_Seps known + userdict/Adobe_AGM_InRip_Seps known or + not and + }ifelse + { + /setcmykcolor[ + { + 4 copy add 
add add 0 eq currentoverprint and{ + pop 0.0005 + }if + }/exec cvx + /AGMCORE_&setcmykcolor load dup type/operatortype ne{ + /exec cvx + }if + ]cvx def + }if + /AGMCORE_IsSeparationAProcessColor + { + dup(Cyan)eq exch dup(Magenta)eq exch dup(Yellow)eq exch(Black)eq or or or + }def + AGMCORE_host_sep{ + /setcolortransfer + { + AGMCORE_cyan_plate{ + pop pop pop + }{ + AGMCORE_magenta_plate{ + 4 3 roll pop pop pop + }{ + AGMCORE_yellow_plate{ + 4 2 roll pop pop pop + }{ + 4 1 roll pop pop pop + }ifelse + }ifelse + }ifelse + settransfer + } + def + /AGMCORE_get_ink_data + AGMCORE_cyan_plate{ + {pop pop pop} + }{ + AGMCORE_magenta_plate{ + {4 3 roll pop pop pop} + }{ + AGMCORE_yellow_plate{ + {4 2 roll pop pop pop} + }{ + {4 1 roll pop pop pop} + }ifelse + }ifelse + }ifelse + def + /AGMCORE_RemoveProcessColorNames + { + 1 dict begin + /filtername + { + dup/Cyan eq 1 index(Cyan)eq or + {pop(_cyan_)}if + dup/Magenta eq 1 index(Magenta)eq or + {pop(_magenta_)}if + dup/Yellow eq 1 index(Yellow)eq or + {pop(_yellow_)}if + dup/Black eq 1 index(Black)eq or + {pop(_black_)}if + }def + dup type/arraytype eq + {[exch{filtername}forall]} + {filtername}ifelse + end + }def + level3{ + /AGMCORE_IsCurrentColor + { + dup AGMCORE_IsSeparationAProcessColor + { + AGMCORE_plate_ndx 0 eq + {dup(Cyan)eq exch/Cyan eq or}if + AGMCORE_plate_ndx 1 eq + {dup(Magenta)eq exch/Magenta eq or}if + AGMCORE_plate_ndx 2 eq + {dup(Yellow)eq exch/Yellow eq or}if + AGMCORE_plate_ndx 3 eq + {dup(Black)eq exch/Black eq or}if + AGMCORE_plate_ndx 4 eq + {pop false}if + }{ + gsave + false setoverprint + current_spot_alias false set_spot_alias + 1 1 1 1 6 -1 roll findcmykcustomcolor 1 setcustomcolor + set_spot_alias + currentgray 1 ne + grestore + }ifelse + }def + /AGMCORE_filter_functiondatasource + { + 5 dict begin + /data_in xdf + data_in type/stringtype eq + { + /ncomp xdf + /comp xdf + /string_out data_in length ncomp idiv string def + 0 ncomp data_in length 1 sub + { + string_out exch dup ncomp idiv exch 
data_in exch ncomp getinterval comp get 255 exch sub put + }for + string_out + }{ + string/string_in xdf + /string_out 1 string def + /component xdf + [ + data_in string_in/readstring cvx + [component/get cvx 255/exch cvx/sub cvx string_out/exch cvx 0/exch cvx/put cvx string_out]cvx + [/pop cvx()]cvx/ifelse cvx + ]cvx/ReusableStreamDecode filter + }ifelse + end + }def + /AGMCORE_separateShadingFunction + { + 2 dict begin + /paint? xdf + /channel xdf + dup type/dicttype eq + { + begin + FunctionType 0 eq + { + /DataSource channel Range length 2 idiv DataSource AGMCORE_filter_functiondatasource def + currentdict/Decode known + {/Decode Decode channel 2 mul 2 getinterval def}if + paint? not + {/Decode[1 1]def}if + }if + FunctionType 2 eq + { + paint? + { + /C0[C0 channel get 1 exch sub]def + /C1[C1 channel get 1 exch sub]def + }{ + /C0[1]def + /C1[1]def + }ifelse + }if + FunctionType 3 eq + { + /Functions[Functions{channel paint? AGMCORE_separateShadingFunction}forall]def + }if + currentdict/Range known + {/Range[0 1]def}if + currentdict + end}{ + channel get 0 paint? 
AGMCORE_separateShadingFunction + }ifelse + end + }def + /AGMCORE_separateShading + { + 3 -1 roll begin + currentdict/Function known + { + currentdict/Background known + {[1 index{Background 3 index get 1 exch sub}{1}ifelse]/Background xdf}if + Function 3 1 roll AGMCORE_separateShadingFunction/Function xdf + /ColorSpace[/DeviceGray]def + }{ + ColorSpace dup type/arraytype eq{0 get}if/DeviceCMYK eq + { + /ColorSpace[/DeviceN[/_cyan_/_magenta_/_yellow_/_black_]/DeviceCMYK{}]def + }{ + ColorSpace dup 1 get AGMCORE_RemoveProcessColorNames 1 exch put + }ifelse + ColorSpace 0 get/Separation eq + { + { + [1/exch cvx/sub cvx]cvx + }{ + [/pop cvx 1]cvx + }ifelse + ColorSpace 3 3 -1 roll put + pop + }{ + { + [exch ColorSpace 1 get length 1 sub exch sub/index cvx 1/exch cvx/sub cvx ColorSpace 1 get length 1 add 1/roll cvx ColorSpace 1 get length{/pop cvx}repeat]cvx + }{ + pop[ColorSpace 1 get length{/pop cvx}repeat cvx 1]cvx + }ifelse + ColorSpace 3 3 -1 roll bind put + }ifelse + ColorSpace 2/DeviceGray put + }ifelse + end + }def + /AGMCORE_separateShadingDict + { + dup/ColorSpace get + dup type/arraytype ne + {[exch]}if + dup 0 get/DeviceCMYK eq + { + exch begin + currentdict + AGMCORE_cyan_plate + {0 true}if + AGMCORE_magenta_plate + {1 true}if + AGMCORE_yellow_plate + {2 true}if + AGMCORE_black_plate + {3 true}if + AGMCORE_plate_ndx 4 eq + {0 false}if + dup not currentoverprint and + {/AGMCORE_ignoreshade true def}if + AGMCORE_separateShading + currentdict + end exch + }if + dup 0 get/Separation eq + { + exch begin + ColorSpace 1 get dup/None ne exch/All ne and + { + ColorSpace 1 get AGMCORE_IsCurrentColor AGMCORE_plate_ndx 4 lt and ColorSpace 1 get AGMCORE_IsSeparationAProcessColor not and + { + ColorSpace 2 get dup type/arraytype eq{0 get}if/DeviceCMYK eq + { + /ColorSpace + [ + /Separation + ColorSpace 1 get + /DeviceGray + [ + ColorSpace 3 get/exec cvx + 4 AGMCORE_plate_ndx sub -1/roll cvx + 4 1/roll cvx + 3[/pop cvx]cvx/repeat cvx + 1/exch cvx/sub cvx + ]cvx + ]def + 
}{ + AGMCORE_report_unsupported_color_space + AGMCORE_black_plate not + { + currentdict 0 false AGMCORE_separateShading + }if + }ifelse + }{ + currentdict ColorSpace 1 get AGMCORE_IsCurrentColor + 0 exch + dup not currentoverprint and + {/AGMCORE_ignoreshade true def}if + AGMCORE_separateShading + }ifelse + }if + currentdict + end exch + }if + dup 0 get/DeviceN eq + { + exch begin + ColorSpace 1 get convert_to_process + { + ColorSpace 2 get dup type/arraytype eq{0 get}if/DeviceCMYK eq + { + /ColorSpace + [ + /DeviceN + ColorSpace 1 get + /DeviceGray + [ + ColorSpace 3 get/exec cvx + 4 AGMCORE_plate_ndx sub -1/roll cvx + 4 1/roll cvx + 3[/pop cvx]cvx/repeat cvx + 1/exch cvx/sub cvx + ]cvx + ]def + }{ + AGMCORE_report_unsupported_color_space + AGMCORE_black_plate not + { + currentdict 0 false AGMCORE_separateShading + /ColorSpace[/DeviceGray]def + }if + }ifelse + }{ + currentdict + false -1 ColorSpace 1 get + { + AGMCORE_IsCurrentColor + { + 1 add + exch pop true exch exit + }if + 1 add + }forall + exch + dup not currentoverprint and + {/AGMCORE_ignoreshade true def}if + AGMCORE_separateShading + }ifelse + currentdict + end exch + }if + dup 0 get dup/DeviceCMYK eq exch dup/Separation eq exch/DeviceN eq or or not + { + exch begin + ColorSpace dup type/arraytype eq + {0 get}if + /DeviceGray ne + { + AGMCORE_report_unsupported_color_space + AGMCORE_black_plate not + { + ColorSpace 0 get/CIEBasedA eq + { + /ColorSpace[/Separation/_ciebaseda_/DeviceGray{}]def + }if + ColorSpace 0 get dup/CIEBasedABC eq exch dup/CIEBasedDEF eq exch/DeviceRGB eq or or + { + /ColorSpace[/DeviceN[/_red_/_green_/_blue_]/DeviceRGB{}]def + }if + ColorSpace 0 get/CIEBasedDEFG eq + { + /ColorSpace[/DeviceN[/_cyan_/_magenta_/_yellow_/_black_]/DeviceCMYK{}]def + }if + currentdict 0 false AGMCORE_separateShading + }if + }if + currentdict + end exch + }if + pop + dup/AGMCORE_ignoreshade known + { + begin + /ColorSpace[/Separation(None)/DeviceGray{}]def + currentdict end + }if + }def + /shfill + { + 
AGMCORE_separateShadingDict + dup/AGMCORE_ignoreshade known + {pop} + {AGMCORE_&sysshfill}ifelse + }def + /makepattern + { + exch + dup/PatternType get 2 eq + { + clonedict + begin + /Shading Shading AGMCORE_separateShadingDict def + Shading/AGMCORE_ignoreshade known + currentdict end exch + {pop<>}if + exch AGMCORE_&sysmakepattern + }{ + exch AGMCORE_&usrmakepattern + }ifelse + }def + }if + }if + AGMCORE_in_rip_sep{ + /setcustomcolor + { + exch aload pop + dup 7 1 roll inRip_spot_has_ink not { + 4{4 index mul 4 1 roll} + repeat + /DeviceCMYK setcolorspace + 6 -2 roll pop pop + }{ + //Adobe_AGM_Core begin + /AGMCORE_k xdf/AGMCORE_y xdf/AGMCORE_m xdf/AGMCORE_c xdf + end + [/Separation 4 -1 roll/DeviceCMYK + {dup AGMCORE_c mul exch dup AGMCORE_m mul exch dup AGMCORE_y mul exch AGMCORE_k mul} + ] + setcolorspace + }ifelse + setcolor + }ndf + /setseparationgray + { + [/Separation(All)/DeviceGray{}]setcolorspace_opt + 1 exch sub setcolor + }ndf + }{ + /setseparationgray + { + AGMCORE_&setgray + }ndf + }ifelse + /findcmykcustomcolor + { + 5 makereadonlyarray + }ndf + /setcustomcolor + { + exch aload pop pop + 4{4 index mul 4 1 roll}repeat + setcmykcolor pop + }ndf + /has_color + /colorimage where{ + AGMCORE_producing_seps{ + pop true + }{ + systemdict eq + }ifelse + }{ + false + }ifelse + def + /map_index + { + 1 index mul exch getinterval{255 div}forall + }bdf + /map_indexed_devn + { + Lookup Names length 3 -1 roll cvi map_index + }bdf + /n_color_components + { + base_colorspace_type + dup/DeviceGray eq{ + pop 1 + }{ + /DeviceCMYK eq{ + 4 + }{ + 3 + }ifelse + }ifelse + }bdf + level2{ + /mo/moveto ldf + /li/lineto ldf + /cv/curveto ldf + /knockout_unitsq + { + 1 setgray + 0 0 1 1 rectfill + }def + level2/setcolorspace AGMCORE_key_known not and{ + /AGMCORE_&&&setcolorspace/setcolorspace ldf + /AGMCORE_ReplaceMappedColor + { + dup type dup/arraytype eq exch/packedarraytype eq or + { + /AGMCORE_SpotAliasAry2 where{ + begin + dup 0 get dup/Separation eq + { + pop + dup 
length array copy + dup dup 1 get + current_spot_alias + { + dup map_alias + { + false set_spot_alias + dup 1 exch setsepcolorspace + true set_spot_alias + begin + /sep_colorspace_dict currentdict AGMCORE_gput + pop pop pop + [ + /Separation Name + CSA map_csa + MappedCSA + /sep_colorspace_proc load + ] + dup Name + end + }if + }if + map_reserved_ink_name 1 xpt + }{ + /DeviceN eq + { + dup length array copy + dup dup 1 get[ + exch{ + current_spot_alias{ + dup map_alias{ + /Name get exch pop + }if + }if + map_reserved_ink_name + }forall + ]1 xpt + }if + }ifelse + end + }if + }if + }def + /setcolorspace + { + dup type dup/arraytype eq exch/packedarraytype eq or + { + dup 0 get/Indexed eq + { + AGMCORE_distilling + { + /PhotoshopDuotoneList where + { + pop false + }{ + true + }ifelse + }{ + true + }ifelse + { + aload pop 3 -1 roll + AGMCORE_ReplaceMappedColor + 3 1 roll 4 array astore + }if + }{ + AGMCORE_ReplaceMappedColor + }ifelse + }if + DeviceN_PS2_inRip_seps{AGMCORE_&&&setcolorspace}if + }def + }if + }{ + /adj + { + currentstrokeadjust{ + transform + 0.25 sub round 0.25 add exch + 0.25 sub round 0.25 add exch + itransform + }if + }def + /mo{ + adj moveto + }def + /li{ + adj lineto + }def + /cv{ + 6 2 roll adj + 6 2 roll adj + 6 2 roll adj curveto + }def + /knockout_unitsq + { + 1 setgray + 8 8 1[8 0 0 8 0 0]{}image + }def + /currentstrokeadjust{ + /currentstrokeadjust AGMCORE_gget + }def + /setstrokeadjust{ + /currentstrokeadjust exch AGMCORE_gput + }def + /setcolorspace + { + /currentcolorspace exch AGMCORE_gput + }def + /currentcolorspace + { + /currentcolorspace AGMCORE_gget + }def + /setcolor_devicecolor + { + base_colorspace_type + dup/DeviceGray eq{ + pop setgray + }{ + /DeviceCMYK eq{ + setcmykcolor + }{ + setrgbcolor + }ifelse + }ifelse + }def + /setcolor + { + currentcolorspace 0 get + dup/DeviceGray ne{ + dup/DeviceCMYK ne{ + dup/DeviceRGB ne{ + dup/Separation eq{ + pop + currentcolorspace 3 gx + currentcolorspace 2 get + }{ + dup/Indexed eq{ + pop + 
currentcolorspace 3 get dup type/stringtype eq{ + currentcolorspace 1 get n_color_components + 3 -1 roll map_index + }{ + exec + }ifelse + currentcolorspace 1 get + }{ + /AGMCORE_cur_err/AGMCORE_invalid_color_space def + AGMCORE_invalid_color_space + }ifelse + }ifelse + }if + }if + }if + setcolor_devicecolor + }def + }ifelse + /sop/setoverprint ldf + /lw/setlinewidth ldf + /lc/setlinecap ldf + /lj/setlinejoin ldf + /ml/setmiterlimit ldf + /dsh/setdash ldf + /sadj/setstrokeadjust ldf + /gry/setgray ldf + /rgb/setrgbcolor ldf + /cmyk[ + /currentcolorspace[/DeviceCMYK]/AGMCORE_gput cvx + /setcmykcolor load dup type/operatortype ne{/exec cvx}if + ]cvx bdf + level3 AGMCORE_host_sep not and{ + /nzopmsc{ + 6 dict begin + /kk exch def + /yy exch def + /mm exch def + /cc exch def + /sum 0 def + cc 0 ne{/sum sum 2#1000 or def cc}if + mm 0 ne{/sum sum 2#0100 or def mm}if + yy 0 ne{/sum sum 2#0010 or def yy}if + kk 0 ne{/sum sum 2#0001 or def kk}if + AGMCORE_CMYKDeviceNColorspaces sum get setcolorspace + sum 0 eq{0}if + end + setcolor + }bdf + }{ + /nzopmsc/cmyk ldf + }ifelse + /sep/setsepcolor ldf + /devn/setdevicencolor ldf + /idx/setindexedcolor ldf + /colr/setcolor ldf + /csacrd/set_csa_crd ldf + /sepcs/setsepcolorspace ldf + /devncs/setdevicencolorspace ldf + /idxcs/setindexedcolorspace ldf + /cp/closepath ldf + /clp/clp_npth ldf + /eclp/eoclp_npth ldf + /f/fill ldf + /ef/eofill ldf + /@/stroke ldf + /nclp/npth_clp ldf + /gset/graphic_setup ldf + /gcln/graphic_cleanup ldf + /ct/concat ldf + /cf/currentfile ldf + /fl/filter ldf + /rs/readstring ldf + /AGMCORE_def_ht currenthalftone def + /clonedict Adobe_AGM_Utils begin/clonedict load end def + /clonearray Adobe_AGM_Utils begin/clonearray load end def + currentdict{ + dup xcheck 1 index type dup/arraytype eq exch/packedarraytype eq or and{ + bind + }if + def + }forall + /getrampcolor + { + /indx exch def + 0 1 NumComp 1 sub + { + dup + Samples exch get + dup type/stringtype eq{indx get}if + exch + Scaling exch get aload 
pop + 3 1 roll + mul add + }for + ColorSpaceFamily/Separation eq + {sep} + { + ColorSpaceFamily/DeviceN eq + {devn}{setcolor}ifelse + }ifelse + }bdf + /sssetbackground{ + aload pop + ColorSpaceFamily/Separation eq + {sep} + { + ColorSpaceFamily/DeviceN eq + {devn}{setcolor}ifelse + }ifelse + }bdf + /RadialShade + { + 40 dict begin + /ColorSpaceFamily xdf + /background xdf + /ext1 xdf + /ext0 xdf + /BBox xdf + /r2 xdf + /c2y xdf + /c2x xdf + /r1 xdf + /c1y xdf + /c1x xdf + /rampdict xdf + /setinkoverprint where{pop/setinkoverprint{pop}def}if + gsave + BBox length 0 gt + { + np + BBox 0 get BBox 1 get moveto + BBox 2 get BBox 0 get sub 0 rlineto + 0 BBox 3 get BBox 1 get sub rlineto + BBox 2 get BBox 0 get sub neg 0 rlineto + closepath + clip + np + }if + c1x c2x eq + { + c1y c2y lt{/theta 90 def}{/theta 270 def}ifelse + }{ + /slope c2y c1y sub c2x c1x sub div def + /theta slope 1 atan def + c2x c1x lt c2y c1y ge and{/theta theta 180 sub def}if + c2x c1x lt c2y c1y lt and{/theta theta 180 add def}if + }ifelse + gsave + clippath + c1x c1y translate + theta rotate + -90 rotate + {pathbbox}stopped + {0 0 0 0}if + /yMax xdf + /xMax xdf + /yMin xdf + /xMin xdf + grestore + xMax xMin eq yMax yMin eq or + { + grestore + end + }{ + /max{2 copy gt{pop}{exch pop}ifelse}bdf + /min{2 copy lt{pop}{exch pop}ifelse}bdf + rampdict begin + 40 dict begin + background length 0 gt{background sssetbackground gsave clippath fill grestore}if + gsave + c1x c1y translate + theta rotate + -90 rotate + /c2y c1x c2x sub dup mul c1y c2y sub dup mul add sqrt def + /c1y 0 def + /c1x 0 def + /c2x 0 def + ext0 + { + 0 getrampcolor + c2y r2 add r1 sub 0.0001 lt + { + c1x c1y r1 360 0 arcn + pathbbox + /aymax exch def + /axmax exch def + /aymin exch def + /axmin exch def + /bxMin xMin axmin min def + /byMin yMin aymin min def + /bxMax xMax axmax max def + /byMax yMax aymax max def + bxMin byMin moveto + bxMax byMin lineto + bxMax byMax lineto + bxMin byMax lineto + bxMin byMin lineto + eofill + }{ + 
c2y r1 add r2 le + { + c1x c1y r1 0 360 arc + fill + } + { + c2x c2y r2 0 360 arc fill + r1 r2 eq + { + /p1x r1 neg def + /p1y c1y def + /p2x r1 def + /p2y c1y def + p1x p1y moveto p2x p2y lineto p2x yMin lineto p1x yMin lineto + fill + }{ + /AA r2 r1 sub c2y div def + AA -1 eq + {/theta 89.99 def} + {/theta AA 1 AA dup mul sub sqrt div 1 atan def} + ifelse + /SS1 90 theta add dup sin exch cos div def + /p1x r1 SS1 SS1 mul SS1 SS1 mul 1 add div sqrt mul neg def + /p1y p1x SS1 div neg def + /SS2 90 theta sub dup sin exch cos div def + /p2x r1 SS2 SS2 mul SS2 SS2 mul 1 add div sqrt mul def + /p2y p2x SS2 div neg def + r1 r2 gt + { + /L1maxX p1x yMin p1y sub SS1 div add def + /L2maxX p2x yMin p2y sub SS2 div add def + }{ + /L1maxX 0 def + /L2maxX 0 def + }ifelse + p1x p1y moveto p2x p2y lineto L2maxX L2maxX p2x sub SS2 mul p2y add lineto + L1maxX L1maxX p1x sub SS1 mul p1y add lineto + fill + }ifelse + }ifelse + }ifelse + }if + c1x c2x sub dup mul + c1y c2y sub dup mul + add 0.5 exp + 0 dtransform + dup mul exch dup mul add 0.5 exp 72 div + 0 72 matrix defaultmatrix dtransform dup mul exch dup mul add sqrt + 72 0 matrix defaultmatrix dtransform dup mul exch dup mul add sqrt + 1 index 1 index lt{exch}if pop + /hires xdf + hires mul + /numpix xdf + /numsteps NumSamples def + /rampIndxInc 1 def + /subsampling false def + numpix 0 ne + { + NumSamples numpix div 0.5 gt + { + /numsteps numpix 2 div round cvi dup 1 le{pop 2}if def + /rampIndxInc NumSamples 1 sub numsteps div def + /subsampling true def + }if + }if + /xInc c2x c1x sub numsteps div def + /yInc c2y c1y sub numsteps div def + /rInc r2 r1 sub numsteps div def + /cx c1x def + /cy c1y def + /radius r1 def + np + xInc 0 eq yInc 0 eq rInc 0 eq and and + { + 0 getrampcolor + cx cy radius 0 360 arc + stroke + NumSamples 1 sub getrampcolor + cx cy radius 72 hires div add 0 360 arc + 0 setlinewidth + stroke + }{ + 0 + numsteps + { + dup + subsampling{round cvi}if + getrampcolor + cx cy radius 0 360 arc + /cx cx xInc add 
def + /cy cy yInc add def + /radius radius rInc add def + cx cy radius 360 0 arcn + eofill + rampIndxInc add + }repeat + pop + }ifelse + ext1 + { + c2y r2 add r1 lt + { + c2x c2y r2 0 360 arc + fill + }{ + c2y r1 add r2 sub 0.0001 le + { + c2x c2y r2 360 0 arcn + pathbbox + /aymax exch def + /axmax exch def + /aymin exch def + /axmin exch def + /bxMin xMin axmin min def + /byMin yMin aymin min def + /bxMax xMax axmax max def + /byMax yMax aymax max def + bxMin byMin moveto + bxMax byMin lineto + bxMax byMax lineto + bxMin byMax lineto + bxMin byMin lineto + eofill + }{ + c2x c2y r2 0 360 arc fill + r1 r2 eq + { + /p1x r2 neg def + /p1y c2y def + /p2x r2 def + /p2y c2y def + p1x p1y moveto p2x p2y lineto p2x yMax lineto p1x yMax lineto + fill + }{ + /AA r2 r1 sub c2y div def + AA -1 eq + {/theta 89.99 def} + {/theta AA 1 AA dup mul sub sqrt div 1 atan def} + ifelse + /SS1 90 theta add dup sin exch cos div def + /p1x r2 SS1 SS1 mul SS1 SS1 mul 1 add div sqrt mul neg def + /p1y c2y p1x SS1 div sub def + /SS2 90 theta sub dup sin exch cos div def + /p2x r2 SS2 SS2 mul SS2 SS2 mul 1 add div sqrt mul def + /p2y c2y p2x SS2 div sub def + r1 r2 lt + { + /L1maxX p1x yMax p1y sub SS1 div add def + /L2maxX p2x yMax p2y sub SS2 div add def + }{ + /L1maxX 0 def + /L2maxX 0 def + }ifelse + p1x p1y moveto p2x p2y lineto L2maxX L2maxX p2x sub SS2 mul p2y add lineto + L1maxX L1maxX p1x sub SS1 mul p1y add lineto + fill + }ifelse + }ifelse + }ifelse + }if + grestore + grestore + end + end + end + }ifelse + }bdf + /GenStrips + { + 40 dict begin + /ColorSpaceFamily xdf + /background xdf + /ext1 xdf + /ext0 xdf + /BBox xdf + /y2 xdf + /x2 xdf + /y1 xdf + /x1 xdf + /rampdict xdf + /setinkoverprint where{pop/setinkoverprint{pop}def}if + gsave + BBox length 0 gt + { + np + BBox 0 get BBox 1 get moveto + BBox 2 get BBox 0 get sub 0 rlineto + 0 BBox 3 get BBox 1 get sub rlineto + BBox 2 get BBox 0 get sub neg 0 rlineto + closepath + clip + np + }if + x1 x2 eq + { + y1 y2 lt{/theta 90 
def}{/theta 270 def}ifelse + }{ + /slope y2 y1 sub x2 x1 sub div def + /theta slope 1 atan def + x2 x1 lt y2 y1 ge and{/theta theta 180 sub def}if + x2 x1 lt y2 y1 lt and{/theta theta 180 add def}if + } + ifelse + gsave + clippath + x1 y1 translate + theta rotate + {pathbbox}stopped + {0 0 0 0}if + /yMax exch def + /xMax exch def + /yMin exch def + /xMin exch def + grestore + xMax xMin eq yMax yMin eq or + { + grestore + end + }{ + rampdict begin + 20 dict begin + background length 0 gt{background sssetbackground gsave clippath fill grestore}if + gsave + x1 y1 translate + theta rotate + /xStart 0 def + /xEnd x2 x1 sub dup mul y2 y1 sub dup mul add 0.5 exp def + /ySpan yMax yMin sub def + /numsteps NumSamples def + /rampIndxInc 1 def + /subsampling false def + xStart 0 transform + xEnd 0 transform + 3 -1 roll + sub dup mul + 3 1 roll + sub dup mul + add 0.5 exp 72 div + 0 72 matrix defaultmatrix dtransform dup mul exch dup mul add sqrt + 72 0 matrix defaultmatrix dtransform dup mul exch dup mul add sqrt + 1 index 1 index lt{exch}if pop + mul + /numpix xdf + numpix 0 ne + { + NumSamples numpix div 0.5 gt + { + /numsteps numpix 2 div round cvi dup 1 le{pop 2}if def + /rampIndxInc NumSamples 1 sub numsteps div def + /subsampling true def + }if + }if + ext0 + { + 0 getrampcolor + xMin xStart lt + { + xMin yMin xMin neg ySpan rectfill + }if + }if + /xInc xEnd xStart sub numsteps div def + /x xStart def + 0 + numsteps + { + dup + subsampling{round cvi}if + getrampcolor + x yMin xInc ySpan rectfill + /x x xInc add def + rampIndxInc add + }repeat + pop + ext1{ + xMax xEnd gt + { + xEnd yMin xMax xEnd sub ySpan rectfill + }if + }if + grestore + grestore + end + end + end + }ifelse + }bdf +}def +/pt +{ + end +}def +/dt{ +}def +/pgsv{ + //Adobe_AGM_Core/AGMCORE_save save put +}def +/pgrs{ + //Adobe_AGM_Core/AGMCORE_save get restore +}def +systemdict/findcolorrendering known{ + /findcolorrendering systemdict/findcolorrendering get def +}if +systemdict/setcolorrendering known{ + 
/setcolorrendering systemdict/setcolorrendering get def +}if +/test_cmyk_color_plate +{ + gsave + setcmykcolor currentgray 1 ne + grestore +}def +/inRip_spot_has_ink +{ + dup//Adobe_AGM_Core/AGMCORE_name xddf + convert_spot_to_process not +}def +/map255_to_range +{ + 1 index sub + 3 -1 roll 255 div mul add +}def +/set_csa_crd +{ + /sep_colorspace_dict null AGMCORE_gput + begin + CSA get_csa_by_name setcolorspace_opt + set_crd + end +} +def +/map_csa +{ + currentdict/MappedCSA known{MappedCSA null ne}{false}ifelse + {pop}{get_csa_by_name/MappedCSA xdf}ifelse +}def +/setsepcolor +{ + /sep_colorspace_dict AGMCORE_gget begin + dup/sep_tint exch AGMCORE_gput + TintProc + end +}def +/setdevicencolor +{ + /devicen_colorspace_dict AGMCORE_gget begin + Names length copy + Names length 1 sub -1 0 + { + /devicen_tints AGMCORE_gget 3 1 roll xpt + }for + TintProc + end +}def +/sep_colorspace_proc +{ + /AGMCORE_tmp exch store + /sep_colorspace_dict AGMCORE_gget begin + currentdict/Components known{ + Components aload pop + TintMethod/Lab eq{ + 2{AGMCORE_tmp mul NComponents 1 roll}repeat + LMax sub AGMCORE_tmp mul LMax add NComponents 1 roll + }{ + TintMethod/Subtractive eq{ + NComponents{ + AGMCORE_tmp mul NComponents 1 roll + }repeat + }{ + NComponents{ + 1 sub AGMCORE_tmp mul 1 add NComponents 1 roll + }repeat + }ifelse + }ifelse + }{ + ColorLookup AGMCORE_tmp ColorLookup length 1 sub mul round cvi get + aload pop + }ifelse + end +}def +/sep_colorspace_gray_proc +{ + /AGMCORE_tmp exch store + /sep_colorspace_dict AGMCORE_gget begin + GrayLookup AGMCORE_tmp GrayLookup length 1 sub mul round cvi get + end +}def +/sep_proc_name +{ + dup 0 get + dup/DeviceRGB eq exch/DeviceCMYK eq or level2 not and has_color not and{ + pop[/DeviceGray] + /sep_colorspace_gray_proc + }{ + /sep_colorspace_proc + }ifelse +}def +/setsepcolorspace +{ + current_spot_alias{ + dup begin + Name map_alias{ + exch pop + }if + end + }if + dup/sep_colorspace_dict exch AGMCORE_gput + begin + CSA map_csa + 
/AGMCORE_sep_special Name dup()eq exch(All)eq or store + AGMCORE_avoid_L2_sep_space{ + [/Indexed MappedCSA sep_proc_name 255 exch + {255 div}/exec cvx 3 -1 roll[4 1 roll load/exec cvx]cvx + ]setcolorspace_opt + /TintProc{ + 255 mul round cvi setcolor + }bdf + }{ + MappedCSA 0 get/DeviceCMYK eq + currentdict/Components known and + AGMCORE_sep_special not and{ + /TintProc[ + Components aload pop Name findcmykcustomcolor + /exch cvx/setcustomcolor cvx + ]cvx bdf + }{ + AGMCORE_host_sep Name(All)eq and{ + /TintProc{ + 1 exch sub setseparationgray + }bdf + }{ + AGMCORE_in_rip_sep MappedCSA 0 get/DeviceCMYK eq and + AGMCORE_host_sep or + Name()eq and{ + /TintProc[ + MappedCSA sep_proc_name exch 0 get/DeviceCMYK eq{ + cvx/setcmykcolor cvx + }{ + cvx/setgray cvx + }ifelse + ]cvx bdf + }{ + AGMCORE_producing_seps MappedCSA 0 get dup/DeviceCMYK eq exch/DeviceGray eq or and AGMCORE_sep_special not and{ + /TintProc[ + /dup cvx + MappedCSA sep_proc_name cvx exch + 0 get/DeviceGray eq{ + 1/exch cvx/sub cvx 0 0 0 4 -1/roll cvx + }if + /Name cvx/findcmykcustomcolor cvx/exch cvx + AGMCORE_host_sep{ + AGMCORE_is_cmyk_sep + /Name cvx + /AGMCORE_IsSeparationAProcessColor load/exec cvx + /not cvx/and cvx + }{ + Name inRip_spot_has_ink not + }ifelse + [ + /pop cvx 1 + ]cvx/if cvx + /setcustomcolor cvx + ]cvx bdf + }{ + /TintProc{setcolor}bdf + [/Separation Name MappedCSA sep_proc_name load]setcolorspace_opt + }ifelse + }ifelse + }ifelse + }ifelse + }ifelse + set_crd + setsepcolor + end +}def +/additive_blend +{ + 3 dict begin + /numarrays xdf + /numcolors xdf + 0 1 numcolors 1 sub + { + /c1 xdf + 1 + 0 1 numarrays 1 sub + { + 1 exch add/index cvx + c1/get cvx/mul cvx + }for + numarrays 1 add 1/roll cvx + }for + numarrays[/pop cvx]cvx/repeat cvx + end +}def +/subtractive_blend +{ + 3 dict begin + /numarrays xdf + /numcolors xdf + 0 1 numcolors 1 sub + { + /c1 xdf + 1 1 + 0 1 numarrays 1 sub + { + 1 3 3 -1 roll add/index cvx + c1/get cvx/sub cvx/mul cvx + }for + /sub cvx + numarrays 1 add 
1/roll cvx + }for + numarrays[/pop cvx]cvx/repeat cvx + end +}def +/exec_tint_transform +{ + /TintProc[ + /TintTransform cvx/setcolor cvx + ]cvx bdf + MappedCSA setcolorspace_opt +}bdf +/devn_makecustomcolor +{ + 2 dict begin + /names_index xdf + /Names xdf + 1 1 1 1 Names names_index get findcmykcustomcolor + /devicen_tints AGMCORE_gget names_index get setcustomcolor + Names length{pop}repeat + end +}bdf +/setdevicencolorspace +{ + dup/AliasedColorants known{false}{true}ifelse + current_spot_alias and{ + 7 dict begin + /names_index 0 def + dup/names_len exch/Names get length def + /new_names names_len array def + /new_LookupTables names_len array def + /alias_cnt 0 def + dup/Names get + { + dup map_alias{ + exch pop + dup/ColorLookup known{ + dup begin + new_LookupTables names_index ColorLookup put + end + }{ + dup/Components known{ + dup begin + new_LookupTables names_index Components put + end + }{ + dup begin + new_LookupTables names_index[null null null null]put + end + }ifelse + }ifelse + new_names names_index 3 -1 roll/Name get put + /alias_cnt alias_cnt 1 add def + }{ + /name xdf + new_names names_index name put + dup/LookupTables known{ + dup begin + new_LookupTables names_index LookupTables names_index get put + end + }{ + dup begin + new_LookupTables names_index[null null null null]put + end + }ifelse + }ifelse + /names_index names_index 1 add def + }forall + alias_cnt 0 gt{ + /AliasedColorants true def + /lut_entry_len new_LookupTables 0 get dup length 256 ge{0 get length}{length}ifelse def + 0 1 names_len 1 sub{ + /names_index xdf + new_LookupTables names_index get dup length 256 ge{0 get length}{length}ifelse lut_entry_len ne{ + /AliasedColorants false def + exit + }{ + new_LookupTables names_index get 0 get null eq{ + dup/Names get names_index get/name xdf + name(Cyan)eq name(Magenta)eq name(Yellow)eq name(Black)eq + or or or not{ + /AliasedColorants false def + exit + }if + }if + }ifelse + }for + lut_entry_len 1 eq{ + /AliasedColorants false def + 
}if + AliasedColorants{ + dup begin + /Names new_names def + /LookupTables new_LookupTables def + /AliasedColorants true def + /NComponents lut_entry_len def + /TintMethod NComponents 4 eq{/Subtractive}{/Additive}ifelse def + /MappedCSA TintMethod/Additive eq{/DeviceRGB}{/DeviceCMYK}ifelse def + currentdict/TTTablesIdx known not{ + /TTTablesIdx -1 def + }if + end + }if + }if + end + }if + dup/devicen_colorspace_dict exch AGMCORE_gput + begin + currentdict/AliasedColorants known{ + AliasedColorants + }{ + false + }ifelse + dup not{ + CSA map_csa + }if + /TintTransform load type/nulltype eq or{ + /TintTransform[ + 0 1 Names length 1 sub + { + /TTTablesIdx TTTablesIdx 1 add def + dup LookupTables exch get dup 0 get null eq + { + 1 index + Names exch get + dup(Cyan)eq + { + pop exch + LookupTables length exch sub + /index cvx + 0 0 0 + } + { + dup(Magenta)eq + { + pop exch + LookupTables length exch sub + /index cvx + 0/exch cvx 0 0 + }{ + (Yellow)eq + { + exch + LookupTables length exch sub + /index cvx + 0 0 3 -1/roll cvx 0 + }{ + exch + LookupTables length exch sub + /index cvx + 0 0 0 4 -1/roll cvx + }ifelse + }ifelse + }ifelse + 5 -1/roll cvx/astore cvx + }{ + dup length 1 sub + LookupTables length 4 -1 roll sub 1 add + /index cvx/mul cvx/round cvx/cvi cvx/get cvx + }ifelse + Names length TTTablesIdx add 1 add 1/roll cvx + }for + Names length[/pop cvx]cvx/repeat cvx + NComponents Names length + TintMethod/Subtractive eq + { + subtractive_blend + }{ + additive_blend + }ifelse + ]cvx bdf + }if + AGMCORE_host_sep{ + Names convert_to_process{ + exec_tint_transform + } + { + currentdict/AliasedColorants known{ + AliasedColorants not + }{ + false + }ifelse + 5 dict begin + /AvoidAliasedColorants xdf + /painted? 
false def + /names_index 0 def + /names_len Names length def + AvoidAliasedColorants{ + /currentspotalias current_spot_alias def + false set_spot_alias + }if + Names{ + AGMCORE_is_cmyk_sep{ + dup(Cyan)eq AGMCORE_cyan_plate and exch + dup(Magenta)eq AGMCORE_magenta_plate and exch + dup(Yellow)eq AGMCORE_yellow_plate and exch + (Black)eq AGMCORE_black_plate and or or or{ + /devicen_colorspace_dict AGMCORE_gget/TintProc[ + Names names_index/devn_makecustomcolor cvx + ]cvx ddf + /painted? true def + }if + painted?{exit}if + }{ + 0 0 0 0 5 -1 roll findcmykcustomcolor 1 setcustomcolor currentgray 0 eq{ + /devicen_colorspace_dict AGMCORE_gget/TintProc[ + Names names_index/devn_makecustomcolor cvx + ]cvx ddf + /painted? true def + exit + }if + }ifelse + /names_index names_index 1 add def + }forall + AvoidAliasedColorants{ + currentspotalias set_spot_alias + }if + painted?{ + /devicen_colorspace_dict AGMCORE_gget/names_index names_index put + }{ + /devicen_colorspace_dict AGMCORE_gget/TintProc[ + names_len[/pop cvx]cvx/repeat cvx 1/setseparationgray cvx + 0 0 0 0/setcmykcolor cvx + ]cvx ddf + }ifelse + end + }ifelse + } + { + AGMCORE_in_rip_sep{ + Names convert_to_process not + }{ + level3 + }ifelse + { + [/DeviceN Names MappedCSA/TintTransform load]setcolorspace_opt + /TintProc level3 not AGMCORE_in_rip_sep and{ + [ + Names/length cvx[/pop cvx]cvx/repeat cvx + ]cvx bdf + }{ + {setcolor}bdf + }ifelse + }{ + exec_tint_transform + }ifelse + }ifelse + set_crd + /AliasedColorants false def + end +}def +/setindexedcolorspace +{ + dup/indexed_colorspace_dict exch AGMCORE_gput + begin + currentdict/CSDBase known{ + CSDBase/CSD get_res begin + currentdict/Names known{ + currentdict devncs + }{ + 1 currentdict sepcs + }ifelse + AGMCORE_host_sep{ + 4 dict begin + /compCnt/Names where{pop Names length}{1}ifelse def + /NewLookup HiVal 1 add string def + 0 1 HiVal{ + /tableIndex xdf + Lookup dup type/stringtype eq{ + compCnt tableIndex map_index + }{ + exec + }ifelse + /Names where{ + 
pop setdevicencolor + }{ + setsepcolor + }ifelse + currentgray + tableIndex exch + 255 mul cvi + NewLookup 3 1 roll put + }for + [/Indexed currentcolorspace HiVal NewLookup]setcolorspace_opt + end + }{ + level3 + { + currentdict/Names known{ + [/Indexed[/DeviceN Names MappedCSA/TintTransform load]HiVal Lookup]setcolorspace_opt + }{ + [/Indexed[/Separation Name MappedCSA sep_proc_name load]HiVal Lookup]setcolorspace_opt + }ifelse + }{ + [/Indexed MappedCSA HiVal + [ + currentdict/Names known{ + Lookup dup type/stringtype eq + {/exch cvx CSDBase/CSD get_res/Names get length dup/mul cvx exch/getinterval cvx{255 div}/forall cvx} + {/exec cvx}ifelse + /TintTransform load/exec cvx + }{ + Lookup dup type/stringtype eq + {/exch cvx/get cvx 255/div cvx} + {/exec cvx}ifelse + CSDBase/CSD get_res/MappedCSA get sep_proc_name exch pop/load cvx/exec cvx + }ifelse + ]cvx + ]setcolorspace_opt + }ifelse + }ifelse + end + set_crd + } + { + CSA map_csa + AGMCORE_host_sep level2 not and{ + 0 0 0 0 setcmykcolor + }{ + [/Indexed MappedCSA + level2 not has_color not and{ + dup 0 get dup/DeviceRGB eq exch/DeviceCMYK eq or{ + pop[/DeviceGray] + }if + HiVal GrayLookup + }{ + HiVal + currentdict/RangeArray known{ + { + /indexed_colorspace_dict AGMCORE_gget begin + Lookup exch + dup HiVal gt{ + pop HiVal + }if + NComponents mul NComponents getinterval{}forall + NComponents 1 sub -1 0{ + RangeArray exch 2 mul 2 getinterval aload pop map255_to_range + NComponents 1 roll + }for + end + }bind + }{ + Lookup + }ifelse + }ifelse + ]setcolorspace_opt + set_crd + }ifelse + }ifelse + end +}def +/setindexedcolor +{ + AGMCORE_host_sep{ + /indexed_colorspace_dict AGMCORE_gget + begin + currentdict/CSDBase known{ + CSDBase/CSD get_res begin + currentdict/Names known{ + map_indexed_devn + devn + } + { + Lookup 1 3 -1 roll map_index + sep + }ifelse + end + }{ + Lookup MappedCSA/DeviceCMYK eq{4}{1}ifelse 3 -1 roll + map_index + MappedCSA/DeviceCMYK eq{setcmykcolor}{setgray}ifelse + }ifelse + end + }{ + level3 
not AGMCORE_in_rip_sep and/indexed_colorspace_dict AGMCORE_gget/CSDBase known and{ + /indexed_colorspace_dict AGMCORE_gget/CSDBase get/CSD get_res begin + map_indexed_devn + devn + end + } + { + setcolor + }ifelse + }ifelse +}def +/ignoreimagedata +{ + currentoverprint not{ + gsave + dup clonedict begin + 1 setgray + /Decode[0 1]def + /DataSourcedef + /MultipleDataSources false def + /BitsPerComponent 8 def + currentdict end + systemdict/image gx + grestore + }if + consumeimagedata +}def +/add_res +{ + dup/CSD eq{ + pop + //Adobe_AGM_Core begin + /AGMCORE_CSD_cache load 3 1 roll put + end + }{ + defineresource pop + }ifelse +}def +/del_res +{ + { + aload pop exch + dup/CSD eq{ + pop + {//Adobe_AGM_Core/AGMCORE_CSD_cache get exch undef}forall + }{ + exch + {1 index undefineresource}forall + pop + }ifelse + }forall +}def +/get_res +{ + dup/CSD eq{ + pop + dup type dup/nametype eq exch/stringtype eq or{ + AGMCORE_CSD_cache exch get + }if + }{ + findresource + }ifelse +}def +/get_csa_by_name +{ + dup type dup/nametype eq exch/stringtype eq or{ + /CSA get_res + }if +}def +/paintproc_buf_init +{ + /count get 0 0 put +}def +/paintproc_buf_next +{ + dup/count get dup 0 get + dup 3 1 roll + 1 add 0 xpt + get +}def +/cachepaintproc_compress +{ + 5 dict begin + currentfile exch 0 exch/SubFileDecode filter/ReadFilter exch def + /ppdict 20 dict def + /string_size 16000 def + /readbuffer string_size string def + currentglobal true setglobal + ppdict 1 array dup 0 1 put/count xpt + setglobal + /LZWFilter + { + exch + dup length 0 eq{ + pop + }{ + ppdict dup length 1 sub 3 -1 roll put + }ifelse + {string_size}{0}ifelse string + }/LZWEncode filter def + { + ReadFilter readbuffer readstring + exch LZWFilter exch writestring + not{exit}if + }loop + LZWFilter closefile + ppdict + end +}def +/cachepaintproc +{ + 2 dict begin + currentfile exch 0 exch/SubFileDecode filter/ReadFilter exch def + /ppdict 20 dict def + currentglobal true setglobal + ppdict 1 array dup 0 1 put/count xpt + 
setglobal + { + ReadFilter 16000 string readstring exch + ppdict dup length 1 sub 3 -1 roll put + not{exit}if + }loop + ppdict dup dup length 1 sub()put + end +}def +/make_pattern +{ + exch clonedict exch + dup matrix currentmatrix matrix concatmatrix 0 0 3 2 roll itransform + exch 3 index/XStep get 1 index exch 2 copy div cvi mul sub sub + exch 3 index/YStep get 1 index exch 2 copy div cvi mul sub sub + matrix translate exch matrix concatmatrix + 1 index begin + BBox 0 get XStep div cvi XStep mul/xshift exch neg def + BBox 1 get YStep div cvi YStep mul/yshift exch neg def + BBox 0 get xshift add + BBox 1 get yshift add + BBox 2 get xshift add + BBox 3 get yshift add + 4 array astore + /BBox exch def + [xshift yshift/translate load null/exec load]dup + 3/PaintProc load put cvx/PaintProc exch def + end + gsave 0 setgray + makepattern + grestore +}def +/set_pattern +{ + dup/PatternType get 1 eq{ + dup/PaintType get 1 eq{ + currentoverprint sop[/DeviceGray]setcolorspace 0 setgray + }if + }if + setpattern +}def +/setcolorspace_opt +{ + dup currentcolorspace eq{pop}{setcolorspace}ifelse +}def +/updatecolorrendering +{ + currentcolorrendering/RenderingIntent known{ + currentcolorrendering/RenderingIntent get + } + { + Intent/AbsoluteColorimetric eq + { + /absolute_colorimetric_crd AGMCORE_gget dup null eq + } + { + Intent/RelativeColorimetric eq + { + /relative_colorimetric_crd AGMCORE_gget dup null eq + } + { + Intent/Saturation eq + { + /saturation_crd AGMCORE_gget dup null eq + } + { + /perceptual_crd AGMCORE_gget dup null eq + }ifelse + }ifelse + }ifelse + { + pop null + } + { + /RenderingIntent known{null}{Intent}ifelse + }ifelse + }ifelse + Intent ne{ + Intent/ColorRendering{findresource}stopped + { + pop pop systemdict/findcolorrendering known + { + Intent findcolorrendering + { + /ColorRendering findresource true exch + } + { + /ColorRendering findresource + product(Xerox Phaser 5400)ne + exch + }ifelse + dup Intent/AbsoluteColorimetric eq + { + 
/absolute_colorimetric_crd exch AGMCORE_gput + } + { + Intent/RelativeColorimetric eq + { + /relative_colorimetric_crd exch AGMCORE_gput + } + { + Intent/Saturation eq + { + /saturation_crd exch AGMCORE_gput + } + { + Intent/Perceptual eq + { + /perceptual_crd exch AGMCORE_gput + } + { + pop + }ifelse + }ifelse + }ifelse + }ifelse + 1 index{exch}{pop}ifelse + } + {false}ifelse + } + {true}ifelse + { + dup begin + currentdict/TransformPQR known{ + currentdict/TransformPQR get aload pop + 3{{}eq 3 1 roll}repeat or or + } + {true}ifelse + currentdict/MatrixPQR known{ + currentdict/MatrixPQR get aload pop + 1.0 eq 9 1 roll 0.0 eq 9 1 roll 0.0 eq 9 1 roll + 0.0 eq 9 1 roll 1.0 eq 9 1 roll 0.0 eq 9 1 roll + 0.0 eq 9 1 roll 0.0 eq 9 1 roll 1.0 eq + and and and and and and and and + } + {true}ifelse + end + or + { + clonedict begin + /TransformPQR[ + {4 -1 roll 3 get dup 3 1 roll sub 5 -1 roll 3 get 3 -1 roll sub div + 3 -1 roll 3 get 3 -1 roll 3 get dup 4 1 roll sub mul add}bind + {4 -1 roll 4 get dup 3 1 roll sub 5 -1 roll 4 get 3 -1 roll sub div + 3 -1 roll 4 get 3 -1 roll 4 get dup 4 1 roll sub mul add}bind + {4 -1 roll 5 get dup 3 1 roll sub 5 -1 roll 5 get 3 -1 roll sub div + 3 -1 roll 5 get 3 -1 roll 5 get dup 4 1 roll sub mul add}bind + ]def + /MatrixPQR[0.8951 -0.7502 0.0389 0.2664 1.7135 -0.0685 -0.1614 0.0367 1.0296]def + /RangePQR[-0.3227950745 2.3229645538 -1.5003771057 3.5003465881 -0.1369979095 2.136967392]def + currentdict end + }if + setcolorrendering_opt + }if + }if +}def +/set_crd +{ + AGMCORE_host_sep not level2 and{ + currentdict/ColorRendering known{ + ColorRendering/ColorRendering{findresource}stopped not{setcolorrendering_opt}if + }{ + currentdict/Intent known{ + updatecolorrendering + }if + }ifelse + currentcolorspace dup type/arraytype eq + {0 get}if + /DeviceRGB eq + { + currentdict/UCR known + {/UCR}{/AGMCORE_currentucr}ifelse + load setundercolorremoval + currentdict/BG known + {/BG}{/AGMCORE_currentbg}ifelse + load setblackgeneration + }if + 
}if +}def +/set_ucrbg +{ + dup null eq{pop/AGMCORE_currentbg load}{/Procedure get_res}ifelse setblackgeneration + dup null eq{pop/AGMCORE_currentucr load}{/Procedure get_res}ifelse setundercolorremoval +}def +/setcolorrendering_opt +{ + dup currentcolorrendering eq{ + pop + }{ + clonedict + begin + /Intent Intent def + currentdict + end + setcolorrendering + }ifelse +}def +/cpaint_gcomp +{ + convert_to_process//Adobe_AGM_Core/AGMCORE_ConvertToProcess xddf + //Adobe_AGM_Core/AGMCORE_ConvertToProcess get not + { + (%end_cpaint_gcomp)flushinput + }if +}def +/cpaint_gsep +{ + //Adobe_AGM_Core/AGMCORE_ConvertToProcess get + { + (%end_cpaint_gsep)flushinput + }if +}def +/cpaint_gend +{np}def +/T1_path +{ + currentfile token pop currentfile token pop mo + { + currentfile token pop dup type/stringtype eq + {pop exit}if + 0 exch rlineto + currentfile token pop dup type/stringtype eq + {pop exit}if + 0 rlineto + }loop +}def +/T1_gsave + level3 + {/clipsave} + {/gsave}ifelse + load def +/T1_grestore + level3 + {/cliprestore} + {/grestore}ifelse + load def +/set_spot_alias_ary +{ + dup inherit_aliases + //Adobe_AGM_Core/AGMCORE_SpotAliasAry xddf +}def +/set_spot_normalization_ary +{ + dup inherit_aliases + dup length + /AGMCORE_SpotAliasAry where{pop AGMCORE_SpotAliasAry length add}if + array + //Adobe_AGM_Core/AGMCORE_SpotAliasAry2 xddf + /AGMCORE_SpotAliasAry where{ + pop + AGMCORE_SpotAliasAry2 0 AGMCORE_SpotAliasAry putinterval + AGMCORE_SpotAliasAry length + }{0}ifelse + AGMCORE_SpotAliasAry2 3 1 roll exch putinterval + true set_spot_alias +}def +/inherit_aliases +{ + {dup/Name get map_alias{/CSD put}{pop}ifelse}forall +}def +/set_spot_alias +{ + /AGMCORE_SpotAliasAry2 where{ + /AGMCORE_current_spot_alias 3 -1 roll put + }{ + pop + }ifelse +}def +/current_spot_alias +{ + /AGMCORE_SpotAliasAry2 where{ + /AGMCORE_current_spot_alias get + }{ + false + }ifelse +}def +/map_alias +{ + /AGMCORE_SpotAliasAry2 where{ + begin + /AGMCORE_name xdf + false + AGMCORE_SpotAliasAry2{ + 
dup/Name get AGMCORE_name eq{ + /CSD get/CSD get_res + exch pop true + exit + }{ + pop + }ifelse + }forall + end + }{ + pop false + }ifelse +}bdf +/spot_alias +{ + true set_spot_alias + /AGMCORE_&setcustomcolor AGMCORE_key_known not{ + //Adobe_AGM_Core/AGMCORE_&setcustomcolor/setcustomcolor load put + }if + /customcolor_tint 1 AGMCORE_gput + //Adobe_AGM_Core begin + /setcustomcolor + { + //Adobe_AGM_Core begin + dup/customcolor_tint exch AGMCORE_gput + 1 index aload pop pop 1 eq exch 1 eq and exch 1 eq and exch 1 eq and not + current_spot_alias and{1 index 4 get map_alias}{false}ifelse + { + false set_spot_alias + /sep_colorspace_dict AGMCORE_gget null ne + 3 1 roll 2 index{ + exch pop/sep_tint AGMCORE_gget exch + }if + mark 3 1 roll + setsepcolorspace + counttomark 0 ne{ + setsepcolor + }if + pop + not{/sep_tint 1.0 AGMCORE_gput}if + pop + true set_spot_alias + }{ + AGMCORE_&setcustomcolor + }ifelse + end + }bdf + end +}def +/begin_feature +{ + Adobe_AGM_Core/AGMCORE_feature_dictCount countdictstack put + count Adobe_AGM_Core/AGMCORE_feature_opCount 3 -1 roll put + {Adobe_AGM_Core/AGMCORE_feature_ctm matrix currentmatrix put}if +}def +/end_feature +{ + 2 dict begin + /spd/setpagedevice load def + /setpagedevice{get_gstate spd set_gstate}def + stopped{$error/newerror false put}if + end + count Adobe_AGM_Core/AGMCORE_feature_opCount get sub dup 0 gt{{pop}repeat}{pop}ifelse + countdictstack Adobe_AGM_Core/AGMCORE_feature_dictCount get sub dup 0 gt{{end}repeat}{pop}ifelse + {Adobe_AGM_Core/AGMCORE_feature_ctm get setmatrix}if +}def +/set_negative +{ + //Adobe_AGM_Core begin + /AGMCORE_inverting exch def + level2{ + currentpagedevice/NegativePrint known AGMCORE_distilling not and{ + currentpagedevice/NegativePrint get//Adobe_AGM_Core/AGMCORE_inverting get ne{ + true begin_feature true{ + <>setpagedevice + }end_feature + }if + /AGMCORE_inverting false def + }if + }if + AGMCORE_inverting{ + [{1 exch sub}/exec load dup currenttransfer exch]cvx bind settransfer + 
AGMCORE_distilling{ + erasepage + }{ + gsave np clippath 1/setseparationgray where{pop setseparationgray}{setgray}ifelse + /AGMIRS_&fill where{pop AGMIRS_&fill}{fill}ifelse grestore + }ifelse + }if + end +}def +/lw_save_restore_override{ + /md where{ + pop + md begin + initializepage + /initializepage{}def + /pmSVsetup{}def + /endp{}def + /pse{}def + /psb{}def + /orig_showpage where + {pop} + {/orig_showpage/showpage load def} + ifelse + /showpage{orig_showpage gR}def + end + }if +}def +/pscript_showpage_override{ + /NTPSOct95 where + { + begin + showpage + save + /showpage/restore load def + /restore{exch pop}def + end + }if +}def +/driver_media_override +{ + /md where{ + pop + md/initializepage known{ + md/initializepage{}put + }if + md/rC known{ + md/rC{4{pop}repeat}put + }if + }if + /mysetup where{ + /mysetup[1 0 0 1 0 0]put + }if + Adobe_AGM_Core/AGMCORE_Default_CTM matrix currentmatrix put + level2 + {Adobe_AGM_Core/AGMCORE_Default_PageSize currentpagedevice/PageSize get put}if +}def +/driver_check_media_override +{ + /PrepsDict where + {pop} + { + Adobe_AGM_Core/AGMCORE_Default_CTM get matrix currentmatrix ne + Adobe_AGM_Core/AGMCORE_Default_PageSize get type/arraytype eq + { + Adobe_AGM_Core/AGMCORE_Default_PageSize get 0 get currentpagedevice/PageSize get 0 get eq and + Adobe_AGM_Core/AGMCORE_Default_PageSize get 1 get currentpagedevice/PageSize get 1 get eq and + }if + { + Adobe_AGM_Core/AGMCORE_Default_CTM get setmatrix + }if + }ifelse +}def +AGMCORE_err_strings begin + /AGMCORE_bad_environ(Environment not satisfactory for this job. Ensure that the PPD is correct or that the PostScript level requested is supported by this printer. )def + /AGMCORE_color_space_onhost_seps(This job contains colors that will not separate with on-host methods. )def + /AGMCORE_invalid_color_space(This job contains an invalid color space. 
)def +end +/set_def_ht +{AGMCORE_def_ht sethalftone}def +/set_def_flat +{AGMCORE_Default_flatness setflat}def +end +systemdict/setpacking known +{setpacking}if +%%EndResource +%%BeginResource: procset Adobe_CoolType_Core 2.31 0 %%Copyright: Copyright 1997-2006 Adobe Systems Incorporated. All Rights Reserved. %%Version: 2.31 0 10 dict begin /Adobe_CoolType_Passthru currentdict def /Adobe_CoolType_Core_Defined userdict/Adobe_CoolType_Core known def Adobe_CoolType_Core_Defined {/Adobe_CoolType_Core userdict/Adobe_CoolType_Core get def} if userdict/Adobe_CoolType_Core 70 dict dup begin put /Adobe_CoolType_Version 2.31 def /Level2? systemdict/languagelevel known dup {pop systemdict/languagelevel get 2 ge} if def Level2? not { /currentglobal false def /setglobal/pop load def /gcheck{pop false}bind def /currentpacking false def /setpacking/pop load def /SharedFontDirectory 0 dict def } if currentpacking true setpacking currentglobal false setglobal userdict/Adobe_CoolType_Data 2 copy known not {2 copy 10 dict put} if get begin /@opStackCountByLevel 32 dict def /@opStackLevel 0 def /@dictStackCountByLevel 32 dict def /@dictStackLevel 0 def end setglobal currentglobal true setglobal userdict/Adobe_CoolType_GVMFonts known not {userdict/Adobe_CoolType_GVMFonts 10 dict put} if setglobal currentglobal false setglobal userdict/Adobe_CoolType_LVMFonts known not {userdict/Adobe_CoolType_LVMFonts 10 dict put} if setglobal /ct_VMDictPut { dup gcheck{Adobe_CoolType_GVMFonts}{Adobe_CoolType_LVMFonts}ifelse 3 1 roll put }bind def /ct_VMDictUndef { dup Adobe_CoolType_GVMFonts exch known {Adobe_CoolType_GVMFonts exch undef} { dup Adobe_CoolType_LVMFonts exch known {Adobe_CoolType_LVMFonts exch undef} {pop} ifelse }ifelse }bind def /ct_str1 1 string def /ct_xshow { /_ct_na exch def /_ct_i 0 def currentpoint /_ct_y exch def /_ct_x exch def { pop pop ct_str1 exch 0 exch put ct_str1 show {_ct_na _ct_i get}stopped {pop pop} { _ct_x _ct_y moveto 0 rmoveto } ifelse /_ct_i _ct_i 1 add def 
currentpoint /_ct_y exch def /_ct_x exch def } exch @cshow }bind def /ct_yshow { /_ct_na exch def /_ct_i 0 def currentpoint /_ct_y exch def /_ct_x exch def { pop pop ct_str1 exch 0 exch put ct_str1 show {_ct_na _ct_i get}stopped {pop pop} { _ct_x _ct_y moveto 0 exch rmoveto } ifelse /_ct_i _ct_i 1 add def currentpoint /_ct_y exch def /_ct_x exch def } exch @cshow }bind def /ct_xyshow { /_ct_na exch def /_ct_i 0 def currentpoint /_ct_y exch def /_ct_x exch def { pop pop ct_str1 exch 0 exch put ct_str1 show {_ct_na _ct_i get}stopped {pop pop} { {_ct_na _ct_i 1 add get}stopped {pop pop pop} { _ct_x _ct_y moveto rmoveto } ifelse } ifelse /_ct_i _ct_i 2 add def currentpoint /_ct_y exch def /_ct_x exch def } exch @cshow }bind def /xsh{{@xshow}stopped{Adobe_CoolType_Data begin ct_xshow end}if}bind def /ysh{{@yshow}stopped{Adobe_CoolType_Data begin ct_yshow end}if}bind def /xysh{{@xyshow}stopped{Adobe_CoolType_Data begin ct_xyshow end}if}bind def currentglobal true setglobal /ct_T3Defs { /BuildChar { 1 index/Encoding get exch get 1 index/BuildGlyph get exec }bind def /BuildGlyph { exch begin GlyphProcs exch get exec end }bind def }bind def setglobal /@_SaveStackLevels { Adobe_CoolType_Data begin /@vmState currentglobal def false setglobal @opStackCountByLevel @opStackLevel 2 copy known not { 2 copy 3 dict dup/args 7 index 5 add array put put get } { get dup/args get dup length 3 index lt { dup length 5 add array exch 1 index exch 0 exch putinterval 1 index exch/args exch put } {pop} ifelse } ifelse begin count 1 sub 1 index lt {pop count} if dup/argCount exch def dup 0 gt { args exch 0 exch getinterval astore pop } {pop} ifelse count /restCount exch def end /@opStackLevel @opStackLevel 1 add def countdictstack 1 sub @dictStackCountByLevel exch @dictStackLevel exch put /@dictStackLevel @dictStackLevel 1 add def @vmState setglobal end }bind def /@_RestoreStackLevels { Adobe_CoolType_Data begin /@opStackLevel @opStackLevel 1 sub def @opStackCountByLevel @opStackLevel get 
begin count restCount sub dup 0 gt {{pop}repeat} {pop} ifelse args 0 argCount getinterval{}forall end /@dictStackLevel @dictStackLevel 1 sub def @dictStackCountByLevel @dictStackLevel get end countdictstack exch sub dup 0 gt {{end}repeat} {pop} ifelse }bind def /@_PopStackLevels { Adobe_CoolType_Data begin /@opStackLevel @opStackLevel 1 sub def /@dictStackLevel @dictStackLevel 1 sub def end }bind def /@Raise { exch cvx exch errordict exch get exec stop }bind def /@ReRaise { cvx $error/errorname get errordict exch get exec stop }bind def /@Stopped { 0 @#Stopped }bind def /@#Stopped { @_SaveStackLevels stopped {@_RestoreStackLevels true} {@_PopStackLevels false} ifelse }bind def /@Arg { Adobe_CoolType_Data begin @opStackCountByLevel @opStackLevel 1 sub get begin args exch argCount 1 sub exch sub get end end }bind def currentglobal true setglobal /CTHasResourceForAllBug Level2? { 1 dict dup /@shouldNotDisappearDictValue true def Adobe_CoolType_Data exch/@shouldNotDisappearDict exch put begin count @_SaveStackLevels {(*){pop stop}128 string/Category resourceforall} stopped pop @_RestoreStackLevels currentdict Adobe_CoolType_Data/@shouldNotDisappearDict get dup 3 1 roll ne dup 3 1 roll { /@shouldNotDisappearDictValue known { { end currentdict 1 index eq {pop exit} if } loop } if } { pop end } ifelse } {false} ifelse def true setglobal /CTHasResourceStatusBug Level2? { mark {/steveamerige/Category resourcestatus} stopped {cleartomark true} {cleartomark currentglobal not} ifelse } {false} ifelse def setglobal /CTResourceStatus { mark 3 1 roll /Category findresource begin ({ResourceStatus}stopped)0()/SubFileDecode filter cvx exec {cleartomark false} {{3 2 roll pop true}{cleartomark false}ifelse} ifelse end }bind def /CTWorkAroundBugs { Level2? 
{ /cid_PreLoad/ProcSet resourcestatus { pop pop currentglobal mark { (*) { dup/CMap CTHasResourceStatusBug {CTResourceStatus} {resourcestatus} ifelse { pop dup 0 eq exch 1 eq or { dup/CMap findresource gcheck setglobal /CMap undefineresource } { pop CTHasResourceForAllBug {exit} {stop} ifelse } ifelse } {pop} ifelse } 128 string/CMap resourceforall } stopped {cleartomark} stopped pop setglobal } if } if }bind def /ds { Adobe_CoolType_Core begin CTWorkAroundBugs /mo/moveto load def /nf/newencodedfont load def /msf{makefont setfont}bind def /uf{dup undefinefont ct_VMDictUndef}bind def /ur/undefineresource load def /chp/charpath load def /awsh/awidthshow load def /wsh/widthshow load def /ash/ashow load def /@xshow/xshow load def /@yshow/yshow load def /@xyshow/xyshow load def /@cshow/cshow load def /sh/show load def /rp/repeat load def /.n/.notdef def end currentglobal false setglobal userdict/Adobe_CoolType_Data 2 copy known not {2 copy 10 dict put} if get begin /AddWidths? false def /CC 0 def /charcode 2 string def /@opStackCountByLevel 32 dict def /@opStackLevel 0 def /@dictStackCountByLevel 32 dict def /@dictStackLevel 0 def /InVMFontsByCMap 10 dict def /InVMDeepCopiedFonts 10 dict def end setglobal }bind def /dt { currentdict Adobe_CoolType_Core eq {end} if }bind def /ps { Adobe_CoolType_Core begin Adobe_CoolType_GVMFonts begin Adobe_CoolType_LVMFonts begin SharedFontDirectory begin }bind def /pt { end end end end }bind def /unload { systemdict/languagelevel known { systemdict/languagelevel get 2 ge { userdict/Adobe_CoolType_Core 2 copy known {undef} {pop pop} ifelse } if } if }bind def /ndf { 1 index where {pop pop pop} {dup xcheck{bind}if def} ifelse }def /findfont systemdict begin userdict begin /globaldict where{/globaldict get begin}if dup where pop exch get /globaldict where{pop end}if end end Adobe_CoolType_Core_Defined {/systemfindfont exch def} { /findfont 1 index def /systemfindfont exch def } ifelse /undefinefont {pop}ndf /copyfont { currentglobal 3 1 
roll 1 index gcheck setglobal dup null eq{0}{dup length}ifelse 2 index length add 1 add dict begin exch { 1 index/FID eq {pop pop} {def} ifelse } forall dup null eq {pop} {{def}forall} ifelse currentdict end exch setglobal }bind def /copyarray { currentglobal exch dup gcheck setglobal dup length array copy exch setglobal }bind def /newencodedfont { currentglobal { SharedFontDirectory 3 index known {SharedFontDirectory 3 index get/FontReferenced known} {false} ifelse } { FontDirectory 3 index known {FontDirectory 3 index get/FontReferenced known} { SharedFontDirectory 3 index known {SharedFontDirectory 3 index get/FontReferenced known} {false} ifelse } ifelse } ifelse dup { 3 index findfont/FontReferenced get 2 index dup type/nametype eq {findfont} if ne {pop false} if } if dup { 1 index dup type/nametype eq {findfont} if dup/CharStrings known { /CharStrings get length 4 index findfont/CharStrings get length ne { pop false } if } {pop} ifelse } if { pop 1 index findfont /Encoding get exch 0 1 255 {2 copy get 3 index 3 1 roll put} for pop pop pop } { currentglobal 4 1 roll dup type/nametype eq {findfont} if dup gcheck setglobal dup dup maxlength 2 add dict begin exch { 1 index/FID ne 2 index/Encoding ne and {def} {pop pop} ifelse } forall /FontReferenced exch def /Encoding exch dup length array copy def /FontName 1 index dup type/stringtype eq{cvn}if def dup currentdict end definefont ct_VMDictPut setglobal } ifelse }bind def /SetSubstituteStrategy { $SubstituteFont begin dup type/dicttype ne {0 dict} if currentdict/$Strategies known { exch $Strategies exch 2 copy known { get 2 copy maxlength exch maxlength add dict begin {def}forall {def}forall currentdict dup/$Init known {dup/$Init get exec} if end /$Strategy exch def } {pop pop pop} ifelse } {pop pop} ifelse end }bind def /scff { $SubstituteFont begin dup type/stringtype eq {dup length exch} {null} ifelse /$sname exch def /$slen exch def /$inVMIndex $sname null eq { 1 index $str cvs dup length $slen sub $slen 
getinterval cvn } {$sname} ifelse def end {findfont} @Stopped { dup length 8 add string exch 1 index 0(BadFont:)putinterval 1 index exch 8 exch dup length string cvs putinterval cvn {findfont} @Stopped {pop/Courier findfont} if } if $SubstituteFont begin /$sname null def /$slen 0 def /$inVMIndex null def end }bind def /isWidthsOnlyFont { dup/WidthsOnly known {pop pop true} { dup/FDepVector known {/FDepVector get{isWidthsOnlyFont dup{exit}if}forall} { dup/FDArray known {/FDArray get{isWidthsOnlyFont dup{exit}if}forall} {pop} ifelse } ifelse } ifelse }bind def /ct_StyleDicts 4 dict dup begin /Adobe-Japan1 4 dict dup begin Level2? { /Serif /HeiseiMin-W3-83pv-RKSJ-H/Font resourcestatus {pop pop/HeiseiMin-W3} { /CIDFont/Category resourcestatus { pop pop /HeiseiMin-W3/CIDFont resourcestatus {pop pop/HeiseiMin-W3} {/Ryumin-Light} ifelse } {/Ryumin-Light} ifelse } ifelse def /SansSerif /HeiseiKakuGo-W5-83pv-RKSJ-H/Font resourcestatus {pop pop/HeiseiKakuGo-W5} { /CIDFont/Category resourcestatus { pop pop /HeiseiKakuGo-W5/CIDFont resourcestatus {pop pop/HeiseiKakuGo-W5} {/GothicBBB-Medium} ifelse } {/GothicBBB-Medium} ifelse } ifelse def /HeiseiMaruGo-W4-83pv-RKSJ-H/Font resourcestatus {pop pop/HeiseiMaruGo-W4} { /CIDFont/Category resourcestatus { pop pop /HeiseiMaruGo-W4/CIDFont resourcestatus {pop pop/HeiseiMaruGo-W4} { /Jun101-Light-RKSJ-H/Font resourcestatus {pop pop/Jun101-Light} {SansSerif} ifelse } ifelse } { /Jun101-Light-RKSJ-H/Font resourcestatus {pop pop/Jun101-Light} {SansSerif} ifelse } ifelse } ifelse /RoundSansSerif exch def /Default Serif def } { /Serif/Ryumin-Light def /SansSerif/GothicBBB-Medium def { (fonts/Jun101-Light-83pv-RKSJ-H)status }stopped {pop}{ {pop pop pop pop/Jun101-Light} {SansSerif} ifelse /RoundSansSerif exch def }ifelse /Default Serif def } ifelse end def /Adobe-Korea1 4 dict dup begin /Serif/HYSMyeongJo-Medium def /SansSerif/HYGoThic-Medium def /RoundSansSerif SansSerif def /Default Serif def end def /Adobe-GB1 4 dict dup begin 
/Serif/STSong-Light def /SansSerif/STHeiti-Regular def /RoundSansSerif SansSerif def /Default Serif def end def /Adobe-CNS1 4 dict dup begin /Serif/MKai-Medium def /SansSerif/MHei-Medium def /RoundSansSerif SansSerif def /Default Serif def end def end def Level2?{currentglobal true setglobal}if /ct_BoldRomanWidthProc { stringwidth 1 index 0 ne{exch .03 add exch}if setcharwidth 0 0 }bind def /ct_Type0WidthProc { dup stringwidth 0 0 moveto 2 index true charpath pathbbox 0 -1 7 index 2 div .88 setcachedevice2 pop 0 0 }bind def /ct_Type0WMode1WidthProc { dup stringwidth pop 2 div neg -0.88 2 copy moveto 0 -1 5 -1 roll true charpath pathbbox setcachedevice }bind def /cHexEncoding [/c00/c01/c02/c03/c04/c05/c06/c07/c08/c09/c0A/c0B/c0C/c0D/c0E/c0F/c10/c11/c12 /c13/c14/c15/c16/c17/c18/c19/c1A/c1B/c1C/c1D/c1E/c1F/c20/c21/c22/c23/c24/c25 /c26/c27/c28/c29/c2A/c2B/c2C/c2D/c2E/c2F/c30/c31/c32/c33/c34/c35/c36/c37/c38 /c39/c3A/c3B/c3C/c3D/c3E/c3F/c40/c41/c42/c43/c44/c45/c46/c47/c48/c49/c4A/c4B /c4C/c4D/c4E/c4F/c50/c51/c52/c53/c54/c55/c56/c57/c58/c59/c5A/c5B/c5C/c5D/c5E /c5F/c60/c61/c62/c63/c64/c65/c66/c67/c68/c69/c6A/c6B/c6C/c6D/c6E/c6F/c70/c71 /c72/c73/c74/c75/c76/c77/c78/c79/c7A/c7B/c7C/c7D/c7E/c7F/c80/c81/c82/c83/c84 /c85/c86/c87/c88/c89/c8A/c8B/c8C/c8D/c8E/c8F/c90/c91/c92/c93/c94/c95/c96/c97 /c98/c99/c9A/c9B/c9C/c9D/c9E/c9F/cA0/cA1/cA2/cA3/cA4/cA5/cA6/cA7/cA8/cA9/cAA /cAB/cAC/cAD/cAE/cAF/cB0/cB1/cB2/cB3/cB4/cB5/cB6/cB7/cB8/cB9/cBA/cBB/cBC/cBD /cBE/cBF/cC0/cC1/cC2/cC3/cC4/cC5/cC6/cC7/cC8/cC9/cCA/cCB/cCC/cCD/cCE/cCF/cD0 /cD1/cD2/cD3/cD4/cD5/cD6/cD7/cD8/cD9/cDA/cDB/cDC/cDD/cDE/cDF/cE0/cE1/cE2/cE3 /cE4/cE5/cE6/cE7/cE8/cE9/cEA/cEB/cEC/cED/cEE/cEF/cF0/cF1/cF2/cF3/cF4/cF5/cF6 /cF7/cF8/cF9/cFA/cFB/cFC/cFD/cFE/cFF]def /ct_BoldBaseFont 11 dict begin /FontType 3 def /FontMatrix[1 0 0 1 0 0]def /FontBBox[0 0 1 1]def /Encoding cHexEncoding def /_setwidthProc/ct_BoldRomanWidthProc load def /_bcstr1 1 string def /BuildChar { exch begin _basefont setfont _bcstr1 dup 0 4 -1 roll put dup 
_setwidthProc 3 copy moveto show _basefonto setfont moveto show end }bind def currentdict end def systemdict/composefont known { /ct_DefineIdentity-H { /Identity-H/CMap resourcestatus { pop pop } { /CIDInit/ProcSet findresource begin 12 dict begin begincmap /CIDSystemInfo 3 dict dup begin /Registry(Adobe)def /Ordering(Identity)def /Supplement 0 def end def /CMapName/Identity-H def /CMapVersion 1.000 def /CMapType 1 def 1 begincodespacerange <0000> endcodespacerange 1 begincidrange <0000>0 endcidrange endcmap CMapName currentdict/CMap defineresource pop end end } ifelse } def /ct_BoldBaseCIDFont 11 dict begin /CIDFontType 1 def /CIDFontName/ct_BoldBaseCIDFont def /FontMatrix[1 0 0 1 0 0]def /FontBBox[0 0 1 1]def /_setwidthProc/ct_Type0WidthProc load def /_bcstr2 2 string def /BuildGlyph { exch begin _basefont setfont _bcstr2 1 2 index 256 mod put _bcstr2 0 3 -1 roll 256 idiv put _bcstr2 dup _setwidthProc 3 copy moveto show _basefonto setfont moveto show end }bind def currentdict end def }if Level2?{setglobal}if /ct_CopyFont{ { 1 index/FID ne 2 index/UniqueID ne and {def}{pop pop}ifelse }forall }bind def /ct_Type0CopyFont { exch dup length dict begin ct_CopyFont [ exch FDepVector { dup/FontType get 0 eq { 1 index ct_Type0CopyFont /_ctType0 exch definefont } { /_ctBaseFont exch 2 index exec } ifelse exch } forall pop ] /FDepVector exch def currentdict end }bind def /ct_MakeBoldFont { dup/ct_SyntheticBold known { dup length 3 add dict begin ct_CopyFont /ct_StrokeWidth .03 0 FontMatrix idtransform pop def /ct_SyntheticBold true def currentdict end definefont } { dup dup length 3 add dict begin ct_CopyFont /PaintType 2 def /StrokeWidth .03 0 FontMatrix idtransform pop def /dummybold currentdict end definefont dup/FontType get dup 9 ge exch 11 le and { ct_BoldBaseCIDFont dup length 3 add dict copy begin dup/CIDSystemInfo get/CIDSystemInfo exch def ct_DefineIdentity-H /_Type0Identity/Identity-H 3 -1 roll[exch]composefont /_basefont exch def /_Type0Identity/Identity-H 3 -1 
roll[exch]composefont /_basefonto exch def currentdict end /CIDFont defineresource } { ct_BoldBaseFont dup length 3 add dict copy begin /_basefont exch def /_basefonto exch def currentdict end definefont } ifelse } ifelse }bind def /ct_MakeBold{ 1 index 1 index findfont currentglobal 5 1 roll dup gcheck setglobal dup /FontType get 0 eq { dup/WMode known{dup/WMode get 1 eq}{false}ifelse version length 4 ge and {version 0 4 getinterval cvi 2015 ge} {true} ifelse {/ct_Type0WidthProc} {/ct_Type0WMode1WidthProc} ifelse ct_BoldBaseFont/_setwidthProc 3 -1 roll load put {ct_MakeBoldFont}ct_Type0CopyFont definefont } { dup/_fauxfont known not 1 index/SubstMaster known not and { ct_BoldBaseFont/_setwidthProc /ct_BoldRomanWidthProc load put ct_MakeBoldFont } { 2 index 2 index eq {exch pop } { dup length dict begin ct_CopyFont currentdict end definefont } ifelse } ifelse } ifelse pop pop pop setglobal }bind def /?str1 256 string def /?set { $SubstituteFont begin /$substituteFound false def /$fontname 1 index def /$doSmartSub false def end dup findfont $SubstituteFont begin $substituteFound {false} { dup/FontName known { dup/FontName get $fontname eq 1 index/DistillerFauxFont known not and /currentdistillerparams where {pop false 2 index isWidthsOnlyFont not and} if } {false} ifelse } ifelse exch pop /$doSmartSub true def end { 5 1 roll pop pop pop pop findfont } { 1 index findfont dup/FontType get 3 eq { 6 1 roll pop pop pop pop pop false } {pop true} ifelse { $SubstituteFont begin pop pop /$styleArray 1 index def /$regOrdering 2 index def pop pop 0 1 $styleArray length 1 sub { $styleArray exch get ct_StyleDicts $regOrdering 2 copy known { get exch 2 copy known not {pop/Default} if get dup type/nametype eq { ?str1 cvs length dup 1 add exch ?str1 exch(-)putinterval exch dup length exch ?str1 exch 3 index exch putinterval add ?str1 exch 0 exch getinterval cvn } { pop pop/Unknown } ifelse } { pop pop pop pop/Unknown } ifelse } for end findfont }if } ifelse currentglobal false 
setglobal 3 1 roll null copyfont definefont pop setglobal }bind def setpacking userdict/$SubstituteFont 25 dict put 1 dict begin /SubstituteFont dup $error exch 2 copy known {get} {pop pop{pop/Courier}bind} ifelse def /currentdistillerparams where dup { pop pop currentdistillerparams/CannotEmbedFontPolicy 2 copy known {get/Error eq} {pop pop false} ifelse } if not { countdictstack array dictstack 0 get begin userdict begin $SubstituteFont begin /$str 128 string def /$fontpat 128 string def /$slen 0 def /$sname null def /$match false def /$fontname null def /$substituteFound false def /$inVMIndex null def /$doSmartSub true def /$depth 0 def /$fontname null def /$italicangle 26.5 def /$dstack null def /$Strategies 10 dict dup begin /$Type3Underprint { currentglobal exch false setglobal 11 dict begin /UseFont exch $WMode 0 ne { dup length dict copy dup/WMode $WMode put /UseFont exch definefont } if def /FontName $fontname dup type/stringtype eq{cvn}if def /FontType 3 def /FontMatrix[.001 0 0 .001 0 0]def /Encoding 256 array dup 0 1 255{/.notdef put dup}for pop def /FontBBox[0 0 0 0]def /CCInfo 7 dict dup begin /cc null def /x 0 def /y 0 def end def /BuildChar { exch begin CCInfo begin 1 string dup 0 3 index put exch pop /cc exch def UseFont 1000 scalefont setfont cc stringwidth/y exch def/x exch def x y setcharwidth $SubstituteFont/$Strategy get/$Underprint get exec 0 0 moveto cc show x y moveto end end }bind def currentdict end exch setglobal }bind def /$GetaTint 2 dict dup begin /$BuildFont { dup/WMode known {dup/WMode get} {0} ifelse /$WMode exch def $fontname exch dup/FontName known { dup/FontName get dup type/stringtype eq{cvn}if } {/unnamedfont} ifelse exch Adobe_CoolType_Data/InVMDeepCopiedFonts get 1 index/FontName get known { pop Adobe_CoolType_Data/InVMDeepCopiedFonts get 1 index get null copyfont } {$deepcopyfont} ifelse exch 1 index exch/FontBasedOn exch put dup/FontName $fontname dup type/stringtype eq{cvn}if put definefont 
Adobe_CoolType_Data/InVMDeepCopiedFonts get begin dup/FontBasedOn get 1 index def end }bind def /$Underprint { gsave x abs y abs gt {/y 1000 def} {/x -1000 def 500 120 translate} ifelse Level2? { [/Separation(All)/DeviceCMYK{0 0 0 1 pop}] setcolorspace } {0 setgray} ifelse 10 setlinewidth x .8 mul [7 3] { y mul 8 div 120 sub x 10 div exch moveto 0 y 4 div neg rlineto dup 0 rlineto 0 y 4 div rlineto closepath gsave Level2? {.2 setcolor} {.8 setgray} ifelse fill grestore stroke } forall pop grestore }bind def end def /$Oblique 1 dict dup begin /$BuildFont { currentglobal exch dup gcheck setglobal null copyfont begin /FontBasedOn currentdict/FontName known { FontName dup type/stringtype eq{cvn}if } {/unnamedfont} ifelse def /FontName $fontname dup type/stringtype eq{cvn}if def /currentdistillerparams where {pop} { /FontInfo currentdict/FontInfo known {FontInfo null copyfont} {2 dict} ifelse dup begin /ItalicAngle $italicangle def /FontMatrix FontMatrix [1 0 ItalicAngle dup sin exch cos div 1 0 0] matrix concatmatrix readonly end 4 2 roll def def } ifelse FontName currentdict end definefont exch setglobal }bind def end def /$None 1 dict dup begin /$BuildFont{}bind def end def end def /$Oblique SetSubstituteStrategy /$findfontByEnum { dup type/stringtype eq{cvn}if dup/$fontname exch def $sname null eq {$str cvs dup length $slen sub $slen getinterval} {pop $sname} ifelse $fontpat dup 0(fonts/*)putinterval exch 7 exch putinterval /$match false def $SubstituteFont/$dstack countdictstack array dictstack put mark { $fontpat 0 $slen 7 add getinterval {/$match exch def exit} $str filenameforall } stopped { cleardictstack currentdict true $SubstituteFont/$dstack get { exch { 1 index eq {pop false} {true} ifelse } {begin false} ifelse } forall pop } if cleartomark /$slen 0 def $match false ne {$match(fonts/)anchorsearch pop pop cvn} {/Courier} ifelse }bind def /$ROS 1 dict dup begin /Adobe 4 dict dup begin /Japan1 [/Ryumin-Light/HeiseiMin-W3 /GothicBBB-Medium/HeiseiKakuGo-W5 
/HeiseiMaruGo-W4/Jun101-Light]def /Korea1 [/HYSMyeongJo-Medium/HYGoThic-Medium]def /GB1 [/STSong-Light/STHeiti-Regular]def /CNS1 [/MKai-Medium/MHei-Medium]def end def end def /$cmapname null def /$deepcopyfont { dup/FontType get 0 eq { 1 dict dup/FontName/copied put copyfont begin /FDepVector FDepVector copyarray 0 1 2 index length 1 sub { 2 copy get $deepcopyfont dup/FontName/copied put /copied exch definefont 3 copy put pop pop } for def currentdict end } {$Strategies/$Type3Underprint get exec} ifelse }bind def /$buildfontname { dup/CIDFont findresource/CIDSystemInfo get begin Registry length Ordering length Supplement 8 string cvs 3 copy length 2 add add add string dup 5 1 roll dup 0 Registry putinterval dup 4 index(-)putinterval dup 4 index 1 add Ordering putinterval 4 2 roll add 1 add 2 copy(-)putinterval end 1 add 2 copy 0 exch getinterval $cmapname $fontpat cvs exch anchorsearch {pop pop 3 2 roll putinterval cvn/$cmapname exch def} {pop pop pop pop pop} ifelse length $str 1 index(-)putinterval 1 add $str 1 index $cmapname $fontpat cvs putinterval $cmapname length add $str exch 0 exch getinterval cvn }bind def /$findfontByROS { /$fontname exch def $ROS Registry 2 copy known { get Ordering 2 copy known {get} {pop pop[]} ifelse } {pop pop[]} ifelse false exch { dup/CIDFont resourcestatus { pop pop save 1 index/CIDFont findresource dup/WidthsOnly known {dup/WidthsOnly get} {false} ifelse exch pop exch restore {pop} {exch pop true exit} ifelse } {pop} ifelse } forall {$str cvs $buildfontname} { false(*) { save exch dup/CIDFont findresource dup/WidthsOnly known {dup/WidthsOnly get not} {true} ifelse exch/CIDSystemInfo get dup/Registry get Registry eq exch/Ordering get Ordering eq and and {exch restore exch pop true exit} {pop restore} ifelse } $str/CIDFont resourceforall {$buildfontname} {$fontname $findfontByEnum} ifelse } ifelse }bind def end end currentdict/$error known currentdict/languagelevel known and dup {pop $error/SubstituteFont known} if dup {$error} 
{Adobe_CoolType_Core} ifelse begin { /SubstituteFont /CMap/Category resourcestatus { pop pop { $SubstituteFont begin /$substituteFound true def dup length $slen gt $sname null ne or $slen 0 gt and { $sname null eq {dup $str cvs dup length $slen sub $slen getinterval cvn} {$sname} ifelse Adobe_CoolType_Data/InVMFontsByCMap get 1 index 2 copy known { get false exch { pop currentglobal { GlobalFontDirectory 1 index known {exch pop true exit} {pop} ifelse } { FontDirectory 1 index known {exch pop true exit} { GlobalFontDirectory 1 index known {exch pop true exit} {pop} ifelse } ifelse } ifelse } forall } {pop pop false} ifelse { exch pop exch pop } { dup/CMap resourcestatus { pop pop dup/$cmapname exch def /CMap findresource/CIDSystemInfo get{def}forall $findfontByROS } { 128 string cvs dup(-)search { 3 1 roll search { 3 1 roll pop {dup cvi} stopped {pop pop pop pop pop $findfontByEnum} { 4 2 roll pop pop exch length exch 2 index length 2 index sub exch 1 sub -1 0 { $str cvs dup length 4 index 0 4 index 4 3 roll add getinterval exch 1 index exch 3 index exch putinterval dup/CMap resourcestatus { pop pop 4 1 roll pop pop pop dup/$cmapname exch def /CMap findresource/CIDSystemInfo get{def}forall $findfontByROS true exit } {pop} ifelse } for dup type/booleantype eq {pop} {pop pop pop $findfontByEnum} ifelse } ifelse } {pop pop pop $findfontByEnum} ifelse } {pop pop $findfontByEnum} ifelse } ifelse } ifelse } {//SubstituteFont exec} ifelse /$slen 0 def end } } { { $SubstituteFont begin /$substituteFound true def dup length $slen gt $sname null ne or $slen 0 gt and {$findfontByEnum} {//SubstituteFont exec} ifelse end } } ifelse bind readonly def Adobe_CoolType_Core/scfindfont/systemfindfont load put } { /scfindfont { $SubstituteFont begin dup systemfindfont dup/FontName known {dup/FontName get dup 3 index ne} {/noname true} ifelse dup { /$origfontnamefound 2 index def /$origfontname 4 index def/$substituteFound true def } if exch pop { $slen 0 gt $sname null ne 3 index 
length $slen gt or and { pop dup $findfontByEnum findfont dup maxlength 1 add dict begin {1 index/FID eq{pop pop}{def}ifelse} forall currentdict end definefont dup/FontName known{dup/FontName get}{null}ifelse $origfontnamefound ne { $origfontname $str cvs print ( substitution revised, using )print dup/FontName known {dup/FontName get}{(unspecified font)} ifelse $str cvs print(.\n)print } if } {exch pop} ifelse } {exch pop} ifelse end }bind def } ifelse end end Adobe_CoolType_Core_Defined not { Adobe_CoolType_Core/findfont { $SubstituteFont begin $depth 0 eq { /$fontname 1 index dup type/stringtype ne{$str cvs}if def /$substituteFound false def } if /$depth $depth 1 add def end scfindfont $SubstituteFont begin /$depth $depth 1 sub def $substituteFound $depth 0 eq and { $inVMIndex null ne {dup $inVMIndex $AddInVMFont} if $doSmartSub { currentdict/$Strategy known {$Strategy/$BuildFont get exec} if } if } if end }bind put } if } if end /$AddInVMFont { exch/FontName 2 copy known { get 1 dict dup begin exch 1 index gcheck def end exch Adobe_CoolType_Data/InVMFontsByCMap get exch $DictAdd } {pop pop pop} ifelse }bind def /$DictAdd { 2 copy known not {2 copy 4 index length dict put} if Level2? not { 2 copy get dup maxlength exch length 4 index length add lt 2 copy get dup length 4 index length add exch maxlength 1 index lt { 2 mul dict begin 2 copy get{forall}def 2 copy currentdict put end } {pop} ifelse } if get begin {def} forall end }bind def end end %%EndResource currentglobal true setglobal %%BeginResource: procset Adobe_CoolType_Utility_MAKEOCF 1.23 0 %%Copyright: Copyright 1987-2006 Adobe Systems Incorporated. %%Version: 1.23 0 systemdict/languagelevel known dup {currentglobal false setglobal} {false} ifelse exch userdict/Adobe_CoolType_Utility 2 copy known {2 copy get dup maxlength 27 add dict copy} {27 dict} ifelse put Adobe_CoolType_Utility begin /@eexecStartData def /@recognizeCIDFont null def /ct_Level2? exch def /ct_Clone? 
1183615869 internaldict dup /CCRun known not exch/eCCRun known not ct_Level2? and or def ct_Level2? {globaldict begin currentglobal true setglobal} if /ct_AddStdCIDMap ct_Level2? {{ mark Adobe_CoolType_Utility/@recognizeCIDFont currentdict put { ((Hex)57 StartData 0615 1e27 2c39 1c60 d8a8 cc31 fe2b f6e0 7aa3 e541 e21c 60d8 a8c9 c3d0 6d9e 1c60 d8a8 c9c2 02d7 9a1c 60d8 a849 1c60 d8a8 cc36 74f4 1144 b13b 77)0()/SubFileDecode filter cvx exec } stopped { cleartomark Adobe_CoolType_Utility/@recognizeCIDFont get countdictstack dup array dictstack exch 1 sub -1 0 { 2 copy get 3 index eq {1 index length exch sub 1 sub{end}repeat exit} {pop} ifelse } for pop pop Adobe_CoolType_Utility/@eexecStartData get eexec } {cleartomark} ifelse }} {{ Adobe_CoolType_Utility/@eexecStartData get eexec }} ifelse bind def userdict/cid_extensions known dup{cid_extensions/cid_UpdateDB known and}if { cid_extensions begin /cid_GetCIDSystemInfo { 1 index type/stringtype eq {exch cvn exch} if cid_extensions begin dup load 2 index known { 2 copy cid_GetStatusInfo dup null ne { 1 index load 3 index get dup null eq {pop pop cid_UpdateDB} { exch 1 index/Created get eq {exch pop exch pop} {pop cid_UpdateDB} ifelse } ifelse } {pop cid_UpdateDB} ifelse } {cid_UpdateDB} ifelse end }bind def end } if ct_Level2? {end setglobal} if /ct_UseNativeCapability? systemdict/composefont known def /ct_MakeOCF 35 dict def /ct_Vars 25 dict def /ct_GlyphDirProcs 6 dict def /ct_BuildCharDict 15 dict dup begin /charcode 2 string def /dst_string 1500 string def /nullstring()def /usewidths? 
true def end def ct_Level2?{setglobal}{pop}ifelse ct_GlyphDirProcs begin /GetGlyphDirectory { systemdict/languagelevel known {pop/CIDFont findresource/GlyphDirectory get} { 1 index/CIDFont findresource/GlyphDirectory get dup type/dicttype eq { dup dup maxlength exch length sub 2 index lt { dup length 2 index add dict copy 2 index /CIDFont findresource/GlyphDirectory 2 index put } if } if exch pop exch pop } ifelse + }def /+ { systemdict/languagelevel known { currentglobal false setglobal 3 dict begin /vm exch def } {1 dict begin} ifelse /$ exch def systemdict/languagelevel known { vm setglobal /gvm currentglobal def $ gcheck setglobal } if ?{$ begin}if }def /?{$ type/dicttype eq}def /|{ userdict/Adobe_CoolType_Data known { Adobe_CoolType_Data/AddWidths? known { currentdict Adobe_CoolType_Data begin begin AddWidths? { Adobe_CoolType_Data/CC 3 index put ?{def}{$ 3 1 roll put}ifelse CC charcode exch 1 index 0 2 index 256 idiv put 1 index exch 1 exch 256 mod put stringwidth 2 array astore currentfont/Widths get exch CC exch put } {?{def}{$ 3 1 roll put}ifelse} ifelse end end } {?{def}{$ 3 1 roll put}ifelse} ifelse } {?{def}{$ 3 1 roll put}ifelse} ifelse }def /! 
{ ?{end}if systemdict/languagelevel known {gvm setglobal} if end }def /:{string currentfile exch readstring pop}executeonly def end ct_MakeOCF begin /ct_cHexEncoding [/c00/c01/c02/c03/c04/c05/c06/c07/c08/c09/c0A/c0B/c0C/c0D/c0E/c0F/c10/c11/c12 /c13/c14/c15/c16/c17/c18/c19/c1A/c1B/c1C/c1D/c1E/c1F/c20/c21/c22/c23/c24/c25 /c26/c27/c28/c29/c2A/c2B/c2C/c2D/c2E/c2F/c30/c31/c32/c33/c34/c35/c36/c37/c38 /c39/c3A/c3B/c3C/c3D/c3E/c3F/c40/c41/c42/c43/c44/c45/c46/c47/c48/c49/c4A/c4B /c4C/c4D/c4E/c4F/c50/c51/c52/c53/c54/c55/c56/c57/c58/c59/c5A/c5B/c5C/c5D/c5E /c5F/c60/c61/c62/c63/c64/c65/c66/c67/c68/c69/c6A/c6B/c6C/c6D/c6E/c6F/c70/c71 /c72/c73/c74/c75/c76/c77/c78/c79/c7A/c7B/c7C/c7D/c7E/c7F/c80/c81/c82/c83/c84 /c85/c86/c87/c88/c89/c8A/c8B/c8C/c8D/c8E/c8F/c90/c91/c92/c93/c94/c95/c96/c97 /c98/c99/c9A/c9B/c9C/c9D/c9E/c9F/cA0/cA1/cA2/cA3/cA4/cA5/cA6/cA7/cA8/cA9/cAA /cAB/cAC/cAD/cAE/cAF/cB0/cB1/cB2/cB3/cB4/cB5/cB6/cB7/cB8/cB9/cBA/cBB/cBC/cBD /cBE/cBF/cC0/cC1/cC2/cC3/cC4/cC5/cC6/cC7/cC8/cC9/cCA/cCB/cCC/cCD/cCE/cCF/cD0 /cD1/cD2/cD3/cD4/cD5/cD6/cD7/cD8/cD9/cDA/cDB/cDC/cDD/cDE/cDF/cE0/cE1/cE2/cE3 /cE4/cE5/cE6/cE7/cE8/cE9/cEA/cEB/cEC/cED/cEE/cEF/cF0/cF1/cF2/cF3/cF4/cF5/cF6 /cF7/cF8/cF9/cFA/cFB/cFC/cFD/cFE/cFF]def /ct_CID_STR_SIZE 8000 def /ct_mkocfStr100 100 string def /ct_defaultFontMtx[.001 0 0 .001 0 0]def /ct_1000Mtx[1000 0 0 1000 0 0]def /ct_raise{exch cvx exch errordict exch get exec stop}bind def /ct_reraise {cvx $error/errorname get(Error: )print dup( )cvs print errordict exch get exec stop }bind def /ct_cvnsi { 1 index add 1 sub 1 exch 0 4 1 roll { 2 index exch get exch 8 bitshift add } for exch pop }bind def /ct_GetInterval { Adobe_CoolType_Utility/ct_BuildCharDict get begin /dst_index 0 def dup dst_string length gt {dup string/dst_string exch def} if 1 index ct_CID_STR_SIZE idiv /arrayIndex exch def 2 index arrayIndex get 2 index arrayIndex ct_CID_STR_SIZE mul sub { dup 3 index add 2 index length le { 2 index getinterval dst_string dst_index 2 index putinterval length dst_index 
add/dst_index exch def exit } { 1 index length 1 index sub dup 4 1 roll getinterval dst_string dst_index 2 index putinterval pop dup dst_index add/dst_index exch def sub /arrayIndex arrayIndex 1 add def 2 index dup length arrayIndex gt {arrayIndex get} { pop exit } ifelse 0 } ifelse } loop pop pop pop dst_string 0 dst_index getinterval end }bind def ct_Level2? { /ct_resourcestatus currentglobal mark true setglobal {/unknowninstancename/Category resourcestatus} stopped {cleartomark setglobal true} {cleartomark currentglobal not exch setglobal} ifelse { { mark 3 1 roll/Category findresource begin ct_Vars/vm currentglobal put ({ResourceStatus}stopped)0()/SubFileDecode filter cvx exec {cleartomark false} {{3 2 roll pop true}{cleartomark false}ifelse} ifelse ct_Vars/vm get setglobal end } } {{resourcestatus}} ifelse bind def /CIDFont/Category ct_resourcestatus {pop pop} { currentglobal true setglobal /Generic/Category findresource dup length dict copy dup/InstanceType/dicttype put /CIDFont exch/Category defineresource pop setglobal } ifelse ct_UseNativeCapability? 
{ /CIDInit/ProcSet findresource begin 12 dict begin begincmap /CIDSystemInfo 3 dict dup begin /Registry(Adobe)def /Ordering(Identity)def /Supplement 0 def end def /CMapName/Identity-H def /CMapVersion 1.000 def /CMapType 1 def 1 begincodespacerange <0000> endcodespacerange 1 begincidrange <0000>0 endcidrange endcmap CMapName currentdict/CMap defineresource pop end end } if } { /ct_Category 2 dict begin /CIDFont 10 dict def /ProcSet 2 dict def currentdict end def /defineresource { ct_Category 1 index 2 copy known { get dup dup maxlength exch length eq { dup length 10 add dict copy ct_Category 2 index 2 index put } if 3 index 3 index put pop exch pop } {pop pop/defineresource/undefined ct_raise} ifelse }bind def /findresource { ct_Category 1 index 2 copy known { get 2 index 2 copy known {get 3 1 roll pop pop} {pop pop/findresource/undefinedresource ct_raise} ifelse } {pop pop/findresource/undefined ct_raise} ifelse }bind def /resourcestatus { ct_Category 1 index 2 copy known { get 2 index known exch pop exch pop { 0 -1 true } { false } ifelse } {pop pop/findresource/undefined ct_raise} ifelse }bind def /ct_resourcestatus/resourcestatus load def } ifelse /ct_CIDInit 2 dict begin /ct_cidfont_stream_init { { dup(Binary)eq { pop null currentfile ct_Level2? { {cid_BYTE_COUNT()/SubFileDecode filter} stopped {pop pop pop} if } if /readstring load exit } if dup(Hex)eq { pop currentfile ct_Level2? 
{ {null exch/ASCIIHexDecode filter/readstring} stopped {pop exch pop(>)exch/readhexstring} if } {(>)exch/readhexstring} ifelse load exit } if /StartData/typecheck ct_raise } loop cid_BYTE_COUNT ct_CID_STR_SIZE le { 2 copy cid_BYTE_COUNT string exch exec pop 1 array dup 3 -1 roll 0 exch put } { cid_BYTE_COUNT ct_CID_STR_SIZE div ceiling cvi dup array exch 2 sub 0 exch 1 exch { 2 copy 5 index ct_CID_STR_SIZE string 6 index exec pop put pop } for 2 index cid_BYTE_COUNT ct_CID_STR_SIZE mod string 3 index exec pop 1 index exch 1 index length 1 sub exch put } ifelse cid_CIDFONT exch/GlyphData exch put 2 index null eq { pop pop pop } { pop/readstring load 1 string exch { 3 copy exec pop dup length 0 eq { pop pop pop pop pop true exit } if 4 index eq { pop pop pop pop false exit } if } loop pop } ifelse }bind def /StartData { mark { currentdict dup/FDArray get 0 get/FontMatrix get 0 get 0.001 eq { dup/CDevProc known not { /CDevProc 1183615869 internaldict/stdCDevProc 2 copy known {get} { pop pop {pop pop pop pop pop 0 -1000 7 index 2 div 880} } ifelse def } if } { /CDevProc { pop pop pop pop pop 0 1 cid_temp/cid_CIDFONT get /FDArray get 0 get /FontMatrix get 0 get div 7 index 2 div 1 index 0.88 mul }def } ifelse /cid_temp 15 dict def cid_temp begin /cid_CIDFONT exch def 3 copy pop dup/cid_BYTE_COUNT exch def 0 gt { ct_cidfont_stream_init FDArray { /Private get dup/SubrMapOffset known { begin /Subrs SubrCount array def Subrs SubrMapOffset SubrCount SDBytes ct_Level2? 
{ currentdict dup/SubrMapOffset undef dup/SubrCount undef /SDBytes undef } if end /cid_SD_BYTES exch def /cid_SUBR_COUNT exch def /cid_SUBR_MAP_OFFSET exch def /cid_SUBRS exch def cid_SUBR_COUNT 0 gt { GlyphData cid_SUBR_MAP_OFFSET cid_SD_BYTES ct_GetInterval 0 cid_SD_BYTES ct_cvnsi 0 1 cid_SUBR_COUNT 1 sub { exch 1 index 1 add cid_SD_BYTES mul cid_SUBR_MAP_OFFSET add GlyphData exch cid_SD_BYTES ct_GetInterval 0 cid_SD_BYTES ct_cvnsi cid_SUBRS 4 2 roll GlyphData exch 4 index 1 index sub ct_GetInterval dup length string copy put } for pop } if } {pop} ifelse } forall } if cleartomark pop pop end CIDFontName currentdict/CIDFont defineresource pop end end } stopped {cleartomark/StartData ct_reraise} if }bind def currentdict end def /ct_saveCIDInit { /CIDInit/ProcSet ct_resourcestatus {true} {/CIDInitC/ProcSet ct_resourcestatus} ifelse { pop pop /CIDInit/ProcSet findresource ct_UseNativeCapability? {pop null} {/CIDInit ct_CIDInit/ProcSet defineresource pop} ifelse } {/CIDInit ct_CIDInit/ProcSet defineresource pop null} ifelse ct_Vars exch/ct_oldCIDInit exch put }bind def /ct_restoreCIDInit { ct_Vars/ct_oldCIDInit get dup null ne {/CIDInit exch/ProcSet defineresource pop} {pop} ifelse }bind def /ct_BuildCharSetUp { 1 index begin CIDFont begin Adobe_CoolType_Utility/ct_BuildCharDict get begin /ct_dfCharCode exch def /ct_dfDict exch def CIDFirstByte ct_dfCharCode add dup CIDCount ge {pop 0} if /cid exch def { GlyphDirectory cid 2 copy known {get} {pop pop nullstring} ifelse dup length FDBytes sub 0 gt { dup FDBytes 0 ne {0 FDBytes ct_cvnsi} {pop 0} ifelse /fdIndex exch def dup length FDBytes sub FDBytes exch getinterval /charstring exch def exit } { pop cid 0 eq {/charstring nullstring def exit} if /cid 0 def } ifelse } loop }def /ct_SetCacheDevice { 0 0 moveto dup stringwidth 3 -1 roll true charpath pathbbox 0 -1000 7 index 2 div 880 setcachedevice2 0 0 moveto }def /ct_CloneSetCacheProc { 1 eq { stringwidth pop -2 div -880 0 -1000 setcharwidth moveto } { usewidths? 
{ currentfont/Widths get cid 2 copy known {get exch pop aload pop} {pop pop stringwidth} ifelse } {stringwidth} ifelse setcharwidth 0 0 moveto } ifelse }def /ct_Type3ShowCharString { ct_FDDict fdIndex 2 copy known {get} { currentglobal 3 1 roll 1 index gcheck setglobal ct_Type1FontTemplate dup maxlength dict copy begin FDArray fdIndex get dup/FontMatrix 2 copy known {get} {pop pop ct_defaultFontMtx} ifelse /FontMatrix exch dup length array copy def /Private get /Private exch def /Widths rootfont/Widths get def /CharStrings 1 dict dup/.notdef dup length string copy put def currentdict end /ct_Type1Font exch definefont dup 5 1 roll put setglobal } ifelse dup/CharStrings get 1 index/Encoding get ct_dfCharCode get charstring put rootfont/WMode 2 copy known {get} {pop pop 0} ifelse exch 1000 scalefont setfont ct_str1 0 ct_dfCharCode put ct_str1 exch ct_dfSetCacheProc ct_SyntheticBold { currentpoint ct_str1 show newpath moveto ct_str1 true charpath ct_StrokeWidth setlinewidth stroke } {ct_str1 show} ifelse }def /ct_Type4ShowCharString { ct_dfDict ct_dfCharCode charstring FDArray fdIndex get dup/FontMatrix get dup ct_defaultFontMtx ct_matrixeq not {ct_1000Mtx matrix concatmatrix concat} {pop} ifelse /Private get Adobe_CoolType_Utility/ct_Level2? get not { ct_dfDict/Private 3 -1 roll {put} 1183615869 internaldict/superexec get exec } if 1183615869 internaldict Adobe_CoolType_Utility/ct_Level2? get {1 index} {3 index/Private get mark 6 1 roll} ifelse dup/RunInt known {/RunInt get} {pop/CCRun} ifelse get exec Adobe_CoolType_Utility/ct_Level2? 
get not {cleartomark} if }bind def /ct_BuildCharIncremental { { Adobe_CoolType_Utility/ct_MakeOCF get begin ct_BuildCharSetUp ct_ShowCharString } stopped {stop} if end end end end }bind def /BaseFontNameStr(BF00)def /ct_Type1FontTemplate 14 dict begin /FontType 1 def /FontMatrix [0.001 0 0 0.001 0 0]def /FontBBox [-250 -250 1250 1250]def /Encoding ct_cHexEncoding def /PaintType 0 def currentdict end def /BaseFontTemplate 11 dict begin /FontMatrix [0.001 0 0 0.001 0 0]def /FontBBox [-250 -250 1250 1250]def /Encoding ct_cHexEncoding def /BuildChar/ct_BuildCharIncremental load def ct_Clone? { /FontType 3 def /ct_ShowCharString/ct_Type3ShowCharString load def /ct_dfSetCacheProc/ct_CloneSetCacheProc load def /ct_SyntheticBold false def /ct_StrokeWidth 1 def } { /FontType 4 def /Private 1 dict dup/lenIV 4 put def /CharStrings 1 dict dup/.notdefput def /PaintType 0 def /ct_ShowCharString/ct_Type4ShowCharString load def } ifelse /ct_str1 1 string def currentdict end def /BaseFontDictSize BaseFontTemplate length 5 add def /ct_matrixeq { true 0 1 5 { dup 4 index exch get exch 3 index exch get eq and dup not {exit} if } for exch pop exch pop }bind def /ct_makeocf { 15 dict begin exch/WMode exch def exch/FontName exch def /FontType 0 def /FMapType 2 def dup/FontMatrix known {dup/FontMatrix get/FontMatrix exch def} {/FontMatrix matrix def} ifelse /bfCount 1 index/CIDCount get 256 idiv 1 add dup 256 gt{pop 256}if def /Encoding 256 array 0 1 bfCount 1 sub{2 copy dup put pop}for bfCount 1 255{2 copy bfCount put pop}for def /FDepVector bfCount dup 256 lt{1 add}if array def BaseFontTemplate BaseFontDictSize dict copy begin /CIDFont exch def CIDFont/FontBBox known {CIDFont/FontBBox get/FontBBox exch def} if CIDFont/CDevProc known {CIDFont/CDevProc get/CDevProc exch def} if currentdict end BaseFontNameStr 3(0)putinterval 0 1 bfCount dup 256 eq{1 sub}if { FDepVector exch 2 index BaseFontDictSize dict copy begin dup/CIDFirstByte exch 256 mul def FontType 3 eq {/ct_FDDict 2 dict def} if 
currentdict end 1 index 16 BaseFontNameStr 2 2 getinterval cvrs pop BaseFontNameStr exch definefont put } for ct_Clone? {/Widths 1 index/CIDFont get/GlyphDirectory get length dict def} if FontName currentdict end definefont ct_Clone? { gsave dup 1000 scalefont setfont ct_BuildCharDict begin /usewidths? false def currentfont/Widths get begin exch/CIDFont get/GlyphDirectory get { pop dup charcode exch 1 index 0 2 index 256 idiv put 1 index exch 1 exch 256 mod put stringwidth 2 array astore def } forall end /usewidths? true def end grestore } {exch pop} ifelse }bind def currentglobal true setglobal /ct_ComposeFont { ct_UseNativeCapability? { 2 index/CMap ct_resourcestatus {pop pop exch pop} { /CIDInit/ProcSet findresource begin 12 dict begin begincmap /CMapName 3 index def /CMapVersion 1.000 def /CMapType 1 def exch/WMode exch def /CIDSystemInfo 3 dict dup begin /Registry(Adobe)def /Ordering CMapName ct_mkocfStr100 cvs (Adobe-)search { pop pop (-)search { dup length string copy exch pop exch pop } {pop(Identity)} ifelse } {pop (Identity)} ifelse def /Supplement 0 def end def 1 begincodespacerange <0000> endcodespacerange 1 begincidrange <0000>0 endcidrange endcmap CMapName currentdict/CMap defineresource pop end end } ifelse composefont } { 3 2 roll pop 0 get/CIDFont findresource ct_makeocf } ifelse }bind def setglobal /ct_MakeIdentity { ct_UseNativeCapability? 
{ 1 index/CMap ct_resourcestatus {pop pop} { /CIDInit/ProcSet findresource begin 12 dict begin begincmap /CMapName 2 index def /CMapVersion 1.000 def /CMapType 1 def /CIDSystemInfo 3 dict dup begin /Registry(Adobe)def /Ordering CMapName ct_mkocfStr100 cvs (Adobe-)search { pop pop (-)search {dup length string copy exch pop exch pop} {pop(Identity)} ifelse } {pop(Identity)} ifelse def /Supplement 0 def end def 1 begincodespacerange <0000> endcodespacerange 1 begincidrange <0000>0 endcidrange endcmap CMapName currentdict/CMap defineresource pop end end } ifelse composefont } { exch pop 0 get/CIDFont findresource ct_makeocf } ifelse }bind def currentdict readonly pop end end %%EndResource setglobal %%BeginResource: procset Adobe_CoolType_Utility_T42 1.0 0 %%Copyright: Copyright 1987-2004 Adobe Systems Incorporated. %%Version: 1.0 0 userdict/ct_T42Dict 15 dict put ct_T42Dict begin /Is2015? { version cvi 2015 ge }bind def /AllocGlyphStorage { Is2015? { pop } { {string}forall }ifelse }bind def /Type42DictBegin { 25 dict begin /FontName exch def /CharStrings 256 dict begin /.notdef 0 def currentdict end def /Encoding exch def /PaintType 0 def /FontType 42 def /FontMatrix[1 0 0 1 0 0]def 4 array astore cvx/FontBBox exch def /sfnts }bind def /Type42DictEnd { currentdict dup/FontName get exch definefont end ct_T42Dict exch dup/FontName get exch put }bind def /RD{string currentfile exch readstring pop}executeonly def /PrepFor2015 { Is2015? { /GlyphDirectory 16 dict def sfnts 0 get dup 2 index (glyx) putinterval 2 index (locx) putinterval pop pop } { pop pop }ifelse }bind def /AddT42Char { Is2015? { /GlyphDirectory get begin def end pop pop } { /sfnts get 4 index get 3 index 2 index putinterval pop pop pop pop }ifelse }bind def /T0AddT42Mtx2 { /CIDFont findresource/Metrics2 get begin def end }bind def end %%EndResource currentglobal true setglobal %%BeginFile: MMFauxFont.prc %%Copyright: Copyright 1987-2001 Adobe Systems Incorporated. %%All Rights Reserved. 
userdict /ct_EuroDict 10 dict put ct_EuroDict begin /ct_CopyFont { { 1 index /FID ne {def} {pop pop} ifelse} forall } def /ct_GetGlyphOutline { gsave initmatrix newpath exch findfont dup length 1 add dict begin ct_CopyFont /Encoding Encoding dup length array copy dup 4 -1 roll 0 exch put def currentdict end /ct_EuroFont exch definefont 1000 scalefont setfont 0 0 moveto [ <00> stringwidth <00> false charpath pathbbox [ {/m cvx} {/l cvx} {/c cvx} {/cp cvx} pathforall grestore counttomark 8 add } def /ct_MakeGlyphProc { ] cvx /ct_PSBuildGlyph cvx ] cvx } def /ct_PSBuildGlyph { gsave 8 -1 roll pop 7 1 roll 6 -2 roll ct_FontMatrix transform 6 2 roll 4 -2 roll ct_FontMatrix transform 4 2 roll ct_FontMatrix transform currentdict /PaintType 2 copy known {get 2 eq}{pop pop false} ifelse dup 9 1 roll { currentdict /StrokeWidth 2 copy known { get 2 div 0 ct_FontMatrix dtransform pop 5 1 roll 4 -1 roll 4 index sub 4 1 roll 3 -1 roll 4 index sub 3 1 roll exch 4 index add exch 4 index add 5 -1 roll pop } { pop pop } ifelse } if setcachedevice ct_FontMatrix concat ct_PSPathOps begin exec end { currentdict /StrokeWidth 2 copy known { get } { pop pop 0 } ifelse setlinewidth stroke } { fill } ifelse grestore } def /ct_PSPathOps 4 dict dup begin /m {moveto} def /l {lineto} def /c {curveto} def /cp {closepath} def end def /ct_matrix1000 [1000 0 0 1000 0 0] def /ct_AddGlyphProc { 2 index findfont dup length 4 add dict begin ct_CopyFont /CharStrings CharStrings dup length 1 add dict copy begin 3 1 roll def currentdict end def /ct_FontMatrix ct_matrix1000 FontMatrix matrix concatmatrix def /ct_PSBuildGlyph /ct_PSBuildGlyph load def /ct_PSPathOps /ct_PSPathOps load def currentdict end definefont pop } def systemdict /languagelevel known { /ct_AddGlyphToPrinterFont { 2 copy ct_GetGlyphOutline 3 add -1 roll restore ct_MakeGlyphProc ct_AddGlyphProc } def } { /ct_AddGlyphToPrinterFont { pop pop restore Adobe_CTFauxDict /$$$FONTNAME get /Euro Adobe_CTFauxDict /$$$SUBSTITUTEBASE get ct_EuroDict 
exch get ct_AddGlyphProc } def } ifelse /AdobeSansMM { 556 0 24 -19 541 703 { 541 628 m 510 669 442 703 354 703 c 201 703 117 607 101 444 c 50 444 l 25 372 l 97 372 l 97 301 l 49 301 l 24 229 l 103 229 l 124 67 209 -19 350 -19 c 435 -19 501 25 509 32 c 509 131 l 492 105 417 60 343 60 c 267 60 204 127 197 229 c 406 229 l 430 301 l 191 301 l 191 372 l 455 372 l 479 444 l 194 444 l 201 531 245 624 348 624 c 433 624 484 583 509 534 c cp 556 0 m } ct_PSBuildGlyph } def /AdobeSerifMM { 500 0 10 -12 484 692 { 347 298 m 171 298 l 170 310 170 322 170 335 c 170 362 l 362 362 l 374 403 l 172 403 l 184 580 244 642 308 642 c 380 642 434 574 457 457 c 481 462 l 474 691 l 449 691 l 433 670 429 657 410 657 c 394 657 360 692 299 692 c 204 692 94 604 73 403 c 22 403 l 10 362 l 70 362 l 69 352 69 341 69 330 c 69 319 69 308 70 298 c 22 298 l 10 257 l 73 257 l 97 57 216 -12 295 -12 c 364 -12 427 25 484 123 c 458 142 l 425 101 384 37 316 37 c 256 37 189 84 173 257 c 335 257 l cp 500 0 m } ct_PSBuildGlyph } def end %%EndFile setglobal Adobe_CoolType_Core begin /$Oblique SetSubstituteStrategy end %%BeginResource: procset Adobe_AGM_Image 1.0 0 +%%Version: 1.0 0 +%%Copyright: Copyright(C)2000-2006 Adobe Systems, Inc. All Rights Reserved. 
+systemdict/setpacking known +{ + currentpacking + true setpacking +}if +userdict/Adobe_AGM_Image 71 dict dup begin put +/Adobe_AGM_Image_Id/Adobe_AGM_Image_1.0_0 def +/nd{ + null def +}bind def +/AGMIMG_&image nd +/AGMIMG_&colorimage nd +/AGMIMG_&imagemask nd +/AGMIMG_mbuf()def +/AGMIMG_ybuf()def +/AGMIMG_kbuf()def +/AGMIMG_c 0 def +/AGMIMG_m 0 def +/AGMIMG_y 0 def +/AGMIMG_k 0 def +/AGMIMG_tmp nd +/AGMIMG_imagestring0 nd +/AGMIMG_imagestring1 nd +/AGMIMG_imagestring2 nd +/AGMIMG_imagestring3 nd +/AGMIMG_imagestring4 nd +/AGMIMG_imagestring5 nd +/AGMIMG_cnt nd +/AGMIMG_fsave nd +/AGMIMG_colorAry nd +/AGMIMG_override nd +/AGMIMG_name nd +/AGMIMG_maskSource nd +/AGMIMG_flushfilters nd +/invert_image_samples nd +/knockout_image_samples nd +/img nd +/sepimg nd +/devnimg nd +/idximg nd +/ds +{ + Adobe_AGM_Core begin + Adobe_AGM_Image begin + /AGMIMG_&image systemdict/image get def + /AGMIMG_&imagemask systemdict/imagemask get def + /colorimage where{ + pop + /AGMIMG_&colorimage/colorimage ldf + }if + end + end +}def +/ps +{ + Adobe_AGM_Image begin + /AGMIMG_ccimage_exists{/customcolorimage where + { + pop + /Adobe_AGM_OnHost_Seps where + { + pop false + }{ + /Adobe_AGM_InRip_Seps where + { + pop false + }{ + true + }ifelse + }ifelse + }{ + false + }ifelse + }bdf + level2{ + /invert_image_samples + { + Adobe_AGM_Image/AGMIMG_tmp Decode length ddf + /Decode[Decode 1 get Decode 0 get]def + }def + /knockout_image_samples + { + Operator/imagemask ne{ + /Decode[1 1]def + }if + }def + }{ + /invert_image_samples + { + {1 exch sub}currenttransfer addprocs settransfer + }def + /knockout_image_samples + { + {pop 1}currenttransfer addprocs settransfer + }def + }ifelse + /img/imageormask ldf + /sepimg/sep_imageormask ldf + /devnimg/devn_imageormask ldf + /idximg/indexed_imageormask ldf + /_ctype 7 def + currentdict{ + dup xcheck 1 index type dup/arraytype eq exch/packedarraytype eq or and{ + bind + }if + def + }forall +}def +/pt +{ + end +}def +/dt +{ +}def +/AGMIMG_flushfilters +{ 
+ dup type/arraytype ne + {1 array astore}if + dup 0 get currentfile ne + {dup 0 get flushfile}if + { + dup type/filetype eq + { + dup status 1 index currentfile ne and + {closefile} + {pop} + ifelse + }{pop}ifelse + }forall +}def +/AGMIMG_init_common +{ + currentdict/T known{/ImageType/T ldf currentdict/T undef}if + currentdict/W known{/Width/W ldf currentdict/W undef}if + currentdict/H known{/Height/H ldf currentdict/H undef}if + currentdict/M known{/ImageMatrix/M ldf currentdict/M undef}if + currentdict/BC known{/BitsPerComponent/BC ldf currentdict/BC undef}if + currentdict/D known{/Decode/D ldf currentdict/D undef}if + currentdict/DS known{/DataSource/DS ldf currentdict/DS undef}if + currentdict/O known{ + /Operator/O load 1 eq{ + /imagemask + }{ + /O load 2 eq{ + /image + }{ + /colorimage + }ifelse + }ifelse + def + currentdict/O undef + }if + currentdict/HSCI known{/HostSepColorImage/HSCI ldf currentdict/HSCI undef}if + currentdict/MD known{/MultipleDataSources/MD ldf currentdict/MD undef}if + currentdict/I known{/Interpolate/I ldf currentdict/I undef}if + currentdict/SI known{/SkipImageProc/SI ldf currentdict/SI undef}if + /DataSource load xcheck not{ + DataSource type/arraytype eq{ + DataSource 0 get type/filetype eq{ + /_Filters DataSource def + currentdict/MultipleDataSources known not{ + /DataSource DataSource dup length 1 sub get def + }if + }if + }if + currentdict/MultipleDataSources known not{ + /MultipleDataSources DataSource type/arraytype eq{ + DataSource length 1 gt + } + {false}ifelse def + }if + }if + /NComponents Decode length 2 div def + currentdict/SkipImageProc known not{/SkipImageProc{false}def}if +}bdf +/imageormask_sys +{ + begin + AGMIMG_init_common + save mark + level2{ + currentdict + Operator/imagemask eq{ + AGMIMG_&imagemask + }{ + use_mask{ + process_mask AGMIMG_&image + }{ + AGMIMG_&image + }ifelse + }ifelse + }{ + Width Height + Operator/imagemask eq{ + Decode 0 get 1 eq Decode 1 get 0 eq and + ImageMatrix/DataSource load + 
AGMIMG_&imagemask + }{ + BitsPerComponent ImageMatrix/DataSource load + AGMIMG_&image + }ifelse + }ifelse + currentdict/_Filters known{_Filters AGMIMG_flushfilters}if + cleartomark restore + end +}def +/overprint_plate +{ + currentoverprint{ + 0 get dup type/nametype eq{ + dup/DeviceGray eq{ + pop AGMCORE_black_plate not + }{ + /DeviceCMYK eq{ + AGMCORE_is_cmyk_sep not + }if + }ifelse + }{ + false exch + { + AGMOHS_sepink eq or + }forall + not + }ifelse + }{ + pop false + }ifelse +}def +/process_mask +{ + level3{ + dup begin + /ImageType 1 def + end + 4 dict begin + /DataDict exch def + /ImageType 3 def + /InterleaveType 3 def + /MaskDict 9 dict begin + /ImageType 1 def + /Width DataDict dup/MaskWidth known{/MaskWidth}{/Width}ifelse get def + /Height DataDict dup/MaskHeight known{/MaskHeight}{/Height}ifelse get def + /ImageMatrix[Width 0 0 Height neg 0 Height]def + /NComponents 1 def + /BitsPerComponent 1 def + /Decode DataDict dup/MaskD known{/MaskD}{[1 0]}ifelse get def + /DataSource Adobe_AGM_Core/AGMIMG_maskSource get def + currentdict end def + currentdict end + }if +}def +/use_mask +{ + dup/Mask known {dup/Mask get}{false}ifelse +}def +/imageormask +{ + begin + AGMIMG_init_common + SkipImageProc{ + currentdict consumeimagedata + } + { + save mark + level2 AGMCORE_host_sep not and{ + currentdict + Operator/imagemask eq DeviceN_PS2 not and{ + imagemask + }{ + AGMCORE_in_rip_sep currentoverprint and currentcolorspace 0 get/DeviceGray eq and{ + [/Separation/Black/DeviceGray{}]setcolorspace + /Decode[Decode 1 get Decode 0 get]def + }if + use_mask{ + process_mask image + }{ + DeviceN_NoneName DeviceN_PS2 Indexed_DeviceN level3 not and or or AGMCORE_in_rip_sep and + { + Names convert_to_process not{ + 2 dict begin + /imageDict xdf + /names_index 0 def + gsave + imageDict write_image_file{ + Names{ + dup(None)ne{ + [/Separation 3 -1 roll/DeviceGray{1 exch sub}]setcolorspace + Operator imageDict read_image_file + names_index 0 eq{true setoverprint}if + /names_index 
names_index 1 add def + }{ + pop + }ifelse + }forall + close_image_file + }if + grestore + end + }{ + Operator/imagemask eq{ + imagemask + }{ + image + }ifelse + }ifelse + }{ + Operator/imagemask eq{ + imagemask + }{ + image + }ifelse + }ifelse + }ifelse + }ifelse + }{ + Width Height + Operator/imagemask eq{ + Decode 0 get 1 eq Decode 1 get 0 eq and + ImageMatrix/DataSource load + /Adobe_AGM_OnHost_Seps where{ + pop imagemask + }{ + currentgray 1 ne{ + currentdict imageormask_sys + }{ + currentoverprint not{ + 1 AGMCORE_&setgray + currentdict imageormask_sys + }{ + currentdict ignoreimagedata + }ifelse + }ifelse + }ifelse + }{ + BitsPerComponent ImageMatrix + MultipleDataSources{ + 0 1 NComponents 1 sub{ + DataSource exch get + }for + }{ + /DataSource load + }ifelse + Operator/colorimage eq{ + AGMCORE_host_sep{ + MultipleDataSources level2 or NComponents 4 eq and{ + AGMCORE_is_cmyk_sep{ + MultipleDataSources{ + /DataSource DataSource 0 get xcheck + { + [ + DataSource 0 get/exec cvx + DataSource 1 get/exec cvx + DataSource 2 get/exec cvx + DataSource 3 get/exec cvx + /AGMCORE_get_ink_data cvx + ]cvx + }{ + DataSource aload pop AGMCORE_get_ink_data + }ifelse def + }{ + /DataSource + Width BitsPerComponent mul 7 add 8 idiv Height mul 4 mul + /DataSource load + filter_cmyk 0()/SubFileDecode filter def + }ifelse + /Decode[Decode 0 get Decode 1 get]def + /MultipleDataSources false def + /NComponents 1 def + /Operator/image def + invert_image_samples + 1 AGMCORE_&setgray + currentdict imageormask_sys + }{ + currentoverprint not Operator/imagemask eq and{ + 1 AGMCORE_&setgray + currentdict imageormask_sys + }{ + currentdict ignoreimagedata + }ifelse + }ifelse + }{ + MultipleDataSources NComponents AGMIMG_&colorimage + }ifelse + }{ + true NComponents colorimage + }ifelse + }{ + Operator/image eq{ + AGMCORE_host_sep{ + /DoImage true def + currentdict/HostSepColorImage known{HostSepColorImage not}{false}ifelse + { + AGMCORE_black_plate not Operator/imagemask ne and{ + 
/DoImage false def + currentdict ignoreimagedata + }if + }if + 1 AGMCORE_&setgray + DoImage + {currentdict imageormask_sys}if + }{ + use_mask{ + process_mask image + }{ + image + }ifelse + }ifelse + }{ + Operator/knockout eq{ + pop pop pop pop pop + currentcolorspace overprint_plate not{ + knockout_unitsq + }if + }if + }ifelse + }ifelse + }ifelse + }ifelse + cleartomark restore + }ifelse + currentdict/_Filters known{_Filters AGMIMG_flushfilters}if + end +}def +/sep_imageormask +{ + /sep_colorspace_dict AGMCORE_gget begin + CSA map_csa + begin + AGMIMG_init_common + SkipImageProc{ + currentdict consumeimagedata + }{ + save mark + AGMCORE_avoid_L2_sep_space{ + /Decode[Decode 0 get 255 mul Decode 1 get 255 mul]def + }if + AGMIMG_ccimage_exists + MappedCSA 0 get/DeviceCMYK eq and + currentdict/Components known and + Name()ne and + Name(All)ne and + Operator/image eq and + AGMCORE_producing_seps not and + level2 not and + { + Width Height BitsPerComponent ImageMatrix + [ + /DataSource load/exec cvx + { + 0 1 2 index length 1 sub{ + 1 index exch + 2 copy get 255 xor put + }for + }/exec cvx + ]cvx bind + MappedCSA 0 get/DeviceCMYK eq{ + Components aload pop + }{ + 0 0 0 Components aload pop 1 exch sub + }ifelse + Name findcmykcustomcolor + customcolorimage + }{ + AGMCORE_producing_seps not{ + level2{ + //Adobe_AGM_Core/AGMCORE_pattern_paint_type get 2 ne AGMCORE_avoid_L2_sep_space not and currentcolorspace 0 get/Separation ne and{ + [/Separation Name MappedCSA sep_proc_name exch dup 0 get 15 string cvs(/Device)anchorsearch{pop pop 0 get}{pop}ifelse exch load]setcolorspace_opt + /sep_tint AGMCORE_gget setcolor + }if + currentdict imageormask + }{ + currentdict + Operator/imagemask eq{ + imageormask + }{ + sep_imageormask_lev1 + }ifelse + }ifelse + }{ + AGMCORE_host_sep{ + Operator/knockout eq{ + currentdict/ImageMatrix get concat + knockout_unitsq + }{ + currentgray 1 ne{ + AGMCORE_is_cmyk_sep Name(All)ne and{ + level2{ + Name AGMCORE_IsSeparationAProcessColor + { + 
Operator/imagemask eq{ + //Adobe_AGM_Core/AGMCORE_pattern_paint_type get 2 ne{ + /sep_tint AGMCORE_gget 1 exch sub AGMCORE_&setcolor + }if + }{ + invert_image_samples + }ifelse + }{ + //Adobe_AGM_Core/AGMCORE_pattern_paint_type get 2 ne{ + [/Separation Name[/DeviceGray] + { + sep_colorspace_proc AGMCORE_get_ink_data + 1 exch sub + }bind + ]AGMCORE_&setcolorspace + /sep_tint AGMCORE_gget AGMCORE_&setcolor + }if + }ifelse + currentdict imageormask_sys + }{ + currentdict + Operator/imagemask eq{ + imageormask_sys + }{ + sep_image_lev1_sep + }ifelse + }ifelse + }{ + Operator/imagemask ne{ + invert_image_samples + }if + currentdict imageormask_sys + }ifelse + }{ + currentoverprint not Name(All)eq or Operator/imagemask eq and{ + currentdict imageormask_sys + }{ + currentoverprint not + { + gsave + knockout_unitsq + grestore + }if + currentdict consumeimagedata + }ifelse + }ifelse + }ifelse + }{ + //Adobe_AGM_Core/AGMCORE_pattern_paint_type get 2 ne{ + currentcolorspace 0 get/Separation ne{ + [/Separation Name MappedCSA sep_proc_name exch 0 get exch load]setcolorspace_opt + /sep_tint AGMCORE_gget setcolor + }if + }if + currentoverprint + MappedCSA 0 get/DeviceCMYK eq and + Name AGMCORE_IsSeparationAProcessColor not and + //Adobe_AGM_Core/AGMCORE_pattern_paint_type get 2 ne{Name inRip_spot_has_ink not and}{false}ifelse + Name(All)ne and{ + imageormask_l2_overprint + }{ + currentdict imageormask + }ifelse + }ifelse + }ifelse + }ifelse + cleartomark restore + }ifelse + currentdict/_Filters known{_Filters AGMIMG_flushfilters}if + end + end +}def +/colorSpaceElemCnt +{ + mark currentcolor counttomark dup 2 add 1 roll cleartomark +}bdf +/devn_sep_datasource +{ + 1 dict begin + /dataSource xdf + [ + 0 1 dataSource length 1 sub{ + dup currentdict/dataSource get/exch cvx/get cvx/exec cvx + /exch cvx names_index/ne cvx[/pop cvx]cvx/if cvx + }for + ]cvx bind + end +}bdf +/devn_alt_datasource +{ + 11 dict begin + /convProc xdf + /origcolorSpaceElemCnt xdf + /origMultipleDataSources 
xdf + /origBitsPerComponent xdf + /origDecode xdf + /origDataSource xdf + /dsCnt origMultipleDataSources{origDataSource length}{1}ifelse def + /DataSource origMultipleDataSources + { + [ + BitsPerComponent 8 idiv origDecode length 2 idiv mul string + 0 1 origDecode length 2 idiv 1 sub + { + dup 7 mul 1 add index exch dup BitsPerComponent 8 idiv mul exch + origDataSource exch get 0()/SubFileDecode filter + BitsPerComponent 8 idiv string/readstring cvx/pop cvx/putinterval cvx + }for + ]bind cvx + }{origDataSource}ifelse 0()/SubFileDecode filter def + [ + origcolorSpaceElemCnt string + 0 2 origDecode length 2 sub + { + dup origDecode exch get dup 3 -1 roll 1 add origDecode exch get exch sub 2 BitsPerComponent exp 1 sub div + 1 BitsPerComponent 8 idiv{DataSource/read cvx/not cvx{0}/if cvx/mul cvx}repeat/mul cvx/add cvx + }for + /convProc load/exec cvx + origcolorSpaceElemCnt 1 sub -1 0 + { + /dup cvx 2/add cvx/index cvx + 3 1/roll cvx/exch cvx 255/mul cvx/cvi cvx/put cvx + }for + ]bind cvx 0()/SubFileDecode filter + end +}bdf +/devn_imageormask +{ + /devicen_colorspace_dict AGMCORE_gget begin + CSA map_csa + 2 dict begin + dup + /srcDataStrs[3 -1 roll begin + AGMIMG_init_common + currentdict/MultipleDataSources known{MultipleDataSources{DataSource length}{1}ifelse}{1}ifelse + { + Width Decode length 2 div mul cvi + { + dup 65535 gt{1 add 2 div cvi}{exit}ifelse + }loop + string + }repeat + end]def + /dstDataStr srcDataStrs 0 get length string def + begin + AGMIMG_init_common + SkipImageProc{ + currentdict consumeimagedata + }{ + save mark + AGMCORE_producing_seps not{ + level3 not{ + Operator/imagemask ne{ + /DataSource[[ + DataSource Decode BitsPerComponent currentdict/MultipleDataSources known{MultipleDataSources}{false}ifelse + colorSpaceElemCnt/devicen_colorspace_dict AGMCORE_gget/TintTransform get + devn_alt_datasource 1/string cvx/readstring cvx/pop cvx]cvx colorSpaceElemCnt 1 sub{dup}repeat]def + /MultipleDataSources true def + /Decode colorSpaceElemCnt[exch{0 
1}repeat]def + }if + }if + currentdict imageormask + }{ + AGMCORE_host_sep{ + Names convert_to_process{ + CSA get_csa_by_name 0 get/DeviceCMYK eq{ + /DataSource + Width BitsPerComponent mul 7 add 8 idiv Height mul 4 mul + DataSource Decode BitsPerComponent currentdict/MultipleDataSources known{MultipleDataSources}{false}ifelse + 4/devicen_colorspace_dict AGMCORE_gget/TintTransform get + devn_alt_datasource + filter_cmyk 0()/SubFileDecode filter def + /MultipleDataSources false def + /Decode[1 0]def + /DeviceGray setcolorspace + currentdict imageormask_sys + }{ + AGMCORE_report_unsupported_color_space + AGMCORE_black_plate{ + /DataSource + DataSource Decode BitsPerComponent currentdict/MultipleDataSources known{MultipleDataSources}{false}ifelse + CSA get_csa_by_name 0 get/DeviceRGB eq{3}{1}ifelse/devicen_colorspace_dict AGMCORE_gget/TintTransform get + devn_alt_datasource + /MultipleDataSources false def + /Decode colorSpaceElemCnt[exch{0 1}repeat]def + currentdict imageormask_sys + }{ + gsave + knockout_unitsq + grestore + currentdict consumeimagedata + }ifelse + }ifelse + } + { + /devicen_colorspace_dict AGMCORE_gget/names_index known{ + Operator/imagemask ne{ + MultipleDataSources{ + /DataSource[DataSource devn_sep_datasource/exec cvx]cvx def + /MultipleDataSources false def + }{ + /DataSource/DataSource load dstDataStr srcDataStrs 0 get filter_devn def + }ifelse + invert_image_samples + }if + currentdict imageormask_sys + }{ + currentoverprint not Operator/imagemask eq and{ + currentdict imageormask_sys + }{ + currentoverprint not + { + gsave + knockout_unitsq + grestore + }if + currentdict consumeimagedata + }ifelse + }ifelse + }ifelse + }{ + currentdict imageormask + }ifelse + }ifelse + cleartomark restore + }ifelse + currentdict/_Filters known{_Filters AGMIMG_flushfilters}if + end + end + end +}def +/imageormask_l2_overprint +{ + currentdict + currentcmykcolor add add add 0 eq{ + currentdict consumeimagedata + }{ + level3{ + currentcmykcolor + /AGMIMG_k xdf + 
/AGMIMG_y xdf + /AGMIMG_m xdf + /AGMIMG_c xdf + Operator/imagemask eq{ + [/DeviceN[ + AGMIMG_c 0 ne{/Cyan}if + AGMIMG_m 0 ne{/Magenta}if + AGMIMG_y 0 ne{/Yellow}if + AGMIMG_k 0 ne{/Black}if + ]/DeviceCMYK{}]setcolorspace + AGMIMG_c 0 ne{AGMIMG_c}if + AGMIMG_m 0 ne{AGMIMG_m}if + AGMIMG_y 0 ne{AGMIMG_y}if + AGMIMG_k 0 ne{AGMIMG_k}if + setcolor + }{ + /Decode[Decode 0 get 255 mul Decode 1 get 255 mul]def + [/Indexed + [ + /DeviceN[ + AGMIMG_c 0 ne{/Cyan}if + AGMIMG_m 0 ne{/Magenta}if + AGMIMG_y 0 ne{/Yellow}if + AGMIMG_k 0 ne{/Black}if + ] + /DeviceCMYK{ + AGMIMG_k 0 eq{0}if + AGMIMG_y 0 eq{0 exch}if + AGMIMG_m 0 eq{0 3 1 roll}if + AGMIMG_c 0 eq{0 4 1 roll}if + } + ] + 255 + { + 255 div + mark exch + dup dup dup + AGMIMG_k 0 ne{ + /sep_tint AGMCORE_gget mul MappedCSA sep_proc_name exch pop load exec 4 1 roll pop pop pop + counttomark 1 roll + }{ + pop + }ifelse + AGMIMG_y 0 ne{ + /sep_tint AGMCORE_gget mul MappedCSA sep_proc_name exch pop load exec 4 2 roll pop pop pop + counttomark 1 roll + }{ + pop + }ifelse + AGMIMG_m 0 ne{ + /sep_tint AGMCORE_gget mul MappedCSA sep_proc_name exch pop load exec 4 3 roll pop pop pop + counttomark 1 roll + }{ + pop + }ifelse + AGMIMG_c 0 ne{ + /sep_tint AGMCORE_gget mul MappedCSA sep_proc_name exch pop load exec pop pop pop + counttomark 1 roll + }{ + pop + }ifelse + counttomark 1 add -1 roll pop + } + ]setcolorspace + }ifelse + imageormask_sys + }{ + write_image_file{ + currentcmykcolor + 0 ne{ + [/Separation/Black/DeviceGray{}]setcolorspace + gsave + /Black + [{1 exch sub/sep_tint AGMCORE_gget mul}/exec cvx MappedCSA sep_proc_name cvx exch pop{4 1 roll pop pop pop 1 exch sub}/exec cvx] + cvx modify_halftone_xfer + Operator currentdict read_image_file + grestore + }if + 0 ne{ + [/Separation/Yellow/DeviceGray{}]setcolorspace + gsave + /Yellow + [{1 exch sub/sep_tint AGMCORE_gget mul}/exec cvx MappedCSA sep_proc_name cvx exch pop{4 2 roll pop pop pop 1 exch sub}/exec cvx] + cvx modify_halftone_xfer + Operator currentdict 
read_image_file + grestore + }if + 0 ne{ + [/Separation/Magenta/DeviceGray{}]setcolorspace + gsave + /Magenta + [{1 exch sub/sep_tint AGMCORE_gget mul}/exec cvx MappedCSA sep_proc_name cvx exch pop{4 3 roll pop pop pop 1 exch sub}/exec cvx] + cvx modify_halftone_xfer + Operator currentdict read_image_file + grestore + }if + 0 ne{ + [/Separation/Cyan/DeviceGray{}]setcolorspace + gsave + /Cyan + [{1 exch sub/sep_tint AGMCORE_gget mul}/exec cvx MappedCSA sep_proc_name cvx exch pop{pop pop pop 1 exch sub}/exec cvx] + cvx modify_halftone_xfer + Operator currentdict read_image_file + grestore + }if + close_image_file + }{ + imageormask + }ifelse + }ifelse + }ifelse +}def +/indexed_imageormask +{ + begin + AGMIMG_init_common + save mark + currentdict + AGMCORE_host_sep{ + Operator/knockout eq{ + /indexed_colorspace_dict AGMCORE_gget dup/CSA known{ + /CSA get get_csa_by_name + }{ + /Names get + }ifelse + overprint_plate not{ + knockout_unitsq + }if + }{ + Indexed_DeviceN{ + /devicen_colorspace_dict AGMCORE_gget dup/names_index known exch/Names get convert_to_process or{ + indexed_image_lev2_sep + }{ + currentoverprint not{ + knockout_unitsq + }if + currentdict consumeimagedata + }ifelse + }{ + AGMCORE_is_cmyk_sep{ + Operator/imagemask eq{ + imageormask_sys + }{ + level2{ + indexed_image_lev2_sep + }{ + indexed_image_lev1_sep + }ifelse + }ifelse + }{ + currentoverprint not{ + knockout_unitsq + }if + currentdict consumeimagedata + }ifelse + }ifelse + }ifelse + }{ + level2{ + Indexed_DeviceN{ + /indexed_colorspace_dict AGMCORE_gget begin + }{ + /indexed_colorspace_dict AGMCORE_gget dup null ne + { + begin + currentdict/CSDBase known{CSDBase/CSD get_res/MappedCSA get}{CSA}ifelse + get_csa_by_name 0 get/DeviceCMYK eq ps_level 3 ge and ps_version 3015.007 lt and + AGMCORE_in_rip_sep and{ + [/Indexed[/DeviceN[/Cyan/Magenta/Yellow/Black]/DeviceCMYK{}]HiVal Lookup] + setcolorspace + }if + end + } + {pop}ifelse + }ifelse + imageormask + Indexed_DeviceN{ + end + }if + }{ + 
Operator/imagemask eq{ + imageormask + }{ + indexed_imageormask_lev1 + }ifelse + }ifelse + }ifelse + cleartomark restore + currentdict/_Filters known{_Filters AGMIMG_flushfilters}if + end +}def +/indexed_image_lev2_sep +{ + /indexed_colorspace_dict AGMCORE_gget begin + begin + Indexed_DeviceN not{ + currentcolorspace + dup 1/DeviceGray put + dup 3 + currentcolorspace 2 get 1 add string + 0 1 2 3 AGMCORE_get_ink_data 4 currentcolorspace 3 get length 1 sub + { + dup 4 idiv exch currentcolorspace 3 get exch get 255 exch sub 2 index 3 1 roll put + }for + put setcolorspace + }if + currentdict + Operator/imagemask eq{ + AGMIMG_&imagemask + }{ + use_mask{ + process_mask AGMIMG_&image + }{ + AGMIMG_&image + }ifelse + }ifelse + end end +}def + /OPIimage + { + dup type/dicttype ne{ + 10 dict begin + /DataSource xdf + /ImageMatrix xdf + /BitsPerComponent xdf + /Height xdf + /Width xdf + /ImageType 1 def + /Decode[0 1 def] + currentdict + end + }if + dup begin + /NComponents 1 cdndf + /MultipleDataSources false cdndf + /SkipImageProc{false}cdndf + /Decode[ + 0 + currentcolorspace 0 get/Indexed eq{ + 2 BitsPerComponent exp 1 sub + }{ + 1 + }ifelse + ]cdndf + /Operator/image cdndf + end + /sep_colorspace_dict AGMCORE_gget null eq{ + imageormask + }{ + gsave + dup begin invert_image_samples end + sep_imageormask + grestore + }ifelse + }def +/cachemask_level2 +{ + 3 dict begin + /LZWEncode filter/WriteFilter xdf + /readBuffer 256 string def + /ReadFilter + currentfile + 0(%EndMask)/SubFileDecode filter + /ASCII85Decode filter + /RunLengthDecode filter + def + { + ReadFilter readBuffer readstring exch + WriteFilter exch writestring + not{exit}if + }loop + WriteFilter closefile + end +}def +/spot_alias +{ + /mapto_sep_imageormask + { + dup type/dicttype ne{ + 12 dict begin + /ImageType 1 def + /DataSource xdf + /ImageMatrix xdf + /BitsPerComponent xdf + /Height xdf + /Width xdf + /MultipleDataSources false def + }{ + begin + }ifelse + /Decode[/customcolor_tint AGMCORE_gget 0]def + 
/Operator/image def + /SkipImageProc{false}def + currentdict + end + sep_imageormask + }bdf + /customcolorimage + { + Adobe_AGM_Image/AGMIMG_colorAry xddf + /customcolor_tint AGMCORE_gget + << + /Name AGMIMG_colorAry 4 get + /CSA[/DeviceCMYK] + /TintMethod/Subtractive + /TintProc null + /MappedCSA null + /NComponents 4 + /Components[AGMIMG_colorAry aload pop pop] + >> + setsepcolorspace + mapto_sep_imageormask + }ndf + Adobe_AGM_Image/AGMIMG_&customcolorimage/customcolorimage load put + /customcolorimage + { + Adobe_AGM_Image/AGMIMG_override false put + current_spot_alias{dup 4 get map_alias}{false}ifelse + { + false set_spot_alias + /customcolor_tint AGMCORE_gget exch setsepcolorspace + pop + mapto_sep_imageormask + true set_spot_alias + }{ + //Adobe_AGM_Image/AGMIMG_&customcolorimage get exec + }ifelse + }bdf +}def +/snap_to_device +{ + 6 dict begin + matrix currentmatrix + dup 0 get 0 eq 1 index 3 get 0 eq and + 1 index 1 get 0 eq 2 index 2 get 0 eq and or exch pop + { + 1 1 dtransform 0 gt exch 0 gt/AGMIMG_xSign? exch def/AGMIMG_ySign? 
exch def + 0 0 transform + AGMIMG_ySign?{floor 0.1 sub}{ceiling 0.1 add}ifelse exch + AGMIMG_xSign?{floor 0.1 sub}{ceiling 0.1 add}ifelse exch + itransform/AGMIMG_llY exch def/AGMIMG_llX exch def + 1 1 transform + AGMIMG_ySign?{ceiling 0.1 add}{floor 0.1 sub}ifelse exch + AGMIMG_xSign?{ceiling 0.1 add}{floor 0.1 sub}ifelse exch + itransform/AGMIMG_urY exch def/AGMIMG_urX exch def + [AGMIMG_urX AGMIMG_llX sub 0 0 AGMIMG_urY AGMIMG_llY sub AGMIMG_llX AGMIMG_llY]concat + }{ + }ifelse + end +}def +level2 not{ + /colorbuf + { + 0 1 2 index length 1 sub{ + dup 2 index exch get + 255 exch sub + 2 index + 3 1 roll + put + }for + }def + /tint_image_to_color + { + begin + Width Height BitsPerComponent ImageMatrix + /DataSource load + end + Adobe_AGM_Image begin + /AGMIMG_mbuf 0 string def + /AGMIMG_ybuf 0 string def + /AGMIMG_kbuf 0 string def + { + colorbuf dup length AGMIMG_mbuf length ne + { + dup length dup dup + /AGMIMG_mbuf exch string def + /AGMIMG_ybuf exch string def + /AGMIMG_kbuf exch string def + }if + dup AGMIMG_mbuf copy AGMIMG_ybuf copy AGMIMG_kbuf copy pop + } + addprocs + {AGMIMG_mbuf}{AGMIMG_ybuf}{AGMIMG_kbuf}true 4 colorimage + end + }def + /sep_imageormask_lev1 + { + begin + MappedCSA 0 get dup/DeviceRGB eq exch/DeviceCMYK eq or has_color not and{ + { + 255 mul round cvi GrayLookup exch get + }currenttransfer addprocs settransfer + currentdict imageormask + }{ + /sep_colorspace_dict AGMCORE_gget/Components known{ + MappedCSA 0 get/DeviceCMYK eq{ + Components aload pop + }{ + 0 0 0 Components aload pop 1 exch sub + }ifelse + Adobe_AGM_Image/AGMIMG_k xddf + Adobe_AGM_Image/AGMIMG_y xddf + Adobe_AGM_Image/AGMIMG_m xddf + Adobe_AGM_Image/AGMIMG_c xddf + AGMIMG_y 0.0 eq AGMIMG_m 0.0 eq and AGMIMG_c 0.0 eq and{ + {AGMIMG_k mul 1 exch sub}currenttransfer addprocs settransfer + currentdict imageormask + }{ + currentcolortransfer + {AGMIMG_k mul 1 exch sub}exch addprocs 4 1 roll + {AGMIMG_y mul 1 exch sub}exch addprocs 4 1 roll + {AGMIMG_m mul 1 exch sub}exch 
addprocs 4 1 roll + {AGMIMG_c mul 1 exch sub}exch addprocs 4 1 roll + setcolortransfer + currentdict tint_image_to_color + }ifelse + }{ + MappedCSA 0 get/DeviceGray eq{ + {255 mul round cvi ColorLookup exch get 0 get}currenttransfer addprocs settransfer + currentdict imageormask + }{ + MappedCSA 0 get/DeviceCMYK eq{ + currentcolortransfer + {255 mul round cvi ColorLookup exch get 3 get 1 exch sub}exch addprocs 4 1 roll + {255 mul round cvi ColorLookup exch get 2 get 1 exch sub}exch addprocs 4 1 roll + {255 mul round cvi ColorLookup exch get 1 get 1 exch sub}exch addprocs 4 1 roll + {255 mul round cvi ColorLookup exch get 0 get 1 exch sub}exch addprocs 4 1 roll + setcolortransfer + currentdict tint_image_to_color + }{ + currentcolortransfer + {pop 1}exch addprocs 4 1 roll + {255 mul round cvi ColorLookup exch get 2 get}exch addprocs 4 1 roll + {255 mul round cvi ColorLookup exch get 1 get}exch addprocs 4 1 roll + {255 mul round cvi ColorLookup exch get 0 get}exch addprocs 4 1 roll + setcolortransfer + currentdict tint_image_to_color + }ifelse + }ifelse + }ifelse + }ifelse + end + }def + /sep_image_lev1_sep + { + begin + /sep_colorspace_dict AGMCORE_gget/Components known{ + Components aload pop + Adobe_AGM_Image/AGMIMG_k xddf + Adobe_AGM_Image/AGMIMG_y xddf + Adobe_AGM_Image/AGMIMG_m xddf + Adobe_AGM_Image/AGMIMG_c xddf + {AGMIMG_c mul 1 exch sub} + {AGMIMG_m mul 1 exch sub} + {AGMIMG_y mul 1 exch sub} + {AGMIMG_k mul 1 exch sub} + }{ + {255 mul round cvi ColorLookup exch get 0 get 1 exch sub} + {255 mul round cvi ColorLookup exch get 1 get 1 exch sub} + {255 mul round cvi ColorLookup exch get 2 get 1 exch sub} + {255 mul round cvi ColorLookup exch get 3 get 1 exch sub} + }ifelse + AGMCORE_get_ink_data currenttransfer addprocs settransfer + currentdict imageormask_sys + end + }def + /indexed_imageormask_lev1 + { + /indexed_colorspace_dict AGMCORE_gget begin + begin + currentdict + MappedCSA 0 get dup/DeviceRGB eq exch/DeviceCMYK eq or has_color not and{ + {HiVal mul 
round cvi GrayLookup exch get HiVal div}currenttransfer addprocs settransfer + imageormask + }{ + MappedCSA 0 get/DeviceGray eq{ + {HiVal mul round cvi Lookup exch get HiVal div}currenttransfer addprocs settransfer + imageormask + }{ + MappedCSA 0 get/DeviceCMYK eq{ + currentcolortransfer + {4 mul HiVal mul round cvi 3 add Lookup exch get HiVal div 1 exch sub}exch addprocs 4 1 roll + {4 mul HiVal mul round cvi 2 add Lookup exch get HiVal div 1 exch sub}exch addprocs 4 1 roll + {4 mul HiVal mul round cvi 1 add Lookup exch get HiVal div 1 exch sub}exch addprocs 4 1 roll + {4 mul HiVal mul round cvi Lookup exch get HiVal div 1 exch sub}exch addprocs 4 1 roll + setcolortransfer + tint_image_to_color + }{ + currentcolortransfer + {pop 1}exch addprocs 4 1 roll + {3 mul HiVal mul round cvi 2 add Lookup exch get HiVal div}exch addprocs 4 1 roll + {3 mul HiVal mul round cvi 1 add Lookup exch get HiVal div}exch addprocs 4 1 roll + {3 mul HiVal mul round cvi Lookup exch get HiVal div}exch addprocs 4 1 roll + setcolortransfer + tint_image_to_color + }ifelse + }ifelse + }ifelse + end end + }def + /indexed_image_lev1_sep + { + /indexed_colorspace_dict AGMCORE_gget begin + begin + {4 mul HiVal mul round cvi Lookup exch get HiVal div 1 exch sub} + {4 mul HiVal mul round cvi 1 add Lookup exch get HiVal div 1 exch sub} + {4 mul HiVal mul round cvi 2 add Lookup exch get HiVal div 1 exch sub} + {4 mul HiVal mul round cvi 3 add Lookup exch get HiVal div 1 exch sub} + AGMCORE_get_ink_data currenttransfer addprocs settransfer + currentdict imageormask_sys + end end + }def +}if +end +systemdict/setpacking known +{setpacking}if +%%EndResource +currentdict Adobe_AGM_Utils eq {end} if +%%EndProlog +%%BeginSetup +Adobe_AGM_Utils begin +2 2010 Adobe_AGM_Core/ds gx +Adobe_CoolType_Core/ds get exec Adobe_AGM_Image/ds gx +currentdict Adobe_AGM_Utils eq {end} if +%%EndSetup +%%Page: (Page 1) 1 +%%EndPageComments +%%BeginPageSetup +%ADOBeginClientInjection: PageSetup Start "AI11EPS" 
+%AI12_RMC_Transparency: Balance=75 RasterRes=300 GradRes=150 Text=0 Stroke=1 Clip=1 OP=0 +%ADOEndClientInjection: PageSetup Start "AI11EPS" +Adobe_AGM_Utils begin +Adobe_AGM_Core/ps gx +Adobe_AGM_Utils/capture_cpd gx +Adobe_CoolType_Core/ps get exec Adobe_AGM_Image/ps gx +%ADOBeginClientInjection: PageSetup End "AI11EPS" +/currentdistillerparams where {pop currentdistillerparams /CoreDistVersion get 5000 lt} {true} ifelse { userdict /AI11_PDFMark5 /cleartomark load put userdict /AI11_ReadMetadata_PDFMark5 {flushfile cleartomark } bind put} { userdict /AI11_PDFMark5 /pdfmark load put userdict /AI11_ReadMetadata_PDFMark5 {/PUT pdfmark} bind put } ifelse [/NamespacePush AI11_PDFMark5 [/_objdef {ai_metadata_stream_123} /type /stream /OBJ AI11_PDFMark5 [{ai_metadata_stream_123} currentfile 0 (% &&end XMP packet marker&&) /SubFileDecode filter AI11_ReadMetadata_PDFMark5 + + + + application/postscript + + + Web + + + + + Adobe Illustrator CS3 + 2017-04-03T09:52:22+02:00 + 2017-04-03T10:02:31+02:00 + 2017-04-03T10:02:31+02:00 + + + + 256 + 76 + JPEG + /9j/4AAQSkZJRgABAgEASABIAAD/7QAsUGhvdG9zaG9wIDMuMAA4QklNA+0AAAAAABAASAAAAAEA AQBIAAAAAQAB/+4ADkFkb2JlAGTAAAAAAf/bAIQABgQEBAUEBgUFBgkGBQYJCwgGBggLDAoKCwoK DBAMDAwMDAwQDA4PEA8ODBMTFBQTExwbGxscHx8fHx8fHx8fHwEHBwcNDA0YEBAYGhURFRofHx8f Hx8fHx8fHx8fHx8fHx8fHx8fHx8fHx8fHx8fHx8fHx8fHx8fHx8fHx8fHx8f/8AAEQgATAEAAwER AAIRAQMRAf/EAaIAAAAHAQEBAQEAAAAAAAAAAAQFAwIGAQAHCAkKCwEAAgIDAQEBAQEAAAAAAAAA AQACAwQFBgcICQoLEAACAQMDAgQCBgcDBAIGAnMBAgMRBAAFIRIxQVEGE2EicYEUMpGhBxWxQiPB UtHhMxZi8CRygvElQzRTkqKyY3PCNUQnk6OzNhdUZHTD0uIIJoMJChgZhJRFRqS0VtNVKBry4/PE 1OT0ZXWFlaW1xdXl9WZ2hpamtsbW5vY3R1dnd4eXp7fH1+f3OEhYaHiImKi4yNjo+Ck5SVlpeYmZ qbnJ2en5KjpKWmp6ipqqusra6voRAAICAQIDBQUEBQYECAMDbQEAAhEDBCESMUEFURNhIgZxgZEy obHwFMHR4SNCFVJicvEzJDRDghaSUyWiY7LCB3PSNeJEgxdUkwgJChgZJjZFGidkdFU38qOzwygp 0+PzhJSktMTU5PRldYWVpbXF1eX1RlZmdoaWprbG1ub2R1dnd4eXp7fH1+f3OEhYaHiImKi4yNjo +DlJWWl5iZmpucnZ6fkqOkpaanqKmqq6ytrq+v/aAAwDAQACEQMRAD8A9U4q7FWGebvzi/LjynK9 
vrOtQreps1jAGuJwf5XSIPwP+vTFXnl1/wA5gfl9HKUt9L1SdB/uwpAgPyBmJp88Vdbf85gfl88g W40rVYUP7apbvT5j1lNPlirNfLn5+flTr8iQ22uxWty5AEF8rWpqegDyhYyT4BsVegKyuodCGVgC rA1BB6EHFW8VdirsVdirsVSDzh588peT7EXnmHUYrKN6+jEavNKR2jiQM7e9BQd8VeRX/wDzmF5K imKWOjajcxAkepJ6MNfcAPIfvpiqceX/APnKv8r9TmWG+N5oztt6l3CHir2+OBpSPmVAxV61pmqa ZqllHfaZdw3tlMKxXNvIssbfJkJBxVFYq7FXYq7FXYq7FXYq7FXzR+fv/OQfmXSPM1x5V8pzrYiw CrqGohEkleZ1DGOPmGVFQNQmleXhTdV4ddeefzL8xSmCbW9V1Fnr/oyTzupr1pEh4/hiqBeLzl5S 1S2vZYr/AEPU2Hr2s0qy20zLy+2vIKzKWHyOKvuX8oPOV15x/LzSNdvQBfTo8V5xFFMsEjRM4Hbn w5U7VxVlGp6nY6Xp1zqV/MtvZWcTz3MzdEjjBZjt7DFXxx+Zf/OSnnXzJfzW/l+6l0LQlJWBLZuF 1Io/blmX41J/lQgDpv1xVgkXm78yNKkj1FNY1e0aYhkuWnuFWQj3ZqP+OKvof8hv+cib3zDqMPlX ze6Nqk/w6bqiqsYnYCvpTKoChyPssoAPTr1VfQFzc29rby3NzKkFvAjSTTSMFREUVZmY7AAdTir5 G/OP/nJPWdfuZ9G8ozyaboKExyX0ZKXN1Q0JDDeOI9gPiI+1144qw7yH+RP5iedYlvbKzWz0yTdd Sv2MUbg94wA0knzVae+KvWdP/wCcNIfTRtR80MZDQyR29oAo8QHeU1+fH6MVbvf+cM7coxsvNTq9 TxWazDAjsCyzLT50xV575u/5xj/M3QIpLm0gh1yzjBZnsGJmCjxgcI5PsnLFVT/nHz81/MegectJ 8vTXUt15f1W4jsTYyMXWGSdgkUkNa8KSMOQGxFe9CFX2lirsVdirsVYb+Z/5peXvy/0M32pP6t/O GXTdOQ/vJ5FH/CopI5v29zQFV8U3V15y/Mvzk887m+1i+NST8MUMS9h2jijH+ZY70anUwwwM5mgG zFilOVDm9R0z/nGW2Nsrajrb/WGALJbwgIppuAXYlt+9B8s5jJ7Tm/TDbzLso9mjqUm8yf8AON2v 2UDz6Hfx6pxFfq0i/V5T7ISzox+ZXMrTe0mORrJHh8+Y/HzasnZ0h9JtiPkX8w/Of5beYWezaSFY 5OOp6NcclilCmjLIh+y9OjgVHy2PRQmJAGJsF15BBovuHyT5x0jzj5as9f0pyba6X44m+3FKu0kT gftI23v1GxySE9xV2KuxV2KuxV2KuxV8D/nj/wCTa80f8xrf8RXFX3D5Qtre38r6SkESQobSBisa hRUxLU0HfFXzD/zmJ/ynGi/9swf9REuKvYP+cYf/ACTulf8AGa7/AOoh8VQ//OU2pXNn+UtzFASq 393bW05Bp+75GUj6TEBirx7/AJxO8o6LrXm7UtT1OBLp9GgjksoZQGUTTOQJeJ2JQJtXoTXrTFX1 pqmlabq1hNp+p2sV5Y3C8JreZQ6MPcHFX59eZbU+U/zB1O20uU10LVJksZSeTD6rcH0iT/MOAr74 q+gP+csvzJltre28jabNxe6QXWtMh39Kv7mAkfzEF2HgF7HFWM/845/kxY67H/i/zHbrcaZHIyaX YSDlHO8Z4vLIpFGjRvhC92BrsN1X1ZDchVCFQFUUHEUAA9sVYV5o/PX8rvLV3LY6jrSPfwkrLa2s clw6sOqsY1ZFbtRmBxVKtN/5yZ/KC9mETarLZsxorXNtMq/SyK6j6cVR35q/mTpGmflTq+v6LqMF 
4biL6lp9zayrIPrFx+7BVkJHKNWMlP8AJxV8QaLq95o2r2erWRUXthMlxas6h1WWJgyNxOx4sAd8 VZZqf54/m1qRJuPM97Hy7WrLaj6Pq6xeOKpYfOP5l3X7865rU/P/AHb9aunrTb7XI16YqmGkfnP+ a2jTA2/mW/cxmhiu5DdKKbceFx6gHTFXu/5Xf85VWGqTJpfneOLTLgj91q8NRbOQOkqHkYyf5gSp P8uKvAfOnmfW/wAyPP8ALfUZ59RnW20y1J2ih5cYY/AUBqx8anK8uWOOJlLkBbKETIgDmX0F+XX5 aaP5StiICZ9TnQC9vm6sBvwjXoicvpPeu2ee9o9pz1Ut9oDkPx1eh0+mjiHmzko3ZyPu/pmuvyb7 aAmHcOPfY/hh2Ts8n/P7yGmq6GPMdjbk6pp1BdemKtJa9yQOvpE8q/y1+joOwNf4eTwpH0S5e/8A a67X4OKPEOYY3/zi/wDma3l3zT/he+f/AHE+YJVWFidor2nGMj2m2jPvx8DnbOleu/mz/wA5KaD5 QuJtH0KJNZ16L4ZmLEWlu/8ALIy7yMO6Idu7A7Yq+cvMH55/mvr0zGfzBdW0chottYN9UQA/sj0e LsP9ZjiqVN5n/M6KMTtq2tpHuVlNxdhfh3NG5dsVT7y5/wA5B/mxoUilNck1GFftW+pD60rfN3/f fc4xV6Lq/wDzmHrMugwRaVokNrrjgi7upnMtuh3FYYxxYkjf42+Hp8WKvJ9W/Nz809duSbnzHqDP IdoLWVreM99orf012+WKoSTzb+Z1lxnk1nW7agDrK1zdx7NsCGLDY4qkGpanqGp301/qFw91e3Dc 57iVizu1KVZj1OKv0V8r/wDKNaT/AMwVv/yaXFXy9/zmCrP570REBZm00BVAqSTcSUAGKvItL84e ffLcccGm6xqWlQAlkt4p5oYiakk+nUId28MVZJrH55edPMPk+78r+ZXj1W1n4PBeMixXMMkThkPK MBXGxDclqQftYqk35bfmNrnkHzEus6UElDoYbyzlr6c0JIJU03BBFVbsfaoxV7Lrf/OY17PpUkOj +XlstTkTit1PcevHExH2ljEcfMg9KmniO2KvFfIlhb+ZPzE0Wz1i4Ii1TUYlvJnqzSNLKCy7ftSs eNfE1xVF+etQvvOn5qapLAwln1XUza2PImnD1Bb26132CBRir7h0PR7LRdGsdIsV4WlhBHbwA9eM ahQT4k0qT44q8n/5yY/Me/8ALPly10TSZWg1HXPUEtyho8VrFxD8SDVWkLhQfAN3pir5FxV2KrhJ IIzGGIjYhmSuxK1AJHiORxV6H+QHl7RfMH5oabpms2iXthJHcPJbSV4s0cLOtaEdGGKvtfSvKXlX SFUaVo9lYBfs/VreKI7GvVFHffFU1xVKPMPlDyv5jtmttc0u21CJhSs0as6+6P8AbQ+6kHFXxz+f X5OD8vtZgudNZ5vLup8vqjSfE8MqbvA7d9jyRj1FR+ySVUb/AM4/+TrS9u5/MtyxZ9Pk9CziUkAS MlXdvGivQD55zHtHrZQiMI/iFl2nZuAE8Z6PoK1QEM1SD02zjwXcEqxi/wAth9OHi8kW4JKOklf9 YA/qpjY7lsN8SylZFBUihHUEH2OD3IL4489aJc+WfO+p2USm1FtdNNYNGxBWFm9SBlYUoQhXp0Oe l9n6jxsEZ3Zrf39ftebz4+CZDJPyZ/JvU/zF1aVpJGs9AsmX9IX4ALlm3EMNdjIw6k7KNz2BzGl9 jeUPy48leUbVINB0qC2kQUa7Kh7l/EvM1XNfCtPADFWSYq+fP+ct7byzZeU7CRdNtV17Ub0Kl8sS LOIYkZpf3gAY/EyCh8cVee/kT/zj+3nSNfMPmIyW/ltXK20EZ4y3bIaNRv2IgRQsNydhTrir6w8v 
eVfLflyzWz0LTbfTrdRQrAgUtTu7/ac+7EnFU0ZVZSrAFSKEHcEHFXyT/wA5bW/lqx8y6PY6Xp1t Z6g1tJd6jNbxLG0glfhF6nACpHpOd998VfUnlf8A5RrSf+YK3/5NLiqP+rW/r/WPST6xx4etxHPj Unjy6036Yq64tra5haC5iSeFxR4pFDqw91aoOKvn78+P+cetBm0O88z+UbNbDUbFGuL3ToBSGeFR ykaOMbJIi70TZvCuKvNf+cVrGyvPzNlhvLeO5i/Rtw3pzIsi1EkVDRgRXFX15/hfy1/1abL/AKR4 v+acVfA/lUBfzK0cLsBrNtSm1KXS4qqflZElx+ZnlgTTCMHVbSQyPvyZJlcLuRu7Dj9OKvvbFXyT /wA5ZO7fmRYqQQqaTCF8DW4uCSMVeK4qzL8qfy7/AMfeZ30P9Ifo3hbSXP1j0fXr6bIvHhzi68+t cVevf9Cd/wDf3f8Acu/7OsVYx+Quj/oX8+rnSPV+sNpZ1G0W448OZgLRcwtW48gOlTir68W5lHWh +eKvhvzb+Z35lWfm/VoYfNWqqlnfXEMKLdzLGFjmZVHphuB28Rir6k/5x/8AzC1Lzp5AS91mQSap Y3Mljc3FAnq8ESRZCFotSkoBp3GKoX/nJvSodQ/KDVJiOcmnTW13DSho3rLCx/5FzNirzP8AIHRo LPyWdRVi0+pzu0m5oqwMYkWladmNff2GcL7RZzLUcHSA+/d3vZ0AMd971S1RWDV36d80QkXPJVGS 3H2iB82p/HJAyUEtD6sOklP9mf64fV3fYu6orJ2evtUHIkFiXzn/AM5LRWy+btNkQj6w9gPVUdeI mk4E/P4h9Gdn7NE+DIdOL9AdL2iBxj3Pqb8qPKFt5T8gaPo8SBJ1gSe+YdXuZgHmJPejHiPYAZ0b r2Hf85DfnDe+Q9ItNP0Xj+n9VDtFO4Di3hjIDS8GqGZiaJUU2JPShVfLEn5s/mfJctct5r1YSMeR VbydY6+0asEA9gMVRF15o87fmZrnl3Qtb1Fr6b6wtlYyuiKyfW5ERmYoq8vsgktvQYq+89J0ux0n S7TS7CMQ2VlClvbxD9mONQqj7hirwD/nI7889d8vasPKPle4+p3kcSy6pfqFMq+qvKOGItXgeBDM 1K7ihG+KvBLb82/zQt7gXEfmvVWkBrxku5pU8f7uRmT8MVSrzX5u8webNXbWNeufreoNGkTTcEjq sa8V+GMKo+gYq/Qbyv8A8o1pP/MFb/8AJpcVfPf/ADlR5885aD5n0rTtE1m60yznsPWljtJDCWkM sici6Uf7KjviqC/5xp/ODzdqHnEeVdf1KbVLS/hlezku3Ms8c8CepQSuS7K0aNVSTvQim9VX1G6I 6MjqGRgVZWFQQdiCDir5K/5xw06PTPz21rTY/wC7soNQt0r1pFcog/4jir62xV+enlf/AMmXpH/b Zt/+opcVdpvp+VPzLtfrYKx6DrUf1gHqFs7oc/p/d4q++gQRUbg9Dir5p/5y58rzi80XzREhaFo2 026euysjNNBt/lc5PuxV864q9k/5xU/8mbN/2zbj/k5Fir68xV8s/lT/AOtNa9/zGax/yefFX1Ni r8+/PX/KbeYf+2lef8n3xV9I/wDOI7N/gfV1r8I1NiB7m3ir+rFWd/nd/wCSo8y/8wn/ABuuKvHP +cdVmPlLUHadni+vMkduacUKxRszLtX4+e+9NvnnE+0tePHbfh5/Eu77M+g+965bKpchhWo6HOeE i7ElEEQJ1Cr9ww7lG5cJouxr8gT+rHhK8JXBwelfpBGCkU8I/wCcmLezTUfLV3InJpFuY5yNmaKJ omVf+SjZ1vsxI1kHT0/p/U6ntIbxPvfXXrxfzDOqdW+R/wDnMCOb/lYOkz0P1Z9JjSN/2TIlzOXA 
9wHWv0Yq8JxVm/5JGMfmx5XMlOP15OorvQ0/HFX3zzT+Yffir4O/PxJ0/N/zMJ93NwjL/qNDG0f/ AAhGKsAxV2Kv0d8tusfl3S43PF0tIFZT1BESgjFXy9/zmF8XnTRJBuh03iD7rPIT/wASGKsO/wCc b3CfnR5dY9B9c6f8wM+Kvt83a9lOKvmT8mTX/nJXzvL0KSau4Hz1FF/42xV9Lm6lPgPkMVfAmhIs f5q6ciCiprsKqPYXgAxVnH/OUfkqXQ/zEk1mKMjTvMCC5jcD4RcRgJOnzJ4yf7LFXt3/ADj/APmN B5t8lQWdzLXXNFRLW9Rj8TxqKQzipJPJBRj/ADA+IxVn/mDQNJ8waPdaPq9ut1p94nCeFqitCCCC NwysAVI6HFXzd5l/5xI1+K8ZvLWr21zZMWKx3/OGZB+yvKJJEkPi1E+WKso/I/8AI3zj5J83ya3r M1k1q1pLbCO3lkeTm7xsDRo0Xj8B/axV7zir5Z/Kn/1prXv+YzWP+Tz4q+psVfn356/5TbzD/wBt K8/5Pvir6Q/5xH/5QnWP+2kf+TEWKs8/O7/yVHmX/mE/43XFXzL+QWuX9t5v/RKThbHUIpGlt2oQ 0kSFlZPBgAa06jr0FOf9otPGWDjr1RPP3uw7OyEZOHoX0chowJ6DrnDgu9RypGBVVAr3phMiWFlo zRg0rU+A3P4YREp4S2GY/s0Hv1/DAQEPm3/nJDUjcedbSyWTlHZWSco9qLLK7s33pwztvZvFWAy/ nS+wfguk7RlcwO4Pp78sPN0HmzyNpOsJKsly8CRX4X9i6iULMpWpK/EOQr+yQe+dC4CUfnJ+VFv+ YWhQQRzraaxp7PJp904JT4wA8UgG/B+K/EASCO+4Kr58m/5xd/NRJmjWKylQGglS5AU+45KrfeMV drP5PebvyvttL876lcWt02napaP9TtGkaiqxl5PI6R0+KMJQA/axV9daZqVlqmnWupWMomsryJJ7 eUdGjkUMp39jiryn87/yKk89XMGtaNcxWmuQRiCWO45LDPErErVkVmV15Gh4muw2pirxqP8A5xe/ NR5QjQ2Uan/djXI4j/gVZvwxVj35nflJrP5exaT+lLy3uptUWc8bbmUjMBSo5OELVEo/ZGKvt7Tv +Ofa/wDGGP8A4iMVfMf/ADl5/wApPoP/ADBSf8nTirC/+cdf/JyeX/8Ao8/6gZ8VfbGKvm38l/8A 1o/zz89W/wC6lHir6SxV8GaL/wCTYsP+29F/1GDFX2z+Zv5e6X588q3GiXpEU1fVsLynJoLhQQrg dxvxYdwcVfE80Hnv8q/Og5h9M1qyJ4OPiiniJpUV+GWJ6f7RGyr6F8kf85S+T9Ugjg80RvomogAP OqvNaO2wqpQNIlTvRloB+0cVelWn5j/l9eMiW3mbS5ZJPsRC8g5nav2OfL8MVbl/Mj8vIneOXzRp KSRkq6NfWwYMDQgjnWoxVjeuf85B/lRpHqKdZF/Oi8hDYxvPy9llAENfm+KvAfys88eXbf8AO/UP NGo3I07Sr+XUZ45LgUKi5dnjV+HP4vipir6M/wCV3flR/wBTLaf8P/zTir4t823dte+a9avLVxLb XN/czQSCtGSSZmVhXfcHFXun/ONX5heS/LXlTVLPXdWh0+5mvzNFHLyqyGFF5CgI6qcVZj+bP5s/ lzq35c69p2na9b3N9c2/CCBOfJ25qaCq4q+UdF1i+0bVbXVLB/Tu7SQSRE1oadVYClVYbEeGVZ8M csDCXKTPHMxkCOYfX+havbazo1lqtsf3N5CkygEHiWHxISO6mqn3zzLPhOLJKB5xNPT45iURIdUz jkjApICw7Dr+GQBLPdEIXI+AKi/efuG2Jr3sTTHPP3nvS/J2iS3l1Ksl9IpFhZVHOWTt8I34L1Zv 
40zM0GhnqcgjEenqe5x9RnjjjfV8t6Vo/m7z75knisYpdV1u5WW6mNQCViXkxJNFUbBVHjRR2z0X FijjiIxFRDz0pGRs80//ACv/ADW8xflvrFxGsBuNOmfhqekzVjPOM8eSEiscq9Dtv0I2BFjF9Q+W fz3/ACv163Eia1DpswAMltqTLaOhPblIfSb/AGDnFWTSedvJkdqLuTXtOS1IVhO13AI+LU4nmXpQ 12xV5T+ff5m/lzqf5faroFprtve6pdCF7SOzrcqWinjkNZY6xL8KkbvX2PTFXlH5N/nzqHkaP9Ea pDJqPltmLxwxkevasx5MYORCsrE1aMkCu4I35KvpTRfzk/LDV7ZZ7bzHZQ8tjFeSraSA+HCf0yfo 2xVOLvzv5Ls4xJd6/p1ujGitLdwICetAWcb4q8A/5yZ8/eQfMmiabYaLqceo6vY3nNjAHaNYJImD 0lp6bVcJsrHFXrdh+dn5VJY26t5ktVZYkDKeYIIUbfZxV4F/zkv5w8s+ZvMGjz6DqEeoQ29o6TPF yorGQkA1A7YqxX8j9b0nQ/zQ0XVNWuUs9Pt/rXrXMleK87SaNa0B6swGKvq7/ld35Uf9TLaf8P8A 804q8J/Kzzv5U0r88/N2u6jqUVtpF+dS+p3j8uEnrX6Sx0oCfiRSRir3b/ld35Uf9TLaf8P/AM04 q+QvLciXf5qaVJbH1UuNdgaEqD8Qe8UrQe9cVfoNiqQecvInlXzlpn6O8wWKXcS1MMv2ZomP7UUi 0ZTtv2PeuKvnrzX/AM4e6pHK8vlTWoriAklbTUQYpFHh60Surn/YLirA7j/nGf8AOaKUomhpOo6S x3lmFPy9SVG/DFVsX/ONP50O4VtAWMHq7XlkQP8AgZmP4YqyPRf+cRfzCu2VtTvtP02I/aHN7iUf JUUIf+RmKovXv+cP/OVvcKNE1ey1C2IFXuRJayhqb/AonWlf8vFUr/6FK/NP/fumf9JEn/VLFWTe V/8AnD3UpLe6fzPrMVtM0TLZRaeGmCyn7LzNKsdVXuijf+YYqx66/wCcRvzLjndILvS54Qfgl9aV CR2qpi2OKqa/84k/mkWAM+lqCaFjcS0HvtCcVTTzT/zibr+leTo7/Srz9L+YoHZ76wiXhG0PHpbc qM7oR3pyB2AIoyryny95486eTbl7S1mkt1ikrcaZdISnMbMrRvRkJ78SpzB1fZuHUfWN+/r+Pe34 dTPH9J2ehQ/85KTLEom8vK8oHxul2UUn2UwuR9+aKXsuL2yf7H9rnjtQ9Y/b+xL9Y/5yK8x3EZTS dOg07kpBlkZrmRSehTaJNv8AKU5fg9msUTc5GX2fr+9rn2nM/SK+1h+jeXvPv5keYWWzhn1fUZCP rF05pFEpPV5DRI0HZfoUZv8ADghijwwFRdfOZkbJsvsX8nfyf0n8u9GdA63mu3oU6lqFKA8dxFED usa1+bHc9gLWKD/M78gPJvnqR7+jaTrrDfUbZQRIe3rxGgkp4gq3virwLXv+cUfzQsJW/RgtNZgr +7aGZYJCP8pLj01B+TnFWPr/AM47/nK03pDy3JyqRU3FoF2/yzNx/HFWS6D/AM4m/mXfOp1OSy0i E/b9SX15R8lhDoT/ALMYqz/U/wDnDvQW0OGLTNcuI9aiBMt1cRq1vMT0HpKQ0Q9+bfTirzHV/wDn Fv8ANyxlK2tla6og6SWt1EgI+VyYG/DFUrt/+cdfzlnfivlx06VaS4tEAr/rSiv0YqyXT/8AnEj8 y7izea5utNspwAY7aWaR2J7hmijdF28Cf44qp/8AQpX5p/790z/pIk/6pYqjtG/5xC8+T30a6tqO n2VjUetLC8k8vHvwQpGpPzYYqjvNf/OIPmOHUHfytqdtdaax+CO/ZorhPZmjjZH+fw/LFUj/AOhS 
vzT/AN+6Z/0kSf8AVLFVWT/nEX8zVhR1vNJeRvtRCecFfpMAX8cVUv8AoUr80/8Afumf9JEn/VLF XpX5N/8AOM9z5W1+DzH5ovILq+sjzsLG05vEkhFBJI8ioWZK7KFpXeuKvfsVdirsVdirsVdirsVd irsVdirsVdirzv8ANz/lTH1Bf+VhfU+VP9H5cvrtP+KvQ/0jj8vh8cVfNGo/9CxfWm9D/Fvp9vq3 1H0/o9f959+Ksr8k/wDQpX1iP63+kPXqvp/pv1eHL/K+qfufnz+HFX035Y/wt+h4f8MfUv0P/un9 Hel9XrQVp6Pw18cVTXFXYq7FXYq7FXYq7FXYq7FXYq7FXYq7FXYq7FXYq//Z + + + + + + uuid:2B68CB7CE519E7119A76BA5BC76AA065 + uuid:21AD93F6E619E7119A76BA5BC76AA065 + + uuid:2A68CB7CE519E7119A76BA5BC76AA065 + uuid:2968CB7CE519E7119A76BA5BC76AA065 + + + + Web + + + + 14400.000000 + 14400.000000 + Pixels + + 1 + False + False + + + Cyan + Magenta + Yellow + Black + + + + + + Groupe de nuances par défaut + 0 + + + + Blanc + RGB + PROCESS + 255 + 255 + 255 + + + Noir + RGB + PROCESS + 0 + 0 + 0 + + + Rouge RVB + RGB + PROCESS + 255 + 0 + 0 + + + Jaune RVB + RGB + PROCESS + 255 + 255 + 0 + + + Vert RVB + RGB + PROCESS + 0 + 255 + 0 + + + Cyan RVB + RGB + PROCESS + 0 + 255 + 255 + + + Bleu RVB + RGB + PROCESS + 0 + 0 + 255 + + + Magenta RVB + RGB + PROCESS + 255 + 0 + 255 + + + R=193 V=39 B=45 + RGB + PROCESS + 193 + 39 + 45 + + + R=237 V=28 B=36 + RGB + PROCESS + 237 + 28 + 36 + + + R=241 V=90 B=36 + RGB + PROCESS + 241 + 90 + 36 + + + R=247 V=147 B=30 + RGB + PROCESS + 247 + 147 + 30 + + + R=251 V=176 B=59 + RGB + PROCESS + 251 + 176 + 59 + + + R=252 V=238 B=33 + RGB + PROCESS + 252 + 238 + 33 + + + R=217 V=224 B=33 + RGB + PROCESS + 217 + 224 + 33 + + + R=140 V=198 B=63 + RGB + PROCESS + 140 + 198 + 63 + + + R=57 V=181 B=74 + RGB + PROCESS + 57 + 181 + 74 + + + R=0 V=146 B=69 + RGB + PROCESS + 0 + 146 + 69 + + + R=0 V=104 B=55 + RGB + PROCESS + 0 + 104 + 55 + + + R=34 V=181 B=115 + RGB + PROCESS + 34 + 181 + 115 + + + R=0 V=169 B=157 + RGB + PROCESS + 0 + 169 + 157 + + + R=41 V=171 B=226 + RGB + PROCESS + 41 + 171 + 226 + + + R=0 V=113 B=188 + RGB + PROCESS + 0 + 113 + 188 + + + R=46 V=49 B=146 + RGB + PROCESS + 46 + 49 + 146 + + + R=27 V=20 B=100 + RGB + PROCESS + 27 + 20 + 100 
+ + + R=102 V=45 B=145 + RGB + PROCESS + 102 + 45 + 145 + + + R=147 V=39 B=143 + RGB + PROCESS + 147 + 39 + 143 + + + R=158 V=0 B=93 + RGB + PROCESS + 158 + 0 + 93 + + + R=212 V=20 B=90 + RGB + PROCESS + 212 + 20 + 90 + + + R=237 V=30 B=121 + RGB + PROCESS + 237 + 30 + 121 + + + R=199 V=178 B=153 + RGB + PROCESS + 199 + 178 + 153 + + + R=153 V=134 B=117 + RGB + PROCESS + 153 + 134 + 117 + + + R=115 V=99 B=87 + RGB + PROCESS + 115 + 99 + 87 + + + R=83 V=71 B=65 + RGB + PROCESS + 83 + 71 + 65 + + + R=198 V=156 B=109 + RGB + PROCESS + 198 + 156 + 109 + + + R=166 V=124 B=82 + RGB + PROCESS + 166 + 124 + 82 + + + R=140 V=98 B=57 + RGB + PROCESS + 140 + 98 + 57 + + + R=117 V=76 B=36 + RGB + PROCESS + 117 + 76 + 36 + + + R=96 V=56 B=19 + RGB + PROCESS + 96 + 56 + 19 + + + R=66 V=33 B=11 + RGB + PROCESS + 66 + 33 + 11 + + + + + + Groupe de couleurs Web + 1 + + + + R=236 V=28 B=36 + RGB + PROCESS + 236 + 28 + 36 + + + R=0 V=169 B=157 + RGB + PROCESS + 0 + 169 + 157 + + + R=102 V=45 B=145 + RGB + PROCESS + 102 + 45 + 145 + + + R=139 V=146 B=152 1 + RGB + PROCESS + 139 + 146 + 152 + + + + + + Niveaux de gris + 1 + + + + N=100 + GRAY + PROCESS + 255 + + + N=90 + GRAY + PROCESS + 229 + + + N=80 + GRAY + PROCESS + 204 + + + N=70 + GRAY + PROCESS + 178 + + + N=60 + GRAY + PROCESS + 153 + + + N=50 + GRAY + PROCESS + 127 + + + N=40 + GRAY + PROCESS + 101 + + + N=30 + GRAY + PROCESS + 76 + + + N=20 + GRAY + PROCESS + 50 + + + N=10 + GRAY + PROCESS + 25 + + + N=5 + GRAY + PROCESS + 12 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + % &&end XMP packet marker&& [{ai_metadata_stream_123} <> /PUT AI11_PDFMark5 [/Document 1 dict begin /Metadata {ai_metadata_stream_123} def currentdict end /BDC AI11_PDFMark5 +%ADOEndClientInjection: PageSetup End "AI11EPS" +%%EndPageSetup +1 -1 scale 0 -840 translate +pgsv +[1 0 0 1 0 0 ]ct +gsave +np +gsave +0 0 mo +0 840 li +1096 840 li +1096 0 li +cp +clp +[1 0 0 1 0 0 ]ct +904.575 334.257 mo +904.575 320.361 898.182 311.771 892.811 
307.004 cv +885.783 300.775 877.066 297.482 867.603 297.482 cv +858.135 297.482 849.418 300.775 842.393 307.006 cv +837.025 311.763 830.63 320.354 830.63 334.257 cv +830.63 342.282 832.76 348.533 835.592 353.299 cv +832.309 351 828.712 349.018 824.782 347.397 cv +817.438 344.379 809.047 342.776 799.205 342.523 cv +799.205 314.912 li +799.205 309.227 796.713 303.826 792.384 300.14 cv +788.849 297.129 784.381 295.508 779.801 295.508 cv +778.775 295.508 777.742 295.588 776.715 295.754 cv +747.302 300.492 li +737.896 302.006 730.983 310.123 730.983 319.648 cv +730.983 344.484 li +730.277 344.316 729.57 344.156 728.861 344.004 cv +722.946 342.736 716.463 342.093 709.595 342.093 cv +698.512 342.093 688.313 344.105 679.273 348.078 cv +675.158 349.891 671.305 352.037 667.71 354.475 cv +665.828 352.313 663.445 350.541 660.656 349.357 cv +655.197 347.042 649.498 345.239 643.722 344.003 cv +637.803 342.734 631.32 342.092 624.455 342.092 cv +613.37 342.092 603.172 344.105 594.137 348.078 cv +585.168 352.024 577.379 357.498 570.993 364.338 cv +570.505 364.861 570.043 365.404 569.571 365.941 cv +567.759 359.079 564.273 352.445 559.656 347.231 cv +554.654 334.644 545.437 324.396 532.739 317.38 cv +522.827 311.904 510.623 308.563 500.098 308.44 cv +499.868 308.439 499.64 308.438 499.413 308.438 cv +477.133 308.438 463.671 320.701 457.115 331.523 cv +448.029 337.533 440.207 347.509 437.402 358.122 cv +436.752 358.81 436.147 359.513 435.567 360.225 cv +430.352 354.689 423.794 350.346 416.015 347.283 cv +407.797 344.057 398.276 342.486 386.911 342.486 cv +377.26 342.486 367.872 343.197 359 344.601 cv +353.201 345.52 348.15 346.486 343.704 347.52 cv +344.549 345.173 li +347.646 336.569 344.319 326.972 336.56 322.135 cv +334.878 321.084 332.65 319.895 329.551 318.387 cv +326.144 316.729 322.15 315.226 317.317 313.781 cv +312.746 312.423 307.58 311.275 301.503 310.271 cv +295.283 309.251 288.347 308.732 280.892 308.732 cv +269.104 308.732 257.795 310.749 247.286 314.723 cv +236.409 
318.832 226.785 324.981 218.687 332.993 cv +210.643 340.949 204.265 350.688 199.728 361.941 cv +195.257 373.034 192.99 385.702 192.99 399.594 cv +192.99 413.145 195.016 425.551 199.007 436.455 cv +203.167 447.837 209.248 457.667 217.078 465.667 cv +224.963 473.729 234.529 479.92 245.506 484.068 cv +256.024 488.043 267.729 490.059 280.299 490.059 cv +293.684 490.059 304.816 489.127 314.333 487.21 cv +321.051 485.856 326.354 484.569 330.603 483.257 cv +333.919 485.905 338.121 487.492 342.695 487.492 cv +372.106 487.492 li +378.378 487.492 383.956 484.516 387.503 479.898 cv +391.05 484.516 396.628 487.492 402.899 487.492 cv +432.311 487.492 li +438.632 487.492 444.246 484.467 447.789 479.788 cv +453.011 488.951 460.835 493.996 468.687 496.009 cv +475.153 500.054 484.721 503.735 498.12 503.737 cv +505.128 503.737 512.654 502.694 520.491 500.64 cv +540.638 495.358 557.324 483.058 564.939 468.345 cv +565.798 467.53 566.584 466.669 567.328 465.78 cv +567.969 466.562 568.627 467.331 569.308 468.082 cv +575.643 475.081 583.667 480.618 593.157 484.54 cv +602.332 488.334 613.062 490.256 625.048 490.256 cv +632.506 490.256 639.527 489.592 645.93 488.281 cv +652.391 486.952 657.784 485.354 662.418 483.393 cv +664.66 482.444 666.634 481.103 668.309 479.489 cv +671.426 481.392 674.758 483.078 678.293 484.538 cv +687.468 488.332 698.197 490.256 710.188 490.256 cv +717.642 490.256 724.662 489.592 731.066 488.281 cv +735.035 487.465 738.597 486.545 741.831 485.505 cv +744.413 486.776 747.316 487.492 750.389 487.492 cv +779.801 487.492 li +786.072 487.492 791.649 484.518 795.196 479.901 cv +798.745 484.518 804.321 487.492 810.593 487.492 cv +840.004 487.492 li +842.283 487.492 844.469 487.096 846.5 486.374 cv +848.531 487.096 850.717 487.492 852.995 487.492 cv +882.407 487.492 li +893.124 487.492 901.811 478.805 901.811 468.089 cv +901.811 364.26 li +901.811 360.595 900.797 357.169 899.031 354.245 cv +902.149 349.367 904.575 342.82 904.575 334.257 cv +cp +false sop +/0 +[/DeviceCMYK] 
/CSA add_res +0 0 0 0.9 cmyk +f +852.995 468.089 mo +882.407 468.089 li +882.407 364.26 li +852.995 364.26 li +852.995 468.089 li +cp +855.265 346.988 mo +858.75 350.082 862.865 351.627 867.602 351.627 cv +872.34 351.627 876.451 350.082 879.939 346.988 cv +883.425 343.897 885.17 339.654 885.17 334.256 cv +885.17 328.861 883.425 324.618 879.939 321.524 cv +876.451 318.434 872.34 316.886 867.602 316.886 cv +862.865 316.886 858.75 318.434 855.265 321.524 cv +851.776 324.618 850.034 328.861 850.034 334.256 cv +850.034 339.654 851.776 343.897 855.265 346.988 cv +cp +830.826 375.116 mo +827.471 370.906 822.995 367.65 817.403 365.346 cv +811.809 363.045 804.801 361.891 796.381 361.891 cv +793.485 361.891 790.49 362.188 787.4 362.779 cv +784.306 363.371 781.773 363.998 779.8 364.654 cv +779.8 314.912 li +750.389 319.648 li +750.389 468.089 li +779.8 468.089 li +779.8 389.131 li +781.642 388.607 783.714 388.113 786.019 387.65 cv +788.319 387.191 790.786 386.96 793.42 386.96 cv +799.999 386.96 804.505 388.934 806.941 392.882 cv +809.375 396.83 810.594 403.54 810.594 413.016 cv +810.594 468.089 li +840.005 468.089 li +840.005 409.463 li +840.005 402.356 839.314 395.91 837.932 390.118 cv +836.55 384.329 834.182 379.33 830.826 375.116 cv +cp +724.597 444.6 mo +720.516 445.259 716.7 445.586 713.147 445.586 cv +703.146 445.586 696.138 443.02 692.125 437.887 cv +688.11 432.755 686.104 425.52 686.104 416.174 cv +686.104 407.359 688.242 400.253 692.521 394.855 cv +696.795 389.461 703.278 386.763 711.963 386.763 cv +716.041 386.763 719.727 387.157 723.017 387.947 cv +726.305 388.736 729.334 389.658 732.097 390.711 cv +738.216 367.221 li +733.873 365.379 729.398 363.964 724.794 362.977 cv +720.186 361.99 715.121 361.496 709.595 361.496 cv +701.171 361.496 693.671 362.945 687.091 365.839 cv +680.51 368.735 674.918 372.648 670.313 377.584 cv +665.705 382.52 662.187 388.311 659.753 394.955 cv +657.316 401.601 656.101 408.673 656.101 416.174 cv +656.101 423.94 657.119 431.146 659.161 
437.788 cv +661.199 444.435 664.423 450.193 668.833 455.061 cv +673.24 459.931 678.865 463.779 685.71 466.607 cv +692.551 469.436 700.711 470.852 710.187 470.852 cv +716.37 470.852 722.03 470.324 727.162 469.272 cv +732.294 468.218 736.569 466.972 739.992 465.522 cv +735.848 441.44 li +732.424 442.89 728.673 443.942 724.597 444.6 cv +cp +639.458 444.6 mo +635.378 445.259 631.562 445.586 628.01 445.586 cv +618.008 445.586 610.999 443.02 606.987 437.887 cv +602.972 432.755 600.967 425.52 600.967 416.174 cv +600.967 407.359 603.104 400.253 607.382 394.855 cv +611.656 389.461 618.141 386.763 626.824 386.763 cv +630.902 386.763 634.588 387.157 637.879 387.947 cv +641.166 388.736 644.195 389.658 646.959 390.711 cv +653.078 367.221 li +648.735 365.379 644.26 363.964 639.655 362.977 cv +635.048 361.99 629.983 361.496 624.456 361.496 cv +616.033 361.496 608.532 362.945 601.953 365.839 cv +595.372 368.735 589.78 372.648 585.176 377.584 cv +580.566 382.52 577.048 388.311 574.614 394.955 cv +572.178 401.601 570.963 408.673 570.963 416.174 cv +570.963 423.94 571.98 431.146 574.022 437.788 cv +576.061 444.435 579.284 450.193 583.694 455.061 cv +588.103 459.931 593.728 463.779 600.572 466.607 cv +607.413 469.436 615.573 470.852 625.048 470.852 cv +631.232 470.852 636.892 470.324 642.023 469.272 cv +647.156 468.218 651.431 466.972 654.854 465.522 cv +650.709 441.44 li +647.286 442.89 643.535 443.942 639.458 444.6 cv +cp +422.836 375.116 mo +419.413 370.906 414.773 367.65 408.92 365.346 cv +403.063 363.045 395.725 361.891 386.911 361.891 cv +378.226 361.891 369.935 362.518 362.039 363.766 cv +354.143 365.019 347.695 366.366 342.695 367.813 cv +342.695 468.089 li +372.106 468.089 li +372.106 387.947 li +373.947 387.685 376.054 387.453 378.422 387.256 cv +380.791 387.059 383.027 386.96 385.134 386.96 cv +391.975 386.96 396.647 388.934 399.149 392.882 cv +401.647 396.83 402.899 403.54 402.899 413.016 cv +402.899 468.089 li +432.311 468.089 li +432.311 409.463 li +432.311 402.356 
431.586 395.91 430.14 390.118 cv +428.689 384.329 426.256 379.33 422.836 375.116 cv +cp +297.472 443.414 mo +295.628 443.809 293.49 444.073 291.057 444.203 cv +288.62 444.336 285.693 444.4 282.273 444.4 cv +275.957 444.4 270.429 443.316 265.691 441.145 cv +260.954 438.973 257.007 435.913 253.849 431.966 cv +250.69 428.018 248.322 423.314 246.743 417.852 cv +245.163 412.393 244.374 406.305 244.374 399.594 cv +244.374 385.775 247.563 374.889 253.947 366.924 cv +260.328 358.964 270.692 354.982 285.036 354.982 cv +291.483 354.982 297.438 355.806 302.9 357.449 cv +308.359 359.097 313.196 361.037 317.408 363.272 cv +326.291 338.6 li +325.237 337.941 323.494 337.02 321.06 335.836 cv +318.624 334.65 315.534 333.5 311.783 332.381 cv +308.032 331.265 303.558 330.277 298.36 329.42 cv +293.16 328.566 287.337 328.137 280.891 328.137 cv +271.416 328.137 262.5 329.717 254.145 332.875 cv +245.787 336.033 238.517 340.672 232.333 346.791 cv +226.146 352.91 221.279 360.38 217.726 369.195 cv +214.172 378.012 212.396 388.145 212.396 399.594 cv +212.396 410.912 214.006 420.979 217.232 429.794 cv +220.455 438.612 225.029 446.048 230.951 452.099 cv +236.873 458.153 244.009 462.759 252.368 465.917 cv +260.723 469.075 270.035 470.654 280.299 470.654 cv +292.272 470.654 302.339 469.83 310.5 468.187 cv +318.658 466.543 324.58 464.997 328.265 463.548 cv +328.265 395.843 li +297.472 395.843 li +297.472 443.414 li +cp +0 0 0 0 cmyk +f +499.871 327.844 mo +479.593 327.609 472.617 343.076 471.746 345.664 cv +462.806 348.957 454.521 360.719 455.829 367.776 cv +449.177 372.482 444.763 378.48 449.724 388.479 cv +444.926 393.477 441.001 405.299 449.506 412.943 cv +441.818 426.47 450.486 434.057 454.739 437.174 cv +450.813 449.406 459.539 459.127 461.498 460.463 cv +463.078 470.579 467.977 477.244 476.324 477.636 cv +482.209 482.576 494.498 487.394 515.571 481.87 cv +533.066 477.282 545.821 466.344 549.147 455.993 cv +557.104 451.877 556.777 439.526 556.342 436.938 cv +562.828 423.118 558.739 411.298 
556.342 405.886 cv +560.702 397.006 555.143 380.422 551.546 376.951 cv +551.872 369.836 547.456 361.543 542.825 357.896 cv +536.173 335.078 511.879 327.983 499.871 327.844 cv +cp +0 0.203922 0.847059 0 cmyk +f +502.25 467.75 mo +495.838 466.606 492.5 462.25 489 455.25 cv +486.897 453.815 478.75 444.25 477.25 432 cv +474.695 430.128 471.25 418.5 471.5 409.75 cv +469.75 403.75 468.349 397.448 470 388.75 cv +467.75 379.25 467.599 372.865 472.75 367.5 cv +472.75 358 475.359 351.052 482.5 346 cv +481.349 339.791 484.277 333.904 491.679 328.695 cv +477.657 331.937 472.487 343.462 471.746 345.664 cv +462.806 348.957 454.521 360.719 455.829 367.776 cv +449.177 372.482 444.763 378.48 449.724 388.479 cv +444.926 393.477 441.001 405.299 449.506 412.943 cv +441.818 426.47 450.486 434.057 454.739 437.174 cv +450.813 449.406 459.539 459.127 461.498 460.463 cv +463.078 470.579 467.977 477.244 476.324 477.636 cv +482.209 482.576 494.498 487.394 515.571 481.87 cv +522.207 480.13 528.155 477.474 533.171 474.285 cv +516.934 476.368 507.505 472.161 502.25 467.75 cv +cp +0.027451 0.278431 0.905882 0 cmyk +f +479.905 346.547 mo +479.905 346.547 498.071 344.899 507.586 346.71 cv +517.031 348.507 533.404 356.603 533.404 356.603 cv +533.404 356.603 508.984 349.163 501.732 348.135 cv +493.03 346.898 479.905 346.547 479.905 346.547 cv +cp +0 0.376471 0.819608 0 cmyk +f +464.782 368.029 mo +464.782 368.029 488.936 365.72 503.083 367.014 cv +517.229 368.308 540.275 375.997 540.275 375.997 cv +540.275 375.997 514.27 371.326 499.886 369.709 cv +489.149 368.502 464.782 368.029 464.782 368.029 cv +cp +f +460.468 387.674 mo +460.468 387.674 484.75 385.621 499.593 386.067 cv +514.435 386.512 540.681 391.008 540.681 391.008 cv +540.681 391.008 506.098 388.892 494.801 388.754 cv +483.505 388.617 460.468 387.674 460.468 387.674 cv +cp +f +461.11 412.032 mo +461.11 412.032 487.129 405.443 501.163 404.417 cv +517.788 403.2 544.817 406.357 544.817 406.357 cv +544.817 406.357 509.509 406.268 498.869 
407.439 cv +487.606 408.681 461.11 412.032 461.11 412.032 cv +cp +f +464.962 436.38 mo +464.962 436.38 490.357 427.354 504.871 425.765 cv +519.387 424.175 546.102 424.177 546.102 424.177 cv +546.102 424.177 511.032 427.614 500.03 429.181 cv +489.032 430.748 464.962 436.38 464.962 436.38 cv +cp +f +545.674 439.174 mo +545.674 439.174 524.613 448.131 510.928 451.999 cv +497.242 455.868 469.725 459.093 469.725 459.093 cv +469.725 459.093 501.297 452.146 511.654 448.944 cv +522.01 445.742 545.674 439.174 545.674 439.174 cv +cp +f +484.328 475.342 mo +484.328 475.342 498.696 467.484 507.908 464.136 cv +525.13 457.875 538.541 456.817 538.541 456.817 cv +538.541 456.817 514.27 464.576 505.585 467.402 cv +498.535 469.697 484.328 475.342 484.328 475.342 cv +cp +f +750.389 468.089 mo +779.8 468.089 li +779.8 423.76 li +770.099 424.447 760.291 425.042 750.389 425.543 cv +750.389 468.089 li +cp +724.597 444.6 mo +720.516 445.259 716.7 445.586 713.147 445.586 cv +703.146 445.586 696.138 443.02 692.125 437.887 cv +689.906 435.051 688.324 431.549 687.332 427.428 cv +682.405 427.474 677.462 427.5 672.5 427.5 cv +667.27 427.5 662.06 427.471 656.868 427.419 cv +657.378 431.016 658.142 434.472 659.161 437.788 cv +661.199 444.435 664.423 450.193 668.833 455.061 cv +673.24 459.931 678.865 463.779 685.71 466.607 cv +692.551 469.436 700.711 470.852 710.187 470.852 cv +716.37 470.852 722.03 470.324 727.162 469.272 cv +732.294 468.218 736.569 466.972 739.992 465.522 cv +735.848 441.44 li +732.424 442.89 728.673 443.942 724.597 444.6 cv +cp +852.995 416.62 mo +852.995 468.089 li +882.407 468.089 li +882.407 412.573 li +872.766 414.02 862.957 415.37 852.995 416.62 cv +cp +810.594 468.089 mo +840.005 468.089 li +840.005 418.184 li +830.335 419.297 820.527 420.317 810.594 421.24 cv +810.594 468.089 li +cp +639.458 444.6 mo +635.378 445.259 631.562 445.586 628.01 445.586 cv +618.008 445.586 610.999 443.02 606.987 437.887 cv +604.494 434.701 602.779 430.701 601.835 425.894 cv +591.57 425.423 
581.405 424.852 571.351 424.183 cv +571.815 428.952 572.701 433.489 574.022 437.788 cv +576.061 444.435 579.284 450.193 583.694 455.061 cv +588.103 459.931 593.728 463.779 600.572 466.607 cv +607.413 469.436 615.573 470.852 625.048 470.852 cv +631.232 470.852 636.892 470.324 642.023 469.272 cv +647.156 468.218 651.431 466.972 654.854 465.522 cv +650.709 441.44 li +647.286 442.89 643.535 443.942 639.458 444.6 cv +cp +402.117 401.792 mo +402.637 404.961 402.899 408.698 402.899 413.016 cv +402.899 468.089 li +432.311 468.089 li +432.311 409.463 li +432.311 408.838 432.298 408.226 432.287 407.611 cv +422.005 405.783 411.942 403.842 402.117 401.792 cv +cp +297.472 443.414 mo +295.628 443.809 293.49 444.073 291.057 444.203 cv +288.62 444.336 285.693 444.4 282.273 444.4 cv +275.957 444.4 270.429 443.316 265.691 441.145 cv +260.954 438.973 257.007 435.913 253.849 431.966 cv +250.69 428.018 248.322 423.314 246.743 417.852 cv +245.163 412.393 244.374 406.305 244.374 399.594 cv +244.374 385.775 247.563 374.889 253.947 366.924 cv +256.633 363.573 260.034 360.937 264.132 358.996 cv +253.701 354.222 244.047 349.257 235.23 344.12 cv +234.243 344.98 233.271 345.863 232.333 346.791 cv +226.146 352.91 221.279 360.38 217.726 369.195 cv +214.172 378.012 212.396 388.145 212.396 399.594 cv +212.396 410.912 214.006 420.979 217.232 429.794 cv +220.455 438.612 225.029 446.048 230.951 452.099 cv +236.873 458.153 244.009 462.759 252.368 465.917 cv +260.723 469.075 270.035 470.654 280.299 470.654 cv +292.272 470.654 302.339 469.83 310.5 468.187 cv +318.658 466.543 324.58 464.997 328.265 463.548 cv +328.265 395.843 li +297.472 395.843 li +297.472 443.414 li +cp +342.695 468.089 mo +372.106 468.089 li +372.106 395.013 li +361.997 392.548 352.188 389.961 342.695 387.26 cv +342.695 468.089 li +cp +0 0 0 0.05 cmyk +f +0.5 lw +0 lc +0 lj +4 ml +[] 0 dsh +true sadj +27 804 mo +0 804 li +/0 +<< +/Name (All) +/CSA /0 get_csa_by_name +/MappedCSA /0 /CSA get_res +/TintMethod /Subtractive +/TintProc null 
+/NComponents 4 +/Components [ 0.858823 0.85098 0.788235 1 ] +>> +/CSD add_res +1 /0 /CSD get_res sepcs +1 sep +@ +36 813 mo +36 840 li +@ +27 36 mo +0 36 li +@ +36 27 mo +36 0 li +@ +1069 36 mo +1096 36 li +@ +1060 27 mo +1060 0 li +@ +1069 804 mo +1096 804 li +@ +1060 813 mo +1060 840 li +@ +%ADOBeginClientInjection: EndPageContent "AI11EPS" +userdict /annotatepage 2 copy known {get exec}{pop pop} ifelse +%ADOEndClientInjection: EndPageContent "AI11EPS" +grestore +grestore +pgrs +%%PageTrailer +%ADOBeginClientInjection: PageTrailer Start "AI11EPS" +[/EMC AI11_PDFMark5 [/NamespacePop AI11_PDFMark5 +%ADOEndClientInjection: PageTrailer Start "AI11EPS" +[ +[/CSA [/0 ]] +[/CSD [/0 ]] +] del_res +Adobe_AGM_Image/pt gx +Adobe_CoolType_Core/pt get exec Adobe_AGM_Core/pt gx +currentdict Adobe_AGM_Utils eq {end} if +%%Trailer +Adobe_AGM_Image/dt get exec +Adobe_CoolType_Core/dt get exec Adobe_AGM_Core/dt get exec +%%EOF +%AI9_PrintingDataEnd userdict /AI9_read_buffer 256 string put userdict begin /ai9_skip_data { mark { currentfile AI9_read_buffer { readline } stopped { } { not { exit } if (%AI9_PrivateDataEnd) eq { exit } if } ifelse } loop cleartomark } def end userdict /ai9_skip_data get exec %AI9_PrivateDataBegin %!PS-Adobe-3.0 EPSF-3.0 %%Creator: Adobe Illustrator(R) 13.0 %%AI8_CreatorVersion: 13.0.0 %%For: (Thierry Ung) () %%Title: (gnocchi.eps) %%CreationDate: 4/3/17 10:02 AM %AI9_DataStream %Gb"-6gQL[>E?P#[nIPHD[c0ZP+b2fXTT.([>W(5PMDUHaV+`s?.s4oPCSi^Ie^]hmJ#)P[O0.ti!7a"kl%\lJ,0;=L5n[cj67B[pj`,=rVWh1T>u6F#C=iN1]3g5s7%_urRF.2rTiqPj53'f1]RD+r_.Y.n\b+i %?@DaDcsd5]m.tf_jdA@2c0KT2^O>e>29jMcqn$93qH**8n^=?A>IsV_0-,OEVXX8n]QsIZ5DOO]qT\n5++EuJ&,oXK^?/Q[ %I/NX&2ubPT6VOKFpc!]npHQB?rUCud^\J$2b6V;HX2"H!c0Mhq$1e&\gK3:S(ncHNRrA)HX,l/OrWK;$iU)\uhL'SJF')uK#hJ+< %=+>JnnHWcFq;!^=J,T@*+9!5PpraA,qsePFj#p.GSGVDc=VAB@7uSH^Q96_H"/SN?#.C1!:U"$;Jr0Nha#DnhH2mIhF.R#Xn]O&8 
%nUBO&rgSse^+&A0oli4lU2eu7mUsohmP'Uue#1gdAH77"VdG[3]'fP6^ZGe!k4aCYmpEcEM51jqJA"H,\-2c$FjH4NP\aS<4IQI1-f1>Ag(Z'l+_r')ZY98mp5N1BH0+5L',:bVkDDL8B&#VBFhEXq:$74#cFq8iF2.L\gp4(!hk;^9#=k\Z %M5.t,o;MI`*$>1"],8;&HoreS?2T]Vi98#oMV5RbGinc-N89B/MCXR$fQ5%H`Wb07\u%UYgcM*MIP/qcp0:Zea2WPJ]Cb@C3e*bd %k83l_G:$fUY;so<[rA/[j)2mae\MFIij-1Nk;*(];0h(ECko?<13A\Z>o,87rRM?"QJ$Wr:K2EkD7o(d!'&?`&]X?@C.l %Pe[.5,7Ir@0>G@?j2d>QQXA4[s-LLuRV2VLT(UdC]FG2GV#9?+n]:`s^\eFGSBhTmJ,+5tnGhT'^H-]r4iatJhn6,i^])9'@=3Ja %V=AlGidXc]C6!\ %cZA#K[jQ:MX3u=m,<`fk^#^@6Fr^$tpiLrd^=T:AZge^=:S*M^or]CdZZDIP4_gKE]L+p2*q1XF)b5<`@fTrW'sWI>ELKWYrC$XCI47tIIZn4Gc^aUl\FX8]B(Xa)5&"eZE[aC]]o6=;M4r`OZrBHX>LoX;g&F&X7J#@D8rm8`CX-485$ndc6G+?TLl&Xb2U0Z2P%E3F: %^R_J^CIYpJ_(p;6Hc=\@-uFJLeb36F`nB-c$Ai-IPW*Je\]dNDT#sJa!lnao8@r9HC %@K/-CpaeN:C9#^Z1c<6'F&(An_;]cR(FiM40g<59HT0,'X$YhMJ+8ss>rt1YPe+57RHKArUuYXCkqMQh%ia6tenPB6^MaUFd>))C-PhaX">GJb5`pYVY;P2Dfo]L.tU&$"7Ze]/2=5CCMDfmESVf<7F; %pcWD.l2TZ>h$:Iec$Rs0L;0KZk26.&m>pbahgG@kj7e'#rn@tBY7BH;pug7Q^\N\Sm-j@kq9T*$p",gbG5#)<%K:bMpu[?5W*Na0 %J,efYpHO#/]jH8K)VgBta,`IanGBiTI/*1HgA&XBDgm*2qX4$k_'e\Yhsm7N>^1\kc'rXdrUGZpE)ZH0T?r4>p$1#pmuDA&DQc8(iMMB=_nUTF:VCmeYOC=rq"3]Q)r.KTTAKM8l-98)n[@udmsa?Vj/3h;[r0eEp!;QI %VcRfo^4CG,rqC?F^>YJeQeT!qXZq42g"3^:5/OVRLY)n3$\/3BX1'0gfN*KHpWjr<^A6_.I)Fg-*Ym&^9q>YOAge[6@ajH;^V#$m %iVW)KEI330qU6JPQuC(Hq$>rqFoD$Yqeq!%r;+GmTA7A5Gk_/l+0_:kJ."L\2h-)P %l5d"b2ueXsn>eMJJpRpoYfD^;!tt$YoTroK5e['_meGXer\m):hS?tsF.b/cnaB"Qro;o)hS+:8+92?*%J1hC/F:1@]"@t0peCO] %n6Z7_J@hLJh:Uh,lB^kb*P56ol)prmN[Y$b*p:&O6Ti@e4ND6pos*u_Thdg$fB.PH&,iatot\q/rmG//DWlja84+Gq@3,orEB"Ej %iA+Y:9RRgW8n6"X8kV4@@lgH-\tnZEN=jIq@8(/PVf*L"72EHjI1A;mki%i_e9SPZ=?2`N;8*IHT1Q<^5q%s6($P31^Z``[ftt@R1Vh=C]2o#VmM`%"Jm1 %,n>YBKG9.VKG9"OR;db_!qG7a!%eI2R]%qRC+sZJ#WYY5*1)6sJf=gN#2home4YZqmDmed?[:[n %>jZ0,iuj982qD)4=kEJCPs3A``rM$'Q?oF#oldN?]WAZH3itKG4KNmkV<>g)0>ESM>(7k(L]R9Ln:35Zaho853[dQ6='2d$(cD0DPB>C,"@1\-87W\=*h5).O7kQQSi2-;&!+X;\M=aH+W=VeI0Ms()3Eo9?(d+n49e'$KbjRn.m(O-cEM>%f]523ljWdeGW9R 
%Yc+J["bnh'7_k2)l#aM,G\oa\]9'OHKH7f-i_4m/j#(+tCO_>qmW-!G0'CFBEeIq3!T]]"XGrPL'XYWn9*]M&ja97aJGL?!CBDk\ %l'u1c,hRMC3MHfgSp#j$*oY3KkrCNk,=-6of^BhWOUG!L'2XqroQ5ieB'=@=\c]uL3-IQG/jPXr0VedZCkfWd@8eMG@6IGn%Kub= %4Sf0oDu_/T!MM:lGCjPn@<+E]%i:t0-eK`#!:r+dlt+gA$)Y"`%FtRY]th'<_%o/KL6uC^Asi-WHT"VA_S^7T_f?`*;&7f8'C>K+ %pATSq+Qa0t_?F!/"D`74Q(28G1TY>C[I%CV6/i+fEs>fhKQUXXra_Zk$*^*.S,afSY!B#4J0d]RQ3I!Z"S?L5$lT,,im@@"^oB3K %ee)k;kt-L131#.^p4.d$]E19g!@s6$nAeYc3P=_$*e@78R?aUk`FTK3\ZWHDiOS8Xeami:E`6\tZ4I;8f"B?.@#F"3L3/ua9nn]_S+_A[h*O-imQK;0"h`c4Ku.h(4! %75&K36nB:7)Q-Ck2IkC(r_rppMSa_$LZfj,^(fOs>0.VTP\Pj>MYgKE]Z5,e7H[7FZP_Z8jW",0h %d_OmK$R=I&R^\"n)2-..3/"[BV)$Oh,Uj`1%P;Je1sHO)L.'[WiKq8Z>/ %'FJ\4`RL59mqM.+h/a>d5mQ91-46ocI6m-bBk`Ssm5!(.%[YZMt`lZH;?@_EO6N)2)r=V.?fs!:X/o;!`Y %TR-b$k@#"s,@I0-_)nFXY-_8*I*5'-IB\dn6eGABk^o`e%0K;E#SRWVDe#FA&4#*"i0DN@48H4uPoWQ68Gk3$e7*36DT66:96?7@CDYAWY?? %-\^(VGWQ!;*rqX'JC>VlSNFXX*md'@`rOaV_(Jht59'^ec`fX:(SJO0Sjq@W`njW@pRK6epd)`f<':W:P+QsaZ_lPU\YgT(kf2_V %G,b2n,@^AN'!PNW7KFHn\pS`C8EY4Ap$O-a#,L_Qq1UrM7&j-K^Jm::2B?0YkEg3+Q&6uGNX3t5_J2bf!2!=Ri50=j[?*Isa0793('JJmt@Pa@ou^&`om:ORmC/C5^W(]->js.`CO3jQ0*cd4$"s.=#"H9 %jrXZrCNNiUa`bR,FADd4is0&78Fq@hR&lYf"B?:AF>eql6_6e[UN_%E7#IGM_U\S1.0q:k`urVr-\sSb&#;[J_?HJK1GTTjJf+m7 %_4G14!3qd7nZRjVB=UcZdE-Vrd'\rp2j6df6$X`#W^P@kl50t-=ub'Z1?9]tHE))30akbL&t7(LQXP?6Mteq-Rg`HC#m(OMOSM];=ok\OLq0H7su'atCjhKYW1c+#h&>B%OP %Y+,4mNq9UQn-9:t#@tYGb(:3C(l;KqG_et/qXHn]9QjA'Dpsa`+g<1'Reeq)7">]]j:SXePs+EYd/X7>&'*T#W]^;U\+P`Mf!gnf %?]A8N';7T&d7,0h+Wa+@fY6j_HlN&9_X&]:4")o.VaTKbO2;\RXOSk/(lToXXSL_8Gh,QU1)&i^&H4))HRA)G%b@$&8=TbQ$H^"G %gDW[igVcIj)Wc/c-tJ]n&K,r]ug^>Fo28de7g]ZhZ.SF %DdO,7ZQohnG&N\LLK;E>5E'Tl9?V`%k.Ys'L"`<973,Mi6`^-^L\$Ek?PPIP.7OYr?"Op32V-[n_gP3=Q@?M4>L]QZJsjO`m76G& %]LdX;'2SK^_`nuEN@Vh`TV91SuMAH"kpgW=aW#3rEW1ma@G.o]@#s'RQ!A?$hA_9#QcR*pj=@6k%+;b3+i)/jFQ>1N+;#s %is!<-j(5lq$.F;QAauDIdW>)F+$]\uhQYVMR!DP(>f:^V,RIc%N1j3!'Cm9$$ %S=A-0E?07j6GDO\:]Y>aamhsH3>UabbCJrm0(.He:A[MA%.[AHQ9OGlToqR);j`m)Q"$,[OPRCnHq(/%WKi$OfenN 
%\B5PeX0kFV$?>nnXT;)IQ$aTe@D8QqW"_pGm.%o_ll&r,?o.]jrP^9!A4^6QeZZep %M6BMZ=F5h-b]]r_6WEb$SCZCCicupH?,\e*]5P;7Rld",Bmn*ATLWA5\#ql+MSG,8o"?E8T)oU+WL4])iK#D(H.<<%X>+ONJSPZ/ %biNGYnNBe"ki*2rUR7][V0P+jhM?S:pc@*c!]q'.HXYH]n@A6fFM??LHDaNF:OGU*1OKXT-"39CcVWK/I2YKW;5Ro>Ct.euFr!Og %s'+jgXEug@2XK9QXJ^)_7u#97eGoCT]@k!2L&DapT[hV2D/e(c2EqN=LNM%(.c`'H)!g1=7pb`*S&it"c<+`MU$9c-02GLX%m4rJ %;jY^7*3@fU%Y"u6%l9@ZeK>p#`W&gUliJ)^aLeTf^Uf/(*M4YQ_l$)3o0gohF)do_Ej1e'71U28i9CoL8mG?U-95Z4UsZM<1rk0- %S26_S=5"7oW5cl=8pamq;es@_aF#Cs"W%#,.+W0qMksdoQl!kf:"7sOnGe;F^3)$>f:foHPL&#^B*qjC@pbb-TM!CkW@h2!Gq^[V0j+R\+?Ib=1n/fN %FsXa)]6_*eiCI3?&7oOc9%X",ACo47#BqbI"];(]CNc]r]"e%* %05@)ol;:Y:5PoH!B-DPA>R?6H(4p`BFjU"UqUrba9'M^?+"-ChZbSSUU"S7;QeC"16BkB+p$;Lk)6(WrOV?/,sN]`g8S:fMY %>pM3R%pnLjd^Td@$Q:u<2^E3#k^MfKo8Zl/kc#!E#[).;;s.il.5'$bF\<,)-+tu9P9Z#X)#K^9noZ(I$$D].X##]__>4.]84?;g %lsfsml@QGM!Ll".CEQn+=Y_q+c3)\onLNC8e)ku7'd!5@VUr5i_A_q!.u9bjZS"+>X3.^ge[HX2\ZEH%a^6PX.=N5XpK6Kg,V0ha %NoK@9?B3V.0q1@_\$KaBeJ_S\->JMA7H$ZC\grZ5&nZ>BX\l86R22]InDTb52[WPAAhccR3C4/fC6DuLOm%g?[7Y5_DjkjGB4-N:HQf?icGMiWS&%lX*0nk[fsa(k1sC:fCW7Z/-41kK/d)RMI%l?Tp>Cm(CeKbI"Xs&-aj'fQI"JVTLZ95Rt\ %i+cTnoi$Kmj9BmjB+%)/__uo5KAuSc;Cq*D!B.NKQ$+drU2/?M27e!OpNi92@[e/@=Y8UV\7nl\U$W($[sY=;fgk&k=rXoV%EnUn %3Cm&#P0fk^?6<5D$OY.#;='INJ2V;tmT`W#(I)b;]^lk0q(da-IM=AoWo068JN^#ok//c"JQ9$%lM0T@Vta0?]XhRO44Y+i.n`%3 %(8i-NSc*8h!NGILSSIcpjifl8J:%=MdB&Ho8EskS;JIE@7<2Y&BhA&leln!n(S-"&5IUnm:Qa+$%uPKZIbu25fQ(D,W@c$=IJF(iofCm]WA+J=0lGBDHDP`jI4i@^ %U:Lb.GI^aZHi:rbHW_l&2'D`48Eesr0>.FV7K8YXZRE(QKc*oPbRt8kem^hAa"1uLYgHu6V@$G+[Tg>ec7-<-ll!)%:3#\:)Xb(u %rShm;ldUWC.:e-c8qdRT#KX[jFV+s9Zp2M<9'EVgX0M8&9KF'';mX) %@b_a^.]a"_W5]CPTO'oJetOto1&`U+H@p[8o5OIb-uL_M;qO'$TWU'lD759IMg%Im[P<^cK1Vn,FcZ&b]UT3Ut+:&lN.mU:Y?I@B0T %k33$fq`N2HKY;\Yf2jTh#,DNUT<]F8Phki,m"R`92uG,:iD8,9s#:JePhki,m"R`92uG,:iD8,9J')[(kE+NhU$RJoANp$>)=:ef %I!GPb;bKrN:I&)&.VZ!uY)am;fjqOdjl=b+;`X]^4j6e.#+'C$3PM8ZVk(qL/RGX+Xk.X&U**p9EdAP]oU*g#5&[=)Q[-6-"g*ju 
%\$/*1M57Q+U>/$H=&`;$I7+)tH'/&-.36NEt(-/R\kW7K[8^IqBJO8H0>8FbEMPEqO`@\ODN)S;4 %!]/=lS)!urYVH#NU+j:=%-ofcF)7>)$8QTTC.aiS3B4DfA %Rm3Bdca]A3XfcJTQ9M"-NSUOZ[RelsE/ckM?:PhTUR>j/;[-eC-Sho>nn"]N\k,"[(/[JfX.De*%d'cMPM*W_T6eB$g.h$X_H?q"#9S_j02-@N]1d4feF'(>?SEc_]ptRF`^VHjItA)87.B\"-dR?KD#9N7$^0V'?&-]L&[AFug=TG3S%F%5JrK;$WC_3]XB)R^EusiJ'7;/;GJe[?N6Y2ngkMadB':d( %:c$C[FV`gBC3,?$KaO;CSFMLMO(!Ld8of:#2P22jrPSO,YMQtlk3b;3h16CFJmT2XD03bN?f`G;a3CTs^LW8^[)uO7_C#ef%-hGV %nrbu#S1NEek5+"`+IZ,['36@R^.%?pR;n!s$=I/9Oal8M7LBg;7&QW3c(mJ^_NK5]%3B[8s)V %7S!#S)P$drL)amNIaii%hUK1[(K=NV6T#*qYsC,Y %FcOhILJLK8W%K3`^7!IQW7OG0&JKP(p?qI=%pttaEPiYO%qWW-M3?&EDfc:UP=qFVEpie:#+g*jVY1%Z[SAIQdX\0'P3f4\kfEul %Qh]\Uk:Bb'9&B$3&3qRr)QBBT]/V$]083[-.SlCo)Zo:NT67'jS%*G3i^/^I_=[:D3D0Yn'lE %jWGoMUNdEW=5bIh)u4oEh\T]jQnigGG8397="M+=s/5i%IsWPu5ur__^h7^_)GC8"*Ks+@oYoL4QVb=bkbtNq6`peMXNEZX.HYY#TH7c1r>L\7PU%0 %O^o#\j4$u#dRneu'PXFnEZ"WDF;T<"]%sT2TA^4La0B@HY$Pce._C-%)XA1t)-riJIand[>)U%"_1rc$&*ZP8>s3rtJ4u`7#%l&- %^Cdk]@+.g."rme4%8HAA!M]"X[hiiNR8VPeQD2CQq.)3_k[:tCL3Ut(WD'<5_mi['028[oLoMBOT^mn %*GH>4"am/!Y*\$/*ISdZ6IS#Y/q#(]Kiq0m,Pa"Ngqie8>T\$=bSe\Y1Vg!N"UHg4i_r4.bT*VOWr%BF>7t$SOfSE82)ZZbVr&!! 
%=TrG;OVqjZm)l16-KCsQnan#pmim&fer]GTdX?-I!LFOs(QQZE,[mp)Rj]I298LS5QK\XARbbLI#%(l19mA?VhmU,_2DaO4/@=Ir %&=h,nR_OA"B.\Bi$fe!MHYD0gKC]pVZb%,jR@ZNtMd=_F`s*D+'^FbL`X@'0W5T8r:TH%/1.>e[[qr-G0%Qebfh8`@P'+*'_-.JU %L8n#$.fV!CM1tE%UT-rgg(M5/EfejT82[Prfu@%haQdEVm3+i3'Y!0 %MAiU6!7,>2@pUmY(HK^@Dapu]?iq<+S$[Li^d6'Vc3k'FK%>78>$iN@Kc9$>DiHJXPZbF=?=ooRXn\5d5pV,P_mS)Rb[.HokHWi1 %gNYG0N#*d,GO5a>I,'1(mSs'JLBl8mD9EMfR>X,c0T*t_UiWXdFcY\[V?^=&k`/\Y`;1@O!7SICEm8id(;E%k[4d/j/KLtAF?"t':f*jrL`D%D&443sZQ95IKA!;eg!EZ4g+-Zt0-c<-*m!_-.&-$sO&PEo!H;nl;u79/[0@WAK2S^ju4@qg]u %OK8S`/#c\ZWpZJXRZ@@m$6eGZi%"(h3TeZ;[ps@tX9+-0k:(sj>.Sek-Rj2#.M^m?$=&ZJE0L(5X=M/jQk`UgqAf?qFIH.Soj$k: %\m7.VXIZ_W_kV\L$G/=eF< %^9:po$%#,-Xn%VCrk:,;`:uh-:cdL*q:iMW)N)n/rH4OD-d4e&X4n^]SY^fh'hKeHr`(d78]RE@E6>3=$LVUgVTM?M<);@\86r%B %VVZbAK&dNp_Fn_54kACc44cq`haZR[2AnHjMBg9\'@ld5f+k3hnT7t6h-?`W/)>$cS-/_P6m8Y==EhKGZHgVDH.J0QoKech&ddP@ %pe7.^)ZW\g9r3j[)Cos)%YMXo]cL$-LsP%t*\k^B$o/>n(+;;u\t6u`S:c*X-PnKRDm:TO`^XjW+0(F;Fhlm;N1A$eD?d=B^)VaR %^UPFLh"c`f^9'8emK]XJg/f9MhYgr'aUoojqhG2sZ;W&U@:"/6'T+*jZ>@?sHY7;6Yqob^fM. 
%F.ZF)XrP)U3uE\iRTs>;rir&t'%jON8nuF<3_j#WF51&nR3Qdu=7D>-\ZglPQ=8W07n:-0&p8,Ammd#D=1:LA6WVRj4Q"E*Hdr2 %I@mAFi;1Q3nTNH%HC)[5E>oM/=37i8ZFVr>ih_H7[%mtR%D`\.NL$qHEs`mlg&^8#r_4p2dR/Mnr,>9gO"LY6+,I.p)c'SH0W)E_ObE#R=\3::uRR/N_s*Q__B,*l3QD##CJ %5Rrch=^Bc*qI*BV$M=I#X)i*]oW@(WS@!MHPoS'Gp@R^dK\9Y-DVNA5],ul3!\((Hq=TT4J#gXPj(@Z9I;WNL-#OCVp7_&D %X%U@6;rb#TY!.$V.[buYVRkA?TjpM)\MJCg;V`NO0RW83h3On"e %@bTdP6OW$h-YiR#NN%0rYl?NM;_FA.j`C#lDGrKs4FL7,1@J^QJ8r4la.Kth\;*f=')&duKPt9\BtTpiqed5thn"HGo)M!pSHu%r %e"%pM[miP*t;Z42tFB_eaqB\n:F3KN5.#!Q$g0g_FcoBIg&WVRf_Q_8K41bd:Z %kUp4mASATh46Q?G_UmM`r?0]h0!0dPZ$3"B]itF.W.D$9RNA;+C8ME+D!0lTm!QmGW>sa5;R:S5^6=7>'a.).S4*-6pl_YX7.!Fm %e*IBGUeu_(7&krp%[8ZNcI$PZfM0dql$a63ajjj1=`^l>d.HRA#um!]>o-IUE]gYF)jqjVn9Ri^e)hiehHfANbbsiD&ueg5C0r## %f?mmi4]1[!2V,3Q%f$mCZ$VJ`"fWk@,eJkj3[&SVgH;D*^oXUR.8Hq2grkr4W'!rTrbP-L&Y'XQ0GK-EW[!r>#$X\mXYl31&L>tLTBb.O/&8=3W<:#bnmf5hjXOB_-s %Y$7C&DZn3_(X(T!;)=eM!Cqr+khSf/4J!O]=5G&/!M<%6n*cJPp[_Xa\`go?[g3j-OGMKkAc"l!;UkB6k/$I@kji[!0h3I=V %-]2L(3(;*`\Ba`VNiGRNVA\;uLd[&gIX5gMAj^&D[SpYZ*-fqi5'.2cGKa3-BeMe79liS6<=]j.R9YpMSeAo+fe,S!YQ;n^\kLTC>*[\_SYLY\(+)EET=%6Q]u=fJIV %f=?WoHX`r%J[u36FjOP=Qe]iM&5\PGG*8C,"&TajBulrX'^Y;NGWu]0GSeEtC$5c!hg1t#3T7/2hF[&X2*uRA1gfan[57Q?qgRDr %T;P'BRS'rib+E*WT$btFEnA:qN %;F[drQYnJH<+e4s3/.bNXG[32ATBWo`6n(iIZFR>l:PBCL`bu1HR;6L/4L3$#C`\9H8X/tm:>n$\EH#T5nYa!M6B:0/VdDa`+r?t %=k=mli'q(:V"gcV,LI.J\5\m=>.MeuONN+='P4G;8p_Zk !%=ns4@64II%1r8[G(3gDLW2U_Od"+e+_o-1DJZ:HZ7-<1m"0C++G``23k(1jd.b'e`UkYaiojN:7rcQ)6b>\#LO4>M\2,pr.cIts#k)*8H?AY_e@]8M".c%=# %e2+XNY-)>G_XM.GFojW.@J_\&SfnnaJ^m,\2dLBsHCq8F*_14O,Js_R2hK^@:)b"$Y&].n2n)A]7J+L_f>_!'&8'Q@k9:rsC?LDU %,jc^10Zr@;jeu$`^R5QS/5lACAr12)-:5U8nb@7E?G:,X\SJkf%-^+epK*Zj2H#m.a'l6f($&YqlY0pO;i=kb<>7Lu=XlaLFdnrP %m3Hu!cd(su(,r[*YVW-li\FbS<[D>@kqGIchKm/<9a#;[B@@Fp.b)`9Ge'PHX5O0k(](_>X@ %)W@\B=I&LFhr)&hNluomk)cGlj'cj'1Q3S`lc$H'LL(Fb88-lLN7X],Z/MSbDen^c_'\at%Bbq+T53N@2>MpSD6KF/oFED3T;79E %2-Y<)'jiKO[gMq1TDIO-NC[A5?`E3='n&s-HUYV]oKoCXpH&2ZOi\#+VS"S5VU676+ZN2FV9O3&I;aWsgCt`(3U$,4XPn)_bY,eh 
%oqq12VQ$WW0@fW=Ato-c1CQ,M7?8=@Snp"d&#*g^WqhJ0-p1s%)HM+:^sO&b_[8D>4444CccVL5N(IHuo_G`]F'T!r)3r(6jVaDU %P`*76UU,H)Ao0or8aFNRhT"WY9Ue/o%pPK?a[9@SM>+i?M/-A&`\^37j_<(\Y(DBg*o$qLs*7bFg=KuiWPV$"X%0@Jh(%2f`%#m6 %*k#R@1,(h(bc8oj@aME]C!d9Q;6-(l*\qQ"BhL$^B%sm44&_)E3X\V4^+V=#="ofrRnRLDA(EZ,l(]C;!DKG)L2)VfGe, %Fkk50TaC*@9X;se8Vu-QFNo5ncP,O2fN,II"Q'1aF^7e9cND^!ho*:7\jC0^Q2!4D;/$uh-+:`AW)$p6?uWqO %Vm1'o.XgM+HFeb)^["]Rgs78Z7J/6lG9,/PSZa9cW+`W:g_B?eG+j0"3-png.R&2Bqudh"[^`.`_^TS>RWM0NB".%l`DDOb(\+.F %3"<3.#MM:>mrTL]F$WsuZX\sJ+\&.GBZ"GkP)8e-*21UX\06OS2I"Y4"\bB&#jmEkmX1QSX3CS"!9t;tf3gke=:'1T#<]-?@6KG> %=u8S%b(;VE>*UZ&[*#hq_d"LsM07Yt^1EOd^%tTk<(Le1e4CW\MGWG$f/Xlj\#bcYe6*UDd"o6hZIZ.ZeOtUh7--6i8/?@t] %V^SJ5fk)1L/:#]AdL9o$<=5g8$(NWK>ZamS[8+/j5?$T6=-qZTrM7j3X*+=j=tAFh@+j$/;'s#GdrHGm+gX$`9:%ct7"o7sJ]ACM$^_)U)2 %bR#Cm8[hi/(Vf7`Q.+MY7BU"8rF0\W,n^qT$/1C %DOM0td=OA5DUGi!R!+2MrG2BVo`+mQk"1.troD5L#$ %n`%u6YL=0=a,=L>9[0*_;CZ!G5OBbW^@eqKERSf70.RLllb[=(Qq9DHRB&3Zp10$%264J`S$lNac.oGpH$]Er*0>1[)+U%Yu?UXa$D]c+ZERPLh^;OH!XR9eqM@CV@IqbiC!Di=HKW+rO!Je%8s@20cRG1Cq6>,c/o %C,fJl@Cf6:--,lS!c;A<Xd+^7lnup+SI4=Z'_[Xg1<1loJKP8QiI(Ptk4H+>IB2ZrQ2YOb0X!OQc9k0s1$#^M$66c`N<\$&R@NO7m?1V-E`kYR+7\X*Rarg;d>iO*k %4G>Ue5u^U88qTQ39Jg2$M=p#n[I$+kg\FG<.Mp0>3GoAAqJiaeEQ22TVrb9.V$q-8(i%`S.:gA%Bh:2'MSU9;5oA%ER1;qa^j&XY %et')&N9fkMV@[mB"[4gU)s`nop7#X(i91n&lQd>r/V3nLVjSDMA7(@[$jR);PeBBFBKA$(*24`BSNV1'J=Ca_`_9-g5++V'Tja,co=")?_hRuI[EedXeeiGX,*>[:$PZsDR %k_".#WR$\[:r-iSXlGC%[,T$Nl@/%n[cQ]sC+3gZ(0Kots:6H3b74[Q_(.A=;NAQ^+VGBc>(,VAF"4k(EQFHn&PVE/*F;e<@6V9oV0Vl:6m"Q'6RI%B]p\IENq %\2LWprBuhfeS3"&[76B%WU_W6l?f5`0A#@X,egN4nr6Xl7TZbF'e?^r`O\X`2EA:Yc"t(PhTml8MmDWh[*IPs#MdZeU7'>Eqp1fO %f5X=R5?m)lp_t\j/'9fQU3oP7\5b?R]e8a,p.kKSNstSY+!_ktU)=hj+$qLcB'P9d-uUiC.n/$sIbL'9B&Y:j\qW[7Qr_!tCQ2Yn %*^jR.%A-^4K@R:SA9;'^H$)i^CYi;u86k^0q!8:J3j(+,UZT:tDQ?)*>[dkZLskVS04'JX9+n;+ILY_.B\CP[UX#5i>@N>NVID4e %n)/SWS@]O)"M^uSVfh`NpLSCNF#0pOq$lu[X-?oBVGHs.ds^4t56[dZi?S>A62hI?-XQIt5AL[3A)J7m"`q3l?$q\I27e$#ngGhu 
%=8.ScN$SVNc\I9A<_pik`NEt+s:"kef\.H3D@qn]E\`Y#U*QZm>>"u.s$BpfkY27n^9`E]1K9AHBr)g?5,Y4pi`4#1X'U]0.h %fZaWO/aT[F"g,4VCP2Aa\F/Vsj:?M[/a@N,-ui)K$P`j)6u4f_&5e>S[>"0."Y4S3ZkT9XYpNL"Pe'2dO^8`3F1'."ZEBoLnJNa& %rWtb]n:sf'Ot##'kXpujYZp)1=7aN`fa^Q:Tbm;EY%UUR %)=JbkQl,rOBp0ju.CB`4VVrlKMZ&HtunK:EC""dQ#?u-4JGje5()[,C@qDjH0mL8]DLt>M6paB9_ %B.6*aW\$e])[$0TBs_J<5=E9pau>!$^dnRuc'"=e5h\4t@n"3q`4SZ?1C).J9T$B3e8gD1EiPGcE:=6Lja,@rATDWTOUY[-i^c71 %6&\uD$FMjT*pZ4[=elBE#+g[BRgEF]GQL`qklda`UOb&--DifGM>N=1JW]I469=kb8/.CS$au>`jF$Z^,t*G'utF+!Eu0Id74.d[t^fN.gp'V!>MRphLhe*#B=-7e,`24GL]V:98lI:R?K9O"4H!/V>]&"DP\_UXRFZMQ\0@!S3+Mhm^nnSu,+[Q`7i.-:N\8iciYJ %X#B&hEl8FDhp481s7@8CmEfaCc0LQpS@mT(DsLpsesF?!IC:6e`Y2It2mp=]MrVU!a88$7,5&c1IG+V)]NArE6HBt&5`[ai8!q."k&(1iH%l(R\XHotE0`PRT-MTSVFeT15q7*nRn9K6THat[ %!U!iKHDadq*5'][V/[L_(Gp(XDFiE%d/DGtB<>')ePl`iKWG:sWKCqkFB^&D[$[kFfr>gb?ugq2PgYS>*QU$CCREGu)()c8K80C> %"hRmdT`uDe6GTH-q&2e*8H,VbaX#>oAZr:Vb>"JlJQI*[_jauaq?h#RF0?_C]Us>GF)pYV[]OZ(.e?aYGq:R[6YKFY]WKDQMX@H: %e=g$&qOqBJm1S.h8W&K.Bi&W$.sH?Le3if<5n&LZ"LDDuBE)j^/f1V5hrGJpO"DRCnKHIFXo4K5o_2Qc9!LToUF %U3&'9\?'rm@+596R>[pQm]1n,_sP %-qc^5T*f(6]=ekZnu\=';7CR7;`]S"Vr*?]2T]r<&nF[V-tt[&=Uq<"+f:"?H9rVl?Z=GT?1Xsi/lB\X2=6EE=C(^3$8[75;`a8d6EY+(_@cj]GSa*jT)]@@!GPOq?0p$SGW<.2dBki$7eBQq-%FuQ7Hc:r=c?Am1_KY$Y\.U)=5[\pn'.$ %4@/tW7;.!F`m4n/3YfDJ:DD7b<*cIT__t'Y#'KT"$GrbsJkNKDYia@aXTqd.X%L;-BnOl?XqfQ(YLHa&=Y$[5K:dCdDY9?&J^'p= %97pB3fVZLo.ii4npp&CZ5,cLQC@C]Y.]+-9'\eWhgOc)&Z'mYCGn]5H3*0No(krCf$L/Ae4%m`DUZ%hST\$(L*X8NE %pF*TCC*Wc='!jTb?t61RjFSo8`On$'EBUtSW8IIfeV'Re=P`QqH7ln[AjJ4M;#Ol9G)b %EC!'m,#6OL,j^.B(_$l?%*LdXj5#gbk99[Eh\U$h6^XL%'Ihq#&Z-[[ %TQKH`+1p)5nQ[MD7#B+bT0BTS=RY^JQNueah*[\qkY^Ldte!1"U_`%]V+D]-s0#*qt_$CObNEDglB2kHE\3=''D4Yk/n"rh.*C5&osNr./oW5!e55s)ROE@H4oMo\HPfpXUsDQ(qY>+$K(U %[k47W]^(S1LVU4;,=(?$Wal16?uF(/?:$O80[8[K/0r>*E"5o=L/40f31C>5'H('jnpdUbK@+Qh0@b_d\0f$ %\\5R5X_k"a0Q17Mi[7ka`lkptQR[H6?&QbI\cd%&J1RZe1]^*PqR@p[C,M5*$Ao)Ed3X4=Q.;PlQD1$#U,FP!e=m=0PhReo\+e'[ 
%\'SNrmV^!r>Ah4*hUcqab%6Ru'rLX5lNQ-Jh0NpBHWG<+E*6Ua`T"uS[%=_kT;Ft,7Ii\('>&(.@V&:"P\Qs#`"QYDJ&!Z_4+UeXuL@%K%SPpMo?H24?=jSLF.qtVg,:O[+,YQF#%jMsdP`KdYUlI/p;Hq[Gf=&Vd$Q(3**Pdi@l %"tNX;V*pR^1U2eES9SL'E?cg9c$Bk'f^Su6;9d]3=jbc@9I_RtF$T9Dil.=meXT/*Fj(Am(q.bePjp>'jeG`78qaj1#:7I:j@<42 %S+Pota2n^M_+B\`O<+)5EJ#\WMX5ShOQYoU-o241]/QhjWh;XWJ[(j4.V2:GrkH?(@0U_K'&g^sS;kOPR$6?+rgJQX]#;0TYCAj?\O-"a7S23rV@iU>1S?5$\/!YE`rDFPE %EEVpMrI[\-K@h];e1.'/C:76t&CDU[%tMJg&T_]iT-Ve`D%5o*Rk8&)XQ3(tDI3Y(f2Z.ipStc,h'/1L.Yk0\PgGLj*g8i2'N.7D %V;*VMmL(K&7Oc;:q7@o-?MkL\2E3^.O/#_4s,@Fj0sr4*KQY)Vb%P)_]tD%kF$B,s_VHfll>:rmX`nI$V=f0'8Z#3$G1_peC?`+5ip3n)-d`f\QJD=%28/LP[/,YDLCmAhZ.NLBl/WMgW`DE)<42j7,QRiAi"mE7?\/BJ[pmHI%b>ANJKP3p\^CmUbkqD=h6 %3P*/J,r>-a&f4'[`DYl15"aE'1Gc1S6rVXAFg+6T)W\(3MsTQG^['[)jg1ldNP:7`\oj)Vs'KU@=%]#sG2IG6B$KODl;Ht[C"=-?%_YIfcLInoom,/i9eCXG%j_H>9E %[LFF9A4P<*7bKn(;sJ@C+`i?hjA;2L1.oE`Q/XICQ<>k,X8H4#[$0Vt;"R9$l<\;pPNb_6>'=N1hGe=P-6FY([75pBFI,q6D,XU# %h;lnX3=-DhcoDQ*-FcgFM&RbDKR[XX$U$RQNe7Ju*O-kJgGMr3G@;!mnabTF0q(I5;TEZKop(h#aIg4b+(i\C#Ci^&g,_->.5NEh %=W_9SH:l%#r#miK6%V=6kB`h^(ZL_?K."-'(I_c0/f,KO_=>=Nr"jJ8UudZRoOXf@BYHGO3,66^b@a"8;L_H$`.8S);'6\sS0Ums %6rG]_9NTLjChV0sLqjhMJ"-Xb$,SY_*0BBZPJeSX6KcuUMpH8SjH!O7ZKPl'(OeYqL-q6aqI2:S+@RAW!K'_gI9F3O\:!&Q %fH)4LR\``YQBcQ,b3Sb=Z0G9Zp.M^"AWPQ/dchDDtAc.WrC"ea8QCZ_D=CQEe&4@P>:EcU*DJQBV7ikOpG=0TsW6 %;Tt3<[cUtag-U]PUr%g[UtN^o62iusC^N]UCRfZ>ZnRO2G^1RVMC.^XPA=22>:d4EA2^MZWBA^S],mo:$iX)'i1e._C!,i)f$]A;!OWi?Ds %S6B51+ug$K93(_RC3HAB%BEb4%qY'6;ZaiW=%\&uLiJ6UaHHeIVHHUNfs%J#&[#FB@9G;@]+>B>]#(8.la43_obHPKbJZZl9$Tgh %3s5GDb/LTArZ]BNak'DQH`QcDUj)s9pEJ#K=B(#4Q9:ceamn%W]XnrXdGd!X<80FEW\dseL[*9?7RO%P7:FZI"fk3^sE?^J^W7m"VJWnZWVB<6/bXOFr)cXpn$Al2:,)W]%SQ)@QP %4r%f8>n<*2mOFq0B8NJFW(ems_GPFo^WEa'll!ER\gk5s9A,H-K"On?AG&FidS%E=l`BDGQd$)+s;AnbRH<=`NkAp^`+2&u)38Y=H`M3U'MRd$goqL;(ESVs=DK2dEoK,&M2#rEn20?"fD/Y%qtCfFqW!MXZpNu/)c.d'!j%I-=3>,teCG?/'/l6*3Lc_,Hj`._>eS$gN1jk>*J %14.:uAMue((K=*KVQ%I*nuu$XeCE35kb-q[kG,jS:'mS>S@%;e>29!S,'I&*6X?N&C6W->L^/MXQV[a<@g]fFY(41BVV-h_] 
%\OcD[;I7V,R8G3rXnE\RlTH3idFGCLYneEB;ulF"^\dO"=!->4s7GIPG@,g`c5Q^Kpq-"bPCYsoeZ2K5LX>/KhnSto5JDFA %&WWIR*YS`#Tsu%M$E*DH7c79-30bjb;pe;n^)r3,M8Ia>p(-fars/\;MYF@pW"E?`r'0bI %JO7i;8)2'V%@Z^9U+Fh[&D"muI^F$#]>AdpPJ<\FTF^dF,a'9;_G?ghpF4C1]*e0NLtrK'1\ka[(?&*G^oF=-3Pp.s_h,U1d8*ZL %5f5Pn3R7i+8JrbjTJ;q$I$YI+>85GC)+PkjiFP'$Q_*X*XLeZYJ.ur3Et?erk5Kg)!:P7WPfmm6>VF42SX%qn^!T>6B` %572RdT5t)I:+`g6;DrhTsB.l7Gm,j6j?E(s%TeV1*G%.m9M9ld2MZ4ql.=DjX*7:?Q#q?6A!"IN9Bg_PkhKkbsJd[DAP0dr^Z1!"Z!T9c]rM %NY3"WjhAN<&[B#uX@B=Qcn<]hLN#9P.ssXc!:Cp4NuE89.-/cE-co.hCP!9j?!:-[CAQS/%lu>>hLrI60tL(]pRoCUH1A[6ii>C359,[83o7Ib(KpF17;]]3:VVgaf;0WZ/g:)gC#6Q?AKS5TUWdsXG4 %1=i/7n497MdqIVB@@I'IA:6qt0CA4,34i6D/9p?/H^M#Zf6jH2ijP3Bg)31@lm6O$EX5HYpf&o5Xb4kjArMo@na(5JgFb6/#KRdXcd+bZtF#"7h_S%QVW3=8,?1I5;k(M[+a>!i,H4 %p&t)Km!u4#pXHl`U@e_UA!E`dK93BRa,1X)c`X4Lr2c6:UMA6s`_6U2&R)>aK4)It!MbL`2'bZ3Ds7@3=:fW="8HetecFRB>m-a\ %$PHjNNs$BBm4Jbh3J4!n-[AmaJJI.A:&SUnR&nIuQR6DI"N5kB1CX2afrMQKj5D(op` %rHj\3;G&^)N,5UY/G7^S7/UX-:=M/ZjC,<9d.Fhl$LS+h+[#N:R&>4/+H09M[RSX4PC+BGBV"aDb]7?aa@TT@4^GYLa8:>-Jt@KI %4C\YOa%d%4jYSP$nC72(*nT<@@_f6u5N5fm\)s941WgDPA'`I70PXK'N,7?P15Z*87qn^\/gJisO7K@q:B^%S3"fUX_AUYd_ETKk %.(Yp<]m%u"F\?FV)73D>?sch7nU?l2]m#@h"'F-u$f&r*1j+1EB7g8)2^ruBG4.7]g7tV6`M(n=o]>s\Eooq2a+`r8RI1dXQVe[: %?q%bm:)%H9>OVnq0Ij4.fZi-$/rg:'@7%#o?_d]F(7jlSGp+F7\i&ET(krf."=Dd3M]sJTVu1rCGIeHbeG""P0PA(C;-8^HCg>U> %UO4.3@n'%WW7!YF#66Q8H-e^@FaN4iaGTF8g\r1Z?8m,O8gWUZp'Qc3)H`4(jt97:-k]NO;XS8>!>'/!@>t>AZ[=U`K*\7^'M:;. 
%&eJY",t"db[l]Z%6lpY5#R6HNamEP#^[i0[f0edRTbrKu5*U*A,\P_2*F67Ng+gIM<>;#T)/D:,((iTQ_[\]CKf1,68B9Z92 %7(`i-d='mkiKAB)NjW&EE)XC/JGW-gJr]*cR,MIHn*U?%E8E<]4V!V*:]h!kAo.MY.I%bScP_Bo\U3IUT1:uR&/?Ak-b&>kWX,/f %M"B(57fs)"OfRPK#A9s)ajXr$$D,^DFoo`Kd%-VnRO6rpOPnO"$!%%W1ZftO[=L'SRa6VWC-j*U#ad7M24gO)\+='V)kQ]5SX(>j %Mk.0^DrtV!VG&$hEX.\'SN?"8>WQ#/,iA29[jF/cAp>eqWe+RcdSL)?W1hd;9gVNcIXiW_QmQ7i#GB/uj=lODE*t.+1t-%:*Z\;= %QjUeM]QXsR(;OpMdV_AjNDB8QbgJoIZ`kPp&<$[DGu05eBbuBaUf0';dX<9`7)bEH9VpQj>)\_>4:&H<:qVdD_'`ri+.3&Mo^9-p %KX.tB'!X?F%^7_'rB/o@XODC)3WmhQ!qpp?/Vn*gOc-e]=MlL=!lq6p.m-1'NHSEd>#uXkZq>R`P-tc8iNP868S148U,=XLM?sAL %C=:eArW$+M$@od"h]-:bi'!RI)4ZASak^uUV^unM<(cT.qg_G.)2Z$h%/kWcqU'p^p'.gM$j?PqjuEMMYVe`(@g*N;BAb<[M$+Fp %.MnWHi"e388NEO=iB\+JJ(mQ'BMCL:d)[-\0F4jm`RB:o$#MnB2fkJl]1GKaD.CqUO %JS5[(cQWc!6-)gDcfZom!A9S=([sRN*LCkde.%UM+5AS.68oMX$:RGtAK-C!#iDV^6F&+#J0LSn/(-(On5IR;C&aQ7*anF/kSOQT3$Ha%W\A/(^p\4rOa_sX;.kY&(8-,'84`ffD!L_T3tS%eV[L[RFS'AqahnJ?pCM*l?\t`Y;Lr%1Ie4C3=C# %XKhk@.*fDb$?D3ToI)-3nB$q2PMV*HA.cLS=[=gD1k=LeVlQG:=mqT;o2M-:fs3%QeBTHQKr5iIJ#Y(c[@+0,>2=3r0sYC6@G$8T %?[W_j==q\:\4f>o/Q*`L5TLf*:7E5p=f_aO)+_&.\L:M`e86VV!hGYY%ktV=J.1dt_06]!)+P:Ld?pep6Q@9PHfMZJ`T2!jV07fU %g&"F*n!@oi*Q=1D^;i7_Go/rK2]s'8oIlY&CI#4hdd&lliaM%Q@DlP#m/prKf;#I;NRgam^d %E!%oO3uRj%`9YeU6h`t/Aa$H^`i2i75-jghof?B55!*Y>"Pte'T@/SY!qD;->+_`WU86[YgG2?1[s&DDteSM<'Y4AX0O%7G/A@FM^c"<4Wm_ogsU4m/(2CM/2V?SmhBRC+NY#n>dV7kT#O:=SmL(biU!KD %4(X*$DY*qr7fO$c]>+3qh/E>^l5eOs0CQZTXGr8Y+UNH"i4H_/7c%s>S3V1H%1f5M5ZAln,?hIh">Yue+Wu.9r7),_F:\?uY]"%W %ClQ)W+\O)g*]&M)U&3@s-L*UC7u#:P.tWj#PpH`f"$7g\I=jUoB%n1cn,T^j@/_"q6VePdOlSAA:O!+sa"qh#]4$dmAoqJ,RX-\3 %;[5Q##bX>pGJKC_J3E>qaEOLP2E_7C9HJi,53fgK)J*_u2^0)adh+-SHj0[t!&h?Nmk`s55-]GCj'gl]'*^et4o/4Dgn\'k!;:.u %dTHuoAt2,k$ut%J%Ka\"*i*/]dh?\\;*0\9#uZ\!)]0r,i/qY\#*LhSg;'dT(o-eTM%)bkU!m5. 
%#o@!;">UK$O:H'o!7&&35]2V;!,R.Q)8r#WaGf.Na2"P?"N-OS,SW\rc]D_[aX0O1LgV2`Y9Ym<$m\h5"49>-J4kdm^bT%s'_@_] %"ebX9nrbX62``'u(80ku#s-`>;PH/si1QY+&b4u[L0VT;0bga-K!LNR&'W%fhe-Y_J-dTpJOnM*!RtCi"0B`ki3Fm8,f(1G+PWeV %a]u_u8-8jP[KLSY3eh#BaQ&TCRNTa)UQsM%#u#!$Pi1>7k/3)R4*TCrB',?4>rj$G#$+q)bA'WNk#%IW"M)A&bt7YNL:+=1BCqs, %idl"B$mZPi2aP),P"mR+Wk5pqK`l)f[''U+3gVU9%%N"S%SX86LjC]A!)kr`__.2ANcEVSP$opkeJOZP=imK"`R"iqL`C-pk",$_ %M]sC,Q;-L&KJCY:7,djL\,A^Y&b.)h;G(e#$b="[M![ZcjqFGn:OQ;2HC[Ts`EJ+%p5@9]!7&Kp\3=m7R16[324/5H@'BH]i^3X,m[]GRf3R9*]BEq#$t6[K1E`QTMC8ouCM'_UR[*?flk3Tp51-!q %I?'LK5lMu`L7TIk/5k@E,EniY0Mb;Ik!;iENljXL5Rrnb"?CEYKg"Zk#"K$.JJk#6$_ca1qp+mn7a(:l937Zke3K)MV63.O(?&]l %EN$-N2IMr\A!iiY3NQo]:<'P,h&P'bSAmDTrsVoH/h+5-!.Mjopc46;]L %C7%r>#5UV%(Yp(@!A)N4V],!lY66u38g+2b\V]0fAL.8#&7@9)1QD?Bm:(M`6%uSeEpoA1mFf-E5iB%_4]hO;)B`IjF:<`Iou'dm %L_XCOcm_8QiZq(DdjEY6RBh:V*:5cVTnn[md+Y'Gf-76DB,Ib(3:4\V"5$@Lk$oSeICY)Vh?D][sFpqdcj.$?5M%#?V/ibre %EjLUNYj/'9HREA6@+[Lr(alN2<1L&kE\QEC']&n6%d8[75TiVO/b1;(TJA8&c=e2Z#+284Y`Dr!e`dXb=ihB7D@J.RG=+,D[g.(o %i):)m;_j$rT!IJlE"FZU#*1kOn:Y-Ac,cJ\goWl``5l[NIahd&-6oXW7@6Yn:I%&nP-NO8KXsJ6gk*>\=<<=@iO90EUDt9*7O!/+<`m(!XtZ:]lGi`Z,F" %2S1T"@Nk#UU$":a^cPIQFB_4M?k=@u4`:Qc<"M1Up)#l'qZ]18-9hkH*,X"=002Y^:t\DGm(#5(UP;;WE9FMT'(m\DCLAk4G>ZU$ %2F.R2K."\!U$"kHQ_??!S]nggi++nubM;"fC<4Xl"p.QA7]V:ZJl,]!0TfTE:8sn!V[\kR8!q4($ml^O?S'M)\m$X6WiUn8IbSrB %UeD]Gn*(T`XAV>,>&3e>;\C2Z/VQf3"2M3W%gM9810%3)FbmiE'?NVr^;&m*630XdS-Gfp'^*fXGu9MjjpG3ccA@,M.V:^/=a>,F4oJ>c0Q_JmM.'Mm2 %R*u9_V,ECf8[%'BR?a:1dg6I#$>.A5<9>\3_nbsC=L#P2__R29[s.d %^IcQ.,cW>9)ND=Bb>F,S`J1uYnOjm[abk$]/gds<)XWK0:aIC;MFJh*2l5%ea_9_Q-Pc;>3E5RC/3SfDoW3p@>Y.Cr!87&@@7Vo_ %'(BA#-gk4ojC>MFApIiGuW5PL5oe]!;"5[Xe'XSAim2Z#\VWhgQ8C^J&t8"5N,n3"O%[V`^!NR#qs.) 
%TH$2[6Hk?O$]);5D6i:+Hg*;)eC0/fH3PXW]Fu@O](\ZK"sqcJVeLdBE&Qk3mo*qC)0H:>&O<".#=:\XJ_2P)%)iE_3rp,_a[3&gsNFb@8/KbHeWnL(+* %?tQSm)="j?Yjh9q@H7V.oZ5V:?(b-qo+dS %7\WD0^U.X%,uB@7GhOZm*"-]#L._RtJJ!?YloDNV\!hG`,C%dDQ#Masgbigbq5bXVH(^Z_g]2'nFj!R$VN-9R-"8u+@$t+P%;Q&e %&T3egQK,c:>&Ap).C\a@.1(#':2AgW9oetnU+RFL_bSb'O3+cC_OkfM,g59,=_4_+WuJ4Ek@MRkHKN=;SapLVfC]uYHf+CJKZP!W7h6q\<(Qc$:?o#*/_X4/!_I]p*b3O&T@iYZ)JtKkbJ7.l>Y5X!K!ij %_P8kBJq_ZsZT+Gn%c_L4Ja!0S@_j.!<`dIl0D]fpjn/V2_DRfb"5pJ+-OG>.n6uSo[n13jZn\*b:4FNrIaN`_h*!r>;0+;!@XP#5 %b@[DGRYKB>!Jrt-*DP'HM5pU)rmrn&?Bt7Xe0QMp4K4rJ1+]`pn-GE9TV38e'-S!=^j525]"h?@mSF'.2W1GdQ3Ja"9@fSV=%9s3c3T8UT;'fF",6o&X"ZgaVj7s'g"jO5A]tL]9_W'9 %l\pteaol;'-fo#2+]LBLBNAZ59[_.\lr8k9*Zu=&i8sK+]/2*cd&ZgJ?-Sq':l!PiQUOg01Bd8?0XB=ZTVR5eWtu:3a3l(4MK1;, %)@epfNR+JEdk<)rZB":>iZDIU?+FnUUXRAqYGZ_eY$`@M8T#96MC+8?q(6C0Zb&$NX0)K62a2XH$V3b]$l4aj'BKg>cROYe"@r-$ %lp\775g`Jj8pi>h5Rd6Te^ZY[UCXp]`."r^1(Tm+Lc:C_PCZME=aS_Y!Kb15jh.*RCH#g)Y$l3aCR:P#A/>!VaPq#4EB[N]\X_;F %Bc"jmDT\!)8/W0$2Vou*9C-+:,?#@(M\:V3L6<8R@k:.,-ns0_N6Yal`J>ql`Wha!Cnb*YHIf=[oCDq%hOB&;F]=.5QFobs2=_q1 %>IJ*M>8h4_9[DN2D.j]E/9rKM)Hi4>=mkOg1XfAXj'K)P0>%;b91TK?\!J)f-Dk %F)&>]p3M(omN18O/^9fgj]'d0Zdl$R3k[N6a_]h:Vl9-q"&l*0TaWS"t9k4jE_)344cs`]dam3mb`\[hG %l/I4')g*.WQVQ(OEc[_V"Wr;*#u3826O?"RJS&>e#UJU'FJu5(@GnKG9n'Tj@[OMVDpb>r_*#q:(Wm]*N59"'l?W/f-nUqb&qtje %ZBid3Z'ef%J@:9^ZWTgC!$)Z=jWeaII?^8fRoCAs-AE1RZK44'Ut6d.1=H#rn-\tlbS3YZbDqEEbNE%"Oc81#DR2,%'*IS\6bm>^ %=KD?Z$kQ7GiJqJlcD-4tP'+bSS(ZduTIqar!RFV0)A&ErKqA@,h#7`MEJpD#fYY#R%;6Z($PE?#[HdR5=YLtmV$kn-37Z!R6;?Bu %!\CBk5\?`s0/NoR %?3+V_#`qeWQl/``6B$Vi&=CR"KK.5oB%`T76/D)V$nRkt?WqWJR=KR-/G!kd-J0cQ6-l2)_U$h9^ch.E#Nm;Cn_1?\/jd'bKl&U6 %VGL4<1?,WLM9&BkF59L1E6Q2[pbWf<\'5pS`,@)$&uWK,:`hD)c7bY-'O[\r0Qq9Q)j;Qf$A*8-B9mMdcnfpqTgtJl&+HN="FEla %*i03/:$#EOfE7.P\):Vf!D?OR67t@:7F1$X!&9Uj`@"Xt*]+!MT'W/K;jL*qc22(5-.C%9N7b\!,-LaGHl$01tZmjHjTJ3OY$"9MB-#>p\-j="A<0]$:ebLb^jYB*BQ9qdW;'Ep?RAKZ] %26h`EW;?Jd(6p54lB=<$(Xu?1Vi`gp!WcJ^&99\@8>?I"(toDR/VX_=&2HTm94:s8WPr^T"?ODj,Kh&/\j1'ai 
%S?Qj]J\h:dI(fG!7iiFr5_Y!#P2fl/JDj'Pk4l/[?goK,0N_3g[s1;O"M#^c#WecUcgklW&Vrup"MI0FDR3e(N?6`$@.*"'o:Hra9]P#E %mgb7Z((\q2=#[@fc?h?`hPqC+b`A\7B_$-T:BPOd$W&]?LGnD%+%;*eA6;am@Hf&h!,k-3pp_#]7"L^$/F?'$TA!V0H![KrrZl,iaL %)4AHP5SIJJ[USfiJJBHuB]unVXGCoY+hYK4ah`rA"$HZlHeq'GfVFYoH5TYsm.lQF6NC.'[J(d,b(i6uk%0 %`D5!gUuGgUX#s?baBu<&`DlooA!@244(sJ?DhS/UWc)V9ma14;8_O=1M12+jDAKNCJO3l+6/$Ft32b4nI(g,i;?9@?6K')SKNBYJY:WU6C8"rtt?[Q#GqiX?/G9nV?"`IK^U %k#aH97^-0V;N'qSG=udYIeVG0JX`NdkOI"Y0^ZR,APrds$VUc*oJR-P(N;mT.LtIb%DkkU0EbA65d#+*<,3."*h.'\9E?Dd?fZgE %NL-dg)Ff=]08hUh(4ou#CGO`dEgN@D3ocnWkS'14SaUQf3sp*tj-s"Q>p-:I^5b>(f$roI7MNr23m@,'no9N5-Zo/>YE.tZXqYjl %%K-+Tdp,oPMF9*Xq;a7X$9^fEV.ZS.?UgGP)]5MoC[=6tnBVs8CU"]` %`47V>^t]K$7?f)>$/<&2-\3;eU<]V)Aqf]t+?+$d2@>T\KE+$[-plD#PA6M'&VXP?k9o1MG)7IpVFP@#\.50f_am2BAFnofFb?OF %V0G#M4N;mRKd4D+:D0*]ojibG+n,^2h&;!=>`LKl)*/]i45`Guc)?A$"kYGKKW+lk5Pbp1)_VRM#,&GD'RlJuMi2>-GJp1:e,ZE> %R2r9]_MtT%D`m0E@2E@cjId3'fkeY&8N4`m8eFn`%7^b)fO[QAV!&ta1T&A8i.lt2oZ#9n]'5q(.+<^,\k.6>VOp5)&); %Tg6f,po``MlAJ&3BQ+XhEY#*-A=p8;P9b(JM3`?1nEe/ %B2BA;cOTpl)%-TBi@S[;ja9Qs`MHM`]<.gS$P.?j=GRFCXRWM-Z:]7egYNOoAb]hIbSB:0>=eYpNod4up%b/'bSW_O.fr_9O+?K* %^2$Q&cUFPe?!<#MA\pgB`AKrIj,X8T2=TF?[DcR-in[D97I75Ch<;1Z*^dq@/OMA1j183=]SX,0KV?3A]i@]N.\YOXBQ]4[Yk-@u %&I.BQ"q!M9lQmA14:?u2qPk]*l$^0M-22%hc8;KVM>)H>:7FXslQt/Vq$E_R]8NNLi[,@oUf5]<'G3d4L_Do(n^/,_k!9oID1$)U %b>3:/&V2^06^g&sk!3;jHkhb`NIWT`YW.oGENhP\e4!sR9BIJg1hUXUjJ9UY'/8Bt_b#cU":/NmAW#sA"m3+p\?P./ZSW+ABBIDS %:U1RJC]Ie?32R^^3A9>m$tKu#bCq$:U9FZ&0N0.,0MdMD<=hkA,m`(5[^P8pK7bbDYCjpFJIZ4NYKF^'APJ[8-pE>'F0=5]P%:`8 %?k+!#.[tL/LM"CXBZ(GKMI3_U0L$:NW2YY>>r\\qmYJiX462?`bq0SYR.lBJ#S\>lJJj&IM&;FS=b`S7`[/JH8->6a;s;NN6EcPt %kQjaOK`2e2k<)R@Q7C"RWf$2eYs[;0m,"j:q%"u)h!\RJ%"_3-S,"TR+$6g>MT0j^,mJu12)O8[CpZfAI%aq8g_jNTlGWT4iQ3&s %Zn+5a&;R?W,uZLA8pfE!09ciq-4_I\7#6TZh-&>m7hc8"iGmlfGH7'iJ.GBJKru@EH!$;^/UeN]OAJ>m"?Y_>5jj=kG<+i^Q0jUY %%`r^LLg39mA;5Y8Q%r>;#!X>0RIiE$*N!.d9Ve*sBQsbf>d#s%/M(q:A+:*K(?nkV^nAK%)eCQQKk'7N:Tss7Z3][%/XuV<*"f@: 
%h8Z51;Z)W-+(72Sc\iC!`UQf.ru<`q?P).*UMi+IYbLW#/XZhm=Lo]N67`W-EK&tkPT/ue*7G^f@FE=?2N@+OGt^\1pDbh>>fC)! %+`UZ_#Zql9504[:JP9Xp&/rHE8ELfnq?B?dMTgIi-G*5C;<8"sGL%@:&)tsnNtW`uEc[>7ENmKKWR<;T^gD-#a"IbA,+P!E#=b^P %_HL#`Ok<=)qGW-"]d$s7Mc8/SnRl1N"R`Sf?\HC19jZ^blD3^[ZH<^K.M6(SUY+R,SOSn8$jSj_(NZEWL2&46KYUl-F7bQq[R^W\g)V`7I=5QrfhB7s:i3\HgIr5TQJ;n$Qgs*8ug17<`2f#Y%ET+(gtp[JVqi6A&so^a@7uPjTHB %?M^R6L-ih9gd=LF_8^ij"@R8C,)/3s+cn5Nc&]h`Eg\;>h%[m^0Z5(I"<34hAO[YSAG[7U\?CNQBb7\-!u&nVcL1E$bM$M:g:i#M %2$-WDLkF<"S"ke^iJ"HB4sgK1?H %A=-;c@?4f9_I`;-IL%eGb6-5*5_(:Z?9n]3qDu+r@oZ3tJFMSLrjs2P+[W_f=:k_/FYE?nOlWMU#NGoUn\+hTV/eTfM`p(LF.P(.@8YPg>VE"fi'eQSkb3*ACQn>cpQ4jTHDC %pF=*[S('PS^*)eL^ksk-"O[9KR5.?Vj-a%P(lV+prkiFCZ#oN85X2I+c2_sf3!0S0:@L6GB0pC]aPjr;b %K*'/'/Tk4TJXscGL3og4QC!O1H#=>?9h;_sB%[5$bZBNfrhFomdmt-@3"I_7QG_2$Z/Yp$q%HsKeF%"-"a%l6+;)PQ?JiQBju,kc %B(4GK'gl;8aL&\'do9_,/EV+P9n$(`4b>d@Sjgn#![Oqm"S5#"e/aK>V_$(cT1!k,C3u@fd"4.nd-"-1nHoIDp7G;GRcE8TeC*FM/.YfrtII3+r2HIJ7[3]L^9+*n*oH>c[jK)`[?mR`Znl/%cY^\P\56q^lcf@ %_.:$>&m^&'_eU*D_%ZLHV8XAqgn-*q!)J2H:'McUi,oqR1>n91h?RL:]gme;ifCtpKV2FG'g16GD7m6OX2/Y'Vo'j>H1PkjEoW%Y %U2P+XTP348,W?(_Pnr3(\_#;0\Ed>-CjlLUmXl/+"q-DLJ4Gb<4,qcF!Oti$7Q_mt!`eNf![9CFEec,l?uWjN^`0VfrG^2@iJ3(< %DSB!`b6Pq,=?M\8W)m+0GSs+$!?HD8 %&2WV@dQC(T$.?cS@T/mJK,W5M1_KrOAP>2e.gPsK8]N5AHN%7ZltaNEp35a+qadI` %CH;L@;2.gU0Ppc2;kc_\MeSD-NfS)V9BegJ(>.M8NB+habUpAar![t)Nb*sGZ?psQ>9"o8[B#7H]A%r0T;b9D#K2Y/W=DLu#W3Em %4E??V`>LOd(5\^sK_jmPgBPDt`uE;,LZY36s+jYn/Q$R5C07>llH$Po\I-,bVnKhW=U %q=.A:bpM5MdmL0jS/:$X$UtkS4F+D48r);m]_I)>8I[bSCajEZ;2h,f_iq:=b/-f]Qu%ErgN^Bh%d2XQ90fc@j`Xe/"Wq/22c`Te %'+I?u2p)1NI5mL0@L3-Z_Jl.d"&\=lUcP:FEDH84T:OKP\BZQTnFDP"9)4.P-QetP8gQgI]klo/8S-YH"fAR7QqLg@C(!M[BXYUZ %jJLa\]U6lt*fsHsA\2P.Q^Q,6;T1>ZAj-bKb"KXdqhEl4Bj5-8[Dt6NCTi.Ve8a'2*a*hC$YsZ)*sXYfI3=k+7.srVgO#XgAEVWm %Tq+i=\JLZ\Ogu9rH]biTRh`r6Is,hA`:7h>J5-tn5fmOlXpViV*kE),u/!aCrKg#^aa-3K# %ppK5&(OScIf;;IT=/oU@a3nGEY*5i,]GR)?G+qaC5F8"6b,'0CB041t$ZGQ'CoqR+VGl,N`69E;2Qn's\WFE]k%;<#?.[3f`d+j) 
%ep7$JknGc$KTiW30%Qc1IW/bji"eCfT.ebO\FIoq9Y9o"n0@1b>p!C][(5`?3`VU5*\ZBiCY1A)35!\k+mu#>>WS5/ce1gdn@S5;Ao5/$L[ZPQQou(RF18]g)HuM\'U(oPWDjiLC;T]< %=6cUFR#/KA4qu]e4-<.5Ru%tS[pZ%l#1D"F!`KH0g6/c/9=5H/>UKBcOM^DN^+AE`CUCWQBRuj+C`JWBL%eKndL1Q9T0=8[)gY1# %N/I.Q;o4HYDh6Lt)k++P'8]Vae2TG'2_]`H"Wpt*-0B(.i?a\e@d6o+K+et8Ht_IbY]"p%qZC@7@0[[O[GW9(Vm:3QJIM+hR#%XR %qZ.=UG[?(>R3_<=.MHaR<=a:L]Mp;Tin8m)WUZ77p+_@9M^K@]?7`oHiRal89Y8:+rJ6&6g;Mg:=>[iJiCD%; %Mh[(/_?EFIKu5GmP?9[#0VS0cc3T13^G(d4C0t6i$AXbl'OUH %Ku;,53)!PKs)_!m:D:sI6^[Xt2a?Qd%!E0iD^(o1>(q6+L&&npd/f.ZIgEE[o?P\:fY@1!&8!"-;Xm.r0%Nka?0@$Cn8?G[8bfV? %e*9E5E[%[;,Zcd/c5DH@CD0gEo&^J#9RfPQhe)SD;pDM2LfkMM351@ZWkGDHZlHH&TuI,>C/.TH9a0G981YI&O!a[fQHPe>N=s;fl]>'P-LF[ %F@n+EU@]BmAl,+2Ff.!5m.7pmK(gJi/lCL,e6>"eg=Z[7@X(8JSZ&*QTQg?"cg,bgc-VeQC %-n%"p:S`k\\&ts&7-`H;Y#*Tq$\*l*RH9nmLMP:L)TK2NTY45[Be3oJ2@PpnS1==;6Epn5jXGtm'Nr6gh9fiOr6=bDe^N3IrS<^[Dt%YR!)0Hnn2g0S4A9pF)rr4u9E %YBQcd74M32+uLrV6F1+7-$PuLc=PfYPbcHZmAhOQ>as:I.k>e5^(dP>Oh11j)d$k4?/.TgcuC %#'Z>\97]MJS@$-D,lAuLfnqp*=qM(@/$%JX6QWPq"R7:NLku1Q2"tdcg)H %Z;F)0QS.LmA/=!BEJf!OXkHPP&pD@WVspM'M_Fk0pH[7?YHi>C3,;hsGD\$,R1HGPk.>qt:j%XnI'i?Cl4'lm[nb9__r+ZNhclrn:2ECq##UN_R%ZiDb9Rq+0XTdWWo]E\Ta! 
%INCPdTrCYX:HuqQO#6%)%+Jus.]uZ-EN"N"aeGQ9QmKl&bCe+67!.!H3`B?;lti=p+XY.[_->NHoC6X*,/d+em6g>u48$HD7QkT8 %3bZ-04Ys`n')=m=In4W266bUhe*2-j$Ct\mb`#bLb5$;&`+mRfkFMVU<<(egUT$%eqF'OLS*1.t`3/G5Rt].8,YLU?"eDB6W!W.P %=/1rrD;NOu:5*;a0ZXGr.QA[;(9Fhd.GIg);@N>)QJhd[n2(!S%DE,8#O%E13(7_8V!k+k;5Q8O;0SuS\]M^LA_`hm6IrKk$\,3- %I6)g8oU%mCob<>)bUp<^*W$8U+#ug_@+U8T7L^+WAtWOTJND,,.f?:j^AKJ6TaIe@n*OG3_F8oJ`nM-[R\Ha^$jL3StkKt\_chMUZBR3C&=Z9 %ZHd]^XNK:d>4^tQ\.uGKFF5&K+%OAldIHb.L5C]+n^'JfkkMZXX$k"Kc)s]]I,*T8]BK#L3^-lc[;#>?Qo=+59b+("R)3;n#X@XP %e%*q97h`R`>a=K[puUP_Aj"n09g8qh;6PD'89mOCb87)j3Sdm'FJIW)gLT/`ZeL#T]%b#>8PS?USP&"lDK/n;9lA/9j4VQ$U[At; %S.U&,,&^`INJkGh@GFi@[BR^u(V/TNQX!IPfFE>Z(Omiu.26L?"IZ5H`Gn4B9[bC?"N2nb<+7U;UFiPF%.Ku,Wcg%m-L8&rME^iP]*Q>"Bs=NU<#8N:ak=l?Ku"%d.,Z %);2s]n,Zn4=tL#A)Cq\BM$Aapr_s\9qJ[q?`!Q_6FHTU*I,Uu].L%^,hOM[M1?"RS'Q1a+6Kagek/-F)\Q0R5;)Du$9j0`jejYc\ %+do*5dO:Ki1P/3]`GK_,PTMQT@@MAeiE[*TDO]BX;5Dj=p=Mm\enXj)X307,.3uQ3h$RAtR?9=F_tliCVA0R]4o@iGVijcme?dS? %4eaC7nHB6VOEg:!+&i3r'8LY^iES9\ma7`V5cWb^9?$\%HQp+41%_G6k;[=QjLQA*lSOE@M3@25E!qA+Rq-bpS8>'3]$a2E6ca7ZG+!bCK5oW"4hg4VQW@$COL#Ucto#f>-/I@;=K\,<$',t'/$&tBKp!]$0NLCmKb:K8_Y9LksOa&sH-AKt* %mZ(V]R:UXloN3/LTaiiH919Z`,AgjI_eOCL+Z_5YeAim&_@30^Jb93[>n2fG#=]Nq4%aBKXTl/Eb`".ZL6eC$@k0M,]3gN+"5@BE %k5CHb#HYoB+V4efbs;WRI"XXNQSmMo1.2]q*MI)"%a0hU4JE+BfkZFi2qc^SknMFIR#ZEd8<62JO>XZ#(8EV2/uu93*i]]PON9Iu %46rtL0tJ/mYX9'KC%#/Z7MN:A:,JU\4@Pc(l)_mnM`*&,+39PmbW2S-7qO/cL0M$sX!>2t %\#:qi6j@"/n3mOgH]P*]%DL1==CgoD[)'4RT2JjO-=>t]gs._O64YZRp`bVLZM5W%0=(@VAFXg:moAE^]s/;[]X0n)._*o'mWIJW)%]q.B9`?"n$bu%\+!(F/Bh=Zb9D9EmV'PbYW2nN*'fcJ(W02"b>"mKW1]Q'c9bNeBYDk/aES5`fH8 %X:h0A;\:kt3!]sq\e27Wg\-1g#hY$VNc##hY]Mr2KZi"B)#[!(# %Z=4'-n4ep[`uIg^b(*Md[hOb.>;na;N=k=Yk6AK\!'CUL[Qu"2iRq"lFFTfS5Z;%UNA%H,24s6C=4_E>31ELdoXbZ:WAtTH' %2e.kV)hH<45_^+m=+G-DAh#."GRnW0*7_Kn5\E&BN_3AU+NAtSK#B?&GbciDA?Q67Mg4EN)FO0J-SqQ&b)%WPt6 %)]qkfHV17_-nRi<[72MuCgY+T5l!T'aV;lM?;1c]//$M$JgM.m1AoK8ur 
%gUqmKh_B5L9T%aL1dT8BW,8Q==pD&`YV\R3e70i0W.o_Q:j$TdS=PE$)jLk,;\U<8SNGBBYN'l-O5>QT@oZHUXQlgHdV(k&jJGY3Nj4@lAD@qQLpnBjkeo`jWWqm!L[$Me-Ur1q %$T&q/9hX[_[Fe$gTNK!oDBjc(#^q'+G[6A7083@3L7(6=X3*an9AtVD[q0X>m**UfRZ^gf0-4Bpl3Pcq>\k %2@X2;A9Yi]CEO>lBUCNKI$ch[*n(3;YVHJm92Q^Z3-Aqp#A2.VCA\DMt %NE4=)cB[B:[:l.HbS01SS>ZXhWi?km!r@U@Fod"X6.?WTU94js$ceM&9@act3_rpN`:a2&J6l)bQkPMiF,-4&n0ToKI-G,\$'1L?]FRJY<)UeJ,+e:5Q0pV00K.([m0oUmjO3>,CJ>2WSdaIs7hG;p@Za]ia&`,J)tG!miM5Thu:I,H0k#"ris4aoW;cHLH%UZl[.DoHi9GkqURV(k,J65[@;RHle+2/De*DIOM-*T'6=3" %12%I`a%.bjF".0WTRd2&2']9K37;d1Tngoc8@gu2VY^KGE>"H"SP00BW"M9XMGbV8glD^0t/#oA^O:oYf;^d:N %Y\j70+5("o`)rf%[I2pEjVs%p,IWF=m2<]&jPLuD%,8R^A_8#rQl.[85rD7WLmFcbgCjA.'e?eFr7f9?$k;sWY@dX1:I[-XG0VgF %@.\'OrD&IEh/Peqh%%A+$^ %&uFe*IA0O0M*B="Y!25"k1W`oM,7s^BfG^sSLp'a_5dI9h3MPN'qZ7N<g\"i`#;4q!*F;tqmD<\qO(?EoqR54a?Xm#d]' %`9%i'=X#bRa>AV-*&kJtI)=+DOsNaS]^R-9\a[J4%4aa/M_A4^g#XaBF89C60"^nQV=girn`;:(\p!b+2m$5(duCD8EV7JtQFV1lt=ZBoZ'c+qmW1WpLirBNOr7p';ts,>e* %03-g`0>7/IJ,\Iu?@,XWI2S&Sl*@A$LNDq^b]eR-M[Fhk<^6WS[m'JnadpjP^^2^+[eJ.L5P1d"s/RV9'WGaFrUYMZC0qM0";NOn %Xl'";TD7+NIpO-=XDn17++30[YJ%:2'pi7:AB@9,he4AYrmZ.@qEh:o?kgBNnMgID%EJ.lQXPR_2f5+1qd\N1VC"dZ`="p/cDTQ_ %Y]#3&K*lD&:VPjFs8''J'jc:M?QORk`3u.9YR4=DJ$)(kTDe(ghD)Qh9f!O0rVk:t.Pr26!2Z0n_(>OXh,t'n)Vd9JSE)g %55p;Orl1"J;0/RBiI9i>gkD*/,'N`=+8s=mSi^;CQHh_hK[4)F1E+DqJ-;n)s];B/O]*jU4qM,/IcRbP4^Gl\.,0"C>E:pfH;n4qp^HYs/#BAg?h],tIJ8LsfusuAqmd6MmeBiI9$YPE>]f3f4[Tm$pupnU6Q$cF %_\h'N56'"h9FUT!F"(U0-q[OANpSq(VN4PSq'+jaJlR9\9df\Su8eNIDOeuDT'b?JFX[K,\-bMD9Al#>V\.XW#Cfb %.t\sJNrC%C,$De@WGj$0QH+oLUqk=RVs'Ya?/NB4iNeVde7?E&8Cp@q$ %pmLpQUO3`hq$3TD4o$e8PRNW15GeRIGiY9SFe8S"O+h/^DYPksI#RM)T*eT]8O'Po+SiQ%D#*8W;Z99)S?=*`puhS]Kc2d:MSDTiW!,`;C5H*r9q="@$]B\ZBm[HH;[3,3i`f!(V %d\@>Nl;]""eD!1ZPp`2LmcQh7q/\cKo#i,$;b0@D!r$p=khY]Qc[4OI8&*fgO#HoXc@n&RrRDGqa5l0t0hi#1RP291QYEK8`Z!s_a\IdE.?8`C9g"2Hkc2n`e+*eXlijl:/aU5eAX/l#&_?eD4:k^Oc"@g-Ib_9Cr$B7W^A %Lf;(n#C71lI*(s@b:gPNq=8*`TS'@i\`K/ahR2p3/$e/kBAJ\Ef3\J,0P"Sk] 
%EV6`kXhNbK)fNAtp%?pBHdri]0"p>?V%j0#?.O/K[Fa;Te!aKYOec3!??=R):q#\qAS";s>X\LJ2r&M]FPGMln6(+-FRr5VdArB8=*Y1BbqPtFkuncc!,L@HadhEe?aV[XZ,-^*BH9KqYFO4ii6)8E:[r&)`DVkp#-\P %Mf%.S!K,b'MQ9eQqHnR=AQN-KHHiSJ4e#r0MQ4K_af:((`R$JBN^6P,VX'mPNp2^b0>6@K %k^39*?^>!K9A("2>B0?/CS8e:U)B9''p!:B3gQI* %UYWE9FM6lZcs4+rp!%82CrRglR!8H$D;-$L)NAS\h01Hc8)I1bkpEdu %ULj)e"!QQR7=.%C!mu/J#,HV]1ats52ocZ[s"1^^gTZO]I.*BG4WDu.FplYZ@-QDe9.sIt3>@27Z*o!HcR/GZD!515S)a?F='Qg! %Ed5I*i^tW1k*B6'Y3Uhg]!]$(!ZMUV/e;i0;Y5iHY?26FV-Dn'h6I5\$V8,)Z]U04?bZ(\A0H2pE79e*>^tS0X)POLYU4%+)6fU2 %J1FoLrUncJG:f$]``.qQ&QX,/MNq7JH.CGH?ZkHF>q9YF42BdKMjK:iM<2o5^8MC:@mu_8Ku00-UO9L6=6<@"(H*\]$\o0$RJj%) %PhLc7Ajco>@-T:#0VUu>P:Tg^3-Eo@kJ:b%+M8+t1$IC>-R"A-$+m6LTeTFE[8j6Zq(umuXO-=KVupch`A3f99bP/C''4'T9d=/f %CsCO`AKr5>3jqQ>U',CA$McO0O9NE_HS?Mh2*rRn)4qgZ?T`W,P_^]!s)hYN]5S^*5UbKDg2M4](R,9QsX %kJ$l??i/PY^\%Y% %SpC8rhYY-Ms2A>mr.FH`qer#tNc&.7Je.VlA)Hh#YPmUQhXQANpl7<5k1PsmgofQ$:"BJLX`O1V:#noZcB`L-,I^nC!YZ$1hn!C^ %^Nn>6Xhji/ %K-\6UqtE_]V.*2o`0YekpP]A*Nb!%_4b4l!E4k,k:V"R\GIV9]cpB:[ARB]njhLONL'D1`a>V59GrQnl(++(tbZ=K^REJ,)f11q& %>,0n#gG.lg).fu)OlrS^9rZtZ>q8KGp-$U9B=(DGZXF.d6kQt<&WXPtXhECS0ok],,$-n3G6iH@oYsMogO7(9P]4.K$9k>`5k/YC %.k-3O0BMND'!Ou^;Z9WJII>Lg%.cXd(*/AUgM2gGJM1Xnt(N2R2'^>)A^1HK[>RWMLU!K %(N_2d#o5j,FOTCbNn.r'CIRSZ=iIbc1'Y%P,c@f%RJcTCKX-lK^`pWc!>IVm!m]f_?N(*IM6,r@d-7Q9RETa_1\Re&166SeD$_g[We_k6k\P0>7.eDu]4BGP@=:a27qc-;q.K^&]n,0c$=iZlKAo$Epe[<[kc%LJ %'S#33gQo+729TmLqiNpnU%gE>4?U-2ic="X_2"Y-VXB_jo1sci[=1-X\?i7uouX,shr"Coo3[!fDUS=..s=,S[HD>g4JIg"rQ/"t %@Xd9Y@ip0*dFI/^56&$GG@%QHJgJT!A01V>_i(&JTD\32FVeAgAEAikLqpCqjKZp`j\(oB]&NnmdJ/_WU9\:jP,6chIe9TN2bMEr %`s;$7[1l5_/55;pT).[72,0>7=PK'PGW.E+:!In'[J],^5Egf/$qVg"4oH0e='lf-"qg6bB8W*J$eFE36^Cb#^&slNq'II=oZ0 %qJt=tUS^d_56FoNS$kEa"XdGk6OqP)A6^)i\1rDP8M&dGEl+i@[#2L2d68%1G(_ho`<>rTS %WS?8?iYr`*%*DSkZgE]`&d8EukE(N'K73nVh:@a+m9< %fAb!,Q=[k9"7ij'0m:4;eC!OFn==P?[kAbg0\EZ,AYicp![_LChdPs3Wsp[4I0,V/r:6bX^n6*MFc[^*cV.V+O$Btp>#1:O,XpGus+],( 
%jO6?iA3CO]3>Yk2R."O9M/Zd=c*\':Y0m.@gcf,I_4T8iVLS!J4a<$B5B!*=h8=1,*tN2*T=+Q&rPgeqp\T+2q3G=\5PX/n=%]jn %VX<+s]r[&F5Tb,d7TC$IjmX.D'@OgX"U_@p$^!bG0H6^kJ6/Jfe,a,50;RBpbIE+-./nQA1!=&#o4/;09+;VDp&hk=B:>isUOmD6 %#0_FJ.am87MA*B7`rMY9X[6dUL6pa'Leu'eml"%gC0%:TX%$D219_?hA[(Dg %ar75[(6)#W"k"*k]MlJ-Nk^g$l3$ofpGGhNm-D$P3S<]LfpbEWJQO7QZ7WFtiH>gWY\I[:-,\0IJoYtAH2oi[[S3&9KlKF5fG"\" %A]C;.R%`1?]`nRuW']N&<,u=[M$WgL6tb*%IHnM3f]:[!9iXb<(1AONW=B%AV\*Da5Ko`V6%Xl.B:XV25i5udr!5p^e1J8Zi*b*ITW8.g %+Jat`@n(/>Zjf@,Ll5kJ_J%A.Kh[L-7QM3;N[(TH$6@0[g(n(pfR=r[^qb'L>$4uAKV[c!/-lA`1rDsg_83@u.a<9oirYfA`O;Ym %"pf'+NXs4_5VY*tX=PosCb`IR.gcIF^a^%`Y(EF:^4[4p7t:XNo9$g6.WiWd.O0O1"[iBp0;*ihk"sY41b(R6j,fS$3",g3'$<-7 %dZ2O0N@m#MKoP"%XIp=U,3$lrQ1B%LMO'V2#A@!]%;7JSX!G8/lP-.^\\[\NYj4&U@,MX$G&3's[Z86ZD.q3m(rh7,4mm#NTqQlU8bgbBi1MRmp7,WTkIr]4'0Z2#0BBlVCruZpD?JPZQ*tl$sD-;s$nVX@DJH'bhP?eg/*N#8HWX %KQl'Fhk$u:rFK>\oO%U:%!DWUENAI-qSfOZ!H"RR5,^3a&RmCL)j\%@WH)eT3LH`bCUO\P'E4XcW!CC31GBDMnB2J>kehV&?26#m&"4G,#3Oqi<[3DnDV&R!Y&UdF(o"U]N(2WS9g^llH5[N^R7e898qj;ist4lP:0/lW`l;2#SF(KKXllAS`em\"7*m#)JG7?pNI("RP.\uJAQ'u]Ap?1u6npnA,?=4nN-]4eG" %A7Ne?#cS&n!>@:\X&37r'3%kYIO>bnp637B&QLuHXj0R1NC+/rW#A1bLKH@p.=@h%c;eNc528ZGs>PY#==gAn9.1(rM)nfqF*erLa:'>o.BN.qZR$@Q7TYXBZPB %JU5ON8a2ldn\e>o^7cF]3UbU2J)YiboH`Htng9N_HIa5Kfh1?>Pl-Khs!@WK^L^-t'=u)rQ&OBJ0FCsSq %MfI\?9'%D"=Uo_7AIKSe.\V-l#?>%RaUVkk74k3YCkh2DQ*eLEKc*f*=#4G^?T]`L(>Fa:(scrL;*@`&o3.DYGsM))Lm>G4$ZZtC %o#qE+#>r`TgX>>d7JEq6SHJba8N")Bo=#*I[WmR>"+-f#*1.D&=u=ta>A%luP6isb'4?Pu;`3#ITM#"h#UO.L1uijq;_R._pQ$5(a(e)g235^nu_Lk#;@sGecd^=Mm*] %b3R3cN8eW'3@6!$&KDrp43A\9"W8[UU]6KOBjih":5[/be;;jk=cHRn$rWhc)r`WU0+k-bE;(Wcr!r9A0!i6RBoLg`hAJO,Hi$-Gg-5l5m)&E.54j, %IO19/=ei"GbJ5[7\N1SSQ5/5W9pdmY)a+b^o:Y[D)[5r)9($[tUh's1-$rOf"/M^-_g!*!>J=':VmG(Dm6$pNesmX %KE>"r6j(1Pn,rSC-.nSmf"cc/^SlqN/@m]`[g)b-818F1E!OWqYm#4+0-t/cXTj7ppqt-%cc]G0D@f0H@F'rrDgdKp-,uq*1ATi% %B7>07Gcqn\BsQ@$V"naE&+CcIr7hO=+i&"=3[>b?8W/Sf@eE=jO&8DRp/+jY[Kun_k0r#q?aV5p=qG&+KaM$XXgCoM1#Yc_n%>C< 
%=Q7*p``nQ3Q=6lI*hq;:T>X'Pjp;pJASY&1=rj*")@iC:M:J^jlOE$QIYE*S,]qU!$h!F]DY@J$A3h=om7lEQd#M/qM=R\8P1R=,6_]FNGo%8HUA9ih9&+;CS1mOeUO+PEJ.Q)VFJt`-f4MA?]_kLp^E.1Nd+::CKeMh_V\C.0?,dY)n$uJ@+I9fRO"Yr'0sOKa\#r%]U,=K %91d:`X"*6g1mN)kn^KBL8?2;jdO<*bbGmgi_l7+V+WD&8>NF>&(Q,_oFQUE.#5'kZ3o`\5"7u.V4aB0Llt)GKRVt`+UmX&?.@Hb#2$&'!#\\31_P\o"iW-M);0JG;13X5dlmpbY>"j6FDus#V]le;)uKj_+/;D %IGXDX;lTPpgJ9^',,7\9"NG=3`i=Q'!SEp\Pcb&cKCkp66S2uQ7R]B_pp>mO@1,.(Rh`3\EA'DWk.4VU#l?HkK5O0!Hf#\&2=pK %cciLOn8dM-k]NLBD/e?2_U[!d->AUke[*;=*oa2f\g9.ZSD36Y-saj9=nq*q)rsi/)?\`b4lrP^e'kIQ`/DjC1jM(.=NMP4!X3ZUFXJ$%,:Oa+IuqRIdRM^ak5MDLq?8V;OM;!5u.N?KtH,nC^'W^+?UOI@qu %iiC$C'(jb5"QXm;)+fO-1/j-kX@-8_JLL,j(UQfQMi"Wbe7qb?G6^ui9][]H*.#jtnR%+cGOf8cS>i0A1saYF1)ARaElqYQN7k.> %aH_8,=_Gs_5?`EXN0!U"a3:rZbQ8hA0+M9`rWB1,;be*thc(Gkp&([%?;o14g3J.r\IU0]LSKoBfV]@ec^-Tf>U]!SSc&q`UWAml %hmqjYX8!Hdd1-YLQ7@K@eeU1Pk/,&B9^H[=85*;3pWI$*Y>eY6973d<2B4cY0q1NFW`LdJ-_l@-SrliSN*F\^+h8,_ArpnPi!*n5W@h_[oS$\nbqeVmi4[Vh>H-Rqm&;j$#FW;1R'Q:%sNrP9]Ze4<:&qm?$h&#s2H*V4F)( %'U]c*\Q71Y$k(`4/qXD7`T`JbfMTSh;ri7I6J(M(cc'3k2in59Bl"uV?7h@I0,MIS^mU.Ji"kmL62S]c7ta/Q*jjK$B"qT2Mkc*c %B#D5)b(qGj@a9We@n_Gt/2EPcG`JQHjHsh<)Z&G(8JKS:,("Xo"X!ANOpoFec,m8o[if"jNK3e!)'/85j[]6^uSUAT[nreg_ %gkF=E2`:W/F]ofr=T2T=L"+<..jbf10lsU8!Au" %cl?b-#A>.7'_D%:$U6A$-c`#)M?>pKLQe$XgQbb$,YVBN\O8Hg`=t@O]Hn;*8L)][Z'JAZ5+@%jpi`m\SZk@H$CPd3pGZUg %_R)Je3(8W^VGnJ3`R\C_EG,h+AFan+!r%[JggIJ`qQ"_DBL;ulCg2I6/=dG=I=t.amcE@?:Fc0=X/ZkJg)`OQ`u-i3N8*\*"-)^F %R)<(*D[(@`(@^(T/8stKuWg\O9 %ZD=mEe$,]krfE:3-d[X+U*2"pbo1Qb54\/#'$<_T;G7\+]*YYWU1-G^3Z4.,0l\W8ACUh_i3a=!2Oq;NaI\B$,Yfa-q1'[4I#c.n %EtXa/,'D2\0Q@5($B2Q-ghnhELm&G+-%RC*%`3LS5r%Q7`8Y$"dJ5EBKUdp66C&#"TZ::oOid`012LV1#NbGu`&%aKVR+F^jTmr) %4!DRB;+*E';#+?V*iB[Pk_U`1([#/RLI(i3>U+057)?(hPVd5a-pc(qbE>NO,X'\?eIiS/ScU2h#In_0TbSmN./Ka8F0hK1A79Kn6%\Tj+t %I$.H()+nkQO?A@:k,Nu&<@1`=aE2CT(:%6]43BL]%Z[)T.A'J.c_&24rskZ29>$k:OVC\fAkuQmWB7?KVFe17dr?j+*\D5L:09+7U1\[Z.eR0u`D'MhG6@iTa4K0fC"co4 %.k=ND*gh^s(B\7^\Itk>q1<_<*CoS@WC!lF>J['Llh[dMH0)Tg3#K56 
%iDLeHcf>,+i+p3^bQZ?nY#NX(,?OLBWDLa0b,*EIMV)GeaWBddM!$3j0\lW;;SeWQEi6/PH]P]"p07g:hjYAD@Q5:p@JHE]DIUc5 %_uC\!+3(*ie/f*hpn:rePQ<"V-shkmH_:4[Be0#(kM!f1O6n#LOA!/b^YC1/SB7S5n9&Z^=Nm\_91Ai1B4$-P+((B^k+I-0.:@Gc %ADUnD[e#;ir[n+L'Q9E5XNqYWS;QJ;gO*/Ir'aEOlsrp.iIr33nE>N@A!dr3^NJc,A91m4`5JHmM3Dg4bfoR>>?X+knPhCQLajRf %#/i7Y((ZKm1`i:(oGfI_/8%;K3tn=-T>J'c_[q:7\:^YH7*^;Xf"Jr%Z?&hjnY[hXbA_UH_AT8/JaWlkC$]rTfV'h %W%<[/>*q;*`mVB9gRsX',s%S5CGPSFnakmH@m(cCFjILbq1/kWR>oQZHgXT=FEMP0Ht\.ci6q%O[+@>7fcV[]Fd@=)Lp;Ej%L@-ge7`BOX-U6)4J.iN\TBGW.=aXT-.Q(U6p4>d@0%!RdFjeL$7opP7$AJ7)n,W+2mj)p=7+]fLX;8.c_-?n%cAmGhI %\hfgJ!3(#0Xk#:Dg!*IfMiq`7QSX%aK[NT8PK>RXb`HVA!Dm2c=2#aVFiI`>4rqc0<%L":R-)'6=oE]Dr_bHu8Cjoq!ff)XT*G3$ %WD"`]pW-I9\V-6[81]Ws#DFD;^J7f2Z`&D\W%\^ml;bt0U5 %b0<_WPI458/VDJgbY?t90*5^Dj:^u>;F#e\?VM/jZ7ng.^N[b+df8uK[,%t5t2E@ %qukuV3`;41QbF]/g]di@jqg"aoH#%n+3u:%"=b&liC9V6]19p,%4NE\B]"%\7I&4J^'X*VmlSU&Z%q$H;-ET8gSJDfFt#!YHGX@c %`\SAM.]q6GX:9uIdrV.ZY4bRck@a%ei9"oI4^o;kW2dI3cs3 %WEmT+8[\@;KnsohYcWu@"1dqEDNt[.oUbmI:uNu9c5S^1$+dU`Vc=W,Rag_@4mHgWoZNqXPc--n`CleA$6)oX+^ %gNP$??B;WR?M&oY#Uh/;]bZK>/oc]5jQ8g(2-+5K]bLrSB0(n@Ra-XWC-!6DEOr*C7eLU(67.u6e7r]AU)9n]stVlQM%c&kD6%:s.'#q/rrV7]E&(1Dus,m^W"cPU)L5T;T._e[oGf=!$'JXl,ME_.p#5n+O\oeU2^ues%r5[2NYD>@HtY&(%G$B_o;A# %>=DEAd\u.1[DhF0:W!r>ALHb6-W1ATD(t8p//0*D\eN0Eq*MbX0D$7::CS10TWhf:+j/pLMK#?=IR[bs/=q^]:DSOaLFDMo[`2m, %=GJ;1`nkIoK%'OP>diM?,oTltR\WZY]>U@J\](6,6kln@8kZmg]b&Al>04l?N!M9mb(g&4W&j%=-`VaO'A0uuPsq\n0e9t7LK>/^ %K=[QcRBn:,YV`OkXuPP>rHlJ(MJeZ-?mLljp$+R^&R']@=JM(&oFriW3163>R?F[.X&pn` %(/)C"@`S<@,uJ`A(Yrh)^FqO+JXsN3QYXi3eTmr:lM2F#qU6q@7$L/rL/2biqVH>*7+cH2O8a6gcGB94or=pC\PmX3nWjBA4(BZh %NO('X\#=%!d9<=9?IZ1Ta)g%A1E0=7oJk&8WGJm6HIo19SGck#\gJRa44A0Rl]r'f*l;$c-GFWfmE]?bM5bkTj*LZM;cK=(euN"= %?G5c3@10K`X,#uPlIo:(?bA!cbrcE"qt%eU\bsR3q@^2J]gE53B$H3glUp9-eHUCFG:(m#DqI.jU42($DW,!BO@PU#%rHUj&Eg#m %>p2o_h__?=pEZY6jf6?p@CTs",+fg8(J!Ith0f09.TpsNds.sX/WN`,!j9m`/tK$*0BCYdQ+biRn8&qLfQrM>U&IqrXaulO6l].E %&7]Bl&dFGd8;6[q1`/D(H_2^TV736)B-Kn%=`B3c54!?n7_>ZTNnucuuFN>nKC%RA(oC:&8 
%%hsSXrN&adDK)r+1#`g-/@hI=PpTCc.H%UsT0^%k0._r,SUu<"d[bVp<,`qB'!u5t$d`_K_3-=>b*CnCr4,fkd$?';#A>PhiX!K>M'_@CO\\^K:?"Xmj*fO'e5rU\oT$?#Nmb"`qIVd1kD>h<- %JR/(7RAm$3Hd4YS_'7:*H=gO6>$]=)XG0'k(C %U%WVOn(K[?CRu?rTLirf/6]j(=h\WK5YhQc(ik+1YgddW@4mc_Fg %km^.p\q_WnIH+A#r0era%iF)V*;_:Xn(SbgNH4hb_/al29l[ME>?bsP^H9>`o>OqV-WA?psY#6Gg43.B?q8FK.mgH%&Cm/KJN,VY3iFJE0Rd5-l3*VS%LBZ5HTESBQ%q[XTQ91"7,g(HIlaX%`dSN;HL@!5gST2mY(,1T"ulS`4#(3h:h_M!sc9i %^NnGhL-(.qh9r0k8+s'Ug6P:J:Z>0Lc+>>mVJ+;5g_[HjWHIrR:#3933KWZXjbDXik'Kc",@g$]7/T"3g9tollfpC;*BCj75F^;e %ft:hp[QZ3_muEWDobAg\rd81YZMEQ^7C8F!g/g68dq3H9XeV>TgmT/JMo5B@qfh])DK,p0enBeui3&41?%[L5Y`sHXUZSV_3);U@:2Vn8)/1-`Nl]B5[JPaUinN&qhn&XO4SE7D(\a%m8Tk)I7cQ7EeF%?8HO\L$.%rG"s60Etu1o5V&rShqc!d.VsXIEl_\/FsiD=GJU0!XG.t39\#58ig:hFMmbTPU::2Bf::bWXG'3O$hOiR3*+l*)1b`t%a_KIkP.ks`m]n0Ie*L+HFcX8OP&cGT_])+p %gP1KB]@G.fpY=Z\ea8>(7[9>XhE>CO"(q#<,D>$oPhV78+e9,9h*8Sei.5>m1Q]m'an6aI3joNe\7;jY@!_R5g];PBIBRchs.P:K %%8cHeq>k[/a;!a%)i#aughm2%\LGZ)G98hg4(mO!_&^Q,CW!b2G8rsSJ,dm*5'=74cL+(D=DiQeTu6W#m0D(QgBgCA"9J/SI@o7A %D]^sl1S#H@^.7GXnb0R*D2S5R"D!@M=Y'&fBm7m`#EO1oVX.N[jIZCd^%Y38e$E)@pL*fOOIfHhj\0Im04dWP^.=+3uWa8j>$/@IlBhiIh^> %R49;j(2cmhSdg%##%^_:4!]Uk:O`29[H@>JC6Q?OT&L5sJ"1Dn5?G)1bN1nY_6#^X=qJpsIM'M;hDCIin!fNPp"dVZFruIU;(Nh, %E4Xj^enRp.L]=BCn!WZeN/ZXLI'MQBiXL8"\1]a.m;<4@(pN3)D5b.scE/,d(4kqAdT=uQ(COZRANE&7L.^TQ"A7B\A'NK^Qo/Hq %jG%l/G!Ft-=Fah`Gk$KL$3b"W#@JE"#qkh%Fh4b^W^.BiEm2lJ40Xnr!6H_hcZkMNepj,.HK>.I'u)lk("o7^,iP:Q+2GK&:ba57NY,TPO.gcl(Q %-Cn=Ik1n:OUhNk:F/_m@O\t^6fLfH(.Yc.!jVE_db=TD<2E8:Lmd0k:SI6q)F\50]ec"LtD>+WYdP'H?8qmb`=CPj&qn-.Vh:I.h %cO9?DX4;u`ZesJ8p7tMcZ9liF[e&V$f4l[=CH>GLPO3p=K>gZ;>\R]S&QM>-9_k2oO>F+sO$Y<9gkB:^'FC!6N>INAn;bd=.9!8g+FHg %'L>b,WV4r81!r9K;**Jo5!rDlJaq[rT4gLcB8KPO#*0!olSnjskj4-fm2FB#I$V;cF'2-=[?T@_Gp3#@(>59E6@1/X/9![E$/G]3C%6NdX@!'16*8?3-mGIuiEK>;7g7>n_"E %g?nn"*U"iN3XkM@W]aq'l]RL^%tObQCJ5NuL)1Y`CR_4peZH%LX0R+rOY7`P%phn!-^f9*dC<&s+8g33WMl1Jf_%RRl`-4>FlS9a 
%!q*^Elo04WA81SDI^cQ2Y.anKb+IBs[^r#'-#Mb`cm)j1++Ot"r9DJb1&@-i_r@=%Ls_u(/X'4ASDk"PlM%H/>?iEedNS=+^`>W^AIB\A`\@9E#2KbCoL`X2q*4KF1ir6'XmUOB>4(+B50/_oZM1s, %?H`=Qo"Z,S^2tpXD]3!]oJ4/TO)^O1**VQW,hW?W1$rIL\S+t9.VG4llnF#]h=q_`c$r6fai-K"G%c!;jT3$W^D)+efmu7SH$0m5 %^&HVgX+%Kf>7!3rnTH'^I6#%AWP$P1b(/-mId)2hlc5p0!^jNsT;Q%EpSGZlp/buj`RXN4:o-PN2J0C`/9Z+2!-rUiUu"6&Cq %6LD)-?h2!2s4:O?!P*^70)`HMV`gtfWCnhOqq(Q&@8`S!%QrEH=%#7Ud6C72PEMpf]qX5*=+**qm4_"j&RgHJtX[s&(WM8c(:E&fS\m$qG>JrNC"b)!@\)8+\%oCt$;Vs"7C-ei&B# %-u1u)OW8sjR7[E_.\=>D$lj\6.,0Ahroc@D>hT#KiD@hYT+beMr`q6&?P^3OQ%W36$Y5k;prf:`\'FAh/7F.A,<?;HkNrZPQ4/>UelJ"l#%8<\X+sr'VN7DhT^HN7.a6Z]5ZN.^AN*E4p\3Ae9ju!B^MO# %H.`mealc,^0/un7lAHJYhE`=no#FtBZ'"7#MP2QE"o`UefsA7qXXCTLcdB887NQPHmB#,Q$ttamnjkL+9PJNkoc/(J`kTqc?P^K! %hY]WsDrZ&X8W0Ga"M^N9N\7hJp/9R'Q"hU*[r[@aK(k9lYL#+Elb\M'q4&"K?#>#AT&f@$i`[t,nc@<^8Nkq4_]H/q8jB/=+b#KI %!8McT:BqY25$q*;rkuLQlt/YIV'eq+8X.(Ip!dQaO!(;W`X?/r8=qMCX1*rn_u2pgKlp=F9,[Q9i!%kg;k\$9bDU4s)g&'O&:S)-8lq %C%\O[rP)uD;u.>5o=!`gNNuN1RTFCBP)4DK2=1Zc0onRss-cG016bjI^6Y)#E;uiGFWleMYR;G_Q4LhKVGR?V=0G]OCRP._%"Rh* %(\f>i\$6/r%#rU."b!0:)CMsA8s %Xe[h#IEXgpeW\auZO23YeQ_FB/:G@'O&F8c2u^N"4`^.nj5uY[P4nYYHEomd&Au1DHt\,7pVX,I$p%ZIlhsCV$]ZkkmgC:uo#L(: %TPYe&lbupjn`.I-1LY2Q&W4=1RCr_0ZibIPJ^+GG8(MiVa4V['bA6t/K:93G_>IS1bN7na*E3I69_EB;[\$R^uX9 %qq0ZRg92E@Qlj:85/U+DrEP;+h^LCMj\-%G;V9F7bp8e6o5+\"aX09sr %#AdASk\s/"QhHbaC,n_2#I]qiB0Hlj?.DuIT:\kZp21rWp%t5p)s5$h,KIA""fo=5rhqILX'InP?CK8V=-#QGrar<>3f8-@'GacZ %aP0L@AN%!Q4OSH]aM-qih;^p/1o9cXIiHYPaW6b^VP-[[^8O3Ms(5ZVITFtIgWZqecm3"ioB":PYM1W0F.8\"]lM^X#)MCW56084>JfQZ@R@ITu"'+'pVMP<3?X[(hega*ab.5bR5QB0;Y!='j&*AWNde?=5@dTA,o?^%ut&"Ej6: %>i"'9"m<4Cm@SBH>0#cc$bI2OH;g!KH%C9kBrQZc51H,%VRB==GW7Z:j;Hp:?>Pp:6/_dnN %Q5rpHE@iR.UjE#hdO&E')3J\rV[!o0r'39gDt53ZqomX.#uAN+ol]5>O`f6VhJL/R5"T=^plp!*<:'ZcWaE5n?VI-ZS6*kKM\9<4 %s"^2*?LDjGL#)&Z)4C!&e<>>HRCl+(@5`m$=SVgpOSq&#i+;EcrmC0i[G?cOH?]0,;!%-G"@;>>Ir/<,Hg[uQ.\2u`=bdonRMkK[ %D[iVL@uZGFYk%PIgH5UMFgu>8^&ZW1@0:*TZ\og-7*p;sUF]nfMf1LaAja:61Ri$<"g3s?9kQBAqBGnOUp\)Td<]bFrLsOtioWhI 
%%*D$!UUb?Nrd\%cL@l85h3%7D:Pm?hGcn7Z>UH&qpfNHPp+H#_p?`kAh7r%2RDI$c4M,r;NT[fWJLdh2P;KolYG;Z)T$RS--hV6DGCZkNic_Y_alX!q=e]W:9UqU/=J)WcipbkSR(Crl"`GCFMQm %AIHY=/KOK,iKm`]DKiL4!eY$m\:ShHQ=8+@Vq(ZW_X5[i')0&We&_E0\(r"+[g%7rK?Qe0^-i@,IAq\V$U(U8Bn*2`i6TCjI,1s: %#JKQ^ls:e1&)'iujcut&]^MN&MgN7+4i?!+.doHYK`@]\nRT,L-bB;0*q7e0]WdR-#E:%r@=-2Nh-@agnltZn\R"eK<,,cZFY:An %46V&4V=Gh/EI3.Mjm"5VeUTrNKuCfIW>%2Q4kEfG4LGEjlsX\^7]s86opEM`"n;3"#K7PP@!UE-??*CQq+QDB/j@kAHeJZt^(IgJ %E7i\0Y3)p`p"`4E#*jXl;!E/**]U+r\Neu\#IQ:S%K"Mc>tUUj$sJ-Y^6]S^GL0[\7LU1LC`)F"Wr:tmf!Yf/Dt]@2/I=/fpX-=8`Ahg,E:8VKq77deS3-2=XO'48'U-oU&*kYSXNEARW.@>20WjBtVg[r>mNLT]mWh*nJf0C,*F1 %K;u\kKQjOM[G3YEDQ>3__TWpn'^A6AZ58F_D)V."6DS#]d3,%jBY#5TDoI@g#YB5(I.,pU!^cT*m %S``bVmlTb'h<&J1TR5;8mBYWBaSD[5$?rq+U%nb7^9oXSIXQUpHj;fMnNVSWSi-q8:Du'Un6,&eGP3O&!sSq2iV*2`j?)T$k%!+` %:uKGTm+ksHIi\UJCPf[V5=tY]]!q_9n"RCZp)We&T0K,;;8*IBp$3a,F:g?CS\$u]>OrqY^R4:mQ$O1kTJ-]^>Pjac!W];Zj6(bo %M[fM:hf$#]B!XQY^QNQ_bRi1g09THAL6,AEIRh*%/1'p^eA:%t=VDXV^p!=-m&K>K#@K4sB,f*b1k %"tfP'=Q/q6c3E)uf!6@d`_6"'AN&aHIq@@ciRGZK^Y.B,3PW_gEh/E#a)S^9kqL?2d'JJmHf+%nSp/cIH[MO8h:Ns=<=JD)&aFf$+b2$BDA$?sBFQ_an %76Sii51T?ZGuKk5=W`I-\4(0dF2Le=V:14#g.&m,[H(J%mC_Y.C-eq$:7j1_hoc%-2m[]CjlG.ql#*L"D`oDDr1EBtJOKLh];oKE %SmVEm&pjd*i9AkN:[Y!Qq*/`R*%4;f3VKQc`,=")(3U4pm_4#;S)p[=e%tuT17_O*hp^$tLr&]U<4[=GI^$TDpe@mh=r-!K0rYVq\bM5q %(Y?;aW*#pbl=ZPG/W4k"B9eNjX%2@Cm+EuCQ$:P%n`3\bJA2T@riF%Z4*R>*?$rU;Z]4XM&"(U]OiF^t0'm?1b@$?:%*7'E/*UWi %)KqhD^AaK$%<&rbk"`b;2nS[uo(kYt0Qn4]S0E2f5t(F]rPa_da2OeaVqcJ+/#BD$#G&'UjoPeLl=FNk?r1D8XJBcSp!+W02/)kF %AC*eN%N`")kdITZ7FWq0_eRGM5@neN%bg]""8KkrW-#]9"Rfdha>)oMdaE_1.#bJ.H+IW!lN&\KgTUXM;S*r3k^<>oF&Ac\=L&qK %"pJcTCkp'(5p$.57;]]RXTYel1m5X?;'.e-!cQX2P\Cs%[;IRAG*UC*Y!KtS04Q7##1&FggL'/hBKtG"q>Ku2K_N'1]8qVt>Mtr% %DL'"1koj@-3h+?XIsQGMqr(T[otN'[N!WXFiZ%bb!9;n:n]rQFE8Qb,#jk55HfV!#K-aodb?#Egh@E+fR-;]=`c(GD`9T:4iQHqa %:,RV&/6E"M4`2migB7&XWq,EF^H0(SZ7]t%\$8e'3;rs(B^=E-,KoWJ_:"tOMQpF0G6(Y)[cKo/r]t(6)&gN=p8o'/1E2BadfD]= 
%g`*-rnE1EVY!JMr,C0Zf<6m?Lc]g,HQ_f0K4INs=HEg(5dq-(HK]Ii^5BIQM63T=B/iZ9)h$ %ZF^=TTKRrf2S;ADm:n[%=OHlHEVp8])%0OiSXfp/HRoa0h/-P>0?\h];)-86.gof]bkg9?(71M#5s>SB"WNZ'LAYO7*:C'c@!PjM %48\DV4gjUuYB'\Zokr=o98qYcgDXG2kr#Op7"51pG*WQ5ba0epfDGNnq%.Jji]gL>Vs=o\C1op%pp'3oW_F %^0VWcl6Sne5ni3<%sOrFWgl'2>#fY3'"0`ZdbdSo^HTr/t8cGAbl!j8+hP@el38"3eq\nhNa.+'M4! %F^T5:UpE\m.RIeI>liKdI'=QN_N1&O*p-[V&a1j;l3NRqlIJT/H^V!Ifg!J_n_a=ZnL2][D's'jmL@-b$nb(GC`hGWR,@!-EMN$s %P?IY(M_KG<048D/T2<8O\,@,2/LPtR0^k=;@S%L\g95-^i9Sj0.'JWQH5``C*A#eE"Oecid=mM)%6%@2Lo"3_bl.Le#30Jprj33" %IuK-b$eJGs3@0);6rr%5M4i;5G]*sU;^r]m+*@ejcSh@5cldrb;rr(mcn="FrkZVbm9fXtHDEU#(\ %kJpr*XjtYr@W"q&-,eRYD/>"GSh1&lK^qPnk;HS781M%;[:=QdjQFSZoJ]6Y%YmC:cc9@]Ja.A/0r/4SlO$@i,sDK4^,C\76$[)O %6^^AaLNg&=RBU+0=Q8@.-b@t9YVC7C+m:`m^7r1N!nO[)Z^F,k]gUMM0GDbg&Z.#Q:1Y+ULb]RGDE/;&7ZkAE&^s2R9e,6TU<49r %dWdZ1$Y`csDH@=ir<`_MO.]i1l3inC!/O %qO\H:0S*W*;=;$UpcsH/2#s0iIuPD2*IQ>tBlL-$R$u70aKh=$A.%#Sq,Cb.l^ %E]=/Ig%!5#$1N?(0MYDH]Q-ng[((CM&0i_S(67W@*hbM)"8L]';*=De1;JQ)$Y.)K/jPalkc!O`+\S/5qNtL:V32^?rW]Ihi]*Ir %_Pth?ahWS-N_c>>4m,u>:fK9O^C!%Q7%kEOA)[Dfm#7F"-Wc+b'aI[s.d^DqaE,;p7ebq$,fc:3*>>?"VPZWDjkB%,GOX[5qAE6" %3UD0dcJ0#KZ%T&+5"H=^ptKR^@b[t.$ku5*#b6_l00%RVTMKNc01^r03/Io5L\_l]pt=gG7n`_u[+"P*>*+LBBO_5dXnUe6<_(d" %I6%`GqO"QD@K[!!';jBG.UXG?cVI42Vg]V&!nojZT;25X/uYE,3SN_6@^";-?!ultaE@?W"[\h`TP&&t[:-+@TEX]i*W#(MQ>UD1 %AJkp7NO1(4.r+'2KUf*$ZTpl=H:T*cduVUS*r,9X/9Ec[P9nonA@([\nZs=M`;Hq=-cH]s.k$d*OGUtc7eYkBjMmVtXMsO=l4aNh %`E*]mNkCG>-^\oU8j)/UYi\Fo_OVZiH,hW>$*!PR)R*891.noMYS);$XK&%F>gPQV#ia.=e&fUSWE6f %ns*\Ie*-P/?49srPnZ=Se*m6]M'YXb'1j*sb;20*u-;$eFeR/*n6[WDT00[6iUSoV[($Or/.p$I>`,'2P_COgu]42%&FQ)[cp!X=ake\k6S6i^>C?WMbkR;lUF:Fd&+Z['Lm+5%f %%aBP8Xop6RNA[OT:[c+tG?!SqqI;9I]p(nMRkh>D?ti"uBQR$'_!,6s>k6eihna>PVV,'j)Qp@`c880a/-a9cE2'jPHm,%lVd:HZ %YbN`3U9?47MI%J/h%S!M4f.)Z`t<3nO=:X*bb96&bbDX]]V_6BOL6UJB=n;G0iC:Sm0=qigt$'NRq'"(^;WM#/M0qE\$Bm@(;%+5 %+ieX4Rdt#nO0MjSjr_&%>"Krsh'P_tEH=bQ9LrF?Bpt&=6a/)ARXrTZ0fsHS@"HSVaLEHQFcT8b;u%8mB0FIGKVpdhN8Brf%)\bJ 
%XToW0N`BP'eF>sZ@2s.=fDT13%XlRe3ZIHGbRD*TSf<+^7$C'@5.'7bI_R?db#J_K"2!iRWtXOCnV&&j&38))\Ik3;m**#GSQW7$,9GaMHL%_T2lOnMRX*0gUj7NLB1pfR\,A(:%.XGI6Q#$28;dFn'aCbX%mHj!NJN742!W_<`(lOd1@=:biMKoE]9 %K;2-J0t61Em1(F'of%1k,6O'gQ(3SZ!r)*b_E=gBfn0=]*A&$jlr,oj,pUQ4//(8NUN#"W>="`-;>%pmaY-& %l=BR5j*5)9r;6hf;47+iMfSkV8gI)jQ6fBJ$l,57*6(M3NCQj3Is5L%PIPn\LM%#`0f576DJ(k&*Enn=k,_c27k$f0:Q:tlkE.F4 %4-QTpVeUru#Q9AsJd$>*daC9`R2Y)7A=k(?oI$bL`nYtj5 %-96Qlr-C@&/Eg6g_bHDi)S$o,7#'uclJ[OH:JHD8lJ[+d":`QcS?s?_Rgiq`s(5IGkB$&>FuJB,\-Su8bb-$&YqOCe^)>P?q:K-W %@3,!99H'Sqe;Mkp&L_ZpV!!Qm@,3S1:?56;`+66)3dl9:180MlOB\i[J?g0G3MQ$K&CjLflm-H9jEWZ#<+;rb5gCog8HF.[BpC,-IrB$4lJ>o.p15ohY;En*ATsGOX_CLe,<(k$;@eGcJ1q6N&hEmg""G`NOZiaa+o@c,?>L&Y>r-P0=DuiC\./7RZi`)5V;'Hu,[VE2\hWbij%"Ee73!TRE5F.GRmObK-N! %j4T]]QGRpm+M-n;Jq&+l$Gg9(p.=?.`8;qa:]N61jcg3nS/3#HAa5LYA7W9J[BC:DnVpJinHgX7U`sXN0 %X3iHn6DP/f8Ep7Wq$m/`S>/J#DY?aQ,"=Q)UH[\Rn,OK7>N:+Yfi7q6JkYk!l:%g]f$BA7KmpR4le\f%Xe/QWpfO7g/o7.](HU.anj>37Y+p+"*emt;ST`E57>XRo2VY$P.T?):HnA!G2(bnem`((%Rh#T@^h&tp4:\@jXL;c=K,Q5K8SJ;!isW9Aj/DS_N(eL %%N&/J^e;(RJr8AR<7q/,_1UaTg4=n9V:r+=[9+l%Kq5-om6DF,K=X^kkc/?4:iETX?BFt#<+./N67V?alOWg,6+Qj;g-NNIGTU9Z %g8j'8't`:U'tZ%I_1OWU(G>)@L[O1qm(`!Q":]Yk!oCV]ZbSkXO4A!CFn`$kZ3(KY?cHr+KCjI5dX,GWo=W--`VV!+T3`s8kV9F7 %e$%H!'@ts)]S`Y),q]!"e4NigrQ(4-Y6GABPuI)f;)L[l;mauEFjI$*dp2_aI['V(c;jA0.O4LU_t,uAlZHeuanaa;kBj)K<&_pY %l%L';oPEN%aIcRd)f^nJ40L=0'"JU"I5AH&/oZ4>pV')?eK$ZHO`q7&#'tr%9%R3\PM[GuSR-b7SX8f3mMbHo\l#'UnZ1n$otu_i %:$6-ofNu'j4*Pc^eT?7,Rs':YeT?7=nt'Y[I5RSQrl#L[WM^?.l2:8$2]HM3C:`B, %7c#RV5rDrPDE5mT]!3A9EAcQk=*U%bXK1S4L"LY#FFG!+D!'8iIN;7%oh;FNr&TYY]UI,!=u)*>t86 %24\)+SQEO!?VT\g%+f?!1T@70:rtSIC-'+5\rcgaVDeK6PLV9'J=cYW?:Uq2&IZP:*k;+&bn7)I.5JA?g;U/^X7MfYW-0oCI;D&13Vj0@/?V!-8\q8?)\RX^lc_-A;AF=@e;MAOu %&kCHO1^&amP9dH)5hDVEPG?p;76GV#T'5)_:sf$1%2BTG_/cs]";`P4Z_b/`#]Tb-K8n2To> %@_@.:'6u2\M>EX+!iZtHmgc?3*hCtQSW,8'%Cq&G>?GY];='sGkG@ij.38@d5jYT#+%sKZf(lR,+];p^6ZrWbnmt2CpYk8pJYkDN)DF?'m/:(al_u-:5b92BKlDf6%6"00RT3la<\E@,%I`D%mq4/)u 
%rQLq=(4.bLS\f6KdJX3?$g13cEL[=Q=N5GH(uE`5IW@_1lOD"l'=8hBUn>/Xt-.6Pha!2=,BN-QtnOB&+=UR!W';@G)OS4>Mr'^ %^1Z$''Anr/HiTL![F8Vn_ZiYt#uk;UtAPNn+\'@EMOA**2*7;_M4NhrXdU1X"!'Q#:f0 %:TtJFP>/G*92mS1m??gHX]m>6Q':T-3WE'XPRBR?`d.Jt$D,26H[:Eja`A!Il0mf#?ThXFJ]2>)+W&rTXrKDua]/k<=af!7.`.D, %dYYC,O-:s'$atY&s*F;0klCIGkVm0+_B-$_XqH%]+@op.IW8Yo2+"Ws%2j72#C:O-sW^aS%bok.29=peP5B^8#_7>Q0kC+T? %1!d8!d'N)`*NdTG&6c.9N*7kmdV?VDK!VhYpr>Kg,EK^&[qbVF^(nBEW#Xqs(sL,lNo1D-kR=U!k01)D6Bpg%>eo&&54$G %]2jY]L2\FM1e++Ki71=!/b/!nLi=?;OA+u$^,%'U+ka!lS-++^a0)ZcV7a@OZ0%:*lX,t04+ZQ77*rrE;]>?MrjWfu7# %.c@++dWN@$p.lou"p`rK2iG#9n6Wkt<-$7MCoHUdolDe2>M(bu[]T#3Ic4TVT,Obeme-0ZV<:mo8^E.^hWl!G'ah=da1U"W@#'\F %rIW=L)Y[RQ8=2O4]meq.+V)iK<;65XW8sD`]md&G/'+8sad4IFoa>-\nGSW9hBK(%-Q6-8G+$$^BWUBEe(N^AQRDHRciOAZCGlh"(]meq;DEo!0f_>7)8TV]p8XHiYTIH %?T8&1)qh+%f.WB>:(`\&EN`_oFEicApng$i_gNWaoQ:]D\,)(kc/F/U?[.Ad388VhHi:f=GujN#rOm[=\,+@H.W&.67P$9`Hi:hA %c/("'8al<[^X[7[qtS0%dKmIGDsG5^XBYX8EZa10><9Xc%E%r:NH5i,LVlf0YMW0]L$Mc(X(#bbHi5Mfo"ERlP,bnZeXSlE)tRCN %0K&%i&+'7_h!6nW]%>@(qF.H3Sb?ASX-\WdUJH':hfq._554nJ?4:c.lK>`#fbmd.]B1R/oFS+'b!n3j/t %T@/Deq?HrEP:skZ5H4Z3QrpqLrEI/>A>Pe@g=iMP1bVSFU"$n:FbH,#UaYis!TS3mr7Yl'Jki@_\;LpAW%Tn %-=r7nd@2EufdFP)/h^\XeqG5G-=E-)Bhi5/FV`?QG.%HeEV1g6E_)!^NL+q\\uXRGeP\pA0o]OgFE+0*gW99 %9i#\6/RH`C\,*(KWI@g6Dr3#pMoXuge\oN&EGN,Vp7$sdPiL?PSbhm3H7aU$n%J+1Ncfd=](pPnbl@+D*5Hj$P8f"0?S/)P`-8WDh-;srVf*KcEM:&FR9=s$MHl_,gu.HF %kB3r4n?^.mp@mFTh=o0!GOW&Ggfr(l4lX?=7]#4d?S/Uem9S\:`2W*_X.?56h;7Jk!`J-/a-Rqeqa0tP)ng^nZ*nlX>pWtqLVbrTf5bVAkoZQeVV[fGDrp!7 %pD^nY/DrVldB_YPdo44W550n0QZ!Vh$,bY([[1T!+"&$de`BnBos48(dJ,i9\1TYX\&2,;QRcZiq.m`Ikc\h>$bb`R\t7\Oe]uA* %]it/Fm>c)5KKoL^i0ADLeh!'?R2R;hMQ^hQcBJ#RRAjd3K34k>Ci4+[[G-tSb54PWa)1[U4am7m>iFZe-epSOZWuJJ08Iu)&T]1; %pbdlQoq)b7o5m/lh3qSt@@>=`Xrp@dq@d7<`4OG)nk+WTJ?Y+6i6PkmrHZlXAEfGj\@-Qf^Iql"E%e]>fA^NqhM$7DY_fZ"B"@AW %lTs8#TTXPY-*0U>kG(uTfm8a(5'U@_Yo[kE#J%gU2tOjlMH7kY>4W9S7bi#;j*87WRt%_McT**FY&N#o0SG(ka'n5\(Z/1q')j>a 
%^)q8,%qZ'%)0uJYjMtUK:[C'b=j1LjcmFT.`>n#C5>Om?^csh?5bl4;f\TTj>tYG)njUG^>OLVa=!YH+"l>P%UP/ %WC;d[5*$AbXId!TY&\05:?2Q?a]L(HR_sH#f@4qK5JLX!o\W/`03Q#a.HF_!N[')[\bOdUL3r9#"cK9+fP47XW&"aUm[BkcT9VNrPNG+5.g_n5Y=d38R1_ZpNIup:&+u?[0:X,W"Au#Jq\Wa %BlLeGXTBk7mVFY-Yb@XG/,(-^B@%iPbOc/4Z,TBC2en\^5mHOd$q9s2Bi9SSIbjb:fDJS[Vtr/Zfj"30@%F](TTE\d0Dm%HN36:F %3qIC.QDE#bpYHr0,(7.oEN24P_:n$kL1N`W0".phH$:'C)lImobVhZMUEXdE.NiBHblY7d"/Z2Q>mfPLri!PE4CgW %X,H+pFR6oYH+LgQZHV-CHBTW>6jBO=C!R^_qP2f"La %CVm.E*_M*ceE)MTH5hK]?amER!1`%hDD19&U7s\9u'4pDO,?0ek$QQ2Zb4)Af'(ESQu+SZ9#Mb %FPu`_Ob8$2AAH[2I-,tV&iL,!(O+^*Qf6)K3.V2s`Q7+a.fF!&5!&*o-Q8XS\M"I';#*3/lA$[,5t4%$#9*_uk@F7_!]4 %]\,aNb:ut;\,GB8Z[H\![8@t%,Ca.^AE'Oq=s.LZfC>PrRQ&gq;LVN %rB5,qq)@#5f=]jjH0:fd$Ob6E+!4c%H0i$fo(_@9aQ#e:%C*rlf2Ngk##(\+aNYtG'kQ0URP8HkFo"Od=Il99hE&$!LW-H4dMV5u %ff%:Zd>L?r9?p@"/?R"?R@9G!Kd3?$n+8f*=Z=g*;RG/3;\8JGYifZ,!/;4'JhZ6m#ako]XmKmXF4<:H?$Wj`GpI;EA)e]s#)J427VUde>aG].T>Od^OtMo5sM?>H]EKKjON!rI&M8aRT>2 %D&uBWa!3A/ZZmrcMZ%jg59nu%6%5>9^WMmP'.oe6+O\u0Gq'rZF5%X#H6#qMdj8ka;!o1?+8cshEUf/Gam9f:mpeOgGb73&!\F04 %UI*QgD2A*F:hBHsSYOO\3?4CqJdr$+2t13WD(n;_W!U0tj+uF1bD:H(GmXtT'n==rc^c?oZgoB1F=U&(8p=Am+/!HRg6:,M5t6+- %P5$o3%(+)L%d!e3d3"sNhL#/KJ'NPqq->E:D:>Dpo8U_(ud=XJa>9m:D*Ufrh:R[1Q=PK"6Zd[b!^;%C3 %cXgUeYsN/_k>/jbJ*PibkG7^!orC;QKR %>W^9`IkIS(>5.lVUBBo)q+*V\fN[4\".d;F60XjCTM.IL&ta1#c1kaJ4,@O@Eej2/iVY1.FW5ht`V##?1-EYHj"$J#k'n3;m+H4b %5:`JrO9h1RYsb7tR\@+*,9?sJeI8:>/X2"NeI88h#pk(H228I>4qoKKkV,#HGr7s'AO1-7?Hl;M`=fN@EKmi)i3U22R\@)4--fk( %Gsb9nYV:G*HYYBeQ#Hp-[#ZcRQN:XsI[^ND(p"]"^MCiqWF@`@!A&O7!5h7`JVI)4Q<>DEIbLU:j'UiCLHfl%\'_N/-.+)l1jdiX %f7[GS+F>s?49%A*+D[jAHfAW4O8af^g->C8_XQ1oj#;^&^66gj+YNu,_YZbdcQqIrFsTKs:N:B^[dBC`!F*%BA,?"[J+19'NIkKT %%5m6T$]KfuehcHQGHurk4lu9!Y5El#0jC10`8g5dJ1;N"5D=CH5\?m-Ku>*AW9]UFWeiJSF@\/&$;Lc=`WgrUIVIebhD3*e!.FoZUYF[?Lh./jd!UEQ'eRD9l7n"'#0K>WAS_T<>rlL?#)iT:obG_>^:o8RA8*X^lukBIYn %6d#&Hs5WM@0V!0%"EJ$:.?rTb(-Qfp[31V-b_M'GrjH`>c"6Y(P%T!I%KhmfPA)JC9&;Jc?^?p7A2r5OjtAni1<%Zn*?d>"B$'?i 
%L(:PF(\\p/'bigN,rWMeW0/0%*)hHX\N8`i*maJ2k2m9TH'ib$3'=oe&@R@K'(N?-Xr(t5]1_S@1B3SCa,N;7(r>mi/Hjh%dI#EA.jfY7)kD\2]o@a;qC-)rMH,BVO,oD;/f'TUl#h@^1L<.T5VAs"Sd2KXFiPGpnRhng$nbH85b?Gr54!#:d$"dHERER)( %o,_\8<_ %\b#[sh0Sk":,g2=Z2\h55Q"U5kBK:^'4`![OKl4d(e[mN55thX3Bf=PqMlli$DhXTqYnuos7q--rn#p42I5[X::I6E.s&_RGOY1g %>S?2:h8/FH/;5.F4.,&6N12?6s"F?bJ,I/'WpjL_`o[$,f,//E#+ki2r36\qr^#"_c:N)hF&tk6A$n$:5$)%APSHa4JsS?Zm^3f=.r(Do*QkdFpi[F5jtGC3Src7 %]YnTGNo5bm$]6[/7)O#k\KFO];PGWBfVQ5)kW53CUr\Q3F*UP97iS(%Gu]L+Pe8L3g70G3GtCl;7F0.'-eI)@7!"CLlL=5k0,cUhj=be_WISNn^49q(D_$*,(k#;N8aN6QP8?/EB:fa'_k;a!uWT@fGq*R(pU&oDr %"WbA]S$%W&D&?I+6j>VFi?XqG/EAYpVN&sQ<1JT:_-7n.Vr[r(!joc/*2hIAn=9\4<+X2I(;NQU"P1EtfFuOq@c,!cnMk`a#;@$b %-Z]8oQof,LVCT1B4PN%g8$Ii[GqcV*3Z9h(0?QD/_/9AEah4]?=4ek8nejtU!2'BVa=>561^R3d^uf;;cWUb<<&?i[8J)iu'bNpb %37r%uD2$97_OL[s,O:M-\%m3#IT'+V+"Lk'OZd#F!? %$nE*d90qV&*CU(S;$8a0+i_@uo.V@F!;ajJ&Em5Y"Jd/oLV\'6$ER,0#Ts-[63C+8*%[CSo;[>a+HWN)'&43s:^rKdG\%nm-uboc %=I?c1=:M=jm?XZ;XK$OX[aai5U&kB/2&IHu@:3D@<$t4I==::cD#Ggn#7)WU)LNk4iH*A,#TKoKPtD$>J3;*igLO(-WJsDcKIr\s %P8&RDkqY.>#4S^JP;U9!).i!>4BA-E=:s]#lF.QXJ91TL2#!m6IJ2H.WW>=L6:oMVS?k8VkIm!jf?9%tO!)>9(P(&U'`*3(CY:Bk3ANY6#8Sm=;[/ %L@YO5nM77G!BkOZ5e*mu0.:VPhP?;qMb= %!XmX"Y]!g?nMh$#"OKJi-N"A,VLk0D>6_rP%YQft;/K&2ION),K$KVd$-E>cCX`@A^oiji&7LU+iB/4mn2C%K,Qe9Td2[*efJo7e %#]`3IH.i9"W!p*=g41=KIhIi/$9DY-#UID_ZuO#DX;9T0Pt>-U/HDUOl[ou*B3:7gdbM&O^aWM)TF(c_?p,X1Lq(Y0+-:/&6k]VO %@AkH&\7(7t"]/>*`7H=3N'fTVE^5;nS>^5O,t(R=5R#O$(*GpU^rImeh$-7Q].,#jZG2WsG8]n.k+EkC:_"Us,N*g,<";d&WJceG %J8]rLG4l'?;q#m22$[(jM8._bOC7bhE#ZD`1=[Z:*MaJ1($kuu$dCt&KbAYfYR)?g#Ab,Fg]\a6%">F=S;>#nd@N'=>poT8j/qK`.7ni(f(3`nj)9;78QPY_0<7OZOLH!_4!C6j\Vsoq?j[YG! 
%N<'=n3f])nK_kt+U\k=>#b4[)6*UF`3JYIU'F_VZjZta,\um0!MTAkM)p&?fAE_AN82?b.!!%gAUS7CAg4R[QJVZ,@nKksQ7:BXj %e5/pE'^'&c@'o#d.j2h%\2ahE"iM`;4Ij8E;?;db'1BN=,,?nri%#gl=a%/$Vu[8so/uRN!0mVMW<8_jf.(:L$BMV)+h>`V&+Ekk_;P7J#Ca;;YD^h32MCG3TXKp\Son,h#>c+oB=MBbV.iW2Z[i05JZM6e:dCEKGp+lq_&k.''7a120Tes7bR^Y(k6Xg)=NL=]dmW9]/S8@` %4i0:k"'@+Yo11u%rE9tuK76fs0L4T@d'!f3!CX60a2mP0;=d7XMp,8SR.UXb'U2;3>JN%'S")VBXCPUUnM(IYMMU"A#;gg"hHFb7 %Y)qiI>'/8H_%+f7ZsZ-4lj$K+TGGrNL;Yo!],,jjdi@cF60_"2._*JgiXd*F"&[5MIo/I"&1H7M+faRB)ltr.8miqPfTjtkYa.9F %&uMKU9`M#]e5/cse59QU'iLsDJA!2Sal#IUfAX(:%2X92goqaX.J=BI%q=`1&]h0\]+nJZX9"$(OWMEM3jeEa?WU6";XcOQd/PB;P46mXYs3>%Mm %'sUjc23@G[eI6#\WrN-;,#O\puac(g)V<&?lQ`INla=;&"AZ&,?dcJ'q,XRJ8#nCC5\IXk^#kJWA` %dd3@Pc'(SRR%j_bP22n.KFj5To*SF6BpX+9l*Y2'Th,aZ/=A/s9L_%?l(E&!cp4GWLeSG[Tba_]?4MbnF%$THWq\9^MJs7@^VQcK %,fl2&$poX3i+@U/;)aq3YW9e,L>?eoZb^>;"Os;W0Mmb&74#R5-_4IN&dO7i6uNg$nt.2'N*<'.91)$7%10]e*)iFpU!cn>51#Aq %\.InD*.EZt.N%1G8=5R7Whep!RV369L`OuocufB3/eS>OL8a'%W4cEH]%M\i9mk"O''V->:*&jI;?=D*4CckZ0?PV&atu4QOS2N.7:J1i.^!Hf0V(N^K'rKU_Eg]6KtJCt$X1btWCcNsdY,8K9X^`%=co8nP2 %QFha[R7Pi-diK[=7Y;=&RN-7SUVL\k,5XVJS?h"'EHRDXj-eC4!UE#ko@,^8!HSs:7DmRgF:FWqRepc\+Br@@QYE5Q`(;+=7tGB` %QpS+?YZlM/JGt)$!+B@V1/@K0+g1TpD4-7A5Qn;\oB\91Cg11rsSkL"FaW[_[K=hA< %&'WLT\!&V)*<1g#^5i<9)Z1sL<@\]4EB(@878WZJq0_c7j-O[lU/\Zn%;KWBRR=#;g%P"6+j!\U6:i+J?9*bYqiZ0[dW&k[51"h_ESiJg@"d9s<5&luX6fR_"\m*MTRQ5!^e4@Oq)[q%9a %A?mM'lYnn_3SBRIquqk\fFiLd-lQuD,mC546k.T,8f5-=.6_Th-$3>\jI/$]=bG+6=5 %'T.+.0@f,U3$'Zo_3G`*>Ng[`Q-b^!=JsnY$2nJ)k[I/gnUR*"67,Vt-Q0m@/>f59dt2eL,f3pZKUXMa6(\D.3>`#f3]".1s,aWK1,_fjUQs9cBQGT3f!g*FaCI-[C2/s#0,<`.&.Ee/+Hsn-^8A_1OO"R"@Upu[jEjcR@6nQ""2PU9 %Ti.tOWVHZJ19=/:!4,CA])c?cK(,WXTV@0u7YS_i^uQ9YBa-7E-[K?k).q#uM,T %@0UC9'U0\JRK4n:W%>j.1]rJp(f=Li%p1)G7Ss3?p_W$F;3$].9G0NuHpL*/"1r2+:YIE; %n69tn-0EO\kA@UdgQu%=e3DWIRk&<'pK;OogsaU$n'BqhH*6o8nCsB. 
%qK_Kg1AqD3NjAOIc@?`:f$,AI7_]]"q#O>3Xh:M;+I+T$N!)Cs,lgi/M+UN,KrM.\0m'T"h20WNnL*D8?qq"22B=eB.KI8,'M<9T %d3c$7:u[+h'I^;CdVsHUd*YCr8*<$r=\Lm:lkG,hTVdObLHmm'-2A^Nmj5[CCJEWK6;_T6!>TBIa[(\\rr(N7YDm>+p86VTdhi8,]mn_ %13LgO5qeBNUX9FuA4/r8jX4If'ReI"[S/tL'"4$T6G;uX7VIm9XE#k"a(+,g;[hL_2N1;KhD'ctZ]m:tA)U9?gLZ$tWVDXr+pJm2 %j]NXZLe9ar:GR[^WBX6>>&h1Poa+&59lpuQ$S"CFnn7e#AiKTs"DNq"7>^43lE,=DZgBh&jHa)iU:?0=I)S.'uV %e'g%J"*6FU&kOu/$>^aT3g=%i84-a:8H/^QL"W:H3H*9]1/dXQ$DoK\N&HSF)GJq`*!l39"%G(B^WN'DPkN'ph^V9id9(&q<&#%q %1-J*\L&PKJB7[W07Hsp&ARrs%&Ma->gB7-UZ2oeB$eWX=DRbNYUL_R]^`Sg4:@8.Nrki&TJ)Yk'\rb<0$E0gp'Tp>#cj,5e6T0[! %,`8F,i0,HVK4`Ca:bc0?7JfG0163rM2l^8%#"+6;,-m=[IhUKPC`<=<;ZFM6eCPD#Wf+D\)J*=BR&,[r/"iXG^XFVH)l$IqbKYh4 %JE>u3rY7.X=@,9-6PYtGG?i`n3h?;\GaXDB3%k?6u*lBB6T*61^IP7bQ2MZ9bpT$'9>pIHFN7d_Mh\dlPRW7# %(tX\9#\3DO`m=r(GUP]gErn'!nfK\2;)W\WaW3/oXg%Kh$8?*Mid*%D=<\3-;CHLtk_$a/@reJ8jTq5mcES@#!Q/KY?qcGGlW6Me %%KmpGA4#lWMo[eYB)*k-QUj)lV%)LV<0?<#d;-;Xrr%"uVW6S-(fM3Ljg@?=JS+/^ZO."NfsP_Gfo %g;"A37+j36N:0]U%pI:7k'jXi@IbpA'H4rh'r?]e!<.V<1WL^GA;iAa3i.1,_<]@>a'."Q+kd-qP>Y4[\?$9'+Dro\_9C-;?25S>SD1!Am:TdK)N%RXrfiZb[0?Hr?%OR!=^T[/YcuP=80n``N5+&Ilu7Ck%"E!9lpUmV.X@_/i:)N&4/n %1WpeIT!(?R'ji3V.o=+`)T=U.+i9BjM6pcD,t1_kL.&a%h%Dmb+Xp7!"T8P.?@,"=\^I.S+8W&!ub0.?n%etN0P7^/m4oh&05OD0N-#V5ig>dpn*MTs2g)7N&V0BURd'H@noC,(Y]WTTbJpl %?K[k^<+sWU-i]\,Wc/)I_$gU,.&ngeKZ_E8"VX&`AW12#.&.VA^)'`<%+f., %5nYmmoju"ms2dP%5E^RQP\0G#Sn@P+k)`3#tu)$LBT\$er&+nUHaFnJ,&)IbTui8.^RT9C>[?Zek@]OIuE[cR7d5: %[hg"U3o+Ec@'5"j^43A0pS%K3S,^uV*]&Q'7j+6\lRNNmP49^o%/cDI^Z=bMBSu0. %cn+Qks3KKt$el0D*jurC_uYDRV#0Z\g$-L_HOp/(R="`.7\dE`$W#nk]:E:.VqZdirqT?+JPXMA'8bFl4nemX1`;d<~> %AI9_PrivateDataEnd \ No newline at end of file -- GitLab From 03f3a74b88d7ec358c327d858e4bec8a00fff5f3 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Mon, 22 May 2017 17:41:12 +0200 Subject: [PATCH 0755/1483] Bump Cradox requirement to 1.2.0 The `operate_aio_write_op` method is unavailable before that version. 
--- setup.cfg | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.cfg b/setup.cfg index 6675c97b..0ffa24a6 100644 --- a/setup.cfg +++ b/setup.cfg @@ -51,7 +51,7 @@ ceph = lz4>=0.9.0 tooz>=1.38 ceph_recommended_lib = - cradox>=1.0.9 + cradox>=1.2.0 ceph_alternative_lib = python-rados>=10.1.0 # not available on pypi file = -- GitLab From 295d0b85149b36bd50ca712bf97aa8d849602e7a Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Mon, 22 May 2017 17:50:22 +0200 Subject: [PATCH 0756/1483] Remove OpenStack from setup.cfg --- setup.cfg | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/setup.cfg b/setup.cfg index 6675c97b..159ea297 100644 --- a/setup.cfg +++ b/setup.cfg @@ -4,11 +4,9 @@ url = http://launchpad.net/gnocchi summary = Metric as a Service description-file = README.rst -author = OpenStack -author-email = openstack-dev@lists.openstack.org +author = Gnocchi developers home-page = http://gnocchi.xyz classifier = - Environment :: OpenStack Intended Audience :: Information Technology Intended Audience :: System Administrators License :: OSI Approved :: Apache Software License @@ -16,6 +14,7 @@ classifier = Programming Language :: Python Programming Language :: Python :: 2 Programming Language :: Python :: 2.7 + Programming Language :: Python :: 3 Programming Language :: Python :: 3.5 Topic :: System :: Monitoring -- GitLab From 6037536c317b981ac2a9f160ab1dc842e1078290 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Tue, 23 May 2017 08:55:44 +0200 Subject: [PATCH 0757/1483] doc: add where to find source code This seems obvious but we never wrote it anywhere. 
--- doc/source/index.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/doc/source/index.rst b/doc/source/index.rst index 8a873a94..e1ce3cda 100644 --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -27,6 +27,7 @@ Community --------- You can join Gnocchi's community via the following channels: +- Source code: https://github.com/gnocchixyz/gnocchi - Bug tracker: https://github.com/gnocchixyz/gnocchi/issues - IRC: #gnocchi on `Freenode `_ -- GitLab From 5a7eb905ea069a9d606045b3ef09018dfef7c7b3 Mon Sep 17 00:00:00 2001 From: gord chung Date: Tue, 23 May 2017 14:46:27 +0000 Subject: [PATCH 0758/1483] use pipeline to batch push measures pipeline allows us to group multiple actions in a single request saving roundtrip of multiple requests. this is at expense of immediate feedback but since we don't give feedback per measure in batch, this doesn't matter. in testing with 20metric batch POST, this improves performance by ~15%. --- gnocchi/storage/incoming/redis.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/gnocchi/storage/incoming/redis.py b/gnocchi/storage/incoming/redis.py index 9e81327c..7a86f95b 100644 --- a/gnocchi/storage/incoming/redis.py +++ b/gnocchi/storage/incoming/redis.py @@ -44,9 +44,12 @@ class RedisStorage(_carbonara.CarbonaraBasedStorage): self.get_sack_name(self.sack_for_metric(metric_id)), six.text_type(metric_id)]) - def _store_new_measures(self, metric, data): - path = self._build_measure_path(metric.id) - self._client.rpush(path, data) + def add_measures_batch(self, metrics_and_measures): + pipe = self._client.pipeline(transaction=False) + for metric, measures in six.iteritems(metrics_and_measures): + path = self._build_measure_path(metric.id) + pipe.rpush(path, self._encode_measures(measures)) + pipe.execute() def _build_report(self, details): match = redis.SEP.join([self.get_sack_name("*"), "*"]) -- GitLab From 1be2bf29f1b77b73cf79aa9ef5167b823fd19d48 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Tue, 23 
May 2017 10:01:10 +0200 Subject: [PATCH 0759/1483] doc: add incoming configuration option note --- doc/source/install.rst | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/doc/source/install.rst b/doc/source/install.rst index 897107a1..ddd0a3b3 100644 --- a/doc/source/install.rst +++ b/doc/source/install.rst @@ -122,6 +122,10 @@ options you want to change and configure: | | if you use the Redis storage driver. | +---------------------+---------------------------------------------------+ +The same options are also available as `incoming._*` for +configuring the incoming storage. If no incoming storage is set, the default is +to use the configured storage driver. + Configuring authentication ----------------------------- -- GitLab From 0e45013fc297bada1d16c50707dddc7a5baff5a7 Mon Sep 17 00:00:00 2001 From: gord chung Date: Tue, 23 May 2017 15:57:37 +0000 Subject: [PATCH 0760/1483] cleanup devstack point to new gnocchixyz project --- devstack/README.rst | 2 +- devstack/plugin.sh | 8 ++++---- doc/source/install.rst | 2 +- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/devstack/README.rst b/devstack/README.rst index 1d6c9ed0..57eadc4b 100644 --- a/devstack/README.rst +++ b/devstack/README.rst @@ -10,6 +10,6 @@ Enabling Gnocchi in DevStack 2. Add this repo as an external repository in ``local.conf`` file:: [[local|localrc]] - enable_plugin gnocchi https://git.openstack.org/openstack/gnocchi + enable_plugin gnocchi https://github.com/gnocchixyz/gnocchi 3. Run ``stack.sh``. diff --git a/devstack/plugin.sh b/devstack/plugin.sh index e1ef90b4..9c6aabd2 100644 --- a/devstack/plugin.sh +++ b/devstack/plugin.sh @@ -3,7 +3,7 @@ # To enable Gnocchi service, add the following to localrc: # -# enable_plugin gnocchi https://github.com/openstack/gnocchi master +# enable_plugin gnocchi https://github.com/gnocchixyz/gnocchi master # # This will turn on both gnocchi-api and gnocchi-metricd services. 
# If you don't want one of those (you do) you can use the @@ -139,10 +139,10 @@ function _gnocchi_install_grafana { sudo yum install "$GRAFANA_RPM_PKG" fi if [ ! "$GRAFANA_PLUGIN_VERSION" ]; then - sudo grafana-cli plugins install sileht-gnocchi-datasource + sudo grafana-cli plugins install gnocchixyz-gnocchi-datasource elif [ "$GRAFANA_PLUGIN_VERSION" != "git" ]; then - tmpfile=/tmp/sileht-gnocchi-datasource-${GRAFANA_PLUGIN_VERSION}.tar.gz - wget https://github.com/sileht/grafana-gnocchi-datasource/releases/download/${GRAFANA_PLUGIN_VERSION}/sileht-gnocchi-datasource-${GRAFANA_PLUGIN_VERSION}.tar.gz -O $tmpfile + tmpfile=/tmp/gnocchixyz-gnocchi-datasource-${GRAFANA_PLUGIN_VERSION}.tar.gz + wget https://github.com/gnocchixyz/grafana-gnocchi-datasource/releases/download/${GRAFANA_PLUGIN_VERSION}/gnocchixyz-gnocchi-datasource-${GRAFANA_PLUGIN_VERSION}.tar.gz -O $tmpfile sudo -u grafana tar -xzf $tmpfile -C /var/lib/grafana/plugins rm -f $file else diff --git a/doc/source/install.rst b/doc/source/install.rst index 897107a1..18a11fbb 100644 --- a/doc/source/install.rst +++ b/doc/source/install.rst @@ -176,7 +176,7 @@ To enable Gnocchi in `devstack`_, add the following to local.conf: :: - enable_plugin gnocchi https://github.com/openstack/gnocchi master + enable_plugin gnocchi https://github.com/gnocchixyz/gnocchi master To enable Grafana support in devstack, you can also enable `gnocchi-grafana`:: -- GitLab From 1f68f50586dc3aef60c1651371163da105b7299b Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Thu, 23 Feb 2017 22:48:12 +0100 Subject: [PATCH 0761/1483] Uses Grafana 4.3.1 This change updates devstack to use Grafana 4 Change-Id: I2d7ca78f7d7c2d77eb2c53c25851a088d6fbd1c1 --- devstack/plugin.sh | 2 ++ devstack/settings | 4 ++-- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/devstack/plugin.sh b/devstack/plugin.sh index e1ef90b4..6120db5b 100644 --- a/devstack/plugin.sh +++ b/devstack/plugin.sh @@ -138,6 +138,8 @@ function _gnocchi_install_grafana { 
elif is_fedora; then sudo yum install "$GRAFANA_RPM_PKG" fi + sudo -u grafana mkdir -p /var/lib/grafana/plugins + sudo rm -rf /var/lib/grafana/plugins/grafana-gnocchi-datasource if [ ! "$GRAFANA_PLUGIN_VERSION" ]; then sudo grafana-cli plugins install sileht-gnocchi-datasource elif [ "$GRAFANA_PLUGIN_VERSION" != "git" ]; then diff --git a/devstack/settings b/devstack/settings index 2ac7d52a..17c03f4f 100644 --- a/devstack/settings +++ b/devstack/settings @@ -57,8 +57,8 @@ GNOCCHI_REDIS_URL=${GNOCCHI_REDIS_URL:-redis://localhost:6379} GNOCCHI_STORAGE_BACKEND=${GNOCCHI_STORAGE_BACKEND:-redis} # Grafana settings -GRAFANA_RPM_PKG=${GRAFANA_RPM_PKG:-https://grafanarel.s3.amazonaws.com/builds/grafana-3.0.4-1464167696.x86_64.rpm} -GRAFANA_DEB_PKG=${GRAFANA_DEB_PKG:-https://grafanarel.s3.amazonaws.com/builds/grafana_3.0.4-1464167696_amd64.deb} +GRAFANA_RPM_PKG=${GRAFANA_RPM_PKG:-https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana-4.3.1-1.x86_64.rpm} +GRAFANA_DEB_PKG=${GRAFANA_DEB_PKG:-https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana_4.3.1_amd64.deb} GRAFANA_PLUGIN_VERSION=${GRAFANA_PLUGIN_VERSION} GRAFANA_PLUGINS_DIR=${GRAFANA_PLUGINS_DIR:-$DEST/grafana-gnocchi-datasource} GRAFANA_PLUGINS_REPO=${GRAFANA_PLUGINS_REPO:-http://github.com/gnocchixyz/grafana-gnocchi-datasource.git} -- GitLab From 3da28b7793409a56f57fca8ba69ccae06ffcb0b9 Mon Sep 17 00:00:00 2001 From: Jaime Alvarez Date: Tue, 23 May 2017 08:55:12 +0000 Subject: [PATCH 0762/1483] add contributing guidelines --- doc/source/contributing.rst | 37 +++++++++++++++++++++++++++++++++++++ doc/source/index.rst | 1 + 2 files changed, 38 insertions(+) create mode 100644 doc/source/contributing.rst diff --git a/doc/source/contributing.rst b/doc/source/contributing.rst new file mode 100644 index 00000000..b7923e67 --- /dev/null +++ b/doc/source/contributing.rst @@ -0,0 +1,37 @@ +============== + Contributing +============== + +Issues +------ + +We use the [Gihub issue 
tracker](https://github.com/gnocchixyz/gnocchi/issues) +for reporting issues. Before opening a new issue, ensure the bug was not +already reported by searching on Issue tracker first. + +If you're unable to find an open issue addressing the problem, open a new one. +Be sure to include a title and clear description, as much relevant information +as possible, and a code sample or an executable test case demonstrating the +expected behavior that is not occurring. + +Running the Tests +----------------- + +Tests are run using `tox `_. Tox creates +a virtual environment for each test environment, so make sure you are using an +up to date version of `virtualenv `_. + +Different test environments and configurations can be found by running the +``tox -l`` command. For example, to run tests with Python 2.7, PostgreSQL as +indexer, and file as storage backend: + +:: + + tox -e py27-postgresql-file + + +To run tests with Python 2.7, MySQL as indexer, and Ceph as storage backend: + +:: + + tox -e py35-mysql-ceph diff --git a/doc/source/index.rst b/doc/source/index.rst index 8a873a94..867cf154 100644 --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -65,3 +65,4 @@ Documentation collectd glossary releasenotes/index.rst + contributing -- GitLab From d99f2c51dd485dc3005b51f0bbd61ec08ab31158 Mon Sep 17 00:00:00 2001 From: gord chung Date: Wed, 24 May 2017 12:27:13 +0000 Subject: [PATCH 0763/1483] remove residual queue i missed removing this when i dropped scheduler process --- gnocchi/cli.py | 4 ---- 1 file changed, 4 deletions(-) diff --git a/gnocchi/cli.py b/gnocchi/cli.py index 06e1fbbc..941f4713 100644 --- a/gnocchi/cli.py +++ b/gnocchi/cli.py @@ -279,10 +279,6 @@ class MetricdServiceManager(cotyledon.ServiceManager): self.reconfigure(self.metric_processor_id, workers=self.conf.metricd.workers) - def run(self): - super(MetricdServiceManager, self).run() - self.queue.close() - def metricd_tester(conf): # NOTE(sileht): This method is designed to be profiled, we -- GitLab 
From 76810a02e9068dcce2deaa6e86a22cd93b4cbb8e Mon Sep 17 00:00:00 2001 From: gord chung Date: Wed, 24 May 2017 16:22:05 +0000 Subject: [PATCH 0764/1483] fix swift slim reporting swift is reporting double the real number of metrics in backlog --- gnocchi/storage/incoming/swift.py | 2 +- gnocchi/tests/test_storage.py | 22 +++++++++++++++++++++- 2 files changed, 22 insertions(+), 2 deletions(-) diff --git a/gnocchi/storage/incoming/swift.py b/gnocchi/storage/incoming/swift.py index 304126f9..6aa445cf 100644 --- a/gnocchi/storage/incoming/swift.py +++ b/gnocchi/storage/incoming/swift.py @@ -72,7 +72,7 @@ class SwiftStorage(_carbonara.CarbonaraBasedStorage): else: headers, files = self.swift.get_container( self.get_sack_name(i), delimiter='/', full_listing=True) - nb_metrics += len(files) + nb_metrics += len([f for f in files if 'subdir' in f]) measures += int(headers.get('x-container-object-count')) return (nb_metrics or len(metric_details), measures, metric_details if details else None) diff --git a/gnocchi/tests/test_storage.py b/gnocchi/tests/test_storage.py index 7047f44d..69238598 100644 --- a/gnocchi/tests/test_storage.py +++ b/gnocchi/tests/test_storage.py @@ -143,7 +143,7 @@ class TestStorageDriver(tests_base.TestCase): self.assertRaises(indexer.NoSuchMetric, self.index.delete_metric, self.metric.id) - def test_measures_reporting(self): + def test_measures_reporting_format(self): report = self.storage.incoming.measures_report(True) self.assertIsInstance(report, dict) self.assertIn('summary', report) @@ -158,6 +158,26 @@ class TestStorageDriver(tests_base.TestCase): self.assertIn('measures', report['summary']) self.assertNotIn('details', report) + def test_measures_reporting(self): + m2, __ = self._create_metric('medium') + for i in six.moves.range(60): + self.storage.incoming.add_measures(self.metric, [ + storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 0, i), 69), + ]) + self.storage.incoming.add_measures(m2, [ + storage.Measure(utils.dt_to_unix_ns(2014, 1, 
1, 12, 0, i), 69), + ]) + report = self.storage.incoming.measures_report(True) + self.assertIsInstance(report, dict) + self.assertEqual(2, report['summary']['metrics']) + self.assertEqual(120, report['summary']['measures']) + self.assertIn('details', report) + self.assertIsInstance(report['details'], dict) + report = self.storage.incoming.measures_report(False) + self.assertIsInstance(report, dict) + self.assertEqual(2, report['summary']['metrics']) + self.assertEqual(120, report['summary']['measures']) + def test_add_measures_big(self): m, __ = self._create_metric('high') self.storage.incoming.add_measures(m, [ -- GitLab From e8984b3697e6995916b85d700f06afa701260d98 Mon Sep 17 00:00:00 2001 From: gord chung Date: Thu, 25 May 2017 16:11:51 +0000 Subject: [PATCH 0765/1483] change copyright template drop openstack copyright templating --- doc/source/conf.py | 2 +- gnocchi/indexer/alembic/script.py.mako | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/source/conf.py b/doc/source/conf.py index 8c9b810b..68d7edf5 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -47,7 +47,7 @@ master_doc = 'index' # General information about the project. project = u'Gnocchi' -copyright = u'%s, OpenStack Foundation' % datetime.date.today().year +copyright = u'%s, The Gnocchi Developers' % datetime.date.today().year # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the diff --git a/gnocchi/indexer/alembic/script.py.mako b/gnocchi/indexer/alembic/script.py.mako index 8f4e92ea..66e2be40 100644 --- a/gnocchi/indexer/alembic/script.py.mako +++ b/gnocchi/indexer/alembic/script.py.mako @@ -1,4 +1,4 @@ -# Copyright ${create_date.year} OpenStack Foundation +# Copyright ${create_date.year} The Gnocchi Developers # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain -- GitLab From 32edd508ce90e5910a866f668933b0c23f6a4c3e Mon Sep 17 00:00:00 2001 From: sum12 Date: Mon, 29 May 2017 10:36:50 +0200 Subject: [PATCH 0766/1483] archive_policy: Raise Error if calculated points is < 0 In case granularity is larger than timespan, then the calculated points are < zero, and since there was no validation of this value an invalid policy would be created. This commit fixes that behaviour. Fixes #40 --- gnocchi/archive_policy.py | 2 ++ gnocchi/tests/functional/gabbits/archive.yaml | 12 ++++++++++++ gnocchi/tests/test_archive_policy.py | 3 +++ 3 files changed, 17 insertions(+) diff --git a/gnocchi/archive_policy.py b/gnocchi/archive_policy.py index 54c64cc2..c039dbd0 100644 --- a/gnocchi/archive_policy.py +++ b/gnocchi/archive_policy.py @@ -175,6 +175,8 @@ class ArchivePolicyItem(dict): self['timespan'] = None else: points = int(timespan / granularity) + if points <= 0: + raise ValueError("Calculated number of points is < 0") self['timespan'] = granularity * points else: points = int(points) diff --git a/gnocchi/tests/functional/gabbits/archive.yaml b/gnocchi/tests/functional/gabbits/archive.yaml index 42fe13c8..98e0ce99 100644 --- a/gnocchi/tests/functional/gabbits/archive.yaml +++ b/gnocchi/tests/functional/gabbits/archive.yaml @@ -480,6 +480,18 @@ tests: timespan: "1 shenanigan" status: 400 + - name: create policy when granularity is larger than timespan + POST: /v1/archive_policy + request_headers: + content-type: application/json + x-roles: admin + data: + name: should-have-failed + definition: + - granularity: 2 hour + timespan: 1 hour + status: 400 + # Non admin user attempt - name: fail to create policy non-admin diff --git a/gnocchi/tests/test_archive_policy.py b/gnocchi/tests/test_archive_policy.py index 3b2afb08..da90e70e 100644 --- a/gnocchi/tests/test_archive_policy.py +++ b/gnocchi/tests/test_archive_policy.py @@ -96,3 +96,6 @@ class TestArchivePolicyItem(base.BaseTestCase): self.assertRaises(ValueError, 
archive_policy.ArchivePolicyItem, 1, -1) + self.assertRaises(ValueError, + archive_policy.ArchivePolicyItem, + 2, None, 1) -- GitLab From 31dc2c7588c44467b84a8529bdbb218e9a81edb5 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Mon, 29 May 2017 09:59:05 +0200 Subject: [PATCH 0767/1483] travis: rebase pull requests --- .travis.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.travis.yml b/.travis.yml index 72b03e19..aa2472df 100644 --- a/.travis.yml +++ b/.travis.yml @@ -22,7 +22,7 @@ env: - TARGET: py35-postgresql before_script: -# Travis We need to fetch all tags/branches for documentation target + # Travis We need to fetch all tags/branches for documentation target - case $TARGET in docs*) git fetch origin $(git ls-remote -q | sed -n '/refs\/heads/s,.*refs/heads\(.*\),:remotes/origin\1,gp') ; @@ -30,7 +30,7 @@ before_script: git fetch --unshallow ; ;; esac - + - 'if [ "$TRAVIS_PULL_REQUEST" != "false" ]; then git pull --rebase ; fi' - docker build --tag gnocchi-ci --file=tools/travis-ci-setup.dockerfile . script: - docker run -v ~/.cache/pip:/home/tester/.cache/pip -v $(pwd):/home/tester/src gnocchi-ci tox -e ${TARGET} -- GitLab From 2ab7c2e853922e27113bfba4b903920aca4bd129 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Tue, 30 May 2017 08:25:41 +0200 Subject: [PATCH 0768/1483] Unify the number of sacks argument It feels weird to have different option name for the same thing. 
--- gnocchi/cli.py | 14 +++++++------- .../notes/incoming-sacks-413f4818882ab83d.yaml | 2 +- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/gnocchi/cli.py b/gnocchi/cli.py index 941f4713..5b00e566 100644 --- a/gnocchi/cli.py +++ b/gnocchi/cli.py @@ -52,8 +52,8 @@ def upgrade(): help="Skip storage upgrade."), cfg.BoolOpt("skip-archive-policies-creation", default=False, help="Skip default archive policies creation."), - cfg.IntOpt("num-storage-sacks", default=128, - help="Initial number of storage sacks to create."), + cfg.IntOpt("sacks-number", default=128, min=1, + help="Number of storage sacks to create."), ]) conf = service.prepare_service(conf=conf) @@ -65,7 +65,7 @@ def upgrade(): if not conf.skip_storage: s = storage.get_driver(conf) LOG.info("Upgrading storage %s", s) - s.upgrade(index, conf.num_storage_sacks) + s.upgrade(index, conf.sacks_number) if (not conf.skip_archive_policies_creation and not index.list_archive_policies() @@ -78,8 +78,8 @@ def upgrade(): def change_sack_size(): conf = cfg.ConfigOpts() conf.register_cli_opts([ - cfg.IntOpt("sack_size", required=True, min=1, - help="Number of sacks."), + cfg.IntOpt("sacks-number", required=True, min=1, + help="Number of storage sacks."), ]) conf = service.prepare_service(conf=conf) s = storage.get_driver(conf) @@ -89,9 +89,9 @@ def change_sack_size(): LOG.error('Cannot change sack when non-empty backlog. 
Process ' 'remaining %s measures and try again', remainder) return - LOG.info("Changing sack size to: %s", conf.sack_size) + LOG.info("Changing sack size to: %s", conf.sacks_number) old_num_sacks = s.incoming.get_storage_sacks() - s.incoming.set_storage_settings(conf.sack_size) + s.incoming.set_storage_settings(conf.sacks_number) s.incoming.remove_sack_group(old_num_sacks) diff --git a/releasenotes/notes/incoming-sacks-413f4818882ab83d.yaml b/releasenotes/notes/incoming-sacks-413f4818882ab83d.yaml index c2cf17ff..ef6d788d 100644 --- a/releasenotes/notes/incoming-sacks-413f4818882ab83d.yaml +++ b/releasenotes/notes/incoming-sacks-413f4818882ab83d.yaml @@ -7,7 +7,7 @@ features: upgrade: - | The storage driver needs to be upgraded. The number of sacks to distribute - across can be configured on upgrade by passing in ``num-storage-sacks`` + across can be configured on upgrade by passing in ``sacks-number`` value on upgrade. A default number of sacks will be created if not set. This can be reconfigured post-upgrade as well by using ``gnocchi-change-sack-size`` cli. See documentation for hints on the number -- GitLab From 17f8dadf678856e3fc4fa20872e35bbf7bf7da5f Mon Sep 17 00:00:00 2001 From: gord chung Date: Tue, 23 May 2017 20:05:43 +0000 Subject: [PATCH 0769/1483] reduce memory for slim report - don't store all metrics and counts in memory if not building detailed report. - leverage pipelines in redis - remove s3 FIXME since i don't think it's true. 
--- gnocchi/storage/incoming/file.py | 24 ++++++++++++++++++----- gnocchi/storage/incoming/redis.py | 32 +++++++++++++++++++++++++------ gnocchi/storage/incoming/s3.py | 1 - 3 files changed, 45 insertions(+), 12 deletions(-) diff --git a/gnocchi/storage/incoming/file.py b/gnocchi/storage/incoming/file.py index 781d3ec5..cab9cace 100644 --- a/gnocchi/storage/incoming/file.py +++ b/gnocchi/storage/incoming/file.py @@ -98,13 +98,27 @@ class FileStorage(_carbonara.CarbonaraBasedStorage): raise def _build_report(self, details): - metric_details = {} + report_vars = {'metrics': 0, 'measures': 0, 'metric_details': {}} + if details: + def build_metric_report(metric, sack): + report_vars['metric_details'][metric] = len( + self._list_measures_container_for_metric_id_str(sack, + metric)) + else: + def build_metric_report(metric, sack): + report_vars['metrics'] += 1 + report_vars['measures'] += len( + self._list_measures_container_for_metric_id_str(sack, + metric)) + for i in six.moves.range(self.NUM_SACKS): for metric in self.list_metric_with_measures_to_process(i): - metric_details[metric] = len( - self._list_measures_container_for_metric_id_str(i, metric)) - return (len(metric_details.keys()), sum(metric_details.values()), - metric_details if details else None) + build_metric_report(metric, i) + return (report_vars['metrics'] or + len(report_vars['metric_details'].keys()), + report_vars['measures'] or + sum(report_vars['metric_details'].values()), + report_vars['metric_details'] if details else None) def list_metric_with_measures_to_process(self, sack): return set(self._list_target(self._sack_path(sack))) diff --git a/gnocchi/storage/incoming/redis.py b/gnocchi/storage/incoming/redis.py index 7a86f95b..75dcc95d 100644 --- a/gnocchi/storage/incoming/redis.py +++ b/gnocchi/storage/incoming/redis.py @@ -13,7 +13,6 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. -import collections import contextlib import six @@ -52,13 +51,34 @@ class RedisStorage(_carbonara.CarbonaraBasedStorage): pipe.execute() def _build_report(self, details): + report_vars = {'measures': 0, 'metric_details': {}} + + def update_report(results, m_list): + report_vars['measures'] += sum(results) + if details: + report_vars['metric_details'].update( + dict(six.moves.zip(m_list, results))) + match = redis.SEP.join([self.get_sack_name("*"), "*"]) - metric_details = collections.defaultdict(int) + metrics = 0 + m_list = [] + pipe = self._client.pipeline() for key in self._client.scan_iter(match=match, count=1000): - metric = key.decode('utf8').split(redis.SEP)[1] - metric_details[metric] = self._client.llen(key) - return (len(metric_details.keys()), sum(metric_details.values()), - metric_details if details else None) + metrics += 1 + pipe.llen(key) + if details: + m_list.append(key.decode('utf8').split(redis.SEP)[1]) + # group 100 commands/call + if metrics % 100 == 0: + results = pipe.execute() + update_report(results, m_list) + m_list = [] + pipe = self._client.pipeline() + else: + results = pipe.execute() + update_report(results, m_list) + return (metrics, report_vars['measures'], + report_vars['metric_details'] if details else None) def list_metric_with_measures_to_process(self, sack): match = redis.SEP.join([self.get_sack_name(sack), "*"]) diff --git a/gnocchi/storage/incoming/s3.py b/gnocchi/storage/incoming/s3.py index 89de4192..6e7fbaaa 100644 --- a/gnocchi/storage/incoming/s3.py +++ b/gnocchi/storage/incoming/s3.py @@ -98,7 +98,6 @@ class S3Storage(_carbonara.CarbonaraBasedStorage): response = self.s3.list_objects_v2( Bucket=self._bucket_name_measures, **kwargs) - # FIXME(gordc): this can be streamlined if not details for c in response.get('Contents', ()): if c['Key'] != self.CFG_PREFIX: __, metric, metric_file = c['Key'].split("/", 2) -- GitLab 
From 318200fb6bf166c240df4ac9793b5a787b51a48e Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Wed, 31 May 2017 18:07:15 +0200 Subject: [PATCH 0770/1483] doc: fix issue tracker link not being in RST --- doc/source/contributing.rst | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/doc/source/contributing.rst b/doc/source/contributing.rst index b7923e67..5d3abedd 100644 --- a/doc/source/contributing.rst +++ b/doc/source/contributing.rst @@ -5,15 +5,17 @@ Issues ------ -We use the [Gihub issue tracker](https://github.com/gnocchixyz/gnocchi/issues) -for reporting issues. Before opening a new issue, ensure the bug was not -already reported by searching on Issue tracker first. +We use the `GitHub issue tracker`_ for reporting issues. Before opening a new +issue, ensure the bug was not already reported by searching on Issue tracker +first. If you're unable to find an open issue addressing the problem, open a new one. Be sure to include a title and clear description, as much relevant information as possible, and a code sample or an executable test case demonstrating the expected behavior that is not occurring. +.. _`GitHub issue tracker`: https://github.com/gnocchixyz/gnocchi/issues + Running the Tests ----------------- -- GitLab From c0b5b220a1fec7bd2c10345ed3ba833dc2547ccf Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Wed, 31 May 2017 18:07:37 +0200 Subject: [PATCH 0771/1483] doc: add a note on pull-requests --- doc/source/contributing.rst | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/doc/source/contributing.rst b/doc/source/contributing.rst index 5d3abedd..249f6ec2 100644 --- a/doc/source/contributing.rst +++ b/doc/source/contributing.rst @@ -16,6 +16,22 @@ expected behavior that is not occurring. .. 
_`GitHub issue tracker`: https://github.com/gnocchixyz/gnocchi/issues + +Pull-requests +------------- + +When opening a pull-request, make sure that: + +* You write a comprehensive summary of your problem and the solution you + implemented. +* If you update or fix your pull-request, make sure the commits are atomic. Do + not include fix-up commits in your history, rewrite it properly using e.g. + `git rebase --interactive` and/or `git commit --amend`. +* We recommend using `git pull-request`_ to send your pull-requests. + +.. _`git pull-request`: https://github.com/jd/git-pull-request + + Running the Tests ----------------- -- GitLab From 2e976897f81ea1fa28bb4bee9ee125fbc3e84428 Mon Sep 17 00:00:00 2001 From: gord chung Date: Wed, 31 May 2017 20:46:01 +0000 Subject: [PATCH 0772/1483] don't load indexer in reporting process we don't need access to indexer to build report. skip loading indexer to save memory and connection. --- gnocchi/cli.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/gnocchi/cli.py b/gnocchi/cli.py index 5b00e566..02b68226 100644 --- a/gnocchi/cli.py +++ b/gnocchi/cli.py @@ -146,6 +146,9 @@ class MetricReporting(MetricProcessBase): super(MetricReporting, self).__init__( worker_id, conf, conf.metricd.metric_reporting_delay) + def _configure(self): + self.store = storage.get_driver(self.conf) + def _run_job(self): try: report = self.store.incoming.measures_report(details=False) -- GitLab From c169eceb48e9147ecd2ffd77576a82ffc078b6f3 Mon Sep 17 00:00:00 2001 From: Jaime Alvarez Date: Fri, 2 Jun 2017 06:48:39 +0000 Subject: [PATCH 0773/1483] Remove catching of non existent ArchivePolicyInUse exception. 
--- gnocchi/rest/__init__.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/gnocchi/rest/__init__.py b/gnocchi/rest/__init__.py index 42e9bc41..42f638d7 100644 --- a/gnocchi/rest/__init__.py +++ b/gnocchi/rest/__init__.py @@ -355,8 +355,6 @@ class ArchivePolicyRulesController(rest.RestController): pecan.request.indexer.delete_archive_policy_rule(name) except indexer.NoSuchArchivePolicyRule as e: abort(404, e) - except indexer.ArchivePolicyRuleInUse as e: - abort(400, e) def MeasuresListSchema(measures): -- GitLab From 464cfc9f8469fc7cea4d6987603775009f6cab16 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Fri, 2 Jun 2017 08:00:55 +0200 Subject: [PATCH 0774/1483] Fix doc build Since yesterday, reno doesn't find some git reference. This is due to the method to unshallow the travis git clone. This change uses a better way, by configuring the fetcher instead of fetch stuff manually. Then --unshallow doesn't miss some references. Change-Id: Ic69d70a2e2e126d47480aba157d0bbec0ff230e0 --- .travis.yml | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/.travis.yml b/.travis.yml index aa2472df..3a384a7d 100644 --- a/.travis.yml +++ b/.travis.yml @@ -25,9 +25,8 @@ before_script: # Travis We need to fetch all tags/branches for documentation target - case $TARGET in docs*) - git fetch origin $(git ls-remote -q | sed -n '/refs\/heads/s,.*refs/heads\(.*\),:remotes/origin\1,gp') ; - git fetch --tags ; - git fetch --unshallow ; + git config --add remote.origin.fetch +refs/heads/stable/*:refs/remotes/origin/stable/*; + git fetch --unshallow --tags; ;; esac - 'if [ "$TRAVIS_PULL_REQUEST" != "false" ]; then git pull --rebase ; fi' -- GitLab From 1bfa785b3ed096f44d44b75fe26d9e5377fa9893 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Fri, 2 Jun 2017 20:08:08 +0200 Subject: [PATCH 0775/1483] travis: remove irc bot join/leave Change-Id: I40283991daf1841f5dc89565d41a123099c782af --- .travis.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.travis.yml 
b/.travis.yml index 3a384a7d..10a2bd81 100644 --- a/.travis.yml +++ b/.travis.yml @@ -39,5 +39,6 @@ notifications: irc: on_success: change on_failure: always + skip_join: true channels: - "irc.freenode.org#gnocchi" -- GitLab From 052e286dd94145ea11fed53c913476865968e3f9 Mon Sep 17 00:00:00 2001 From: Jaime Alvarez Date: Mon, 5 Jun 2017 05:33:29 +0000 Subject: [PATCH 0776/1483] Fix typos in docs --- doc/source/rest.j2 | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/doc/source/rest.j2 b/doc/source/rest.j2 index c06c845d..7a93da3c 100644 --- a/doc/source/rest.j2 +++ b/doc/source/rest.j2 @@ -408,9 +408,9 @@ resource type is ready to be used. If something unexpected occurs during this step, the state switches to `creation_error`. The same behavior occurs when the resource type is deleted. The state starts to -switch to `deleting`, the resource type is no more usable. Then the tables are -removed and the finally the resource_type is really deleted from the database. -If some unexpected occurs the state switches to `deletion_error`. +switch to `deleting`, the resource type is no longer usable. Then the tables are +removed and then finally the resource_type is really deleted from the database. +If some unexpected error occurs the state switches to `deletion_error`. Searching for resources ======================= @@ -518,7 +518,7 @@ the one described in `Searching for resources`_. 
{{ scenarios['get-across-metrics-measures-by-attributes-lookup']['doc'] }} It is possible to group the resource search results by any attribute of the -requested resource type, and the compute the aggregation: +requested resource type, and then compute the aggregation: {{ scenarios['get-across-metrics-measures-by-attributes-lookup-groupby']['doc'] }} -- GitLab From 056f23d047a21785fc740a060df21172f40f5ad7 Mon Sep 17 00:00:00 2001 From: gord chung Date: Mon, 5 Jun 2017 14:48:21 +0000 Subject: [PATCH 0777/1483] drop unused clean we only needed to clean v2 objects because it was msgpack serialised dict. see commit: 5596bdea4848d0594990a5adde74045637e69431 --- gnocchi/carbonara.py | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/gnocchi/carbonara.py b/gnocchi/carbonara.py index 4716f41a..0aa9fff8 100644 --- a/gnocchi/carbonara.py +++ b/gnocchi/carbonara.py @@ -188,12 +188,8 @@ class TimeSerie(object): return ts @classmethod - def from_data(cls, timestamps=None, values=None, clean=False): - ts = pandas.Series(values, timestamps) - if clean: - # For format v2 - ts = cls.clean_ts(ts) - return cls(ts) + def from_data(cls, timestamps=None, values=None): + return cls(pandas.Series(values, timestamps)) @classmethod def from_tuples(cls, timestamps_values): -- GitLab From f3bea40f8b372ff9d52f83a7be533fb524febed9 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Tue, 30 May 2017 08:32:52 +0200 Subject: [PATCH 0778/1483] Remove indexer argument to storage upgrade() This is finally useless. 
--- gnocchi/cli.py | 9 ++++++--- gnocchi/storage/__init__.py | 4 ++-- gnocchi/storage/incoming/__init__.py | 2 +- gnocchi/storage/incoming/_carbonara.py | 4 ++-- gnocchi/storage/incoming/file.py | 4 ++-- gnocchi/storage/incoming/s3.py | 4 ++-- gnocchi/storage/s3.py | 4 ++-- gnocchi/tests/base.py | 2 +- gnocchi/tests/functional/fixtures.py | 2 +- 9 files changed, 19 insertions(+), 16 deletions(-) diff --git a/gnocchi/cli.py b/gnocchi/cli.py index 02b68226..b124b799 100644 --- a/gnocchi/cli.py +++ b/gnocchi/cli.py @@ -57,19 +57,22 @@ def upgrade(): ]) conf = service.prepare_service(conf=conf) - index = indexer.get_driver(conf) - index.connect() if not conf.skip_index: + index = indexer.get_driver(conf) + index.connect() LOG.info("Upgrading indexer %s", index) index.upgrade() if not conf.skip_storage: s = storage.get_driver(conf) LOG.info("Upgrading storage %s", s) - s.upgrade(index, conf.sacks_number) + s.upgrade(conf.sacks_number) if (not conf.skip_archive_policies_creation and not index.list_archive_policies() and not index.list_archive_policy_rules()): + if conf.skip_index: + index = indexer.get_driver(conf) + index.connect() for name, ap in six.iteritems(archive_policy.DEFAULT_ARCHIVE_POLICIES): index.create_archive_policy(ap) index.create_archive_policy_rule("default", "*", "low") diff --git a/gnocchi/storage/__init__.py b/gnocchi/storage/__init__.py index d06a47cf..f4d0f7e7 100644 --- a/gnocchi/storage/__init__.py +++ b/gnocchi/storage/__init__.py @@ -162,8 +162,8 @@ class StorageDriver(object): def stop(): pass - def upgrade(self, index, num_sacks): - self.incoming.upgrade(index, num_sacks) + def upgrade(self, num_sacks): + self.incoming.upgrade(num_sacks) def process_background_tasks(self, index, metrics, sync=False): """Process background tasks for this storage. 
diff --git a/gnocchi/storage/incoming/__init__.py b/gnocchi/storage/incoming/__init__.py index eb99ae4d..34dcd1c0 100644 --- a/gnocchi/storage/incoming/__init__.py +++ b/gnocchi/storage/incoming/__init__.py @@ -29,7 +29,7 @@ class StorageDriver(object): pass @staticmethod - def upgrade(indexer): + def upgrade(): pass def add_measures(self, metric, measures): diff --git a/gnocchi/storage/incoming/_carbonara.py b/gnocchi/storage/incoming/_carbonara.py index e20720d6..f6a8b8b8 100644 --- a/gnocchi/storage/incoming/_carbonara.py +++ b/gnocchi/storage/incoming/_carbonara.py @@ -53,8 +53,8 @@ class CarbonaraBasedStorage(incoming.StorageDriver): sacks = num_sacks if num_sacks else self.NUM_SACKS return self.SACK_PREFIX + str(sacks) + '-%s' - def upgrade(self, index, num_sacks): - super(CarbonaraBasedStorage, self).upgrade(index) + def upgrade(self, num_sacks): + super(CarbonaraBasedStorage, self).upgrade() if not self.get_storage_sacks(): self.set_storage_settings(num_sacks) diff --git a/gnocchi/storage/incoming/file.py b/gnocchi/storage/incoming/file.py index 781d3ec5..18c8427b 100644 --- a/gnocchi/storage/incoming/file.py +++ b/gnocchi/storage/incoming/file.py @@ -32,8 +32,8 @@ class FileStorage(_carbonara.CarbonaraBasedStorage): self.basepath = conf.file_basepath self.basepath_tmp = os.path.join(self.basepath, 'tmp') - def upgrade(self, index, num_sacks): - super(FileStorage, self).upgrade(index, num_sacks) + def upgrade(self, num_sacks): + super(FileStorage, self).upgrade(num_sacks) utils.ensure_paths([self.basepath_tmp]) def get_storage_sacks(self): diff --git a/gnocchi/storage/incoming/s3.py b/gnocchi/storage/incoming/s3.py index 89de4192..bccd00fe 100644 --- a/gnocchi/storage/incoming/s3.py +++ b/gnocchi/storage/incoming/s3.py @@ -64,7 +64,7 @@ class S3Storage(_carbonara.CarbonaraBasedStorage): # nothing to cleanup since sacks are part of path pass - def upgrade(self, indexer, num_sacks): + def upgrade(self, num_sacks): try: s3.create_bucket(self.s3, 
self._bucket_name_measures, self._region_name) @@ -74,7 +74,7 @@ class S3Storage(_carbonara.CarbonaraBasedStorage): ): raise # need to create bucket first to store storage settings object - super(S3Storage, self).upgrade(indexer, num_sacks) + super(S3Storage, self).upgrade(num_sacks) def _store_new_measures(self, metric, data): now = datetime.datetime.utcnow().strftime("_%Y%m%d_%H:%M:%S") diff --git a/gnocchi/storage/s3.py b/gnocchi/storage/s3.py index 59c801de..dd923a92 100644 --- a/gnocchi/storage/s3.py +++ b/gnocchi/storage/s3.py @@ -75,8 +75,8 @@ class S3Storage(_carbonara.CarbonaraBasedStorage): else: self._consistency_stop = None - def upgrade(self, index, num_sacks): - super(S3Storage, self).upgrade(index, num_sacks) + def upgrade(self, num_sacks): + super(S3Storage, self).upgrade(num_sacks) try: s3.create_bucket(self.s3, self._bucket_name, self._region_name) except botocore.exceptions.ClientError as e: diff --git a/gnocchi/tests/base.py b/gnocchi/tests/base.py index 3f35b40c..53016f59 100644 --- a/gnocchi/tests/base.py +++ b/gnocchi/tests/base.py @@ -327,7 +327,7 @@ class TestCase(base.BaseTestCase): self.storage.STORAGE_PREFIX = str(uuid.uuid4()) self.storage.incoming.SACK_PREFIX = str(uuid.uuid4()) - self.storage.upgrade(self.index, 128) + self.storage.upgrade(128) def tearDown(self): self.index.disconnect() diff --git a/gnocchi/tests/functional/fixtures.py b/gnocchi/tests/functional/fixtures.py index 90004194..a51a2614 100644 --- a/gnocchi/tests/functional/fixtures.py +++ b/gnocchi/tests/functional/fixtures.py @@ -134,7 +134,7 @@ class ConfigFixture(fixture.GabbiFixture): self.index = index s = storage.get_driver(conf) - s.upgrade(index, 128) + s.upgrade(128) LOAD_APP_KWARGS = { 'storage': s, -- GitLab From 5fb48769440cc405d375d7721fecf82e0dd5610f Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Mon, 5 Jun 2017 22:24:20 +0200 Subject: [PATCH 0779/1483] storage: fix resample on empty metric If a metric is empty, carbonara raises an IndexError when 
checking for the data on grouping since the Pandas timeseries is empty. Fixes #69 --- gnocchi/carbonara.py | 2 +- .../functional/gabbits/resource-aggregation.yaml | 16 ++++++++++++++++ gnocchi/tests/test_storage.py | 10 ++++++++++ 3 files changed, 27 insertions(+), 1 deletion(-) diff --git a/gnocchi/carbonara.py b/gnocchi/carbonara.py index 4716f41a..8352819c 100644 --- a/gnocchi/carbonara.py +++ b/gnocchi/carbonara.py @@ -246,7 +246,7 @@ class TimeSerie(object): # NOTE(jd) Our whole serialization system is based on Epoch, and we # store unsigned integer, so we can't store anything before Epoch. # Sorry! - if self.ts.index[0].value < 0: + if not self.ts.empty and self.ts.index[0].value < 0: raise BeforeEpochError(self.ts.index[0]) return GroupedTimeSeries(self.ts[start:], granularity) diff --git a/gnocchi/tests/functional/gabbits/resource-aggregation.yaml b/gnocchi/tests/functional/gabbits/resource-aggregation.yaml index c0338476..cf563e7b 100644 --- a/gnocchi/tests/functional/gabbits/resource-aggregation.yaml +++ b/gnocchi/tests/functional/gabbits/resource-aggregation.yaml @@ -43,6 +43,22 @@ tests: value: 12 status: 202 + - name: get aggregation with no data + desc: https://github.com/gnocchixyz/gnocchi/issues/69 + POST: /v1/aggregation/resource/generic/metric/cpu.util?stop=2012-03-06T00:00:00&fill=0&granularity=300&resample=3600 + request_headers: + x-user-id: 6c865dd0-7945-4e08-8b27-d0d7f1c2b667 + x-project-id: c7f32f1f-c5ef-427a-8ecd-915b219c66e8 + content-type: application/json + data: + =: + id: 4ed9c196-4c9f-4ba8-a5be-c9a71a82aac4 + poll: + count: 10 + delay: 1 + response_json_paths: + $: [] + - name: create resource 2 POST: /v1/resource/generic request_headers: diff --git a/gnocchi/tests/test_storage.py b/gnocchi/tests/test_storage.py index 69238598..f89728d5 100644 --- a/gnocchi/tests/test_storage.py +++ b/gnocchi/tests/test_storage.py @@ -960,6 +960,16 @@ class TestStorageDriver(tests_base.TestCase): (utils.datetime_utc(2014, 1, 1, 12, 0, 15), 5.0, 1.0), 
], self.storage.get_measures(m)) + def test_resample_no_metric(self): + """https://github.com/gnocchixyz/gnocchi/issues/69""" + self.assertEqual([], + self.storage.get_measures( + self.metric, + utils.datetime_utc(2014, 1, 1), + utils.datetime_utc(2015, 1, 1), + granularity=300, + resample=3600)) + class TestMeasureQuery(base.BaseTestCase): def test_equal(self): -- GitLab From 9e4a53f911203462143a4aa966c9d3bb9f3ee6b4 Mon Sep 17 00:00:00 2001 From: gord chung Date: Mon, 5 Jun 2017 16:43:38 +0000 Subject: [PATCH 0780/1483] incoming-only connection for changing sacks and reporter, there is no need to connect to main storage and initialise a new coordinator. change so we only connect to incoming driver and do not create a coordinator connection. --- gnocchi/cli.py | 14 +++++++------- gnocchi/storage/__init__.py | 8 ++++++++ 2 files changed, 15 insertions(+), 7 deletions(-) diff --git a/gnocchi/cli.py b/gnocchi/cli.py index b124b799..ebeaa6d6 100644 --- a/gnocchi/cli.py +++ b/gnocchi/cli.py @@ -85,17 +85,17 @@ def change_sack_size(): help="Number of storage sacks."), ]) conf = service.prepare_service(conf=conf) - s = storage.get_driver(conf) - report = s.incoming.measures_report(details=False) + s = storage.get_incoming_driver(conf.incoming) + report = s.measures_report(details=False) remainder = report['summary']['measures'] if remainder: LOG.error('Cannot change sack when non-empty backlog. 
Process ' 'remaining %s measures and try again', remainder) return LOG.info("Changing sack size to: %s", conf.sacks_number) - old_num_sacks = s.incoming.get_storage_sacks() - s.incoming.set_storage_settings(conf.sacks_number) - s.incoming.remove_sack_group(old_num_sacks) + old_num_sacks = s.get_storage_sacks() + s.set_storage_settings(conf.sacks_number) + s.remove_sack_group(old_num_sacks) def statsd(): @@ -150,11 +150,11 @@ class MetricReporting(MetricProcessBase): worker_id, conf, conf.metricd.metric_reporting_delay) def _configure(self): - self.store = storage.get_driver(self.conf) + self.incoming = storage.get_incoming_driver(self.conf.incoming) def _run_job(self): try: - report = self.store.incoming.measures_report(details=False) + report = self.incoming.measures_report(details=False) LOG.info("%d measurements bundles across %d " "metrics wait to be processed.", report['summary']['measures'], diff --git a/gnocchi/storage/__init__.py b/gnocchi/storage/__init__.py index f4d0f7e7..0e60502f 100644 --- a/gnocchi/storage/__init__.py +++ b/gnocchi/storage/__init__.py @@ -146,6 +146,14 @@ def get_driver_class(namespace, conf): conf.driver).driver +def get_incoming_driver(conf): + """Return configured incoming driver only + + :param conf: incoming configuration only (not global) + """ + return get_driver_class('gnocchi.incoming', conf)(conf) + + def get_driver(conf): """Return the configured driver.""" incoming = get_driver_class('gnocchi.incoming', conf.incoming)( -- GitLab From b328c7041821788bf2bc880ce2406f080d25709c Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Tue, 6 Jun 2017 11:35:19 +0200 Subject: [PATCH 0781/1483] Remove useless debug message This is always printed even if nothing have to done: http://logs.openstack.org/44/468844/17/gate/gate-telemetry-dsvm-integration-ceilometer-ubuntu-xenial/8f54c0e/logs/screen-gnocchi-metricd.txt.gz#_Jun_06_09_09_09_101487 Also we have something to do, we already print another more usefull message: "Processing measures 
for b03bed59-d1da-4db0-b9bf-adff3766d2a9" --- gnocchi/storage/__init__.py | 1 - 1 file changed, 1 deletion(-) diff --git a/gnocchi/storage/__init__.py b/gnocchi/storage/__init__.py index f4d0f7e7..983067da 100644 --- a/gnocchi/storage/__init__.py +++ b/gnocchi/storage/__init__.py @@ -176,7 +176,6 @@ class StorageDriver(object): on error :type sync: bool """ - LOG.debug("Processing new measures") try: self.process_new_measures(index, metrics, sync) except Exception: -- GitLab From 0e49c7ffdd732649378fea797b651eeb60fabc43 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Tue, 6 Jun 2017 17:55:23 +0200 Subject: [PATCH 0782/1483] Remove gitreview config Change-Id: Id779145bb9dfc95237ba89b131536d9408bec811 --- .gitreview | 4 ---- 1 file changed, 4 deletions(-) delete mode 100644 .gitreview diff --git a/.gitreview b/.gitreview deleted file mode 100644 index e4b8477d..00000000 --- a/.gitreview +++ /dev/null @@ -1,4 +0,0 @@ -[gerrit] -host=review.openstack.org -port=29418 -project=openstack/gnocchi.git -- GitLab From 86cd908bb9d4399b0629be7e415c7d2d9e493cfa Mon Sep 17 00:00:00 2001 From: gord chung Date: Tue, 6 Jun 2017 17:24:55 +0000 Subject: [PATCH 0783/1483] drop unused _timestamps_and_values_from_dict method leftover from v2 upgrade path. 
--- gnocchi/carbonara.py | 9 --------- 1 file changed, 9 deletions(-) diff --git a/gnocchi/carbonara.py b/gnocchi/carbonara.py index 6525e8f6..eb647d51 100644 --- a/gnocchi/carbonara.py +++ b/gnocchi/carbonara.py @@ -209,15 +209,6 @@ class TimeSerie(object): def __len__(self): return len(self.ts) - @staticmethod - def _timestamps_and_values_from_dict(values): - timestamps = numpy.array(list(values.keys()), dtype='datetime64[ns]') - timestamps = pandas.to_datetime(timestamps) - v = list(values.values()) - if v: - return timestamps, v - return (), () - @staticmethod def _to_offset(value): if isinstance(value, numbers.Real): -- GitLab From b2da4c8e0ede02c4970affc417d7684c8f7d6d7a Mon Sep 17 00:00:00 2001 From: gord chung Date: Mon, 5 Jun 2017 20:57:46 +0000 Subject: [PATCH 0784/1483] append rather than combine_first when getting measures spanning multiple object, we currently use combine_first. we shouldn't need to use combine_first since there should not be any overlap in indices across measures in object. because of that we should be able to use append which is faster because it doesn't need to verify as much. 
benchmark combining 5K points is ~7x faster: timeit.timeit("asdf=ts.combine_first(ts2);asdf=asdf.combine_first(ts3)", number=1000) 1.601928949356079 timeit.timeit("asdf=ts.append([ts3,ts2])", number=1000) 0.2242450714111328 i'm not initialising a new Series to work on because it adds significant overhead timeit.timeit("asdf=pd.Series();asdf=asdf.combine_first(ts); asdf=asdf.combine_first(ts2);asdf=asdf.combine_first(ts3)", number=1000) 5.4795918464660645 --- gnocchi/carbonara.py | 8 +++++--- gnocchi/storage/_carbonara.py | 6 +++--- 2 files changed, 8 insertions(+), 6 deletions(-) diff --git a/gnocchi/carbonara.py b/gnocchi/carbonara.py index eb647d51..a6399327 100644 --- a/gnocchi/carbonara.py +++ b/gnocchi/carbonara.py @@ -551,9 +551,11 @@ class AggregatedTimeSerie(TimeSerie): @classmethod def from_timeseries(cls, timeseries, sampling, aggregation_method, max_size=None): - ts = pandas.Series() - for t in timeseries: - ts = ts.combine_first(t.ts) + # NOTE(gordc): Indices must be unique across all timeseries. Also, + # timeseries should be a list that is ordered within list and series. 
+ ts = (timeseries[0].ts.append([t.ts for t in timeseries[1:]]) + if timeseries else None) + return cls(sampling=sampling, aggregation_method=aggregation_method, ts=ts, max_size=max_size) diff --git a/gnocchi/storage/_carbonara.py b/gnocchi/storage/_carbonara.py index 65983ad1..ca723ea7 100644 --- a/gnocchi/storage/_carbonara.py +++ b/gnocchi/storage/_carbonara.py @@ -200,15 +200,15 @@ class CarbonaraBasedStorage(storage.StorageDriver): carbonara.SplitKey.from_timestamp_and_sampling( to_timestamp, granularity)) - timeseries = filter( + timeseries = list(filter( lambda x: x is not None, self._map_in_thread( self._get_measures_and_unserialize, ((metric, key, aggregation, granularity) - for key in all_keys + for key in sorted(all_keys) if ((not from_timestamp or key >= from_timestamp) and (not to_timestamp or key <= to_timestamp)))) - ) + )) return carbonara.AggregatedTimeSerie.from_timeseries( sampling=granularity, -- GitLab From cb3eb287d85102c79b6d2629dd106450395bcd1a Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Wed, 7 Jun 2017 00:11:51 +0200 Subject: [PATCH 0785/1483] tox: add missing redis dep for functional tests Install redis as a test dependency since it is used as a coordination driver in functional tests (--coordination-driver is passed to pifpaf). 
--- tox.ini | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/tox.ini b/tox.ini index b9fa1711..d3a0cde8 100644 --- a/tox.ini +++ b/tox.ini @@ -33,7 +33,10 @@ setenv = s3: GNOCCHI_TEST_TARBALLS= redis: GNOCCHI_TEST_TARBALLS= file: GNOCCHI_TEST_TARBALLS= -deps = .[test] +# NOTE(jd) Install redis as a test dependency since it is used as a +# coordination driver in functional tests (--coordination-driver is passed to +# pifpaf) +deps = .[test,redis] postgresql: .[postgresql,{env:GNOCCHI_STORAGE_DEPS}] mysql: .[mysql,{env:GNOCCHI_STORAGE_DEPS}] {env:GNOCCHI_TEST_TARBALLS:} -- GitLab From 5d7bdbbe6f1e8b4279dd1c8f7f37788aad882596 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Tue, 6 Jun 2017 21:13:45 +0200 Subject: [PATCH 0786/1483] travis: fetch all refs for docs Since 464cfc9f8469fc7cea4d6987603775009f6cab16 we change the fetcher configuration to build the doc of stable branch from master. But that doesn't work when we build the doc from stable branch. This change should fix that by fetching all refs. This also removes the useless rebase since we enforce PR to be up2date before merging. --- .travis.yml | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/.travis.yml b/.travis.yml index 10a2bd81..9b2b56fb 100644 --- a/.travis.yml +++ b/.travis.yml @@ -25,11 +25,13 @@ before_script: # Travis We need to fetch all tags/branches for documentation target - case $TARGET in docs*) - git config --add remote.origin.fetch +refs/heads/stable/*:refs/remotes/origin/stable/*; + git config --get-all remote.origin.fetch; + git config --unset-all remote.origin.fetch; + git config --add remote.origin.fetch +refs/heads/*:refs/remotes/origin/*; + git config --get-all remote.origin.fetch; git fetch --unshallow --tags; ;; esac - - 'if [ "$TRAVIS_PULL_REQUEST" != "false" ]; then git pull --rebase ; fi' - docker build --tag gnocchi-ci --file=tools/travis-ci-setup.dockerfile . 
script: - docker run -v ~/.cache/pip:/home/tester/.cache/pip -v $(pwd):/home/tester/src gnocchi-ci tox -e ${TARGET} -- GitLab From 375df41a89da16bc1e166c7fda7a1b7d9e18eb61 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Tue, 6 Jun 2017 09:31:53 +0200 Subject: [PATCH 0787/1483] sqlalchemy: be explicit about the PostgreSQL driver SQLAlchemy now logs a warning if the driver is not correctly set in the URL. Fixes #56 --- gnocchi/indexer/sqlalchemy.py | 4 ++++ setup.cfg | 1 + 2 files changed, 5 insertions(+) diff --git a/gnocchi/indexer/sqlalchemy.py b/gnocchi/indexer/sqlalchemy.py index 3497b52d..87a3f055 100644 --- a/gnocchi/indexer/sqlalchemy.py +++ b/gnocchi/indexer/sqlalchemy.py @@ -275,6 +275,10 @@ class SQLAlchemyIndexer(indexer.IndexerDriver): url = sqlalchemy_url.make_url(url) url.drivername = "mysql+pymysql" return str(url) + if url.startswith("postgresql://"): + url = sqlalchemy_url.make_url(url) + url.drivername = "postgresql+psycopg2" + return str(url) return url def __init__(self, conf): diff --git a/setup.cfg b/setup.cfg index bf336519..856f1210 100644 --- a/setup.cfg +++ b/setup.cfg @@ -120,6 +120,7 @@ gnocchi.indexer = mysql = gnocchi.indexer.sqlalchemy:SQLAlchemyIndexer mysql+pymysql = gnocchi.indexer.sqlalchemy:SQLAlchemyIndexer postgresql = gnocchi.indexer.sqlalchemy:SQLAlchemyIndexer + postgresql+psycopg2 = gnocchi.indexer.sqlalchemy:SQLAlchemyIndexer gnocchi.aggregates = moving-average = gnocchi.aggregates.moving_stats:MovingAverage -- GitLab From 60be1b141a780f3af19ac30f6bd3a6f1a806aff7 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Tue, 6 Jun 2017 17:07:16 +0200 Subject: [PATCH 0788/1483] Remove devstack gate support This is not used anymore after the move to GitHub. 
--- bindep.txt | 10 ---- devstack/gate/gate_hook.sh | 59 --------------------- devstack/gate/post_test_hook.sh | 78 ---------------------------- gnocchi/tests/functional/fixtures.py | 8 +-- tox.ini | 2 +- 5 files changed, 3 insertions(+), 154 deletions(-) delete mode 100644 bindep.txt delete mode 100755 devstack/gate/gate_hook.sh delete mode 100755 devstack/gate/post_test_hook.sh diff --git a/bindep.txt b/bindep.txt deleted file mode 100644 index 9d9b91a5..00000000 --- a/bindep.txt +++ /dev/null @@ -1,10 +0,0 @@ -libpq-dev [platform:dpkg] -postgresql [platform:dpkg] -mysql-client [platform:dpkg] -mysql-server [platform:dpkg] -build-essential [platform:dpkg] -libffi-dev [platform:dpkg] -librados-dev [platform:dpkg] -ceph [platform:dpkg] -redis-server [platform:dpkg] -liberasurecode-dev [platform:dpkg] diff --git a/devstack/gate/gate_hook.sh b/devstack/gate/gate_hook.sh deleted file mode 100755 index c01d37a0..00000000 --- a/devstack/gate/gate_hook.sh +++ /dev/null @@ -1,59 +0,0 @@ -#!/bin/bash -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -# This script is executed inside gate_hook function in devstack gate. 
- -STORAGE_DRIVER="$1" -SQL_DRIVER="$2" - -ENABLED_SERVICES="key,gnocchi-api,gnocchi-metricd,tempest," - -# Use efficient wsgi web server -DEVSTACK_LOCAL_CONFIG+=$'\nexport GNOCCHI_DEPLOY=uwsgi' -DEVSTACK_LOCAL_CONFIG+=$'\nexport KEYSTONE_DEPLOY=uwsgi' - -export DEVSTACK_GATE_INSTALL_TESTONLY=1 -export DEVSTACK_GATE_NO_SERVICES=1 -export DEVSTACK_GATE_TEMPEST=1 -export DEVSTACK_GATE_TEMPEST_NOTESTS=1 -export DEVSTACK_GATE_EXERCISES=0 -export KEEP_LOCALRC=1 - -case $STORAGE_DRIVER in - file) - DEVSTACK_LOCAL_CONFIG+=$'\nexport GNOCCHI_STORAGE_BACKEND=file' - ;; - swift) - ENABLED_SERVICES+="s-proxy,s-account,s-container,s-object," - DEVSTACK_LOCAL_CONFIG+=$'\nexport GNOCCHI_STORAGE_BACKEND=swift' - # FIXME(sileht): use mod_wsgi as workaround for LP#1508424 - DEVSTACK_GATE_TEMPEST+=$'\nexport SWIFT_USE_MOD_WSGI=True' - ;; - ceph) - DEVSTACK_LOCAL_CONFIG+=$'\nexport GNOCCHI_STORAGE_BACKEND=ceph' - ;; -esac - - -# default to mysql -case $SQL_DRIVER in - postgresql) - export DEVSTACK_GATE_POSTGRES=1 - ;; -esac - -export ENABLED_SERVICES -export DEVSTACK_LOCAL_CONFIG - -$BASE/new/devstack-gate/devstack-vm-gate.sh diff --git a/devstack/gate/post_test_hook.sh b/devstack/gate/post_test_hook.sh deleted file mode 100755 index f4a89086..00000000 --- a/devstack/gate/post_test_hook.sh +++ /dev/null @@ -1,78 +0,0 @@ -#!/bin/bash -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -# This script is executed inside post_test_hook function in devstack gate. 
- -source $BASE/new/devstack/openrc admin admin - -set -e - -function generate_testr_results { - if [ -f .testrepository/0 ]; then - sudo /usr/os-testr-env/bin/testr last --subunit > $WORKSPACE/testrepository.subunit - sudo mv $WORKSPACE/testrepository.subunit $BASE/logs/testrepository.subunit - sudo /usr/os-testr-env/bin/subunit2html $BASE/logs/testrepository.subunit $BASE/logs/testr_results.html - sudo gzip -9 $BASE/logs/testrepository.subunit - sudo gzip -9 $BASE/logs/testr_results.html - sudo chown jenkins:jenkins $BASE/logs/testrepository.subunit.gz $BASE/logs/testr_results.html.gz - sudo chmod a+r $BASE/logs/testrepository.subunit.gz $BASE/logs/testr_results.html.gz - fi -} - -set -x - -export GNOCCHI_DIR="$BASE/new/gnocchi" -sudo chown -R stack:stack $GNOCCHI_DIR -cd $GNOCCHI_DIR - -openstack catalog list - -export GNOCCHI_SERVICE_TOKEN=$(openstack token issue -c id -f value) -export GNOCCHI_ENDPOINT=$(openstack catalog show metric -c endpoints -f value | awk '/public/{print $2}') -export GNOCCHI_AUTHORIZATION="" # Temporary set to transition to the new functional testing - -curl -X GET ${GNOCCHI_ENDPOINT}/v1/archive_policy -H "Content-Type: application/json" - -sudo gnocchi-upgrade - -# Just ensure tools still works -sudo -E -H -u stack $GNOCCHI_DIR/tools/measures_injector.py --metrics 1 --batch-of-measures 2 --measures-per-batch 2 - -# NOTE(sileht): on swift job permissions are wrong, I don't known why -sudo chown -R tempest:stack $BASE/new/tempest -sudo chown -R tempest:stack $BASE/data/tempest - -# Run tests with tempst -cd $BASE/new/tempest -set +e -sudo -H -u tempest OS_TEST_TIMEOUT=$TEMPEST_OS_TEST_TIMEOUT tox -eall-plugin -- gnocchi --concurrency=$TEMPEST_CONCURRENCY -TEMPEST_EXIT_CODE=$? 
-set -e -if [[ $TEMPEST_EXIT_CODE != 0 ]]; then - # Collect and parse result - generate_testr_results - exit $TEMPEST_EXIT_CODE -fi - -# Run tests with tox -cd $GNOCCHI_DIR -echo "Running gnocchi functional test suite" -set +e -sudo -E -H -u stack tox -epy27-gate -EXIT_CODE=$? -set -e - -# Collect and parse result -generate_testr_results -exit $EXIT_CODE diff --git a/gnocchi/tests/functional/fixtures.py b/gnocchi/tests/functional/fixtures.py index a51a2614..f78ea516 100644 --- a/gnocchi/tests/functional/fixtures.py +++ b/gnocchi/tests/functional/fixtures.py @@ -106,12 +106,8 @@ class ConfigFixture(fixture.GabbiFixture): if conf.indexer.url is None: raise case.SkipTest("No indexer configured") - # Use the presence of DEVSTACK_GATE_TEMPEST as a semaphore - # to signal we are not in a gate driven functional test - # and thus should override conf settings. - if 'DEVSTACK_GATE_TEMPEST' not in os.environ: - conf.set_override('driver', 'file', 'storage') - conf.set_override('file_basepath', data_tmp_dir, 'storage') + conf.set_override('driver', 'file', 'storage') + conf.set_override('file_basepath', data_tmp_dir, 'storage') # NOTE(jd) All of that is still very SQL centric but we only support # SQL for now so let's say it's good enough. diff --git a/tox.ini b/tox.ini index d3a0cde8..d7b0706d 100644 --- a/tox.ini +++ b/tox.ini @@ -78,7 +78,7 @@ commands = pifpaf --env-prefix INDEXER run mysql -- pifpaf --env-prefix STORAGE [testenv:bashate] deps = bashate -commands = bashate -v devstack/plugin.sh devstack/gate/gate_hook.sh devstack/gate/post_test_hook.sh +commands = bashate -v devstack/plugin.sh whitelist_externals = bash [testenv:pep8] -- GitLab From c307faa535a00b5525b73bb29a434ba2692f8c67 Mon Sep 17 00:00:00 2001 From: gord chung Date: Thu, 1 Jun 2017 22:46:22 +0000 Subject: [PATCH 0789/1483] reuse storage coordinator the processing worker creates it's own connection to coordinator to handle hashring distribution. 
this is unnecessary as the storage driver already has a coordinator for sack_locking which is exactly what the processing worker's coordinator is ultimately for. reuse the same coordinator since if the storage coordinator is down, the processing worker is ultimately down. --- gnocchi/cli.py | 15 +++++++++------ gnocchi/storage/__init__.py | 6 +++--- gnocchi/storage/_carbonara.py | 11 +++++++---- gnocchi/storage/ceph.py | 4 ++-- gnocchi/storage/file.py | 4 ++-- gnocchi/storage/redis.py | 4 ++-- gnocchi/storage/s3.py | 4 ++-- gnocchi/storage/swift.py | 4 ++-- 8 files changed, 29 insertions(+), 23 deletions(-) diff --git a/gnocchi/cli.py b/gnocchi/cli.py index ebeaa6d6..b95027b6 100644 --- a/gnocchi/cli.py +++ b/gnocchi/cli.py @@ -174,26 +174,29 @@ class MetricProcessor(MetricProcessBase): def __init__(self, worker_id, conf): super(MetricProcessor, self).__init__( worker_id, conf, conf.metricd.metric_processing_delay) - self._coord, self._my_id = utils.get_coordinator_and_start( + self.coord, __ = utils.get_coordinator_and_start( conf.storage.coordination_url) self._tasks = [] self.group_state = None @utils.retry def _configure(self): - super(MetricProcessor, self)._configure() + self.store = storage.get_driver(self.conf, self.coord) + self.index = indexer.get_driver(self.conf) + self.index.connect() + # create fallback in case paritioning fails or assigned no tasks self.fallback_tasks = list( six.moves.range(self.store.incoming.NUM_SACKS)) try: - self.partitioner = self._coord.join_partitioned_group( + self.partitioner = self.coord.join_partitioned_group( self.GROUP_ID, partitions=200) LOG.info('Joined coordination group: %s', self.GROUP_ID) @periodics.periodic(spacing=self.conf.metricd.worker_sync_rate, run_immediately=True) def run_watchers(): - self._coord.run_watchers() + self.coord.run_watchers() self.periodic = periodics.PeriodicWorker.create([]) self.periodic.add(run_watchers) @@ -227,7 +230,7 @@ class MetricProcessor(MetricProcessBase): for s in 
self._get_tasks(): # TODO(gordc): support delay release lock so we don't # process a sack right after another process - lock = in_store.get_sack_lock(self._coord, s) + lock = in_store.get_sack_lock(self.coord, s) if not lock.acquire(blocking=False): continue try: @@ -243,7 +246,7 @@ class MetricProcessor(MetricProcessBase): LOG.debug("%d metrics processed from %d sacks", m_count, s_count) def close_services(self): - self._coord.stop() + self.coord.stop() class MetricJanitor(MetricProcessBase): diff --git a/gnocchi/storage/__init__.py b/gnocchi/storage/__init__.py index 88d78fb8..cf1b52f3 100644 --- a/gnocchi/storage/__init__.py +++ b/gnocchi/storage/__init__.py @@ -154,16 +154,16 @@ def get_incoming_driver(conf): return get_driver_class('gnocchi.incoming', conf)(conf) -def get_driver(conf): +def get_driver(conf, coord=None): """Return the configured driver.""" incoming = get_driver_class('gnocchi.incoming', conf.incoming)( conf.incoming) return get_driver_class('gnocchi.storage', conf.storage)( - conf.storage, incoming) + conf.storage, incoming, coord) class StorageDriver(object): - def __init__(self, conf, incoming): + def __init__(self, conf, incoming, coord=None): self.incoming = incoming @staticmethod diff --git a/gnocchi/storage/_carbonara.py b/gnocchi/storage/_carbonara.py index ca723ea7..2b8753b0 100644 --- a/gnocchi/storage/_carbonara.py +++ b/gnocchi/storage/_carbonara.py @@ -59,7 +59,7 @@ class SackLockTimeoutError(Exception): class CarbonaraBasedStorage(storage.StorageDriver): - def __init__(self, conf, incoming): + def __init__(self, conf, incoming, coord=None): super(CarbonaraBasedStorage, self).__init__(conf, incoming) self.aggregation_workers_number = conf.aggregation_workers_number if self.aggregation_workers_number == 1: @@ -67,11 +67,14 @@ class CarbonaraBasedStorage(storage.StorageDriver): self._map_in_thread = self._map_no_thread else: self._map_in_thread = self._map_in_futures_threads - self.coord, my_id = utils.get_coordinator_and_start( - 
conf.coordination_url) + self.coord, __ = ( + (coord, None) if coord else + utils.get_coordinator_and_start(conf.coordination_url)) + self.shared_coord = bool(coord) def stop(self): - self.coord.stop() + if not self.shared_coord: + self.coord.stop() @staticmethod def _get_measures(metric, timestamp_key, aggregation, granularity, diff --git a/gnocchi/storage/ceph.py b/gnocchi/storage/ceph.py index 4de4d1b5..ca5b2809 100644 --- a/gnocchi/storage/ceph.py +++ b/gnocchi/storage/ceph.py @@ -41,8 +41,8 @@ rados = ceph.rados class CephStorage(_carbonara.CarbonaraBasedStorage): WRITE_FULL = False - def __init__(self, conf, incoming): - super(CephStorage, self).__init__(conf, incoming) + def __init__(self, conf, incoming, coord=None): + super(CephStorage, self).__init__(conf, incoming, coord) self.rados, self.ioctx = ceph.create_rados_connection(conf) def stop(self): diff --git a/gnocchi/storage/file.py b/gnocchi/storage/file.py index 3c067bef..11cfad89 100644 --- a/gnocchi/storage/file.py +++ b/gnocchi/storage/file.py @@ -36,8 +36,8 @@ OPTS = [ class FileStorage(_carbonara.CarbonaraBasedStorage): WRITE_FULL = True - def __init__(self, conf, incoming): - super(FileStorage, self).__init__(conf, incoming) + def __init__(self, conf, incoming, coord=None): + super(FileStorage, self).__init__(conf, incoming, coord) self.basepath = conf.file_basepath self.basepath_tmp = os.path.join(self.basepath, 'tmp') utils.ensure_paths([self.basepath_tmp]) diff --git a/gnocchi/storage/redis.py b/gnocchi/storage/redis.py index fc2c63ad..843827a2 100644 --- a/gnocchi/storage/redis.py +++ b/gnocchi/storage/redis.py @@ -33,8 +33,8 @@ class RedisStorage(_carbonara.CarbonaraBasedStorage): STORAGE_PREFIX = "timeseries" FIELD_SEP = '_' - def __init__(self, conf, incoming): - super(RedisStorage, self).__init__(conf, incoming) + def __init__(self, conf, incoming, coord=None): + super(RedisStorage, self).__init__(conf, incoming, coord) self._client = redis.get_client(conf) def _metric_key(self, metric): 
diff --git a/gnocchi/storage/s3.py b/gnocchi/storage/s3.py index dd923a92..26149b5d 100644 --- a/gnocchi/storage/s3.py +++ b/gnocchi/storage/s3.py @@ -63,8 +63,8 @@ class S3Storage(_carbonara.CarbonaraBasedStorage): _consistency_wait = tenacity.wait_exponential(multiplier=0.1) - def __init__(self, conf, incoming): - super(S3Storage, self).__init__(conf, incoming) + def __init__(self, conf, incoming, coord=None): + super(S3Storage, self).__init__(conf, incoming, coord) self.s3, self._region_name, self._bucket_prefix = ( s3.get_connection(conf) ) diff --git a/gnocchi/storage/swift.py b/gnocchi/storage/swift.py index 52dadbdb..ae67b7d5 100644 --- a/gnocchi/storage/swift.py +++ b/gnocchi/storage/swift.py @@ -68,8 +68,8 @@ class SwiftStorage(_carbonara.CarbonaraBasedStorage): WRITE_FULL = True - def __init__(self, conf, incoming): - super(SwiftStorage, self).__init__(conf, incoming) + def __init__(self, conf, incoming, coord=None): + super(SwiftStorage, self).__init__(conf, incoming, coord) self.swift = swift.get_connection(conf) self._container_prefix = conf.swift_container_prefix -- GitLab From cc0c0f0b8376cb06440b47eb4e6d1c183c8f0a2c Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Tue, 30 May 2017 11:43:53 +0200 Subject: [PATCH 0790/1483] cors: Allow Authorization header The basic auth plugin doesn't work with cors by default. This change fixes it. 
Change-Id: I8dea973cab58acb7816dfa48ed5daa621a73a4d5 --- gnocchi/opts.py | 1 + 1 file changed, 1 insertion(+) diff --git a/gnocchi/opts.py b/gnocchi/opts.py index 023138da..a4bcc61a 100644 --- a/gnocchi/opts.py +++ b/gnocchi/opts.py @@ -159,6 +159,7 @@ def list_opts(): def set_defaults(): cfg.set_defaults(cors.CORS_OPTS, allow_headers=[ + 'Authorization', 'X-Auth-Token', 'X-Subject-Token', 'X-User-Id', -- GitLab From e66fa53348e11f22f98ae02170d87f62cbd64709 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Tue, 30 May 2017 11:46:35 +0200 Subject: [PATCH 0791/1483] auth: pass complete request It can safely use pecan.request object, since the auth plugin is called from the pecan.request it self. This will allow to create more advanced auth plugins that need to access to the environment variable. Change-Id: I90773e51ce4f20d31ccd6b9c4a18e358583cd59f --- gnocchi/rest/__init__.py | 22 +++++++++++----------- gnocchi/rest/auth_helper.py | 36 ++++++++++++++++++------------------ 2 files changed, 29 insertions(+), 29 deletions(-) diff --git a/gnocchi/rest/__init__.py b/gnocchi/rest/__init__.py index 42f638d7..af9eb9db 100644 --- a/gnocchi/rest/__init__.py +++ b/gnocchi/rest/__init__.py @@ -78,7 +78,7 @@ def enforce(rule, target): :param target: The target to enforce on. 
""" - creds = pecan.request.auth_helper.get_auth_info(pecan.request.headers) + creds = pecan.request.auth_helper.get_auth_info(pecan.request) if not isinstance(target, dict): if hasattr(target, "jsonify"): @@ -523,7 +523,7 @@ class MetricsController(rest.RestController): definition['archive_policy_name'] = ap.name creator = pecan.request.auth_helper.get_current_user( - pecan.request.headers) + pecan.request) enforce("create metric", { "creator": creator, @@ -537,7 +537,7 @@ class MetricsController(rest.RestController): @pecan.expose('json') def post(self): creator = pecan.request.auth_helper.get_current_user( - pecan.request.headers) + pecan.request) body = deserialize_and_validate(self.MetricSchema) try: m = pecan.request.indexer.create_metric( @@ -587,7 +587,7 @@ class MetricsController(rest.RestController): except webob.exc.HTTPForbidden: enforce("list metric", {}) creator = pecan.request.auth_helper.get_current_user( - pecan.request.headers) + pecan.request) if provided_creator and creator != provided_creator: abort(403, "Insufficient privileges to filter by user/project") attr_filter = {} @@ -883,7 +883,7 @@ class ResourceController(rest.RestController): def __init__(self, resource_type, id): self._resource_type = resource_type creator = pecan.request.auth_helper.get_current_user( - pecan.request.headers) + pecan.request) try: self.id = utils.ResourceUUID(id, creator) except ValueError: @@ -990,7 +990,7 @@ class ResourcesController(rest.RestController): # and we don't want that next patch call have the "id" schema = dict(schema_for(self._resource_type)) creator = pecan.request.auth_helper.get_current_user( - pecan.request.headers) + pecan.request) schema["id"] = functools.partial(ResourceID, creator=creator) body = deserialize_and_validate(schema) @@ -1027,7 +1027,7 @@ class ResourcesController(rest.RestController): pagination_opts = get_pagination_options( kwargs, RESOURCE_DEFAULT_PAGINATION) policy_filter = 
pecan.request.auth_helper.get_resource_policy_filter( - pecan.request.headers, "list resource", self._resource_type) + pecan.request, "list resource", self._resource_type) try: # FIXME(sileht): next API version should returns @@ -1055,7 +1055,7 @@ class ResourcesController(rest.RestController): delete entire database") policy_filter = pecan.request.auth_helper.get_resource_policy_filter( - pecan.request.headers, + pecan.request, "delete resources", self._resource_type) if policy_filter: @@ -1177,7 +1177,7 @@ def ResourceSearchSchema(v): def _ResourceSearchSchema(): user = pecan.request.auth_helper.get_current_user( - pecan.request.headers) + pecan.request) _ResourceUUID = functools.partial(ResourceUUID, creator=user) return voluptuous.Schema( @@ -1238,7 +1238,7 @@ class SearchResourceTypeController(rest.RestController): kwargs, RESOURCE_DEFAULT_PAGINATION) policy_filter = pecan.request.auth_helper.get_resource_policy_filter( - pecan.request.headers, "search resource", self._resource_type) + pecan.request, "search resource", self._resource_type) if policy_filter: if attr_filter: attr_filter = {"and": [ @@ -1378,7 +1378,7 @@ class ResourcesMetricsMeasuresBatchController(rest.RestController): @pecan.expose('json') def post(self, create_metrics=False): creator = pecan.request.auth_helper.get_current_user( - pecan.request.headers) + pecan.request) MeasuresBatchSchema = voluptuous.Schema( {functools.partial(ResourceID, creator=creator): {six.text_type: MeasuresListSchema}} diff --git a/gnocchi/rest/auth_helper.py b/gnocchi/rest/auth_helper.py index 46c0893c..77dff813 100644 --- a/gnocchi/rest/auth_helper.py +++ b/gnocchi/rest/auth_helper.py @@ -22,26 +22,26 @@ from gnocchi import rest class KeystoneAuthHelper(object): @staticmethod - def get_current_user(headers): + def get_current_user(request): # FIXME(jd) should have domain but should not break existing :( - user_id = headers.get("X-User-Id", "") - project_id = headers.get("X-Project-Id", "") + user_id = 
request.headers.get("X-User-Id", "") + project_id = request.headers.get("X-Project-Id", "") return user_id + ":" + project_id @staticmethod - def get_auth_info(headers): - user_id = headers.get("X-User-Id") - project_id = headers.get("X-Project-Id") + def get_auth_info(request): + user_id = request.headers.get("X-User-Id") + project_id = request.headers.get("X-Project-Id") return { "user": (user_id or "") + ":" + (project_id or ""), "user_id": user_id, "project_id": project_id, - 'domain_id': headers.get("X-Domain-Id"), - 'roles': headers.get("X-Roles", "").split(","), + 'domain_id': request.headers.get("X-Domain-Id"), + 'roles': request.headers.get("X-Roles", "").split(","), } @staticmethod - def get_resource_policy_filter(headers, rule, resource_type): + def get_resource_policy_filter(request, rule, resource_type): try: # Check if the policy allows the user to list any resource rest.enforce(rule, { @@ -49,7 +49,7 @@ class KeystoneAuthHelper(object): }) except webob.exc.HTTPForbidden: policy_filter = [] - project_id = headers.get("X-Project-Id") + project_id = request.headers.get("X-Project-Id") try: # Check if the policy allows the user to list resources linked @@ -88,10 +88,10 @@ class KeystoneAuthHelper(object): class NoAuthHelper(KeystoneAuthHelper): @staticmethod - def get_current_user(headers): + def get_current_user(request): # FIXME(jd) Should be a single header - user_id = headers.get("X-User-Id") - project_id = headers.get("X-Project-Id") + user_id = request.headers.get("X-User-Id") + project_id = request.headers.get("X-Project-Id") if user_id: if project_id: return user_id + ":" + project_id @@ -103,15 +103,15 @@ class NoAuthHelper(KeystoneAuthHelper): class BasicAuthHelper(object): @staticmethod - def get_current_user(headers): + def get_current_user(request): auth = werkzeug.http.parse_authorization_header( - headers.get("Authorization")) + request.headers.get("Authorization")) if auth is None: rest.abort(401) return auth.username - def 
get_auth_info(self, headers): - user = self.get_current_user(headers) + def get_auth_info(self, request): + user = self.get_current_user(request) roles = [] if user == "admin": roles.append("admin") @@ -121,5 +121,5 @@ class BasicAuthHelper(object): } @staticmethod - def get_resource_policy_filter(headers, rule, resource_type): + def get_resource_policy_filter(request, rule, resource_type): return None -- GitLab From b06ed71b0c314044d99040acec2f4723d7776dcc Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Tue, 30 May 2017 09:24:00 +0200 Subject: [PATCH 0792/1483] auth: Add REMOTE_USER env support Authentication can be delegated to a frontend webserver/middleware, that will ensure that REMOTE_USER env is set. Change-Id: I8ebda5fccb0804d3349b84cefbb8353226436c19 --- doc/source/rest.j2 | 6 ++++- gnocchi/opts.py | 1 + gnocchi/rest/api-paste.ini | 6 +++++ gnocchi/rest/auth_helper.py | 23 +++++++++++++++++++ gnocchi/tests/test_rest.py | 12 +++++++++- ...moteuser-auth-plugin-00f0cefb6b003a6e.yaml | 5 ++++ setup.cfg | 1 + 7 files changed, 52 insertions(+), 2 deletions(-) create mode 100644 releasenotes/notes/remoteuser-auth-plugin-00f0cefb6b003a6e.yaml diff --git a/doc/source/rest.j2 b/doc/source/rest.j2 index 7a93da3c..82b619a9 100644 --- a/doc/source/rest.j2 +++ b/doc/source/rest.j2 @@ -7,7 +7,7 @@ Authentication By default, the authentication is configured to the "basic" mode. You need to provide an `Authorization` header in your HTTP requests with a valid username -(the password is not used). The "admin" password is granted all privileges, +(the password is not used). The "admin" username is granted all privileges, whereas any other username is recognize as having standard permissions. You can customize permissions by specifying a different `policy_file` than the @@ -18,6 +18,10 @@ middleware will be enabled for authentication. 
It is then needed to authenticate against Keystone and provide a `X-Auth-Token` header with a valid token for each request sent to Gnocchi's API. +If you set the `api.auth_mode` value to `remoteuser`, Gnocchi will look at the +HTTP server REMOTE_USER environment variable to get the username. Then the +permissions model is the same as the "basic" mode. + Metrics ======= diff --git a/gnocchi/opts.py b/gnocchi/opts.py index a4bcc61a..8cf8be40 100644 --- a/gnocchi/opts.py +++ b/gnocchi/opts.py @@ -41,6 +41,7 @@ class CustomStrSubWrapper(cfg.ConfigOpts.StrSubWrapper): return '' return value + cfg.ConfigOpts.StrSubWrapper = CustomStrSubWrapper diff --git a/gnocchi/rest/api-paste.ini b/gnocchi/rest/api-paste.ini index 47bb3c32..84792644 100644 --- a/gnocchi/rest/api-paste.ini +++ b/gnocchi/rest/api-paste.ini @@ -16,6 +16,12 @@ use = egg:Paste#urlmap /v1 = gnocchiv1+keystone /healthcheck = healthcheck +[composite:gnocchi+remoteuser] +use = egg:Paste#urlmap +/ = gnocchiversions_pipeline +/v1 = gnocchiv1+noauth +/healthcheck = healthcheck + [pipeline:gnocchiv1+noauth] pipeline = http_proxy_to_wsgi gnocchiv1 diff --git a/gnocchi/rest/auth_helper.py b/gnocchi/rest/auth_helper.py index 77dff813..99bb607e 100644 --- a/gnocchi/rest/auth_helper.py +++ b/gnocchi/rest/auth_helper.py @@ -123,3 +123,26 @@ class BasicAuthHelper(object): @staticmethod def get_resource_policy_filter(request, rule, resource_type): return None + + +class RemoteUserAuthHelper(object): + @staticmethod + def get_current_user(request): + user = request.remote_user + if user is None: + rest.abort(401) + return user.decode('iso-8859-1') + + def get_auth_info(self, request): + user = self.get_current_user(request) + roles = [] + if user == "admin": + roles.append("admin") + return { + "user": user, + "roles": roles + } + + @staticmethod + def get_resource_policy_filter(request, rule, resource_type): + return None diff --git a/gnocchi/tests/test_rest.py b/gnocchi/tests/test_rest.py index 9caf9b39..f9bb5ffc 100644 
--- a/gnocchi/tests/test_rest.py +++ b/gnocchi/tests/test_rest.py @@ -84,6 +84,13 @@ class TestingApp(webtest.TestApp): yield finally: self.user = old_user + elif self.auth_mode == "remoteuser": + old_user = self.user + self.user = b"admin" + try: + yield + finally: + self.user = old_user elif self.auth_mode == "noauth": raise testcase.TestSkipped("auth mode is noauth") else: @@ -119,6 +126,8 @@ class TestingApp(webtest.TestApp): req.headers['Authorization'] = ( b"basic " + base64.b64encode(self.user + b":") ) + elif self.auth_mode == "remoteuser": + req.remote_user = self.user elif self.auth_mode == "noauth": req.headers['X-User-Id'] = self.USER_ID req.headers['X-Project-Id'] = self.PROJECT_ID @@ -134,6 +143,7 @@ class RestTest(tests_base.TestCase, testscenarios.TestWithScenarios): ('basic', dict(auth_mode="basic")), ('keystone', dict(auth_mode="keystone")), ('noauth', dict(auth_mode="noauth")), + ('remoteuser', dict(auth_mode="remoteuser")), ] def setUp(self): @@ -645,7 +655,7 @@ class ResourceTest(RestTest): self.resource['creator'] = ( TestingApp.USER_ID + ":" + TestingApp.PROJECT_ID ) - elif self.auth_mode == "basic": + elif self.auth_mode in ["basic", "remoteuser"]: self.resource['created_by_project_id'] = "" self.resource['creator'] = TestingApp.USER_ID self.resource['ended_at'] = None diff --git a/releasenotes/notes/remoteuser-auth-plugin-00f0cefb6b003a6e.yaml b/releasenotes/notes/remoteuser-auth-plugin-00f0cefb6b003a6e.yaml new file mode 100644 index 00000000..93c6c558 --- /dev/null +++ b/releasenotes/notes/remoteuser-auth-plugin-00f0cefb6b003a6e.yaml @@ -0,0 +1,5 @@ +--- +features: + - | + Gnocchi provides a new authentication mode 'remoteuser'. It uses the HTTP + server REMOTE_USER environment variable to retrieve the username. 
diff --git a/setup.cfg b/setup.cfg index 856f1210..fca2a973 100644 --- a/setup.cfg +++ b/setup.cfg @@ -129,6 +129,7 @@ gnocchi.rest.auth_helper = noauth = gnocchi.rest.auth_helper:NoAuthHelper keystone = gnocchi.rest.auth_helper:KeystoneAuthHelper basic = gnocchi.rest.auth_helper:BasicAuthHelper + remoteuser = gnocchi.rest.auth_helper:RemoteUserAuthHelper console_scripts = gnocchi-config-generator = gnocchi.cli:config_generator -- GitLab From e037b9be3489cf172288713356be8a40011af334 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Tue, 6 Jun 2017 10:41:12 +0200 Subject: [PATCH 0793/1483] doc: improve upgrading documentation --- doc/source/install.rst | 24 +++++++++++++++++++----- 1 file changed, 19 insertions(+), 5 deletions(-) diff --git a/doc/source/install.rst b/doc/source/install.rst index 2ffb679c..0f6fa2c7 100644 --- a/doc/source/install.rst +++ b/doc/source/install.rst @@ -152,22 +152,36 @@ and storage: Upgrading ========= In order to upgrade from a previous version of Gnocchi, you need to make sure -that your indexer and storage are properly upgraded. Run the following: +that your indexer and storage are properly upgraded. + +.. warning:: + + Upgrade is only supported between one major version to another or between + minor versions, e.g.: + + - version 2.0 to version 2.1 or 2.2 is supported + + - version 2.1 to version 3.0 is supported + + - version 2 to version 4 is **not** supported. + +Run the following: 1. Stop the old version of Gnocchi API server and `gnocchi-statsd` daemon 2. Stop the old version of `gnocchi-metricd` daemon -.. note:: +.. warning:: Data in backlog is never migrated between versions. Ensure the backlog is empty before any upgrade to ensure data is not lost. 3. Install the new version of Gnocchi -4. Run `gnocchi-upgrade` - This can take several hours depending on the size of your index and - storage. +4. Run `gnocchi-upgrade`. + + This will take from a few minutes to several hours depending on the size of + your index and storage. 5. 
Start the new Gnocchi API server, `gnocchi-metricd` and `gnocchi-statsd` daemons -- GitLab From c3c6eb7c44ee2457cfb711ca828fea654de40613 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Thu, 8 Jun 2017 11:42:54 +0200 Subject: [PATCH 0794/1483] tests: remove half of tests We are running gabbi tests twice with two differents URL just to check header Location. This change just keeps the one with a prefix. --- gnocchi/tests/functional/test_gabbi.py | 35 ------------------- gnocchi/tests/functional/test_gabbi_prefix.py | 2 ++ 2 files changed, 2 insertions(+), 35 deletions(-) delete mode 100644 gnocchi/tests/functional/test_gabbi.py diff --git a/gnocchi/tests/functional/test_gabbi.py b/gnocchi/tests/functional/test_gabbi.py deleted file mode 100644 index 489bd546..00000000 --- a/gnocchi/tests/functional/test_gabbi.py +++ /dev/null @@ -1,35 +0,0 @@ -# -# Copyright 2015 Red Hat. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -"""A test module to exercise the Gnocchi API with gabbi.""" - -import os - -from gabbi import driver -import wsgi_intercept - -from gnocchi.tests.functional import fixtures - - -wsgi_intercept.STRICT_RESPONSE_HEADERS = True -TESTS_DIR = 'gabbits' - - -def load_tests(loader, tests, pattern): - """Provide a TestSuite to the discovery process.""" - test_dir = os.path.join(os.path.dirname(__file__), TESTS_DIR) - return driver.build_tests(test_dir, loader, host=None, - intercept=fixtures.setup_app, - fixture_module=fixtures) diff --git a/gnocchi/tests/functional/test_gabbi_prefix.py b/gnocchi/tests/functional/test_gabbi_prefix.py index 0a77ceeb..0273dee1 100644 --- a/gnocchi/tests/functional/test_gabbi_prefix.py +++ b/gnocchi/tests/functional/test_gabbi_prefix.py @@ -18,10 +18,12 @@ import os from gabbi import driver +import wsgi_intercept from gnocchi.tests.functional import fixtures +wsgi_intercept.STRICT_RESPONSE_HEADERS = True TESTS_DIR = 'gabbits' PREFIX = '/gnocchi' -- GitLab From a24fbe0f0c7634acf81e137b5d74127ebcc28f0b Mon Sep 17 00:00:00 2001 From: gord chung Date: Wed, 7 Jun 2017 18:17:40 +0000 Subject: [PATCH 0795/1483] remove ignore_too_old_timestamps path there is no workflow that actually accepts too old timestamps. the only workflow that uses the alternate path is test case. i'm not sure what the use case is that we would allow someone to bypass the archive policy definition. 
--- gnocchi/carbonara.py | 31 ++++++------------------------- gnocchi/storage/_carbonara.py | 3 +-- gnocchi/tests/test_carbonara.py | 25 ++++--------------------- 3 files changed, 11 insertions(+), 48 deletions(-) diff --git a/gnocchi/carbonara.py b/gnocchi/carbonara.py index a6399327..49241ac5 100644 --- a/gnocchi/carbonara.py +++ b/gnocchi/carbonara.py @@ -42,16 +42,6 @@ time.strptime("2016-02-19", "%Y-%m-%d") LOG = logging.getLogger(__name__) -class NoDeloreanAvailable(Exception): - """Error raised when trying to insert a value that is too old.""" - - def __init__(self, first_timestamp, bad_timestamp): - self.first_timestamp = first_timestamp - self.bad_timestamp = bad_timestamp - super(NoDeloreanAvailable, self).__init__( - "%s is before %s" % (bad_timestamp, first_timestamp)) - - class BeforeEpochError(Exception): """Error raised when a timestamp before Epoch is used.""" @@ -280,25 +270,16 @@ class BoundTimeSerie(TimeSerie): and self.block_size == other.block_size and self.back_window == other.back_window) - def set_values(self, values, before_truncate_callback=None, - ignore_too_old_timestamps=False): + def set_values(self, values, before_truncate_callback=None): # NOTE: values must be sorted when passed in. if self.block_size is not None and not self.ts.empty: first_block_timestamp = self.first_block_timestamp() - if ignore_too_old_timestamps: - for index, (timestamp, value) in enumerate(values): - if timestamp >= first_block_timestamp: - values = values[index:] - break - else: - values = [] + for index, (timestamp, value) in enumerate(values): + if timestamp >= first_block_timestamp: + values = values[index:] + break else: - # Check that the smallest timestamp does not go too much back - # in time. 
- smallest_timestamp = values[0][0] - if smallest_timestamp < first_block_timestamp: - raise NoDeloreanAvailable(first_block_timestamp, - smallest_timestamp) + values = [] super(BoundTimeSerie, self).set_values(values) if before_truncate_callback: before_truncate_callback(self) diff --git a/gnocchi/storage/_carbonara.py b/gnocchi/storage/_carbonara.py index ca723ea7..0c4bcbc4 100644 --- a/gnocchi/storage/_carbonara.py +++ b/gnocchi/storage/_carbonara.py @@ -448,8 +448,7 @@ class CarbonaraBasedStorage(storage.StorageDriver): with utils.StopWatch() as sw: ts.set_values(measures, - before_truncate_callback=_map_add_measures, - ignore_too_old_timestamps=True) + before_truncate_callback=_map_add_measures) number_of_operations = (len(agg_methods) * len(definition)) perf = "" diff --git a/gnocchi/tests/test_carbonara.py b/gnocchi/tests/test_carbonara.py index 82ec819a..cbe6ded8 100644 --- a/gnocchi/tests/test_carbonara.py +++ b/gnocchi/tests/test_carbonara.py @@ -899,21 +899,6 @@ class TestAggregatedTimeSerie(base.BaseTestCase): ], ts['return'].fetch()) - try: - tsb.set_values([ - (datetime.datetime(2014, 1, 1, 12, 0, 2, 99), 9), - ]) - except carbonara.NoDeloreanAvailable as e: - self.assertEqual( - six.text_type(e), - u"2014-01-01 12:00:02.000099 is before 2014-01-01 12:00:03") - self.assertEqual(datetime.datetime(2014, 1, 1, 12, 0, 2, 99), - e.bad_timestamp) - self.assertEqual(datetime.datetime(2014, 1, 1, 12, 0, 3), - e.first_timestamp) - else: - self.fail("No exception raised") - def test_back_window_ignore(self): """Back window testing. 
@@ -948,9 +933,8 @@ class TestAggregatedTimeSerie(base.BaseTestCase): tsb.set_values([ (datetime.datetime(2014, 1, 1, 12, 0, 2, 99), 9), - ], ignore_too_old_timestamps=True, - before_truncate_callback=functools.partial( - self._resample_and_merge, agg_dict=ts)) + ], before_truncate_callback=functools.partial( + self._resample_and_merge, agg_dict=ts)) self.assertEqual( [ @@ -969,9 +953,8 @@ class TestAggregatedTimeSerie(base.BaseTestCase): tsb.set_values([ (datetime.datetime(2014, 1, 1, 12, 0, 2, 99), 9), (datetime.datetime(2014, 1, 1, 12, 0, 3, 9), 4.5), - ], ignore_too_old_timestamps=True, - before_truncate_callback=functools.partial( - self._resample_and_merge, agg_dict=ts)) + ], before_truncate_callback=functools.partial( + self._resample_and_merge, agg_dict=ts)) self.assertEqual( [ -- GitLab From 9f057fe329ccc01274d2c2f1c11d611c1f787d1b Mon Sep 17 00:00:00 2001 From: gord chung Date: Thu, 8 Jun 2017 15:58:52 +0000 Subject: [PATCH 0796/1483] use generators when possible minimise the amount of lists we're putting into memory --- gnocchi/carbonara.py | 16 ++++----- gnocchi/tests/test_carbonara.py | 63 +++++++++++++++++---------------- 2 files changed, 40 insertions(+), 39 deletions(-) diff --git a/gnocchi/carbonara.py b/gnocchi/carbonara.py index a6399327..d3b63b92 100644 --- a/gnocchi/carbonara.py +++ b/gnocchi/carbonara.py @@ -18,6 +18,7 @@ import datetime import functools +import itertools import logging import math import numbers @@ -737,9 +738,8 @@ class AggregatedTimeSerie(TimeSerie): del points[to_timestamp] except KeyError: pass - return [(timestamp, self.sampling, value) - for timestamp, value - in six.iteritems(points)] + return six.moves.zip(points.index, itertools.repeat(self.sampling), + points) def merge(self, ts): """Merge a timeserie into this one. 
@@ -864,7 +864,7 @@ class AggregatedTimeSerie(TimeSerie): return [] for timeserie in timeseries: - timeserie_raw = timeserie.fetch(from_timestamp, to_timestamp) + timeserie_raw = list(timeserie.fetch(from_timestamp, to_timestamp)) if timeserie_raw: dataframe = pandas.DataFrame(timeserie_raw, columns=columns) @@ -954,10 +954,10 @@ class AggregatedTimeSerie(TimeSerie): agg_timeserie = agg_timeserie[ agg_timeserie['timestamp'] <= right_boundary_ts] - points = (agg_timeserie.sort_values(by=['granularity', 'timestamp'], - ascending=[0, 1]).itertuples()) - return [(timestamp, granularity, value) - for __, timestamp, granularity, value in points] + points = agg_timeserie.sort_values(by=['granularity', 'timestamp'], + ascending=[0, 1]) + return six.moves.zip(points.timestamp, points.granularity, + points.value) if __name__ == '__main__': diff --git a/gnocchi/tests/test_carbonara.py b/gnocchi/tests/test_carbonara.py index 82ec819a..9e886ec7 100644 --- a/gnocchi/tests/test_carbonara.py +++ b/gnocchi/tests/test_carbonara.py @@ -129,23 +129,24 @@ class TestAggregatedTimeSerie(base.BaseTestCase): [(datetime.datetime(2014, 1, 1, 12), 1, 3), (datetime.datetime(2014, 1, 1, 12, 0, 4), 1, 5), (datetime.datetime(2014, 1, 1, 12, 0, 9), 1, 6)], - ts.fetch()) + list(ts.fetch())) self.assertEqual( [(datetime.datetime(2014, 1, 1, 12, 0, 4), 1, 5), (datetime.datetime(2014, 1, 1, 12, 0, 9), 1, 6)], - ts.fetch(from_timestamp=datetime.datetime(2014, 1, 1, 12, 0, 4))) + list(ts.fetch( + from_timestamp=datetime.datetime(2014, 1, 1, 12, 0, 4)))) self.assertEqual( [(datetime.datetime(2014, 1, 1, 12, 0, 4), 1, 5), (datetime.datetime(2014, 1, 1, 12, 0, 9), 1, 6)], - ts.fetch( + list(ts.fetch( from_timestamp=iso8601.parse_date( - "2014-01-01 12:00:04"))) + "2014-01-01 12:00:04")))) self.assertEqual( [(datetime.datetime(2014, 1, 1, 12, 0, 4), 1, 5), (datetime.datetime(2014, 1, 1, 12, 0, 9), 1, 6)], - ts.fetch( + list(ts.fetch( from_timestamp=iso8601.parse_date( - "2014-01-01 13:00:04+01:00"))) + 
"2014-01-01 13:00:04+01:00")))) def test_before_epoch(self): ts = carbonara.TimeSerie.from_tuples( @@ -435,7 +436,7 @@ class TestAggregatedTimeSerie(base.BaseTestCase): (datetime.datetime( 2014, 1, 1, 12, 9, 0 ), 60.0, 2.0), - ], output) + ], list(output)) def test_aggregated_different_archive_overlap_edge_missing1(self): tsc1 = {'sampling': 60, 'size': 10, 'agg': 'mean'} @@ -483,7 +484,7 @@ class TestAggregatedTimeSerie(base.BaseTestCase): (datetime.datetime( 2014, 1, 1, 12, 6, 0 ), 60.0, 19.0), - ], output) + ], list(output)) def test_aggregated_different_archive_overlap_edge_missing2(self): tsc1 = {'sampling': 60, 'size': 10, 'agg': 'mean'} @@ -508,7 +509,7 @@ class TestAggregatedTimeSerie(base.BaseTestCase): (datetime.datetime( 2014, 1, 1, 12, 3, 0 ), 60.0, 4.0), - ], output) + ], list(output)) def test_fetch(self): ts = {'sampling': 60, 'size': 10, 'agg': 'mean'} @@ -550,7 +551,7 @@ class TestAggregatedTimeSerie(base.BaseTestCase): (datetime.datetime(2014, 1, 1, 12, 4), 60.0, 7.0), (datetime.datetime(2014, 1, 1, 12, 5), 60.0, 8.0), (datetime.datetime(2014, 1, 1, 12, 6), 60.0, 4.0) - ], ts['return'].fetch()) + ], list(ts['return'].fetch())) self.assertEqual([ (datetime.datetime(2014, 1, 1, 12, 1), 60.0, 5.5), @@ -559,7 +560,7 @@ class TestAggregatedTimeSerie(base.BaseTestCase): (datetime.datetime(2014, 1, 1, 12, 4), 60.0, 7.0), (datetime.datetime(2014, 1, 1, 12, 5), 60.0, 8.0), (datetime.datetime(2014, 1, 1, 12, 6), 60.0, 4.0) - ], ts['return'].fetch(datetime.datetime(2014, 1, 1, 12, 0, 0))) + ], list(ts['return'].fetch(datetime.datetime(2014, 1, 1, 12, 0, 0)))) def test_aggregated_some_overlap_with_fill_zero(self): tsc1 = {'sampling': 60, 'size': 10, 'agg': 'mean'} @@ -601,7 +602,7 @@ class TestAggregatedTimeSerie(base.BaseTestCase): (datetime.datetime(2014, 1, 1, 12, 6, 0), 60.0, 9.5), (datetime.datetime(2014, 1, 1, 12, 7, 0), 60.0, 2.5), (datetime.datetime(2014, 1, 1, 12, 8, 0), 60.0, 1.5), - ], output) + ], list(output)) def 
test_aggregated_some_overlap_with_fill_null(self): tsc1 = {'sampling': 60, 'size': 10, 'agg': 'mean'} @@ -643,7 +644,7 @@ class TestAggregatedTimeSerie(base.BaseTestCase): (datetime.datetime(2014, 1, 1, 12, 6, 0), 60.0, 9.5), (datetime.datetime(2014, 1, 1, 12, 7, 0), 60.0, 5.0), (datetime.datetime(2014, 1, 1, 12, 8, 0), 60.0, 3.0), - ], output) + ], list(output)) def test_aggregate_no_points_with_fill_zero(self): tsc1 = {'sampling': 60, 'size': 10, 'agg': 'mean'} @@ -679,7 +680,7 @@ class TestAggregatedTimeSerie(base.BaseTestCase): (datetime.datetime(2014, 1, 1, 12, 4, 0), 60.0, 2.5), (datetime.datetime(2014, 1, 1, 12, 7, 0), 60.0, 2.5), (datetime.datetime(2014, 1, 1, 12, 8, 0), 60.0, 1.5), - ], output) + ], list(output)) def test_fetch_agg_pct(self): ts = {'sampling': 1, 'size': 3600 * 24, 'agg': '90pct'} @@ -701,7 +702,7 @@ class TestAggregatedTimeSerie(base.BaseTestCase): ), 1.0, 4) ] - self.assertEqual(len(reference), len(result)) + self.assertEqual(len(reference), len(list(result))) for ref, res in zip(reference, result): self.assertEqual(ref[0], res[0]) @@ -723,7 +724,7 @@ class TestAggregatedTimeSerie(base.BaseTestCase): ), 1.0, 99.4) ] - self.assertEqual(len(reference), len(result)) + self.assertEqual(len(reference), len(list(result))) for ref, res in zip(reference, result): self.assertEqual(ref[0], res[0]) @@ -754,7 +755,7 @@ class TestAggregatedTimeSerie(base.BaseTestCase): (datetime.datetime(2014, 1, 1, 11, 47, 0, 200000), 0.2, 50.0), (datetime.datetime(2014, 1, 1, 11, 48, 0, 400000), 0.2, 4.0), (datetime.datetime(2014, 1, 1, 11, 48, 0, 800000), 0.2, 4.5) - ], ts['return'].fetch()) + ], list(ts['return'].fetch())) def test_fetch_agg_std(self): # NOTE (gordc): this is a good test to ensure we drop NaN entries @@ -777,7 +778,7 @@ class TestAggregatedTimeSerie(base.BaseTestCase): (datetime.datetime( 2014, 1, 1, 12, 2, 0 ), 60.0, 9.8994949366116654), - ], ts['return'].fetch(datetime.datetime(2014, 1, 1, 12, 0, 0))) + ], 
list(ts['return'].fetch(datetime.datetime(2014, 1, 1, 12, 0, 0)))) tsb.set_values([(datetime.datetime(2014, 1, 1, 12, 2, 13), 110)], before_truncate_callback=functools.partial( @@ -790,7 +791,7 @@ class TestAggregatedTimeSerie(base.BaseTestCase): (datetime.datetime( 2014, 1, 1, 12, 2, 0 ), 60.0, 59.304300012730948), - ], ts['return'].fetch(datetime.datetime(2014, 1, 1, 12, 0, 0))) + ], list(ts['return'].fetch(datetime.datetime(2014, 1, 1, 12, 0, 0)))) def test_fetch_agg_max(self): ts = {'sampling': 60, 'size': 60, 'agg': 'max'} @@ -814,7 +815,7 @@ class TestAggregatedTimeSerie(base.BaseTestCase): (datetime.datetime( 2014, 1, 1, 12, 2, 0 ), 60.0, 15), - ], ts['return'].fetch(datetime.datetime(2014, 1, 1, 12, 0, 0))) + ], list(ts['return'].fetch(datetime.datetime(2014, 1, 1, 12, 0, 0)))) tsb.set_values([(datetime.datetime(2014, 1, 1, 12, 2, 13), 110)], before_truncate_callback=functools.partial( @@ -830,7 +831,7 @@ class TestAggregatedTimeSerie(base.BaseTestCase): (datetime.datetime( 2014, 1, 1, 12, 2, 0 ), 60.0, 110), - ], ts['return'].fetch(datetime.datetime(2014, 1, 1, 12, 0, 0))) + ], list(ts['return'].fetch(datetime.datetime(2014, 1, 1, 12, 0, 0)))) def test_serialize(self): ts = {'sampling': 0.5, 'agg': 'mean'} @@ -865,7 +866,7 @@ class TestAggregatedTimeSerie(base.BaseTestCase): (datetime.datetime(2014, 1, 1, 12, i, i + 1), float(i + 1)) ], before_truncate_callback=functools.partial( self._resample_and_merge, agg_dict=ts)) - self.assertEqual(i, len(ts['return'].fetch())) + self.assertEqual(i, len(list(ts['return'].fetch()))) def test_back_window(self): """Back window testing. 
@@ -897,7 +898,7 @@ class TestAggregatedTimeSerie(base.BaseTestCase): 2014, 1, 1, 12, 0, 3 ), 1.0, 2.5), ], - ts['return'].fetch()) + list(ts['return'].fetch())) try: tsb.set_values([ @@ -944,7 +945,7 @@ class TestAggregatedTimeSerie(base.BaseTestCase): 2014, 1, 1, 12, 0, 3 ), 1.0, 2.5), ], - ts['return'].fetch()) + list(ts['return'].fetch())) tsb.set_values([ (datetime.datetime(2014, 1, 1, 12, 0, 2, 99), 9), @@ -964,7 +965,7 @@ class TestAggregatedTimeSerie(base.BaseTestCase): 2014, 1, 1, 12, 0, 3 ), 1.0, 2.5), ], - ts['return'].fetch()) + list(ts['return'].fetch())) tsb.set_values([ (datetime.datetime(2014, 1, 1, 12, 0, 2, 99), 9), @@ -985,7 +986,7 @@ class TestAggregatedTimeSerie(base.BaseTestCase): 2014, 1, 1, 12, 0, 3 ), 1.0, 3.5), ], - ts['return'].fetch()) + list(ts['return'].fetch())) def test_aggregated_nominal(self): tsc1 = {'sampling': 60, 'size': 10, 'agg': 'mean'} @@ -1084,7 +1085,7 @@ class TestAggregatedTimeSerie(base.BaseTestCase): (datetime.datetime(2014, 1, 1, 12, 4), 60.0, 5.5), (datetime.datetime(2014, 1, 1, 12, 5), 60.0, 6.75), (datetime.datetime(2014, 1, 1, 12, 6), 60.0, 2.0), - ], output) + ], list(output)) def test_aggregated_partial_overlap(self): tsc1 = {'sampling': 1, 'size': 86400, 'agg': 'mean'} @@ -1118,7 +1119,7 @@ class TestAggregatedTimeSerie(base.BaseTestCase): (datetime.datetime( 2015, 12, 3, 13, 22, 15 ), 1.0, 11.0), - ], output) + ], list(output)) dtfrom = datetime.datetime(2015, 12, 3, 13, 17, 0) dtto = datetime.datetime(2015, 12, 3, 13, 25, 0) @@ -1147,7 +1148,7 @@ class TestAggregatedTimeSerie(base.BaseTestCase): (datetime.datetime( 2015, 12, 3, 13, 24, 15 ), 1.0, 10.0), - ], output) + ], list(output)) # By default we require 100% of point that overlap # so that fail if from or to is set @@ -1178,7 +1179,7 @@ class TestAggregatedTimeSerie(base.BaseTestCase): (datetime.datetime( 2015, 12, 3, 13, 22, 15 ), 1.0, 11.0), - ], output) + ], list(output)) output = carbonara.AggregatedTimeSerie.aggregated( [tsc1['return'], 
tsc2['return']], to_timestamp=dtto, @@ -1197,7 +1198,7 @@ class TestAggregatedTimeSerie(base.BaseTestCase): (datetime.datetime( 2015, 12, 3, 13, 24, 15 ), 1.0, 10.0), - ], output) + ], list(output)) def test_split_key(self): self.assertEqual( -- GitLab From 4e8089eb77216695d69080b9e148f18d1210c7a8 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Thu, 8 Jun 2017 20:52:39 +0200 Subject: [PATCH 0797/1483] Install pifpaf with ceph extra Some extra dependency in pifpaf are now set as flavors, such as ceph. --- setup.cfg | 2 +- tox.ini | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/setup.cfg b/setup.cfg index fca2a973..ad6845a4 100644 --- a/setup.cfg +++ b/setup.cfg @@ -64,7 +64,7 @@ doc = Jinja2 reno>=1.6.2 test = - pifpaf>=1.0.1 + pifpaf[ceph]>=1.0.1 gabbi>=1.30.0 coverage>=3.6 fixtures diff --git a/tox.ini b/tox.ini index d7b0706d..4cbd9ad3 100644 --- a/tox.ini +++ b/tox.ini @@ -73,7 +73,7 @@ setenv = GNOCCHI_VARIANT=test,mysql,ceph,ceph_recommended_lib deps = gnocchi[{env:GNOCCHI_VARIANT}]>=3.1,<3.2 alembic<0.9.0 gnocchiclient>=2.8.0 - pifpaf>=0.13 + pifpaf[ceph]>=0.13 commands = pifpaf --env-prefix INDEXER run mysql -- pifpaf --env-prefix STORAGE run ceph {toxinidir}/run-upgrade-tests.sh {posargs} [testenv:bashate] -- GitLab From e04bf2f04ab14b8430f59d5d0f9d7ec7dcedf13d Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Thu, 8 Jun 2017 09:45:54 +0200 Subject: [PATCH 0798/1483] travis: remove install step MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The standard Travis procedure installs what's in requirements.txt whereas we don't care. Let's win a few seconds by doing… nothing instead. --- .travis.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.travis.yml b/.travis.yml index 9b2b56fb..47fc96fb 100644 --- a/.travis.yml +++ b/.travis.yml @@ -33,6 +33,7 @@ before_script: ;; esac - docker build --tag gnocchi-ci --file=tools/travis-ci-setup.dockerfile . 
+install: script: - docker run -v ~/.cache/pip:/home/tester/.cache/pip -v $(pwd):/home/tester/src gnocchi-ci tox -e ${TARGET} -- GitLab From 2ce477320864df3b243e22642915b25480850e08 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Tue, 6 Jun 2017 17:47:58 +0200 Subject: [PATCH 0799/1483] tests: Update travis/tox configuration Travis ressources for opensource project is limited This change will limit the number of jobs per PR in parallel, allowing more PR to be processed at the same time. And remove the build of the image of each commit (save 3-4 minutes). It pulls the image from dockerhub (I will create a new repo to have this image updated automatically). It's run in parallel tests of differents storage system. The overall time of testing is still ~40 minutes, but now we use only 3 jobs instead of 10. --- .travis.yml | 4 ++-- run-tests.sh | 8 +++++++ tools/travis-ci-setup.dockerfile | 41 -------------------------------- tox.ini | 8 ++++--- 4 files changed, 15 insertions(+), 46 deletions(-) delete mode 100644 tools/travis-ci-setup.dockerfile diff --git a/.travis.yml b/.travis.yml index 47fc96fb..956c6f4a 100644 --- a/.travis.yml +++ b/.travis.yml @@ -32,10 +32,10 @@ before_script: git fetch --unshallow --tags; ;; esac - - docker build --tag gnocchi-ci --file=tools/travis-ci-setup.dockerfile . 
install: + - docker pull gnocchixyz/ci-tools:latest script: - - docker run -v ~/.cache/pip:/home/tester/.cache/pip -v $(pwd):/home/tester/src gnocchi-ci tox -e ${TARGET} + - docker run -v ~/.cache/pip:/home/tester/.cache/pip -v $(pwd):/home/tester/src gnocchixyz/ci-tools:latest tox -e ${TARGET} notifications: email: false diff --git a/run-tests.sh b/run-tests.sh index 0e6d11f8..606f6375 100755 --- a/run-tests.sh +++ b/run-tests.sh @@ -7,6 +7,7 @@ do export GNOCCHI_TEST_STORAGE_DRIVER=$storage for indexer in ${GNOCCHI_TEST_INDEXER_DRIVERS} do + ( case $GNOCCHI_TEST_STORAGE_DRIVER in ceph|redis) pifpaf run $GNOCCHI_TEST_STORAGE_DRIVER -- pifpaf -g GNOCCHI_INDEXER_URL run $indexer -- ./tools/pretty_tox.sh $* @@ -27,5 +28,12 @@ do pifpaf -g GNOCCHI_INDEXER_URL run $indexer -- ./tools/pretty_tox.sh $* ;; esac + # NOTE(sileht): Start all storage tests at once + ) & done + # NOTE(sileht): Wait all storage tests + wait + # TODO(sileht): the output can be a mess with this + # Create a less verbose testrun output (with dot like nose ?) + # merge all subunit output and print it in after_script in travis done diff --git a/tools/travis-ci-setup.dockerfile b/tools/travis-ci-setup.dockerfile deleted file mode 100644 index be2179bc..00000000 --- a/tools/travis-ci-setup.dockerfile +++ /dev/null @@ -1,41 +0,0 @@ -FROM ubuntu:16.04 -ENV GNOCCHI_SRC /home/tester/src -ENV DEBIAN_FRONTEND noninteractive - -RUN apt-get update -y && apt-get install -qy \ - locales \ - git \ - wget \ - nodejs \ - nodejs-legacy \ - npm \ - python \ - python3 \ - python-dev \ - python3-dev \ - python-pip \ - redis-server \ - build-essential \ - libffi-dev \ - libpq-dev \ - postgresql \ - mysql-client \ - mysql-server \ - librados-dev \ - liberasurecode-dev \ - ceph \ - && apt-get clean -y - -#NOTE(sileht): really no utf-8 in 2017 !? 
-ENV LANG en_US.UTF-8 -RUN update-locale -RUN locale-gen $LANG - -#NOTE(sileht): Upgrade python dev tools -RUN pip install -U pip tox virtualenv - -RUN useradd -ms /bin/bash tester -RUN mkdir $GNOCCHI_SRC -RUN chown -R tester: $GNOCCHI_SRC -USER tester -WORKDIR $GNOCCHI_SRC diff --git a/tox.ini b/tox.ini index 4cbd9ad3..48fb8124 100644 --- a/tox.ini +++ b/tox.ini @@ -26,6 +26,10 @@ setenv = redis: GNOCCHI_STORAGE_DEPS=redis s3: GNOCCHI_STORAGE_DEPS=s3 + GNOCCHI_INDEXER_DEPS=mysql,postgresql + mysql: GNOCCHI_INDEXER_DEPS=mysql + postgresql: GNOCCHI_INDEXER_DEPS=postgresql + # FIXME(sileht): pbr doesn't support url in setup.cfg extras, so we do this crap GNOCCHI_TEST_TARBALLS=http://tarballs.openstack.org/swift/swift-master.tar.gz#egg=swift ceph: GNOCCHI_TEST_TARBALLS= @@ -36,9 +40,7 @@ setenv = # NOTE(jd) Install redis as a test dependency since it is used as a # coordination driver in functional tests (--coordination-driver is passed to # pifpaf) -deps = .[test,redis] - postgresql: .[postgresql,{env:GNOCCHI_STORAGE_DEPS}] - mysql: .[mysql,{env:GNOCCHI_STORAGE_DEPS}] +deps = .[test,redis,{env:GNOCCHI_STORAGE_DEPS:},{env:GNOCCHI_INDEXER_DEPS:}] {env:GNOCCHI_TEST_TARBALLS:} commands = doc8 --ignore-path doc/source/rest.rst doc/source -- GitLab From fb50a7b61db921973ae9af9f830a11a2228e8a18 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Thu, 8 Jun 2017 09:53:56 +0200 Subject: [PATCH 0800/1483] Remove gnocchi-config-generator from tox It's already launched and tested by gnocchi.tests.test_bin, and the current runs output the logfile to stdout which pollutes test output. 
--- tox.ini | 1 - 1 file changed, 1 deletion(-) diff --git a/tox.ini b/tox.ini index 48fb8124..63da6805 100644 --- a/tox.ini +++ b/tox.ini @@ -44,7 +44,6 @@ deps = .[test,redis,{env:GNOCCHI_STORAGE_DEPS:},{env:GNOCCHI_INDEXER_DEPS:}] {env:GNOCCHI_TEST_TARBALLS:} commands = doc8 --ignore-path doc/source/rest.rst doc/source - gnocchi-config-generator {toxinidir}/run-tests.sh {posargs} {toxinidir}/run-func-tests.sh {posargs} -- GitLab From 64e9f56c0a62d6e327e19de5203ffeaa2792ccf9 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Fri, 9 Jun 2017 18:57:09 +0200 Subject: [PATCH 0801/1483] tests: speed-up data generation Calling 28800 times `date' is pretty slow. Let's generate that data in a snap using Python. This makes the job twice faster on my laptop. --- run-upgrade-tests.sh | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) diff --git a/run-upgrade-tests.sh b/run-upgrade-tests.sh index be2d188b..726554cd 100755 --- a/run-upgrade-tests.sh +++ b/run-upgrade-tests.sh @@ -3,8 +3,6 @@ set -e export GNOCCHI_DATA=$(mktemp -d -t gnocchi.XXXX) -GDATE=$((which gdate >/dev/null && echo gdate) || echo date) - old_version=$(pip freeze | sed -n '/gnocchi==/s/.*==\(.*\)/\1/p') RESOURCE_IDS=( @@ -37,15 +35,11 @@ inject_data() { { measures_sep="" - MEASURES=$(for i in $(seq 0 10 288000); do - now=$($GDATE --iso-8601=s -d "-${i}minute") ; value=$((RANDOM % 13 + 52)) - echo -n "$measures_sep {\"timestamp\": \"$now\", \"value\": $value }" - measures_sep="," - done) + MEASURES=$(python -c 'import datetime, random, json; now = datetime.datetime.utcnow(); print(json.dumps([{"timestamp": (now - datetime.timedelta(seconds=i)).isoformat(), "value": random.uniform(-100000, 100000)} for i in range(0, 288000, 10)]))') echo -n '{' resource_sep="" for resource_id in ${RESOURCE_IDS[@]} $RESOURCE_ID_EXT; do - echo -n "$resource_sep \"$resource_id\": { \"metric\": [ $MEASURES ] }" + echo -n "$resource_sep \"$resource_id\": { \"metric\": $MEASURES }" resource_sep="," done echo -n '}' 
-- GitLab From d7f1f0300f64fcea87cf510611dbcb0dc513f5a5 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Thu, 8 Jun 2017 13:40:28 +0200 Subject: [PATCH 0802/1483] Merge pep8 and bashate jobs That'll simplify the dev workflow and Travis jobs number. --- .travis.yml | 1 - tox.ini | 10 ++++------ 2 files changed, 4 insertions(+), 7 deletions(-) diff --git a/.travis.yml b/.travis.yml index 956c6f4a..7a6d7ea1 100644 --- a/.travis.yml +++ b/.travis.yml @@ -8,7 +8,6 @@ cache: directories: - ~/.cache/pip env: - - TARGET: bashate - TARGET: pep8 - TARGET: docs - TARGET: docs-gnocchi.xyz diff --git a/tox.ini b/tox.ini index 63da6805..f795a637 100644 --- a/tox.ini +++ b/tox.ini @@ -1,6 +1,6 @@ [tox] minversion = 2.4 -envlist = py{35,27}-{postgresql,mysql}{,-file,-swift,-ceph,-s3},pep8,bashate +envlist = py{35,27}-{postgresql,mysql}{,-file,-swift,-ceph,-s3},pep8 [testenv] usedevelop = True @@ -77,14 +77,12 @@ deps = gnocchi[{env:GNOCCHI_VARIANT}]>=3.1,<3.2 pifpaf[ceph]>=0.13 commands = pifpaf --env-prefix INDEXER run mysql -- pifpaf --env-prefix STORAGE run ceph {toxinidir}/run-upgrade-tests.sh {posargs} -[testenv:bashate] -deps = bashate -commands = bashate -v devstack/plugin.sh -whitelist_externals = bash - [testenv:pep8] deps = hacking>=0.12,<0.13 + bashate +whitelist_externals = bash commands = flake8 + bashate -v devstack/plugin.sh [testenv:py27-gate] setenv = OS_TEST_PATH=gnocchi/tests/functional_live -- GitLab From 8e2480016e56427ccca7a0d5ec247f32fb65a9b9 Mon Sep 17 00:00:00 2001 From: Jaime Alvarez Date: Wed, 31 May 2017 05:56:54 +0000 Subject: [PATCH 0803/1483] Return new metrics when creating metrics for a resource Fixes #12 --- gnocchi/rest/__init__.py | 10 +++++++--- gnocchi/tests/functional/gabbits/history.yaml | 5 ++++- gnocchi/tests/functional/gabbits/resource.yaml | 10 ++++++++-- gnocchi/tests/test_rest.py | 4 ++-- 4 files changed, 21 insertions(+), 8 deletions(-) diff --git a/gnocchi/rest/__init__.py b/gnocchi/rest/__init__.py index af9eb9db..23faee66 100644 
--- a/gnocchi/rest/__init__.py +++ b/gnocchi/rest/__init__.py @@ -641,7 +641,7 @@ class NamedMetricController(rest.RestController): else: abort(404, indexer.NoSuchResource(self.resource_id)) - @pecan.expose() + @pecan.expose('json') def post(self): resource = pecan.request.indexer.get_resource( self.resource_type, self.resource_id) @@ -650,8 +650,10 @@ class NamedMetricController(rest.RestController): enforce("update resource", resource) metrics = deserialize_and_validate(MetricsSchema) try: - pecan.request.indexer.update_resource( - self.resource_type, self.resource_id, metrics=metrics, + r = pecan.request.indexer.update_resource( + self.resource_type, + self.resource_id, + metrics=metrics, append_metrics=True, create_revision=False) except (indexer.NoSuchMetric, @@ -663,6 +665,8 @@ class NamedMetricController(rest.RestController): except indexer.NoSuchResource as e: abort(404, e) + return r.metrics + @pecan.expose('json') def get_all(self): resource = pecan.request.indexer.get_resource( diff --git a/gnocchi/tests/functional/gabbits/history.yaml b/gnocchi/tests/functional/gabbits/history.yaml index 0bdc47fd..2f3ff25f 100644 --- a/gnocchi/tests/functional/gabbits/history.yaml +++ b/gnocchi/tests/functional/gabbits/history.yaml @@ -139,7 +139,10 @@ tests: data: foobar: archive_policy_name: low - status: 204 + status: 200 + response_json_paths: + $[/name][1].name: foobar + $[/name][1].resource_id: f93450f2-d8a5-4d67-9985-02511241e7d1 - name: list all resources with history no change after metrics creation GET: /v1/resource/generic diff --git a/gnocchi/tests/functional/gabbits/resource.yaml b/gnocchi/tests/functional/gabbits/resource.yaml index a9d7e040..0b69df21 100644 --- a/gnocchi/tests/functional/gabbits/resource.yaml +++ b/gnocchi/tests/functional/gabbits/resource.yaml @@ -667,11 +667,14 @@ tests: x-user-id: 0fbb231484614b1a80131fc22f6afc9c x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea content-type: application/json - status: 204 + status: 200 data: 
electron.spin: archive_policy_name: medium response_headers: + response_json_paths: + $[/name][1].name: electron.spin + $[/name][1].resource_id: 85c44741-cc60-4033-804e-2d3098c7d2e9 - name: post metric at generic with empty definition POST: $LAST_URL @@ -691,9 +694,12 @@ tests: x-user-id: 0fbb231484614b1a80131fc22f6afc9c x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea content-type: application/json - status: 204 + status: 200 data: disk.io.rate: {} + response_json_paths: + $[/name][1].name: disk.io.rate + $[/name][1].resource_id: 85c44741-cc60-4033-804e-2d3098c7d2e9 - name: duplicate metrics at generic POST: $LAST_URL diff --git a/gnocchi/tests/test_rest.py b/gnocchi/tests/test_rest.py index f9bb5ffc..d6b78538 100644 --- a/gnocchi/tests/test_rest.py +++ b/gnocchi/tests/test_rest.py @@ -978,7 +978,7 @@ class ResourceTest(RestTest): metrics = {'foo': {'archive_policy_name': "high"}} self.app.post_json("/v1/resource/" + self.resource_type + "/" + self.attributes['id'] + "/metric", - params=metrics, status=204) + params=metrics, status=200) metrics = {'foo': {'archive_policy_name': "low"}} self.app.post_json("/v1/resource/" + self.resource_type + "/" + self.attributes['id'] @@ -999,7 +999,7 @@ class ResourceTest(RestTest): metrics = {'foo': {'archive_policy_name': "high"}} self.app.post_json("/v1/resource/" + self.resource_type + "/" + self.attributes['id'] + "/metric", - params=metrics, status=204) + params=metrics, status=200) result = self.app.get("/v1/resource/" + self.resource_type + "/" + self.attributes['id']) -- GitLab From ad2484d61cab955cdd6f8ae5740ea4049b710ba0 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Wed, 17 May 2017 09:45:15 +0200 Subject: [PATCH 0804/1483] Switch from oslo.log to daiquiri --- gnocchi/cli.py | 4 +- gnocchi/gnocchi-config-generator.conf | 1 - gnocchi/indexer/sqlalchemy.py | 4 +- gnocchi/opts.py | 42 +++++++++++++++++++ gnocchi/rest/app.py | 4 +- gnocchi/service.py | 35 ++++++++++++---- gnocchi/statsd.py | 4 +- 
gnocchi/storage/__init__.py | 5 ++- gnocchi/storage/_carbonara.py | 4 +- gnocchi/storage/common/ceph.py | 5 ++- gnocchi/storage/common/s3.py | 4 +- gnocchi/storage/common/swift.py | 6 +-- gnocchi/storage/incoming/_carbonara.py | 4 +- gnocchi/utils.py | 6 ++- .../oslo.log-removal-69a17397b10bc2bb.yaml | 5 +++ requirements.txt | 2 +- 16 files changed, 102 insertions(+), 33 deletions(-) create mode 100644 releasenotes/notes/oslo.log-removal-69a17397b10bc2bb.yaml diff --git a/gnocchi/cli.py b/gnocchi/cli.py index b95027b6..93192c76 100644 --- a/gnocchi/cli.py +++ b/gnocchi/cli.py @@ -19,9 +19,9 @@ import time import cotyledon from cotyledon import oslo_config_glue +import daiquiri from futurist import periodics from oslo_config import cfg -from oslo_log import log import six import tenacity import tooz @@ -36,7 +36,7 @@ from gnocchi.storage import incoming from gnocchi import utils -LOG = log.getLogger(__name__) +LOG = daiquiri.getLogger(__name__) def config_generator(): diff --git a/gnocchi/gnocchi-config-generator.conf b/gnocchi/gnocchi-config-generator.conf index df6e9880..faf947e0 100644 --- a/gnocchi/gnocchi-config-generator.conf +++ b/gnocchi/gnocchi-config-generator.conf @@ -2,7 +2,6 @@ wrap_width = 79 namespace = gnocchi namespace = oslo.db -namespace = oslo.log namespace = oslo.middleware.cors namespace = oslo.middleware.healthcheck namespace = oslo.middleware.http_proxy_to_wsgi diff --git a/gnocchi/indexer/sqlalchemy.py b/gnocchi/indexer/sqlalchemy.py index 87a3f055..ecad2b65 100644 --- a/gnocchi/indexer/sqlalchemy.py +++ b/gnocchi/indexer/sqlalchemy.py @@ -22,11 +22,11 @@ import uuid from alembic import migration from alembic import operations +import daiquiri import oslo_db.api from oslo_db import exception from oslo_db.sqlalchemy import enginefacade from oslo_db.sqlalchemy import utils as oslo_db_utils -from oslo_log import log try: import psycopg2 except ImportError: @@ -59,7 +59,7 @@ ResourceType = base.ResourceType _marker = indexer._marker -LOG = 
log.getLogger(__name__) +LOG = daiquiri.getLogger(__name__) def _retry_on_exceptions(exc): diff --git a/gnocchi/opts.py b/gnocchi/opts.py index 8cf8be40..092f3d95 100644 --- a/gnocchi/opts.py +++ b/gnocchi/opts.py @@ -58,8 +58,50 @@ for opt in _INCOMING_OPTS: opt.default = '${storage.%s}' % opt.name +_cli_options = ( + cfg.BoolOpt( + 'debug', + short='d', + default=False, + help='If set to true, the logging level will be set to DEBUG.'), + cfg.BoolOpt( + 'verbose', + short='v', + default=True, + help='If set to true, the logging level will be set to INFO.'), + cfg.StrOpt( + "log-dir", + help="Base directory for log files. " + "If not set, logging will go to stderr."), + cfg.StrOpt( + 'log-file', + metavar='PATH', + help='(Optional) Name of log file to send logging output to. ' + 'If no default is set, logging will go to stderr as ' + 'defined by use_stderr.'), +) + + def list_opts(): return [ + ("DEFAULT", _cli_options + ( + cfg.BoolOpt( + 'use-syslog', + default=False, + help='Use syslog for logging.'), + cfg.BoolOpt( + 'use-journal', + default=False, + help='Enable journald for logging. ' + 'If running in a systemd environment you may wish ' + 'to enable journal support. 
Doing so will use the ' + 'journal native protocol which includes structured ' + 'metadata in addition to log messages.'), + cfg.StrOpt( + 'syslog-log-facility', + default='user', + help='Syslog facility to receive log lines.') + )), ("indexer", gnocchi.indexer.OPTS), ("metricd", ( cfg.IntOpt('workers', min=1, diff --git a/gnocchi/rest/app.py b/gnocchi/rest/app.py index 02022bd9..ff91f5b2 100644 --- a/gnocchi/rest/app.py +++ b/gnocchi/rest/app.py @@ -18,7 +18,7 @@ import pkg_resources import uuid import warnings -from oslo_log import log +import daiquiri from oslo_middleware import cors from oslo_policy import policy from paste import deploy @@ -34,7 +34,7 @@ from gnocchi import service from gnocchi import storage as gnocchi_storage -LOG = log.getLogger(__name__) +LOG = daiquiri.getLogger(__name__) # Register our encoder by default for everything diff --git a/gnocchi/service.py b/gnocchi/service.py index 26b8e7dd..985d2f88 100644 --- a/gnocchi/service.py +++ b/gnocchi/service.py @@ -14,11 +14,12 @@ # implied. # See the License for the specific language governing permissions and # limitations under the License. 
+import logging import os +import daiquiri from oslo_config import cfg from oslo_db import options as db_options -from oslo_log import log from oslo_policy import opts as policy_opts import pbr.version from six.moves.urllib import parse as urlparse @@ -27,7 +28,7 @@ from gnocchi import archive_policy from gnocchi import opts from gnocchi import utils -LOG = log.getLogger(__name__) +LOG = daiquiri.getLogger(__name__) def prepare_service(args=None, conf=None, @@ -36,7 +37,6 @@ def prepare_service(args=None, conf=None, conf = cfg.ConfigOpts() opts.set_defaults() # FIXME(jd) Use the pkg_entry info to register the options of these libs - log.register_options(conf) db_options.set_defaults(conf) policy_opts.set_defaults(conf) @@ -45,12 +45,36 @@ def prepare_service(args=None, conf=None, conf.register_opts(list(options), group=None if group == "DEFAULT" else group) + conf.register_cli_opts(opts._cli_options) + conf.set_default("workers", utils.get_default_workers(), group="metricd") conf(args, project='gnocchi', validate_default_values=True, default_config_files=default_config_files, version=pbr.version.VersionInfo('gnocchi').version_string()) + if conf.log_dir or conf.log_file: + outputs = [daiquiri.output.File(filename=conf.log_file, + directory=conf.log_dir)] + else: + outputs = [daiquiri.output.STDERR] + + if conf.use_syslog: + outputs.append( + daiquiri.output.Syslog(facilty=conf.syslog_log_faciltity)) + + if conf.use_journal: + outputs.append(daiquiri.output.Journal()) + + daiquiri.setup(outputs=outputs) + if conf.debug: + level = logging.DEBUG + elif conf.verbose: + level = logging.INFO + else: + level = logging.WARNING + logging.getLogger("gnocchi").setLevel(level) + # HACK(jd) I'm not happy about that, fix AP class to handle a conf object? 
archive_policy.ArchivePolicy.DEFAULT_AGGREGATION_METHODS = ( conf.archive_policy.default_aggregation_methods @@ -85,9 +109,6 @@ def prepare_service(args=None, conf=None, 'rest', 'policy.json')) conf.set_default('policy_file', cfg_path, group='oslo_policy') - log.set_defaults(default_log_levels=log.get_default_log_levels() + - ["passlib.utils.compat=INFO"]) - log.setup(conf, 'gnocchi') - conf.log_opt_values(LOG, log.DEBUG) + conf.log_opt_values(LOG, logging.DEBUG) return conf diff --git a/gnocchi/statsd.py b/gnocchi/statsd.py index 267df497..76b28244 100644 --- a/gnocchi/statsd.py +++ b/gnocchi/statsd.py @@ -19,8 +19,8 @@ try: import asyncio except ImportError: import trollius as asyncio +import daiquiri from oslo_config import cfg -from oslo_log import log import six from gnocchi import indexer @@ -29,7 +29,7 @@ from gnocchi import storage from gnocchi import utils -LOG = log.getLogger(__name__) +LOG = daiquiri.getLogger(__name__) class Stats(object): diff --git a/gnocchi/storage/__init__.py b/gnocchi/storage/__init__.py index cf1b52f3..62613255 100644 --- a/gnocchi/storage/__init__.py +++ b/gnocchi/storage/__init__.py @@ -14,8 +14,9 @@ # License for the specific language governing permissions and limitations # under the License. 
import operator + +import daiquiri from oslo_config import cfg -from oslo_log import log from stevedore import driver from gnocchi import exceptions @@ -28,7 +29,7 @@ OPTS = [ help='Storage driver to use'), ] -LOG = log.getLogger(__name__) +LOG = daiquiri.getLogger(__name__) class Measure(object): diff --git a/gnocchi/storage/_carbonara.py b/gnocchi/storage/_carbonara.py index e2ffbec0..56b4d60d 100644 --- a/gnocchi/storage/_carbonara.py +++ b/gnocchi/storage/_carbonara.py @@ -20,9 +20,9 @@ import itertools import operator from concurrent import futures +import daiquiri import iso8601 from oslo_config import cfg -from oslo_log import log import six import six.moves @@ -43,7 +43,7 @@ OPTS = [ ] -LOG = log.getLogger(__name__) +LOG = daiquiri.getLogger(__name__) class CorruptionError(ValueError): diff --git a/gnocchi/storage/common/ceph.py b/gnocchi/storage/common/ceph.py index b1c9b673..b649cf00 100644 --- a/gnocchi/storage/common/ceph.py +++ b/gnocchi/storage/common/ceph.py @@ -14,9 +14,10 @@ import errno -from oslo_log import log +import daiquiri -LOG = log.getLogger(__name__) + +LOG = daiquiri.getLogger(__name__) for RADOS_MODULE_NAME in ('cradox', 'rados'): diff --git a/gnocchi/storage/common/s3.py b/gnocchi/storage/common/s3.py index eb6c0660..d7969f2a 100644 --- a/gnocchi/storage/common/s3.py +++ b/gnocchi/storage/common/s3.py @@ -13,8 +13,8 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
+import daiquiri -from oslo_log import log import tenacity try: import boto3 @@ -25,7 +25,7 @@ except ImportError: from gnocchi import utils -LOG = log.getLogger(__name__) +LOG = daiquiri.getLogger(__name__) def retry_if_operationaborted(exception): diff --git a/gnocchi/storage/common/swift.py b/gnocchi/storage/common/swift.py index 5d4ff47e..2009b4a3 100644 --- a/gnocchi/storage/common/swift.py +++ b/gnocchi/storage/common/swift.py @@ -11,9 +11,7 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. - - -from oslo_log import log +import daiquiri from six.moves.urllib.parse import quote try: @@ -26,7 +24,7 @@ except ImportError: from gnocchi import storage from gnocchi import utils -LOG = log.getLogger(__name__) +LOG = daiquiri.getLogger(__name__) @utils.retry diff --git a/gnocchi/storage/incoming/_carbonara.py b/gnocchi/storage/incoming/_carbonara.py index f6a8b8b8..ff7eb4ea 100644 --- a/gnocchi/storage/incoming/_carbonara.py +++ b/gnocchi/storage/incoming/_carbonara.py @@ -18,14 +18,14 @@ from concurrent import futures import itertools import struct -from oslo_log import log +import daiquiri import pandas import six from gnocchi.storage import incoming from gnocchi import utils -LOG = log.getLogger(__name__) +LOG = daiquiri.getLogger(__name__) _NUM_WORKERS = utils.get_default_workers() diff --git a/gnocchi/utils.py b/gnocchi/utils.py index b7e92263..f81d93e0 100644 --- a/gnocchi/utils.py +++ b/gnocchi/utils.py @@ -23,16 +23,18 @@ import numbers import os import uuid +import daiquiri import iso8601 import monotonic import numpy -from oslo_log import log import pandas as pd import six import tenacity from tooz import coordination -LOG = log.getLogger(__name__) + +LOG = daiquiri.getLogger(__name__) + # uuid5 namespace for id transformation. 
# NOTE(chdent): This UUID must stay the same, forever, across all diff --git a/releasenotes/notes/oslo.log-removal-69a17397b10bc2bb.yaml b/releasenotes/notes/oslo.log-removal-69a17397b10bc2bb.yaml new file mode 100644 index 00000000..8fef1f87 --- /dev/null +++ b/releasenotes/notes/oslo.log-removal-69a17397b10bc2bb.yaml @@ -0,0 +1,5 @@ +--- +upgrade: + - | + The logging library oslo.log has been removed for daiquiri. Some + superfluous configuration options have been removed. diff --git a/requirements.txt b/requirements.txt index e06a0ecf..4083e8e0 100644 --- a/requirements.txt +++ b/requirements.txt @@ -2,7 +2,6 @@ pbr numpy>=1.9.0 iso8601 oslo.config>=3.22.0 -oslo.log>=2.3.0 oslo.policy>=0.3.0 oslo.middleware>=3.22.0 pandas>=0.18.0 @@ -22,3 +21,4 @@ WebOb>=1.4.1 Paste PasteDeploy monotonic +daiquiri -- GitLab From 00b2262bcf12a56436cce024d9e379746d28614f Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Tue, 30 May 2017 09:03:14 +0200 Subject: [PATCH 0805/1483] gabbi: upgrade test to basic authentication instead of noauth Since noauth is deprecated, rewrite the tests using the basic auth --- gnocchi/tests/functional/fixtures.py | 3 - .../tests/functional/gabbits/aggregation.yaml | 77 +--- .../functional/gabbits/archive-rule.yaml | 54 +-- gnocchi/tests/functional/gabbits/archive.yaml | 139 +++---- gnocchi/tests/functional/gabbits/async.yaml | 25 +- gnocchi/tests/functional/gabbits/base.yaml | 54 +-- .../functional/gabbits/batch-measures.yaml | 40 +- gnocchi/tests/functional/gabbits/history.yaml | 42 +-- .../gabbits/metric-granularity.yaml | 13 +- .../tests/functional/gabbits/metric-list.yaml | 44 +-- .../gabbits/metric-timestamp-format.yaml | 17 +- gnocchi/tests/functional/gabbits/metric.yaml | 58 +-- .../tests/functional/gabbits/pagination.yaml | 182 +-------- .../gabbits/resource-aggregation.yaml | 46 +-- .../functional/gabbits/resource-type.yaml | 91 +++-- .../tests/functional/gabbits/resource.yaml | 352 +----------------- 
.../functional/gabbits/search-metric.yaml | 27 +- gnocchi/tests/functional/gabbits/search.yaml | 19 +- .../functional/gabbits/transformedids.yaml | 21 +- 19 files changed, 283 insertions(+), 1021 deletions(-) diff --git a/gnocchi/tests/functional/fixtures.py b/gnocchi/tests/functional/fixtures.py index f78ea516..1df4fb3c 100644 --- a/gnocchi/tests/functional/fixtures.py +++ b/gnocchi/tests/functional/fixtures.py @@ -123,9 +123,6 @@ class ConfigFixture(fixture.GabbiFixture): # Set pagination to a testable value conf.set_override('max_limit', 7, 'api') - # Those tests uses noauth mode - # TODO(jd) Rewrite them for basic - conf.set_override("auth_mode", "noauth", 'api') self.index = index diff --git a/gnocchi/tests/functional/gabbits/aggregation.yaml b/gnocchi/tests/functional/gabbits/aggregation.yaml index 39c31d38..ee1905c7 100644 --- a/gnocchi/tests/functional/gabbits/aggregation.yaml +++ b/gnocchi/tests/functional/gabbits/aggregation.yaml @@ -3,16 +3,17 @@ fixtures: defaults: request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea + content-type: application/json + # User foobar + authorization: "basic Zm9vYmFyOg==" tests: - name: create archive policy desc: for later use POST: /v1/archive_policy request_headers: - content-type: application/json - x-roles: admin + # User admin + authorization: "basic YWRtaW46" data: name: low definition: @@ -143,10 +144,6 @@ tests: - name: post a resource POST: /v1/resource/generic - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json data: id: bcd3441c-b5aa-4d1b-af9a-5a72322bb269 metrics: @@ -156,10 +153,6 @@ tests: - name: post another resource POST: /v1/resource/generic - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json data: id: 1b0a8345-b279-4cb8-bd7a-2cb83193624f metrics: @@ -169,10 
+162,6 @@ tests: - name: push measurements to resource 1 POST: /v1/resource/generic/bcd3441c-b5aa-4d1b-af9a-5a72322bb269/metric/agg_meter/measures - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json data: - timestamp: "2015-03-06T14:33:57" value: 43.1 @@ -182,10 +171,6 @@ tests: - name: push measurements to resource 2 POST: /v1/resource/generic/1b0a8345-b279-4cb8-bd7a-2cb83193624f/metric/agg_meter/measures - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json data: - timestamp: "2015-03-06T14:33:57" value: 3.1 @@ -197,10 +182,6 @@ tests: - name: get measure aggregates by granularity from resources with refresh POST: /v1/aggregation/resource/generic/metric/agg_meter?granularity=1&refresh=true - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json response_json_paths: $: - ['2015-03-06T14:33:57+00:00', 1.0, 23.1] @@ -208,10 +189,6 @@ tests: - name: get measure aggregates by granularity from resources POST: /v1/aggregation/resource/generic/metric/agg_meter?granularity=1 - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json poll: count: 10 delay: 1 @@ -222,10 +199,6 @@ tests: - name: get measure aggregates by granularity from resources and resample POST: /v1/aggregation/resource/generic/metric/agg_meter?granularity=1&resample=60 - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json response_json_paths: $: - ['2015-03-06T14:33:00+00:00', 60.0, 23.1] @@ -233,28 +206,16 @@ tests: - name: get measure aggregates by granularity from resources and bad resample POST: 
/v1/aggregation/resource/generic/metric/agg_meter?resample=abc - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json status: 400 - name: get measure aggregates by granularity from resources and resample no granularity POST: /v1/aggregation/resource/generic/metric/agg_meter?resample=60 - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json status: 400 response_strings: - A granularity must be specified to resample - name: get measure aggregates by granularity with timestamps from resources POST: /v1/aggregation/resource/generic/metric/agg_meter?start=2015-03-06T15:33:57%2B01:00&stop=2015-03-06T15:34:00%2B01:00 - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json poll: count: 10 delay: 1 @@ -265,10 +226,6 @@ tests: - name: get measure aggregates by granularity from resources and reaggregate POST: /v1/aggregation/resource/generic/metric/agg_meter?granularity=1&reaggregation=min - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json poll: count: 10 delay: 1 @@ -279,10 +236,6 @@ tests: - name: get measure aggregates from resources with fill zero POST: /v1/aggregation/resource/generic/metric/agg_meter?granularity=1&fill=0 - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json response_json_paths: $: - ['2015-03-06T14:33:57+00:00', 1.0, 23.1] @@ -298,44 +251,24 @@ tests: - name: get measure aggregates with wrong metric_name POST: /v1/aggregation/resource/generic/metric/notexists - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: 
application/json status: 200 response_json_paths: $.`len`: 0 - name: get measure aggregates with wrong resource POST: /v1/aggregation/resource/notexits/metric/agg_meter - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json status: 404 response_strings: - Resource type notexits does not exist - name: get measure aggregates with wrong path POST: /v1/aggregation/re/generic/metric/agg_meter - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json status: 404 - name: get measure aggregates with wrong path 2 POST: /v1/aggregation/resource/generic/notexists/agg_meter - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json status: 404 - name: get measure aggregates with no resource name POST: /v1/aggregation/resource/generic/metric - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json status: 405 diff --git a/gnocchi/tests/functional/gabbits/archive-rule.yaml b/gnocchi/tests/functional/gabbits/archive-rule.yaml index bc3ea60a..1d130c11 100644 --- a/gnocchi/tests/functional/gabbits/archive-rule.yaml +++ b/gnocchi/tests/functional/gabbits/archive-rule.yaml @@ -6,14 +6,20 @@ fixtures: - ConfigFixture +defaults: + request_headers: + # User foobar + authorization: "basic Zm9vYmFyOg==" + content-type: application/json + tests: # create dependent policy - name: create archive policy POST: /v1/archive_policy request_headers: - content-type: application/json - x-roles: admin + # User admin + authorization: "basic YWRtaW46" data: name: low definition: @@ -27,8 +33,8 @@ tests: - name: create archive policy rule1 POST: /v1/archive_policy_rule request_headers: - content-type: application/json - x-roles: admin + # User admin + 
authorization: "basic YWRtaW46" data: name: test_rule1 metric_pattern: "*" @@ -42,8 +48,8 @@ tests: - name: create archive policy rule 2 POST: /v1/archive_policy_rule request_headers: - content-type: application/json - x-roles: admin + # User admin + authorization: "basic YWRtaW46" data: name: test_rule2 metric_pattern: "disk.foo.*" @@ -57,8 +63,8 @@ tests: - name: create archive policy rule 3 POST: /v1/archive_policy_rule request_headers: - content-type: application/json - x-roles: admin + # User admin + authorization: "basic YWRtaW46" data: name: test_rule3 metric_pattern: "disk.*" @@ -75,8 +81,8 @@ tests: - name: create invalid archive policy rule POST: /v1/archive_policy_rule request_headers: - content-type: application/json - x-roles: admin + # User admin + authorization: "basic YWRtaW46" data: name: test_rule metric_pattern: "disk.foo.*" @@ -96,16 +102,14 @@ tests: POST: /v1/archive_policy_rule request_headers: content-type: text/plain - x-roles: admin + # User admin + authorization: "basic YWRtaW46" status: 415 response_strings: - Unsupported Media Type - name: wrong auth create rule POST: /v1/archive_policy_rule - request_headers: - content-type: application/json - x-roles: foo data: name: test_rule_wrong_auth metric_pattern: "disk.foo.*" @@ -125,8 +129,8 @@ tests: - name: bad request body POST: /v1/archive_policy_rule request_headers: - content-type: application/json - x-roles: admin + # User admin + authorization: "basic YWRtaW46" data: whaa: foobar status: 400 @@ -150,7 +154,8 @@ tests: - name: delete used archive policy DELETE: /v1/archive_policy/low request_headers: - x-roles: admin + # User admin + authorization: "basic YWRtaW46" status: 400 # delete rule as non admin @@ -164,20 +169,23 @@ tests: - name: delete archive policy rule1 DELETE: /v1/archive_policy_rule/test_rule1 request_headers: - x-roles: admin + # User admin + authorization: "basic YWRtaW46" status: 204 - name: delete archive policy rule2 DELETE: /v1/archive_policy_rule/test_rule2 
request_headers: - x-roles: admin + # User admin + authorization: "basic YWRtaW46" status: 204 - name: delete archive policy rule3 DELETE: /v1/archive_policy_rule/test_rule3 request_headers: - x-roles: admin + # User admin + authorization: "basic YWRtaW46" status: 204 # delete again @@ -185,13 +193,15 @@ tests: - name: confirm delete archive policy rule DELETE: /v1/archive_policy_rule/test_rule1 request_headers: - x-roles: admin + # User admin + authorization: "basic YWRtaW46" status: 404 - name: delete missing archive policy rule utf8 DELETE: /v1/archive_policy_rule/%E2%9C%94%C3%A9%C3%B1%E2%98%83 request_headers: - x-roles: admin + # User admin + authorization: "basic YWRtaW46" status: 404 response_strings: - Archive policy rule ✔éñ☃ does not exist diff --git a/gnocchi/tests/functional/gabbits/archive.yaml b/gnocchi/tests/functional/gabbits/archive.yaml index 98e0ce99..0ce0182a 100644 --- a/gnocchi/tests/functional/gabbits/archive.yaml +++ b/gnocchi/tests/functional/gabbits/archive.yaml @@ -6,6 +6,12 @@ fixtures: - ConfigFixture +defaults: + request_headers: + # User foobar + authorization: "basic Zm9vYmFyOg==" + content-type: application/json + tests: # Retrieve the empty list when there are no archive policies. 
@@ -49,7 +55,8 @@ tests: POST: /v1/archive_policy request_headers: content-type: text/plain - x-roles: admin + # User admin + authorization: "basic YWRtaW46" status: 415 response_strings: - Unsupported Media Type @@ -57,15 +64,12 @@ tests: - name: wrong method PUT: /v1/archive_policy request_headers: - content-type: application/json - x-roles: admin + # User admin + authorization: "basic YWRtaW46" status: 405 - name: wrong authZ POST: /v1/archive_policy - request_headers: - content-type: application/json - x-roles: clancy data: name: medium definition: @@ -75,18 +79,18 @@ tests: - name: missing authZ POST: /v1/archive_policy request_headers: - content-type: application/json + authorization: "" data: name: medium definition: - granularity: 1 second - status: 403 + status: 401 - name: bad request body POST: /v1/archive_policy request_headers: - content-type: application/json - x-roles: admin + # User admin + authorization: "basic YWRtaW46" data: cowsay: moo status: 400 @@ -96,8 +100,8 @@ tests: - name: missing definition POST: /v1/archive_policy request_headers: - content-type: application/json - x-roles: admin + # User admin + authorization: "basic YWRtaW46" data: name: medium status: 400 @@ -107,8 +111,8 @@ tests: - name: empty definition POST: /v1/archive_policy request_headers: - content-type: application/json - x-roles: admin + # User admin + authorization: "basic YWRtaW46" data: name: medium definition: [] @@ -119,8 +123,8 @@ tests: - name: wrong value definition POST: /v1/archive_policy request_headers: - content-type: application/json - x-roles: admin + # User admin + authorization: "basic YWRtaW46" data: name: somename definition: foobar @@ -131,8 +135,8 @@ tests: - name: useless definition POST: /v1/archive_policy request_headers: - content-type: application/json - x-roles: admin + # User admin + authorization: "basic YWRtaW46" data: name: medium definition: @@ -146,8 +150,8 @@ tests: - name: create archive policy POST: /v1/archive_policy request_headers: - 
content-type: application/json - x-roles: admin + # User admin + authorization: "basic YWRtaW46" data: name: medium definition: @@ -184,8 +188,8 @@ tests: - name: patch archive policy with bad definition PATCH: $LAST_URL request_headers: - content-type: application/json - x-roles: admin + # User admin + authorization: "basic YWRtaW46" data: definition: - granularity: 1 second @@ -199,8 +203,8 @@ tests: - name: patch archive policy with missing granularity PATCH: $LAST_URL request_headers: - content-type: application/json - x-roles: admin + # User admin + authorization: "basic YWRtaW46" data: definition: - granularity: 1 second @@ -212,8 +216,8 @@ tests: - name: patch archive policy with non-matching granularity PATCH: $LAST_URL request_headers: - content-type: application/json - x-roles: admin + # User admin + authorization: "basic YWRtaW46" data: definition: - granularity: 5 second @@ -226,8 +230,8 @@ tests: - name: patch archive policy PATCH: $LAST_URL request_headers: - content-type: application/json - x-roles: admin + # User admin + authorization: "basic YWRtaW46" data: definition: - granularity: 1 second @@ -265,8 +269,8 @@ tests: - name: create second policy POST: /v1/archive_policy request_headers: - content-type: application/json - x-roles: admin + # User admin + authorization: "basic YWRtaW46" data: name: large definition: @@ -278,8 +282,8 @@ tests: - name: create duplicate policy POST: /v1/archive_policy request_headers: - content-type: application/json - x-roles: admin + # User admin + authorization: "basic YWRtaW46" data: name: large definition: @@ -293,8 +297,8 @@ tests: - name: post unicode policy name POST: /v1/archive_policy request_headers: - content-type: application/json - x-roles: admin + # User admin + authorization: "basic YWRtaW46" data: name: ✔éñ☃ definition: @@ -314,8 +318,8 @@ tests: - name: post small unicode policy name POST: /v1/archive_policy request_headers: - content-type: application/json - x-roles: admin + # User admin + 
authorization: "basic YWRtaW46" data: name: æ definition: @@ -354,7 +358,8 @@ tests: - name: delete single archive DELETE: /v1/archive_policy/medium request_headers: - x-roles: admin + # User admin + authorization: "basic YWRtaW46" status: 204 # It really is gone @@ -368,7 +373,8 @@ tests: - name: delete missing archive DELETE: /v1/archive_policy/grandiose request_headers: - x-roles: admin + # User admin + authorization: "basic YWRtaW46" status: 404 response_strings: - Archive policy grandiose does not exist @@ -376,13 +382,15 @@ tests: - name: delete archive utf8 DELETE: /v1/archive_policy/%E2%9C%94%C3%A9%C3%B1%E2%98%83 request_headers: - x-roles: admin + # User admin + authorization: "basic YWRtaW46" status: 204 - name: delete missing archive utf8 again DELETE: /v1/archive_policy/%E2%9C%94%C3%A9%C3%B1%E2%98%83 request_headers: - x-roles: admin + # User admin + authorization: "basic YWRtaW46" status: 404 response_strings: - Archive policy ✔éñ☃ does not exist @@ -391,10 +399,6 @@ tests: - name: create metric POST: /v1/metric - request_headers: - content-type: application/json - x-user-id: 93180da9-7c15-40d3-a050-a374551e52ee - x-project-id: 99d13f22-3618-4288-82b8-6512ded77e4f data: archive_policy_name: large status: 201 @@ -402,7 +406,8 @@ tests: - name: delete in use policy DELETE: /v1/archive_policy/large request_headers: - x-roles: admin + # User admin + authorization: "basic YWRtaW46" status: 400 response_strings: - Archive policy large is still in use @@ -412,8 +417,8 @@ tests: - name: create illogical policy POST: /v1/archive_policy request_headers: - content-type: application/json - x-roles: admin + # User admin + authorization: "basic YWRtaW46" data: name: complex definition: @@ -427,8 +432,8 @@ tests: - name: create invalid points policy POST: /v1/archive_policy request_headers: - content-type: application/json - x-roles: admin + # User admin + authorization: "basic YWRtaW46" data: name: complex definition: @@ -441,8 +446,8 @@ tests: - name: create 
invalid granularity policy POST: /v1/archive_policy request_headers: - content-type: application/json - x-roles: admin + # User admin + authorization: "basic YWRtaW46" data: name: complex definition: @@ -455,8 +460,8 @@ tests: - name: create identical granularities policy POST: /v1/archive_policy request_headers: - content-type: application/json - x-roles: admin + # User admin + authorization: "basic YWRtaW46" data: name: complex definition: @@ -471,8 +476,8 @@ tests: - name: policy invalid unit POST: /v1/archive_policy request_headers: - content-type: application/json - x-roles: admin + # User admin + authorization: "basic YWRtaW46" data: name: 227d0e1f-4295-4e4b-8515-c296c47d71d3 definition: @@ -483,8 +488,8 @@ tests: - name: create policy when granularity is larger than timespan POST: /v1/archive_policy request_headers: - content-type: application/json - x-roles: admin + # User admin + authorization: "basic YWRtaW46" data: name: should-have-failed definition: @@ -496,10 +501,6 @@ tests: - name: fail to create policy non-admin POST: /v1/archive_policy - request_headers: - content-type: application/json - x-user-id: b45187c5-150b-4730-bcb2-b5e04e234220 - x-project-id: 16764ee0-bffe-4843-aa36-04b002cdbc7c data: name: f1d150d9-02ad-4fe7-8872-c64b2bcaaa97 definition: @@ -514,8 +515,8 @@ tests: - name: policy with back window POST: /v1/archive_policy request_headers: - content-type: application/json - x-roles: admin + # User admin + authorization: "basic YWRtaW46" data: name: 7720a99d-cd3b-4aa4-8a6f-935bf0d46ded back_window: 1 @@ -531,8 +532,8 @@ tests: desc: and default seconds on int granularity POST: /v1/archive_policy request_headers: - content-type: application/json - x-roles: admin + # User admin + authorization: "basic YWRtaW46" data: name: 22f2b99f-e629-4170-adc4-09b65635e056 back_window: 0 @@ -550,8 +551,8 @@ tests: - name: policy float granularity POST: /v1/archive_policy request_headers: - content-type: application/json - x-roles: admin + # User admin + 
authorization: "basic YWRtaW46" data: name: 595228db-ea29-4415-9d5b-ecb5366abb1b definition: @@ -566,8 +567,8 @@ tests: - name: policy float timespan POST: /v1/archive_policy request_headers: - content-type: application/json - x-roles: admin + # User admin + authorization: "basic YWRtaW46" data: name: 6bc72791-a27e-4417-a589-afc6d2067a38 definition: diff --git a/gnocchi/tests/functional/gabbits/async.yaml b/gnocchi/tests/functional/gabbits/async.yaml index fd2f97ae..64eb71ed 100644 --- a/gnocchi/tests/functional/gabbits/async.yaml +++ b/gnocchi/tests/functional/gabbits/async.yaml @@ -5,13 +5,19 @@ fixtures: - ConfigFixture +defaults: + request_headers: + # User foobar + authorization: "basic Zm9vYmFyOg==" + content-type: application/json + tests: - name: create archive policy POST: /v1/archive_policy request_headers: - content-type: application/json - x-roles: admin + # User admin + authorization: "basic YWRtaW46" data: name: moderate definition: @@ -20,10 +26,6 @@ tests: - name: make a generic resource POST: /v1/resource/generic - request_headers: - x-user-id: edca16f4-684e-4a91-85e9-0c1ceecdd147 - x-project-id: e8459971-fae4-4670-8ed3-55dd9139d26d - content-type: application/json data: id: 41937416-1644-497d-a0ed-b43d55a2b0ea started_at: "2015-06-06T02:02:02.000000" @@ -34,19 +36,11 @@ tests: - name: confirm no metrics yet GET: /v1/resource/generic/41937416-1644-497d-a0ed-b43d55a2b0ea/metric/some.counter/measures - request_headers: - x-user-id: edca16f4-684e-4a91-85e9-0c1ceecdd147 - x-project-id: e8459971-fae4-4670-8ed3-55dd9139d26d - content-type: application/json response_json_paths: $: [] - name: post some measures POST: /v1/resource/generic/41937416-1644-497d-a0ed-b43d55a2b0ea/metric/some.counter/measures - request_headers: - x-user-id: edca16f4-684e-4a91-85e9-0c1ceecdd147 - x-project-id: e8459971-fae4-4670-8ed3-55dd9139d26d - content-type: application/json data: - timestamp: "2015-06-06T14:33:00" value: 11 @@ -59,9 +53,6 @@ tests: - name: get some measures 
GET: /v1/resource/generic/41937416-1644-497d-a0ed-b43d55a2b0ea/metric/some.counter/measures - request_headers: - x-user-id: edca16f4-684e-4a91-85e9-0c1ceecdd147 - x-project-id: e8459971-fae4-4670-8ed3-55dd9139d26d poll: count: 50 delay: .1 diff --git a/gnocchi/tests/functional/gabbits/base.yaml b/gnocchi/tests/functional/gabbits/base.yaml index ef097711..6997d587 100644 --- a/gnocchi/tests/functional/gabbits/base.yaml +++ b/gnocchi/tests/functional/gabbits/base.yaml @@ -3,8 +3,9 @@ fixtures: defaults: request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea + # User foobar + authorization: "basic Zm9vYmFyOg==" + content-type: application/json tests: @@ -20,8 +21,8 @@ tests: - name: archive policy post success POST: /v1/archive_policy request_headers: - content-type: application/json - x-roles: admin + # User admin + authorization: "basic YWRtaW46" data: name: test1 definition: @@ -40,8 +41,6 @@ tests: - name: post archive policy no auth desc: this confirms that auth handling comes before data validation POST: /v1/archive_policy - request_headers: - content-type: application/json data: definition: - granularity: 1 second @@ -50,11 +49,6 @@ tests: - name: post metric with archive policy POST: /v1/metric - request_headers: - content-type: application/json - x-roles: admin - x-user-id: 93180da9-7c15-40d3-a050-a374551e52ee - x-project-id: 99d13f22-3618-4288-82b8-6512ded77e4f data: archive_policy_name: test1 status: 201 @@ -66,13 +60,9 @@ tests: - name: retrieve metric info GET: $LOCATION status: 200 - request_headers: - content_type: /application\/json/ - x-roles: admin response_json_paths: $.archive_policy.name: test1 - $.created_by_user_id: 93180da9-7c15-40d3-a050-a374551e52ee - $.created_by_project_id: 99d13f22-3618-4288-82b8-6512ded77e4f + $.creator: foobar - name: list the one metric GET: /v1/metric @@ -83,10 +73,6 @@ tests: - name: post a single measure desc: post one measure POST: 
/v1/metric/$RESPONSE['$[0].id']/measures - request_headers: - content-type: application/json - x-user-id: 93180da9-7c15-40d3-a050-a374551e52ee - x-project-id: 99d13f22-3618-4288-82b8-6512ded77e4f data: - timestamp: "2013-01-01 23:23:20" value: 1234.2 @@ -103,10 +89,6 @@ tests: - name: post generic resource POST: /v1/resource/generic - request_headers: - content-type: application/json - x-user-id: 93180da9-7c15-40d3-a050-a374551e52ee - x-project-id: 99d13f22-3618-4288-82b8-6512ded77e4f data: id: 5b7ebe90-4ad2-4c83-ad2c-f6344884ab70 started_at: "2014-01-03T02:02:02.000000" @@ -119,14 +101,10 @@ tests: type: generic started_at: "2014-01-03T02:02:02+00:00" project_id: f3d41b770cc14f0bb94a1d5be9c0e3ea - created_by_project_id: 99d13f22-3618-4288-82b8-6512ded77e4f + creator: foobar - name: post generic resource bad id POST: /v1/resource/generic - request_headers: - content-type: application/json - x-user-id: 93180da9-7c15-40d3-a050-a374551e52ee - x-project-id: 99d13f22-3618-4288-82b8-6512ded77e4f data: id: 1.2.3.4 started_at: "2014-01-03T02:02:02.000000" @@ -134,13 +112,13 @@ tests: project_id: f3d41b77-0cc1-4f0b-b94a-1d5be9c0e3ea status: 201 response_headers: - location: $SCHEME://$NETLOC/v1/resource/generic/2d869568-70d4-5ed6-9891-7d7a3bbf572d + location: $SCHEME://$NETLOC/v1/resource/generic/a9c729cc-d1b0-5e6b-b5ba-8b5a7f45f1fc response_json_paths: type: generic started_at: "2014-01-03T02:02:02+00:00" project_id: f3d41b77-0cc1-4f0b-b94a-1d5be9c0e3ea - created_by_project_id: 99d13f22-3618-4288-82b8-6512ded77e4f - id: 2d869568-70d4-5ed6-9891-7d7a3bbf572d + creator: foobar + id: a9c729cc-d1b0-5e6b-b5ba-8b5a7f45f1fc original_resource_id: 1.2.3.4 - name: get status denied @@ -150,19 +128,15 @@ tests: - name: get status GET: /v1/status request_headers: - content-type: application/json - x-user-id: 93180da9-7c15-40d3-a050-a374551e52ee - x-project-id: 99d13f22-3618-4288-82b8-6512ded77e4f - x-roles: admin + # User admin + authorization: "basic YWRtaW46" response_json_paths: 
$.storage.`len`: 2 - name: get status, no details GET: /v1/status?details=False request_headers: - content-type: application/json - x-user-id: 93180da9-7c15-40d3-a050-a374551e52ee - x-project-id: 99d13f22-3618-4288-82b8-6512ded77e4f - x-roles: admin + # User admin + authorization: "basic YWRtaW46" response_json_paths: $.storage.`len`: 1 diff --git a/gnocchi/tests/functional/gabbits/batch-measures.yaml b/gnocchi/tests/functional/gabbits/batch-measures.yaml index a121f6fb..36a7210b 100644 --- a/gnocchi/tests/functional/gabbits/batch-measures.yaml +++ b/gnocchi/tests/functional/gabbits/batch-measures.yaml @@ -3,16 +3,18 @@ fixtures: defaults: request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea + content-type: application/json + # User foobar + authorization: "basic Zm9vYmFyOg==" + content-type: application/json tests: - name: create archive policy desc: for later use POST: /v1/archive_policy request_headers: - content-type: application/json - x-roles: admin + # User admin + authorization: "basic YWRtaW46" data: name: simple definition: @@ -21,16 +23,12 @@ tests: - name: create metric POST: /v1/metric - request_headers: - content-type: application/json data: archive_policy_name: simple status: 201 - name: push measurements to metric POST: /v1/batch/metrics/measures - request_headers: - content-type: application/json data: $RESPONSE['$.id']: - timestamp: "2015-03-06T14:33:57" @@ -41,8 +39,6 @@ tests: - name: push measurements to unknown metrics POST: /v1/batch/metrics/measures - request_headers: - content-type: application/json data: 37AEC8B7-C0D9-445B-8AB9-D3C6312DCF5C: - timestamp: "2015-03-06T14:33:57" @@ -60,8 +56,6 @@ tests: - name: push measurements to unknown named metrics POST: /v1/batch/resources/metrics/measures - request_headers: - content-type: application/json data: 37AEC8B7-C0D9-445B-8AB9-D3C6312DCF5D: cpu_util: @@ -81,16 +75,12 @@ tests: - name: create second metric POST: /v1/metric - 
request_headers: - content-type: application/json data: archive_policy_name: simple status: 201 - name: post a resource POST: /v1/resource/generic - request_headers: - content-type: application/json data: id: 46c9418d-d63b-4cdd-be89-8f57ffc5952e user_id: 0fbb2314-8461-4b1a-8013-1fc22f6afc9c @@ -104,8 +94,6 @@ tests: - name: post a second resource POST: /v1/resource/generic - request_headers: - content-type: application/json data: id: f0f6038f-f82c-4f30-8d81-65db8be249fe user_id: 0fbb2314-8461-4b1a-8013-1fc22f6afc9c @@ -122,8 +110,6 @@ tests: - name: push measurements to two metrics POST: /v1/batch/metrics/measures - request_headers: - content-type: application/json data: $RESPONSE['$[0].id']: - timestamp: "2015-03-06T14:33:57" @@ -139,8 +125,6 @@ tests: - name: push measurements to two named metrics POST: /v1/batch/resources/metrics/measures - request_headers: - content-type: application/json data: 46c9418d-d63b-4cdd-be89-8f57ffc5952e: disk.iops: @@ -169,8 +153,8 @@ tests: - name: create archive policy rule for auto POST: /v1/archive_policy_rule request_headers: - content-type: application/json - x-roles: admin + # User admin + authorization: "basic YWRtaW46" data: name: rule_auto metric_pattern: "auto.*" @@ -179,8 +163,6 @@ tests: - name: push measurements to unknown named metrics and create it POST: /v1/batch/resources/metrics/measures?create_metrics=true - request_headers: - content-type: application/json data: 46c9418d-d63b-4cdd-be89-8f57ffc5952e: auto.test: @@ -202,7 +184,6 @@ tests: - name: push measurements to unknown named metrics and resource with create_metrics with uuid resource id POST: /v1/batch/resources/metrics/measures?create_metrics=true request_headers: - content-type: application/json accept: application/json data: aaaaaaaa-d63b-4cdd-be89-111111111111: @@ -230,7 +211,6 @@ tests: - name: push measurements to unknown named metrics and resource with create_metrics with uuid resource id where resources is several times listed POST: 
/v1/batch/resources/metrics/measures?create_metrics=true request_headers: - content-type: application/json accept: application/json data: aaaaaaaa-d63b-4cdd-be89-111111111111: @@ -263,7 +243,6 @@ tests: - name: push measurements to unknown named metrics and resource with create_metrics with non uuid resource id POST: /v1/batch/resources/metrics/measures?create_metrics=true request_headers: - content-type: application/json accept: application/json data: foobar: @@ -277,13 +256,12 @@ tests: response_json_paths: $.description.cause: "Unknown resources" $.description.detail: - - resource_id: "6b8e287d-c01a-538c-979b-a819ee49de5d" + - resource_id: "2fbfbb20-8d56-5e1e-afb9-b3007da11fdf" original_resource_id: "foobar" - name: push measurements to named metrics and resource with create_metrics with wrong measure objects POST: /v1/batch/resources/metrics/measures?create_metrics=true request_headers: - content-type: application/json accept: application/json data: 46c9418d-d63b-4cdd-be89-8f57ffc5952e: diff --git a/gnocchi/tests/functional/gabbits/history.yaml b/gnocchi/tests/functional/gabbits/history.yaml index 2f3ff25f..f7503d26 100644 --- a/gnocchi/tests/functional/gabbits/history.yaml +++ b/gnocchi/tests/functional/gabbits/history.yaml @@ -5,12 +5,18 @@ fixtures: - ConfigFixture +defaults: + request_headers: + content-type: application/json + # User foobar + authorization: "basic Zm9vYmFyOg==" + tests: - name: create archive policy POST: /v1/archive_policy request_headers: - content-type: application/json - x-roles: admin + # User admin + authorization: "basic YWRtaW46" data: name: low definition: @@ -23,10 +29,6 @@ tests: - name: post generic resource POST: /v1/resource/generic - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json data: id: f93450f2-d8a5-4d67-9985-02511241e7d1 started_at: "2014-01-03T02:02:02.000000" @@ -37,17 +39,12 @@ tests: location: 
$SCHEME://$NETLOC/v1/resource/generic/f93450f2-d8a5-4d67-9985-02511241e7d1 content-type: /^application\/json/ response_json_paths: - $.created_by_project_id: f3d41b770cc14f0bb94a1d5be9c0e3ea - $.created_by_user_id: 0fbb231484614b1a80131fc22f6afc9c + $.creator: foobar $.user_id: 0fbb231484614b1a80131fc22f6afc9c # Update it twice - name: patch resource user_id PATCH: /v1/resource/generic/f93450f2-d8a5-4d67-9985-02511241e7d1 - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json data: user_id: f53c58a4-fdea-4c09-aac4-02135900be67 status: 200 @@ -57,10 +54,6 @@ tests: - name: patch resource project_id PATCH: $LAST_URL - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json data: project_id: fe20a931-1012-4cc6-addc-39556ec60907 metrics: @@ -75,9 +68,6 @@ tests: - name: list all resources without history GET: /v1/resource/generic - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea response_json_paths: $[0].user_id: f53c58a4-fdea-4c09-aac4-02135900be67 $[0].project_id: fe20a931-1012-4cc6-addc-39556ec60907 @@ -86,8 +76,6 @@ tests: GET: $LAST_URL request_headers: accept: application/json; details=True; history=True - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea response_json_paths: $.`len`: 3 $[0].id: f93450f2-d8a5-4d67-9985-02511241e7d1 @@ -102,10 +90,6 @@ tests: - name: patch resource metrics PATCH: /v1/resource/generic/f93450f2-d8a5-4d67-9985-02511241e7d1 - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json data: metrics: foo: @@ -116,8 +100,6 @@ tests: GET: /v1/resource/generic request_headers: accept: application/json; details=True; history=True - x-user-id: 
0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea response_json_paths: $.`len`: 3 $[0].id: f93450f2-d8a5-4d67-9985-02511241e7d1 @@ -132,10 +114,6 @@ tests: - name: create new metrics POST: /v1/resource/generic/f93450f2-d8a5-4d67-9985-02511241e7d1/metric - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json data: foobar: archive_policy_name: low @@ -148,8 +126,6 @@ tests: GET: /v1/resource/generic request_headers: accept: application/json; details=True; history=True - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea response_json_paths: $.`len`: 3 $[0].id: f93450f2-d8a5-4d67-9985-02511241e7d1 diff --git a/gnocchi/tests/functional/gabbits/metric-granularity.yaml b/gnocchi/tests/functional/gabbits/metric-granularity.yaml index 47a5efe3..c015e513 100644 --- a/gnocchi/tests/functional/gabbits/metric-granularity.yaml +++ b/gnocchi/tests/functional/gabbits/metric-granularity.yaml @@ -3,16 +3,17 @@ fixtures: defaults: request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea + # User foobar + authorization: "basic Zm9vYmFyOg==" + content-type: application/json tests: - name: create archive policy desc: for later use POST: /v1/archive_policy request_headers: - content-type: application/json - x-roles: admin + # User admin + authorization: "basic YWRtaW46" data: name: cookies definition: @@ -21,16 +22,12 @@ tests: - name: create valid metric POST: /v1/metric - request_headers: - content-type: application/json data: archive_policy_name: cookies status: 201 - name: push measurements to metric POST: /v1/metric/$RESPONSE['$.id']/measures - request_headers: - content-type: application/json data: - timestamp: "2015-03-06T14:33:57" value: 43.1 diff --git a/gnocchi/tests/functional/gabbits/metric-list.yaml b/gnocchi/tests/functional/gabbits/metric-list.yaml index 
59f58b96..fe7a7ad9 100644 --- a/gnocchi/tests/functional/gabbits/metric-list.yaml +++ b/gnocchi/tests/functional/gabbits/metric-list.yaml @@ -3,17 +3,17 @@ fixtures: defaults: request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - x-roles: admin + # User foobar + authorization: "basic Zm9vYmFyOg==" + content-type: application/json tests: - name: create archive policy 1 desc: for later use POST: /v1/archive_policy request_headers: - content-type: application/json - x-roles: admin + # User admin + authorization: "basic YWRtaW46" data: name: first_archive definition: @@ -24,8 +24,8 @@ tests: desc: for later use POST: /v1/archive_policy request_headers: - content-type: application/json - x-roles: admin + # User admin + authorization: "basic YWRtaW46" data: name: second_archive definition: @@ -34,8 +34,6 @@ tests: - name: create metric 1 POST: /v1/metric - request_headers: - content-type: application/json data: name: "disk.io.rate" unit: "B/s" @@ -49,9 +47,8 @@ tests: - name: create metric 2 POST: /v1/metric request_headers: - content-type: application/json - x-user-id: 4fff6179c2fc414dbedfc8cc82d6ada7 - x-project-id: f3ca498a61c84422b953133adb71cff8 + # User foobaz + authorization: "basic Zm9vYmF6Og==" data: name: "disk.io.rate" unit: "B/s" @@ -65,9 +62,8 @@ tests: - name: create metric 3 POST: /v1/metric request_headers: - content-type: application/json - x-user-id: faf30294217c4e1a91387d9c8f1fb1fb - x-project-id: f3ca498a61c84422b953133adb71cff8 + # User jd + authorization: "basic amQ6" data: name: "cpu_util" unit: "%" @@ -80,8 +76,6 @@ tests: - name: create metric 4 POST: /v1/metric - request_headers: - content-type: application/json data: name: "cpu" unit: "ns" @@ -131,12 +125,18 @@ tests: $[1].archive_policy.name: first_archive $[2].archive_policy.name: first_archive - - name: list metrics by user_id - GET: /v1/metric?user_id=faf30294217c4e1a91387d9c8f1fb1fb + - name: list metrics by creator jd + GET: 
/v1/metric?creator=jd + request_headers: + # User admin + authorization: "basic YWRtaW46" response_json_paths: $.`len`: 1 - - name: list metrics by project_id - GET: /v1/metric?project_id=f3ca498a61c84422b953133adb71cff8 + - name: list metrics by creator foobaz + GET: /v1/metric?creator=foobaz + request_headers: + # User admin + authorization: "basic YWRtaW46" response_json_paths: - $.`len`: 2 + $.`len`: 1 diff --git a/gnocchi/tests/functional/gabbits/metric-timestamp-format.yaml b/gnocchi/tests/functional/gabbits/metric-timestamp-format.yaml index f4522880..e376189d 100644 --- a/gnocchi/tests/functional/gabbits/metric-timestamp-format.yaml +++ b/gnocchi/tests/functional/gabbits/metric-timestamp-format.yaml @@ -3,16 +3,17 @@ fixtures: defaults: request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea + # User foobar + authorization: "basic Zm9vYmFyOg==" + content-type: application/json tests: - name: create archive policy desc: for later use POST: /v1/archive_policy request_headers: - content-type: application/json - x-roles: admin + # User admin + authorization: "basic YWRtaW46" data: name: cookies definition: @@ -21,8 +22,6 @@ tests: - name: create metric POST: /v1/metric - request_headers: - content-type: application/json data: archive_policy_name: cookies status: 201 @@ -31,8 +30,6 @@ tests: - name: push measurements to metric with relative timestamp POST: /v1/metric/$RESPONSE['$.id']/measures - request_headers: - content-type: application/json data: - timestamp: "-5 minutes" value: 43.1 @@ -40,8 +37,6 @@ tests: - name: create metric 2 POST: /v1/metric - request_headers: - content-type: application/json data: archive_policy_name: cookies status: 201 @@ -50,8 +45,6 @@ tests: - name: push measurements to metric with mixed timestamps POST: /v1/metric/$RESPONSE['$.id']/measures - request_headers: - content-type: application/json data: - timestamp: 1478012832 value: 43.1 diff --git 
a/gnocchi/tests/functional/gabbits/metric.yaml b/gnocchi/tests/functional/gabbits/metric.yaml index e987c81c..987f9a51 100644 --- a/gnocchi/tests/functional/gabbits/metric.yaml +++ b/gnocchi/tests/functional/gabbits/metric.yaml @@ -3,8 +3,10 @@ fixtures: defaults: request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea + content-type: application/json + # User foobar + authorization: "basic Zm9vYmFyOg==" + content-type: application/json tests: - name: wrong metric @@ -16,8 +18,8 @@ tests: desc: for later use POST: /v1/archive_policy request_headers: - content-type: application/json - x-roles: admin + # User admin + authorization: "basic YWRtaW46" data: name: cookies definition: @@ -27,8 +29,8 @@ tests: - name: create archive policy rule POST: /v1/archive_policy_rule request_headers: - content-type: application/json - x-roles: admin + # User admin + authorization: "basic YWRtaW46" data: name: test_rule metric_pattern: "disk.io.*" @@ -38,8 +40,8 @@ tests: - name: create alt archive policy POST: /v1/archive_policy request_headers: - content-type: application/json - x-roles: admin + # User admin + authorization: "basic YWRtaW46" data: name: cream definition: @@ -50,8 +52,8 @@ tests: desc: extra rule that won't be matched POST: /v1/archive_policy_rule request_headers: - content-type: application/json - x-roles: admin + # User admin + authorization: "basic YWRtaW46" data: name: test_ignore_rule metric_pattern: "disk.*" @@ -72,8 +74,6 @@ tests: - name: create metric with name and unit POST: /v1/metric - request_headers: - content-type: application/json data: name: "disk.io.rate" unit: "B/s" @@ -85,8 +85,6 @@ tests: - name: create metric with invalid name POST: /v1/metric - request_headers: - content-type: application/json data: name: "disk/io/rate" unit: "B/s" @@ -96,8 +94,6 @@ tests: - name: create metric with name and over length unit POST: /v1/metric - request_headers: - content-type: application/json data: name: 
"disk.io.rate" unit: "over_length_unit_over_length_unit" @@ -109,8 +105,6 @@ tests: - name: create metric with name no rule POST: /v1/metric - request_headers: - content-type: application/json data: name: "volume.io.rate" status: 400 @@ -119,8 +113,6 @@ tests: - name: create metric bad archive policy POST: /v1/metric - request_headers: - content-type: application/json data: archive_policy_name: bad-cookie status: 400 @@ -136,8 +128,6 @@ tests: - name: create valid metric POST: /v1/metric - request_headers: - content-type: application/json data: archive_policy_name: cookies status: 201 @@ -152,8 +142,6 @@ tests: - name: push measurements to metric before epoch POST: /v1/metric/$RESPONSE['$.id']/measures - request_headers: - content-type: application/json data: - timestamp: "1915-03-06T14:33:57" value: 43.1 @@ -168,8 +156,6 @@ tests: - name: push measurements to metric with bad timestamp POST: /v1/metric/$HISTORY['list valid metrics'].$RESPONSE['$[0].id']/measures - request_headers: - content-type: application/json data: - timestamp: "1915-100-06T14:33:57" value: 43.1 @@ -177,8 +163,6 @@ tests: - name: push measurements to metric epoch format POST: /v1/metric/$HISTORY['list valid metrics'].$RESPONSE['$[0].id']/measures - request_headers: - content-type: application/json data: - timestamp: 1425652437.0 value: 43.1 @@ -186,8 +170,6 @@ tests: - name: push measurements to metric POST: /v1/metric/$HISTORY['list valid metrics'].$RESPONSE['$[0].id']/measures - request_headers: - content-type: application/json data: - timestamp: "2015-03-06T14:34:12" value: 12 @@ -214,8 +196,6 @@ tests: - name: push measurements to metric again POST: /v1/metric/$HISTORY['list valid metrics'].$RESPONSE['$[0].id']/measures - request_headers: - content-type: application/json data: - timestamp: "2015-03-06T14:34:15" value: 16 @@ -245,8 +225,6 @@ tests: - name: create valid metric two POST: /v1/metric - request_headers: - content-type: application/json data: archive_policy_name: cookies status: 
201 @@ -255,8 +233,6 @@ tests: - name: push invalid measurements to metric POST: /v1/metric/$RESPONSE['$.id']/measures - request_headers: - content-type: application/json data: - timestamp: "2015-03-06T14:33:57" value: 12 @@ -266,8 +242,6 @@ tests: - name: create valid metric three POST: /v1/metric - request_headers: - content-type: application/json data: archive_policy_name: cookies status: 201 @@ -276,15 +250,11 @@ tests: - name: push invalid measurements to metric bis POST: /v1/metric/$RESPONSE['$.id']/measures - request_headers: - content-type: application/json data: 1 status: 400 - name: add measure unknown metric POST: /v1/metric/fake/measures - request_headers: - content-type: application/json data: - timestamp: "2015-03-06T14:33:57" value: 43.1 @@ -292,8 +262,8 @@ tests: - name: get metric list for authenticated user request_headers: - x-user-id: foo - x-project-id: bar + # User foobaz + authorization: "basic Zm9vYmF6Og==" GET: /v1/metric - name: get measures unknown metric diff --git a/gnocchi/tests/functional/gabbits/pagination.yaml b/gnocchi/tests/functional/gabbits/pagination.yaml index ef85a379..1b9bda97 100644 --- a/gnocchi/tests/functional/gabbits/pagination.yaml +++ b/gnocchi/tests/functional/gabbits/pagination.yaml @@ -5,6 +5,12 @@ fixtures: - ConfigFixture +defaults: + request_headers: + # User foobar + authorization: "basic Zm9vYmFyOg==" + content-type: application/json + tests: # @@ -12,10 +18,6 @@ tests: # - name: post resource 1 POST: /v1/resource/generic - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json data: id: 57a9e836-87b8-4a21-9e30-18a474b98fef started_at: "2014-01-01T02:02:02.000000" @@ -25,10 +27,6 @@ tests: - name: post resource 2 POST: $LAST_URL - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json data: id: 4facbf7e-a900-406d-a828-82393f7006b3 
started_at: "2014-01-02T02:02:02.000000" @@ -38,10 +36,6 @@ tests: - name: post resource 3 POST: $LAST_URL - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json data: id: 36775172-ebc9-4060-9870-a649361bc3ab started_at: "2014-01-03T02:02:02.000000" @@ -51,10 +45,6 @@ tests: - name: post resource 4 POST: $LAST_URL - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json data: id: 28593168-52bb-43b5-a6db-fc2343aac02a started_at: "2014-01-04T02:02:02.000000" @@ -64,10 +54,6 @@ tests: - name: post resource 5 POST: $LAST_URL - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json data: id: 1e3d5702-2cbf-46e0-ba13-0ddaa3c71150 started_at: "2014-01-05T02:02:02.000000" @@ -80,10 +66,6 @@ tests: # - name: list first two items default order GET: /v1/resource/generic?limit=2 - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json response_json_paths: $.`len`: 2 $[0].id: 57a9e836-87b8-4a21-9e30-18a474b98fef @@ -91,10 +73,6 @@ tests: - name: list next third items default order GET: /v1/resource/generic?limit=4&marker=4facbf7e-a900-406d-a828-82393f7006b3 - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json response_json_paths: $.`len`: 3 $[0].id: 36775172-ebc9-4060-9870-a649361bc3ab @@ -103,10 +81,6 @@ tests: - name: list first two items order by id witouth direction GET: /v1/resource/generic?limit=2&sort=id - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json status: 200 response_json_paths: $.`len`: 2 @@ -115,10 +89,6 @@ tests: - 
name: list first two items order by id GET: /v1/resource/generic?limit=2&sort=id:asc - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json response_json_paths: $.`len`: 2 $[0].id: 1e3d5702-2cbf-46e0-ba13-0ddaa3c71150 @@ -126,10 +96,6 @@ tests: - name: list next third items order by id GET: /v1/resource/generic?limit=4&sort=id:asc&marker=28593168-52bb-43b5-a6db-fc2343aac02a - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json response_json_paths: $.`len`: 3 $[0].id: 36775172-ebc9-4060-9870-a649361bc3ab @@ -138,10 +104,6 @@ tests: - name: search for some resources with limit, order and marker POST: /v1/search/resource/generic?limit=2&sort=id:asc&marker=36775172-ebc9-4060-9870-a649361bc3ab - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json data: "or": [ {"=": {"id": 36775172-ebc9-4060-9870-a649361bc3ab}}, @@ -158,42 +120,22 @@ tests: # - name: invalid sort_key GET: /v1/resource/generic?sort=invalid:asc - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json status: 400 - name: invalid sort_dir GET: /v1/resource/generic?sort=id:invalid - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json status: 400 - name: invalid marker GET: /v1/resource/generic?marker=d44b3f4c-27bc-4ace-b81c-2a8e60026874 - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json status: 400 - name: invalid negative limit GET: /v1/resource/generic?limit=-2 - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: 
f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json status: 400 - name: invalid limit GET: /v1/resource/generic?limit=invalid - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json status: 400 # @@ -202,10 +144,6 @@ tests: - name: post resource 6 POST: /v1/resource/generic - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json data: id: 465f87b2-61f7-4118-adec-1d96a78af401 started_at: "2014-01-02T02:02:02.000000" @@ -215,10 +153,6 @@ tests: - name: post resource 7 POST: $LAST_URL - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json data: id: 9b6af245-57df-4ed6-a8c0-f64b77d8867f started_at: "2014-01-28T02:02:02.000000" @@ -228,10 +162,6 @@ tests: - name: post resource 8 POST: $LAST_URL - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json data: id: d787aa85-5743-4443-84f9-204270bc141a started_at: "2014-01-31T02:02:02.000000" @@ -241,10 +171,6 @@ tests: - name: default limit GET: $LAST_URL - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json response_json_paths: $.`len`: 7 $[-1].id: 9b6af245-57df-4ed6-a8c0-f64b77d8867f @@ -252,28 +178,16 @@ tests: - name: update resource 5 PATCH: /v1/resource/generic/1e3d5702-2cbf-46e0-ba13-0ddaa3c71150 - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json data: ended_at: "2014-01-30T02:02:02.000000" - name: update resource 5 again PATCH: $LAST_URL - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - 
content-type: application/json data: ended_at: "2014-01-31T02:02:02.000000" - name: default limit with history and multiple sort key GET: /v1/resource/generic?history=true&sort=id:asc&sort=ended_at:desc-nullslast - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json response_json_paths: $.`len`: 7 $[0].id: 1e3d5702-2cbf-46e0-ba13-0ddaa3c71150 @@ -290,8 +204,8 @@ tests: desc: for later use POST: /v1/archive_policy request_headers: - content-type: application/json - x-roles: admin + # User admin + authorization: "basic YWRtaW46" data: name: dummy_policy definition: @@ -300,10 +214,6 @@ tests: - name: create metric with name1 POST: /v1/metric - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json data: name: "dummy1" archive_policy_name: dummy_policy @@ -311,10 +221,6 @@ tests: - name: create metric with name2 POST: /v1/metric - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json data: name: "dummy2" archive_policy_name: dummy_policy @@ -322,10 +228,6 @@ tests: - name: create metric with name3 POST: /v1/metric - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json data: name: "dummy3" archive_policy_name: dummy_policy @@ -333,10 +235,6 @@ tests: - name: create metric with name4 POST: /v1/metric - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json data: name: "dummy4" archive_policy_name: dummy_policy @@ -344,10 +242,6 @@ tests: - name: create metric with name5 POST: /v1/metric - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: 
application/json data: name: "dummy5" archive_policy_name: dummy_policy @@ -355,17 +249,9 @@ tests: - name: list all default order GET: /v1/metric - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json - name: list first two metrics default order GET: /v1/metric?limit=2 - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json response_json_paths: $.`len`: 2 $[0].name: $RESPONSE['$[0].name'] @@ -373,10 +259,6 @@ tests: - name: list next three metrics default order GET: /v1/metric?limit=4&marker=$HISTORY['list all default order'].$RESPONSE['$[1].id'] - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json response_json_paths: $.`len`: 3 $[0].name: $HISTORY['list all default order'].$RESPONSE['$[2].name'] @@ -385,10 +267,6 @@ tests: - name: list first two metrics order by user without direction GET: /v1/metric?limit=2&sort=name - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json status: 200 response_json_paths: $.`len`: 2 @@ -397,10 +275,6 @@ tests: - name: list first two metrics order by user GET: /v1/metric?limit=2&sort=name:asc - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json response_json_paths: $.`len`: 2 $[0].name: dummy1 @@ -408,10 +282,6 @@ tests: - name: list next third metrics order by user GET: /v1/metric?limit=4&sort=name:asc&marker=$RESPONSE['$[1].id'] - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json response_json_paths: $.`len`: 3 $[0].name: dummy3 @@ -424,40 +294,24 @@ tests: - name: create metric 
with name6 POST: /v1/metric - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json data: archive_policy_name: dummy_policy status: 201 - name: create metric with name7 POST: /v1/metric - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json data: archive_policy_name: dummy_policy status: 201 - name: create metric with name8 POST: /v1/metric - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json data: archive_policy_name: dummy_policy status: 201 - name: default metric limit GET: /v1/metric - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json response_json_paths: $.`len`: 7 @@ -467,40 +321,20 @@ tests: - name: metric invalid sort_key GET: /v1/metric?sort=invalid:asc - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json status: 400 - name: metric invalid sort_dir GET: /v1/metric?sort=id:invalid - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json status: 400 - name: metric invalid marker GET: /v1/metric?marker=d44b3f4c-27bc-4ace-b81c-2a8e60026874 - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json status: 400 - name: metric invalid negative limit GET: /v1/metric?limit=-2 - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json status: 400 - name: metric invalid limit GET: /v1/metric?limit=invalid - request_headers: - x-user-id: 
0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json status: 400 diff --git a/gnocchi/tests/functional/gabbits/resource-aggregation.yaml b/gnocchi/tests/functional/gabbits/resource-aggregation.yaml index cf563e7b..0d6fa963 100644 --- a/gnocchi/tests/functional/gabbits/resource-aggregation.yaml +++ b/gnocchi/tests/functional/gabbits/resource-aggregation.yaml @@ -1,13 +1,19 @@ fixtures: - ConfigFixture +defaults: + request_headers: + # User foobar + authorization: "basic Zm9vYmFyOg==" + content-type: application/json + tests: - name: create archive policy desc: for later use POST: /v1/archive_policy request_headers: - content-type: application/json - x-roles: admin + # User admin + authorization: "basic YWRtaW46" data: name: low definition: @@ -17,10 +23,6 @@ tests: - name: create resource 1 POST: /v1/resource/generic - request_headers: - x-user-id: 6c865dd0-7945-4e08-8b27-d0d7f1c2b667 - x-project-id: c7f32f1f-c5ef-427a-8ecd-915b219c66e8 - content-type: application/json data: id: 4ed9c196-4c9f-4ba8-a5be-c9a71a82aac4 user_id: 6c865dd0-7945-4e08-8b27-d0d7f1c2b667 @@ -32,10 +34,6 @@ tests: - name: post cpuutil measures 1 POST: /v1/resource/generic/4ed9c196-4c9f-4ba8-a5be-c9a71a82aac4/metric/cpu.util/measures - request_headers: - x-user-id: 6c865dd0-7945-4e08-8b27-d0d7f1c2b667 - x-project-id: c7f32f1f-c5ef-427a-8ecd-915b219c66e8 - content-type: application/json data: - timestamp: "2015-03-06T14:33:57" value: 43.1 @@ -61,10 +59,6 @@ tests: - name: create resource 2 POST: /v1/resource/generic - request_headers: - x-user-id: 6c865dd0-7945-4e08-8b27-d0d7f1c2b667 - x-project-id: c7f32f1f-c5ef-427a-8ecd-915b219c66e8 - content-type: application/json data: id: 1447CD7E-48A6-4C50-A991-6677CC0D00E6 user_id: 6c865dd0-7945-4e08-8b27-d0d7f1c2b667 @@ -76,10 +70,6 @@ tests: - name: post cpuutil measures 2 POST: /v1/resource/generic/1447CD7E-48A6-4C50-A991-6677CC0D00E6/metric/cpu.util/measures - request_headers: - x-user-id: 
6c865dd0-7945-4e08-8b27-d0d7f1c2b667 - x-project-id: c7f32f1f-c5ef-427a-8ecd-915b219c66e8 - content-type: application/json data: - timestamp: "2015-03-06T14:33:57" value: 23 @@ -89,10 +79,6 @@ tests: - name: create resource 3 POST: /v1/resource/generic - request_headers: - x-user-id: 6c865dd0-7945-4e08-8b27-d0d7f1c2b667 - x-project-id: c7f32f1f-c5ef-427a-8ecd-915b219c66e8 - content-type: application/json data: id: 33333BC5-5948-4F29-B7DF-7DE607660452 user_id: 6c865dd0-7945-4e08-8b27-d0d7f1c2b667 @@ -104,10 +90,6 @@ tests: - name: post cpuutil measures 3 POST: /v1/resource/generic/33333BC5-5948-4F29-B7DF-7DE607660452/metric/cpu.util/measures - request_headers: - x-user-id: 6c865dd0-7945-4e08-8b27-d0d7f1c2b667 - x-project-id: c7f32f1f-c5ef-427a-8ecd-915b219c66e8 - content-type: application/json data: - timestamp: "2015-03-06T14:33:57" value: 230 @@ -117,10 +99,6 @@ tests: - name: aggregate metric with groupby on project_id POST: /v1/aggregation/resource/generic/metric/cpu.util?groupby=project_id - request_headers: - x-user-id: 6c865dd0-7945-4e08-8b27-d0d7f1c2b667 - x-project-id: c7f32f1f-c5ef-427a-8ecd-915b219c66e8 - content-type: application/json data: =: user_id: 6c865dd0-7945-4e08-8b27-d0d7f1c2b667 @@ -144,10 +122,6 @@ tests: - name: aggregate metric with groupby on project_id and invalid group POST: /v1/aggregation/resource/generic/metric/cpu.util?groupby=project_id&groupby=thisisdumb - request_headers: - x-user-id: 6c865dd0-7945-4e08-8b27-d0d7f1c2b667 - x-project-id: c7f32f1f-c5ef-427a-8ecd-915b219c66e8 - content-type: application/json data: =: user_id: 6c865dd0-7945-4e08-8b27-d0d7f1c2b667 @@ -157,10 +131,6 @@ tests: - name: aggregate metric with groupby on project_id and user_id POST: /v1/aggregation/resource/generic/metric/cpu.util?groupby=project_id&groupby=user_id - request_headers: - x-user-id: 6c865dd0-7945-4e08-8b27-d0d7f1c2b667 - x-project-id: c7f32f1f-c5ef-427a-8ecd-915b219c66e8 - content-type: application/json data: =: user_id: 
6c865dd0-7945-4e08-8b27-d0d7f1c2b667 diff --git a/gnocchi/tests/functional/gabbits/resource-type.yaml b/gnocchi/tests/functional/gabbits/resource-type.yaml index fca3aaa3..90b9a8ba 100644 --- a/gnocchi/tests/functional/gabbits/resource-type.yaml +++ b/gnocchi/tests/functional/gabbits/resource-type.yaml @@ -8,8 +8,9 @@ fixtures: defaults: request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea + # User foobar + authorization: "basic Zm9vYmFyOg==" + content-type: application/json tests: @@ -25,15 +26,13 @@ tests: POST: $LAST_URL data: name: my_custom_resource - request_headers: - content-type: application/json status: 403 - name: post resource type with existing name POST: /v1/resource_type request_headers: - x-roles: admin - content-type: application/json + # User admin + authorization: "basic YWRtaW46" data: name: my_custom_resource attributes: @@ -44,8 +43,8 @@ tests: - name: post resource type bad string POST: $LAST_URL request_headers: - x-roles: admin - content-type: application/json + # User admin + authorization: "basic YWRtaW46" data: name: my_custom_resource attributes: @@ -65,8 +64,8 @@ tests: - name: post resource type bad min_length value POST: $LAST_URL request_headers: - x-roles: admin - content-type: application/json + # User admin + authorization: "basic YWRtaW46" data: name: my_custom_resource attributes: @@ -80,8 +79,8 @@ tests: - name: post resource type bad min value POST: $LAST_URL request_headers: - x-roles: admin - content-type: application/json + # User admin + authorization: "basic YWRtaW46" data: name: my_custom_resource attributes: @@ -97,8 +96,8 @@ tests: - name: post resource type POST: $LAST_URL request_headers: - x-roles: admin - content-type: application/json + # User admin + authorization: "basic YWRtaW46" data: name: my_custom_resource attributes: @@ -226,8 +225,6 @@ tests: - name: post invalid resource POST: /v1/resource/my_custom_resource - request_headers: - content-type: 
application/json data: id: d11edfca-4393-4fda-b94d-b05a3a1b3747 name: toolong!!! @@ -241,8 +238,6 @@ tests: - name: post invalid resource uuid POST: $LAST_URL - request_headers: - content-type: application/json data: id: d11edfca-4393-4fda-b94d-b05a3a1b3747 name: too @@ -258,8 +253,6 @@ tests: - name: post custom resource POST: $LAST_URL - request_headers: - content-type: application/json data: id: d11edfca-4393-4fda-b94d-b05a3a1b3747 name: bar @@ -274,8 +267,6 @@ tests: - name: patch custom resource PATCH: /v1/resource/my_custom_resource/d11edfca-4393-4fda-b94d-b05a3a1b3747 - request_headers: - content-type: application/json data: name: foo status: 200 @@ -288,8 +279,6 @@ tests: - name: get resource GET: $LAST_URL - request_headers: - content-type: application/json response_json_paths: $.id: d11edfca-4393-4fda-b94d-b05a3a1b3747 $.name: foo @@ -299,8 +288,6 @@ tests: - name: post resource with default POST: /v1/resource/my_custom_resource - request_headers: - content-type: application/json data: id: c4110aec-6e5c-43fa-b8c5-ffdfbca3ce59 name: foo @@ -315,8 +302,6 @@ tests: - name: list resource history GET: /v1/resource/my_custom_resource/d11edfca-4393-4fda-b94d-b05a3a1b3747/history?sort=revision_end:asc-nullslast - request_headers: - content-type: application/json response_json_paths: $.`len`: 2 $[0].id: d11edfca-4393-4fda-b94d-b05a3a1b3747 @@ -331,7 +316,8 @@ tests: - name: post a new resource attribute PATCH: /v1/resource_type/my_custom_resource request_headers: - x-roles: admin + # User admin + authorization: "basic YWRtaW46" content-type: application/json-patch+json data: - op: add @@ -444,7 +430,8 @@ tests: - name: post a new resource attribute with missing fill PATCH: /v1/resource_type/my_custom_resource request_headers: - x-roles: admin + # User admin + authorization: "basic YWRtaW46" content-type: application/json-patch+json data: - op: add @@ -460,7 +447,9 @@ tests: - name: post a new resource attribute with incorrect fill PATCH: 
/v1/resource_type/my_custom_resource request_headers: - x-roles: admin + request_headers: + # User admin + authorization: "basic YWRtaW46" content-type: application/json-patch+json data: - op: add @@ -534,8 +523,6 @@ tests: - name: control new attributes of existing resource GET: /v1/resource/my_custom_resource/d11edfca-4393-4fda-b94d-b05a3a1b3747 - request_headers: - content-type: application/json status: 200 response_json_paths: $.id: d11edfca-4393-4fda-b94d-b05a3a1b3747 @@ -549,8 +536,6 @@ tests: - name: control new attributes of existing resource history GET: /v1/resource/my_custom_resource/d11edfca-4393-4fda-b94d-b05a3a1b3747/history?sort=revision_end:asc-nullslast - request_headers: - content-type: application/json response_json_paths: $.`len`: 2 $[0].id: d11edfca-4393-4fda-b94d-b05a3a1b3747 @@ -575,7 +560,8 @@ tests: - name: add/delete the same resource attribute PATCH: /v1/resource_type/my_custom_resource request_headers: - x-roles: admin + # User admin + authorization: "basic YWRtaW46" content-type: application/json-patch+json data: - op: add @@ -647,7 +633,8 @@ tests: - name: delete/add the same resource attribute PATCH: /v1/resource_type/my_custom_resource request_headers: - x-roles: admin + # User admin + authorization: "basic YWRtaW46" content-type: application/json-patch+json data: - op: remove @@ -666,7 +653,8 @@ tests: - name: patch a resource attribute replace PATCH: /v1/resource_type/my_custom_resource request_headers: - x-roles: admin + # User admin + authorization: "basic YWRtaW46" content-type: application/json-patch+json data: - op: replace @@ -684,7 +672,8 @@ tests: - name: patch a resource attribute type not exist PATCH: /v1/resource_type/my_custom_resource request_headers: - x-roles: admin + # User admin + authorization: "basic YWRtaW46" content-type: application/json-patch+json data: - op: add @@ -699,7 +688,8 @@ tests: - name: patch a resource attribute type unknown PATCH: /v1/resource_type/my_custom_resource request_headers: - x-roles: 
admin + # User admin + authorization: "basic YWRtaW46" content-type: application/json-patch+json data: - op: remove @@ -713,7 +703,8 @@ tests: - name: delete in use resource_type DELETE: /v1/resource_type/my_custom_resource request_headers: - x-roles: admin + # User admin + authorization: "basic YWRtaW46" status: 400 response_strings: - Resource type my_custom_resource is still in use @@ -723,13 +714,15 @@ tests: - name: delete the resource DELETE: /v1/resource/my_custom_resource/d11edfca-4393-4fda-b94d-b05a3a1b3747 request_headers: - x-roles: admin + # User admin + authorization: "basic YWRtaW46" status: 204 - name: delete the second resource DELETE: /v1/resource/my_custom_resource/c4110aec-6e5c-43fa-b8c5-ffdfbca3ce59 request_headers: - x-roles: admin + # User admin + authorization: "basic YWRtaW46" status: 204 # Now we can deleted the type @@ -737,19 +730,20 @@ tests: - name: delete the custom resource type DELETE: /v1/resource_type/my_custom_resource request_headers: - x-roles: admin + # User admin + authorization: "basic YWRtaW46" status: 204 - name: delete non-existing custom resource type DELETE: $LAST_URL request_headers: - x-roles: admin + authorization: "basic YWRtaW46" status: 404 - name: delete missing custom resource type utf8 DELETE: /v1/resource_type/%E2%9C%94%C3%A9%C3%B1%E2%98%83 request_headers: - x-roles: admin + authorization: "basic YWRtaW46" status: 404 response_strings: - Resource type ✔éñ☃ does not exist @@ -759,8 +753,7 @@ tests: - name: post resource type again POST: /v1/resource_type request_headers: - x-roles: admin - content-type: application/json + authorization: "basic YWRtaW46" data: name: my_custom_resource status: 201 @@ -768,5 +761,5 @@ tests: - name: delete the custom resource type again DELETE: /v1/resource_type/my_custom_resource request_headers: - x-roles: admin + authorization: "basic YWRtaW46" status: 204 diff --git a/gnocchi/tests/functional/gabbits/resource.yaml b/gnocchi/tests/functional/gabbits/resource.yaml index 
0b69df21..da423767 100644 --- a/gnocchi/tests/functional/gabbits/resource.yaml +++ b/gnocchi/tests/functional/gabbits/resource.yaml @@ -6,6 +6,12 @@ fixtures: - ConfigFixture +defaults: + request_headers: + # User foobar + authorization: "basic Zm9vYmFyOg==" + content-type: application/json + tests: # We will need an archive for use in later tests so we create it @@ -16,8 +22,8 @@ tests: desc: for later use POST: /v1/archive_policy request_headers: - content-type: application/json - x-roles: admin + # User admin + authorization: "basic YWRtaW46" data: name: medium definition: @@ -27,8 +33,8 @@ tests: - name: create archive policy rule POST: /v1/archive_policy_rule request_headers: - content-type: application/json - x-roles: admin + # User admin + authorization: "basic YWRtaW46" data: name: test_rule metric_pattern: "disk.io.*" @@ -103,26 +109,8 @@ tests: response_strings: - "[]" - - name: post resource no user-id - desc: https://bugs.launchpad.net/gnocchi/+bug/1424005 - POST: $LAST_URL - request_headers: - # Only provide one of these auth headers - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - content-type: application/json - data: - id: f93454f2-d8a5-4d67-9985-02511241e7f3 - started_at: "2014-01-03T02:02:02.000000" - user_id: 0fbb231484614b1a80131fc22f6afc9c - project_id: f3d41b770cc14f0bb94a1d5be9c0e3ea - status: 201 - - name: post generic resource POST: $LAST_URL - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json data: id: f93450f2-d8a5-4d67-9985-02511241e7d1 started_at: "2014-01-03T02:02:02.000000" @@ -133,17 +121,12 @@ tests: location: $SCHEME://$NETLOC/v1/resource/generic/f93450f2-d8a5-4d67-9985-02511241e7d1 content-type: /^application\/json/ response_json_paths: - $.created_by_project_id: f3d41b770cc14f0bb94a1d5be9c0e3ea - $.created_by_user_id: 0fbb231484614b1a80131fc22f6afc9c + $.creator: foobar $.user_id: 0fbb231484614b1a80131fc22f6afc9c - name: post same 
resource refuse desc: We can only post one identified resource once POST: $LAST_URL - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json data: id: f93450f2-d8a5-4d67-9985-02511241e7d1 started_at: "2014-01-03T02:02:02.000000" @@ -154,8 +137,6 @@ tests: - name: post generic resource bad content type POST: $LAST_URL request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea content-type: text/plain data: '{"id": "f93450f2-d8a5-4d67-9985-02511241e7d1", "started_at": "2014-01-03T02:02:02.000000", "user_id": "0fbb231484614b1a80131fc22f6afc9c", "project_id": "f3d41b770cc14f0bb94a1d5be9c0e3ea"}' status: 415 @@ -165,18 +146,10 @@ tests: - name: post generic resource no data POST: /v1/resource/generic - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json status: 400 - name: post generic with invalid metric name POST: $LAST_URL - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json data: metrics: "disk/iops": @@ -187,10 +160,6 @@ tests: - name: post generic resource to modify POST: $LAST_URL - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json data: id: 75C44741-CC60-4033-804E-2D3098C7D2E9 user_id: 0fbb231484614b1a80131fc22f6afc9c @@ -204,10 +173,6 @@ tests: # graceful failure. 
- name: patch generic resource PATCH: $LOCATION - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json data: user_id: foobar status: 200 @@ -217,10 +182,6 @@ tests: - name: patch generic resource with same data desc: Ensure no useless revision have been created PATCH: /v1/resource/generic/75C44741-CC60-4033-804E-2D3098C7D2E9 - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json data: user_id: foobar status: 200 @@ -230,10 +191,6 @@ tests: - name: patch generic resource with id PATCH: $LAST_URL - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json data: id: foobar status: 400 @@ -243,10 +200,6 @@ tests: - name: patch generic with metrics PATCH: $LAST_URL - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json data: metrics: disk.iops: @@ -258,11 +211,6 @@ tests: - name: get generic history desc: Ensure we can get the history GET: /v1/resource/generic/75C44741-CC60-4033-804E-2D3098C7D2E9/history?sort=revision_end:asc-nullslast - request_headers: - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json response_json_paths: $.`len`: 2 $[1].revision_end: null @@ -270,10 +218,6 @@ tests: - name: patch generic bad metric association PATCH: /v1/resource/generic/75C44741-CC60-4033-804E-2D3098C7D2E9 - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json data: metrics: disk.iops: f3d41b77-0cc1-4f0b-b94a-1d5be9c0e3ea @@ -283,10 +227,6 @@ tests: - name: patch generic with bad archive policy PATCH: $LAST_URL - request_headers: - 
x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json data: metrics: disk.iops: @@ -297,10 +237,6 @@ tests: - name: patch generic with no archive policy rule PATCH: $LAST_URL - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json data: metrics: disk.iops: {} @@ -310,10 +246,6 @@ tests: - name: patch generic with archive policy rule PATCH: $LAST_URL - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json data: metrics: disk.io.rate: {} @@ -322,20 +254,12 @@ tests: - name: get patched resource desc: confirm the patched resource is properly patched GET: $LAST_URL - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json data: user_id: foobar - name: patch resource empty dict desc: an empty dict in patch is an existence check PATCH: $LAST_URL - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json data: "{}" status: 200 data: @@ -344,10 +268,6 @@ tests: - name: patch resource without change with metrics in response desc: an empty dict in patch is an existence check PATCH: $LAST_URL - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json data: "{}" status: 200 response_json_paths: @@ -355,10 +275,6 @@ tests: - name: patch generic with invalid metric name PATCH: $LAST_URL - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json data: metrics: "disk/iops": @@ -372,19 +288,11 @@ tests: - name: post generic history desc: should don't work POST: 
/v1/resource/generic/75C44741-CC60-4033-804E-2D3098C7D2E9/history - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json status: 405 - name: delete generic history desc: should don't work DELETE: $LAST_URL - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json status: 405 # Failure modes for PATCHing a resource @@ -392,10 +300,6 @@ tests: - name: patch resource no data desc: providing no data is an error PATCH: /v1/resource/generic/75C44741-CC60-4033-804E-2D3098C7D2E9 - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json status: 400 response_strings: - "Unable to decode body:" @@ -403,10 +307,6 @@ tests: - name: patch resource bad data desc: providing data that is not a dict is an error PATCH: $LAST_URL - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json status: 400 data: - Beer and pickles @@ -416,10 +316,6 @@ tests: - name: patch noexit resource desc: "patching something that doesn't exist is a 404" PATCH: /v1/resource/generic/77777777-CC60-4033-804E-2D3098C7D2E9 - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json status: 404 # GET single resource failure modes @@ -427,9 +323,6 @@ tests: - name: get noexist resource desc: if a resource does not exist 404 GET: $LAST_URL - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea status: 404 response_strings: - The resource could not be found. 
@@ -437,42 +330,24 @@ tests: - name: get bad resource id desc: https://bugs.launchpad.net/gnocchi/+bug/1425588 GET: /v1/resource/generic/noexist - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea status: 404 response_strings: - The resource could not be found. - name: get metrics for this not-existing resource GET: /v1/resource/generic/77777777-CC60-4033-804E-2D3098C7D2E9/metric/cpu.util - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json status: 404 # List resources - - name: list generic resources no auth - GET: /v1/resource/generic - response_strings: - - "[]" - - name: list generic resources - GET: $LAST_URL - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea + GET: /v1/resource/generic response_json_paths: $[0].user_id: 0fbb231484614b1a80131fc22f6afc9c $[-1].user_id: foobar - name: list all resources GET: /v1/resource/generic - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea response_strings: - '"type": "generic"' @@ -480,10 +355,6 @@ tests: - name: post new generic with non-existent metrics POST: /v1/resource/generic - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json data: id: 85C44741-CC60-4033-804E-2D3098C7D2E9 user_id: 0fbb231484614b1a80131fc22f6afc9c @@ -494,10 +365,6 @@ tests: - name: post new generic with metrics bad policy POST: $LAST_URL - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json data: id: 85C44741-CC60-4033-804E-2D3098C7D2E9 user_id: 0fbb231484614b1a80131fc22f6afc9c @@ -509,10 +376,6 @@ tests: - name: post new generic with metrics no policy rule POST: $LAST_URL - 
request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json data: id: 85BABE39-F7F7-455A-877B-62C22E11AA40 user_id: 0fbb231484614b1a80131fc22f6afc9c @@ -525,10 +388,6 @@ tests: - name: post new generic with metrics using policy rule POST: $LAST_URL - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json data: id: 85BABE39-F7F7-455A-877B-62C22E11AA40 user_id: 0fbb231484614b1a80131fc22f6afc9c @@ -539,10 +398,6 @@ tests: - name: post new generic with metrics POST: $LAST_URL - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json data: id: d13982cb-4cce-4f84-a96e-7581be1e599c user_id: 0fbb231484614b1a80131fc22f6afc9c @@ -552,15 +407,10 @@ tests: archive_policy_name: medium status: 201 response_json_paths: - created_by_user_id: 0fbb231484614b1a80131fc22f6afc9c - created_by_project_id: f3d41b770cc14f0bb94a1d5be9c0e3ea + creator: foobar - name: post new generic with metrics and un-normalized user/project id from keystone middleware POST: $LAST_URL - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json data: id: 85C44741-CC60-4033-804E-2D3098C7D2E9 metrics: @@ -568,17 +418,11 @@ tests: archive_policy_name: medium status: 201 response_json_paths: - created_by_user_id: 0fbb231484614b1a80131fc22f6afc9c - created_by_project_id: f3d41b770cc14f0bb94a1d5be9c0e3ea - + creator: foobar - name: get metrics for this resource desc: with async measure handling this is a null test GET: /v1/resource/generic/$RESPONSE['$.id']/metric/cpu.util/measures - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json response_strings: - "[]" @@ -586,15 +430,9 
@@ tests: - name: list the generics GET: /v1/resource/generic - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - name: request metrics from one of the generics GET: /v1/resource/generic/$RESPONSE['$[-1].id']/metric - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea response_json_paths: $.`len`: 1 $[0].name: cpu.util @@ -603,44 +441,25 @@ tests: - name: request metrics from non uuid metrics desc: 404 from GenericResourceController GET: /v1/resource/generic/not.a.uuid/metric - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json status: 404 - name: request cpuutil metric from generic GET: /v1/resource/generic/85C44741-CC60-4033-804E-2D3098C7D2E9/metric/cpu.util - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea response_json_paths: - $.created_by_project_id: f3d41b770cc14f0bb94a1d5be9c0e3ea $.archive_policy.name: medium - name: try post cpuutil metric to generic POST: $LAST_URL - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json status: 405 - name: request cpuutil measures from generic desc: with async measure handling this is a null test GET: /v1/resource/generic/85C44741-CC60-4033-804E-2D3098C7D2E9/metric/cpu.util/measures - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea response_strings: - "[]" - name: post cpuutil measures POST: $LAST_URL - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json data: - timestamp: "2015-03-06T14:33:57" value: 43.1 @@ -650,9 +469,6 @@ tests: - name: request cpuutil measures again GET: $LAST_URL - 
request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea poll: count: 50 delay: .1 @@ -663,10 +479,6 @@ tests: - name: post metric at generic POST: /v1/resource/generic/85C44741-CC60-4033-804E-2D3098C7D2E9/metric - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json status: 200 data: electron.spin: @@ -678,10 +490,6 @@ tests: - name: post metric at generic with empty definition POST: $LAST_URL - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json status: 400 data: foo.bar: {} @@ -690,10 +498,6 @@ tests: - name: post metric at generic using archive policy rule POST: $LAST_URL - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json status: 200 data: disk.io.rate: {} @@ -703,10 +507,6 @@ tests: - name: duplicate metrics at generic POST: $LAST_URL - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json status: 409 data: electron.spin: @@ -716,10 +516,6 @@ tests: - name: post metrics at generic bad policy POST: $LAST_URL - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json status: 400 data: electron.charge: @@ -731,10 +527,6 @@ tests: - name: post new generic with bad timestamp POST: /v1/resource/generic - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json data: id: 95C44741-CC60-4033-804E-2D3098C7D2E9 user_id: 0fbb231484614b1a80131fc22f6afc9c @@ -753,10 +545,6 @@ tests: - name: post to non uuid metrics desc: 404 from GenericResourceController POST: 
/v1/resource/generic/not.a.uuid/metric - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json data: some.metric: archive_policy_name: medium @@ -765,10 +553,6 @@ tests: - name: post to missing uuid metrics desc: 404 from NamedMetricController POST: /v1/resource/generic/d5a5994e-ee90-11e4-88cf-685b35afa334/metric - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json data: some.metric: archive_policy_name: medium @@ -779,10 +563,6 @@ tests: - name: post measure on unknown metric desc: 404 from NamedMetricController with metric error POST: /v1/resource/generic/85C44741-CC60-4033-804E-2D3098C7D2E9/metric/unknown/measures - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json data: - timestamp: "2015-03-06T14:33:57" value: 43.1 @@ -794,16 +574,10 @@ tests: - name: delete generic DELETE: /v1/resource/generic/75C44741-CC60-4033-804E-2D3098C7D2E9 - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea status: 204 - name: delete noexist generic DELETE: /v1/resource/generic/77777777-CC60-4033-804E-2D3098C7D2E9 - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea status: 404 # Delete a batch of resources by attributes filter @@ -811,10 +585,6 @@ tests: - name: create resource one desc: before test batch delete, create some resources using a float in started_at POST: /v1/resource/generic - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json data: id: f93450f2-aaaa-4d67-9985-02511241e7d1 started_at: 1388714522.0 @@ -825,10 +595,6 @@ tests: - name: create resource two desc: before test batch delete, 
create some resources POST: $LAST_URL - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json data: id: f93450f2-bbbb-4d67-9985-02511241e7d1 started_at: "2014-01-03T02:02:02.000000" @@ -839,10 +605,6 @@ tests: - name: create resource three desc: before test batch delete, create some resources POST: $LAST_URL - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json data: id: f93450f2-cccc-4d67-9985-02511241e7d1 started_at: "2014-08-04T00:00:00.000000" @@ -853,10 +615,6 @@ tests: - name: create resource four desc: before test batch delete, create some resources POST: $LAST_URL - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json data: id: f93450f2-dddd-4d67-9985-02511241e7d1 started_at: "2014-08-04T00:00:00.000000" @@ -867,10 +625,6 @@ tests: - name: create resource five desc: before test batch delete, create some resources POST: $LAST_URL - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json data: id: f93450f2-eeee-4d67-9985-02511241e7d1 started_at: "2015-08-14T00:00:00.000000" @@ -881,10 +635,6 @@ tests: - name: create resource six desc: before test batch delete, create some resources POST: $LAST_URL - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json data: id: f93450f2-ffff-4d67-9985-02511241e7d1 started_at: "2015-08-14T00:00:00.000000" @@ -895,64 +645,36 @@ tests: - name: get resource one desc: ensure the resources exists GET: /v1/resource/generic/f93450f2-aaaa-4d67-9985-02511241e7d1 - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - 
content-type: application/json status: 200 - name: get resource two desc: ensure the resources exists GET: /v1/resource/generic/f93450f2-bbbb-4d67-9985-02511241e7d1 - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json status: 200 - name: get resource three desc: ensure the resources exists GET: /v1/resource/generic/f93450f2-cccc-4d67-9985-02511241e7d1 - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json status: 200 - name: get resource four desc: ensure the resources exists GET: /v1/resource/generic/f93450f2-dddd-4d67-9985-02511241e7d1 - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json status: 200 - name: get resource five desc: ensure the resources exists GET: /v1/resource/generic/f93450f2-eeee-4d67-9985-02511241e7d1 - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json status: 200 - name: get resource six desc: ensure the resources exists GET: /v1/resource/generic/f93450f2-ffff-4d67-9985-02511241e7d1 - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json status: 200 - name: delete random data structure desc: delete an empty list test DELETE: /v1/resource/generic - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json data: resource_ids: [] @@ -963,20 +685,12 @@ tests: - name: delete something empty desc: use empty filter for delete DELETE: $LAST_URL - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json data: "" status: 400 
- name: delete something empty a desc: use empty filter for delete DELETE: $LAST_URL - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json data: in: id: [] @@ -987,10 +701,6 @@ tests: - name: delete something empty b desc: use empty filter for delete DELETE: $LAST_URL - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json data: in: {} status: 400 @@ -998,22 +708,14 @@ tests: - name: delete something empty c desc: use empty filter for delete DELETE: $LAST_URL - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json data: in: and: [] status: 400 - name: delete something empty d - desc: use empty filter for delete + desc: use empty filter for delete DELETE: $LAST_URL - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json data: in: and: @@ -1025,10 +727,6 @@ tests: - name: delete something empty e desc: use empty filter for delete DELETE: $LAST_URL - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json data: and: [] status: 400 @@ -1036,10 +734,6 @@ tests: - name: delete something empty f desc: use empty filter for delete DELETE: $LAST_URL - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json data: and: - in: @@ -1050,10 +744,6 @@ tests: - name: delete batch of resources filter by started_at desc: delete the created resources DELETE: /v1/resource/generic - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json data: eq: started_at: 
"2014-08-04" @@ -1064,10 +754,6 @@ tests: - name: delete batch of resources filter by multiple ids desc: delete the created resources DELETE: /v1/resource/generic - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json data: in: id: @@ -1080,10 +766,6 @@ tests: - name: delete both existent and non-existent data desc: delete exits and non-exist data DELETE: $LAST_URL - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json data: in: id: @@ -1098,10 +780,6 @@ tests: - name: delete multiple non-existent resources desc: delete a batch of non-existent resources DELETE: $LAST_URL - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json data: in: id: diff --git a/gnocchi/tests/functional/gabbits/search-metric.yaml b/gnocchi/tests/functional/gabbits/search-metric.yaml index 4f477b71..0a781e6f 100644 --- a/gnocchi/tests/functional/gabbits/search-metric.yaml +++ b/gnocchi/tests/functional/gabbits/search-metric.yaml @@ -8,16 +8,17 @@ fixtures: defaults: request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea + # User foobar + authorization: "basic Zm9vYmFyOg==" + content-type: application/json tests: - name: create archive policy desc: for later use POST: /v1/archive_policy request_headers: - content-type: application/json - x-roles: admin + # User admin + authorization: "basic YWRtaW46" data: name: high definition: @@ -31,8 +32,6 @@ tests: - name: create metric POST: /v1/metric - request_headers: - content-type: application/json data: archive_policy_name: high status: 201 @@ -40,8 +39,6 @@ tests: - name: post measures desc: for later use POST: /v1/batch/metrics/measures - request_headers: - content-type: application/json data: $RESPONSE['$.id']: - 
timestamp: "2014-10-06T14:34:12" @@ -78,24 +75,18 @@ tests: - name: search with one correct granularity POST: /v1/search/metric?metric_id=$HISTORY['get metric id'].$RESPONSE['$[0].id']&granularity=1s - request_headers: - content-type: application/json data: "=": 12 status: 200 - name: search with multiple correct granularities POST: /v1/search/metric?metric_id=$HISTORY['get metric id'].$RESPONSE['$[0].id']&granularity=1second&granularity=2s - request_headers: - content-type: application/json data: "=": 12 status: 200 - name: search with correct and incorrect granularities POST: /v1/search/metric?metric_id=$HISTORY['get metric id'].$RESPONSE['$[0].id']&granularity=1s&granularity=300 - request_headers: - content-type: application/json data: "=": 12 status: 400 @@ -104,8 +95,6 @@ tests: - name: search with incorrect granularity POST: /v1/search/metric?metric_id=$HISTORY['get metric id'].$RESPONSE['$[0].id']&granularity=300 - request_headers: - content-type: application/json data: "=": 12 status: 400 @@ -114,8 +103,6 @@ tests: - name: search measure with wrong start POST: /v1/search/metric?metric_id=$HISTORY['get metric id'].$RESPONSE['$[0].id']&start=foobar - request_headers: - content-type: application/json data: ∧: - ≥: 1000 @@ -125,16 +112,12 @@ tests: - name: create metric 2 POST: /v1/metric - request_headers: - content-type: application/json data: archive_policy_name: "high" status: 201 - name: search measure with wrong stop POST: /v1/search/metric?metric_id=$RESPONSE['$.id']&stop=foobar - request_headers: - content-type: application/json data: ∧: - ≥: 1000 diff --git a/gnocchi/tests/functional/gabbits/search.yaml b/gnocchi/tests/functional/gabbits/search.yaml index c8f9bc2d..0bd8f93d 100644 --- a/gnocchi/tests/functional/gabbits/search.yaml +++ b/gnocchi/tests/functional/gabbits/search.yaml @@ -8,8 +8,9 @@ fixtures: defaults: request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea + # User foobar + 
authorization: "basic Zm9vYmFyOg==" + content-type: application/json tests: - name: typo of search @@ -22,16 +23,12 @@ tests: - name: search with invalid uuid POST: /v1/search/resource/generic - request_headers: - content-type: application/json data: =: id: "cd9eef" - name: post generic resource POST: /v1/resource/generic - request_headers: - content-type: application/json data: id: faef212f-0bf4-4030-a461-2186fef79be0 started_at: "2014-01-03T02:02:02.000000" @@ -41,8 +38,6 @@ tests: - name: post generic resource twice POST: /v1/resource/generic - request_headers: - content-type: application/json data: id: df7e5e75-6a1d-4ff7-85cb-38eb9d75da7e started_at: "2014-01-03T02:02:02.000000" @@ -52,8 +47,6 @@ tests: - name: search in_ POST: /v1/search/resource/generic - request_headers: - content-type: application/json data: in: id: @@ -64,8 +57,6 @@ tests: - name: search like created_by_project_id POST: /v1/search/resource/generic - request_headers: - content-type: application/json data: eq: created_by_project_id: @@ -75,15 +66,11 @@ tests: - name: search in_ query string POST: /v1/search/resource/generic?filter=id%20in%20%5Bfaef212f-0bf4-4030-a461-2186fef79be0%2C%20df7e5e75-6a1d-4ff7-85cb-38eb9d75da7e%5D - request_headers: - content-type: application/json response_json_paths: $.`len`: 2 - name: search empty query POST: /v1/search/resource/generic - request_headers: - content-type: application/json data: {} response_json_paths: $.`len`: 2 diff --git a/gnocchi/tests/functional/gabbits/transformedids.yaml b/gnocchi/tests/functional/gabbits/transformedids.yaml index cc544f11..08a6238b 100644 --- a/gnocchi/tests/functional/gabbits/transformedids.yaml +++ b/gnocchi/tests/functional/gabbits/transformedids.yaml @@ -8,9 +8,9 @@ fixtures: defaults: request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json + # User foobar + authorization: "basic Zm9vYmFyOg==" + content-type: application/json 
tests: @@ -22,7 +22,8 @@ tests: desc: for later use POST: /v1/archive_policy request_headers: - x-roles: admin + # User admin + authorization: "basic YWRtaW46" data: name: medium definition: @@ -41,8 +42,7 @@ tests: archive_policy_name: medium status: 201 response_json_paths: - created_by_user_id: 0fbb231484614b1a80131fc22f6afc9c - created_by_project_id: f3d41b770cc14f0bb94a1d5be9c0e3ea + creator: foobar response_headers: # is a UUID location: /v1/resource/generic/[a-f0-9-]{36}/ @@ -72,8 +72,7 @@ tests: - name: post new resource non uuid again different user POST: /v1/resource/generic request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9b - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea + authorization: "basic cGFzdGE6" data: id: generic zero metrics: @@ -81,8 +80,7 @@ tests: archive_policy_name: medium status: 201 response_json_paths: - created_by_user_id: 0fbb231484614b1a80131fc22f6afc9b - created_by_project_id: f3d41b770cc14f0bb94a1d5be9c0e3ea + creator: pasta response_headers: # is a UUID location: /v1/resource/generic/[a-f0-9-]{36}/ @@ -98,8 +96,7 @@ tests: archive_policy_name: medium status: 201 response_json_paths: - created_by_user_id: 0fbb231484614b1a80131fc22f6afc9c - created_by_project_id: f3d41b770cc14f0bb94a1d5be9c0e3ea + creator: foobar response_headers: # is a UUID location: /v1/resource/generic/[a-f0-9-]{36}/ -- GitLab From 42c213f13d90e68d6a51b449c11807d1cd10b4c3 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Mon, 12 Jun 2017 17:21:44 +0200 Subject: [PATCH 0806/1483] Rename gabbi_prefix to gabbi There are no 2 types of Gabbi tests anymore. 
--- .testr.conf | 2 +- .../tests/functional/{test_gabbi_prefix.py => test_gabbi.py} | 0 2 files changed, 1 insertion(+), 1 deletion(-) rename gnocchi/tests/functional/{test_gabbi_prefix.py => test_gabbi.py} (100%) diff --git a/.testr.conf b/.testr.conf index c274843c..482699ff 100644 --- a/.testr.conf +++ b/.testr.conf @@ -2,4 +2,4 @@ test_command=OS_STDOUT_CAPTURE=${OS_STDOUT_CAPTURE:-1} OS_STDERR_CAPTURE=${OS_STDERR_CAPTURE:-1} OS_TEST_TIMEOUT=${OS_TEST_TIMEOUT:-60} ${PYTHON:-python} -m subunit.run discover -t . ${OS_TEST_PATH:-gnocchi/tests} $LISTOPT $IDOPTION test_id_option=--load-list $IDFILE test_list_option=--list -group_regex=(gabbi\.suitemaker\.test_gabbi((_prefix_|_live_|_)([^_]+)))_ +group_regex=(gabbi\.suitemaker\.test_gabbi((_live_|_)([^_]+)))_ diff --git a/gnocchi/tests/functional/test_gabbi_prefix.py b/gnocchi/tests/functional/test_gabbi.py similarity index 100% rename from gnocchi/tests/functional/test_gabbi_prefix.py rename to gnocchi/tests/functional/test_gabbi.py -- GitLab From 81a87efbba77131dc264736ed98b9904a25ea7af Mon Sep 17 00:00:00 2001 From: gord chung Date: Thu, 25 May 2017 21:55:22 +0000 Subject: [PATCH 0807/1483] catch error trying to change sack on pre-gnocchi4 storage if storage hasn't been upgraded to gnocchiv4, it will fail with obscure message. a TypeError is because storage hasn't been upgraded. --- gnocchi/cli.py | 6 +++++- gnocchi/storage/incoming/__init__.py | 4 ++++ gnocchi/storage/incoming/_carbonara.py | 2 +- 3 files changed, 10 insertions(+), 2 deletions(-) diff --git a/gnocchi/cli.py b/gnocchi/cli.py index 93192c76..4939451f 100644 --- a/gnocchi/cli.py +++ b/gnocchi/cli.py @@ -86,7 +86,11 @@ def change_sack_size(): ]) conf = service.prepare_service(conf=conf) s = storage.get_incoming_driver(conf.incoming) - report = s.measures_report(details=False) + try: + report = s.measures_report(details=False) + except incoming.SackDetectionError: + # issue is already logged by NUM_SACKS, abort. 
+ return remainder = report['summary']['measures'] if remainder: LOG.error('Cannot change sack when non-empty backlog. Process ' diff --git a/gnocchi/storage/incoming/__init__.py b/gnocchi/storage/incoming/__init__.py index 34dcd1c0..12d7d102 100644 --- a/gnocchi/storage/incoming/__init__.py +++ b/gnocchi/storage/incoming/__init__.py @@ -22,6 +22,10 @@ class ReportGenerationError(Exception): pass +class SackDetectionError(Exception): + pass + + class StorageDriver(object): @staticmethod diff --git a/gnocchi/storage/incoming/_carbonara.py b/gnocchi/storage/incoming/_carbonara.py index ff7eb4ea..ee6ddcc2 100644 --- a/gnocchi/storage/incoming/_carbonara.py +++ b/gnocchi/storage/incoming/_carbonara.py @@ -46,7 +46,7 @@ class CarbonaraBasedStorage(incoming.StorageDriver): except Exception as e: LOG.error('Unable to detect the number of storage sacks. ' 'Ensure gnocchi-upgrade has been executed: %s', e) - raise + raise incoming.SackDetectionError(e) return self._num_sacks def get_sack_prefix(self, num_sacks=None): -- GitLab From cba74ba1bcb4c20949a19477f7c631e8f958bba9 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Mon, 12 Jun 2017 11:03:09 +0200 Subject: [PATCH 0808/1483] Remove oslotest dependency Oslotest is OpenStack specific and we actually don't use any of its feature except stdout redirection and log silencing, which is really provided by `fixtures' and `daiquiri'. Remove it and use testtools directly. This avoids having OS_* variables used everywhere and makes sure we don't pull dependencies we don't need (mox3, os-client-config, etc). 
--- .testr.conf | 2 +- gnocchi/tests/base.py | 49 ++++++++++++++----- gnocchi/tests/functional/fixtures.py | 24 ++++----- .../indexer/sqlalchemy/test_migrations.py | 4 +- gnocchi/tests/test_archive_policy.py | 3 +- gnocchi/tests/test_bin.py | 2 +- gnocchi/tests/test_carbonara.py | 2 +- gnocchi/tests/test_storage.py | 3 +- run-func-tests.sh | 2 +- setup.cfg | 1 - tox.ini | 6 +-- 11 files changed, 59 insertions(+), 39 deletions(-) diff --git a/.testr.conf b/.testr.conf index 482699ff..6e2e4a5e 100644 --- a/.testr.conf +++ b/.testr.conf @@ -1,5 +1,5 @@ [DEFAULT] -test_command=OS_STDOUT_CAPTURE=${OS_STDOUT_CAPTURE:-1} OS_STDERR_CAPTURE=${OS_STDERR_CAPTURE:-1} OS_TEST_TIMEOUT=${OS_TEST_TIMEOUT:-60} ${PYTHON:-python} -m subunit.run discover -t . ${OS_TEST_PATH:-gnocchi/tests} $LISTOPT $IDOPTION +test_command=${PYTHON:-python} -m subunit.run discover -t . ${GNOCCHI_TEST_PATH:-gnocchi/tests} $LISTOPT $IDOPTION test_id_option=--load-list $IDFILE test_list_option=--list group_regex=(gabbi\.suitemaker\.test_gabbi((_live_|_)([^_]+)))_ diff --git a/gnocchi/tests/base.py b/gnocchi/tests/base.py index 53016f59..e4a1e1a0 100644 --- a/gnocchi/tests/base.py +++ b/gnocchi/tests/base.py @@ -20,10 +20,8 @@ import subprocess import threading import uuid +import daiquiri import fixtures -from oslotest import base -from oslotest import log -from oslotest import output import six from six.moves.urllib.parse import unquote try: @@ -175,8 +173,40 @@ class FakeSwiftClient(object): return {}, None +class CaptureOutput(fixtures.Fixture): + """Optionally capture the output streams. + + .. py:attribute:: stdout + + The ``stream`` attribute from a :class:`StringStream` instance + replacing stdout. + + .. py:attribute:: stderr + + The ``stream`` attribute from a :class:`StringStream` instance + replacing stderr. 
+ + """ + + def setUp(self): + super(CaptureOutput, self).setUp() + self._stdout_fixture = fixtures.StringStream('stdout') + self.stdout = self.useFixture(self._stdout_fixture).stream + self.useFixture(fixtures.MonkeyPatch('sys.stdout', self.stdout)) + self._stderr_fixture = fixtures.StringStream('stderr') + self.stderr = self.useFixture(self._stderr_fixture).stream + self.useFixture(fixtures.MonkeyPatch('sys.stderr', self.stderr)) + + +class BaseTestCase(testcase.TestCase): + def setUp(self): + super(BaseTestCase, self).setUp() + if not os.getenv("GNOCCHI_TEST_DEBUG"): + self.useFixture(CaptureOutput()) + + @six.add_metaclass(SkipNotImplementedMeta) -class TestCase(base.BaseTestCase): +class TestCase(BaseTestCase): REDIS_DB_INDEX = 0 REDIS_DB_LOCK = threading.Lock() @@ -234,16 +264,11 @@ class TestCase(base.BaseTestCase): @classmethod def setUpClass(self): super(TestCase, self).setUpClass() - - # NOTE(sileht): oslotest does this in setUp() but we - # need it here - self.output = output.CaptureOutput() - self.output.setUp() - self.log = log.ConfigureLogging() - self.log.setUp() - self.conf = service.prepare_service([], default_config_files=[]) + if not os.getenv("GNOCCHI_TEST_DEBUG"): + daiquiri.setup(outputs=[]) + py_root = os.path.abspath(os.path.join(os.path.dirname(__file__), '..',)) self.conf.set_override('paste_config', diff --git a/gnocchi/tests/functional/fixtures.py b/gnocchi/tests/functional/fixtures.py index 1df4fb3c..13b7ebbd 100644 --- a/gnocchi/tests/functional/fixtures.py +++ b/gnocchi/tests/functional/fixtures.py @@ -1,5 +1,5 @@ # -# Copyright 2015 Red Hat. All Rights Reserved. +# Copyright 2015-2017 Red Hat. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain @@ -22,11 +22,10 @@ import time from unittest import case import warnings +import daiquiri from gabbi import fixture from oslo_config import cfg from oslo_middleware import cors -from oslotest import log -from oslotest import output import sqlalchemy_utils from gnocchi import indexer @@ -34,9 +33,9 @@ from gnocchi.indexer import sqlalchemy from gnocchi.rest import app from gnocchi import service from gnocchi import storage +from gnocchi.tests import base from gnocchi.tests import utils - # NOTE(chdent): Hack to restore semblance of global configuration to # pass to the WSGI app used per test suite. LOAD_APP_KWARGS are the olso # configuration, and the pecan application configuration of @@ -68,14 +67,12 @@ class ConfigFixture(fixture.GabbiFixture): def start_fixture(self): """Create necessary temp files and do the config dance.""" - - self.output = output.CaptureOutput() - self.output.setUp() - self.log = log.ConfigureLogging() - self.log.setUp() - global LOAD_APP_KWARGS + if not os.getenv("GNOCCHI_TEST_DEBUG"): + self.output = base.CaptureOutput() + self.output.setUp() + data_tmp_dir = tempfile.mkdtemp(prefix='gnocchi') if os.getenv("GABBI_LIVE"): @@ -84,6 +81,9 @@ class ConfigFixture(fixture.GabbiFixture): dcf = [] conf = service.prepare_service([], default_config_files=dcf) + if not os.getenv("GNOCCHI_TEST_DEBUG"): + daiquiri.setup(outputs=[]) + py_root = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..',)) conf.set_override('paste_config', @@ -159,8 +159,8 @@ class ConfigFixture(fixture.GabbiFixture): shutil.rmtree(self.tmp_dir) self.conf.reset() - self.output.cleanUp() - self.log.cleanUp() + if not os.getenv("GNOCCHI_TEST_DEBUG"): + self.output.cleanUp() class MetricdThread(threading.Thread): diff --git a/gnocchi/tests/indexer/sqlalchemy/test_migrations.py b/gnocchi/tests/indexer/sqlalchemy/test_migrations.py index 781236fd..7e1cafee 100644 --- a/gnocchi/tests/indexer/sqlalchemy/test_migrations.py +++ 
b/gnocchi/tests/indexer/sqlalchemy/test_migrations.py @@ -37,11 +37,9 @@ class ModelsMigrationsSync( base.TestCase, test_migrations.ModelsMigrationsSync)): - def _set_timeout(self): - self.useFixture(fixtures.Timeout(120, gentle=True)) - def setUp(self): super(ModelsMigrationsSync, self).setUp() + self.useFixture(fixtures.Timeout(120, gentle=True)) self.db = mock.Mock() self.conf.set_override( 'url', diff --git a/gnocchi/tests/test_archive_policy.py b/gnocchi/tests/test_archive_policy.py index da90e70e..38922be3 100644 --- a/gnocchi/tests/test_archive_policy.py +++ b/gnocchi/tests/test_archive_policy.py @@ -11,10 +11,9 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. -from oslotest import base - from gnocchi import archive_policy from gnocchi import service +from gnocchi.tests import base class TestArchivePolicy(base.BaseTestCase): diff --git a/gnocchi/tests/test_bin.py b/gnocchi/tests/test_bin.py index e70bb865..4616fd5a 100644 --- a/gnocchi/tests/test_bin.py +++ b/gnocchi/tests/test_bin.py @@ -15,7 +15,7 @@ # under the License. 
import subprocess -from oslotest import base +from gnocchi.tests import base class BinTestCase(base.BaseTestCase): diff --git a/gnocchi/tests/test_carbonara.py b/gnocchi/tests/test_carbonara.py index 0ec0d4aa..e886c900 100644 --- a/gnocchi/tests/test_carbonara.py +++ b/gnocchi/tests/test_carbonara.py @@ -19,11 +19,11 @@ import math import fixtures import iso8601 -from oslotest import base import pandas import six from gnocchi import carbonara +from gnocchi.tests import base class TestBoundTimeSerie(base.BaseTestCase): diff --git a/gnocchi/tests/test_storage.py b/gnocchi/tests/test_storage.py index f89728d5..d39746ee 100644 --- a/gnocchi/tests/test_storage.py +++ b/gnocchi/tests/test_storage.py @@ -18,7 +18,6 @@ import uuid import iso8601 import mock -from oslotest import base import six.moves from gnocchi import archive_policy @@ -971,7 +970,7 @@ class TestStorageDriver(tests_base.TestCase): resample=3600)) -class TestMeasureQuery(base.BaseTestCase): +class TestMeasureQuery(tests_base.TestCase): def test_equal(self): q = storage.MeasureQuery({"=": 4}) self.assertTrue(q(4)) diff --git a/run-func-tests.sh b/run-func-tests.sh index cf28931d..f6395076 100755 --- a/run-func-tests.sh +++ b/run-func-tests.sh @@ -44,7 +44,7 @@ for storage in ${GNOCCHI_TEST_STORAGE_DRIVERS}; do export GNOCCHI_SERVICE_TOKEN="" # Just make gabbi happy export GNOCCHI_AUTHORIZATION="basic YWRtaW46" # admin in base64 - export OS_TEST_PATH=gnocchi/tests/functional_live + export GNOCCHI_TEST_PATH=gnocchi/tests/functional_live pifpaf -e GNOCCHI run gnocchi --indexer-url $INDEXER_URL --storage-url $STORAGE_URL --coordination-driver redis -- ./tools/pretty_tox.sh $* cleanup diff --git a/setup.cfg b/setup.cfg index ad6845a4..9840196e 100644 --- a/setup.cfg +++ b/setup.cfg @@ -69,7 +69,6 @@ test = coverage>=3.6 fixtures mock - oslotest python-subunit>=0.0.18 os-testr testrepository diff --git a/tox.ini b/tox.ini index f795a637..714a14fa 100644 --- a/tox.ini +++ b/tox.ini @@ -5,7 +5,7 @@ envlist = 
py{35,27}-{postgresql,mysql}{,-file,-swift,-ceph,-s3},pep8 [testenv] usedevelop = True sitepackages = False -passenv = LANG OS_DEBUG OS_TEST_TIMEOUT OS_STDOUT_CAPTURE OS_STDERR_CAPTURE OS_LOG_CAPTURE GNOCCHI_TEST_* AWS_* +passenv = LANG GNOCCHI_TEST_* AWS_* setenv = GNOCCHI_TEST_STORAGE_DRIVER=file GNOCCHI_TEST_INDEXER_DRIVER=postgresql @@ -85,7 +85,7 @@ commands = flake8 bashate -v devstack/plugin.sh [testenv:py27-gate] -setenv = OS_TEST_PATH=gnocchi/tests/functional_live +setenv = GNOCCHI_TEST_PATH=gnocchi/tests/functional_live GABBI_LIVE=1 passenv = {[testenv]passenv} GNOCCHI_SERVICE* GNOCCHI_AUTHORIZATION sitepackages = True @@ -95,7 +95,7 @@ commands = {toxinidir}/tools/pretty_tox.sh '{posargs}' # This target provides a shortcut to running just the gabbi tests. [testenv:py27-gabbi] deps = .[test,postgresql,file] -setenv = OS_TEST_PATH=gnocchi/tests/functional +setenv = GNOCCHI_TEST_PATH=gnocchi/tests/functional basepython = python2.7 commands = pifpaf -g GNOCCHI_INDEXER_URL run postgresql -- {toxinidir}/tools/pretty_tox.sh '{posargs}' -- GitLab From 3c51ab85a1fb4466c20d5ab073d51918e12cf331 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Tue, 13 Jun 2017 15:55:11 +0200 Subject: [PATCH 0809/1483] tests: don't use wait without pid Without pid, wait always return 0. This change tracks pids to ensure we have the return code of the background jobs. 
--- run-tests.sh | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/run-tests.sh b/run-tests.sh index 606f6375..5f16b78c 100755 --- a/run-tests.sh +++ b/run-tests.sh @@ -1,5 +1,6 @@ #!/bin/bash -x set -e +PIDS="" GNOCCHI_TEST_STORAGE_DRIVERS=${GNOCCHI_TEST_STORAGE_DRIVERS:-file} GNOCCHI_TEST_INDEXER_DRIVERS=${GNOCCHI_TEST_INDEXER_DRIVERS:-postgresql} for storage in ${GNOCCHI_TEST_STORAGE_DRIVERS} @@ -7,7 +8,7 @@ do export GNOCCHI_TEST_STORAGE_DRIVER=$storage for indexer in ${GNOCCHI_TEST_INDEXER_DRIVERS} do - ( + { case $GNOCCHI_TEST_STORAGE_DRIVER in ceph|redis) pifpaf run $GNOCCHI_TEST_STORAGE_DRIVER -- pifpaf -g GNOCCHI_INDEXER_URL run $indexer -- ./tools/pretty_tox.sh $* @@ -29,10 +30,15 @@ do ;; esac # NOTE(sileht): Start all storage tests at once - ) & + } & + PIDS="$PIDS $!" done - # NOTE(sileht): Wait all storage tests - wait + # NOTE(sileht): Wait all storage tests, we tracks pid + # because wait without pid always return 0 + for pid in $PIDS; do + wait $pid + done + PIDS="" # TODO(sileht): the output can be a mess with this # Create a less verbose testrun output (with dot like nose ?) # merge all subunit output and print it in after_script in travis -- GitLab From 3347d172598f93a543765c3a50d99e14e9fb0732 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Wed, 14 Jun 2017 00:59:48 +0200 Subject: [PATCH 0810/1483] doc: add release note for 4.0 Fixes #107 --- doc/source/releasenotes/4.0.rst | 6 ++++++ doc/source/releasenotes/index.rst | 1 + 2 files changed, 7 insertions(+) create mode 100644 doc/source/releasenotes/4.0.rst diff --git a/doc/source/releasenotes/4.0.rst b/doc/source/releasenotes/4.0.rst new file mode 100644 index 00000000..8e290057 --- /dev/null +++ b/doc/source/releasenotes/4.0.rst @@ -0,0 +1,6 @@ +=================================== + 4.0 Series Release Notes +=================================== + +.. 
release-notes:: + :branch: origin/stable/4.0 diff --git a/doc/source/releasenotes/index.rst b/doc/source/releasenotes/index.rst index 9b4032fa..44677b75 100644 --- a/doc/source/releasenotes/index.rst +++ b/doc/source/releasenotes/index.rst @@ -5,6 +5,7 @@ Release Notes :maxdepth: 2 unreleased + 4.0 3.1 3.0 2.2 -- GitLab From 5f634d94ed593466f4fcca2f22376a468ff76986 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Wed, 14 Jun 2017 15:28:48 +0200 Subject: [PATCH 0811/1483] ceph: change default timeout The default ceph timeout is unlimited. For metricd this is not really an issue. But for API, that makes connection stuck until ceph comeback. This change uses a default to ensure we return an error 500 by default, and not hang. This also change the type on the option, rados except string. --- gnocchi/storage/ceph.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/gnocchi/storage/ceph.py b/gnocchi/storage/ceph.py index ca5b2809..620155a4 100644 --- a/gnocchi/storage/ceph.py +++ b/gnocchi/storage/ceph.py @@ -29,7 +29,9 @@ OPTS = [ help='Ceph username (ie: admin without "client." prefix).'), cfg.StrOpt('ceph_secret', help='Ceph key', secret=True), cfg.StrOpt('ceph_keyring', help='Ceph keyring path.'), - cfg.IntOpt('ceph_timeout', help='Ceph connection timeout'), + cfg.StrOpt('ceph_timeout', + default="30", + help='Ceph connection timeout in seconds'), cfg.StrOpt('ceph_conffile', default='/etc/ceph/ceph.conf', help='Ceph configuration file.'), -- GitLab From 4f2e485f059833cd118bb07d0066a477436aae85 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Wed, 14 Jun 2017 08:33:32 +0200 Subject: [PATCH 0812/1483] Don't generate releasenotes of old branches/tags This change puts a redirect to the master releases notes page on old version of the documentation. 
--- gnocchi/gendoc.py | 32 ++++++++++++++++++++++++++++++-- 1 file changed, 30 insertions(+), 2 deletions(-) diff --git a/gnocchi/gendoc.py b/gnocchi/gendoc.py index 7b9a8a11..9586fbd2 100644 --- a/gnocchi/gendoc.py +++ b/gnocchi/gendoc.py @@ -107,6 +107,7 @@ class ScenarioList(list): multiversion_hack = """ +import subprocess import sys import os @@ -114,6 +115,32 @@ srcdir = os.path.join("%s", "..", "..") os.chdir(srcdir) sys.path.insert(0, srcdir) +version = sys.argv[1] + +# NOTE(sileht): We delete releasenotes from old documentation +# only master will have it. +if os.path.exists("doc/source/releasenotes/index.rst.backup"): + os.remove("doc/source/releasenotes/index.rst") + os.rename("doc/source/releasenotes/index.rst.backup", + "doc/source/releasenotes/index.rst") + +if version not in ["", "master"] and os.path.exists("releasenotes"): + os.rename("doc/source/releasenotes/index.rst", + "doc/source/releasenotes/index.rst.backup") + with open("doc/source/releasenotes/index.rst", "w") as f: + f.write(\"\"\" +Release Notes +============= + +Releases notes can be found `here `_ + +.. 
raw:: html + + + + +\"\"\") + class FakeApp(object): def info(self, *args, **kwasrgs): pass @@ -136,11 +163,12 @@ def setup(app): if sys.argv[0].endswith("sphinx-versioning"): subprocess.call(["dropdb", os.environ['PGDATABASE']]) subprocess.call(["createdb", os.environ['PGDATABASE']]) - + from sphinxcontrib.versioning import sphinx_ + version = sphinx_.EventHandlers.CURRENT_VERSION with tempfile.NamedTemporaryFile() as f: f.write(multiversion_hack % app.confdir) f.flush() - subprocess.call(['python', f.name]) + subprocess.call(['python', f.name, version]) _RUN = True return -- GitLab From 151fbded793157363b8b8ec95df17da0e7e61e65 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Wed, 14 Jun 2017 19:13:49 +0200 Subject: [PATCH 0813/1483] doc: Add missing oslotest for old docs --- tox.ini | 1 + 1 file changed, 1 insertion(+) diff --git a/tox.ini b/tox.ini index 714a14fa..28ec65c5 100644 --- a/tox.ini +++ b/tox.ini @@ -135,6 +135,7 @@ deps = {[testenv:docs]deps} pytimeparse retrying # for 3.x doc + oslotest oslosphinx commands = pifpaf -g GNOCCHI_INDEXER_URL run postgresql -- sphinx-versioning build doc/source doc/build/html -- GitLab From a3052b5e0a0108fb92bed4e762fea9539514f1ee Mon Sep 17 00:00:00 2001 From: gord chung Date: Wed, 14 Jun 2017 19:22:48 +0000 Subject: [PATCH 0814/1483] dynamically set bin path leverage GNOCCHI_BIN_DIR like every other place. 
--- devstack/plugin.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/devstack/plugin.sh b/devstack/plugin.sh index ad38b60c..71724be9 100644 --- a/devstack/plugin.sh +++ b/devstack/plugin.sh @@ -283,7 +283,7 @@ function configure_gnocchi { rm -f "$GNOCCHI_UWSGI_FILE" iniset "$GNOCCHI_UWSGI_FILE" uwsgi http $GNOCCHI_SERVICE_HOST:$GNOCCHI_SERVICE_PORT - iniset "$GNOCCHI_UWSGI_FILE" uwsgi wsgi-file "/usr/local/bin/gnocchi-api" + iniset "$GNOCCHI_UWSGI_FILE" uwsgi wsgi-file "$GNOCCHI_BIN_DIR/gnocchi-api" # This is running standalone iniset "$GNOCCHI_UWSGI_FILE" uwsgi master true # Set die-on-term & exit-on-reload so that uwsgi shuts down -- GitLab From 233f78ed1e7cf387bcaf56e945bb1a96bf0c3517 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Wed, 14 Jun 2017 15:28:48 +0200 Subject: [PATCH 0815/1483] ceph: change default timeout The default ceph timeout is unlimited. For metricd this is not really an issue. But for API, that makes connection stuck until ceph comeback. This change uses a default to ensure we return an error 500 by default, and not hang. This also change the type on the option, rados except string. (cherry picked from commit 5f634d94ed593466f4fcca2f22376a468ff76986) --- gnocchi/storage/ceph.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/gnocchi/storage/ceph.py b/gnocchi/storage/ceph.py index ca5b2809..620155a4 100644 --- a/gnocchi/storage/ceph.py +++ b/gnocchi/storage/ceph.py @@ -29,7 +29,9 @@ OPTS = [ help='Ceph username (ie: admin without "client." 
prefix).'), cfg.StrOpt('ceph_secret', help='Ceph key', secret=True), cfg.StrOpt('ceph_keyring', help='Ceph keyring path.'), - cfg.IntOpt('ceph_timeout', help='Ceph connection timeout'), + cfg.StrOpt('ceph_timeout', + default="30", + help='Ceph connection timeout in seconds'), cfg.StrOpt('ceph_conffile', default='/etc/ceph/ceph.conf', help='Ceph configuration file.'), -- GitLab From 309f55ac853380edefd7b202820ecfe7c5a9d2f2 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Fri, 16 Jun 2017 11:58:35 +0200 Subject: [PATCH 0816/1483] Remove workaround Gnocchi 3.1 and alembic in upgrade testing --- tox.ini | 4 ---- 1 file changed, 4 deletions(-) diff --git a/tox.ini b/tox.ini index 28ec65c5..e89d3d48 100644 --- a/tox.ini +++ b/tox.ini @@ -50,14 +50,12 @@ commands = [testenv:py35-postgresql-file-upgrade-from-3.1] # We should always recreate since the script upgrade # Gnocchi we can't reuse the virtualenv -# FIXME(sileht): We set alembic version until next Gnocchi 3.1 is released envdir = upgrade recreate = True skip_install = True usedevelop = False setenv = GNOCCHI_VARIANT=test,postgresql,file deps = gnocchi[{env:GNOCCHI_VARIANT}]>=3.1,<3.2 - alembic<0.9.0 pifpaf>=0.13 gnocchiclient>=2.8.0 commands = pifpaf --env-prefix INDEXER run postgresql {toxinidir}/run-upgrade-tests.sh {posargs} @@ -65,14 +63,12 @@ commands = pifpaf --env-prefix INDEXER run postgresql {toxinidir}/run-upgrade-te [testenv:py27-mysql-ceph-upgrade-from-3.1] # We should always recreate since the script upgrade # Gnocchi we can't reuse the virtualenv -# FIXME(sileht): We set alembic version until next Gnocchi 3.1 is released envdir = upgrade recreate = True skip_install = True usedevelop = False setenv = GNOCCHI_VARIANT=test,mysql,ceph,ceph_recommended_lib deps = gnocchi[{env:GNOCCHI_VARIANT}]>=3.1,<3.2 - alembic<0.9.0 gnocchiclient>=2.8.0 pifpaf[ceph]>=0.13 commands = pifpaf --env-prefix INDEXER run mysql -- pifpaf --env-prefix STORAGE run ceph {toxinidir}/run-upgrade-tests.sh {posargs} -- GitLab From 
cb93f31443987f8b979a40ac6014a4b0e205d76f Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Fri, 16 Jun 2017 11:39:20 +0200 Subject: [PATCH 0817/1483] doc: mention gnocchi-docker in install options --- doc/source/install.rst | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/doc/source/install.rst b/doc/source/install.rst index 0f6fa2c7..43ccdd80 100644 --- a/doc/source/install.rst +++ b/doc/source/install.rst @@ -187,6 +187,15 @@ Run the following: and `gnocchi-statsd` daemons +Installation using Docker +========================= +The `gnocchi-docker repository`_ contains the needed Dockerfile and script to +build a Docker image containing Gnocchi latest version (fetched from PyPI). It +also provides an example of docker-compose file in order to run a full +deployment on Gnocchi (indexer and storage included). + +.. _gnocchi-docker repository: https://github.com/gnocchixyz/gnocchi-docker + Installation Using Devstack =========================== -- GitLab From 4da3928d7cbad8feda2d0a123fe516bb3812c045 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Wed, 14 Jun 2017 17:32:25 +0200 Subject: [PATCH 0818/1483] rest: make sure we use an absolute path for api-paste file --- gnocchi/rest/app.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/gnocchi/rest/app.py b/gnocchi/rest/app.py index ff91f5b2..2ae58361 100644 --- a/gnocchi/rest/app.py +++ b/gnocchi/rest/app.py @@ -101,7 +101,8 @@ def load_app(conf, indexer=None, storage=None, if cfg_path is None or not os.path.exists(cfg_path): LOG.debug("No api-paste configuration file found! 
Using default.") - cfg_path = pkg_resources.resource_filename(__name__, "api-paste.ini") + cfg_path = os.path.abspath(pkg_resources.resource_filename( + __name__, "api-paste.ini")) config = dict(conf=conf, indexer=indexer, storage=storage, not_implemented_middleware=not_implemented_middleware) -- GitLab From 9aefc01af59c18a0a7bf8714adf6ed9bdcb85d0b Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Wed, 14 Jun 2017 16:56:37 +0200 Subject: [PATCH 0819/1483] tests: hide useless rados output Each test print some useless message not catched by the stdout/stderr fixture. This change hides them --- gnocchi/tests/base.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/gnocchi/tests/base.py b/gnocchi/tests/base.py index e4a1e1a0..eb41ce43 100644 --- a/gnocchi/tests/base.py +++ b/gnocchi/tests/base.py @@ -336,8 +336,10 @@ class TestCase(BaseTestCase): 'storage') elif self.conf.storage.driver == 'ceph': pool_name = uuid.uuid4().hex - subprocess.call("rados -c %s mkpool %s" % ( - os.getenv("CEPH_CONF"), pool_name), shell=True) + with open(os.devnull, 'w') as f: + subprocess.call("rados -c %s mkpool %s" % ( + os.getenv("CEPH_CONF"), pool_name), shell=True, + stdout=f, stderr=subprocess.STDOUT) self.conf.set_override('ceph_pool', pool_name, 'storage') # Override the bucket prefix to be unique to avoid concurrent access -- GitLab From 8cff0dde1d83dfc97b35891b5ba796de1864e19e Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Wed, 14 Jun 2017 17:01:53 +0200 Subject: [PATCH 0820/1483] test: don't print the whole config file --- gnocchi/tests/test_bin.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/gnocchi/tests/test_bin.py b/gnocchi/tests/test_bin.py index 4616fd5a..e802f8f0 100644 --- a/gnocchi/tests/test_bin.py +++ b/gnocchi/tests/test_bin.py @@ -13,6 +13,7 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
+import os import subprocess from gnocchi.tests import base @@ -20,5 +21,6 @@ from gnocchi.tests import base class BinTestCase(base.BaseTestCase): def test_gnocchi_config_generator_run(self): - subp = subprocess.Popen(['gnocchi-config-generator']) + with open(os.devnull, 'w') as f: + subp = subprocess.Popen(['gnocchi-config-generator'], stdout=f) self.assertEqual(0, subp.wait()) -- GitLab From 5bf61dd8cab3ae8a83be679f44878d1ca1a39213 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Fri, 16 Jun 2017 17:45:04 +0200 Subject: [PATCH 0821/1483] Add upgrade directory to gitignore This directory is created by tox when running one of the upgrade test. --- .gitignore | 1 + 1 file changed, 1 insertion(+) diff --git a/.gitignore b/.gitignore index 0562170d..56381f3e 100644 --- a/.gitignore +++ b/.gitignore @@ -11,3 +11,4 @@ releasenotes/build cover .coverage dist +upgrade/ -- GitLab From b98a31c106e2a00cf37caf52ed7c765b0ef7830e Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Fri, 16 Jun 2017 11:57:53 +0200 Subject: [PATCH 0822/1483] Test upgrades from 4.0 --- .travis.yml | 2 ++ tox.ini | 26 ++++++++++++++++++++++++++ 2 files changed, 28 insertions(+) diff --git a/.travis.yml b/.travis.yml index 7a6d7ea1..8b0782cb 100644 --- a/.travis.yml +++ b/.travis.yml @@ -14,6 +14,8 @@ env: - TARGET: py27-mysql-ceph-upgrade-from-3.1 - TARGET: py35-postgresql-file-upgrade-from-3.1 + - TARGET: py27-mysql-ceph-upgrade-from-4.0 + - TARGET: py35-postgresql-file-upgrade-from-4.0 - TARGET: py27-mysql - TARGET: py35-mysql diff --git a/tox.ini b/tox.ini index e89d3d48..554a5e7d 100644 --- a/tox.ini +++ b/tox.ini @@ -73,6 +73,32 @@ deps = gnocchi[{env:GNOCCHI_VARIANT}]>=3.1,<3.2 pifpaf[ceph]>=0.13 commands = pifpaf --env-prefix INDEXER run mysql -- pifpaf --env-prefix STORAGE run ceph {toxinidir}/run-upgrade-tests.sh {posargs} +[testenv:py35-postgresql-file-upgrade-from-4.0] +# We should always recreate since the script upgrade +# Gnocchi we can't reuse the virtualenv +envdir = upgrade 
+recreate = True +skip_install = True +usedevelop = False +setenv = GNOCCHI_VARIANT=test,postgresql,file +deps = gnocchi[{env:GNOCCHI_VARIANT}]>=4.0,<4.1 + pifpaf>=0.13 + gnocchiclient>=2.8.0 +commands = pifpaf --env-prefix INDEXER run postgresql {toxinidir}/run-upgrade-tests.sh {posargs} + +[testenv:py27-mysql-ceph-upgrade-from-4.0] +# We should always recreate since the script upgrade +# Gnocchi we can't reuse the virtualenv +envdir = upgrade +recreate = True +skip_install = True +usedevelop = False +setenv = GNOCCHI_VARIANT=test,mysql,ceph,ceph_recommended_lib +deps = gnocchi[{env:GNOCCHI_VARIANT}]>=4.0,<4.1 + gnocchiclient>=2.8.0 + pifpaf[ceph]>=0.13 +commands = pifpaf --env-prefix INDEXER run mysql -- pifpaf --env-prefix STORAGE run ceph {toxinidir}/run-upgrade-tests.sh {posargs} + [testenv:pep8] deps = hacking>=0.12,<0.13 bashate -- GitLab From 6d03a69325d89675c3163bba391a80d18098130c Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Fri, 16 Jun 2017 17:42:06 +0200 Subject: [PATCH 0823/1483] doc: improve various doc and add comparison table - Rename running to operating - Mention aggregation methods in archive policies - Add a comparison table --- README.rst | 8 +-- doc/source/architecture.rst | 6 +-- doc/source/comparison-table.rst | 21 ++++++++ doc/source/index.rst | 61 ++++++++++++++++++++++- doc/source/{running.rst => operating.rst} | 21 +++++--- tox.ini | 7 ++- 6 files changed, 108 insertions(+), 16 deletions(-) create mode 100644 doc/source/comparison-table.rst rename doc/source/{running.rst => operating.rst} (93%) diff --git a/README.rst b/README.rst index 1bbd8925..55040f83 100644 --- a/README.rst +++ b/README.rst @@ -11,10 +11,10 @@ .. image:: doc/source/_static/gnocchi-logo.png -Gnocchi is a multi-tenant timeseries, metrics and resources database. It -provides an `HTTP REST`_ interface to create and manipulate the data. It is -designed to store metrics at a very large scale while providing access to -metrics and resources information and history. 
+Gnocchi is an open-source, multi-tenant timeseries, metrics and resources +database. It provides an `HTTP REST`_ interface to create and manipulate the +data. It is designed to store metrics at a very large scale while providing +access to metrics and resources information and history. You can read the full documentation online at http://gnocchi.xyz. diff --git a/doc/source/architecture.rst b/doc/source/architecture.rst index 9b7b4f9c..455c7dd4 100755 --- a/doc/source/architecture.rst +++ b/doc/source/architecture.rst @@ -35,10 +35,10 @@ The *indexer* is responsible for storing the index of all resources, archive policies and metrics, along with their definitions, types and properties. The indexer is also responsible for linking resources with metrics. -Available storage back-ends -~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Available incoming and storage back-ends +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -Gnocchi currently offers different storage drivers: +Gnocchi currently offers different incoming and storage drivers: * File (default) * `Ceph`_ (preferred) diff --git a/doc/source/comparison-table.rst b/doc/source/comparison-table.rst new file mode 100644 index 00000000..82d3f1e8 --- /dev/null +++ b/doc/source/comparison-table.rst @@ -0,0 +1,21 @@ ++------------------+-------------------------------------------------------------------+------------+------------------+----------+-----------+ +| Features | Gnocchi | Prometheus | InfluxDB | OpenTSDB | Graphite | ++==================+===================================================================+============+==================+==========+===========+ +| Metric polling | No | Yes | No | No | No | ++------------------+-------------------------------------------------------------------+------------+------------------+----------+-----------+ +| Resource history | Yes | No | No | No | No | 
++------------------+-------------------------------------------------------------------+------------+------------------+----------+-----------+ +| Multi-tenant | Yes | No | No | No | No | ++------------------+-------------------------------------------------------------------+------------+------------------+----------+-----------+ +| Query interface | REST API | REST API | HTTP | TCP | None | ++------------------+-------------------------------------------------------------------+------------+------------------+----------+-----------+ +| High-available | Yes | No | With *Relay* | Yes | No | ++------------------+-------------------------------------------------------------------+------------+------------------+----------+-----------+ +| Scalable | Yes | No | Commercial only | Yes | No | ++------------------+-------------------------------------------------------------------+------------+------------------+----------+-----------+ +| Alerting | No (`roadmap `_) | Yes | With *Kapacitor* | No | No | ++------------------+-------------------------------------------------------------------+------------+------------------+----------+-----------+ +| Grafana support | Yes | Yes | Yes | Yes | Yes | ++------------------+-------------------------------------------------------------------+------------+------------------+----------+-----------+ +| collectd support | Yes | Yes | Yes | Yes | Yes | ++------------------+-------------------------------------------------------------------+------------+------------------+----------+-----------+ diff --git a/doc/source/index.rst b/doc/source/index.rst index 85f284e2..17d29332 100644 --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -8,6 +8,8 @@ Gnocchi – Metric as a Service Key Features ------------ +Gnocchi's main features are: + - HTTP REST interface - Horizontal scalability - Metric aggregation @@ -49,6 +51,63 @@ More information are available on `Julien's blog post on Gnocchi .. 
_`OpenStack Ceilometer`: https://docs.openstack.org/developer/ceilometer/ + + +Comparisons To Alternatives +--------------------------- + +The following table summarises feature comparison between different existing +open source time series database. More details are written below, if needed. + +.. include:: comparison-table.rst + +Gnocchi vs Prometheus +~~~~~~~~~~~~~~~~~~~~~ +`Prometheus `_ is a full-featured solution that +includes everything from polling the metrics to storing and archiving them. It +offers advanced features such as alerting. + +In comparison, Gnocchi does not offer polling has it prefers to leverage +existing solutions (e.g. `collectd `_). However, it +provides high-availability and horizontal scalablity as well as multi-tenancy. + + +Gnocchi vs InfluxDB +~~~~~~~~~~~~~~~~~~~ + +`InfluxDB `_ is a time series database storing metrics +into local files. It offers a variety of input protocol support and created its +own query language, InfluxQL, inspired from SQL. The HTTP API it offers is just +a way to pass InfluxQL over the wire. Horizontal scalability is only provided +in the commercial version. The data model is based on time series with labels +associated to it. + +In comparison, Gnocchi offers scalability and multi-tenancy. Its data model +differs as it does not provide labels, but resources to attach to metrics. + +Gnocchi vs OpenTSDB +~~~~~~~~~~~~~~~~~~~ + +`OpenTSDB `_ is a distributed time series database that +uses `Hadoop `_ and `HBase +`_ to store its data. That makes it easy to scale +horizontally. However, its querying feature are rather simple. + +In comparison, Gnocchi offers a proper query language with more features. The +usage of Hadoop might be a show-stopper for many as it's quite heavy to deploy +and operate. + +Gnocchi vs Graphite +~~~~~~~~~~~~~~~~~~~ + +`Graphite `_ is essentially a data +metric storage composed of flat files (Whisper), and focuses on rendering those +timeseries. 
Each timeseries stored is composed of points that are stored +regularly and are related to the current date and time. + +In comparison, Gnocchi offers much more scalability, a better file format and +no relativity to the current time and date. + Documentation ------------- @@ -57,7 +116,7 @@ Documentation architecture install - running + operating client rest statsd diff --git a/doc/source/running.rst b/doc/source/operating.rst similarity index 93% rename from doc/source/running.rst rename to doc/source/operating.rst index 48c437ca..7b5168e2 100644 --- a/doc/source/running.rst +++ b/doc/source/operating.rst @@ -48,7 +48,10 @@ Once written to `/etc/gnocchi/uwsgi.ini`, it can be launched this way:: How to define archive policies ============================== -In Gnocchi, the archive policy definitions are expressed in number of points. +In Gnocchi, the archive policies define how the metrics are aggregated and how +long they are stored. Each archive policy definition is expressed as the number +of points over a timespan. + If your archive policy defines a policy of 10 points with a granularity of 1 second, the time series archive will keep up to 10 seconds, each representing an aggregation over 1 second. This means the time series will at maximum retain @@ -77,13 +80,16 @@ it will consume. Therefore, creating an archive policy with 2 definitons (e.g. consume twice CPU than just one definition (e.g. just 1 second granularity for 1 day). +Each archive policy also defines which aggregation methods will be used. The +default is set to `default_aggregation_methods` which is by default set to +*mean*, *min*, *max*, *sum*, *std*, *count*. + Default archive policies ======================== By default, 3 archive policies are created when calling `gnocchi-upgrade`: *low*, *medium* and *high*. The name both describes the storage space and CPU -usage needs. They use `default_aggregation_methods` which is by default set to -*mean*, *min*, *max*, *sum*, *std*, *count*. 
+usage needs. A fourth archive policy named `bool` is also provided by default and is designed to store only boolean values (i.e. 0 and 1). It only stores one data @@ -228,9 +234,12 @@ How to monitor Gnocchi The `/v1/status` endpoint of the HTTP API returns various information, such as the number of measures to process (measures backlog), which you can easily -monitor (see `How many metricd workers do we need to run`_). Making sure that -the HTTP server and `gnocchi-metricd` daemon are running and are not writing -anything alarming in their logs is a sign of good health of the overall system. +monitor (see `How many metricd workers do we need to run`_). The Gnocchi client +can show this output by running `gnocchi status`. + +Making sure that the HTTP server and `gnocchi-metricd` daemon are running and +are not writing anything alarming in their logs is a sign of good health of the +overall system. Total measures for backlog status may not accurately reflect the number of points to be processed when measures are submitted via batch. 
diff --git a/tox.ini b/tox.ini index 554a5e7d..5fb517ec 100644 --- a/tox.ini +++ b/tox.ini @@ -43,7 +43,7 @@ setenv = deps = .[test,redis,{env:GNOCCHI_STORAGE_DEPS:},{env:GNOCCHI_INDEXER_DEPS:}] {env:GNOCCHI_TEST_TARBALLS:} commands = - doc8 --ignore-path doc/source/rest.rst doc/source + doc8 doc/source {toxinidir}/run-tests.sh {posargs} {toxinidir}/run-func-tests.sh {posargs} @@ -145,7 +145,7 @@ basepython = python2.7 # .[postgresql,doc] # setenv = GNOCCHI_STORAGE_DEPS=file deps = .[test,file,postgresql,doc] -commands = doc8 --ignore-path doc/source/rest.rst doc/source +commands = doc8 --ignore-path doc/source/rest.rst,doc/source/comparison-table.rst doc/source pifpaf -g GNOCCHI_INDEXER_URL run postgresql -- python setup.py build_sphinx -W [testenv:docs-gnocchi.xyz] @@ -161,3 +161,6 @@ deps = {[testenv:docs]deps} oslosphinx commands = pifpaf -g GNOCCHI_INDEXER_URL run postgresql -- sphinx-versioning build doc/source doc/build/html + +[doc8] +ignore-path = doc/source/rest.rst,doc/source/comparison-table.rst -- GitLab From fb2dcf67d1dd1837862fe84caf7937de6288632f Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Wed, 21 Jun 2017 21:51:41 +0200 Subject: [PATCH 0824/1483] Fix Travis new image permissions Docker cannot create files otherwise since it uses a different UID/GID. 
--- .travis.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/.travis.yml b/.travis.yml index 8b0782cb..5d4295c1 100644 --- a/.travis.yml +++ b/.travis.yml @@ -35,6 +35,9 @@ before_script: esac install: - docker pull gnocchixyz/ci-tools:latest + # Allow docker to write in home directory + - chmod -R o+w ~/ + - find ~/ -type d -exec chmod o+x {} \; script: - docker run -v ~/.cache/pip:/home/tester/.cache/pip -v $(pwd):/home/tester/src gnocchixyz/ci-tools:latest tox -e ${TARGET} -- GitLab From 64e1b81d5590ce2b9ff3f99d3e6725c532a2b8e9 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Thu, 22 Jun 2017 18:08:20 +0200 Subject: [PATCH 0825/1483] rest: move _carbonara.SackLockTimeoutError to storage gnocchi.rest actually expects storage.SackLockTimeoutError so it currently is not caught. --- gnocchi/storage/__init__.py | 4 ++++ gnocchi/storage/_carbonara.py | 6 +----- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/gnocchi/storage/__init__.py b/gnocchi/storage/__init__.py index 62613255..d548b19b 100644 --- a/gnocchi/storage/__init__.py +++ b/gnocchi/storage/__init__.py @@ -32,6 +32,10 @@ OPTS = [ LOG = daiquiri.getLogger(__name__) +class SackLockTimeoutError(Exception): + pass + + class Measure(object): def __init__(self, timestamp, value): self.timestamp = timestamp diff --git a/gnocchi/storage/_carbonara.py b/gnocchi/storage/_carbonara.py index 56b4d60d..b036c91b 100644 --- a/gnocchi/storage/_carbonara.py +++ b/gnocchi/storage/_carbonara.py @@ -53,10 +53,6 @@ class CorruptionError(ValueError): super(CorruptionError, self).__init__(message) -class SackLockTimeoutError(Exception): - pass - - class CarbonaraBasedStorage(storage.StorageDriver): def __init__(self, conf, incoming, coord=None): @@ -361,7 +357,7 @@ class CarbonaraBasedStorage(storage.StorageDriver): s = self.incoming.sack_for_metric(metric.id) lock = self.incoming.get_sack_lock(self.coord, s) if not lock.acquire(blocking=timeout): - raise SackLockTimeoutError( + raise 
storage.SackLockTimeoutError( 'Unable to refresh metric: %s. Metric is locked. ' 'Please try again.' % metric.id) try: -- GitLab From 3bc91ac7250ecabd9573137b392a8c0628ace7c9 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Thu, 22 Jun 2017 13:03:43 +0200 Subject: [PATCH 0826/1483] cli: fix close_services() order close_services() closes e.g. the coordinator in the MetricProcessor. But currently it is close before the shutdown is complete, and it's possible that the processing of metrics requires the coordinator. On shutdown, this can lead to a backtrace such as: Traceback (most recent call last): File "/usr/local/lib/python2.7/site-packages/cotyledon/_utils.py", line 84, in exit_on_exception yield File "/usr/local/lib/python2.7/site-packages/cotyledon/_service.py", line 139, in _run self.run() File "/Users/jd/Source/gnocchi/gnocchi/cli.py", line 134, in run self._run_job() File "/Users/jd/Source/gnocchi/gnocchi/cli.py", line 242, in _run_job lock = in_store.get_sack_lock(self.coord, s) File "/Users/jd/Source/gnocchi/gnocchi/storage/incoming/_carbonara.py", line 77, in get_sack_lock return coord.get_lock(lock_name) File "/Users/jd/Source/tooz/tooz/drivers/redis.py", line 377, in get_lock return RedisLock(self, self._client, name, self.lock_timeout) File "/Users/jd/Source/tooz/tooz/drivers/redis.py", line 65, in __init__ self._lock = client.lock(name, AttributeError: 'NoneType' object has no attribute 'lock' In order to fix it, make sure we close the coordinator when the MetricProcessor has shut down. 
--- gnocchi/cli.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/gnocchi/cli.py b/gnocchi/cli.py index 4939451f..48a199f8 100644 --- a/gnocchi/cli.py +++ b/gnocchi/cli.py @@ -133,9 +133,9 @@ class MetricProcessBase(cotyledon.Service): def terminate(self): self._shutdown.set() - self.close_services() LOG.info("Waiting ongoing metric processing to finish") self._shutdown_done.wait() + self.close_services() @staticmethod def close_services(): -- GitLab From 38477d4d9d36f653f560d2d18fba53225a278653 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Tue, 27 Jun 2017 17:20:23 +0200 Subject: [PATCH 0827/1483] Install gnocchi flavor of pifpaf So we have uwsgi pulled. --- setup.cfg | 2 +- tox.ini | 8 ++++---- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/setup.cfg b/setup.cfg index 9840196e..e6501837 100644 --- a/setup.cfg +++ b/setup.cfg @@ -64,7 +64,7 @@ doc = Jinja2 reno>=1.6.2 test = - pifpaf[ceph]>=1.0.1 + pifpaf[ceph,gnocchi]>=1.0.1 gabbi>=1.30.0 coverage>=3.6 fixtures diff --git a/tox.ini b/tox.ini index 5fb517ec..feb3e98b 100644 --- a/tox.ini +++ b/tox.ini @@ -56,7 +56,7 @@ skip_install = True usedevelop = False setenv = GNOCCHI_VARIANT=test,postgresql,file deps = gnocchi[{env:GNOCCHI_VARIANT}]>=3.1,<3.2 - pifpaf>=0.13 + pifpaf[gnocchi]>=0.13 gnocchiclient>=2.8.0 commands = pifpaf --env-prefix INDEXER run postgresql {toxinidir}/run-upgrade-tests.sh {posargs} @@ -70,7 +70,7 @@ usedevelop = False setenv = GNOCCHI_VARIANT=test,mysql,ceph,ceph_recommended_lib deps = gnocchi[{env:GNOCCHI_VARIANT}]>=3.1,<3.2 gnocchiclient>=2.8.0 - pifpaf[ceph]>=0.13 + pifpaf[ceph,gnocchi]>=0.13 commands = pifpaf --env-prefix INDEXER run mysql -- pifpaf --env-prefix STORAGE run ceph {toxinidir}/run-upgrade-tests.sh {posargs} [testenv:py35-postgresql-file-upgrade-from-4.0] @@ -82,7 +82,7 @@ skip_install = True usedevelop = False setenv = GNOCCHI_VARIANT=test,postgresql,file deps = gnocchi[{env:GNOCCHI_VARIANT}]>=4.0,<4.1 - pifpaf>=0.13 + 
pifpaf[gnocchi]>=0.13 gnocchiclient>=2.8.0 commands = pifpaf --env-prefix INDEXER run postgresql {toxinidir}/run-upgrade-tests.sh {posargs} @@ -96,7 +96,7 @@ usedevelop = False setenv = GNOCCHI_VARIANT=test,mysql,ceph,ceph_recommended_lib deps = gnocchi[{env:GNOCCHI_VARIANT}]>=4.0,<4.1 gnocchiclient>=2.8.0 - pifpaf[ceph]>=0.13 + pifpaf[ceph,gnocchi]>=0.13 commands = pifpaf --env-prefix INDEXER run mysql -- pifpaf --env-prefix STORAGE run ceph {toxinidir}/run-upgrade-tests.sh {posargs} [testenv:pep8] -- GitLab From 0ff791ed0aca6f03503ab24e12ae9246c71253c6 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Fri, 23 Jun 2017 10:41:40 +0200 Subject: [PATCH 0828/1483] tox: don't specify envdir Putting `envdir = upgrade` makes tox create an upgrade in the middle of the source directory, which is not convenient/standard and gets scanned by e.g. flake8. Let's each job use its own default directory. --- tox.ini | 4 ---- 1 file changed, 4 deletions(-) diff --git a/tox.ini b/tox.ini index feb3e98b..8c82137a 100644 --- a/tox.ini +++ b/tox.ini @@ -50,7 +50,6 @@ commands = [testenv:py35-postgresql-file-upgrade-from-3.1] # We should always recreate since the script upgrade # Gnocchi we can't reuse the virtualenv -envdir = upgrade recreate = True skip_install = True usedevelop = False @@ -63,7 +62,6 @@ commands = pifpaf --env-prefix INDEXER run postgresql {toxinidir}/run-upgrade-te [testenv:py27-mysql-ceph-upgrade-from-3.1] # We should always recreate since the script upgrade # Gnocchi we can't reuse the virtualenv -envdir = upgrade recreate = True skip_install = True usedevelop = False @@ -76,7 +74,6 @@ commands = pifpaf --env-prefix INDEXER run mysql -- pifpaf --env-prefix STORAGE [testenv:py35-postgresql-file-upgrade-from-4.0] # We should always recreate since the script upgrade # Gnocchi we can't reuse the virtualenv -envdir = upgrade recreate = True skip_install = True usedevelop = False @@ -89,7 +86,6 @@ commands = pifpaf --env-prefix INDEXER run postgresql 
{toxinidir}/run-upgrade-te [testenv:py27-mysql-ceph-upgrade-from-4.0] # We should always recreate since the script upgrade # Gnocchi we can't reuse the virtualenv -envdir = upgrade recreate = True skip_install = True usedevelop = False -- GitLab From 9de811e6b1bf250a1893af37357bb61ec0041c2c Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Mon, 26 Jun 2017 14:38:42 +0200 Subject: [PATCH 0829/1483] devstack: remove deprecated $GNOCCHI_USE_MOD_WSGI --- devstack/plugin.sh | 7 ------- devstack/settings | 2 +- 2 files changed, 1 insertion(+), 8 deletions(-) diff --git a/devstack/plugin.sh b/devstack/plugin.sh index 71724be9..c04bcca5 100644 --- a/devstack/plugin.sh +++ b/devstack/plugin.sh @@ -40,13 +40,6 @@ if [ -z "$GNOCCHI_DEPLOY" ]; then # Fallback to common wsgi devstack configuration if [ "$ENABLE_HTTPD_MOD_WSGI_SERVICES" == "True" ]; then GNOCCHI_DEPLOY=mod_wsgi - - # Deprecated config - elif [ -n "$GNOCCHI_USE_MOD_WSGI" ] ; then - echo_summary "GNOCCHI_USE_MOD_WSGI is deprecated, use GNOCCHI_DEPLOY instead" - if [ "$GNOCCHI_USE_MOD_WSGI" == True ]; then - GNOCCHI_DEPLOY=mod_wsgi - fi fi fi diff --git a/devstack/settings b/devstack/settings index 17c03f4f..68ca66b8 100644 --- a/devstack/settings +++ b/devstack/settings @@ -17,7 +17,7 @@ GNOCCHI_METRICD_PROCESSING_DELAY=${GNOCCHI_METRICD_PROCESSING_DELAY:-5} # - mod_wsgi : Run Gnocchi under Apache HTTPd mod_wsgi # - simple : Run gnocchi-api # - uwsgi : Run Gnocchi under uwsgi -# - : Fallback to GNOCCHI_USE_MOD_WSGI or ENABLE_HTTPD_MOD_WSGI_SERVICES +# - : Fallback to ENABLE_HTTPD_MOD_WSGI_SERVICES GNOCCHI_DEPLOY=${GNOCCHI_DEPLOY} # Toggle for deploying Gnocchi with/without Keystone -- GitLab From dd70bab25ef8339645190976446d55886404d972 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Mon, 26 Jun 2017 14:36:23 +0200 Subject: [PATCH 0830/1483] devstack: stop using deprecated WSGI file --- devstack/plugin.sh | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/devstack/plugin.sh 
b/devstack/plugin.sh index 71724be9..c990d764 100644 --- a/devstack/plugin.sh +++ b/devstack/plugin.sh @@ -157,14 +157,11 @@ function _gnocchi_install_grafana { } function _cleanup_gnocchi_apache_wsgi { - sudo rm -f $GNOCCHI_WSGI_DIR/*.wsgi sudo rm -f $(apache_site_config_for gnocchi) } # _config_gnocchi_apache_wsgi() - Set WSGI config files of Gnocchi function _config_gnocchi_apache_wsgi { - sudo mkdir -p $GNOCCHI_WSGI_DIR - local gnocchi_apache_conf=$(apache_site_config_for gnocchi) local venv_path="" local script_name=$GNOCCHI_SERVICE_PREFIX @@ -173,9 +170,6 @@ function _config_gnocchi_apache_wsgi { venv_path="python-path=${PROJECT_VENV["gnocchi"]}/lib/$(python_version)/site-packages" fi - # copy wsgi file - sudo cp $GNOCCHI_DIR/gnocchi/rest/app.wsgi $GNOCCHI_WSGI_DIR/ - # Only run the API on a custom PORT if it has been specifically # asked for. if [[ -n $GNOCCHI_SERVICE_PORT ]]; then @@ -191,7 +185,7 @@ function _config_gnocchi_apache_wsgi { fi sudo sed -e " s|%APACHE_NAME%|$APACHE_NAME|g; - s|%WSGI%|$GNOCCHI_WSGI_DIR/app.wsgi|g; + s|%WSGI%|$GNOCCHI_BIN_DIR/gnocchi-api|g; s|%USER%|$STACK_USER|g s|%APIWORKERS%|$API_WORKERS|g s|%VIRTUALENV%|$venv_path|g -- GitLab From cef3396908947b6a8437476c899e6d5cec48a6d5 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Mon, 26 Jun 2017 14:36:44 +0200 Subject: [PATCH 0831/1483] Remove deprecated app.wsgi --- gnocchi/rest/app.wsgi | 29 ----------------------------- 1 file changed, 29 deletions(-) delete mode 100644 gnocchi/rest/app.wsgi diff --git a/gnocchi/rest/app.wsgi b/gnocchi/rest/app.wsgi deleted file mode 100644 index 475d9acb..00000000 --- a/gnocchi/rest/app.wsgi +++ /dev/null @@ -1,29 +0,0 @@ -# -# Copyright 2014 eNovance -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Use this file for deploying the API under mod_wsgi. - -See http://pecan.readthedocs.org/en/latest/deployment.html for details. -""" - -import debtcollector - -from gnocchi.rest import app - -application = app.build_wsgi_app() -debtcollector.deprecate(prefix="The wsgi script gnocchi/rest/app.wsgi is deprecated", - postfix=", please use gnocchi-api binary as wsgi script instead", - version="4.0", removal_version="4.1", - category=RuntimeWarning) -- GitLab From f7e65a38cdcde5e88519e4ef720a472e447bb808 Mon Sep 17 00:00:00 2001 From: Jaime Alvarez Date: Fri, 16 Jun 2017 09:36:24 +0000 Subject: [PATCH 0832/1483] Improve indexer and storage INFO logs --- gnocchi/indexer/sqlalchemy.py | 3 +++ gnocchi/storage/ceph.py | 4 ++++ gnocchi/storage/file.py | 3 +++ gnocchi/storage/redis.py | 3 +++ gnocchi/storage/s3.py | 3 +++ gnocchi/storage/swift.py | 3 +++ gnocchi/tests/test_indexer.py | 4 ++++ gnocchi/tests/test_storage.py | 22 ++++++++++++++++++++++ 8 files changed, 45 insertions(+) diff --git a/gnocchi/indexer/sqlalchemy.py b/gnocchi/indexer/sqlalchemy.py index ecad2b65..4651c3e4 100644 --- a/gnocchi/indexer/sqlalchemy.py +++ b/gnocchi/indexer/sqlalchemy.py @@ -288,6 +288,9 @@ class SQLAlchemyIndexer(indexer.IndexerDriver): self.conf = conf self.facade = PerInstanceFacade(conf) + def __str__(self): + return "%s: %s" % (self.__class__.__name__, self.conf.indexer.url) + def disconnect(self): self.facade.dispose() diff --git a/gnocchi/storage/ceph.py b/gnocchi/storage/ceph.py index 620155a4..0036aaa9 100644 --- a/gnocchi/storage/ceph.py +++ 
b/gnocchi/storage/ceph.py @@ -47,6 +47,10 @@ class CephStorage(_carbonara.CarbonaraBasedStorage): super(CephStorage, self).__init__(conf, incoming, coord) self.rados, self.ioctx = ceph.create_rados_connection(conf) + def __str__(self): + # Use cluster ID for now + return "%s: %s" % (self.__class__.__name__, self.rados.get_fsid()) + def stop(self): ceph.close_rados_connection(self.rados, self.ioctx) super(CephStorage, self).stop() diff --git a/gnocchi/storage/file.py b/gnocchi/storage/file.py index 11cfad89..c96e6d8d 100644 --- a/gnocchi/storage/file.py +++ b/gnocchi/storage/file.py @@ -42,6 +42,9 @@ class FileStorage(_carbonara.CarbonaraBasedStorage): self.basepath_tmp = os.path.join(self.basepath, 'tmp') utils.ensure_paths([self.basepath_tmp]) + def __str__(self): + return "%s: %s" % (self.__class__.__name__, str(self.basepath)) + def _atomic_file_store(self, dest, data): tmpfile = tempfile.NamedTemporaryFile( prefix='gnocchi', dir=self.basepath_tmp, diff --git a/gnocchi/storage/redis.py b/gnocchi/storage/redis.py index 843827a2..7e0ad04b 100644 --- a/gnocchi/storage/redis.py +++ b/gnocchi/storage/redis.py @@ -37,6 +37,9 @@ class RedisStorage(_carbonara.CarbonaraBasedStorage): super(RedisStorage, self).__init__(conf, incoming, coord) self._client = redis.get_client(conf) + def __str__(self): + return "%s: %s" % (self.__class__.__name__, self._client) + def _metric_key(self, metric): return redis.SEP.join([self.STORAGE_PREFIX, str(metric.id)]) diff --git a/gnocchi/storage/s3.py b/gnocchi/storage/s3.py index 26149b5d..2234c71e 100644 --- a/gnocchi/storage/s3.py +++ b/gnocchi/storage/s3.py @@ -75,6 +75,9 @@ class S3Storage(_carbonara.CarbonaraBasedStorage): else: self._consistency_stop = None + def __str__(self): + return "%s: %s" % (self.__class__.__name__, self._bucket_name) + def upgrade(self, num_sacks): super(S3Storage, self).upgrade(num_sacks) try: diff --git a/gnocchi/storage/swift.py b/gnocchi/storage/swift.py index ae67b7d5..74898e19 100644 --- 
a/gnocchi/storage/swift.py +++ b/gnocchi/storage/swift.py @@ -73,6 +73,9 @@ class SwiftStorage(_carbonara.CarbonaraBasedStorage): self.swift = swift.get_connection(conf) self._container_prefix = conf.swift_container_prefix + def __str__(self): + return "%s: %s" % (self.__class__.__name__, self._container_prefix) + def _container_name(self, metric): return '%s.%s' % (self._container_prefix, str(metric.id)) diff --git a/gnocchi/tests/test_indexer.py b/gnocchi/tests/test_indexer.py index f6a29263..c027dc06 100644 --- a/gnocchi/tests/test_indexer.py +++ b/gnocchi/tests/test_indexer.py @@ -37,6 +37,10 @@ class TestIndexer(tests_base.TestCase): class TestIndexerDriver(tests_base.TestCase): + def test_str(self): + self.assertEqual("%s: %s" % (self.index.__class__.__name__, + self.conf.indexer.url), str(self.index)) + def test_create_archive_policy_already_exists(self): # NOTE(jd) This archive policy # is created by gnocchi.tests on setUp() :) diff --git a/gnocchi/tests/test_storage.py b/gnocchi/tests/test_storage.py index d39746ee..65ca635b 100644 --- a/gnocchi/tests/test_storage.py +++ b/gnocchi/tests/test_storage.py @@ -25,6 +25,11 @@ from gnocchi import carbonara from gnocchi import indexer from gnocchi import storage from gnocchi.storage import _carbonara +from gnocchi.storage import ceph +from gnocchi.storage import file +from gnocchi.storage import redis +from gnocchi.storage import s3 +from gnocchi.storage import swift from gnocchi.tests import base as tests_base from gnocchi.tests import utils as tests_utils from gnocchi import utils @@ -43,6 +48,23 @@ class TestStorageDriver(tests_base.TestCase): archive_policy_name) return m, m_sql + def test_driver_str(self): + driver = storage.get_driver(self.conf) + + if isinstance(driver, file.FileStorage): + s = driver.basepath + elif isinstance(driver, ceph.CephStorage): + s = driver.rados.get_fsid() + elif isinstance(driver, redis.RedisStorage): + s = driver._client + elif isinstance(driver, s3.S3Storage): + s = 
driver._bucket_name + elif isinstance(driver, swift.SwiftStorage): + s = driver._container_prefix + + self.assertEqual(str(driver), "%s: %s" % ( + driver.__class__.__name__, s)) + def trigger_processing(self, metrics=None): if metrics is None: metrics = [str(self.metric.id)] -- GitLab From 051593c2d908aad597b42161aedd0d976c81a944 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Wed, 28 Jun 2017 08:18:42 +0200 Subject: [PATCH 0833/1483] statsd: remove deprecated user/project ids opts --- devstack/plugin.sh | 3 +-- devstack/settings | 3 +-- gnocchi/opts.py | 9 --------- 3 files changed, 2 insertions(+), 13 deletions(-) diff --git a/devstack/plugin.sh b/devstack/plugin.sh index c04bcca5..c05e3635 100644 --- a/devstack/plugin.sh +++ b/devstack/plugin.sh @@ -226,8 +226,7 @@ function configure_gnocchi { if is_service_enabled gnocchi-statsd ; then iniset $GNOCCHI_CONF statsd resource_id $GNOCCHI_STATSD_RESOURCE_ID - iniset $GNOCCHI_CONF statsd project_id $GNOCCHI_STATSD_PROJECT_ID - iniset $GNOCCHI_CONF statsd user_id $GNOCCHI_STATSD_USER_ID + iniset $GNOCCHI_CONF statsd creator $GNOCCHI_STATSD_CREATOR fi # Configure the storage driver diff --git a/devstack/settings b/devstack/settings index 68ca66b8..d7033b40 100644 --- a/devstack/settings +++ b/devstack/settings @@ -41,8 +41,7 @@ GNOCCHI_SERVICE_HOST=${GNOCCHI_SERVICE_HOST:-${SERVICE_HOST}} # Gnocchi statsd info GNOCCHI_STATSD_RESOURCE_ID=${GNOCCHI_STATSD_RESOURCE_ID:-$(uuidgen)} -GNOCCHI_STATSD_USER_ID=${GNOCCHI_STATSD_USER_ID:-$(uuidgen)} -GNOCCHI_STATSD_PROJECT_ID=${GNOCCHI_STATSD_PROJECT_ID:-$(uuidgen)} +GNOCCHI_STATSD_CREATOR=${GNOCCHI_STATSD_CREATOR:-admin} # Ceph gnocchi info GNOCCHI_CEPH_USER=${GNOCCHI_CEPH_USER:-gnocchi} diff --git a/gnocchi/opts.py b/gnocchi/opts.py index 092f3d95..3bd0d6f6 100644 --- a/gnocchi/opts.py +++ b/gnocchi/opts.py @@ -175,17 +175,8 @@ def list_opts(): 'resource_id', type=uuid.UUID, help='Resource UUID to use to identify statsd in Gnocchi'), - cfg.StrOpt( - 'user_id', - 
deprecated_for_removal=True, - help='User ID to use to identify statsd in Gnocchi'), - cfg.StrOpt( - 'project_id', - deprecated_for_removal=True, - help='Project ID to use to identify statsd in Gnocchi'), cfg.StrOpt( 'creator', - default="${statsd.user_id}:${statsd.project_id}", help='Creator value to use to identify statsd in Gnocchi'), cfg.StrOpt( 'archive_policy_name', -- GitLab From 7a9d08687088fc7690be3f76197fc556ea5dff66 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Thu, 15 Jun 2017 16:05:41 +0200 Subject: [PATCH 0834/1483] doc: mention gnocchi-openshift as an install solution --- doc/source/install.rst | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/doc/source/install.rst b/doc/source/install.rst index 43ccdd80..f54f9b90 100644 --- a/doc/source/install.rst +++ b/doc/source/install.rst @@ -196,6 +196,13 @@ deployment on Gnocchi (indexer and storage included). .. _gnocchi-docker repository: https://github.com/gnocchixyz/gnocchi-docker +Installation using OpenShift +============================ +The `gnocchi-openshift repository`_ contains the needed Dockerfile and script +to build a Docker image containing Gnocchi latest version (fetched from PyPI). + +.. _gnocchi-openshift repository: https://github.com/gnocchixyz/gnocchi-openshift + Installation Using Devstack =========================== -- GitLab From 41117a833a9b198f97287270d953e3edde6c88c0 Mon Sep 17 00:00:00 2001 From: gord chung Date: Mon, 12 Jun 2017 20:40:29 +0000 Subject: [PATCH 0835/1483] batch GET metric interface support ability to get multiple metrics based on id. 
returns result with format matching: {'metric_id1': [[timestamp, granularity, value]...], 'metric_id2': [[timestamp, granularity, value]...]} Fixes: #87 --- doc/source/rest.j2 | 4 + doc/source/rest.yaml | 4 + gnocchi/rest/__init__.py | 59 +++++++++++ .../functional/gabbits/batch-measures.yaml | 97 ++++++++++++++++++- .../bulk-metric-get-e2b298478b679087.yaml | 10 ++ 5 files changed, 171 insertions(+), 3 deletions(-) create mode 100644 releasenotes/notes/bulk-metric-get-e2b298478b679087.yaml diff --git a/doc/source/rest.j2 b/doc/source/rest.j2 index 82b619a9..d0e2d8bc 100644 --- a/doc/source/rest.j2 +++ b/doc/source/rest.j2 @@ -138,6 +138,10 @@ try to create them as long as an archive policy rule matches: {{ scenarios['post-measures-batch-named-create']['doc'] }} +Multiple metrics can be retrieved in a single request and returned as +independent series: + +{{ scenarios['get-measures-batch']['doc'] }} Archive Policy ============== diff --git a/doc/source/rest.yaml b/doc/source/rest.yaml index 396576ee..7103e433 100644 --- a/doc/source/rest.yaml +++ b/doc/source/rest.yaml @@ -195,6 +195,10 @@ ] } +- name: get-measures-batch + request: | + GET /v1/batch/metrics/measures?metric={{ scenarios['create-metric']['response'].json['id'] }}&metric={{ scenarios['create-metric-2']['response'].json['id'] }} HTTP/1.1 + - name: search-value-in-metric request: | POST /v1/search/metric?metric_id={{ scenarios['create-metric']['response'].json['id'] }} HTTP/1.1 diff --git a/gnocchi/rest/__init__.py b/gnocchi/rest/__init__.py index 23faee66..bb619843 100644 --- a/gnocchi/rest/__init__.py +++ b/gnocchi/rest/__init__.py @@ -1497,6 +1497,65 @@ class MetricsMeasuresBatchController(rest.RestController): pecan.response.status = 202 + @staticmethod + @pecan.expose('json') + def get_all(**kwargs): + # Check RBAC policy + metric_ids = arg_to_list(kwargs.get('metric', [])) + metrics = pecan.request.indexer.list_metrics(ids=metric_ids) + missing_metric_ids = (set(metric_ids) + - 
set(six.text_type(m.id) for m in metrics)) + if missing_metric_ids: + abort(400, {"cause": "Unknown metrics", + "detail": list(missing_metric_ids)}) + + for metric in metrics: + enforce("get metric", metric) + + start = kwargs.get('start') + if start is not None: + try: + start = utils.to_datetime(start) + except Exception: + abort(400, "Invalid value for start") + + stop = kwargs.get('stop') + if stop is not None: + try: + stop = utils.to_datetime(stop) + except Exception: + abort(400, "Invalid value for stop") + + aggregation = kwargs.get('aggregation', 'mean') + if (aggregation + not in archive_policy.ArchivePolicy.VALID_AGGREGATION_METHODS): + abort( + 400, + 'Invalid aggregation value %s, must be one of %s' + % (aggregation, + archive_policy.ArchivePolicy.VALID_AGGREGATION_METHODS)) + + granularity = kwargs.get('granularity') + if granularity is not None: + try: + granularity = Timespan(granularity) + except ValueError as e: + abort(400, e) + + metric_batch = {} + try: + for metric in metrics: + measures = pecan.request.storage.get_measures( + metric, start, stop, aggregation, granularity) + metric_batch[str(metric.id)] = [ + (timestamp.isoformat(), offset, v) + for timestamp, offset, v in measures] + except (storage.GranularityDoesNotExist, + storage.AggregationDoesNotExist) as e: + abort(404, e) + + return metric_batch + class SearchController(object): resource = SearchResourceController() diff --git a/gnocchi/tests/functional/gabbits/batch-measures.yaml b/gnocchi/tests/functional/gabbits/batch-measures.yaml index 36a7210b..6b4a557b 100644 --- a/gnocchi/tests/functional/gabbits/batch-measures.yaml +++ b/gnocchi/tests/functional/gabbits/batch-measures.yaml @@ -181,6 +181,100 @@ tests: $: - ["2015-03-06T14:34:12+00:00", 1.0, 12.0] + - name: get multiple metrics + GET: /v1/batch/metrics/measures + query_parameters: + metric: + - $HISTORY['list metrics'].$RESPONSE['$[0].id'] + - $HISTORY['list metrics'].$RESPONSE['$[1].id'] + response_json_paths: + $.`len`: 2 + 
response_strings: + - "\"$HISTORY['list metrics'].$RESPONSE['$[0].id']\": [[\"2015-03-06T14:33:57+00:00\", 1.0, 43.1" + - "\"$HISTORY['list metrics'].$RESPONSE['$[1].id']\": [[\"2015-03-06T14:33:57+00:00\", 1.0, 43.1" + + - name: get multiple metrics with start and stop + GET: /v1/batch/metrics/measures + query_parameters: + metric: + - $HISTORY['list metrics'].$RESPONSE['$[0].id'] + - $HISTORY['list metrics'].$RESPONSE['$[1].id'] + start: "2015-03-06T14:34:00" + stop: "2015-03-06T14:35:57" + response_json_paths: + $.`len`: 2 + response_strings: + - "\"$HISTORY['list metrics'].$RESPONSE['$[0].id']\": [[\"2015-03-06T14:34:12+00:00\", 1.0, 12.0]]" + - "\"$HISTORY['list metrics'].$RESPONSE['$[1].id']\": [[\"2015-03-06T14:34:12+00:00\", 1.0, 12.0]]" + + - name: get multiple metrics with granularity + GET: /v1/batch/metrics/measures + query_parameters: + metric: + - $HISTORY['list metrics'].$RESPONSE['$[0].id'] + - $HISTORY['list metrics'].$RESPONSE['$[1].id'] + granularity: 1.0 + response_json_paths: + $.`len`: 2 + response_strings: + - "\"$HISTORY['list metrics'].$RESPONSE['$[0].id']\": [[\"2015-03-06T14:33:57+00:00\", 1.0, 43.1" + - "\"$HISTORY['list metrics'].$RESPONSE['$[1].id']\": [[\"2015-03-06T14:33:57+00:00\", 1.0, 43.1" + + - name: get multiple metrics with aggregation + GET: /v1/batch/metrics/measures + query_parameters: + metric: + - $HISTORY['list metrics'].$RESPONSE['$[0].id'] + - $HISTORY['list metrics'].$RESPONSE['$[1].id'] + aggregation: 'count' + response_json_paths: + $.`len`: 2 + response_strings: + - "\"$HISTORY['list metrics'].$RESPONSE['$[0].id']\": [[\"2015-03-06T14:33:57+00:00\", 1.0, 1.0" + - "\"$HISTORY['list metrics'].$RESPONSE['$[1].id']\": [[\"2015-03-06T14:33:57+00:00\", 1.0, 1.0" + + - name: get multiple metrics with no metrics + GET: /v1/batch/metrics/measures + response_json_paths: + {} + + - name: get multiple metrics with unknown metric + GET: /v1/batch/metrics/measures + request_headers: + accept: application/json + query_parameters: 
+ metric: + - $HISTORY['list metrics'].$RESPONSE['$[0].id'] + - badbadba-d63b-4cdd-be89-111111111111 + status: 400 + response_json_paths: + $.description.cause: "Unknown metrics" + $.description.detail[0]: "badbadba-d63b-4cdd-be89-111111111111" + + - name: get multiple metrics with irrelevant aggregation + GET: /v1/batch/metrics/measures + query_parameters: + metric: + - $HISTORY['list metrics'].$RESPONSE['$[0].id'] + - $HISTORY['list metrics'].$RESPONSE['$[1].id'] + aggregation: 'last' + status: 404 + response_strings: + - "Aggregation method 'last' for metric" + - "does not exist" + + - name: get multiple metrics with irrelevant granularity + GET: /v1/batch/metrics/measures + query_parameters: + metric: + - $HISTORY['list metrics'].$RESPONSE['$[0].id'] + - $HISTORY['list metrics'].$RESPONSE['$[1].id'] + granularity: 12.0 + status: 404 + response_strings: + - "Granularity '12.0' for metric" + - "does not exist" + - name: push measurements to unknown named metrics and resource with create_metrics with uuid resource id POST: /v1/batch/resources/metrics/measures?create_metrics=true request_headers: @@ -198,7 +292,6 @@ tests: value: 43.1 - timestamp: "2015-03-06T14:34:12" value: 12 - status: 400 response_json_paths: $.description.cause: "Unknown resources" @@ -230,7 +323,6 @@ tests: value: 43.1 - timestamp: "2015-03-06T14:34:12" value: 12 - status: 400 response_json_paths: $.description.cause: "Unknown resources" @@ -251,7 +343,6 @@ tests: value: 43.1 - timestamp: "2015-03-06T14:34:12" value: 12 - status: 400 response_json_paths: $.description.cause: "Unknown resources" diff --git a/releasenotes/notes/bulk-metric-get-e2b298478b679087.yaml b/releasenotes/notes/bulk-metric-get-e2b298478b679087.yaml new file mode 100644 index 00000000..51dd9e6a --- /dev/null +++ b/releasenotes/notes/bulk-metric-get-e2b298478b679087.yaml @@ -0,0 +1,10 @@ +--- +features: + - | + Support added to accept GET requests to /v1/batch/metrics/measures to + retrieve multiple metrics in a single 
request. Metric ids should be passed + in as query parameters in the format + /v1/batch/metrics/measures?metric=uuid1&metric=uuid2. Additionally, + ``start``, ``stop``, ``granularity`` and ``aggregation`` parameters may be + passed in for additional filtering. All metrics must support the same + granularity and aggregation if filtering on those attributes. -- GitLab From e0727965f41e92d58ddc17c78e3eb5730e34e113 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Thu, 29 Jun 2017 16:31:52 +0200 Subject: [PATCH 0836/1483] Remove ugly Travis/docker workaround --- .travis.yml | 3 --- 1 file changed, 3 deletions(-) diff --git a/.travis.yml b/.travis.yml index 5d4295c1..8b0782cb 100644 --- a/.travis.yml +++ b/.travis.yml @@ -35,9 +35,6 @@ before_script: esac install: - docker pull gnocchixyz/ci-tools:latest - # Allow docker to write in home directory - - chmod -R o+w ~/ - - find ~/ -type d -exec chmod o+x {} \; script: - docker run -v ~/.cache/pip:/home/tester/.cache/pip -v $(pwd):/home/tester/src gnocchixyz/ci-tools:latest tox -e ${TARGET} -- GitLab From 85c08b4f90d9b9ce0906ae4fe20867526d6c207f Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Thu, 29 Jun 2017 09:32:31 +0200 Subject: [PATCH 0837/1483] rest: return 404 when granularity does not exist The exception is not caught currently, making the REST API return a 500 error. 
Fixes #148 --- gnocchi/rest/__init__.py | 6 +++--- gnocchi/tests/functional/gabbits/aggregation.yaml | 6 ++++++ 2 files changed, 9 insertions(+), 3 deletions(-) diff --git a/gnocchi/rest/__init__.py b/gnocchi/rest/__init__.py index bb619843..852d58b4 100644 --- a/gnocchi/rest/__init__.py +++ b/gnocchi/rest/__init__.py @@ -1722,9 +1722,9 @@ class AggregationController(rest.RestController): except storage.MetricUnaggregatable as e: abort(400, ("One of the metrics being aggregated doesn't have " "matching granularity: %s") % str(e)) - except storage.MetricDoesNotExist as e: - abort(404, e) - except storage.AggregationDoesNotExist as e: + except (storage.MetricDoesNotExist, + storage.GranularityDoesNotExist, + storage.AggregationDoesNotExist) as e: abort(404, e) @pecan.expose('json') diff --git a/gnocchi/tests/functional/gabbits/aggregation.yaml b/gnocchi/tests/functional/gabbits/aggregation.yaml index ee1905c7..7f420377 100644 --- a/gnocchi/tests/functional/gabbits/aggregation.yaml +++ b/gnocchi/tests/functional/gabbits/aggregation.yaml @@ -139,6 +139,12 @@ tests: GET: /v1/aggregation/metric?metric=$HISTORY['get metric list'].$RESPONSE['$[0].id']&metric=$HISTORY['get metric list'].$RESPONSE['$[1].id']&granularity=1&fill=asdf status: 400 + - name: get measure aggregates non existing granularity + desc: https://github.com/gnocchixyz/gnocchi/issues/148 + GET: /v1/aggregation/metric?metric=$HISTORY['get metric list'].$RESPONSE['$[0].id']&granularity=42 + status: 404 + response_strings: + - Granularity '42.0' for metric # Aggregation by resource and metric_name -- GitLab From e749b60f49a4a3b48cc5da67a797f717dd8cd01d Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Tue, 20 Jun 2017 16:36:14 +0200 Subject: [PATCH 0838/1483] utils: use ASCII bytes as member id Tooz actually wants ASCII bytes and not random bytes. 
Fixes #130 --- gnocchi/utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/gnocchi/utils.py b/gnocchi/utils.py index f81d93e0..76667115 100644 --- a/gnocchi/utils.py +++ b/gnocchi/utils.py @@ -90,7 +90,7 @@ def _enable_coordination(coord): def get_coordinator_and_start(url): - my_id = uuid.uuid4().bytes + my_id = str(uuid.uuid4()).encode() coord = coordination.get_coordinator(url, my_id) _enable_coordination(coord) return coord, my_id -- GitLab From 8ace8523e32172e33b422acf031d03a565025215 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Fri, 30 Jun 2017 10:41:01 +0200 Subject: [PATCH 0839/1483] Remove genconfig target We have a dedicated tool that can be run in any standard installation of Gnocchi, not need for a hackish tox target anymore. --- tox.ini | 4 ---- 1 file changed, 4 deletions(-) diff --git a/tox.ini b/tox.ini index 8c82137a..4da17e5e 100644 --- a/tox.ini +++ b/tox.ini @@ -130,10 +130,6 @@ exclude = .tox,.eggs,doc show-source = true enable-extensions = H904 -[testenv:genconfig] -deps = .[mysql,postgresql,test,file,ceph,swift,s3] -commands = gnocchi-config-generator - [testenv:docs] basepython = python2.7 ## This does not work, see: https://github.com/tox-dev/tox/issues/509 -- GitLab From 0e395bae123be6df8c06ce8d514bbabe289ee9f5 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Tue, 27 Jun 2017 22:13:45 +0200 Subject: [PATCH 0840/1483] tests: exit when an error happens with storage setup In run-func-tests, the storage set-up is made using "eval $(pifpaf)". So pifpaf is run in a subprocess and its output is evaluated. But the return value of pifpaf is never checked, the -x setting is not taken into account here. Example: $ eval $(ls /foobar) ls: /foobar: No such file or directory $ echo $? 0 Let's add an explicit check later to see that everything has been started correctly. 
--- run-func-tests.sh | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/run-func-tests.sh b/run-func-tests.sh index f6395076..78a2f88c 100755 --- a/run-func-tests.sh +++ b/run-func-tests.sh @@ -7,6 +7,14 @@ cleanup(){ } trap cleanup EXIT +check_empty_var() { + local x=$(eval echo `echo \\$${1}`) + if [ -z "$x" ]; then + echo "Variable \$${1} is unset" + exit 15 + fi +} + GNOCCHI_TEST_STORAGE_DRIVERS=${GNOCCHI_TEST_STORAGE_DRIVERS:-file} GNOCCHI_TEST_INDEXER_DRIVERS=${GNOCCHI_TEST_INDEXER_DRIVERS:-postgresql} for storage in ${GNOCCHI_TEST_STORAGE_DRIVERS}; do @@ -14,6 +22,7 @@ for storage in ${GNOCCHI_TEST_STORAGE_DRIVERS}; do case $storage in ceph) eval $(pifpaf -e STORAGE run ceph) + check_empty_var STORAGE_URL rados -c $STORAGE_CEPH_CONF mkpool gnocchi STORAGE_URL=ceph://$STORAGE_CEPH_CONF ;; @@ -40,7 +49,10 @@ for storage in ${GNOCCHI_TEST_STORAGE_DRIVERS}; do ;; esac + check_empty_var STORAGE_URL + eval $(pifpaf -e INDEXER run $indexer) + check_empty_var INDEXER_URL export GNOCCHI_SERVICE_TOKEN="" # Just make gabbi happy export GNOCCHI_AUTHORIZATION="basic YWRtaW46" # admin in base64 -- GitLab From fba06c172a34920a4935b12667bc0b544f8811e7 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Mon, 19 Jun 2017 14:11:03 +0200 Subject: [PATCH 0841/1483] doc: disable all tags building in versioning There is little sense in building docs for all tags. The stable branches ought to be enough anyway. Let's skip them entirely. 
--- doc/source/conf.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/source/conf.py b/doc/source/conf.py index 68d7edf5..c7140ec7 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -180,10 +180,10 @@ html_theme_options = { # Multiversion docs scv_sort = ('semver',) scv_show_banner = True -scv_banner_greatest_tag = True +scv_banner_main_ref = 'stable/4.0' scv_priority = 'branches' scv_whitelist_branches = ('master', '^stable/(2\.1|2\.2|[3-9]\.)') -scv_whitelist_tags = ("^[2-9]\.",) +scv_whitelist_tags = ("^$",) here = os.path.dirname(os.path.realpath(__file__)) html_static_path_abs = ",".join([os.path.join(here, p) for p in html_static_path]) -- GitLab From 1c6cee72ea836a844f5948d7026b6719dc4b2dca Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Tue, 27 Jun 2017 17:20:23 +0200 Subject: [PATCH 0842/1483] Install gnocchi flavor of pifpaf So we have uwsgi pulled. (cherry picked from commit 38477d4d9d36f653f560d2d18fba53225a278653) (cherry picked from commit d63a4c46436f6f551888be39204180cb087497f8) --- setup.cfg | 2 +- tox.ini | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/setup.cfg b/setup.cfg index 9840196e..e6501837 100644 --- a/setup.cfg +++ b/setup.cfg @@ -64,7 +64,7 @@ doc = Jinja2 reno>=1.6.2 test = - pifpaf[ceph]>=1.0.1 + pifpaf[ceph,gnocchi]>=1.0.1 gabbi>=1.30.0 coverage>=3.6 fixtures diff --git a/tox.ini b/tox.ini index 714a14fa..e591d0fe 100644 --- a/tox.ini +++ b/tox.ini @@ -58,7 +58,7 @@ usedevelop = False setenv = GNOCCHI_VARIANT=test,postgresql,file deps = gnocchi[{env:GNOCCHI_VARIANT}]>=3.1,<3.2 alembic<0.9.0 - pifpaf>=0.13 + pifpaf[gnocchi]>=0.13 gnocchiclient>=2.8.0 commands = pifpaf --env-prefix INDEXER run postgresql {toxinidir}/run-upgrade-tests.sh {posargs} @@ -74,7 +74,7 @@ setenv = GNOCCHI_VARIANT=test,mysql,ceph,ceph_recommended_lib deps = gnocchi[{env:GNOCCHI_VARIANT}]>=3.1,<3.2 alembic<0.9.0 gnocchiclient>=2.8.0 - pifpaf[ceph]>=0.13 + pifpaf[ceph,gnocchi]>=0.13 commands = pifpaf 
--env-prefix INDEXER run mysql -- pifpaf --env-prefix STORAGE run ceph {toxinidir}/run-upgrade-tests.sh {posargs} [testenv:pep8] -- GitLab From 30d85510d3b1fe6546efa934c15c234008ebe344 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Mon, 26 Jun 2017 14:42:35 +0200 Subject: [PATCH 0843/1483] Remove deprecated noauth auth mode Sem-Ver: api-break --- gnocchi/rest/api-paste.ini | 6 ------ gnocchi/rest/app.py | 5 ----- gnocchi/rest/auth_helper.py | 15 --------------- gnocchi/tests/test_rest.py | 8 +------- .../notes/no-auth-removed-b6e936dcefb4b9b1.yaml | 4 ++++ setup.cfg | 1 - 6 files changed, 5 insertions(+), 34 deletions(-) create mode 100644 releasenotes/notes/no-auth-removed-b6e936dcefb4b9b1.yaml diff --git a/gnocchi/rest/api-paste.ini b/gnocchi/rest/api-paste.ini index 84792644..2cdacaa2 100644 --- a/gnocchi/rest/api-paste.ini +++ b/gnocchi/rest/api-paste.ini @@ -1,9 +1,3 @@ -[composite:gnocchi+noauth] -use = egg:Paste#urlmap -/ = gnocchiversions_pipeline -/v1 = gnocchiv1+noauth -/healthcheck = healthcheck - [composite:gnocchi+basic] use = egg:Paste#urlmap / = gnocchiversions_pipeline diff --git a/gnocchi/rest/app.py b/gnocchi/rest/app.py index 2ae58361..03060b00 100644 --- a/gnocchi/rest/app.py +++ b/gnocchi/rest/app.py @@ -16,7 +16,6 @@ import os import pkg_resources import uuid -import warnings import daiquiri from oslo_middleware import cors @@ -111,10 +110,6 @@ def load_app(conf, indexer=None, storage=None, LOG.info("WSGI config used: %s", cfg_path) - if conf.api.auth_mode == "noauth": - warnings.warn("The `noauth' authentication mode is deprecated", - category=DeprecationWarning) - appname = "gnocchi+" + conf.api.auth_mode app = deploy.loadapp("config:" + cfg_path, name=appname, global_conf={'configkey': configkey}) diff --git a/gnocchi/rest/auth_helper.py b/gnocchi/rest/auth_helper.py index 99bb607e..3cf3951c 100644 --- a/gnocchi/rest/auth_helper.py +++ b/gnocchi/rest/auth_helper.py @@ -86,21 +86,6 @@ class KeystoneAuthHelper(object): return {"or": 
policy_filter} -class NoAuthHelper(KeystoneAuthHelper): - @staticmethod - def get_current_user(request): - # FIXME(jd) Should be a single header - user_id = request.headers.get("X-User-Id") - project_id = request.headers.get("X-Project-Id") - if user_id: - if project_id: - return user_id + ":" + project_id - return user_id - if project_id: - return project_id - rest.abort(401, "Unable to determine current user") - - class BasicAuthHelper(object): @staticmethod def get_current_user(request): diff --git a/gnocchi/tests/test_rest.py b/gnocchi/tests/test_rest.py index d6b78538..bce778f9 100644 --- a/gnocchi/tests/test_rest.py +++ b/gnocchi/tests/test_rest.py @@ -91,8 +91,6 @@ class TestingApp(webtest.TestApp): yield finally: self.user = old_user - elif self.auth_mode == "noauth": - raise testcase.TestSkipped("auth mode is noauth") else: raise RuntimeError("Unknown auth_mode") @@ -128,9 +126,6 @@ class TestingApp(webtest.TestApp): ) elif self.auth_mode == "remoteuser": req.remote_user = self.user - elif self.auth_mode == "noauth": - req.headers['X-User-Id'] = self.USER_ID - req.headers['X-Project-Id'] = self.PROJECT_ID response = super(TestingApp, self).do_request(req, *args, **kwargs) metrics = tests_utils.list_all_incoming_metrics(self.storage.incoming) self.storage.process_background_tasks(self.indexer, metrics, sync=True) @@ -142,7 +137,6 @@ class RestTest(tests_base.TestCase, testscenarios.TestWithScenarios): scenarios = [ ('basic', dict(auth_mode="basic")), ('keystone', dict(auth_mode="keystone")), - ('noauth', dict(auth_mode="noauth")), ('remoteuser', dict(auth_mode="remoteuser")), ] @@ -650,7 +644,7 @@ class ResourceTest(RestTest): # Set original_resource_id self.resource['original_resource_id'] = self.resource['id'] self.resource['created_by_user_id'] = TestingApp.USER_ID - if self.auth_mode in ("keystone", "noauth"): + if self.auth_mode == "keystone": self.resource['created_by_project_id'] = TestingApp.PROJECT_ID self.resource['creator'] = ( TestingApp.USER_ID 
+ ":" + TestingApp.PROJECT_ID diff --git a/releasenotes/notes/no-auth-removed-b6e936dcefb4b9b1.yaml b/releasenotes/notes/no-auth-removed-b6e936dcefb4b9b1.yaml new file mode 100644 index 00000000..1c63e155 --- /dev/null +++ b/releasenotes/notes/no-auth-removed-b6e936dcefb4b9b1.yaml @@ -0,0 +1,4 @@ +--- +other: + - | + The deprecated `noauth` authentication mode has been removed. diff --git a/setup.cfg b/setup.cfg index e6501837..29766d11 100644 --- a/setup.cfg +++ b/setup.cfg @@ -125,7 +125,6 @@ gnocchi.aggregates = moving-average = gnocchi.aggregates.moving_stats:MovingAverage gnocchi.rest.auth_helper = - noauth = gnocchi.rest.auth_helper:NoAuthHelper keystone = gnocchi.rest.auth_helper:KeystoneAuthHelper basic = gnocchi.rest.auth_helper:BasicAuthHelper remoteuser = gnocchi.rest.auth_helper:RemoteUserAuthHelper -- GitLab From 7503beeda80fcbd1576be49e9e9e4610422310b6 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Thu, 29 Jun 2017 09:32:31 +0200 Subject: [PATCH 0844/1483] rest: return 404 when granularity does not exist The exception is not caught currently, making the REST API return a 500 error. 
Fixes #148 --- gnocchi/rest/__init__.py | 6 +++--- gnocchi/tests/functional/gabbits/aggregation.yaml | 6 ++++++ 2 files changed, 9 insertions(+), 3 deletions(-) diff --git a/gnocchi/rest/__init__.py b/gnocchi/rest/__init__.py index 23faee66..56d71d95 100644 --- a/gnocchi/rest/__init__.py +++ b/gnocchi/rest/__init__.py @@ -1663,9 +1663,9 @@ class AggregationController(rest.RestController): except storage.MetricUnaggregatable as e: abort(400, ("One of the metrics being aggregated doesn't have " "matching granularity: %s") % str(e)) - except storage.MetricDoesNotExist as e: - abort(404, e) - except storage.AggregationDoesNotExist as e: + except (storage.MetricDoesNotExist, + storage.GranularityDoesNotExist, + storage.AggregationDoesNotExist) as e: abort(404, e) @pecan.expose('json') diff --git a/gnocchi/tests/functional/gabbits/aggregation.yaml b/gnocchi/tests/functional/gabbits/aggregation.yaml index ee1905c7..7f420377 100644 --- a/gnocchi/tests/functional/gabbits/aggregation.yaml +++ b/gnocchi/tests/functional/gabbits/aggregation.yaml @@ -139,6 +139,12 @@ tests: GET: /v1/aggregation/metric?metric=$HISTORY['get metric list'].$RESPONSE['$[0].id']&metric=$HISTORY['get metric list'].$RESPONSE['$[1].id']&granularity=1&fill=asdf status: 400 + - name: get measure aggregates non existing granularity + desc: https://github.com/gnocchixyz/gnocchi/issues/148 + GET: /v1/aggregation/metric?metric=$HISTORY['get metric list'].$RESPONSE['$[0].id']&granularity=42 + status: 404 + response_strings: + - Granularity '42.0' for metric # Aggregation by resource and metric_name -- GitLab From f76b8a5c614cddccbbccdb8feb7afbe1929cd288 Mon Sep 17 00:00:00 2001 From: gord chung Date: Tue, 27 Jun 2017 16:11:21 +0000 Subject: [PATCH 0845/1483] deprecate moving average (& minimally fix what exists) deprecating moving average because the existing rolling average for some reason generates a left-aligned rolling average which is opposite of standard rolling average which should lag. 
also, none of the existing parameters are configurable or are valid (min_size is definitely wrong). will improve this by leveraging pandas rolling TEMP FIX: don't unnecessarily pull in all granularities, search through all series for min granularity, and rebuild. just detect if valid, pull in granularity that's valid, and continue. --- gnocchi/aggregates/moving_stats.py | 18 +++++++++--------- gnocchi/rest/__init__.py | 3 +++ ...recate-moving-average-a7596a0009be5b12.yaml | 4 ++++ 3 files changed, 16 insertions(+), 9 deletions(-) create mode 100644 releasenotes/notes/deprecate-moving-average-a7596a0009be5b12.yaml diff --git a/gnocchi/aggregates/moving_stats.py b/gnocchi/aggregates/moving_stats.py index b0ce3b40..75f83efa 100644 --- a/gnocchi/aggregates/moving_stats.py +++ b/gnocchi/aggregates/moving_stats.py @@ -39,21 +39,21 @@ class MovingAverage(aggregates.CustomAggregator): @staticmethod def retrieve_data(storage_obj, metric, start, stop, window): """Retrieves finest-res data available from storage.""" - all_data = storage_obj.get_measures(metric, start, stop) - try: - min_grain = min(set([row[1] for row in all_data if row[1] == 0 - or window % row[1] == 0])) - except Exception: + min_grain = min( + ap.granularity for ap in metric.archive_policy.definition + if window % ap.granularity == 0) + except ValueError: msg = ("No data available that is either full-res or " "of a granularity that factors into the window size " "you specified.") raise aggregates.CustomAggFailure(msg) - return min_grain, pandas.Series([r[2] for r in all_data - if r[1] == min_grain], - [r[0] for r in all_data - if r[1] == min_grain]) + data = list(zip(*storage_obj.get_measures(metric, start, stop, + granularity=min_grain))) + + return (min_grain, + pandas.Series(data[2], data[0]) if data else pandas.Series()) @staticmethod def aggregate_data(data, func, window, min_grain, center=False, diff --git a/gnocchi/rest/__init__.py b/gnocchi/rest/__init__.py index 852d58b4..1d48f7b9 100644 --- 
a/gnocchi/rest/__init__.py +++ b/gnocchi/rest/__init__.py @@ -17,6 +17,7 @@ import functools import itertools import uuid +import warnings import jsonpatch import pecan @@ -448,6 +449,8 @@ class MetricController(rest.RestController): abort(503, e) try: if aggregation in self.custom_agg: + warnings.warn("moving_average aggregation is deprecated.", + category=DeprecationWarning) measures = self.custom_agg[aggregation].compute( pecan.request.storage, self.metric, start, stop, **param) diff --git a/releasenotes/notes/deprecate-moving-average-a7596a0009be5b12.yaml b/releasenotes/notes/deprecate-moving-average-a7596a0009be5b12.yaml new file mode 100644 index 00000000..8d39853c --- /dev/null +++ b/releasenotes/notes/deprecate-moving-average-a7596a0009be5b12.yaml @@ -0,0 +1,4 @@ +--- +deprecations: + - | + ``moving_average`` aggregate is deprecated. -- GitLab From b23866ed250d84b5bc3f08e4f4c169e0a0d1f2e5 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Tue, 20 Jun 2017 16:36:14 +0200 Subject: [PATCH 0846/1483] utils: use ASCII bytes as member id Tooz actually wants ASCII bytes and not random bytes. Fixes #130 (cherry picked from commit e749b60f49a4a3b48cc5da67a797f717dd8cd01d) --- gnocchi/utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/gnocchi/utils.py b/gnocchi/utils.py index f81d93e0..76667115 100644 --- a/gnocchi/utils.py +++ b/gnocchi/utils.py @@ -90,7 +90,7 @@ def _enable_coordination(coord): def get_coordinator_and_start(url): - my_id = uuid.uuid4().bytes + my_id = str(uuid.uuid4()).encode() coord = coordination.get_coordinator(url, my_id) _enable_coordination(coord) return coord, my_id -- GitLab From 0b6cd96310bc9238a6492eab01b67ca2b1d0ccf8 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Mon, 3 Jul 2017 14:00:03 +0200 Subject: [PATCH 0847/1483] Make command line tools log to stderr `gnocchi-upgrade' and `gnocchi-change-sack-size' are both command line tools and not daemons. 
That means they should always log to stderr, and not use the configuration file instructions as where to log. Fixes #16 --- gnocchi/cli.py | 4 ++-- gnocchi/service.py | 5 +++-- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/gnocchi/cli.py b/gnocchi/cli.py index 48a199f8..78213e80 100644 --- a/gnocchi/cli.py +++ b/gnocchi/cli.py @@ -56,7 +56,7 @@ def upgrade(): help="Number of storage sacks to create."), ]) - conf = service.prepare_service(conf=conf) + conf = service.prepare_service(conf=conf, log_to_std=True) if not conf.skip_index: index = indexer.get_driver(conf) index.connect() @@ -84,7 +84,7 @@ def change_sack_size(): cfg.IntOpt("sacks-number", required=True, min=1, help="Number of storage sacks."), ]) - conf = service.prepare_service(conf=conf) + conf = service.prepare_service(conf=conf, log_to_std=True) s = storage.get_incoming_driver(conf.incoming) try: report = s.measures_report(details=False) diff --git a/gnocchi/service.py b/gnocchi/service.py index 985d2f88..5800faaa 100644 --- a/gnocchi/service.py +++ b/gnocchi/service.py @@ -32,7 +32,8 @@ LOG = daiquiri.getLogger(__name__) def prepare_service(args=None, conf=None, - default_config_files=None): + default_config_files=None, + log_to_std=False): if conf is None: conf = cfg.ConfigOpts() opts.set_defaults() @@ -53,7 +54,7 @@ def prepare_service(args=None, conf=None, default_config_files=default_config_files, version=pbr.version.VersionInfo('gnocchi').version_string()) - if conf.log_dir or conf.log_file: + if not log_to_std and (conf.log_dir or conf.log_file): outputs = [daiquiri.output.File(filename=conf.log_file, directory=conf.log_dir)] else: -- GitLab From c619c92e0d1eda0a6eee23b677e7ea317efecb9b Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Tue, 4 Jul 2017 16:52:13 +0200 Subject: [PATCH 0848/1483] Don't run python3 functional tests for swift --- run-func-tests.sh | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/run-func-tests.sh b/run-func-tests.sh index 78a2f88c..1f480992 
100755 --- a/run-func-tests.sh +++ b/run-func-tests.sh @@ -15,9 +15,15 @@ check_empty_var() { fi } +PYTHON_VERSION_MAJOR=$(python -c 'import sys; print(sys.version_info.major)') + GNOCCHI_TEST_STORAGE_DRIVERS=${GNOCCHI_TEST_STORAGE_DRIVERS:-file} GNOCCHI_TEST_INDEXER_DRIVERS=${GNOCCHI_TEST_INDEXER_DRIVERS:-postgresql} for storage in ${GNOCCHI_TEST_STORAGE_DRIVERS}; do + if [ "$storage" == "swift" ] && [ "$PYTHON_VERSION_MAJOR" == "3" ]; then + echo "WARNING: swift does not support python 3 skipping" + continue + fi for indexer in ${GNOCCHI_TEST_INDEXER_DRIVERS}; do case $storage in ceph) -- GitLab From cbc98d0ae6b8e8fe4061aab6255071f4e90c76e1 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Tue, 4 Jul 2017 12:07:22 +0200 Subject: [PATCH 0849/1483] swift: we must use the number of default sack We currently set the number of sack already configured instead of the asked number. Making upgrade fail when the number of sack is not yet set. This change fixes that. --- gnocchi/storage/incoming/swift.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/gnocchi/storage/incoming/swift.py b/gnocchi/storage/incoming/swift.py index 6aa445cf..7651ef78 100644 --- a/gnocchi/storage/incoming/swift.py +++ b/gnocchi/storage/incoming/swift.py @@ -43,7 +43,7 @@ class SwiftStorage(_carbonara.CarbonaraBasedStorage): self.swift.put_container(self.CFG_PREFIX) self.swift.put_object(self.CFG_PREFIX, self.CFG_PREFIX, json.dumps({self.CFG_SACKS: num_sacks})) - for i in six.moves.range(self.NUM_SACKS): + for i in six.moves.range(num_sacks): self.swift.put_container(self.get_sack_name(i)) def remove_sack_group(self, num_sacks): -- GitLab From dabe1d9bfc578bb7c5526286f6edc52a5128d771 Mon Sep 17 00:00:00 2001 From: gord chung Date: Tue, 4 Jul 2017 19:28:37 +0000 Subject: [PATCH 0850/1483] log metric deletion on completion we might defer metric deletion if sack is locked. add another log when actually completed. 
--- gnocchi/storage/_carbonara.py | 1 + 1 file changed, 1 insertion(+) diff --git a/gnocchi/storage/_carbonara.py b/gnocchi/storage/_carbonara.py index b036c91b..570e3ef3 100644 --- a/gnocchi/storage/_carbonara.py +++ b/gnocchi/storage/_carbonara.py @@ -347,6 +347,7 @@ class CarbonaraBasedStorage(storage.StorageDriver): lock.release() self._delete_metric(metric) self.incoming.delete_unprocessed_measures_for_metric_id(metric.id) + LOG.debug("Deleted metric %s", metric) @staticmethod def _delete_metric_measures(metric, timestamp_key, -- GitLab From 1fd56c44dfc627b50116dcc4bdb96e4a3f067f0d Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Fri, 30 Jun 2017 19:36:34 +0200 Subject: [PATCH 0851/1483] tests: We must ensure variable are reinit --- run-func-tests.sh | 2 ++ 1 file changed, 2 insertions(+) diff --git a/run-func-tests.sh b/run-func-tests.sh index 1f480992..da261380 100755 --- a/run-func-tests.sh +++ b/run-func-tests.sh @@ -25,6 +25,8 @@ for storage in ${GNOCCHI_TEST_STORAGE_DRIVERS}; do continue fi for indexer in ${GNOCCHI_TEST_INDEXER_DRIVERS}; do + unset STORAGE_URL + unset INDEXER_URL case $storage in ceph) eval $(pifpaf -e STORAGE run ceph) -- GitLab From 7abab03ae34f6f4d41fa27cce52dd3d978462cf1 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Tue, 4 Jul 2017 12:07:22 +0200 Subject: [PATCH 0852/1483] swift: we must use the number of default sack We currently set the number of sack already configured instead of the asked number. Making upgrade fail when the number of sack is not yet set. This change fixes that. 
(cherry picked from commit 1b5a74ac125bdb96a3f36ecc99258c69aedb05fb) --- gnocchi/storage/incoming/swift.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/gnocchi/storage/incoming/swift.py b/gnocchi/storage/incoming/swift.py index 6aa445cf..7651ef78 100644 --- a/gnocchi/storage/incoming/swift.py +++ b/gnocchi/storage/incoming/swift.py @@ -43,7 +43,7 @@ class SwiftStorage(_carbonara.CarbonaraBasedStorage): self.swift.put_container(self.CFG_PREFIX) self.swift.put_object(self.CFG_PREFIX, self.CFG_PREFIX, json.dumps({self.CFG_SACKS: num_sacks})) - for i in six.moves.range(self.NUM_SACKS): + for i in six.moves.range(num_sacks): self.swift.put_container(self.get_sack_name(i)) def remove_sack_group(self, num_sacks): -- GitLab From 27ec156dc2968434593d604d3851ae064aaeece4 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Tue, 20 Jun 2017 11:54:43 +0200 Subject: [PATCH 0853/1483] gendoc: call tearDown on test class If tearDown methods are not called, none of the fixtures is cleaned up and temporary files are not removed. 
--- gnocchi/gendoc.py | 67 +++++++++++++++++++++++++---------------------- 1 file changed, 35 insertions(+), 32 deletions(-) diff --git a/gnocchi/gendoc.py b/gnocchi/gendoc.py index 9586fbd2..1d27c60c 100644 --- a/gnocchi/gendoc.py +++ b/gnocchi/gendoc.py @@ -33,14 +33,6 @@ from gnocchi.tests import test_rest _RUN = False -def _setup_test_app(): - t = test_rest.RestTest() - t.auth_mode = "basic" - t.setUpClass() - t.setUp() - return t.app - - def _format_json(txt): return json.dumps(json.loads(txt), sort_keys=True, @@ -172,33 +164,44 @@ def setup(app): _RUN = True return - webapp = _setup_test_app() # TODO(jd) Do not hardcode doc/source with open("doc/source/rest.yaml") as f: scenarios = ScenarioList(yaml.load(f)) - for entry in scenarios: - template = jinja2.Template(entry['request']) - fake_file = six.moves.cStringIO() - fake_file.write(template.render(scenarios=scenarios).encode('utf-8')) - fake_file.seek(0) - request = webapp.RequestClass.from_file(fake_file) - - # TODO(jd) Fix this lame bug in webob < 1.7 - if (hasattr(webob.request, "http_method_probably_has_body") - and request.method == "DELETE"): - # Webob has a bug it does not read the body for DELETE, l4m3r - clen = request.content_length - if clen is None: - request.body = fake_file.read() - else: - request.body = fake_file.read(clen) - - app.info("Doing request %s: %s" % (entry['name'], - six.text_type(request))) - with webapp.use_admin_user(): - response = webapp.request(request) - entry['response'] = response - entry['doc'] = _format_request_reply(request, response) + + test = test_rest.RestTest() + test.auth_mode = "basic" + test.setUpClass() + test.setUp() + webapp = test.app + + try: + for entry in scenarios: + template = jinja2.Template(entry['request']) + fake_file = six.moves.cStringIO() + fake_file.write(template.render( + scenarios=scenarios).encode('utf-8')) + fake_file.seek(0) + request = webapp.RequestClass.from_file(fake_file) + + # TODO(jd) Fix this lame bug in webob < 1.7 + if 
(hasattr(webob.request, "http_method_probably_has_body") + and request.method == "DELETE"): + # Webob has a bug it does not read the body for DELETE, l4m3r + clen = request.content_length + if clen is None: + request.body = fake_file.read() + else: + request.body = fake_file.read(clen) + + app.info("Doing request %s: %s" % (entry['name'], + six.text_type(request))) + with webapp.use_admin_user(): + response = webapp.request(request) + entry['response'] = response + entry['doc'] = _format_request_reply(request, response) + finally: + test.tearDown() + test.tearDownClass() with open("doc/source/rest.j2", "r") as f: template = jinja2.Template(f.read().decode('utf-8')) with open("doc/source/rest.rst", "w") as f: -- GitLab From c8717e8b71f044953aec744f44f97abad1a0c08e Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Mon, 26 Jun 2017 12:08:01 +0200 Subject: [PATCH 0854/1483] Make gnocchi-api wrap uWSGI As the wise Mehdi once said: "nobody reads documentation". Everybody wants to run a daemon for the API server and everybody wants to run `gnocchi-api'. Then let's provide it as a wrapper around uwsgi, so it's a fast and performant HTTP by default. --- doc/source/operating.rst | 19 ++++--- gnocchi/cli.py | 56 +++++++++++++++++++ gnocchi/opts.py | 4 +- gnocchi/rest/app.py | 11 ++++ gnocchi/rest/gnocchi-api | 19 +++++++ gnocchi/rest/wsgi.py | 15 +++++ .../gnocchi-api-uwsgi-f16d958cb26ad90e.yaml | 7 +++ setup.cfg | 6 +- 8 files changed, 126 insertions(+), 11 deletions(-) create mode 100755 gnocchi/rest/gnocchi-api create mode 100644 gnocchi/rest/wsgi.py create mode 100644 releasenotes/notes/gnocchi-api-uwsgi-f16d958cb26ad90e.yaml diff --git a/doc/source/operating.rst b/doc/source/operating.rst index 7b5168e2..99d1d535 100644 --- a/doc/source/operating.rst +++ b/doc/source/operating.rst @@ -13,14 +13,15 @@ To run Gnocchi, simply run the HTTP server and metric daemon: Running API As A WSGI Application ================================= -The Gnocchi API tier runs using WSGI. 
This means it can be run using `Apache -httpd`_ and `mod_wsgi`_, or other HTTP daemon such as `uwsgi`_. You should -configure the number of process and threads according to the number of CPU you -have, usually around 1.5 × number of CPU. If one server is not enough, you can -spawn any number of new API server to scale Gnocchi out, even on different -machines. +To run Gnocchi API, you can use the provided `gnocchi-api`. It wraps around +`uwsgi` – makes sure that `uwsgi`_ is installed. If one Gnocchi API server is +not enough, you can spawn any number of new API server to scale Gnocchi out, +even on different machines. -The following uwsgi configuration file can be used:: +Since Gnocchi API tier runs using WSGI, it can alternatively be run using +`Apache httpd`_ and `mod_wsgi`_, or any other HTTP daemon. If you want to +deploy using `uwsgi`_ yourself, the following uwsgi configuration file can be +used as a base:: [uwsgi] http = localhost:8041 @@ -36,6 +37,10 @@ The following uwsgi configuration file can be used:: plugins = python buffer-size = 65535 lazy-apps = true + add-header = Connection: close + +You should configure the number of processes according to the number of CPU you +have, usually around 1.5 × number of CPU. Once written to `/etc/gnocchi/uwsgi.ini`, it can be launched this way:: diff --git a/gnocchi/cli.py b/gnocchi/cli.py index 78213e80..111575af 100644 --- a/gnocchi/cli.py +++ b/gnocchi/cli.py @@ -13,6 +13,10 @@ # implied. # See the License for the specific language governing permissions and # limitations under the License. 
+import copy +from distutils import spawn +import math +import os import sys import threading import time @@ -29,6 +33,7 @@ import tooz from gnocchi import archive_policy from gnocchi import genconfig from gnocchi import indexer +from gnocchi.rest import app from gnocchi import service from gnocchi import statsd as statsd_service from gnocchi import storage @@ -309,6 +314,57 @@ def metricd_tester(conf): index, list(metrics)[:conf.stop_after_processing_metrics], True) +def api(): + # Compat with previous pbr script + try: + double_dash = sys.argv.index("--") + except ValueError: + double_dash = None + else: + sys.argv.pop(double_dash) + + conf = cfg.ConfigOpts() + for opt in app.API_OPTS: + # NOTE(jd) Register the API options without a default, so they are only + # used to override the one in the config file + c = copy.copy(opt) + c.default = None + conf.register_cli_opt(c) + conf = service.prepare_service(conf=conf) + + if double_dash is not None: + # NOTE(jd) Wait to this stage to log so we're sure the logging system + # is in place + LOG.warning( + "No need to pass `--' in gnocchi-api command line anymore, " + "please remove") + + uwsgi = spawn.find_executable("uwsgi") + if not uwsgi: + LOG.error("Unable to find `uwsgi'.\n" + "Be sure it is installed and in $PATH.") + return 1 + + workers = utils.get_default_workers() + + return os.execl( + uwsgi, uwsgi, + "--http", "%s:%d" % (conf.host or conf.api.host, + conf.port or conf.api.port), + "--master", + "--enable-threads", + "--die-on-term", + # NOTE(jd) See https://github.com/gnocchixyz/gnocchi/issues/156 + "--add-header", "Connection: close", + "--processes", str(math.floor(workers * 1.5)), + "--threads", str(workers), + "--lazy-apps", + "--chdir", "/", + "--wsgi", "gnocchi.rest.wsgi", + "--pyargv", " ".join(sys.argv[1:]), + ) + + def metricd(): conf = cfg.ConfigOpts() conf.register_cli_opts([ diff --git a/gnocchi/opts.py b/gnocchi/opts.py index 3bd0d6f6..8f57f915 100644 --- a/gnocchi/opts.py +++ b/gnocchi/opts.py 
@@ -22,6 +22,7 @@ from oslo_middleware import cors import gnocchi.archive_policy import gnocchi.indexer +import gnocchi.rest.app import gnocchi.storage import gnocchi.storage.ceph import gnocchi.storage.file @@ -161,7 +162,8 @@ def list_opts(): default=10, min=0, help='Number of seconds before timeout when attempting ' 'to force refresh of metric.'), - )), + ) + gnocchi.rest.app.API_OPTS, + ), ("storage", (_STORAGE_OPTS + gnocchi.storage._carbonara.OPTS)), ("incoming", _INCOMING_OPTS), ("statsd", ( diff --git a/gnocchi/rest/app.py b/gnocchi/rest/app.py index 03060b00..8823bed1 100644 --- a/gnocchi/rest/app.py +++ b/gnocchi/rest/app.py @@ -18,6 +18,7 @@ import pkg_resources import uuid import daiquiri +from oslo_config import cfg from oslo_middleware import cors from oslo_policy import policy from paste import deploy @@ -36,6 +37,16 @@ from gnocchi import storage as gnocchi_storage LOG = daiquiri.getLogger(__name__) +API_OPTS = ( + cfg.HostAddressOpt('host', + default="0.0.0.0", + help="Host to listen on"), + cfg.PortOpt('port', + default=8041, + help="Port to listen on"), +) + + # Register our encoder by default for everything jsonify.jsonify.register(object)(json.to_primitive) diff --git a/gnocchi/rest/gnocchi-api b/gnocchi/rest/gnocchi-api new file mode 100755 index 00000000..a53ad9ea --- /dev/null +++ b/gnocchi/rest/gnocchi-api @@ -0,0 +1,19 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+if __name__ == '__main__': + import sys + from gnocchi import cli + sys.exit(cli.api()) +else: + from gnocchi.rest import app + application = app.build_wsgi_app() diff --git a/gnocchi/rest/wsgi.py b/gnocchi/rest/wsgi.py new file mode 100644 index 00000000..3edc2468 --- /dev/null +++ b/gnocchi/rest/wsgi.py @@ -0,0 +1,15 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""This file is loaded by gnocchi-api when executing uwsgi""" +from gnocchi.rest import app +application = app.build_wsgi_app() diff --git a/releasenotes/notes/gnocchi-api-uwsgi-f16d958cb26ad90e.yaml b/releasenotes/notes/gnocchi-api-uwsgi-f16d958cb26ad90e.yaml new file mode 100644 index 00000000..525c72f7 --- /dev/null +++ b/releasenotes/notes/gnocchi-api-uwsgi-f16d958cb26ad90e.yaml @@ -0,0 +1,7 @@ +--- +features: + - | + The `gnocchi-api` script is now a wrapper around uWSGI. Using a + WSGI-compliant HTTP server always have been recommended, but since most + users want to just run gnocchi-api, it'll now be fast and efficient by + default. 
diff --git a/setup.cfg b/setup.cfg index 29766d11..a4ef0eb9 100644 --- a/setup.cfg +++ b/setup.cfg @@ -94,6 +94,9 @@ pre-hook.build_config = gnocchi.genconfig.prehook packages = gnocchi +scripts = + gnocchi/rest/gnocchi-api + [entry_points] gnocchi.indexer.sqlalchemy.resource_type_attribute = string = gnocchi.indexer.sqlalchemy_extension:StringSchema @@ -136,9 +139,6 @@ console_scripts = gnocchi-statsd = gnocchi.cli:statsd gnocchi-metricd = gnocchi.cli:metricd -wsgi_scripts = - gnocchi-api = gnocchi.rest.app:build_wsgi_app - oslo.config.opts = gnocchi = gnocchi.opts:list_opts -- GitLab From f3e380c671f61a0906a83b22a930d27ca35b90a0 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Thu, 22 Jun 2017 17:37:49 +0200 Subject: [PATCH 0855/1483] test: fix check for SACK_PREFIX This is to be done if incoming driver is redis, not storage. --- gnocchi/tests/base.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/gnocchi/tests/base.py b/gnocchi/tests/base.py index eb41ce43..98f12573 100644 --- a/gnocchi/tests/base.py +++ b/gnocchi/tests/base.py @@ -352,6 +352,8 @@ class TestCase(BaseTestCase): if self.conf.storage.driver == 'redis': # Create one prefix per test self.storage.STORAGE_PREFIX = str(uuid.uuid4()) + + if self.conf.incoming.driver == 'redis': self.storage.incoming.SACK_PREFIX = str(uuid.uuid4()) self.storage.upgrade(128) -- GitLab From acac5f90e5d3a3e7cdb0425ee4a7a4d94614ce1a Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Thu, 22 Jun 2017 17:55:55 +0200 Subject: [PATCH 0856/1483] storage: remove incoming driver from storage object Those storage work together but have nothing in common. Certain CLI tools/processes only need one or the other, so creating a storage object with incoming each time is not needed. 
--- gnocchi/cli.py | 38 +++++---- gnocchi/rest/__init__.py | 18 ++--- gnocchi/rest/app.py | 14 +++- gnocchi/statsd.py | 4 +- gnocchi/storage/__init__.py | 28 ++++--- gnocchi/storage/_carbonara.py | 25 +++--- gnocchi/storage/ceph.py | 4 +- gnocchi/storage/file.py | 4 +- gnocchi/storage/incoming/ceph.py | 4 + gnocchi/storage/incoming/file.py | 3 + gnocchi/storage/incoming/redis.py | 3 + gnocchi/storage/incoming/s3.py | 3 + gnocchi/storage/incoming/swift.py | 3 + gnocchi/storage/redis.py | 4 +- gnocchi/storage/s3.py | 8 +- gnocchi/storage/swift.py | 4 +- gnocchi/tests/base.py | 6 +- gnocchi/tests/functional/fixtures.py | 14 ++-- gnocchi/tests/test_aggregates.py | 11 +-- gnocchi/tests/test_rest.py | 8 +- gnocchi/tests/test_statsd.py | 30 ++++--- gnocchi/tests/test_storage.py | 113 ++++++++++++++------------- 22 files changed, 200 insertions(+), 149 deletions(-) diff --git a/gnocchi/cli.py b/gnocchi/cli.py index 111575af..c392e64d 100644 --- a/gnocchi/cli.py +++ b/gnocchi/cli.py @@ -55,10 +55,12 @@ def upgrade(): help="Skip index upgrade."), cfg.BoolOpt("skip-storage", default=False, help="Skip storage upgrade."), + cfg.BoolOpt("skip-incoming", default=False, + help="Skip incoming storage upgrade."), cfg.BoolOpt("skip-archive-policies-creation", default=False, help="Skip default archive policies creation."), cfg.IntOpt("sacks-number", default=128, min=1, - help="Number of storage sacks to create."), + help="Number of incoming storage sacks to create."), ]) conf = service.prepare_service(conf=conf, log_to_std=True) @@ -70,7 +72,11 @@ def upgrade(): if not conf.skip_storage: s = storage.get_driver(conf) LOG.info("Upgrading storage %s", s) - s.upgrade(conf.sacks_number) + s.upgrade() + if not conf.skip_incoming: + i = storage.get_incoming_driver(conf) + LOG.info("Upgrading incoming storage %s", i) + i.upgrade(conf.sacks_number) if (not conf.skip_archive_policies_creation and not index.list_archive_policies() @@ -90,7 +96,7 @@ def change_sack_size(): help="Number of storage 
sacks."), ]) conf = service.prepare_service(conf=conf, log_to_std=True) - s = storage.get_incoming_driver(conf.incoming) + s = storage.get_incoming_driver(conf) try: report = s.measures_report(details=False) except incoming.SackDetectionError: @@ -122,6 +128,7 @@ class MetricProcessBase(cotyledon.Service): def _configure(self): self.store = storage.get_driver(self.conf) + self.incoming = storage.get_incoming_driver(self.conf) self.index = indexer.get_driver(self.conf) self.index.connect() @@ -159,7 +166,7 @@ class MetricReporting(MetricProcessBase): worker_id, conf, conf.metricd.metric_reporting_delay) def _configure(self): - self.incoming = storage.get_incoming_driver(self.conf.incoming) + self.incoming = storage.get_incoming_driver(self.conf) def _run_job(self): try: @@ -191,12 +198,13 @@ class MetricProcessor(MetricProcessBase): @utils.retry def _configure(self): self.store = storage.get_driver(self.conf, self.coord) + self.incoming = storage.get_incoming_driver(self.conf) self.index = indexer.get_driver(self.conf) self.index.connect() # create fallback in case paritioning fails or assigned no tasks self.fallback_tasks = list( - six.moves.range(self.store.incoming.NUM_SACKS)) + six.moves.range(self.incoming.NUM_SACKS)) try: self.partitioner = self.coord.join_partitioned_group( self.GROUP_ID, partitions=200) @@ -226,7 +234,7 @@ class MetricProcessor(MetricProcessBase): self.group_state != self.partitioner.ring.nodes): self.group_state = self.partitioner.ring.nodes.copy() self._tasks = [ - i for i in six.moves.range(self.store.incoming.NUM_SACKS) + i for i in six.moves.range(self.incoming.NUM_SACKS) if self.partitioner.belongs_to_self( i, replicas=self.conf.metricd.processing_replicas)] finally: @@ -235,17 +243,17 @@ class MetricProcessor(MetricProcessBase): def _run_job(self): m_count = 0 s_count = 0 - in_store = self.store.incoming for s in self._get_tasks(): # TODO(gordc): support delay release lock so we don't # process a sack right after another process - 
lock = in_store.get_sack_lock(self.coord, s) + lock = self.incoming.get_sack_lock(self.coord, s) if not lock.acquire(blocking=False): continue try: - metrics = in_store.list_metric_with_measures_to_process(s) + metrics = self.incoming.list_metric_with_measures_to_process(s) m_count += len(metrics) - self.store.process_background_tasks(self.index, metrics) + self.store.process_background_tasks( + self.index, self.incoming, metrics) s_count += 1 except Exception: LOG.error("Unexpected error processing assigned job", @@ -267,7 +275,7 @@ class MetricJanitor(MetricProcessBase): def _run_job(self): try: - self.store.expunge_metrics(self.index) + self.store.expunge_metrics(self.incoming, self.index) LOG.debug("Metrics marked for deletion removed from backend") except Exception: LOG.error("Unexpected error during metric cleanup", exc_info=True) @@ -305,13 +313,15 @@ def metricd_tester(conf): index = indexer.get_driver(conf) index.connect() s = storage.get_driver(conf) + incoming = storage.get_incoming_driver(conf) metrics = set() - for i in six.moves.range(s.incoming.NUM_SACKS): - metrics.update(s.incoming.list_metric_with_measures_to_process(i)) + for i in six.moves.range(incoming.NUM_SACKS): + metrics.update(incoming.list_metric_with_measures_to_process(i)) if len(metrics) >= conf.stop_after_processing_metrics: break s.process_new_measures( - index, list(metrics)[:conf.stop_after_processing_metrics], True) + index, incoming, + list(metrics)[:conf.stop_after_processing_metrics], True) def api(): diff --git a/gnocchi/rest/__init__.py b/gnocchi/rest/__init__.py index 1d48f7b9..7d29e34a 100644 --- a/gnocchi/rest/__init__.py +++ b/gnocchi/rest/__init__.py @@ -401,7 +401,7 @@ class MetricController(rest.RestController): if not isinstance(params, list): abort(400, "Invalid input for measures") if params: - pecan.request.storage.incoming.add_measures( + pecan.request.incoming.add_measures( self.metric, MeasuresListSchema(params)) pecan.response.status = 202 @@ -440,10 +440,10 @@ 
class MetricController(rest.RestController): abort(400, e) if (strtobool("refresh", refresh) and - pecan.request.storage.incoming.has_unprocessed(self.metric)): + pecan.request.incoming.has_unprocessed(self.metric)): try: pecan.request.storage.refresh_metric( - pecan.request.indexer, self.metric, + pecan.request.indexer, pecan.request.incoming, self.metric, pecan.request.conf.api.refresh_timeout) except storage.SackLockTimeoutError as e: abort(503, e) @@ -1462,7 +1462,7 @@ class ResourcesMetricsMeasuresBatchController(rest.RestController): for metric in known_metrics: enforce("post measures", metric) - pecan.request.storage.incoming.add_measures_batch( + pecan.request.incoming.add_measures_batch( dict((metric, body_by_rid[metric.resource_id][metric.name]) for metric in known_metrics)) @@ -1494,7 +1494,7 @@ class MetricsMeasuresBatchController(rest.RestController): for metric in metrics: enforce("post measures", metric) - pecan.request.storage.incoming.add_measures_batch( + pecan.request.incoming.add_measures_batch( dict((metric, body[metric.id]) for metric in metrics)) @@ -1699,13 +1699,13 @@ class AggregationController(rest.RestController): try: if strtobool("refresh", refresh): - store = pecan.request.storage metrics_to_update = [ - m for m in metrics if store.incoming.has_unprocessed(m)] + m for m in metrics + if pecan.request.incoming.has_unprocessed(m)] for m in metrics_to_update: try: pecan.request.storage.refresh_metric( - pecan.request.indexer, m, + pecan.request.indexer, pecan.request.incoming, m, pecan.request.conf.api.refresh_timeout) except storage.SackLockTimeoutError as e: abort(503, e) @@ -1768,7 +1768,7 @@ class StatusController(rest.RestController): def get(details=True): enforce("get status", {}) try: - report = pecan.request.storage.incoming.measures_report( + report = pecan.request.incoming.measures_report( strtobool("details", details)) except incoming.ReportGenerationError: abort(503, 'Unable to generate status. 
Please retry.') diff --git a/gnocchi/rest/app.py b/gnocchi/rest/app.py index 8823bed1..5e0d5ca7 100644 --- a/gnocchi/rest/app.py +++ b/gnocchi/rest/app.py @@ -53,9 +53,10 @@ jsonify.jsonify.register(object)(json.to_primitive) class GnocchiHook(pecan.hooks.PecanHook): - def __init__(self, storage, indexer, conf): + def __init__(self, storage, indexer, incoming, conf): self.storage = storage self.indexer = indexer + self.incoming = incoming self.conf = conf self.policy_enforcer = policy.Enforcer(conf) self.auth_helper = driver.DriverManager("gnocchi.rest.auth_helper", @@ -65,6 +66,7 @@ class GnocchiHook(pecan.hooks.PecanHook): def on_route(self, state): state.request.storage = self.storage state.request.indexer = self.indexer + state.request.incoming = self.incoming state.request.conf = self.conf state.request.policy_enforcer = self.policy_enforcer state.request.auth_helper = self.auth_helper @@ -92,7 +94,7 @@ global APPCONFIGS APPCONFIGS = {} -def load_app(conf, indexer=None, storage=None, +def load_app(conf, indexer=None, storage=None, incoming=None, not_implemented_middleware=True): global APPCONFIGS @@ -100,6 +102,8 @@ def load_app(conf, indexer=None, storage=None, # so all if not storage: storage = gnocchi_storage.get_driver(conf) + if not incoming: + incoming = gnocchi_storage.get_incoming_driver(conf) if not indexer: indexer = gnocchi_indexer.get_driver(conf) indexer.connect() @@ -115,6 +119,7 @@ def load_app(conf, indexer=None, storage=None, __name__, "api-paste.ini")) config = dict(conf=conf, indexer=indexer, storage=storage, + incoming=incoming, not_implemented_middleware=not_implemented_middleware) configkey = str(uuid.uuid4()) APPCONFIGS[configkey] = config @@ -127,10 +132,11 @@ def load_app(conf, indexer=None, storage=None, return cors.CORS(app, conf=conf) -def _setup_app(root, conf, indexer, storage, not_implemented_middleware): +def _setup_app(root, conf, indexer, storage, incoming, + not_implemented_middleware): app = pecan.make_app( root, - 
hooks=(GnocchiHook(storage, indexer, conf),), + hooks=(GnocchiHook(storage, indexer, incoming, conf),), guess_content_type_from_ext=False, ) diff --git a/gnocchi/statsd.py b/gnocchi/statsd.py index 76b28244..d36e03e8 100644 --- a/gnocchi/statsd.py +++ b/gnocchi/statsd.py @@ -35,7 +35,7 @@ LOG = daiquiri.getLogger(__name__) class Stats(object): def __init__(self, conf): self.conf = conf - self.storage = storage.get_driver(self.conf) + self.incoming = storage.get_incoming_driver(self.conf) self.indexer = indexer.get_driver(self.conf) self.indexer.connect() try: @@ -110,7 +110,7 @@ class Stats(object): archive_policy_name=ap_name, name=metric_name, resource_id=self.conf.statsd.resource_id) - self.storage.incoming.add_measures(metric, (measure,)) + self.incoming.add_measures(metric, (measure,)) except Exception as e: LOG.error("Unable to add measure %s: %s", metric_name, e) diff --git a/gnocchi/storage/__init__.py b/gnocchi/storage/__init__.py index d548b19b..9694e57a 100644 --- a/gnocchi/storage/__init__.py +++ b/gnocchi/storage/__init__.py @@ -156,50 +156,52 @@ def get_incoming_driver(conf): :param conf: incoming configuration only (not global) """ - return get_driver_class('gnocchi.incoming', conf)(conf) + return get_driver_class('gnocchi.incoming', conf.incoming)(conf.incoming) def get_driver(conf, coord=None): """Return the configured driver.""" - incoming = get_driver_class('gnocchi.incoming', conf.incoming)( - conf.incoming) return get_driver_class('gnocchi.storage', conf.storage)( - conf.storage, incoming, coord) + conf.storage, coord) class StorageDriver(object): - def __init__(self, conf, incoming, coord=None): - self.incoming = incoming + @staticmethod + def __init__(conf, coord=None): + pass @staticmethod def stop(): pass - def upgrade(self, num_sacks): - self.incoming.upgrade(num_sacks) + @staticmethod + def upgrade(): + pass - def process_background_tasks(self, index, metrics, sync=False): + def process_background_tasks(self, index, incoming, metrics, 
sync=False): """Process background tasks for this storage. This calls :func:`process_new_measures` to process new measures :param index: An indexer to be used for querying metrics + :param incoming: The incoming storage :param metrics: The list of metrics waiting for processing :param sync: If True, then process everything synchronously and raise on error :type sync: bool """ try: - self.process_new_measures(index, metrics, sync) + self.process_new_measures(index, incoming, metrics, sync) except Exception: if sync: raise LOG.error("Unexpected error during measures processing", exc_info=True) - def expunge_metrics(self, index, sync=False): + def expunge_metrics(self, incoming, index, sync=False): """Remove deleted metrics + :param incoming: The incoming storage :param index: An indexer to be used for querying metrics :param sync: If True, then delete everything synchronously and raise on error @@ -209,7 +211,7 @@ class StorageDriver(object): metrics_to_expunge = index.list_metrics(status='delete') for m in metrics_to_expunge: try: - self.delete_metric(m, sync) + self.delete_metric(incoming, m, sync) index.expunge_metric(m.id) except (indexer.NoSuchMetric, LockedMetric): # It's possible another process deleted or is deleting the @@ -222,7 +224,7 @@ class StorageDriver(object): exc_info=True) @staticmethod - def process_new_measures(indexer, metrics, sync=False): + def process_new_measures(indexer, incoming, metrics, sync=False): """Process added measures in background. 
Some drivers might need to have a background task running that process diff --git a/gnocchi/storage/_carbonara.py b/gnocchi/storage/_carbonara.py index 570e3ef3..f52efb6b 100644 --- a/gnocchi/storage/_carbonara.py +++ b/gnocchi/storage/_carbonara.py @@ -55,8 +55,8 @@ class CorruptionError(ValueError): class CarbonaraBasedStorage(storage.StorageDriver): - def __init__(self, conf, incoming, coord=None): - super(CarbonaraBasedStorage, self).__init__(conf, incoming) + def __init__(self, conf, coord=None): + super(CarbonaraBasedStorage, self).__init__(conf) self.aggregation_workers_number = conf.aggregation_workers_number if self.aggregation_workers_number == 1: # NOTE(jd) Avoid using futures at all if we don't want any threads. @@ -335,10 +335,10 @@ class CarbonaraBasedStorage(storage.StorageDriver): def _delete_metric(metric): raise NotImplementedError - def delete_metric(self, metric, sync=False): + def delete_metric(self, incoming, metric, sync=False): LOG.debug("Deleting metric %s", metric) - lock = self.incoming.get_sack_lock( - self.coord, self.incoming.sack_for_metric(metric.id)) + lock = incoming.get_sack_lock( + self.coord, incoming.sack_for_metric(metric.id)) if not lock.acquire(blocking=sync): raise storage.LockedMetric(metric) # NOTE(gordc): no need to hold lock because the metric has been already @@ -346,7 +346,7 @@ class CarbonaraBasedStorage(storage.StorageDriver): # is going to process it anymore. 
lock.release() self._delete_metric(metric) - self.incoming.delete_unprocessed_measures_for_metric_id(metric.id) + incoming.delete_unprocessed_measures_for_metric_id(metric.id) LOG.debug("Deleted metric %s", metric) @staticmethod @@ -354,19 +354,20 @@ class CarbonaraBasedStorage(storage.StorageDriver): aggregation, granularity, version=3): raise NotImplementedError - def refresh_metric(self, indexer, metric, timeout): - s = self.incoming.sack_for_metric(metric.id) - lock = self.incoming.get_sack_lock(self.coord, s) + def refresh_metric(self, indexer, incoming, metric, timeout): + s = incoming.sack_for_metric(metric.id) + lock = incoming.get_sack_lock(self.coord, s) if not lock.acquire(blocking=timeout): raise storage.SackLockTimeoutError( 'Unable to refresh metric: %s. Metric is locked. ' 'Please try again.' % metric.id) try: - self.process_new_measures(indexer, [six.text_type(metric.id)]) + self.process_new_measures(indexer, incoming, + [six.text_type(metric.id)]) finally: lock.release() - def process_new_measures(self, indexer, metrics_to_process, + def process_new_measures(self, indexer, incoming, metrics_to_process, sync=False): # process only active metrics. deleted metrics with unprocessed # measures will be skipped until cleaned by janitor. 
@@ -375,7 +376,7 @@ class CarbonaraBasedStorage(storage.StorageDriver): # NOTE(gordc): must lock at sack level try: LOG.debug("Processing measures for %s", metric) - with self.incoming.process_measure_for_metric(metric) \ + with incoming.process_measure_for_metric(metric) \ as measures: self._compute_and_store_timeseries(metric, measures) LOG.debug("Measures for metric %s processed", metric) diff --git a/gnocchi/storage/ceph.py b/gnocchi/storage/ceph.py index 0036aaa9..a5e9496a 100644 --- a/gnocchi/storage/ceph.py +++ b/gnocchi/storage/ceph.py @@ -43,8 +43,8 @@ rados = ceph.rados class CephStorage(_carbonara.CarbonaraBasedStorage): WRITE_FULL = False - def __init__(self, conf, incoming, coord=None): - super(CephStorage, self).__init__(conf, incoming, coord) + def __init__(self, conf, coord=None): + super(CephStorage, self).__init__(conf, coord) self.rados, self.ioctx = ceph.create_rados_connection(conf) def __str__(self): diff --git a/gnocchi/storage/file.py b/gnocchi/storage/file.py index c96e6d8d..8d288af0 100644 --- a/gnocchi/storage/file.py +++ b/gnocchi/storage/file.py @@ -36,8 +36,8 @@ OPTS = [ class FileStorage(_carbonara.CarbonaraBasedStorage): WRITE_FULL = True - def __init__(self, conf, incoming, coord=None): - super(FileStorage, self).__init__(conf, incoming, coord) + def __init__(self, conf, coord=None): + super(FileStorage, self).__init__(conf, coord) self.basepath = conf.file_basepath self.basepath_tmp = os.path.join(self.basepath, 'tmp') utils.ensure_paths([self.basepath_tmp]) diff --git a/gnocchi/storage/incoming/ceph.py b/gnocchi/storage/incoming/ceph.py index 15777a52..5c94f1a1 100644 --- a/gnocchi/storage/incoming/ceph.py +++ b/gnocchi/storage/incoming/ceph.py @@ -53,6 +53,10 @@ class CephStorage(_carbonara.CarbonaraBasedStorage): # we are safe and good. 
self.OMAP_WRITE_FLAGS = rados.LIBRADOS_OPERATION_SKIPRWLOCKS + def __str__(self): + # Use cluster ID for now + return "%s: %s" % (self.__class__.__name__, self.rados.get_fsid()) + def stop(self): ceph.close_rados_connection(self.rados, self.ioctx) super(CephStorage, self).stop() diff --git a/gnocchi/storage/incoming/file.py b/gnocchi/storage/incoming/file.py index e4d06980..b744787c 100644 --- a/gnocchi/storage/incoming/file.py +++ b/gnocchi/storage/incoming/file.py @@ -32,6 +32,9 @@ class FileStorage(_carbonara.CarbonaraBasedStorage): self.basepath = conf.file_basepath self.basepath_tmp = os.path.join(self.basepath, 'tmp') + def __str__(self): + return "%s: %s" % (self.__class__.__name__, str(self.basepath)) + def upgrade(self, num_sacks): super(FileStorage, self).upgrade(num_sacks) utils.ensure_paths([self.basepath_tmp]) diff --git a/gnocchi/storage/incoming/redis.py b/gnocchi/storage/incoming/redis.py index 75dcc95d..eb2f0448 100644 --- a/gnocchi/storage/incoming/redis.py +++ b/gnocchi/storage/incoming/redis.py @@ -27,6 +27,9 @@ class RedisStorage(_carbonara.CarbonaraBasedStorage): super(RedisStorage, self).__init__(conf) self._client = redis.get_client(conf) + def __str__(self): + return "%s: %s" % (self.__class__.__name__, self._client) + def get_storage_sacks(self): return self._client.hget(self.CFG_PREFIX, self.CFG_SACKS) diff --git a/gnocchi/storage/incoming/s3.py b/gnocchi/storage/incoming/s3.py index 3ad0fe63..51cca69a 100644 --- a/gnocchi/storage/incoming/s3.py +++ b/gnocchi/storage/incoming/s3.py @@ -40,6 +40,9 @@ class S3Storage(_carbonara.CarbonaraBasedStorage): self._bucket_prefix + "-" + self.MEASURE_PREFIX ) + def __str__(self): + return "%s: %s" % (self.__class__.__name__, self._bucket_name_measures) + def get_storage_sacks(self): try: response = self.s3.get_object(Bucket=self._bucket_name_measures, diff --git a/gnocchi/storage/incoming/swift.py b/gnocchi/storage/incoming/swift.py index 7651ef78..db8d16ea 100644 --- 
a/gnocchi/storage/incoming/swift.py +++ b/gnocchi/storage/incoming/swift.py @@ -31,6 +31,9 @@ class SwiftStorage(_carbonara.CarbonaraBasedStorage): super(SwiftStorage, self).__init__(conf) self.swift = swift.get_connection(conf) + def __str__(self): + return self.__class__.__name__ + def get_storage_sacks(self): try: __, data = self.swift.get_object(self.CFG_PREFIX, self.CFG_PREFIX) diff --git a/gnocchi/storage/redis.py b/gnocchi/storage/redis.py index 7e0ad04b..9da168d9 100644 --- a/gnocchi/storage/redis.py +++ b/gnocchi/storage/redis.py @@ -33,8 +33,8 @@ class RedisStorage(_carbonara.CarbonaraBasedStorage): STORAGE_PREFIX = "timeseries" FIELD_SEP = '_' - def __init__(self, conf, incoming, coord=None): - super(RedisStorage, self).__init__(conf, incoming, coord) + def __init__(self, conf, coord=None): + super(RedisStorage, self).__init__(conf, coord) self._client = redis.get_client(conf) def __str__(self): diff --git a/gnocchi/storage/s3.py b/gnocchi/storage/s3.py index 2234c71e..8a814319 100644 --- a/gnocchi/storage/s3.py +++ b/gnocchi/storage/s3.py @@ -63,8 +63,8 @@ class S3Storage(_carbonara.CarbonaraBasedStorage): _consistency_wait = tenacity.wait_exponential(multiplier=0.1) - def __init__(self, conf, incoming, coord=None): - super(S3Storage, self).__init__(conf, incoming, coord) + def __init__(self, conf, coord=None): + super(S3Storage, self).__init__(conf, coord) self.s3, self._region_name, self._bucket_prefix = ( s3.get_connection(conf) ) @@ -78,8 +78,8 @@ class S3Storage(_carbonara.CarbonaraBasedStorage): def __str__(self): return "%s: %s" % (self.__class__.__name__, self._bucket_name) - def upgrade(self, num_sacks): - super(S3Storage, self).upgrade(num_sacks) + def upgrade(self): + super(S3Storage, self).upgrade() try: s3.create_bucket(self.s3, self._bucket_name, self._region_name) except botocore.exceptions.ClientError as e: diff --git a/gnocchi/storage/swift.py b/gnocchi/storage/swift.py index 74898e19..f3fee992 100644 --- a/gnocchi/storage/swift.py +++ 
b/gnocchi/storage/swift.py @@ -68,8 +68,8 @@ class SwiftStorage(_carbonara.CarbonaraBasedStorage): WRITE_FULL = True - def __init__(self, conf, incoming, coord=None): - super(SwiftStorage, self).__init__(conf, incoming, coord) + def __init__(self, conf, coord=None): + super(SwiftStorage, self).__init__(conf, coord) self.swift = swift.get_connection(conf) self._container_prefix = conf.swift_container_prefix diff --git a/gnocchi/tests/base.py b/gnocchi/tests/base.py index 98f12573..a55db108 100644 --- a/gnocchi/tests/base.py +++ b/gnocchi/tests/base.py @@ -348,15 +348,17 @@ class TestCase(BaseTestCase): "storage") self.storage = storage.get_driver(self.conf) + self.incoming = storage.get_incoming_driver(self.conf) if self.conf.storage.driver == 'redis': # Create one prefix per test self.storage.STORAGE_PREFIX = str(uuid.uuid4()) if self.conf.incoming.driver == 'redis': - self.storage.incoming.SACK_PREFIX = str(uuid.uuid4()) + self.incoming.SACK_PREFIX = str(uuid.uuid4()) - self.storage.upgrade(128) + self.storage.upgrade() + self.incoming.upgrade(128) def tearDown(self): self.index.disconnect() diff --git a/gnocchi/tests/functional/fixtures.py b/gnocchi/tests/functional/fixtures.py index 13b7ebbd..0ed2924b 100644 --- a/gnocchi/tests/functional/fixtures.py +++ b/gnocchi/tests/functional/fixtures.py @@ -127,7 +127,9 @@ class ConfigFixture(fixture.GabbiFixture): self.index = index s = storage.get_driver(conf) - s.upgrade(128) + s.upgrade() + i = storage.get_incoming_driver(conf) + i.upgrade(128) LOAD_APP_KWARGS = { 'storage': s, @@ -136,7 +138,7 @@ class ConfigFixture(fixture.GabbiFixture): } # start up a thread to async process measures - self.metricd_thread = MetricdThread(index, s) + self.metricd_thread = MetricdThread(index, s, i) self.metricd_thread.start() def stop_fixture(self): @@ -166,16 +168,18 @@ class ConfigFixture(fixture.GabbiFixture): class MetricdThread(threading.Thread): """Run metricd in a naive thread to process measures.""" - def __init__(self, 
index, storer, name='metricd'): + def __init__(self, index, storer, incoming, name='metricd'): super(MetricdThread, self).__init__(name=name) self.index = index self.storage = storer + self.incoming = incoming self.flag = True def run(self): while self.flag: - metrics = utils.list_all_incoming_metrics(self.storage.incoming) - self.storage.process_background_tasks(self.index, metrics) + metrics = utils.list_all_incoming_metrics(self.incoming) + self.storage.process_background_tasks( + self.index, self.incoming, metrics) time.sleep(0.1) def stop(self): diff --git a/gnocchi/tests/test_aggregates.py b/gnocchi/tests/test_aggregates.py index d5d4e900..2f524ee0 100644 --- a/gnocchi/tests/test_aggregates.py +++ b/gnocchi/tests/test_aggregates.py @@ -60,9 +60,10 @@ class TestAggregates(tests_base.TestCase): utils.dt_in_unix_ns(start_time + incr * n), val) for n, val in enumerate(data)] self.index.create_metric(metric.id, str(uuid.uuid4()), 'medium') - self.storage.incoming.add_measures(metric, measures) - metrics = tests_utils.list_all_incoming_metrics(self.storage.incoming) - self.storage.process_background_tasks(self.index, metrics, sync=True) + self.incoming.add_measures(metric, measures) + metrics = tests_utils.list_all_incoming_metrics(self.incoming) + self.storage.process_background_tasks( + self.index, self.incoming, metrics, sync=True) return metric @@ -87,7 +88,7 @@ class TestAggregates(tests_base.TestCase): self.assertEqual(39.0, result[datetime.datetime(2014, 1, 1, 12)]) self.assertEqual(25.5, result[datetime.datetime(2014, 1, 1, 12, 1)]) - self.storage.delete_metric(metric) + self.storage.delete_metric(self.incoming, metric) def test_compute_moving_average(self): metric = self._test_create_metric_and_data([69, 42, 6, 44, 7], @@ -113,4 +114,4 @@ class TestAggregates(tests_base.TestCase): # there are only two points in the retrieved data seems weird. # better to raise an error or return nan in this case? 
- self.storage.delete_metric(metric) + self.storage.delete_metric(self.incoming, metric) diff --git a/gnocchi/tests/test_rest.py b/gnocchi/tests/test_rest.py index bce778f9..3166fae1 100644 --- a/gnocchi/tests/test_rest.py +++ b/gnocchi/tests/test_rest.py @@ -62,6 +62,7 @@ class TestingApp(webtest.TestApp): self.auth_mode = kwargs.pop('auth_mode') self.storage = kwargs.pop('storage') self.indexer = kwargs.pop('indexer') + self.incoming = kwargs.pop('incoming') super(TestingApp, self).__init__(*args, **kwargs) # Setup Keystone auth_token fake cache self.token = self.VALID_TOKEN @@ -127,8 +128,9 @@ class TestingApp(webtest.TestApp): elif self.auth_mode == "remoteuser": req.remote_user = self.user response = super(TestingApp, self).do_request(req, *args, **kwargs) - metrics = tests_utils.list_all_incoming_metrics(self.storage.incoming) - self.storage.process_background_tasks(self.indexer, metrics, sync=True) + metrics = tests_utils.list_all_incoming_metrics(self.incoming) + self.storage.process_background_tasks( + self.indexer, self.incoming, metrics, sync=True) return response @@ -173,9 +175,11 @@ class RestTest(tests_base.TestCase, testscenarios.TestWithScenarios): self.app = TestingApp(app.load_app(conf=self.conf, indexer=self.index, storage=self.storage, + incoming=self.incoming, not_implemented_middleware=False), storage=self.storage, indexer=self.index, + incoming=self.incoming, auth_mode=self.auth_mode) # NOTE(jd) Used at least by docs diff --git a/gnocchi/tests/test_statsd.py b/gnocchi/tests/test_statsd.py index fc0713d6..0fe8e41f 100644 --- a/gnocchi/tests/test_statsd.py +++ b/gnocchi/tests/test_statsd.py @@ -42,7 +42,7 @@ class TestStatsd(tests_base.TestCase): self.stats = statsd.Stats(self.conf) # Replace storage/indexer with correct ones that have been upgraded - self.stats.storage = self.storage + self.stats.incoming = self.incoming self.stats.indexer = self.index self.server = statsd.StatsdServer(self.stats) @@ -65,10 +65,11 @@ class 
TestStatsd(tests_base.TestCase): metric = r.get_metric(metric_key) - self.stats.storage.process_background_tasks( - self.stats.indexer, [str(metric.id)], sync=True) + self.storage.process_background_tasks( + self.stats.indexer, self.stats.incoming, + [str(metric.id)], sync=True) - measures = self.stats.storage.get_measures(metric) + measures = self.storage.get_measures(metric) self.assertEqual([ (utils.datetime_utc(2015, 1, 7), 86400.0, 1.0), (utils.datetime_utc(2015, 1, 7, 13), 3600.0, 1.0), @@ -85,10 +86,11 @@ class TestStatsd(tests_base.TestCase): ("127.0.0.1", 12345)) self.stats.flush() - self.stats.storage.process_background_tasks( - self.stats.indexer, [str(metric.id)], sync=True) + self.storage.process_background_tasks( + self.stats.indexer, self.stats.incoming, + [str(metric.id)], sync=True) - measures = self.stats.storage.get_measures(metric) + measures = self.storage.get_measures(metric) self.assertEqual([ (utils.datetime_utc(2015, 1, 7), 86400.0, 1.5), (utils.datetime_utc(2015, 1, 7, 13), 3600.0, 1.5), @@ -118,10 +120,11 @@ class TestStatsd(tests_base.TestCase): metric = r.get_metric(metric_key) self.assertIsNotNone(metric) - self.stats.storage.process_background_tasks( - self.stats.indexer, [str(metric.id)], sync=True) + self.storage.process_background_tasks( + self.stats.indexer, self.stats.incoming, + [str(metric.id)], sync=True) - measures = self.stats.storage.get_measures(metric) + measures = self.storage.get_measures(metric) self.assertEqual([ (utils.datetime_utc(2015, 1, 7), 86400.0, 1.0), (utils.datetime_utc(2015, 1, 7, 13), 3600.0, 1.0), @@ -136,10 +139,11 @@ class TestStatsd(tests_base.TestCase): ("127.0.0.1", 12345)) self.stats.flush() - self.stats.storage.process_background_tasks( - self.stats.indexer, [str(metric.id)], sync=True) + self.storage.process_background_tasks( + self.stats.indexer, self.stats.incoming, + [str(metric.id)], sync=True) - measures = self.stats.storage.get_measures(metric) + measures = self.storage.get_measures(metric) 
self.assertEqual([ (utils.datetime_utc(2015, 1, 7), 86400.0, 28), (utils.datetime_utc(2015, 1, 7, 13), 3600.0, 28), diff --git a/gnocchi/tests/test_storage.py b/gnocchi/tests/test_storage.py index 65ca635b..c5e215d3 100644 --- a/gnocchi/tests/test_storage.py +++ b/gnocchi/tests/test_storage.py @@ -68,7 +68,8 @@ class TestStorageDriver(tests_base.TestCase): def trigger_processing(self, metrics=None): if metrics is None: metrics = [str(self.metric.id)] - self.storage.process_background_tasks(self.index, metrics, sync=True) + self.storage.process_background_tasks( + self.index, self.incoming, metrics, sync=True) def test_get_driver(self): driver = storage.get_driver(self.conf) @@ -78,12 +79,12 @@ class TestStorageDriver(tests_base.TestCase): if not isinstance(self.storage, _carbonara.CarbonaraBasedStorage): self.skipTest("This driver is not based on Carbonara") - self.storage.incoming.add_measures(self.metric, [ + self.incoming.add_measures(self.metric, [ storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 0, 1), 69), ]) self.trigger_processing() - self.storage.incoming.add_measures(self.metric, [ + self.incoming.add_measures(self.metric, [ storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 13, 0, 1), 1), ]) @@ -99,7 +100,7 @@ class TestStorageDriver(tests_base.TestCase): self.assertIn((utils.datetime_utc(2014, 1, 1, 13), 300.0, 1), m) def test_aborted_initial_processing(self): - self.storage.incoming.add_measures(self.metric, [ + self.incoming.add_measures(self.metric, [ storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 0, 1), 5), ]) with mock.patch.object(self.storage, '_store_unaggregated_timeserie', @@ -119,23 +120,23 @@ class TestStorageDriver(tests_base.TestCase): self.assertIn((utils.datetime_utc(2014, 1, 1, 12), 300.0, 5.0), m) def test_list_metric_with_measures_to_process(self): - metrics = tests_utils.list_all_incoming_metrics(self.storage.incoming) + metrics = tests_utils.list_all_incoming_metrics(self.incoming) self.assertEqual(set(), metrics) - 
self.storage.incoming.add_measures(self.metric, [ + self.incoming.add_measures(self.metric, [ storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 0, 1), 69), ]) - metrics = tests_utils.list_all_incoming_metrics(self.storage.incoming) + metrics = tests_utils.list_all_incoming_metrics(self.incoming) self.assertEqual(set([str(self.metric.id)]), metrics) self.trigger_processing() - metrics = tests_utils.list_all_incoming_metrics(self.storage.incoming) + metrics = tests_utils.list_all_incoming_metrics(self.incoming) self.assertEqual(set([]), metrics) def test_delete_nonempty_metric(self): - self.storage.incoming.add_measures(self.metric, [ + self.incoming.add_measures(self.metric, [ storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 0, 1), 69), ]) self.trigger_processing() - self.storage.delete_metric(self.metric, sync=True) + self.storage.delete_metric(self.incoming, self.metric, sync=True) self.trigger_processing() self.assertEqual([], self.storage.get_measures(self.metric)) self.assertRaises(storage.MetricDoesNotExist, @@ -143,36 +144,36 @@ class TestStorageDriver(tests_base.TestCase): self.metric) def test_delete_nonempty_metric_unprocessed(self): - self.storage.incoming.add_measures(self.metric, [ + self.incoming.add_measures(self.metric, [ storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 0, 1), 69), ]) self.index.delete_metric(self.metric.id) self.trigger_processing() - __, __, details = self.storage.incoming._build_report(True) + __, __, details = self.incoming._build_report(True) self.assertIn(str(self.metric.id), details) - self.storage.expunge_metrics(self.index, sync=True) - __, __, details = self.storage.incoming._build_report(True) + self.storage.expunge_metrics(self.incoming, self.index, sync=True) + __, __, details = self.incoming._build_report(True) self.assertNotIn(str(self.metric.id), details) def test_delete_expunge_metric(self): - self.storage.incoming.add_measures(self.metric, [ + self.incoming.add_measures(self.metric, [ 
storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 0, 1), 69), ]) self.trigger_processing() self.index.delete_metric(self.metric.id) - self.storage.expunge_metrics(self.index, sync=True) + self.storage.expunge_metrics(self.incoming, self.index, sync=True) self.assertRaises(indexer.NoSuchMetric, self.index.delete_metric, self.metric.id) def test_measures_reporting_format(self): - report = self.storage.incoming.measures_report(True) + report = self.incoming.measures_report(True) self.assertIsInstance(report, dict) self.assertIn('summary', report) self.assertIn('metrics', report['summary']) self.assertIn('measures', report['summary']) self.assertIn('details', report) self.assertIsInstance(report['details'], dict) - report = self.storage.incoming.measures_report(False) + report = self.incoming.measures_report(False) self.assertIsInstance(report, dict) self.assertIn('summary', report) self.assertIn('metrics', report['summary']) @@ -182,26 +183,26 @@ class TestStorageDriver(tests_base.TestCase): def test_measures_reporting(self): m2, __ = self._create_metric('medium') for i in six.moves.range(60): - self.storage.incoming.add_measures(self.metric, [ + self.incoming.add_measures(self.metric, [ storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 0, i), 69), ]) - self.storage.incoming.add_measures(m2, [ + self.incoming.add_measures(m2, [ storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 0, i), 69), ]) - report = self.storage.incoming.measures_report(True) + report = self.incoming.measures_report(True) self.assertIsInstance(report, dict) self.assertEqual(2, report['summary']['metrics']) self.assertEqual(120, report['summary']['measures']) self.assertIn('details', report) self.assertIsInstance(report['details'], dict) - report = self.storage.incoming.measures_report(False) + report = self.incoming.measures_report(False) self.assertIsInstance(report, dict) self.assertEqual(2, report['summary']['metrics']) self.assertEqual(120, report['summary']['measures']) def 
test_add_measures_big(self): m, __ = self._create_metric('high') - self.storage.incoming.add_measures(m, [ + self.incoming.add_measures(m, [ storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, i, j), 100) for i in six.moves.range(0, 60) for j in six.moves.range(0, 60)]) self.trigger_processing([str(m.id)]) @@ -214,11 +215,11 @@ class TestStorageDriver(tests_base.TestCase): measures = [ storage.Measure(utils.dt_to_unix_ns(2014, 1, 6, i, j, 0), 100) for i in six.moves.range(2) for j in six.moves.range(0, 60, 2)] - self.storage.incoming.add_measures(m, measures) + self.incoming.add_measures(m, measures) self.trigger_processing([str(m.id)]) # add measure to end, in same aggregate time as last point. - self.storage.incoming.add_measures(m, [ + self.incoming.add_measures(m, [ storage.Measure(utils.dt_to_unix_ns(2014, 1, 6, 1, 58, 1), 100)]) with mock.patch.object(self.storage, '_store_metric_measures') as c: @@ -237,15 +238,15 @@ class TestStorageDriver(tests_base.TestCase): measures = [ storage.Measure(utils.dt_to_unix_ns(2014, 1, 6, i, j, 0), 100) for i in six.moves.range(2) for j in six.moves.range(0, 60, 2)] - self.storage.incoming.add_measures(m, measures) + self.incoming.add_measures(m, measures) self.trigger_processing([str(m.id)]) # add measure to end, in same aggregate time as last point. 
new_point = utils.dt_to_unix_ns(2014, 1, 6, 1, 58, 1) - self.storage.incoming.add_measures( + self.incoming.add_measures( m, [storage.Measure(new_point, 100)]) - with mock.patch.object(self.storage.incoming, 'add_measures') as c: + with mock.patch.object(self.incoming, 'add_measures') as c: self.trigger_processing([str(m.id)]) for __, args, __ in c.mock_calls: self.assertEqual( @@ -253,7 +254,7 @@ class TestStorageDriver(tests_base.TestCase): new_point, args[1].granularity * 10e8)) def test_delete_old_measures(self): - self.storage.incoming.add_measures(self.metric, [ + self.incoming.add_measures(self.metric, [ storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 0, 1), 69), storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 7, 31), 42), storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 9, 31), 4), @@ -270,7 +271,7 @@ class TestStorageDriver(tests_base.TestCase): ], self.storage.get_measures(self.metric)) # One year later… - self.storage.incoming.add_measures(self.metric, [ + self.incoming.add_measures(self.metric, [ storage.Measure(utils.dt_to_unix_ns(2015, 1, 1, 12, 0, 1), 69), ]) self.trigger_processing() @@ -303,7 +304,7 @@ class TestStorageDriver(tests_base.TestCase): apname) # First store some points scattered across different splits - self.storage.incoming.add_measures(self.metric, [ + self.incoming.add_measures(self.metric, [ storage.Measure(utils.dt_to_unix_ns(2016, 1, 1, 12, 0, 1), 69), storage.Measure(utils.dt_to_unix_ns(2016, 1, 2, 13, 7, 31), 42), storage.Measure(utils.dt_to_unix_ns(2016, 1, 4, 14, 9, 31), 4), @@ -343,7 +344,7 @@ class TestStorageDriver(tests_base.TestCase): # split (keep in mind the back window size in one hour here). We move # the BoundTimeSerie processing timeserie far away from its current # range. 
- self.storage.incoming.add_measures(self.metric, [ + self.incoming.add_measures(self.metric, [ storage.Measure(utils.dt_to_unix_ns(2016, 1, 10, 16, 18, 45), 45), storage.Measure(utils.dt_to_unix_ns(2016, 1, 10, 17, 12, 45), 46), ]) @@ -389,7 +390,7 @@ class TestStorageDriver(tests_base.TestCase): apname) # First store some points scattered across different splits - self.storage.incoming.add_measures(self.metric, [ + self.incoming.add_measures(self.metric, [ storage.Measure(utils.dt_to_unix_ns(2016, 1, 1, 12, 0, 1), 69), storage.Measure(utils.dt_to_unix_ns(2016, 1, 2, 13, 7, 31), 42), storage.Measure(utils.dt_to_unix_ns(2016, 1, 4, 14, 9, 31), 4), @@ -432,7 +433,7 @@ class TestStorageDriver(tests_base.TestCase): # Here we test a special case where the oldest_mutable_timestamp will # be 2016-01-10TOO:OO:OO = 1452384000.0, our new split key. - self.storage.incoming.add_measures(self.metric, [ + self.incoming.add_measures(self.metric, [ storage.Measure(utils.dt_to_unix_ns(2016, 1, 10, 0, 12), 45), ]) self.trigger_processing() @@ -475,7 +476,7 @@ class TestStorageDriver(tests_base.TestCase): apname) # First store some points scattered across different splits - self.storage.incoming.add_measures(self.metric, [ + self.incoming.add_measures(self.metric, [ storage.Measure(utils.dt_to_unix_ns(2016, 1, 1, 12, 0, 1), 69), storage.Measure(utils.dt_to_unix_ns(2016, 1, 2, 13, 7, 31), 42), storage.Measure(utils.dt_to_unix_ns(2016, 1, 4, 14, 9, 31), 4), @@ -521,7 +522,7 @@ class TestStorageDriver(tests_base.TestCase): # split (keep in mind the back window size in one hour here). We move # the BoundTimeSerie processing timeserie far away from its current # range. 
- self.storage.incoming.add_measures(self.metric, [ + self.incoming.add_measures(self.metric, [ storage.Measure(utils.dt_to_unix_ns(2016, 1, 10, 16, 18, 45), 45), storage.Measure(utils.dt_to_unix_ns(2016, 1, 10, 17, 12, 45), 46), ]) @@ -538,7 +539,7 @@ class TestStorageDriver(tests_base.TestCase): apname) # First store some points scattered across different splits - self.storage.incoming.add_measures(self.metric, [ + self.incoming.add_measures(self.metric, [ storage.Measure(utils.dt_to_unix_ns(2016, 1, 1, 12, 0, 1), 69), storage.Measure(utils.dt_to_unix_ns(2016, 1, 2, 13, 7, 31), 42), storage.Measure(utils.dt_to_unix_ns(2016, 1, 4, 14, 9, 31), 4), @@ -582,14 +583,14 @@ class TestStorageDriver(tests_base.TestCase): # split (keep in mind the back window size in one hour here). We move # the BoundTimeSerie processing timeserie far away from its current # range. - self.storage.incoming.add_measures(self.metric, [ + self.incoming.add_measures(self.metric, [ storage.Measure(utils.dt_to_unix_ns(2016, 1, 10, 16, 18, 45), 45), storage.Measure(utils.dt_to_unix_ns(2016, 1, 10, 17, 12, 45), 46), ]) self.trigger_processing() def test_updated_measures(self): - self.storage.incoming.add_measures(self.metric, [ + self.incoming.add_measures(self.metric, [ storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 0, 1), 69), storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 7, 31), 42), ]) @@ -602,7 +603,7 @@ class TestStorageDriver(tests_base.TestCase): (utils.datetime_utc(2014, 1, 1, 12, 5), 300.0, 42.0), ], self.storage.get_measures(self.metric)) - self.storage.incoming.add_measures(self.metric, [ + self.incoming.add_measures(self.metric, [ storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 9, 31), 4), storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 12, 45), 44), ]) @@ -633,7 +634,7 @@ class TestStorageDriver(tests_base.TestCase): ], self.storage.get_measures(self.metric, aggregation='min')) def test_add_and_get_measures(self): - self.storage.incoming.add_measures(self.metric, [ 
+ self.incoming.add_measures(self.metric, [ storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 0, 1), 69), storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 7, 31), 42), storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 9, 31), 4), @@ -723,7 +724,7 @@ class TestStorageDriver(tests_base.TestCase): self.archive_policies['low'])])) def test_get_measure_unknown_aggregation(self): - self.storage.incoming.add_measures(self.metric, [ + self.incoming.add_measures(self.metric, [ storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 0, 1), 69), storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 7, 31), 42), storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 9, 31), 4), @@ -736,13 +737,13 @@ class TestStorageDriver(tests_base.TestCase): def test_get_cross_metric_measures_unknown_aggregation(self): metric2 = storage.Metric(uuid.uuid4(), self.archive_policies['low']) - self.storage.incoming.add_measures(self.metric, [ + self.incoming.add_measures(self.metric, [ storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 0, 1), 69), storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 7, 31), 42), storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 9, 31), 4), storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 12, 45), 44), ]) - self.storage.incoming.add_measures(metric2, [ + self.incoming.add_measures(metric2, [ storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 0, 1), 69), storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 7, 31), 42), storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 9, 31), 4), @@ -756,13 +757,13 @@ class TestStorageDriver(tests_base.TestCase): def test_get_cross_metric_measures_unknown_granularity(self): metric2 = storage.Metric(uuid.uuid4(), self.archive_policies['low']) - self.storage.incoming.add_measures(self.metric, [ + self.incoming.add_measures(self.metric, [ storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 0, 1), 69), storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 7, 31), 42), storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 9, 31), 4), 
storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 12, 45), 44), ]) - self.storage.incoming.add_measures(metric2, [ + self.incoming.add_measures(metric2, [ storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 0, 1), 69), storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 7, 31), 42), storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 9, 31), 4), @@ -776,13 +777,13 @@ class TestStorageDriver(tests_base.TestCase): def test_add_and_get_cross_metric_measures_different_archives(self): metric2 = storage.Metric(uuid.uuid4(), self.archive_policies['no_granularity_match']) - self.storage.incoming.add_measures(self.metric, [ + self.incoming.add_measures(self.metric, [ storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 0, 1), 69), storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 7, 31), 42), storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 9, 31), 4), storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 12, 45), 44), ]) - self.storage.incoming.add_measures(metric2, [ + self.incoming.add_measures(metric2, [ storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 0, 1), 69), storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 7, 31), 42), storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 9, 31), 4), @@ -795,13 +796,13 @@ class TestStorageDriver(tests_base.TestCase): def test_add_and_get_cross_metric_measures(self): metric2, __ = self._create_metric() - self.storage.incoming.add_measures(self.metric, [ + self.incoming.add_measures(self.metric, [ storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 0, 1), 69), storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 7, 31), 42), storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 9, 31), 4), storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 12, 45), 44), ]) - self.storage.incoming.add_measures(metric2, [ + self.incoming.add_measures(metric2, [ storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 0, 5), 9), storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 7, 41), 2), storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 10, 31), 
4), @@ -880,14 +881,14 @@ class TestStorageDriver(tests_base.TestCase): def test_add_and_get_cross_metric_measures_with_holes(self): metric2, __ = self._create_metric() - self.storage.incoming.add_measures(self.metric, [ + self.incoming.add_measures(self.metric, [ storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 0, 1), 69), storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 7, 31), 42), storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 5, 31), 8), storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 9, 31), 4), storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 12, 45), 42), ]) - self.storage.incoming.add_measures(metric2, [ + self.incoming.add_measures(metric2, [ storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 0, 5), 9), storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 7, 31), 2), storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 9, 31), 6), @@ -906,7 +907,7 @@ class TestStorageDriver(tests_base.TestCase): def test_search_value(self): metric2, __ = self._create_metric() - self.storage.incoming.add_measures(self.metric, [ + self.incoming.add_measures(self.metric, [ storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 0, 1,), 69), storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 7, 31), 42), storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 5, 31), 8), @@ -914,7 +915,7 @@ class TestStorageDriver(tests_base.TestCase): storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 12, 45), 42), ]) - self.storage.incoming.add_measures(metric2, [ + self.incoming.add_measures(metric2, [ storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 0, 5), 9), storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 7, 31), 2), storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 9, 31), 6), @@ -947,7 +948,7 @@ class TestStorageDriver(tests_base.TestCase): self.index.create_archive_policy(ap) m = self.index.create_metric(uuid.uuid4(), str(uuid.uuid4()), name) m = self.index.list_metrics(ids=[m.id])[0] - self.storage.incoming.add_measures(m, [ + self.incoming.add_measures(m, [ 
storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 0, 0), 1), storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 0, 5), 1), storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 0, 10), 1), @@ -962,7 +963,7 @@ class TestStorageDriver(tests_base.TestCase): self.index.update_archive_policy( name, [archive_policy.ArchivePolicyItem(granularity=5, points=6)]) m = self.index.list_metrics(ids=[m.id])[0] - self.storage.incoming.add_measures(m, [ + self.incoming.add_measures(m, [ storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 0, 15), 1), ]) self.trigger_processing([str(m.id)]) -- GitLab From 93c1a018795c43f934d374b4ea7a14806a7bb10f Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Thu, 22 Jun 2017 18:48:43 +0200 Subject: [PATCH 0857/1483] Move gnocchi.storage.incoming to gnocchi.incoming --- gnocchi/cli.py | 20 +++++++++---------- gnocchi/{storage => }/incoming/__init__.py | 10 ++++++++++ gnocchi/{storage => }/incoming/_carbonara.py | 2 +- gnocchi/{storage => }/incoming/ceph.py | 2 +- gnocchi/{storage => }/incoming/file.py | 2 +- gnocchi/{storage => }/incoming/redis.py | 2 +- gnocchi/{storage => }/incoming/s3.py | 2 +- gnocchi/{storage => }/incoming/swift.py | 2 +- gnocchi/rest/__init__.py | 2 +- gnocchi/rest/app.py | 3 ++- gnocchi/statsd.py | 3 ++- gnocchi/storage/__init__.py | 21 ++------------------ gnocchi/tests/base.py | 3 ++- gnocchi/tests/functional/fixtures.py | 3 ++- gnocchi/utils.py | 10 ++++++++++ setup.cfg | 10 +++++----- 16 files changed, 52 insertions(+), 45 deletions(-) rename gnocchi/{storage => }/incoming/__init__.py (88%) rename gnocchi/{storage => }/incoming/_carbonara.py (99%) rename gnocchi/{storage => }/incoming/ceph.py (99%) rename gnocchi/{storage => }/incoming/file.py (99%) rename gnocchi/{storage => }/incoming/redis.py (98%) rename gnocchi/{storage => }/incoming/s3.py (99%) rename gnocchi/{storage => }/incoming/swift.py (98%) diff --git a/gnocchi/cli.py b/gnocchi/cli.py index c392e64d..a895cc16 100644 --- a/gnocchi/cli.py +++ b/gnocchi/cli.py 
@@ -32,12 +32,12 @@ import tooz from gnocchi import archive_policy from gnocchi import genconfig +from gnocchi import incoming from gnocchi import indexer from gnocchi.rest import app from gnocchi import service from gnocchi import statsd as statsd_service from gnocchi import storage -from gnocchi.storage import incoming from gnocchi import utils @@ -74,7 +74,7 @@ def upgrade(): LOG.info("Upgrading storage %s", s) s.upgrade() if not conf.skip_incoming: - i = storage.get_incoming_driver(conf) + i = incoming.get_driver(conf) LOG.info("Upgrading incoming storage %s", i) i.upgrade(conf.sacks_number) @@ -96,7 +96,7 @@ def change_sack_size(): help="Number of storage sacks."), ]) conf = service.prepare_service(conf=conf, log_to_std=True) - s = storage.get_incoming_driver(conf) + s = incoming.get_driver(conf) try: report = s.measures_report(details=False) except incoming.SackDetectionError: @@ -128,7 +128,7 @@ class MetricProcessBase(cotyledon.Service): def _configure(self): self.store = storage.get_driver(self.conf) - self.incoming = storage.get_incoming_driver(self.conf) + self.incoming = incoming.get_driver(self.conf) self.index = indexer.get_driver(self.conf) self.index.connect() @@ -166,7 +166,7 @@ class MetricReporting(MetricProcessBase): worker_id, conf, conf.metricd.metric_reporting_delay) def _configure(self): - self.incoming = storage.get_incoming_driver(self.conf) + self.incoming = incoming.get_driver(self.conf) def _run_job(self): try: @@ -198,7 +198,7 @@ class MetricProcessor(MetricProcessBase): @utils.retry def _configure(self): self.store = storage.get_driver(self.conf, self.coord) - self.incoming = storage.get_incoming_driver(self.conf) + self.incoming = incoming.get_driver(self.conf) self.index = indexer.get_driver(self.conf) self.index.connect() @@ -313,14 +313,14 @@ def metricd_tester(conf): index = indexer.get_driver(conf) index.connect() s = storage.get_driver(conf) - incoming = storage.get_incoming_driver(conf) + inc = incoming.get_driver(conf) 
metrics = set() - for i in six.moves.range(incoming.NUM_SACKS): - metrics.update(incoming.list_metric_with_measures_to_process(i)) + for i in six.moves.range(inc.NUM_SACKS): + metrics.update(inc.list_metric_with_measures_to_process(i)) if len(metrics) >= conf.stop_after_processing_metrics: break s.process_new_measures( - index, incoming, + index, inc, list(metrics)[:conf.stop_after_processing_metrics], True) diff --git a/gnocchi/storage/incoming/__init__.py b/gnocchi/incoming/__init__.py similarity index 88% rename from gnocchi/storage/incoming/__init__.py rename to gnocchi/incoming/__init__.py index 12d7d102..233ac9c7 100644 --- a/gnocchi/storage/incoming/__init__.py +++ b/gnocchi/incoming/__init__.py @@ -16,6 +16,7 @@ # under the License. from gnocchi import exceptions +from gnocchi import utils class ReportGenerationError(Exception): @@ -66,3 +67,12 @@ class StorageDriver(object): @staticmethod def list_metric_with_measures_to_process(sack): raise NotImplementedError + + +def get_driver(conf): + """Return configured incoming driver only + + :param conf: incoming configuration only (not global) + """ + return utils.get_driver_class('gnocchi.incoming', conf.incoming)( + conf.incoming) diff --git a/gnocchi/storage/incoming/_carbonara.py b/gnocchi/incoming/_carbonara.py similarity index 99% rename from gnocchi/storage/incoming/_carbonara.py rename to gnocchi/incoming/_carbonara.py index ee6ddcc2..2e6e4afb 100644 --- a/gnocchi/storage/incoming/_carbonara.py +++ b/gnocchi/incoming/_carbonara.py @@ -22,7 +22,7 @@ import daiquiri import pandas import six -from gnocchi.storage import incoming +from gnocchi import incoming from gnocchi import utils LOG = daiquiri.getLogger(__name__) diff --git a/gnocchi/storage/incoming/ceph.py b/gnocchi/incoming/ceph.py similarity index 99% rename from gnocchi/storage/incoming/ceph.py rename to gnocchi/incoming/ceph.py index 5c94f1a1..ff27e15c 100644 --- a/gnocchi/storage/incoming/ceph.py +++ b/gnocchi/incoming/ceph.py @@ -19,8 +19,8 @@ 
import uuid import six +from gnocchi.incoming import _carbonara from gnocchi.storage.common import ceph -from gnocchi.storage.incoming import _carbonara rados = ceph.rados diff --git a/gnocchi/storage/incoming/file.py b/gnocchi/incoming/file.py similarity index 99% rename from gnocchi/storage/incoming/file.py rename to gnocchi/incoming/file.py index b744787c..77c088e0 100644 --- a/gnocchi/storage/incoming/file.py +++ b/gnocchi/incoming/file.py @@ -22,7 +22,7 @@ import uuid import six -from gnocchi.storage.incoming import _carbonara +from gnocchi.incoming import _carbonara from gnocchi import utils diff --git a/gnocchi/storage/incoming/redis.py b/gnocchi/incoming/redis.py similarity index 98% rename from gnocchi/storage/incoming/redis.py rename to gnocchi/incoming/redis.py index eb2f0448..377b3539 100644 --- a/gnocchi/storage/incoming/redis.py +++ b/gnocchi/incoming/redis.py @@ -17,8 +17,8 @@ import contextlib import six +from gnocchi.incoming import _carbonara from gnocchi.storage.common import redis -from gnocchi.storage.incoming import _carbonara class RedisStorage(_carbonara.CarbonaraBasedStorage): diff --git a/gnocchi/storage/incoming/s3.py b/gnocchi/incoming/s3.py similarity index 99% rename from gnocchi/storage/incoming/s3.py rename to gnocchi/incoming/s3.py index 51cca69a..7a41e6cb 100644 --- a/gnocchi/storage/incoming/s3.py +++ b/gnocchi/incoming/s3.py @@ -21,8 +21,8 @@ import uuid import six +from gnocchi.incoming import _carbonara from gnocchi.storage.common import s3 -from gnocchi.storage.incoming import _carbonara boto3 = s3.boto3 botocore = s3.botocore diff --git a/gnocchi/storage/incoming/swift.py b/gnocchi/incoming/swift.py similarity index 98% rename from gnocchi/storage/incoming/swift.py rename to gnocchi/incoming/swift.py index db8d16ea..b6ba2210 100644 --- a/gnocchi/storage/incoming/swift.py +++ b/gnocchi/incoming/swift.py @@ -19,8 +19,8 @@ import uuid import six +from gnocchi.incoming import _carbonara from gnocchi.storage.common import swift 
-from gnocchi.storage.incoming import _carbonara swclient = swift.swclient swift_utils = swift.swift_utils diff --git a/gnocchi/rest/__init__.py b/gnocchi/rest/__init__.py index 7d29e34a..691294de 100644 --- a/gnocchi/rest/__init__.py +++ b/gnocchi/rest/__init__.py @@ -32,11 +32,11 @@ import werkzeug.http from gnocchi import aggregates from gnocchi import archive_policy +from gnocchi import incoming from gnocchi import indexer from gnocchi import json from gnocchi import resource_type from gnocchi import storage -from gnocchi.storage import incoming from gnocchi import utils diff --git a/gnocchi/rest/app.py b/gnocchi/rest/app.py index 5e0d5ca7..947c888f 100644 --- a/gnocchi/rest/app.py +++ b/gnocchi/rest/app.py @@ -28,6 +28,7 @@ from stevedore import driver import webob.exc from gnocchi import exceptions +from gnocchi import incoming as gnocchi_incoming from gnocchi import indexer as gnocchi_indexer from gnocchi import json from gnocchi import service @@ -103,7 +104,7 @@ def load_app(conf, indexer=None, storage=None, incoming=None, if not storage: storage = gnocchi_storage.get_driver(conf) if not incoming: - incoming = gnocchi_storage.get_incoming_driver(conf) + incoming = gnocchi_incoming.get_driver(conf) if not indexer: indexer = gnocchi_indexer.get_driver(conf) indexer.connect() diff --git a/gnocchi/statsd.py b/gnocchi/statsd.py index d36e03e8..c2ce102f 100644 --- a/gnocchi/statsd.py +++ b/gnocchi/statsd.py @@ -23,6 +23,7 @@ import daiquiri from oslo_config import cfg import six +from gnocchi import incoming from gnocchi import indexer from gnocchi import service from gnocchi import storage @@ -35,7 +36,7 @@ LOG = daiquiri.getLogger(__name__) class Stats(object): def __init__(self, conf): self.conf = conf - self.incoming = storage.get_incoming_driver(self.conf) + self.incoming = incoming.get_driver(self.conf) self.indexer = indexer.get_driver(self.conf) self.indexer.connect() try: diff --git a/gnocchi/storage/__init__.py b/gnocchi/storage/__init__.py index 
9694e57a..2a7709b8 100644 --- a/gnocchi/storage/__init__.py +++ b/gnocchi/storage/__init__.py @@ -17,10 +17,10 @@ import operator import daiquiri from oslo_config import cfg -from stevedore import driver from gnocchi import exceptions from gnocchi import indexer +from gnocchi import utils OPTS = [ @@ -142,26 +142,9 @@ class LockedMetric(StorageError): super(LockedMetric, self).__init__("Metric %s is locked" % metric) -def get_driver_class(namespace, conf): - """Return the storage driver class. - - :param conf: The conf to use to determine the driver. - """ - return driver.DriverManager(namespace, - conf.driver).driver - - -def get_incoming_driver(conf): - """Return configured incoming driver only - - :param conf: incoming configuration only (not global) - """ - return get_driver_class('gnocchi.incoming', conf.incoming)(conf.incoming) - - def get_driver(conf, coord=None): """Return the configured driver.""" - return get_driver_class('gnocchi.storage', conf.storage)( + return utils.get_driver_class('gnocchi.storage', conf.storage)( conf.storage, coord) diff --git a/gnocchi/tests/base.py b/gnocchi/tests/base.py index a55db108..7b09525c 100644 --- a/gnocchi/tests/base.py +++ b/gnocchi/tests/base.py @@ -33,6 +33,7 @@ from tooz import coordination from gnocchi import archive_policy from gnocchi import exceptions +from gnocchi import incoming from gnocchi import indexer from gnocchi import service from gnocchi import storage @@ -348,7 +349,7 @@ class TestCase(BaseTestCase): "storage") self.storage = storage.get_driver(self.conf) - self.incoming = storage.get_incoming_driver(self.conf) + self.incoming = incoming.get_driver(self.conf) if self.conf.storage.driver == 'redis': # Create one prefix per test diff --git a/gnocchi/tests/functional/fixtures.py b/gnocchi/tests/functional/fixtures.py index 0ed2924b..123ca99f 100644 --- a/gnocchi/tests/functional/fixtures.py +++ b/gnocchi/tests/functional/fixtures.py @@ -28,6 +28,7 @@ from oslo_config import cfg from oslo_middleware 
import cors import sqlalchemy_utils +from gnocchi import incoming from gnocchi import indexer from gnocchi.indexer import sqlalchemy from gnocchi.rest import app @@ -128,7 +129,7 @@ class ConfigFixture(fixture.GabbiFixture): s = storage.get_driver(conf) s.upgrade() - i = storage.get_incoming_driver(conf) + i = incoming.get_driver(conf) i.upgrade(128) LOAD_APP_KWARGS = { diff --git a/gnocchi/utils.py b/gnocchi/utils.py index 76667115..046dbd76 100644 --- a/gnocchi/utils.py +++ b/gnocchi/utils.py @@ -29,6 +29,7 @@ import monotonic import numpy import pandas as pd import six +from stevedore import driver import tenacity from tooz import coordination @@ -299,3 +300,12 @@ class StopWatch(object): self._stopped_at = monotonic.monotonic() self._state = self._STOPPED return self + + +def get_driver_class(namespace, conf): + """Return the storage driver class. + + :param conf: The conf to use to determine the driver. + """ + return driver.DriverManager(namespace, + conf.driver).driver diff --git a/setup.cfg b/setup.cfg index a4ef0eb9..c1794dfa 100644 --- a/setup.cfg +++ b/setup.cfg @@ -112,11 +112,11 @@ gnocchi.storage = redis = gnocchi.storage.redis:RedisStorage gnocchi.incoming = - ceph = gnocchi.storage.incoming.ceph:CephStorage - file = gnocchi.storage.incoming.file:FileStorage - swift = gnocchi.storage.incoming.swift:SwiftStorage - s3 = gnocchi.storage.incoming.s3:S3Storage - redis = gnocchi.storage.incoming.redis:RedisStorage + ceph = gnocchi.incoming.ceph:CephStorage + file = gnocchi.incoming.file:FileStorage + swift = gnocchi.incoming.swift:SwiftStorage + s3 = gnocchi.incoming.s3:S3Storage + redis = gnocchi.incoming.redis:RedisStorage gnocchi.indexer = mysql = gnocchi.indexer.sqlalchemy:SQLAlchemyIndexer -- GitLab From b05a8135a228db1b2dc260803040e9fc07d208ab Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Fri, 23 Jun 2017 10:32:07 +0200 Subject: [PATCH 0858/1483] Move gnocchi.storage.common to gnocchi.common --- gnocchi/{storage => }/common/__init__.py | 0 
gnocchi/{storage => }/common/ceph.py | 0 gnocchi/{storage => }/common/redis.py | 0 gnocchi/{storage => }/common/s3.py | 0 gnocchi/{storage => }/common/swift.py | 0 gnocchi/incoming/ceph.py | 2 +- gnocchi/incoming/redis.py | 2 +- gnocchi/incoming/s3.py | 2 +- gnocchi/incoming/swift.py | 2 +- gnocchi/storage/ceph.py | 2 +- gnocchi/storage/redis.py | 2 +- gnocchi/storage/s3.py | 2 +- gnocchi/storage/swift.py | 2 +- 13 files changed, 8 insertions(+), 8 deletions(-) rename gnocchi/{storage => }/common/__init__.py (100%) rename gnocchi/{storage => }/common/ceph.py (100%) rename gnocchi/{storage => }/common/redis.py (100%) rename gnocchi/{storage => }/common/s3.py (100%) rename gnocchi/{storage => }/common/swift.py (100%) diff --git a/gnocchi/storage/common/__init__.py b/gnocchi/common/__init__.py similarity index 100% rename from gnocchi/storage/common/__init__.py rename to gnocchi/common/__init__.py diff --git a/gnocchi/storage/common/ceph.py b/gnocchi/common/ceph.py similarity index 100% rename from gnocchi/storage/common/ceph.py rename to gnocchi/common/ceph.py diff --git a/gnocchi/storage/common/redis.py b/gnocchi/common/redis.py similarity index 100% rename from gnocchi/storage/common/redis.py rename to gnocchi/common/redis.py diff --git a/gnocchi/storage/common/s3.py b/gnocchi/common/s3.py similarity index 100% rename from gnocchi/storage/common/s3.py rename to gnocchi/common/s3.py diff --git a/gnocchi/storage/common/swift.py b/gnocchi/common/swift.py similarity index 100% rename from gnocchi/storage/common/swift.py rename to gnocchi/common/swift.py diff --git a/gnocchi/incoming/ceph.py b/gnocchi/incoming/ceph.py index ff27e15c..30fbe244 100644 --- a/gnocchi/incoming/ceph.py +++ b/gnocchi/incoming/ceph.py @@ -19,8 +19,8 @@ import uuid import six +from gnocchi.common import ceph from gnocchi.incoming import _carbonara -from gnocchi.storage.common import ceph rados = ceph.rados diff --git a/gnocchi/incoming/redis.py b/gnocchi/incoming/redis.py index 
377b3539..7ca3c551 100644 --- a/gnocchi/incoming/redis.py +++ b/gnocchi/incoming/redis.py @@ -17,8 +17,8 @@ import contextlib import six +from gnocchi.common import redis from gnocchi.incoming import _carbonara -from gnocchi.storage.common import redis class RedisStorage(_carbonara.CarbonaraBasedStorage): diff --git a/gnocchi/incoming/s3.py b/gnocchi/incoming/s3.py index 7a41e6cb..345fb1d4 100644 --- a/gnocchi/incoming/s3.py +++ b/gnocchi/incoming/s3.py @@ -21,8 +21,8 @@ import uuid import six +from gnocchi.common import s3 from gnocchi.incoming import _carbonara -from gnocchi.storage.common import s3 boto3 = s3.boto3 botocore = s3.botocore diff --git a/gnocchi/incoming/swift.py b/gnocchi/incoming/swift.py index b6ba2210..e38b2ac9 100644 --- a/gnocchi/incoming/swift.py +++ b/gnocchi/incoming/swift.py @@ -19,8 +19,8 @@ import uuid import six +from gnocchi.common import swift from gnocchi.incoming import _carbonara -from gnocchi.storage.common import swift swclient = swift.swclient swift_utils = swift.swift_utils diff --git a/gnocchi/storage/ceph.py b/gnocchi/storage/ceph.py index a5e9496a..8da77d96 100644 --- a/gnocchi/storage/ceph.py +++ b/gnocchi/storage/ceph.py @@ -16,9 +16,9 @@ from oslo_config import cfg +from gnocchi.common import ceph from gnocchi import storage from gnocchi.storage import _carbonara -from gnocchi.storage.common import ceph OPTS = [ diff --git a/gnocchi/storage/redis.py b/gnocchi/storage/redis.py index 9da168d9..218cd843 100644 --- a/gnocchi/storage/redis.py +++ b/gnocchi/storage/redis.py @@ -15,9 +15,9 @@ # under the License. 
from oslo_config import cfg +from gnocchi.common import redis from gnocchi import storage from gnocchi.storage import _carbonara -from gnocchi.storage.common import redis OPTS = [ diff --git a/gnocchi/storage/s3.py b/gnocchi/storage/s3.py index 8a814319..2c5f7d10 100644 --- a/gnocchi/storage/s3.py +++ b/gnocchi/storage/s3.py @@ -18,9 +18,9 @@ import os from oslo_config import cfg import tenacity +from gnocchi.common import s3 from gnocchi import storage from gnocchi.storage import _carbonara -from gnocchi.storage.common import s3 boto3 = s3.boto3 botocore = s3.botocore diff --git a/gnocchi/storage/swift.py b/gnocchi/storage/swift.py index f3fee992..6691f38a 100644 --- a/gnocchi/storage/swift.py +++ b/gnocchi/storage/swift.py @@ -16,9 +16,9 @@ from oslo_config import cfg +from gnocchi.common import swift from gnocchi import storage from gnocchi.storage import _carbonara -from gnocchi.storage.common import swift swclient = swift.swclient swift_utils = swift.swift_utils -- GitLab From ce437fc169a5abf103dba747d16d7766d04d3f5b Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Thu, 6 Jul 2017 14:43:04 +0200 Subject: [PATCH 0859/1483] tests: pass incoming driver to load_app() in Gabbi tests The incoming driver created by Gabbi fixtures is not passed to the created WSGI app. While it works anyway (the conf object is passed and used to re-recreate that incoming object) it's faster to re-use the same object. Let's do that. 
--- gnocchi/tests/functional/fixtures.py | 1 + 1 file changed, 1 insertion(+) diff --git a/gnocchi/tests/functional/fixtures.py b/gnocchi/tests/functional/fixtures.py index 123ca99f..0b970e03 100644 --- a/gnocchi/tests/functional/fixtures.py +++ b/gnocchi/tests/functional/fixtures.py @@ -135,6 +135,7 @@ class ConfigFixture(fixture.GabbiFixture): LOAD_APP_KWARGS = { 'storage': s, 'indexer': index, + 'incoming': i, 'conf': conf, } -- GitLab From 9a7710c31e8f2f2e3a67aaf0267ba69ee78a77e3 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Wed, 28 Jun 2017 20:59:15 +0200 Subject: [PATCH 0860/1483] Allow metric aggregation with a POST When we aggregate metric with a GET we have a limitation with the number of metric we can aggregate. This change allows to do the same thing with a POST to remove the limitation. --- gnocchi/rest/__init__.py | 18 ++++++++++++++++-- .../tests/functional/gabbits/aggregation.yaml | 14 +++++++++++++- 2 files changed, 29 insertions(+), 3 deletions(-) diff --git a/gnocchi/rest/__init__.py b/gnocchi/rest/__init__.py index 691294de..661e31dd 100644 --- a/gnocchi/rest/__init__.py +++ b/gnocchi/rest/__init__.py @@ -1619,7 +1619,7 @@ class AggregationResourceController(rest.RestController): class AggregationController(rest.RestController): _custom_actions = { - 'metric': ['GET'], + 'metric': ['POST', 'GET'], } @pecan.expose() @@ -1730,13 +1730,20 @@ class AggregationController(rest.RestController): storage.AggregationDoesNotExist) as e: abort(404, e) + MetricIDsSchema = [utils.UUID] + @pecan.expose('json') def get_metric(self, metric=None, start=None, stop=None, aggregation='mean', reaggregation=None, granularity=None, needed_overlap=100.0, fill=None, refresh=False, resample=None): + if pecan.request.method == 'GET': + metric_ids = arg_to_list(metric) + else: + self._workaround_pecan_issue_88() + body = deserialize_and_validate(self.MetricIDsSchema) + metric_ids = [six.text_type(m) for m in body] # Check RBAC policy - metric_ids = arg_to_list(metric) 
metrics = pecan.request.indexer.list_metrics(ids=metric_ids) missing_metric_ids = (set(metric_ids) - set(six.text_type(m.id) for m in metrics)) @@ -1748,6 +1755,13 @@ class AggregationController(rest.RestController): metrics, start, stop, aggregation, reaggregation, granularity, needed_overlap, fill, refresh, resample) + post_metric = get_metric + + def _workaround_pecan_issue_88(self): + # FIXME(sileht): https://github.com/pecan/pecan/pull/88 + if pecan.request.path_info.startswith("/aggregation/resource"): + pecan.abort(405) + class CapabilityController(rest.RestController): @staticmethod diff --git a/gnocchi/tests/functional/gabbits/aggregation.yaml b/gnocchi/tests/functional/gabbits/aggregation.yaml index 7f420377..ea857724 100644 --- a/gnocchi/tests/functional/gabbits/aggregation.yaml +++ b/gnocchi/tests/functional/gabbits/aggregation.yaml @@ -70,13 +70,25 @@ tests: GET: /v1/aggregation/metric?metric=$HISTORY['get metric list'].$RESPONSE['$[0].id']&metric=$HISTORY['get metric list'].$RESPONSE['$[1].id']&granularity=foobar status: 400 - - name: get measure aggregates by granularity with refresh + - name: GET measure aggregates by granularity with refresh GET: /v1/aggregation/metric?metric=$HISTORY['get metric list'].$RESPONSE['$[0].id']&metric=$HISTORY['get metric list'].$RESPONSE['$[1].id']&granularity=1&refresh=true response_json_paths: $: - ['2015-03-06T14:33:57+00:00', 1.0, 23.1] - ['2015-03-06T14:34:12+00:00', 1.0, 7.0] + - name: POST measure aggregates by granularity with refresh + POST: /v1/aggregation/metric?granularity=1&refresh=true + request_headers: + content-type: application/json + data: + - $HISTORY['get metric list'].$RESPONSE['$[0].id'] + - $HISTORY['get metric list'].$RESPONSE['$[1].id'] + response_json_paths: + $: + - ['2015-03-06T14:33:57+00:00', 1.0, 23.1] + - ['2015-03-06T14:34:12+00:00', 1.0, 7.0] + - name: get measure aggregates by granularity GET: /v1/aggregation/metric?metric=$HISTORY['get metric 
list'].$RESPONSE['$[0].id']&metric=$HISTORY['get metric list'].$RESPONSE['$[1].id']&granularity=1 poll: -- GitLab From 4649c41fdbf9d88d04162a446ffb70066dab1ad4 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Thu, 29 Jun 2017 08:08:11 +0200 Subject: [PATCH 0861/1483] aggregation: validate metrics uuid --- gnocchi/rest/__init__.py | 11 ++++++++--- gnocchi/tests/functional/gabbits/aggregation.yaml | 4 ++++ 2 files changed, 12 insertions(+), 3 deletions(-) diff --git a/gnocchi/rest/__init__.py b/gnocchi/rest/__init__.py index 661e31dd..1cd46acc 100644 --- a/gnocchi/rest/__init__.py +++ b/gnocchi/rest/__init__.py @@ -1738,11 +1738,16 @@ class AggregationController(rest.RestController): needed_overlap=100.0, fill=None, refresh=False, resample=None): if pecan.request.method == 'GET': - metric_ids = arg_to_list(metric) + try: + metric_ids = voluptuous.Schema( + self.MetricIDsSchema, required=True)(arg_to_list(metric)) + except voluptuous.Error as e: + abort(400, "Invalid input: %s" % e) else: self._workaround_pecan_issue_88() - body = deserialize_and_validate(self.MetricIDsSchema) - metric_ids = [six.text_type(m) for m in body] + metric_ids = deserialize_and_validate(self.MetricIDsSchema) + + metric_ids = [six.text_type(m) for m in metric_ids] # Check RBAC policy metrics = pecan.request.indexer.list_metrics(ids=metric_ids) missing_metric_ids = (set(metric_ids) diff --git a/gnocchi/tests/functional/gabbits/aggregation.yaml b/gnocchi/tests/functional/gabbits/aggregation.yaml index ea857724..163c25ab 100644 --- a/gnocchi/tests/functional/gabbits/aggregation.yaml +++ b/gnocchi/tests/functional/gabbits/aggregation.yaml @@ -70,6 +70,10 @@ tests: GET: /v1/aggregation/metric?metric=$HISTORY['get metric list'].$RESPONSE['$[0].id']&metric=$HISTORY['get metric list'].$RESPONSE['$[1].id']&granularity=foobar status: 400 + - name: get measure aggregates with invalid uuids + GET: /v1/aggregation/metric?metric=foobar + status: 400 + - name: GET measure aggregates by granularity with 
refresh GET: /v1/aggregation/metric?metric=$HISTORY['get metric list'].$RESPONSE['$[0].id']&metric=$HISTORY['get metric list'].$RESPONSE['$[1].id']&granularity=1&refresh=true response_json_paths: -- GitLab From 3526ef3d7691d6d3ae8218f1041b463fb4470785 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Sat, 8 Jul 2017 11:50:20 +0200 Subject: [PATCH 0862/1483] doc: do not specify just `pip install` Nothing's gonna work with just "pip install gnocchi" anyway. Fixes #191 --- doc/source/install.rst | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/doc/source/install.rst b/doc/source/install.rst index f54f9b90..d13f808f 100644 --- a/doc/source/install.rst +++ b/doc/source/install.rst @@ -7,13 +7,9 @@ Installation ============ -To install Gnocchi using `pip`, just type:: - - pip install gnocchi - -Depending on the drivers and features you want to use (see :doc:`architecture` -for which driver to pick), you need to install extra variants using, for -example:: +Gnocchi can be installed using `pip`. Depending on the drivers and features you +want to use (see :doc:`architecture` for which driver to pick), you need to +specify the extra variants you need. For example:: pip install gnocchi[postgresql,ceph,keystone] -- GitLab From a1978c84952d1890211c59973eaa4693c5d99e1a Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Sat, 8 Jul 2017 11:55:38 +0200 Subject: [PATCH 0863/1483] utils: rely on numpy rather than Pandas to handle timestamps This leverages numpy and datetime64 types rather than pandas. Let's make this part of the ongoing process of getting rid of Pandas at somepoint. 
--- gnocchi/tests/test_utils.py | 9 +++++++++ gnocchi/utils.py | 39 +++++++++++++++++++++---------------- 2 files changed, 31 insertions(+), 17 deletions(-) diff --git a/gnocchi/tests/test_utils.py b/gnocchi/tests/test_utils.py index d90bc287..efb9a215 100644 --- a/gnocchi/tests/test_utils.py +++ b/gnocchi/tests/test_utils.py @@ -59,6 +59,15 @@ class TestUtils(tests_base.TestCase): datetime.datetime(2015, 3, 6, 14, 34, 0, 400000, tzinfo=iso8601.iso8601.UTC)) + def test_to_timestamps_relative(self): + with mock.patch('gnocchi.utils.utcnow') as utcnow: + utcnow.return_value = datetime.datetime( + 2015, 3, 6, 14, 34, tzinfo=iso8601.iso8601.UTC) + self.assertEqual( + utils.to_datetime("-10 minutes"), + datetime.datetime(2015, 3, 6, 14, 24, + tzinfo=iso8601.iso8601.UTC)) + class TestResourceUUID(tests_base.TestCase): def test_conversion(self): diff --git a/gnocchi/utils.py b/gnocchi/utils.py index 046dbd76..25e4ed1d 100644 --- a/gnocchi/utils.py +++ b/gnocchi/utils.py @@ -19,7 +19,6 @@ import distutils.util import errno import itertools import multiprocessing -import numbers import os import uuid @@ -103,36 +102,42 @@ unix_universal_start64 = numpy.datetime64("1970") def to_timestamps(values): try: values = list(values) - if isinstance(values[0], numbers.Real): - times = pd.to_datetime(values, utc=True, box=False, unit='s') - elif (isinstance(values[0], datetime.datetime) or - is_valid_timestamp(values[0])): - times = pd.to_datetime(values, utc=True, box=False) + if isinstance(values[0], (numpy.datetime64, datetime.datetime)): + times = numpy.array(values) else: try: + # Try to convert to float. If it works, then we consider + # timestamps to be number of seconds since Epoch + # e.g. 123456 or 129491.1293 float(values[0]) except ValueError: - times = (utcnow() + pd.to_timedelta(values)).values + try: + # Try to parse the value as a string of ISO timestamp + # e.g. 
2017-10-09T23:23:12.123 + numpy.datetime64(values[0]) + except ValueError: + # Last chance: it can be relative timestamp, so convert + # to timedelta relative to now() + # e.g. "-10 seconds" or "5 minutes" + times = numpy.fromiter( + numpy.add(numpy.datetime64(utcnow()), + pd.to_timedelta(values)), + dtype='datetime64[ns]') + else: + times = numpy.array(values, dtype='datetime64[ns]') else: - times = pd.to_datetime(list(map(float, values)), - utc=True, box=False, unit='s') + times = numpy.array(values, dtype='float') * 10e8 except ValueError: raise ValueError("Unable to convert timestamps") + times = times.astype('datetime64[ns]') + if (times < unix_universal_start64).any(): raise ValueError('Timestamp must be after Epoch') return times -def is_valid_timestamp(value): - try: - pd.to_datetime(value) - except Exception: - return False - return True - - def to_timestamp(value): return to_timestamps((value,))[0] -- GitLab From 8731a7d4e08023a118570fb232f7c3228186a845 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Wed, 12 Jul 2017 19:27:11 +0200 Subject: [PATCH 0864/1483] tests: remove useless pandas import in test_aggregates --- gnocchi/tests/test_aggregates.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/gnocchi/tests/test_aggregates.py b/gnocchi/tests/test_aggregates.py index 2f524ee0..3d5cc6af 100644 --- a/gnocchi/tests/test_aggregates.py +++ b/gnocchi/tests/test_aggregates.py @@ -16,7 +16,6 @@ import datetime import uuid -import pandas from stevedore import extension from gnocchi import aggregates @@ -80,7 +79,6 @@ class TestAggregates(tests_base.TestCase): window=window) window = 120.0 - result = pandas.Series() grain, result = agg_obj.retrieve_data(self.storage, metric, start=None, stop=None, window=window) -- GitLab From 77dd5b64eca8f511b650a610146d1142bcf755dc Mon Sep 17 00:00:00 2001 From: gord chung Date: Fri, 14 Jul 2017 17:01:55 +0000 Subject: [PATCH 0865/1483] document open-ended aggregation behaviour not setting a boundary on aggregates results 
in some unpredicatable behaviour. make it a bit more clear what is happening and potential risks --- doc/source/rest.j2 | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/doc/source/rest.j2 b/doc/source/rest.j2 index d0e2d8bc..c777f348 100644 --- a/doc/source/rest.j2 +++ b/doc/source/rest.j2 @@ -544,11 +544,16 @@ Also, aggregation across metrics have different behavior depending on whether boundary values are set ('start' and 'stop') and if 'needed_overlap' is set. -If boundaries are not set, Gnocchi makes the aggregation only with points -at timestamp present in all timeseries. When boundaries are set, Gnocchi -expects that we have certain percent of timestamps common between timeseries, -this percent is controlled by needed_overlap (defaulted with 100%). If this -percent is not reached an error is returned. +When a boundary is set, Gnocchi expects that we have certain percent of +timestamps common between timeseries. This percent is controlled by +needed_overlap, which by default expects 100% overlap. If this percent is not +reached, an error is returned. If no boundaries are set, Gnocchi aggregates and +returns only the last contiguous range of common datapoints. + +.. note:: + + Not setting a boundary may result in an extremely sparse result. + Additionally, it may not accurately reflect 'needed_overlap' value, if set. The ability to fill in points missing from a subset of timeseries is supported by specifying a `fill` value. Valid fill values include any valid float or -- GitLab From d5a203fd8f4e3a97e418cbe89d81d382b534e767 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Thu, 6 Jul 2017 18:32:05 +0200 Subject: [PATCH 0866/1483] incoming: set maximum number of sacks to 65535 This makes sure we can encode the sack number on a short unsigned integer. 
--- gnocchi/cli.py | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/gnocchi/cli.py b/gnocchi/cli.py index a895cc16..7b102e4b 100644 --- a/gnocchi/cli.py +++ b/gnocchi/cli.py @@ -48,8 +48,15 @@ def config_generator(): return genconfig.prehook(None, sys.argv[1:]) +_SACK_NUMBER_OPT = cfg.IntOpt( + "sacks-number", min=1, max=65535, required=True, + help="Number of incoming storage sacks to create.") + + def upgrade(): conf = cfg.ConfigOpts() + sack_number_opt = copy.copy(_SACK_NUMBER_OPT) + sack_number_opt.default = 128 conf.register_cli_opts([ cfg.BoolOpt("skip-index", default=False, help="Skip index upgrade."), @@ -59,9 +66,7 @@ def upgrade(): help="Skip incoming storage upgrade."), cfg.BoolOpt("skip-archive-policies-creation", default=False, help="Skip default archive policies creation."), - cfg.IntOpt("sacks-number", default=128, min=1, - help="Number of incoming storage sacks to create."), - + sack_number_opt, ]) conf = service.prepare_service(conf=conf, log_to_std=True) if not conf.skip_index: @@ -91,10 +96,7 @@ def upgrade(): def change_sack_size(): conf = cfg.ConfigOpts() - conf.register_cli_opts([ - cfg.IntOpt("sacks-number", required=True, min=1, - help="Number of storage sacks."), - ]) + conf.register_cli_opts([_SACK_NUMBER_OPT]) conf = service.prepare_service(conf=conf, log_to_std=True) s = incoming.get_driver(conf) try: -- GitLab From 4cb05c0e7bfe2df6dd7e805cb83a3bd117250f6d Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Mon, 17 Jul 2017 15:23:40 +0200 Subject: [PATCH 0867/1483] Remove worker_sync_rate option If you poll e.g every 60s, there's no need to get the member list and update the partitioner every 30s. Actually the partitioner status should be updated just before requesting the stack, that'll be perfect. So let's do that! 
--- gnocchi/cli.py | 15 +++------------ gnocchi/opts.py | 6 ------ 2 files changed, 3 insertions(+), 18 deletions(-) diff --git a/gnocchi/cli.py b/gnocchi/cli.py index 7b102e4b..50367843 100644 --- a/gnocchi/cli.py +++ b/gnocchi/cli.py @@ -24,7 +24,6 @@ import time import cotyledon from cotyledon import oslo_config_glue import daiquiri -from futurist import periodics from oslo_config import cfg import six import tenacity @@ -211,17 +210,6 @@ class MetricProcessor(MetricProcessBase): self.partitioner = self.coord.join_partitioned_group( self.GROUP_ID, partitions=200) LOG.info('Joined coordination group: %s', self.GROUP_ID) - - @periodics.periodic(spacing=self.conf.metricd.worker_sync_rate, - run_immediately=True) - def run_watchers(): - self.coord.run_watchers() - - self.periodic = periodics.PeriodicWorker.create([]) - self.periodic.add(run_watchers) - t = threading.Thread(target=self.periodic.start) - t.daemon = True - t.start() except NotImplementedError: LOG.warning('Coordinator does not support partitioning. 
Worker ' 'will battle against other workers for jobs.') @@ -232,6 +220,7 @@ class MetricProcessor(MetricProcessBase): def _get_tasks(self): try: + self.coord.run_watchers() if (not self._tasks or self.group_state != self.partitioner.ring.nodes): self.group_state = self.partitioner.ring.nodes.copy() @@ -239,6 +228,8 @@ class MetricProcessor(MetricProcessBase): i for i in six.moves.range(self.incoming.NUM_SACKS) if self.partitioner.belongs_to_self( i, replicas=self.conf.metricd.processing_replicas)] + except Exception as e: + LOG.error('Unexpected error updating the task partitioner: %s', e) finally: return self._tasks or self.fallback_tasks diff --git a/gnocchi/opts.py b/gnocchi/opts.py index 8f57f915..390cf48e 100644 --- a/gnocchi/opts.py +++ b/gnocchi/opts.py @@ -129,12 +129,6 @@ def list_opts(): required=True, help="How many seconds to wait between " "cleaning of expired data"), - cfg.IntOpt('worker_sync_rate', - default=30, - help="Frequency to detect when metricd workers join or " - "leave system (in seconds). 
A shorter rate, may " - "improve rebalancing but create more coordination " - "load"), cfg.IntOpt('processing_replicas', default=3, min=1, -- GitLab From a82872aef79b87d10ebdc0673b0e10a30917a9b7 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Wed, 12 Jul 2017 19:16:21 +0200 Subject: [PATCH 0868/1483] tests: fix gabbi test title --- gnocchi/tests/functional/gabbits/archive.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/gnocchi/tests/functional/gabbits/archive.yaml b/gnocchi/tests/functional/gabbits/archive.yaml index 0ce0182a..f5e38519 100644 --- a/gnocchi/tests/functional/gabbits/archive.yaml +++ b/gnocchi/tests/functional/gabbits/archive.yaml @@ -429,7 +429,7 @@ tests: response_strings: - timespan ≠ granularity × points - - name: create invalid points policy + - name: create invalid granularity policy POST: /v1/archive_policy request_headers: # User admin @@ -443,7 +443,7 @@ tests: response_strings: - "Invalid input: not a valid value for dictionary value" - - name: create invalid granularity policy + - name: create invalid points policy POST: /v1/archive_policy request_headers: # User admin -- GitLab From f93545bf784e5dde5cfa1710564038eaafb25250 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Tue, 11 Jul 2017 11:33:29 +0200 Subject: [PATCH 0869/1483] utils: fix to_timestamps when value list is empty --- gnocchi/tests/test_utils.py | 3 +++ gnocchi/utils.py | 2 ++ 2 files changed, 5 insertions(+) diff --git a/gnocchi/tests/test_utils.py b/gnocchi/tests/test_utils.py index efb9a215..2e07b25d 100644 --- a/gnocchi/tests/test_utils.py +++ b/gnocchi/tests/test_utils.py @@ -41,6 +41,9 @@ class TestUtils(tests_base.TestCase): tzinfo=iso8601.iso8601.FixedOffset(5, 0, '+5h')) self._do_test_datetime_to_unix_timezone_change(1420106400.0, dt) + def test_to_timestamp_empty(self): + self.assertEqual([], utils.to_timestamps([])) + def test_to_timestamps_epoch(self): self.assertEqual( utils.to_datetime("1425652440"), diff --git a/gnocchi/utils.py 
b/gnocchi/utils.py index 25e4ed1d..2c56b73c 100644 --- a/gnocchi/utils.py +++ b/gnocchi/utils.py @@ -102,6 +102,8 @@ unix_universal_start64 = numpy.datetime64("1970") def to_timestamps(values): try: values = list(values) + if len(values) == 0: + return [] if isinstance(values[0], (numpy.datetime64, datetime.datetime)): times = numpy.array(values) else: -- GitLab From 707c92c6754729801884135de511e637cfd37cee Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Thu, 13 Jul 2017 16:43:41 +0200 Subject: [PATCH 0870/1483] utils: do not return coordinator id Nobody cares about it. --- gnocchi/cli.py | 2 +- gnocchi/storage/_carbonara.py | 5 ++--- gnocchi/utils.py | 5 ++--- 3 files changed, 5 insertions(+), 7 deletions(-) diff --git a/gnocchi/cli.py b/gnocchi/cli.py index 50367843..b1d3e2a0 100644 --- a/gnocchi/cli.py +++ b/gnocchi/cli.py @@ -191,7 +191,7 @@ class MetricProcessor(MetricProcessBase): def __init__(self, worker_id, conf): super(MetricProcessor, self).__init__( worker_id, conf, conf.metricd.metric_processing_delay) - self.coord, __ = utils.get_coordinator_and_start( + self.coord = utils.get_coordinator_and_start( conf.storage.coordination_url) self._tasks = [] self.group_state = None diff --git a/gnocchi/storage/_carbonara.py b/gnocchi/storage/_carbonara.py index f52efb6b..da17e505 100644 --- a/gnocchi/storage/_carbonara.py +++ b/gnocchi/storage/_carbonara.py @@ -63,9 +63,8 @@ class CarbonaraBasedStorage(storage.StorageDriver): self._map_in_thread = self._map_no_thread else: self._map_in_thread = self._map_in_futures_threads - self.coord, __ = ( - (coord, None) if coord else - utils.get_coordinator_and_start(conf.coordination_url)) + self.coord = (coord if coord else + utils.get_coordinator_and_start(conf.coordination_url)) self.shared_coord = bool(coord) def stop(self): diff --git a/gnocchi/utils.py b/gnocchi/utils.py index 2c56b73c..f7120e90 100644 --- a/gnocchi/utils.py +++ b/gnocchi/utils.py @@ -90,10 +90,9 @@ def _enable_coordination(coord): def 
get_coordinator_and_start(url): - my_id = str(uuid.uuid4()).encode() - coord = coordination.get_coordinator(url, my_id) + coord = coordination.get_coordinator(url, str(uuid.uuid4()).encode()) _enable_coordination(coord) - return coord, my_id + return coord unix_universal_start64 = numpy.datetime64("1970") -- GitLab From bd4cd400c12c9ad19caab373d13fbd3fa95ac3f1 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Thu, 13 Jul 2017 16:52:06 +0200 Subject: [PATCH 0871/1483] indexer: remove connect() method One of the biggest mystery in universe will be why there is a connect method on the indexer that does nothing. --- gnocchi/cli.py | 5 ----- gnocchi/indexer/__init__.py | 4 ---- gnocchi/indexer/alembic/env.py | 1 - gnocchi/rest/app.py | 1 - gnocchi/statsd.py | 1 - gnocchi/tests/base.py | 1 - gnocchi/tests/functional/fixtures.py | 1 - gnocchi/tests/indexer/sqlalchemy/test_migrations.py | 1 - tools/measures_injector.py | 1 - 9 files changed, 16 deletions(-) diff --git a/gnocchi/cli.py b/gnocchi/cli.py index b1d3e2a0..494c4326 100644 --- a/gnocchi/cli.py +++ b/gnocchi/cli.py @@ -70,7 +70,6 @@ def upgrade(): conf = service.prepare_service(conf=conf, log_to_std=True) if not conf.skip_index: index = indexer.get_driver(conf) - index.connect() LOG.info("Upgrading indexer %s", index) index.upgrade() if not conf.skip_storage: @@ -87,7 +86,6 @@ def upgrade(): and not index.list_archive_policy_rules()): if conf.skip_index: index = indexer.get_driver(conf) - index.connect() for name, ap in six.iteritems(archive_policy.DEFAULT_ARCHIVE_POLICIES): index.create_archive_policy(ap) index.create_archive_policy_rule("default", "*", "low") @@ -131,7 +129,6 @@ class MetricProcessBase(cotyledon.Service): self.store = storage.get_driver(self.conf) self.incoming = incoming.get_driver(self.conf) self.index = indexer.get_driver(self.conf) - self.index.connect() def run(self): self._configure() @@ -201,7 +198,6 @@ class MetricProcessor(MetricProcessBase): self.store = storage.get_driver(self.conf, 
self.coord) self.incoming = incoming.get_driver(self.conf) self.index = indexer.get_driver(self.conf) - self.index.connect() # create fallback in case paritioning fails or assigned no tasks self.fallback_tasks = list( @@ -304,7 +300,6 @@ def metricd_tester(conf): # want to avoid issues with profiler and os.fork(), that # why we don't use the MetricdServiceManager. index = indexer.get_driver(conf) - index.connect() s = storage.get_driver(conf) inc = incoming.get_driver(conf) metrics = set() diff --git a/gnocchi/indexer/__init__.py b/gnocchi/indexer/__init__.py index 1ffc9cb4..a5deb38f 100644 --- a/gnocchi/indexer/__init__.py +++ b/gnocchi/indexer/__init__.py @@ -264,10 +264,6 @@ class IndexerDriver(object): def __init__(conf): pass - @staticmethod - def connect(): - pass - @staticmethod def disconnect(): pass diff --git a/gnocchi/indexer/alembic/env.py b/gnocchi/indexer/alembic/env.py index 47f58efb..c7d7f9e1 100644 --- a/gnocchi/indexer/alembic/env.py +++ b/gnocchi/indexer/alembic/env.py @@ -65,7 +65,6 @@ def run_migrations_online(): """ conf = config.conf indexer = sqlalchemy.SQLAlchemyIndexer(conf) - indexer.connect() with indexer.facade.writer_connection() as connectable: with connectable.connect() as connection: diff --git a/gnocchi/rest/app.py b/gnocchi/rest/app.py index 947c888f..7c99030b 100644 --- a/gnocchi/rest/app.py +++ b/gnocchi/rest/app.py @@ -107,7 +107,6 @@ def load_app(conf, indexer=None, storage=None, incoming=None, incoming = gnocchi_incoming.get_driver(conf) if not indexer: indexer = gnocchi_indexer.get_driver(conf) - indexer.connect() # Build the WSGI app cfg_path = conf.api.paste_config diff --git a/gnocchi/statsd.py b/gnocchi/statsd.py index c2ce102f..3db68a14 100644 --- a/gnocchi/statsd.py +++ b/gnocchi/statsd.py @@ -38,7 +38,6 @@ class Stats(object): self.conf = conf self.incoming = incoming.get_driver(self.conf) self.indexer = indexer.get_driver(self.conf) - self.indexer.connect() try: self.indexer.create_resource('generic', 
self.conf.statsd.resource_id, diff --git a/gnocchi/tests/base.py b/gnocchi/tests/base.py index 7b09525c..176aafb3 100644 --- a/gnocchi/tests/base.py +++ b/gnocchi/tests/base.py @@ -290,7 +290,6 @@ class TestCase(BaseTestCase): group="storage") self.index = indexer.get_driver(self.conf) - self.index.connect() # NOTE(jd) So, some driver, at least SQLAlchemy, can't create all # their tables in a single transaction even with the diff --git a/gnocchi/tests/functional/fixtures.py b/gnocchi/tests/functional/fixtures.py index 0b970e03..5b6cbbf9 100644 --- a/gnocchi/tests/functional/fixtures.py +++ b/gnocchi/tests/functional/fixtures.py @@ -119,7 +119,6 @@ class ConfigFixture(fixture.GabbiFixture): 'indexer') index = indexer.get_driver(conf) - index.connect() index.upgrade() # Set pagination to a testable value diff --git a/gnocchi/tests/indexer/sqlalchemy/test_migrations.py b/gnocchi/tests/indexer/sqlalchemy/test_migrations.py index 7e1cafee..c29fcc90 100644 --- a/gnocchi/tests/indexer/sqlalchemy/test_migrations.py +++ b/gnocchi/tests/indexer/sqlalchemy/test_migrations.py @@ -47,7 +47,6 @@ class ModelsMigrationsSync( self.conf.indexer.url), 'indexer') self.index = indexer.get_driver(self.conf) - self.index.connect() self.index.upgrade(nocreate=True) self.addCleanup(self._drop_database) diff --git a/tools/measures_injector.py b/tools/measures_injector.py index ebaef520..9f5cb101 100755 --- a/tools/measures_injector.py +++ b/tools/measures_injector.py @@ -37,7 +37,6 @@ def injector(): ]) conf = service.prepare_service(conf=conf) index = indexer.get_driver(conf) - index.connect() s = storage.get_driver(conf) def todo(): -- GitLab From 112bf6242a953e008df26d1ab946ae50fed7333b Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Thu, 13 Jul 2017 16:47:22 +0200 Subject: [PATCH 0872/1483] Only retry connection to external components in metricd Currently storage such as Swift will retry forever to connect on __init__. 
Actually, the only process that wants to retry indefinitely is metricd. The API will keep the client connected forever if it does not raise a 500 error soon enough. This patches make sure that the only part retrying for ever is metricd: the rest (e.g. the API) will fail fast if anything bad happens. Fixes #194 --- gnocchi/cli.py | 31 +++++++++++++++++++++---------- gnocchi/common/swift.py | 14 ++++---------- gnocchi/utils.py | 21 +-------------------- 3 files changed, 26 insertions(+), 40 deletions(-) diff --git a/gnocchi/cli.py b/gnocchi/cli.py index 494c4326..0dc776f5 100644 --- a/gnocchi/cli.py +++ b/gnocchi/cli.py @@ -116,6 +116,13 @@ def statsd(): statsd_service.start() +# Retry with exponential backoff for up to 1 minute +_wait_exponential = tenacity.wait_exponential(multiplier=0.5, max=60) + + +retry_on_exception = tenacity.Retrying(wait=_wait_exponential) + + class MetricProcessBase(cotyledon.Service): def __init__(self, worker_id, conf, interval_delay=0): super(MetricProcessBase, self).__init__(worker_id) @@ -126,9 +133,9 @@ class MetricProcessBase(cotyledon.Service): self._shutdown_done = threading.Event() def _configure(self): - self.store = storage.get_driver(self.conf) - self.incoming = incoming.get_driver(self.conf) - self.index = indexer.get_driver(self.conf) + self.store = retry_on_exception(storage.get_driver, self.conf) + self.incoming = retry_on_exception(incoming.get_driver, self.conf) + self.index = retry_on_exception(indexer.get_driver, self.conf) def run(self): self._configure() @@ -164,7 +171,7 @@ class MetricReporting(MetricProcessBase): worker_id, conf, conf.metricd.metric_reporting_delay) def _configure(self): - self.incoming = incoming.get_driver(self.conf) + self.incoming = retry_on_exception(incoming.get_driver, self.conf) def _run_job(self): try: @@ -188,16 +195,20 @@ class MetricProcessor(MetricProcessBase): def __init__(self, worker_id, conf): super(MetricProcessor, self).__init__( worker_id, conf, 
conf.metricd.metric_processing_delay) - self.coord = utils.get_coordinator_and_start( - conf.storage.coordination_url) self._tasks = [] self.group_state = None - @utils.retry + @tenacity.retry( + wait=_wait_exponential, + # Never retry except when explicitly asked by raising TryAgain + retry=tenacity.retry_never) def _configure(self): - self.store = storage.get_driver(self.conf, self.coord) - self.incoming = incoming.get_driver(self.conf) - self.index = indexer.get_driver(self.conf) + self.coord = retry_on_exception(utils.get_coordinator_and_start, + self.conf.storage.coordination_url) + self.store = retry_on_exception(storage.get_driver, + self.conf, self.coord) + self.incoming = retry_on_exception(incoming.get_driver, self.conf) + self.index = retry_on_exception(indexer.get_driver, self.conf) # create fallback in case paritioning fails or assigned no tasks self.fallback_tasks = list( diff --git a/gnocchi/common/swift.py b/gnocchi/common/swift.py index 2009b4a3..42d4fe87 100644 --- a/gnocchi/common/swift.py +++ b/gnocchi/common/swift.py @@ -22,13 +22,14 @@ except ImportError: swift_utils = None from gnocchi import storage -from gnocchi import utils LOG = daiquiri.getLogger(__name__) -@utils.retry -def _get_connection(conf): +def get_connection(conf): + if swclient is None: + raise RuntimeError("python-swiftclient unavailable") + return swclient.Connection( auth_version=conf.swift_auth_version, authurl=conf.swift_authurl, @@ -42,13 +43,6 @@ def _get_connection(conf): retries=0) -def get_connection(conf): - if swclient is None: - raise RuntimeError("python-swiftclient unavailable") - - return _get_connection(conf) - - POST_HEADERS = {'Accept': 'application/json', 'Content-Type': 'text/plain'} diff --git a/gnocchi/utils.py b/gnocchi/utils.py index f7120e90..a0543b4e 100644 --- a/gnocchi/utils.py +++ b/gnocchi/utils.py @@ -29,7 +29,6 @@ import numpy import pandas as pd import six from stevedore import driver -import tenacity from tooz import coordination @@ -71,27 
+70,9 @@ def UUID(value): raise ValueError(e) -# Retry with exponential backoff for up to 1 minute -retry = tenacity.retry( - wait=tenacity.wait_exponential(multiplier=0.5, max=60), - # Never retry except when explicitly asked by raising TryAgain - retry=tenacity.retry_never, - reraise=True) - - -# TODO(jd) Move this to tooz? -@retry -def _enable_coordination(coord): - try: - coord.start(start_heart=True) - except Exception as e: - LOG.error("Unable to start coordinator: %s", e) - raise tenacity.TryAgain(e) - - def get_coordinator_and_start(url): coord = coordination.get_coordinator(url, str(uuid.uuid4()).encode()) - _enable_coordination(coord) + coord.start(start_heart=True) return coord -- GitLab From 3bf9db7d4e3efeeffd35f1040e4f62faf600b43f Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Thu, 13 Jul 2017 16:47:22 +0200 Subject: [PATCH 0873/1483] Only retry connection to external components in metricd Currently storage such as Swift will retry forever to connect on __init__. Actually, the only process that wants to retry indefinitely is metricd. The API will keep the client connected forever if it does not raise a 500 error soon enough. This patches make sure that the only part retrying for ever is metricd: the rest (e.g. the API) will fail fast if anything bad happens. 
Fixes #194 (cherry picked from commit a64fe4e0f271e5075b4922135e8b2bae293f1f76) --- gnocchi/cli.py | 28 ++++++++++++++++++++-------- gnocchi/storage/common/swift.py | 14 ++++---------- gnocchi/utils.py | 21 +-------------------- 3 files changed, 25 insertions(+), 38 deletions(-) diff --git a/gnocchi/cli.py b/gnocchi/cli.py index 4939451f..94399a44 100644 --- a/gnocchi/cli.py +++ b/gnocchi/cli.py @@ -106,6 +106,13 @@ def statsd(): statsd_service.start() +# Retry with exponential backoff for up to 1 minute +_wait_exponential = tenacity.wait_exponential(multiplier=0.5, max=60) + + +retry_on_exception = tenacity.Retrying(wait=_wait_exponential) + + class MetricProcessBase(cotyledon.Service): def __init__(self, worker_id, conf, interval_delay=0): super(MetricProcessBase, self).__init__(worker_id) @@ -116,8 +123,8 @@ class MetricProcessBase(cotyledon.Service): self._shutdown_done = threading.Event() def _configure(self): - self.store = storage.get_driver(self.conf) - self.index = indexer.get_driver(self.conf) + self.store = retry_on_exception(storage.get_driver, self.conf) + self.index = retry_on_exception(indexer.get_driver, self.conf) self.index.connect() def run(self): @@ -154,7 +161,8 @@ class MetricReporting(MetricProcessBase): worker_id, conf, conf.metricd.metric_reporting_delay) def _configure(self): - self.incoming = storage.get_incoming_driver(self.conf.incoming) + self.incoming = retry_on_exception(storage.get_incoming_driver, + self.conf.incoming) def _run_job(self): try: @@ -178,15 +186,19 @@ class MetricProcessor(MetricProcessBase): def __init__(self, worker_id, conf): super(MetricProcessor, self).__init__( worker_id, conf, conf.metricd.metric_processing_delay) - self.coord, __ = utils.get_coordinator_and_start( - conf.storage.coordination_url) self._tasks = [] self.group_state = None - @utils.retry + @tenacity.retry( + wait=_wait_exponential, + # Never retry except when explicitly asked by raising TryAgain + retry=tenacity.retry_never) def _configure(self): 
- self.store = storage.get_driver(self.conf, self.coord) - self.index = indexer.get_driver(self.conf) + self.coord, _ = retry_on_exception(utils.get_coordinator_and_start, + self.conf.storage.coordination_url) + self.store = retry_on_exception(storage.get_driver, + self.conf, self.coord) + self.index = retry_on_exception(indexer.get_driver, self.conf) self.index.connect() # create fallback in case paritioning fails or assigned no tasks diff --git a/gnocchi/storage/common/swift.py b/gnocchi/storage/common/swift.py index 2009b4a3..42d4fe87 100644 --- a/gnocchi/storage/common/swift.py +++ b/gnocchi/storage/common/swift.py @@ -22,13 +22,14 @@ except ImportError: swift_utils = None from gnocchi import storage -from gnocchi import utils LOG = daiquiri.getLogger(__name__) -@utils.retry -def _get_connection(conf): +def get_connection(conf): + if swclient is None: + raise RuntimeError("python-swiftclient unavailable") + return swclient.Connection( auth_version=conf.swift_auth_version, authurl=conf.swift_authurl, @@ -42,13 +43,6 @@ def _get_connection(conf): retries=0) -def get_connection(conf): - if swclient is None: - raise RuntimeError("python-swiftclient unavailable") - - return _get_connection(conf) - - POST_HEADERS = {'Accept': 'application/json', 'Content-Type': 'text/plain'} diff --git a/gnocchi/utils.py b/gnocchi/utils.py index 76667115..269a0bed 100644 --- a/gnocchi/utils.py +++ b/gnocchi/utils.py @@ -29,7 +29,6 @@ import monotonic import numpy import pandas as pd import six -import tenacity from tooz import coordination @@ -71,28 +70,10 @@ def UUID(value): raise ValueError(e) -# Retry with exponential backoff for up to 1 minute -retry = tenacity.retry( - wait=tenacity.wait_exponential(multiplier=0.5, max=60), - # Never retry except when explicitly asked by raising TryAgain - retry=tenacity.retry_never, - reraise=True) - - -# TODO(jd) Move this to tooz? 
-@retry -def _enable_coordination(coord): - try: - coord.start(start_heart=True) - except Exception as e: - LOG.error("Unable to start coordinator: %s", e) - raise tenacity.TryAgain(e) - - def get_coordinator_and_start(url): my_id = str(uuid.uuid4()).encode() coord = coordination.get_coordinator(url, my_id) - _enable_coordination(coord) + coord.start(start_heart=True) return coord, my_id -- GitLab From 5d940bb360822f0f84eb4835bc432b44c8790bea Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Tue, 11 Jul 2017 11:24:10 +0200 Subject: [PATCH 0874/1483] storage: return SplitKey objects when listing split keys This avoids comparing strings and keep it to timestamp comparison everywhere. --- gnocchi/storage/_carbonara.py | 36 ++++++++++---------- gnocchi/storage/ceph.py | 3 +- gnocchi/storage/file.py | 5 ++- gnocchi/storage/redis.py | 5 ++- gnocchi/storage/s3.py | 3 +- gnocchi/storage/swift.py | 3 +- gnocchi/tests/test_storage.py | 62 ++++++++++++++++++++--------------- 7 files changed, 61 insertions(+), 56 deletions(-) diff --git a/gnocchi/storage/_carbonara.py b/gnocchi/storage/_carbonara.py index da17e505..277dc8ec 100644 --- a/gnocchi/storage/_carbonara.py +++ b/gnocchi/storage/_carbonara.py @@ -16,6 +16,7 @@ # under the License. 
import collections import datetime +import functools import itertools import operator @@ -116,9 +117,15 @@ class CarbonaraBasedStorage(storage.StorageDriver): granularity, data, offset=None, version=3): raise NotImplementedError - @staticmethod - def _list_split_keys_for_metric(metric, aggregation, granularity, + def _list_split_keys_for_metric(self, metric, aggregation, granularity, version=3): + return set(map( + functools.partial(carbonara.SplitKey, sampling=granularity), + self._list_split_keys( + metric, aggregation, granularity, version))) + + @staticmethod + def _list_split_keys(metric, aggregation, granularity, version=3): raise NotImplementedError @staticmethod @@ -189,20 +196,18 @@ class CarbonaraBasedStorage(storage.StorageDriver): raise storage.GranularityDoesNotExist(metric, granularity) if from_timestamp: - from_timestamp = str( - carbonara.SplitKey.from_timestamp_and_sampling( - from_timestamp, granularity)) + from_timestamp = carbonara.SplitKey.from_timestamp_and_sampling( + from_timestamp, granularity) if to_timestamp: - to_timestamp = str( - carbonara.SplitKey.from_timestamp_and_sampling( - to_timestamp, granularity)) + to_timestamp = carbonara.SplitKey.from_timestamp_and_sampling( + to_timestamp, granularity) timeseries = list(filter( lambda x: x is not None, self._map_in_thread( self._get_measures_and_unserialize, - ((metric, key, aggregation, granularity) + ((metric, str(key), aggregation, granularity) for key in sorted(all_keys) if ((not from_timestamp or key >= from_timestamp) and (not to_timestamp or key <= to_timestamp)))) @@ -283,13 +288,12 @@ class CarbonaraBasedStorage(storage.StorageDriver): oldest_point_to_keep = ts.last - datetime.timedelta( seconds=archive_policy_def.timespan) oldest_key_to_keep = ts.get_split_key(oldest_point_to_keep) - oldest_key_to_keep_s = str(oldest_key_to_keep) for key in list(existing_keys): # NOTE(jd) Only delete if the key is strictly inferior to # the timestamp; we don't delete any timeserie split that # 
contains our timestamp, so we prefer to keep a bit more # than deleting too much - if key < oldest_key_to_keep_s: + if key < oldest_key_to_keep: self._delete_metric_measures( metric, key, aggregation, archive_policy_def.granularity) @@ -302,10 +306,9 @@ class CarbonaraBasedStorage(storage.StorageDriver): # means we already wrote some splits at some point – so this is not the # first time we treat this timeserie. if need_rewrite: - previous_oldest_mutable_key = str(ts.get_split_key( - previous_oldest_mutable_timestamp)) - oldest_mutable_key = str(ts.get_split_key( - oldest_mutable_timestamp)) + previous_oldest_mutable_key = ts.get_split_key( + previous_oldest_mutable_timestamp) + oldest_mutable_key = ts.get_split_key(oldest_mutable_timestamp) if previous_oldest_mutable_key != oldest_mutable_key: for key in existing_keys: @@ -316,8 +319,7 @@ class CarbonaraBasedStorage(storage.StorageDriver): # NOTE(jd) Rewrite it entirely for fun (and later for # compression). For that, we just pass None as split. 
self._store_timeserie_split( - metric, carbonara.SplitKey( - float(key), archive_policy_def.granularity), + metric, key, None, aggregation, archive_policy_def, oldest_mutable_timestamp) diff --git a/gnocchi/storage/ceph.py b/gnocchi/storage/ceph.py index 8da77d96..0fc2c83d 100644 --- a/gnocchi/storage/ceph.py +++ b/gnocchi/storage/ceph.py @@ -152,8 +152,7 @@ class CephStorage(_carbonara.CarbonaraBasedStorage): else: raise storage.MetricDoesNotExist(metric) - def _list_split_keys_for_metric(self, metric, aggregation, granularity, - version=3): + def _list_split_keys(self, metric, aggregation, granularity, version=3): with rados.ReadOpCtx() as op: omaps, ret = self.ioctx.get_omap_vals(op, "", "", -1) try: diff --git a/gnocchi/storage/file.py b/gnocchi/storage/file.py index 8d288af0..4ec42424 100644 --- a/gnocchi/storage/file.py +++ b/gnocchi/storage/file.py @@ -68,7 +68,7 @@ class FileStorage(_carbonara.CarbonaraBasedStorage): def _build_metric_path_for_split(self, metric, aggregation, timestamp_key, granularity, version=3): path = os.path.join(self._build_metric_path(metric, aggregation), - timestamp_key + "_" + str(granularity)) + str(timestamp_key) + "_" + str(granularity)) return path + '_v%s' % version if version else path def _create_metric(self, metric): @@ -101,8 +101,7 @@ class FileStorage(_carbonara.CarbonaraBasedStorage): raise storage.MetricDoesNotExist(metric) raise - def _list_split_keys_for_metric(self, metric, aggregation, granularity, - version=3): + def _list_split_keys(self, metric, aggregation, granularity, version=3): try: files = os.listdir(self._build_metric_path(metric, aggregation)) except OSError as e: diff --git a/gnocchi/storage/redis.py b/gnocchi/storage/redis.py index 218cd843..8fb5ffc2 100644 --- a/gnocchi/storage/redis.py +++ b/gnocchi/storage/redis.py @@ -50,7 +50,7 @@ class RedisStorage(_carbonara.CarbonaraBasedStorage): @classmethod def _aggregated_field_for_split(cls, aggregation, timestamp_key, granularity, version=3): - path = 
cls.FIELD_SEP.join([timestamp_key, aggregation, + path = cls.FIELD_SEP.join([str(timestamp_key), aggregation, str(granularity)]) return path + '_v%s' % version if version else path @@ -71,8 +71,7 @@ class RedisStorage(_carbonara.CarbonaraBasedStorage): raise storage.MetricDoesNotExist(metric) return data - def _list_split_keys_for_metric(self, metric, aggregation, granularity, - version=3): + def _list_split_keys(self, metric, aggregation, granularity, version=3): key = self._metric_key(metric) if not self._client.exists(key): raise storage.MetricDoesNotExist(metric) diff --git a/gnocchi/storage/s3.py b/gnocchi/storage/s3.py index 2c5f7d10..0ed8d107 100644 --- a/gnocchi/storage/s3.py +++ b/gnocchi/storage/s3.py @@ -169,8 +169,7 @@ class S3Storage(_carbonara.CarbonaraBasedStorage): raise return response['Body'].read() - def _list_split_keys_for_metric(self, metric, aggregation, granularity, - version=3): + def _list_split_keys(self, metric, aggregation, granularity, version=3): bucket = self._bucket_name keys = set() response = {} diff --git a/gnocchi/storage/swift.py b/gnocchi/storage/swift.py index 6691f38a..90ffad95 100644 --- a/gnocchi/storage/swift.py +++ b/gnocchi/storage/swift.py @@ -145,8 +145,7 @@ class SwiftStorage(_carbonara.CarbonaraBasedStorage): raise return contents - def _list_split_keys_for_metric(self, metric, aggregation, granularity, - version=3): + def _list_split_keys(self, metric, aggregation, granularity, version=3): container = self._container_name(metric) try: headers, files = self.swift.get_container( diff --git a/gnocchi/tests/test_storage.py b/gnocchi/tests/test_storage.py index c5e215d3..d11794c2 100644 --- a/gnocchi/tests/test_storage.py +++ b/gnocchi/tests/test_storage.py @@ -283,13 +283,13 @@ class TestStorageDriver(tests_base.TestCase): (utils.datetime_utc(2015, 1, 1, 12), 300.0, 69), ], self.storage.get_measures(self.metric)) - self.assertEqual({"1244160000.0"}, + self.assertEqual({carbonara.SplitKey("1244160000.0", 86400)}, 
self.storage._list_split_keys_for_metric( self.metric, "mean", 86400.0)) - self.assertEqual({"1412640000.0"}, + self.assertEqual({carbonara.SplitKey("1412640000.0", 3600)}, self.storage._list_split_keys_for_metric( self.metric, "mean", 3600.0)) - self.assertEqual({"1419120000.0"}, + self.assertEqual({carbonara.SplitKey("1419120000.0", 300)}, self.storage._list_split_keys_for_metric( self.metric, "mean", 300.0)) @@ -312,10 +312,11 @@ class TestStorageDriver(tests_base.TestCase): ]) self.trigger_processing() - splits = {'1451520000.0', '1451736000.0', '1451952000.0'} - self.assertEqual(splits, - self.storage._list_split_keys_for_metric( - self.metric, "mean", 60.0)) + self.assertEqual({ + carbonara.SplitKey(1451520000.0, 60), + carbonara.SplitKey(1451736000.0, 60), + carbonara.SplitKey(1451952000.0, 60), + }, self.storage._list_split_keys_for_metric(self.metric, "mean", 60.0)) if self.storage.WRITE_FULL: assertCompressedIfWriteFull = self.assertTrue @@ -350,10 +351,12 @@ class TestStorageDriver(tests_base.TestCase): ]) self.trigger_processing() - self.assertEqual({'1452384000.0', '1451736000.0', - '1451520000.0', '1451952000.0'}, - self.storage._list_split_keys_for_metric( - self.metric, "mean", 60.0)) + self.assertEqual({ + carbonara.SplitKey(1452384000.0, 60), + carbonara.SplitKey(1451736000.0, 60), + carbonara.SplitKey(1451520000.0, 60), + carbonara.SplitKey(1451952000.0, 60), + }, self.storage._list_split_keys_for_metric(self.metric, "mean", 60.0)) data = self.storage._get_measures( self.metric, '1451520000.0', "mean", 60.0) self.assertTrue(carbonara.AggregatedTimeSerie.is_compressed(data)) @@ -398,10 +401,11 @@ class TestStorageDriver(tests_base.TestCase): ]) self.trigger_processing() - splits = {'1451520000.0', '1451736000.0', '1451952000.0'} - self.assertEqual(splits, - self.storage._list_split_keys_for_metric( - self.metric, "mean", 60.0)) + self.assertEqual({ + carbonara.SplitKey(1451520000.0, 60), + carbonara.SplitKey(1451736000.0, 60), + 
carbonara.SplitKey(1451952000.0, 60), + }, self.storage._list_split_keys_for_metric(self.metric, "mean", 60.0)) if self.storage.WRITE_FULL: assertCompressedIfWriteFull = self.assertTrue @@ -438,10 +442,12 @@ class TestStorageDriver(tests_base.TestCase): ]) self.trigger_processing() - self.assertEqual({'1452384000.0', '1451736000.0', - '1451520000.0', '1451952000.0'}, - self.storage._list_split_keys_for_metric( - self.metric, "mean", 60.0)) + self.assertEqual({ + carbonara.SplitKey(1452384000.0, 60), + carbonara.SplitKey(1451736000.0, 60), + carbonara.SplitKey(1451520000.0, 60), + carbonara.SplitKey(1451952000.0, 60), + }, self.storage._list_split_keys_for_metric(self.metric, "mean", 60.0)) data = self.storage._get_measures( self.metric, '1451520000.0', "mean", 60.0) self.assertTrue(carbonara.AggregatedTimeSerie.is_compressed(data)) @@ -484,10 +490,11 @@ class TestStorageDriver(tests_base.TestCase): ]) self.trigger_processing() - splits = {'1451520000.0', '1451736000.0', '1451952000.0'} - self.assertEqual(splits, - self.storage._list_split_keys_for_metric( - self.metric, "mean", 60.0)) + self.assertEqual({ + carbonara.SplitKey(1451520000.0, 60), + carbonara.SplitKey(1451736000.0, 60), + carbonara.SplitKey(1451952000.0, 60), + }, self.storage._list_split_keys_for_metric(self.metric, "mean", 60.0)) if self.storage.WRITE_FULL: assertCompressedIfWriteFull = self.assertTrue @@ -547,10 +554,11 @@ class TestStorageDriver(tests_base.TestCase): ]) self.trigger_processing() - splits = {'1451520000.0', '1451736000.0', '1451952000.0'} - self.assertEqual(splits, - self.storage._list_split_keys_for_metric( - self.metric, "mean", 60.0)) + self.assertEqual({ + carbonara.SplitKey(1451520000.0, 60), + carbonara.SplitKey(1451736000.0, 60), + carbonara.SplitKey(1451952000.0, 60), + }, self.storage._list_split_keys_for_metric(self.metric, "mean", 60.0)) if self.storage.WRITE_FULL: assertCompressedIfWriteFull = self.assertTrue -- GitLab From 4331d479b38e7887d6042952a0b25bc84e2a427d Mon 
Sep 17 00:00:00 2001 From: Julien Danjou Date: Tue, 11 Jul 2017 18:38:04 +0200 Subject: [PATCH 0875/1483] carbonara: use sampling from SplitKey SplitKey already carries the sampling, so let's use it rather than passing it around each time. This also avoids passing the SplitKey as a simple string and pass the object around, which is more solid. --- gnocchi/carbonara.py | 10 +++---- gnocchi/storage/_carbonara.py | 40 ++++++++++----------------- gnocchi/storage/ceph.py | 24 ++++++---------- gnocchi/storage/file.py | 23 +++++++--------- gnocchi/storage/redis.py | 42 ++++++++++++---------------- gnocchi/storage/s3.py | 21 +++++++------- gnocchi/storage/swift.py | 22 ++++++--------- gnocchi/tests/test_storage.py | 52 ++++++++++++++++++----------------- 8 files changed, 103 insertions(+), 131 deletions(-) diff --git a/gnocchi/carbonara.py b/gnocchi/carbonara.py index c3751d83..f704fd09 100644 --- a/gnocchi/carbonara.py +++ b/gnocchi/carbonara.py @@ -1,6 +1,6 @@ # -*- encoding: utf-8 -*- # -# Copyright © 2016 Red Hat, Inc. +# Copyright © 2016-2017 Red Hat, Inc. # Copyright © 2014-2015 eNovance # # Licensed under the Apache License, Version 2.0 (the "License"); you may @@ -410,7 +410,7 @@ class SplitKey(object): else: self.key = float(value) - self._carbonara_sampling = float(sampling) + self.sampling = sampling @classmethod def from_timestamp_and_sampling(cls, timestamp, sampling): @@ -425,8 +425,8 @@ class SplitKey(object): :return: A `SplitKey` object. 
""" return self.__class__( - self.key + self._carbonara_sampling * self.POINTS_PER_SPLIT, - self._carbonara_sampling) + self.key + self.sampling * self.POINTS_PER_SPLIT, + self.sampling) next = __next__ @@ -462,7 +462,7 @@ class SplitKey(object): def __repr__(self): return "<%s: %s / %fs>" % (self.__class__.__name__, repr(self.key), - self._carbonara_sampling) + self.sampling) class AggregatedTimeSerie(TimeSerie): diff --git a/gnocchi/storage/_carbonara.py b/gnocchi/storage/_carbonara.py index 277dc8ec..32ba0cfd 100644 --- a/gnocchi/storage/_carbonara.py +++ b/gnocchi/storage/_carbonara.py @@ -73,8 +73,7 @@ class CarbonaraBasedStorage(storage.StorageDriver): self.coord.stop() @staticmethod - def _get_measures(metric, timestamp_key, aggregation, granularity, - version=3): + def _get_measures(metric, timestamp_key, aggregation, version=3): raise NotImplementedError @staticmethod @@ -114,7 +113,7 @@ class CarbonaraBasedStorage(storage.StorageDriver): @staticmethod def _store_metric_measures(metric, timestamp_key, aggregation, - granularity, data, offset=None, version=3): + data, offset=None, version=3): raise NotImplementedError def _list_split_keys_for_metric(self, metric, aggregation, granularity, @@ -158,17 +157,16 @@ class CarbonaraBasedStorage(storage.StorageDriver): for ts in agg_timeseries for timestamp, r, v in ts.fetch(from_timestamp, to_timestamp)] - def _get_measures_and_unserialize(self, metric, key, - aggregation, granularity): - data = self._get_measures(metric, key, aggregation, granularity) + def _get_measures_and_unserialize(self, metric, key, aggregation): + data = self._get_measures(metric, key, aggregation) try: return carbonara.AggregatedTimeSerie.unserialize( - data, key, aggregation, granularity) + data, key, aggregation, key.sampling) except carbonara.InvalidData: LOG.error("Data corruption detected for %s " "aggregated `%s' timeserie, granularity `%s' " "around time `%s', ignoring.", - metric.id, aggregation, granularity, key) + metric.id, 
aggregation, key.sampling, key) def _get_measures_timeserie(self, metric, aggregation, granularity, @@ -207,7 +205,7 @@ class CarbonaraBasedStorage(storage.StorageDriver): lambda x: x is not None, self._map_in_thread( self._get_measures_and_unserialize, - ((metric, str(key), aggregation, granularity) + ((metric, key, aggregation) for key in sorted(all_keys) if ((not from_timestamp or key >= from_timestamp) and (not to_timestamp or key <= to_timestamp)))) @@ -220,17 +218,14 @@ class CarbonaraBasedStorage(storage.StorageDriver): max_size=points) def _store_timeserie_split(self, metric, key, split, - aggregation, archive_policy_def, - oldest_mutable_timestamp): + aggregation, oldest_mutable_timestamp): # NOTE(jd) We write the full split only if the driver works that way # (self.WRITE_FULL) or if the oldest_mutable_timestamp is out of range. write_full = self.WRITE_FULL or next(key) <= oldest_mutable_timestamp - key_as_str = str(key) if write_full: try: existing = self._get_measures_and_unserialize( - metric, key_as_str, aggregation, - archive_policy_def.granularity) + metric, key, aggregation) except storage.AggregationDoesNotExist: pass else: @@ -250,15 +245,14 @@ class CarbonaraBasedStorage(storage.StorageDriver): LOG.warning("No data found for metric %s, granularity %f " "and aggregation method %s (split key %s): " "possible data corruption", - metric, archive_policy_def.granularity, + metric, key.sampling, aggregation, key) return offset, data = split.serialize(key, compressed=write_full) - return self._store_metric_measures( - metric, key_as_str, aggregation, archive_policy_def.granularity, - data, offset=offset) + return self._store_metric_measures(metric, key, aggregation, + data, offset=offset) def _add_measures(self, aggregation, archive_policy_def, metric, grouped_serie, @@ -294,9 +288,7 @@ class CarbonaraBasedStorage(storage.StorageDriver): # contains our timestamp, so we prefer to keep a bit more # than deleting too much if key < oldest_key_to_keep: - 
self._delete_metric_measures( - metric, key, aggregation, - archive_policy_def.granularity) + self._delete_metric_measures(metric, key, aggregation) existing_keys.remove(key) else: oldest_key_to_keep = carbonara.SplitKey(0, 0) @@ -320,8 +312,7 @@ class CarbonaraBasedStorage(storage.StorageDriver): # compression). For that, we just pass None as split. self._store_timeserie_split( metric, key, - None, aggregation, archive_policy_def, - oldest_mutable_timestamp) + None, aggregation, oldest_mutable_timestamp) for key, split in ts.split(): if key >= oldest_key_to_keep: @@ -329,8 +320,7 @@ class CarbonaraBasedStorage(storage.StorageDriver): "Storing split %s (%s) for metric %s", key, aggregation, metric) self._store_timeserie_split( - metric, key, split, aggregation, archive_policy_def, - oldest_mutable_timestamp) + metric, key, split, aggregation, oldest_mutable_timestamp) @staticmethod def _delete_metric(metric): diff --git a/gnocchi/storage/ceph.py b/gnocchi/storage/ceph.py index 0fc2c83d..3b0f74e2 100644 --- a/gnocchi/storage/ceph.py +++ b/gnocchi/storage/ceph.py @@ -56,10 +56,9 @@ class CephStorage(_carbonara.CarbonaraBasedStorage): super(CephStorage, self).stop() @staticmethod - def _get_object_name(metric, timestamp_key, aggregation, granularity, - version=3): + def _get_object_name(metric, key, aggregation, version=3): name = str("gnocchi_%s_%s_%s_%s" % ( - metric.id, timestamp_key, aggregation, granularity)) + metric.id, key, aggregation, key.sampling)) return name + '_v%s' % version if version else name def _object_exists(self, name): @@ -76,10 +75,9 @@ class CephStorage(_carbonara.CarbonaraBasedStorage): else: self.ioctx.write_full(name, b"") - def _store_metric_measures(self, metric, timestamp_key, aggregation, - granularity, data, offset=None, version=3): - name = self._get_object_name(metric, timestamp_key, - aggregation, granularity, version) + def _store_metric_measures(self, metric, key, aggregation, + data, offset=None, version=3): + name = 
self._get_object_name(metric, key, aggregation, version) if offset is None: self.ioctx.write_full(name, data) else: @@ -89,10 +87,8 @@ class CephStorage(_carbonara.CarbonaraBasedStorage): self.ioctx.operate_write_op( op, self._build_unaggregated_timeserie_path(metric, 3)) - def _delete_metric_measures(self, metric, timestamp_key, aggregation, - granularity, version=3): - name = self._get_object_name(metric, timestamp_key, - aggregation, granularity, version) + def _delete_metric_measures(self, metric, key, aggregation, version=3): + name = self._get_object_name(metric, key, aggregation, version) try: self.ioctx.remove_object(name) @@ -139,11 +135,9 @@ class CephStorage(_carbonara.CarbonaraBasedStorage): # It's possible that the object does not exists pass - def _get_measures(self, metric, timestamp_key, aggregation, granularity, - version=3): + def _get_measures(self, metric, key, aggregation, version=3): try: - name = self._get_object_name(metric, timestamp_key, - aggregation, granularity, version) + name = self._get_object_name(metric, key, aggregation, version) return self._get_object_content(name) except rados.ObjectNotFound: if self._object_exists( diff --git a/gnocchi/storage/file.py b/gnocchi/storage/file.py index 4ec42424..3277dca1 100644 --- a/gnocchi/storage/file.py +++ b/gnocchi/storage/file.py @@ -66,9 +66,9 @@ class FileStorage(_carbonara.CarbonaraBasedStorage): "agg_" + aggregation) def _build_metric_path_for_split(self, metric, aggregation, - timestamp_key, granularity, version=3): + key, version=3): path = os.path.join(self._build_metric_path(metric, aggregation), - str(timestamp_key) + "_" + str(granularity)) + str(key) + "_" + str(key.sampling)) return path + '_v%s' % version if version else path def _create_metric(self, metric): @@ -115,17 +115,15 @@ class FileStorage(_carbonara.CarbonaraBasedStorage): keys.add(meta[0]) return keys - def _delete_metric_measures(self, metric, timestamp_key, aggregation, - granularity, version=3): + def 
_delete_metric_measures(self, metric, key, aggregation, version=3): os.unlink(self._build_metric_path_for_split( - metric, aggregation, timestamp_key, granularity, version)) + metric, aggregation, key, version)) - def _store_metric_measures(self, metric, timestamp_key, aggregation, - granularity, data, offset=None, version=3): + def _store_metric_measures(self, metric, key, aggregation, + data, offset=None, version=3): self._atomic_file_store( - self._build_metric_path_for_split(metric, aggregation, - timestamp_key, granularity, - version), + self._build_metric_path_for_split( + metric, aggregation, key, version), data) def _delete_metric(self, metric): @@ -138,10 +136,9 @@ class FileStorage(_carbonara.CarbonaraBasedStorage): # measures) raise - def _get_measures(self, metric, timestamp_key, aggregation, granularity, - version=3): + def _get_measures(self, metric, key, aggregation, version=3): path = self._build_metric_path_for_split( - metric, aggregation, timestamp_key, granularity, version) + metric, aggregation, key, version) try: with open(path, 'rb') as aggregation_file: return aggregation_file.read() diff --git a/gnocchi/storage/redis.py b/gnocchi/storage/redis.py index 8fb5ffc2..43291161 100644 --- a/gnocchi/storage/redis.py +++ b/gnocchi/storage/redis.py @@ -48,10 +48,10 @@ class RedisStorage(_carbonara.CarbonaraBasedStorage): return 'none' + ("_v%s" % version if version else "") @classmethod - def _aggregated_field_for_split(cls, aggregation, timestamp_key, - granularity, version=3): - path = cls.FIELD_SEP.join([str(timestamp_key), aggregation, - str(granularity)]) + def _aggregated_field_for_split(cls, aggregation, key, version=3, + granularity=None): + path = cls.FIELD_SEP.join([str(key), aggregation, + str(granularity or key.sampling)]) return path + '_v%s' % version if version else path def _create_metric(self, metric): @@ -77,40 +77,34 @@ class RedisStorage(_carbonara.CarbonaraBasedStorage): raise storage.MetricDoesNotExist(metric) split_keys = set() 
hashes = self._client.hscan_iter( - key, match=self._aggregated_field_for_split(aggregation, '*', - granularity, version)) + key, match=self._aggregated_field_for_split( + aggregation, '*', version, granularity)) for f, __ in hashes: meta = f.decode("utf8").split(self.FIELD_SEP, 1) split_keys.add(meta[0]) return split_keys - def _delete_metric_measures(self, metric, timestamp_key, aggregation, - granularity, version=3): - key = self._metric_key(metric) - field = self._aggregated_field_for_split( - aggregation, timestamp_key, granularity, version) - self._client.hdel(key, field) + def _delete_metric_measures(self, metric, key, aggregation, version=3): + field = self._aggregated_field_for_split(aggregation, key, version) + self._client.hdel(self._metric_key(metric), field) - def _store_metric_measures(self, metric, timestamp_key, aggregation, - granularity, data, offset=None, version=3): - key = self._metric_key(metric) + def _store_metric_measures(self, metric, key, aggregation, + data, offset=None, version=3): field = self._aggregated_field_for_split( - aggregation, timestamp_key, granularity, version) - self._client.hset(key, field, data) + aggregation, key, version) + self._client.hset(self._metric_key(metric), field, data) def _delete_metric(self, metric): self._client.delete(self._metric_key(metric)) # Carbonara API - def _get_measures(self, metric, timestamp_key, aggregation, granularity, - version=3): - key = self._metric_key(metric) - field = self._aggregated_field_for_split( - aggregation, timestamp_key, granularity, version) - data = self._client.hget(key, field) + def _get_measures(self, metric, key, aggregation, version=3): + redis_key = self._metric_key(metric) + field = self._aggregated_field_for_split(aggregation, key, version) + data = self._client.hget(redis_key, field) if data is None: - if not self._client.exists(key): + if not self._client.exists(redis_key): raise storage.MetricDoesNotExist(metric) raise storage.AggregationDoesNotExist(metric, 
aggregation) return data diff --git a/gnocchi/storage/s3.py b/gnocchi/storage/s3.py index 0ed8d107..a4fb3dab 100644 --- a/gnocchi/storage/s3.py +++ b/gnocchi/storage/s3.py @@ -87,8 +87,8 @@ class S3Storage(_carbonara.CarbonaraBasedStorage): raise @staticmethod - def _object_name(split_key, aggregation, granularity, version=3): - name = '%s_%s_%s' % (aggregation, granularity, split_key) + def _object_name(split_key, aggregation, version=3): + name = '%s_%s_%s' % (aggregation, split_key.sampling, split_key) return name + '_v%s' % version if version else name @staticmethod @@ -113,20 +113,20 @@ class S3Storage(_carbonara.CarbonaraBasedStorage): wait=self._consistency_wait, stop=self._consistency_stop)(_head) - def _store_metric_measures(self, metric, timestamp_key, aggregation, - granularity, data, offset=0, version=3): + def _store_metric_measures(self, metric, key, aggregation, + data, offset=0, version=3): self._put_object_safe( Bucket=self._bucket_name, Key=self._prefix(metric) + self._object_name( - timestamp_key, aggregation, granularity, version), + key, aggregation, version), Body=data) - def _delete_metric_measures(self, metric, timestamp_key, aggregation, - granularity, version=3): + def _delete_metric_measures(self, metric, key, aggregation, + version=3): self.s3.delete_object( Bucket=self._bucket_name, Key=self._prefix(metric) + self._object_name( - timestamp_key, aggregation, granularity, version)) + key, aggregation, version)) def _delete_metric(self, metric): bucket = self._bucket_name @@ -149,13 +149,12 @@ class S3Storage(_carbonara.CarbonaraBasedStorage): s3.bulk_delete(self.s3, bucket, [c['Key'] for c in response.get('Contents', ())]) - def _get_measures(self, metric, timestamp_key, aggregation, granularity, - version=3): + def _get_measures(self, metric, key, aggregation, version=3): try: response = self.s3.get_object( Bucket=self._bucket_name, Key=self._prefix(metric) + self._object_name( - timestamp_key, aggregation, granularity, version)) + key, 
aggregation, version)) except botocore.exceptions.ClientError as e: if e.response['Error'].get('Code') == 'NoSuchKey': try: diff --git a/gnocchi/storage/swift.py b/gnocchi/storage/swift.py index 90ffad95..c5932eb2 100644 --- a/gnocchi/storage/swift.py +++ b/gnocchi/storage/swift.py @@ -80,8 +80,8 @@ class SwiftStorage(_carbonara.CarbonaraBasedStorage): return '%s.%s' % (self._container_prefix, str(metric.id)) @staticmethod - def _object_name(split_key, aggregation, granularity, version=3): - name = '%s_%s_%s' % (split_key, aggregation, granularity) + def _object_name(split_key, aggregation, version=3): + name = '%s_%s_%s' % (split_key, aggregation, split_key.sampling) return name + '_v%s' % version if version else name def _create_metric(self, metric): @@ -94,20 +94,17 @@ class SwiftStorage(_carbonara.CarbonaraBasedStorage): if resp['status'] == 204: raise storage.MetricAlreadyExists(metric) - def _store_metric_measures(self, metric, timestamp_key, aggregation, - granularity, data, offset=None, version=3): + def _store_metric_measures(self, metric, key, aggregation, + data, offset=None, version=3): self.swift.put_object( self._container_name(metric), - self._object_name(timestamp_key, aggregation, granularity, - version), + self._object_name(key, aggregation, version), data) - def _delete_metric_measures(self, metric, timestamp_key, aggregation, - granularity, version=3): + def _delete_metric_measures(self, metric, key, aggregation, version=3): self.swift.delete_object( self._container_name(metric), - self._object_name(timestamp_key, aggregation, granularity, - version)) + self._object_name(key, aggregation, version)) def _delete_metric(self, metric): container = self._container_name(metric) @@ -127,12 +124,11 @@ class SwiftStorage(_carbonara.CarbonaraBasedStorage): # Deleted in the meantime? Whatever. 
raise - def _get_measures(self, metric, timestamp_key, aggregation, granularity, - version=3): + def _get_measures(self, metric, key, aggregation, version=3): try: headers, contents = self.swift.get_object( self._container_name(metric), self._object_name( - timestamp_key, aggregation, granularity, version)) + key, aggregation, version)) except swclient.ClientException as e: if e.http_status == 404: try: diff --git a/gnocchi/tests/test_storage.py b/gnocchi/tests/test_storage.py index d11794c2..db42983d 100644 --- a/gnocchi/tests/test_storage.py +++ b/gnocchi/tests/test_storage.py @@ -229,7 +229,9 @@ class TestStorageDriver(tests_base.TestCase): for call in c.mock_calls: # policy is 60 points and split is 48. should only update 2nd half args = call[1] - if args[0] == m_sql and args[2] == 'mean' and args[3] == 60.0: + if (args[0] == m_sql + and args[2] == 'mean' + and args[1].sampling == 60.0): count += 1 self.assertEqual(1, count) @@ -324,13 +326,13 @@ class TestStorageDriver(tests_base.TestCase): assertCompressedIfWriteFull = self.assertFalse data = self.storage._get_measures( - self.metric, '1451520000.0', "mean", 60.0) + self.metric, carbonara.SplitKey(1451520000.0, 60.0), "mean") self.assertTrue(carbonara.AggregatedTimeSerie.is_compressed(data)) data = self.storage._get_measures( - self.metric, '1451736000.0', "mean", 60.0) + self.metric, carbonara.SplitKey(1451736000.0, 60.0), "mean") self.assertTrue(carbonara.AggregatedTimeSerie.is_compressed(data)) data = self.storage._get_measures( - self.metric, '1451952000.0', "mean", 60.0) + self.metric, carbonara.SplitKey(1451952000.0, 60.0), "mean") assertCompressedIfWriteFull( carbonara.AggregatedTimeSerie.is_compressed(data)) @@ -358,17 +360,17 @@ class TestStorageDriver(tests_base.TestCase): carbonara.SplitKey(1451952000.0, 60), }, self.storage._list_split_keys_for_metric(self.metric, "mean", 60.0)) data = self.storage._get_measures( - self.metric, '1451520000.0', "mean", 60.0) + self.metric, 
carbonara.SplitKey(1451520000.0, 60.0), "mean") self.assertTrue(carbonara.AggregatedTimeSerie.is_compressed(data)) data = self.storage._get_measures( - self.metric, '1451736000.0', "mean", 60.0) + self.metric, carbonara.SplitKey(1451736000.0, 60.0), "mean") self.assertTrue(carbonara.AggregatedTimeSerie.is_compressed(data)) data = self.storage._get_measures( - self.metric, '1451952000.0', "mean", 60.0) + self.metric, carbonara.SplitKey(1451952000.0, 60.0), "mean") # Now this one is compressed because it has been rewritten! self.assertTrue(carbonara.AggregatedTimeSerie.is_compressed(data)) data = self.storage._get_measures( - self.metric, '1452384000.0', "mean", 60.0) + self.metric, carbonara.SplitKey(1452384000.0, 60.0), "mean") assertCompressedIfWriteFull( carbonara.AggregatedTimeSerie.is_compressed(data)) @@ -413,13 +415,13 @@ class TestStorageDriver(tests_base.TestCase): assertCompressedIfWriteFull = self.assertFalse data = self.storage._get_measures( - self.metric, '1451520000.0', "mean", 60.0) + self.metric, carbonara.SplitKey(1451520000.0, 60.0), "mean") self.assertTrue(carbonara.AggregatedTimeSerie.is_compressed(data)) data = self.storage._get_measures( - self.metric, '1451736000.0', "mean", 60.0) + self.metric, carbonara.SplitKey(1451736000.0, 60.0), "mean") self.assertTrue(carbonara.AggregatedTimeSerie.is_compressed(data)) data = self.storage._get_measures( - self.metric, '1451952000.0', "mean", 60.0) + self.metric, carbonara.SplitKey(1451952000.0, 60.0), "mean") assertCompressedIfWriteFull( carbonara.AggregatedTimeSerie.is_compressed(data)) @@ -449,17 +451,17 @@ class TestStorageDriver(tests_base.TestCase): carbonara.SplitKey(1451952000.0, 60), }, self.storage._list_split_keys_for_metric(self.metric, "mean", 60.0)) data = self.storage._get_measures( - self.metric, '1451520000.0', "mean", 60.0) + self.metric, carbonara.SplitKey(1451520000.0, 60.0), "mean") self.assertTrue(carbonara.AggregatedTimeSerie.is_compressed(data)) data = self.storage._get_measures( 
- self.metric, '1451736000.0', "mean", 60.0) + self.metric, carbonara.SplitKey(1451736000.0, 60.0), "mean") self.assertTrue(carbonara.AggregatedTimeSerie.is_compressed(data)) data = self.storage._get_measures( - self.metric, '1451952000.0', "mean", 60.0) + self.metric, carbonara.SplitKey(1451952000.0, 60.0), "mean") # Now this one is compressed because it has been rewritten! self.assertTrue(carbonara.AggregatedTimeSerie.is_compressed(data)) data = self.storage._get_measures( - self.metric, '1452384000.0', "mean", 60.0) + self.metric, carbonara.SplitKey(1452384000.0, 60.0), "mean") assertCompressedIfWriteFull( carbonara.AggregatedTimeSerie.is_compressed(data)) @@ -502,13 +504,13 @@ class TestStorageDriver(tests_base.TestCase): assertCompressedIfWriteFull = self.assertFalse data = self.storage._get_measures( - self.metric, '1451520000.0', "mean", 60.0) + self.metric, carbonara.SplitKey(1451520000.0, 60.0), "mean") self.assertTrue(carbonara.AggregatedTimeSerie.is_compressed(data)) data = self.storage._get_measures( - self.metric, '1451736000.0', "mean", 60.0) + self.metric, carbonara.SplitKey(1451736000.0, 60.0), "mean") self.assertTrue(carbonara.AggregatedTimeSerie.is_compressed(data)) data = self.storage._get_measures( - self.metric, '1451952000.0', "mean", 60.0) + self.metric, carbonara.SplitKey(1451952000.0, 60.0), "mean") assertCompressedIfWriteFull( carbonara.AggregatedTimeSerie.is_compressed(data)) @@ -521,9 +523,8 @@ class TestStorageDriver(tests_base.TestCase): # Test what happens if we delete the latest split and then need to # compress it! - self.storage._delete_metric_measures(self.metric, - '1451952000.0', - 'mean', 60.0) + self.storage._delete_metric_measures( + self.metric, carbonara.SplitKey(1451952000.0, 60.0), 'mean') # Now store brand new points that should force a rewrite of one of the # split (keep in mind the back window size in one hour here). 
We move @@ -566,13 +567,13 @@ class TestStorageDriver(tests_base.TestCase): assertCompressedIfWriteFull = self.assertFalse data = self.storage._get_measures( - self.metric, '1451520000.0', "mean", 60.0) + self.metric, carbonara.SplitKey(1451520000.0, 60.0), "mean") self.assertTrue(carbonara.AggregatedTimeSerie.is_compressed(data)) data = self.storage._get_measures( - self.metric, '1451736000.0', "mean", 60.0) + self.metric, carbonara.SplitKey(1451736000.0, 60.0), "mean") self.assertTrue(carbonara.AggregatedTimeSerie.is_compressed(data)) data = self.storage._get_measures( - self.metric, '1451952000.0', "mean", 60.0) + self.metric, carbonara.SplitKey(1451952000.0, 60.0), "mean") assertCompressedIfWriteFull( carbonara.AggregatedTimeSerie.is_compressed(data)) @@ -585,7 +586,8 @@ class TestStorageDriver(tests_base.TestCase): # Test what happens if we write garbage self.storage._store_metric_measures( - self.metric, '1451952000.0', "mean", 60.0, b"oh really?") + self.metric, carbonara.SplitKey(1451952000.0, 60.0), "mean", + b"oh really?") # Now store brand new points that should force a rewrite of one of the # split (keep in mind the back window size in one hour here). We move -- GitLab From 554db377c02e1a01511026de91c6281f19027ca2 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Tue, 11 Jul 2017 18:47:21 +0200 Subject: [PATCH 0876/1483] carbonara: simplify unserialize The start/key argument is always a SplitKey object, so there's no need to pass any sampling around. Just simplify the signature. 
--- gnocchi/carbonara.py | 19 ++++++++++++------- gnocchi/storage/_carbonara.py | 2 +- gnocchi/tests/test_carbonara.py | 5 ++--- 3 files changed, 15 insertions(+), 11 deletions(-) diff --git a/gnocchi/carbonara.py b/gnocchi/carbonara.py index f704fd09..1dcc752f 100644 --- a/gnocchi/carbonara.py +++ b/gnocchi/carbonara.py @@ -573,10 +573,15 @@ class AggregatedTimeSerie(TimeSerie): return six.indexbytes(serialized_data, 0) == ord("c") @classmethod - def unserialize(cls, data, start, agg_method, sampling): + def unserialize(cls, data, key, agg_method): + """Unserialize an aggregated timeserie. + + :param data: Raw data buffer. + :param key: A :class:`SplitKey` key. + :param agg_method: The aggregation method of this timeseries. + """ x, y = [], [] - start = float(start) if data: if cls.is_compressed(data): # Compressed format @@ -590,7 +595,7 @@ class AggregatedTimeSerie(TimeSerie): y = numpy.frombuffer(timestamps_raw, dtype=' Date: Wed, 19 Jul 2017 15:35:38 -0400 Subject: [PATCH 0877/1483] Add missing configs for Swift storage Add following missing configurations: * swift_auth_insecure * swift_cacert * swift_region * swift_service_type --- gnocchi/common/swift.py | 13 +++++++++++-- gnocchi/storage/swift.py | 14 ++++++++++++++ 2 files changed, 25 insertions(+), 2 deletions(-) diff --git a/gnocchi/common/swift.py b/gnocchi/common/swift.py index 42d4fe87..5af262f0 100644 --- a/gnocchi/common/swift.py +++ b/gnocchi/common/swift.py @@ -30,6 +30,14 @@ def get_connection(conf): if swclient is None: raise RuntimeError("python-swiftclient unavailable") + os_options = { + 'endpoint_type': conf.swift_endpoint_type, + 'service_type': conf.swift_service_type, + 'user_domain_name': conf.swift_user_domain_name, + } + if conf.swift_region: + os_options['region_name'] = conf.swift_region + return swclient.Connection( auth_version=conf.swift_auth_version, authurl=conf.swift_authurl, @@ -38,8 +46,9 @@ def get_connection(conf): key=conf.swift_key, tenant_name=conf.swift_project_name, 
timeout=conf.swift_timeout, - os_options={'endpoint_type': conf.swift_endpoint_type, - 'user_domain_name': conf.swift_user_domain_name}, + insecure=conf.swift_auth_insecure, + os_options=os_options, + cacert=conf.swift_cacert, retries=0) diff --git a/gnocchi/storage/swift.py b/gnocchi/storage/swift.py index c5932eb2..d1c8e2a9 100644 --- a/gnocchi/storage/swift.py +++ b/gnocchi/storage/swift.py @@ -27,6 +27,10 @@ OPTS = [ cfg.StrOpt('swift_auth_version', default='1', help='Swift authentication version to user.'), + cfg.BoolOpt('swift_auth_insecure', + default=False, + help='If True, swiftclient won\'t check for a valid SSL ' + 'certificate when authenticating.'), cfg.StrOpt('swift_preauthurl', help='Swift pre-auth URL.'), cfg.StrOpt('swift_authurl', @@ -35,6 +39,11 @@ OPTS = [ cfg.StrOpt('swift_preauthtoken', secret=True, help='Swift token to user to authenticate.'), + cfg.StrOpt('swift_cacert', + help='A string giving the CA certificate file to use in ' + 'SSL connections for verifying certs.'), + cfg.StrOpt('swift_region', + help='Swift region.'), cfg.StrOpt('swift_user', default="admin:admin", help='Swift user.'), @@ -57,6 +66,11 @@ OPTS = [ cfg.StrOpt('swift_endpoint_type', default='publicURL', help='Endpoint type to connect to Swift',), + cfg.StrOpt('swift_service_type', + default='object-store', + help='A string giving the service type of the swift service ' + 'to use. 
This setting is only used if ' + 'swift_auth_version is 2.'), cfg.IntOpt('swift_timeout', min=0, default=300, -- GitLab From 2b854634b604afdf1f87b0f9354e499aa797d721 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Thu, 20 Jul 2017 09:10:04 +0200 Subject: [PATCH 0878/1483] Update tenacity requirement to 4.0.0 Closes #218 --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 4083e8e0..2bb754c4 100644 --- a/requirements.txt +++ b/requirements.txt @@ -16,7 +16,7 @@ ujson voluptuous werkzeug trollius; python_version < '3.4' -tenacity>=3.1.0 # Apache-2.0 +tenacity>=4.0.0 # Apache-2.0 WebOb>=1.4.1 Paste PasteDeploy -- GitLab From 914e9434b694e2dc5978d18b7172359352eda619 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Thu, 20 Jul 2017 09:13:03 +0200 Subject: [PATCH 0879/1483] Make compatible with tenacity<4.0.0 Closes #218 --- gnocchi/cli.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/gnocchi/cli.py b/gnocchi/cli.py index 94399a44..a79a74f5 100644 --- a/gnocchi/cli.py +++ b/gnocchi/cli.py @@ -110,7 +110,7 @@ def statsd(): _wait_exponential = tenacity.wait_exponential(multiplier=0.5, max=60) -retry_on_exception = tenacity.Retrying(wait=_wait_exponential) +retry_on_exception = tenacity.Retrying(wait=_wait_exponential).call class MetricProcessBase(cotyledon.Service): -- GitLab From b807a51ff2fb2577a0b30cd10d40352e6dffd619 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Tue, 4 Jul 2017 08:16:12 +0200 Subject: [PATCH 0880/1483] Add rate of change aggregation methods This change allows to compute the rate of change before applying the aggregation method. 
To do so, aggregation method need to be prefixed by "rate:" --- doc/source/rest.j2 | 7 +- gnocchi/archive_policy.py | 4 + gnocchi/carbonara.py | 29 ++- gnocchi/storage/_carbonara.py | 13 +- gnocchi/tests/functional/gabbits/archive.yaml | 22 ++ .../functional/gabbits/metric-derived.yaml | 199 ++++++++++++++++++ gnocchi/tests/test_archive_policy.py | 9 + gnocchi/tests/test_carbonara.py | 52 ++++- .../rate-archive-policy-74888634f90a81e3.yaml | 5 + 9 files changed, 331 insertions(+), 9 deletions(-) create mode 100644 gnocchi/tests/functional/gabbits/metric-derived.yaml create mode 100644 releasenotes/notes/rate-archive-policy-74888634f90a81e3.yaml diff --git a/doc/source/rest.j2 b/doc/source/rest.j2 index c777f348..e5d9548f 100644 --- a/doc/source/rest.j2 +++ b/doc/source/rest.j2 @@ -148,8 +148,11 @@ Archive Policy When sending measures for a metric to Gnocchi, the values are dynamically aggregated. That means that Gnocchi does not store all sent measures, but -aggregates them over a certain period of time. Gnocchi provides several -aggregation methods (mean, min, max, sum…) that are builtin. +aggregates them over a certain period of time. + +Gnocchi provides several aggregation methods (mean, min, max, sum…) that are +builtin. Those can be prefix by `rate:` to compute the rate of change before +doing the aggregation. An archive policy is defined by a list of items in the `definition` field. 
Each item is composed of the timespan and the level of precision that must be kept diff --git a/gnocchi/archive_policy.py b/gnocchi/archive_policy.py index c039dbd0..ea09d941 100644 --- a/gnocchi/archive_policy.py +++ b/gnocchi/archive_policy.py @@ -35,6 +35,10 @@ class ArchivePolicy(object): 'std', 'median', 'first', 'count')).union( set((str(i) + 'pct' for i in six.moves.range(1, 100)))) + VALID_AGGREGATION_METHODS = VALID_AGGREGATION_METHODS.union( + set(map(lambda s: "rate:" + s, + VALID_AGGREGATION_METHODS))) + # Set that contains all the above values + their minus equivalent (-mean) # and the "*" entry. VALID_AGGREGATION_METHODS_VALUES = VALID_AGGREGATION_METHODS.union( diff --git a/gnocchi/carbonara.py b/gnocchi/carbonara.py index 1dcc752f..1facd6c2 100644 --- a/gnocchi/carbonara.py +++ b/gnocchi/carbonara.py @@ -79,13 +79,23 @@ def round_timestamp(ts, freq): class GroupedTimeSeries(object): - def __init__(self, ts, granularity): + def __init__(self, ts, granularity, start=None): # NOTE(sileht): The whole class assumes ts is ordered and don't have # duplicate timestamps, it uses numpy.unique that sorted list, but # we always assume the orderd to be the same as the input. 
+ self.granularity = granularity freq = granularity * 10e8 - self._ts = ts - self.indexes = (numpy.array(ts.index, numpy.float) // freq) * freq + self.start = start + if start is None: + self._ts = ts + self._ts_for_derive = ts + else: + self._ts = ts[start:] + start_derive = start - pandas.Timedelta(freq, unit='ns') + self._ts_for_derive = ts[start_derive:] + + self.indexes = (numpy.array(self._ts.index, + numpy.float) // freq) * freq self.tstamps, self.counts = numpy.unique(self.indexes, return_counts=True) @@ -125,6 +135,7 @@ class GroupedTimeSeries(object): counts, timestamps = self._count() cumcounts = numpy.cumsum(counts) - 1 values = self._ts.values[cumcounts] + return pandas.Series(values, pandas.to_datetime(timestamps)) def first(self): @@ -157,6 +168,14 @@ class GroupedTimeSeries(object): timestamps = tstamps.astype('datetime64[ns]', copy=False) return pandas.Series(values, pandas.to_datetime(timestamps)) + def derived(self): + timestamps = self._ts_for_derive.index[1:] + values = numpy.diff(self._ts_for_derive.values) + # FIXME(sileht): create some alternative __init__ to avoid creating + # useless Pandas object, recounting, timestamps convertion, ... + return GroupedTimeSeries(pandas.Series(values, timestamps), + self.granularity, self.start) + class TimeSerie(object): """A representation of series of a timestamp with a value. @@ -220,14 +239,14 @@ class TimeSerie(object): except IndexError: return - def group_serie(self, granularity, start=0): + def group_serie(self, granularity, start=None): # NOTE(jd) Our whole serialization system is based on Epoch, and we # store unsigned integer, so we can't store anything before Epoch. # Sorry! 
if not self.ts.empty and self.ts.index[0].value < 0: raise BeforeEpochError(self.ts.index[0]) - return GroupedTimeSeries(self.ts[start:], granularity) + return GroupedTimeSeries(self.ts, granularity, start) @staticmethod def _compress(payload): diff --git a/gnocchi/storage/_carbonara.py b/gnocchi/storage/_carbonara.py index 9ebae0f0..e1b1e52c 100644 --- a/gnocchi/storage/_carbonara.py +++ b/gnocchi/storage/_carbonara.py @@ -258,9 +258,16 @@ class CarbonaraBasedStorage(storage.StorageDriver): metric, grouped_serie, previous_oldest_mutable_timestamp, oldest_mutable_timestamp): + + if aggregation.startswith("rate:"): + grouped_serie = grouped_serie.derived() + aggregation_to_compute = aggregation[5:] + else: + aggregation_to_compute = aggregation + ts = carbonara.AggregatedTimeSerie.from_grouped_serie( grouped_serie, archive_policy_def.granularity, - aggregation, max_size=archive_policy_def.points) + aggregation_to_compute, max_size=archive_policy_def.points) # Don't do anything if the timeserie is empty if not ts: @@ -389,6 +396,10 @@ class CarbonaraBasedStorage(storage.StorageDriver): block_size = metric.archive_policy.max_block_size back_window = metric.archive_policy.back_window definition = metric.archive_policy.definition + # NOTE(sileht): We keep one more blocks to calculate rate of change + # correctly + if any(filter(lambda x: x.startswith("rate:"), agg_methods)): + back_window += 1 try: ts = self._get_unaggregated_timeserie_and_unserialize( diff --git a/gnocchi/tests/functional/gabbits/archive.yaml b/gnocchi/tests/functional/gabbits/archive.yaml index f5e38519..e26837fe 100644 --- a/gnocchi/tests/functional/gabbits/archive.yaml +++ b/gnocchi/tests/functional/gabbits/archive.yaml @@ -579,3 +579,25 @@ tests: $.definition[0].points: 514 $.definition[0].granularity: "0:00:07" $.definition[0].timespan: "0:59:58" + + - name: policy rated + POST: /v1/archive_policy + request_headers: + # User admin + authorization: "basic YWRtaW46" + data: + name: 
595228db-ea29-4415-9d5b-ecb5366abb1c + aggregation_methods: + - rate:mean + - rate:last + definition: + - timespan: 1 hour + points: 1000 + status: 201 + response_json_paths: + $.aggregation_methods.`sorted`: + - rate:last + - rate:mean + $.definition[0].points: 1000 + $.definition[0].granularity: "0:00:04" + $.definition[0].timespan: "1:06:40" diff --git a/gnocchi/tests/functional/gabbits/metric-derived.yaml b/gnocchi/tests/functional/gabbits/metric-derived.yaml new file mode 100644 index 00000000..1d62c722 --- /dev/null +++ b/gnocchi/tests/functional/gabbits/metric-derived.yaml @@ -0,0 +1,199 @@ +fixtures: + - ConfigFixture + +defaults: + request_headers: + # User foobar + authorization: "basic Zm9vYmFyOg==" + content-type: application/json + +tests: + - name: create archive policy + desc: for later use + POST: /v1/archive_policy + request_headers: + # User admin + authorization: "basic YWRtaW46" + data: + name: carrot-cake + aggregation_methods: + - rate:mean + - rate:max + - rate:95pct + - max + definition: + - granularity: 1 minute + status: 201 + + - name: create valid metric + POST: /v1/metric + data: + archive_policy_name: carrot-cake + status: 201 + + - name: push measurements to metric + POST: /v1/metric/$RESPONSE['$.id']/measures + data: + - timestamp: "2015-03-06T14:33:00" + value: 10 + - timestamp: "2015-03-06T14:34:10" + value: 13 + - timestamp: "2015-03-06T14:34:20" + value: 13 + - timestamp: "2015-03-06T14:34:30" + value: 15 + - timestamp: "2015-03-06T14:34:40" + value: 18 + - timestamp: "2015-03-06T14:34:50" + value: 20 + - timestamp: "2015-03-06T14:35:00" + value: 22 + - timestamp: "2015-03-06T14:35:10" + value: 26 + - timestamp: "2015-03-06T14:35:20" + value: 30 + - timestamp: "2015-03-06T14:35:30" + value: 31 + - timestamp: "2015-03-06T14:35:40" + value: 37 + - timestamp: "2015-03-06T14:35:50" + value: 55 + - timestamp: "2015-03-06T14:36:00" + value: 62 + - timestamp: "2015-03-06T14:36:10" + value: 100 + - timestamp: "2015-03-06T14:36:20" + 
value: 102 + - timestamp: "2015-03-06T14:36:30" + value: 103 + - timestamp: "2015-03-06T14:36:40" + value: 104 + - timestamp: "2015-03-06T14:36:50" + value: 110 + status: 202 + + - name: get measurements rate:mean + GET: /v1/metric/$HISTORY['create valid metric'].$RESPONSE['id']/measures?aggregation=rate:mean&refresh=true + status: 200 + response_json_paths: + $: + - ['2015-03-06T14:34:00+00:00', 60.0, 2.0] + - ['2015-03-06T14:35:00+00:00', 60.0, 5.833333333333333] + - ['2015-03-06T14:36:00+00:00', 60.0, 9.166666666666666] + + - name: get measurements rate:95pct + GET: /v1/metric/$HISTORY['create valid metric'].$RESPONSE['id']/measures?aggregation=rate:95pct + status: 200 + response_json_paths: + $: + - ['2015-03-06T14:34:00+00:00', 60.0, 3.0] + - ['2015-03-06T14:35:00+00:00', 60.0, 15.0] + - ['2015-03-06T14:36:00+00:00', 60.0, 30.25] + + - name: get measurements rate:max + GET: /v1/metric/$HISTORY['create valid metric'].$RESPONSE['id']/measures?aggregation=rate:max + status: 200 + response_json_paths: + $: + - ['2015-03-06T14:34:00+00:00', 60.0, 3.0] + - ['2015-03-06T14:35:00+00:00', 60.0, 18.0] + - ['2015-03-06T14:36:00+00:00', 60.0, 38.0] + + - name: get measurements max + GET: /v1/metric/$HISTORY['create valid metric'].$RESPONSE['id']/measures?aggregation=max + status: 200 + response_json_paths: + $: + - ['2015-03-06T14:33:00+00:00', 60.0, 10.0] + - ['2015-03-06T14:34:00+00:00', 60.0, 20.0] + - ['2015-03-06T14:35:00+00:00', 60.0, 55.0] + - ['2015-03-06T14:36:00+00:00', 60.0, 110.0] + + - name: create a second metric + POST: /v1/metric + data: + archive_policy_name: carrot-cake + status: 201 + + - name: push measurements to the second metric + POST: /v1/metric/$RESPONSE['$.id']/measures + data: + - timestamp: "2015-03-06T14:33:00" + value: 10 + - timestamp: "2015-03-06T14:34:10" + value: 13 + - timestamp: "2015-03-06T14:34:20" + value: 13 + - timestamp: "2015-03-06T14:34:30" + value: 15 + - timestamp: "2015-03-06T14:34:40" + value: 18 + - timestamp: 
"2015-03-06T14:34:50" + value: 20 + - timestamp: "2015-03-06T14:35:00" + value: 22 + - timestamp: "2015-03-06T14:35:10" + value: 26 + status: 202 + + - name: push other measurements to the second metric + POST: /v1/metric/$HISTORY['create a second metric'].$RESPONSE['$.id']/measures + data: + - timestamp: "2015-03-06T14:35:20" + value: 30 + - timestamp: "2015-03-06T14:35:30" + value: 31 + - timestamp: "2015-03-06T14:35:40" + value: 37 + - timestamp: "2015-03-06T14:35:50" + value: 55 + - timestamp: "2015-03-06T14:36:00" + value: 62 + - timestamp: "2015-03-06T14:36:10" + value: 100 + - timestamp: "2015-03-06T14:36:20" + value: 102 + - timestamp: "2015-03-06T14:36:30" + value: 103 + - timestamp: "2015-03-06T14:36:40" + value: 104 + - timestamp: "2015-03-06T14:36:50" + value: 110 + status: 202 + + - name: get measurements rate:mean second metric + GET: /v1/metric/$HISTORY['create a second metric'].$RESPONSE['id']/measures?aggregation=rate:mean&refresh=true + status: 200 + response_json_paths: + $: + - ['2015-03-06T14:34:00+00:00', 60.0, 2.0] + - ['2015-03-06T14:35:00+00:00', 60.0, 5.833333333333333] + - ['2015-03-06T14:36:00+00:00', 60.0, 9.166666666666666] + + - name: get measurements rate:95pct second metric + GET: /v1/metric/$HISTORY['create a second metric'].$RESPONSE['id']/measures?aggregation=rate:95pct + status: 200 + response_json_paths: + $: + - ['2015-03-06T14:34:00+00:00', 60.0, 3.0] + - ['2015-03-06T14:35:00+00:00', 60.0, 15.0] + - ['2015-03-06T14:36:00+00:00', 60.0, 30.25] + + - name: get measurements rate:max second metric + GET: /v1/metric/$HISTORY['create a second metric'].$RESPONSE['id']/measures?aggregation=rate:max + status: 200 + response_json_paths: + $: + - ['2015-03-06T14:34:00+00:00', 60.0, 3.0] + - ['2015-03-06T14:35:00+00:00', 60.0, 18.0] + - ['2015-03-06T14:36:00+00:00', 60.0, 38.0] + + - name: get measurements max second metric + GET: /v1/metric/$HISTORY['create a second metric'].$RESPONSE['id']/measures?aggregation=max + status: 200 + 
response_json_paths: + $: + - ['2015-03-06T14:33:00+00:00', 60.0, 10.0] + - ['2015-03-06T14:34:00+00:00', 60.0, 20.0] + - ['2015-03-06T14:35:00+00:00', 60.0, 55.0] + - ['2015-03-06T14:36:00+00:00', 60.0, 110.0] diff --git a/gnocchi/tests/test_archive_policy.py b/gnocchi/tests/test_archive_policy.py index 38922be3..51fd20c7 100644 --- a/gnocchi/tests/test_archive_policy.py +++ b/gnocchi/tests/test_archive_policy.py @@ -73,6 +73,15 @@ class TestArchivePolicy(base.BaseTestCase): .union(set(["12pct"]))), ap.aggregation_methods) + ap = archive_policy.ArchivePolicy("foobar", + 0, + [], + ["+rate:last"]) + self.assertEqual( + (set(conf.archive_policy.default_aggregation_methods) + .union(set(["rate:last"]))), + ap.aggregation_methods) + def test_max_block_size(self): ap = archive_policy.ArchivePolicy("foobar", 0, diff --git a/gnocchi/tests/test_carbonara.py b/gnocchi/tests/test_carbonara.py index 8016e5de..c351dd78 100644 --- a/gnocchi/tests/test_carbonara.py +++ b/gnocchi/tests/test_carbonara.py @@ -158,11 +158,61 @@ class TestAggregatedTimeSerie(base.BaseTestCase): ts.group_serie, 60) @staticmethod - def _resample(ts, sampling, agg, max_size=None): + def _resample(ts, sampling, agg, max_size=None, derived=False): grouped = ts.group_serie(sampling) + if derived: + grouped = grouped.derived() return carbonara.AggregatedTimeSerie.from_grouped_serie( grouped, sampling, agg, max_size=max_size) + def test_derived_mean(self): + ts = carbonara.TimeSerie.from_tuples( + [(datetime.datetime(2014, 1, 1, 12, 0, 0), 50), + (datetime.datetime(2014, 1, 1, 12, 0, 4), 55), + (datetime.datetime(2014, 1, 1, 12, 1, 2), 65), + (datetime.datetime(2014, 1, 1, 12, 1, 14), 66), + (datetime.datetime(2014, 1, 1, 12, 1, 24), 70), + (datetime.datetime(2014, 1, 1, 12, 2, 4), 83), + (datetime.datetime(2014, 1, 1, 12, 2, 35), 92), + (datetime.datetime(2014, 1, 1, 12, 2, 42), 103), + (datetime.datetime(2014, 1, 1, 12, 3, 2), 105), + (datetime.datetime(2014, 1, 1, 12, 3, 22), 5), # Counter reset + 
(datetime.datetime(2014, 1, 1, 12, 3, 42), 7), + (datetime.datetime(2014, 1, 1, 12, 4, 9), 23)]) + ts = self._resample(ts, 60, 'mean', derived=True) + + self.assertEqual(5, len(ts)) + self.assertEqual( + [(datetime.datetime(2014, 1, 1, 12, 0, 0), 60, 5), + (datetime.datetime(2014, 1, 1, 12, 1, 0), 60, 5), + (datetime.datetime(2014, 1, 1, 12, 2, 0), 60, 11), + (datetime.datetime(2014, 1, 1, 12, 3, 0), 60, -32), + (datetime.datetime(2014, 1, 1, 12, 4, 0), 60, 16)], + list(ts.fetch( + from_timestamp=datetime.datetime(2014, 1, 1, 12)))) + + def test_derived_hole(self): + ts = carbonara.TimeSerie.from_tuples( + [(datetime.datetime(2014, 1, 1, 12, 0, 0), 50), + (datetime.datetime(2014, 1, 1, 12, 0, 4), 55), + (datetime.datetime(2014, 1, 1, 12, 1, 2), 65), + (datetime.datetime(2014, 1, 1, 12, 1, 14), 66), + (datetime.datetime(2014, 1, 1, 12, 1, 24), 70), + (datetime.datetime(2014, 1, 1, 12, 3, 2), 105), + (datetime.datetime(2014, 1, 1, 12, 3, 22), 108), + (datetime.datetime(2014, 1, 1, 12, 3, 42), 200), + (datetime.datetime(2014, 1, 1, 12, 4, 9), 202)]) + ts = self._resample(ts, 60, 'last', derived=True) + + self.assertEqual(4, len(ts)) + self.assertEqual( + [(datetime.datetime(2014, 1, 1, 12, 0, 0), 60, 5), + (datetime.datetime(2014, 1, 1, 12, 1, 0), 60, 4), + (datetime.datetime(2014, 1, 1, 12, 3, 0), 60, 92), + (datetime.datetime(2014, 1, 1, 12, 4, 0), 60, 2)], + list(ts.fetch( + from_timestamp=datetime.datetime(2014, 1, 1, 12)))) + def test_74_percentile_serialized(self): ts = carbonara.TimeSerie.from_tuples( [(datetime.datetime(2014, 1, 1, 12, 0, 0), 3), diff --git a/releasenotes/notes/rate-archive-policy-74888634f90a81e3.yaml b/releasenotes/notes/rate-archive-policy-74888634f90a81e3.yaml new file mode 100644 index 00000000..7da8b32c --- /dev/null +++ b/releasenotes/notes/rate-archive-policy-74888634f90a81e3.yaml @@ -0,0 +1,5 @@ +--- +features: + - | + New aggregation methods are available for archive policy; rate:mean, rate:last, .... 
These new methods + allow to compute the timeseries rate of change before applying the selected aggregation method. -- GitLab From 47d7cdd765b39c7773ca5f5d987b4e38cd403fd9 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Tue, 11 Jul 2017 19:34:41 +0200 Subject: [PATCH 0881/1483] carbonara: use numpy native types for computing This replaces various weak data type (e.g. integers for sampling are replaced by numpy.timedelta64) by native numpy data types. Pandas data types are replaced by numpy data types where possible. --- gnocchi/aggregates/moving_stats.py | 36 +- gnocchi/archive_policy.py | 63 +- gnocchi/carbonara.py | 133 +- gnocchi/indexer/sqlalchemy.py | 7 +- gnocchi/indexer/sqlalchemy_base.py | 7 + gnocchi/json.py | 10 + gnocchi/rest/__init__.py | 34 +- gnocchi/storage/__init__.py | 12 +- gnocchi/storage/_carbonara.py | 17 +- gnocchi/storage/ceph.py | 8 +- gnocchi/storage/file.py | 10 +- gnocchi/storage/redis.py | 6 +- gnocchi/storage/s3.py | 13 +- gnocchi/storage/swift.py | 9 +- gnocchi/tests/base.py | 23 +- gnocchi/tests/functional/gabbits/archive.yaml | 10 +- gnocchi/tests/test_aggregates.py | 31 +- gnocchi/tests/test_archive_policy.py | 4 +- gnocchi/tests/test_carbonara.py | 1174 +++++++++-------- gnocchi/tests/test_indexer.py | 37 +- gnocchi/tests/test_statsd.py | 57 +- gnocchi/tests/test_storage.py | 672 +++++++--- gnocchi/utils.py | 15 +- 23 files changed, 1429 insertions(+), 959 deletions(-) diff --git a/gnocchi/aggregates/moving_stats.py b/gnocchi/aggregates/moving_stats.py index 75f83efa..4d4cb2c1 100644 --- a/gnocchi/aggregates/moving_stats.py +++ b/gnocchi/aggregates/moving_stats.py @@ -13,8 +13,6 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
-import datetime - import numpy import pandas import six @@ -25,24 +23,15 @@ from gnocchi import utils class MovingAverage(aggregates.CustomAggregator): - @staticmethod - def check_window_valid(window): - """Takes in the window parameter string, reformats as a float.""" - if window is None: - msg = 'Moving aggregate must have window specified.' - raise aggregates.CustomAggFailure(msg) - try: - return utils.to_timespan(six.text_type(window)).total_seconds() - except Exception: - raise aggregates.CustomAggFailure('Invalid value for window') - @staticmethod def retrieve_data(storage_obj, metric, start, stop, window): """Retrieves finest-res data available from storage.""" + window_seconds = utils.timespan_total_seconds(window) try: min_grain = min( ap.granularity for ap in metric.archive_policy.definition - if window % ap.granularity == 0) + if (window_seconds % utils.timespan_total_seconds( + ap.granularity) == 0)) except ValueError: msg = ("No data available that is either full-res or " "of a granularity that factors into the window size " @@ -76,12 +65,11 @@ class MovingAverage(aggregates.CustomAggregator): center = utils.strtobool(center) def moving_window(x): - msec = datetime.timedelta(milliseconds=1) - zero = datetime.timedelta(seconds=0) - half_span = datetime.timedelta(seconds=window / 2) + msec = numpy.timedelta64(1, 'ms') + zero = numpy.timedelta64(0, 's') + half_span = window / 2 start = utils.normalize_time(data.index[0]) - stop = utils.normalize_time( - data.index[-1] + datetime.timedelta(seconds=min_grain)) + stop = utils.normalize_time(data.index[-1] + min_grain) # min_grain addition necessary since each bin of rolled-up data # is indexed by leftmost timestamp of bin. 
@@ -138,7 +126,15 @@ class MovingAverage(aggregates.CustomAggregator): :param center: how to index the aggregated data (central timestamp or leftmost timestamp) """ - window = self.check_window_valid(window) + if window is None: + raise aggregates.CustomAggFailure( + 'Moving aggregate must have window specified.' + ) + try: + window = utils.to_timespan(window) + except ValueError: + raise aggregates.CustomAggFailure('Invalid value for window') + min_grain, data = self.retrieve_data(storage_obj, metric, start, stop, window) return self.aggregate_data(data, numpy.mean, window, min_grain, center, diff --git a/gnocchi/archive_policy.py b/gnocchi/archive_policy.py index ea09d941..d823d32a 100644 --- a/gnocchi/archive_policy.py +++ b/gnocchi/archive_policy.py @@ -18,10 +18,13 @@ import collections import datetime import operator +import numpy from oslo_config import cfg from oslo_config import types import six +from gnocchi import utils + class ArchivePolicy(object): @@ -75,7 +78,7 @@ class ArchivePolicy(object): raise ValueError( "More than one archive policy " "uses granularity `%s'" - % duplicate_granularities[0] + % utils.timespan_total_seconds(duplicate_granularities[0]) ) if aggregation_methods is None: @@ -159,20 +162,25 @@ class ArchivePolicyItem(dict): raise ValueError( u"timespan ≠ granularity × points") - if granularity is not None and granularity <= 0: - raise ValueError("Granularity should be > 0") + if granularity is not None: + if not isinstance(granularity, numpy.timedelta64): + granularity = numpy.timedelta64(int(granularity * 10e8), 'ns') + if granularity <= numpy.timedelta64(0, 'ns'): + raise ValueError("Granularity should be > 0") if points is not None and points <= 0: raise ValueError("Number of points should be > 0") + if (timespan is not None + and not isinstance(timespan, numpy.timedelta64)): + timespan = numpy.timedelta64(int(timespan * 10e8), 'ns') + if granularity is None: if points is None or timespan is None: raise ValueError( "At least two 
of granularity/points/timespan " "must be provided") - granularity = round(timespan / float(points)) - else: - granularity = float(granularity) + granularity = timespan / float(points) if points is None: if timespan is None: @@ -205,11 +213,25 @@ class ArchivePolicyItem(dict): """Return a dict representation with human readable values.""" return { 'timespan': six.text_type( - datetime.timedelta(seconds=self.timespan)) + datetime.timedelta( + seconds=utils.timespan_total_seconds( + self.timespan))) if self.timespan is not None else None, 'granularity': six.text_type( - datetime.timedelta(seconds=self.granularity)), + datetime.timedelta( + seconds=utils.timespan_total_seconds( + self.granularity))), + 'points': self.points, + } + + def serialize(self): + return { + 'timespan': None + if self.timespan is None + else float(utils.timespan_total_seconds(self.timespan)), + 'granularity': float( + utils.timespan_total_seconds(self.granularity)), 'points': self.points, } @@ -218,36 +240,39 @@ DEFAULT_ARCHIVE_POLICIES = { 'bool': ArchivePolicy( "bool", 3600, [ # 1 second resolution for 365 days - ArchivePolicyItem(granularity=1, - timespan=365 * 24 * 60 * 60), + ArchivePolicyItem(granularity=numpy.timedelta64(1, 's'), + timespan=numpy.timedelta64(365, 'D')), ], aggregation_methods=("last",), ), 'low': ArchivePolicy( "low", 0, [ # 5 minutes resolution for 30 days - ArchivePolicyItem(granularity=300, - timespan=30 * 24 * 60 * 60), + ArchivePolicyItem(granularity=numpy.timedelta64(5, 'm'), + timespan=numpy.timedelta64(30, 'D')), ], ), 'medium': ArchivePolicy( "medium", 0, [ # 1 minute resolution for 7 days - ArchivePolicyItem(granularity=60, - timespan=7 * 24 * 60 * 60), + ArchivePolicyItem(granularity=numpy.timedelta64(1, 'm'), + timespan=numpy.timedelta64(7, 'D')), # 1 hour resolution for 365 days - ArchivePolicyItem(granularity=3600, - timespan=365 * 24 * 60 * 60), + ArchivePolicyItem(granularity=numpy.timedelta64(1, 'h'), + timespan=numpy.timedelta64(365, 'D')), ], ), 
'high': ArchivePolicy( "high", 0, [ # 1 second resolution for an hour - ArchivePolicyItem(granularity=1, points=3600), + ArchivePolicyItem(granularity=numpy.timedelta64(1, 's'), + timespan=numpy.timedelta64(1, 'h')), # 1 minute resolution for a week - ArchivePolicyItem(granularity=60, points=60 * 24 * 7), + ArchivePolicyItem(granularity=numpy.timedelta64(1, 'm'), + timespan=numpy.timedelta64(7, 'D')), # 1 hour resolution for a year - ArchivePolicyItem(granularity=3600, points=365 * 24), + ArchivePolicyItem(granularity=numpy.timedelta64(1, 'h'), + timespan=numpy.timedelta64(365, 'D')), ], ), } diff --git a/gnocchi/carbonara.py b/gnocchi/carbonara.py index 1facd6c2..b18ab11f 100644 --- a/gnocchi/carbonara.py +++ b/gnocchi/carbonara.py @@ -21,7 +21,6 @@ import functools import itertools import logging import math -import numbers import random import re import struct @@ -43,6 +42,10 @@ time.strptime("2016-02-19", "%Y-%m-%d") LOG = logging.getLogger(__name__) +UNIX_UNIVERSAL_START64 = numpy.datetime64("1970", 'ns') +ONE_SECOND = numpy.timedelta64(1, 's') + + class BeforeEpochError(Exception): """Error raised when a timestamp before Epoch is used.""" @@ -73,9 +76,13 @@ class InvalidData(ValueError): super(InvalidData, self).__init__("Unable to unpack, invalid data") +def datetime64_to_epoch(dt): + return (dt - UNIX_UNIVERSAL_START64) / ONE_SECOND + + def round_timestamp(ts, freq): - return pandas.Timestamp( - (pandas.Timestamp(ts).value // freq) * freq) + return UNIX_UNIVERSAL_START64 + numpy.floor( + (ts - UNIX_UNIVERSAL_START64) / freq) * freq class GroupedTimeSeries(object): @@ -84,18 +91,17 @@ class GroupedTimeSeries(object): # duplicate timestamps, it uses numpy.unique that sorted list, but # we always assume the orderd to be the same as the input. 
self.granularity = granularity - freq = granularity * 10e8 self.start = start if start is None: self._ts = ts self._ts_for_derive = ts else: self._ts = ts[start:] - start_derive = start - pandas.Timedelta(freq, unit='ns') + start_derive = start - granularity self._ts_for_derive = ts[start_derive:] - self.indexes = (numpy.array(self._ts.index, - numpy.float) // freq) * freq + self.indexes = round_timestamp( + numpy.array(self._ts.index, dtype=numpy.datetime64), granularity) self.tstamps, self.counts = numpy.unique(self.indexes, return_counts=True) @@ -219,12 +225,6 @@ class TimeSerie(object): def __len__(self): return len(self.ts) - @staticmethod - def _to_offset(value): - if isinstance(value, numbers.Real): - return pandas.tseries.offsets.Nano(value * 10e8) - return pandas.tseries.frequencies.to_offset(value) - @property def first(self): try: @@ -274,7 +274,7 @@ class BoundTimeSerie(TimeSerie): """ super(BoundTimeSerie, self).__init__(ts) - self.block_size = self._to_offset(block_size) + self.block_size = block_size self.back_window = back_window self._truncate() @@ -387,7 +387,7 @@ class BoundTimeSerie(TimeSerie): t0 = time.time() for i in six.moves.range(serialize_times): - cls.unserialize(s, 1, 1) + cls.unserialize(s, ONE_SECOND, 1) t1 = time.time() print(" Unserialization speed: %.2f MB/s" % (((points * 2 * 8) @@ -396,9 +396,13 @@ class BoundTimeSerie(TimeSerie): def first_block_timestamp(self): """Return the timestamp of the first block.""" rounded = round_timestamp(self.ts.index[-1], - self.block_size.delta.value) - - return rounded - (self.block_size * self.back_window) + self.block_size) + # FIXME(jd) Return the result as a pandas.Timestamp object as Pandas is + # faster at indexing pandas.Timestamp than numpy.timedelta64 objects + # for whatever reason + return pandas.Timestamp( + rounded - (self.block_size * self.back_window) + ) def _truncate(self): """Truncate the timeserie.""" @@ -424,10 +428,8 @@ class SplitKey(object): def __init__(self, value, 
sampling): if isinstance(value, SplitKey): self.key = value.key - elif isinstance(value, pandas.Timestamp): - self.key = value.value / 10e8 else: - self.key = float(value) + self.key = value self.sampling = sampling @@ -435,7 +437,8 @@ class SplitKey(object): def from_timestamp_and_sampling(cls, timestamp, sampling): return cls( round_timestamp( - timestamp, freq=sampling * cls.POINTS_PER_SPLIT * 10e8), + timestamp, + freq=sampling * cls.POINTS_PER_SPLIT), sampling) def __next__(self): @@ -453,35 +456,48 @@ class SplitKey(object): return self def __hash__(self): - return hash(self.key) + return hash( + str(datetime64_to_epoch(self.key)) + + + str(self.sampling / ONE_SECOND) + ) def __lt__(self, other): if isinstance(other, SplitKey): + if self.sampling != other.sampling: + raise TypeError( + "Cannot compare %s with different sampling" % + self.__class__.__name__) return self.key < other.key if isinstance(other, pandas.Timestamp): - return self.key * 10e8 < other.value - return self.key < other + return pandas.Timestamp(self.key) < other + if isinstance(other, numpy.datetime64): + return self.key < other + raise TypeError("Cannot compare %r with %r" % (self, other)) def __eq__(self, other): if isinstance(other, SplitKey): + if self.sampling != other.sampling: + raise TypeError( + "Cannot compare %s with different sampling" % + self.__class__.__name__) return self.key == other.key if isinstance(other, pandas.Timestamp): - return self.key * 10e8 == other.value - return self.key == other + return pandas.Timestamp(self.key) == other + if isinstance(other, numpy.datetime64): + return self.key == other + raise TypeError("Cannot compare %r with %r" % (self, other)) def __str__(self): return str(float(self)) def __float__(self): - return self.key - - def as_datetime(self): - return pandas.Timestamp(self.key, unit='s') + return datetime64_to_epoch(self.key) def __repr__(self): - return "<%s: %s / %fs>" % (self.__class__.__name__, - repr(self.key), - self.sampling) + return 
"<%s: %s / %s>" % (self.__class__.__name__, + self.key, + self.sampling) class AggregatedTimeSerie(TimeSerie): @@ -500,7 +516,7 @@ class AggregatedTimeSerie(TimeSerie): """ super(AggregatedTimeSerie, self).__init__(ts) - self.sampling = self._to_offset(sampling).nanos / 10e8 + self.sampling = sampling self.max_size = max_size self.aggregation_method = aggregation_method self._truncate(quick=True) @@ -537,8 +553,11 @@ class AggregatedTimeSerie(TimeSerie): # but we have ordered timestamps, so don't need # to iter the whole series. freq = self.sampling * SplitKey.POINTS_PER_SPLIT - ix = numpy.array(self.ts.index, numpy.float64) / 10e8 - keys, counts = numpy.unique((ix // freq) * freq, return_counts=True) + keys, counts = numpy.unique( + round_timestamp( + numpy.array(self.ts.index, dtype=numpy.datetime64), + freq), + return_counts=True) start = 0 for key, count in six.moves.zip(keys, counts): end = start + count @@ -578,7 +597,7 @@ class AggregatedTimeSerie(TimeSerie): and self.aggregation_method == other.aggregation_method) def __repr__(self): - return "<%s 0x%x sampling=%fs max_size=%s agg_method=%s>" % ( + return "<%s 0x%x sampling=%s max_size=%s agg_method=%s>" % ( self.__class__.__name__, id(self), self.sampling, @@ -631,9 +650,6 @@ class AggregatedTimeSerie(TimeSerie): y = index * key.sampling + key.key x = everything['v'][index] - y = y.astype(numpy.float64, copy=False) * 10e8 - y = y.astype('datetime64[ns]', copy=False) - y = pandas.to_datetime(y) return cls.from_data(key.sampling, agg_method, y, x) def get_split_key(self, timestamp=None): @@ -658,25 +674,21 @@ class AggregatedTimeSerie(TimeSerie): The offset returned indicates at which offset the data should be written from. In the case of compressed data, this is always 0. - :param start: Timestamp to start serialization at. + :param start: SplitKey to start serialization at. :param compressed: Serialize in a compressed format. 
:return: a tuple of (offset, data) """ if not self.ts.index.is_monotonic: self.ts = self.ts.sort_index() - offset_div = self.sampling * 10e8 - if isinstance(start, SplitKey): - start = start.as_datetime().value - else: - start = pandas.Timestamp(start).value + offset_div = self.sampling # calculate how many seconds from start the series runs until and # initialize list to store alternating delimiter, float entries if compressed: # NOTE(jd) Use a double delta encoding for timestamps timestamps = numpy.insert( - numpy.diff(self.ts.index) // offset_div, - 0, int((self.first.value - start) // offset_div)) + numpy.floor(numpy.diff(self.ts.index) / offset_div), + 0, numpy.floor((self.first - start.key) / offset_div)) timestamps = timestamps.astype('>> pandas.Timedelta("1 minute").total_seconds() + # 60.00000000000001 + # Never forget that bro. + return obj.delta / 10e8 + if isinstance(obj, datetime.timedelta): + return obj.total_seconds() # This mimics what Pecan implements in its default JSON encoder if hasattr(obj, "jsonify"): return to_primitive(obj.jsonify()) diff --git a/gnocchi/rest/__init__.py b/gnocchi/rest/__init__.py index 1cd46acc..1fa3c4b8 100644 --- a/gnocchi/rest/__init__.py +++ b/gnocchi/rest/__init__.py @@ -145,7 +145,10 @@ def PositiveNotNullInt(value): def Timespan(value): - return utils.to_timespan(value).total_seconds() + try: + return utils.to_timespan(value) + except ValueError as e: + raise voluptuous.Invalid(e) def get_header_option(name, params): @@ -421,13 +424,13 @@ class MetricController(rest.RestController): if start is not None: try: - start = utils.to_datetime(start) + start = utils.to_timestamp(start) except Exception: abort(400, "Invalid value for start") if stop is not None: try: - stop = utils.to_datetime(stop) + stop = utils.to_timestamp(stop) except Exception: abort(400, "Invalid value for stop") @@ -435,7 +438,7 @@ class MetricController(rest.RestController): if not granularity: abort(400, 'A granularity must be specified to 
resample') try: - resample = Timespan(resample) + resample = utils.to_timespan(resample) except ValueError as e: abort(400, e) @@ -457,7 +460,8 @@ class MetricController(rest.RestController): else: measures = pecan.request.storage.get_measures( self.metric, start, stop, aggregation, - Timespan(granularity) if granularity is not None else None, + utils.to_timespan(granularity) + if granularity is not None else None, resample) # Replace timestamp keys by their string versions return [(timestamp.isoformat(), offset, v) @@ -1340,7 +1344,7 @@ class SearchMetricController(rest.RestController): @pecan.expose('json') def post(self, metric_id, start=None, stop=None, aggregation='mean', granularity=None): - granularity = [Timespan(g) + granularity = [utils.to_timespan(g) for g in arg_to_list(granularity or [])] metrics = pecan.request.indexer.list_metrics( ids=arg_to_list(metric_id)) @@ -1355,13 +1359,13 @@ class SearchMetricController(rest.RestController): if start is not None: try: - start = utils.to_datetime(start) + start = utils.to_timestamp(start) except Exception: abort(400, "Invalid value for start") if stop is not None: try: - stop = utils.to_datetime(stop) + stop = utils.to_timestamp(stop) except Exception: abort(400, "Invalid value for stop") @@ -1518,14 +1522,14 @@ class MetricsMeasuresBatchController(rest.RestController): start = kwargs.get('start') if start is not None: try: - start = utils.to_datetime(start) + start = utils.to_timestamp(start) except Exception: abort(400, "Invalid value for start") stop = kwargs.get('stop') if stop is not None: try: - stop = utils.to_datetime(stop) + stop = utils.to_timestamp(stop) except Exception: abort(400, "Invalid value for stop") @@ -1541,7 +1545,7 @@ class MetricsMeasuresBatchController(rest.RestController): granularity = kwargs.get('granularity') if granularity is not None: try: - granularity = Timespan(granularity) + granularity = utils.to_timespan(granularity) except ValueError as e: abort(400, e) @@ -1650,13 
+1654,13 @@ class AggregationController(rest.RestController): if start is not None: try: - start = utils.to_datetime(start) + start = utils.to_timestamp(start) except Exception: abort(400, "Invalid value for start") if stop is not None: try: - stop = utils.to_datetime(stop) + stop = utils.to_timestamp(stop) except Exception: abort(400, "Invalid value for stop") @@ -1676,7 +1680,7 @@ class AggregationController(rest.RestController): return [] if granularity is not None: try: - granularity = Timespan(granularity) + granularity = utils.to_timespan(granularity) except ValueError as e: abort(400, e) @@ -1684,7 +1688,7 @@ class AggregationController(rest.RestController): if not granularity: abort(400, 'A granularity must be specified to resample') try: - resample = Timespan(resample) + resample = utils.to_timespan(resample) except ValueError as e: abort(400, e) diff --git a/gnocchi/storage/__init__.py b/gnocchi/storage/__init__.py index 2a7709b8..fc2687c2 100644 --- a/gnocchi/storage/__init__.py +++ b/gnocchi/storage/__init__.py @@ -111,7 +111,7 @@ class GranularityDoesNotExist(StorageError): self.granularity = granularity super(GranularityDoesNotExist, self).__init__( "Granularity '%s' for metric %s does not exist" % - (granularity, metric)) + (utils.timespan_total_seconds(granularity), metric)) class MetricAlreadyExists(StorageError): @@ -254,10 +254,12 @@ class StorageDriver(object): for metric in metrics: if aggregation not in metric.archive_policy.aggregation_methods: raise AggregationDoesNotExist(metric, aggregation) - if (granularity is not None and granularity - not in set(d.granularity - for d in metric.archive_policy.definition)): - raise GranularityDoesNotExist(metric, granularity) + if granularity is not None: + for d in metric.archive_policy.definition: + if d.granularity == granularity: + break + else: + raise GranularityDoesNotExist(metric, granularity) @staticmethod def search_value(metrics, query, from_timestamp=None, diff --git 
a/gnocchi/storage/_carbonara.py b/gnocchi/storage/_carbonara.py index e1b1e52c..ab2e3230 100644 --- a/gnocchi/storage/_carbonara.py +++ b/gnocchi/storage/_carbonara.py @@ -15,7 +15,6 @@ # License for the specific language governing permissions and limitations # under the License. import collections -import datetime import functools import itertools import operator @@ -23,6 +22,7 @@ import operator from concurrent import futures import daiquiri import iso8601 +import numpy from oslo_config import cfg import six import six.moves @@ -120,8 +120,10 @@ class CarbonaraBasedStorage(storage.StorageDriver): version=3): return set(map( functools.partial(carbonara.SplitKey, sampling=granularity), - self._list_split_keys( - metric, aggregation, granularity, version))) + (numpy.array( + list(self._list_split_keys( + metric, aggregation, granularity, version)), + dtype=numpy.float) * 10e8).astype('datetime64[ns]'))) @staticmethod def _list_split_keys(metric, aggregation, granularity, version=3): @@ -286,8 +288,7 @@ class CarbonaraBasedStorage(storage.StorageDriver): # First delete old splits if archive_policy_def.timespan: - oldest_point_to_keep = ts.last - datetime.timedelta( - seconds=archive_policy_def.timespan) + oldest_point_to_keep = ts.last - archive_policy_def.timespan oldest_key_to_keep = ts.get_split_key(oldest_point_to_keep) for key in list(existing_keys): # NOTE(jd) Only delete if the key is strictly inferior to @@ -298,7 +299,7 @@ class CarbonaraBasedStorage(storage.StorageDriver): self._delete_metric_measures(metric, key, aggregation) existing_keys.remove(key) else: - oldest_key_to_keep = carbonara.SplitKey(0, 0) + oldest_key_to_keep = None # Rewrite all read-only splits just for fun (and compression). 
This # only happens if `previous_oldest_mutable_timestamp' exists, which @@ -322,7 +323,7 @@ class CarbonaraBasedStorage(storage.StorageDriver): None, aggregation, oldest_mutable_timestamp) for key, split in ts.split(): - if key >= oldest_key_to_keep: + if oldest_key_to_keep is None or key >= oldest_key_to_keep: LOG.debug( "Storing split %s (%s) for metric %s", key, aggregation, metric) @@ -440,7 +441,7 @@ class CarbonaraBasedStorage(storage.StorageDriver): for d in definition: ts = bound_timeserie.group_serie( d.granularity, carbonara.round_timestamp( - tstamp, d.granularity * 10e8)) + tstamp, d.granularity)) self._map_in_thread( self._add_measures, diff --git a/gnocchi/storage/ceph.py b/gnocchi/storage/ceph.py index 3b0f74e2..1e37969f 100644 --- a/gnocchi/storage/ceph.py +++ b/gnocchi/storage/ceph.py @@ -19,6 +19,7 @@ from oslo_config import cfg from gnocchi.common import ceph from gnocchi import storage from gnocchi.storage import _carbonara +from gnocchi import utils OPTS = [ @@ -58,7 +59,9 @@ class CephStorage(_carbonara.CarbonaraBasedStorage): @staticmethod def _get_object_name(metric, key, aggregation, version=3): name = str("gnocchi_%s_%s_%s_%s" % ( - metric.id, key, aggregation, key.sampling)) + metric.id, key, aggregation, + utils.timespan_total_seconds(key.sampling)), + ) return name + '_v%s' % version if version else name def _object_exists(self, name): @@ -167,9 +170,10 @@ class CephStorage(_carbonara.CarbonaraBasedStorage): raise storage.MetricDoesNotExist(metric) keys = set() + granularity = str(utils.timespan_total_seconds(granularity)) for name, value in omaps: meta = name.split('_') - if (aggregation == meta[3] and granularity == float(meta[4]) + if (aggregation == meta[3] and granularity == meta[4] and self._version_check(name, version)): keys.add(meta[2]) return keys diff --git a/gnocchi/storage/file.py b/gnocchi/storage/file.py index 3277dca1..47b88fbb 100644 --- a/gnocchi/storage/file.py +++ b/gnocchi/storage/file.py @@ -67,8 +67,11 @@ class 
FileStorage(_carbonara.CarbonaraBasedStorage): def _build_metric_path_for_split(self, metric, aggregation, key, version=3): - path = os.path.join(self._build_metric_path(metric, aggregation), - str(key) + "_" + str(key.sampling)) + path = os.path.join( + self._build_metric_path(metric, aggregation), + str(key) + + "_" + + str(utils.timespan_total_seconds(key.sampling))) return path + '_v%s' % version if version else path def _create_metric(self, metric): @@ -109,9 +112,10 @@ class FileStorage(_carbonara.CarbonaraBasedStorage): raise storage.MetricDoesNotExist(metric) raise keys = set() + granularity = str(utils.timespan_total_seconds(granularity)) for f in files: meta = f.split("_") - if meta[1] == str(granularity) and self._version_check(f, version): + if meta[1] == granularity and self._version_check(f, version): keys.add(meta[0]) return keys diff --git a/gnocchi/storage/redis.py b/gnocchi/storage/redis.py index 43291161..e744b02f 100644 --- a/gnocchi/storage/redis.py +++ b/gnocchi/storage/redis.py @@ -18,6 +18,7 @@ from oslo_config import cfg from gnocchi.common import redis from gnocchi import storage from gnocchi.storage import _carbonara +from gnocchi import utils OPTS = [ @@ -50,8 +51,9 @@ class RedisStorage(_carbonara.CarbonaraBasedStorage): @classmethod def _aggregated_field_for_split(cls, aggregation, key, version=3, granularity=None): - path = cls.FIELD_SEP.join([str(key), aggregation, - str(granularity or key.sampling)]) + path = cls.FIELD_SEP.join([ + str(key), aggregation, + str(utils.timespan_total_seconds(granularity or key.sampling))]) return path + '_v%s' % version if version else path def _create_metric(self, metric): diff --git a/gnocchi/storage/s3.py b/gnocchi/storage/s3.py index a4fb3dab..28129218 100644 --- a/gnocchi/storage/s3.py +++ b/gnocchi/storage/s3.py @@ -21,6 +21,7 @@ import tenacity from gnocchi.common import s3 from gnocchi import storage from gnocchi.storage import _carbonara +from gnocchi import utils boto3 = s3.boto3 botocore = 
s3.botocore @@ -88,7 +89,11 @@ class S3Storage(_carbonara.CarbonaraBasedStorage): @staticmethod def _object_name(split_key, aggregation, version=3): - name = '%s_%s_%s' % (aggregation, split_key.sampling, split_key) + name = '%s_%s_%s' % ( + aggregation, + utils.timespan_total_seconds(split_key.sampling), + split_key, + ) return name + '_v%s' % version if version else name @staticmethod @@ -182,8 +187,10 @@ class S3Storage(_carbonara.CarbonaraBasedStorage): try: response = self.s3.list_objects_v2( Bucket=bucket, - Prefix=self._prefix(metric) + '%s_%s' % (aggregation, - granularity), + Prefix=self._prefix(metric) + '%s_%s' % ( + aggregation, + utils.timespan_total_seconds(granularity), + ), **kwargs) except botocore.exceptions.ClientError as e: if e.response['Error'].get('Code') == "NoSuchKey": diff --git a/gnocchi/storage/swift.py b/gnocchi/storage/swift.py index d1c8e2a9..de0166d7 100644 --- a/gnocchi/storage/swift.py +++ b/gnocchi/storage/swift.py @@ -19,6 +19,7 @@ from oslo_config import cfg from gnocchi.common import swift from gnocchi import storage from gnocchi.storage import _carbonara +from gnocchi import utils swclient = swift.swclient swift_utils = swift.swift_utils @@ -95,7 +96,10 @@ class SwiftStorage(_carbonara.CarbonaraBasedStorage): @staticmethod def _object_name(split_key, aggregation, version=3): - name = '%s_%s_%s' % (split_key, aggregation, split_key.sampling) + name = '%s_%s_%s' % ( + split_key, aggregation, + utils.timespan_total_seconds(split_key.sampling), + ) return name + '_v%s' % version if version else name def _create_metric(self, metric): @@ -165,10 +169,11 @@ class SwiftStorage(_carbonara.CarbonaraBasedStorage): raise storage.MetricDoesNotExist(metric) raise keys = set() + granularity = str(utils.timespan_total_seconds(granularity)) for f in files: try: meta = f['name'].split('_') - if (aggregation == meta[1] and granularity == float(meta[2]) + if (aggregation == meta[1] and granularity == meta[2] and self._version_check(f['name'], 
version)): keys.add(meta[0]) except (ValueError, IndexError): diff --git a/gnocchi/tests/base.py b/gnocchi/tests/base.py index 176aafb3..e259c36f 100644 --- a/gnocchi/tests/base.py +++ b/gnocchi/tests/base.py @@ -22,6 +22,7 @@ import uuid import daiquiri import fixtures +import numpy import six from six.moves.urllib.parse import unquote try: @@ -218,46 +219,48 @@ class TestCase(BaseTestCase): 0, [ # 2 second resolution for a day archive_policy.ArchivePolicyItem( - granularity=2, points=3600 * 24), + granularity=numpy.timedelta64(2, 's'), + timespan=numpy.timedelta64(1, 'D'), + ), ], ), 'low': archive_policy.ArchivePolicy( "low", 0, [ # 5 minutes resolution for an hour archive_policy.ArchivePolicyItem( - granularity=300, points=12), + granularity=numpy.timedelta64(5, 'm'), points=12), # 1 hour resolution for a day archive_policy.ArchivePolicyItem( - granularity=3600, points=24), + granularity=numpy.timedelta64(1, 'h'), points=24), # 1 day resolution for a month archive_policy.ArchivePolicyItem( - granularity=3600 * 24, points=30), + granularity=numpy.timedelta64(1, 'D'), points=30), ], ), 'medium': archive_policy.ArchivePolicy( "medium", 0, [ # 1 minute resolution for an day archive_policy.ArchivePolicyItem( - granularity=60, points=60 * 24), + granularity=numpy.timedelta64(1, 'm'), points=60 * 24), # 1 hour resolution for a week archive_policy.ArchivePolicyItem( - granularity=3600, points=7 * 24), + granularity=numpy.timedelta64(1, 'h'), points=7 * 24), # 1 day resolution for a year archive_policy.ArchivePolicyItem( - granularity=3600 * 24, points=365), + granularity=numpy.timedelta64(1, 'D'), points=365), ], ), 'high': archive_policy.ArchivePolicy( "high", 0, [ # 1 second resolution for an hour archive_policy.ArchivePolicyItem( - granularity=1, points=3600), + granularity=numpy.timedelta64(1, 's'), points=3600), # 1 minute resolution for a week archive_policy.ArchivePolicyItem( - granularity=60, points=60 * 24 * 7), + granularity=numpy.timedelta64(1, 'm'), 
points=60 * 24 * 7), # 1 hour resolution for a year archive_policy.ArchivePolicyItem( - granularity=3600, points=365 * 24), + granularity=numpy.timedelta64(1, 'h'), points=365 * 24), ], ), } diff --git a/gnocchi/tests/functional/gabbits/archive.yaml b/gnocchi/tests/functional/gabbits/archive.yaml index e26837fe..55964a8f 100644 --- a/gnocchi/tests/functional/gabbits/archive.yaml +++ b/gnocchi/tests/functional/gabbits/archive.yaml @@ -441,7 +441,7 @@ tests: points: 60 status: 400 response_strings: - - "Invalid input: not a valid value for dictionary value" + - "Invalid input: Timespan must be positive for dictionary value" - name: create invalid points policy POST: /v1/archive_policy @@ -561,8 +561,8 @@ tests: status: 201 response_json_paths: $.definition[0].points: 1000 - $.definition[0].granularity: "0:00:04" - $.definition[0].timespan: "1:06:40" + $.definition[0].granularity: "0:00:03.600000" + $.definition[0].timespan: "1:00:00" - name: policy float timespan POST: /v1/archive_policy @@ -599,5 +599,5 @@ tests: - rate:last - rate:mean $.definition[0].points: 1000 - $.definition[0].granularity: "0:00:04" - $.definition[0].timespan: "1:06:40" + $.definition[0].granularity: "0:00:03.600000" + $.definition[0].timespan: "1:00:00" diff --git a/gnocchi/tests/test_aggregates.py b/gnocchi/tests/test_aggregates.py index 3d5cc6af..21260554 100644 --- a/gnocchi/tests/test_aggregates.py +++ b/gnocchi/tests/test_aggregates.py @@ -16,6 +16,7 @@ import datetime import uuid +import numpy from stevedore import extension from gnocchi import aggregates @@ -38,18 +39,6 @@ class TestAggregates(tests_base.TestCase): self.assertIsInstance(self.custom_agg['moving-average'], moving_stats.MovingAverage) - def test_check_window_valid(self): - for agg_method in self.custom_agg: - window = '60s' - agg_obj = self.custom_agg[agg_method] - result = agg_obj.check_window_valid(window) - self.assertEqual(60.0, result) - - window = '60' - agg_obj = self.custom_agg[agg_method] - result = 
agg_obj.check_window_valid(window) - self.assertEqual(60.0, result) - def _test_create_metric_and_data(self, data, spacing): metric = storage.Metric( uuid.uuid4(), self.archive_policies['medium']) @@ -71,18 +60,18 @@ class TestAggregates(tests_base.TestCase): spacing=20) for agg_method in self.custom_agg: agg_obj = self.custom_agg[agg_method] - window = 90.0 + window = numpy.timedelta64(90, 's') self.assertRaises(aggregates.CustomAggFailure, agg_obj.retrieve_data, self.storage, metric, start=None, stop=None, window=window) - window = 120.0 + window = numpy.timedelta64(120, 's') grain, result = agg_obj.retrieve_data(self.storage, metric, start=None, stop=None, window=window) - self.assertEqual(60.0, grain) + self.assertEqual(numpy.timedelta64(1, 'm'), grain) self.assertEqual(39.0, result[datetime.datetime(2014, 1, 1, 12)]) self.assertEqual(25.5, result[datetime.datetime(2014, 1, 1, 12, 1)]) @@ -98,16 +87,20 @@ class TestAggregates(tests_base.TestCase): result = agg_obj.compute(self.storage, metric, start=None, stop=None, window=window, center=center) - expected = [(utils.datetime_utc(2014, 1, 1, 12), 120.0, 32.25)] - self.assertEqual(expected, result) + self.assertEqual([(utils.datetime_utc(2014, 1, 1, 12), + numpy.timedelta64(120, 's'), + 32.25)], + result) center = 'True' result = agg_obj.compute(self.storage, metric, start=None, stop=None, window=window, center=center) - expected = [(utils.datetime_utc(2014, 1, 1, 12, 1), 120.0, 28.875)] - self.assertEqual(expected, result) + self.assertEqual([(utils.datetime_utc(2014, 1, 1, 12, 1), + numpy.timedelta64(120, 's'), + 28.875)], + result) # (FIXME) atmalagon: doing a centered average when # there are only two points in the retrieved data seems weird. # better to raise an error or return nan in this case? 
diff --git a/gnocchi/tests/test_archive_policy.py b/gnocchi/tests/test_archive_policy.py index 51fd20c7..6ec9d71e 100644 --- a/gnocchi/tests/test_archive_policy.py +++ b/gnocchi/tests/test_archive_policy.py @@ -11,6 +11,8 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. +import numpy + from gnocchi import archive_policy from gnocchi import service from gnocchi.tests import base @@ -87,7 +89,7 @@ class TestArchivePolicy(base.BaseTestCase): 0, [(20, 60), (10, 300), (10, 5)], ["-mean", "-last"]) - self.assertEqual(ap.max_block_size, 300) + self.assertEqual(ap.max_block_size, numpy.timedelta64(300, 's')) class TestArchivePolicyItem(base.BaseTestCase): diff --git a/gnocchi/tests/test_carbonara.py b/gnocchi/tests/test_carbonara.py index c351dd78..b19136e2 100644 --- a/gnocchi/tests/test_carbonara.py +++ b/gnocchi/tests/test_carbonara.py @@ -19,13 +19,17 @@ import math import fixtures import iso8601 -import pandas +import numpy import six from gnocchi import carbonara from gnocchi.tests import base +def datetime64(*args): + return numpy.datetime64(datetime.datetime(*args)) + + class TestBoundTimeSerie(base.BaseTestCase): def test_benchmark(self): self.useFixture(fixtures.Timeout(300, gentle=True)) @@ -34,62 +38,62 @@ class TestBoundTimeSerie(base.BaseTestCase): @staticmethod def test_base(): carbonara.BoundTimeSerie.from_data( - [datetime.datetime(2014, 1, 1, 12, 0, 0), - datetime.datetime(2014, 1, 1, 12, 0, 4), - datetime.datetime(2014, 1, 1, 12, 0, 9)], + [datetime64(2014, 1, 1, 12, 0, 0), + datetime64(2014, 1, 1, 12, 0, 4), + datetime64(2014, 1, 1, 12, 0, 9)], [3, 5, 6]) def test_block_size(self): ts = carbonara.BoundTimeSerie.from_data( - [datetime.datetime(2014, 1, 1, 12, 0, 0), - datetime.datetime(2014, 1, 1, 12, 0, 4), - datetime.datetime(2014, 1, 1, 12, 0, 9)], + [datetime64(2014, 1, 1, 12, 0, 0), + datetime64(2014, 1, 1, 12, 0, 4), + 
datetime64(2014, 1, 1, 12, 0, 9)], [3, 5, 6], - block_size='5s') + block_size=numpy.timedelta64(5, 's')) self.assertEqual(1, len(ts)) - ts.set_values([(datetime.datetime(2014, 1, 1, 12, 0, 10), 3), - (datetime.datetime(2014, 1, 1, 12, 0, 11), 4)]) + ts.set_values([(datetime64(2014, 1, 1, 12, 0, 10), 3), + (datetime64(2014, 1, 1, 12, 0, 11), 4)]) self.assertEqual(2, len(ts)) def test_block_size_back_window(self): ts = carbonara.BoundTimeSerie.from_data( - [datetime.datetime(2014, 1, 1, 12, 0, 0), - datetime.datetime(2014, 1, 1, 12, 0, 4), - datetime.datetime(2014, 1, 1, 12, 0, 9)], + [datetime64(2014, 1, 1, 12, 0, 0), + datetime64(2014, 1, 1, 12, 0, 4), + datetime64(2014, 1, 1, 12, 0, 9)], [3, 5, 6], - block_size='5s', + block_size=numpy.timedelta64(5, 's'), back_window=1) self.assertEqual(3, len(ts)) - ts.set_values([(datetime.datetime(2014, 1, 1, 12, 0, 10), 3), - (datetime.datetime(2014, 1, 1, 12, 0, 11), 4)]) + ts.set_values([(datetime64(2014, 1, 1, 12, 0, 10), 3), + (datetime64(2014, 1, 1, 12, 0, 11), 4)]) self.assertEqual(3, len(ts)) def test_block_size_unordered(self): ts = carbonara.BoundTimeSerie.from_data( - [datetime.datetime(2014, 1, 1, 12, 0, 0), - datetime.datetime(2014, 1, 1, 12, 0, 5), - datetime.datetime(2014, 1, 1, 12, 0, 9)], + [datetime64(2014, 1, 1, 12, 0, 0), + datetime64(2014, 1, 1, 12, 0, 5), + datetime64(2014, 1, 1, 12, 0, 9)], [10, 5, 23], - block_size='5s') + block_size=numpy.timedelta64(5, 's')) self.assertEqual(2, len(ts)) - ts.set_values([(datetime.datetime(2014, 1, 1, 12, 0, 11), 3), - (datetime.datetime(2014, 1, 1, 12, 0, 10), 4)]) + ts.set_values([(datetime64(2014, 1, 1, 12, 0, 11), 3), + (datetime64(2014, 1, 1, 12, 0, 10), 4)]) self.assertEqual(2, len(ts)) def test_duplicate_timestamps(self): ts = carbonara.BoundTimeSerie.from_data( - [datetime.datetime(2014, 1, 1, 12, 0, 0), - datetime.datetime(2014, 1, 1, 12, 0, 9)], + [datetime64(2014, 1, 1, 12, 0, 0), + datetime64(2014, 1, 1, 12, 0, 9)], [10, 23]) self.assertEqual(2, len(ts)) 
self.assertEqual(10.0, ts[0]) self.assertEqual(23.0, ts[1]) - ts.set_values([(datetime.datetime(2014, 1, 1, 13, 0, 10), 3), - (datetime.datetime(2014, 1, 1, 13, 0, 11), 9), - (datetime.datetime(2014, 1, 1, 13, 0, 11), 8), - (datetime.datetime(2014, 1, 1, 13, 0, 11), 7), - (datetime.datetime(2014, 1, 1, 13, 0, 11), 4)]) + ts.set_values([(datetime64(2014, 1, 1, 13, 0, 10), 3), + (datetime64(2014, 1, 1, 13, 0, 11), 9), + (datetime64(2014, 1, 1, 13, 0, 11), 8), + (datetime64(2014, 1, 1, 13, 0, 11), 7), + (datetime64(2014, 1, 1, 13, 0, 11), 4)]) self.assertEqual(4, len(ts)) self.assertEqual(10.0, ts[0]) self.assertEqual(23.0, ts[1]) @@ -102,15 +106,15 @@ class TestAggregatedTimeSerie(base.BaseTestCase): def test_base(): carbonara.AggregatedTimeSerie.from_data( 3, 'mean', - [datetime.datetime(2014, 1, 1, 12, 0, 0), - datetime.datetime(2014, 1, 1, 12, 0, 4), - datetime.datetime(2014, 1, 1, 12, 0, 9)], + [datetime64(2014, 1, 1, 12, 0, 0), + datetime64(2014, 1, 1, 12, 0, 4), + datetime64(2014, 1, 1, 12, 0, 9)], [3, 5, 6]) carbonara.AggregatedTimeSerie.from_data( "4s", 'mean', - [datetime.datetime(2014, 1, 1, 12, 0, 0), - datetime.datetime(2014, 1, 1, 12, 0, 4), - datetime.datetime(2014, 1, 1, 12, 0, 9)], + [datetime64(2014, 1, 1, 12, 0, 0), + datetime64(2014, 1, 1, 12, 0, 4), + datetime64(2014, 1, 1, 12, 0, 9)], [3, 5, 6]) def test_benchmark(self): @@ -119,40 +123,49 @@ class TestAggregatedTimeSerie(base.BaseTestCase): def test_fetch_basic(self): ts = carbonara.AggregatedTimeSerie.from_data( - timestamps=[datetime.datetime(2014, 1, 1, 12, 0, 0), - datetime.datetime(2014, 1, 1, 12, 0, 4), - datetime.datetime(2014, 1, 1, 12, 0, 9)], + timestamps=[datetime64(2014, 1, 1, 12, 0, 0), + datetime64(2014, 1, 1, 12, 0, 4), + datetime64(2014, 1, 1, 12, 0, 9)], aggregation_method='mean', values=[3, 5, 6], - sampling="1s") + sampling=numpy.timedelta64(1, 's')) self.assertEqual( - [(datetime.datetime(2014, 1, 1, 12), 1, 3), - (datetime.datetime(2014, 1, 1, 12, 0, 4), 1, 5), - 
(datetime.datetime(2014, 1, 1, 12, 0, 9), 1, 6)], + [(datetime.datetime(2014, 1, 1, 12), + numpy.timedelta64(1000000, 'us'), 3), + (datetime.datetime(2014, 1, 1, 12, 0, 4), + numpy.timedelta64(1000000, 'us'), 5), + (datetime.datetime(2014, 1, 1, 12, 0, 9), + numpy.timedelta64(1000000, 'us'), 6)], list(ts.fetch())) self.assertEqual( - [(datetime.datetime(2014, 1, 1, 12, 0, 4), 1, 5), - (datetime.datetime(2014, 1, 1, 12, 0, 9), 1, 6)], + [(datetime.datetime(2014, 1, 1, 12, 0, 4), + numpy.timedelta64(1000000, 'us'), 5), + (datetime.datetime(2014, 1, 1, 12, 0, 9), + numpy.timedelta64(1000000, 'us'), 6)], list(ts.fetch( - from_timestamp=datetime.datetime(2014, 1, 1, 12, 0, 4)))) + from_timestamp=datetime64(2014, 1, 1, 12, 0, 4)))) self.assertEqual( - [(datetime.datetime(2014, 1, 1, 12, 0, 4), 1, 5), - (datetime.datetime(2014, 1, 1, 12, 0, 9), 1, 6)], + [(datetime.datetime(2014, 1, 1, 12, 0, 4), + numpy.timedelta64(1000000, 'us'), 5), + (datetime.datetime(2014, 1, 1, 12, 0, 9), + numpy.timedelta64(1000000, 'us'), 6)], list(ts.fetch( - from_timestamp=iso8601.parse_date( - "2014-01-01 12:00:04")))) + from_timestamp=numpy.datetime64(iso8601.parse_date( + "2014-01-01 12:00:04"))))) self.assertEqual( - [(datetime.datetime(2014, 1, 1, 12, 0, 4), 1, 5), - (datetime.datetime(2014, 1, 1, 12, 0, 9), 1, 6)], + [(datetime.datetime(2014, 1, 1, 12, 0, 4), + numpy.timedelta64(1000000, 'us'), 5), + (datetime.datetime(2014, 1, 1, 12, 0, 9), + numpy.timedelta64(1000000, 'us'), 6)], list(ts.fetch( - from_timestamp=iso8601.parse_date( - "2014-01-01 13:00:04+01:00")))) + from_timestamp=numpy.datetime64(iso8601.parse_date( + "2014-01-01 13:00:04+01:00"))))) def test_before_epoch(self): ts = carbonara.TimeSerie.from_tuples( - [(datetime.datetime(1950, 1, 1, 12), 3), - (datetime.datetime(2014, 1, 1, 12), 5), - (datetime.datetime(2014, 1, 1, 12), 6)]) + [(datetime64(1950, 1, 1, 12), 3), + (datetime64(2014, 1, 1, 12), 5), + (datetime64(2014, 1, 1, 12), 6)]) 
self.assertRaises(carbonara.BeforeEpochError, ts.group_serie, 60) @@ -179,17 +192,23 @@ class TestAggregatedTimeSerie(base.BaseTestCase): (datetime.datetime(2014, 1, 1, 12, 3, 22), 5), # Counter reset (datetime.datetime(2014, 1, 1, 12, 3, 42), 7), (datetime.datetime(2014, 1, 1, 12, 4, 9), 23)]) - ts = self._resample(ts, 60, 'mean', derived=True) + ts = self._resample(ts, numpy.timedelta64(60, 's'), 'mean', + derived=True) self.assertEqual(5, len(ts)) self.assertEqual( - [(datetime.datetime(2014, 1, 1, 12, 0, 0), 60, 5), - (datetime.datetime(2014, 1, 1, 12, 1, 0), 60, 5), - (datetime.datetime(2014, 1, 1, 12, 2, 0), 60, 11), - (datetime.datetime(2014, 1, 1, 12, 3, 0), 60, -32), - (datetime.datetime(2014, 1, 1, 12, 4, 0), 60, 16)], + [(datetime.datetime(2014, 1, 1, 12, 0, 0), + numpy.timedelta64(60, 's'), 5), + (datetime.datetime(2014, 1, 1, 12, 1, 0), + numpy.timedelta64(60, 's'), 5), + (datetime.datetime(2014, 1, 1, 12, 2, 0), + numpy.timedelta64(60, 's'), 11), + (datetime.datetime(2014, 1, 1, 12, 3, 0), + numpy.timedelta64(60, 's'), -32), + (datetime.datetime(2014, 1, 1, 12, 4, 0), + numpy.timedelta64(60, 's'), 16)], list(ts.fetch( - from_timestamp=datetime.datetime(2014, 1, 1, 12)))) + from_timestamp=datetime64(2014, 1, 1, 12)))) def test_derived_hole(self): ts = carbonara.TimeSerie.from_tuples( @@ -202,26 +221,31 @@ class TestAggregatedTimeSerie(base.BaseTestCase): (datetime.datetime(2014, 1, 1, 12, 3, 22), 108), (datetime.datetime(2014, 1, 1, 12, 3, 42), 200), (datetime.datetime(2014, 1, 1, 12, 4, 9), 202)]) - ts = self._resample(ts, 60, 'last', derived=True) + ts = self._resample(ts, numpy.timedelta64(60, 's'), 'last', + derived=True) self.assertEqual(4, len(ts)) self.assertEqual( - [(datetime.datetime(2014, 1, 1, 12, 0, 0), 60, 5), - (datetime.datetime(2014, 1, 1, 12, 1, 0), 60, 4), - (datetime.datetime(2014, 1, 1, 12, 3, 0), 60, 92), - (datetime.datetime(2014, 1, 1, 12, 4, 0), 60, 2)], + [(datetime.datetime(2014, 1, 1, 12, 0, 0), + numpy.timedelta64(60, 's'), 
5), + (datetime.datetime(2014, 1, 1, 12, 1, 0), + numpy.timedelta64(60, 's'), 4), + (datetime.datetime(2014, 1, 1, 12, 3, 0), + numpy.timedelta64(60, 's'), 92), + (datetime.datetime(2014, 1, 1, 12, 4, 0), + numpy.timedelta64(60, 's'), 2)], list(ts.fetch( - from_timestamp=datetime.datetime(2014, 1, 1, 12)))) + from_timestamp=datetime64(2014, 1, 1, 12)))) def test_74_percentile_serialized(self): ts = carbonara.TimeSerie.from_tuples( - [(datetime.datetime(2014, 1, 1, 12, 0, 0), 3), - (datetime.datetime(2014, 1, 1, 12, 0, 4), 5), - (datetime.datetime(2014, 1, 1, 12, 0, 9), 6)]) - ts = self._resample(ts, 60, '74pct') + [(datetime64(2014, 1, 1, 12, 0, 0), 3), + (datetime64(2014, 1, 1, 12, 0, 4), 5), + (datetime64(2014, 1, 1, 12, 0, 9), 6)]) + ts = self._resample(ts, numpy.timedelta64(60, 's'), '74pct') self.assertEqual(1, len(ts)) - self.assertEqual(5.48, ts[datetime.datetime(2014, 1, 1, 12, 0, 0)]) + self.assertEqual(5.48, ts[datetime64(2014, 1, 1, 12, 0, 0)]) # Serialize and unserialize key = ts.get_split_key() @@ -230,38 +254,38 @@ class TestAggregatedTimeSerie(base.BaseTestCase): s, key, '74pct') ts = carbonara.TimeSerie.from_tuples( - [(datetime.datetime(2014, 1, 1, 12, 0, 0), 3), - (datetime.datetime(2014, 1, 1, 12, 0, 4), 5), - (datetime.datetime(2014, 1, 1, 12, 0, 9), 6)]) - ts = self._resample(ts, 60, '74pct') + [(datetime64(2014, 1, 1, 12, 0, 0), 3), + (datetime64(2014, 1, 1, 12, 0, 4), 5), + (datetime64(2014, 1, 1, 12, 0, 9), 6)]) + ts = self._resample(ts, numpy.timedelta64(60, 's'), '74pct') ts.merge(saved_ts) self.assertEqual(1, len(ts)) - self.assertEqual(5.48, ts[datetime.datetime(2014, 1, 1, 12, 0, 0)]) + self.assertEqual(5.48, ts[datetime64(2014, 1, 1, 12, 0, 0)]) def test_95_percentile(self): ts = carbonara.TimeSerie.from_tuples( - [(datetime.datetime(2014, 1, 1, 12, 0, 0), 3), - (datetime.datetime(2014, 1, 1, 12, 0, 4), 5), - (datetime.datetime(2014, 1, 1, 12, 0, 9), 6)]) - ts = self._resample(ts, 60, '95pct') + [(datetime64(2014, 1, 1, 12, 0, 0), 3), 
+ (datetime64(2014, 1, 1, 12, 0, 4), 5), + (datetime64(2014, 1, 1, 12, 0, 9), 6)]) + ts = self._resample(ts, numpy.timedelta64(60, 's'), '95pct') self.assertEqual(1, len(ts)) self.assertEqual(5.9000000000000004, - ts[datetime.datetime(2014, 1, 1, 12, 0, 0)]) + ts[datetime64(2014, 1, 1, 12, 0, 0)]) def _do_test_aggregation(self, name, v1, v2): ts = carbonara.TimeSerie.from_tuples( - [(datetime.datetime(2014, 1, 1, 12, 0, 0), 3), - (datetime.datetime(2014, 1, 1, 12, 0, 4), 6), - (datetime.datetime(2014, 1, 1, 12, 0, 9), 5), - (datetime.datetime(2014, 1, 1, 12, 1, 4), 8), - (datetime.datetime(2014, 1, 1, 12, 1, 6), 9)]) - ts = self._resample(ts, 60, name) + [(datetime64(2014, 1, 1, 12, 0, 0), 3), + (datetime64(2014, 1, 1, 12, 0, 4), 6), + (datetime64(2014, 1, 1, 12, 0, 9), 5), + (datetime64(2014, 1, 1, 12, 1, 4), 8), + (datetime64(2014, 1, 1, 12, 1, 6), 9)]) + ts = self._resample(ts, numpy.timedelta64(60, 's'), name) self.assertEqual(2, len(ts)) - self.assertEqual(v1, ts[datetime.datetime(2014, 1, 1, 12, 0, 0)]) - self.assertEqual(v2, ts[datetime.datetime(2014, 1, 1, 12, 1, 0)]) + self.assertEqual(v1, ts[datetime64(2014, 1, 1, 12, 0, 0)]) + self.assertEqual(v2, ts[datetime64(2014, 1, 1, 12, 1, 0)]) def test_aggregation_first(self): self._do_test_aggregation('first', 3, 8) @@ -293,37 +317,37 @@ class TestAggregatedTimeSerie(base.BaseTestCase): def test_aggregation_std_with_unique(self): ts = carbonara.TimeSerie.from_tuples( - [(datetime.datetime(2014, 1, 1, 12, 0, 0), 3)]) - ts = self._resample(ts, 60, 'std') + [(datetime64(2014, 1, 1, 12, 0, 0), 3)]) + ts = self._resample(ts, numpy.timedelta64(60, 's'), 'std') self.assertEqual(0, len(ts), ts.ts.values) ts = carbonara.TimeSerie.from_tuples( - [(datetime.datetime(2014, 1, 1, 12, 0, 0), 3), - (datetime.datetime(2014, 1, 1, 12, 0, 4), 6), - (datetime.datetime(2014, 1, 1, 12, 0, 9), 5), - (datetime.datetime(2014, 1, 1, 12, 1, 6), 9)]) - ts = self._resample(ts, 60, "std") + [(datetime64(2014, 1, 1, 12, 0, 0), 3), + 
(datetime64(2014, 1, 1, 12, 0, 4), 6), + (datetime64(2014, 1, 1, 12, 0, 9), 5), + (datetime64(2014, 1, 1, 12, 1, 6), 9)]) + ts = self._resample(ts, numpy.timedelta64(60, 's'), "std") self.assertEqual(1, len(ts)) self.assertEqual(1.5275252316519465, - ts[datetime.datetime(2014, 1, 1, 12, 0, 0)]) + ts[datetime64(2014, 1, 1, 12, 0, 0)]) def test_different_length_in_timestamps_and_data(self): self.assertRaises(ValueError, carbonara.AggregatedTimeSerie.from_data, 3, 'mean', - [datetime.datetime(2014, 1, 1, 12, 0, 0), - datetime.datetime(2014, 1, 1, 12, 0, 4), - datetime.datetime(2014, 1, 1, 12, 0, 9)], + [datetime64(2014, 1, 1, 12, 0, 0), + datetime64(2014, 1, 1, 12, 0, 4), + datetime64(2014, 1, 1, 12, 0, 9)], [3, 5]) def test_max_size(self): ts = carbonara.TimeSerie.from_data( - [datetime.datetime(2014, 1, 1, 12, 0, 0), - datetime.datetime(2014, 1, 1, 12, 0, 4), - datetime.datetime(2014, 1, 1, 12, 0, 9)], + [datetime64(2014, 1, 1, 12, 0, 0), + datetime64(2014, 1, 1, 12, 0, 4), + datetime64(2014, 1, 1, 12, 0, 9)], [3, 5, 6]) - ts = self._resample(ts, 1, 'mean', max_size=2) + ts = self._resample(ts, numpy.timedelta64(1, 's'), 'mean', max_size=2) self.assertEqual(2, len(ts)) self.assertEqual(5, ts[0]) @@ -331,40 +355,40 @@ class TestAggregatedTimeSerie(base.BaseTestCase): def test_down_sampling(self): ts = carbonara.TimeSerie.from_data( - [datetime.datetime(2014, 1, 1, 12, 0, 0), - datetime.datetime(2014, 1, 1, 12, 0, 4), - datetime.datetime(2014, 1, 1, 12, 0, 9)], + [datetime64(2014, 1, 1, 12, 0, 0), + datetime64(2014, 1, 1, 12, 0, 4), + datetime64(2014, 1, 1, 12, 0, 9)], [3, 5, 7]) - ts = self._resample(ts, 300, 'mean') + ts = self._resample(ts, numpy.timedelta64(300, 's'), 'mean') self.assertEqual(1, len(ts)) - self.assertEqual(5, ts[datetime.datetime(2014, 1, 1, 12, 0, 0)]) + self.assertEqual(5, ts[datetime64(2014, 1, 1, 12, 0, 0)]) def test_down_sampling_with_max_size(self): ts = carbonara.TimeSerie.from_data( - [datetime.datetime(2014, 1, 1, 12, 0, 0), - 
datetime.datetime(2014, 1, 1, 12, 1, 4), - datetime.datetime(2014, 1, 1, 12, 1, 9), - datetime.datetime(2014, 1, 1, 12, 2, 12)], + [datetime64(2014, 1, 1, 12, 0, 0), + datetime64(2014, 1, 1, 12, 1, 4), + datetime64(2014, 1, 1, 12, 1, 9), + datetime64(2014, 1, 1, 12, 2, 12)], [3, 5, 7, 1]) - ts = self._resample(ts, 60, 'mean', max_size=2) + ts = self._resample(ts, numpy.timedelta64(60, 's'), 'mean', max_size=2) self.assertEqual(2, len(ts)) - self.assertEqual(6, ts[datetime.datetime(2014, 1, 1, 12, 1, 0)]) - self.assertEqual(1, ts[datetime.datetime(2014, 1, 1, 12, 2, 0)]) + self.assertEqual(6, ts[datetime64(2014, 1, 1, 12, 1, 0)]) + self.assertEqual(1, ts[datetime64(2014, 1, 1, 12, 2, 0)]) def test_down_sampling_with_max_size_and_method_max(self): ts = carbonara.TimeSerie.from_data( - [datetime.datetime(2014, 1, 1, 12, 0, 0), - datetime.datetime(2014, 1, 1, 12, 1, 4), - datetime.datetime(2014, 1, 1, 12, 1, 9), - datetime.datetime(2014, 1, 1, 12, 2, 12)], + [datetime64(2014, 1, 1, 12, 0, 0), + datetime64(2014, 1, 1, 12, 1, 4), + datetime64(2014, 1, 1, 12, 1, 9), + datetime64(2014, 1, 1, 12, 2, 12)], [3, 5, 70, 1]) - ts = self._resample(ts, 60, 'max', max_size=2) + ts = self._resample(ts, numpy.timedelta64(60, 's'), 'max', max_size=2) self.assertEqual(2, len(ts)) - self.assertEqual(70, ts[datetime.datetime(2014, 1, 1, 12, 1, 0)]) - self.assertEqual(1, ts[datetime.datetime(2014, 1, 1, 12, 2, 0)]) + self.assertEqual(70, ts[datetime64(2014, 1, 1, 12, 1, 0)]) + self.assertEqual(1, ts[datetime64(2014, 1, 1, 12, 2, 0)]) @staticmethod def _resample_and_merge(ts, agg_dict): @@ -378,31 +402,36 @@ class TestAggregatedTimeSerie(base.BaseTestCase): agg_dict['return'].merge(existing) def test_aggregated_different_archive_no_overlap(self): - tsc1 = {'sampling': 60, 'size': 50, 'agg': 'mean'} + tsc1 = {'sampling': numpy.timedelta64(60, 's'), + 'size': 50, 'agg': 'mean'} tsb1 = carbonara.BoundTimeSerie(block_size=tsc1['sampling']) - tsc2 = {'sampling': 60, 'size': 50, 'agg': 'mean'} + 
tsc2 = {'sampling': numpy.timedelta64(60, 's'), + 'size': 50, 'agg': 'mean'} tsb2 = carbonara.BoundTimeSerie(block_size=tsc2['sampling']) - tsb1.set_values([(datetime.datetime(2014, 1, 1, 11, 46, 4), 4)], + tsb1.set_values([(datetime64(2014, 1, 1, 11, 46, 4), 4)], before_truncate_callback=functools.partial( self._resample_and_merge, agg_dict=tsc1)) - tsb2.set_values([(datetime.datetime(2014, 1, 1, 9, 1, 4), 4)], + tsb2.set_values([(datetime64(2014, 1, 1, 9, 1, 4), 4)], before_truncate_callback=functools.partial( self._resample_and_merge, agg_dict=tsc2)) - dtfrom = datetime.datetime(2014, 1, 1, 11, 0, 0) + dtfrom = datetime64(2014, 1, 1, 11, 0, 0) self.assertRaises(carbonara.UnAggregableTimeseries, carbonara.AggregatedTimeSerie.aggregated, [tsc1['return'], tsc2['return']], from_timestamp=dtfrom, aggregation='mean') def test_aggregated_different_archive_no_overlap2(self): - tsc1 = {'sampling': 60, 'size': 50, 'agg': 'mean'} + tsc1 = {'sampling': numpy.timedelta64(60, 's'), + 'size': 50, 'agg': 'mean'} tsb1 = carbonara.BoundTimeSerie(block_size=tsc1['sampling']) - tsc2 = carbonara.AggregatedTimeSerie(sampling=60, max_size=50, - aggregation_method='mean') + tsc2 = carbonara.AggregatedTimeSerie( + sampling=numpy.timedelta64(60, 's'), + max_size=50, + aggregation_method='mean') - tsb1.set_values([(datetime.datetime(2014, 1, 1, 12, 3, 0), 4)], + tsb1.set_values([(datetime64(2014, 1, 1, 12, 3, 0), 4)], before_truncate_callback=functools.partial( self._resample_and_merge, agg_dict=tsc1)) self.assertRaises(carbonara.UnAggregableTimeseries, @@ -410,42 +439,44 @@ class TestAggregatedTimeSerie(base.BaseTestCase): [tsc1['return'], tsc2], aggregation='mean') def test_aggregated_different_archive_overlap(self): - tsc1 = {'sampling': 60, 'size': 10, 'agg': 'mean'} + tsc1 = {'sampling': numpy.timedelta64(60, 's'), + 'size': 10, 'agg': 'mean'} tsb1 = carbonara.BoundTimeSerie(block_size=tsc1['sampling']) - tsc2 = {'sampling': 60, 'size': 10, 'agg': 'mean'} + tsc2 = {'sampling': 
numpy.timedelta64(60, 's'), + 'size': 10, 'agg': 'mean'} tsb2 = carbonara.BoundTimeSerie(block_size=tsc2['sampling']) # NOTE(sileht): minute 8 is missing in both and # minute 7 in tsc2 too, but it looks like we have # enough point to do the aggregation tsb1.set_values([ - (datetime.datetime(2014, 1, 1, 11, 0, 0), 4), - (datetime.datetime(2014, 1, 1, 12, 1, 0), 3), - (datetime.datetime(2014, 1, 1, 12, 2, 0), 2), - (datetime.datetime(2014, 1, 1, 12, 3, 0), 4), - (datetime.datetime(2014, 1, 1, 12, 4, 0), 2), - (datetime.datetime(2014, 1, 1, 12, 5, 0), 3), - (datetime.datetime(2014, 1, 1, 12, 6, 0), 4), - (datetime.datetime(2014, 1, 1, 12, 7, 0), 10), - (datetime.datetime(2014, 1, 1, 12, 9, 0), 2), + (datetime64(2014, 1, 1, 11, 0, 0), 4), + (datetime64(2014, 1, 1, 12, 1, 0), 3), + (datetime64(2014, 1, 1, 12, 2, 0), 2), + (datetime64(2014, 1, 1, 12, 3, 0), 4), + (datetime64(2014, 1, 1, 12, 4, 0), 2), + (datetime64(2014, 1, 1, 12, 5, 0), 3), + (datetime64(2014, 1, 1, 12, 6, 0), 4), + (datetime64(2014, 1, 1, 12, 7, 0), 10), + (datetime64(2014, 1, 1, 12, 9, 0), 2), ], before_truncate_callback=functools.partial( self._resample_and_merge, agg_dict=tsc1)) tsb2.set_values([ - (datetime.datetime(2014, 1, 1, 12, 1, 0), 3), - (datetime.datetime(2014, 1, 1, 12, 2, 0), 4), - (datetime.datetime(2014, 1, 1, 12, 3, 0), 4), - (datetime.datetime(2014, 1, 1, 12, 4, 0), 6), - (datetime.datetime(2014, 1, 1, 12, 5, 0), 3), - (datetime.datetime(2014, 1, 1, 12, 6, 0), 6), - (datetime.datetime(2014, 1, 1, 12, 9, 0), 2), - (datetime.datetime(2014, 1, 1, 12, 11, 0), 2), - (datetime.datetime(2014, 1, 1, 12, 12, 0), 2), + (datetime64(2014, 1, 1, 12, 1, 0), 3), + (datetime64(2014, 1, 1, 12, 2, 0), 4), + (datetime64(2014, 1, 1, 12, 3, 0), 4), + (datetime64(2014, 1, 1, 12, 4, 0), 6), + (datetime64(2014, 1, 1, 12, 5, 0), 3), + (datetime64(2014, 1, 1, 12, 6, 0), 6), + (datetime64(2014, 1, 1, 12, 9, 0), 2), + (datetime64(2014, 1, 1, 12, 11, 0), 2), + (datetime64(2014, 1, 1, 12, 12, 0), 2), ], 
before_truncate_callback=functools.partial( self._resample_and_merge, agg_dict=tsc2)) - dtfrom = datetime.datetime(2014, 1, 1, 12, 0, 0) - dtto = datetime.datetime(2014, 1, 1, 12, 10, 0) + dtfrom = datetime64(2014, 1, 1, 12, 0, 0) + dtto = datetime64(2014, 1, 1, 12, 10, 0) # By default we require 100% of point that overlap # so that fail @@ -462,56 +493,50 @@ class TestAggregatedTimeSerie(base.BaseTestCase): aggregation='mean', needed_percent_of_overlap=80.0) self.assertEqual([ - (datetime.datetime( - 2014, 1, 1, 12, 1, 0 - ), 60.0, 3.0), - (datetime.datetime( - 2014, 1, 1, 12, 2, 0 - ), 60.0, 3.0), - (datetime.datetime( - 2014, 1, 1, 12, 3, 0 - ), 60.0, 4.0), - (datetime.datetime( - 2014, 1, 1, 12, 4, 0 - ), 60.0, 4.0), - (datetime.datetime( - 2014, 1, 1, 12, 5, 0 - ), 60.0, 3.0), - (datetime.datetime( - 2014, 1, 1, 12, 6, 0 - ), 60.0, 5.0), - (datetime.datetime( - 2014, 1, 1, 12, 7, 0 - ), 60.0, 10.0), - (datetime.datetime( - 2014, 1, 1, 12, 9, 0 - ), 60.0, 2.0), + (datetime64(2014, 1, 1, 12, 1, 0), + numpy.timedelta64(60, 's'), 3.0), + (datetime64(2014, 1, 1, 12, 2, 0), + numpy.timedelta64(60, 's'), 3.0), + (datetime64(2014, 1, 1, 12, 3, 0), + numpy.timedelta64(60, 's'), 4.0), + (datetime64(2014, 1, 1, 12, 4, 0), + numpy.timedelta64(60, 's'), 4.0), + (datetime64(2014, 1, 1, 12, 5, 0), + numpy.timedelta64(60, 's'), 3.0), + (datetime64(2014, 1, 1, 12, 6, 0), + numpy.timedelta64(60, 's'), 5.0), + (datetime64(2014, 1, 1, 12, 7, 0), + numpy.timedelta64(60, 's'), 10.0), + (datetime64(2014, 1, 1, 12, 9, 0), + numpy.timedelta64(60, 's'), 2.0), ], list(output)) def test_aggregated_different_archive_overlap_edge_missing1(self): - tsc1 = {'sampling': 60, 'size': 10, 'agg': 'mean'} + tsc1 = {'sampling': numpy.timedelta64(60, 's'), + 'size': 10, 'agg': 'mean'} tsb1 = carbonara.BoundTimeSerie(block_size=tsc1['sampling']) - tsc2 = {'sampling': 60, 'size': 10, 'agg': 'mean'} + tsc2 = {'sampling': numpy.timedelta64(60, 's'), + 'size': 10, 'agg': 'mean'} tsb2 = 
carbonara.BoundTimeSerie(block_size=tsc2['sampling']) tsb1.set_values([ - (datetime.datetime(2014, 1, 1, 12, 3, 0), 9), - (datetime.datetime(2014, 1, 1, 12, 4, 0), 1), - (datetime.datetime(2014, 1, 1, 12, 5, 0), 2), - (datetime.datetime(2014, 1, 1, 12, 6, 0), 7), - (datetime.datetime(2014, 1, 1, 12, 7, 0), 5), - (datetime.datetime(2014, 1, 1, 12, 8, 0), 3), + (datetime64(2014, 1, 1, 12, 3, 0), 9), + (datetime64(2014, 1, 1, 12, 4, 0), 1), + (datetime64(2014, 1, 1, 12, 5, 0), 2), + (datetime64(2014, 1, 1, 12, 6, 0), 7), + (datetime64(2014, 1, 1, 12, 7, 0), 5), + (datetime64(2014, 1, 1, 12, 8, 0), 3), ], before_truncate_callback=functools.partial( self._resample_and_merge, agg_dict=tsc1)) tsb2.set_values([ - (datetime.datetime(2014, 1, 1, 11, 0, 0), 6), - (datetime.datetime(2014, 1, 1, 12, 1, 0), 2), - (datetime.datetime(2014, 1, 1, 12, 2, 0), 13), - (datetime.datetime(2014, 1, 1, 12, 3, 0), 24), - (datetime.datetime(2014, 1, 1, 12, 4, 0), 4), - (datetime.datetime(2014, 1, 1, 12, 5, 0), 16), - (datetime.datetime(2014, 1, 1, 12, 6, 0), 12), + (datetime64(2014, 1, 1, 11, 0, 0), 6), + (datetime64(2014, 1, 1, 12, 1, 0), 2), + (datetime64(2014, 1, 1, 12, 2, 0), 13), + (datetime64(2014, 1, 1, 12, 3, 0), 24), + (datetime64(2014, 1, 1, 12, 4, 0), 4), + (datetime64(2014, 1, 1, 12, 5, 0), 16), + (datetime64(2014, 1, 1, 12, 6, 0), 12), ], before_truncate_callback=functools.partial( self._resample_and_merge, agg_dict=tsc2)) @@ -522,120 +547,137 @@ class TestAggregatedTimeSerie(base.BaseTestCase): tsc1['return'], tsc2['return']], aggregation='sum') self.assertEqual([ - (datetime.datetime( - 2014, 1, 1, 12, 3, 0 - ), 60.0, 33.0), - (datetime.datetime( - 2014, 1, 1, 12, 4, 0 - ), 60.0, 5.0), - (datetime.datetime( - 2014, 1, 1, 12, 5, 0 - ), 60.0, 18.0), - (datetime.datetime( - 2014, 1, 1, 12, 6, 0 - ), 60.0, 19.0), + (datetime64(2014, 1, 1, 12, 3, 0), + numpy.timedelta64(60, 's'), 33.0), + (datetime64(2014, 1, 1, 12, 4, 0), + numpy.timedelta64(60, 's'), 5.0), + (datetime64(2014, 1, 
1, 12, 5, 0), + numpy.timedelta64(60, 's'), 18.0), + (datetime64(2014, 1, 1, 12, 6, 0), + numpy.timedelta64(60, 's'), 19.0), ], list(output)) def test_aggregated_different_archive_overlap_edge_missing2(self): - tsc1 = {'sampling': 60, 'size': 10, 'agg': 'mean'} + tsc1 = {'sampling': numpy.timedelta64(60, 's'), + 'size': 10, 'agg': 'mean'} tsb1 = carbonara.BoundTimeSerie(block_size=tsc1['sampling']) - tsc2 = {'sampling': 60, 'size': 10, 'agg': 'mean'} + tsc2 = {'sampling': numpy.timedelta64(60, 's'), + 'size': 10, 'agg': 'mean'} tsb2 = carbonara.BoundTimeSerie(block_size=tsc2['sampling']) tsb1.set_values([ - (datetime.datetime(2014, 1, 1, 12, 3, 0), 4), + (datetime64(2014, 1, 1, 12, 3, 0), 4), ], before_truncate_callback=functools.partial( self._resample_and_merge, agg_dict=tsc1)) tsb2.set_values([ - (datetime.datetime(2014, 1, 1, 11, 0, 0), 4), - (datetime.datetime(2014, 1, 1, 12, 3, 0), 4), + (datetime64(2014, 1, 1, 11, 0, 0), 4), + (datetime64(2014, 1, 1, 12, 3, 0), 4), ], before_truncate_callback=functools.partial( self._resample_and_merge, agg_dict=tsc2)) output = carbonara.AggregatedTimeSerie.aggregated( [tsc1['return'], tsc2['return']], aggregation='mean') self.assertEqual([ - (datetime.datetime( + (datetime64( 2014, 1, 1, 12, 3, 0 - ), 60.0, 4.0), + ), numpy.timedelta64(60000000000, 'ns'), 4.0), ], list(output)) def test_fetch(self): - ts = {'sampling': 60, 'size': 10, 'agg': 'mean'} + ts = {'sampling': numpy.timedelta64(60, 's'), + 'size': 10, 'agg': 'mean'} tsb = carbonara.BoundTimeSerie(block_size=ts['sampling']) tsb.set_values([ - (datetime.datetime(2014, 1, 1, 11, 46, 4), 4), - (datetime.datetime(2014, 1, 1, 11, 47, 34), 8), - (datetime.datetime(2014, 1, 1, 11, 50, 54), 50), - (datetime.datetime(2014, 1, 1, 11, 54, 45), 4), - (datetime.datetime(2014, 1, 1, 11, 56, 49), 4), - (datetime.datetime(2014, 1, 1, 11, 57, 22), 6), - (datetime.datetime(2014, 1, 1, 11, 58, 22), 5), - (datetime.datetime(2014, 1, 1, 12, 1, 4), 4), - (datetime.datetime(2014, 1, 1, 
12, 1, 9), 7), - (datetime.datetime(2014, 1, 1, 12, 2, 1), 15), - (datetime.datetime(2014, 1, 1, 12, 2, 12), 1), - (datetime.datetime(2014, 1, 1, 12, 3, 0), 3), - (datetime.datetime(2014, 1, 1, 12, 4, 9), 7), - (datetime.datetime(2014, 1, 1, 12, 5, 1), 15), - (datetime.datetime(2014, 1, 1, 12, 5, 12), 1), - (datetime.datetime(2014, 1, 1, 12, 6, 0, 2), 3), + (datetime64(2014, 1, 1, 11, 46, 4), 4), + (datetime64(2014, 1, 1, 11, 47, 34), 8), + (datetime64(2014, 1, 1, 11, 50, 54), 50), + (datetime64(2014, 1, 1, 11, 54, 45), 4), + (datetime64(2014, 1, 1, 11, 56, 49), 4), + (datetime64(2014, 1, 1, 11, 57, 22), 6), + (datetime64(2014, 1, 1, 11, 58, 22), 5), + (datetime64(2014, 1, 1, 12, 1, 4), 4), + (datetime64(2014, 1, 1, 12, 1, 9), 7), + (datetime64(2014, 1, 1, 12, 2, 1), 15), + (datetime64(2014, 1, 1, 12, 2, 12), 1), + (datetime64(2014, 1, 1, 12, 3, 0), 3), + (datetime64(2014, 1, 1, 12, 4, 9), 7), + (datetime64(2014, 1, 1, 12, 5, 1), 15), + (datetime64(2014, 1, 1, 12, 5, 12), 1), + (datetime64(2014, 1, 1, 12, 6, 0, 2), 3), ], before_truncate_callback=functools.partial( self._resample_and_merge, agg_dict=ts)) tsb.set_values([ - (datetime.datetime(2014, 1, 1, 12, 6), 5), + (datetime64(2014, 1, 1, 12, 6), 5), ], before_truncate_callback=functools.partial( self._resample_and_merge, agg_dict=ts)) self.assertEqual([ - (datetime.datetime(2014, 1, 1, 11, 54), 60.0, 4.0), - (datetime.datetime(2014, 1, 1, 11, 56), 60.0, 4.0), - (datetime.datetime(2014, 1, 1, 11, 57), 60.0, 6.0), - (datetime.datetime(2014, 1, 1, 11, 58), 60.0, 5.0), - (datetime.datetime(2014, 1, 1, 12, 1), 60.0, 5.5), - (datetime.datetime(2014, 1, 1, 12, 2), 60.0, 8.0), - (datetime.datetime(2014, 1, 1, 12, 3), 60.0, 3.0), - (datetime.datetime(2014, 1, 1, 12, 4), 60.0, 7.0), - (datetime.datetime(2014, 1, 1, 12, 5), 60.0, 8.0), - (datetime.datetime(2014, 1, 1, 12, 6), 60.0, 4.0) + (datetime.datetime(2014, 1, 1, 11, 54), + numpy.timedelta64(60000000000, 'ns'), 4.0), + (datetime.datetime(2014, 1, 1, 11, 56), + 
numpy.timedelta64(60000000000, 'ns'), 4.0), + (datetime.datetime(2014, 1, 1, 11, 57), + numpy.timedelta64(60000000000, 'ns'), 6.0), + (datetime.datetime(2014, 1, 1, 11, 58), + numpy.timedelta64(60000000000, 'ns'), 5.0), + (datetime.datetime(2014, 1, 1, 12, 1), + numpy.timedelta64(60000000000, 'ns'), 5.5), + (datetime.datetime(2014, 1, 1, 12, 2), + numpy.timedelta64(60000000000, 'ns'), 8.0), + (datetime.datetime(2014, 1, 1, 12, 3), + numpy.timedelta64(60000000000, 'ns'), 3.0), + (datetime.datetime(2014, 1, 1, 12, 4), + numpy.timedelta64(60000000000, 'ns'), 7.0), + (datetime.datetime(2014, 1, 1, 12, 5), + numpy.timedelta64(60000000000, 'ns'), 8.0), + (datetime.datetime(2014, 1, 1, 12, 6), + numpy.timedelta64(60000000000, 'ns'), 4.0) ], list(ts['return'].fetch())) self.assertEqual([ - (datetime.datetime(2014, 1, 1, 12, 1), 60.0, 5.5), - (datetime.datetime(2014, 1, 1, 12, 2), 60.0, 8.0), - (datetime.datetime(2014, 1, 1, 12, 3), 60.0, 3.0), - (datetime.datetime(2014, 1, 1, 12, 4), 60.0, 7.0), - (datetime.datetime(2014, 1, 1, 12, 5), 60.0, 8.0), - (datetime.datetime(2014, 1, 1, 12, 6), 60.0, 4.0) - ], list(ts['return'].fetch(datetime.datetime(2014, 1, 1, 12, 0, 0)))) + (datetime.datetime(2014, 1, 1, 12, 1), + numpy.timedelta64(60000000000, 'ns'), 5.5), + (datetime.datetime(2014, 1, 1, 12, 2), + numpy.timedelta64(60000000000, 'ns'), 8.0), + (datetime.datetime(2014, 1, 1, 12, 3), + numpy.timedelta64(60000000000, 'ns'), 3.0), + (datetime.datetime(2014, 1, 1, 12, 4), + numpy.timedelta64(60000000000, 'ns'), 7.0), + (datetime.datetime(2014, 1, 1, 12, 5), + numpy.timedelta64(60000000000, 'ns'), 8.0), + (datetime.datetime(2014, 1, 1, 12, 6), + numpy.timedelta64(60000000000, 'ns'), 4.0) + ], list(ts['return'].fetch(datetime64(2014, 1, 1, 12, 0, 0)))) def test_aggregated_some_overlap_with_fill_zero(self): - tsc1 = {'sampling': 60, 'size': 10, 'agg': 'mean'} + tsc1 = {'sampling': numpy.timedelta64(60, 's'), + 'size': 10, 'agg': 'mean'} tsb1 = 
carbonara.BoundTimeSerie(block_size=tsc1['sampling']) - tsc2 = {'sampling': 60, 'size': 10, 'agg': 'mean'} + tsc2 = {'sampling': numpy.timedelta64(60, 's'), + 'size': 10, 'agg': 'mean'} tsb2 = carbonara.BoundTimeSerie(block_size=tsc2['sampling']) tsb1.set_values([ - (datetime.datetime(2014, 1, 1, 12, 3, 0), 9), - (datetime.datetime(2014, 1, 1, 12, 4, 0), 1), - (datetime.datetime(2014, 1, 1, 12, 5, 0), 2), - (datetime.datetime(2014, 1, 1, 12, 6, 0), 7), - (datetime.datetime(2014, 1, 1, 12, 7, 0), 5), - (datetime.datetime(2014, 1, 1, 12, 8, 0), 3), + (datetime64(2014, 1, 1, 12, 3, 0), 9), + (datetime64(2014, 1, 1, 12, 4, 0), 1), + (datetime64(2014, 1, 1, 12, 5, 0), 2), + (datetime64(2014, 1, 1, 12, 6, 0), 7), + (datetime64(2014, 1, 1, 12, 7, 0), 5), + (datetime64(2014, 1, 1, 12, 8, 0), 3), ], before_truncate_callback=functools.partial( self._resample_and_merge, agg_dict=tsc1)) tsb2.set_values([ - (datetime.datetime(2014, 1, 1, 12, 0, 0), 6), - (datetime.datetime(2014, 1, 1, 12, 1, 0), 2), - (datetime.datetime(2014, 1, 1, 12, 2, 0), 13), - (datetime.datetime(2014, 1, 1, 12, 3, 0), 24), - (datetime.datetime(2014, 1, 1, 12, 4, 0), 4), - (datetime.datetime(2014, 1, 1, 12, 5, 0), 16), - (datetime.datetime(2014, 1, 1, 12, 6, 0), 12), + (datetime64(2014, 1, 1, 12, 0, 0), 6), + (datetime64(2014, 1, 1, 12, 1, 0), 2), + (datetime64(2014, 1, 1, 12, 2, 0), 13), + (datetime64(2014, 1, 1, 12, 3, 0), 24), + (datetime64(2014, 1, 1, 12, 4, 0), 4), + (datetime64(2014, 1, 1, 12, 5, 0), 16), + (datetime64(2014, 1, 1, 12, 6, 0), 12), ], before_truncate_callback=functools.partial( self._resample_and_merge, agg_dict=tsc2)) @@ -643,41 +685,52 @@ class TestAggregatedTimeSerie(base.BaseTestCase): tsc1['return'], tsc2['return']], aggregation='mean', fill=0) self.assertEqual([ - (datetime.datetime(2014, 1, 1, 12, 0, 0), 60.0, 3.0), - (datetime.datetime(2014, 1, 1, 12, 1, 0), 60.0, 1.0), - (datetime.datetime(2014, 1, 1, 12, 2, 0), 60.0, 6.5), - (datetime.datetime(2014, 1, 1, 12, 3, 0), 60.0, 
16.5), - (datetime.datetime(2014, 1, 1, 12, 4, 0), 60.0, 2.5), - (datetime.datetime(2014, 1, 1, 12, 5, 0), 60.0, 9.0), - (datetime.datetime(2014, 1, 1, 12, 6, 0), 60.0, 9.5), - (datetime.datetime(2014, 1, 1, 12, 7, 0), 60.0, 2.5), - (datetime.datetime(2014, 1, 1, 12, 8, 0), 60.0, 1.5), + (datetime.datetime(2014, 1, 1, 12, 0, 0), + numpy.timedelta64(60000000000, 'ns'), 3.0), + (datetime.datetime(2014, 1, 1, 12, 1, 0), + numpy.timedelta64(60000000000, 'ns'), 1.0), + (datetime.datetime(2014, 1, 1, 12, 2, 0), + numpy.timedelta64(60000000000, 'ns'), 6.5), + (datetime.datetime(2014, 1, 1, 12, 3, 0), + numpy.timedelta64(60000000000, 'ns'), 16.5), + (datetime.datetime(2014, 1, 1, 12, 4, 0), + numpy.timedelta64(60000000000, 'ns'), 2.5), + (datetime.datetime(2014, 1, 1, 12, 5, 0), + numpy.timedelta64(60000000000, 'ns'), 9.0), + (datetime.datetime(2014, 1, 1, 12, 6, 0), + numpy.timedelta64(60000000000, 'ns'), 9.5), + (datetime.datetime(2014, 1, 1, 12, 7, 0), + numpy.timedelta64(60000000000, 'ns'), 2.5), + (datetime.datetime(2014, 1, 1, 12, 8, 0), + numpy.timedelta64(60000000000, 'ns'), 1.5), ], list(output)) def test_aggregated_some_overlap_with_fill_null(self): - tsc1 = {'sampling': 60, 'size': 10, 'agg': 'mean'} + tsc1 = {'sampling': numpy.timedelta64(60, 's'), + 'size': 10, 'agg': 'mean'} tsb1 = carbonara.BoundTimeSerie(block_size=tsc1['sampling']) - tsc2 = {'sampling': 60, 'size': 10, 'agg': 'mean'} + tsc2 = {'sampling': numpy.timedelta64(60, 's'), + 'size': 10, 'agg': 'mean'} tsb2 = carbonara.BoundTimeSerie(block_size=tsc2['sampling']) tsb1.set_values([ - (datetime.datetime(2014, 1, 1, 12, 3, 0), 9), - (datetime.datetime(2014, 1, 1, 12, 4, 0), 1), - (datetime.datetime(2014, 1, 1, 12, 5, 0), 2), - (datetime.datetime(2014, 1, 1, 12, 6, 0), 7), - (datetime.datetime(2014, 1, 1, 12, 7, 0), 5), - (datetime.datetime(2014, 1, 1, 12, 8, 0), 3), + (datetime64(2014, 1, 1, 12, 3, 0), 9), + (datetime64(2014, 1, 1, 12, 4, 0), 1), + (datetime64(2014, 1, 1, 12, 5, 0), 2), + 
(datetime64(2014, 1, 1, 12, 6, 0), 7), + (datetime64(2014, 1, 1, 12, 7, 0), 5), + (datetime64(2014, 1, 1, 12, 8, 0), 3), ], before_truncate_callback=functools.partial( self._resample_and_merge, agg_dict=tsc1)) tsb2.set_values([ - (datetime.datetime(2014, 1, 1, 12, 0, 0), 6), - (datetime.datetime(2014, 1, 1, 12, 1, 0), 2), - (datetime.datetime(2014, 1, 1, 12, 2, 0), 13), - (datetime.datetime(2014, 1, 1, 12, 3, 0), 24), - (datetime.datetime(2014, 1, 1, 12, 4, 0), 4), - (datetime.datetime(2014, 1, 1, 12, 5, 0), 16), - (datetime.datetime(2014, 1, 1, 12, 6, 0), 12), + (datetime64(2014, 1, 1, 12, 0, 0), 6), + (datetime64(2014, 1, 1, 12, 1, 0), 2), + (datetime64(2014, 1, 1, 12, 2, 0), 13), + (datetime64(2014, 1, 1, 12, 3, 0), 24), + (datetime64(2014, 1, 1, 12, 4, 0), 4), + (datetime64(2014, 1, 1, 12, 5, 0), 16), + (datetime64(2014, 1, 1, 12, 6, 0), 12), ], before_truncate_callback=functools.partial( self._resample_and_merge, agg_dict=tsc2)) @@ -685,37 +738,48 @@ class TestAggregatedTimeSerie(base.BaseTestCase): tsc1['return'], tsc2['return']], aggregation='mean', fill='null') self.assertEqual([ - (datetime.datetime(2014, 1, 1, 12, 0, 0), 60.0, 6.0), - (datetime.datetime(2014, 1, 1, 12, 1, 0), 60.0, 2.0), - (datetime.datetime(2014, 1, 1, 12, 2, 0), 60.0, 13.0), - (datetime.datetime(2014, 1, 1, 12, 3, 0), 60.0, 16.5), - (datetime.datetime(2014, 1, 1, 12, 4, 0), 60.0, 2.5), - (datetime.datetime(2014, 1, 1, 12, 5, 0), 60.0, 9.0), - (datetime.datetime(2014, 1, 1, 12, 6, 0), 60.0, 9.5), - (datetime.datetime(2014, 1, 1, 12, 7, 0), 60.0, 5.0), - (datetime.datetime(2014, 1, 1, 12, 8, 0), 60.0, 3.0), + (datetime.datetime(2014, 1, 1, 12, 0, 0), + numpy.timedelta64(60000000000, 'ns'), 6.0), + (datetime.datetime(2014, 1, 1, 12, 1, 0), + numpy.timedelta64(60000000000, 'ns'), 2.0), + (datetime.datetime(2014, 1, 1, 12, 2, 0), + numpy.timedelta64(60000000000, 'ns'), 13.0), + (datetime.datetime(2014, 1, 1, 12, 3, 0), + numpy.timedelta64(60000000000, 'ns'), 16.5), + (datetime.datetime(2014, 
1, 1, 12, 4, 0), + numpy.timedelta64(60000000000, 'ns'), 2.5), + (datetime.datetime(2014, 1, 1, 12, 5, 0), + numpy.timedelta64(60000000000, 'ns'), 9.0), + (datetime.datetime(2014, 1, 1, 12, 6, 0), + numpy.timedelta64(60000000000, 'ns'), 9.5), + (datetime.datetime(2014, 1, 1, 12, 7, 0), + numpy.timedelta64(60000000000, 'ns'), 5.0), + (datetime.datetime(2014, 1, 1, 12, 8, 0), + numpy.timedelta64(60000000000, 'ns'), 3.0), ], list(output)) def test_aggregate_no_points_with_fill_zero(self): - tsc1 = {'sampling': 60, 'size': 10, 'agg': 'mean'} + tsc1 = {'sampling': numpy.timedelta64(60, 's'), + 'size': 10, 'agg': 'mean'} tsb1 = carbonara.BoundTimeSerie(block_size=tsc1['sampling']) - tsc2 = {'sampling': 60, 'size': 10, 'agg': 'mean'} + tsc2 = {'sampling': numpy.timedelta64(60, 's'), + 'size': 10, 'agg': 'mean'} tsb2 = carbonara.BoundTimeSerie(block_size=tsc2['sampling']) tsb1.set_values([ - (datetime.datetime(2014, 1, 1, 12, 3, 0), 9), - (datetime.datetime(2014, 1, 1, 12, 4, 0), 1), - (datetime.datetime(2014, 1, 1, 12, 7, 0), 5), - (datetime.datetime(2014, 1, 1, 12, 8, 0), 3), + (datetime64(2014, 1, 1, 12, 3, 0), 9), + (datetime64(2014, 1, 1, 12, 4, 0), 1), + (datetime64(2014, 1, 1, 12, 7, 0), 5), + (datetime64(2014, 1, 1, 12, 8, 0), 3), ], before_truncate_callback=functools.partial( self._resample_and_merge, agg_dict=tsc1)) tsb2.set_values([ - (datetime.datetime(2014, 1, 1, 12, 0, 0), 6), - (datetime.datetime(2014, 1, 1, 12, 1, 0), 2), - (datetime.datetime(2014, 1, 1, 12, 2, 0), 13), - (datetime.datetime(2014, 1, 1, 12, 3, 0), 24), - (datetime.datetime(2014, 1, 1, 12, 4, 0), 4), + (datetime64(2014, 1, 1, 12, 0, 0), 6), + (datetime64(2014, 1, 1, 12, 1, 0), 2), + (datetime64(2014, 1, 1, 12, 2, 0), 13), + (datetime64(2014, 1, 1, 12, 3, 0), 24), + (datetime64(2014, 1, 1, 12, 4, 0), 4), ], before_truncate_callback=functools.partial( self._resample_and_merge, agg_dict=tsc2)) @@ -723,31 +787,39 @@ class TestAggregatedTimeSerie(base.BaseTestCase): tsc1['return'], 
tsc2['return']], aggregation='mean', fill=0) self.assertEqual([ - (datetime.datetime(2014, 1, 1, 12, 0, 0), 60.0, 3.0), - (datetime.datetime(2014, 1, 1, 12, 1, 0), 60.0, 1.0), - (datetime.datetime(2014, 1, 1, 12, 2, 0), 60.0, 6.5), - (datetime.datetime(2014, 1, 1, 12, 3, 0), 60.0, 16.5), - (datetime.datetime(2014, 1, 1, 12, 4, 0), 60.0, 2.5), - (datetime.datetime(2014, 1, 1, 12, 7, 0), 60.0, 2.5), - (datetime.datetime(2014, 1, 1, 12, 8, 0), 60.0, 1.5), + (datetime.datetime(2014, 1, 1, 12, 0, 0), + numpy.timedelta64(60000000000, 'ns'), 3.0), + (datetime.datetime(2014, 1, 1, 12, 1, 0), + numpy.timedelta64(60000000000, 'ns'), 1.0), + (datetime.datetime(2014, 1, 1, 12, 2, 0), + numpy.timedelta64(60000000000, 'ns'), 6.5), + (datetime.datetime(2014, 1, 1, 12, 3, 0), + numpy.timedelta64(60000000000, 'ns'), 16.5), + (datetime.datetime(2014, 1, 1, 12, 4, 0), + numpy.timedelta64(60000000000, 'ns'), 2.5), + (datetime.datetime(2014, 1, 1, 12, 7, 0), + numpy.timedelta64(60000000000, 'ns'), 2.5), + (datetime.datetime(2014, 1, 1, 12, 8, 0), + numpy.timedelta64(60000000000, 'ns'), 1.5), ], list(output)) def test_fetch_agg_pct(self): - ts = {'sampling': 1, 'size': 3600 * 24, 'agg': '90pct'} + ts = {'sampling': numpy.timedelta64(1, 's'), + 'size': 3600 * 24, 'agg': '90pct'} tsb = carbonara.BoundTimeSerie(block_size=ts['sampling']) - tsb.set_values([(datetime.datetime(2014, 1, 1, 12, 0, 0), 3), - (datetime.datetime(2014, 1, 1, 12, 0, 0, 123), 4), - (datetime.datetime(2014, 1, 1, 12, 0, 2), 4)], + tsb.set_values([(datetime64(2014, 1, 1, 12, 0, 0), 3), + (datetime64(2014, 1, 1, 12, 0, 0, 123), 4), + (datetime64(2014, 1, 1, 12, 0, 2), 4)], before_truncate_callback=functools.partial( self._resample_and_merge, agg_dict=ts)) - result = ts['return'].fetch(datetime.datetime(2014, 1, 1, 12, 0, 0)) + result = ts['return'].fetch(datetime64(2014, 1, 1, 12, 0, 0)) reference = [ - (datetime.datetime( + (datetime64( 2014, 1, 1, 12, 0, 0 ), 1.0, 3.9), - (datetime.datetime( + (datetime64( 2014, 1, 1, 
12, 0, 2 ), 1.0, 4) ] @@ -760,16 +832,16 @@ class TestAggregatedTimeSerie(base.BaseTestCase): # Rounding \o/ self.assertAlmostEqual(ref[2], res[2]) - tsb.set_values([(datetime.datetime(2014, 1, 1, 12, 0, 2, 113), 110)], + tsb.set_values([(datetime64(2014, 1, 1, 12, 0, 2, 113), 110)], before_truncate_callback=functools.partial( self._resample_and_merge, agg_dict=ts)) - result = ts['return'].fetch(datetime.datetime(2014, 1, 1, 12, 0, 0)) + result = ts['return'].fetch(datetime64(2014, 1, 1, 12, 0, 0)) reference = [ - (datetime.datetime( + (datetime64( 2014, 1, 1, 12, 0, 0 ), 1.0, 3.9), - (datetime.datetime( + (datetime64( 2014, 1, 1, 12, 0, 2 ), 1.0, 99.4) ] @@ -783,116 +855,120 @@ class TestAggregatedTimeSerie(base.BaseTestCase): self.assertAlmostEqual(ref[2], res[2]) def test_fetch_nano(self): - ts = {'sampling': 0.2, 'size': 10, 'agg': 'mean'} + ts = {'sampling': numpy.timedelta64(200, 'ms'), + 'size': 10, 'agg': 'mean'} tsb = carbonara.BoundTimeSerie(block_size=ts['sampling']) tsb.set_values([ - (datetime.datetime(2014, 1, 1, 11, 46, 0, 200123), 4), - (datetime.datetime(2014, 1, 1, 11, 46, 0, 340000), 8), - (datetime.datetime(2014, 1, 1, 11, 47, 0, 323154), 50), - (datetime.datetime(2014, 1, 1, 11, 48, 0, 590903), 4), - (datetime.datetime(2014, 1, 1, 11, 48, 0, 903291), 4), + (datetime64(2014, 1, 1, 11, 46, 0, 200123), 4), + (datetime64(2014, 1, 1, 11, 46, 0, 340000), 8), + (datetime64(2014, 1, 1, 11, 47, 0, 323154), 50), + (datetime64(2014, 1, 1, 11, 48, 0, 590903), 4), + (datetime64(2014, 1, 1, 11, 48, 0, 903291), 4), ], before_truncate_callback=functools.partial( self._resample_and_merge, agg_dict=ts)) tsb.set_values([ - (datetime.datetime(2014, 1, 1, 11, 48, 0, 821312), 5), + (datetime64(2014, 1, 1, 11, 48, 0, 821312), 5), ], before_truncate_callback=functools.partial( self._resample_and_merge, agg_dict=ts)) self.assertEqual([ - (datetime.datetime(2014, 1, 1, 11, 46, 0, 200000), 0.2, 6.0), - (datetime.datetime(2014, 1, 1, 11, 47, 0, 200000), 0.2, 50.0), - 
(datetime.datetime(2014, 1, 1, 11, 48, 0, 400000), 0.2, 4.0), - (datetime.datetime(2014, 1, 1, 11, 48, 0, 800000), 0.2, 4.5) + (datetime.datetime(2014, 1, 1, 11, 46, 0, 200000), + numpy.timedelta64(200000000, 'ns'), 6.0), + (datetime.datetime(2014, 1, 1, 11, 47, 0, 200000), + numpy.timedelta64(200000000, 'ns'), 50.0), + (datetime.datetime(2014, 1, 1, 11, 48, 0, 400000), + numpy.timedelta64(200000000, 'ns'), 4.0), + (datetime.datetime(2014, 1, 1, 11, 48, 0, 800000), + numpy.timedelta64(200000000, 'ns'), 4.5) ], list(ts['return'].fetch())) def test_fetch_agg_std(self): # NOTE (gordc): this is a good test to ensure we drop NaN entries # 2014-01-01 12:00:00 will appear if we don't dropna() - ts = {'sampling': 60, 'size': 60, 'agg': 'std'} + ts = {'sampling': numpy.timedelta64(60, 's'), + 'size': 60, 'agg': 'std'} tsb = carbonara.BoundTimeSerie(block_size=ts['sampling']) - tsb.set_values([(datetime.datetime(2014, 1, 1, 12, 0, 0), 3), - (datetime.datetime(2014, 1, 1, 12, 1, 4), 4), - (datetime.datetime(2014, 1, 1, 12, 1, 9), 7), - (datetime.datetime(2014, 1, 1, 12, 2, 1), 15), - (datetime.datetime(2014, 1, 1, 12, 2, 12), 1)], + tsb.set_values([(datetime64(2014, 1, 1, 12, 0, 0), 3), + (datetime64(2014, 1, 1, 12, 1, 4), 4), + (datetime64(2014, 1, 1, 12, 1, 9), 7), + (datetime64(2014, 1, 1, 12, 2, 1), 15), + (datetime64(2014, 1, 1, 12, 2, 12), 1)], before_truncate_callback=functools.partial( self._resample_and_merge, agg_dict=ts)) self.assertEqual([ (datetime.datetime( 2014, 1, 1, 12, 1, 0 - ), 60.0, 2.1213203435596424), + ), numpy.timedelta64(60000000000, 'ns'), 2.1213203435596424), (datetime.datetime( 2014, 1, 1, 12, 2, 0 - ), 60.0, 9.8994949366116654), - ], list(ts['return'].fetch(datetime.datetime(2014, 1, 1, 12, 0, 0)))) + ), numpy.timedelta64(60000000000, 'ns'), 9.8994949366116654), + ], list(ts['return'].fetch(datetime64(2014, 1, 1, 12, 0, 0)))) - tsb.set_values([(datetime.datetime(2014, 1, 1, 12, 2, 13), 110)], + tsb.set_values([(datetime64(2014, 1, 1, 12, 2, 13), 
110)], before_truncate_callback=functools.partial( self._resample_and_merge, agg_dict=ts)) self.assertEqual([ (datetime.datetime( 2014, 1, 1, 12, 1, 0 - ), 60.0, 2.1213203435596424), + ), numpy.timedelta64(60000000000, 'ns'), 2.1213203435596424), (datetime.datetime( 2014, 1, 1, 12, 2, 0 - ), 60.0, 59.304300012730948), - ], list(ts['return'].fetch(datetime.datetime(2014, 1, 1, 12, 0, 0)))) + ), numpy.timedelta64(60000000000, 'ns'), 59.304300012730948), + ], list(ts['return'].fetch(datetime64(2014, 1, 1, 12, 0, 0)))) def test_fetch_agg_max(self): - ts = {'sampling': 60, 'size': 60, 'agg': 'max'} + ts = {'sampling': numpy.timedelta64(60, 's'), + 'size': 60, 'agg': 'max'} tsb = carbonara.BoundTimeSerie(block_size=ts['sampling']) - tsb.set_values([(datetime.datetime(2014, 1, 1, 12, 0, 0), 3), - (datetime.datetime(2014, 1, 1, 12, 1, 4), 4), - (datetime.datetime(2014, 1, 1, 12, 1, 9), 7), - (datetime.datetime(2014, 1, 1, 12, 2, 1), 15), - (datetime.datetime(2014, 1, 1, 12, 2, 12), 1)], + tsb.set_values([(datetime64(2014, 1, 1, 12, 0, 0), 3), + (datetime64(2014, 1, 1, 12, 1, 4), 4), + (datetime64(2014, 1, 1, 12, 1, 9), 7), + (datetime64(2014, 1, 1, 12, 2, 1), 15), + (datetime64(2014, 1, 1, 12, 2, 12), 1)], before_truncate_callback=functools.partial( self._resample_and_merge, agg_dict=ts)) self.assertEqual([ (datetime.datetime( 2014, 1, 1, 12, 0, 0 - ), 60.0, 3), + ), numpy.timedelta64(60000000000, 'ns'), 3), (datetime.datetime( 2014, 1, 1, 12, 1, 0 - ), 60.0, 7), + ), numpy.timedelta64(60000000000, 'ns'), 7), (datetime.datetime( 2014, 1, 1, 12, 2, 0 - ), 60.0, 15), - ], list(ts['return'].fetch(datetime.datetime(2014, 1, 1, 12, 0, 0)))) + ), numpy.timedelta64(60000000000, 'ns'), 15), + ], list(ts['return'].fetch(datetime64(2014, 1, 1, 12, 0, 0)))) - tsb.set_values([(datetime.datetime(2014, 1, 1, 12, 2, 13), 110)], + tsb.set_values([(datetime64(2014, 1, 1, 12, 2, 13), 110)], before_truncate_callback=functools.partial( self._resample_and_merge, agg_dict=ts)) 
self.assertEqual([ (datetime.datetime( - 2014, 1, 1, 12, 0, 0 - ), 60.0, 3), + 2014, 1, 1, 12, 0, 0), numpy.timedelta64(60, 's'), 3), (datetime.datetime( - 2014, 1, 1, 12, 1, 0 - ), 60.0, 7), + 2014, 1, 1, 12, 1, 0), numpy.timedelta64(60, 's'), 7), (datetime.datetime( - 2014, 1, 1, 12, 2, 0 - ), 60.0, 110), - ], list(ts['return'].fetch(datetime.datetime(2014, 1, 1, 12, 0, 0)))) + 2014, 1, 1, 12, 2, 0), numpy.timedelta64(60, 's'), 110), + ], list(ts['return'].fetch(datetime64(2014, 1, 1, 12, 0, 0)))) def test_serialize(self): - ts = {'sampling': 0.5, 'agg': 'mean'} + ts = {'sampling': numpy.timedelta64(500, 'ms'), 'agg': 'mean'} tsb = carbonara.BoundTimeSerie(block_size=ts['sampling']) tsb.set_values([ - (datetime.datetime(2014, 1, 1, 12, 0, 0, 1234), 3), - (datetime.datetime(2014, 1, 1, 12, 0, 0, 321), 6), - (datetime.datetime(2014, 1, 1, 12, 1, 4, 234), 5), - (datetime.datetime(2014, 1, 1, 12, 1, 9, 32), 7), - (datetime.datetime(2014, 1, 1, 12, 2, 12, 532), 1), + (datetime64(2014, 1, 1, 12, 0, 0, 1234), 3), + (datetime64(2014, 1, 1, 12, 0, 0, 321), 6), + (datetime64(2014, 1, 1, 12, 1, 4, 234), 5), + (datetime64(2014, 1, 1, 12, 1, 9, 32), 7), + (datetime64(2014, 1, 1, 12, 2, 12, 532), 1), ], before_truncate_callback=functools.partial( self._resample_and_merge, agg_dict=ts)) @@ -903,16 +979,16 @@ class TestAggregatedTimeSerie(base.BaseTestCase): s, key, 'mean')) def test_no_truncation(self): - ts = {'sampling': 60, 'agg': 'mean'} + ts = {'sampling': numpy.timedelta64(60, 's'), 'agg': 'mean'} tsb = carbonara.BoundTimeSerie() for i in six.moves.range(1, 11): tsb.set_values([ - (datetime.datetime(2014, 1, 1, 12, i, i), float(i)) + (datetime64(2014, 1, 1, 12, i, i), float(i)) ], before_truncate_callback=functools.partial( self._resample_and_merge, agg_dict=ts)) tsb.set_values([ - (datetime.datetime(2014, 1, 1, 12, i, i + 1), float(i + 1)) + (datetime64(2014, 1, 1, 12, i, i + 1), float(i + 1)) ], before_truncate_callback=functools.partial( self._resample_and_merge, 
agg_dict=ts)) self.assertEqual(i, len(list(ts['return'].fetch()))) @@ -923,29 +999,29 @@ class TestAggregatedTimeSerie(base.BaseTestCase): Test the back window on an archive is not longer than the window we aggregate on. """ - ts = {'sampling': 1, 'size': 60, 'agg': 'mean'} + ts = {'sampling': numpy.timedelta64(1, 's'), 'size': 60, 'agg': 'mean'} tsb = carbonara.BoundTimeSerie(block_size=ts['sampling']) tsb.set_values([ - (datetime.datetime(2014, 1, 1, 12, 0, 1, 2300), 1), - (datetime.datetime(2014, 1, 1, 12, 0, 1, 4600), 2), - (datetime.datetime(2014, 1, 1, 12, 0, 2, 4500), 3), - (datetime.datetime(2014, 1, 1, 12, 0, 2, 7800), 4), - (datetime.datetime(2014, 1, 1, 12, 0, 3, 8), 2.5), + (datetime64(2014, 1, 1, 12, 0, 1, 2300), 1), + (datetime64(2014, 1, 1, 12, 0, 1, 4600), 2), + (datetime64(2014, 1, 1, 12, 0, 2, 4500), 3), + (datetime64(2014, 1, 1, 12, 0, 2, 7800), 4), + (datetime64(2014, 1, 1, 12, 0, 3, 8), 2.5), ], before_truncate_callback=functools.partial( self._resample_and_merge, agg_dict=ts)) self.assertEqual( [ - (datetime.datetime( + (datetime64( 2014, 1, 1, 12, 0, 1 - ), 1.0, 1.5), - (datetime.datetime( + ), numpy.timedelta64(1, 's'), 1.5), + (datetime64( 2014, 1, 1, 12, 0, 2 - ), 1.0, 3.5), - (datetime.datetime( + ), numpy.timedelta64(1, 's'), 3.5), + (datetime64( 2014, 1, 1, 12, 0, 3 - ), 1.0, 2.5), + ), numpy.timedelta64(1, 's'), 2.5), ], list(ts['return'].fetch())) @@ -955,15 +1031,15 @@ class TestAggregatedTimeSerie(base.BaseTestCase): Test the back window on an archive is not longer than the window we aggregate on. 
""" - ts = {'sampling': 1, 'size': 60, 'agg': 'mean'} + ts = {'sampling': numpy.timedelta64(1, 's'), 'size': 60, 'agg': 'mean'} tsb = carbonara.BoundTimeSerie(block_size=ts['sampling']) tsb.set_values([ - (datetime.datetime(2014, 1, 1, 12, 0, 1, 2300), 1), - (datetime.datetime(2014, 1, 1, 12, 0, 1, 4600), 2), - (datetime.datetime(2014, 1, 1, 12, 0, 2, 4500), 3), - (datetime.datetime(2014, 1, 1, 12, 0, 2, 7800), 4), - (datetime.datetime(2014, 1, 1, 12, 0, 3, 8), 2.5), + (datetime64(2014, 1, 1, 12, 0, 1, 2300), 1), + (datetime64(2014, 1, 1, 12, 0, 1, 4600), 2), + (datetime64(2014, 1, 1, 12, 0, 2, 4500), 3), + (datetime64(2014, 1, 1, 12, 0, 2, 7800), 4), + (datetime64(2014, 1, 1, 12, 0, 3, 8), 2.5), ], before_truncate_callback=functools.partial( self._resample_and_merge, agg_dict=ts)) @@ -971,61 +1047,65 @@ class TestAggregatedTimeSerie(base.BaseTestCase): [ (datetime.datetime( 2014, 1, 1, 12, 0, 1 - ), 1.0, 1.5), + ), numpy.timedelta64(1, 's'), 1.5), (datetime.datetime( 2014, 1, 1, 12, 0, 2 - ), 1.0, 3.5), + ), numpy.timedelta64(1, 's'), 3.5), (datetime.datetime( 2014, 1, 1, 12, 0, 3 - ), 1.0, 2.5), + ), numpy.timedelta64(1, 's'), 2.5), ], list(ts['return'].fetch())) tsb.set_values([ - (datetime.datetime(2014, 1, 1, 12, 0, 2, 99), 9), + (datetime64(2014, 1, 1, 12, 0, 2, 99), 9), ], before_truncate_callback=functools.partial( self._resample_and_merge, agg_dict=ts)) self.assertEqual( [ - (datetime.datetime( + (datetime64( 2014, 1, 1, 12, 0, 1 - ), 1.0, 1.5), - (datetime.datetime( + ), numpy.timedelta64(1, 's'), 1.5), + (datetime64( 2014, 1, 1, 12, 0, 2 - ), 1.0, 3.5), - (datetime.datetime( + ), numpy.timedelta64(1, 's'), 3.5), + (datetime64( 2014, 1, 1, 12, 0, 3 - ), 1.0, 2.5), + ), numpy.timedelta64(1, 's'), 2.5), ], list(ts['return'].fetch())) tsb.set_values([ - (datetime.datetime(2014, 1, 1, 12, 0, 2, 99), 9), - (datetime.datetime(2014, 1, 1, 12, 0, 3, 9), 4.5), + (datetime64(2014, 1, 1, 12, 0, 2, 99), 9), + (datetime64(2014, 1, 1, 12, 0, 3, 9), 4.5), ], 
before_truncate_callback=functools.partial( self._resample_and_merge, agg_dict=ts)) self.assertEqual( [ - (datetime.datetime( + (datetime64( 2014, 1, 1, 12, 0, 1 - ), 1.0, 1.5), - (datetime.datetime( + ), numpy.timedelta64(1, 's'), 1.5), + (datetime64( 2014, 1, 1, 12, 0, 2 - ), 1.0, 3.5), - (datetime.datetime( + ), numpy.timedelta64(1, 's'), 3.5), + (datetime64( 2014, 1, 1, 12, 0, 3 - ), 1.0, 3.5), + ), numpy.timedelta64(1, 's'), 3.5), ], list(ts['return'].fetch())) def test_aggregated_nominal(self): - tsc1 = {'sampling': 60, 'size': 10, 'agg': 'mean'} - tsc12 = {'sampling': 300, 'size': 6, 'agg': 'mean'} + tsc1 = {'sampling': numpy.timedelta64(60, 's'), + 'size': 10, 'agg': 'mean'} + tsc12 = {'sampling': numpy.timedelta64(300, 's'), + 'size': 6, 'agg': 'mean'} tsb1 = carbonara.BoundTimeSerie(block_size=tsc12['sampling']) - tsc2 = {'sampling': 60, 'size': 10, 'agg': 'mean'} - tsc22 = {'sampling': 300, 'size': 6, 'agg': 'mean'} + tsc2 = {'sampling': numpy.timedelta64(60, 's'), + 'size': 10, 'agg': 'mean'} + tsc22 = {'sampling': numpy.timedelta64(300, 's'), + 'size': 6, 'agg': 'mean'} tsb2 = carbonara.BoundTimeSerie(block_size=tsc22['sampling']) def ts1_update(ts): @@ -1061,83 +1141,100 @@ class TestAggregatedTimeSerie(base.BaseTestCase): tsc22['return'].merge(existing) tsb1.set_values([ - (datetime.datetime(2014, 1, 1, 11, 46, 4), 4), - (datetime.datetime(2014, 1, 1, 11, 47, 34), 8), - (datetime.datetime(2014, 1, 1, 11, 50, 54), 50), - (datetime.datetime(2014, 1, 1, 11, 54, 45), 4), - (datetime.datetime(2014, 1, 1, 11, 56, 49), 4), - (datetime.datetime(2014, 1, 1, 11, 57, 22), 6), - (datetime.datetime(2014, 1, 1, 11, 58, 22), 5), - (datetime.datetime(2014, 1, 1, 12, 1, 4), 4), - (datetime.datetime(2014, 1, 1, 12, 1, 9), 7), - (datetime.datetime(2014, 1, 1, 12, 2, 1), 15), - (datetime.datetime(2014, 1, 1, 12, 2, 12), 1), - (datetime.datetime(2014, 1, 1, 12, 3, 0), 3), - (datetime.datetime(2014, 1, 1, 12, 4, 9), 7), - (datetime.datetime(2014, 1, 1, 12, 5, 1), 15), - 
(datetime.datetime(2014, 1, 1, 12, 5, 12), 1), - (datetime.datetime(2014, 1, 1, 12, 6, 0), 3), + (datetime64(2014, 1, 1, 11, 46, 4), 4), + (datetime64(2014, 1, 1, 11, 47, 34), 8), + (datetime64(2014, 1, 1, 11, 50, 54), 50), + (datetime64(2014, 1, 1, 11, 54, 45), 4), + (datetime64(2014, 1, 1, 11, 56, 49), 4), + (datetime64(2014, 1, 1, 11, 57, 22), 6), + (datetime64(2014, 1, 1, 11, 58, 22), 5), + (datetime64(2014, 1, 1, 12, 1, 4), 4), + (datetime64(2014, 1, 1, 12, 1, 9), 7), + (datetime64(2014, 1, 1, 12, 2, 1), 15), + (datetime64(2014, 1, 1, 12, 2, 12), 1), + (datetime64(2014, 1, 1, 12, 3, 0), 3), + (datetime64(2014, 1, 1, 12, 4, 9), 7), + (datetime64(2014, 1, 1, 12, 5, 1), 15), + (datetime64(2014, 1, 1, 12, 5, 12), 1), + (datetime64(2014, 1, 1, 12, 6, 0), 3), ], before_truncate_callback=ts1_update) tsb2.set_values([ - (datetime.datetime(2014, 1, 1, 11, 46, 4), 6), - (datetime.datetime(2014, 1, 1, 11, 47, 34), 5), - (datetime.datetime(2014, 1, 1, 11, 50, 54), 51), - (datetime.datetime(2014, 1, 1, 11, 54, 45), 5), - (datetime.datetime(2014, 1, 1, 11, 56, 49), 5), - (datetime.datetime(2014, 1, 1, 11, 57, 22), 7), - (datetime.datetime(2014, 1, 1, 11, 58, 22), 5), - (datetime.datetime(2014, 1, 1, 12, 1, 4), 5), - (datetime.datetime(2014, 1, 1, 12, 1, 9), 8), - (datetime.datetime(2014, 1, 1, 12, 2, 1), 10), - (datetime.datetime(2014, 1, 1, 12, 2, 12), 2), - (datetime.datetime(2014, 1, 1, 12, 3, 0), 6), - (datetime.datetime(2014, 1, 1, 12, 4, 9), 4), - (datetime.datetime(2014, 1, 1, 12, 5, 1), 10), - (datetime.datetime(2014, 1, 1, 12, 5, 12), 1), - (datetime.datetime(2014, 1, 1, 12, 6, 0), 1), + (datetime64(2014, 1, 1, 11, 46, 4), 6), + (datetime64(2014, 1, 1, 11, 47, 34), 5), + (datetime64(2014, 1, 1, 11, 50, 54), 51), + (datetime64(2014, 1, 1, 11, 54, 45), 5), + (datetime64(2014, 1, 1, 11, 56, 49), 5), + (datetime64(2014, 1, 1, 11, 57, 22), 7), + (datetime64(2014, 1, 1, 11, 58, 22), 5), + (datetime64(2014, 1, 1, 12, 1, 4), 5), + (datetime64(2014, 1, 1, 12, 1, 9), 8), + 
(datetime64(2014, 1, 1, 12, 2, 1), 10), + (datetime64(2014, 1, 1, 12, 2, 12), 2), + (datetime64(2014, 1, 1, 12, 3, 0), 6), + (datetime64(2014, 1, 1, 12, 4, 9), 4), + (datetime64(2014, 1, 1, 12, 5, 1), 10), + (datetime64(2014, 1, 1, 12, 5, 12), 1), + (datetime64(2014, 1, 1, 12, 6, 0), 1), ], before_truncate_callback=ts2_update) output = carbonara.AggregatedTimeSerie.aggregated( [tsc1['return'], tsc12['return'], tsc2['return'], tsc22['return']], 'mean') self.assertEqual([ - (datetime.datetime(2014, 1, 1, 11, 45), 300.0, 5.75), - (datetime.datetime(2014, 1, 1, 11, 50), 300.0, 27.5), - (datetime.datetime(2014, 1, 1, 11, 55), 300.0, 5.3333333333333339), - (datetime.datetime(2014, 1, 1, 12, 0), 300.0, 6.0), - (datetime.datetime(2014, 1, 1, 12, 5), 300.0, 5.1666666666666661), - (datetime.datetime(2014, 1, 1, 11, 54), 60.0, 4.5), - (datetime.datetime(2014, 1, 1, 11, 56), 60.0, 4.5), - (datetime.datetime(2014, 1, 1, 11, 57), 60.0, 6.5), - (datetime.datetime(2014, 1, 1, 11, 58), 60.0, 5.0), - (datetime.datetime(2014, 1, 1, 12, 1), 60.0, 6.0), - (datetime.datetime(2014, 1, 1, 12, 2), 60.0, 7.0), - (datetime.datetime(2014, 1, 1, 12, 3), 60.0, 4.5), - (datetime.datetime(2014, 1, 1, 12, 4), 60.0, 5.5), - (datetime.datetime(2014, 1, 1, 12, 5), 60.0, 6.75), - (datetime.datetime(2014, 1, 1, 12, 6), 60.0, 2.0), + (datetime.datetime(2014, 1, 1, 11, 45), + numpy.timedelta64(300, 's'), 5.75), + (datetime.datetime(2014, 1, 1, 11, 50), + numpy.timedelta64(300, 's'), 27.5), + (datetime.datetime(2014, 1, 1, 11, 55), + numpy.timedelta64(300, 's'), 5.3333333333333339), + (datetime.datetime(2014, 1, 1, 12, 0), + numpy.timedelta64(300, 's'), 6.0), + (datetime.datetime(2014, 1, 1, 12, 5), + numpy.timedelta64(300, 's'), 5.1666666666666661), + (datetime.datetime(2014, 1, 1, 11, 54), + numpy.timedelta64(60, 's'), 4.5), + (datetime.datetime(2014, 1, 1, 11, 56), + numpy.timedelta64(60, 's'), 4.5), + (datetime.datetime(2014, 1, 1, 11, 57), + numpy.timedelta64(60, 's'), 6.5), + 
(datetime.datetime(2014, 1, 1, 11, 58), + numpy.timedelta64(60, 's'), 5.0), + (datetime.datetime(2014, 1, 1, 12, 1), + numpy.timedelta64(60, 's'), 6.0), + (datetime.datetime(2014, 1, 1, 12, 2), + numpy.timedelta64(60, 's'), 7.0), + (datetime.datetime(2014, 1, 1, 12, 3), + numpy.timedelta64(60, 's'), 4.5), + (datetime.datetime(2014, 1, 1, 12, 4), + numpy.timedelta64(60, 's'), 5.5), + (datetime.datetime(2014, 1, 1, 12, 5), + numpy.timedelta64(60, 's'), 6.75), + (datetime.datetime(2014, 1, 1, 12, 6), + numpy.timedelta64(60, 's'), 2.0), ], list(output)) def test_aggregated_partial_overlap(self): - tsc1 = {'sampling': 1, 'size': 86400, 'agg': 'mean'} + tsc1 = {'sampling': numpy.timedelta64(1, 's'), + 'size': 86400, 'agg': 'mean'} tsb1 = carbonara.BoundTimeSerie(block_size=tsc1['sampling']) - tsc2 = {'sampling': 1, 'size': 60, 'agg': 'mean'} + tsc2 = {'sampling': numpy.timedelta64(1, 's'), + 'size': 60, 'agg': 'mean'} tsb2 = carbonara.BoundTimeSerie(block_size=tsc2['sampling']) tsb1.set_values([ - (datetime.datetime(2015, 12, 3, 13, 19, 15), 1), - (datetime.datetime(2015, 12, 3, 13, 20, 15), 1), - (datetime.datetime(2015, 12, 3, 13, 21, 15), 1), - (datetime.datetime(2015, 12, 3, 13, 22, 15), 1), + (datetime64(2015, 12, 3, 13, 19, 15), 1), + (datetime64(2015, 12, 3, 13, 20, 15), 1), + (datetime64(2015, 12, 3, 13, 21, 15), 1), + (datetime64(2015, 12, 3, 13, 22, 15), 1), ], before_truncate_callback=functools.partial( self._resample_and_merge, agg_dict=tsc1)) tsb2.set_values([ - (datetime.datetime(2015, 12, 3, 13, 21, 15), 10), - (datetime.datetime(2015, 12, 3, 13, 22, 15), 10), - (datetime.datetime(2015, 12, 3, 13, 23, 15), 10), - (datetime.datetime(2015, 12, 3, 13, 24, 15), 10), + (datetime64(2015, 12, 3, 13, 21, 15), 10), + (datetime64(2015, 12, 3, 13, 22, 15), 10), + (datetime64(2015, 12, 3, 13, 23, 15), 10), + (datetime64(2015, 12, 3, 13, 24, 15), 10), ], before_truncate_callback=functools.partial( self._resample_and_merge, agg_dict=tsc2)) @@ -1145,16 +1242,16 @@ class 
TestAggregatedTimeSerie(base.BaseTestCase): [tsc1['return'], tsc2['return']], aggregation="sum") self.assertEqual([ - (datetime.datetime( + (datetime64( 2015, 12, 3, 13, 21, 15 - ), 1.0, 11.0), - (datetime.datetime( + ), numpy.timedelta64(1, 's'), 11.0), + (datetime64( 2015, 12, 3, 13, 22, 15 - ), 1.0, 11.0), + ), numpy.timedelta64(1, 's'), 11.0), ], list(output)) - dtfrom = datetime.datetime(2015, 12, 3, 13, 17, 0) - dtto = datetime.datetime(2015, 12, 3, 13, 25, 0) + dtfrom = datetime64(2015, 12, 3, 13, 17, 0) + dtto = datetime64(2015, 12, 3, 13, 25, 0) output = carbonara.AggregatedTimeSerie.aggregated( [tsc1['return'], tsc2['return']], @@ -1162,24 +1259,24 @@ class TestAggregatedTimeSerie(base.BaseTestCase): aggregation="sum", needed_percent_of_overlap=0) self.assertEqual([ - (datetime.datetime( + (datetime64( 2015, 12, 3, 13, 19, 15 - ), 1.0, 1.0), - (datetime.datetime( + ), numpy.timedelta64(1, 's'), 1.0), + (datetime64( 2015, 12, 3, 13, 20, 15 - ), 1.0, 1.0), - (datetime.datetime( + ), numpy.timedelta64(1, 's'), 1.0), + (datetime64( 2015, 12, 3, 13, 21, 15 - ), 1.0, 11.0), - (datetime.datetime( + ), numpy.timedelta64(1, 's'), 11.0), + (datetime64( 2015, 12, 3, 13, 22, 15 - ), 1.0, 11.0), - (datetime.datetime( + ), numpy.timedelta64(1, 's'), 11.0), + (datetime64( 2015, 12, 3, 13, 23, 15 - ), 1.0, 10.0), - (datetime.datetime( + ), numpy.timedelta64(1, 's'), 10.0), + (datetime64( 2015, 12, 3, 13, 24, 15 - ), 1.0, 10.0), + ), numpy.timedelta64(1, 's'), 10.0), ], list(output)) # By default we require 100% of point that overlap @@ -1199,18 +1296,18 @@ class TestAggregatedTimeSerie(base.BaseTestCase): aggregation="sum", needed_percent_of_overlap=50.0) self.assertEqual([ - (datetime.datetime( + (datetime64( 2015, 12, 3, 13, 19, 15 - ), 1.0, 1.0), - (datetime.datetime( + ), numpy.timedelta64(1, 's'), 1.0), + (datetime64( 2015, 12, 3, 13, 20, 15 - ), 1.0, 1.0), - (datetime.datetime( + ), numpy.timedelta64(1, 's'), 1.0), + (datetime64( 2015, 12, 3, 13, 21, 15 - ), 1.0, 
11.0), - (datetime.datetime( + ), numpy.timedelta64(1, 's'), 11.0), + (datetime64( 2015, 12, 3, 13, 22, 15 - ), 1.0, 11.0), + ), numpy.timedelta64(1, 's'), 11.0), ], list(output)) output = carbonara.AggregatedTimeSerie.aggregated( @@ -1218,57 +1315,54 @@ class TestAggregatedTimeSerie(base.BaseTestCase): aggregation="sum", needed_percent_of_overlap=50.0) self.assertEqual([ - (datetime.datetime( + (datetime64( 2015, 12, 3, 13, 21, 15 - ), 1.0, 11.0), - (datetime.datetime( + ), numpy.timedelta64(1, 's'), 11.0), + (datetime64( 2015, 12, 3, 13, 22, 15 - ), 1.0, 11.0), - (datetime.datetime( + ), numpy.timedelta64(1, 's'), 11.0), + (datetime64( 2015, 12, 3, 13, 23, 15 - ), 1.0, 10.0), - (datetime.datetime( + ), numpy.timedelta64(1, 's'), 10.0), + (datetime64( 2015, 12, 3, 13, 24, 15 - ), 1.0, 10.0), + ), numpy.timedelta64(1, 's'), 10.0), ], list(output)) def test_split_key(self): self.assertEqual( - datetime.datetime(2014, 10, 7), + numpy.datetime64("2014-10-07"), carbonara.SplitKey.from_timestamp_and_sampling( - datetime.datetime(2015, 1, 1, 15, 3), 3600).as_datetime()) + numpy.datetime64("2015-01-01T15:03"), + numpy.timedelta64(3600, 's'))) self.assertEqual( - datetime.datetime(2014, 12, 31, 18), + numpy.datetime64("2014-12-31 18:00"), carbonara.SplitKey.from_timestamp_and_sampling( - datetime.datetime(2015, 1, 1, 15, 3), 58).as_datetime()) - self.assertEqual( - 1420048800.0, - float(carbonara.SplitKey.from_timestamp_and_sampling( - datetime.datetime(2015, 1, 1, 15, 3), 58))) + numpy.datetime64("2015-01-01 15:03:58"), + numpy.timedelta64(58, 's'))) key = carbonara.SplitKey.from_timestamp_and_sampling( - datetime.datetime(2015, 1, 1, 15, 3), 3600) + numpy.datetime64("2015-01-01 15:03"), + numpy.timedelta64(3600, 's')) - self.assertGreater(key, pandas.Timestamp(0)) + self.assertGreater(key, numpy.datetime64("1970")) - self.assertGreaterEqual(key, pandas.Timestamp(0)) + self.assertGreaterEqual(key, numpy.datetime64("1970")) def test_split_key_next(self): self.assertEqual( 
- datetime.datetime(2015, 3, 6), + numpy.datetime64("2015-03-06"), next(carbonara.SplitKey.from_timestamp_and_sampling( - datetime.datetime(2015, 1, 1, 15, 3), 3600)).as_datetime()) + numpy.datetime64("2015-01-01 15:03"), + numpy.timedelta64(3600, 's')))) self.assertEqual( - datetime.datetime(2015, 8, 3), + numpy.datetime64("2015-08-03"), next(next(carbonara.SplitKey.from_timestamp_and_sampling( - datetime.datetime(2015, 1, 1, 15, 3), 3600))).as_datetime()) - self.assertEqual( - 113529600000.0, - float(next(carbonara.SplitKey.from_timestamp_and_sampling( - datetime.datetime(2015, 1, 1, 15, 3), 3600 * 24 * 365)))) + numpy.datetime64("2015-01-01T15:03"), + numpy.timedelta64(3600, 's'))))) def test_split(self): - sampling = 5 + sampling = numpy.timedelta64(5, 's') points = 100000 ts = carbonara.TimeSerie.from_data( timestamps=map(datetime.datetime.utcfromtimestamp, @@ -1279,19 +1373,19 @@ class TestAggregatedTimeSerie(base.BaseTestCase): grouped_points = list(agg.split()) self.assertEqual( - math.ceil((points / float(sampling)) + math.ceil((points / sampling.astype(float)) / carbonara.SplitKey.POINTS_PER_SPLIT), len(grouped_points)) self.assertEqual("0.0", str(carbonara.SplitKey(grouped_points[0][0], 0))) # 3600 × 5s = 5 hours - self.assertEqual(datetime.datetime(1970, 1, 1, 5), - grouped_points[1][0].as_datetime()) + self.assertEqual(datetime64(1970, 1, 1, 5), + grouped_points[1][0]) self.assertEqual(carbonara.SplitKey.POINTS_PER_SPLIT, len(grouped_points[0][1])) def test_from_timeseries(self): - sampling = 5 + sampling = numpy.timedelta64(5, 's') points = 100000 ts = carbonara.TimeSerie.from_data( timestamps=map(datetime.datetime.utcfromtimestamp, @@ -1310,16 +1404,16 @@ class TestAggregatedTimeSerie(base.BaseTestCase): def test_resample(self): ts = carbonara.TimeSerie.from_data( - [datetime.datetime(2014, 1, 1, 12, 0, 0), - datetime.datetime(2014, 1, 1, 12, 0, 4), - datetime.datetime(2014, 1, 1, 12, 0, 9), - datetime.datetime(2014, 1, 1, 12, 0, 11), - 
datetime.datetime(2014, 1, 1, 12, 0, 12)], + [datetime64(2014, 1, 1, 12, 0, 0), + datetime64(2014, 1, 1, 12, 0, 4), + datetime64(2014, 1, 1, 12, 0, 9), + datetime64(2014, 1, 1, 12, 0, 11), + datetime64(2014, 1, 1, 12, 0, 12)], [3, 5, 6, 2, 4]) - agg_ts = self._resample(ts, 5, 'mean') + agg_ts = self._resample(ts, numpy.timedelta64(5, 's'), 'mean') self.assertEqual(3, len(agg_ts)) - agg_ts = agg_ts.resample(10) + agg_ts = agg_ts.resample(numpy.timedelta64(10, 's')) self.assertEqual(2, len(agg_ts)) self.assertEqual(5, agg_ts[0]) self.assertEqual(3, agg_ts[1]) diff --git a/gnocchi/tests/test_indexer.py b/gnocchi/tests/test_indexer.py index c027dc06..37388ba0 100644 --- a/gnocchi/tests/test_indexer.py +++ b/gnocchi/tests/test_indexer.py @@ -18,6 +18,7 @@ import operator import uuid import mock +import numpy from gnocchi import archive_policy from gnocchi import indexer @@ -55,9 +56,15 @@ class TestIndexerDriver(tests_base.TestCase): 'aggregation_methods': set(self.conf.archive_policy.default_aggregation_methods), 'definition': [ - {u'granularity': 300, u'points': 12, u'timespan': 3600}, - {u'granularity': 3600, u'points': 24, u'timespan': 86400}, - {u'granularity': 86400, u'points': 30, u'timespan': 2592000}], + {u'granularity': numpy.timedelta64(5, 'm'), + u'points': 12, + u'timespan': numpy.timedelta64(3600, 's')}, + {u'granularity': numpy.timedelta64(3600, 's'), + u'points': 24, + u'timespan': numpy.timedelta64(86400, 's')}, + {u'granularity': numpy.timedelta64(86400, 's'), + u'points': 30, + u'timespan': numpy.timedelta64(2592000, 's')}], 'name': u'low'}, dict(ap)) def test_update_archive_policy(self): @@ -88,9 +95,15 @@ class TestIndexerDriver(tests_base.TestCase): 'aggregation_methods': set(self.conf.archive_policy.default_aggregation_methods), 'definition': [ - {u'granularity': 300, u'points': 6, u'timespan': 1800}, - {u'granularity': 3600, u'points': 24, u'timespan': 86400}, - {u'granularity': 86400, u'points': 30, u'timespan': 2592000}], + {u'granularity': 
numpy.timedelta64(300, 's'), + u'points': 6, + u'timespan': numpy.timedelta64(1800, 's')}, + {u'granularity': numpy.timedelta64(3600, 's'), + u'points': 24, + u'timespan': numpy.timedelta64(86400, 's')}, + {u'granularity': numpy.timedelta64(86400, 's'), + u'points': 30, + u'timespan': numpy.timedelta64(2592000, 's')}], 'name': apname}, dict(ap)) ap = self.index.update_archive_policy( apname, [archive_policy.ArchivePolicyItem(granularity=300, @@ -104,9 +117,15 @@ class TestIndexerDriver(tests_base.TestCase): 'aggregation_methods': set(self.conf.archive_policy.default_aggregation_methods), 'definition': [ - {u'granularity': 300, u'points': 12, u'timespan': 3600}, - {u'granularity': 3600, u'points': 24, u'timespan': 86400}, - {u'granularity': 86400, u'points': 30, u'timespan': 2592000}], + {u'granularity': numpy.timedelta64(300, 's'), + u'points': 12, + u'timespan': numpy.timedelta64(3600, 's')}, + {u'granularity': numpy.timedelta64(3600, 's'), + u'points': 24, + u'timespan': numpy.timedelta64(86400, 's')}, + {u'granularity': numpy.timedelta64(86400, 's'), + u'points': 30, + u'timespan': numpy.timedelta64(2592000, 's')}], 'name': apname}, dict(ap)) def test_delete_archive_policy(self): diff --git a/gnocchi/tests/test_statsd.py b/gnocchi/tests/test_statsd.py index 0fe8e41f..6f292146 100644 --- a/gnocchi/tests/test_statsd.py +++ b/gnocchi/tests/test_statsd.py @@ -17,6 +17,7 @@ import uuid import mock +import numpy from gnocchi import indexer from gnocchi import statsd @@ -71,9 +72,15 @@ class TestStatsd(tests_base.TestCase): measures = self.storage.get_measures(metric) self.assertEqual([ - (utils.datetime_utc(2015, 1, 7), 86400.0, 1.0), - (utils.datetime_utc(2015, 1, 7, 13), 3600.0, 1.0), - (utils.datetime_utc(2015, 1, 7, 13, 58), 60.0, 1.0) + (utils.datetime_utc(2015, 1, 7), + numpy.timedelta64(1, 'D'), + 1.0), + (utils.datetime_utc(2015, 1, 7, 13), + numpy.timedelta64(1, 'h'), + 1.0), + (utils.datetime_utc(2015, 1, 7, 13, 58), + numpy.timedelta64(1, 'm'), + 1.0) ], 
measures) utcnow.return_value = utils.datetime_utc(2015, 1, 7, 13, 59, 37) @@ -92,10 +99,18 @@ class TestStatsd(tests_base.TestCase): measures = self.storage.get_measures(metric) self.assertEqual([ - (utils.datetime_utc(2015, 1, 7), 86400.0, 1.5), - (utils.datetime_utc(2015, 1, 7, 13), 3600.0, 1.5), - (utils.datetime_utc(2015, 1, 7, 13, 58), 60.0, 1.0), - (utils.datetime_utc(2015, 1, 7, 13, 59), 60.0, 2.0) + (utils.datetime_utc(2015, 1, 7), + numpy.timedelta64(1, 'D'), + 1.5), + (utils.datetime_utc(2015, 1, 7, 13), + numpy.timedelta64(1, 'h'), + 1.5), + (utils.datetime_utc(2015, 1, 7, 13, 58), + numpy.timedelta64(1, 'm'), + 1.0), + (utils.datetime_utc(2015, 1, 7, 13, 59), + numpy.timedelta64(1, 'm'), + 2.0) ], measures) def test_gauge(self): @@ -126,9 +141,15 @@ class TestStatsd(tests_base.TestCase): measures = self.storage.get_measures(metric) self.assertEqual([ - (utils.datetime_utc(2015, 1, 7), 86400.0, 1.0), - (utils.datetime_utc(2015, 1, 7, 13), 3600.0, 1.0), - (utils.datetime_utc(2015, 1, 7, 13, 58), 60.0, 1.0)], measures) + (utils.datetime_utc(2015, 1, 7), + numpy.timedelta64(1, 'D'), + 1.0), + (utils.datetime_utc(2015, 1, 7, 13), + numpy.timedelta64(1, 'h'), + 1.0), + (utils.datetime_utc(2015, 1, 7, 13, 58), + numpy.timedelta64(1, 'm'), + 1.0)], measures) utcnow.return_value = utils.datetime_utc(2015, 1, 7, 13, 59, 37) self.server.datagram_received( @@ -145,10 +166,18 @@ class TestStatsd(tests_base.TestCase): measures = self.storage.get_measures(metric) self.assertEqual([ - (utils.datetime_utc(2015, 1, 7), 86400.0, 28), - (utils.datetime_utc(2015, 1, 7, 13), 3600.0, 28), - (utils.datetime_utc(2015, 1, 7, 13, 58), 60.0, 1.0), - (utils.datetime_utc(2015, 1, 7, 13, 59), 60.0, 55.0)], measures) + (utils.datetime_utc(2015, 1, 7), + numpy.timedelta64(1, 'D'), + 28), + (utils.datetime_utc(2015, 1, 7, 13), + numpy.timedelta64(1, 'h'), + 28), + (utils.datetime_utc(2015, 1, 7, 13, 58), + numpy.timedelta64(1, 'm'), + 1.0), + (utils.datetime_utc(2015, 1, 7, 13, 59), + 
numpy.timedelta64(1, 'm'), + 55.0)], measures) class TestStatsdArchivePolicyRule(TestStatsd): diff --git a/gnocchi/tests/test_storage.py b/gnocchi/tests/test_storage.py index db42983d..67c0a65d 100644 --- a/gnocchi/tests/test_storage.py +++ b/gnocchi/tests/test_storage.py @@ -16,8 +16,8 @@ import datetime import uuid -import iso8601 import mock +import numpy import six.moves from gnocchi import archive_policy @@ -35,6 +35,10 @@ from gnocchi.tests import utils as tests_utils from gnocchi import utils +def datetime64(*args): + return numpy.datetime64(datetime.datetime(*args)) + + class TestStorageDriver(tests_base.TestCase): def setUp(self): super(TestStorageDriver, self).setUp() @@ -95,9 +99,12 @@ class TestStorageDriver(tests_base.TestCase): self.trigger_processing() m = self.storage.get_measures(self.metric) - self.assertIn((utils.datetime_utc(2014, 1, 1), 86400.0, 1), m) - self.assertIn((utils.datetime_utc(2014, 1, 1, 13), 3600.0, 1), m) - self.assertIn((utils.datetime_utc(2014, 1, 1, 13), 300.0, 1), m) + self.assertIn((utils.datetime_utc(2014, 1, 1), + numpy.timedelta64(1, 'D'), 1), m) + self.assertIn((utils.datetime_utc(2014, 1, 1, 13), + numpy.timedelta64(1, 'h'), 1), m) + self.assertIn((utils.datetime_utc(2014, 1, 1, 13), + numpy.timedelta64(5, 'm'), 1), m) def test_aborted_initial_processing(self): self.incoming.add_measures(self.metric, [ @@ -115,9 +122,12 @@ class TestStorageDriver(tests_base.TestCase): self.assertFalse(LOG.error.called) m = self.storage.get_measures(self.metric) - self.assertIn((utils.datetime_utc(2014, 1, 1), 86400.0, 5.0), m) - self.assertIn((utils.datetime_utc(2014, 1, 1, 12), 3600.0, 5.0), m) - self.assertIn((utils.datetime_utc(2014, 1, 1, 12), 300.0, 5.0), m) + self.assertIn((utils.datetime_utc(2014, 1, 1), + numpy.timedelta64(1, 'D'), 5.0), m) + self.assertIn((utils.datetime_utc(2014, 1, 1, 12), + numpy.timedelta64(1, 'h'), 5.0), m) + self.assertIn((utils.datetime_utc(2014, 1, 1, 12), + numpy.timedelta64(5, 'm'), 5.0), m) def 
test_list_metric_with_measures_to_process(self): metrics = tests_utils.list_all_incoming_metrics(self.incoming) @@ -231,7 +241,7 @@ class TestStorageDriver(tests_base.TestCase): args = call[1] if (args[0] == m_sql and args[2] == 'mean' - and args[1].sampling == 60.0): + and args[1].sampling == numpy.timedelta64(1, 'm')): count += 1 self.assertEqual(1, count) @@ -265,11 +275,16 @@ class TestStorageDriver(tests_base.TestCase): self.trigger_processing() self.assertEqual([ - (utils.datetime_utc(2014, 1, 1), 86400.0, 39.75), - (utils.datetime_utc(2014, 1, 1, 12), 3600.0, 39.75), - (utils.datetime_utc(2014, 1, 1, 12), 300.0, 69.0), - (utils.datetime_utc(2014, 1, 1, 12, 5), 300.0, 23.0), - (utils.datetime_utc(2014, 1, 1, 12, 10), 300.0, 44.0), + (utils.datetime_utc(2014, 1, 1), + numpy.timedelta64(1, 'D'), 39.75), + (utils.datetime_utc(2014, 1, 1, 12), + numpy.timedelta64(1, 'h'), 39.75), + (utils.datetime_utc(2014, 1, 1, 12), + numpy.timedelta64(5, 'm'), 69.0), + (utils.datetime_utc(2014, 1, 1, 12, 5), + numpy.timedelta64(5, 'm'), 23.0), + (utils.datetime_utc(2014, 1, 1, 12, 10), + numpy.timedelta64(5, 'm'), 44.0), ], self.storage.get_measures(self.metric)) # One year later… @@ -279,21 +294,31 @@ class TestStorageDriver(tests_base.TestCase): self.trigger_processing() self.assertEqual([ - (utils.datetime_utc(2014, 1, 1), 86400.0, 39.75), - (utils.datetime_utc(2015, 1, 1), 86400.0, 69), - (utils.datetime_utc(2015, 1, 1, 12), 3600.0, 69), - (utils.datetime_utc(2015, 1, 1, 12), 300.0, 69), + (utils.datetime_utc(2014, 1, 1), + numpy.timedelta64(1, 'D'), 39.75), + (utils.datetime_utc(2015, 1, 1), + numpy.timedelta64(1, 'D'), 69), + (utils.datetime_utc(2015, 1, 1, 12), + numpy.timedelta64(1, 'h'), 69), + (utils.datetime_utc(2015, 1, 1, 12), + numpy.timedelta64(5, 'm'), 69), ], self.storage.get_measures(self.metric)) - self.assertEqual({carbonara.SplitKey("1244160000.0", 86400)}, - self.storage._list_split_keys_for_metric( - self.metric, "mean", 86400.0)) - 
self.assertEqual({carbonara.SplitKey("1412640000.0", 3600)}, - self.storage._list_split_keys_for_metric( - self.metric, "mean", 3600.0)) - self.assertEqual({carbonara.SplitKey("1419120000.0", 300)}, - self.storage._list_split_keys_for_metric( - self.metric, "mean", 300.0)) + self.assertEqual({ + carbonara.SplitKey(numpy.datetime64(1244160000, 's'), + numpy.timedelta64(1, 'D')), + }, self.storage._list_split_keys_for_metric( + self.metric, "mean", numpy.timedelta64(1, 'D'))) + self.assertEqual({ + carbonara.SplitKey(numpy.datetime64(1412640000, 's'), + numpy.timedelta64(1, 'h')), + }, self.storage._list_split_keys_for_metric( + self.metric, "mean", numpy.timedelta64(1, 'h'))) + self.assertEqual({ + carbonara.SplitKey(numpy.datetime64(1419120000, 's'), + numpy.timedelta64(5, 'm')), + }, self.storage._list_split_keys_for_metric( + self.metric, "mean", numpy.timedelta64(5, 'm'))) def test_rewrite_measures(self): # Create an archive policy that spans on several splits. Each split @@ -315,10 +340,14 @@ class TestStorageDriver(tests_base.TestCase): self.trigger_processing() self.assertEqual({ - carbonara.SplitKey(1451520000.0, 60), - carbonara.SplitKey(1451736000.0, 60), - carbonara.SplitKey(1451952000.0, 60), - }, self.storage._list_split_keys_for_metric(self.metric, "mean", 60.0)) + carbonara.SplitKey(numpy.datetime64(1451520000, 's'), + numpy.timedelta64(1, 'm')), + carbonara.SplitKey(numpy.datetime64(1451736000, 's'), + numpy.timedelta64(1, 'm')), + carbonara.SplitKey(numpy.datetime64(1451952000, 's'), + numpy.timedelta64(1, 'm')), + }, self.storage._list_split_keys_for_metric( + self.metric, "mean", numpy.timedelta64(1, 'm'))) if self.storage.WRITE_FULL: assertCompressedIfWriteFull = self.assertTrue @@ -326,22 +355,36 @@ class TestStorageDriver(tests_base.TestCase): assertCompressedIfWriteFull = self.assertFalse data = self.storage._get_measures( - self.metric, carbonara.SplitKey(1451520000.0, 60.0), "mean") + self.metric, carbonara.SplitKey( + 
numpy.datetime64(1451520000, 's'), + numpy.timedelta64(1, 'm'), + ), "mean") self.assertTrue(carbonara.AggregatedTimeSerie.is_compressed(data)) data = self.storage._get_measures( - self.metric, carbonara.SplitKey(1451736000.0, 60.0), "mean") + self.metric, carbonara.SplitKey( + numpy.datetime64(1451736000, 's'), + numpy.timedelta64(60, 's'), + ), "mean") self.assertTrue(carbonara.AggregatedTimeSerie.is_compressed(data)) data = self.storage._get_measures( - self.metric, carbonara.SplitKey(1451952000.0, 60.0), "mean") + self.metric, carbonara.SplitKey( + numpy.datetime64(1451952000, 's'), + numpy.timedelta64(60, 's'), + ), "mean") assertCompressedIfWriteFull( carbonara.AggregatedTimeSerie.is_compressed(data)) self.assertEqual([ - (utils.datetime_utc(2016, 1, 1, 12), 60.0, 69), - (utils.datetime_utc(2016, 1, 2, 13, 7), 60.0, 42), - (utils.datetime_utc(2016, 1, 4, 14, 9), 60.0, 4), - (utils.datetime_utc(2016, 1, 6, 15, 12), 60.0, 44), - ], self.storage.get_measures(self.metric, granularity=60.0)) + (utils.datetime_utc(2016, 1, 1, 12), + numpy.timedelta64(1, 'm'), 69), + (utils.datetime_utc(2016, 1, 2, 13, 7), + numpy.timedelta64(1, 'm'), 42), + (utils.datetime_utc(2016, 1, 4, 14, 9), + numpy.timedelta64(1, 'm'), 4), + (utils.datetime_utc(2016, 1, 6, 15, 12), + numpy.timedelta64(1, 'm'), 44), + ], self.storage.get_measures(self.metric, + granularity=numpy.timedelta64(1, 'm'))) # Now store brand new points that should force a rewrite of one of the # split (keep in mind the back window size in one hour here). 
We move @@ -354,34 +397,58 @@ class TestStorageDriver(tests_base.TestCase): self.trigger_processing() self.assertEqual({ - carbonara.SplitKey(1452384000.0, 60), - carbonara.SplitKey(1451736000.0, 60), - carbonara.SplitKey(1451520000.0, 60), - carbonara.SplitKey(1451952000.0, 60), - }, self.storage._list_split_keys_for_metric(self.metric, "mean", 60.0)) + carbonara.SplitKey(numpy.datetime64(1452384000, 's'), + numpy.timedelta64(1, 'm')), + carbonara.SplitKey(numpy.datetime64(1451736000, 's'), + numpy.timedelta64(1, 'm')), + carbonara.SplitKey(numpy.datetime64(1451520000, 's'), + numpy.timedelta64(1, 'm')), + carbonara.SplitKey(numpy.datetime64(1451952000, 's'), + numpy.timedelta64(1, 'm')), + }, self.storage._list_split_keys_for_metric( + self.metric, "mean", numpy.timedelta64(1, 'm'))) data = self.storage._get_measures( - self.metric, carbonara.SplitKey(1451520000.0, 60.0), "mean") + self.metric, carbonara.SplitKey( + numpy.datetime64(1451520000, 's'), + numpy.timedelta64(60, 's'), + ), "mean") self.assertTrue(carbonara.AggregatedTimeSerie.is_compressed(data)) data = self.storage._get_measures( - self.metric, carbonara.SplitKey(1451736000.0, 60.0), "mean") + self.metric, carbonara.SplitKey( + numpy.datetime64(1451736000, 's'), + numpy.timedelta64(60, 's'), + ), "mean") self.assertTrue(carbonara.AggregatedTimeSerie.is_compressed(data)) data = self.storage._get_measures( - self.metric, carbonara.SplitKey(1451952000.0, 60.0), "mean") + self.metric, carbonara.SplitKey( + numpy.datetime64(1451952000, 's'), + numpy.timedelta64(1, 'm'), + ), "mean") # Now this one is compressed because it has been rewritten! 
self.assertTrue(carbonara.AggregatedTimeSerie.is_compressed(data)) data = self.storage._get_measures( - self.metric, carbonara.SplitKey(1452384000.0, 60.0), "mean") + self.metric, carbonara.SplitKey( + numpy.datetime64(1452384000, 's'), + numpy.timedelta64(60, 's'), + ), "mean") assertCompressedIfWriteFull( carbonara.AggregatedTimeSerie.is_compressed(data)) self.assertEqual([ - (utils.datetime_utc(2016, 1, 1, 12), 60.0, 69), - (utils.datetime_utc(2016, 1, 2, 13, 7), 60.0, 42), - (utils.datetime_utc(2016, 1, 4, 14, 9), 60.0, 4), - (utils.datetime_utc(2016, 1, 6, 15, 12), 60.0, 44), - (utils.datetime_utc(2016, 1, 10, 16, 18), 60.0, 45), - (utils.datetime_utc(2016, 1, 10, 17, 12), 60.0, 46), - ], self.storage.get_measures(self.metric, granularity=60.0)) + (utils.datetime_utc(2016, 1, 1, 12), + numpy.timedelta64(1, 'm'), 69), + (utils.datetime_utc(2016, 1, 2, 13, 7), + numpy.timedelta64(1, 'm'), 42), + (utils.datetime_utc(2016, 1, 4, 14, 9), + numpy.timedelta64(1, 'm'), 4), + (utils.datetime_utc(2016, 1, 6, 15, 12), + numpy.timedelta64(1, 'm'), 44), + (utils.datetime_utc(2016, 1, 10, 16, 18), + numpy.timedelta64(1, 'm'), 45), + (utils.datetime_utc(2016, 1, 10, 17, 12), + numpy.timedelta64(1, 'm'), 46), + ], self.storage.get_measures(self.metric, + granularity=numpy.timedelta64(1, 'm'))) def test_rewrite_measures_oldest_mutable_timestamp_eq_next_key(self): """See LP#1655422""" @@ -404,10 +471,14 @@ class TestStorageDriver(tests_base.TestCase): self.trigger_processing() self.assertEqual({ - carbonara.SplitKey(1451520000.0, 60), - carbonara.SplitKey(1451736000.0, 60), - carbonara.SplitKey(1451952000.0, 60), - }, self.storage._list_split_keys_for_metric(self.metric, "mean", 60.0)) + carbonara.SplitKey(numpy.datetime64(1451520000, 's'), + numpy.timedelta64(1, 'm')), + carbonara.SplitKey(numpy.datetime64(1451736000, 's'), + numpy.timedelta64(1, 'm')), + carbonara.SplitKey(numpy.datetime64(1451952000, 's'), + numpy.timedelta64(1, 'm')), + }, 
self.storage._list_split_keys_for_metric( + self.metric, "mean", numpy.timedelta64(1, 'm'))) if self.storage.WRITE_FULL: assertCompressedIfWriteFull = self.assertTrue @@ -415,22 +486,36 @@ class TestStorageDriver(tests_base.TestCase): assertCompressedIfWriteFull = self.assertFalse data = self.storage._get_measures( - self.metric, carbonara.SplitKey(1451520000.0, 60.0), "mean") + self.metric, carbonara.SplitKey( + numpy.datetime64(1451520000, 's'), + numpy.timedelta64(1, 'm'), + ), "mean") self.assertTrue(carbonara.AggregatedTimeSerie.is_compressed(data)) data = self.storage._get_measures( - self.metric, carbonara.SplitKey(1451736000.0, 60.0), "mean") + self.metric, carbonara.SplitKey( + numpy.datetime64(1451736000, 's'), + numpy.timedelta64(1, 'm'), + ), "mean") self.assertTrue(carbonara.AggregatedTimeSerie.is_compressed(data)) data = self.storage._get_measures( - self.metric, carbonara.SplitKey(1451952000.0, 60.0), "mean") + self.metric, carbonara.SplitKey( + numpy.datetime64(1451952000, 's'), + numpy.timedelta64(1, 'm') + ), "mean") assertCompressedIfWriteFull( carbonara.AggregatedTimeSerie.is_compressed(data)) self.assertEqual([ - (utils.datetime_utc(2016, 1, 1, 12), 60.0, 69), - (utils.datetime_utc(2016, 1, 2, 13, 7), 60.0, 42), - (utils.datetime_utc(2016, 1, 4, 14, 9), 60.0, 4), - (utils.datetime_utc(2016, 1, 6, 15, 12), 60.0, 44), - ], self.storage.get_measures(self.metric, granularity=60.0)) + (utils.datetime_utc(2016, 1, 1, 12), + numpy.timedelta64(1, 'm'), 69), + (utils.datetime_utc(2016, 1, 2, 13, 7), + numpy.timedelta64(1, 'm'), 42), + (utils.datetime_utc(2016, 1, 4, 14, 9), + numpy.timedelta64(1, 'm'), 4), + (utils.datetime_utc(2016, 1, 6, 15, 12), + numpy.timedelta64(1, 'm'), 44), + ], self.storage.get_measures(self.metric, + granularity=numpy.timedelta64(60, 's'))) # Now store brand new points that should force a rewrite of one of the # split (keep in mind the back window size in one hour here). 
We move @@ -445,33 +530,56 @@ class TestStorageDriver(tests_base.TestCase): self.trigger_processing() self.assertEqual({ - carbonara.SplitKey(1452384000.0, 60), - carbonara.SplitKey(1451736000.0, 60), - carbonara.SplitKey(1451520000.0, 60), - carbonara.SplitKey(1451952000.0, 60), - }, self.storage._list_split_keys_for_metric(self.metric, "mean", 60.0)) + carbonara.SplitKey(numpy.datetime64('2016-01-10T00:00:00'), + numpy.timedelta64(1, 'm')), + carbonara.SplitKey(numpy.datetime64('2016-01-02T12:00:00'), + numpy.timedelta64(1, 'm')), + carbonara.SplitKey(numpy.datetime64('2015-12-31T00:00:00'), + numpy.timedelta64(1, 'm')), + carbonara.SplitKey(numpy.datetime64('2016-01-05T00:00:00'), + numpy.timedelta64(1, 'm')), + }, self.storage._list_split_keys_for_metric( + self.metric, "mean", numpy.timedelta64(1, 'm'))) data = self.storage._get_measures( - self.metric, carbonara.SplitKey(1451520000.0, 60.0), "mean") + self.metric, carbonara.SplitKey( + numpy.datetime64(1451520000, 's'), + numpy.timedelta64(1, 'm'), + ), "mean") self.assertTrue(carbonara.AggregatedTimeSerie.is_compressed(data)) data = self.storage._get_measures( - self.metric, carbonara.SplitKey(1451736000.0, 60.0), "mean") + self.metric, carbonara.SplitKey( + numpy.datetime64(1451736000, 's'), + numpy.timedelta64(1, 'm'), + ), "mean") self.assertTrue(carbonara.AggregatedTimeSerie.is_compressed(data)) data = self.storage._get_measures( - self.metric, carbonara.SplitKey(1451952000.0, 60.0), "mean") + self.metric, carbonara.SplitKey( + numpy.datetime64(1451952000, 's'), + numpy.timedelta64(60, 's') + ), "mean") # Now this one is compressed because it has been rewritten! 
self.assertTrue(carbonara.AggregatedTimeSerie.is_compressed(data)) data = self.storage._get_measures( - self.metric, carbonara.SplitKey(1452384000.0, 60.0), "mean") + self.metric, carbonara.SplitKey( + numpy.datetime64(1452384000, 's'), + numpy.timedelta64(1, 'm'), + ), "mean") assertCompressedIfWriteFull( carbonara.AggregatedTimeSerie.is_compressed(data)) self.assertEqual([ - (utils.datetime_utc(2016, 1, 1, 12), 60.0, 69), - (utils.datetime_utc(2016, 1, 2, 13, 7), 60.0, 42), - (utils.datetime_utc(2016, 1, 4, 14, 9), 60.0, 4), - (utils.datetime_utc(2016, 1, 6, 15, 12), 60.0, 44), - (utils.datetime_utc(2016, 1, 10, 0, 12), 60.0, 45), - ], self.storage.get_measures(self.metric, granularity=60.0)) + (utils.datetime_utc(2016, 1, 1, 12), + numpy.timedelta64(1, 'm'), 69), + (utils.datetime_utc(2016, 1, 2, 13, 7), + numpy.timedelta64(1, 'm'), 42), + (utils.datetime_utc(2016, 1, 4, 14, 9), + numpy.timedelta64(1, 'm'), 4), + (utils.datetime_utc(2016, 1, 6, 15, 12), + numpy.timedelta64(1, 'm'), 44), + (utils.datetime_utc(2016, 1, 10, 0, 12), + numpy.timedelta64(1, 'm'), 45), + ], self.storage.get_measures(self.metric, + granularity=numpy.timedelta64(60, 's'))) def test_rewrite_measures_corruption_missing_file(self): # Create an archive policy that spans on several splits. 
Each split @@ -493,10 +601,14 @@ class TestStorageDriver(tests_base.TestCase): self.trigger_processing() self.assertEqual({ - carbonara.SplitKey(1451520000.0, 60), - carbonara.SplitKey(1451736000.0, 60), - carbonara.SplitKey(1451952000.0, 60), - }, self.storage._list_split_keys_for_metric(self.metric, "mean", 60.0)) + carbonara.SplitKey(numpy.datetime64('2015-12-31T00:00:00'), + numpy.timedelta64(1, 'm')), + carbonara.SplitKey(numpy.datetime64('2016-01-02T12:00:00'), + numpy.timedelta64(1, 'm')), + carbonara.SplitKey(numpy.datetime64('2016-01-05T00:00:00'), + numpy.timedelta64(1, 'm')), + }, self.storage._list_split_keys_for_metric( + self.metric, "mean", numpy.timedelta64(1, 'm'))) if self.storage.WRITE_FULL: assertCompressedIfWriteFull = self.assertTrue @@ -504,27 +616,45 @@ class TestStorageDriver(tests_base.TestCase): assertCompressedIfWriteFull = self.assertFalse data = self.storage._get_measures( - self.metric, carbonara.SplitKey(1451520000.0, 60.0), "mean") + self.metric, + carbonara.SplitKey( + numpy.datetime64(1451520000, 's'), + numpy.timedelta64(1, 'm'), + ), "mean") self.assertTrue(carbonara.AggregatedTimeSerie.is_compressed(data)) data = self.storage._get_measures( - self.metric, carbonara.SplitKey(1451736000.0, 60.0), "mean") + self.metric, carbonara.SplitKey( + numpy.datetime64(1451736000, 's'), + numpy.timedelta64(1, 'm') + ), "mean") self.assertTrue(carbonara.AggregatedTimeSerie.is_compressed(data)) data = self.storage._get_measures( - self.metric, carbonara.SplitKey(1451952000.0, 60.0), "mean") + self.metric, carbonara.SplitKey( + numpy.datetime64(1451952000, 's'), + numpy.timedelta64(1, 'm'), + ), "mean") assertCompressedIfWriteFull( carbonara.AggregatedTimeSerie.is_compressed(data)) self.assertEqual([ - (utils.datetime_utc(2016, 1, 1, 12), 60.0, 69), - (utils.datetime_utc(2016, 1, 2, 13, 7), 60.0, 42), - (utils.datetime_utc(2016, 1, 4, 14, 9), 60.0, 4), - (utils.datetime_utc(2016, 1, 6, 15, 12), 60.0, 44), - ], 
self.storage.get_measures(self.metric, granularity=60.0)) + (utils.datetime_utc(2016, 1, 1, 12), + numpy.timedelta64(1, 'm'), 69), + (utils.datetime_utc(2016, 1, 2, 13, 7), + numpy.timedelta64(1, 'm'), 42), + (utils.datetime_utc(2016, 1, 4, 14, 9), + numpy.timedelta64(1, 'm'), 4), + (utils.datetime_utc(2016, 1, 6, 15, 12), + numpy.timedelta64(1, 'm'), 44), + ], self.storage.get_measures(self.metric, + granularity=numpy.timedelta64(60, 's'))) # Test what happens if we delete the latest split and then need to # compress it! self.storage._delete_metric_measures( - self.metric, carbonara.SplitKey(1451952000.0, 60.0), 'mean') + self.metric, carbonara.SplitKey( + numpy.datetime64(1451952000, 's'), + numpy.timedelta64(1, 'm'), + ), 'mean') # Now store brand new points that should force a rewrite of one of the # split (keep in mind the back window size in one hour here). We move @@ -556,10 +686,14 @@ class TestStorageDriver(tests_base.TestCase): self.trigger_processing() self.assertEqual({ - carbonara.SplitKey(1451520000.0, 60), - carbonara.SplitKey(1451736000.0, 60), - carbonara.SplitKey(1451952000.0, 60), - }, self.storage._list_split_keys_for_metric(self.metric, "mean", 60.0)) + carbonara.SplitKey(numpy.datetime64(1451520000, 's'), + numpy.timedelta64(1, 'm')), + carbonara.SplitKey(numpy.datetime64(1451736000, 's'), + numpy.timedelta64(1, 'm')), + carbonara.SplitKey(numpy.datetime64(1451952000, 's'), + numpy.timedelta64(1, 'm')), + }, self.storage._list_split_keys_for_metric( + self.metric, "mean", numpy.timedelta64(1, 'm'))) if self.storage.WRITE_FULL: assertCompressedIfWriteFull = self.assertTrue @@ -567,26 +701,43 @@ class TestStorageDriver(tests_base.TestCase): assertCompressedIfWriteFull = self.assertFalse data = self.storage._get_measures( - self.metric, carbonara.SplitKey(1451520000.0, 60.0), "mean") + self.metric, carbonara.SplitKey( + numpy.datetime64(1451520000, 's'), + numpy.timedelta64(60, 's'), + ), "mean") 
self.assertTrue(carbonara.AggregatedTimeSerie.is_compressed(data)) data = self.storage._get_measures( - self.metric, carbonara.SplitKey(1451736000.0, 60.0), "mean") + self.metric, carbonara.SplitKey( + numpy.datetime64(1451736000, 's'), + numpy.timedelta64(1, 'm'), + ), "mean") self.assertTrue(carbonara.AggregatedTimeSerie.is_compressed(data)) data = self.storage._get_measures( - self.metric, carbonara.SplitKey(1451952000.0, 60.0), "mean") + self.metric, carbonara.SplitKey( + numpy.datetime64(1451952000, 's'), + numpy.timedelta64(1, 'm'), + ), "mean") assertCompressedIfWriteFull( carbonara.AggregatedTimeSerie.is_compressed(data)) self.assertEqual([ - (utils.datetime_utc(2016, 1, 1, 12), 60.0, 69), - (utils.datetime_utc(2016, 1, 2, 13, 7), 60.0, 42), - (utils.datetime_utc(2016, 1, 4, 14, 9), 60.0, 4), - (utils.datetime_utc(2016, 1, 6, 15, 12), 60.0, 44), - ], self.storage.get_measures(self.metric, granularity=60.0)) + (utils.datetime_utc(2016, 1, 1, 12), + numpy.timedelta64(1, 'm'), 69), + (utils.datetime_utc(2016, 1, 2, 13, 7), + numpy.timedelta64(1, 'm'), 42), + (utils.datetime_utc(2016, 1, 4, 14, 9), + numpy.timedelta64(1, 'm'), 4), + (utils.datetime_utc(2016, 1, 6, 15, 12), + numpy.timedelta64(1, 'm'), 44), + ], self.storage.get_measures(self.metric, + granularity=numpy.timedelta64(1, 'm'))) # Test what happens if we write garbage self.storage._store_metric_measures( - self.metric, carbonara.SplitKey(1451952000.0, 60.0), "mean", + self.metric, carbonara.SplitKey( + numpy.datetime64(1451952000, 's'), + numpy.timedelta64(1, 'm'), + ), "mean", b"oh really?") # Now store brand new points that should force a rewrite of one of the @@ -607,10 +758,14 @@ class TestStorageDriver(tests_base.TestCase): self.trigger_processing() self.assertEqual([ - (utils.datetime_utc(2014, 1, 1), 86400.0, 55.5), - (utils.datetime_utc(2014, 1, 1, 12), 3600.0, 55.5), - (utils.datetime_utc(2014, 1, 1, 12), 300.0, 69), - (utils.datetime_utc(2014, 1, 1, 12, 5), 300.0, 42.0), + 
(utils.datetime_utc(2014, 1, 1), + numpy.timedelta64(1, 'D'), 55.5), + (utils.datetime_utc(2014, 1, 1, 12), + numpy.timedelta64(1, 'h'), 55.5), + (utils.datetime_utc(2014, 1, 1, 12), + numpy.timedelta64(5, 'm'), 69), + (utils.datetime_utc(2014, 1, 1, 12, 5), + numpy.timedelta64(5, 'm'), 42.0), ], self.storage.get_measures(self.metric)) self.incoming.add_measures(self.metric, [ @@ -620,27 +775,42 @@ class TestStorageDriver(tests_base.TestCase): self.trigger_processing() self.assertEqual([ - (utils.datetime_utc(2014, 1, 1), 86400.0, 39.75), - (utils.datetime_utc(2014, 1, 1, 12), 3600.0, 39.75), - (utils.datetime_utc(2014, 1, 1, 12), 300.0, 69.0), - (utils.datetime_utc(2014, 1, 1, 12, 5), 300.0, 23.0), - (utils.datetime_utc(2014, 1, 1, 12, 10), 300.0, 44.0), + (utils.datetime_utc(2014, 1, 1), + numpy.timedelta64(1, 'D'), 39.75), + (utils.datetime_utc(2014, 1, 1, 12), + numpy.timedelta64(1, 'h'), 39.75), + (utils.datetime_utc(2014, 1, 1, 12), + numpy.timedelta64(5, 'm'), 69.0), + (utils.datetime_utc(2014, 1, 1, 12, 5), + numpy.timedelta64(5, 'm'), 23.0), + (utils.datetime_utc(2014, 1, 1, 12, 10), + numpy.timedelta64(5, 'm'), 44.0), ], self.storage.get_measures(self.metric)) self.assertEqual([ - (utils.datetime_utc(2014, 1, 1), 86400.0, 69), - (utils.datetime_utc(2014, 1, 1, 12), 3600.0, 69.0), - (utils.datetime_utc(2014, 1, 1, 12), 300.0, 69.0), - (utils.datetime_utc(2014, 1, 1, 12, 5), 300.0, 42.0), - (utils.datetime_utc(2014, 1, 1, 12, 10), 300.0, 44.0), + (utils.datetime_utc(2014, 1, 1), + numpy.timedelta64(1, 'D'), 69), + (utils.datetime_utc(2014, 1, 1, 12), + numpy.timedelta64(1, 'h'), 69.0), + (utils.datetime_utc(2014, 1, 1, 12), + numpy.timedelta64(5, 'm'), 69.0), + (utils.datetime_utc(2014, 1, 1, 12, 5), + numpy.timedelta64(5, 'm'), 42.0), + (utils.datetime_utc(2014, 1, 1, 12, 10), + numpy.timedelta64(5, 'm'), 44.0), ], self.storage.get_measures(self.metric, aggregation='max')) self.assertEqual([ - (utils.datetime_utc(2014, 1, 1), 86400.0, 4), - 
(utils.datetime_utc(2014, 1, 1, 12), 3600.0, 4), - (utils.datetime_utc(2014, 1, 1, 12), 300.0, 69.0), - (utils.datetime_utc(2014, 1, 1, 12, 5), 300.0, 4.0), - (utils.datetime_utc(2014, 1, 1, 12, 10), 300.0, 44.0), + (utils.datetime_utc(2014, 1, 1), + numpy.timedelta64(1, 'D'), 4), + (utils.datetime_utc(2014, 1, 1, 12), + numpy.timedelta64(1, 'h'), 4), + (utils.datetime_utc(2014, 1, 1, 12), + numpy.timedelta64(5, 'm'), 69.0), + (utils.datetime_utc(2014, 1, 1, 12, 5), + numpy.timedelta64(5, 'm'), 4.0), + (utils.datetime_utc(2014, 1, 1, 12, 10), + numpy.timedelta64(5, 'm'), 44.0), ], self.storage.get_measures(self.metric, aggregation='min')) def test_add_and_get_measures(self): @@ -653,77 +823,100 @@ class TestStorageDriver(tests_base.TestCase): self.trigger_processing() self.assertEqual([ - (utils.datetime_utc(2014, 1, 1), 86400.0, 39.75), - (utils.datetime_utc(2014, 1, 1, 12), 3600.0, 39.75), - (utils.datetime_utc(2014, 1, 1, 12), 300.0, 69.0), - (utils.datetime_utc(2014, 1, 1, 12, 5), 300.0, 23.0), - (utils.datetime_utc(2014, 1, 1, 12, 10), 300.0, 44.0), + (utils.datetime_utc(2014, 1, 1), + numpy.timedelta64(1, 'D'), 39.75), + (utils.datetime_utc(2014, 1, 1, 12), + numpy.timedelta64(1, 'h'), 39.75), + (utils.datetime_utc(2014, 1, 1, 12), + numpy.timedelta64(5, 'm'), 69.0), + (utils.datetime_utc(2014, 1, 1, 12, 5), + numpy.timedelta64(5, 'm'), 23.0), + (utils.datetime_utc(2014, 1, 1, 12, 10), + numpy.timedelta64(5, 'm'), 44.0), ], self.storage.get_measures(self.metric)) self.assertEqual([ - (utils.datetime_utc(2014, 1, 1), 86400.0, 39.75), - (utils.datetime_utc(2014, 1, 1, 12), 3600.0, 39.75), - (utils.datetime_utc(2014, 1, 1, 12, 10), 300.0, 44.0), + (utils.datetime_utc(2014, 1, 1), + numpy.timedelta64(1, 'D'), 39.75), + (utils.datetime_utc(2014, 1, 1, 12), + numpy.timedelta64(1, 'h'), 39.75), + (utils.datetime_utc(2014, 1, 1, 12, 10), + numpy.timedelta64(5, 'm'), 44.0), ], self.storage.get_measures( self.metric, - from_timestamp=datetime.datetime(2014, 1, 1, 12, 
10, 0))) + from_timestamp=datetime64(2014, 1, 1, 12, 10, 0))) self.assertEqual([ - (utils.datetime_utc(2014, 1, 1), 86400.0, 39.75), - (utils.datetime_utc(2014, 1, 1, 12), 3600.0, 39.75), - (utils.datetime_utc(2014, 1, 1, 12), 300.0, 69.0), - (utils.datetime_utc(2014, 1, 1, 12, 5), 300.0, 23.0), + (utils.datetime_utc(2014, 1, 1), + numpy.timedelta64(1, 'D'), 39.75), + (utils.datetime_utc(2014, 1, 1, 12), + numpy.timedelta64(1, 'h'), 39.75), + (utils.datetime_utc(2014, 1, 1, 12), + numpy.timedelta64(5, 'm'), 69.0), + (utils.datetime_utc(2014, 1, 1, 12, 5), + numpy.timedelta64(5, 'm'), 23.0), ], self.storage.get_measures( self.metric, - to_timestamp=datetime.datetime(2014, 1, 1, 12, 6, 0))) + to_timestamp=datetime64(2014, 1, 1, 12, 6, 0))) self.assertEqual([ - (utils.datetime_utc(2014, 1, 1), 86400.0, 39.75), - (utils.datetime_utc(2014, 1, 1, 12), 3600.0, 39.75), - (utils.datetime_utc(2014, 1, 1, 12, 10), 300.0, 44.0), + (utils.datetime_utc(2014, 1, 1), + numpy.timedelta64(1, 'D'), 39.75), + (utils.datetime_utc(2014, 1, 1, 12), + numpy.timedelta64(1, 'h'), 39.75), + (utils.datetime_utc(2014, 1, 1, 12, 10), + numpy.timedelta64(5, 'm'), 44.0), ], self.storage.get_measures( self.metric, - to_timestamp=datetime.datetime(2014, 1, 1, 12, 10, 10), - from_timestamp=datetime.datetime(2014, 1, 1, 12, 10, 10))) + to_timestamp=datetime64(2014, 1, 1, 12, 10, 10), + from_timestamp=datetime64(2014, 1, 1, 12, 10, 10))) self.assertEqual([ - (utils.datetime_utc(2014, 1, 1), 86400.0, 39.75), - (utils.datetime_utc(2014, 1, 1, 12), 3600.0, 39.75), - (utils.datetime_utc(2014, 1, 1, 12), 300.0, 69.0), + (utils.datetime_utc(2014, 1, 1), + numpy.timedelta64(1, 'D'), 39.75), + (utils.datetime_utc(2014, 1, 1, 12), + numpy.timedelta64(1, 'h'), 39.75), + (utils.datetime_utc(2014, 1, 1, 12), + numpy.timedelta64(5, 'm'), 69.0), ], self.storage.get_measures( self.metric, - from_timestamp=datetime.datetime(2014, 1, 1, 12, 0, 0), - to_timestamp=datetime.datetime(2014, 1, 1, 12, 0, 2))) + 
from_timestamp=datetime64(2014, 1, 1, 12, 0, 0), + to_timestamp=datetime64(2014, 1, 1, 12, 0, 2))) self.assertEqual([ - (utils.datetime_utc(2014, 1, 1), 86400.0, 39.75), - (utils.datetime_utc(2014, 1, 1, 12), 3600.0, 39.75), - (utils.datetime_utc(2014, 1, 1, 12), 300.0, 69.0), + (utils.datetime_utc(2014, 1, 1), + numpy.timedelta64(1, 'D'), 39.75), + (utils.datetime_utc(2014, 1, 1, 12), + numpy.timedelta64(1, 'h'), 39.75), + (utils.datetime_utc(2014, 1, 1, 12), + numpy.timedelta64(5, 'm'), 69.0), ], self.storage.get_measures( self.metric, - from_timestamp=iso8601.parse_date("2014-1-1 13:00:00+01:00"), - to_timestamp=datetime.datetime(2014, 1, 1, 12, 0, 2))) + from_timestamp=datetime64(2014, 1, 1, 12), + to_timestamp=datetime64(2014, 1, 1, 12, 0, 2))) self.assertEqual([ - (utils.datetime_utc(2014, 1, 1, 12), 3600.0, 39.75), + (utils.datetime_utc(2014, 1, 1, 12), + numpy.timedelta64(1, 'h'), 39.75), ], self.storage.get_measures( self.metric, - from_timestamp=datetime.datetime(2014, 1, 1, 12, 0, 0), - to_timestamp=datetime.datetime(2014, 1, 1, 12, 0, 2), - granularity=3600.0)) + from_timestamp=datetime64(2014, 1, 1, 12, 0, 0), + to_timestamp=datetime64(2014, 1, 1, 12, 0, 2), + granularity=numpy.timedelta64(1, 'h'))) self.assertEqual([ - (utils.datetime_utc(2014, 1, 1, 12), 300.0, 69.0), + (utils.datetime_utc(2014, 1, 1, 12), + numpy.timedelta64(5, 'm'), 69.0), ], self.storage.get_measures( self.metric, - from_timestamp=datetime.datetime(2014, 1, 1, 12, 0, 0), - to_timestamp=datetime.datetime(2014, 1, 1, 12, 0, 2), - granularity=300.0)) + from_timestamp=datetime64(2014, 1, 1, 12, 0, 0), + to_timestamp=datetime64(2014, 1, 1, 12, 0, 2), + granularity=numpy.timedelta64(5, 'm'))) self.assertRaises(storage.GranularityDoesNotExist, self.storage.get_measures, self.metric, - granularity=42) + granularity=numpy.timedelta64(42, 's')) def test_get_cross_metric_measures_unknown_metric(self): self.assertEqual([], @@ -782,7 +975,7 @@ class TestStorageDriver(tests_base.TestCase): 
self.assertRaises(storage.GranularityDoesNotExist, self.storage.get_cross_metric_measures, [self.metric, metric2], - granularity=12345.456) + granularity=numpy.timedelta64(12345456, 'ms')) def test_add_and_get_cross_metric_measures_different_archives(self): metric2 = storage.Metric(uuid.uuid4(), @@ -822,71 +1015,94 @@ class TestStorageDriver(tests_base.TestCase): values = self.storage.get_cross_metric_measures([self.metric, metric2]) self.assertEqual([ - (utils.datetime_utc(2014, 1, 1, 0, 0, 0), 86400.0, 22.25), - (utils.datetime_utc(2014, 1, 1, 12, 0, 0), 3600.0, 22.25), - (utils.datetime_utc(2014, 1, 1, 12, 0, 0), 300.0, 39.0), - (utils.datetime_utc(2014, 1, 1, 12, 5, 0), 300.0, 12.5), - (utils.datetime_utc(2014, 1, 1, 12, 10, 0), 300.0, 24.0) + (utils.datetime_utc(2014, 1, 1, 0, 0, 0), + numpy.timedelta64(1, 'D'), 22.25), + (utils.datetime_utc(2014, 1, 1, 12, 0, 0), + numpy.timedelta64(1, 'h'), 22.25), + (utils.datetime_utc(2014, 1, 1, 12, 0, 0), + numpy.timedelta64(5, 'm'), 39.0), + (utils.datetime_utc(2014, 1, 1, 12, 5, 0), + numpy.timedelta64(5, 'm'), 12.5), + (utils.datetime_utc(2014, 1, 1, 12, 10, 0), + numpy.timedelta64(5, 'm'), 24.0) ], values) values = self.storage.get_cross_metric_measures([self.metric, metric2], reaggregation='max') self.assertEqual([ - (utils.datetime_utc(2014, 1, 1, 0, 0, 0), 86400.0, 39.75), - (utils.datetime_utc(2014, 1, 1, 12, 0, 0), 3600.0, 39.75), - (utils.datetime_utc(2014, 1, 1, 12, 0, 0), 300.0, 69), - (utils.datetime_utc(2014, 1, 1, 12, 5, 0), 300.0, 23), - (utils.datetime_utc(2014, 1, 1, 12, 10, 0), 300.0, 44) + (utils.datetime_utc(2014, 1, 1, 0, 0, 0), + numpy.timedelta64(1, 'D'), 39.75), + (utils.datetime_utc(2014, 1, 1, 12, 0, 0), + numpy.timedelta64(1, 'h'), 39.75), + (utils.datetime_utc(2014, 1, 1, 12, 0, 0), + numpy.timedelta64(5, 'm'), 69), + (utils.datetime_utc(2014, 1, 1, 12, 5, 0), + numpy.timedelta64(5, 'm'), 23), + (utils.datetime_utc(2014, 1, 1, 12, 10, 0), + numpy.timedelta64(5, 'm'), 44) ], values) values = 
self.storage.get_cross_metric_measures( [self.metric, metric2], - from_timestamp=datetime.datetime(2014, 1, 1, 12, 10, 0)) + from_timestamp=datetime64(2014, 1, 1, 12, 10, 0)) self.assertEqual([ - (utils.datetime_utc(2014, 1, 1), 86400.0, 22.25), - (utils.datetime_utc(2014, 1, 1, 12), 3600.0, 22.25), - (utils.datetime_utc(2014, 1, 1, 12, 10, 0), 300.0, 24.0), + (utils.datetime_utc(2014, 1, 1), + numpy.timedelta64(1, 'D'), 22.25), + (utils.datetime_utc(2014, 1, 1, 12), + numpy.timedelta64(1, 'h'), 22.25), + (utils.datetime_utc(2014, 1, 1, 12, 10, 0), + numpy.timedelta64(5, 'm'), 24.0), ], values) values = self.storage.get_cross_metric_measures( [self.metric, metric2], - to_timestamp=datetime.datetime(2014, 1, 1, 12, 5, 0)) + to_timestamp=datetime64(2014, 1, 1, 12, 5, 0)) self.assertEqual([ - (utils.datetime_utc(2014, 1, 1, 0, 0, 0), 86400.0, 22.25), - (utils.datetime_utc(2014, 1, 1, 12, 0, 0), 3600.0, 22.25), - (utils.datetime_utc(2014, 1, 1, 12, 0, 0), 300.0, 39.0), + (utils.datetime_utc(2014, 1, 1, 0, 0, 0), + numpy.timedelta64(1, 'D'), 22.25), + (utils.datetime_utc(2014, 1, 1, 12, 0, 0), + numpy.timedelta64(1, 'h'), 22.25), + (utils.datetime_utc(2014, 1, 1, 12, 0, 0), + numpy.timedelta64(5, 'm'), 39.0), ], values) values = self.storage.get_cross_metric_measures( [self.metric, metric2], - from_timestamp=datetime.datetime(2014, 1, 1, 12, 10, 10), - to_timestamp=datetime.datetime(2014, 1, 1, 12, 10, 10)) + from_timestamp=datetime64(2014, 1, 1, 12, 10, 10), + to_timestamp=datetime64(2014, 1, 1, 12, 10, 10)) self.assertEqual([ - (utils.datetime_utc(2014, 1, 1), 86400.0, 22.25), - (utils.datetime_utc(2014, 1, 1, 12), 3600.0, 22.25), - (utils.datetime_utc(2014, 1, 1, 12, 10), 300.0, 24.0), + (utils.datetime_utc(2014, 1, 1), + numpy.timedelta64(1, 'D'), 22.25), + (utils.datetime_utc(2014, 1, 1, 12), + numpy.timedelta64(1, 'h'), 22.25), + (utils.datetime_utc(2014, 1, 1, 12, 10), + numpy.timedelta64(5, 'm'), 24.0), ], values) values = self.storage.get_cross_metric_measures( 
[self.metric, metric2], - from_timestamp=datetime.datetime(2014, 1, 1, 12, 0, 0), - to_timestamp=datetime.datetime(2014, 1, 1, 12, 0, 1)) + from_timestamp=datetime64(2014, 1, 1, 12, 0, 0), + to_timestamp=datetime64(2014, 1, 1, 12, 0, 1)) self.assertEqual([ - (utils.datetime_utc(2014, 1, 1), 86400.0, 22.25), - (utils.datetime_utc(2014, 1, 1, 12, 0, 0), 3600.0, 22.25), - (utils.datetime_utc(2014, 1, 1, 12, 0, 0), 300.0, 39.0), + (utils.datetime_utc(2014, 1, 1), + numpy.timedelta64(1, 'D'), 22.25), + (utils.datetime_utc(2014, 1, 1, 12, 0, 0), + numpy.timedelta64(1, 'h'), 22.25), + (utils.datetime_utc(2014, 1, 1, 12, 0, 0), + numpy.timedelta64(5, 'm'), 39.0), ], values) values = self.storage.get_cross_metric_measures( [self.metric, metric2], - from_timestamp=datetime.datetime(2014, 1, 1, 12, 0, 0), - to_timestamp=datetime.datetime(2014, 1, 1, 12, 0, 1), - granularity=300.0) + from_timestamp=datetime64(2014, 1, 1, 12, 0, 0), + to_timestamp=datetime64(2014, 1, 1, 12, 0, 1), + granularity=numpy.timedelta64(5, 'm')) self.assertEqual([ - (utils.datetime_utc(2014, 1, 1, 12, 0, 0), 300.0, 39.0), + (utils.datetime_utc(2014, 1, 1, 12, 0, 0), + numpy.timedelta64(5, 'm'), 39.0), ], values) def test_add_and_get_cross_metric_measures_with_holes(self): @@ -908,11 +1124,16 @@ class TestStorageDriver(tests_base.TestCase): values = self.storage.get_cross_metric_measures([self.metric, metric2]) self.assertEqual([ - (utils.datetime_utc(2014, 1, 1, 0, 0, 0), 86400.0, 18.875), - (utils.datetime_utc(2014, 1, 1, 12, 0, 0), 3600.0, 18.875), - (utils.datetime_utc(2014, 1, 1, 12, 0, 0), 300.0, 39.0), - (utils.datetime_utc(2014, 1, 1, 12, 5, 0), 300.0, 11.0), - (utils.datetime_utc(2014, 1, 1, 12, 10, 0), 300.0, 22.0) + (utils.datetime_utc(2014, 1, 1, 0, 0, 0), + numpy.timedelta64(1, 'D'), 18.875), + (utils.datetime_utc(2014, 1, 1, 12, 0, 0), + numpy.timedelta64(1, 'h'), 18.875), + (utils.datetime_utc(2014, 1, 1, 12, 0, 0), + numpy.timedelta64(5, 'm'), 39.0), + (utils.datetime_utc(2014, 1, 1, 12, 
5, 0), + numpy.timedelta64(5, 'm'), 11.0), + (utils.datetime_utc(2014, 1, 1, 12, 10, 0), + numpy.timedelta64(5, 'm'), 22.0) ], values) def test_search_value(self): @@ -936,10 +1157,14 @@ class TestStorageDriver(tests_base.TestCase): self.assertEqual( {metric2: [], self.metric: [ - (utils.datetime_utc(2014, 1, 1), 86400, 33), - (utils.datetime_utc(2014, 1, 1, 12), 3600, 33), - (utils.datetime_utc(2014, 1, 1, 12), 300, 69), - (utils.datetime_utc(2014, 1, 1, 12, 10), 300, 42)]}, + (utils.datetime_utc(2014, 1, 1), + numpy.timedelta64(1, 'D'), 33), + (utils.datetime_utc(2014, 1, 1, 12), + numpy.timedelta64(1, 'h'), 33), + (utils.datetime_utc(2014, 1, 1, 12), + numpy.timedelta64(5, 'm'), 69), + (utils.datetime_utc(2014, 1, 1, 12, 10), + numpy.timedelta64(5, 'm'), 42)]}, self.storage.search_value( [metric2, self.metric], {u"≥": 30})) @@ -965,9 +1190,12 @@ class TestStorageDriver(tests_base.TestCase): ]) self.trigger_processing([str(m.id)]) self.assertEqual([ - (utils.datetime_utc(2014, 1, 1, 12, 0, 0), 5.0, 1.0), - (utils.datetime_utc(2014, 1, 1, 12, 0, 5), 5.0, 1.0), - (utils.datetime_utc(2014, 1, 1, 12, 0, 10), 5.0, 1.0), + (utils.datetime_utc(2014, 1, 1, 12, 0, 0), + numpy.timedelta64(5, 's'), 1.0), + (utils.datetime_utc(2014, 1, 1, 12, 0, 5), + numpy.timedelta64(5, 's'), 1.0), + (utils.datetime_utc(2014, 1, 1, 12, 0, 10), + numpy.timedelta64(5, 's'), 1.0), ], self.storage.get_measures(m)) # expand to more points self.index.update_archive_policy( @@ -978,18 +1206,24 @@ class TestStorageDriver(tests_base.TestCase): ]) self.trigger_processing([str(m.id)]) self.assertEqual([ - (utils.datetime_utc(2014, 1, 1, 12, 0, 0), 5.0, 1.0), - (utils.datetime_utc(2014, 1, 1, 12, 0, 5), 5.0, 1.0), - (utils.datetime_utc(2014, 1, 1, 12, 0, 10), 5.0, 1.0), - (utils.datetime_utc(2014, 1, 1, 12, 0, 15), 5.0, 1.0), + (utils.datetime_utc(2014, 1, 1, 12, 0, 0), + numpy.timedelta64(5, 's'), 1.0), + (utils.datetime_utc(2014, 1, 1, 12, 0, 5), + numpy.timedelta64(5, 's'), 1.0), + 
(utils.datetime_utc(2014, 1, 1, 12, 0, 10), + numpy.timedelta64(5, 's'), 1.0), + (utils.datetime_utc(2014, 1, 1, 12, 0, 15), + numpy.timedelta64(5, 's'), 1.0), ], self.storage.get_measures(m)) # shrink timespan self.index.update_archive_policy( name, [archive_policy.ArchivePolicyItem(granularity=5, points=2)]) m = self.index.list_metrics(ids=[m.id])[0] self.assertEqual([ - (utils.datetime_utc(2014, 1, 1, 12, 0, 10), 5.0, 1.0), - (utils.datetime_utc(2014, 1, 1, 12, 0, 15), 5.0, 1.0), + (utils.datetime_utc(2014, 1, 1, 12, 0, 10), + numpy.timedelta64(5, 's'), 1.0), + (utils.datetime_utc(2014, 1, 1, 12, 0, 15), + numpy.timedelta64(5, 's'), 1.0), ], self.storage.get_measures(m)) def test_resample_no_metric(self): @@ -997,10 +1231,10 @@ class TestStorageDriver(tests_base.TestCase): self.assertEqual([], self.storage.get_measures( self.metric, - utils.datetime_utc(2014, 1, 1), - utils.datetime_utc(2015, 1, 1), - granularity=300, - resample=3600)) + datetime64(2014, 1, 1), + datetime64(2015, 1, 1), + granularity=numpy.timedelta64(300, 's'), + resample=numpy.timedelta64(1, 'h'))) class TestMeasureQuery(tests_base.TestCase): diff --git a/gnocchi/utils.py b/gnocchi/utils.py index a0543b4e..9e014abb 100644 --- a/gnocchi/utils.py +++ b/gnocchi/utils.py @@ -140,12 +140,21 @@ def to_timespan(value): seconds = float(value) except Exception: try: - seconds = pd.to_timedelta(value).total_seconds() + seconds = pd.to_timedelta(value).to_timedelta64() except Exception: raise ValueError("Unable to parse timespan") - if seconds <= 0: + else: + seconds = numpy.timedelta64(int(seconds * 10e8), 'ns') + if seconds <= numpy.timedelta64(0, 'ns'): raise ValueError("Timespan must be positive") - return datetime.timedelta(seconds=seconds) + return seconds + + +_ONE_SECOND = numpy.timedelta64(1, 's') + + +def timespan_total_seconds(td): + return td / _ONE_SECOND def utcnow(): -- GitLab From 17606c2ae511efa9183fb9fc3a6260ce4186a3f4 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Mon, 24 Jul 2017 
17:00:00 +0200 Subject: [PATCH 0882/1483] logging: fix syslog output A typo makes the syslog output not working. This fixes that. --- gnocchi/service.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/gnocchi/service.py b/gnocchi/service.py index 5800faaa..33775a1e 100644 --- a/gnocchi/service.py +++ b/gnocchi/service.py @@ -62,7 +62,7 @@ def prepare_service(args=None, conf=None, if conf.use_syslog: outputs.append( - daiquiri.output.Syslog(facilty=conf.syslog_log_faciltity)) + daiquiri.output.Syslog(facility=conf.syslog_log_faciltity)) if conf.use_journal: outputs.append(daiquiri.output.Journal()) -- GitLab From 0c24755732b57aaf2519e57509945cb8766c7120 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Mon, 24 Jul 2017 17:00:00 +0200 Subject: [PATCH 0883/1483] logging: fix syslog output A typo makes the syslog output not working. This fixes that. --- gnocchi/service.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/gnocchi/service.py b/gnocchi/service.py index 985d2f88..3a14710f 100644 --- a/gnocchi/service.py +++ b/gnocchi/service.py @@ -61,7 +61,7 @@ def prepare_service(args=None, conf=None, if conf.use_syslog: outputs.append( - daiquiri.output.Syslog(facilty=conf.syslog_log_faciltity)) + daiquiri.output.Syslog(facility=conf.syslog_log_faciltity)) if conf.use_journal: outputs.append(daiquiri.output.Journal()) -- GitLab From 6ce858415c426bad119c496002199e003e970c7a Mon Sep 17 00:00:00 2001 From: gord chung Date: Mon, 24 Jul 2017 15:13:27 +0000 Subject: [PATCH 0884/1483] use pandas index values pandas naturally stores index as np.array(dtype='datetime64[ns]') --- gnocchi/carbonara.py | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/gnocchi/carbonara.py b/gnocchi/carbonara.py index b18ab11f..f71b419f 100644 --- a/gnocchi/carbonara.py +++ b/gnocchi/carbonara.py @@ -100,8 +100,7 @@ class GroupedTimeSeries(object): start_derive = start - granularity self._ts_for_derive = ts[start_derive:] - self.indexes = 
round_timestamp( - numpy.array(self._ts.index, dtype=numpy.datetime64), granularity) + self.indexes = round_timestamp(self._ts.index.values, granularity) self.tstamps, self.counts = numpy.unique(self.indexes, return_counts=True) @@ -192,7 +191,7 @@ class TimeSerie(object): def __init__(self, ts=None): if ts is None: - ts = pandas.Series() + ts = pandas.Series(index=numpy.array([], dtype='datetime64[ns]')) self.ts = ts @staticmethod @@ -554,9 +553,7 @@ class AggregatedTimeSerie(TimeSerie): # to iter the whole series. freq = self.sampling * SplitKey.POINTS_PER_SPLIT keys, counts = numpy.unique( - round_timestamp( - numpy.array(self.ts.index, dtype=numpy.datetime64), - freq), + round_timestamp(self.ts.index.values, freq), return_counts=True) start = 0 for key, count in six.moves.zip(keys, counts): -- GitLab From 6efb90a8fb74d621d44b211fa12512a7f3d8b16c Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Mon, 24 Jul 2017 09:33:46 +0200 Subject: [PATCH 0885/1483] carbonara: remove useless calls to pandas.to_datetime Pandas handles numpy.datetime64 naturally. 
--- gnocchi/carbonara.py | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/gnocchi/carbonara.py b/gnocchi/carbonara.py index f71b419f..bf4354e1 100644 --- a/gnocchi/carbonara.py +++ b/gnocchi/carbonara.py @@ -130,8 +130,7 @@ class GroupedTimeSeries(object): default=None) def _count(self): - timestamps = self.tstamps.astype('datetime64[ns]', copy=False) - return (self.counts, timestamps) + return (self.counts, self.tstamps) def count(self): return pandas.Series(*self._count()) @@ -141,14 +140,14 @@ class GroupedTimeSeries(object): cumcounts = numpy.cumsum(counts) - 1 values = self._ts.values[cumcounts] - return pandas.Series(values, pandas.to_datetime(timestamps)) + return pandas.Series(values, timestamps) def first(self): counts, timestamps = self._count() counts = numpy.insert(counts[:-1], 0, 0) cumcounts = numpy.cumsum(counts) values = self._ts.values[cumcounts] - return pandas.Series(values, pandas.to_datetime(timestamps)) + return pandas.Series(values, timestamps) def quantile(self, q): return self._scipy_aggregate(ndimage.labeled_comprehension, @@ -170,8 +169,7 @@ class GroupedTimeSeries(object): values = method(self._ts.values, self.indexes, tstamps, *args, **kwargs) - timestamps = tstamps.astype('datetime64[ns]', copy=False) - return pandas.Series(values, pandas.to_datetime(timestamps)) + return pandas.Series(values, tstamps) def derived(self): timestamps = self._ts_for_derive.index[1:] @@ -323,7 +321,7 @@ class BoundTimeSerie(TimeSerie): values = numpy.frombuffer(values_raw, dtype=' Date: Mon, 24 Jul 2017 18:21:41 +0200 Subject: [PATCH 0886/1483] carbonara: remove outdated comment The update method is gone. 
--- gnocchi/carbonara.py | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/gnocchi/carbonara.py b/gnocchi/carbonara.py index bf4354e1..33f60b45 100644 --- a/gnocchi/carbonara.py +++ b/gnocchi/carbonara.py @@ -756,11 +756,7 @@ class AggregatedTimeSerie(TimeSerie): points) def merge(self, ts): - """Merge a timeserie into this one. - - This is equivalent to `update` but is faster as they are is no - resampling. Be careful on what you merge. - """ + """Merge a timeserie into this one.""" self.ts = self.ts.combine_first(ts.ts) @classmethod -- GitLab From 6385754bdcb9d5d3b860c708098e1bfb0aca39e0 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Thu, 13 Jul 2017 18:12:24 +0200 Subject: [PATCH 0887/1483] incoming: unserialize using numpy rather than Pandas --- gnocchi/carbonara.py | 14 ++++---------- gnocchi/incoming/_carbonara.py | 15 ++++++--------- 2 files changed, 10 insertions(+), 19 deletions(-) diff --git a/gnocchi/carbonara.py b/gnocchi/carbonara.py index 33f60b45..a8e2d518 100644 --- a/gnocchi/carbonara.py +++ b/gnocchi/carbonara.py @@ -225,14 +225,14 @@ class TimeSerie(object): @property def first(self): try: - return self.ts.index[0] + return self.ts.index[0].to_datetime64() except IndexError: return @property def last(self): try: - return self.ts.index[-1] + return self.ts.index[-1].to_datetime64() except IndexError: return @@ -328,8 +328,7 @@ class BoundTimeSerie(TimeSerie): def serialize(self): # NOTE(jd) Use a double delta encoding for timestamps - timestamps = numpy.insert(numpy.diff(self.ts.index), - 0, self.first.value) + timestamps = numpy.insert(numpy.diff(self.ts.index), 0, self.first) timestamps = timestamps.astype(' Date: Tue, 25 Jul 2017 09:34:49 +0200 Subject: [PATCH 0888/1483] carbonara: make sure all data are list and not just iterables --- gnocchi/carbonara.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/gnocchi/carbonara.py b/gnocchi/carbonara.py index a8e2d518..099b209f 100644 --- 
a/gnocchi/carbonara.py +++ b/gnocchi/carbonara.py @@ -360,7 +360,7 @@ class BoundTimeSerie(TimeSerie): [random.randint(0, 20000) for x in six.moves.range(points)]), ("Small number random neg", [random.randint(-20000, 0) for x in six.moves.range(points)]), - ("Sin(x)", map(math.sin, six.moves.range(points))), + ("Sin(x)", list(map(math.sin, six.moves.range(points)))), ("random ", [random.random() for x in six.moves.range(points)]), ]: @@ -780,7 +780,7 @@ class AggregatedTimeSerie(TimeSerie): [random.randint(0, 20000) for x in six.moves.range(points)]), ("Small number random neg", [random.randint(-20000, 0) for x in six.moves.range(points)]), - ("Sin(x)", map(math.sin, six.moves.range(points))), + ("Sin(x)", list(map(math.sin, six.moves.range(points)))), ("random ", [random.random() for x in six.moves.range(points)]), ]: -- GitLab From 34f042834feea285e36333b21ac6c8ee241e0812 Mon Sep 17 00:00:00 2001 From: gord chung Date: Tue, 25 Jul 2017 16:31:15 +0000 Subject: [PATCH 0889/1483] simplify hashing we don't need math to build a hash --- gnocchi/carbonara.py | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/gnocchi/carbonara.py b/gnocchi/carbonara.py index 099b209f..037161a8 100644 --- a/gnocchi/carbonara.py +++ b/gnocchi/carbonara.py @@ -447,11 +447,8 @@ class SplitKey(object): return self def __hash__(self): - return hash( - str(datetime64_to_epoch(self.key)) - + - str(self.sampling / ONE_SECOND) - ) + return hash(str(self.key.astype('datetime64[ns]')) + + str(self.sampling.astype('timedelta64[ns]'))) def __lt__(self, other): if isinstance(other, SplitKey): -- GitLab From 7e77ce2503a0f9cd3729f78cf0e1b0596461c47a Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Tue, 25 Jul 2017 08:54:29 +0200 Subject: [PATCH 0890/1483] carbonara: compute random timestamps once and sort them --- gnocchi/carbonara.py | 26 ++++++++++++++------------ 1 file changed, 14 insertions(+), 12 deletions(-) diff --git a/gnocchi/carbonara.py b/gnocchi/carbonara.py index 
037161a8..8f017c1a 100644 --- a/gnocchi/carbonara.py +++ b/gnocchi/carbonara.py @@ -252,6 +252,16 @@ class TimeSerie(object): # byte type returned. return memoryview(lz4.block.compress(payload)).tobytes() + @staticmethod + def _generate_random_timestamps(how_many, + now=numpy.datetime64("2015-04-03 23:11")): + return numpy.sort( + numpy.array( + [now + numpy.timedelta64( + i * random.randint(1000000, 10000000), 'us') + for i in six.moves.range(how_many)], + dtype="datetime64[ns]")) + class BoundTimeSerie(TimeSerie): def __init__(self, ts=None, block_size=None, back_window=0): @@ -340,7 +350,7 @@ class BoundTimeSerie(TimeSerie): points = SplitKey.POINTS_PER_SPLIT serialize_times = 50 - now = datetime.datetime(2015, 4, 3, 23, 11) + timestamps = cls._generate_random_timestamps(points) print(cls.__name__) print("=" * len(cls.__name__)) @@ -365,12 +375,7 @@ class BoundTimeSerie(TimeSerie): for x in six.moves.range(points)]), ]: print(title) - pts = pandas.Series(values, - [now + datetime.timedelta( - seconds=i * random.randint(1, 10), - microseconds=random.randint(1, 999999)) - for i in six.moves.range(points)]) - pts = pts.sort_index() + pts = pandas.Series(values, timestamps) ts = cls(ts=pts) t0 = time.time() for i in six.moves.range(serialize_times): @@ -757,7 +762,7 @@ class AggregatedTimeSerie(TimeSerie): sampling = 5 resample = numpy.timedelta64(35, 's') - now = datetime.datetime(2015, 4, 3, 23, 11) + timestamps = cls._generate_random_timestamps(points) print(cls.__name__) print("=" * len(cls.__name__)) @@ -783,10 +788,7 @@ class AggregatedTimeSerie(TimeSerie): ]: print(title) serialize_times = 50 - pts = pandas.Series(values, - [now + datetime.timedelta(seconds=i*sampling) - for i in six.moves.range(points)]) - pts = pts.sort_index() + pts = pandas.Series(values, timestamps) ts = cls(ts=pts, sampling=numpy.timedelta64(sampling, 's'), aggregation_method='mean') t0 = time.time() -- GitLab From 5b2375b1a055aa86e8fb51c969ca5eb6cdc69e73 Mon Sep 17 00:00:00 2001 From: 
Julien Danjou Date: Wed, 26 Jul 2017 14:03:12 +0200 Subject: [PATCH 0891/1483] carbonara: speed up BoundTimeSerie.unserialize by leveraging frombuffer numpy.frombuffer can have offset/count kwargs, so no need to copy the uncompressed string around. --- gnocchi/carbonara.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/gnocchi/carbonara.py b/gnocchi/carbonara.py index 8f017c1a..6d36bb3a 100644 --- a/gnocchi/carbonara.py +++ b/gnocchi/carbonara.py @@ -321,14 +321,14 @@ class BoundTimeSerie(TimeSerie): nb_points = ( len(uncompressed) // cls._SERIALIZATION_TIMESTAMP_VALUE_LEN ) - timestamps_raw = uncompressed[ - :nb_points*cls._SERIALIZATION_TIMESTAMP_LEN] - timestamps = numpy.frombuffer(timestamps_raw, dtype=' Date: Tue, 25 Jul 2017 17:29:41 +0200 Subject: [PATCH 0892/1483] storage: use a namedtuple for Measure MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This should be faster and consume less memory… and work in the same way! --- gnocchi/storage/__init__.py | 11 ++--------- 1 file changed, 2 insertions(+), 9 deletions(-) diff --git a/gnocchi/storage/__init__.py b/gnocchi/storage/__init__.py index fc2687c2..397fd19c 100644 --- a/gnocchi/storage/__init__.py +++ b/gnocchi/storage/__init__.py @@ -13,6 +13,7 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
+import collections import operator import daiquiri @@ -36,15 +37,7 @@ class SackLockTimeoutError(Exception): pass -class Measure(object): - def __init__(self, timestamp, value): - self.timestamp = timestamp - self.value = value - - def __iter__(self): - """Allow to transform measure to tuple.""" - yield self.timestamp - yield self.value +Measure = collections.namedtuple("Measure", ['timestamp', 'value']) class Metric(object): -- GitLab From c9cd8b19949379745affcc3f9b6dadce5486c465 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Tue, 25 Jul 2017 20:39:37 +0200 Subject: [PATCH 0893/1483] rest: return timestamp without conversion The JSON encoder knows how to format properly datetime objects. --- gnocchi/rest/__init__.py | 41 ++++++++++++++-------------------------- 1 file changed, 14 insertions(+), 27 deletions(-) diff --git a/gnocchi/rest/__init__.py b/gnocchi/rest/__init__.py index 1fa3c4b8..820ee6b7 100644 --- a/gnocchi/rest/__init__.py +++ b/gnocchi/rest/__init__.py @@ -454,18 +454,14 @@ class MetricController(rest.RestController): if aggregation in self.custom_agg: warnings.warn("moving_average aggregation is deprecated.", category=DeprecationWarning) - measures = self.custom_agg[aggregation].compute( + return self.custom_agg[aggregation].compute( pecan.request.storage, self.metric, start, stop, **param) - else: - measures = pecan.request.storage.get_measures( - self.metric, start, stop, aggregation, - utils.to_timespan(granularity) - if granularity is not None else None, - resample) - # Replace timestamp keys by their string versions - return [(timestamp.isoformat(), offset, v) - for timestamp, offset, v in measures] + return pecan.request.storage.get_measures( + self.metric, start, stop, aggregation, + utils.to_timespan(granularity) + if granularity is not None else None, + resample) except (storage.MetricDoesNotExist, storage.GranularityDoesNotExist, storage.AggregationDoesNotExist) as e: @@ -1549,20 +1545,15 @@ class 
MetricsMeasuresBatchController(rest.RestController): except ValueError as e: abort(400, e) - metric_batch = {} try: - for metric in metrics: - measures = pecan.request.storage.get_measures( - metric, start, stop, aggregation, granularity) - metric_batch[str(metric.id)] = [ - (timestamp.isoformat(), offset, v) - for timestamp, offset, v in measures] + return dict((str(metric.id), + pecan.request.storage.get_measures( + metric, start, stop, aggregation, granularity)) + for metric in metrics) except (storage.GranularityDoesNotExist, storage.AggregationDoesNotExist) as e: abort(404, e) - return metric_batch - class SearchController(object): resource = SearchResourceController() @@ -1716,16 +1707,12 @@ class AggregationController(rest.RestController): if number_of_metrics == 1: # NOTE(sileht): don't do the aggregation if we only have one # metric - measures = pecan.request.storage.get_measures( + return pecan.request.storage.get_measures( metrics[0], start, stop, aggregation, granularity, resample) - else: - measures = pecan.request.storage.get_cross_metric_measures( - metrics, start, stop, aggregation, - reaggregation, resample, granularity, needed_overlap, fill) - # Replace timestamp keys by their string versions - return [(timestamp.isoformat(), offset, v) - for timestamp, offset, v in measures] + return pecan.request.storage.get_cross_metric_measures( + metrics, start, stop, aggregation, + reaggregation, resample, granularity, needed_overlap, fill) except storage.MetricUnaggregatable as e: abort(400, ("One of the metrics being aggregated doesn't have " "matching granularity: %s") % str(e)) -- GitLab From 493da2f78630d3b45ed622f76d32d9633beb5037 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Wed, 26 Jul 2017 13:45:27 +0200 Subject: [PATCH 0894/1483] carbonara: speed up AggregatedTimeSerie.unserialize by using frombuffer `numpy.frombuffer` offers `count` and `offset` kwargs, so using them should be enough to avoid a string copy and make things way faster. 
--- gnocchi/carbonara.py | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/gnocchi/carbonara.py b/gnocchi/carbonara.py index 6d36bb3a..4182c1da 100644 --- a/gnocchi/carbonara.py +++ b/gnocchi/carbonara.py @@ -618,17 +618,15 @@ class AggregatedTimeSerie(TimeSerie): memoryview(data)[1:].tobytes()) nb_points = len(uncompressed) // cls.COMPRESSED_SERIAL_LEN - timestamps_raw = uncompressed[ - :nb_points*cls.COMPRESSED_TIMESPAMP_LEN] try: - y = numpy.frombuffer(timestamps_raw, dtype=' Date: Tue, 25 Jul 2017 21:06:02 +0200 Subject: [PATCH 0895/1483] tests: set logging level to debug when setting GNOCCHI_TEST_DEBUG Other than that the tests are only running using WARNING. --- gnocchi/service.py | 17 +++++++++-------- gnocchi/tests/base.py | 9 +++++++-- 2 files changed, 16 insertions(+), 10 deletions(-) diff --git a/gnocchi/service.py b/gnocchi/service.py index 33775a1e..aa00eb6e 100644 --- a/gnocchi/service.py +++ b/gnocchi/service.py @@ -33,7 +33,7 @@ LOG = daiquiri.getLogger(__name__) def prepare_service(args=None, conf=None, default_config_files=None, - log_to_std=False): + log_to_std=False, logging_level=None): if conf is None: conf = cfg.ConfigOpts() opts.set_defaults() @@ -68,13 +68,14 @@ def prepare_service(args=None, conf=None, outputs.append(daiquiri.output.Journal()) daiquiri.setup(outputs=outputs) - if conf.debug: - level = logging.DEBUG - elif conf.verbose: - level = logging.INFO - else: - level = logging.WARNING - logging.getLogger("gnocchi").setLevel(level) + if logging_level is None: + if conf.debug: + logging_level = logging.DEBUG + elif conf.verbose: + logging_level = logging.INFO + else: + logging_level = logging.WARNING + logging.getLogger("gnocchi").setLevel(logging_level) # HACK(jd) I'm not happy about that, fix AP class to handle a conf object? 
archive_policy.ArchivePolicy.DEFAULT_AGGREGATION_METHODS = ( diff --git a/gnocchi/tests/base.py b/gnocchi/tests/base.py index e259c36f..6a74e8aa 100644 --- a/gnocchi/tests/base.py +++ b/gnocchi/tests/base.py @@ -15,6 +15,7 @@ # under the License. import functools import json +import logging import os import subprocess import threading @@ -268,8 +269,12 @@ class TestCase(BaseTestCase): @classmethod def setUpClass(self): super(TestCase, self).setUpClass() - self.conf = service.prepare_service([], - default_config_files=[]) + + self.conf = service.prepare_service( + [], + default_config_files=[], + logging_level=logging.DEBUG) + if not os.getenv("GNOCCHI_TEST_DEBUG"): daiquiri.setup(outputs=[]) -- GitLab From 051b11abb41b239a1873e4009d0a6625795f1f76 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Fri, 21 Jul 2017 09:39:17 +0200 Subject: [PATCH 0896/1483] indexer: fix ne operator When we use the ne operator, row with NULL values are not returned by mysql or postgresql. This change fixes the sql query to return them. 
Closes #224 --- gnocchi/indexer/sqlalchemy.py | 6 +++- gnocchi/tests/functional/gabbits/search.yaml | 21 +++++++++++++ gnocchi/tests/test_indexer.py | 31 ++++++++++++++++++++ 3 files changed, 57 insertions(+), 1 deletion(-) diff --git a/gnocchi/indexer/sqlalchemy.py b/gnocchi/indexer/sqlalchemy.py index 3eb177e2..e812c3fd 100644 --- a/gnocchi/indexer/sqlalchemy.py +++ b/gnocchi/indexer/sqlalchemy.py @@ -1221,7 +1221,11 @@ class QueryTransformer(object): raise indexer.QueryValueError(value, field_name) break - return op(attr, value) + if op == operator.ne and value is not None: + return operator.or_(operator.eq(attr, None), + op(attr, value)) + else: + return op(attr, value) @classmethod def build_filter(cls, engine, table, tree): diff --git a/gnocchi/tests/functional/gabbits/search.yaml b/gnocchi/tests/functional/gabbits/search.yaml index 0bd8f93d..ba55c648 100644 --- a/gnocchi/tests/functional/gabbits/search.yaml +++ b/gnocchi/tests/functional/gabbits/search.yaml @@ -74,3 +74,24 @@ tests: data: {} response_json_paths: $.`len`: 2 + + - name: post generic resource with project/user + POST: /v1/resource/generic + data: + id: 95573760-b085-4e69-9280-91f66fc3ed3c + started_at: "2014-01-03T02:02:02.000000" + status: 201 + + - name: search empty query again + POST: /v1/search/resource/generic + data: {} + response_json_paths: + $.`len`: 3 + + - name: search all resource not foobar + POST: /v1/search/resource/generic + data: + ne: + project_id: foobar + response_json_paths: + $.`len`: 3 diff --git a/gnocchi/tests/test_indexer.py b/gnocchi/tests/test_indexer.py index 37388ba0..545b6507 100644 --- a/gnocchi/tests/test_indexer.py +++ b/gnocchi/tests/test_indexer.py @@ -753,6 +753,37 @@ class TestIndexerDriver(tests_base.TestCase): attribute_filter={"=": {"project_id": 'bad-project'}}) self.assertEqual(0, len(resources)) + def test_list_resources_with_no_project(self): + r1 = uuid.uuid4() + r2 = uuid.uuid4() + user = str(uuid.uuid4()) + project = str(uuid.uuid4()) + creator = 
user + ":" + project + g1 = self.index.create_resource('generic', r1, creator, user, project) + g2 = self.index.create_resource('generic', r2, creator, None, None) + + # Get null value + resources = self.index.list_resources( + 'generic', + attribute_filter={"and": [ + {"=": {"creator": creator}}, + {"!=": {"project_id": project}} + ]}) + self.assertEqual(1, len(resources)) + self.assertEqual(g2, resources[0]) + + # Get null and filled values + resources = self.index.list_resources( + 'generic', + attribute_filter={"and": [ + {"=": {"creator": creator}}, + {"!=": {"project_id": "foobar"}} + ]}, + sorts=["project_id:asc-nullsfirst"]) + self.assertEqual(2, len(resources)) + self.assertEqual(g2, resources[0]) + self.assertEqual(g1, resources[1]) + def test_list_resources_by_duration(self): r1 = uuid.uuid4() user = str(uuid.uuid4()) -- GitLab From 9bb05387a3f5c318659a929afdaf7259a13d5621 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Tue, 25 Jul 2017 17:44:20 +0200 Subject: [PATCH 0897/1483] incoming: use numpy to serialize measures --- gnocchi/incoming/_carbonara.py | 17 ++++++----------- 1 file changed, 6 insertions(+), 11 deletions(-) diff --git a/gnocchi/incoming/_carbonara.py b/gnocchi/incoming/_carbonara.py index 771550cb..c605e1d3 100644 --- a/gnocchi/incoming/_carbonara.py +++ b/gnocchi/incoming/_carbonara.py @@ -15,8 +15,6 @@ # License for the specific language governing permissions and limitations # under the License. 
from concurrent import futures -import itertools -import struct import daiquiri import numpy @@ -35,7 +33,6 @@ class CarbonaraBasedStorage(incoming.StorageDriver): SACK_PREFIX = "incoming" CFG_PREFIX = 'gnocchi-config' CFG_SACKS = 'sacks' - _MEASURE_SERIAL_FORMAT = "Qd" @property def NUM_SACKS(self): @@ -75,12 +72,12 @@ class CarbonaraBasedStorage(incoming.StorageDriver): lock_name = b'gnocchi-sack-%s-lock' % str(sack).encode('ascii') return coord.get_lock(lock_name) + _SERIALIZE_DTYPE = [('timestamps', ' Date: Wed, 26 Jul 2017 14:11:49 +0200 Subject: [PATCH 0898/1483] carbonara: correctly handle all frombuffer errors Only a few `numpy.frombuffer` calls are covered by `try/except ValueError` blocks, whereas they should all be covered and raise InvalidData in case of error. --- gnocchi/carbonara.py | 23 +++++++++++++---------- gnocchi/storage/_carbonara.py | 2 +- 2 files changed, 14 insertions(+), 11 deletions(-) diff --git a/gnocchi/carbonara.py b/gnocchi/carbonara.py index 4182c1da..c8b10753 100644 --- a/gnocchi/carbonara.py +++ b/gnocchi/carbonara.py @@ -321,15 +321,19 @@ class BoundTimeSerie(TimeSerie): nb_points = ( len(uncompressed) // cls._SERIALIZATION_TIMESTAMP_VALUE_LEN ) - timestamps = numpy.frombuffer(uncompressed, dtype=' Date: Wed, 26 Jul 2017 16:35:17 +0000 Subject: [PATCH 0899/1483] BoundedSerie: don't truncate on init there is no scenario in normal workflow which requires us to truncate on init. when loading, the boundedserie should already be truncated when it was saved. truncating on init seems to only support a corrupted scenario. this improves unserialise perf by ~30x... 
or ~6x compared to v4.0 --- gnocchi/carbonara.py | 1 - gnocchi/tests/test_carbonara.py | 12 +++++------- 2 files changed, 5 insertions(+), 8 deletions(-) diff --git a/gnocchi/carbonara.py b/gnocchi/carbonara.py index c8b10753..6ea922eb 100644 --- a/gnocchi/carbonara.py +++ b/gnocchi/carbonara.py @@ -283,7 +283,6 @@ class BoundTimeSerie(TimeSerie): super(BoundTimeSerie, self).__init__(ts) self.block_size = block_size self.back_window = back_window - self._truncate() @classmethod def from_data(cls, timestamps=None, values=None, diff --git a/gnocchi/tests/test_carbonara.py b/gnocchi/tests/test_carbonara.py index b19136e2..22e58782 100644 --- a/gnocchi/tests/test_carbonara.py +++ b/gnocchi/tests/test_carbonara.py @@ -45,12 +45,11 @@ class TestBoundTimeSerie(base.BaseTestCase): def test_block_size(self): ts = carbonara.BoundTimeSerie.from_data( - [datetime64(2014, 1, 1, 12, 0, 0), - datetime64(2014, 1, 1, 12, 0, 4), + [datetime64(2014, 1, 1, 12, 0, 5), datetime64(2014, 1, 1, 12, 0, 9)], - [3, 5, 6], + [5, 6], block_size=numpy.timedelta64(5, 's')) - self.assertEqual(1, len(ts)) + self.assertEqual(2, len(ts)) ts.set_values([(datetime64(2014, 1, 1, 12, 0, 10), 3), (datetime64(2014, 1, 1, 12, 0, 11), 4)]) self.assertEqual(2, len(ts)) @@ -70,10 +69,9 @@ class TestBoundTimeSerie(base.BaseTestCase): def test_block_size_unordered(self): ts = carbonara.BoundTimeSerie.from_data( - [datetime64(2014, 1, 1, 12, 0, 0), - datetime64(2014, 1, 1, 12, 0, 5), + [datetime64(2014, 1, 1, 12, 0, 5), datetime64(2014, 1, 1, 12, 0, 9)], - [10, 5, 23], + [5, 23], block_size=numpy.timedelta64(5, 's')) self.assertEqual(2, len(ts)) ts.set_values([(datetime64(2014, 1, 1, 12, 0, 11), 3), -- GitLab From 587f3a5e801df4bb72eaa8d1c32aa918372abf5d Mon Sep 17 00:00:00 2001 From: gord chung Date: Wed, 26 Jul 2017 20:56:21 +0000 Subject: [PATCH 0900/1483] fix benchmark data generation aggregated ts generation cannot be the same as bounded ts because aggregated ts has to follow sampling cadence where as 
bounded ts has no cadence. because of that the results from aggregates benchmarks are wrong. revert to something similar to before but still generate timestamps only once. --- gnocchi/carbonara.py | 21 +++++++++------------ 1 file changed, 9 insertions(+), 12 deletions(-) diff --git a/gnocchi/carbonara.py b/gnocchi/carbonara.py index 6ea922eb..30938d29 100644 --- a/gnocchi/carbonara.py +++ b/gnocchi/carbonara.py @@ -252,16 +252,6 @@ class TimeSerie(object): # byte type returned. return memoryview(lz4.block.compress(payload)).tobytes() - @staticmethod - def _generate_random_timestamps(how_many, - now=numpy.datetime64("2015-04-03 23:11")): - return numpy.sort( - numpy.array( - [now + numpy.timedelta64( - i * random.randint(1000000, 10000000), 'us') - for i in six.moves.range(how_many)], - dtype="datetime64[ns]")) - class BoundTimeSerie(TimeSerie): def __init__(self, ts=None, block_size=None, back_window=0): @@ -353,7 +343,11 @@ class BoundTimeSerie(TimeSerie): points = SplitKey.POINTS_PER_SPLIT serialize_times = 50 - timestamps = cls._generate_random_timestamps(points) + now = datetime.datetime(2015, 4, 3, 23, 11) + timestamps = numpy.sort(numpy.array( + [now + datetime.timedelta(seconds=i * random.randint(1, 10), + microseconds=random.randint(1, 999999)) + for i in six.moves.range(points)])) print(cls.__name__) print("=" * len(cls.__name__)) @@ -762,7 +756,10 @@ class AggregatedTimeSerie(TimeSerie): sampling = 5 resample = numpy.timedelta64(35, 's') - timestamps = cls._generate_random_timestamps(points) + now = datetime.datetime(2015, 4, 3, 23, 11) + timestamps = numpy.sort(numpy.array( + [now + datetime.timedelta(seconds=i*sampling) + for i in six.moves.range(points)])) print(cls.__name__) print("=" * len(cls.__name__)) -- GitLab From 4bb00f630d57f9b6a7e76de50cff8554fbda3bc3 Mon Sep 17 00:00:00 2001 From: zhang-shaoman Date: Wed, 26 Jul 2017 15:18:06 +0800 Subject: [PATCH 0901/1483] Modify a mistake in plan Gnocchi storage In section plan Gnocchi's storage, there 
is a little mistake in the example, so fix it. --- doc/source/operating.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/source/operating.rst b/doc/source/operating.rst index 99d1d535..9b964fae 100644 --- a/doc/source/operating.rst +++ b/doc/source/operating.rst @@ -153,7 +153,7 @@ For example, if you want to keep a year of data with a one minute resolution:: Then:: - size in bytes = 525 600 bytes × 6 = 3 159 600 bytes = 3 085 KiB + size in bytes = 525 600 points × 8 bytes = 4 204 800 bytes = 4 106 KiB This is just for a single aggregated time series. If your archive policy uses the 6 default aggregation methods (mean, min, max, sum, std, count) with the -- GitLab From 570bcd19f4ced3b063940aed336da50488732869 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Thu, 27 Jul 2017 15:42:34 +0200 Subject: [PATCH 0902/1483] carbonara: print AggregatedTimeSerie benchmark result in op/s I don't like reading nanoseconds in second units. Print op/s. --- gnocchi/carbonara.py | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/gnocchi/carbonara.py b/gnocchi/carbonara.py index 30938d29..1ea40cc8 100644 --- a/gnocchi/carbonara.py +++ b/gnocchi/carbonara.py @@ -823,11 +823,14 @@ class AggregatedTimeSerie(TimeSerie): % (((points * 2 * 8) / ((t1 - t0) / serialize_times)) / (1024.0 * 1024.0))) + def per_sec(t1, t0): + return 1 / ((t1 - t0) / serialize_times) + t0 = time.time() for i in six.moves.range(serialize_times): list(ts.split()) t1 = time.time() - print(" split() speed: %.8f s" % ((t1 - t0) / serialize_times)) + print(" split() speed: %.2f Hz" % per_sec(t1, t0)) # NOTE(sileht): propose a new series with half overload timestamps pts = ts.ts.copy(deep=True) @@ -840,7 +843,7 @@ class AggregatedTimeSerie(TimeSerie): for i in six.moves.range(serialize_times): ts.merge(tsbis) t1 = time.time() - print(" merge() speed: %.8f s" % ((t1 - t0) / serialize_times)) + print(" merge() speed %.2f Hz" % per_sec(t1, t0)) for agg in ['mean', 
'sum', 'max', 'min', 'std', 'median', 'first', 'last', 'count', '5pct', '90pct']: @@ -851,8 +854,8 @@ class AggregatedTimeSerie(TimeSerie): for i in six.moves.range(serialize_times): ts.resample(resample) t1 = time.time() - print(" resample(%s) speed: %.8f s" % (agg, (t1 - t0) / - serialize_times)) + print(" resample(%s) speed: %.2f Hz" + % (agg, per_sec(t1, t0))) @staticmethod def aggregated(timeseries, aggregation, from_timestamp=None, -- GitLab From a551db17ef2053597cc77559f842ad355b4cd4b1 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Thu, 27 Jul 2017 12:47:51 +0200 Subject: [PATCH 0903/1483] ceph: fix ReportGenerationError raise The class is in gnocchi.incoming, not gnocchi.incoming._carbonara --- gnocchi/incoming/ceph.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/gnocchi/incoming/ceph.py b/gnocchi/incoming/ceph.py index 30fbe244..9a786a74 100644 --- a/gnocchi/incoming/ceph.py +++ b/gnocchi/incoming/ceph.py @@ -20,6 +20,7 @@ import uuid import six from gnocchi.common import ceph +from gnocchi import incoming from gnocchi.incoming import _carbonara rados = ceph.rados @@ -120,8 +121,8 @@ class CephStorage(_carbonara.CarbonaraBasedStorage): names = list(self._list_keys_to_process( i, marker=marker, limit=self.Q_LIMIT)) if names and names[0] < marker: - raise _carbonara.ReportGenerationError("Unable to cleanly " - "compute backlog.") + raise incoming.ReportGenerationError( + "Unable to cleanly compute backlog.") for name in names: count += 1 metric = name.split("_")[1] -- GitLab From 5c710dc3c5e3f55469c40ea87122cf8e3e529aed Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Mon, 24 Jul 2017 16:16:22 +0200 Subject: [PATCH 0904/1483] Remove most of Pandas usage from Carbonara This speeds up computing and should decrease memory usage. Pandas is still used for aggregation computing for now. 
--- gnocchi/aggregates/moving_stats.py | 2 +- gnocchi/carbonara.py | 239 ++++++++------ gnocchi/storage/_carbonara.py | 17 +- gnocchi/tests/test_aggregates.py | 4 +- gnocchi/tests/test_carbonara.py | 237 +++++++------- gnocchi/tests/test_statsd.py | 65 ++-- gnocchi/tests/test_storage.py | 493 ++++++++++++----------------- gnocchi/utils.py | 5 - 8 files changed, 485 insertions(+), 577 deletions(-) diff --git a/gnocchi/aggregates/moving_stats.py b/gnocchi/aggregates/moving_stats.py index 4d4cb2c1..8e136ebc 100644 --- a/gnocchi/aggregates/moving_stats.py +++ b/gnocchi/aggregates/moving_stats.py @@ -107,7 +107,7 @@ class MovingAverage(aggregates.CustomAggregator): # change from integer index to timestamp index result.index = data.index - return [(t, window, r) for t, r + return [(t.to_datetime64(), window, r) for t, r in six.iteritems(result[~result.isnull()])] except Exception as e: raise aggregates.CustomAggFailure(str(e)) diff --git a/gnocchi/carbonara.py b/gnocchi/carbonara.py index 1ea40cc8..f5584ce4 100644 --- a/gnocchi/carbonara.py +++ b/gnocchi/carbonara.py @@ -16,7 +16,6 @@ # under the License. """Time series data manipulation, better with pancetta.""" -import datetime import functools import itertools import logging @@ -85,6 +84,42 @@ def round_timestamp(ts, freq): (ts - UNIX_UNIVERSAL_START64) / freq) * freq +TIMESERIES_ARRAY_DTYPE = [('timestamps', 'datetime64[ns]'), + ('values', 'float64')] + + +def make_timeseries(timestamps, values): + """Return a Numpy array representing a timeseries. + + This array specifies correctly the data types, which is important for + Numpy to operate fastly. + """ + l = len(timestamps) + if l != len(values): + raise ValueError("Timestamps and values must have the same length") + arr = numpy.zeros(l, dtype=TIMESERIES_ARRAY_DTYPE) + arr['timestamps'] = timestamps + arr['values'] = values + return arr + + +def combine_timeseries(ts1, ts2): + """Combine a timeseries into this one. + + The timeseries does not need to be sorted. 
+ + If a timestamp is present in both `ts1` and `ts2`, then value from `ts1` + is used. + + :param ts: The timeseries to combine. + :return: A new timeseries. + """ + _, index = numpy.unique( + numpy.append(ts1['timestamps'], ts2['timestamps']), + return_index=True) + return numpy.append(ts1, ts2)[index] + + class GroupedTimeSeries(object): def __init__(self, ts, granularity, start=None): # NOTE(sileht): The whole class assumes ts is ordered and don't have @@ -96,11 +131,13 @@ class GroupedTimeSeries(object): self._ts = ts self._ts_for_derive = ts else: - self._ts = ts[start:] + self._ts = ts[numpy.searchsorted(ts['timestamps'], start):] start_derive = start - granularity - self._ts_for_derive = ts[start_derive:] + self._ts_for_derive = ts[ + numpy.searchsorted(ts['timestamps'], start_derive): + ] - self.indexes = round_timestamp(self._ts.index.values, granularity) + self.indexes = round_timestamp(self._ts['timestamps'], granularity) self.tstamps, self.counts = numpy.unique(self.indexes, return_counts=True) @@ -129,25 +166,20 @@ class GroupedTimeSeries(object): out_dtype='float64', default=None) - def _count(self): - return (self.counts, self.tstamps) - def count(self): - return pandas.Series(*self._count()) + return make_timeseries(self.tstamps, self.counts) def last(self): - counts, timestamps = self._count() - cumcounts = numpy.cumsum(counts) - 1 - values = self._ts.values[cumcounts] + cumcounts = numpy.cumsum(self.counts) - 1 + values = self._ts['values'][cumcounts] - return pandas.Series(values, timestamps) + return make_timeseries(self.tstamps, values) def first(self): - counts, timestamps = self._count() - counts = numpy.insert(counts[:-1], 0, 0) + counts = numpy.insert(self.counts[:-1], 0, 0) cumcounts = numpy.cumsum(counts) - values = self._ts.values[cumcounts] - return pandas.Series(values, timestamps) + values = self._ts['values'][cumcounts] + return make_timeseries(self.tstamps, values) def quantile(self, q): return 
self._scipy_aggregate(ndimage.labeled_comprehension, @@ -165,18 +197,18 @@ class GroupedTimeSeries(object): tstamps = self.tstamps if len(tstamps) == 0: - return pandas.Series() + return make_timeseries([], []) - values = method(self._ts.values, self.indexes, tstamps, + values = method(self._ts['values'], self.indexes, tstamps, *args, **kwargs) - return pandas.Series(values, tstamps) + return make_timeseries(tstamps, values) def derived(self): - timestamps = self._ts_for_derive.index[1:] - values = numpy.diff(self._ts_for_derive.values) + timestamps = self._ts_for_derive['timestamps'][1:] + values = numpy.diff(self._ts_for_derive['values']) # FIXME(sileht): create some alternative __init__ to avoid creating - # useless Pandas object, recounting, timestamps convertion, ... - return GroupedTimeSeries(pandas.Series(values, timestamps), + # useless Numpy object, recounting, timestamps convertion, ... + return GroupedTimeSeries(make_timeseries(timestamps, values), self.granularity, self.start) @@ -189,50 +221,76 @@ class TimeSerie(object): def __init__(self, ts=None): if ts is None: - ts = pandas.Series(index=numpy.array([], dtype='datetime64[ns]')) + ts = make_timeseries([], []) self.ts = ts - @staticmethod - def clean_ts(ts): - if ts.index.has_duplicates: - ts = ts[~ts.index.duplicated(keep='last')] - if not ts.index.is_monotonic: - ts = ts.sort_index() - return ts - @classmethod def from_data(cls, timestamps=None, values=None): - return cls(pandas.Series(values, timestamps)) + return cls(make_timeseries(timestamps, values)) @classmethod def from_tuples(cls, timestamps_values): return cls.from_data(*zip(*timestamps_values)) def __eq__(self, other): - return (isinstance(other, TimeSerie) - and self.ts.all() == other.ts.all()) + return (isinstance(other, TimeSerie) and + numpy.all(self.ts == other.ts)) def __getitem__(self, key): + if isinstance(key, numpy.datetime64): + idx = numpy.searchsorted(self.timestamps, key) + if self.timestamps[idx] == key: + return self[idx] 
+ raise KeyError(key) + if isinstance(key, slice): + if isinstance(key.start, numpy.datetime64): + start = numpy.searchsorted(self.timestamps, key.start) + else: + start = key.start + if isinstance(key.stop, numpy.datetime64): + stop = numpy.searchsorted(self.timestamps, key.stop) + else: + stop = key.stop + key = slice(start, stop, key.step) return self.ts[key] + def _merge(self, ts): + """Merge a Numpy timeseries into this one.""" + self.ts = combine_timeseries(ts, self.ts) + + def merge(self, ts): + """Merge a TimeSerie into this one.""" + return self._merge(ts.ts) + def set_values(self, values): - t = pandas.Series(*reversed(list(zip(*values)))) - self.ts = self.clean_ts(t).combine_first(self.ts) + """Set values into this timeseries. + + :param values: A list of tuple (timestamp, value). + """ + return self._merge(numpy.array(values, dtype=TIMESERIES_ARRAY_DTYPE)) def __len__(self): return len(self.ts) + @property + def timestamps(self): + return self.ts['timestamps'] + + @property + def values(self): + return self.ts['values'] + @property def first(self): try: - return self.ts.index[0].to_datetime64() + return self.timestamps[0] except IndexError: return @property def last(self): try: - return self.ts.index[-1].to_datetime64() + return self.timestamps[-1] except IndexError: return @@ -240,8 +298,8 @@ class TimeSerie(object): # NOTE(jd) Our whole serialization system is based on Epoch, and we # store unsigned integer, so we can't store anything before Epoch. # Sorry! 
- if not self.ts.empty and self.ts.index[0].value < 0: - raise BeforeEpochError(self.ts.index[0]) + if len(self.ts) != 0 and self.first < UNIX_UNIVERSAL_START64: + raise BeforeEpochError(self.first) return GroupedTimeSeries(self.ts, granularity, start) @@ -277,7 +335,7 @@ class BoundTimeSerie(TimeSerie): @classmethod def from_data(cls, timestamps=None, values=None, block_size=None, back_window=0): - return cls(pandas.Series(values, timestamps), + return cls(make_timeseries(timestamps, values), block_size=block_size, back_window=back_window) def __eq__(self, other): @@ -288,7 +346,7 @@ class BoundTimeSerie(TimeSerie): def set_values(self, values, before_truncate_callback=None): # NOTE: values must be sorted when passed in. - if self.block_size is not None and not self.ts.empty: + if self.block_size is not None and len(self.ts) != 0: first_block_timestamp = self.first_block_timestamp() for index, (timestamp, value) in enumerate(values): if timestamp >= first_block_timestamp: @@ -331,11 +389,9 @@ class BoundTimeSerie(TimeSerie): def serialize(self): # NOTE(jd) Use a double delta encoding for timestamps - timestamps = numpy.insert(numpy.diff(self.ts.index), 0, self.first) - timestamps = timestamps.astype(' Date: Wed, 26 Jul 2017 13:37:31 +0200 Subject: [PATCH 0905/1483] carbonara: remove weird key check This was likely a Pandas "issue", but that should not be a problem now. 
--- gnocchi/carbonara.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/gnocchi/carbonara.py b/gnocchi/carbonara.py index f5584ce4..9a05b674 100644 --- a/gnocchi/carbonara.py +++ b/gnocchi/carbonara.py @@ -598,8 +598,6 @@ class AggregatedTimeSerie(TimeSerie): start = 0 for key, count in six.moves.zip(keys, counts): end = start + count - if key == -0.0: - key = abs(key) yield (SplitKey(key, self.sampling), AggregatedTimeSerie(self.sampling, self.aggregation_method, self[start:end])) -- GitLab From d2285ac105fa13707b1f69c4e798dbc96567e605 Mon Sep 17 00:00:00 2001 From: gord chung Date: Thu, 27 Jul 2017 13:55:27 +0000 Subject: [PATCH 0906/1483] fix syslog_log_facility option spelling (cherry picked from commit 687cff173ac1da583b4ce0f8d959b4f30507cd50) --- gnocchi/service.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/gnocchi/service.py b/gnocchi/service.py index 3a14710f..fb221bd2 100644 --- a/gnocchi/service.py +++ b/gnocchi/service.py @@ -61,7 +61,7 @@ def prepare_service(args=None, conf=None, if conf.use_syslog: outputs.append( - daiquiri.output.Syslog(facility=conf.syslog_log_faciltity)) + daiquiri.output.Syslog(facility=conf.syslog_log_facility)) if conf.use_journal: outputs.append(daiquiri.output.Journal()) -- GitLab From 37c3514b619c3e73a914de5d9a362b3b121595ea Mon Sep 17 00:00:00 2001 From: gord chung Date: Thu, 27 Jul 2017 13:55:27 +0000 Subject: [PATCH 0907/1483] fix syslog_log_facility option spelling --- gnocchi/service.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/gnocchi/service.py b/gnocchi/service.py index aa00eb6e..53ab54f5 100644 --- a/gnocchi/service.py +++ b/gnocchi/service.py @@ -62,7 +62,7 @@ def prepare_service(args=None, conf=None, if conf.use_syslog: outputs.append( - daiquiri.output.Syslog(facility=conf.syslog_log_faciltity)) + daiquiri.output.Syslog(facility=conf.syslog_log_facility)) if conf.use_journal: outputs.append(daiquiri.output.Journal()) -- GitLab From 
3ae150352db45a768ce9837ee001bbd22fe346f9 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Thu, 27 Jul 2017 11:11:53 +0200 Subject: [PATCH 0908/1483] incoming: return directly a Numpy array of measures This read the incoming measures as a Numpy array ready to be sent to Carbonara, rather than a list that needs a cast. --- gnocchi/incoming/_carbonara.py | 9 +++++++++ gnocchi/incoming/ceph.py | 6 ++++-- gnocchi/incoming/file.py | 6 ++++-- gnocchi/incoming/redis.py | 9 ++++----- gnocchi/incoming/s3.py | 9 ++++++--- gnocchi/incoming/swift.py | 13 +++++++------ 6 files changed, 34 insertions(+), 18 deletions(-) diff --git a/gnocchi/incoming/_carbonara.py b/gnocchi/incoming/_carbonara.py index c605e1d3..b5686f9f 100644 --- a/gnocchi/incoming/_carbonara.py +++ b/gnocchi/incoming/_carbonara.py @@ -75,6 +75,15 @@ class CarbonaraBasedStorage(incoming.StorageDriver): _SERIALIZE_DTYPE = [('timestamps', ' Date: Thu, 27 Jul 2017 22:37:02 +0000 Subject: [PATCH 0909/1483] carbonara: don't need floor divide during serialization in an aggregatedtimeserie, all timestamps should be some multiple of the sampling. therefore, doing numpy.floor is useless since if it's not, then the data is corrupt. also, it doesn't matter anyways because we cast to integer anyways which is equivalent to the floor. 
so corruption is hidden anyways \o/ --- gnocchi/carbonara.py | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/gnocchi/carbonara.py b/gnocchi/carbonara.py index 9a05b674..d0e80cdf 100644 --- a/gnocchi/carbonara.py +++ b/gnocchi/carbonara.py @@ -716,8 +716,8 @@ class AggregatedTimeSerie(TimeSerie): if compressed: # NOTE(jd) Use a double delta encoding for timestamps timestamps = numpy.insert( - numpy.floor(numpy.diff(self.timestamps) / offset_div), - 0, numpy.floor((self.first - start.key) / offset_div)) + numpy.diff(self.timestamps) / offset_div, + 0, (self.first - start.key) / offset_div) timestamps = timestamps.astype(' Date: Tue, 25 Jul 2017 20:59:57 +0000 Subject: [PATCH 0910/1483] enforce boundaries on overlap if no boundary is set, the returned series does not necessarily honour the overlap value. change behaviour so we require a boundary if overlap is not 100%. Fixes: #17 --- doc/source/rest.j2 | 6 ++---- gnocchi/rest/__init__.py | 3 +++ gnocchi/tests/functional/gabbits/aggregation.yaml | 7 +++++++ gnocchi/tests/test_rest.py | 13 +++++++++---- ...ory-boundaries-for-overlap-af28dc1e0946c500.yaml | 7 +++++++ 5 files changed, 28 insertions(+), 8 deletions(-) create mode 100644 releasenotes/notes/mandatory-boundaries-for-overlap-af28dc1e0946c500.yaml diff --git a/doc/source/rest.j2 b/doc/source/rest.j2 index e5d9548f..b306152b 100644 --- a/doc/source/rest.j2 +++ b/doc/source/rest.j2 @@ -550,13 +550,11 @@ is set. When a boundary is set, Gnocchi expects that we have certain percent of timestamps common between timeseries. This percent is controlled by needed_overlap, which by default expects 100% overlap. If this percent is not -reached, an error is returned. If no boundaries are set, Gnocchi aggregates and -returns only the last contiguous range of common datapoints. +reached, an error is returned. .. note:: - Not setting a boundary may result in an extremely sparse result. 
- Additionally, it may not accurately reflect 'needed_overlap' value, if set. + If no boundaries are set, Gnocchi requires 100% overlap across all series The ability to fill in points missing from a subset of timeseries is supported by specifying a `fill` value. Valid fill values include any valid float or diff --git a/gnocchi/rest/__init__.py b/gnocchi/rest/__init__.py index 820ee6b7..aea328c1 100644 --- a/gnocchi/rest/__init__.py +++ b/gnocchi/rest/__init__.py @@ -1642,6 +1642,9 @@ class AggregationController(rest.RestController): needed_overlap = float(needed_overlap) except ValueError: abort(400, 'needed_overlap must be a number') + if needed_overlap != 100.0 and start is None and stop is None: + abort(400, 'start and/or stop must be provided if specifying ' + 'needed_overlap') if start is not None: try: diff --git a/gnocchi/tests/functional/gabbits/aggregation.yaml b/gnocchi/tests/functional/gabbits/aggregation.yaml index 163c25ab..84cc7dfa 100644 --- a/gnocchi/tests/functional/gabbits/aggregation.yaml +++ b/gnocchi/tests/functional/gabbits/aggregation.yaml @@ -162,6 +162,13 @@ tests: response_strings: - Granularity '42.0' for metric + - name: get measure aggregates no boundary custom overlap + desc: https://github.com/gnocchixyz/gnocchi/issues/17 + GET: /v1/aggregation/metric?metric=$HISTORY['get metric list'].$RESPONSE['$[0].id']&metric=$HISTORY['get metric list'].$RESPONSE['$[1].id']&needed_overlap=50 + status: 400 + response_strings: + - start and/or stop must be provided if specifying needed_overlap + # Aggregation by resource and metric_name - name: post a resource diff --git a/gnocchi/tests/test_rest.py b/gnocchi/tests/test_rest.py index 3166fae1..3a2d31b6 100644 --- a/gnocchi/tests/test_rest.py +++ b/gnocchi/tests/test_rest.py @@ -1654,17 +1654,22 @@ class ResourceTest(RestTest): + "/metric/foo?aggregation=max", params={"=": {"name": name}}, expect_errors=True) - self.assertEqual(400, result.status_code, result.text) self.assertIn("No overlap", 
result.text) result = self.app.post_json( - "/v1/aggregation/resource/" - + self.resource_type + "/metric/foo?aggregation=min" - + "&needed_overlap=0", + "/v1/aggregation/resource/" + self.resource_type + + "/metric/foo?aggregation=max&needed_overlap=5&start=2013-01-01", params={"=": {"name": name}}, expect_errors=True) + self.assertEqual(400, result.status_code, result.text) + self.assertIn("No overlap", result.text) + result = self.app.post_json( + "/v1/aggregation/resource/" + + self.resource_type + "/metric/foo?aggregation=min" + + "&needed_overlap=0&start=2013-01-01T00:00:00%2B00:00", + params={"=": {"name": name}}) self.assertEqual(200, result.status_code, result.text) measures = json.loads(result.text) self.assertEqual([['2013-01-01T00:00:00+00:00', 86400.0, 8.0], diff --git a/releasenotes/notes/mandatory-boundaries-for-overlap-af28dc1e0946c500.yaml b/releasenotes/notes/mandatory-boundaries-for-overlap-af28dc1e0946c500.yaml new file mode 100644 index 00000000..37dcdbfc --- /dev/null +++ b/releasenotes/notes/mandatory-boundaries-for-overlap-af28dc1e0946c500.yaml @@ -0,0 +1,7 @@ +--- +fixes: + - | + When specifying `needed_overlap` while aggregating across metrics without + bounds, the result did not necessarily honour the required overlap + provided. Aggregation without bounds now requires 100% overlap; an error is + raised otherwise -- GitLab From 5e17bad3f29531c3e2b076fb075a9343b777bf3b Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Fri, 28 Jul 2017 09:02:26 +0200 Subject: [PATCH 0911/1483] rest: allow filter= for batch delete Resource search allow to pass the query in "filter" query string option. Batch delete API is identical to resource search API. So this change allows this filter option too. 
--- gnocchi/rest/__init__.py | 34 ++++++++--------- .../tests/functional/gabbits/resource.yaml | 37 +++++++++++++++++++ gnocchi/tests/test_rest.py | 2 +- 3 files changed, 53 insertions(+), 20 deletions(-) diff --git a/gnocchi/rest/__init__.py b/gnocchi/rest/__init__.py index aea328c1..3f1d2b00 100644 --- a/gnocchi/rest/__init__.py +++ b/gnocchi/rest/__init__.py @@ -1053,7 +1053,12 @@ class ResourcesController(rest.RestController): def delete(self, **kwargs): # NOTE(sileht): Don't allow empty filter, this is going to delete # the entire database. - attr_filter = deserialize_and_validate(ResourceSearchSchema) + if pecan.request.body: + attr_filter = deserialize_and_validate(ResourceSearchSchema) + elif kwargs.get("filter"): + attr_filter = QueryStringSearchAttrFilter.parse(kwargs["filter"]) + else: + attr_filter = None # the voluptuous checks everything, but it is better to # have this here. @@ -1094,12 +1099,6 @@ class ResourcesByTypeController(rest.RestController): return ResourcesController(resource_type), remainder -class InvalidQueryStringSearchAttrFilter(Exception): - def __init__(self, reason): - super(InvalidQueryStringSearchAttrFilter, self).__init__( - "Invalid filter: %s" % reason) - - class QueryStringSearchAttrFilter(object): uninary_operators = ("not", ) binary_operator = (u">=", u"<=", u"!=", u">", u"<", u"=", u"==", u"eq", @@ -1170,13 +1169,19 @@ class QueryStringSearchAttrFilter(object): return result @classmethod - def parse(cls, query): + def _parse(cls, query): try: parsed_query = cls.expr.parseString(query, parseAll=True)[0] except pyparsing.ParseException as e: - raise InvalidQueryStringSearchAttrFilter(six.text_type(e)) + raise abort(400, "Invalid filter: %s" % six.text_type(e)) return cls._parsed_query2dict(parsed_query) + @classmethod + def parse(cls, query): + attr_filter = cls._parse(query) + return voluptuous.Schema(ResourceSearchSchema, + required=True)(attr_filter) + def ResourceSearchSchema(v): return _ResourceSearchSchema()(v) @@ 
-1222,20 +1227,11 @@ class SearchResourceTypeController(rest.RestController): def __init__(self, resource_type): self._resource_type = resource_type - @staticmethod - def parse_and_validate_qs_filter(query): - try: - attr_filter = QueryStringSearchAttrFilter.parse(query) - except InvalidQueryStringSearchAttrFilter as e: - raise abort(400, e) - return voluptuous.Schema(ResourceSearchSchema, - required=True)(attr_filter) - def _search(self, **kwargs): if pecan.request.body: attr_filter = deserialize_and_validate(ResourceSearchSchema) elif kwargs.get("filter"): - attr_filter = self.parse_and_validate_qs_filter(kwargs["filter"]) + attr_filter = QueryStringSearchAttrFilter.parse(kwargs["filter"]) else: attr_filter = None diff --git a/gnocchi/tests/functional/gabbits/resource.yaml b/gnocchi/tests/functional/gabbits/resource.yaml index da423767..c4d3bf11 100644 --- a/gnocchi/tests/functional/gabbits/resource.yaml +++ b/gnocchi/tests/functional/gabbits/resource.yaml @@ -642,6 +642,26 @@ tests: project_id: f3d41b770cc14f0bb94a1d5be9c0e3ea status: 201 + - name: create resource seven + desc: before test batch delete, create some resources + POST: $LAST_URL + data: + id: cd09ecce-3e17-4733-ad32-8a6b2034dcfd + started_at: "2015-08-14T00:00:00.000000" + user_id: 0fbb231484614b1a80131fc22f6afc9c + project_id: f3d41b770cc14f0bb94a1d5be9c0e3ea + status: 201 + + - name: create resource eight + desc: before test batch delete, create some resources + POST: $LAST_URL + data: + id: 05fde895-cf8a-475c-90a7-a4c8598d935d + started_at: "2015-08-14T00:00:00.000000" + user_id: 0fbb231484614b1a80131fc22f6afc9c + project_id: f3d41b770cc14f0bb94a1d5be9c0e3ea + status: 201 + - name: get resource one desc: ensure the resources exists GET: /v1/resource/generic/f93450f2-aaaa-4d67-9985-02511241e7d1 @@ -672,6 +692,16 @@ tests: GET: /v1/resource/generic/f93450f2-ffff-4d67-9985-02511241e7d1 status: 200 + - name: get resource seven + desc: ensure the resources exists + GET: 
/v1/resource/generic/cd09ecce-3e17-4733-ad32-8a6b2034dcfd + status: 200 + + - name: get resource eight + desc: ensure the resources exists + GET: /v1/resource/generic/05fde895-cf8a-475c-90a7-a4c8598d935d + status: 200 + - name: delete random data structure desc: delete an empty list test DELETE: /v1/resource/generic @@ -763,6 +793,7 @@ tests: response_json_paths: $.deleted: 2 + - name: delete both existent and non-existent data desc: delete exits and non-exist data DELETE: $LAST_URL @@ -788,3 +819,9 @@ tests: status: 200 response_json_paths: $.deleted: 0 + + - name: delete multiple with query string + DELETE: $LAST_URL?filter=id%20in%20%5Bcd09ecce-3e17-4733-ad32-8a6b2034dcfd%2C%2005fde895-cf8a-475c-90a7-a4c8598d935d%5D + response_json_paths: + $.deleted: 2 + diff --git a/gnocchi/tests/test_rest.py b/gnocchi/tests/test_rest.py index 3a2d31b6..b0599ef3 100644 --- a/gnocchi/tests/test_rest.py +++ b/gnocchi/tests/test_rest.py @@ -1860,7 +1860,7 @@ class GenericResourceTest(RestTest): class QueryStringSearchAttrFilterTest(tests_base.TestCase): def _do_test(self, expr, expected): - req = rest.QueryStringSearchAttrFilter.parse(expr) + req = rest.QueryStringSearchAttrFilter._parse(expr) self.assertEqual(expected, req) def test_search_query_builder(self): -- GitLab From 38b597fb99c65d142a1c1c2212cc40d1ccda6659 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Thu, 27 Jul 2017 12:54:11 +0200 Subject: [PATCH 0912/1483] Merge gnocchi.incoming._carbonara into gnocchi.incoming There's no driver that is not based on Carbonara, and we don't envision this happening any time now. There is a lot of assumption everywhere that all drivers are based on Carbonara anyway, so let's remove that abstraction layer. 
--- gnocchi/incoming/__init__.py | 119 ++++++++++++++++++++++++++-- gnocchi/incoming/_carbonara.py | 139 --------------------------------- gnocchi/incoming/ceph.py | 3 +- gnocchi/incoming/file.py | 4 +- gnocchi/incoming/redis.py | 4 +- gnocchi/incoming/s3.py | 4 +- gnocchi/incoming/swift.py | 4 +- 7 files changed, 121 insertions(+), 156 deletions(-) delete mode 100644 gnocchi/incoming/_carbonara.py diff --git a/gnocchi/incoming/__init__.py b/gnocchi/incoming/__init__.py index 233ac9c7..8843d819 100644 --- a/gnocchi/incoming/__init__.py +++ b/gnocchi/incoming/__init__.py @@ -14,10 +14,19 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. +from concurrent import futures + +import daiquiri +import numpy +import six from gnocchi import exceptions from gnocchi import utils +LOG = daiquiri.getLogger(__name__) + +_NUM_WORKERS = utils.get_default_workers() + class ReportGenerationError(Exception): pass @@ -27,15 +36,77 @@ class SackDetectionError(Exception): pass -class StorageDriver(object): +class IncomingDriver(object): + MEASURE_PREFIX = "measure" + SACK_PREFIX = "incoming" + CFG_PREFIX = 'gnocchi-config' + CFG_SACKS = 'sacks' + + @property + def NUM_SACKS(self): + if not hasattr(self, '_num_sacks'): + try: + self._num_sacks = int(self.get_storage_sacks()) + except Exception as e: + LOG.error('Unable to detect the number of storage sacks. 
' + 'Ensure gnocchi-upgrade has been executed: %s', e) + raise SackDetectionError(e) + return self._num_sacks @staticmethod def __init__(conf): pass + def get_sack_prefix(self, num_sacks=None): + sacks = num_sacks if num_sacks else self.NUM_SACKS + return self.SACK_PREFIX + str(sacks) + '-%s' + + def upgrade(self, num_sacks): + if not self.get_storage_sacks(): + self.set_storage_settings(num_sacks) + @staticmethod - def upgrade(): - pass + def set_storage_settings(num_sacks): + raise exceptions.NotImplementedError + + @staticmethod + def remove_sack_group(num_sacks): + raise exceptions.NotImplementedError + + @staticmethod + def get_storage_sacks(): + """Return the number of sacks in storage. None if not set.""" + raise exceptions.NotImplementedError + + @staticmethod + def get_sack_lock(coord, sack): + lock_name = b'gnocchi-sack-%s-lock' % str(sack).encode('ascii') + return coord.get_lock(lock_name) + + _SERIALIZE_DTYPE = [('timestamps', ' Date: Thu, 27 Jul 2017 19:54:01 +0200 Subject: [PATCH 0913/1483] doc: stop building 2.x doc on gnocchi.xyz Those version are now one year old. I don't think there's any user left. 
--- doc/source/conf.py | 2 +- tox.ini | 3 --- 2 files changed, 1 insertion(+), 4 deletions(-) diff --git a/doc/source/conf.py b/doc/source/conf.py index c7140ec7..9d5e962b 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -182,7 +182,7 @@ scv_sort = ('semver',) scv_show_banner = True scv_banner_main_ref = 'stable/4.0' scv_priority = 'branches' -scv_whitelist_branches = ('master', '^stable/(2\.1|2\.2|[3-9]\.)') +scv_whitelist_branches = ('master', '^stable/([3-9]\.)') scv_whitelist_tags = ("^$",) here = os.path.dirname(os.path.realpath(__file__)) diff --git a/tox.ini b/tox.ini index 4da17e5e..8d4f495a 100644 --- a/tox.ini +++ b/tox.ini @@ -145,9 +145,6 @@ basepython = python2.7 setenv = GNOCCHI_STORAGE_DEPS=file deps = {[testenv:docs]deps} sphinxcontrib-versioning -# for 2.x doc - pytimeparse - retrying # for 3.x doc oslotest oslosphinx -- GitLab From cd093e4641a202c720ec8b54e335cc91a07f97ca Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Wed, 26 Jul 2017 09:05:26 +0200 Subject: [PATCH 0914/1483] rest: Fix invalid query validation Some operators accept a list or a dict at rest layer, when it should accept only an attribute value, like bool, str, None. For Postgresql, that result to an sql error, turned in HTTP 500. For Mysql, the request succeed with some magic casting. 
Closes #240 --- gnocchi/rest/__init__.py | 25 +++++++--- gnocchi/tests/functional/gabbits/search.yaml | 50 ++++++++++++++++++-- 2 files changed, 65 insertions(+), 10 deletions(-) diff --git a/gnocchi/rest/__init__.py b/gnocchi/rest/__init__.py index 3f1d2b00..677c5fc2 100644 --- a/gnocchi/rest/__init__.py +++ b/gnocchi/rest/__init__.py @@ -1187,6 +1187,12 @@ def ResourceSearchSchema(v): return _ResourceSearchSchema()(v) +# NOTE(sileht): indexer will cast this type to the real attribute +# type, here we just want to be sure this is not a dict or a list +ResourceSearchSchemaAttributeValue = voluptuous.Any( + six.text_type, float, int, bool, None) + + def _ResourceSearchSchema(): user = pecan.request.auth_helper.get_current_user( pecan.request) @@ -1203,21 +1209,26 @@ def _ResourceSearchSchema(): u"<=", u"≤", u"le", u">=", u"≥", u"ge", u"!=", u"≠", u"ne", + u"like" + ): voluptuous.All( + voluptuous.Length(min=1, max=1), + {"id": _ResourceUUID, + six.text_type: ResourceSearchSchemaAttributeValue}, + ), + voluptuous.Any( u"in", - u"like", ): voluptuous.All( voluptuous.Length(min=1, max=1), - voluptuous.Any( - {"id": voluptuous.Any( - [_ResourceUUID], _ResourceUUID), - voluptuous.Extra: voluptuous.Extra})), + {"id": [_ResourceUUID], + six.text_type: [ResourceSearchSchemaAttributeValue]} + ), voluptuous.Any( u"and", u"∨", u"or", u"∧", - u"not", ): voluptuous.All( [ResourceSearchSchema], voluptuous.Length(min=1) - ) + ), + u"not": ResourceSearchSchema, } ) ) diff --git a/gnocchi/tests/functional/gabbits/search.yaml b/gnocchi/tests/functional/gabbits/search.yaml index ba55c648..13906bf1 100644 --- a/gnocchi/tests/functional/gabbits/search.yaml +++ b/gnocchi/tests/functional/gabbits/search.yaml @@ -21,12 +21,49 @@ tests: GET: /v1/search/resource/foobar status: 404 + # FIXME(sileht): this test looks wrong, it talks about invalidity + # but asserts it return 200... 
- name: search with invalid uuid POST: /v1/search/resource/generic data: =: id: "cd9eef" + - name: search invalid and value + desc: and should be followed by a list, not dict + POST: /v1/search/resource/generic + data: + and: + project_id: foobar + status: 400 + response_strings: + - "expected a list for dictionary value @ data[" + - "'and']" + + - name: search invalid ne value + desc: attribute value for binary operator must not be dict or list + POST: /v1/search/resource/generic + data: + ne: + project_id: + - foobar + status: 400 + response_strings: + - "for dictionary value @ data[" + - "'ne'][" + - "'project_id']" + + - name: search invalid not value + desc: uninary operator must follow by dict, not list + POST: /v1/search/resource/generic + data: + not: + - project_id: foobar + status: 400 + response_strings: + - "expected a dictionary for dictionary value @ data[" + - "'not']" + - name: post generic resource POST: /v1/resource/generic data: @@ -55,15 +92,22 @@ tests: response_json_paths: $.`len`: 2 - - name: search like created_by_project_id + - name: search eq created_by_project_id POST: /v1/search/resource/generic data: eq: - created_by_project_id: - - f3d41b770cc14f0bb94a1d5be9c0e3ea + created_by_project_id: f3d41b770cc14f0bb94a1d5be9c0e3ea response_json_paths: $.`len`: 0 + - name: search eq creator + POST: /v1/search/resource/generic + data: + eq: + creator: "foobar" + response_json_paths: + $.`len`: 2 + - name: search in_ query string POST: /v1/search/resource/generic?filter=id%20in%20%5Bfaef212f-0bf4-4030-a461-2186fef79be0%2C%20df7e5e75-6a1d-4ff7-85cb-38eb9d75da7e%5D response_json_paths: -- GitLab From 6508ace95348998f88c523b54f6f050414946ff4 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Mon, 31 Jul 2017 17:18:35 +0200 Subject: [PATCH 0915/1483] rest: add a like test --- gnocchi/tests/functional/gabbits/search.yaml | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/gnocchi/tests/functional/gabbits/search.yaml 
b/gnocchi/tests/functional/gabbits/search.yaml index 13906bf1..61853a04 100644 --- a/gnocchi/tests/functional/gabbits/search.yaml +++ b/gnocchi/tests/functional/gabbits/search.yaml @@ -108,6 +108,14 @@ tests: response_json_paths: $.`len`: 2 + - name: search like + POST: /v1/search/resource/generic + data: + like: + creator: foo% + response_json_paths: + $.`len`: 2 + - name: search in_ query string POST: /v1/search/resource/generic?filter=id%20in%20%5Bfaef212f-0bf4-4030-a461-2186fef79be0%2C%20df7e5e75-6a1d-4ff7-85cb-38eb9d75da7e%5D response_json_paths: -- GitLab From 5fbb96eb996b768b131f86395f0b0110c55058e5 Mon Sep 17 00:00:00 2001 From: gord chung Date: Thu, 3 Aug 2017 16:42:30 +0000 Subject: [PATCH 0916/1483] use numpy datatype we store new measures as array with ('timestamp', 'value') label, so we can use them as index. this is makes things more obvious and is faster. --- gnocchi/storage/_carbonara.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/gnocchi/storage/_carbonara.py b/gnocchi/storage/_carbonara.py index d2879dd6..fce19b97 100644 --- a/gnocchi/storage/_carbonara.py +++ b/gnocchi/storage/_carbonara.py @@ -17,7 +17,6 @@ import collections import functools import itertools -import operator from concurrent import futures import daiquiri @@ -389,7 +388,7 @@ class CarbonaraBasedStorage(storage.StorageDriver): LOG.debug("Skipping %s (already processed)", metric) return - measures = sorted(measures, key=operator.itemgetter(0)) + measures.sort(order='timestamps') agg_methods = list(metric.archive_policy.aggregation_methods) block_size = metric.archive_policy.max_block_size @@ -433,7 +432,7 @@ class CarbonaraBasedStorage(storage.StorageDriver): # unaggregated measures matching largest # granularity. 
the following takes only the points # affected by new measures for specific granularity - tstamp = max(bound_timeserie.first, measures[0][0]) + tstamp = max(bound_timeserie.first, measures['timestamps'][0]) new_first_block_timestamp = bound_timeserie.first_block_timestamp() computed_points['number'] = len(bound_timeserie) for d in definition: -- GitLab From 047069217f5e4162fb58d8226588372775a7aca6 Mon Sep 17 00:00:00 2001 From: gord chung Date: Thu, 3 Aug 2017 19:03:39 +0000 Subject: [PATCH 0917/1483] use searchsorted to slice don't linear search to remove too old measures --- gnocchi/carbonara.py | 10 +- gnocchi/tests/test_carbonara.py | 316 ++++++++++++++++++-------------- 2 files changed, 180 insertions(+), 146 deletions(-) diff --git a/gnocchi/carbonara.py b/gnocchi/carbonara.py index d0e80cdf..050bfb62 100644 --- a/gnocchi/carbonara.py +++ b/gnocchi/carbonara.py @@ -347,13 +347,9 @@ class BoundTimeSerie(TimeSerie): def set_values(self, values, before_truncate_callback=None): # NOTE: values must be sorted when passed in. 
if self.block_size is not None and len(self.ts) != 0: - first_block_timestamp = self.first_block_timestamp() - for index, (timestamp, value) in enumerate(values): - if timestamp >= first_block_timestamp: - values = values[index:] - break - else: - values = [] + index = numpy.searchsorted(values['timestamps'], + self.first_block_timestamp()) + values = values[index:] super(BoundTimeSerie, self).set_values(values) if before_truncate_callback: before_truncate_callback(self) diff --git a/gnocchi/tests/test_carbonara.py b/gnocchi/tests/test_carbonara.py index c40dc498..efc5643d 100644 --- a/gnocchi/tests/test_carbonara.py +++ b/gnocchi/tests/test_carbonara.py @@ -50,8 +50,9 @@ class TestBoundTimeSerie(base.BaseTestCase): [5, 6], block_size=numpy.timedelta64(5, 's')) self.assertEqual(2, len(ts)) - ts.set_values([(datetime64(2014, 1, 1, 12, 0, 10), 3), - (datetime64(2014, 1, 1, 12, 0, 11), 4)]) + ts.set_values(numpy.array([(datetime64(2014, 1, 1, 12, 0, 10), 3), + (datetime64(2014, 1, 1, 12, 0, 11), 4)], + dtype=carbonara.TIMESERIES_ARRAY_DTYPE)) self.assertEqual(2, len(ts)) def test_block_size_back_window(self): @@ -63,8 +64,9 @@ class TestBoundTimeSerie(base.BaseTestCase): block_size=numpy.timedelta64(5, 's'), back_window=1) self.assertEqual(3, len(ts)) - ts.set_values([(datetime64(2014, 1, 1, 12, 0, 10), 3), - (datetime64(2014, 1, 1, 12, 0, 11), 4)]) + ts.set_values(numpy.array([(datetime64(2014, 1, 1, 12, 0, 10), 3), + (datetime64(2014, 1, 1, 12, 0, 11), 4)], + dtype=carbonara.TIMESERIES_ARRAY_DTYPE)) self.assertEqual(3, len(ts)) def test_block_size_unordered(self): @@ -74,8 +76,9 @@ class TestBoundTimeSerie(base.BaseTestCase): [5, 23], block_size=numpy.timedelta64(5, 's')) self.assertEqual(2, len(ts)) - ts.set_values([(datetime64(2014, 1, 1, 12, 0, 11), 3), - (datetime64(2014, 1, 1, 12, 0, 10), 4)]) + ts.set_values(numpy.array([(datetime64(2014, 1, 1, 12, 0, 11), 3), + (datetime64(2014, 1, 1, 12, 0, 10), 4)], + dtype=carbonara.TIMESERIES_ARRAY_DTYPE)) 
self.assertEqual(2, len(ts)) def test_duplicate_timestamps(self): @@ -87,11 +90,12 @@ class TestBoundTimeSerie(base.BaseTestCase): self.assertEqual(10.0, ts[0][1]) self.assertEqual(23.0, ts[1][1]) - ts.set_values([(datetime64(2014, 1, 1, 13, 0, 10), 3), - (datetime64(2014, 1, 1, 13, 0, 11), 9), - (datetime64(2014, 1, 1, 13, 0, 11), 8), - (datetime64(2014, 1, 1, 13, 0, 11), 7), - (datetime64(2014, 1, 1, 13, 0, 11), 4)]) + ts.set_values(numpy.array([(datetime64(2014, 1, 1, 13, 0, 10), 3), + (datetime64(2014, 1, 1, 13, 0, 11), 9), + (datetime64(2014, 1, 1, 13, 0, 11), 8), + (datetime64(2014, 1, 1, 13, 0, 11), 7), + (datetime64(2014, 1, 1, 13, 0, 11), 4)], + dtype=carbonara.TIMESERIES_ARRAY_DTYPE)) self.assertEqual(4, len(ts)) self.assertEqual(10.0, ts[0][1]) self.assertEqual(23.0, ts[1][1]) @@ -408,10 +412,12 @@ class TestAggregatedTimeSerie(base.BaseTestCase): 'size': 50, 'agg': 'mean'} tsb2 = carbonara.BoundTimeSerie(block_size=tsc2['sampling']) - tsb1.set_values([(datetime64(2014, 1, 1, 11, 46, 4), 4)], + tsb1.set_values(numpy.array([(datetime64(2014, 1, 1, 11, 46, 4), 4)], + dtype=carbonara.TIMESERIES_ARRAY_DTYPE), before_truncate_callback=functools.partial( self._resample_and_merge, agg_dict=tsc1)) - tsb2.set_values([(datetime64(2014, 1, 1, 9, 1, 4), 4)], + tsb2.set_values(numpy.array([(datetime64(2014, 1, 1, 9, 1, 4), 4)], + dtype=carbonara.TIMESERIES_ARRAY_DTYPE), before_truncate_callback=functools.partial( self._resample_and_merge, agg_dict=tsc2)) @@ -430,7 +436,8 @@ class TestAggregatedTimeSerie(base.BaseTestCase): max_size=50, aggregation_method='mean') - tsb1.set_values([(datetime64(2014, 1, 1, 12, 3, 0), 4)], + tsb1.set_values(numpy.array([(datetime64(2014, 1, 1, 12, 3, 0), 4)], + dtype=carbonara.TIMESERIES_ARRAY_DTYPE), before_truncate_callback=functools.partial( self._resample_and_merge, agg_dict=tsc1)) self.assertRaises(carbonara.UnAggregableTimeseries, @@ -448,7 +455,7 @@ class TestAggregatedTimeSerie(base.BaseTestCase): # NOTE(sileht): minute 8 is 
missing in both and # minute 7 in tsc2 too, but it looks like we have # enough point to do the aggregation - tsb1.set_values([ + tsb1.set_values(numpy.array([ (datetime64(2014, 1, 1, 11, 0, 0), 4), (datetime64(2014, 1, 1, 12, 1, 0), 3), (datetime64(2014, 1, 1, 12, 2, 0), 2), @@ -457,11 +464,12 @@ class TestAggregatedTimeSerie(base.BaseTestCase): (datetime64(2014, 1, 1, 12, 5, 0), 3), (datetime64(2014, 1, 1, 12, 6, 0), 4), (datetime64(2014, 1, 1, 12, 7, 0), 10), - (datetime64(2014, 1, 1, 12, 9, 0), 2), - ], before_truncate_callback=functools.partial( - self._resample_and_merge, agg_dict=tsc1)) + (datetime64(2014, 1, 1, 12, 9, 0), 2)], + dtype=carbonara.TIMESERIES_ARRAY_DTYPE), + before_truncate_callback=functools.partial( + self._resample_and_merge, agg_dict=tsc1)) - tsb2.set_values([ + tsb2.set_values(numpy.array([ (datetime64(2014, 1, 1, 12, 1, 0), 3), (datetime64(2014, 1, 1, 12, 2, 0), 4), (datetime64(2014, 1, 1, 12, 3, 0), 4), @@ -470,9 +478,10 @@ class TestAggregatedTimeSerie(base.BaseTestCase): (datetime64(2014, 1, 1, 12, 6, 0), 6), (datetime64(2014, 1, 1, 12, 9, 0), 2), (datetime64(2014, 1, 1, 12, 11, 0), 2), - (datetime64(2014, 1, 1, 12, 12, 0), 2), - ], before_truncate_callback=functools.partial( - self._resample_and_merge, agg_dict=tsc2)) + (datetime64(2014, 1, 1, 12, 12, 0), 2)], + dtype=carbonara.TIMESERIES_ARRAY_DTYPE), + before_truncate_callback=functools.partial( + self._resample_and_merge, agg_dict=tsc2)) dtfrom = datetime64(2014, 1, 1, 12, 0, 0) dtto = datetime64(2014, 1, 1, 12, 10, 0) @@ -518,26 +527,28 @@ class TestAggregatedTimeSerie(base.BaseTestCase): 'size': 10, 'agg': 'mean'} tsb2 = carbonara.BoundTimeSerie(block_size=tsc2['sampling']) - tsb1.set_values([ + tsb1.set_values(numpy.array([ (datetime64(2014, 1, 1, 12, 3, 0), 9), (datetime64(2014, 1, 1, 12, 4, 0), 1), (datetime64(2014, 1, 1, 12, 5, 0), 2), (datetime64(2014, 1, 1, 12, 6, 0), 7), (datetime64(2014, 1, 1, 12, 7, 0), 5), - (datetime64(2014, 1, 1, 12, 8, 0), 3), - ], 
before_truncate_callback=functools.partial( - self._resample_and_merge, agg_dict=tsc1)) + (datetime64(2014, 1, 1, 12, 8, 0), 3)], + dtype=carbonara.TIMESERIES_ARRAY_DTYPE), + before_truncate_callback=functools.partial( + self._resample_and_merge, agg_dict=tsc1)) - tsb2.set_values([ + tsb2.set_values(numpy.array([ (datetime64(2014, 1, 1, 11, 0, 0), 6), (datetime64(2014, 1, 1, 12, 1, 0), 2), (datetime64(2014, 1, 1, 12, 2, 0), 13), (datetime64(2014, 1, 1, 12, 3, 0), 24), (datetime64(2014, 1, 1, 12, 4, 0), 4), (datetime64(2014, 1, 1, 12, 5, 0), 16), - (datetime64(2014, 1, 1, 12, 6, 0), 12), - ], before_truncate_callback=functools.partial( - self._resample_and_merge, agg_dict=tsc2)) + (datetime64(2014, 1, 1, 12, 6, 0), 12)], + dtype=carbonara.TIMESERIES_ARRAY_DTYPE), + before_truncate_callback=functools.partial( + self._resample_and_merge, agg_dict=tsc2)) # By default we require 100% of point that overlap # but we allow that the last datapoint is missing @@ -564,16 +575,16 @@ class TestAggregatedTimeSerie(base.BaseTestCase): 'size': 10, 'agg': 'mean'} tsb2 = carbonara.BoundTimeSerie(block_size=tsc2['sampling']) - tsb1.set_values([ - (datetime64(2014, 1, 1, 12, 3, 0), 4), - ], before_truncate_callback=functools.partial( - self._resample_and_merge, agg_dict=tsc1)) + tsb1.set_values(numpy.array([(datetime64(2014, 1, 1, 12, 3, 0), 4)], + dtype=carbonara.TIMESERIES_ARRAY_DTYPE), + before_truncate_callback=functools.partial( + self._resample_and_merge, agg_dict=tsc1)) - tsb2.set_values([ - (datetime64(2014, 1, 1, 11, 0, 0), 4), - (datetime64(2014, 1, 1, 12, 3, 0), 4), - ], before_truncate_callback=functools.partial( - self._resample_and_merge, agg_dict=tsc2)) + tsb2.set_values(numpy.array([(datetime64(2014, 1, 1, 11, 0, 0), 4), + (datetime64(2014, 1, 1, 12, 3, 0), 4)], + dtype=carbonara.TIMESERIES_ARRAY_DTYPE), + before_truncate_callback=functools.partial( + self._resample_and_merge, agg_dict=tsc2)) output = carbonara.AggregatedTimeSerie.aggregated( [tsc1['return'], 
tsc2['return']], aggregation='mean') @@ -588,7 +599,7 @@ class TestAggregatedTimeSerie(base.BaseTestCase): 'size': 10, 'agg': 'mean'} tsb = carbonara.BoundTimeSerie(block_size=ts['sampling']) - tsb.set_values([ + tsb.set_values(numpy.array([ (datetime64(2014, 1, 1, 11, 46, 4), 4), (datetime64(2014, 1, 1, 11, 47, 34), 8), (datetime64(2014, 1, 1, 11, 50, 54), 50), @@ -604,14 +615,15 @@ class TestAggregatedTimeSerie(base.BaseTestCase): (datetime64(2014, 1, 1, 12, 4, 9), 7), (datetime64(2014, 1, 1, 12, 5, 1), 15), (datetime64(2014, 1, 1, 12, 5, 12), 1), - (datetime64(2014, 1, 1, 12, 6, 0, 2), 3), - ], before_truncate_callback=functools.partial( - self._resample_and_merge, agg_dict=ts)) + (datetime64(2014, 1, 1, 12, 6, 0, 2), 3)], + dtype=carbonara.TIMESERIES_ARRAY_DTYPE), + before_truncate_callback=functools.partial( + self._resample_and_merge, agg_dict=ts)) - tsb.set_values([ - (datetime64(2014, 1, 1, 12, 6), 5), - ], before_truncate_callback=functools.partial( - self._resample_and_merge, agg_dict=ts)) + tsb.set_values(numpy.array([(datetime64(2014, 1, 1, 12, 6), 5)], + dtype=carbonara.TIMESERIES_ARRAY_DTYPE), + before_truncate_callback=functools.partial( + self._resample_and_merge, agg_dict=ts)) self.assertEqual([ (datetime64(2014, 1, 1, 11, 54), @@ -659,26 +671,28 @@ class TestAggregatedTimeSerie(base.BaseTestCase): 'size': 10, 'agg': 'mean'} tsb2 = carbonara.BoundTimeSerie(block_size=tsc2['sampling']) - tsb1.set_values([ + tsb1.set_values(numpy.array([ (datetime64(2014, 1, 1, 12, 3, 0), 9), (datetime64(2014, 1, 1, 12, 4, 0), 1), (datetime64(2014, 1, 1, 12, 5, 0), 2), (datetime64(2014, 1, 1, 12, 6, 0), 7), (datetime64(2014, 1, 1, 12, 7, 0), 5), - (datetime64(2014, 1, 1, 12, 8, 0), 3), - ], before_truncate_callback=functools.partial( - self._resample_and_merge, agg_dict=tsc1)) + (datetime64(2014, 1, 1, 12, 8, 0), 3)], + dtype=carbonara.TIMESERIES_ARRAY_DTYPE), + before_truncate_callback=functools.partial( + self._resample_and_merge, agg_dict=tsc1)) - 
tsb2.set_values([ + tsb2.set_values(numpy.array([ (datetime64(2014, 1, 1, 12, 0, 0), 6), (datetime64(2014, 1, 1, 12, 1, 0), 2), (datetime64(2014, 1, 1, 12, 2, 0), 13), (datetime64(2014, 1, 1, 12, 3, 0), 24), (datetime64(2014, 1, 1, 12, 4, 0), 4), (datetime64(2014, 1, 1, 12, 5, 0), 16), - (datetime64(2014, 1, 1, 12, 6, 0), 12), - ], before_truncate_callback=functools.partial( - self._resample_and_merge, agg_dict=tsc2)) + (datetime64(2014, 1, 1, 12, 6, 0), 12)], + dtype=carbonara.TIMESERIES_ARRAY_DTYPE), + before_truncate_callback=functools.partial( + self._resample_and_merge, agg_dict=tsc2)) output = carbonara.AggregatedTimeSerie.aggregated([ tsc1['return'], tsc2['return']], aggregation='mean', fill=0) @@ -712,26 +726,28 @@ class TestAggregatedTimeSerie(base.BaseTestCase): 'size': 10, 'agg': 'mean'} tsb2 = carbonara.BoundTimeSerie(block_size=tsc2['sampling']) - tsb1.set_values([ + tsb1.set_values(numpy.array([ (datetime64(2014, 1, 1, 12, 3, 0), 9), (datetime64(2014, 1, 1, 12, 4, 0), 1), (datetime64(2014, 1, 1, 12, 5, 0), 2), (datetime64(2014, 1, 1, 12, 6, 0), 7), (datetime64(2014, 1, 1, 12, 7, 0), 5), - (datetime64(2014, 1, 1, 12, 8, 0), 3), - ], before_truncate_callback=functools.partial( - self._resample_and_merge, agg_dict=tsc1)) + (datetime64(2014, 1, 1, 12, 8, 0), 3)], + dtype=carbonara.TIMESERIES_ARRAY_DTYPE), + before_truncate_callback=functools.partial( + self._resample_and_merge, agg_dict=tsc1)) - tsb2.set_values([ + tsb2.set_values(numpy.array([ (datetime64(2014, 1, 1, 12, 0, 0), 6), (datetime64(2014, 1, 1, 12, 1, 0), 2), (datetime64(2014, 1, 1, 12, 2, 0), 13), (datetime64(2014, 1, 1, 12, 3, 0), 24), (datetime64(2014, 1, 1, 12, 4, 0), 4), (datetime64(2014, 1, 1, 12, 5, 0), 16), - (datetime64(2014, 1, 1, 12, 6, 0), 12), - ], before_truncate_callback=functools.partial( - self._resample_and_merge, agg_dict=tsc2)) + (datetime64(2014, 1, 1, 12, 6, 0), 12)], + dtype=carbonara.TIMESERIES_ARRAY_DTYPE), + before_truncate_callback=functools.partial( + 
self._resample_and_merge, agg_dict=tsc2)) output = carbonara.AggregatedTimeSerie.aggregated([ tsc1['return'], tsc2['return']], aggregation='mean', fill='null') @@ -765,22 +781,24 @@ class TestAggregatedTimeSerie(base.BaseTestCase): 'size': 10, 'agg': 'mean'} tsb2 = carbonara.BoundTimeSerie(block_size=tsc2['sampling']) - tsb1.set_values([ + tsb1.set_values(numpy.array([ (datetime64(2014, 1, 1, 12, 3, 0), 9), (datetime64(2014, 1, 1, 12, 4, 0), 1), (datetime64(2014, 1, 1, 12, 7, 0), 5), - (datetime64(2014, 1, 1, 12, 8, 0), 3), - ], before_truncate_callback=functools.partial( - self._resample_and_merge, agg_dict=tsc1)) + (datetime64(2014, 1, 1, 12, 8, 0), 3)], + dtype=carbonara.TIMESERIES_ARRAY_DTYPE), + before_truncate_callback=functools.partial( + self._resample_and_merge, agg_dict=tsc1)) - tsb2.set_values([ + tsb2.set_values(numpy.array([ (datetime64(2014, 1, 1, 12, 0, 0), 6), (datetime64(2014, 1, 1, 12, 1, 0), 2), (datetime64(2014, 1, 1, 12, 2, 0), 13), (datetime64(2014, 1, 1, 12, 3, 0), 24), - (datetime64(2014, 1, 1, 12, 4, 0), 4), - ], before_truncate_callback=functools.partial( - self._resample_and_merge, agg_dict=tsc2)) + (datetime64(2014, 1, 1, 12, 4, 0), 4)], + dtype=carbonara.TIMESERIES_ARRAY_DTYPE), + before_truncate_callback=functools.partial( + self._resample_and_merge, agg_dict=tsc2)) output = carbonara.AggregatedTimeSerie.aggregated([ tsc1['return'], tsc2['return']], aggregation='mean', fill=0) @@ -807,9 +825,10 @@ class TestAggregatedTimeSerie(base.BaseTestCase): 'size': 3600 * 24, 'agg': '90pct'} tsb = carbonara.BoundTimeSerie(block_size=ts['sampling']) - tsb.set_values([(datetime64(2014, 1, 1, 12, 0, 0), 3), - (datetime64(2014, 1, 1, 12, 0, 0, 123), 4), - (datetime64(2014, 1, 1, 12, 0, 2), 4)], + tsb.set_values(numpy.array([(datetime64(2014, 1, 1, 12, 0, 0), 3), + (datetime64(2014, 1, 1, 12, 0, 0, 123), 4), + (datetime64(2014, 1, 1, 12, 0, 2), 4)], + dtype=carbonara.TIMESERIES_ARRAY_DTYPE), before_truncate_callback=functools.partial( 
self._resample_and_merge, agg_dict=ts)) @@ -831,9 +850,11 @@ class TestAggregatedTimeSerie(base.BaseTestCase): # Rounding \o/ self.assertAlmostEqual(ref[2], res[2]) - tsb.set_values([(datetime64(2014, 1, 1, 12, 0, 2, 113), 110)], - before_truncate_callback=functools.partial( - self._resample_and_merge, agg_dict=ts)) + tsb.set_values(numpy.array([ + (datetime64(2014, 1, 1, 12, 0, 2, 113), 110)], + dtype=carbonara.TIMESERIES_ARRAY_DTYPE), + before_truncate_callback=functools.partial( + self._resample_and_merge, agg_dict=ts)) result = ts['return'].fetch(datetime64(2014, 1, 1, 12, 0, 0)) reference = [ @@ -858,19 +879,21 @@ class TestAggregatedTimeSerie(base.BaseTestCase): 'size': 10, 'agg': 'mean'} tsb = carbonara.BoundTimeSerie(block_size=ts['sampling']) - tsb.set_values([ + tsb.set_values(numpy.array([ (datetime64(2014, 1, 1, 11, 46, 0, 200123), 4), (datetime64(2014, 1, 1, 11, 46, 0, 340000), 8), (datetime64(2014, 1, 1, 11, 47, 0, 323154), 50), (datetime64(2014, 1, 1, 11, 48, 0, 590903), 4), - (datetime64(2014, 1, 1, 11, 48, 0, 903291), 4), - ], before_truncate_callback=functools.partial( - self._resample_and_merge, agg_dict=ts)) + (datetime64(2014, 1, 1, 11, 48, 0, 903291), 4)], + dtype=carbonara.TIMESERIES_ARRAY_DTYPE), + before_truncate_callback=functools.partial( + self._resample_and_merge, agg_dict=ts)) - tsb.set_values([ - (datetime64(2014, 1, 1, 11, 48, 0, 821312), 5), - ], before_truncate_callback=functools.partial( - self._resample_and_merge, agg_dict=ts)) + tsb.set_values(numpy.array([ + (datetime64(2014, 1, 1, 11, 48, 0, 821312), 5)], + dtype=carbonara.TIMESERIES_ARRAY_DTYPE), + before_truncate_callback=functools.partial( + self._resample_and_merge, agg_dict=ts)) self.assertEqual([ (datetime64(2014, 1, 1, 11, 46, 0, 200000), @@ -890,11 +913,12 @@ class TestAggregatedTimeSerie(base.BaseTestCase): 'size': 60, 'agg': 'std'} tsb = carbonara.BoundTimeSerie(block_size=ts['sampling']) - tsb.set_values([(datetime64(2014, 1, 1, 12, 0, 0), 3), - (datetime64(2014, 1, 
1, 12, 1, 4), 4), - (datetime64(2014, 1, 1, 12, 1, 9), 7), - (datetime64(2014, 1, 1, 12, 2, 1), 15), - (datetime64(2014, 1, 1, 12, 2, 12), 1)], + tsb.set_values(numpy.array([(datetime64(2014, 1, 1, 12, 0, 0), 3), + (datetime64(2014, 1, 1, 12, 1, 4), 4), + (datetime64(2014, 1, 1, 12, 1, 9), 7), + (datetime64(2014, 1, 1, 12, 2, 1), 15), + (datetime64(2014, 1, 1, 12, 2, 12), 1)], + dtype=carbonara.TIMESERIES_ARRAY_DTYPE), before_truncate_callback=functools.partial( self._resample_and_merge, agg_dict=ts)) @@ -905,7 +929,8 @@ class TestAggregatedTimeSerie(base.BaseTestCase): numpy.timedelta64(60000000000, 'ns'), 9.8994949366116654), ], list(ts['return'].fetch(datetime64(2014, 1, 1, 12, 0, 0)))) - tsb.set_values([(datetime64(2014, 1, 1, 12, 2, 13), 110)], + tsb.set_values(numpy.array([(datetime64(2014, 1, 1, 12, 2, 13), 110)], + dtype=carbonara.TIMESERIES_ARRAY_DTYPE), before_truncate_callback=functools.partial( self._resample_and_merge, agg_dict=ts)) @@ -921,11 +946,12 @@ class TestAggregatedTimeSerie(base.BaseTestCase): 'size': 60, 'agg': 'max'} tsb = carbonara.BoundTimeSerie(block_size=ts['sampling']) - tsb.set_values([(datetime64(2014, 1, 1, 12, 0, 0), 3), - (datetime64(2014, 1, 1, 12, 1, 4), 4), - (datetime64(2014, 1, 1, 12, 1, 9), 7), - (datetime64(2014, 1, 1, 12, 2, 1), 15), - (datetime64(2014, 1, 1, 12, 2, 12), 1)], + tsb.set_values(numpy.array([(datetime64(2014, 1, 1, 12, 0, 0), 3), + (datetime64(2014, 1, 1, 12, 1, 4), 4), + (datetime64(2014, 1, 1, 12, 1, 9), 7), + (datetime64(2014, 1, 1, 12, 2, 1), 15), + (datetime64(2014, 1, 1, 12, 2, 12), 1)], + dtype=carbonara.TIMESERIES_ARRAY_DTYPE), before_truncate_callback=functools.partial( self._resample_and_merge, agg_dict=ts)) @@ -938,7 +964,8 @@ class TestAggregatedTimeSerie(base.BaseTestCase): numpy.timedelta64(60000000000, 'ns'), 15), ], list(ts['return'].fetch(datetime64(2014, 1, 1, 12, 0, 0)))) - tsb.set_values([(datetime64(2014, 1, 1, 12, 2, 13), 110)], + tsb.set_values(numpy.array([(datetime64(2014, 1, 1, 12, 
2, 13), 110)], + dtype=carbonara.TIMESERIES_ARRAY_DTYPE), before_truncate_callback=functools.partial( self._resample_and_merge, agg_dict=ts)) @@ -955,14 +982,15 @@ class TestAggregatedTimeSerie(base.BaseTestCase): ts = {'sampling': numpy.timedelta64(500, 'ms'), 'agg': 'mean'} tsb = carbonara.BoundTimeSerie(block_size=ts['sampling']) - tsb.set_values([ + tsb.set_values(numpy.array([ (datetime64(2014, 1, 1, 12, 0, 0, 1234), 3), (datetime64(2014, 1, 1, 12, 0, 0, 321), 6), (datetime64(2014, 1, 1, 12, 1, 4, 234), 5), (datetime64(2014, 1, 1, 12, 1, 9, 32), 7), - (datetime64(2014, 1, 1, 12, 2, 12, 532), 1), - ], before_truncate_callback=functools.partial( - self._resample_and_merge, agg_dict=ts)) + (datetime64(2014, 1, 1, 12, 2, 12, 532), 1)], + dtype=carbonara.TIMESERIES_ARRAY_DTYPE), + before_truncate_callback=functools.partial( + self._resample_and_merge, agg_dict=ts)) key = ts['return'].get_split_key() o, s = ts['return'].serialize(key) @@ -975,14 +1003,16 @@ class TestAggregatedTimeSerie(base.BaseTestCase): tsb = carbonara.BoundTimeSerie() for i in six.moves.range(1, 11): - tsb.set_values([ - (datetime64(2014, 1, 1, 12, i, i), float(i)) - ], before_truncate_callback=functools.partial( - self._resample_and_merge, agg_dict=ts)) - tsb.set_values([ - (datetime64(2014, 1, 1, 12, i, i + 1), float(i + 1)) - ], before_truncate_callback=functools.partial( - self._resample_and_merge, agg_dict=ts)) + tsb.set_values(numpy.array([ + (datetime64(2014, 1, 1, 12, i, i), float(i))], + dtype=carbonara.TIMESERIES_ARRAY_DTYPE), + before_truncate_callback=functools.partial( + self._resample_and_merge, agg_dict=ts)) + tsb.set_values(numpy.array([ + (datetime64(2014, 1, 1, 12, i, i + 1), float(i + 1))], + dtype=carbonara.TIMESERIES_ARRAY_DTYPE), + before_truncate_callback=functools.partial( + self._resample_and_merge, agg_dict=ts)) self.assertEqual(i, len(list(ts['return'].fetch()))) def test_back_window(self): @@ -994,14 +1024,15 @@ class TestAggregatedTimeSerie(base.BaseTestCase): ts = 
{'sampling': numpy.timedelta64(1, 's'), 'size': 60, 'agg': 'mean'} tsb = carbonara.BoundTimeSerie(block_size=ts['sampling']) - tsb.set_values([ + tsb.set_values(numpy.array([ (datetime64(2014, 1, 1, 12, 0, 1, 2300), 1), (datetime64(2014, 1, 1, 12, 0, 1, 4600), 2), (datetime64(2014, 1, 1, 12, 0, 2, 4500), 3), (datetime64(2014, 1, 1, 12, 0, 2, 7800), 4), - (datetime64(2014, 1, 1, 12, 0, 3, 8), 2.5), - ], before_truncate_callback=functools.partial( - self._resample_and_merge, agg_dict=ts)) + (datetime64(2014, 1, 1, 12, 0, 3, 8), 2.5)], + dtype=carbonara.TIMESERIES_ARRAY_DTYPE), + before_truncate_callback=functools.partial( + self._resample_and_merge, agg_dict=ts)) self.assertEqual( [ @@ -1026,14 +1057,15 @@ class TestAggregatedTimeSerie(base.BaseTestCase): ts = {'sampling': numpy.timedelta64(1, 's'), 'size': 60, 'agg': 'mean'} tsb = carbonara.BoundTimeSerie(block_size=ts['sampling']) - tsb.set_values([ + tsb.set_values(numpy.array([ (datetime64(2014, 1, 1, 12, 0, 1, 2300), 1), (datetime64(2014, 1, 1, 12, 0, 1, 4600), 2), (datetime64(2014, 1, 1, 12, 0, 2, 4500), 3), (datetime64(2014, 1, 1, 12, 0, 2, 7800), 4), - (datetime64(2014, 1, 1, 12, 0, 3, 8), 2.5), - ], before_truncate_callback=functools.partial( - self._resample_and_merge, agg_dict=ts)) + (datetime64(2014, 1, 1, 12, 0, 3, 8), 2.5)], + dtype=carbonara.TIMESERIES_ARRAY_DTYPE), + before_truncate_callback=functools.partial( + self._resample_and_merge, agg_dict=ts)) self.assertEqual( [ @@ -1046,10 +1078,11 @@ class TestAggregatedTimeSerie(base.BaseTestCase): ], list(ts['return'].fetch())) - tsb.set_values([ - (datetime64(2014, 1, 1, 12, 0, 2, 99), 9), - ], before_truncate_callback=functools.partial( - self._resample_and_merge, agg_dict=ts)) + tsb.set_values(numpy.array([ + (datetime64(2014, 1, 1, 12, 0, 2, 99), 9)], + dtype=carbonara.TIMESERIES_ARRAY_DTYPE), + before_truncate_callback=functools.partial( + self._resample_and_merge, agg_dict=ts)) self.assertEqual( [ @@ -1062,11 +1095,12 @@ class 
TestAggregatedTimeSerie(base.BaseTestCase): ], list(ts['return'].fetch())) - tsb.set_values([ + tsb.set_values(numpy.array([ (datetime64(2014, 1, 1, 12, 0, 2, 99), 9), - (datetime64(2014, 1, 1, 12, 0, 3, 9), 4.5), - ], before_truncate_callback=functools.partial( - self._resample_and_merge, agg_dict=ts)) + (datetime64(2014, 1, 1, 12, 0, 3, 9), 4.5)], + dtype=carbonara.TIMESERIES_ARRAY_DTYPE), + before_truncate_callback=functools.partial( + self._resample_and_merge, agg_dict=ts)) self.assertEqual( [ @@ -1123,7 +1157,7 @@ class TestAggregatedTimeSerie(base.BaseTestCase): if existing: existing.merge(tsc22['return']) - tsb1.set_values([ + tsb1.set_values(numpy.array([ (datetime64(2014, 1, 1, 11, 46, 4), 4), (datetime64(2014, 1, 1, 11, 47, 34), 8), (datetime64(2014, 1, 1, 11, 50, 54), 50), @@ -1139,10 +1173,11 @@ class TestAggregatedTimeSerie(base.BaseTestCase): (datetime64(2014, 1, 1, 12, 4, 9), 7), (datetime64(2014, 1, 1, 12, 5, 1), 15), (datetime64(2014, 1, 1, 12, 5, 12), 1), - (datetime64(2014, 1, 1, 12, 6, 0), 3), - ], before_truncate_callback=ts1_update) + (datetime64(2014, 1, 1, 12, 6, 0), 3)], + dtype=carbonara.TIMESERIES_ARRAY_DTYPE), + before_truncate_callback=ts1_update) - tsb2.set_values([ + tsb2.set_values(numpy.array([ (datetime64(2014, 1, 1, 11, 46, 4), 6), (datetime64(2014, 1, 1, 11, 47, 34), 5), (datetime64(2014, 1, 1, 11, 50, 54), 51), @@ -1158,8 +1193,9 @@ class TestAggregatedTimeSerie(base.BaseTestCase): (datetime64(2014, 1, 1, 12, 4, 9), 4), (datetime64(2014, 1, 1, 12, 5, 1), 10), (datetime64(2014, 1, 1, 12, 5, 12), 1), - (datetime64(2014, 1, 1, 12, 6, 0), 1), - ], before_truncate_callback=ts2_update) + (datetime64(2014, 1, 1, 12, 6, 0), 1)], + dtype=carbonara.TIMESERIES_ARRAY_DTYPE), + before_truncate_callback=ts2_update) output = carbonara.AggregatedTimeSerie.aggregated( [tsc1['return'], tsc12['return'], tsc2['return'], tsc22['return']], @@ -1205,21 +1241,23 @@ class TestAggregatedTimeSerie(base.BaseTestCase): 'size': 60, 'agg': 'mean'} tsb2 = 
carbonara.BoundTimeSerie(block_size=tsc2['sampling']) - tsb1.set_values([ + tsb1.set_values(numpy.array([ (datetime64(2015, 12, 3, 13, 19, 15), 1), (datetime64(2015, 12, 3, 13, 20, 15), 1), (datetime64(2015, 12, 3, 13, 21, 15), 1), - (datetime64(2015, 12, 3, 13, 22, 15), 1), - ], before_truncate_callback=functools.partial( - self._resample_and_merge, agg_dict=tsc1)) + (datetime64(2015, 12, 3, 13, 22, 15), 1)], + dtype=carbonara.TIMESERIES_ARRAY_DTYPE), + before_truncate_callback=functools.partial( + self._resample_and_merge, agg_dict=tsc1)) - tsb2.set_values([ + tsb2.set_values(numpy.array([ (datetime64(2015, 12, 3, 13, 21, 15), 10), (datetime64(2015, 12, 3, 13, 22, 15), 10), (datetime64(2015, 12, 3, 13, 23, 15), 10), - (datetime64(2015, 12, 3, 13, 24, 15), 10), - ], before_truncate_callback=functools.partial( - self._resample_and_merge, agg_dict=tsc2)) + (datetime64(2015, 12, 3, 13, 24, 15), 10)], + dtype=carbonara.TIMESERIES_ARRAY_DTYPE), + before_truncate_callback=functools.partial( + self._resample_and_merge, agg_dict=tsc2)) output = carbonara.AggregatedTimeSerie.aggregated( [tsc1['return'], tsc2['return']], aggregation="sum") -- GitLab From 9d88b94f593f5ee545d08f37025d746f42a3a8c0 Mon Sep 17 00:00:00 2001 From: gord chung Date: Thu, 3 Aug 2017 20:02:51 +0000 Subject: [PATCH 0918/1483] share series representation across incoming and storage our incoming representation (timestamps, values) is exactly the same as storage. just make both models use (' Date: Fri, 4 Aug 2017 20:36:43 +0200 Subject: [PATCH 0919/1483] Updating vcs fields. 
Signed-off-by: Daniel Baumann --- debian/control | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/debian/control b/debian/control index 4096c187..cf167cc1 100644 --- a/debian/control +++ b/debian/control @@ -68,8 +68,8 @@ Build-Depends-Indep: alembic (>= 0.7.6), subunit (>= 0.0.18), testrepository, Standards-Version: 3.9.8 -Vcs-Browser: https://anonscm.debian.org/gitweb/?p=openstack/python-gnocchi.git -Vcs-Git: https://anonscm.debian.org/git/openstack/python-gnocchi.git +Vcs-Browser: https://anonscm.debian.org/cgit/openstack/services/gnocchi.git +Vcs-Git: https://anonscm.debian.org/git/openstack/services/gnocchi.git Homepage: https://github.com/openstack/gnocchi Package: python-gnocchi -- GitLab From a29f3b05b9e2611c52c87368e16bcec6a1759133 Mon Sep 17 00:00:00 2001 From: Daniel Baumann Date: Fri, 4 Aug 2017 21:03:36 +0200 Subject: [PATCH 0920/1483] Adding changelog message about updating vcs fields. Signed-off-by: Daniel Baumann --- debian/changelog | 3 +++ 1 file changed, 3 insertions(+) diff --git a/debian/changelog b/debian/changelog index 9549609b..15a4d24d 100644 --- a/debian/changelog +++ b/debian/changelog @@ -7,6 +7,9 @@ gnocchi (2.0.2-7) unstable; urgency=medium * Updated Danish translation of the debconf templates (Closes: #830650). * Now using gnocchi-upgrade, and not gnocchi-dbsync (Closes: #832792). + [ Daniel Baumann ] + * Updating vcs fields. + -- Thomas Goirand Mon, 08 Aug 2016 12:21:06 +0000 gnocchi (2.0.2-6) unstable; urgency=medium -- GitLab From 557b5c4316582889340b4ad68061d9d7a1766112 Mon Sep 17 00:00:00 2001 From: Daniel Baumann Date: Fri, 4 Aug 2017 21:15:00 +0200 Subject: [PATCH 0921/1483] Updating copyright format url. 
Signed-off-by: Daniel Baumann --- debian/changelog | 1 + debian/copyright | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/debian/changelog b/debian/changelog index 15a4d24d..11edd4b4 100644 --- a/debian/changelog +++ b/debian/changelog @@ -9,6 +9,7 @@ gnocchi (2.0.2-7) unstable; urgency=medium [ Daniel Baumann ] * Updating vcs fields. + * Updating copyright format url. -- Thomas Goirand Mon, 08 Aug 2016 12:21:06 +0000 diff --git a/debian/copyright b/debian/copyright index 5d11d0d6..3deff357 100644 --- a/debian/copyright +++ b/debian/copyright @@ -1,4 +1,4 @@ -Format: http://www.debian.org/doc/packaging-manuals/copyright-format/1.0/ +Format: https://www.debian.org/doc/packaging-manuals/copyright-format/1.0/ Upstream-Name: gnocchi Upstream-Contact: Julien Danjou Source: https://github.com/openstack/gnocchi -- GitLab From c21f90c850323ebad4c54c1bae82fbe45ae4e7e2 Mon Sep 17 00:00:00 2001 From: Daniel Baumann Date: Fri, 4 Aug 2017 22:23:09 +0200 Subject: [PATCH 0922/1483] Fixing changelog. Signed-off-by: Daniel Baumann --- debian/changelog | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/debian/changelog b/debian/changelog index 11edd4b4..8b1e0a96 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,10 @@ +gnocchi (2.0.2-8) UNRELEASED; urgency=medium + + * Updating vcs fields. + * Updating copyright format url. + + -- Daniel Baumann Fri, 04 Aug 2017 21:59:18 +0200 + gnocchi (2.0.2-7) unstable; urgency=medium [ Ondřej Nový ] @@ -7,10 +14,6 @@ gnocchi (2.0.2-7) unstable; urgency=medium * Updated Danish translation of the debconf templates (Closes: #830650). * Now using gnocchi-upgrade, and not gnocchi-dbsync (Closes: #832792). - [ Daniel Baumann ] - * Updating vcs fields. - * Updating copyright format url. 
- -- Thomas Goirand Mon, 08 Aug 2016 12:21:06 +0000 gnocchi (2.0.2-6) unstable; urgency=medium -- GitLab From 8731e0eb1a096d88bb82f53a7e00cbc648e10642 Mon Sep 17 00:00:00 2001 From: Daniel Baumann Date: Fri, 4 Aug 2017 23:22:03 +0200 Subject: [PATCH 0923/1483] Updating maintainer field. Signed-off-by: Daniel Baumann --- debian/changelog | 1 + debian/control | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/debian/changelog b/debian/changelog index 8b1e0a96..8254839c 100644 --- a/debian/changelog +++ b/debian/changelog @@ -2,6 +2,7 @@ gnocchi (2.0.2-8) UNRELEASED; urgency=medium * Updating vcs fields. * Updating copyright format url. + * Updating maintainer field. -- Daniel Baumann Fri, 04 Aug 2017 21:59:18 +0200 diff --git a/debian/control b/debian/control index cf167cc1..0dd65fe5 100644 --- a/debian/control +++ b/debian/control @@ -1,7 +1,7 @@ Source: gnocchi Section: net Priority: optional -Maintainer: PKG OpenStack +Maintainer: Debian OpenStack Uploaders: Thomas Goirand , Build-Depends: debhelper (>= 9), dh-python, -- GitLab From df3e2782e9093fc1193ea9393c1e624746864de3 Mon Sep 17 00:00:00 2001 From: Daniel Baumann Date: Fri, 4 Aug 2017 23:32:22 +0200 Subject: [PATCH 0924/1483] Running wrap-and-sort -bast. Signed-off-by: Daniel Baumann --- debian/changelog | 1 + debian/control | 282 +++++++++++++++++----------------- debian/gnocchi-common.install | 2 +- 3 files changed, 141 insertions(+), 144 deletions(-) diff --git a/debian/changelog b/debian/changelog index 8254839c..e2e95c72 100644 --- a/debian/changelog +++ b/debian/changelog @@ -3,6 +3,7 @@ gnocchi (2.0.2-8) UNRELEASED; urgency=medium * Updating vcs fields. * Updating copyright format url. * Updating maintainer field. + * Running wrap-and-sort -bast. 
-- Daniel Baumann Fri, 04 Aug 2017 21:59:18 +0200 diff --git a/debian/control b/debian/control index 0dd65fe5..5e756603 100644 --- a/debian/control +++ b/debian/control @@ -2,135 +2,104 @@ Source: gnocchi Section: net Priority: optional Maintainer: Debian OpenStack -Uploaders: Thomas Goirand , -Build-Depends: debhelper (>= 9), - dh-python, - openstack-pkg-tools (>= 40~), - python-all, - python-pbr, - python-setuptools, - python-sphinx, -Build-Depends-Indep: alembic (>= 0.7.6), - libpq-dev, - postgresql, - postgresql-server-dev-all, - python-concurrent.futures (>= 2.1.6), - python-coverage (>= 3.6), - python-doc8, - python-fixtures, - python-future (>= 0.15), - python-gabbi (>= 1), - python-jsonpatch (>= 1.9), - python-keystoneclient (>= 1:1.6.0), - python-keystonemiddleware (>= 4.0.0), - python-lz4, - python-mock, - python-msgpack, - python-mysqldb, - python-numpy, - python-oslo.config (>= 1:2.6.0), - python-oslo.db (>= 1.8.0), - python-oslo.log (>= 1.0.0), - python-oslo.middleware, - python-oslo.policy (>= 0.3.0), - python-oslo.serialization (>= 1.4.0), - python-oslo.utils (>= 1.6.0), - python-oslosphinx (>= 2.2.0.0), - python-oslotest, - python-pandas (>= 0.17), - python-paste, - python-pastedeploy, - python-pecan (>= 0.9), - python-prettytable, - python-psycopg2, - python-pymysql, - python-pytimeparse (>= 1.1.5), - python-requests, - python-retrying, - python-six, - python-sphinx-bootstrap-theme, - python-sphinxcontrib.httpdomain, - python-sqlalchemy, - python-sqlalchemy-utils, - python-stevedore, - python-swiftclient (>= 2.5.0), - python-sysv-ipc, - python-tempest-lib (>= 0.2.0), - python-testscenarios, - python-testtools (>= 0.9.38), - python-tooz (>= 1.34), - python-trollius, - python-voluptuous, - python-webob (>= 1.4.1), - python-webtest (>= 2.0.16), - python-werkzeug, - python-yaml, - subunit (>= 0.0.18), - testrepository, +Uploaders: + Thomas Goirand , +Build-Depends: + debhelper (>= 9), + dh-python, + openstack-pkg-tools (>= 40~), + python-all, + 
python-pbr, + python-setuptools, + python-sphinx, +Build-Depends-Indep: + alembic (>= 0.7.6), + libpq-dev, + postgresql, + postgresql-server-dev-all, + python-concurrent.futures (>= 2.1.6), + python-coverage (>= 3.6), + python-doc8, + python-fixtures, + python-future (>= 0.15), + python-gabbi (>= 1), + python-jsonpatch (>= 1.9), + python-keystoneclient (>= 1:1.6.0), + python-keystonemiddleware (>= 4.0.0), + python-lz4, + python-mock, + python-msgpack, + python-mysqldb, + python-numpy, + python-oslo.config (>= 1:2.6.0), + python-oslo.db (>= 1.8.0), + python-oslo.log (>= 1.0.0), + python-oslo.middleware, + python-oslo.policy (>= 0.3.0), + python-oslo.serialization (>= 1.4.0), + python-oslo.utils (>= 1.6.0), + python-oslosphinx (>= 2.2.0.0), + python-oslotest, + python-pandas (>= 0.17), + python-paste, + python-pastedeploy, + python-pecan (>= 0.9), + python-prettytable, + python-psycopg2, + python-pymysql, + python-pytimeparse (>= 1.1.5), + python-requests, + python-retrying, + python-six, + python-sphinx-bootstrap-theme, + python-sphinxcontrib.httpdomain, + python-sqlalchemy, + python-sqlalchemy-utils, + python-stevedore, + python-swiftclient (>= 2.5.0), + python-sysv-ipc, + python-tempest-lib (>= 0.2.0), + python-testscenarios, + python-testtools (>= 0.9.38), + python-tooz (>= 1.34), + python-trollius, + python-voluptuous, + python-webob (>= 1.4.1), + python-webtest (>= 2.0.16), + python-werkzeug, + python-yaml, + subunit (>= 0.0.18), + testrepository, Standards-Version: 3.9.8 Vcs-Browser: https://anonscm.debian.org/cgit/openstack/services/gnocchi.git Vcs-Git: https://anonscm.debian.org/git/openstack/services/gnocchi.git Homepage: https://github.com/openstack/gnocchi -Package: python-gnocchi -Section: python +Package: gnocchi-api Architecture: all -Depends: alembic (>= 0.7.6), - python-concurrent.futures (>= 2.1.6), - python-future (>= 0.15), - python-jsonpatch (>= 1.9), - python-keystoneclient (>= 1:1.6.0), - python-keystonemiddleware (>= 4.0.0), - python-lz4, - 
python-msgpack, - python-numpy, - python-oslo.config (>= 1:2.6.0), - python-oslo.db (>= 1.8.0), - python-oslo.log (>= 1.0.0), - python-oslo.middleware, - python-oslo.policy (>= 0.3.0), - python-oslo.serialization (>= 1.4.0), - python-oslo.utils (>= 1.6.0), - python-oslosphinx (>= 2.2.0.0), - python-pandas (>= 0.17), - python-paste, - python-pastedeploy, - python-pecan (>= 0.9), - python-prettytable, - python-psycopg2, - python-pymysql, - python-pytimeparse (>= 1.1.5), - python-requests, - python-retrying, - python-six, - python-sqlalchemy, - python-sqlalchemy-utils, - python-stevedore, - python-swiftclient (>= 2.5.0), - python-tooz (>= 1.34), - python-trollius, - python-voluptuous, - python-webob (>= 1.4.1), - python-werkzeug, - python-yaml, - ${misc:Depends}, - ${python:Depends}, -Suggests: python-gnocchi-doc, -Description: Metric as a Service - Python 2.x +Depends: + adduser, + gnocchi-common (= ${binary:Version}), + python-openstackclient, + q-text-as-data, + ${misc:Depends}, + ${python:Depends}, +Description: Metric as a Service - API daemon Gnocchi is a service for managing a set of resources and storing metrics about them, in a scalable and resilient way. Its functionalities are exposed over an HTTP REST API. . - This package contains the Python 2.x module. + This package contains the API server. Package: gnocchi-common Architecture: all -Depends: adduser, - dbconfig-common, - debconf, - python-gnocchi (= ${binary:Version}), - ${misc:Depends}, - ${python:Depends}, +Depends: + adduser, + dbconfig-common, + debconf, + python-gnocchi (= ${binary:Version}), + ${misc:Depends}, + ${python:Depends}, Description: Metric as a Service - common files Gnocchi is a service for managing a set of resources and storing metrics about them, in a scalable and resilient way. Its functionalities are exposed over an @@ -138,41 +107,68 @@ Description: Metric as a Service - common files . This package contains the common files. 
-Package: gnocchi-api +Package: gnocchi-metricd Architecture: all -Depends: adduser, - gnocchi-common (= ${binary:Version}), - python-openstackclient, - q-text-as-data, - ${misc:Depends}, - ${python:Depends}, -Description: Metric as a Service - API daemon +Depends: + gnocchi-common (= ${binary:Version}), + ${misc:Depends}, + ${python:Depends}, +Description: Metric as a Service - metric daemon Gnocchi is a service for managing a set of resources and storing metrics about them, in a scalable and resilient way. Its functionalities are exposed over an HTTP REST API. . - This package contains the API server. + This package contains the metric daemon. -Package: gnocchi-metricd +Package: python-gnocchi +Section: python Architecture: all -Depends: gnocchi-common (= ${binary:Version}), - ${misc:Depends}, - ${python:Depends}, -Description: Metric as a Service - metric daemon +Depends: + alembic (>= 0.7.6), + python-concurrent.futures (>= 2.1.6), + python-future (>= 0.15), + python-jsonpatch (>= 1.9), + python-keystoneclient (>= 1:1.6.0), + python-keystonemiddleware (>= 4.0.0), + python-lz4, + python-msgpack, + python-numpy, + python-oslo.config (>= 1:2.6.0), + python-oslo.db (>= 1.8.0), + python-oslo.log (>= 1.0.0), + python-oslo.middleware, + python-oslo.policy (>= 0.3.0), + python-oslo.serialization (>= 1.4.0), + python-oslo.utils (>= 1.6.0), + python-oslosphinx (>= 2.2.0.0), + python-pandas (>= 0.17), + python-paste, + python-pastedeploy, + python-pecan (>= 0.9), + python-prettytable, + python-psycopg2, + python-pymysql, + python-pytimeparse (>= 1.1.5), + python-requests, + python-retrying, + python-six, + python-sqlalchemy, + python-sqlalchemy-utils, + python-stevedore, + python-swiftclient (>= 2.5.0), + python-tooz (>= 1.34), + python-trollius, + python-voluptuous, + python-webob (>= 1.4.1), + python-werkzeug, + python-yaml, + ${misc:Depends}, + ${python:Depends}, +Suggests: + python-gnocchi-doc, +Description: Metric as a Service - Python 2.x Gnocchi is a service for 
managing a set of resources and storing metrics about them, in a scalable and resilient way. Its functionalities are exposed over an HTTP REST API. . - This package contains the metric daemon. - -#Package: gnocchi-doc -#Section: doc -#Architecture: all -#Depends: ${misc:Depends}, -# ${sphinxdoc:Depends}, -#Description: Metric as a Service - doc -# Gnocchi is a service for managing a set of resources and storing metrics about -# them, in a scalable and resilient way. Its functionalities are exposed over an -# HTTP REST API. -# . -# This package contains the documentation. + This package contains the Python 2.x module. diff --git a/debian/gnocchi-common.install b/debian/gnocchi-common.install index 339258a5..10da746c 100644 --- a/debian/gnocchi-common.install +++ b/debian/gnocchi-common.install @@ -1,3 +1,3 @@ -etc/gnocchi/policy.json /usr/share/gnocchi-common etc/gnocchi/api-paste.ini /usr/share/gnocchi-common +etc/gnocchi/policy.json /usr/share/gnocchi-common gnocchi/rest/app.wsgi /usr/share/gnocchi-common -- GitLab From 7d6614eaa54ae09bda06220729dd33c55523f526 Mon Sep 17 00:00:00 2001 From: Daniel Baumann Date: Fri, 4 Aug 2017 23:59:34 +0200 Subject: [PATCH 0925/1483] Updating standards version to 4.0.0. Signed-off-by: Daniel Baumann --- debian/changelog | 1 + debian/control | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/debian/changelog b/debian/changelog index e2e95c72..03900385 100644 --- a/debian/changelog +++ b/debian/changelog @@ -4,6 +4,7 @@ gnocchi (2.0.2-8) UNRELEASED; urgency=medium * Updating copyright format url. * Updating maintainer field. * Running wrap-and-sort -bast. + * Updating standards version to 4.0.0. 
-- Daniel Baumann Fri, 04 Aug 2017 21:59:18 +0200 diff --git a/debian/control b/debian/control index 5e756603..5bbdee70 100644 --- a/debian/control +++ b/debian/control @@ -70,7 +70,7 @@ Build-Depends-Indep: python-yaml, subunit (>= 0.0.18), testrepository, -Standards-Version: 3.9.8 +Standards-Version: 4.0.0 Vcs-Browser: https://anonscm.debian.org/cgit/openstack/services/gnocchi.git Vcs-Git: https://anonscm.debian.org/git/openstack/services/gnocchi.git Homepage: https://github.com/openstack/gnocchi -- GitLab From 60ab4cdc6aecd458d9ece0c188f8f54bcd9ef659 Mon Sep 17 00:00:00 2001 From: Daniel Baumann Date: Sat, 5 Aug 2017 00:16:02 +0200 Subject: [PATCH 0926/1483] Removing gbp.conf, not used anymore or should be specified in the developers dotfiles. Signed-off-by: Daniel Baumann --- debian/changelog | 2 ++ debian/gbp.conf | 9 --------- 2 files changed, 2 insertions(+), 9 deletions(-) delete mode 100644 debian/gbp.conf diff --git a/debian/changelog b/debian/changelog index 03900385..8d47cb37 100644 --- a/debian/changelog +++ b/debian/changelog @@ -5,6 +5,8 @@ gnocchi (2.0.2-8) UNRELEASED; urgency=medium * Updating maintainer field. * Running wrap-and-sort -bast. * Updating standards version to 4.0.0. + * Removing gbp.conf, not used anymore or should be specified in the + developers dotfiles. -- Daniel Baumann Fri, 04 Aug 2017 21:59:18 +0200 diff --git a/debian/gbp.conf b/debian/gbp.conf deleted file mode 100644 index 7436424b..00000000 --- a/debian/gbp.conf +++ /dev/null @@ -1,9 +0,0 @@ -[DEFAULT] -upstream-branch = master -debian-branch = debian/mitaka -upstream-tag = %(version)s -compression = xz - -[buildpackage] -export-dir = ../build-area/ - -- GitLab From dfa78f9f4add85b57b95510642141099d790e902 Mon Sep 17 00:00:00 2001 From: Daniel Baumann Date: Sat, 5 Aug 2017 00:59:41 +0200 Subject: [PATCH 0927/1483] Correcting permissions in debian packaging files. 
Signed-off-by: Daniel Baumann --- debian/changelog | 1 + debian/gnocchi-api.postinst.in | 0 debian/gnocchi-common.postinst.in | 0 debian/gnocchi-common.postrm | 0 4 files changed, 1 insertion(+) mode change 100644 => 100755 debian/gnocchi-api.postinst.in mode change 100644 => 100755 debian/gnocchi-common.postinst.in mode change 100644 => 100755 debian/gnocchi-common.postrm diff --git a/debian/changelog b/debian/changelog index 8d47cb37..e926810e 100644 --- a/debian/changelog +++ b/debian/changelog @@ -7,6 +7,7 @@ gnocchi (2.0.2-8) UNRELEASED; urgency=medium * Updating standards version to 4.0.0. * Removing gbp.conf, not used anymore or should be specified in the developers dotfiles. + * Correcting permissions in debian packaging files. -- Daniel Baumann Fri, 04 Aug 2017 21:59:18 +0200 diff --git a/debian/gnocchi-api.postinst.in b/debian/gnocchi-api.postinst.in old mode 100644 new mode 100755 diff --git a/debian/gnocchi-common.postinst.in b/debian/gnocchi-common.postinst.in old mode 100644 new mode 100755 diff --git a/debian/gnocchi-common.postrm b/debian/gnocchi-common.postrm old mode 100644 new mode 100755 -- GitLab From 4e658216e7a3038f6088b626e99ce926cab758df Mon Sep 17 00:00:00 2001 From: Daniel Baumann Date: Sun, 6 Aug 2017 13:49:38 +0200 Subject: [PATCH 0928/1483] Updating standards version to 4.0.1. Signed-off-by: Daniel Baumann --- debian/changelog | 1 + debian/control | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/debian/changelog b/debian/changelog index e926810e..c70f7dd8 100644 --- a/debian/changelog +++ b/debian/changelog @@ -8,6 +8,7 @@ gnocchi (2.0.2-8) UNRELEASED; urgency=medium * Removing gbp.conf, not used anymore or should be specified in the developers dotfiles. * Correcting permissions in debian packaging files. + * Updating standards version to 4.0.1. 
-- Daniel Baumann Fri, 04 Aug 2017 21:59:18 +0200 diff --git a/debian/control b/debian/control index 5bbdee70..25853882 100644 --- a/debian/control +++ b/debian/control @@ -70,7 +70,7 @@ Build-Depends-Indep: python-yaml, subunit (>= 0.0.18), testrepository, -Standards-Version: 4.0.0 +Standards-Version: 4.0.1 Vcs-Browser: https://anonscm.debian.org/cgit/openstack/services/gnocchi.git Vcs-Git: https://anonscm.debian.org/git/openstack/services/gnocchi.git Homepage: https://github.com/openstack/gnocchi -- GitLab From 87a5a47b9149b40b45e7c783d4f1587a185d5517 Mon Sep 17 00:00:00 2001 From: Pete Zaitcev Date: Wed, 2 Aug 2017 17:13:12 -0600 Subject: [PATCH 0929/1483] Fix pep8 with file() Apparently our CI does not check for tox -e pep8. --- gnocchi/tempest/scenario/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/gnocchi/tempest/scenario/__init__.py b/gnocchi/tempest/scenario/__init__.py index 7db0fd6f..f662dc7b 100644 --- a/gnocchi/tempest/scenario/__init__.py +++ b/gnocchi/tempest/scenario/__init__.py @@ -60,7 +60,7 @@ class GnocchiGabbiTest(tempest.test.BaseTestCase): os.environ["GNOCCHI_SERVICE_TOKEN"] = token os.environ["GNOCCHI_AUTHORIZATION"] = "not used" - with file(os.path.join(TEST_DIR, filename)) as f: + with open(os.path.join(TEST_DIR, filename)) as f: suite_dict = utils.load_yaml(f) suite_dict.setdefault('defaults', {})['ssl'] = require_ssl test_suite = suitemaker.test_suite_from_dict( -- GitLab From d28993016ec0ed21f3415e8dfe982e4126dddc42 Mon Sep 17 00:00:00 2001 From: gord chung Date: Tue, 8 Aug 2017 13:23:08 +0000 Subject: [PATCH 0930/1483] make to_timedelta return numpy to_timedelta naturally uses numpy.timedelta64 but gets cast to pandas[1]. just make it return numpy. 
[1] https://pandas.pydata.org/pandas-docs/stable/generated/pandas.to_timedelta.html --- gnocchi/utils.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/gnocchi/utils.py b/gnocchi/utils.py index 31ec3229..47a86c94 100644 --- a/gnocchi/utils.py +++ b/gnocchi/utils.py @@ -103,7 +103,7 @@ def to_timestamps(values): # e.g. "-10 seconds" or "5 minutes" times = numpy.fromiter( numpy.add(numpy.datetime64(utcnow()), - pd.to_timedelta(values)), + pd.to_timedelta(values, box=False)), dtype='datetime64[ns]') else: times = numpy.array(values, dtype='datetime64[ns]') @@ -140,7 +140,7 @@ def to_timespan(value): seconds = float(value) except Exception: try: - seconds = pd.to_timedelta(value).to_timedelta64() + seconds = pd.to_timedelta(value, box=False) except Exception: raise ValueError("Unable to parse timespan") else: -- GitLab From 70555bf48b05339d326fe2ccaa021cd21573dd81 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Thu, 22 Jun 2017 15:17:42 +0200 Subject: [PATCH 0931/1483] redis: remove default connection timeout There's no good reason to timeout by default if nothing happens. This just makes reconnection being handled every 30 seconds by default, which sounds like a bad idea. --- gnocchi/common/redis.py | 5 ----- 1 file changed, 5 deletions(-) diff --git a/gnocchi/common/redis.py b/gnocchi/common/redis.py index 8491c369..7ac6dfd0 100644 --- a/gnocchi/common/redis.py +++ b/gnocchi/common/redis.py @@ -70,9 +70,6 @@ CLIENT_INT_ARGS = frozenset([ 'socket_timeout', ]) -#: Default socket timeout to use when none is provided. -CLIENT_DEFAULT_SOCKET_TO = 30 - def get_client(conf): if redis is None: @@ -104,8 +101,6 @@ def get_client(conf): else: v = options[a][-1] kwargs[a] = v - if 'socket_timeout' not in kwargs: - kwargs['socket_timeout'] = CLIENT_DEFAULT_SOCKET_TO # Ask the sentinel for the current master if there is a # sentinel arg. 
-- GitLab From 8a2e20edc61492458c46048806442d97f4fc6735 Mon Sep 17 00:00:00 2001 From: Daniel Baumann Date: Tue, 22 Aug 2017 18:54:08 +0200 Subject: [PATCH 0932/1483] Updating standards version to 4.1.0. Signed-off-by: Daniel Baumann --- debian/changelog | 1 + debian/control | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/debian/changelog b/debian/changelog index c70f7dd8..764f01b4 100644 --- a/debian/changelog +++ b/debian/changelog @@ -9,6 +9,7 @@ gnocchi (2.0.2-8) UNRELEASED; urgency=medium developers dotfiles. * Correcting permissions in debian packaging files. * Updating standards version to 4.0.1. + * Updating standards version to 4.1.0. -- Daniel Baumann Fri, 04 Aug 2017 21:59:18 +0200 diff --git a/debian/control b/debian/control index 25853882..70a83854 100644 --- a/debian/control +++ b/debian/control @@ -70,7 +70,7 @@ Build-Depends-Indep: python-yaml, subunit (>= 0.0.18), testrepository, -Standards-Version: 4.0.1 +Standards-Version: 4.1.0 Vcs-Browser: https://anonscm.debian.org/cgit/openstack/services/gnocchi.git Vcs-Git: https://anonscm.debian.org/git/openstack/services/gnocchi.git Homepage: https://github.com/openstack/gnocchi -- GitLab From d659c4a6493cdf23508ca53c3c5979c04dda06a6 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Fri, 21 Jul 2017 09:39:17 +0200 Subject: [PATCH 0933/1483] indexer: fix ne operator When we use the ne operator, row with NULL values are not returned by mysql or postgresql. This change fixes the sql query to return them. 
Closes #224 (cherry picked from commit 051b11abb41b239a1873e4009d0a6625795f1f76) --- gnocchi/indexer/sqlalchemy.py | 6 +++- gnocchi/tests/functional/gabbits/search.yaml | 21 +++++++++++++ gnocchi/tests/test_indexer.py | 31 ++++++++++++++++++++ 3 files changed, 57 insertions(+), 1 deletion(-) diff --git a/gnocchi/indexer/sqlalchemy.py b/gnocchi/indexer/sqlalchemy.py index ecad2b65..7fd270c9 100644 --- a/gnocchi/indexer/sqlalchemy.py +++ b/gnocchi/indexer/sqlalchemy.py @@ -1215,7 +1215,11 @@ class QueryTransformer(object): raise indexer.QueryValueError(value, field_name) break - return op(attr, value) + if op == operator.ne and value is not None: + return operator.or_(operator.eq(attr, None), + op(attr, value)) + else: + return op(attr, value) @classmethod def build_filter(cls, engine, table, tree): diff --git a/gnocchi/tests/functional/gabbits/search.yaml b/gnocchi/tests/functional/gabbits/search.yaml index 0bd8f93d..ba55c648 100644 --- a/gnocchi/tests/functional/gabbits/search.yaml +++ b/gnocchi/tests/functional/gabbits/search.yaml @@ -74,3 +74,24 @@ tests: data: {} response_json_paths: $.`len`: 2 + + - name: post generic resource with project/user + POST: /v1/resource/generic + data: + id: 95573760-b085-4e69-9280-91f66fc3ed3c + started_at: "2014-01-03T02:02:02.000000" + status: 201 + + - name: search empty query again + POST: /v1/search/resource/generic + data: {} + response_json_paths: + $.`len`: 3 + + - name: search all resource not foobar + POST: /v1/search/resource/generic + data: + ne: + project_id: foobar + response_json_paths: + $.`len`: 3 diff --git a/gnocchi/tests/test_indexer.py b/gnocchi/tests/test_indexer.py index f6a29263..769ed4d6 100644 --- a/gnocchi/tests/test_indexer.py +++ b/gnocchi/tests/test_indexer.py @@ -730,6 +730,37 @@ class TestIndexerDriver(tests_base.TestCase): attribute_filter={"=": {"project_id": 'bad-project'}}) self.assertEqual(0, len(resources)) + def test_list_resources_with_no_project(self): + r1 = uuid.uuid4() + r2 = uuid.uuid4() 
+ user = str(uuid.uuid4()) + project = str(uuid.uuid4()) + creator = user + ":" + project + g1 = self.index.create_resource('generic', r1, creator, user, project) + g2 = self.index.create_resource('generic', r2, creator, None, None) + + # Get null value + resources = self.index.list_resources( + 'generic', + attribute_filter={"and": [ + {"=": {"creator": creator}}, + {"!=": {"project_id": project}} + ]}) + self.assertEqual(1, len(resources)) + self.assertEqual(g2, resources[0]) + + # Get null and filled values + resources = self.index.list_resources( + 'generic', + attribute_filter={"and": [ + {"=": {"creator": creator}}, + {"!=": {"project_id": "foobar"}} + ]}, + sorts=["project_id:asc-nullsfirst"]) + self.assertEqual(2, len(resources)) + self.assertEqual(g2, resources[0]) + self.assertEqual(g1, resources[1]) + def test_list_resources_by_duration(self): r1 = uuid.uuid4() user = str(uuid.uuid4()) -- GitLab From 4a0dfbab90fc5b42e2b9bb561484d3d786e8cb77 Mon Sep 17 00:00:00 2001 From: Pete Zaitcev Date: Fri, 25 Aug 2017 23:22:04 -0600 Subject: [PATCH 0934/1483] Add #!/usr/bin/python to gnocchi-api Otherwise Aug 26 01:04:04 rhev-a24c-01 systemd: openstack-gnocchi-api.service: Failed at step EXEC spawning /usr/bin/gnocchi-api: Exec format error --- gnocchi/rest/gnocchi-api | 2 ++ 1 file changed, 2 insertions(+) diff --git a/gnocchi/rest/gnocchi-api b/gnocchi/rest/gnocchi-api index a53ad9ea..de041985 100755 --- a/gnocchi/rest/gnocchi-api +++ b/gnocchi/rest/gnocchi-api @@ -1,3 +1,5 @@ +#!/usr/bin/python + # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at -- GitLab From 15378ab41e913e53f0094ba2ac9d568a9f3f1a59 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Fri, 25 Aug 2017 08:47:56 +0200 Subject: [PATCH 0935/1483] Fix circular dependency on import gnocchi.rest.app imports gnocchi.service gnocchi.service imports gnocchi.opts gnocchi.opts imports gnocchi.rest.app Break that cycle so gnocchi.rest.app stops importing gnocchi.service Closes #294 --- gnocchi/rest/app.py | 5 ----- gnocchi/rest/gnocchi-api | 3 ++- gnocchi/rest/wsgi.py | 3 ++- 3 files changed, 4 insertions(+), 7 deletions(-) diff --git a/gnocchi/rest/app.py b/gnocchi/rest/app.py index 7c99030b..e2bfbdbe 100644 --- a/gnocchi/rest/app.py +++ b/gnocchi/rest/app.py @@ -31,7 +31,6 @@ from gnocchi import exceptions from gnocchi import incoming as gnocchi_incoming from gnocchi import indexer as gnocchi_indexer from gnocchi import json -from gnocchi import service from gnocchi import storage as gnocchi_storage @@ -150,7 +149,3 @@ def app_factory(global_config, **local_conf): global APPCONFIGS appconfig = APPCONFIGS.get(global_config.get('configkey')) return _setup_app(root=local_conf.get('root'), **appconfig) - - -def build_wsgi_app(): - return load_app(service.prepare_service()) diff --git a/gnocchi/rest/gnocchi-api b/gnocchi/rest/gnocchi-api index de041985..1752dc7d 100755 --- a/gnocchi/rest/gnocchi-api +++ b/gnocchi/rest/gnocchi-api @@ -18,4 +18,5 @@ if __name__ == '__main__': sys.exit(cli.api()) else: from gnocchi.rest import app - application = app.build_wsgi_app() + from gnocchi import service + application = app.load_app(service.prepare_service()) diff --git a/gnocchi/rest/wsgi.py b/gnocchi/rest/wsgi.py index 3edc2468..b28cc452 100644 --- a/gnocchi/rest/wsgi.py +++ b/gnocchi/rest/wsgi.py @@ -12,4 +12,5 @@ # limitations under the License. 
"""This file is loaded by gnocchi-api when executing uwsgi""" from gnocchi.rest import app -application = app.build_wsgi_app() +from gnocchi import service +application = app.load_app(service.prepare_service()) -- GitLab From bfe56b4766aea99c54081afa889f00d195f1effc Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Wed, 26 Jul 2017 09:05:26 +0200 Subject: [PATCH 0936/1483] rest: Fix invalid query validation Some operators accept a list or a dict at rest layer, when it should accept only an attribute value, like bool, str, None. For Postgresql, that result to an sql error, turned in HTTP 500. For Mysql, the request succeed with some magic casting. Closes #240 (cherry picked from commit cd093e4641a202c720ec8b54e335cc91a07f97ca) --- gnocchi/rest/__init__.py | 25 +++++++--- gnocchi/tests/functional/gabbits/search.yaml | 50 ++++++++++++++++++-- 2 files changed, 65 insertions(+), 10 deletions(-) diff --git a/gnocchi/rest/__init__.py b/gnocchi/rest/__init__.py index 56d71d95..e741c4b4 100644 --- a/gnocchi/rest/__init__.py +++ b/gnocchi/rest/__init__.py @@ -1179,6 +1179,12 @@ def ResourceSearchSchema(v): return _ResourceSearchSchema()(v) +# NOTE(sileht): indexer will cast this type to the real attribute +# type, here we just want to be sure this is not a dict or a list +ResourceSearchSchemaAttributeValue = voluptuous.Any( + six.text_type, float, int, bool, None) + + def _ResourceSearchSchema(): user = pecan.request.auth_helper.get_current_user( pecan.request) @@ -1195,21 +1201,26 @@ def _ResourceSearchSchema(): u"<=", u"≤", u"le", u">=", u"≥", u"ge", u"!=", u"≠", u"ne", + u"like" + ): voluptuous.All( + voluptuous.Length(min=1, max=1), + {"id": _ResourceUUID, + six.text_type: ResourceSearchSchemaAttributeValue}, + ), + voluptuous.Any( u"in", - u"like", ): voluptuous.All( voluptuous.Length(min=1, max=1), - voluptuous.Any( - {"id": voluptuous.Any( - [_ResourceUUID], _ResourceUUID), - voluptuous.Extra: voluptuous.Extra})), + {"id": [_ResourceUUID], + six.text_type: 
[ResourceSearchSchemaAttributeValue]} + ), voluptuous.Any( u"and", u"∨", u"or", u"∧", - u"not", ): voluptuous.All( [ResourceSearchSchema], voluptuous.Length(min=1) - ) + ), + u"not": ResourceSearchSchema, } ) ) diff --git a/gnocchi/tests/functional/gabbits/search.yaml b/gnocchi/tests/functional/gabbits/search.yaml index ba55c648..13906bf1 100644 --- a/gnocchi/tests/functional/gabbits/search.yaml +++ b/gnocchi/tests/functional/gabbits/search.yaml @@ -21,12 +21,49 @@ tests: GET: /v1/search/resource/foobar status: 404 + # FIXME(sileht): this test looks wrong, it talks about invalidity + # but asserts it return 200... - name: search with invalid uuid POST: /v1/search/resource/generic data: =: id: "cd9eef" + - name: search invalid and value + desc: and should be followed by a list, not dict + POST: /v1/search/resource/generic + data: + and: + project_id: foobar + status: 400 + response_strings: + - "expected a list for dictionary value @ data[" + - "'and']" + + - name: search invalid ne value + desc: attribute value for binary operator must not be dict or list + POST: /v1/search/resource/generic + data: + ne: + project_id: + - foobar + status: 400 + response_strings: + - "for dictionary value @ data[" + - "'ne'][" + - "'project_id']" + + - name: search invalid not value + desc: uninary operator must follow by dict, not list + POST: /v1/search/resource/generic + data: + not: + - project_id: foobar + status: 400 + response_strings: + - "expected a dictionary for dictionary value @ data[" + - "'not']" + - name: post generic resource POST: /v1/resource/generic data: @@ -55,15 +92,22 @@ tests: response_json_paths: $.`len`: 2 - - name: search like created_by_project_id + - name: search eq created_by_project_id POST: /v1/search/resource/generic data: eq: - created_by_project_id: - - f3d41b770cc14f0bb94a1d5be9c0e3ea + created_by_project_id: f3d41b770cc14f0bb94a1d5be9c0e3ea response_json_paths: $.`len`: 0 + - name: search eq creator + POST: /v1/search/resource/generic + data: + 
eq: + creator: "foobar" + response_json_paths: + $.`len`: 2 + - name: search in_ query string POST: /v1/search/resource/generic?filter=id%20in%20%5Bfaef212f-0bf4-4030-a461-2186fef79be0%2C%20df7e5e75-6a1d-4ff7-85cb-38eb9d75da7e%5D response_json_paths: -- GitLab From eff9269f21ab841af60a686e60f6966a961c91c9 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Mon, 21 Aug 2017 14:13:12 +0200 Subject: [PATCH 0937/1483] Use pytimeparse to parse time span pytimeparse was used before switching entirely the parsing to Pandas, which was already in use. Since we're trying to get rid of Pandas, pytimeparse looks like the best alternative out there to parse the time span. Improves #61 --- gnocchi/utils.py | 16 +++++++--------- requirements.txt | 1 + 2 files changed, 8 insertions(+), 9 deletions(-) diff --git a/gnocchi/utils.py b/gnocchi/utils.py index 47a86c94..54d94ca1 100644 --- a/gnocchi/utils.py +++ b/gnocchi/utils.py @@ -26,7 +26,7 @@ import daiquiri import iso8601 import monotonic import numpy -import pandas as pd +import pytimeparse import six from stevedore import driver from tooz import coordination @@ -103,7 +103,7 @@ def to_timestamps(values): # e.g. 
"-10 seconds" or "5 minutes" times = numpy.fromiter( numpy.add(numpy.datetime64(utcnow()), - pd.to_timedelta(values, box=False)), + [to_timespan(v, True) for v in values]), dtype='datetime64[ns]') else: times = numpy.array(values, dtype='datetime64[ns]') @@ -133,19 +133,17 @@ def timestamp_to_datetime(v): v.astype(float) / 10e8).replace(tzinfo=iso8601.iso8601.UTC) -def to_timespan(value): +def to_timespan(value, allow_le_zero=False): if value is None: raise ValueError("Invalid timespan") try: seconds = float(value) except Exception: - try: - seconds = pd.to_timedelta(value, box=False) - except Exception: + seconds = pytimeparse.parse(value) + if seconds is None: raise ValueError("Unable to parse timespan") - else: - seconds = numpy.timedelta64(int(seconds * 10e8), 'ns') - if seconds <= numpy.timedelta64(0, 'ns'): + seconds = numpy.timedelta64(int(seconds * 10e8), 'ns') + if not allow_le_zero and seconds <= numpy.timedelta64(0, 'ns'): raise ValueError("Timespan must be positive") return seconds diff --git a/requirements.txt b/requirements.txt index 2bb754c4..72762e2f 100644 --- a/requirements.txt +++ b/requirements.txt @@ -5,6 +5,7 @@ oslo.config>=3.22.0 oslo.policy>=0.3.0 oslo.middleware>=3.22.0 pandas>=0.18.0 +pytimeparse scipy>=0.18.1 # BSD pecan>=0.9 futures -- GitLab From 8c8896b3e759322b706df9aabf8bb2db747d6d4e Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Wed, 16 Aug 2017 11:35:14 +0200 Subject: [PATCH 0938/1483] cors: fix CORS example formatting in Grafana page --- doc/source/grafana.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/source/grafana.rst b/doc/source/grafana.rst index d731e613..840b4aa5 100644 --- a/doc/source/grafana.rst +++ b/doc/source/grafana.rst @@ -33,7 +33,7 @@ steps: [cors] allowed_origin = http://example.com/grafana -2. Configure the CORS middleware in Keystone to allow request from Grafana too: +2. 
Configure the CORS middleware in Keystone to allow request from Grafana too:: [cors] allowed_origin = http://example.com/grafana -- GitLab From f63c493d9ac6ef16ef2e5b40aa20fae6e78a3695 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Wed, 16 Aug 2017 16:06:03 +0200 Subject: [PATCH 0939/1483] Push tag to PyPI using Travis Pushing git tag will now be enough to push a release to PyPI. --- .travis.yml | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/.travis.yml b/.travis.yml index 8b0782cb..4902bf59 100644 --- a/.travis.yml +++ b/.travis.yml @@ -46,3 +46,13 @@ notifications: skip_join: true channels: - "irc.freenode.org#gnocchi" + +deploy: + provider: pypi + user: jd + password: + secure: c+Ccx3SHCWepiy0PUxDJ7XO9r3aNYnHjkzxF5c/kjV8QaCJayAJEgXJnBKhvjroqwgn7JPUgpD6QdSWdB4FqjbZYQ3I3oHOO1YL0vYYa8wHG5HuMsMp4J8qvzgs3QNQDECPI1mXsPevn3VMfGszUN+6BQrHB3FbZsTtOmE+Kmgok5NCT+obsfEhVea/UOD0XFUkVW9VJhPjQ2ytvYvFIc46/73GQf2Er/5DCa/4GGDEBSD++bDJgp3kQj438xslCAFeZWDwGsa+cTc43PI0Y0+E144ySVY7QyVbZ1B66a1BGWVrXJuM+gW/eIBCMN1FJXmD7CDdPa22azKI8dfMF7qaH3Oiv3cVovPWpubOvhTUHUFwG8+W7Fx+zUKktCWiLer/fZvEd3W8tcgby2kNOdcUfKfDB2ImZJ+P694/OJ4jJ8T5TQerruNoP2OstzcBMon77Ry0XawXR15SZd4JhbqhSi+h7XV6EYmct1UN4zoysA7fx/cWHcBxdnm2G6R0gzmOiiGUd74ptU8lZ3IlEP6EZckK/OZOdy1I8EQeUe7aiTooXZDAn07iPkDZliYRr2e36ij/xjtWCe1AjCksn/xdKfHOKJv5UVob495DU2GuNObe01ewXzexcnldjfp9Sb8SVEFuhHx6IvH5OC+vAq+BVYu2jwvMcVfXi3VSOkB4= + on: + all_branches: true + tags: true + distributions: "sdist bdist_wheel" -- GitLab From 69bd3e10352a0f71f533ac904e84df75d7f64eec Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Thu, 24 Aug 2017 09:50:48 +0200 Subject: [PATCH 0940/1483] rest: fix docstring for strtobool Closes #292 --- gnocchi/rest/__init__.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/gnocchi/rest/__init__.py b/gnocchi/rest/__init__.py index 677c5fc2..250b3f50 100644 --- a/gnocchi/rest/__init__.py +++ b/gnocchi/rest/__init__.py @@ -167,10 +167,7 @@ def get_details(params): def strtobool(varname, 
v): - """Convert a string to a boolean. - - Default to false if unable to convert. - """ + """Convert a string to a boolean.""" try: return utils.strtobool(v) except ValueError as e: -- GitLab From dc7cefe16a467e21e64f474840b97c042b4a16ad Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Mon, 21 Aug 2017 20:45:25 +0200 Subject: [PATCH 0941/1483] rest: fix error message UUID attribute validation in resource type Improves #195 --- gnocchi/resource_type.py | 8 +++++++- gnocchi/tests/functional/gabbits/resource-type.yaml | 2 +- 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/gnocchi/resource_type.py b/gnocchi/resource_type.py index 73b75564..9c78565d 100644 --- a/gnocchi/resource_type.py +++ b/gnocchi/resource_type.py @@ -163,7 +163,13 @@ class StringSchema(CommonAttributeSchema): class UUIDSchema(CommonAttributeSchema): typename = "uuid" - schema_ext = staticmethod(utils.UUID) + + @staticmethod + def schema_ext(value): + try: + return utils.UUID(value) + except ValueError as e: + raise voluptuous.Invalid(e) class NumberSchema(CommonAttributeSchema): diff --git a/gnocchi/tests/functional/gabbits/resource-type.yaml b/gnocchi/tests/functional/gabbits/resource-type.yaml index 90b9a8ba..d536c036 100644 --- a/gnocchi/tests/functional/gabbits/resource-type.yaml +++ b/gnocchi/tests/functional/gabbits/resource-type.yaml @@ -246,7 +246,7 @@ tests: status: 400 response_strings: # split to not match the u' in py2 - - "Invalid input: not a valid value for dictionary value @ data[" + - "Invalid input: badly formed hexadecimal UUID string for dictionary value @ data[" - "'uuid']" # Good resources for this type -- GitLab From 92352dd53c5520fd9ea0be242489fad6610fe52f Mon Sep 17 00:00:00 2001 From: gord chung Date: Wed, 2 Aug 2017 19:20:28 +0000 Subject: [PATCH 0942/1483] don't blindly truncate on init we don't need to truncate for most scenarios. 
- from_grouped_serie doesn't need to be truncated because it's based on bound_series which definitely doesn't need to be truncated - from_data is used to grab from storage but there is no point truncating here as don't check max_size when doing so. (we may want to) - from_timeseries needs to truncate as that's what used to return results --- gnocchi/carbonara.py | 21 +++++++++++++-------- gnocchi/tests/test_carbonara.py | 12 ++++++------ 2 files changed, 19 insertions(+), 14 deletions(-) diff --git a/gnocchi/carbonara.py b/gnocchi/carbonara.py index 8bb0b2ea..4cff5b1e 100644 --- a/gnocchi/carbonara.py +++ b/gnocchi/carbonara.py @@ -543,7 +543,8 @@ class AggregatedTimeSerie(TimeSerie): COMPRESSED_SERIAL_LEN = struct.calcsize(" Date: Tue, 29 Aug 2017 11:22:14 +0200 Subject: [PATCH 0943/1483] Add Gordon Chung to .mailmap This dude uses two different names in the git log, confusing git-log and other tools as there was two different authors. --- .mailmap | 1 + 1 file changed, 1 insertion(+) create mode 100644 .mailmap diff --git a/.mailmap b/.mailmap new file mode 100644 index 00000000..caddbe0c --- /dev/null +++ b/.mailmap @@ -0,0 +1 @@ +gord chung -- GitLab From 4591c73ccc79a22a18d541dc0fd7df364151b7f1 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Sun, 16 Jul 2017 23:29:19 +0200 Subject: [PATCH 0944/1483] use map instead of enumeration --- gnocchi/storage/_carbonara.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/gnocchi/storage/_carbonara.py b/gnocchi/storage/_carbonara.py index fce19b97..4896d790 100644 --- a/gnocchi/storage/_carbonara.py +++ b/gnocchi/storage/_carbonara.py @@ -502,8 +502,7 @@ class CarbonaraBasedStorage(storage.StorageDriver): [(metric, aggregation, granularity, from_timestamp, to_timestamp) for metric in metrics]) - for i, ts in enumerate(tss): - tss[i] = ts.resample(resample) + tss = map(lambda ts: ts.resample(resample), tss) else: tss = self._map_in_thread(self._get_measures_timeserie, [(metric, aggregation, g, -- 
GitLab From f0718aa5462c44c200ac8ff210bfe35d282c4e69 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Sun, 16 Jul 2017 23:27:30 +0200 Subject: [PATCH 0945/1483] Allow to transform measures This change adds an API to transform retrieved measurements. First method implemented are absolute and negative. This API aims to do some transformations that are not aggregation related. This also fixes the data type passed to aggregated(), to ensure it's a list and not an iterator. --- doc/source/rest.j2 | 9 +++++ doc/source/rest.yaml | 3 ++ gnocchi/carbonara.py | 15 +++++++ gnocchi/rest/__init__.py | 24 +++++++++++- gnocchi/storage/__init__.py | 7 +++- gnocchi/storage/_carbonara.py | 12 ++++-- gnocchi/tests/functional/gabbits/metric.yaml | 39 +++++++++++++++++++ gnocchi/tests/test_carbonara.py | 23 +++++++++++ .../notes/tranform-API-da38b196c1c72d44.yaml | 6 +++ 9 files changed, 131 insertions(+), 7 deletions(-) create mode 100644 releasenotes/notes/tranform-API-da38b196c1c72d44.yaml diff --git a/doc/source/rest.j2 b/doc/source/rest.j2 index b306152b..fd4420fa 100644 --- a/doc/source/rest.j2 +++ b/doc/source/rest.j2 @@ -122,6 +122,15 @@ resampled to a new granularity. Depending on the aggregation method and frequency of measures, resampled data may lack accuracy as it is working against previously aggregated data. + +Stored measures can be transformed during retrieval, with: + +{{ scenarios['get-measures-transform']['doc'] }} + +The example will first apply absolute to the values and then negate them. + +Supported transformations are `absolute` and `negative`. + Measures batching ================= It is also possible to batch measures sending, i.e. 
send several measures for diff --git a/doc/source/rest.yaml b/doc/source/rest.yaml index 7103e433..3598ea43 100644 --- a/doc/source/rest.yaml +++ b/doc/source/rest.yaml @@ -270,6 +270,9 @@ - name: get-measures-granularity request: GET /v1/metric/{{ scenarios['create-metric']['response'].json['id'] }}/measures?granularity=1 HTTP/1.1 +- name: get-measures-transform + request: GET /v1/metric/{{ scenarios['create-metric']['response'].json['id'] }}/measures?transform=absolute:negative HTTP/1.1 + - name: get-measures-refresh request: GET /v1/metric/{{ scenarios['create-metric']['response'].json['id'] }}/measures?refresh=true HTTP/1.1 diff --git a/gnocchi/carbonara.py b/gnocchi/carbonara.py index 4cff5b1e..05d28b9c 100644 --- a/gnocchi/carbonara.py +++ b/gnocchi/carbonara.py @@ -562,6 +562,21 @@ class AggregatedTimeSerie(TimeSerie): return AggregatedTimeSerie.from_grouped_serie( self.group_serie(sampling), sampling, self.aggregation_method) + def transform(self, transform): + timestamps = self.ts["timestamps"] + values = self.ts["values"] + for trans in transform: + if trans == "absolute": + values = numpy.abs(values) + elif trans == "negative": + values = numpy.negative(values) + else: + raise ValueError("Transformation '%s' doesn't exists" % trans) + return AggregatedTimeSerie(self.sampling, + self.aggregation_method, + ts=make_timeseries(timestamps, values), + max_size=self.max_size) + @classmethod def from_data(cls, sampling, aggregation_method, timestamps, values, max_size=None): diff --git a/gnocchi/rest/__init__.py b/gnocchi/rest/__init__.py index 250b3f50..2967fc2d 100644 --- a/gnocchi/rest/__init__.py +++ b/gnocchi/rest/__init__.py @@ -375,6 +375,21 @@ def MeasuresListSchema(measures): times.tolist(), values)) +VALID_TRANSFORMATION_METHODS = ["absolute", "negative"] + + +def TransformSchema(transform): + try: + transform = transform.split(":") + except Exception: + abort(400, "Invalid transformation") + + for trans in transform: + if trans not in 
VALID_TRANSFORMATION_METHODS: + abort(400, "Transformation '%s' doesn't exist" % trans) + return transform + + class MetricController(rest.RestController): _custom_actions = { 'measures': ['POST', 'GET'] @@ -407,7 +422,9 @@ class MetricController(rest.RestController): @pecan.expose('json') def get_measures(self, start=None, stop=None, aggregation='mean', - granularity=None, resample=None, refresh=False, **param): + granularity=None, resample=None, refresh=False, + transform=None, + **param): self.enforce_metric("get measures") if not (aggregation in archive_policy.ArchivePolicy.VALID_AGGREGATION_METHODS @@ -419,6 +436,9 @@ class MetricController(rest.RestController): std=archive_policy.ArchivePolicy.VALID_AGGREGATION_METHODS, custom=str(self.custom_agg.keys()))) + if transform is not None: + transform = TransformSchema(transform) + if start is not None: try: start = utils.to_timestamp(start) @@ -458,7 +478,7 @@ class MetricController(rest.RestController): self.metric, start, stop, aggregation, utils.to_timespan(granularity) if granularity is not None else None, - resample) + resample, transform) except (storage.MetricDoesNotExist, storage.GranularityDoesNotExist, storage.AggregationDoesNotExist) as e: diff --git a/gnocchi/storage/__init__.py b/gnocchi/storage/__init__.py index 397fd19c..20d75e7c 100644 --- a/gnocchi/storage/__init__.py +++ b/gnocchi/storage/__init__.py @@ -209,7 +209,8 @@ class StorageDriver(object): @staticmethod def get_measures(metric, from_timestamp=None, to_timestamp=None, - aggregation='mean', granularity=None, resample=None): + aggregation='mean', granularity=None, resample=None, + transform=None): """Get a measure to a metric. :param metric: The metric measured. @@ -218,6 +219,7 @@ class StorageDriver(object): :param aggregation: The type of aggregation to retrieve. :param granularity: The granularity to retrieve. :param resample: The granularity to resample to. 
+ :param transform: List of transformation to apply to the series """ if aggregation not in metric.archive_policy.aggregation_methods: raise AggregationDoesNotExist(metric, aggregation) @@ -231,7 +233,7 @@ class StorageDriver(object): to_timestamp=None, aggregation='mean', reaggregation=None, resample=None, granularity=None, needed_overlap=None, - fill=None): + fill=None, transform=None): """Get aggregated measures of multiple entities. :param entities: The entities measured to aggregate. @@ -243,6 +245,7 @@ class StorageDriver(object): on the retrieved measures. :param resample: The granularity to resample to. :param fill: The value to use to fill in missing data in series. + :param transform: List of transformation to apply to the series """ for metric in metrics: if aggregation not in metric.archive_policy.aggregation_methods: diff --git a/gnocchi/storage/_carbonara.py b/gnocchi/storage/_carbonara.py index 4896d790..11d67e4c 100644 --- a/gnocchi/storage/_carbonara.py +++ b/gnocchi/storage/_carbonara.py @@ -137,7 +137,8 @@ class CarbonaraBasedStorage(storage.StorageDriver): return name.split("_")[-1] == 'v%s' % v def get_measures(self, metric, from_timestamp=None, to_timestamp=None, - aggregation='mean', granularity=None, resample=None): + aggregation='mean', granularity=None, resample=None, + transform=None): super(CarbonaraBasedStorage, self).get_measures( metric, from_timestamp, to_timestamp, aggregation) if granularity is None: @@ -154,6 +155,10 @@ class CarbonaraBasedStorage(storage.StorageDriver): agg_timeseries = agg_timeseries.resample(resample) agg_timeseries = [agg_timeseries] + if transform is not None: + agg_timeseries = list(map(lambda agg: agg.transform(transform), + agg_timeseries)) + return list(itertools.chain(*[ts.fetch(from_timestamp, to_timestamp) for ts in agg_timeseries])) @@ -473,7 +478,8 @@ class CarbonaraBasedStorage(storage.StorageDriver): fill=None): super(CarbonaraBasedStorage, self).get_cross_metric_measures( metrics, from_timestamp, 
to_timestamp, - aggregation, reaggregation, resample, granularity, needed_overlap) + aggregation, reaggregation, resample, granularity, needed_overlap, + fill) if reaggregation is None: reaggregation = aggregation @@ -502,7 +508,7 @@ class CarbonaraBasedStorage(storage.StorageDriver): [(metric, aggregation, granularity, from_timestamp, to_timestamp) for metric in metrics]) - tss = map(lambda ts: ts.resample(resample), tss) + tss = list(map(lambda ts: ts.resample(resample), tss)) else: tss = self._map_in_thread(self._get_measures_timeserie, [(metric, aggregation, g, diff --git a/gnocchi/tests/functional/gabbits/metric.yaml b/gnocchi/tests/functional/gabbits/metric.yaml index 987f9a51..436e4276 100644 --- a/gnocchi/tests/functional/gabbits/metric.yaml +++ b/gnocchi/tests/functional/gabbits/metric.yaml @@ -213,6 +213,45 @@ tests: - ["2015-03-06T14:34:00+00:00", 60.0, 14.0] - ["2015-03-06T14:35:00+00:00", 60.0, 10.0] + - name: push negative measurements to metric again + POST: /v1/metric/$HISTORY['list valid metrics'].$RESPONSE['$[0].id']/measures + data: + - timestamp: "2015-03-06T14:36:15" + value: -16 + - timestamp: "2015-03-06T14:37:15" + value: -23 + status: 202 + + - name: get measurements from metric and transform + GET: /v1/metric/$HISTORY['list valid metrics'].$RESPONSE['$[0].id']/measures?transform=absolute&refresh=true + response_json_paths: + $: + - ["2015-03-06T14:33:57+00:00", 1.0, 43.1] + - ["2015-03-06T14:34:12+00:00", 1.0, 12.0] + - ["2015-03-06T14:34:15+00:00", 1.0, 16.0] + - ["2015-03-06T14:35:12+00:00", 1.0, 9.0] + - ["2015-03-06T14:35:15+00:00", 1.0, 11.0] + - ["2015-03-06T14:36:15+00:00", 1.0, 16.0] + - ["2015-03-06T14:37:15+00:00", 1.0, 23.0] + + - name: get measurements from metric and two transforms + GET: /v1/metric/$HISTORY['list valid metrics'].$RESPONSE['$[0].id']/measures?transform=absolute:negative + response_json_paths: + $: + - ["2015-03-06T14:33:57+00:00", 1.0, -43.1] + - ["2015-03-06T14:34:12+00:00", 1.0, -12.0] + - 
["2015-03-06T14:34:15+00:00", 1.0, -16.0] + - ["2015-03-06T14:35:12+00:00", 1.0, -9.0] + - ["2015-03-06T14:35:15+00:00", 1.0, -11.0] + - ["2015-03-06T14:36:15+00:00", 1.0, -16.0] + - ["2015-03-06T14:37:15+00:00", 1.0, -23.0] + + - name: get measurements from metric and invalid transform + GET: /v1/metric/$HISTORY['list valid metrics'].$RESPONSE['$[0].id']/measures?transform=absolute:notexists + status: 400 + response_strings: + - Transformation 'notexists' doesn't exist + - name: get measurements from metric and resample no granularity GET: /v1/metric/$HISTORY['list valid metrics'].$RESPONSE['$[0].id']/measures?resample=60 status: 400 diff --git a/gnocchi/tests/test_carbonara.py b/gnocchi/tests/test_carbonara.py index 0c7f2f3b..b8e1c107 100644 --- a/gnocchi/tests/test_carbonara.py +++ b/gnocchi/tests/test_carbonara.py @@ -276,6 +276,29 @@ class TestAggregatedTimeSerie(base.BaseTestCase): self.assertEqual(5.9000000000000004, ts[datetime64(2014, 1, 1, 12, 0, 0)][1]) + def test_transform(self): + ts = carbonara.TimeSerie.from_tuples( + [(datetime64(2014, 1, 1, 12, 0, 0), -3), + (datetime64(2014, 1, 1, 12, 1, 0), 5), + (datetime64(2014, 1, 1, 12, 2, 0), -6)]) + ts = carbonara.AggregatedTimeSerie.from_timeseries( + [ts], sampling=60, aggregation_method="last") + ts = ts.transform(["absolute"]) + + self.assertEqual(3, len(ts)) + self.assertEqual([3, 5, 6], [ + ts[datetime64(2014, 1, 1, 12, 0, 0)][1], + ts[datetime64(2014, 1, 1, 12, 1, 0)][1], + ts[datetime64(2014, 1, 1, 12, 2, 0)][1]]) + + ts = ts.transform(["absolute", "negative"]) + + self.assertEqual(3, len(ts)) + self.assertEqual([-3, -5, -6], [ + ts[datetime64(2014, 1, 1, 12, 0, 0)][1], + ts[datetime64(2014, 1, 1, 12, 1, 0)][1], + ts[datetime64(2014, 1, 1, 12, 2, 0)][1]]) + def _do_test_aggregation(self, name, v1, v2): ts = carbonara.TimeSerie.from_tuples( [(datetime64(2014, 1, 1, 12, 0, 0), 3), diff --git a/releasenotes/notes/tranform-API-da38b196c1c72d44.yaml b/releasenotes/notes/tranform-API-da38b196c1c72d44.yaml 
new file mode 100644 index 00000000..355f7ce6 --- /dev/null +++ b/releasenotes/notes/tranform-API-da38b196c1c72d44.yaml @@ -0,0 +1,6 @@ +--- +features: + - | + It's now possible to made some transformation to the stored timeseries + during the retrieval of the measures. The allowed transformations are + absolute and negative. -- GitLab From 818253a4817de3a70195b0fa661046ba02ae584a Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Sat, 29 Jul 2017 20:47:51 +0200 Subject: [PATCH 0946/1483] Add resample tranformer --- doc/source/rest.j2 | 19 +++--- doc/source/rest.yaml | 2 +- gnocchi/carbonara.py | 23 ++++++- gnocchi/rest/__init__.py | 24 +++---- gnocchi/rest/transformation.py | 58 ++++++++++++++++ gnocchi/storage/_carbonara.py | 4 ++ gnocchi/tests/functional/gabbits/metric.yaml | 10 ++- gnocchi/tests/test_carbonara.py | 22 +++++- gnocchi/tests/test_transformation.py | 70 ++++++++++++++++++++ 9 files changed, 199 insertions(+), 33 deletions(-) create mode 100644 gnocchi/rest/transformation.py create mode 100644 gnocchi/tests/test_transformation.py diff --git a/doc/source/rest.j2 b/doc/source/rest.j2 index fd4420fa..ae00d42e 100644 --- a/doc/source/rest.j2 +++ b/doc/source/rest.j2 @@ -117,19 +117,16 @@ resampled to a new granularity. {{ scenarios['get-measures-resample']['doc'] }} -.. note:: - - Depending on the aggregation method and frequency of measures, resampled - data may lack accuracy as it is working against previously aggregated data. - - -Stored measures can be transformed during retrieval, with: +Or we can apply absolute to the values and then negate them for example. {{ scenarios['get-measures-transform']['doc'] }} -The example will first apply absolute to the values and then negate them. +Supported transformations are `absolute`, `negative` and `resample(sampling-in-second)`. + +.. note:: -Supported transformations are `absolute` and `negative`. 
+ Depending on the aggregation method and frequency of measures, resampled + data may lack accuracy as it is working against previously aggregated data. Measures batching ================= @@ -549,8 +546,8 @@ well. .. note:: - Resampling is done prior to any reaggregation if both parameters are - specified. + Tranformations (eg: resample, absolute, ...) are done prior to any + reaggregation if both parameters are specified. Also, aggregation across metrics have different behavior depending on whether boundary values are set ('start' and 'stop') and if 'needed_overlap' diff --git a/doc/source/rest.yaml b/doc/source/rest.yaml index 3598ea43..34b31eac 100644 --- a/doc/source/rest.yaml +++ b/doc/source/rest.yaml @@ -277,7 +277,7 @@ request: GET /v1/metric/{{ scenarios['create-metric']['response'].json['id'] }}/measures?refresh=true HTTP/1.1 - name: get-measures-resample - request: GET /v1/metric/{{ scenarios['create-metric']['response'].json['id'] }}/measures?resample=5&granularity=1 HTTP/1.1 + request: GET /v1/metric/{{ scenarios['create-metric']['response'].json['id'] }}/measures?granularity=1&transform=resample(5) HTTP/1.1 - name: create-resource-generic request: | diff --git a/gnocchi/carbonara.py b/gnocchi/carbonara.py index 05d28b9c..2ab5b13e 100644 --- a/gnocchi/carbonara.py +++ b/gnocchi/carbonara.py @@ -16,6 +16,7 @@ # under the License. 
"""Time series data manipulation, better with pancetta.""" +import collections import functools import itertools import logging @@ -44,6 +45,8 @@ LOG = logging.getLogger(__name__) UNIX_UNIVERSAL_START64 = numpy.datetime64("1970", 'ns') ONE_SECOND = numpy.timedelta64(1, 's') +Transformation = collections.namedtuple('Transformation', ["method", "args"]) + class BeforeEpochError(Exception): """Error raised when a timestamp before Epoch is used.""" @@ -565,14 +568,28 @@ class AggregatedTimeSerie(TimeSerie): def transform(self, transform): timestamps = self.ts["timestamps"] values = self.ts["values"] + sampling = self.sampling + for trans in transform: - if trans == "absolute": + if trans.method == "absolute": values = numpy.abs(values) - elif trans == "negative": + + elif trans.method == "negative": values = numpy.negative(values) + + elif trans.method == "resample": + ts = AggregatedTimeSerie.from_data(sampling, + self.aggregation_method, + timestamps, values, + self.max_size) + sampling = trans.args[0] + ts = ts.resample(sampling) + timestamps = ts["timestamps"] + values = ts["values"] + else: raise ValueError("Transformation '%s' doesn't exists" % trans) - return AggregatedTimeSerie(self.sampling, + return AggregatedTimeSerie(sampling, self.aggregation_method, ts=make_timeseries(timestamps, values), max_size=self.max_size) diff --git a/gnocchi/rest/__init__.py b/gnocchi/rest/__init__.py index 2967fc2d..40da5c29 100644 --- a/gnocchi/rest/__init__.py +++ b/gnocchi/rest/__init__.py @@ -36,6 +36,7 @@ from gnocchi import incoming from gnocchi import indexer from gnocchi import json from gnocchi import resource_type +from gnocchi.rest import transformation from gnocchi import storage from gnocchi import utils @@ -375,19 +376,11 @@ def MeasuresListSchema(measures): times.tolist(), values)) -VALID_TRANSFORMATION_METHODS = ["absolute", "negative"] - - def TransformSchema(transform): try: - transform = transform.split(":") - except Exception: - abort(400, "Invalid 
transformation") - - for trans in transform: - if trans not in VALID_TRANSFORMATION_METHODS: - abort(400, "Transformation '%s' doesn't exist" % trans) - return transform + return transformation.parse(transform) + except transformation.TransformationParserError as e: + abort(400, str(e)) class MetricController(rest.RestController): @@ -436,9 +429,6 @@ class MetricController(rest.RestController): std=archive_policy.ArchivePolicy.VALID_AGGREGATION_METHODS, custom=str(self.custom_agg.keys()))) - if transform is not None: - transform = TransformSchema(transform) - if start is not None: try: start = utils.to_timestamp(start) @@ -452,6 +442,9 @@ class MetricController(rest.RestController): abort(400, "Invalid value for stop") if resample: + if transform: + abort(400, 'transform and resample are exclusive') + if not granularity: abort(400, 'A granularity must be specified to resample') try: @@ -459,6 +452,9 @@ class MetricController(rest.RestController): except ValueError as e: abort(400, e) + if transform is not None: + transform = TransformSchema(transform) + if (strtobool("refresh", refresh) and pecan.request.incoming.has_unprocessed(self.metric)): try: diff --git a/gnocchi/rest/transformation.py b/gnocchi/rest/transformation.py new file mode 100644 index 00000000..cf7ca641 --- /dev/null +++ b/gnocchi/rest/transformation.py @@ -0,0 +1,58 @@ +# -*- encoding: utf-8 -*- +# +# Copyright © 2017 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import functools + +import pyparsing as pp + +from gnocchi import carbonara +from gnocchi import utils + + +# NOTE(sileht): setName is used to make clear error message without pyparsing +# object name +def transform(name, *args): + parser = pp.Keyword(name) + args_parser = pp.Suppress("(").setName("(") + first = True + for arg in args: + if not first: + args_parser += pp.Suppress(",").setName(",") + args_parser += arg + first = False + args_parser += pp.Suppress(")").setName(")") + if not args: + args_parser = pp.Optional(args_parser) + parser = parser + pp.Group(args_parser) + return parser.setParseAction( + lambda t: carbonara.Transformation(t[0], tuple(t[1]))) + + +# NOTE(sileht): not sure pp.nums + "." is enough to support all +# pandas.to_timedelta() formats +timespan = pp.Word(pp.nums + ".").setName("timespan") +timespan = timespan.setParseAction(lambda t: utils.to_timespan(t[0])) + +absolute = transform("absolute") +negative = transform("negative") +resample = transform("resample", timespan) + +transform = pp.delimitedList( + absolute | negative | resample, + delim=":") + +parse = functools.partial(transform.parseString, parseAll=True) +TransformationParserError = pp.ParseException diff --git a/gnocchi/storage/_carbonara.py b/gnocchi/storage/_carbonara.py index 11d67e4c..76915363 100644 --- a/gnocchi/storage/_carbonara.py +++ b/gnocchi/storage/_carbonara.py @@ -152,6 +152,8 @@ class CarbonaraBasedStorage(storage.StorageDriver): metric, aggregation, granularity, from_timestamp, to_timestamp) if resample: + # FIXME(sileht): deprecated this way to resample in favor of + # transform agg_timeseries = agg_timeseries.resample(resample) agg_timeseries = [agg_timeseries] @@ -504,6 +506,8 @@ class CarbonaraBasedStorage(storage.StorageDriver): granularities_in_common = [granularity] if resample and granularity: + # FIXME(sileht): deprecated this way to resample in favor of + # transform tss = self._map_in_thread(self._get_measures_timeserie, [(metric, aggregation, 
granularity, from_timestamp, to_timestamp) diff --git a/gnocchi/tests/functional/gabbits/metric.yaml b/gnocchi/tests/functional/gabbits/metric.yaml index 436e4276..68ac1172 100644 --- a/gnocchi/tests/functional/gabbits/metric.yaml +++ b/gnocchi/tests/functional/gabbits/metric.yaml @@ -213,6 +213,14 @@ tests: - ["2015-03-06T14:34:00+00:00", 60.0, 14.0] - ["2015-03-06T14:35:00+00:00", 60.0, 10.0] + - name: get measurements from metric and resample and negative + GET: /v1/metric/$HISTORY['list valid metrics'].$RESPONSE['$[0].id']/measures?refresh=true&transform=resample(60):negative&granularity=1 + response_json_paths: + $: + - ["2015-03-06T14:33:00+00:00", 60.0, -43.1] + - ["2015-03-06T14:34:00+00:00", 60.0, -14.0] + - ["2015-03-06T14:35:00+00:00", 60.0, -10.0] + - name: push negative measurements to metric again POST: /v1/metric/$HISTORY['list valid metrics'].$RESPONSE['$[0].id']/measures data: @@ -249,8 +257,6 @@ tests: - name: get measurements from metric and invalid transform GET: /v1/metric/$HISTORY['list valid metrics'].$RESPONSE['$[0].id']/measures?transform=absolute:notexists status: 400 - response_strings: - - Transformation 'notexists' doesn't exist - name: get measurements from metric and resample no granularity GET: /v1/metric/$HISTORY['list valid metrics'].$RESPONSE['$[0].id']/measures?resample=60 diff --git a/gnocchi/tests/test_carbonara.py b/gnocchi/tests/test_carbonara.py index b8e1c107..51cbfc22 100644 --- a/gnocchi/tests/test_carbonara.py +++ b/gnocchi/tests/test_carbonara.py @@ -283,7 +283,8 @@ class TestAggregatedTimeSerie(base.BaseTestCase): (datetime64(2014, 1, 1, 12, 2, 0), -6)]) ts = carbonara.AggregatedTimeSerie.from_timeseries( [ts], sampling=60, aggregation_method="last") - ts = ts.transform(["absolute"]) + ts = ts.transform([ + carbonara.Transformation("absolute", tuple())]) self.assertEqual(3, len(ts)) self.assertEqual([3, 5, 6], [ @@ -291,7 +292,9 @@ class TestAggregatedTimeSerie(base.BaseTestCase): ts[datetime64(2014, 1, 1, 12, 1, 
0)][1], ts[datetime64(2014, 1, 1, 12, 2, 0)][1]]) - ts = ts.transform(["absolute", "negative"]) + ts = ts.transform([ + carbonara.Transformation("absolute", tuple()), + carbonara.Transformation("negative", tuple())]) self.assertEqual(3, len(ts)) self.assertEqual([-3, -5, -6], [ @@ -299,6 +302,14 @@ class TestAggregatedTimeSerie(base.BaseTestCase): ts[datetime64(2014, 1, 1, 12, 1, 0)][1], ts[datetime64(2014, 1, 1, 12, 2, 0)][1]]) + ts = ts.transform([ + carbonara.Transformation("absolute", tuple()), + carbonara.Transformation( + "resample", (numpy.timedelta64(360, 's'),))]) + + self.assertEqual(1, len(ts)) + self.assertEqual(6, ts[datetime64(2014, 1, 1, 12, 0, 0)][1]) + def _do_test_aggregation(self, name, v1, v2): ts = carbonara.TimeSerie.from_tuples( [(datetime64(2014, 1, 1, 12, 0, 0), 3), @@ -1461,3 +1472,10 @@ class TestAggregatedTimeSerie(base.BaseTestCase): self.assertEqual(2, len(agg_ts)) self.assertEqual(5, agg_ts[0][1]) self.assertEqual(3, agg_ts[1][1]) + + agg_ts = agg_ts.transform([ + carbonara.Transformation( + "resample", (numpy.timedelta64(10, 's'), ))]) + self.assertEqual(2, len(agg_ts)) + self.assertEqual(5, agg_ts[0][1]) + self.assertEqual(3, agg_ts[1][1]) diff --git a/gnocchi/tests/test_transformation.py b/gnocchi/tests/test_transformation.py new file mode 100644 index 00000000..db0408d3 --- /dev/null +++ b/gnocchi/tests/test_transformation.py @@ -0,0 +1,70 @@ +# -*- encoding: utf-8 -*- +# +# Copyright © 2017 Red Hat +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. + +import numpy + +from gnocchi.rest import transformation +from gnocchi.tests import base + + +class TestTransformParser(base.BaseTestCase): + def test_good(self): + expressions = { + "absolute": [("absolute", tuple())], + "negative()": [("negative", tuple())], + "negative:absolute": [("negative", tuple()), + ("absolute", tuple())], + "negative():absolute": [("negative", tuple()), + ("absolute", tuple())], + "resample(2)": [("resample", (numpy.timedelta64(2, 's'),))], + "resample(5):absolute": [("resample", + (numpy.timedelta64(5, 's'),)), + ("absolute", tuple())], + } + for expr, expected in expressions.items(): + try: + parsed = transformation.parse(expr) + except transformation.TransformationParserError as e: + self.fail("%s invalid: %s" % (expr, str(e))) + for trans, trans_expected in zip(parsed, expected): + self.assertEqual(trans.method, trans_expected[0]) + self.assertEqual(trans.args, trans_expected[1]) + + def test_bad(self): + expressions = [ + "::", + "absolute(", + "absolute(:negative)", + "absolute:negative)", + "foobar:", + "absolute:", + ":absolute", + "absolute():negative():", + "()", + "(", + "foobar", + "foobar()", + "resample()", + "resample(,)", + "resample(-2)", + "resample(, 1.3)", + "resample(a)", + "resample(1.5, 1.3)", + + ] + for expr in expressions: + self.assertRaises(transformation.TransformationParserError, + transformation.parse, expr) -- GitLab From 9c15e499e3505cfe58d40553da589debae69fce4 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Sun, 30 Jul 2017 18:54:10 +0200 Subject: [PATCH 0947/1483] rest: cross aggregation transformation This change allows to transform the cross aggregation measures --- gnocchi/rest/__init__.py | 27 +++++++++++++------ gnocchi/storage/_carbonara.py | 7 +++-- .../tests/functional/gabbits/aggregation.yaml | 26 ++++++++++++++++++ gnocchi/tests/functional/gabbits/metric.yaml | 6 +++++ 4 files changed, 
56 insertions(+), 10 deletions(-) diff --git a/gnocchi/rest/__init__.py b/gnocchi/rest/__init__.py index 40da5c29..b9529e68 100644 --- a/gnocchi/rest/__init__.py +++ b/gnocchi/rest/__init__.py @@ -1588,7 +1588,8 @@ class AggregationResourceController(rest.RestController): @pecan.expose('json') def post(self, start=None, stop=None, aggregation='mean', reaggregation=None, granularity=None, needed_overlap=100.0, - groupby=None, fill=None, refresh=False, resample=None): + groupby=None, fill=None, refresh=False, resample=None, + transform=None): # First, set groupby in the right format: a sorted list of unique # strings. groupby = sorted(set(arg_to_list(groupby))) @@ -1612,7 +1613,8 @@ class AggregationResourceController(rest.RestController): for r in resources))) return AggregationController.get_cross_metric_measures_from_objs( metrics, start, stop, aggregation, reaggregation, - granularity, needed_overlap, fill, refresh, resample) + granularity, needed_overlap, fill, refresh, resample, + transform) def groupper(r): return tuple((attr, r[attr]) for attr in groupby) @@ -1626,7 +1628,8 @@ class AggregationResourceController(rest.RestController): "group": dict(key), "measures": AggregationController.get_cross_metric_measures_from_objs( # noqa metrics, start, stop, aggregation, reaggregation, - granularity, needed_overlap, fill, refresh, resample) + granularity, needed_overlap, fill, refresh, resample, + transform) }) return results @@ -1657,7 +1660,8 @@ class AggregationController(rest.RestController): reaggregation=None, granularity=None, needed_overlap=100.0, fill=None, - refresh=False, resample=None): + refresh=False, resample=None, + transform=None): try: needed_overlap = float(needed_overlap) except ValueError: @@ -1699,6 +1703,9 @@ class AggregationController(rest.RestController): abort(400, e) if resample: + if transform: + abort(400, 'transform and resample are exclusive') + if not granularity: abort(400, 'A granularity must be specified to resample') try: @@ 
-1715,6 +1722,9 @@ class AggregationController(rest.RestController): if fill != 'null': abort(400, "fill must be a float or \'null\': %s" % e) + if transform is not None: + transform = TransformSchema(transform) + try: if strtobool("refresh", refresh): metrics_to_update = [ @@ -1732,10 +1742,11 @@ class AggregationController(rest.RestController): # metric return pecan.request.storage.get_measures( metrics[0], start, stop, aggregation, - granularity, resample) + granularity, resample, transform) return pecan.request.storage.get_cross_metric_measures( metrics, start, stop, aggregation, - reaggregation, resample, granularity, needed_overlap, fill) + reaggregation, resample, granularity, needed_overlap, fill, + transform) except storage.MetricUnaggregatable as e: abort(400, ("One of the metrics being aggregated doesn't have " "matching granularity: %s") % str(e)) @@ -1750,7 +1761,7 @@ class AggregationController(rest.RestController): def get_metric(self, metric=None, start=None, stop=None, aggregation='mean', reaggregation=None, granularity=None, needed_overlap=100.0, fill=None, - refresh=False, resample=None): + refresh=False, resample=None, transform=None): if pecan.request.method == 'GET': try: metric_ids = voluptuous.Schema( @@ -1772,7 +1783,7 @@ class AggregationController(rest.RestController): missing_metric_ids.pop())) return self.get_cross_metric_measures_from_objs( metrics, start, stop, aggregation, reaggregation, - granularity, needed_overlap, fill, refresh, resample) + granularity, needed_overlap, fill, refresh, resample, transform) post_metric = get_metric diff --git a/gnocchi/storage/_carbonara.py b/gnocchi/storage/_carbonara.py index 76915363..236713eb 100644 --- a/gnocchi/storage/_carbonara.py +++ b/gnocchi/storage/_carbonara.py @@ -477,11 +477,11 @@ class CarbonaraBasedStorage(storage.StorageDriver): to_timestamp=None, aggregation='mean', reaggregation=None, resample=None, granularity=None, needed_overlap=100.0, - fill=None): + fill=None, 
transform=None): super(CarbonaraBasedStorage, self).get_cross_metric_measures( metrics, from_timestamp, to_timestamp, aggregation, reaggregation, resample, granularity, needed_overlap, - fill) + fill, transform) if reaggregation is None: reaggregation = aggregation @@ -520,6 +520,9 @@ class CarbonaraBasedStorage(storage.StorageDriver): for metric in metrics for g in granularities_in_common]) + if transform is not None: + tss = list(map(lambda ts: ts.transform(transform), tss)) + try: return [(timestamp.replace(tzinfo=iso8601.iso8601.UTC), r, v) for timestamp, r, v diff --git a/gnocchi/tests/functional/gabbits/aggregation.yaml b/gnocchi/tests/functional/gabbits/aggregation.yaml index 84cc7dfa..d193b55c 100644 --- a/gnocchi/tests/functional/gabbits/aggregation.yaml +++ b/gnocchi/tests/functional/gabbits/aggregation.yaml @@ -131,6 +131,19 @@ tests: - ['2015-03-06T14:33:00+00:00', 60.0, 23.1] - ['2015-03-06T14:34:00+00:00', 60.0, 7.0] + - name: get measure aggregates transform and resample + GET: /v1/aggregation/metric?metric=$HISTORY['get metric list'].$RESPONSE['$[0].id']&metric=$HISTORY['get metric list'].$RESPONSE['$[1].id']&granularity=1&transform=absolute&resample=60 + status: 400 + response_strings: + - 'transform and resample are exclusive' + + - name: get measure aggregates and transform + GET: /v1/aggregation/metric?metric=$HISTORY['get metric list'].$RESPONSE['$[0].id']&metric=$HISTORY['get metric list'].$RESPONSE['$[1].id']&granularity=1&transform=resample(60) + response_json_paths: + $: + - ['2015-03-06T14:33:00+00:00', 60.0, 23.1] + - ['2015-03-06T14:34:00+00:00', 60.0, 7.0] + - name: get measure aggregates with fill zero GET: /v1/aggregation/metric?metric=$HISTORY['get metric list'].$RESPONSE['$[0].id']&metric=$HISTORY['get metric list'].$RESPONSE['$[1].id']&granularity=1&fill=0 response_json_paths: @@ -233,6 +246,19 @@ tests: - ['2015-03-06T14:33:00+00:00', 60.0, 23.1] - ['2015-03-06T14:34:00+00:00', 60.0, 7.0] + - name: get measure aggregates by 
granularity from resources transform and resample + POST: /v1/aggregation/resource/generic/metric/agg_meter?granularity=1&transform=absolute&resample=60 + status: 400 + response_strings: + - 'transform and resample are exclusive' + + - name: get measure aggregates by granularity from resources and transform + POST: /v1/aggregation/resource/generic/metric/agg_meter?granularity=1&transform=resample(60) + response_json_paths: + $: + - ['2015-03-06T14:33:00+00:00', 60.0, 23.1] + - ['2015-03-06T14:34:00+00:00', 60.0, 7.0] + - name: get measure aggregates by granularity from resources and bad resample POST: /v1/aggregation/resource/generic/metric/agg_meter?resample=abc status: 400 diff --git a/gnocchi/tests/functional/gabbits/metric.yaml b/gnocchi/tests/functional/gabbits/metric.yaml index 68ac1172..6d053460 100644 --- a/gnocchi/tests/functional/gabbits/metric.yaml +++ b/gnocchi/tests/functional/gabbits/metric.yaml @@ -268,6 +268,12 @@ tests: GET: /v1/metric/$HISTORY['list valid metrics'].$RESPONSE['$[0].id']/measures?resample=abc status: 400 + - name: get measurements from metric resample and transform + GET: /v1/metric/$HISTORY['list valid metrics'].$RESPONSE['$[0].id']/measures?resample=60&transform=absolute + status: 400 + response_strings: + - 'transform and resample are exclusive' + - name: create valid metric two POST: /v1/metric data: -- GitLab From 9e909bb711392147ef122d6dc174b360896bc898 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Sun, 30 Jul 2017 20:52:10 +0200 Subject: [PATCH 0948/1483] Remove resample from storage No need to support resample and transform on storage side. rest layer convert the old resample feature into the new transform feature. 
--- gnocchi/rest/__init__.py | 23 +++++++++++-------- gnocchi/storage/__init__.py | 7 ++---- gnocchi/storage/_carbonara.py | 34 ++++++++-------------------- gnocchi/tests/test_storage.py | 4 +++- gnocchi/tests/test_transformation.py | 4 ++++ 5 files changed, 33 insertions(+), 39 deletions(-) diff --git a/gnocchi/rest/__init__.py b/gnocchi/rest/__init__.py index b9529e68..76ad5843 100644 --- a/gnocchi/rest/__init__.py +++ b/gnocchi/rest/__init__.py @@ -32,6 +32,7 @@ import werkzeug.http from gnocchi import aggregates from gnocchi import archive_policy +from gnocchi import carbonara from gnocchi import incoming from gnocchi import indexer from gnocchi import json @@ -441,7 +442,11 @@ class MetricController(rest.RestController): except Exception: abort(400, "Invalid value for stop") + if transform is not None: + transform = TransformSchema(transform) + if resample: + # TODO(sileht): This have to be deprecated at some point if transform: abort(400, 'transform and resample are exclusive') @@ -451,9 +456,7 @@ class MetricController(rest.RestController): resample = utils.to_timespan(resample) except ValueError as e: abort(400, e) - - if transform is not None: - transform = TransformSchema(transform) + transform = [carbonara.Transformation("resample", (resample,))] if (strtobool("refresh", refresh) and pecan.request.incoming.has_unprocessed(self.metric)): @@ -474,7 +477,7 @@ class MetricController(rest.RestController): self.metric, start, stop, aggregation, utils.to_timespan(granularity) if granularity is not None else None, - resample, transform) + transform) except (storage.MetricDoesNotExist, storage.GranularityDoesNotExist, storage.AggregationDoesNotExist) as e: @@ -1702,7 +1705,11 @@ class AggregationController(rest.RestController): except ValueError as e: abort(400, e) + if transform is not None: + transform = TransformSchema(transform) + if resample: + # TODO(sileht): This have to be deprecated at some point if transform: abort(400, 'transform and resample are 
exclusive') @@ -1712,6 +1719,7 @@ class AggregationController(rest.RestController): resample = utils.to_timespan(resample) except ValueError as e: abort(400, e) + transform = [carbonara.Transformation("resample", (resample,))] if fill is not None: if granularity is None: @@ -1722,9 +1730,6 @@ class AggregationController(rest.RestController): if fill != 'null': abort(400, "fill must be a float or \'null\': %s" % e) - if transform is not None: - transform = TransformSchema(transform) - try: if strtobool("refresh", refresh): metrics_to_update = [ @@ -1742,10 +1747,10 @@ class AggregationController(rest.RestController): # metric return pecan.request.storage.get_measures( metrics[0], start, stop, aggregation, - granularity, resample, transform) + granularity, transform) return pecan.request.storage.get_cross_metric_measures( metrics, start, stop, aggregation, - reaggregation, resample, granularity, needed_overlap, fill, + reaggregation, granularity, needed_overlap, fill, transform) except storage.MetricUnaggregatable as e: abort(400, ("One of the metrics being aggregated doesn't have " diff --git a/gnocchi/storage/__init__.py b/gnocchi/storage/__init__.py index 20d75e7c..d011a654 100644 --- a/gnocchi/storage/__init__.py +++ b/gnocchi/storage/__init__.py @@ -209,8 +209,7 @@ class StorageDriver(object): @staticmethod def get_measures(metric, from_timestamp=None, to_timestamp=None, - aggregation='mean', granularity=None, resample=None, - transform=None): + aggregation='mean', granularity=None, transform=None): """Get a measure to a metric. :param metric: The metric measured. @@ -218,7 +217,6 @@ class StorageDriver(object): :param to timestamp: The timestamp to get the measure to. :param aggregation: The type of aggregation to retrieve. :param granularity: The granularity to retrieve. - :param resample: The granularity to resample to. 
:param transform: List of transformation to apply to the series """ if aggregation not in metric.archive_policy.aggregation_methods: @@ -231,7 +229,7 @@ class StorageDriver(object): @staticmethod def get_cross_metric_measures(metrics, from_timestamp=None, to_timestamp=None, aggregation='mean', - reaggregation=None, resample=None, + reaggregation=None, granularity=None, needed_overlap=None, fill=None, transform=None): """Get aggregated measures of multiple entities. @@ -243,7 +241,6 @@ class StorageDriver(object): :param aggregation: The type of aggregation to retrieve. :param reaggregation: The type of aggregation to compute on the retrieved measures. - :param resample: The granularity to resample to. :param fill: The value to use to fill in missing data in series. :param transform: List of transformation to apply to the series """ diff --git a/gnocchi/storage/_carbonara.py b/gnocchi/storage/_carbonara.py index 236713eb..b834e158 100644 --- a/gnocchi/storage/_carbonara.py +++ b/gnocchi/storage/_carbonara.py @@ -137,7 +137,7 @@ class CarbonaraBasedStorage(storage.StorageDriver): return name.split("_")[-1] == 'v%s' % v def get_measures(self, metric, from_timestamp=None, to_timestamp=None, - aggregation='mean', granularity=None, resample=None, + aggregation='mean', granularity=None, transform=None): super(CarbonaraBasedStorage, self).get_measures( metric, from_timestamp, to_timestamp, aggregation) @@ -148,14 +148,9 @@ class CarbonaraBasedStorage(storage.StorageDriver): from_timestamp, to_timestamp) for ap in reversed(metric.archive_policy.definition))) else: - agg_timeseries = self._get_measures_timeserie( + agg_timeseries = [self._get_measures_timeserie( metric, aggregation, granularity, - from_timestamp, to_timestamp) - if resample: - # FIXME(sileht): deprecated this way to resample in favor of - # transform - agg_timeseries = agg_timeseries.resample(resample) - agg_timeseries = [agg_timeseries] + from_timestamp, to_timestamp)] if transform is not None: 
agg_timeseries = list(map(lambda agg: agg.transform(transform), @@ -475,12 +470,12 @@ class CarbonaraBasedStorage(storage.StorageDriver): def get_cross_metric_measures(self, metrics, from_timestamp=None, to_timestamp=None, aggregation='mean', - reaggregation=None, resample=None, + reaggregation=None, granularity=None, needed_overlap=100.0, fill=None, transform=None): super(CarbonaraBasedStorage, self).get_cross_metric_measures( metrics, from_timestamp, to_timestamp, - aggregation, reaggregation, resample, granularity, needed_overlap, + aggregation, reaggregation, granularity, needed_overlap, fill, transform) if reaggregation is None: @@ -505,20 +500,11 @@ class CarbonaraBasedStorage(storage.StorageDriver): else: granularities_in_common = [granularity] - if resample and granularity: - # FIXME(sileht): deprecated this way to resample in favor of - # transform - tss = self._map_in_thread(self._get_measures_timeserie, - [(metric, aggregation, granularity, - from_timestamp, to_timestamp) - for metric in metrics]) - tss = list(map(lambda ts: ts.resample(resample), tss)) - else: - tss = self._map_in_thread(self._get_measures_timeserie, - [(metric, aggregation, g, - from_timestamp, to_timestamp) - for metric in metrics - for g in granularities_in_common]) + tss = self._map_in_thread(self._get_measures_timeserie, + [(metric, aggregation, g, + from_timestamp, to_timestamp) + for metric in metrics + for g in granularities_in_common]) if transform is not None: tss = list(map(lambda ts: ts.transform(transform), tss)) diff --git a/gnocchi/tests/test_storage.py b/gnocchi/tests/test_storage.py index 13fa663a..9230add8 100644 --- a/gnocchi/tests/test_storage.py +++ b/gnocchi/tests/test_storage.py @@ -1145,13 +1145,15 @@ class TestStorageDriver(tests_base.TestCase): def test_resample_no_metric(self): """https://github.com/gnocchixyz/gnocchi/issues/69""" + transform = [carbonara.Transformation( + "resample", (numpy.timedelta64(1, 'h'),))] self.assertEqual([], 
self.storage.get_measures( self.metric, datetime64(2014, 1, 1), datetime64(2015, 1, 1), granularity=numpy.timedelta64(300, 's'), - resample=numpy.timedelta64(1, 'h'))) + transform=transform)) class TestMeasureQuery(tests_base.TestCase): diff --git a/gnocchi/tests/test_transformation.py b/gnocchi/tests/test_transformation.py index db0408d3..1cd07d6b 100644 --- a/gnocchi/tests/test_transformation.py +++ b/gnocchi/tests/test_transformation.py @@ -33,6 +33,10 @@ class TestTransformParser(base.BaseTestCase): "resample(5):absolute": [("resample", (numpy.timedelta64(5, 's'),)), ("absolute", tuple())], + "resample(5):resample(10)": [("resample", + (numpy.timedelta64(5, 's'),)), + ("resample", + (numpy.timedelta64(10, 's'),))] } for expr, expected in expressions.items(): try: -- GitLab From 146792fabac3c82057d8f93fcc67d58a8577fa6e Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Wed, 30 Aug 2017 15:21:25 +0200 Subject: [PATCH 0949/1483] Add missing pyparsing requirement --- requirements.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/requirements.txt b/requirements.txt index 72762e2f..bb6261eb 100644 --- a/requirements.txt +++ b/requirements.txt @@ -23,3 +23,4 @@ Paste PasteDeploy monotonic daiquiri +pyparsing>=2.2.0 -- GitLab From 92b1cb80b235a0aca9c4506a6ef43d050b3bf2fb Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Tue, 18 Jul 2017 13:43:41 +0200 Subject: [PATCH 0950/1483] Merge gnocchi.storage._carbonara into gnocchi.storage The plan was to keep the storage drivers Carbonara agnostic, but we cannot say we succeeded. There are many places from metricd to gnocchi.storage where the abstraction leaks and a lot of assumption about how driver works are based on Carbonara. This patch simplifies the code base by breaking the assumption that someone will someday built a storage driver that is not based on Carbonara. I don't think it will ever happen and that there is any point at this stage of the project. 
--- gnocchi/opts.py | 2 +- gnocchi/storage/__init__.py | 599 +++++++++++++++++++++++++++++++--- gnocchi/storage/_carbonara.py | 568 -------------------------------- gnocchi/storage/ceph.py | 3 +- gnocchi/storage/file.py | 3 +- gnocchi/storage/redis.py | 3 +- gnocchi/storage/s3.py | 3 +- gnocchi/storage/swift.py | 3 +- gnocchi/tests/test_storage.py | 6 +- 9 files changed, 556 insertions(+), 634 deletions(-) delete mode 100644 gnocchi/storage/_carbonara.py diff --git a/gnocchi/opts.py b/gnocchi/opts.py index 390cf48e..e0ec719b 100644 --- a/gnocchi/opts.py +++ b/gnocchi/opts.py @@ -158,7 +158,7 @@ def list_opts(): 'to force refresh of metric.'), ) + gnocchi.rest.app.API_OPTS, ), - ("storage", (_STORAGE_OPTS + gnocchi.storage._carbonara.OPTS)), + ("storage", _STORAGE_OPTS + gnocchi.storage._CARBONARA_OPTS), ("incoming", _INCOMING_OPTS), ("statsd", ( cfg.HostAddressOpt('host', diff --git a/gnocchi/storage/__init__.py b/gnocchi/storage/__init__.py index d011a654..8128041c 100644 --- a/gnocchi/storage/__init__.py +++ b/gnocchi/storage/__init__.py @@ -1,5 +1,6 @@ # -*- encoding: utf-8 -*- # +# Copyright © 2016-2017 Red Hat, Inc. # Copyright © 2014-2015 eNovance # # Licensed under the Apache License, Version 2.0 (the "License"); you may @@ -14,12 +15,18 @@ # License for the specific language governing permissions and limitations # under the License. import collections +import functools +import itertools import operator +from concurrent import futures import daiquiri +import iso8601 +import numpy from oslo_config import cfg +import six -from gnocchi import exceptions +from gnocchi import carbonara from gnocchi import indexer from gnocchi import utils @@ -30,6 +37,17 @@ OPTS = [ help='Storage driver to use'), ] +_CARBONARA_OPTS = [ + cfg.IntOpt('aggregation_workers_number', + default=1, min=1, + help='Number of threads to process and store aggregates. 
' + 'Set value roughly equal to number of aggregates to be ' + 'computed per metric'), + cfg.StrOpt('coordination_url', + secret=True, + help='Coordination driver URL'), +] + LOG = daiquiri.getLogger(__name__) @@ -135,6 +153,13 @@ class LockedMetric(StorageError): super(LockedMetric, self).__init__("Metric %s is locked" % metric) +class CorruptionError(ValueError): + """Data corrupted, damn it.""" + + def __init__(self, message): + super(CorruptionError, self).__init__(message) + + def get_driver(conf, coord=None): """Return the configured driver.""" return utils.get_driver_class('gnocchi.storage', conf.storage)( @@ -142,37 +167,328 @@ def get_driver(conf, coord=None): class StorageDriver(object): + + def __init__(self, conf, coord=None): + self.aggregation_workers_number = conf.aggregation_workers_number + if self.aggregation_workers_number == 1: + # NOTE(jd) Avoid using futures at all if we don't want any threads. + self._map_in_thread = self._map_no_thread + else: + self._map_in_thread = self._map_in_futures_threads + self.coord = (coord if coord else + utils.get_coordinator_and_start(conf.coordination_url)) + self.shared_coord = bool(coord) + + def stop(self): + if not self.shared_coord: + self.coord.stop() + @staticmethod - def __init__(conf, coord=None): + def upgrade(): pass @staticmethod - def stop(): - pass + def _get_measures(metric, timestamp_key, aggregation, version=3): + raise NotImplementedError @staticmethod - def upgrade(): - pass + def _get_unaggregated_timeserie(metric, version=3): + raise NotImplementedError - def process_background_tasks(self, index, incoming, metrics, sync=False): - """Process background tasks for this storage. + def _get_unaggregated_timeserie_and_unserialize( + self, metric, block_size, back_window): + """Retrieve unaggregated timeserie for a metric and unserialize it. - This calls :func:`process_new_measures` to process new measures + Returns a gnocchi.carbonara.BoundTimeSerie object. 
If the data cannot + be retrieved, returns None. - :param index: An indexer to be used for querying metrics - :param incoming: The incoming storage - :param metrics: The list of metrics waiting for processing - :param sync: If True, then process everything synchronously and raise - on error - :type sync: bool """ + with utils.StopWatch() as sw: + raw_measures = ( + self._get_unaggregated_timeserie( + metric) + ) + if not raw_measures: + return + LOG.debug( + "Retrieve unaggregated measures " + "for %s in %.2fs", + metric.id, sw.elapsed()) try: - self.process_new_measures(index, incoming, metrics, sync) - except Exception: - if sync: - raise - LOG.error("Unexpected error during measures processing", - exc_info=True) + return carbonara.BoundTimeSerie.unserialize( + raw_measures, block_size, back_window) + except carbonara.InvalidData: + raise CorruptionError( + "Data corruption detected for %s " + "unaggregated timeserie" % metric.id) + + @staticmethod + def _store_unaggregated_timeserie(metric, data, version=3): + raise NotImplementedError + + @staticmethod + def _store_metric_measures(metric, timestamp_key, aggregation, + data, offset=None, version=3): + raise NotImplementedError + + def _list_split_keys_for_metric(self, metric, aggregation, granularity, + version=3): + return set(map( + functools.partial(carbonara.SplitKey, sampling=granularity), + (numpy.array( + list(self._list_split_keys( + metric, aggregation, granularity, version)), + dtype=numpy.float) * 10e8).astype('datetime64[ns]'))) + + @staticmethod + def _list_split_keys(metric, aggregation, granularity, version=3): + raise NotImplementedError + + @staticmethod + def _version_check(name, v): + """Validate object matches expected version. + + Version should be last attribute and start with 'v' + """ + return name.split("_")[-1] == 'v%s' % v + + def get_measures(self, metric, from_timestamp=None, to_timestamp=None, + aggregation='mean', granularity=None, transform=None): + """Get a measure to a metric. 
+ + :param metric: The metric measured. + :param from timestamp: The timestamp to get the measure from. + :param to timestamp: The timestamp to get the measure to. + :param aggregation: The type of aggregation to retrieve. + :param granularity: The granularity to retrieve. + :param transform: List of transformation to apply to the series + """ + if aggregation not in metric.archive_policy.aggregation_methods: + raise AggregationDoesNotExist(metric, aggregation) + + if granularity is None: + agg_timeseries = self._map_in_thread( + self._get_measures_timeserie, + ((metric, aggregation, ap.granularity, + from_timestamp, to_timestamp) + for ap in reversed(metric.archive_policy.definition))) + else: + agg_timeseries = [self._get_measures_timeserie( + metric, aggregation, granularity, + from_timestamp, to_timestamp)] + + if transform is not None: + agg_timeseries = list(map(lambda agg: agg.transform(transform), + agg_timeseries)) + + return list(itertools.chain(*[ts.fetch(from_timestamp, to_timestamp) + for ts in agg_timeseries])) + + def _get_measures_and_unserialize(self, metric, key, aggregation): + data = self._get_measures(metric, key, aggregation) + try: + return carbonara.AggregatedTimeSerie.unserialize( + data, key, aggregation) + except carbonara.InvalidData: + LOG.error("Data corruption detected for %s " + "aggregated `%s' timeserie, granularity `%s' " + "around time `%s', ignoring.", + metric.id, aggregation, key.sampling, key) + + def _get_measures_timeserie(self, metric, + aggregation, granularity, + from_timestamp=None, to_timestamp=None): + + # Find the number of point + for d in metric.archive_policy.definition: + if d.granularity == granularity: + points = d.points + break + else: + raise GranularityDoesNotExist(metric, granularity) + + all_keys = None + try: + all_keys = self._list_split_keys_for_metric( + metric, aggregation, granularity) + except MetricDoesNotExist: + for d in metric.archive_policy.definition: + if d.granularity == granularity: + 
return carbonara.AggregatedTimeSerie( + sampling=granularity, + aggregation_method=aggregation, + max_size=d.points) + raise GranularityDoesNotExist(metric, granularity) + + if from_timestamp: + from_timestamp = carbonara.SplitKey.from_timestamp_and_sampling( + from_timestamp, granularity) + + if to_timestamp: + to_timestamp = carbonara.SplitKey.from_timestamp_and_sampling( + to_timestamp, granularity) + + timeseries = list(filter( + lambda x: x is not None, + self._map_in_thread( + self._get_measures_and_unserialize, + ((metric, key, aggregation) + for key in sorted(all_keys) + if ((not from_timestamp or key >= from_timestamp) + and (not to_timestamp or key <= to_timestamp)))) + )) + + return carbonara.AggregatedTimeSerie.from_timeseries( + sampling=granularity, + aggregation_method=aggregation, + timeseries=timeseries, + max_size=points) + + def _store_timeserie_split(self, metric, key, split, + aggregation, oldest_mutable_timestamp): + # NOTE(jd) We write the full split only if the driver works that way + # (self.WRITE_FULL) or if the oldest_mutable_timestamp is out of range. + write_full = self.WRITE_FULL or next(key) <= oldest_mutable_timestamp + if write_full: + try: + existing = self._get_measures_and_unserialize( + metric, key, aggregation) + except AggregationDoesNotExist: + pass + else: + if existing is not None: + if split is not None: + existing.merge(split) + split = existing + + if split is None: + # `split' can be none if existing is None and no split was passed + # in order to rewrite and compress the data; in that case, it means + # the split key is present and listed, but some aggregation method + # or granularity is missing. That means data is corrupted, but it + # does not mean we have to fail, we can just do nothing and log a + # warning. 
+ LOG.warning("No data found for metric %s, granularity %f " + "and aggregation method %s (split key %s): " + "possible data corruption", + metric, key.sampling, + aggregation, key) + return + + offset, data = split.serialize(key, compressed=write_full) + + return self._store_metric_measures(metric, key, aggregation, + data, offset=offset) + + def _add_measures(self, aggregation, archive_policy_def, + metric, grouped_serie, + previous_oldest_mutable_timestamp, + oldest_mutable_timestamp): + + if aggregation.startswith("rate:"): + grouped_serie = grouped_serie.derived() + aggregation_to_compute = aggregation[5:] + else: + aggregation_to_compute = aggregation + + ts = carbonara.AggregatedTimeSerie.from_grouped_serie( + grouped_serie, archive_policy_def.granularity, + aggregation_to_compute, max_size=archive_policy_def.points) + + # Don't do anything if the timeserie is empty + if not ts: + return + + # We only need to check for rewrite if driver is not in WRITE_FULL mode + # and if we already stored splits once + need_rewrite = ( + not self.WRITE_FULL + and previous_oldest_mutable_timestamp is not None + ) + + if archive_policy_def.timespan or need_rewrite: + existing_keys = self._list_split_keys_for_metric( + metric, aggregation, archive_policy_def.granularity) + + # First delete old splits + if archive_policy_def.timespan: + oldest_point_to_keep = ts.last - archive_policy_def.timespan + oldest_key_to_keep = ts.get_split_key(oldest_point_to_keep) + for key in list(existing_keys): + # NOTE(jd) Only delete if the key is strictly inferior to + # the timestamp; we don't delete any timeserie split that + # contains our timestamp, so we prefer to keep a bit more + # than deleting too much + if key < oldest_key_to_keep: + self._delete_metric_measures(metric, key, aggregation) + existing_keys.remove(key) + else: + oldest_key_to_keep = None + + # Rewrite all read-only splits just for fun (and compression). 
This + # only happens if `previous_oldest_mutable_timestamp' exists, which + # means we already wrote some splits at some point – so this is not the + # first time we treat this timeserie. + if need_rewrite: + previous_oldest_mutable_key = ts.get_split_key( + previous_oldest_mutable_timestamp) + oldest_mutable_key = ts.get_split_key(oldest_mutable_timestamp) + + if previous_oldest_mutable_key != oldest_mutable_key: + for key in existing_keys: + if previous_oldest_mutable_key <= key < oldest_mutable_key: + LOG.debug( + "Compressing previous split %s (%s) for metric %s", + key, aggregation, metric) + # NOTE(jd) Rewrite it entirely for fun (and later for + # compression). For that, we just pass None as split. + self._store_timeserie_split( + metric, key, + None, aggregation, oldest_mutable_timestamp) + + for key, split in ts.split(): + if oldest_key_to_keep is None or key >= oldest_key_to_keep: + LOG.debug( + "Storing split %s (%s) for metric %s", + key, aggregation, metric) + self._store_timeserie_split( + metric, key, split, aggregation, oldest_mutable_timestamp) + + @staticmethod + def _delete_metric(metric): + raise NotImplementedError + + def delete_metric(self, incoming, metric, sync=False): + LOG.debug("Deleting metric %s", metric) + lock = incoming.get_sack_lock( + self.coord, incoming.sack_for_metric(metric.id)) + if not lock.acquire(blocking=sync): + raise LockedMetric(metric) + # NOTE(gordc): no need to hold lock because the metric has been already + # marked as "deleted" in the indexer so no measure worker + # is going to process it anymore. 
+ lock.release() + self._delete_metric(metric) + incoming.delete_unprocessed_measures_for_metric_id(metric.id) + LOG.debug("Deleted metric %s", metric) + + @staticmethod + def _delete_metric_measures(metric, timestamp_key, + aggregation, granularity, version=3): + raise NotImplementedError + + def refresh_metric(self, indexer, incoming, metric, timeout): + s = incoming.sack_for_metric(metric.id) + lock = incoming.get_sack_lock(self.coord, s) + if not lock.acquire(blocking=timeout): + raise SackLockTimeoutError( + 'Unable to refresh metric: %s. Metric is locked. ' + 'Please try again.' % metric.id) + try: + self.process_new_measures(indexer, incoming, + [six.text_type(metric.id)]) + finally: + lock.release() def expunge_metrics(self, incoming, index, sync=False): """Remove deleted metrics @@ -199,38 +515,138 @@ class StorageDriver(object): LOG.error("Unable to expunge metric %s from storage", m, exc_info=True) - @staticmethod - def process_new_measures(indexer, incoming, metrics, sync=False): - """Process added measures in background. + def process_background_tasks(self, index, incoming, metrics, sync=False): + """Process background tasks for this storage. - Some drivers might need to have a background task running that process - the measures sent to metrics. This is used for that. + This calls :func:`process_new_measures` to process new measures + + :param index: An indexer to be used for querying metrics + :param incoming: The incoming storage + :param metrics: The list of metrics waiting for processing + :param sync: If True, then process everything synchronously and raise + on error + :type sync: bool """ + try: + self.process_new_measures(index, incoming, metrics, sync) + except Exception: + if sync: + raise + LOG.error("Unexpected error during measures processing", + exc_info=True) - @staticmethod - def get_measures(metric, from_timestamp=None, to_timestamp=None, - aggregation='mean', granularity=None, transform=None): - """Get a measure to a metric. 
+ def process_new_measures(self, indexer, incoming, metrics_to_process, + sync=False): + """Process added measures in background. - :param metric: The metric measured. - :param from timestamp: The timestamp to get the measure from. - :param to timestamp: The timestamp to get the measure to. - :param aggregation: The type of aggregation to retrieve. - :param granularity: The granularity to retrieve. - :param transform: List of transformation to apply to the series + Some drivers might need to have a background task running that process + the measures sent to metrics. This is used for that. """ - if aggregation not in metric.archive_policy.aggregation_methods: - raise AggregationDoesNotExist(metric, aggregation) - - @staticmethod - def delete_metric(metric, sync=False): - raise exceptions.NotImplementedError + # process only active metrics. deleted metrics with unprocessed + # measures will be skipped until cleaned by janitor. + metrics = indexer.list_metrics(ids=metrics_to_process) + for metric in metrics: + # NOTE(gordc): must lock at sack level + try: + LOG.debug("Processing measures for %s", metric) + with incoming.process_measure_for_metric(metric) \ + as measures: + self._compute_and_store_timeseries(metric, measures) + LOG.debug("Measures for metric %s processed", metric) + except Exception: + if sync: + raise + LOG.error("Error processing new measures", exc_info=True) + + def _compute_and_store_timeseries(self, metric, measures): + # NOTE(mnaser): The metric could have been handled by + # another worker, ignore if no measures. 
+ if len(measures) == 0: + LOG.debug("Skipping %s (already processed)", metric) + return + + measures.sort(order='timestamps') + + agg_methods = list(metric.archive_policy.aggregation_methods) + block_size = metric.archive_policy.max_block_size + back_window = metric.archive_policy.back_window + definition = metric.archive_policy.definition + # NOTE(sileht): We keep one more blocks to calculate rate of change + # correctly + if any(filter(lambda x: x.startswith("rate:"), agg_methods)): + back_window += 1 - @staticmethod - def get_cross_metric_measures(metrics, from_timestamp=None, + try: + ts = self._get_unaggregated_timeserie_and_unserialize( + metric, block_size=block_size, back_window=back_window) + except MetricDoesNotExist: + try: + self._create_metric(metric) + except MetricAlreadyExists: + # Created in the mean time, do not worry + pass + ts = None + except CorruptionError as e: + LOG.error(e) + ts = None + + if ts is None: + # This is the first time we treat measures for this + # metric, or data are corrupted, create a new one + ts = carbonara.BoundTimeSerie(block_size=block_size, + back_window=back_window) + current_first_block_timestamp = None + else: + current_first_block_timestamp = ts.first_block_timestamp() + + # NOTE(jd) This is Python where you need such + # hack to pass a variable around a closure, + # sorry. + computed_points = {"number": 0} + + def _map_add_measures(bound_timeserie): + # NOTE (gordc): bound_timeserie is entire set of + # unaggregated measures matching largest + # granularity. 
the following takes only the points + # affected by new measures for specific granularity + tstamp = max(bound_timeserie.first, measures['timestamps'][0]) + new_first_block_timestamp = bound_timeserie.first_block_timestamp() + computed_points['number'] = len(bound_timeserie) + for d in definition: + ts = bound_timeserie.group_serie( + d.granularity, carbonara.round_timestamp( + tstamp, d.granularity)) + + self._map_in_thread( + self._add_measures, + ((aggregation, d, metric, ts, + current_first_block_timestamp, + new_first_block_timestamp) + for aggregation in agg_methods)) + + with utils.StopWatch() as sw: + ts.set_values(measures, + before_truncate_callback=_map_add_measures) + + number_of_operations = (len(agg_methods) * len(definition)) + perf = "" + elapsed = sw.elapsed() + if elapsed > 0: + perf = " (%d points/s, %d measures/s)" % ( + ((number_of_operations * computed_points['number']) / + elapsed), + ((number_of_operations * len(measures)) / elapsed) + ) + LOG.debug("Computed new metric %s with %d new measures " + "in %.2f seconds%s", + metric.id, len(measures), elapsed, perf) + + self._store_unaggregated_timeserie(metric, ts.serialize()) + + def get_cross_metric_measures(self, metrics, from_timestamp=None, to_timestamp=None, aggregation='mean', reaggregation=None, - granularity=None, needed_overlap=None, + granularity=None, needed_overlap=100.0, fill=None, transform=None): """Get aggregated measures of multiple entities. 
@@ -254,10 +670,59 @@ class StorageDriver(object): else: raise GranularityDoesNotExist(metric, granularity) - @staticmethod - def search_value(metrics, query, from_timestamp=None, - to_timestamp=None, - aggregation='mean', + if reaggregation is None: + reaggregation = aggregation + + if granularity is None: + granularities = ( + definition.granularity + for metric in metrics + for definition in metric.archive_policy.definition + ) + granularities_in_common = [ + g + for g, occurrence in six.iteritems( + collections.Counter(granularities)) + if occurrence == len(metrics) + ] + + if not granularities_in_common: + raise MetricUnaggregatable( + metrics, 'No granularity match') + else: + granularities_in_common = [granularity] + + tss = self._map_in_thread(self._get_measures_timeserie, + [(metric, aggregation, g, + from_timestamp, to_timestamp) + for metric in metrics + for g in granularities_in_common]) + + if transform is not None: + tss = list(map(lambda ts: ts.transform(transform), tss)) + + try: + return [(timestamp.replace(tzinfo=iso8601.iso8601.UTC), r, v) + for timestamp, r, v + in carbonara.AggregatedTimeSerie.aggregated( + tss, reaggregation, from_timestamp, to_timestamp, + needed_overlap, fill)] + except carbonara.UnAggregableTimeseries as e: + raise MetricUnaggregatable(metrics, e.reason) + + def _find_measure(self, metric, aggregation, granularity, predicate, + from_timestamp, to_timestamp): + timeserie = self._get_measures_timeserie( + metric, aggregation, granularity, + from_timestamp, to_timestamp) + values = timeserie.fetch(from_timestamp, to_timestamp) + return {metric: + [(timestamp, g, value) + for timestamp, g, value in values + if predicate(value)]} + + def search_value(self, metrics, query, from_timestamp=None, + to_timestamp=None, aggregation='mean', granularity=None): """Search for an aggregated value that realizes a predicate. @@ -268,7 +733,41 @@ class StorageDriver(object): :param aggregation: The type of aggregation to retrieve. 
:param granularity: The granularity to retrieve. """ - raise exceptions.NotImplementedError + + granularity = granularity or [] + predicate = MeasureQuery(query) + + results = self._map_in_thread( + self._find_measure, + [(metric, aggregation, + gran, predicate, + from_timestamp, to_timestamp) + for metric in metrics + for gran in granularity or + (defin.granularity + for defin in metric.archive_policy.definition)]) + result = collections.defaultdict(list) + for r in results: + for metric, metric_result in six.iteritems(r): + result[metric].extend(metric_result) + + # Sort the result + for metric, r in six.iteritems(result): + # Sort by timestamp asc, granularity desc + r.sort(key=lambda t: (t[0], - t[1])) + + return result + + @staticmethod + def _map_no_thread(method, list_of_args): + return list(itertools.starmap(method, list_of_args)) + + def _map_in_futures_threads(self, method, list_of_args): + with futures.ThreadPoolExecutor( + max_workers=self.aggregation_workers_number) as executor: + # We use 'list' to iterate all threads here to raise the first + # exception now, not much choice + return list(executor.map(lambda args: method(*args), list_of_args)) class MeasureQuery(object): diff --git a/gnocchi/storage/_carbonara.py b/gnocchi/storage/_carbonara.py deleted file mode 100644 index b834e158..00000000 --- a/gnocchi/storage/_carbonara.py +++ /dev/null @@ -1,568 +0,0 @@ -# -*- encoding: utf-8 -*- -# -# Copyright © 2016-2017 Red Hat, Inc. -# Copyright © 2014-2015 eNovance -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. -import collections -import functools -import itertools - -from concurrent import futures -import daiquiri -import iso8601 -import numpy -from oslo_config import cfg -import six -import six.moves - -from gnocchi import carbonara -from gnocchi import storage -from gnocchi import utils - - -OPTS = [ - cfg.IntOpt('aggregation_workers_number', - default=1, min=1, - help='Number of threads to process and store aggregates. ' - 'Set value roughly equal to number of aggregates to be ' - 'computed per metric'), - cfg.StrOpt('coordination_url', - secret=True, - help='Coordination driver URL'), - -] - -LOG = daiquiri.getLogger(__name__) - - -class CorruptionError(ValueError): - """Data corrupted, damn it.""" - - def __init__(self, message): - super(CorruptionError, self).__init__(message) - - -class CarbonaraBasedStorage(storage.StorageDriver): - - def __init__(self, conf, coord=None): - super(CarbonaraBasedStorage, self).__init__(conf) - self.aggregation_workers_number = conf.aggregation_workers_number - if self.aggregation_workers_number == 1: - # NOTE(jd) Avoid using futures at all if we don't want any threads. - self._map_in_thread = self._map_no_thread - else: - self._map_in_thread = self._map_in_futures_threads - self.coord = (coord if coord else - utils.get_coordinator_and_start(conf.coordination_url)) - self.shared_coord = bool(coord) - - def stop(self): - if not self.shared_coord: - self.coord.stop() - - @staticmethod - def _get_measures(metric, timestamp_key, aggregation, version=3): - raise NotImplementedError - - @staticmethod - def _get_unaggregated_timeserie(metric, version=3): - raise NotImplementedError - - def _get_unaggregated_timeserie_and_unserialize( - self, metric, block_size, back_window): - """Retrieve unaggregated timeserie for a metric and unserialize it. - - Returns a gnocchi.carbonara.BoundTimeSerie object. 
If the data cannot - be retrieved, returns None. - - """ - with utils.StopWatch() as sw: - raw_measures = ( - self._get_unaggregated_timeserie( - metric) - ) - if not raw_measures: - return - LOG.debug( - "Retrieve unaggregated measures " - "for %s in %.2fs", - metric.id, sw.elapsed()) - try: - return carbonara.BoundTimeSerie.unserialize( - raw_measures, block_size, back_window) - except carbonara.InvalidData: - raise CorruptionError( - "Data corruption detected for %s " - "unaggregated timeserie" % metric.id) - - @staticmethod - def _store_unaggregated_timeserie(metric, data, version=3): - raise NotImplementedError - - @staticmethod - def _store_metric_measures(metric, timestamp_key, aggregation, - data, offset=None, version=3): - raise NotImplementedError - - def _list_split_keys_for_metric(self, metric, aggregation, granularity, - version=3): - return set(map( - functools.partial(carbonara.SplitKey, sampling=granularity), - (numpy.array( - list(self._list_split_keys( - metric, aggregation, granularity, version)), - dtype=numpy.float) * 10e8).astype('datetime64[ns]'))) - - @staticmethod - def _list_split_keys(metric, aggregation, granularity, version=3): - raise NotImplementedError - - @staticmethod - def _version_check(name, v): - """Validate object matches expected version. 
- - Version should be last attribute and start with 'v' - """ - return name.split("_")[-1] == 'v%s' % v - - def get_measures(self, metric, from_timestamp=None, to_timestamp=None, - aggregation='mean', granularity=None, - transform=None): - super(CarbonaraBasedStorage, self).get_measures( - metric, from_timestamp, to_timestamp, aggregation) - if granularity is None: - agg_timeseries = self._map_in_thread( - self._get_measures_timeserie, - ((metric, aggregation, ap.granularity, - from_timestamp, to_timestamp) - for ap in reversed(metric.archive_policy.definition))) - else: - agg_timeseries = [self._get_measures_timeserie( - metric, aggregation, granularity, - from_timestamp, to_timestamp)] - - if transform is not None: - agg_timeseries = list(map(lambda agg: agg.transform(transform), - agg_timeseries)) - - return list(itertools.chain(*[ts.fetch(from_timestamp, to_timestamp) - for ts in agg_timeseries])) - - def _get_measures_and_unserialize(self, metric, key, aggregation): - data = self._get_measures(metric, key, aggregation) - try: - return carbonara.AggregatedTimeSerie.unserialize( - data, key, aggregation) - except carbonara.InvalidData: - LOG.error("Data corruption detected for %s " - "aggregated `%s' timeserie, granularity `%s' " - "around time `%s', ignoring.", - metric.id, aggregation, key.sampling, key) - - def _get_measures_timeserie(self, metric, - aggregation, granularity, - from_timestamp=None, to_timestamp=None): - - # Find the number of point - for d in metric.archive_policy.definition: - if d.granularity == granularity: - points = d.points - break - else: - raise storage.GranularityDoesNotExist(metric, granularity) - - all_keys = None - try: - all_keys = self._list_split_keys_for_metric( - metric, aggregation, granularity) - except storage.MetricDoesNotExist: - for d in metric.archive_policy.definition: - if d.granularity == granularity: - return carbonara.AggregatedTimeSerie( - sampling=granularity, - aggregation_method=aggregation, - 
max_size=d.points) - raise storage.GranularityDoesNotExist(metric, granularity) - - if from_timestamp: - from_timestamp = carbonara.SplitKey.from_timestamp_and_sampling( - from_timestamp, granularity) - - if to_timestamp: - to_timestamp = carbonara.SplitKey.from_timestamp_and_sampling( - to_timestamp, granularity) - - timeseries = list(filter( - lambda x: x is not None, - self._map_in_thread( - self._get_measures_and_unserialize, - ((metric, key, aggregation) - for key in sorted(all_keys) - if ((not from_timestamp or key >= from_timestamp) - and (not to_timestamp or key <= to_timestamp)))) - )) - - return carbonara.AggregatedTimeSerie.from_timeseries( - sampling=granularity, - aggregation_method=aggregation, - timeseries=timeseries, - max_size=points) - - def _store_timeserie_split(self, metric, key, split, - aggregation, oldest_mutable_timestamp): - # NOTE(jd) We write the full split only if the driver works that way - # (self.WRITE_FULL) or if the oldest_mutable_timestamp is out of range. - write_full = self.WRITE_FULL or next(key) <= oldest_mutable_timestamp - if write_full: - try: - existing = self._get_measures_and_unserialize( - metric, key, aggregation) - except storage.AggregationDoesNotExist: - pass - else: - if existing is not None: - if split is not None: - existing.merge(split) - split = existing - - if split is None: - # `split' can be none if existing is None and no split was passed - # in order to rewrite and compress the data; in that case, it means - # the split key is present and listed, but some aggregation method - # or granularity is missing. That means data is corrupted, but it - # does not mean we have to fail, we can just do nothing and log a - # warning. 
- LOG.warning("No data found for metric %s, granularity %f " - "and aggregation method %s (split key %s): " - "possible data corruption", - metric, key.sampling, - aggregation, key) - return - - offset, data = split.serialize(key, compressed=write_full) - - return self._store_metric_measures(metric, key, aggregation, - data, offset=offset) - - def _add_measures(self, aggregation, archive_policy_def, - metric, grouped_serie, - previous_oldest_mutable_timestamp, - oldest_mutable_timestamp): - - if aggregation.startswith("rate:"): - grouped_serie = grouped_serie.derived() - aggregation_to_compute = aggregation[5:] - else: - aggregation_to_compute = aggregation - - ts = carbonara.AggregatedTimeSerie.from_grouped_serie( - grouped_serie, archive_policy_def.granularity, - aggregation_to_compute, max_size=archive_policy_def.points) - - # Don't do anything if the timeserie is empty - if not ts: - return - - # We only need to check for rewrite if driver is not in WRITE_FULL mode - # and if we already stored splits once - need_rewrite = ( - not self.WRITE_FULL - and previous_oldest_mutable_timestamp is not None - ) - - if archive_policy_def.timespan or need_rewrite: - existing_keys = self._list_split_keys_for_metric( - metric, aggregation, archive_policy_def.granularity) - - # First delete old splits - if archive_policy_def.timespan: - oldest_point_to_keep = ts.last - archive_policy_def.timespan - oldest_key_to_keep = ts.get_split_key(oldest_point_to_keep) - for key in list(existing_keys): - # NOTE(jd) Only delete if the key is strictly inferior to - # the timestamp; we don't delete any timeserie split that - # contains our timestamp, so we prefer to keep a bit more - # than deleting too much - if key < oldest_key_to_keep: - self._delete_metric_measures(metric, key, aggregation) - existing_keys.remove(key) - else: - oldest_key_to_keep = None - - # Rewrite all read-only splits just for fun (and compression). 
This - # only happens if `previous_oldest_mutable_timestamp' exists, which - # means we already wrote some splits at some point – so this is not the - # first time we treat this timeserie. - if need_rewrite: - previous_oldest_mutable_key = ts.get_split_key( - previous_oldest_mutable_timestamp) - oldest_mutable_key = ts.get_split_key(oldest_mutable_timestamp) - - if previous_oldest_mutable_key != oldest_mutable_key: - for key in existing_keys: - if previous_oldest_mutable_key <= key < oldest_mutable_key: - LOG.debug( - "Compressing previous split %s (%s) for metric %s", - key, aggregation, metric) - # NOTE(jd) Rewrite it entirely for fun (and later for - # compression). For that, we just pass None as split. - self._store_timeserie_split( - metric, key, - None, aggregation, oldest_mutable_timestamp) - - for key, split in ts.split(): - if oldest_key_to_keep is None or key >= oldest_key_to_keep: - LOG.debug( - "Storing split %s (%s) for metric %s", - key, aggregation, metric) - self._store_timeserie_split( - metric, key, split, aggregation, oldest_mutable_timestamp) - - @staticmethod - def _delete_metric(metric): - raise NotImplementedError - - def delete_metric(self, incoming, metric, sync=False): - LOG.debug("Deleting metric %s", metric) - lock = incoming.get_sack_lock( - self.coord, incoming.sack_for_metric(metric.id)) - if not lock.acquire(blocking=sync): - raise storage.LockedMetric(metric) - # NOTE(gordc): no need to hold lock because the metric has been already - # marked as "deleted" in the indexer so no measure worker - # is going to process it anymore. 
- lock.release() - self._delete_metric(metric) - incoming.delete_unprocessed_measures_for_metric_id(metric.id) - LOG.debug("Deleted metric %s", metric) - - @staticmethod - def _delete_metric_measures(metric, timestamp_key, - aggregation, granularity, version=3): - raise NotImplementedError - - def refresh_metric(self, indexer, incoming, metric, timeout): - s = incoming.sack_for_metric(metric.id) - lock = incoming.get_sack_lock(self.coord, s) - if not lock.acquire(blocking=timeout): - raise storage.SackLockTimeoutError( - 'Unable to refresh metric: %s. Metric is locked. ' - 'Please try again.' % metric.id) - try: - self.process_new_measures(indexer, incoming, - [six.text_type(metric.id)]) - finally: - lock.release() - - def process_new_measures(self, indexer, incoming, metrics_to_process, - sync=False): - # process only active metrics. deleted metrics with unprocessed - # measures will be skipped until cleaned by janitor. - metrics = indexer.list_metrics(ids=metrics_to_process) - for metric in metrics: - # NOTE(gordc): must lock at sack level - try: - LOG.debug("Processing measures for %s", metric) - with incoming.process_measure_for_metric(metric) \ - as measures: - self._compute_and_store_timeseries(metric, measures) - LOG.debug("Measures for metric %s processed", metric) - except Exception: - if sync: - raise - LOG.error("Error processing new measures", exc_info=True) - - def _compute_and_store_timeseries(self, metric, measures): - # NOTE(mnaser): The metric could have been handled by - # another worker, ignore if no measures. 
- if len(measures) == 0: - LOG.debug("Skipping %s (already processed)", metric) - return - - measures.sort(order='timestamps') - - agg_methods = list(metric.archive_policy.aggregation_methods) - block_size = metric.archive_policy.max_block_size - back_window = metric.archive_policy.back_window - definition = metric.archive_policy.definition - # NOTE(sileht): We keep one more blocks to calculate rate of change - # correctly - if any(filter(lambda x: x.startswith("rate:"), agg_methods)): - back_window += 1 - - try: - ts = self._get_unaggregated_timeserie_and_unserialize( - metric, block_size=block_size, back_window=back_window) - except storage.MetricDoesNotExist: - try: - self._create_metric(metric) - except storage.MetricAlreadyExists: - # Created in the mean time, do not worry - pass - ts = None - except CorruptionError as e: - LOG.error(e) - ts = None - - if ts is None: - # This is the first time we treat measures for this - # metric, or data are corrupted, create a new one - ts = carbonara.BoundTimeSerie(block_size=block_size, - back_window=back_window) - current_first_block_timestamp = None - else: - current_first_block_timestamp = ts.first_block_timestamp() - - # NOTE(jd) This is Python where you need such - # hack to pass a variable around a closure, - # sorry. - computed_points = {"number": 0} - - def _map_add_measures(bound_timeserie): - # NOTE (gordc): bound_timeserie is entire set of - # unaggregated measures matching largest - # granularity. 
the following takes only the points - # affected by new measures for specific granularity - tstamp = max(bound_timeserie.first, measures['timestamps'][0]) - new_first_block_timestamp = bound_timeserie.first_block_timestamp() - computed_points['number'] = len(bound_timeserie) - for d in definition: - ts = bound_timeserie.group_serie( - d.granularity, carbonara.round_timestamp( - tstamp, d.granularity)) - - self._map_in_thread( - self._add_measures, - ((aggregation, d, metric, ts, - current_first_block_timestamp, - new_first_block_timestamp) - for aggregation in agg_methods)) - - with utils.StopWatch() as sw: - ts.set_values(measures, - before_truncate_callback=_map_add_measures) - - number_of_operations = (len(agg_methods) * len(definition)) - perf = "" - elapsed = sw.elapsed() - if elapsed > 0: - perf = " (%d points/s, %d measures/s)" % ( - ((number_of_operations * computed_points['number']) / - elapsed), - ((number_of_operations * len(measures)) / elapsed) - ) - LOG.debug("Computed new metric %s with %d new measures " - "in %.2f seconds%s", - metric.id, len(measures), elapsed, perf) - - self._store_unaggregated_timeserie(metric, ts.serialize()) - - def get_cross_metric_measures(self, metrics, from_timestamp=None, - to_timestamp=None, aggregation='mean', - reaggregation=None, - granularity=None, needed_overlap=100.0, - fill=None, transform=None): - super(CarbonaraBasedStorage, self).get_cross_metric_measures( - metrics, from_timestamp, to_timestamp, - aggregation, reaggregation, granularity, needed_overlap, - fill, transform) - - if reaggregation is None: - reaggregation = aggregation - - if granularity is None: - granularities = ( - definition.granularity - for metric in metrics - for definition in metric.archive_policy.definition - ) - granularities_in_common = [ - g - for g, occurrence in six.iteritems( - collections.Counter(granularities)) - if occurrence == len(metrics) - ] - - if not granularities_in_common: - raise storage.MetricUnaggregatable( - metrics, 
'No granularity match') - else: - granularities_in_common = [granularity] - - tss = self._map_in_thread(self._get_measures_timeserie, - [(metric, aggregation, g, - from_timestamp, to_timestamp) - for metric in metrics - for g in granularities_in_common]) - - if transform is not None: - tss = list(map(lambda ts: ts.transform(transform), tss)) - - try: - return [(timestamp.replace(tzinfo=iso8601.iso8601.UTC), r, v) - for timestamp, r, v - in carbonara.AggregatedTimeSerie.aggregated( - tss, reaggregation, from_timestamp, to_timestamp, - needed_overlap, fill)] - except carbonara.UnAggregableTimeseries as e: - raise storage.MetricUnaggregatable(metrics, e.reason) - - def _find_measure(self, metric, aggregation, granularity, predicate, - from_timestamp, to_timestamp): - timeserie = self._get_measures_timeserie( - metric, aggregation, granularity, - from_timestamp, to_timestamp) - values = timeserie.fetch(from_timestamp, to_timestamp) - return {metric: - [(timestamp, g, value) - for timestamp, g, value in values - if predicate(value)]} - - def search_value(self, metrics, query, from_timestamp=None, - to_timestamp=None, aggregation='mean', - granularity=None): - granularity = granularity or [] - predicate = storage.MeasureQuery(query) - - results = self._map_in_thread( - self._find_measure, - [(metric, aggregation, - gran, predicate, - from_timestamp, to_timestamp) - for metric in metrics - for gran in granularity or - (defin.granularity - for defin in metric.archive_policy.definition)]) - result = collections.defaultdict(list) - for r in results: - for metric, metric_result in six.iteritems(r): - result[metric].extend(metric_result) - - # Sort the result - for metric, r in six.iteritems(result): - # Sort by timestamp asc, granularity desc - r.sort(key=lambda t: (t[0], - t[1])) - - return result - - @staticmethod - def _map_no_thread(method, list_of_args): - return list(itertools.starmap(method, list_of_args)) - - def _map_in_futures_threads(self, method, list_of_args): - 
with futures.ThreadPoolExecutor( - max_workers=self.aggregation_workers_number) as executor: - # We use 'list' to iterate all threads here to raise the first - # exception now, not much choice - return list(executor.map(lambda args: method(*args), list_of_args)) diff --git a/gnocchi/storage/ceph.py b/gnocchi/storage/ceph.py index 1e37969f..65660373 100644 --- a/gnocchi/storage/ceph.py +++ b/gnocchi/storage/ceph.py @@ -18,7 +18,6 @@ from oslo_config import cfg from gnocchi.common import ceph from gnocchi import storage -from gnocchi.storage import _carbonara from gnocchi import utils @@ -41,7 +40,7 @@ OPTS = [ rados = ceph.rados -class CephStorage(_carbonara.CarbonaraBasedStorage): +class CephStorage(storage.StorageDriver): WRITE_FULL = False def __init__(self, conf, coord=None): diff --git a/gnocchi/storage/file.py b/gnocchi/storage/file.py index 47b88fbb..697fffff 100644 --- a/gnocchi/storage/file.py +++ b/gnocchi/storage/file.py @@ -22,7 +22,6 @@ import tempfile from oslo_config import cfg from gnocchi import storage -from gnocchi.storage import _carbonara from gnocchi import utils @@ -33,7 +32,7 @@ OPTS = [ ] -class FileStorage(_carbonara.CarbonaraBasedStorage): +class FileStorage(storage.StorageDriver): WRITE_FULL = True def __init__(self, conf, coord=None): diff --git a/gnocchi/storage/redis.py b/gnocchi/storage/redis.py index e744b02f..a7ebf173 100644 --- a/gnocchi/storage/redis.py +++ b/gnocchi/storage/redis.py @@ -17,7 +17,6 @@ from oslo_config import cfg from gnocchi.common import redis from gnocchi import storage -from gnocchi.storage import _carbonara from gnocchi import utils @@ -28,7 +27,7 @@ OPTS = [ ] -class RedisStorage(_carbonara.CarbonaraBasedStorage): +class RedisStorage(storage.StorageDriver): WRITE_FULL = True STORAGE_PREFIX = "timeseries" diff --git a/gnocchi/storage/s3.py b/gnocchi/storage/s3.py index 28129218..32b7cbd0 100644 --- a/gnocchi/storage/s3.py +++ b/gnocchi/storage/s3.py @@ -20,7 +20,6 @@ import tenacity from gnocchi.common import 
s3 from gnocchi import storage -from gnocchi.storage import _carbonara from gnocchi import utils boto3 = s3.boto3 @@ -58,7 +57,7 @@ def retry_if_operationaborted(exception): and exception.response['Error'].get('Code') == "OperationAborted") -class S3Storage(_carbonara.CarbonaraBasedStorage): +class S3Storage(storage.StorageDriver): WRITE_FULL = True diff --git a/gnocchi/storage/swift.py b/gnocchi/storage/swift.py index de0166d7..c9ccb898 100644 --- a/gnocchi/storage/swift.py +++ b/gnocchi/storage/swift.py @@ -18,7 +18,6 @@ from oslo_config import cfg from gnocchi.common import swift from gnocchi import storage -from gnocchi.storage import _carbonara from gnocchi import utils swclient = swift.swclient @@ -79,7 +78,7 @@ OPTS = [ ] -class SwiftStorage(_carbonara.CarbonaraBasedStorage): +class SwiftStorage(storage.StorageDriver): WRITE_FULL = True diff --git a/gnocchi/tests/test_storage.py b/gnocchi/tests/test_storage.py index 9230add8..60bb1c81 100644 --- a/gnocchi/tests/test_storage.py +++ b/gnocchi/tests/test_storage.py @@ -24,7 +24,6 @@ from gnocchi import archive_policy from gnocchi import carbonara from gnocchi import indexer from gnocchi import storage -from gnocchi.storage import _carbonara from gnocchi.storage import ceph from gnocchi.storage import file from gnocchi.storage import redis @@ -80,9 +79,6 @@ class TestStorageDriver(tests_base.TestCase): self.assertIsInstance(driver, storage.StorageDriver) def test_corrupted_data(self): - if not isinstance(self.storage, _carbonara.CarbonaraBasedStorage): - self.skipTest("This driver is not based on Carbonara") - self.incoming.add_measures(self.metric, [ storage.Measure(datetime64(2014, 1, 1, 12, 0, 1), 69), ]) @@ -117,7 +113,7 @@ class TestStorageDriver(tests_base.TestCase): except Exception: pass - with mock.patch('gnocchi.storage._carbonara.LOG') as LOG: + with mock.patch('gnocchi.storage.LOG') as LOG: self.trigger_processing() self.assertFalse(LOG.error.called) -- GitLab From 
e890ca1abea8efab31ded21665f99612c7c7a62f Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Tue, 18 Jul 2017 13:46:20 +0200 Subject: [PATCH 0951/1483] storage: make CorruptionError a StorageError This has no impact and is mostly aesthetic. --- gnocchi/storage/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/gnocchi/storage/__init__.py b/gnocchi/storage/__init__.py index 8128041c..42c147a3 100644 --- a/gnocchi/storage/__init__.py +++ b/gnocchi/storage/__init__.py @@ -153,7 +153,7 @@ class LockedMetric(StorageError): super(LockedMetric, self).__init__("Metric %s is locked" % metric) -class CorruptionError(ValueError): +class CorruptionError(ValueError, StorageError): """Data corrupted, damn it.""" def __init__(self, message): -- GitLab From bdbee4543cc386c8d48433a991106f1217ede08f Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Tue, 18 Jul 2017 13:48:57 +0200 Subject: [PATCH 0952/1483] storage: define SackLockTimeoutError as a StorageError This has no impact expect aesthetic. 
--- gnocchi/storage/__init__.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/gnocchi/storage/__init__.py b/gnocchi/storage/__init__.py index 42c147a3..e1da0f9f 100644 --- a/gnocchi/storage/__init__.py +++ b/gnocchi/storage/__init__.py @@ -51,10 +51,6 @@ _CARBONARA_OPTS = [ LOG = daiquiri.getLogger(__name__) -class SackLockTimeoutError(Exception): - pass - - Measure = collections.namedtuple("Measure", ['timestamp', 'value']) @@ -160,6 +156,10 @@ class CorruptionError(ValueError, StorageError): super(CorruptionError, self).__init__(message) +class SackLockTimeoutError(StorageError): + pass + + def get_driver(conf, coord=None): """Return the configured driver.""" return utils.get_driver_class('gnocchi.storage', conf.storage)( -- GitLab From e3ff4a0fb1b87e17abcc161c633e5a6e3458ef09 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Tue, 22 Aug 2017 14:28:08 +0200 Subject: [PATCH 0953/1483] Move cross aggregation computing to gnocchi.rest This moves the cross aggregation of timeseries to a new dedicated module gnocchi.rest.cross_metric. It turns out that this is one of the latest module depending on Pandas (for probably good reasons). However it is not directly related to Carbonara, so there's no need to move it there. It could be put into gnocchi.storage, but it actually have nothing to do with storage: it's only recomputing stuff once they are retrieved. Moving this to the REST area makes sure that it is only loaded by the REST API server, and therefore prevent other processes such as metricd to load Pandas (which is huge in memory). 
Closes #61 --- gnocchi/carbonara.py | 125 ----- gnocchi/rest/__init__.py | 8 +- gnocchi/rest/cross_metric.py | 223 ++++++++ gnocchi/storage/__init__.py | 79 --- gnocchi/tests/base.py | 14 + gnocchi/tests/test_carbonara.py | 584 -------------------- gnocchi/tests/test_cross_metric.py | 844 +++++++++++++++++++++++++++++ gnocchi/tests/test_storage.py | 221 -------- 8 files changed, 1086 insertions(+), 1012 deletions(-) create mode 100644 gnocchi/rest/cross_metric.py create mode 100644 gnocchi/tests/test_cross_metric.py diff --git a/gnocchi/carbonara.py b/gnocchi/carbonara.py index 2ab5b13e..7caa36d3 100644 --- a/gnocchi/carbonara.py +++ b/gnocchi/carbonara.py @@ -19,7 +19,6 @@ import collections import functools import itertools -import logging import math import random import re @@ -29,18 +28,9 @@ import time import lz4.block import numpy import numpy.lib.recfunctions -import pandas from scipy import ndimage import six -# NOTE(sileht): pandas relies on time.strptime() -# and often triggers http://bugs.python.org/issue7980 -# its dues to our heavy threads usage, this is the workaround -# to ensure the module is correctly loaded before we use really it. 
-time.strptime("2016-02-19", "%Y-%m-%d") - -LOG = logging.getLogger(__name__) - UNIX_UNIVERSAL_START64 = numpy.datetime64("1970", 'ns') ONE_SECOND = numpy.timedelta64(1, 's') @@ -57,13 +47,6 @@ class BeforeEpochError(Exception): "%s is before Epoch" % timestamp) -class UnAggregableTimeseries(Exception): - """Error raised when timeseries cannot be aggregated.""" - def __init__(self, reason): - self.reason = reason - super(UnAggregableTimeseries, self).__init__(reason) - - class UnknownAggregationMethod(Exception): """Error raised when the aggregation method is unknown.""" def __init__(self, agg): @@ -921,114 +904,6 @@ class AggregatedTimeSerie(TimeSerie): print(" resample(%s) speed: %.2f Hz" % (agg, per_sec(t1, t0))) - @staticmethod - def aggregated(timeseries, aggregation, from_timestamp=None, - to_timestamp=None, needed_percent_of_overlap=100.0, - fill=None): - - index = ['timestamp', 'granularity'] - columns = ['timestamp', 'granularity', 'value'] - dataframes = [] - - if not timeseries: - return [] - - for timeserie in timeseries: - timeserie_raw = list(timeserie.fetch(from_timestamp, to_timestamp)) - - if timeserie_raw: - dataframe = pandas.DataFrame(timeserie_raw, columns=columns) - dataframe = dataframe.set_index(index) - dataframes.append(dataframe) - - if not dataframes: - return [] - - number_of_distinct_datasource = len(timeseries) / len( - set(ts.sampling for ts in timeseries) - ) - - left_boundary_ts = None - right_boundary_ts = None - if fill is not None: - fill_df = pandas.concat(dataframes, axis=1) - if fill != 'null': - fill_df = fill_df.fillna(fill) - single_df = pandas.concat([series for __, series in - fill_df.iteritems()]).to_frame() - grouped = single_df.groupby(level=index) - else: - grouped = pandas.concat(dataframes).groupby(level=index) - maybe_next_timestamp_is_left_boundary = False - - left_holes = 0 - right_holes = 0 - holes = 0 - for (timestamp, __), group in grouped: - if group.count()['value'] != number_of_distinct_datasource: - 
maybe_next_timestamp_is_left_boundary = True - if left_boundary_ts is not None: - right_holes += 1 - else: - left_holes += 1 - elif maybe_next_timestamp_is_left_boundary: - left_boundary_ts = timestamp - maybe_next_timestamp_is_left_boundary = False - else: - right_boundary_ts = timestamp - holes += right_holes - right_holes = 0 - - if to_timestamp is not None: - holes += left_holes - if from_timestamp is not None: - holes += right_holes - - if to_timestamp is not None or from_timestamp is not None: - maximum = len(grouped) - percent_of_overlap = (float(maximum - holes) * 100.0 / - float(maximum)) - if percent_of_overlap < needed_percent_of_overlap: - raise UnAggregableTimeseries( - 'Less than %f%% of datapoints overlap in this ' - 'timespan (%.2f%%)' % (needed_percent_of_overlap, - percent_of_overlap)) - if (needed_percent_of_overlap > 0 and - (right_boundary_ts == left_boundary_ts or - (right_boundary_ts is None - and maybe_next_timestamp_is_left_boundary))): - LOG.debug("We didn't find points that overlap in those " - "timeseries. " - "right_boundary_ts=%(right_boundary_ts)s, " - "left_boundary_ts=%(left_boundary_ts)s, " - "groups=%(groups)s", { - 'right_boundary_ts': right_boundary_ts, - 'left_boundary_ts': left_boundary_ts, - 'groups': list(grouped) - }) - raise UnAggregableTimeseries('No overlap') - - # NOTE(sileht): this call the aggregation method on already - # aggregated values, for some kind of aggregation this can - # result can looks weird, but this is the best we can do - # because we don't have anymore the raw datapoints in those case. - # FIXME(sileht): so should we bailout is case of stddev, percentile - # and median? 
- agg_timeserie = getattr(grouped, aggregation)() - agg_timeserie = agg_timeserie.dropna().reset_index() - - if from_timestamp is None and left_boundary_ts: - agg_timeserie = agg_timeserie[ - agg_timeserie['timestamp'] >= left_boundary_ts] - if to_timestamp is None and right_boundary_ts: - agg_timeserie = agg_timeserie[ - agg_timeserie['timestamp'] <= right_boundary_ts] - - points = agg_timeserie.sort_values(by=['granularity', 'timestamp'], - ascending=[0, 1]) - return six.moves.zip(points.timestamp, points.granularity, - points.value) - if __name__ == '__main__': import sys diff --git a/gnocchi/rest/__init__.py b/gnocchi/rest/__init__.py index 76ad5843..ee91d455 100644 --- a/gnocchi/rest/__init__.py +++ b/gnocchi/rest/__init__.py @@ -1,6 +1,6 @@ # -*- encoding: utf-8 -*- # -# Copyright © 2016 Red Hat, Inc. +# Copyright © 2016-2017 Red Hat, Inc. # Copyright © 2014-2015 eNovance # # Licensed under the Apache License, Version 2.0 (the "License"); you may @@ -37,6 +37,7 @@ from gnocchi import incoming from gnocchi import indexer from gnocchi import json from gnocchi import resource_type +from gnocchi.rest import cross_metric from gnocchi.rest import transformation from gnocchi import storage from gnocchi import utils @@ -1748,11 +1749,12 @@ class AggregationController(rest.RestController): return pecan.request.storage.get_measures( metrics[0], start, stop, aggregation, granularity, transform) - return pecan.request.storage.get_cross_metric_measures( + return cross_metric.get_cross_metric_measures( + pecan.request.storage, metrics, start, stop, aggregation, reaggregation, granularity, needed_overlap, fill, transform) - except storage.MetricUnaggregatable as e: + except cross_metric.MetricUnaggregatable as e: abort(400, ("One of the metrics being aggregated doesn't have " "matching granularity: %s") % str(e)) except (storage.MetricDoesNotExist, diff --git a/gnocchi/rest/cross_metric.py b/gnocchi/rest/cross_metric.py new file mode 100644 index 00000000..1e0c9651 --- 
/dev/null +++ b/gnocchi/rest/cross_metric.py @@ -0,0 +1,223 @@ +# -*- encoding: utf-8 -*- +# +# Copyright © 2016-2017 Red Hat, Inc. +# Copyright © 2014-2015 eNovance +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +"""Timeseries cross-aggregation.""" +import collections + + +import daiquiri +import iso8601 +import pandas +import six + +from gnocchi import storage as gnocchi_storage + + +LOG = daiquiri.getLogger(__name__) + + +class UnAggregableTimeseries(Exception): + """Error raised when timeseries cannot be aggregated.""" + def __init__(self, reason): + self.reason = reason + super(UnAggregableTimeseries, self).__init__(reason) + + +class MetricUnaggregatable(Exception): + """Error raised when metrics can't be aggregated.""" + + def __init__(self, metrics, reason): + self.metrics = metrics + self.reason = reason + super(MetricUnaggregatable, self).__init__( + "Metrics %s can't be aggregated: %s" + % (", ".join((str(m.id) for m in metrics)), reason)) + + +def get_cross_metric_measures(storage, metrics, from_timestamp=None, + to_timestamp=None, aggregation='mean', + reaggregation=None, + granularity=None, needed_overlap=100.0, + fill=None, transform=None): + """Get aggregated measures of multiple entities. + + :param storage: The storage driver. + :param metrics: The metrics measured to aggregate. + :param from timestamp: The timestamp to get the measure from. + :param to timestamp: The timestamp to get the measure to. + :param granularity: The granularity to retrieve. 
+ :param aggregation: The type of aggregation to retrieve. + :param reaggregation: The type of aggregation to compute + on the retrieved measures. + :param fill: The value to use to fill in missing data in series. + :param transform: List of transformation to apply to the series + """ + for metric in metrics: + if aggregation not in metric.archive_policy.aggregation_methods: + raise gnocchi_storage.AggregationDoesNotExist(metric, aggregation) + if granularity is not None: + for d in metric.archive_policy.definition: + if d.granularity == granularity: + break + else: + raise gnocchi_storage.GranularityDoesNotExist( + metric, granularity) + + if reaggregation is None: + reaggregation = aggregation + + if granularity is None: + granularities = ( + definition.granularity + for metric in metrics + for definition in metric.archive_policy.definition + ) + granularities_in_common = [ + g + for g, occurrence in six.iteritems( + collections.Counter(granularities)) + if occurrence == len(metrics) + ] + + if not granularities_in_common: + raise MetricUnaggregatable( + metrics, 'No granularity match') + else: + granularities_in_common = [granularity] + + tss = storage._map_in_thread(storage._get_measures_timeserie, + [(metric, aggregation, g, + from_timestamp, to_timestamp) + for metric in metrics + for g in granularities_in_common]) + + if transform is not None: + tss = list(map(lambda ts: ts.transform(transform), tss)) + + try: + return [(timestamp.replace(tzinfo=iso8601.iso8601.UTC), r, v) + for timestamp, r, v + in aggregated(tss, reaggregation, from_timestamp, to_timestamp, + needed_overlap, fill)] + except UnAggregableTimeseries as e: + raise MetricUnaggregatable(metrics, e.reason) + + +def aggregated(timeseries, aggregation, from_timestamp=None, + to_timestamp=None, needed_percent_of_overlap=100.0, + fill=None): + index = ['timestamp', 'granularity'] + columns = ['timestamp', 'granularity', 'value'] + dataframes = [] + + if not timeseries: + return [] + + for timeserie 
in timeseries: + timeserie_raw = list(timeserie.fetch(from_timestamp, to_timestamp)) + + if timeserie_raw: + dataframe = pandas.DataFrame(timeserie_raw, columns=columns) + dataframe = dataframe.set_index(index) + dataframes.append(dataframe) + + if not dataframes: + return [] + + number_of_distinct_datasource = len(timeseries) / len( + set(ts.sampling for ts in timeseries) + ) + + left_boundary_ts = None + right_boundary_ts = None + if fill is not None: + fill_df = pandas.concat(dataframes, axis=1) + if fill != 'null': + fill_df = fill_df.fillna(fill) + single_df = pandas.concat([series for __, series in + fill_df.iteritems()]).to_frame() + grouped = single_df.groupby(level=index) + else: + grouped = pandas.concat(dataframes).groupby(level=index) + maybe_next_timestamp_is_left_boundary = False + + left_holes = 0 + right_holes = 0 + holes = 0 + for (timestamp, __), group in grouped: + if group.count()['value'] != number_of_distinct_datasource: + maybe_next_timestamp_is_left_boundary = True + if left_boundary_ts is not None: + right_holes += 1 + else: + left_holes += 1 + elif maybe_next_timestamp_is_left_boundary: + left_boundary_ts = timestamp + maybe_next_timestamp_is_left_boundary = False + else: + right_boundary_ts = timestamp + holes += right_holes + right_holes = 0 + + if to_timestamp is not None: + holes += left_holes + if from_timestamp is not None: + holes += right_holes + + if to_timestamp is not None or from_timestamp is not None: + maximum = len(grouped) + percent_of_overlap = (float(maximum - holes) * 100.0 / + float(maximum)) + if percent_of_overlap < needed_percent_of_overlap: + raise UnAggregableTimeseries( + 'Less than %f%% of datapoints overlap in this ' + 'timespan (%.2f%%)' % (needed_percent_of_overlap, + percent_of_overlap)) + if (needed_percent_of_overlap > 0 and + (right_boundary_ts == left_boundary_ts or + (right_boundary_ts is None + and maybe_next_timestamp_is_left_boundary))): + LOG.debug("We didn't find points that overlap in those " + 
"timeseries. " + "right_boundary_ts=%(right_boundary_ts)s, " + "left_boundary_ts=%(left_boundary_ts)s, " + "groups=%(groups)s", { + 'right_boundary_ts': right_boundary_ts, + 'left_boundary_ts': left_boundary_ts, + 'groups': list(grouped) + }) + raise UnAggregableTimeseries('No overlap') + + # NOTE(sileht): this call the aggregation method on already + # aggregated values, for some kind of aggregation this can + # result can looks weird, but this is the best we can do + # because we don't have anymore the raw datapoints in those case. + # FIXME(sileht): so should we bailout is case of stddev, percentile + # and median? + agg_timeserie = getattr(grouped, aggregation)() + agg_timeserie = agg_timeserie.dropna().reset_index() + + if from_timestamp is None and left_boundary_ts: + agg_timeserie = agg_timeserie[ + agg_timeserie['timestamp'] >= left_boundary_ts] + if to_timestamp is None and right_boundary_ts: + agg_timeserie = agg_timeserie[ + agg_timeserie['timestamp'] <= right_boundary_ts] + + points = agg_timeserie.sort_values(by=['granularity', 'timestamp'], + ascending=[0, 1]) + return six.moves.zip(points.timestamp, points.granularity, + points.value) diff --git a/gnocchi/storage/__init__.py b/gnocchi/storage/__init__.py index e1da0f9f..4b5aed25 100644 --- a/gnocchi/storage/__init__.py +++ b/gnocchi/storage/__init__.py @@ -21,7 +21,6 @@ import operator from concurrent import futures import daiquiri -import iso8601 import numpy from oslo_config import cfg import six @@ -130,17 +129,6 @@ class MetricAlreadyExists(StorageError): "Metric %s already exists" % metric) -class MetricUnaggregatable(StorageError): - """Error raised when metrics can't be aggregated.""" - - def __init__(self, metrics, reason): - self.metrics = metrics - self.reason = reason - super(MetricUnaggregatable, self).__init__( - "Metrics %s can't be aggregated: %s" - % (", ".join((str(m.id) for m in metrics)), reason)) - - class LockedMetric(StorageError): """Error raised when this metric is already 
being handled by another.""" @@ -643,73 +631,6 @@ class StorageDriver(object): self._store_unaggregated_timeserie(metric, ts.serialize()) - def get_cross_metric_measures(self, metrics, from_timestamp=None, - to_timestamp=None, aggregation='mean', - reaggregation=None, - granularity=None, needed_overlap=100.0, - fill=None, transform=None): - """Get aggregated measures of multiple entities. - - :param entities: The entities measured to aggregate. - :param from timestamp: The timestamp to get the measure from. - :param to timestamp: The timestamp to get the measure to. - :param granularity: The granularity to retrieve. - :param aggregation: The type of aggregation to retrieve. - :param reaggregation: The type of aggregation to compute - on the retrieved measures. - :param fill: The value to use to fill in missing data in series. - :param transform: List of transformation to apply to the series - """ - for metric in metrics: - if aggregation not in metric.archive_policy.aggregation_methods: - raise AggregationDoesNotExist(metric, aggregation) - if granularity is not None: - for d in metric.archive_policy.definition: - if d.granularity == granularity: - break - else: - raise GranularityDoesNotExist(metric, granularity) - - if reaggregation is None: - reaggregation = aggregation - - if granularity is None: - granularities = ( - definition.granularity - for metric in metrics - for definition in metric.archive_policy.definition - ) - granularities_in_common = [ - g - for g, occurrence in six.iteritems( - collections.Counter(granularities)) - if occurrence == len(metrics) - ] - - if not granularities_in_common: - raise MetricUnaggregatable( - metrics, 'No granularity match') - else: - granularities_in_common = [granularity] - - tss = self._map_in_thread(self._get_measures_timeserie, - [(metric, aggregation, g, - from_timestamp, to_timestamp) - for metric in metrics - for g in granularities_in_common]) - - if transform is not None: - tss = list(map(lambda ts: 
ts.transform(transform), tss)) - - try: - return [(timestamp.replace(tzinfo=iso8601.iso8601.UTC), r, v) - for timestamp, r, v - in carbonara.AggregatedTimeSerie.aggregated( - tss, reaggregation, from_timestamp, to_timestamp, - needed_overlap, fill)] - except carbonara.UnAggregableTimeseries as e: - raise MetricUnaggregatable(metrics, e.reason) - def _find_measure(self, metric, aggregation, granularity, predicate, from_timestamp, to_timestamp): timeserie = self._get_measures_timeserie( diff --git a/gnocchi/tests/base.py b/gnocchi/tests/base.py index 6a74e8aa..d5cb4def 100644 --- a/gnocchi/tests/base.py +++ b/gnocchi/tests/base.py @@ -372,3 +372,17 @@ class TestCase(BaseTestCase): self.index.disconnect() self.storage.stop() super(TestCase, self).tearDown() + + def _create_metric(self, archive_policy_name="low"): + """Create a metric and return it""" + m = storage.Metric(uuid.uuid4(), + self.archive_policies[archive_policy_name]) + m_sql = self.index.create_metric(m.id, str(uuid.uuid4()), + archive_policy_name) + return m, m_sql + + def trigger_processing(self, metrics=None): + if metrics is None: + metrics = [str(self.metric.id)] + self.storage.process_background_tasks( + self.index, self.incoming, metrics, sync=True) diff --git a/gnocchi/tests/test_carbonara.py b/gnocchi/tests/test_carbonara.py index 51cbfc22..d3ade51a 100644 --- a/gnocchi/tests/test_carbonara.py +++ b/gnocchi/tests/test_carbonara.py @@ -438,196 +438,6 @@ class TestAggregatedTimeSerie(base.BaseTestCase): existing.merge(agg_dict['return']) agg_dict['return'] = existing - def test_aggregated_different_archive_no_overlap(self): - tsc1 = {'sampling': numpy.timedelta64(60, 's'), - 'size': 50, 'agg': 'mean'} - tsb1 = carbonara.BoundTimeSerie(block_size=tsc1['sampling']) - tsc2 = {'sampling': numpy.timedelta64(60, 's'), - 'size': 50, 'agg': 'mean'} - tsb2 = carbonara.BoundTimeSerie(block_size=tsc2['sampling']) - - tsb1.set_values(numpy.array([(datetime64(2014, 1, 1, 11, 46, 4), 4)], - 
dtype=carbonara.TIMESERIES_ARRAY_DTYPE), - before_truncate_callback=functools.partial( - self._resample_and_merge, agg_dict=tsc1)) - tsb2.set_values(numpy.array([(datetime64(2014, 1, 1, 9, 1, 4), 4)], - dtype=carbonara.TIMESERIES_ARRAY_DTYPE), - before_truncate_callback=functools.partial( - self._resample_and_merge, agg_dict=tsc2)) - - dtfrom = datetime64(2014, 1, 1, 11, 0, 0) - self.assertRaises(carbonara.UnAggregableTimeseries, - carbonara.AggregatedTimeSerie.aggregated, - [tsc1['return'], tsc2['return']], - from_timestamp=dtfrom, aggregation='mean') - - def test_aggregated_different_archive_no_overlap2(self): - tsc1 = {'sampling': numpy.timedelta64(60, 's'), - 'size': 50, 'agg': 'mean'} - tsb1 = carbonara.BoundTimeSerie(block_size=tsc1['sampling']) - tsc2 = carbonara.AggregatedTimeSerie( - sampling=numpy.timedelta64(60, 's'), - max_size=50, - aggregation_method='mean') - - tsb1.set_values(numpy.array([(datetime64(2014, 1, 1, 12, 3, 0), 4)], - dtype=carbonara.TIMESERIES_ARRAY_DTYPE), - before_truncate_callback=functools.partial( - self._resample_and_merge, agg_dict=tsc1)) - self.assertRaises(carbonara.UnAggregableTimeseries, - carbonara.AggregatedTimeSerie.aggregated, - [tsc1['return'], tsc2], aggregation='mean') - - def test_aggregated_different_archive_overlap(self): - tsc1 = {'sampling': numpy.timedelta64(60, 's'), - 'size': 10, 'agg': 'mean'} - tsb1 = carbonara.BoundTimeSerie(block_size=tsc1['sampling']) - tsc2 = {'sampling': numpy.timedelta64(60, 's'), - 'size': 10, 'agg': 'mean'} - tsb2 = carbonara.BoundTimeSerie(block_size=tsc2['sampling']) - - # NOTE(sileht): minute 8 is missing in both and - # minute 7 in tsc2 too, but it looks like we have - # enough point to do the aggregation - tsb1.set_values(numpy.array([ - (datetime64(2014, 1, 1, 11, 0, 0), 4), - (datetime64(2014, 1, 1, 12, 1, 0), 3), - (datetime64(2014, 1, 1, 12, 2, 0), 2), - (datetime64(2014, 1, 1, 12, 3, 0), 4), - (datetime64(2014, 1, 1, 12, 4, 0), 2), - (datetime64(2014, 1, 1, 12, 5, 0), 3), - 
(datetime64(2014, 1, 1, 12, 6, 0), 4), - (datetime64(2014, 1, 1, 12, 7, 0), 10), - (datetime64(2014, 1, 1, 12, 9, 0), 2)], - dtype=carbonara.TIMESERIES_ARRAY_DTYPE), - before_truncate_callback=functools.partial( - self._resample_and_merge, agg_dict=tsc1)) - - tsb2.set_values(numpy.array([ - (datetime64(2014, 1, 1, 12, 1, 0), 3), - (datetime64(2014, 1, 1, 12, 2, 0), 4), - (datetime64(2014, 1, 1, 12, 3, 0), 4), - (datetime64(2014, 1, 1, 12, 4, 0), 6), - (datetime64(2014, 1, 1, 12, 5, 0), 3), - (datetime64(2014, 1, 1, 12, 6, 0), 6), - (datetime64(2014, 1, 1, 12, 9, 0), 2), - (datetime64(2014, 1, 1, 12, 11, 0), 2), - (datetime64(2014, 1, 1, 12, 12, 0), 2)], - dtype=carbonara.TIMESERIES_ARRAY_DTYPE), - before_truncate_callback=functools.partial( - self._resample_and_merge, agg_dict=tsc2)) - - dtfrom = datetime64(2014, 1, 1, 12, 0, 0) - dtto = datetime64(2014, 1, 1, 12, 10, 0) - - # By default we require 100% of point that overlap - # so that fail - self.assertRaises(carbonara.UnAggregableTimeseries, - carbonara.AggregatedTimeSerie.aggregated, - [tsc1['return'], tsc2['return']], - from_timestamp=dtfrom, - to_timestamp=dtto, aggregation='mean') - - # Retry with 80% and it works - output = carbonara.AggregatedTimeSerie.aggregated([ - tsc1['return'], tsc2['return']], - from_timestamp=dtfrom, to_timestamp=dtto, - aggregation='mean', needed_percent_of_overlap=80.0) - - self.assertEqual([ - (datetime64(2014, 1, 1, 12, 1, 0), - numpy.timedelta64(60, 's'), 3.0), - (datetime64(2014, 1, 1, 12, 2, 0), - numpy.timedelta64(60, 's'), 3.0), - (datetime64(2014, 1, 1, 12, 3, 0), - numpy.timedelta64(60, 's'), 4.0), - (datetime64(2014, 1, 1, 12, 4, 0), - numpy.timedelta64(60, 's'), 4.0), - (datetime64(2014, 1, 1, 12, 5, 0), - numpy.timedelta64(60, 's'), 3.0), - (datetime64(2014, 1, 1, 12, 6, 0), - numpy.timedelta64(60, 's'), 5.0), - (datetime64(2014, 1, 1, 12, 7, 0), - numpy.timedelta64(60, 's'), 10.0), - (datetime64(2014, 1, 1, 12, 9, 0), - numpy.timedelta64(60, 's'), 2.0), - ], 
list(output)) - - def test_aggregated_different_archive_overlap_edge_missing1(self): - tsc1 = {'sampling': numpy.timedelta64(60, 's'), - 'size': 10, 'agg': 'mean'} - tsb1 = carbonara.BoundTimeSerie(block_size=tsc1['sampling']) - tsc2 = {'sampling': numpy.timedelta64(60, 's'), - 'size': 10, 'agg': 'mean'} - tsb2 = carbonara.BoundTimeSerie(block_size=tsc2['sampling']) - - tsb1.set_values(numpy.array([ - (datetime64(2014, 1, 1, 12, 3, 0), 9), - (datetime64(2014, 1, 1, 12, 4, 0), 1), - (datetime64(2014, 1, 1, 12, 5, 0), 2), - (datetime64(2014, 1, 1, 12, 6, 0), 7), - (datetime64(2014, 1, 1, 12, 7, 0), 5), - (datetime64(2014, 1, 1, 12, 8, 0), 3)], - dtype=carbonara.TIMESERIES_ARRAY_DTYPE), - before_truncate_callback=functools.partial( - self._resample_and_merge, agg_dict=tsc1)) - - tsb2.set_values(numpy.array([ - (datetime64(2014, 1, 1, 11, 0, 0), 6), - (datetime64(2014, 1, 1, 12, 1, 0), 2), - (datetime64(2014, 1, 1, 12, 2, 0), 13), - (datetime64(2014, 1, 1, 12, 3, 0), 24), - (datetime64(2014, 1, 1, 12, 4, 0), 4), - (datetime64(2014, 1, 1, 12, 5, 0), 16), - (datetime64(2014, 1, 1, 12, 6, 0), 12)], - dtype=carbonara.TIMESERIES_ARRAY_DTYPE), - before_truncate_callback=functools.partial( - self._resample_and_merge, agg_dict=tsc2)) - - # By default we require 100% of point that overlap - # but we allow that the last datapoint is missing - # of the precisest granularity - output = carbonara.AggregatedTimeSerie.aggregated([ - tsc1['return'], tsc2['return']], aggregation='sum') - - self.assertEqual([ - (datetime64(2014, 1, 1, 12, 3, 0), - numpy.timedelta64(60, 's'), 33.0), - (datetime64(2014, 1, 1, 12, 4, 0), - numpy.timedelta64(60, 's'), 5.0), - (datetime64(2014, 1, 1, 12, 5, 0), - numpy.timedelta64(60, 's'), 18.0), - (datetime64(2014, 1, 1, 12, 6, 0), - numpy.timedelta64(60, 's'), 19.0), - ], list(output)) - - def test_aggregated_different_archive_overlap_edge_missing2(self): - tsc1 = {'sampling': numpy.timedelta64(60, 's'), - 'size': 10, 'agg': 'mean'} - tsb1 = 
carbonara.BoundTimeSerie(block_size=tsc1['sampling']) - tsc2 = {'sampling': numpy.timedelta64(60, 's'), - 'size': 10, 'agg': 'mean'} - tsb2 = carbonara.BoundTimeSerie(block_size=tsc2['sampling']) - - tsb1.set_values(numpy.array([(datetime64(2014, 1, 1, 12, 3, 0), 4)], - dtype=carbonara.TIMESERIES_ARRAY_DTYPE), - before_truncate_callback=functools.partial( - self._resample_and_merge, agg_dict=tsc1)) - - tsb2.set_values(numpy.array([(datetime64(2014, 1, 1, 11, 0, 0), 4), - (datetime64(2014, 1, 1, 12, 3, 0), 4)], - dtype=carbonara.TIMESERIES_ARRAY_DTYPE), - before_truncate_callback=functools.partial( - self._resample_and_merge, agg_dict=tsc2)) - - output = carbonara.AggregatedTimeSerie.aggregated( - [tsc1['return'], tsc2['return']], aggregation='mean') - self.assertEqual([ - (datetime64( - 2014, 1, 1, 12, 3, 0 - ), numpy.timedelta64(60000000000, 'ns'), 4.0), - ], list(output)) - def test_fetch(self): ts = {'sampling': numpy.timedelta64(60, 's'), 'size': 10, 'agg': 'mean'} @@ -697,163 +507,6 @@ class TestAggregatedTimeSerie(base.BaseTestCase): numpy.timedelta64(60000000000, 'ns'), 4.0) ], list(ts['return'].fetch(datetime64(2014, 1, 1, 12, 0, 0)))) - def test_aggregated_some_overlap_with_fill_zero(self): - tsc1 = {'sampling': numpy.timedelta64(60, 's'), - 'size': 10, 'agg': 'mean'} - tsb1 = carbonara.BoundTimeSerie(block_size=tsc1['sampling']) - tsc2 = {'sampling': numpy.timedelta64(60, 's'), - 'size': 10, 'agg': 'mean'} - tsb2 = carbonara.BoundTimeSerie(block_size=tsc2['sampling']) - - tsb1.set_values(numpy.array([ - (datetime64(2014, 1, 1, 12, 3, 0), 9), - (datetime64(2014, 1, 1, 12, 4, 0), 1), - (datetime64(2014, 1, 1, 12, 5, 0), 2), - (datetime64(2014, 1, 1, 12, 6, 0), 7), - (datetime64(2014, 1, 1, 12, 7, 0), 5), - (datetime64(2014, 1, 1, 12, 8, 0), 3)], - dtype=carbonara.TIMESERIES_ARRAY_DTYPE), - before_truncate_callback=functools.partial( - self._resample_and_merge, agg_dict=tsc1)) - - tsb2.set_values(numpy.array([ - (datetime64(2014, 1, 1, 12, 0, 0), 6), - 
(datetime64(2014, 1, 1, 12, 1, 0), 2), - (datetime64(2014, 1, 1, 12, 2, 0), 13), - (datetime64(2014, 1, 1, 12, 3, 0), 24), - (datetime64(2014, 1, 1, 12, 4, 0), 4), - (datetime64(2014, 1, 1, 12, 5, 0), 16), - (datetime64(2014, 1, 1, 12, 6, 0), 12)], - dtype=carbonara.TIMESERIES_ARRAY_DTYPE), - before_truncate_callback=functools.partial( - self._resample_and_merge, agg_dict=tsc2)) - - output = carbonara.AggregatedTimeSerie.aggregated([ - tsc1['return'], tsc2['return']], aggregation='mean', fill=0) - - self.assertEqual([ - (datetime.datetime(2014, 1, 1, 12, 0, 0), - numpy.timedelta64(60000000000, 'ns'), 3.0), - (datetime.datetime(2014, 1, 1, 12, 1, 0), - numpy.timedelta64(60000000000, 'ns'), 1.0), - (datetime.datetime(2014, 1, 1, 12, 2, 0), - numpy.timedelta64(60000000000, 'ns'), 6.5), - (datetime.datetime(2014, 1, 1, 12, 3, 0), - numpy.timedelta64(60000000000, 'ns'), 16.5), - (datetime.datetime(2014, 1, 1, 12, 4, 0), - numpy.timedelta64(60000000000, 'ns'), 2.5), - (datetime.datetime(2014, 1, 1, 12, 5, 0), - numpy.timedelta64(60000000000, 'ns'), 9.0), - (datetime.datetime(2014, 1, 1, 12, 6, 0), - numpy.timedelta64(60000000000, 'ns'), 9.5), - (datetime.datetime(2014, 1, 1, 12, 7, 0), - numpy.timedelta64(60000000000, 'ns'), 2.5), - (datetime.datetime(2014, 1, 1, 12, 8, 0), - numpy.timedelta64(60000000000, 'ns'), 1.5), - ], list(output)) - - def test_aggregated_some_overlap_with_fill_null(self): - tsc1 = {'sampling': numpy.timedelta64(60, 's'), - 'size': 10, 'agg': 'mean'} - tsb1 = carbonara.BoundTimeSerie(block_size=tsc1['sampling']) - tsc2 = {'sampling': numpy.timedelta64(60, 's'), - 'size': 10, 'agg': 'mean'} - tsb2 = carbonara.BoundTimeSerie(block_size=tsc2['sampling']) - - tsb1.set_values(numpy.array([ - (datetime64(2014, 1, 1, 12, 3, 0), 9), - (datetime64(2014, 1, 1, 12, 4, 0), 1), - (datetime64(2014, 1, 1, 12, 5, 0), 2), - (datetime64(2014, 1, 1, 12, 6, 0), 7), - (datetime64(2014, 1, 1, 12, 7, 0), 5), - (datetime64(2014, 1, 1, 12, 8, 0), 3)], - 
dtype=carbonara.TIMESERIES_ARRAY_DTYPE), - before_truncate_callback=functools.partial( - self._resample_and_merge, agg_dict=tsc1)) - - tsb2.set_values(numpy.array([ - (datetime64(2014, 1, 1, 12, 0, 0), 6), - (datetime64(2014, 1, 1, 12, 1, 0), 2), - (datetime64(2014, 1, 1, 12, 2, 0), 13), - (datetime64(2014, 1, 1, 12, 3, 0), 24), - (datetime64(2014, 1, 1, 12, 4, 0), 4), - (datetime64(2014, 1, 1, 12, 5, 0), 16), - (datetime64(2014, 1, 1, 12, 6, 0), 12)], - dtype=carbonara.TIMESERIES_ARRAY_DTYPE), - before_truncate_callback=functools.partial( - self._resample_and_merge, agg_dict=tsc2)) - - output = carbonara.AggregatedTimeSerie.aggregated([ - tsc1['return'], tsc2['return']], aggregation='mean', fill='null') - - self.assertEqual([ - (datetime.datetime(2014, 1, 1, 12, 0, 0), - numpy.timedelta64(60000000000, 'ns'), 6.0), - (datetime.datetime(2014, 1, 1, 12, 1, 0), - numpy.timedelta64(60000000000, 'ns'), 2.0), - (datetime.datetime(2014, 1, 1, 12, 2, 0), - numpy.timedelta64(60000000000, 'ns'), 13.0), - (datetime.datetime(2014, 1, 1, 12, 3, 0), - numpy.timedelta64(60000000000, 'ns'), 16.5), - (datetime.datetime(2014, 1, 1, 12, 4, 0), - numpy.timedelta64(60000000000, 'ns'), 2.5), - (datetime.datetime(2014, 1, 1, 12, 5, 0), - numpy.timedelta64(60000000000, 'ns'), 9.0), - (datetime.datetime(2014, 1, 1, 12, 6, 0), - numpy.timedelta64(60000000000, 'ns'), 9.5), - (datetime.datetime(2014, 1, 1, 12, 7, 0), - numpy.timedelta64(60000000000, 'ns'), 5.0), - (datetime.datetime(2014, 1, 1, 12, 8, 0), - numpy.timedelta64(60000000000, 'ns'), 3.0), - ], list(output)) - - def test_aggregate_no_points_with_fill_zero(self): - tsc1 = {'sampling': numpy.timedelta64(60, 's'), - 'size': 10, 'agg': 'mean'} - tsb1 = carbonara.BoundTimeSerie(block_size=tsc1['sampling']) - tsc2 = {'sampling': numpy.timedelta64(60, 's'), - 'size': 10, 'agg': 'mean'} - tsb2 = carbonara.BoundTimeSerie(block_size=tsc2['sampling']) - - tsb1.set_values(numpy.array([ - (datetime64(2014, 1, 1, 12, 3, 0), 9), - 
(datetime64(2014, 1, 1, 12, 4, 0), 1), - (datetime64(2014, 1, 1, 12, 7, 0), 5), - (datetime64(2014, 1, 1, 12, 8, 0), 3)], - dtype=carbonara.TIMESERIES_ARRAY_DTYPE), - before_truncate_callback=functools.partial( - self._resample_and_merge, agg_dict=tsc1)) - - tsb2.set_values(numpy.array([ - (datetime64(2014, 1, 1, 12, 0, 0), 6), - (datetime64(2014, 1, 1, 12, 1, 0), 2), - (datetime64(2014, 1, 1, 12, 2, 0), 13), - (datetime64(2014, 1, 1, 12, 3, 0), 24), - (datetime64(2014, 1, 1, 12, 4, 0), 4)], - dtype=carbonara.TIMESERIES_ARRAY_DTYPE), - before_truncate_callback=functools.partial( - self._resample_and_merge, agg_dict=tsc2)) - - output = carbonara.AggregatedTimeSerie.aggregated([ - tsc1['return'], tsc2['return']], aggregation='mean', fill=0) - - self.assertEqual([ - (datetime.datetime(2014, 1, 1, 12, 0, 0), - numpy.timedelta64(60000000000, 'ns'), 3.0), - (datetime.datetime(2014, 1, 1, 12, 1, 0), - numpy.timedelta64(60000000000, 'ns'), 1.0), - (datetime.datetime(2014, 1, 1, 12, 2, 0), - numpy.timedelta64(60000000000, 'ns'), 6.5), - (datetime.datetime(2014, 1, 1, 12, 3, 0), - numpy.timedelta64(60000000000, 'ns'), 16.5), - (datetime.datetime(2014, 1, 1, 12, 4, 0), - numpy.timedelta64(60000000000, 'ns'), 2.5), - (datetime.datetime(2014, 1, 1, 12, 7, 0), - numpy.timedelta64(60000000000, 'ns'), 2.5), - (datetime.datetime(2014, 1, 1, 12, 8, 0), - numpy.timedelta64(60000000000, 'ns'), 1.5), - ], list(output)) - def test_fetch_agg_pct(self): ts = {'sampling': numpy.timedelta64(1, 's'), 'size': 3600 * 24, 'agg': '90pct'} @@ -1147,243 +800,6 @@ class TestAggregatedTimeSerie(base.BaseTestCase): ], list(ts['return'].fetch())) - def test_aggregated_nominal(self): - tsc1 = {'sampling': numpy.timedelta64(60, 's'), - 'size': 10, 'agg': 'mean'} - tsc12 = {'sampling': numpy.timedelta64(300, 's'), - 'size': 6, 'agg': 'mean'} - tsb1 = carbonara.BoundTimeSerie(block_size=tsc12['sampling']) - tsc2 = {'sampling': numpy.timedelta64(60, 's'), - 'size': 10, 'agg': 'mean'} - tsc22 = {'sampling': 
numpy.timedelta64(300, 's'), - 'size': 6, 'agg': 'mean'} - tsb2 = carbonara.BoundTimeSerie(block_size=tsc22['sampling']) - - def ts1_update(ts): - grouped = ts.group_serie(tsc1['sampling']) - existing = tsc1.get('return') - tsc1['return'] = carbonara.AggregatedTimeSerie.from_grouped_serie( - grouped, tsc1['sampling'], tsc1['agg'], - max_size=tsc1['size'], truncate=True) - if existing: - existing.merge(tsc1['return']) - grouped = ts.group_serie(tsc12['sampling']) - existing = tsc12.get('return') - tsc12['return'] = carbonara.AggregatedTimeSerie.from_grouped_serie( - grouped, tsc12['sampling'], tsc12['agg'], - max_size=tsc12['size'], truncate=True) - if existing: - existing.merge(tsc12['return']) - - def ts2_update(ts): - grouped = ts.group_serie(tsc2['sampling']) - existing = tsc2.get('return') - tsc2['return'] = carbonara.AggregatedTimeSerie.from_grouped_serie( - grouped, tsc2['sampling'], tsc2['agg'], - max_size=tsc2['size'], truncate=True) - if existing: - existing.merge(tsc2['return']) - grouped = ts.group_serie(tsc22['sampling']) - existing = tsc22.get('return') - tsc22['return'] = carbonara.AggregatedTimeSerie.from_grouped_serie( - grouped, tsc22['sampling'], tsc22['agg'], - max_size=tsc22['size'], truncate=True) - if existing: - existing.merge(tsc22['return']) - - tsb1.set_values(numpy.array([ - (datetime64(2014, 1, 1, 11, 46, 4), 4), - (datetime64(2014, 1, 1, 11, 47, 34), 8), - (datetime64(2014, 1, 1, 11, 50, 54), 50), - (datetime64(2014, 1, 1, 11, 54, 45), 4), - (datetime64(2014, 1, 1, 11, 56, 49), 4), - (datetime64(2014, 1, 1, 11, 57, 22), 6), - (datetime64(2014, 1, 1, 11, 58, 22), 5), - (datetime64(2014, 1, 1, 12, 1, 4), 4), - (datetime64(2014, 1, 1, 12, 1, 9), 7), - (datetime64(2014, 1, 1, 12, 2, 1), 15), - (datetime64(2014, 1, 1, 12, 2, 12), 1), - (datetime64(2014, 1, 1, 12, 3, 0), 3), - (datetime64(2014, 1, 1, 12, 4, 9), 7), - (datetime64(2014, 1, 1, 12, 5, 1), 15), - (datetime64(2014, 1, 1, 12, 5, 12), 1), - (datetime64(2014, 1, 1, 12, 6, 0), 3)], - 
dtype=carbonara.TIMESERIES_ARRAY_DTYPE), - before_truncate_callback=ts1_update) - - tsb2.set_values(numpy.array([ - (datetime64(2014, 1, 1, 11, 46, 4), 6), - (datetime64(2014, 1, 1, 11, 47, 34), 5), - (datetime64(2014, 1, 1, 11, 50, 54), 51), - (datetime64(2014, 1, 1, 11, 54, 45), 5), - (datetime64(2014, 1, 1, 11, 56, 49), 5), - (datetime64(2014, 1, 1, 11, 57, 22), 7), - (datetime64(2014, 1, 1, 11, 58, 22), 5), - (datetime64(2014, 1, 1, 12, 1, 4), 5), - (datetime64(2014, 1, 1, 12, 1, 9), 8), - (datetime64(2014, 1, 1, 12, 2, 1), 10), - (datetime64(2014, 1, 1, 12, 2, 12), 2), - (datetime64(2014, 1, 1, 12, 3, 0), 6), - (datetime64(2014, 1, 1, 12, 4, 9), 4), - (datetime64(2014, 1, 1, 12, 5, 1), 10), - (datetime64(2014, 1, 1, 12, 5, 12), 1), - (datetime64(2014, 1, 1, 12, 6, 0), 1)], - dtype=carbonara.TIMESERIES_ARRAY_DTYPE), - before_truncate_callback=ts2_update) - - output = carbonara.AggregatedTimeSerie.aggregated( - [tsc1['return'], tsc12['return'], tsc2['return'], tsc22['return']], - 'mean') - self.assertEqual([ - (datetime.datetime(2014, 1, 1, 11, 45), - numpy.timedelta64(300, 's'), 5.75), - (datetime.datetime(2014, 1, 1, 11, 50), - numpy.timedelta64(300, 's'), 27.5), - (datetime.datetime(2014, 1, 1, 11, 55), - numpy.timedelta64(300, 's'), 5.3333333333333339), - (datetime.datetime(2014, 1, 1, 12, 0), - numpy.timedelta64(300, 's'), 6.0), - (datetime.datetime(2014, 1, 1, 12, 5), - numpy.timedelta64(300, 's'), 5.1666666666666661), - (datetime.datetime(2014, 1, 1, 11, 54), - numpy.timedelta64(60, 's'), 4.5), - (datetime.datetime(2014, 1, 1, 11, 56), - numpy.timedelta64(60, 's'), 4.5), - (datetime.datetime(2014, 1, 1, 11, 57), - numpy.timedelta64(60, 's'), 6.5), - (datetime.datetime(2014, 1, 1, 11, 58), - numpy.timedelta64(60, 's'), 5.0), - (datetime.datetime(2014, 1, 1, 12, 1), - numpy.timedelta64(60, 's'), 6.0), - (datetime.datetime(2014, 1, 1, 12, 2), - numpy.timedelta64(60, 's'), 7.0), - (datetime.datetime(2014, 1, 1, 12, 3), - numpy.timedelta64(60, 's'), 4.5), - 
(datetime.datetime(2014, 1, 1, 12, 4), - numpy.timedelta64(60, 's'), 5.5), - (datetime.datetime(2014, 1, 1, 12, 5), - numpy.timedelta64(60, 's'), 6.75), - (datetime.datetime(2014, 1, 1, 12, 6), - numpy.timedelta64(60, 's'), 2.0), - ], list(output)) - - def test_aggregated_partial_overlap(self): - tsc1 = {'sampling': numpy.timedelta64(1, 's'), - 'size': 86400, 'agg': 'mean'} - tsb1 = carbonara.BoundTimeSerie(block_size=tsc1['sampling']) - tsc2 = {'sampling': numpy.timedelta64(1, 's'), - 'size': 60, 'agg': 'mean'} - tsb2 = carbonara.BoundTimeSerie(block_size=tsc2['sampling']) - - tsb1.set_values(numpy.array([ - (datetime64(2015, 12, 3, 13, 19, 15), 1), - (datetime64(2015, 12, 3, 13, 20, 15), 1), - (datetime64(2015, 12, 3, 13, 21, 15), 1), - (datetime64(2015, 12, 3, 13, 22, 15), 1)], - dtype=carbonara.TIMESERIES_ARRAY_DTYPE), - before_truncate_callback=functools.partial( - self._resample_and_merge, agg_dict=tsc1)) - - tsb2.set_values(numpy.array([ - (datetime64(2015, 12, 3, 13, 21, 15), 10), - (datetime64(2015, 12, 3, 13, 22, 15), 10), - (datetime64(2015, 12, 3, 13, 23, 15), 10), - (datetime64(2015, 12, 3, 13, 24, 15), 10)], - dtype=carbonara.TIMESERIES_ARRAY_DTYPE), - before_truncate_callback=functools.partial( - self._resample_and_merge, agg_dict=tsc2)) - - output = carbonara.AggregatedTimeSerie.aggregated( - [tsc1['return'], tsc2['return']], aggregation="sum") - - self.assertEqual([ - (datetime64( - 2015, 12, 3, 13, 21, 15 - ), numpy.timedelta64(1, 's'), 11.0), - (datetime64( - 2015, 12, 3, 13, 22, 15 - ), numpy.timedelta64(1, 's'), 11.0), - ], list(output)) - - dtfrom = datetime64(2015, 12, 3, 13, 17, 0) - dtto = datetime64(2015, 12, 3, 13, 25, 0) - - output = carbonara.AggregatedTimeSerie.aggregated( - [tsc1['return'], tsc2['return']], - from_timestamp=dtfrom, to_timestamp=dtto, - aggregation="sum", needed_percent_of_overlap=0) - - self.assertEqual([ - (datetime64( - 2015, 12, 3, 13, 19, 15 - ), numpy.timedelta64(1, 's'), 1.0), - (datetime64( - 2015, 12, 3, 13, 
20, 15 - ), numpy.timedelta64(1, 's'), 1.0), - (datetime64( - 2015, 12, 3, 13, 21, 15 - ), numpy.timedelta64(1, 's'), 11.0), - (datetime64( - 2015, 12, 3, 13, 22, 15 - ), numpy.timedelta64(1, 's'), 11.0), - (datetime64( - 2015, 12, 3, 13, 23, 15 - ), numpy.timedelta64(1, 's'), 10.0), - (datetime64( - 2015, 12, 3, 13, 24, 15 - ), numpy.timedelta64(1, 's'), 10.0), - ], list(output)) - - # By default we require 100% of point that overlap - # so that fail if from or to is set - self.assertRaises(carbonara.UnAggregableTimeseries, - carbonara.AggregatedTimeSerie.aggregated, - [tsc1['return'], tsc2['return']], - to_timestamp=dtto, aggregation='mean') - self.assertRaises(carbonara.UnAggregableTimeseries, - carbonara.AggregatedTimeSerie.aggregated, - [tsc1['return'], tsc2['return']], - from_timestamp=dtfrom, aggregation='mean') - - # Retry with 50% and it works - output = carbonara.AggregatedTimeSerie.aggregated( - [tsc1['return'], tsc2['return']], from_timestamp=dtfrom, - aggregation="sum", - needed_percent_of_overlap=50.0) - self.assertEqual([ - (datetime64( - 2015, 12, 3, 13, 19, 15 - ), numpy.timedelta64(1, 's'), 1.0), - (datetime64( - 2015, 12, 3, 13, 20, 15 - ), numpy.timedelta64(1, 's'), 1.0), - (datetime64( - 2015, 12, 3, 13, 21, 15 - ), numpy.timedelta64(1, 's'), 11.0), - (datetime64( - 2015, 12, 3, 13, 22, 15 - ), numpy.timedelta64(1, 's'), 11.0), - ], list(output)) - - output = carbonara.AggregatedTimeSerie.aggregated( - [tsc1['return'], tsc2['return']], to_timestamp=dtto, - aggregation="sum", - needed_percent_of_overlap=50.0) - self.assertEqual([ - (datetime64( - 2015, 12, 3, 13, 21, 15 - ), numpy.timedelta64(1, 's'), 11.0), - (datetime64( - 2015, 12, 3, 13, 22, 15 - ), numpy.timedelta64(1, 's'), 11.0), - (datetime64( - 2015, 12, 3, 13, 23, 15 - ), numpy.timedelta64(1, 's'), 10.0), - (datetime64( - 2015, 12, 3, 13, 24, 15 - ), numpy.timedelta64(1, 's'), 10.0), - ], list(output)) - def test_split_key(self): self.assertEqual( numpy.datetime64("2014-10-07"), diff 
--git a/gnocchi/tests/test_cross_metric.py b/gnocchi/tests/test_cross_metric.py new file mode 100644 index 00000000..5e201926 --- /dev/null +++ b/gnocchi/tests/test_cross_metric.py @@ -0,0 +1,844 @@ +# -*- encoding: utf-8 -*- +# +# Copyright © 2014-2016 eNovance +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +import datetime +import functools +import uuid + +import numpy + +from gnocchi import carbonara +from gnocchi.rest import cross_metric +from gnocchi import storage +from gnocchi.tests import base +from gnocchi import utils + + +def datetime64(*args): + return numpy.datetime64(datetime.datetime(*args)) + + +class TestAggregatedTimeseries(base.BaseTestCase): + @staticmethod + def _resample_and_merge(ts, agg_dict): + """Helper method that mimics _add_measures workflow.""" + grouped = ts.group_serie(agg_dict['sampling']) + existing = agg_dict.get('return') + agg_dict['return'] = carbonara.AggregatedTimeSerie.from_grouped_serie( + grouped, agg_dict['sampling'], agg_dict['agg'], + max_size=agg_dict.get('size'), truncate=True) + if existing: + existing.merge(agg_dict['return']) + agg_dict['return'] = existing + + def test_aggregated_different_archive_no_overlap(self): + tsc1 = {'sampling': numpy.timedelta64(60, 's'), + 'size': 50, 'agg': 'mean'} + tsb1 = carbonara.BoundTimeSerie(block_size=tsc1['sampling']) + tsc2 = {'sampling': numpy.timedelta64(60, 's'), + 'size': 50, 'agg': 'mean'} + tsb2 = carbonara.BoundTimeSerie(block_size=tsc2['sampling']) + + 
tsb1.set_values(numpy.array([(datetime64(2014, 1, 1, 11, 46, 4), 4)], + dtype=carbonara.TIMESERIES_ARRAY_DTYPE), + before_truncate_callback=functools.partial( + self._resample_and_merge, agg_dict=tsc1)) + tsb2.set_values(numpy.array([(datetime64(2014, 1, 1, 9, 1, 4), 4)], + dtype=carbonara.TIMESERIES_ARRAY_DTYPE), + before_truncate_callback=functools.partial( + self._resample_and_merge, agg_dict=tsc2)) + + dtfrom = datetime64(2014, 1, 1, 11, 0, 0) + self.assertRaises(cross_metric.UnAggregableTimeseries, + cross_metric.aggregated, + [tsc1['return'], tsc2['return']], + from_timestamp=dtfrom, aggregation='mean') + + def test_aggregated_different_archive_no_overlap2(self): + tsc1 = {'sampling': numpy.timedelta64(60, 's'), + 'size': 50, 'agg': 'mean'} + tsb1 = carbonara.BoundTimeSerie(block_size=tsc1['sampling']) + tsc2 = carbonara.AggregatedTimeSerie( + sampling=numpy.timedelta64(60, 's'), + max_size=50, + aggregation_method='mean') + + tsb1.set_values(numpy.array([(datetime64(2014, 1, 1, 12, 3, 0), 4)], + dtype=carbonara.TIMESERIES_ARRAY_DTYPE), + before_truncate_callback=functools.partial( + self._resample_and_merge, agg_dict=tsc1)) + self.assertRaises(cross_metric.UnAggregableTimeseries, + cross_metric.aggregated, + [tsc1['return'], tsc2], aggregation='mean') + + def test_aggregated_different_archive_overlap(self): + tsc1 = {'sampling': numpy.timedelta64(60, 's'), + 'size': 10, 'agg': 'mean'} + tsb1 = carbonara.BoundTimeSerie(block_size=tsc1['sampling']) + tsc2 = {'sampling': numpy.timedelta64(60, 's'), + 'size': 10, 'agg': 'mean'} + tsb2 = carbonara.BoundTimeSerie(block_size=tsc2['sampling']) + + # NOTE(sileht): minute 8 is missing in both and + # minute 7 in tsc2 too, but it looks like we have + # enough point to do the aggregation + tsb1.set_values(numpy.array([ + (datetime64(2014, 1, 1, 11, 0, 0), 4), + (datetime64(2014, 1, 1, 12, 1, 0), 3), + (datetime64(2014, 1, 1, 12, 2, 0), 2), + (datetime64(2014, 1, 1, 12, 3, 0), 4), + (datetime64(2014, 1, 1, 12, 4, 0), 2), 
+ (datetime64(2014, 1, 1, 12, 5, 0), 3), + (datetime64(2014, 1, 1, 12, 6, 0), 4), + (datetime64(2014, 1, 1, 12, 7, 0), 10), + (datetime64(2014, 1, 1, 12, 9, 0), 2)], + dtype=carbonara.TIMESERIES_ARRAY_DTYPE), + before_truncate_callback=functools.partial( + self._resample_and_merge, agg_dict=tsc1)) + + tsb2.set_values(numpy.array([ + (datetime64(2014, 1, 1, 12, 1, 0), 3), + (datetime64(2014, 1, 1, 12, 2, 0), 4), + (datetime64(2014, 1, 1, 12, 3, 0), 4), + (datetime64(2014, 1, 1, 12, 4, 0), 6), + (datetime64(2014, 1, 1, 12, 5, 0), 3), + (datetime64(2014, 1, 1, 12, 6, 0), 6), + (datetime64(2014, 1, 1, 12, 9, 0), 2), + (datetime64(2014, 1, 1, 12, 11, 0), 2), + (datetime64(2014, 1, 1, 12, 12, 0), 2)], + dtype=carbonara.TIMESERIES_ARRAY_DTYPE), + before_truncate_callback=functools.partial( + self._resample_and_merge, agg_dict=tsc2)) + + dtfrom = datetime64(2014, 1, 1, 12, 0, 0) + dtto = datetime64(2014, 1, 1, 12, 10, 0) + + # By default we require 100% of point that overlap + # so that fail + self.assertRaises(cross_metric.UnAggregableTimeseries, + cross_metric.aggregated, + [tsc1['return'], tsc2['return']], + from_timestamp=dtfrom, + to_timestamp=dtto, aggregation='mean') + + # Retry with 80% and it works + output = cross_metric.aggregated([ + tsc1['return'], tsc2['return']], + from_timestamp=dtfrom, to_timestamp=dtto, + aggregation='mean', needed_percent_of_overlap=80.0) + + self.assertEqual([ + (datetime64(2014, 1, 1, 12, 1, 0), + numpy.timedelta64(60, 's'), 3.0), + (datetime64(2014, 1, 1, 12, 2, 0), + numpy.timedelta64(60, 's'), 3.0), + (datetime64(2014, 1, 1, 12, 3, 0), + numpy.timedelta64(60, 's'), 4.0), + (datetime64(2014, 1, 1, 12, 4, 0), + numpy.timedelta64(60, 's'), 4.0), + (datetime64(2014, 1, 1, 12, 5, 0), + numpy.timedelta64(60, 's'), 3.0), + (datetime64(2014, 1, 1, 12, 6, 0), + numpy.timedelta64(60, 's'), 5.0), + (datetime64(2014, 1, 1, 12, 7, 0), + numpy.timedelta64(60, 's'), 10.0), + (datetime64(2014, 1, 1, 12, 9, 0), + numpy.timedelta64(60, 's'), 2.0), + 
], list(output)) + + def test_aggregated_different_archive_overlap_edge_missing1(self): + tsc1 = {'sampling': numpy.timedelta64(60, 's'), + 'size': 10, 'agg': 'mean'} + tsb1 = carbonara.BoundTimeSerie(block_size=tsc1['sampling']) + tsc2 = {'sampling': numpy.timedelta64(60, 's'), + 'size': 10, 'agg': 'mean'} + tsb2 = carbonara.BoundTimeSerie(block_size=tsc2['sampling']) + + tsb1.set_values(numpy.array([ + (datetime64(2014, 1, 1, 12, 3, 0), 9), + (datetime64(2014, 1, 1, 12, 4, 0), 1), + (datetime64(2014, 1, 1, 12, 5, 0), 2), + (datetime64(2014, 1, 1, 12, 6, 0), 7), + (datetime64(2014, 1, 1, 12, 7, 0), 5), + (datetime64(2014, 1, 1, 12, 8, 0), 3)], + dtype=carbonara.TIMESERIES_ARRAY_DTYPE), + before_truncate_callback=functools.partial( + self._resample_and_merge, agg_dict=tsc1)) + + tsb2.set_values(numpy.array([ + (datetime64(2014, 1, 1, 11, 0, 0), 6), + (datetime64(2014, 1, 1, 12, 1, 0), 2), + (datetime64(2014, 1, 1, 12, 2, 0), 13), + (datetime64(2014, 1, 1, 12, 3, 0), 24), + (datetime64(2014, 1, 1, 12, 4, 0), 4), + (datetime64(2014, 1, 1, 12, 5, 0), 16), + (datetime64(2014, 1, 1, 12, 6, 0), 12)], + dtype=carbonara.TIMESERIES_ARRAY_DTYPE), + before_truncate_callback=functools.partial( + self._resample_and_merge, agg_dict=tsc2)) + + # By default we require 100% of point that overlap + # but we allow that the last datapoint is missing + # of the precisest granularity + output = cross_metric.aggregated([ + tsc1['return'], tsc2['return']], aggregation='sum') + + self.assertEqual([ + (datetime64(2014, 1, 1, 12, 3, 0), + numpy.timedelta64(60, 's'), 33.0), + (datetime64(2014, 1, 1, 12, 4, 0), + numpy.timedelta64(60, 's'), 5.0), + (datetime64(2014, 1, 1, 12, 5, 0), + numpy.timedelta64(60, 's'), 18.0), + (datetime64(2014, 1, 1, 12, 6, 0), + numpy.timedelta64(60, 's'), 19.0), + ], list(output)) + + def test_aggregated_different_archive_overlap_edge_missing2(self): + tsc1 = {'sampling': numpy.timedelta64(60, 's'), + 'size': 10, 'agg': 'mean'} + tsb1 = 
carbonara.BoundTimeSerie(block_size=tsc1['sampling']) + tsc2 = {'sampling': numpy.timedelta64(60, 's'), + 'size': 10, 'agg': 'mean'} + tsb2 = carbonara.BoundTimeSerie(block_size=tsc2['sampling']) + + tsb1.set_values(numpy.array([(datetime64(2014, 1, 1, 12, 3, 0), 4)], + dtype=carbonara.TIMESERIES_ARRAY_DTYPE), + before_truncate_callback=functools.partial( + self._resample_and_merge, agg_dict=tsc1)) + + tsb2.set_values(numpy.array([(datetime64(2014, 1, 1, 11, 0, 0), 4), + (datetime64(2014, 1, 1, 12, 3, 0), 4)], + dtype=carbonara.TIMESERIES_ARRAY_DTYPE), + before_truncate_callback=functools.partial( + self._resample_and_merge, agg_dict=tsc2)) + + output = cross_metric.aggregated( + [tsc1['return'], tsc2['return']], aggregation='mean') + self.assertEqual([ + (datetime64( + 2014, 1, 1, 12, 3, 0 + ), numpy.timedelta64(60000000000, 'ns'), 4.0), + ], list(output)) + + def test_aggregated_some_overlap_with_fill_zero(self): + tsc1 = {'sampling': numpy.timedelta64(60, 's'), + 'size': 10, 'agg': 'mean'} + tsb1 = carbonara.BoundTimeSerie(block_size=tsc1['sampling']) + tsc2 = {'sampling': numpy.timedelta64(60, 's'), + 'size': 10, 'agg': 'mean'} + tsb2 = carbonara.BoundTimeSerie(block_size=tsc2['sampling']) + + tsb1.set_values(numpy.array([ + (datetime64(2014, 1, 1, 12, 3, 0), 9), + (datetime64(2014, 1, 1, 12, 4, 0), 1), + (datetime64(2014, 1, 1, 12, 5, 0), 2), + (datetime64(2014, 1, 1, 12, 6, 0), 7), + (datetime64(2014, 1, 1, 12, 7, 0), 5), + (datetime64(2014, 1, 1, 12, 8, 0), 3)], + dtype=carbonara.TIMESERIES_ARRAY_DTYPE), + before_truncate_callback=functools.partial( + self._resample_and_merge, agg_dict=tsc1)) + + tsb2.set_values(numpy.array([ + (datetime64(2014, 1, 1, 12, 0, 0), 6), + (datetime64(2014, 1, 1, 12, 1, 0), 2), + (datetime64(2014, 1, 1, 12, 2, 0), 13), + (datetime64(2014, 1, 1, 12, 3, 0), 24), + (datetime64(2014, 1, 1, 12, 4, 0), 4), + (datetime64(2014, 1, 1, 12, 5, 0), 16), + (datetime64(2014, 1, 1, 12, 6, 0), 12)], + dtype=carbonara.TIMESERIES_ARRAY_DTYPE), + 
before_truncate_callback=functools.partial( + self._resample_and_merge, agg_dict=tsc2)) + + output = cross_metric.aggregated([ + tsc1['return'], tsc2['return']], aggregation='mean', fill=0) + + self.assertEqual([ + (datetime.datetime(2014, 1, 1, 12, 0, 0), + numpy.timedelta64(60000000000, 'ns'), 3.0), + (datetime.datetime(2014, 1, 1, 12, 1, 0), + numpy.timedelta64(60000000000, 'ns'), 1.0), + (datetime.datetime(2014, 1, 1, 12, 2, 0), + numpy.timedelta64(60000000000, 'ns'), 6.5), + (datetime.datetime(2014, 1, 1, 12, 3, 0), + numpy.timedelta64(60000000000, 'ns'), 16.5), + (datetime.datetime(2014, 1, 1, 12, 4, 0), + numpy.timedelta64(60000000000, 'ns'), 2.5), + (datetime.datetime(2014, 1, 1, 12, 5, 0), + numpy.timedelta64(60000000000, 'ns'), 9.0), + (datetime.datetime(2014, 1, 1, 12, 6, 0), + numpy.timedelta64(60000000000, 'ns'), 9.5), + (datetime.datetime(2014, 1, 1, 12, 7, 0), + numpy.timedelta64(60000000000, 'ns'), 2.5), + (datetime.datetime(2014, 1, 1, 12, 8, 0), + numpy.timedelta64(60000000000, 'ns'), 1.5), + ], list(output)) + + def test_aggregated_some_overlap_with_fill_null(self): + tsc1 = {'sampling': numpy.timedelta64(60, 's'), + 'size': 10, 'agg': 'mean'} + tsb1 = carbonara.BoundTimeSerie(block_size=tsc1['sampling']) + tsc2 = {'sampling': numpy.timedelta64(60, 's'), + 'size': 10, 'agg': 'mean'} + tsb2 = carbonara.BoundTimeSerie(block_size=tsc2['sampling']) + + tsb1.set_values(numpy.array([ + (datetime64(2014, 1, 1, 12, 3, 0), 9), + (datetime64(2014, 1, 1, 12, 4, 0), 1), + (datetime64(2014, 1, 1, 12, 5, 0), 2), + (datetime64(2014, 1, 1, 12, 6, 0), 7), + (datetime64(2014, 1, 1, 12, 7, 0), 5), + (datetime64(2014, 1, 1, 12, 8, 0), 3)], + dtype=carbonara.TIMESERIES_ARRAY_DTYPE), + before_truncate_callback=functools.partial( + self._resample_and_merge, agg_dict=tsc1)) + + tsb2.set_values(numpy.array([ + (datetime64(2014, 1, 1, 12, 0, 0), 6), + (datetime64(2014, 1, 1, 12, 1, 0), 2), + (datetime64(2014, 1, 1, 12, 2, 0), 13), + (datetime64(2014, 1, 1, 12, 3, 0), 24), 
+ (datetime64(2014, 1, 1, 12, 4, 0), 4), + (datetime64(2014, 1, 1, 12, 5, 0), 16), + (datetime64(2014, 1, 1, 12, 6, 0), 12)], + dtype=carbonara.TIMESERIES_ARRAY_DTYPE), + before_truncate_callback=functools.partial( + self._resample_and_merge, agg_dict=tsc2)) + + output = cross_metric.aggregated([ + tsc1['return'], tsc2['return']], aggregation='mean', fill='null') + + self.assertEqual([ + (datetime.datetime(2014, 1, 1, 12, 0, 0), + numpy.timedelta64(60000000000, 'ns'), 6.0), + (datetime.datetime(2014, 1, 1, 12, 1, 0), + numpy.timedelta64(60000000000, 'ns'), 2.0), + (datetime.datetime(2014, 1, 1, 12, 2, 0), + numpy.timedelta64(60000000000, 'ns'), 13.0), + (datetime.datetime(2014, 1, 1, 12, 3, 0), + numpy.timedelta64(60000000000, 'ns'), 16.5), + (datetime.datetime(2014, 1, 1, 12, 4, 0), + numpy.timedelta64(60000000000, 'ns'), 2.5), + (datetime.datetime(2014, 1, 1, 12, 5, 0), + numpy.timedelta64(60000000000, 'ns'), 9.0), + (datetime.datetime(2014, 1, 1, 12, 6, 0), + numpy.timedelta64(60000000000, 'ns'), 9.5), + (datetime.datetime(2014, 1, 1, 12, 7, 0), + numpy.timedelta64(60000000000, 'ns'), 5.0), + (datetime.datetime(2014, 1, 1, 12, 8, 0), + numpy.timedelta64(60000000000, 'ns'), 3.0), + ], list(output)) + + def test_aggregate_no_points_with_fill_zero(self): + tsc1 = {'sampling': numpy.timedelta64(60, 's'), + 'size': 10, 'agg': 'mean'} + tsb1 = carbonara.BoundTimeSerie(block_size=tsc1['sampling']) + tsc2 = {'sampling': numpy.timedelta64(60, 's'), + 'size': 10, 'agg': 'mean'} + tsb2 = carbonara.BoundTimeSerie(block_size=tsc2['sampling']) + + tsb1.set_values(numpy.array([ + (datetime64(2014, 1, 1, 12, 3, 0), 9), + (datetime64(2014, 1, 1, 12, 4, 0), 1), + (datetime64(2014, 1, 1, 12, 7, 0), 5), + (datetime64(2014, 1, 1, 12, 8, 0), 3)], + dtype=carbonara.TIMESERIES_ARRAY_DTYPE), + before_truncate_callback=functools.partial( + self._resample_and_merge, agg_dict=tsc1)) + + tsb2.set_values(numpy.array([ + (datetime64(2014, 1, 1, 12, 0, 0), 6), + (datetime64(2014, 1, 1, 12, 1, 
0), 2), + (datetime64(2014, 1, 1, 12, 2, 0), 13), + (datetime64(2014, 1, 1, 12, 3, 0), 24), + (datetime64(2014, 1, 1, 12, 4, 0), 4)], + dtype=carbonara.TIMESERIES_ARRAY_DTYPE), + before_truncate_callback=functools.partial( + self._resample_and_merge, agg_dict=tsc2)) + + output = cross_metric.aggregated([ + tsc1['return'], tsc2['return']], aggregation='mean', fill=0) + + self.assertEqual([ + (datetime.datetime(2014, 1, 1, 12, 0, 0), + numpy.timedelta64(60000000000, 'ns'), 3.0), + (datetime.datetime(2014, 1, 1, 12, 1, 0), + numpy.timedelta64(60000000000, 'ns'), 1.0), + (datetime.datetime(2014, 1, 1, 12, 2, 0), + numpy.timedelta64(60000000000, 'ns'), 6.5), + (datetime.datetime(2014, 1, 1, 12, 3, 0), + numpy.timedelta64(60000000000, 'ns'), 16.5), + (datetime.datetime(2014, 1, 1, 12, 4, 0), + numpy.timedelta64(60000000000, 'ns'), 2.5), + (datetime.datetime(2014, 1, 1, 12, 7, 0), + numpy.timedelta64(60000000000, 'ns'), 2.5), + (datetime.datetime(2014, 1, 1, 12, 8, 0), + numpy.timedelta64(60000000000, 'ns'), 1.5), + ], list(output)) + + def test_aggregated_nominal(self): + tsc1 = {'sampling': numpy.timedelta64(60, 's'), + 'size': 10, 'agg': 'mean'} + tsc12 = {'sampling': numpy.timedelta64(300, 's'), + 'size': 6, 'agg': 'mean'} + tsb1 = carbonara.BoundTimeSerie(block_size=tsc12['sampling']) + tsc2 = {'sampling': numpy.timedelta64(60, 's'), + 'size': 10, 'agg': 'mean'} + tsc22 = {'sampling': numpy.timedelta64(300, 's'), + 'size': 6, 'agg': 'mean'} + tsb2 = carbonara.BoundTimeSerie(block_size=tsc22['sampling']) + + def ts1_update(ts): + grouped = ts.group_serie(tsc1['sampling']) + existing = tsc1.get('return') + tsc1['return'] = carbonara.AggregatedTimeSerie.from_grouped_serie( + grouped, tsc1['sampling'], tsc1['agg'], + max_size=tsc1['size'], truncate=True) + if existing: + existing.merge(tsc1['return']) + grouped = ts.group_serie(tsc12['sampling']) + existing = tsc12.get('return') + tsc12['return'] = carbonara.AggregatedTimeSerie.from_grouped_serie( + grouped, 
tsc12['sampling'], tsc12['agg'], + max_size=tsc12['size'], truncate=True) + if existing: + existing.merge(tsc12['return']) + + def ts2_update(ts): + grouped = ts.group_serie(tsc2['sampling']) + existing = tsc2.get('return') + tsc2['return'] = carbonara.AggregatedTimeSerie.from_grouped_serie( + grouped, tsc2['sampling'], tsc2['agg'], + max_size=tsc2['size'], truncate=True) + if existing: + existing.merge(tsc2['return']) + grouped = ts.group_serie(tsc22['sampling']) + existing = tsc22.get('return') + tsc22['return'] = carbonara.AggregatedTimeSerie.from_grouped_serie( + grouped, tsc22['sampling'], tsc22['agg'], + max_size=tsc22['size'], truncate=True) + if existing: + existing.merge(tsc22['return']) + tsb1.set_values(numpy.array([ + (datetime64(2014, 1, 1, 11, 46, 4), 4), + (datetime64(2014, 1, 1, 11, 47, 34), 8), + (datetime64(2014, 1, 1, 11, 50, 54), 50), + (datetime64(2014, 1, 1, 11, 54, 45), 4), + (datetime64(2014, 1, 1, 11, 56, 49), 4), + (datetime64(2014, 1, 1, 11, 57, 22), 6), + (datetime64(2014, 1, 1, 11, 58, 22), 5), + (datetime64(2014, 1, 1, 12, 1, 4), 4), + (datetime64(2014, 1, 1, 12, 1, 9), 7), + (datetime64(2014, 1, 1, 12, 2, 1), 15), + (datetime64(2014, 1, 1, 12, 2, 12), 1), + (datetime64(2014, 1, 1, 12, 3, 0), 3), + (datetime64(2014, 1, 1, 12, 4, 9), 7), + (datetime64(2014, 1, 1, 12, 5, 1), 15), + (datetime64(2014, 1, 1, 12, 5, 12), 1), + (datetime64(2014, 1, 1, 12, 6, 0), 3)], + dtype=carbonara.TIMESERIES_ARRAY_DTYPE), + before_truncate_callback=ts1_update) + + tsb2.set_values(numpy.array([ + (datetime64(2014, 1, 1, 11, 46, 4), 6), + (datetime64(2014, 1, 1, 11, 47, 34), 5), + (datetime64(2014, 1, 1, 11, 50, 54), 51), + (datetime64(2014, 1, 1, 11, 54, 45), 5), + (datetime64(2014, 1, 1, 11, 56, 49), 5), + (datetime64(2014, 1, 1, 11, 57, 22), 7), + (datetime64(2014, 1, 1, 11, 58, 22), 5), + (datetime64(2014, 1, 1, 12, 1, 4), 5), + (datetime64(2014, 1, 1, 12, 1, 9), 8), + (datetime64(2014, 1, 1, 12, 2, 1), 10), + (datetime64(2014, 1, 1, 12, 2, 12), 2), + 
(datetime64(2014, 1, 1, 12, 3, 0), 6), + (datetime64(2014, 1, 1, 12, 4, 9), 4), + (datetime64(2014, 1, 1, 12, 5, 1), 10), + (datetime64(2014, 1, 1, 12, 5, 12), 1), + (datetime64(2014, 1, 1, 12, 6, 0), 1)], + dtype=carbonara.TIMESERIES_ARRAY_DTYPE), + before_truncate_callback=ts2_update) + output = cross_metric.aggregated( + [tsc1['return'], tsc12['return'], tsc2['return'], tsc22['return']], + 'mean') + self.assertEqual([ + (datetime.datetime(2014, 1, 1, 11, 45), + numpy.timedelta64(300, 's'), 5.75), + (datetime.datetime(2014, 1, 1, 11, 50), + numpy.timedelta64(300, 's'), 27.5), + (datetime.datetime(2014, 1, 1, 11, 55), + numpy.timedelta64(300, 's'), 5.3333333333333339), + (datetime.datetime(2014, 1, 1, 12, 0), + numpy.timedelta64(300, 's'), 6.0), + (datetime.datetime(2014, 1, 1, 12, 5), + numpy.timedelta64(300, 's'), 5.1666666666666661), + (datetime.datetime(2014, 1, 1, 11, 54), + numpy.timedelta64(60, 's'), 4.5), + (datetime.datetime(2014, 1, 1, 11, 56), + numpy.timedelta64(60, 's'), 4.5), + (datetime.datetime(2014, 1, 1, 11, 57), + numpy.timedelta64(60, 's'), 6.5), + (datetime.datetime(2014, 1, 1, 11, 58), + numpy.timedelta64(60, 's'), 5.0), + (datetime.datetime(2014, 1, 1, 12, 1), + numpy.timedelta64(60, 's'), 6.0), + (datetime.datetime(2014, 1, 1, 12, 2), + numpy.timedelta64(60, 's'), 7.0), + (datetime.datetime(2014, 1, 1, 12, 3), + numpy.timedelta64(60, 's'), 4.5), + (datetime.datetime(2014, 1, 1, 12, 4), + numpy.timedelta64(60, 's'), 5.5), + (datetime.datetime(2014, 1, 1, 12, 5), + numpy.timedelta64(60, 's'), 6.75), + (datetime.datetime(2014, 1, 1, 12, 6), + numpy.timedelta64(60, 's'), 2.0), + ], list(output)) + + def test_aggregated_partial_overlap(self): + tsc1 = {'sampling': numpy.timedelta64(1, 's'), + 'size': 86400, 'agg': 'mean'} + tsb1 = carbonara.BoundTimeSerie(block_size=tsc1['sampling']) + tsc2 = {'sampling': numpy.timedelta64(1, 's'), + 'size': 60, 'agg': 'mean'} + tsb2 = carbonara.BoundTimeSerie(block_size=tsc2['sampling']) + + 
tsb1.set_values(numpy.array([ + (datetime64(2015, 12, 3, 13, 19, 15), 1), + (datetime64(2015, 12, 3, 13, 20, 15), 1), + (datetime64(2015, 12, 3, 13, 21, 15), 1), + (datetime64(2015, 12, 3, 13, 22, 15), 1)], + dtype=carbonara.TIMESERIES_ARRAY_DTYPE), + before_truncate_callback=functools.partial( + self._resample_and_merge, agg_dict=tsc1)) + + tsb2.set_values(numpy.array([ + (datetime64(2015, 12, 3, 13, 21, 15), 10), + (datetime64(2015, 12, 3, 13, 22, 15), 10), + (datetime64(2015, 12, 3, 13, 23, 15), 10), + (datetime64(2015, 12, 3, 13, 24, 15), 10)], + dtype=carbonara.TIMESERIES_ARRAY_DTYPE), + before_truncate_callback=functools.partial( + self._resample_and_merge, agg_dict=tsc2)) + + output = cross_metric.aggregated( + [tsc1['return'], tsc2['return']], aggregation="sum") + + self.assertEqual([ + (datetime64( + 2015, 12, 3, 13, 21, 15 + ), numpy.timedelta64(1, 's'), 11.0), + (datetime64( + 2015, 12, 3, 13, 22, 15 + ), numpy.timedelta64(1, 's'), 11.0), + ], list(output)) + + dtfrom = datetime64(2015, 12, 3, 13, 17, 0) + dtto = datetime64(2015, 12, 3, 13, 25, 0) + + output = cross_metric.aggregated( + [tsc1['return'], tsc2['return']], + from_timestamp=dtfrom, to_timestamp=dtto, + aggregation="sum", needed_percent_of_overlap=0) + self.assertEqual([ + (datetime64( + 2015, 12, 3, 13, 19, 15 + ), numpy.timedelta64(1, 's'), 1.0), + (datetime64( + 2015, 12, 3, 13, 20, 15 + ), numpy.timedelta64(1, 's'), 1.0), + (datetime64( + 2015, 12, 3, 13, 21, 15 + ), numpy.timedelta64(1, 's'), 11.0), + (datetime64( + 2015, 12, 3, 13, 22, 15 + ), numpy.timedelta64(1, 's'), 11.0), + (datetime64( + 2015, 12, 3, 13, 23, 15 + ), numpy.timedelta64(1, 's'), 10.0), + (datetime64( + 2015, 12, 3, 13, 24, 15 + ), numpy.timedelta64(1, 's'), 10.0), + ], list(output)) + + # By default we require 100% of point that overlap + # so that fail if from or to is set + self.assertRaises(cross_metric.UnAggregableTimeseries, + cross_metric.aggregated, + [tsc1['return'], tsc2['return']], + to_timestamp=dtto, 
aggregation='mean') + self.assertRaises(cross_metric.UnAggregableTimeseries, + cross_metric.aggregated, + [tsc1['return'], tsc2['return']], + from_timestamp=dtfrom, aggregation='mean') + # Retry with 50% and it works + output = cross_metric.aggregated( + [tsc1['return'], tsc2['return']], from_timestamp=dtfrom, + aggregation="sum", + needed_percent_of_overlap=50.0) + self.assertEqual([ + (datetime64( + 2015, 12, 3, 13, 19, 15 + ), numpy.timedelta64(1, 's'), 1.0), + (datetime64( + 2015, 12, 3, 13, 20, 15 + ), numpy.timedelta64(1, 's'), 1.0), + (datetime64( + 2015, 12, 3, 13, 21, 15 + ), numpy.timedelta64(1, 's'), 11.0), + (datetime64( + 2015, 12, 3, 13, 22, 15 + ), numpy.timedelta64(1, 's'), 11.0), + ], list(output)) + + output = cross_metric.aggregated( + [tsc1['return'], tsc2['return']], to_timestamp=dtto, + aggregation="sum", + needed_percent_of_overlap=50.0) + self.assertEqual([ + (datetime64( + 2015, 12, 3, 13, 21, 15 + ), numpy.timedelta64(1, 's'), 11.0), + (datetime64( + 2015, 12, 3, 13, 22, 15 + ), numpy.timedelta64(1, 's'), 11.0), + (datetime64( + 2015, 12, 3, 13, 23, 15 + ), numpy.timedelta64(1, 's'), 10.0), + (datetime64( + 2015, 12, 3, 13, 24, 15 + ), numpy.timedelta64(1, 's'), 10.0), + ], list(output)) + + +class CrossMetricAggregated(base.TestCase): + def setUp(self): + super(CrossMetricAggregated, self).setUp() + # A lot of tests wants a metric, create one + self.metric, __ = self._create_metric() + + def test_get_cross_metric_measures_unknown_metric(self): + self.assertEqual([], + cross_metric.get_cross_metric_measures( + self.storage, + [storage.Metric(uuid.uuid4(), + self.archive_policies['low']), + storage.Metric(uuid.uuid4(), + self.archive_policies['low'])])) + + def test_get_cross_metric_measures_unknown_aggregation(self): + metric2 = storage.Metric(uuid.uuid4(), + self.archive_policies['low']) + self.incoming.add_measures(self.metric, [ + storage.Measure(datetime64(2014, 1, 1, 12, 0, 1), 69), + storage.Measure(datetime64(2014, 1, 1, 12, 7, 31), 
42), + storage.Measure(datetime64(2014, 1, 1, 12, 9, 31), 4), + storage.Measure(datetime64(2014, 1, 1, 12, 12, 45), 44), + ]) + self.incoming.add_measures(metric2, [ + storage.Measure(datetime64(2014, 1, 1, 12, 0, 1), 69), + storage.Measure(datetime64(2014, 1, 1, 12, 7, 31), 42), + storage.Measure(datetime64(2014, 1, 1, 12, 9, 31), 4), + storage.Measure(datetime64(2014, 1, 1, 12, 12, 45), 44), + ]) + self.assertRaises(storage.AggregationDoesNotExist, + cross_metric.get_cross_metric_measures, + self.storage, + [self.metric, metric2], + aggregation='last') + + def test_get_cross_metric_measures_unknown_granularity(self): + metric2 = storage.Metric(uuid.uuid4(), + self.archive_policies['low']) + self.incoming.add_measures(self.metric, [ + storage.Measure(datetime64(2014, 1, 1, 12, 0, 1), 69), + storage.Measure(datetime64(2014, 1, 1, 12, 7, 31), 42), + storage.Measure(datetime64(2014, 1, 1, 12, 9, 31), 4), + storage.Measure(datetime64(2014, 1, 1, 12, 12, 45), 44), + ]) + self.incoming.add_measures(metric2, [ + storage.Measure(datetime64(2014, 1, 1, 12, 0, 1), 69), + storage.Measure(datetime64(2014, 1, 1, 12, 7, 31), 42), + storage.Measure(datetime64(2014, 1, 1, 12, 9, 31), 4), + storage.Measure(datetime64(2014, 1, 1, 12, 12, 45), 44), + ]) + self.assertRaises(storage.GranularityDoesNotExist, + cross_metric.get_cross_metric_measures, + self.storage, + [self.metric, metric2], + granularity=numpy.timedelta64(12345456, 'ms')) + + def test_add_and_get_cross_metric_measures_different_archives(self): + metric2 = storage.Metric(uuid.uuid4(), + self.archive_policies['no_granularity_match']) + self.incoming.add_measures(self.metric, [ + storage.Measure(datetime64(2014, 1, 1, 12, 0, 1), 69), + storage.Measure(datetime64(2014, 1, 1, 12, 7, 31), 42), + storage.Measure(datetime64(2014, 1, 1, 12, 9, 31), 4), + storage.Measure(datetime64(2014, 1, 1, 12, 12, 45), 44), + ]) + self.incoming.add_measures(metric2, [ + storage.Measure(datetime64(2014, 1, 1, 12, 0, 1), 69), + 
storage.Measure(datetime64(2014, 1, 1, 12, 7, 31), 42), + storage.Measure(datetime64(2014, 1, 1, 12, 9, 31), 4), + storage.Measure(datetime64(2014, 1, 1, 12, 12, 45), 44), + ]) + + self.assertRaises(cross_metric.MetricUnaggregatable, + cross_metric.get_cross_metric_measures, + self.storage, + [self.metric, metric2]) + + def test_add_and_get_cross_metric_measures(self): + metric2, __ = self._create_metric() + self.incoming.add_measures(self.metric, [ + storage.Measure(datetime64(2014, 1, 1, 12, 0, 1), 69), + storage.Measure(datetime64(2014, 1, 1, 12, 7, 31), 42), + storage.Measure(datetime64(2014, 1, 1, 12, 9, 31), 4), + storage.Measure(datetime64(2014, 1, 1, 12, 12, 45), 44), + ]) + self.incoming.add_measures(metric2, [ + storage.Measure(datetime64(2014, 1, 1, 12, 0, 5), 9), + storage.Measure(datetime64(2014, 1, 1, 12, 7, 41), 2), + storage.Measure(datetime64(2014, 1, 1, 12, 10, 31), 4), + storage.Measure(datetime64(2014, 1, 1, 12, 13, 10), 4), + ]) + self.trigger_processing([str(self.metric.id), str(metric2.id)]) + + values = cross_metric.get_cross_metric_measures( + self.storage, [self.metric, metric2]) + self.assertEqual([ + (utils.datetime_utc(2014, 1, 1, 0, 0, 0), + numpy.timedelta64(1, 'D'), 22.25), + (utils.datetime_utc(2014, 1, 1, 12, 0, 0), + numpy.timedelta64(1, 'h'), 22.25), + (utils.datetime_utc(2014, 1, 1, 12, 0, 0), + numpy.timedelta64(5, 'm'), 39.0), + (utils.datetime_utc(2014, 1, 1, 12, 5, 0), + numpy.timedelta64(5, 'm'), 12.5), + (utils.datetime_utc(2014, 1, 1, 12, 10, 0), + numpy.timedelta64(5, 'm'), 24.0) + ], values) + + values = cross_metric.get_cross_metric_measures( + self.storage, [self.metric, metric2], reaggregation='max') + self.assertEqual([ + (utils.datetime_utc(2014, 1, 1, 0, 0, 0), + numpy.timedelta64(1, 'D'), 39.75), + (utils.datetime_utc(2014, 1, 1, 12, 0, 0), + numpy.timedelta64(1, 'h'), 39.75), + (utils.datetime_utc(2014, 1, 1, 12, 0, 0), + numpy.timedelta64(5, 'm'), 69), + (utils.datetime_utc(2014, 1, 1, 12, 5, 0), + 
numpy.timedelta64(5, 'm'), 23), + (utils.datetime_utc(2014, 1, 1, 12, 10, 0), + numpy.timedelta64(5, 'm'), 44) + ], values) + + values = cross_metric.get_cross_metric_measures( + self.storage, [self.metric, metric2], + from_timestamp=datetime64(2014, 1, 1, 12, 10, 0)) + self.assertEqual([ + (utils.datetime_utc(2014, 1, 1), + numpy.timedelta64(1, 'D'), 22.25), + (utils.datetime_utc(2014, 1, 1, 12), + numpy.timedelta64(1, 'h'), 22.25), + (utils.datetime_utc(2014, 1, 1, 12, 10, 0), + numpy.timedelta64(5, 'm'), 24.0), + ], values) + + values = cross_metric.get_cross_metric_measures( + self.storage, [self.metric, metric2], + to_timestamp=datetime64(2014, 1, 1, 12, 5, 0)) + + self.assertEqual([ + (utils.datetime_utc(2014, 1, 1, 0, 0, 0), + numpy.timedelta64(1, 'D'), 22.25), + (utils.datetime_utc(2014, 1, 1, 12, 0, 0), + numpy.timedelta64(1, 'h'), 22.25), + (utils.datetime_utc(2014, 1, 1, 12, 0, 0), + numpy.timedelta64(5, 'm'), 39.0), + ], values) + + values = cross_metric.get_cross_metric_measures( + self.storage, [self.metric, metric2], + from_timestamp=datetime64(2014, 1, 1, 12, 10, 10), + to_timestamp=datetime64(2014, 1, 1, 12, 10, 10)) + self.assertEqual([ + (utils.datetime_utc(2014, 1, 1), + numpy.timedelta64(1, 'D'), 22.25), + (utils.datetime_utc(2014, 1, 1, 12), + numpy.timedelta64(1, 'h'), 22.25), + (utils.datetime_utc(2014, 1, 1, 12, 10), + numpy.timedelta64(5, 'm'), 24.0), + ], values) + + values = cross_metric.get_cross_metric_measures( + self.storage, [self.metric, metric2], + from_timestamp=datetime64(2014, 1, 1, 12, 0, 0), + to_timestamp=datetime64(2014, 1, 1, 12, 0, 1)) + + self.assertEqual([ + (utils.datetime_utc(2014, 1, 1), + numpy.timedelta64(1, 'D'), 22.25), + (utils.datetime_utc(2014, 1, 1, 12, 0, 0), + numpy.timedelta64(1, 'h'), 22.25), + (utils.datetime_utc(2014, 1, 1, 12, 0, 0), + numpy.timedelta64(5, 'm'), 39.0), + ], values) + + values = cross_metric.get_cross_metric_measures( + self.storage, [self.metric, metric2], + 
from_timestamp=datetime64(2014, 1, 1, 12, 0, 0), + to_timestamp=datetime64(2014, 1, 1, 12, 0, 1), + granularity=numpy.timedelta64(5, 'm')) + + self.assertEqual([ + (utils.datetime_utc(2014, 1, 1, 12, 0, 0), + numpy.timedelta64(5, 'm'), 39.0), + ], values) + + def test_add_and_get_cross_metric_measures_with_holes(self): + metric2, __ = self._create_metric() + self.incoming.add_measures(self.metric, [ + storage.Measure(datetime64(2014, 1, 1, 12, 0, 1), 69), + storage.Measure(datetime64(2014, 1, 1, 12, 7, 31), 42), + storage.Measure(datetime64(2014, 1, 1, 12, 5, 31), 8), + storage.Measure(datetime64(2014, 1, 1, 12, 9, 31), 4), + storage.Measure(datetime64(2014, 1, 1, 12, 12, 45), 42), + ]) + self.incoming.add_measures(metric2, [ + storage.Measure(datetime64(2014, 1, 1, 12, 0, 5), 9), + storage.Measure(datetime64(2014, 1, 1, 12, 7, 31), 2), + storage.Measure(datetime64(2014, 1, 1, 12, 9, 31), 6), + storage.Measure(datetime64(2014, 1, 1, 12, 13, 10), 2), + ]) + self.trigger_processing([str(self.metric.id), str(metric2.id)]) + + values = cross_metric.get_cross_metric_measures( + self.storage, [self.metric, metric2]) + self.assertEqual([ + (utils.datetime_utc(2014, 1, 1, 0, 0, 0), + numpy.timedelta64(1, 'D'), 18.875), + (utils.datetime_utc(2014, 1, 1, 12, 0, 0), + numpy.timedelta64(1, 'h'), 18.875), + (utils.datetime_utc(2014, 1, 1, 12, 0, 0), + numpy.timedelta64(5, 'm'), 39.0), + (utils.datetime_utc(2014, 1, 1, 12, 5, 0), + numpy.timedelta64(5, 'm'), 11.0), + (utils.datetime_utc(2014, 1, 1, 12, 10, 0), + numpy.timedelta64(5, 'm'), 22.0) + ], values) diff --git a/gnocchi/tests/test_storage.py b/gnocchi/tests/test_storage.py index 60bb1c81..5f206676 100644 --- a/gnocchi/tests/test_storage.py +++ b/gnocchi/tests/test_storage.py @@ -31,7 +31,6 @@ from gnocchi.storage import s3 from gnocchi.storage import swift from gnocchi.tests import base as tests_base from gnocchi.tests import utils as tests_utils -from gnocchi import utils def datetime64(*args): @@ -44,13 +43,6 @@ class 
TestStorageDriver(tests_base.TestCase): # A lot of tests wants a metric, create one self.metric, __ = self._create_metric() - def _create_metric(self, archive_policy_name="low"): - m = storage.Metric(uuid.uuid4(), - self.archive_policies[archive_policy_name]) - m_sql = self.index.create_metric(m.id, str(uuid.uuid4()), - archive_policy_name) - return m, m_sql - def test_driver_str(self): driver = storage.get_driver(self.conf) @@ -68,12 +60,6 @@ class TestStorageDriver(tests_base.TestCase): self.assertEqual(str(driver), "%s: %s" % ( driver.__class__.__name__, s)) - def trigger_processing(self, metrics=None): - if metrics is None: - metrics = [str(self.metric.id)] - self.storage.process_background_tasks( - self.index, self.incoming, metrics, sync=True) - def test_get_driver(self): driver = storage.get_driver(self.conf) self.assertIsInstance(driver, storage.StorageDriver) @@ -840,14 +826,6 @@ class TestStorageDriver(tests_base.TestCase): self.metric, granularity=numpy.timedelta64(42, 's')) - def test_get_cross_metric_measures_unknown_metric(self): - self.assertEqual([], - self.storage.get_cross_metric_measures( - [storage.Metric(uuid.uuid4(), - self.archive_policies['low']), - storage.Metric(uuid.uuid4(), - self.archive_policies['low'])])) - def test_get_measure_unknown_aggregation(self): self.incoming.add_measures(self.metric, [ storage.Measure(datetime64(2014, 1, 1, 12, 0, 1), 69), @@ -859,205 +837,6 @@ class TestStorageDriver(tests_base.TestCase): self.storage.get_measures, self.metric, aggregation='last') - def test_get_cross_metric_measures_unknown_aggregation(self): - metric2 = storage.Metric(uuid.uuid4(), - self.archive_policies['low']) - self.incoming.add_measures(self.metric, [ - storage.Measure(datetime64(2014, 1, 1, 12, 0, 1), 69), - storage.Measure(datetime64(2014, 1, 1, 12, 7, 31), 42), - storage.Measure(datetime64(2014, 1, 1, 12, 9, 31), 4), - storage.Measure(datetime64(2014, 1, 1, 12, 12, 45), 44), - ]) - self.incoming.add_measures(metric2, [ - 
storage.Measure(datetime64(2014, 1, 1, 12, 0, 1), 69), - storage.Measure(datetime64(2014, 1, 1, 12, 7, 31), 42), - storage.Measure(datetime64(2014, 1, 1, 12, 9, 31), 4), - storage.Measure(datetime64(2014, 1, 1, 12, 12, 45), 44), - ]) - self.assertRaises(storage.AggregationDoesNotExist, - self.storage.get_cross_metric_measures, - [self.metric, metric2], - aggregation='last') - - def test_get_cross_metric_measures_unknown_granularity(self): - metric2 = storage.Metric(uuid.uuid4(), - self.archive_policies['low']) - self.incoming.add_measures(self.metric, [ - storage.Measure(datetime64(2014, 1, 1, 12, 0, 1), 69), - storage.Measure(datetime64(2014, 1, 1, 12, 7, 31), 42), - storage.Measure(datetime64(2014, 1, 1, 12, 9, 31), 4), - storage.Measure(datetime64(2014, 1, 1, 12, 12, 45), 44), - ]) - self.incoming.add_measures(metric2, [ - storage.Measure(datetime64(2014, 1, 1, 12, 0, 1), 69), - storage.Measure(datetime64(2014, 1, 1, 12, 7, 31), 42), - storage.Measure(datetime64(2014, 1, 1, 12, 9, 31), 4), - storage.Measure(datetime64(2014, 1, 1, 12, 12, 45), 44), - ]) - self.assertRaises(storage.GranularityDoesNotExist, - self.storage.get_cross_metric_measures, - [self.metric, metric2], - granularity=numpy.timedelta64(12345456, 'ms')) - - def test_add_and_get_cross_metric_measures_different_archives(self): - metric2 = storage.Metric(uuid.uuid4(), - self.archive_policies['no_granularity_match']) - self.incoming.add_measures(self.metric, [ - storage.Measure(datetime64(2014, 1, 1, 12, 0, 1), 69), - storage.Measure(datetime64(2014, 1, 1, 12, 7, 31), 42), - storage.Measure(datetime64(2014, 1, 1, 12, 9, 31), 4), - storage.Measure(datetime64(2014, 1, 1, 12, 12, 45), 44), - ]) - self.incoming.add_measures(metric2, [ - storage.Measure(datetime64(2014, 1, 1, 12, 0, 1), 69), - storage.Measure(datetime64(2014, 1, 1, 12, 7, 31), 42), - storage.Measure(datetime64(2014, 1, 1, 12, 9, 31), 4), - storage.Measure(datetime64(2014, 1, 1, 12, 12, 45), 44), - ]) - - 
self.assertRaises(storage.MetricUnaggregatable, - self.storage.get_cross_metric_measures, - [self.metric, metric2]) - - def test_add_and_get_cross_metric_measures(self): - metric2, __ = self._create_metric() - self.incoming.add_measures(self.metric, [ - storage.Measure(datetime64(2014, 1, 1, 12, 0, 1), 69), - storage.Measure(datetime64(2014, 1, 1, 12, 7, 31), 42), - storage.Measure(datetime64(2014, 1, 1, 12, 9, 31), 4), - storage.Measure(datetime64(2014, 1, 1, 12, 12, 45), 44), - ]) - self.incoming.add_measures(metric2, [ - storage.Measure(datetime64(2014, 1, 1, 12, 0, 5), 9), - storage.Measure(datetime64(2014, 1, 1, 12, 7, 41), 2), - storage.Measure(datetime64(2014, 1, 1, 12, 10, 31), 4), - storage.Measure(datetime64(2014, 1, 1, 12, 13, 10), 4), - ]) - self.trigger_processing([str(self.metric.id), str(metric2.id)]) - - values = self.storage.get_cross_metric_measures([self.metric, metric2]) - self.assertEqual([ - (utils.datetime_utc(2014, 1, 1, 0, 0, 0), - numpy.timedelta64(1, 'D'), 22.25), - (utils.datetime_utc(2014, 1, 1, 12, 0, 0), - numpy.timedelta64(1, 'h'), 22.25), - (utils.datetime_utc(2014, 1, 1, 12, 0, 0), - numpy.timedelta64(5, 'm'), 39.0), - (utils.datetime_utc(2014, 1, 1, 12, 5, 0), - numpy.timedelta64(5, 'm'), 12.5), - (utils.datetime_utc(2014, 1, 1, 12, 10, 0), - numpy.timedelta64(5, 'm'), 24.0) - ], values) - - values = self.storage.get_cross_metric_measures([self.metric, metric2], - reaggregation='max') - self.assertEqual([ - (utils.datetime_utc(2014, 1, 1, 0, 0, 0), - numpy.timedelta64(1, 'D'), 39.75), - (utils.datetime_utc(2014, 1, 1, 12, 0, 0), - numpy.timedelta64(1, 'h'), 39.75), - (utils.datetime_utc(2014, 1, 1, 12, 0, 0), - numpy.timedelta64(5, 'm'), 69), - (utils.datetime_utc(2014, 1, 1, 12, 5, 0), - numpy.timedelta64(5, 'm'), 23), - (utils.datetime_utc(2014, 1, 1, 12, 10, 0), - numpy.timedelta64(5, 'm'), 44) - ], values) - - values = self.storage.get_cross_metric_measures( - [self.metric, metric2], - from_timestamp=datetime64(2014, 1, 1, 12, 
10, 0)) - self.assertEqual([ - (utils.datetime_utc(2014, 1, 1), - numpy.timedelta64(1, 'D'), 22.25), - (utils.datetime_utc(2014, 1, 1, 12), - numpy.timedelta64(1, 'h'), 22.25), - (utils.datetime_utc(2014, 1, 1, 12, 10, 0), - numpy.timedelta64(5, 'm'), 24.0), - ], values) - - values = self.storage.get_cross_metric_measures( - [self.metric, metric2], - to_timestamp=datetime64(2014, 1, 1, 12, 5, 0)) - - self.assertEqual([ - (utils.datetime_utc(2014, 1, 1, 0, 0, 0), - numpy.timedelta64(1, 'D'), 22.25), - (utils.datetime_utc(2014, 1, 1, 12, 0, 0), - numpy.timedelta64(1, 'h'), 22.25), - (utils.datetime_utc(2014, 1, 1, 12, 0, 0), - numpy.timedelta64(5, 'm'), 39.0), - ], values) - - values = self.storage.get_cross_metric_measures( - [self.metric, metric2], - from_timestamp=datetime64(2014, 1, 1, 12, 10, 10), - to_timestamp=datetime64(2014, 1, 1, 12, 10, 10)) - self.assertEqual([ - (utils.datetime_utc(2014, 1, 1), - numpy.timedelta64(1, 'D'), 22.25), - (utils.datetime_utc(2014, 1, 1, 12), - numpy.timedelta64(1, 'h'), 22.25), - (utils.datetime_utc(2014, 1, 1, 12, 10), - numpy.timedelta64(5, 'm'), 24.0), - ], values) - - values = self.storage.get_cross_metric_measures( - [self.metric, metric2], - from_timestamp=datetime64(2014, 1, 1, 12, 0, 0), - to_timestamp=datetime64(2014, 1, 1, 12, 0, 1)) - - self.assertEqual([ - (utils.datetime_utc(2014, 1, 1), - numpy.timedelta64(1, 'D'), 22.25), - (utils.datetime_utc(2014, 1, 1, 12, 0, 0), - numpy.timedelta64(1, 'h'), 22.25), - (utils.datetime_utc(2014, 1, 1, 12, 0, 0), - numpy.timedelta64(5, 'm'), 39.0), - ], values) - - values = self.storage.get_cross_metric_measures( - [self.metric, metric2], - from_timestamp=datetime64(2014, 1, 1, 12, 0, 0), - to_timestamp=datetime64(2014, 1, 1, 12, 0, 1), - granularity=numpy.timedelta64(5, 'm')) - - self.assertEqual([ - (utils.datetime_utc(2014, 1, 1, 12, 0, 0), - numpy.timedelta64(5, 'm'), 39.0), - ], values) - - def test_add_and_get_cross_metric_measures_with_holes(self): - metric2, __ = 
self._create_metric() - self.incoming.add_measures(self.metric, [ - storage.Measure(datetime64(2014, 1, 1, 12, 0, 1), 69), - storage.Measure(datetime64(2014, 1, 1, 12, 7, 31), 42), - storage.Measure(datetime64(2014, 1, 1, 12, 5, 31), 8), - storage.Measure(datetime64(2014, 1, 1, 12, 9, 31), 4), - storage.Measure(datetime64(2014, 1, 1, 12, 12, 45), 42), - ]) - self.incoming.add_measures(metric2, [ - storage.Measure(datetime64(2014, 1, 1, 12, 0, 5), 9), - storage.Measure(datetime64(2014, 1, 1, 12, 7, 31), 2), - storage.Measure(datetime64(2014, 1, 1, 12, 9, 31), 6), - storage.Measure(datetime64(2014, 1, 1, 12, 13, 10), 2), - ]) - self.trigger_processing([str(self.metric.id), str(metric2.id)]) - - values = self.storage.get_cross_metric_measures([self.metric, metric2]) - self.assertEqual([ - (utils.datetime_utc(2014, 1, 1, 0, 0, 0), - numpy.timedelta64(1, 'D'), 18.875), - (utils.datetime_utc(2014, 1, 1, 12, 0, 0), - numpy.timedelta64(1, 'h'), 18.875), - (utils.datetime_utc(2014, 1, 1, 12, 0, 0), - numpy.timedelta64(5, 'm'), 39.0), - (utils.datetime_utc(2014, 1, 1, 12, 5, 0), - numpy.timedelta64(5, 'm'), 11.0), - (utils.datetime_utc(2014, 1, 1, 12, 10, 0), - numpy.timedelta64(5, 'm'), 22.0) - ], values) - def test_search_value(self): metric2, __ = self._create_metric() self.incoming.add_measures(self.metric, [ -- GitLab From 450ff79729923f25e0b46e26709d71a640edb7de Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Thu, 7 Sep 2017 14:17:45 +0200 Subject: [PATCH 0954/1483] rest: filter query string validation Invalid query string returns a 500 instead of a 400. This change fixes that. 
--- gnocchi/rest/__init__.py | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/gnocchi/rest/__init__.py b/gnocchi/rest/__init__.py index ee91d455..1cccb439 100644 --- a/gnocchi/rest/__init__.py +++ b/gnocchi/rest/__init__.py @@ -124,15 +124,20 @@ def deserialize(expected_content_types=None): return params -def deserialize_and_validate(schema, required=True, - expected_content_types=None): +def validate(schema, data, required=True): try: - return voluptuous.Schema(schema, required=required)( - deserialize(expected_content_types=expected_content_types)) + return voluptuous.Schema(schema, required=required)(data) except voluptuous.Error as e: abort(400, "Invalid input: %s" % e) +def deserialize_and_validate(schema, required=True, + expected_content_types=None): + return validate(schema, + deserialize(expected_content_types=expected_content_types), + required) + + def PositiveOrNullInt(value): value = int(value) if value < 0: @@ -1196,8 +1201,7 @@ class QueryStringSearchAttrFilter(object): @classmethod def parse(cls, query): attr_filter = cls._parse(query) - return voluptuous.Schema(ResourceSearchSchema, - required=True)(attr_filter) + return validate(ResourceSearchSchema, attr_filter, required=True) def ResourceSearchSchema(v): -- GitLab From 933a47e3de4780eddcebea4d7f0aa879ea09dec4 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Thu, 7 Sep 2017 13:54:54 +0200 Subject: [PATCH 0955/1483] rest: don't allow empty in Currently we can write {"id": []}. This query doesn't make sense it will always return an empty list and it's obviously a input error. This change returns 400 is this case. 
--- gnocchi/indexer/sqlalchemy.py | 1 + gnocchi/rest/__init__.py | 8 ++++++-- gnocchi/tests/functional/gabbits/resource.yaml | 6 +++--- gnocchi/tests/functional/gabbits/search.yaml | 15 +++++++++++++++ 4 files changed, 25 insertions(+), 5 deletions(-) diff --git a/gnocchi/indexer/sqlalchemy.py b/gnocchi/indexer/sqlalchemy.py index e812c3fd..21a1136e 100644 --- a/gnocchi/indexer/sqlalchemy.py +++ b/gnocchi/indexer/sqlalchemy.py @@ -1104,6 +1104,7 @@ class SQLAlchemyIndexer(indexer.IndexerDriver): class QueryTransformer(object): + unary_operators = { u"not": sqlalchemy.not_, } diff --git a/gnocchi/rest/__init__.py b/gnocchi/rest/__init__.py index 1cccb439..524ea503 100644 --- a/gnocchi/rest/__init__.py +++ b/gnocchi/rest/__init__.py @@ -1240,8 +1240,12 @@ def _ResourceSearchSchema(): u"in", ): voluptuous.All( voluptuous.Length(min=1, max=1), - {"id": [_ResourceUUID], - six.text_type: [ResourceSearchSchemaAttributeValue]} + {"id": voluptuous.All( + [_ResourceUUID], + voluptuous.Length(min=1)), + six.text_type: voluptuous.All( + [ResourceSearchSchemaAttributeValue], + voluptuous.Length(min=1))} ), voluptuous.Any( u"and", u"∨", diff --git a/gnocchi/tests/functional/gabbits/resource.yaml b/gnocchi/tests/functional/gabbits/resource.yaml index c4d3bf11..0fda4398 100644 --- a/gnocchi/tests/functional/gabbits/resource.yaml +++ b/gnocchi/tests/functional/gabbits/resource.yaml @@ -724,9 +724,9 @@ tests: data: in: id: [] - status: 200 - response_json_paths: - $.deleted: 0 + status: 400 + response_strings: + - length of value must be at least 1 - name: delete something empty b desc: use empty filter for delete diff --git a/gnocchi/tests/functional/gabbits/search.yaml b/gnocchi/tests/functional/gabbits/search.yaml index 61853a04..59e9f963 100644 --- a/gnocchi/tests/functional/gabbits/search.yaml +++ b/gnocchi/tests/functional/gabbits/search.yaml @@ -121,6 +121,21 @@ tests: response_json_paths: $.`len`: 2 + - name: search empty in_ + POST: /v1/search/resource/generic + data: + in: 
+ id: [] + status: 400 + response_strings: + - length of value must be at least 1 + + - name: search empty in_ query string + POST: /v1/search/resource/generic?filter=id%20in%20%5B%5D + status: 400 + response_strings: + - length of value must be at least 1 + - name: search empty query POST: /v1/search/resource/generic data: {} -- GitLab From 07f770aeaed3192b446e3e0aa5a2e66a8fe6931b Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Fri, 8 Sep 2017 11:09:46 +0200 Subject: [PATCH 0956/1483] Simplify dependencies by requiring Carbonara Since all drivers are based on Carbonara and there's no (more) plan to not do that, this patch simplifies dependency by requiring tooz and lz4 as base requirements. --- requirements.txt | 2 ++ setup.cfg | 13 ------------- 2 files changed, 2 insertions(+), 13 deletions(-) diff --git a/requirements.txt b/requirements.txt index bb6261eb..d944b4e0 100644 --- a/requirements.txt +++ b/requirements.txt @@ -24,3 +24,5 @@ PasteDeploy monotonic daiquiri pyparsing>=2.2.0 +lz4>=0.9.0 +tooz>=1.38 diff --git a/setup.cfg b/setup.cfg index c1794dfa..ad74825f 100644 --- a/setup.cfg +++ b/setup.cfg @@ -36,26 +36,14 @@ postgresql = s3 = boto3 botocore>=1.5 - lz4>=0.9.0 - tooz>=1.38 redis = redis>=2.10.0 # MIT - lz4>=0.9.0 - tooz>=1.38 swift = python-swiftclient>=3.1.0 - lz4>=0.9.0 - tooz>=1.38 -ceph = - lz4>=0.9.0 - tooz>=1.38 ceph_recommended_lib = cradox>=1.2.0 ceph_alternative_lib = python-rados>=10.1.0 # not available on pypi -file = - lz4>=0.9.0 - tooz>=1.38 doc = sphinx<1.6.0 sphinx_rtd_theme @@ -77,7 +65,6 @@ test = testtools>=0.9.38 WebTest>=2.0.16 doc8 - tooz>=1.38 keystonemiddleware>=4.0.0 wsgi_intercept>=1.4.1 test-swift = -- GitLab From 47013d28c3262d6bdfaa58387ea72020fc017be6 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Wed, 6 Sep 2017 17:09:22 +0200 Subject: [PATCH 0957/1483] Move REST API code in its own submodule The whole code is in gnocchi.rest, making it imported as soon as anything in gnocchi.rest is imported. 
This might be needed when just importing e.g. gnocchi.rest.app to get access to the options. --- gnocchi/rest/__init__.py | 1908 ----------------------------------- gnocchi/rest/api-paste.ini | 4 +- gnocchi/rest/api.py | 1908 +++++++++++++++++++++++++++++++++++ gnocchi/rest/auth_helper.py | 14 +- gnocchi/tests/test_rest.py | 4 +- 5 files changed, 1919 insertions(+), 1919 deletions(-) create mode 100644 gnocchi/rest/api.py diff --git a/gnocchi/rest/__init__.py b/gnocchi/rest/__init__.py index 524ea503..e69de29b 100644 --- a/gnocchi/rest/__init__.py +++ b/gnocchi/rest/__init__.py @@ -1,1908 +0,0 @@ -# -*- encoding: utf-8 -*- -# -# Copyright © 2016-2017 Red Hat, Inc. -# Copyright © 2014-2015 eNovance -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-import functools -import itertools -import uuid -import warnings - -import jsonpatch -import pecan -from pecan import rest -import pyparsing -import six -from six.moves.urllib import parse as urllib_parse -from stevedore import extension -import voluptuous -import webob.exc -import werkzeug.http - -from gnocchi import aggregates -from gnocchi import archive_policy -from gnocchi import carbonara -from gnocchi import incoming -from gnocchi import indexer -from gnocchi import json -from gnocchi import resource_type -from gnocchi.rest import cross_metric -from gnocchi.rest import transformation -from gnocchi import storage -from gnocchi import utils - - -def arg_to_list(value): - if isinstance(value, list): - return value - elif value: - return [value] - return [] - - -def abort(status_code, detail='', headers=None, comment=None, **kw): - """Like pecan.abort, but make sure detail is a string.""" - if status_code == 404 and not detail: - raise RuntimeError("http code 404 must have 'detail' set") - if isinstance(detail, Exception): - detail = six.text_type(detail) - return pecan.abort(status_code, detail, headers, comment, **kw) - - -def flatten_dict_to_keypairs(d, separator=':'): - """Generator that produces sequence of keypairs for nested dictionaries. - - :param d: dictionaries which may be nested - :param separator: symbol between names - """ - for name, value in sorted(six.iteritems(d)): - if isinstance(value, dict): - for subname, subvalue in flatten_dict_to_keypairs(value, - separator): - yield ('%s%s%s' % (name, separator, subname), subvalue) - else: - yield name, value - - -def enforce(rule, target): - """Return the user and project the request should be limited to. - - :param rule: The rule name - :param target: The target to enforce on. 
- - """ - creds = pecan.request.auth_helper.get_auth_info(pecan.request) - - if not isinstance(target, dict): - if hasattr(target, "jsonify"): - target = target.jsonify() - else: - target = target.__dict__ - - # Flatten dict - target = dict(flatten_dict_to_keypairs(d=target, separator='.')) - - if not pecan.request.policy_enforcer.enforce(rule, target, creds): - abort(403) - - -def set_resp_location_hdr(location): - location = '%s%s' % (pecan.request.script_name, location) - # NOTE(sileht): according the pep-3333 the headers must be - # str in py2 and py3 even this is not the same thing in both - # version - # see: http://legacy.python.org/dev/peps/pep-3333/#unicode-issues - if six.PY2 and isinstance(location, six.text_type): - location = location.encode('utf-8') - location = urllib_parse.quote(location) - pecan.response.headers['Location'] = location - - -def deserialize(expected_content_types=None): - if expected_content_types is None: - expected_content_types = ("application/json", ) - - mime_type, options = werkzeug.http.parse_options_header( - pecan.request.headers.get('Content-Type')) - if mime_type not in expected_content_types: - abort(415) - try: - params = json.load(pecan.request.body_file) - except Exception as e: - abort(400, "Unable to decode body: " + six.text_type(e)) - return params - - -def validate(schema, data, required=True): - try: - return voluptuous.Schema(schema, required=required)(data) - except voluptuous.Error as e: - abort(400, "Invalid input: %s" % e) - - -def deserialize_and_validate(schema, required=True, - expected_content_types=None): - return validate(schema, - deserialize(expected_content_types=expected_content_types), - required) - - -def PositiveOrNullInt(value): - value = int(value) - if value < 0: - raise ValueError("Value must be positive") - return value - - -def PositiveNotNullInt(value): - value = int(value) - if value <= 0: - raise ValueError("Value must be positive and not null") - return value - - -def Timespan(value): 
- try: - return utils.to_timespan(value) - except ValueError as e: - raise voluptuous.Invalid(e) - - -def get_header_option(name, params): - type, options = werkzeug.http.parse_options_header( - pecan.request.headers.get('Accept')) - return strtobool('Accept header' if name in options else name, - options.get(name, params.pop(name, 'false'))) - - -def get_history(params): - return get_header_option('history', params) - - -def get_details(params): - return get_header_option('details', params) - - -def strtobool(varname, v): - """Convert a string to a boolean.""" - try: - return utils.strtobool(v) - except ValueError as e: - abort(400, "Unable to parse `%s': %s" % (varname, six.text_type(e))) - - -RESOURCE_DEFAULT_PAGINATION = ['revision_start:asc', - 'started_at:asc'] - -METRIC_DEFAULT_PAGINATION = ['id:asc'] - - -def get_pagination_options(params, default): - max_limit = pecan.request.conf.api.max_limit - limit = params.pop('limit', max_limit) - marker = params.pop('marker', None) - sorts = params.pop('sort', default) - if not isinstance(sorts, list): - sorts = [sorts] - - try: - limit = PositiveNotNullInt(limit) - except ValueError: - abort(400, "Invalid 'limit' value: %s" % params.get('limit')) - - limit = min(limit, max_limit) - - return {'limit': limit, - 'marker': marker, - 'sorts': sorts} - - -def ValidAggMethod(value): - value = six.text_type(value) - if value in archive_policy.ArchivePolicy.VALID_AGGREGATION_METHODS_VALUES: - return value - raise ValueError("Invalid aggregation method") - - -class ArchivePolicyController(rest.RestController): - def __init__(self, archive_policy): - self.archive_policy = archive_policy - - @pecan.expose('json') - def get(self): - ap = pecan.request.indexer.get_archive_policy(self.archive_policy) - if ap: - enforce("get archive policy", ap) - return ap - abort(404, indexer.NoSuchArchivePolicy(self.archive_policy)) - - @pecan.expose('json') - def patch(self): - ap = pecan.request.indexer.get_archive_policy(self.archive_policy) 
- if not ap: - abort(404, indexer.NoSuchArchivePolicy(self.archive_policy)) - enforce("update archive policy", ap) - - body = deserialize_and_validate(voluptuous.Schema({ - voluptuous.Required("definition"): - voluptuous.All([{ - "granularity": Timespan, - "points": PositiveNotNullInt, - "timespan": Timespan}], voluptuous.Length(min=1)), - })) - # Validate the data - try: - ap_items = [archive_policy.ArchivePolicyItem(**item) for item in - body['definition']] - except ValueError as e: - abort(400, e) - - try: - return pecan.request.indexer.update_archive_policy( - self.archive_policy, ap_items) - except indexer.UnsupportedArchivePolicyChange as e: - abort(400, e) - - @pecan.expose() - def delete(self): - # NOTE(jd) I don't think there's any point in fetching and passing the - # archive policy here, as the rule is probably checking the actual role - # of the user, not the content of the AP. - enforce("delete archive policy", {}) - try: - pecan.request.indexer.delete_archive_policy(self.archive_policy) - except indexer.NoSuchArchivePolicy as e: - abort(404, e) - except indexer.ArchivePolicyInUse as e: - abort(400, e) - - -class ArchivePoliciesController(rest.RestController): - @pecan.expose() - def _lookup(self, archive_policy, *remainder): - return ArchivePolicyController(archive_policy), remainder - - @pecan.expose('json') - def post(self): - # NOTE(jd): Initialize this one at run-time because we rely on conf - conf = pecan.request.conf - enforce("create archive policy", {}) - ArchivePolicySchema = voluptuous.Schema({ - voluptuous.Required("name"): six.text_type, - voluptuous.Required("back_window", default=0): PositiveOrNullInt, - voluptuous.Required( - "aggregation_methods", - default=set(conf.archive_policy.default_aggregation_methods)): - [ValidAggMethod], - voluptuous.Required("definition"): - voluptuous.All([{ - "granularity": Timespan, - "points": PositiveNotNullInt, - "timespan": Timespan, - }], voluptuous.Length(min=1)), - }) - - body = 
deserialize_and_validate(ArchivePolicySchema) - # Validate the data - try: - ap = archive_policy.ArchivePolicy.from_dict(body) - except ValueError as e: - abort(400, e) - enforce("create archive policy", ap) - try: - ap = pecan.request.indexer.create_archive_policy(ap) - except indexer.ArchivePolicyAlreadyExists as e: - abort(409, e) - - location = "/archive_policy/" + ap.name - set_resp_location_hdr(location) - pecan.response.status = 201 - return ap - - @pecan.expose('json') - def get_all(self): - enforce("list archive policy", {}) - return pecan.request.indexer.list_archive_policies() - - -class ArchivePolicyRulesController(rest.RestController): - @pecan.expose('json') - def post(self): - enforce("create archive policy rule", {}) - ArchivePolicyRuleSchema = voluptuous.Schema({ - voluptuous.Required("name"): six.text_type, - voluptuous.Required("metric_pattern"): six.text_type, - voluptuous.Required("archive_policy_name"): six.text_type, - }) - - body = deserialize_and_validate(ArchivePolicyRuleSchema) - enforce("create archive policy rule", body) - try: - ap = pecan.request.indexer.create_archive_policy_rule( - body['name'], body['metric_pattern'], - body['archive_policy_name'] - ) - except indexer.ArchivePolicyRuleAlreadyExists as e: - abort(409, e) - - location = "/archive_policy_rule/" + ap.name - set_resp_location_hdr(location) - pecan.response.status = 201 - return ap - - @pecan.expose('json') - def get_one(self, name): - ap = pecan.request.indexer.get_archive_policy_rule(name) - if ap: - enforce("get archive policy rule", ap) - return ap - abort(404, indexer.NoSuchArchivePolicyRule(name)) - - @pecan.expose('json') - def get_all(self): - enforce("list archive policy rule", {}) - return pecan.request.indexer.list_archive_policy_rules() - - @pecan.expose() - def delete(self, name): - # NOTE(jd) I don't think there's any point in fetching and passing the - # archive policy rule here, as the rule is probably checking the actual - # role of the user, not the 
content of the AP rule. - enforce("delete archive policy rule", {}) - try: - pecan.request.indexer.delete_archive_policy_rule(name) - except indexer.NoSuchArchivePolicyRule as e: - abort(404, e) - - -def MeasuresListSchema(measures): - try: - times = utils.to_timestamps((m['timestamp'] for m in measures)) - except TypeError: - abort(400, "Invalid format for measures") - except ValueError as e: - abort(400, "Invalid input for timestamp: %s" % e) - - try: - values = [float(i['value']) for i in measures] - except Exception: - abort(400, "Invalid input for a value") - - return (storage.Measure(t, v) for t, v in six.moves.zip( - times.tolist(), values)) - - -def TransformSchema(transform): - try: - return transformation.parse(transform) - except transformation.TransformationParserError as e: - abort(400, str(e)) - - -class MetricController(rest.RestController): - _custom_actions = { - 'measures': ['POST', 'GET'] - } - - def __init__(self, metric): - self.metric = metric - mgr = extension.ExtensionManager(namespace='gnocchi.aggregates', - invoke_on_load=True) - self.custom_agg = dict((x.name, x.obj) for x in mgr) - - def enforce_metric(self, rule): - enforce(rule, json.to_primitive(self.metric)) - - @pecan.expose('json') - def get_all(self): - self.enforce_metric("get metric") - return self.metric - - @pecan.expose() - def post_measures(self): - self.enforce_metric("post measures") - params = deserialize() - if not isinstance(params, list): - abort(400, "Invalid input for measures") - if params: - pecan.request.incoming.add_measures( - self.metric, MeasuresListSchema(params)) - pecan.response.status = 202 - - @pecan.expose('json') - def get_measures(self, start=None, stop=None, aggregation='mean', - granularity=None, resample=None, refresh=False, - transform=None, - **param): - self.enforce_metric("get measures") - if not (aggregation - in archive_policy.ArchivePolicy.VALID_AGGREGATION_METHODS - or aggregation in self.custom_agg): - msg = '''Invalid aggregation value 
%(agg)s, must be one of %(std)s - or %(custom)s''' - abort(400, msg % dict( - agg=aggregation, - std=archive_policy.ArchivePolicy.VALID_AGGREGATION_METHODS, - custom=str(self.custom_agg.keys()))) - - if start is not None: - try: - start = utils.to_timestamp(start) - except Exception: - abort(400, "Invalid value for start") - - if stop is not None: - try: - stop = utils.to_timestamp(stop) - except Exception: - abort(400, "Invalid value for stop") - - if transform is not None: - transform = TransformSchema(transform) - - if resample: - # TODO(sileht): This have to be deprecated at some point - if transform: - abort(400, 'transform and resample are exclusive') - - if not granularity: - abort(400, 'A granularity must be specified to resample') - try: - resample = utils.to_timespan(resample) - except ValueError as e: - abort(400, e) - transform = [carbonara.Transformation("resample", (resample,))] - - if (strtobool("refresh", refresh) and - pecan.request.incoming.has_unprocessed(self.metric)): - try: - pecan.request.storage.refresh_metric( - pecan.request.indexer, pecan.request.incoming, self.metric, - pecan.request.conf.api.refresh_timeout) - except storage.SackLockTimeoutError as e: - abort(503, e) - try: - if aggregation in self.custom_agg: - warnings.warn("moving_average aggregation is deprecated.", - category=DeprecationWarning) - return self.custom_agg[aggregation].compute( - pecan.request.storage, self.metric, - start, stop, **param) - return pecan.request.storage.get_measures( - self.metric, start, stop, aggregation, - utils.to_timespan(granularity) - if granularity is not None else None, - transform) - except (storage.MetricDoesNotExist, - storage.GranularityDoesNotExist, - storage.AggregationDoesNotExist) as e: - abort(404, e) - except aggregates.CustomAggFailure as e: - abort(400, e) - - @pecan.expose() - def delete(self): - self.enforce_metric("delete metric") - try: - pecan.request.indexer.delete_metric(self.metric.id) - except indexer.NoSuchMetric as e: - 
abort(404, e) - - -class MetricsController(rest.RestController): - - @pecan.expose() - def _lookup(self, id, *remainder): - try: - metric_id = uuid.UUID(id) - except ValueError: - abort(404, indexer.NoSuchMetric(id)) - metrics = pecan.request.indexer.list_metrics( - id=metric_id, details=True) - if not metrics: - abort(404, indexer.NoSuchMetric(id)) - return MetricController(metrics[0]), remainder - - _MetricSchema = voluptuous.Schema({ - "archive_policy_name": six.text_type, - "name": six.text_type, - voluptuous.Optional("unit"): - voluptuous.All(six.text_type, voluptuous.Length(max=31)), - }) - - # NOTE(jd) Define this method as it was a voluptuous schema – it's just a - # smarter version of a voluptuous schema, no? - @classmethod - def MetricSchema(cls, definition): - # First basic validation - definition = cls._MetricSchema(definition) - archive_policy_name = definition.get('archive_policy_name') - - name = definition.get('name') - if name and '/' in name: - abort(400, "'/' is not supported in metric name") - if archive_policy_name is None: - try: - ap = pecan.request.indexer.get_archive_policy_for_metric(name) - except indexer.NoArchivePolicyRuleMatch: - # NOTE(jd) Since this is a schema-like function, we - # should/could raise ValueError, but if we do so, voluptuous - # just returns a "invalid value" with no useful message – so we - # prefer to use abort() to make sure the user has the right - # error message - abort(400, "No archive policy name specified " - "and no archive policy rule found matching " - "the metric name %s" % name) - else: - definition['archive_policy_name'] = ap.name - - creator = pecan.request.auth_helper.get_current_user( - pecan.request) - - enforce("create metric", { - "creator": creator, - "archive_policy_name": archive_policy_name, - "name": name, - "unit": definition.get('unit'), - }) - - return definition - - @pecan.expose('json') - def post(self): - creator = pecan.request.auth_helper.get_current_user( - pecan.request) - body = 
deserialize_and_validate(self.MetricSchema) - try: - m = pecan.request.indexer.create_metric( - uuid.uuid4(), - creator, - name=body.get('name'), - unit=body.get('unit'), - archive_policy_name=body['archive_policy_name']) - except indexer.NoSuchArchivePolicy as e: - abort(400, e) - set_resp_location_hdr("/metric/" + str(m.id)) - pecan.response.status = 201 - return m - - MetricListSchema = voluptuous.Schema({ - "user_id": six.text_type, - "project_id": six.text_type, - "creator": six.text_type, - "limit": six.text_type, - "name": six.text_type, - "id": six.text_type, - "unit": six.text_type, - "archive_policy_name": six.text_type, - "status": voluptuous.Any("active", "delete"), - "sort": voluptuous.Any([six.text_type], six.text_type), - "marker": six.text_type, - }) - - @classmethod - @pecan.expose('json') - def get_all(cls, **kwargs): - kwargs = cls.MetricListSchema(kwargs) - - # Compat with old user/project API - provided_user_id = kwargs.pop('user_id', None) - provided_project_id = kwargs.pop('project_id', None) - if provided_user_id is None and provided_project_id is None: - provided_creator = kwargs.pop('creator', None) - else: - provided_creator = ( - (provided_user_id or "") - + ":" - + (provided_project_id or "") - ) - try: - enforce("list all metric", {}) - except webob.exc.HTTPForbidden: - enforce("list metric", {}) - creator = pecan.request.auth_helper.get_current_user( - pecan.request) - if provided_creator and creator != provided_creator: - abort(403, "Insufficient privileges to filter by user/project") - attr_filter = {} - if provided_creator is not None: - attr_filter['creator'] = provided_creator - attr_filter.update(get_pagination_options( - kwargs, METRIC_DEFAULT_PAGINATION)) - attr_filter.update(kwargs) - try: - return pecan.request.indexer.list_metrics(**attr_filter) - except indexer.IndexerException as e: - abort(400, e) - - -_MetricsSchema = voluptuous.Schema({ - six.text_type: voluptuous.Any(utils.UUID, - MetricsController.MetricSchema), -}) 
- - -def MetricsSchema(data): - # NOTE(jd) Before doing any kind of validation, copy the metric name - # into the metric definition. This is required so we have the name - # available when doing the metric validation with its own MetricSchema, - # and so we can do things such as applying archive policy rules. - if isinstance(data, dict): - for metric_name, metric_def in six.iteritems(data): - if isinstance(metric_def, dict): - metric_def['name'] = metric_name - return _MetricsSchema(data) - - -class NamedMetricController(rest.RestController): - def __init__(self, resource_id, resource_type): - self.resource_id = resource_id - self.resource_type = resource_type - - @pecan.expose() - def _lookup(self, name, *remainder): - details = True if pecan.request.method == 'GET' else False - m = pecan.request.indexer.list_metrics(details=details, - name=name, - resource_id=self.resource_id) - if m: - return MetricController(m[0]), remainder - - resource = pecan.request.indexer.get_resource(self.resource_type, - self.resource_id) - if resource: - abort(404, indexer.NoSuchMetric(name)) - else: - abort(404, indexer.NoSuchResource(self.resource_id)) - - @pecan.expose('json') - def post(self): - resource = pecan.request.indexer.get_resource( - self.resource_type, self.resource_id) - if not resource: - abort(404, indexer.NoSuchResource(self.resource_id)) - enforce("update resource", resource) - metrics = deserialize_and_validate(MetricsSchema) - try: - r = pecan.request.indexer.update_resource( - self.resource_type, - self.resource_id, - metrics=metrics, - append_metrics=True, - create_revision=False) - except (indexer.NoSuchMetric, - indexer.NoSuchArchivePolicy, - ValueError) as e: - abort(400, e) - except indexer.NamedMetricAlreadyExists as e: - abort(409, e) - except indexer.NoSuchResource as e: - abort(404, e) - - return r.metrics - - @pecan.expose('json') - def get_all(self): - resource = pecan.request.indexer.get_resource( - self.resource_type, self.resource_id) - if not 
resource: - abort(404, indexer.NoSuchResource(self.resource_id)) - enforce("get resource", resource) - return pecan.request.indexer.list_metrics(resource_id=self.resource_id) - - -class ResourceHistoryController(rest.RestController): - def __init__(self, resource_id, resource_type): - self.resource_id = resource_id - self.resource_type = resource_type - - @pecan.expose('json') - def get(self, **kwargs): - details = get_details(kwargs) - pagination_opts = get_pagination_options( - kwargs, RESOURCE_DEFAULT_PAGINATION) - - resource = pecan.request.indexer.get_resource( - self.resource_type, self.resource_id) - if not resource: - abort(404, indexer.NoSuchResource(self.resource_id)) - - enforce("get resource", resource) - - try: - # FIXME(sileht): next API version should returns - # {'resources': [...], 'links': [ ... pagination rel ...]} - return pecan.request.indexer.list_resources( - self.resource_type, - attribute_filter={"=": {"id": self.resource_id}}, - details=details, - history=True, - **pagination_opts - ) - except indexer.IndexerException as e: - abort(400, e) - - -def etag_precondition_check(obj): - etag, lastmodified = obj.etag, obj.lastmodified - # NOTE(sileht): Checks and order come from rfc7232 - # in webob, the '*' and the absent of the header is handled by - # if_match.__contains__() and if_none_match.__contains__() - # and are identique... 
- if etag not in pecan.request.if_match: - abort(412) - elif (not pecan.request.environ.get("HTTP_IF_MATCH") - and pecan.request.if_unmodified_since - and pecan.request.if_unmodified_since < lastmodified): - abort(412) - - if etag in pecan.request.if_none_match: - if pecan.request.method in ['GET', 'HEAD']: - abort(304) - else: - abort(412) - elif (not pecan.request.environ.get("HTTP_IF_NONE_MATCH") - and pecan.request.if_modified_since - and (pecan.request.if_modified_since >= - lastmodified) - and pecan.request.method in ['GET', 'HEAD']): - abort(304) - - -def etag_set_headers(obj): - pecan.response.etag = obj.etag - pecan.response.last_modified = obj.lastmodified - - -def AttributesPath(value): - if value.startswith("/attributes"): - return value - raise ValueError("Only attributes can be modified") - - -ResourceTypeJsonPatchSchema = voluptuous.Schema([{ - "op": voluptuous.Any("add", "remove"), - "path": AttributesPath, - voluptuous.Optional("value"): dict, -}]) - - -class ResourceTypeController(rest.RestController): - def __init__(self, name): - self._name = name - - @pecan.expose('json') - def get(self): - try: - rt = pecan.request.indexer.get_resource_type(self._name) - except indexer.NoSuchResourceType as e: - abort(404, e) - enforce("get resource type", rt) - return rt - - @pecan.expose('json') - def patch(self): - # NOTE(sileht): should we check for "application/json-patch+json" - # Content-Type ? 
- - try: - rt = pecan.request.indexer.get_resource_type(self._name) - except indexer.NoSuchResourceType as e: - abort(404, e) - enforce("update resource type", rt) - - # Ensure this is a valid jsonpatch dict - patch = deserialize_and_validate( - ResourceTypeJsonPatchSchema, - expected_content_types=["application/json-patch+json"]) - - # Add new attributes to the resource type - rt_json_current = rt.jsonify() - try: - rt_json_next = jsonpatch.apply_patch(rt_json_current, patch) - except jsonpatch.JsonPatchException as e: - abort(400, e) - del rt_json_next['state'] - - # Validate that the whole new resource_type is valid - schema = pecan.request.indexer.get_resource_type_schema() - try: - rt_json_next = voluptuous.Schema(schema.for_update, required=True)( - rt_json_next) - except voluptuous.Error as e: - abort(400, "Invalid input: %s" % e) - - # Get only newly formatted and deleted attributes - add_attrs = {k: v for k, v in rt_json_next["attributes"].items() - if k not in rt_json_current["attributes"]} - del_attrs = [k for k in rt_json_current["attributes"] - if k not in rt_json_next["attributes"]] - - if not add_attrs and not del_attrs: - # NOTE(sileht): just returns the resource, the asked changes - # just do nothing - return rt - - try: - add_attrs = schema.attributes_from_dict(add_attrs) - except resource_type.InvalidResourceAttribute as e: - abort(400, "Invalid input: %s" % e) - - try: - return pecan.request.indexer.update_resource_type( - self._name, add_attributes=add_attrs, - del_attributes=del_attrs) - except indexer.NoSuchResourceType as e: - abort(400, e) - - @pecan.expose() - def delete(self): - try: - pecan.request.indexer.get_resource_type(self._name) - except indexer.NoSuchResourceType as e: - abort(404, e) - enforce("delete resource type", resource_type) - try: - pecan.request.indexer.delete_resource_type(self._name) - except (indexer.NoSuchResourceType, - indexer.ResourceTypeInUse) as e: - abort(400, e) - - -class 
ResourceTypesController(rest.RestController): - - @pecan.expose() - def _lookup(self, name, *remainder): - return ResourceTypeController(name), remainder - - @pecan.expose('json') - def post(self): - schema = pecan.request.indexer.get_resource_type_schema() - body = deserialize_and_validate(schema) - body["state"] = "creating" - - try: - rt = schema.resource_type_from_dict(**body) - except resource_type.InvalidResourceAttribute as e: - abort(400, "Invalid input: %s" % e) - - enforce("create resource type", body) - try: - rt = pecan.request.indexer.create_resource_type(rt) - except indexer.ResourceTypeAlreadyExists as e: - abort(409, e) - set_resp_location_hdr("/resource_type/" + rt.name) - pecan.response.status = 201 - return rt - - @pecan.expose('json') - def get_all(self, **kwargs): - enforce("list resource type", {}) - try: - return pecan.request.indexer.list_resource_types() - except indexer.IndexerException as e: - abort(400, e) - - -def ResourceSchema(schema): - base_schema = { - voluptuous.Optional('started_at'): utils.to_datetime, - voluptuous.Optional('ended_at'): utils.to_datetime, - voluptuous.Optional('user_id'): voluptuous.Any(None, six.text_type), - voluptuous.Optional('project_id'): voluptuous.Any(None, six.text_type), - voluptuous.Optional('metrics'): MetricsSchema, - } - base_schema.update(schema) - return base_schema - - -class ResourceController(rest.RestController): - - def __init__(self, resource_type, id): - self._resource_type = resource_type - creator = pecan.request.auth_helper.get_current_user( - pecan.request) - try: - self.id = utils.ResourceUUID(id, creator) - except ValueError: - abort(404, indexer.NoSuchResource(id)) - self.metric = NamedMetricController(str(self.id), self._resource_type) - self.history = ResourceHistoryController(str(self.id), - self._resource_type) - - @pecan.expose('json') - def get(self): - resource = pecan.request.indexer.get_resource( - self._resource_type, self.id, with_metrics=True) - if resource: - 
enforce("get resource", resource) - etag_precondition_check(resource) - etag_set_headers(resource) - return resource - abort(404, indexer.NoSuchResource(self.id)) - - @pecan.expose('json') - def patch(self): - resource = pecan.request.indexer.get_resource( - self._resource_type, self.id, with_metrics=True) - if not resource: - abort(404, indexer.NoSuchResource(self.id)) - enforce("update resource", resource) - etag_precondition_check(resource) - - body = deserialize_and_validate( - schema_for(self._resource_type), - required=False) - - if len(body) == 0: - etag_set_headers(resource) - return resource - - for k, v in six.iteritems(body): - if k != 'metrics' and getattr(resource, k) != v: - create_revision = True - break - else: - if 'metrics' not in body: - # No need to go further, we assume the db resource - # doesn't change between the get and update - return resource - create_revision = False - - try: - resource = pecan.request.indexer.update_resource( - self._resource_type, - self.id, - create_revision=create_revision, - **body) - except (indexer.NoSuchMetric, - indexer.NoSuchArchivePolicy, - ValueError) as e: - abort(400, e) - except indexer.NoSuchResource as e: - abort(404, e) - etag_set_headers(resource) - return resource - - @pecan.expose() - def delete(self): - resource = pecan.request.indexer.get_resource( - self._resource_type, self.id) - if not resource: - abort(404, indexer.NoSuchResource(self.id)) - enforce("delete resource", resource) - etag_precondition_check(resource) - try: - pecan.request.indexer.delete_resource(self.id) - except indexer.NoSuchResource as e: - abort(404, e) - - -def schema_for(resource_type): - resource_type = pecan.request.indexer.get_resource_type(resource_type) - return ResourceSchema(resource_type.schema) - - -def ResourceUUID(value, creator): - try: - return utils.ResourceUUID(value, creator) - except ValueError as e: - raise voluptuous.Invalid(e) - - -def ResourceID(value, creator): - return (six.text_type(value), 
ResourceUUID(value, creator)) - - -class ResourcesController(rest.RestController): - def __init__(self, resource_type): - self._resource_type = resource_type - - @pecan.expose() - def _lookup(self, id, *remainder): - return ResourceController(self._resource_type, id), remainder - - @pecan.expose('json') - def post(self): - # NOTE(sileht): we need to copy the dict because when change it - # and we don't want that next patch call have the "id" - schema = dict(schema_for(self._resource_type)) - creator = pecan.request.auth_helper.get_current_user( - pecan.request) - schema["id"] = functools.partial(ResourceID, creator=creator) - - body = deserialize_and_validate(schema) - body["original_resource_id"], body["id"] = body["id"] - - target = { - "resource_type": self._resource_type, - } - target.update(body) - enforce("create resource", target) - rid = body['id'] - del body['id'] - try: - resource = pecan.request.indexer.create_resource( - self._resource_type, rid, creator, - **body) - except (ValueError, - indexer.NoSuchMetric, - indexer.NoSuchArchivePolicy) as e: - abort(400, e) - except indexer.ResourceAlreadyExists as e: - abort(409, e) - set_resp_location_hdr("/resource/" - + self._resource_type + "/" - + six.text_type(resource.id)) - etag_set_headers(resource) - pecan.response.status = 201 - return resource - - @pecan.expose('json') - def get_all(self, **kwargs): - details = get_details(kwargs) - history = get_history(kwargs) - pagination_opts = get_pagination_options( - kwargs, RESOURCE_DEFAULT_PAGINATION) - policy_filter = pecan.request.auth_helper.get_resource_policy_filter( - pecan.request, "list resource", self._resource_type) - - try: - # FIXME(sileht): next API version should returns - # {'resources': [...], 'links': [ ... 
pagination rel ...]} - return pecan.request.indexer.list_resources( - self._resource_type, - attribute_filter=policy_filter, - details=details, - history=history, - **pagination_opts - ) - except indexer.IndexerException as e: - abort(400, e) - - @pecan.expose('json') - def delete(self, **kwargs): - # NOTE(sileht): Don't allow empty filter, this is going to delete - # the entire database. - if pecan.request.body: - attr_filter = deserialize_and_validate(ResourceSearchSchema) - elif kwargs.get("filter"): - attr_filter = QueryStringSearchAttrFilter.parse(kwargs["filter"]) - else: - attr_filter = None - - # the voluptuous checks everything, but it is better to - # have this here. - if not attr_filter: - abort(400, "caution: the query can not be empty, or it will \ - delete entire database") - - policy_filter = pecan.request.auth_helper.get_resource_policy_filter( - pecan.request, - "delete resources", self._resource_type) - - if policy_filter: - attr_filter = {"and": [policy_filter, attr_filter]} - - try: - delete_num = pecan.request.indexer.delete_resources( - self._resource_type, attribute_filter=attr_filter) - except indexer.IndexerException as e: - abort(400, e) - - return {"deleted": delete_num} - - -class ResourcesByTypeController(rest.RestController): - @pecan.expose('json') - def get_all(self): - return dict( - (rt.name, - pecan.request.application_url + '/resource/' + rt.name) - for rt in pecan.request.indexer.list_resource_types()) - - @pecan.expose() - def _lookup(self, resource_type, *remainder): - try: - pecan.request.indexer.get_resource_type(resource_type) - except indexer.NoSuchResourceType as e: - abort(404, e) - return ResourcesController(resource_type), remainder - - -class QueryStringSearchAttrFilter(object): - uninary_operators = ("not", ) - binary_operator = (u">=", u"<=", u"!=", u">", u"<", u"=", u"==", u"eq", - u"ne", u"lt", u"gt", u"ge", u"le", u"in", u"like", u"≠", - u"≥", u"≤") - multiple_operators = (u"and", u"or", u"∧", u"∨") - - operator 
= pyparsing.Regex(u"|".join(binary_operator)) - null = pyparsing.Regex("None|none|null").setParseAction( - pyparsing.replaceWith(None)) - boolean = "False|True|false|true" - boolean = pyparsing.Regex(boolean).setParseAction( - lambda t: t[0].lower() == "true") - hex_string = lambda n: pyparsing.Word(pyparsing.hexnums, exact=n) - uuid_string = pyparsing.Combine( - hex_string(8) + (pyparsing.Optional("-") + hex_string(4)) * 3 + - pyparsing.Optional("-") + hex_string(12)) - number = r"[+-]?\d+(:?\.\d*)?(:?[eE][+-]?\d+)?" - number = pyparsing.Regex(number).setParseAction(lambda t: float(t[0])) - identifier = pyparsing.Word(pyparsing.alphas, pyparsing.alphanums + "_") - quoted_string = pyparsing.QuotedString('"') | pyparsing.QuotedString("'") - comparison_term = pyparsing.Forward() - in_list = pyparsing.Group( - pyparsing.Suppress('[') + - pyparsing.Optional(pyparsing.delimitedList(comparison_term)) + - pyparsing.Suppress(']'))("list") - comparison_term << (null | boolean | uuid_string | identifier | number | - quoted_string | in_list) - condition = pyparsing.Group(comparison_term + operator + comparison_term) - - expr = pyparsing.infixNotation(condition, [ - ("not", 1, pyparsing.opAssoc.RIGHT, ), - ("and", 2, pyparsing.opAssoc.LEFT, ), - ("∧", 2, pyparsing.opAssoc.LEFT, ), - ("or", 2, pyparsing.opAssoc.LEFT, ), - ("∨", 2, pyparsing.opAssoc.LEFT, ), - ]) - - @classmethod - def _parsed_query2dict(cls, parsed_query): - result = None - while parsed_query: - part = parsed_query.pop() - if part in cls.binary_operator: - result = {part: {parsed_query.pop(): result}} - - elif part in cls.multiple_operators: - if result.get(part): - result[part].append( - cls._parsed_query2dict(parsed_query.pop())) - else: - result = {part: [result]} - - elif part in cls.uninary_operators: - result = {part: result} - elif isinstance(part, pyparsing.ParseResults): - kind = part.getName() - if kind == "list": - res = part.asList() - else: - res = cls._parsed_query2dict(part) - if result is None: 
- result = res - elif isinstance(result, dict): - list(result.values())[0].append(res) - else: - result = part - return result - - @classmethod - def _parse(cls, query): - try: - parsed_query = cls.expr.parseString(query, parseAll=True)[0] - except pyparsing.ParseException as e: - raise abort(400, "Invalid filter: %s" % six.text_type(e)) - return cls._parsed_query2dict(parsed_query) - - @classmethod - def parse(cls, query): - attr_filter = cls._parse(query) - return validate(ResourceSearchSchema, attr_filter, required=True) - - -def ResourceSearchSchema(v): - return _ResourceSearchSchema()(v) - - -# NOTE(sileht): indexer will cast this type to the real attribute -# type, here we just want to be sure this is not a dict or a list -ResourceSearchSchemaAttributeValue = voluptuous.Any( - six.text_type, float, int, bool, None) - - -def _ResourceSearchSchema(): - user = pecan.request.auth_helper.get_current_user( - pecan.request) - _ResourceUUID = functools.partial(ResourceUUID, creator=user) - - return voluptuous.Schema( - voluptuous.All( - voluptuous.Length(min=0, max=1), - { - voluptuous.Any( - u"=", u"==", u"eq", - u"<", u"lt", - u">", u"gt", - u"<=", u"≤", u"le", - u">=", u"≥", u"ge", - u"!=", u"≠", u"ne", - u"like" - ): voluptuous.All( - voluptuous.Length(min=1, max=1), - {"id": _ResourceUUID, - six.text_type: ResourceSearchSchemaAttributeValue}, - ), - voluptuous.Any( - u"in", - ): voluptuous.All( - voluptuous.Length(min=1, max=1), - {"id": voluptuous.All( - [_ResourceUUID], - voluptuous.Length(min=1)), - six.text_type: voluptuous.All( - [ResourceSearchSchemaAttributeValue], - voluptuous.Length(min=1))} - ), - voluptuous.Any( - u"and", u"∨", - u"or", u"∧", - ): voluptuous.All( - [ResourceSearchSchema], voluptuous.Length(min=1) - ), - u"not": ResourceSearchSchema, - } - ) - ) - - -class SearchResourceTypeController(rest.RestController): - def __init__(self, resource_type): - self._resource_type = resource_type - - def _search(self, **kwargs): - if 
pecan.request.body: - attr_filter = deserialize_and_validate(ResourceSearchSchema) - elif kwargs.get("filter"): - attr_filter = QueryStringSearchAttrFilter.parse(kwargs["filter"]) - else: - attr_filter = None - - details = get_details(kwargs) - history = get_history(kwargs) - pagination_opts = get_pagination_options( - kwargs, RESOURCE_DEFAULT_PAGINATION) - - policy_filter = pecan.request.auth_helper.get_resource_policy_filter( - pecan.request, "search resource", self._resource_type) - if policy_filter: - if attr_filter: - attr_filter = {"and": [ - policy_filter, - attr_filter - ]} - else: - attr_filter = policy_filter - - return pecan.request.indexer.list_resources( - self._resource_type, - attribute_filter=attr_filter, - details=details, - history=history, - **pagination_opts) - - @pecan.expose('json') - def post(self, **kwargs): - try: - return self._search(**kwargs) - except indexer.IndexerException as e: - abort(400, e) - - -class SearchResourceController(rest.RestController): - @pecan.expose() - def _lookup(self, resource_type, *remainder): - try: - pecan.request.indexer.get_resource_type(resource_type) - except indexer.NoSuchResourceType as e: - abort(404, e) - return SearchResourceTypeController(resource_type), remainder - - -def _MetricSearchSchema(v): - """Helper method to indirect the recursivity of the search schema""" - return SearchMetricController.MetricSearchSchema(v) - - -def _MetricSearchOperationSchema(v): - """Helper method to indirect the recursivity of the search schema""" - return SearchMetricController.MetricSearchOperationSchema(v) - - -class SearchMetricController(rest.RestController): - - MetricSearchOperationSchema = voluptuous.Schema( - voluptuous.All( - voluptuous.Length(min=1, max=1), - { - voluptuous.Any( - u"=", u"==", u"eq", - u"<", u"lt", - u">", u"gt", - u"<=", u"≤", u"le", - u">=", u"≥", u"ge", - u"!=", u"≠", u"ne", - u"%", u"mod", - u"+", u"add", - u"-", u"sub", - u"*", u"×", u"mul", - u"/", u"÷", u"div", - u"**", u"^", u"pow", 
- ): voluptuous.Any( - float, int, - voluptuous.All( - [float, int, - voluptuous.Any(_MetricSearchOperationSchema)], - voluptuous.Length(min=2, max=2), - ), - ), - }, - ) - ) - - MetricSearchSchema = voluptuous.Schema( - voluptuous.Any( - MetricSearchOperationSchema, - voluptuous.All( - voluptuous.Length(min=1, max=1), - { - voluptuous.Any( - u"and", u"∨", - u"or", u"∧", - u"not", - ): [_MetricSearchSchema], - } - ) - ) - ) - - @pecan.expose('json') - def post(self, metric_id, start=None, stop=None, aggregation='mean', - granularity=None): - granularity = [utils.to_timespan(g) - for g in arg_to_list(granularity or [])] - metrics = pecan.request.indexer.list_metrics( - ids=arg_to_list(metric_id)) - - for metric in metrics: - enforce("search metric", metric) - - if not pecan.request.body: - abort(400, "No query specified in body") - - query = deserialize_and_validate(self.MetricSearchSchema) - - if start is not None: - try: - start = utils.to_timestamp(start) - except Exception: - abort(400, "Invalid value for start") - - if stop is not None: - try: - stop = utils.to_timestamp(stop) - except Exception: - abort(400, "Invalid value for stop") - - try: - return { - str(metric.id): values - for metric, values in six.iteritems( - pecan.request.storage.search_value( - metrics, query, start, stop, aggregation, - granularity - ) - ) - } - except storage.InvalidQuery as e: - abort(400, e) - except storage.GranularityDoesNotExist as e: - abort(400, e) - - -class ResourcesMetricsMeasuresBatchController(rest.RestController): - @pecan.expose('json') - def post(self, create_metrics=False): - creator = pecan.request.auth_helper.get_current_user( - pecan.request) - MeasuresBatchSchema = voluptuous.Schema( - {functools.partial(ResourceID, creator=creator): - {six.text_type: MeasuresListSchema}} - ) - - body = deserialize_and_validate(MeasuresBatchSchema) - - known_metrics = [] - unknown_metrics = [] - unknown_resources = [] - body_by_rid = {} - for original_resource_id, resource_id 
in body: - body_by_rid[resource_id] = body[(original_resource_id, - resource_id)] - names = body[(original_resource_id, resource_id)].keys() - metrics = pecan.request.indexer.list_metrics( - names=names, resource_id=resource_id) - - known_names = [m.name for m in metrics] - if strtobool("create_metrics", create_metrics): - already_exists_names = [] - for name in names: - if name not in known_names: - metric = MetricsController.MetricSchema({ - "name": name - }) - try: - m = pecan.request.indexer.create_metric( - uuid.uuid4(), - creator=creator, - resource_id=resource_id, - name=metric.get('name'), - unit=metric.get('unit'), - archive_policy_name=metric[ - 'archive_policy_name']) - except indexer.NamedMetricAlreadyExists as e: - already_exists_names.append(e.metric) - except indexer.NoSuchResource: - unknown_resources.append({ - 'resource_id': six.text_type(resource_id), - 'original_resource_id': original_resource_id}) - break - except indexer.IndexerException as e: - # This catch NoSuchArchivePolicy, which is unlikely - # be still possible - abort(400, e) - else: - known_metrics.append(m) - - if already_exists_names: - # Add metrics created in the meantime - known_names.extend(already_exists_names) - known_metrics.extend( - pecan.request.indexer.list_metrics( - names=already_exists_names, - resource_id=resource_id) - ) - - elif len(names) != len(metrics): - unknown_metrics.extend( - ["%s/%s" % (six.text_type(resource_id), m) - for m in names if m not in known_names]) - - known_metrics.extend(metrics) - - if unknown_resources: - abort(400, {"cause": "Unknown resources", - "detail": unknown_resources}) - - if unknown_metrics: - abort(400, "Unknown metrics: %s" % ", ".join( - sorted(unknown_metrics))) - - for metric in known_metrics: - enforce("post measures", metric) - - pecan.request.incoming.add_measures_batch( - dict((metric, - body_by_rid[metric.resource_id][metric.name]) - for metric in known_metrics)) - - pecan.response.status = 202 - - -class 
MetricsMeasuresBatchController(rest.RestController): - # NOTE(sileht): we don't allow to mix both formats - # to not have to deal with id collision that can - # occurs between a metric_id and a resource_id. - # Because while json allow duplicate keys in dict payload - # only the last key will be retain by json python module to - # build the python dict. - MeasuresBatchSchema = voluptuous.Schema( - {utils.UUID: MeasuresListSchema} - ) - - @pecan.expose() - def post(self): - body = deserialize_and_validate(self.MeasuresBatchSchema) - metrics = pecan.request.indexer.list_metrics(ids=body.keys()) - - if len(metrics) != len(body): - missing_metrics = sorted(set(body) - set(m.id for m in metrics)) - abort(400, "Unknown metrics: %s" % ", ".join( - six.moves.map(str, missing_metrics))) - - for metric in metrics: - enforce("post measures", metric) - - pecan.request.incoming.add_measures_batch( - dict((metric, body[metric.id]) for metric in - metrics)) - - pecan.response.status = 202 - - @staticmethod - @pecan.expose('json') - def get_all(**kwargs): - # Check RBAC policy - metric_ids = arg_to_list(kwargs.get('metric', [])) - metrics = pecan.request.indexer.list_metrics(ids=metric_ids) - missing_metric_ids = (set(metric_ids) - - set(six.text_type(m.id) for m in metrics)) - if missing_metric_ids: - abort(400, {"cause": "Unknown metrics", - "detail": list(missing_metric_ids)}) - - for metric in metrics: - enforce("get metric", metric) - - start = kwargs.get('start') - if start is not None: - try: - start = utils.to_timestamp(start) - except Exception: - abort(400, "Invalid value for start") - - stop = kwargs.get('stop') - if stop is not None: - try: - stop = utils.to_timestamp(stop) - except Exception: - abort(400, "Invalid value for stop") - - aggregation = kwargs.get('aggregation', 'mean') - if (aggregation - not in archive_policy.ArchivePolicy.VALID_AGGREGATION_METHODS): - abort( - 400, - 'Invalid aggregation value %s, must be one of %s' - % (aggregation, - 
archive_policy.ArchivePolicy.VALID_AGGREGATION_METHODS)) - - granularity = kwargs.get('granularity') - if granularity is not None: - try: - granularity = utils.to_timespan(granularity) - except ValueError as e: - abort(400, e) - - try: - return dict((str(metric.id), - pecan.request.storage.get_measures( - metric, start, stop, aggregation, granularity)) - for metric in metrics) - except (storage.GranularityDoesNotExist, - storage.AggregationDoesNotExist) as e: - abort(404, e) - - -class SearchController(object): - resource = SearchResourceController() - metric = SearchMetricController() - - -class AggregationResourceController(rest.RestController): - def __init__(self, resource_type, metric_name): - self.resource_type = resource_type - self.metric_name = metric_name - - @pecan.expose('json') - def post(self, start=None, stop=None, aggregation='mean', - reaggregation=None, granularity=None, needed_overlap=100.0, - groupby=None, fill=None, refresh=False, resample=None, - transform=None): - # First, set groupby in the right format: a sorted list of unique - # strings. - groupby = sorted(set(arg_to_list(groupby))) - - # NOTE(jd) Sort by groupby so we are sure we do not return multiple - # groups when using itertools.groupby later. 
- try: - resources = SearchResourceTypeController( - self.resource_type)._search(sort=groupby) - except indexer.InvalidPagination: - abort(400, "Invalid groupby attribute") - except indexer.IndexerException as e: - abort(400, e) - - if resources is None: - return [] - - if not groupby: - metrics = list(filter(None, - (r.get_metric(self.metric_name) - for r in resources))) - return AggregationController.get_cross_metric_measures_from_objs( - metrics, start, stop, aggregation, reaggregation, - granularity, needed_overlap, fill, refresh, resample, - transform) - - def groupper(r): - return tuple((attr, r[attr]) for attr in groupby) - - results = [] - for key, resources in itertools.groupby(resources, groupper): - metrics = list(filter(None, - (r.get_metric(self.metric_name) - for r in resources))) - results.append({ - "group": dict(key), - "measures": AggregationController.get_cross_metric_measures_from_objs( # noqa - metrics, start, stop, aggregation, reaggregation, - granularity, needed_overlap, fill, refresh, resample, - transform) - }) - - return results - - -class AggregationController(rest.RestController): - _custom_actions = { - 'metric': ['POST', 'GET'], - } - - @pecan.expose() - def _lookup(self, object_type, resource_type, key, metric_name, - *remainder): - if object_type != "resource" or key != "metric": - # NOTE(sileht): we want the raw 404 message here - # so use directly pecan - pecan.abort(404) - try: - pecan.request.indexer.get_resource_type(resource_type) - except indexer.NoSuchResourceType as e: - abort(404, e) - return AggregationResourceController(resource_type, - metric_name), remainder - - @staticmethod - def get_cross_metric_measures_from_objs(metrics, start=None, stop=None, - aggregation='mean', - reaggregation=None, - granularity=None, - needed_overlap=100.0, fill=None, - refresh=False, resample=None, - transform=None): - try: - needed_overlap = float(needed_overlap) - except ValueError: - abort(400, 'needed_overlap must be a number') - if 
needed_overlap != 100.0 and start is None and stop is None: - abort(400, 'start and/or stop must be provided if specifying ' - 'needed_overlap') - - if start is not None: - try: - start = utils.to_timestamp(start) - except Exception: - abort(400, "Invalid value for start") - - if stop is not None: - try: - stop = utils.to_timestamp(stop) - except Exception: - abort(400, "Invalid value for stop") - - if (aggregation - not in archive_policy.ArchivePolicy.VALID_AGGREGATION_METHODS): - abort( - 400, - 'Invalid aggregation value %s, must be one of %s' - % (aggregation, - archive_policy.ArchivePolicy.VALID_AGGREGATION_METHODS)) - - for metric in metrics: - enforce("get metric", metric) - - number_of_metrics = len(metrics) - if number_of_metrics == 0: - return [] - if granularity is not None: - try: - granularity = utils.to_timespan(granularity) - except ValueError as e: - abort(400, e) - - if transform is not None: - transform = TransformSchema(transform) - - if resample: - # TODO(sileht): This have to be deprecated at some point - if transform: - abort(400, 'transform and resample are exclusive') - - if not granularity: - abort(400, 'A granularity must be specified to resample') - try: - resample = utils.to_timespan(resample) - except ValueError as e: - abort(400, e) - transform = [carbonara.Transformation("resample", (resample,))] - - if fill is not None: - if granularity is None: - abort(400, "Unable to fill without a granularity") - try: - fill = float(fill) - except ValueError as e: - if fill != 'null': - abort(400, "fill must be a float or \'null\': %s" % e) - - try: - if strtobool("refresh", refresh): - metrics_to_update = [ - m for m in metrics - if pecan.request.incoming.has_unprocessed(m)] - for m in metrics_to_update: - try: - pecan.request.storage.refresh_metric( - pecan.request.indexer, pecan.request.incoming, m, - pecan.request.conf.api.refresh_timeout) - except storage.SackLockTimeoutError as e: - abort(503, e) - if number_of_metrics == 1: - # 
NOTE(sileht): don't do the aggregation if we only have one - # metric - return pecan.request.storage.get_measures( - metrics[0], start, stop, aggregation, - granularity, transform) - return cross_metric.get_cross_metric_measures( - pecan.request.storage, - metrics, start, stop, aggregation, - reaggregation, granularity, needed_overlap, fill, - transform) - except cross_metric.MetricUnaggregatable as e: - abort(400, ("One of the metrics being aggregated doesn't have " - "matching granularity: %s") % str(e)) - except (storage.MetricDoesNotExist, - storage.GranularityDoesNotExist, - storage.AggregationDoesNotExist) as e: - abort(404, e) - - MetricIDsSchema = [utils.UUID] - - @pecan.expose('json') - def get_metric(self, metric=None, start=None, stop=None, - aggregation='mean', reaggregation=None, granularity=None, - needed_overlap=100.0, fill=None, - refresh=False, resample=None, transform=None): - if pecan.request.method == 'GET': - try: - metric_ids = voluptuous.Schema( - self.MetricIDsSchema, required=True)(arg_to_list(metric)) - except voluptuous.Error as e: - abort(400, "Invalid input: %s" % e) - else: - self._workaround_pecan_issue_88() - metric_ids = deserialize_and_validate(self.MetricIDsSchema) - - metric_ids = [six.text_type(m) for m in metric_ids] - # Check RBAC policy - metrics = pecan.request.indexer.list_metrics(ids=metric_ids) - missing_metric_ids = (set(metric_ids) - - set(six.text_type(m.id) for m in metrics)) - if missing_metric_ids: - # Return one of the missing one in the error - abort(404, storage.MetricDoesNotExist( - missing_metric_ids.pop())) - return self.get_cross_metric_measures_from_objs( - metrics, start, stop, aggregation, reaggregation, - granularity, needed_overlap, fill, refresh, resample, transform) - - post_metric = get_metric - - def _workaround_pecan_issue_88(self): - # FIXME(sileht): https://github.com/pecan/pecan/pull/88 - if pecan.request.path_info.startswith("/aggregation/resource"): - pecan.abort(405) - - -class 
CapabilityController(rest.RestController): - @staticmethod - @pecan.expose('json') - def get(): - aggregation_methods = set( - archive_policy.ArchivePolicy.VALID_AGGREGATION_METHODS) - return dict(aggregation_methods=aggregation_methods, - dynamic_aggregation_methods=[ - ext.name for ext in extension.ExtensionManager( - namespace='gnocchi.aggregates') - ]) - - -class StatusController(rest.RestController): - @staticmethod - @pecan.expose('json') - def get(details=True): - enforce("get status", {}) - try: - report = pecan.request.incoming.measures_report( - strtobool("details", details)) - except incoming.ReportGenerationError: - abort(503, 'Unable to generate status. Please retry.') - report_dict = {"storage": {"summary": report['summary']}} - if 'details' in report: - report_dict["storage"]["measures_to_process"] = report['details'] - return report_dict - - -class MetricsBatchController(object): - measures = MetricsMeasuresBatchController() - - -class ResourcesMetricsBatchController(object): - measures = ResourcesMetricsMeasuresBatchController() - - -class ResourcesBatchController(object): - metrics = ResourcesMetricsBatchController() - - -class BatchController(object): - metrics = MetricsBatchController() - resources = ResourcesBatchController() - - -class V1Controller(object): - - def __init__(self): - self.sub_controllers = { - "search": SearchController(), - "archive_policy": ArchivePoliciesController(), - "archive_policy_rule": ArchivePolicyRulesController(), - "metric": MetricsController(), - "batch": BatchController(), - "resource": ResourcesByTypeController(), - "resource_type": ResourceTypesController(), - "aggregation": AggregationController(), - "capabilities": CapabilityController(), - "status": StatusController(), - } - for name, ctrl in self.sub_controllers.items(): - setattr(self, name, ctrl) - - @pecan.expose('json') - def index(self): - return { - "version": "1.0", - "links": [ - {"rel": "self", - "href": pecan.request.application_url} - ] + [ - 
{"rel": name, - "href": pecan.request.application_url + "/" + name} - for name in sorted(self.sub_controllers) - ] - } - - -class VersionsController(object): - @staticmethod - @pecan.expose('json') - def index(): - return { - "versions": [ - { - "status": "CURRENT", - "links": [ - { - "rel": "self", - "href": pecan.request.application_url + "/v1/" - } - ], - "id": "v1.0", - "updated": "2015-03-19" - } - ] - } diff --git a/gnocchi/rest/api-paste.ini b/gnocchi/rest/api-paste.ini index 2cdacaa2..2b6df853 100644 --- a/gnocchi/rest/api-paste.ini +++ b/gnocchi/rest/api-paste.ini @@ -27,11 +27,11 @@ pipeline = http_proxy_to_wsgi gnocchiversions [app:gnocchiversions] paste.app_factory = gnocchi.rest.app:app_factory -root = gnocchi.rest.VersionsController +root = gnocchi.rest.api.VersionsController [app:gnocchiv1] paste.app_factory = gnocchi.rest.app:app_factory -root = gnocchi.rest.V1Controller +root = gnocchi.rest.api.V1Controller [filter:keystone_authtoken] use = egg:keystonemiddleware#auth_token diff --git a/gnocchi/rest/api.py b/gnocchi/rest/api.py new file mode 100644 index 00000000..524ea503 --- /dev/null +++ b/gnocchi/rest/api.py @@ -0,0 +1,1908 @@ +# -*- encoding: utf-8 -*- +# +# Copyright © 2016-2017 Red Hat, Inc. +# Copyright © 2014-2015 eNovance +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+import functools +import itertools +import uuid +import warnings + +import jsonpatch +import pecan +from pecan import rest +import pyparsing +import six +from six.moves.urllib import parse as urllib_parse +from stevedore import extension +import voluptuous +import webob.exc +import werkzeug.http + +from gnocchi import aggregates +from gnocchi import archive_policy +from gnocchi import carbonara +from gnocchi import incoming +from gnocchi import indexer +from gnocchi import json +from gnocchi import resource_type +from gnocchi.rest import cross_metric +from gnocchi.rest import transformation +from gnocchi import storage +from gnocchi import utils + + +def arg_to_list(value): + if isinstance(value, list): + return value + elif value: + return [value] + return [] + + +def abort(status_code, detail='', headers=None, comment=None, **kw): + """Like pecan.abort, but make sure detail is a string.""" + if status_code == 404 and not detail: + raise RuntimeError("http code 404 must have 'detail' set") + if isinstance(detail, Exception): + detail = six.text_type(detail) + return pecan.abort(status_code, detail, headers, comment, **kw) + + +def flatten_dict_to_keypairs(d, separator=':'): + """Generator that produces sequence of keypairs for nested dictionaries. + + :param d: dictionaries which may be nested + :param separator: symbol between names + """ + for name, value in sorted(six.iteritems(d)): + if isinstance(value, dict): + for subname, subvalue in flatten_dict_to_keypairs(value, + separator): + yield ('%s%s%s' % (name, separator, subname), subvalue) + else: + yield name, value + + +def enforce(rule, target): + """Return the user and project the request should be limited to. + + :param rule: The rule name + :param target: The target to enforce on. 
+ + """ + creds = pecan.request.auth_helper.get_auth_info(pecan.request) + + if not isinstance(target, dict): + if hasattr(target, "jsonify"): + target = target.jsonify() + else: + target = target.__dict__ + + # Flatten dict + target = dict(flatten_dict_to_keypairs(d=target, separator='.')) + + if not pecan.request.policy_enforcer.enforce(rule, target, creds): + abort(403) + + +def set_resp_location_hdr(location): + location = '%s%s' % (pecan.request.script_name, location) + # NOTE(sileht): according the pep-3333 the headers must be + # str in py2 and py3 even this is not the same thing in both + # version + # see: http://legacy.python.org/dev/peps/pep-3333/#unicode-issues + if six.PY2 and isinstance(location, six.text_type): + location = location.encode('utf-8') + location = urllib_parse.quote(location) + pecan.response.headers['Location'] = location + + +def deserialize(expected_content_types=None): + if expected_content_types is None: + expected_content_types = ("application/json", ) + + mime_type, options = werkzeug.http.parse_options_header( + pecan.request.headers.get('Content-Type')) + if mime_type not in expected_content_types: + abort(415) + try: + params = json.load(pecan.request.body_file) + except Exception as e: + abort(400, "Unable to decode body: " + six.text_type(e)) + return params + + +def validate(schema, data, required=True): + try: + return voluptuous.Schema(schema, required=required)(data) + except voluptuous.Error as e: + abort(400, "Invalid input: %s" % e) + + +def deserialize_and_validate(schema, required=True, + expected_content_types=None): + return validate(schema, + deserialize(expected_content_types=expected_content_types), + required) + + +def PositiveOrNullInt(value): + value = int(value) + if value < 0: + raise ValueError("Value must be positive") + return value + + +def PositiveNotNullInt(value): + value = int(value) + if value <= 0: + raise ValueError("Value must be positive and not null") + return value + + +def Timespan(value): 
+ try: + return utils.to_timespan(value) + except ValueError as e: + raise voluptuous.Invalid(e) + + +def get_header_option(name, params): + type, options = werkzeug.http.parse_options_header( + pecan.request.headers.get('Accept')) + return strtobool('Accept header' if name in options else name, + options.get(name, params.pop(name, 'false'))) + + +def get_history(params): + return get_header_option('history', params) + + +def get_details(params): + return get_header_option('details', params) + + +def strtobool(varname, v): + """Convert a string to a boolean.""" + try: + return utils.strtobool(v) + except ValueError as e: + abort(400, "Unable to parse `%s': %s" % (varname, six.text_type(e))) + + +RESOURCE_DEFAULT_PAGINATION = ['revision_start:asc', + 'started_at:asc'] + +METRIC_DEFAULT_PAGINATION = ['id:asc'] + + +def get_pagination_options(params, default): + max_limit = pecan.request.conf.api.max_limit + limit = params.pop('limit', max_limit) + marker = params.pop('marker', None) + sorts = params.pop('sort', default) + if not isinstance(sorts, list): + sorts = [sorts] + + try: + limit = PositiveNotNullInt(limit) + except ValueError: + abort(400, "Invalid 'limit' value: %s" % params.get('limit')) + + limit = min(limit, max_limit) + + return {'limit': limit, + 'marker': marker, + 'sorts': sorts} + + +def ValidAggMethod(value): + value = six.text_type(value) + if value in archive_policy.ArchivePolicy.VALID_AGGREGATION_METHODS_VALUES: + return value + raise ValueError("Invalid aggregation method") + + +class ArchivePolicyController(rest.RestController): + def __init__(self, archive_policy): + self.archive_policy = archive_policy + + @pecan.expose('json') + def get(self): + ap = pecan.request.indexer.get_archive_policy(self.archive_policy) + if ap: + enforce("get archive policy", ap) + return ap + abort(404, indexer.NoSuchArchivePolicy(self.archive_policy)) + + @pecan.expose('json') + def patch(self): + ap = pecan.request.indexer.get_archive_policy(self.archive_policy) 
+ if not ap: + abort(404, indexer.NoSuchArchivePolicy(self.archive_policy)) + enforce("update archive policy", ap) + + body = deserialize_and_validate(voluptuous.Schema({ + voluptuous.Required("definition"): + voluptuous.All([{ + "granularity": Timespan, + "points": PositiveNotNullInt, + "timespan": Timespan}], voluptuous.Length(min=1)), + })) + # Validate the data + try: + ap_items = [archive_policy.ArchivePolicyItem(**item) for item in + body['definition']] + except ValueError as e: + abort(400, e) + + try: + return pecan.request.indexer.update_archive_policy( + self.archive_policy, ap_items) + except indexer.UnsupportedArchivePolicyChange as e: + abort(400, e) + + @pecan.expose() + def delete(self): + # NOTE(jd) I don't think there's any point in fetching and passing the + # archive policy here, as the rule is probably checking the actual role + # of the user, not the content of the AP. + enforce("delete archive policy", {}) + try: + pecan.request.indexer.delete_archive_policy(self.archive_policy) + except indexer.NoSuchArchivePolicy as e: + abort(404, e) + except indexer.ArchivePolicyInUse as e: + abort(400, e) + + +class ArchivePoliciesController(rest.RestController): + @pecan.expose() + def _lookup(self, archive_policy, *remainder): + return ArchivePolicyController(archive_policy), remainder + + @pecan.expose('json') + def post(self): + # NOTE(jd): Initialize this one at run-time because we rely on conf + conf = pecan.request.conf + enforce("create archive policy", {}) + ArchivePolicySchema = voluptuous.Schema({ + voluptuous.Required("name"): six.text_type, + voluptuous.Required("back_window", default=0): PositiveOrNullInt, + voluptuous.Required( + "aggregation_methods", + default=set(conf.archive_policy.default_aggregation_methods)): + [ValidAggMethod], + voluptuous.Required("definition"): + voluptuous.All([{ + "granularity": Timespan, + "points": PositiveNotNullInt, + "timespan": Timespan, + }], voluptuous.Length(min=1)), + }) + + body = 
deserialize_and_validate(ArchivePolicySchema) + # Validate the data + try: + ap = archive_policy.ArchivePolicy.from_dict(body) + except ValueError as e: + abort(400, e) + enforce("create archive policy", ap) + try: + ap = pecan.request.indexer.create_archive_policy(ap) + except indexer.ArchivePolicyAlreadyExists as e: + abort(409, e) + + location = "/archive_policy/" + ap.name + set_resp_location_hdr(location) + pecan.response.status = 201 + return ap + + @pecan.expose('json') + def get_all(self): + enforce("list archive policy", {}) + return pecan.request.indexer.list_archive_policies() + + +class ArchivePolicyRulesController(rest.RestController): + @pecan.expose('json') + def post(self): + enforce("create archive policy rule", {}) + ArchivePolicyRuleSchema = voluptuous.Schema({ + voluptuous.Required("name"): six.text_type, + voluptuous.Required("metric_pattern"): six.text_type, + voluptuous.Required("archive_policy_name"): six.text_type, + }) + + body = deserialize_and_validate(ArchivePolicyRuleSchema) + enforce("create archive policy rule", body) + try: + ap = pecan.request.indexer.create_archive_policy_rule( + body['name'], body['metric_pattern'], + body['archive_policy_name'] + ) + except indexer.ArchivePolicyRuleAlreadyExists as e: + abort(409, e) + + location = "/archive_policy_rule/" + ap.name + set_resp_location_hdr(location) + pecan.response.status = 201 + return ap + + @pecan.expose('json') + def get_one(self, name): + ap = pecan.request.indexer.get_archive_policy_rule(name) + if ap: + enforce("get archive policy rule", ap) + return ap + abort(404, indexer.NoSuchArchivePolicyRule(name)) + + @pecan.expose('json') + def get_all(self): + enforce("list archive policy rule", {}) + return pecan.request.indexer.list_archive_policy_rules() + + @pecan.expose() + def delete(self, name): + # NOTE(jd) I don't think there's any point in fetching and passing the + # archive policy rule here, as the rule is probably checking the actual + # role of the user, not the 
content of the AP rule. + enforce("delete archive policy rule", {}) + try: + pecan.request.indexer.delete_archive_policy_rule(name) + except indexer.NoSuchArchivePolicyRule as e: + abort(404, e) + + +def MeasuresListSchema(measures): + try: + times = utils.to_timestamps((m['timestamp'] for m in measures)) + except TypeError: + abort(400, "Invalid format for measures") + except ValueError as e: + abort(400, "Invalid input for timestamp: %s" % e) + + try: + values = [float(i['value']) for i in measures] + except Exception: + abort(400, "Invalid input for a value") + + return (storage.Measure(t, v) for t, v in six.moves.zip( + times.tolist(), values)) + + +def TransformSchema(transform): + try: + return transformation.parse(transform) + except transformation.TransformationParserError as e: + abort(400, str(e)) + + +class MetricController(rest.RestController): + _custom_actions = { + 'measures': ['POST', 'GET'] + } + + def __init__(self, metric): + self.metric = metric + mgr = extension.ExtensionManager(namespace='gnocchi.aggregates', + invoke_on_load=True) + self.custom_agg = dict((x.name, x.obj) for x in mgr) + + def enforce_metric(self, rule): + enforce(rule, json.to_primitive(self.metric)) + + @pecan.expose('json') + def get_all(self): + self.enforce_metric("get metric") + return self.metric + + @pecan.expose() + def post_measures(self): + self.enforce_metric("post measures") + params = deserialize() + if not isinstance(params, list): + abort(400, "Invalid input for measures") + if params: + pecan.request.incoming.add_measures( + self.metric, MeasuresListSchema(params)) + pecan.response.status = 202 + + @pecan.expose('json') + def get_measures(self, start=None, stop=None, aggregation='mean', + granularity=None, resample=None, refresh=False, + transform=None, + **param): + self.enforce_metric("get measures") + if not (aggregation + in archive_policy.ArchivePolicy.VALID_AGGREGATION_METHODS + or aggregation in self.custom_agg): + msg = '''Invalid aggregation value 
%(agg)s, must be one of %(std)s + or %(custom)s''' + abort(400, msg % dict( + agg=aggregation, + std=archive_policy.ArchivePolicy.VALID_AGGREGATION_METHODS, + custom=str(self.custom_agg.keys()))) + + if start is not None: + try: + start = utils.to_timestamp(start) + except Exception: + abort(400, "Invalid value for start") + + if stop is not None: + try: + stop = utils.to_timestamp(stop) + except Exception: + abort(400, "Invalid value for stop") + + if transform is not None: + transform = TransformSchema(transform) + + if resample: + # TODO(sileht): This have to be deprecated at some point + if transform: + abort(400, 'transform and resample are exclusive') + + if not granularity: + abort(400, 'A granularity must be specified to resample') + try: + resample = utils.to_timespan(resample) + except ValueError as e: + abort(400, e) + transform = [carbonara.Transformation("resample", (resample,))] + + if (strtobool("refresh", refresh) and + pecan.request.incoming.has_unprocessed(self.metric)): + try: + pecan.request.storage.refresh_metric( + pecan.request.indexer, pecan.request.incoming, self.metric, + pecan.request.conf.api.refresh_timeout) + except storage.SackLockTimeoutError as e: + abort(503, e) + try: + if aggregation in self.custom_agg: + warnings.warn("moving_average aggregation is deprecated.", + category=DeprecationWarning) + return self.custom_agg[aggregation].compute( + pecan.request.storage, self.metric, + start, stop, **param) + return pecan.request.storage.get_measures( + self.metric, start, stop, aggregation, + utils.to_timespan(granularity) + if granularity is not None else None, + transform) + except (storage.MetricDoesNotExist, + storage.GranularityDoesNotExist, + storage.AggregationDoesNotExist) as e: + abort(404, e) + except aggregates.CustomAggFailure as e: + abort(400, e) + + @pecan.expose() + def delete(self): + self.enforce_metric("delete metric") + try: + pecan.request.indexer.delete_metric(self.metric.id) + except indexer.NoSuchMetric as e: + 
abort(404, e) + + +class MetricsController(rest.RestController): + + @pecan.expose() + def _lookup(self, id, *remainder): + try: + metric_id = uuid.UUID(id) + except ValueError: + abort(404, indexer.NoSuchMetric(id)) + metrics = pecan.request.indexer.list_metrics( + id=metric_id, details=True) + if not metrics: + abort(404, indexer.NoSuchMetric(id)) + return MetricController(metrics[0]), remainder + + _MetricSchema = voluptuous.Schema({ + "archive_policy_name": six.text_type, + "name": six.text_type, + voluptuous.Optional("unit"): + voluptuous.All(six.text_type, voluptuous.Length(max=31)), + }) + + # NOTE(jd) Define this method as it was a voluptuous schema – it's just a + # smarter version of a voluptuous schema, no? + @classmethod + def MetricSchema(cls, definition): + # First basic validation + definition = cls._MetricSchema(definition) + archive_policy_name = definition.get('archive_policy_name') + + name = definition.get('name') + if name and '/' in name: + abort(400, "'/' is not supported in metric name") + if archive_policy_name is None: + try: + ap = pecan.request.indexer.get_archive_policy_for_metric(name) + except indexer.NoArchivePolicyRuleMatch: + # NOTE(jd) Since this is a schema-like function, we + # should/could raise ValueError, but if we do so, voluptuous + # just returns a "invalid value" with no useful message – so we + # prefer to use abort() to make sure the user has the right + # error message + abort(400, "No archive policy name specified " + "and no archive policy rule found matching " + "the metric name %s" % name) + else: + definition['archive_policy_name'] = ap.name + + creator = pecan.request.auth_helper.get_current_user( + pecan.request) + + enforce("create metric", { + "creator": creator, + "archive_policy_name": archive_policy_name, + "name": name, + "unit": definition.get('unit'), + }) + + return definition + + @pecan.expose('json') + def post(self): + creator = pecan.request.auth_helper.get_current_user( + pecan.request) + body = 
deserialize_and_validate(self.MetricSchema) + try: + m = pecan.request.indexer.create_metric( + uuid.uuid4(), + creator, + name=body.get('name'), + unit=body.get('unit'), + archive_policy_name=body['archive_policy_name']) + except indexer.NoSuchArchivePolicy as e: + abort(400, e) + set_resp_location_hdr("/metric/" + str(m.id)) + pecan.response.status = 201 + return m + + MetricListSchema = voluptuous.Schema({ + "user_id": six.text_type, + "project_id": six.text_type, + "creator": six.text_type, + "limit": six.text_type, + "name": six.text_type, + "id": six.text_type, + "unit": six.text_type, + "archive_policy_name": six.text_type, + "status": voluptuous.Any("active", "delete"), + "sort": voluptuous.Any([six.text_type], six.text_type), + "marker": six.text_type, + }) + + @classmethod + @pecan.expose('json') + def get_all(cls, **kwargs): + kwargs = cls.MetricListSchema(kwargs) + + # Compat with old user/project API + provided_user_id = kwargs.pop('user_id', None) + provided_project_id = kwargs.pop('project_id', None) + if provided_user_id is None and provided_project_id is None: + provided_creator = kwargs.pop('creator', None) + else: + provided_creator = ( + (provided_user_id or "") + + ":" + + (provided_project_id or "") + ) + try: + enforce("list all metric", {}) + except webob.exc.HTTPForbidden: + enforce("list metric", {}) + creator = pecan.request.auth_helper.get_current_user( + pecan.request) + if provided_creator and creator != provided_creator: + abort(403, "Insufficient privileges to filter by user/project") + attr_filter = {} + if provided_creator is not None: + attr_filter['creator'] = provided_creator + attr_filter.update(get_pagination_options( + kwargs, METRIC_DEFAULT_PAGINATION)) + attr_filter.update(kwargs) + try: + return pecan.request.indexer.list_metrics(**attr_filter) + except indexer.IndexerException as e: + abort(400, e) + + +_MetricsSchema = voluptuous.Schema({ + six.text_type: voluptuous.Any(utils.UUID, + MetricsController.MetricSchema), +}) 
+ + +def MetricsSchema(data): + # NOTE(jd) Before doing any kind of validation, copy the metric name + # into the metric definition. This is required so we have the name + # available when doing the metric validation with its own MetricSchema, + # and so we can do things such as applying archive policy rules. + if isinstance(data, dict): + for metric_name, metric_def in six.iteritems(data): + if isinstance(metric_def, dict): + metric_def['name'] = metric_name + return _MetricsSchema(data) + + +class NamedMetricController(rest.RestController): + def __init__(self, resource_id, resource_type): + self.resource_id = resource_id + self.resource_type = resource_type + + @pecan.expose() + def _lookup(self, name, *remainder): + details = True if pecan.request.method == 'GET' else False + m = pecan.request.indexer.list_metrics(details=details, + name=name, + resource_id=self.resource_id) + if m: + return MetricController(m[0]), remainder + + resource = pecan.request.indexer.get_resource(self.resource_type, + self.resource_id) + if resource: + abort(404, indexer.NoSuchMetric(name)) + else: + abort(404, indexer.NoSuchResource(self.resource_id)) + + @pecan.expose('json') + def post(self): + resource = pecan.request.indexer.get_resource( + self.resource_type, self.resource_id) + if not resource: + abort(404, indexer.NoSuchResource(self.resource_id)) + enforce("update resource", resource) + metrics = deserialize_and_validate(MetricsSchema) + try: + r = pecan.request.indexer.update_resource( + self.resource_type, + self.resource_id, + metrics=metrics, + append_metrics=True, + create_revision=False) + except (indexer.NoSuchMetric, + indexer.NoSuchArchivePolicy, + ValueError) as e: + abort(400, e) + except indexer.NamedMetricAlreadyExists as e: + abort(409, e) + except indexer.NoSuchResource as e: + abort(404, e) + + return r.metrics + + @pecan.expose('json') + def get_all(self): + resource = pecan.request.indexer.get_resource( + self.resource_type, self.resource_id) + if not 
resource: + abort(404, indexer.NoSuchResource(self.resource_id)) + enforce("get resource", resource) + return pecan.request.indexer.list_metrics(resource_id=self.resource_id) + + +class ResourceHistoryController(rest.RestController): + def __init__(self, resource_id, resource_type): + self.resource_id = resource_id + self.resource_type = resource_type + + @pecan.expose('json') + def get(self, **kwargs): + details = get_details(kwargs) + pagination_opts = get_pagination_options( + kwargs, RESOURCE_DEFAULT_PAGINATION) + + resource = pecan.request.indexer.get_resource( + self.resource_type, self.resource_id) + if not resource: + abort(404, indexer.NoSuchResource(self.resource_id)) + + enforce("get resource", resource) + + try: + # FIXME(sileht): next API version should returns + # {'resources': [...], 'links': [ ... pagination rel ...]} + return pecan.request.indexer.list_resources( + self.resource_type, + attribute_filter={"=": {"id": self.resource_id}}, + details=details, + history=True, + **pagination_opts + ) + except indexer.IndexerException as e: + abort(400, e) + + +def etag_precondition_check(obj): + etag, lastmodified = obj.etag, obj.lastmodified + # NOTE(sileht): Checks and order come from rfc7232 + # in webob, the '*' and the absent of the header is handled by + # if_match.__contains__() and if_none_match.__contains__() + # and are identique... 
+ if etag not in pecan.request.if_match: + abort(412) + elif (not pecan.request.environ.get("HTTP_IF_MATCH") + and pecan.request.if_unmodified_since + and pecan.request.if_unmodified_since < lastmodified): + abort(412) + + if etag in pecan.request.if_none_match: + if pecan.request.method in ['GET', 'HEAD']: + abort(304) + else: + abort(412) + elif (not pecan.request.environ.get("HTTP_IF_NONE_MATCH") + and pecan.request.if_modified_since + and (pecan.request.if_modified_since >= + lastmodified) + and pecan.request.method in ['GET', 'HEAD']): + abort(304) + + +def etag_set_headers(obj): + pecan.response.etag = obj.etag + pecan.response.last_modified = obj.lastmodified + + +def AttributesPath(value): + if value.startswith("/attributes"): + return value + raise ValueError("Only attributes can be modified") + + +ResourceTypeJsonPatchSchema = voluptuous.Schema([{ + "op": voluptuous.Any("add", "remove"), + "path": AttributesPath, + voluptuous.Optional("value"): dict, +}]) + + +class ResourceTypeController(rest.RestController): + def __init__(self, name): + self._name = name + + @pecan.expose('json') + def get(self): + try: + rt = pecan.request.indexer.get_resource_type(self._name) + except indexer.NoSuchResourceType as e: + abort(404, e) + enforce("get resource type", rt) + return rt + + @pecan.expose('json') + def patch(self): + # NOTE(sileht): should we check for "application/json-patch+json" + # Content-Type ? 
+ + try: + rt = pecan.request.indexer.get_resource_type(self._name) + except indexer.NoSuchResourceType as e: + abort(404, e) + enforce("update resource type", rt) + + # Ensure this is a valid jsonpatch dict + patch = deserialize_and_validate( + ResourceTypeJsonPatchSchema, + expected_content_types=["application/json-patch+json"]) + + # Add new attributes to the resource type + rt_json_current = rt.jsonify() + try: + rt_json_next = jsonpatch.apply_patch(rt_json_current, patch) + except jsonpatch.JsonPatchException as e: + abort(400, e) + del rt_json_next['state'] + + # Validate that the whole new resource_type is valid + schema = pecan.request.indexer.get_resource_type_schema() + try: + rt_json_next = voluptuous.Schema(schema.for_update, required=True)( + rt_json_next) + except voluptuous.Error as e: + abort(400, "Invalid input: %s" % e) + + # Get only newly formatted and deleted attributes + add_attrs = {k: v for k, v in rt_json_next["attributes"].items() + if k not in rt_json_current["attributes"]} + del_attrs = [k for k in rt_json_current["attributes"] + if k not in rt_json_next["attributes"]] + + if not add_attrs and not del_attrs: + # NOTE(sileht): just returns the resource, the asked changes + # just do nothing + return rt + + try: + add_attrs = schema.attributes_from_dict(add_attrs) + except resource_type.InvalidResourceAttribute as e: + abort(400, "Invalid input: %s" % e) + + try: + return pecan.request.indexer.update_resource_type( + self._name, add_attributes=add_attrs, + del_attributes=del_attrs) + except indexer.NoSuchResourceType as e: + abort(400, e) + + @pecan.expose() + def delete(self): + try: + pecan.request.indexer.get_resource_type(self._name) + except indexer.NoSuchResourceType as e: + abort(404, e) + enforce("delete resource type", resource_type) + try: + pecan.request.indexer.delete_resource_type(self._name) + except (indexer.NoSuchResourceType, + indexer.ResourceTypeInUse) as e: + abort(400, e) + + +class 
ResourceTypesController(rest.RestController): + + @pecan.expose() + def _lookup(self, name, *remainder): + return ResourceTypeController(name), remainder + + @pecan.expose('json') + def post(self): + schema = pecan.request.indexer.get_resource_type_schema() + body = deserialize_and_validate(schema) + body["state"] = "creating" + + try: + rt = schema.resource_type_from_dict(**body) + except resource_type.InvalidResourceAttribute as e: + abort(400, "Invalid input: %s" % e) + + enforce("create resource type", body) + try: + rt = pecan.request.indexer.create_resource_type(rt) + except indexer.ResourceTypeAlreadyExists as e: + abort(409, e) + set_resp_location_hdr("/resource_type/" + rt.name) + pecan.response.status = 201 + return rt + + @pecan.expose('json') + def get_all(self, **kwargs): + enforce("list resource type", {}) + try: + return pecan.request.indexer.list_resource_types() + except indexer.IndexerException as e: + abort(400, e) + + +def ResourceSchema(schema): + base_schema = { + voluptuous.Optional('started_at'): utils.to_datetime, + voluptuous.Optional('ended_at'): utils.to_datetime, + voluptuous.Optional('user_id'): voluptuous.Any(None, six.text_type), + voluptuous.Optional('project_id'): voluptuous.Any(None, six.text_type), + voluptuous.Optional('metrics'): MetricsSchema, + } + base_schema.update(schema) + return base_schema + + +class ResourceController(rest.RestController): + + def __init__(self, resource_type, id): + self._resource_type = resource_type + creator = pecan.request.auth_helper.get_current_user( + pecan.request) + try: + self.id = utils.ResourceUUID(id, creator) + except ValueError: + abort(404, indexer.NoSuchResource(id)) + self.metric = NamedMetricController(str(self.id), self._resource_type) + self.history = ResourceHistoryController(str(self.id), + self._resource_type) + + @pecan.expose('json') + def get(self): + resource = pecan.request.indexer.get_resource( + self._resource_type, self.id, with_metrics=True) + if resource: + 
enforce("get resource", resource) + etag_precondition_check(resource) + etag_set_headers(resource) + return resource + abort(404, indexer.NoSuchResource(self.id)) + + @pecan.expose('json') + def patch(self): + resource = pecan.request.indexer.get_resource( + self._resource_type, self.id, with_metrics=True) + if not resource: + abort(404, indexer.NoSuchResource(self.id)) + enforce("update resource", resource) + etag_precondition_check(resource) + + body = deserialize_and_validate( + schema_for(self._resource_type), + required=False) + + if len(body) == 0: + etag_set_headers(resource) + return resource + + for k, v in six.iteritems(body): + if k != 'metrics' and getattr(resource, k) != v: + create_revision = True + break + else: + if 'metrics' not in body: + # No need to go further, we assume the db resource + # doesn't change between the get and update + return resource + create_revision = False + + try: + resource = pecan.request.indexer.update_resource( + self._resource_type, + self.id, + create_revision=create_revision, + **body) + except (indexer.NoSuchMetric, + indexer.NoSuchArchivePolicy, + ValueError) as e: + abort(400, e) + except indexer.NoSuchResource as e: + abort(404, e) + etag_set_headers(resource) + return resource + + @pecan.expose() + def delete(self): + resource = pecan.request.indexer.get_resource( + self._resource_type, self.id) + if not resource: + abort(404, indexer.NoSuchResource(self.id)) + enforce("delete resource", resource) + etag_precondition_check(resource) + try: + pecan.request.indexer.delete_resource(self.id) + except indexer.NoSuchResource as e: + abort(404, e) + + +def schema_for(resource_type): + resource_type = pecan.request.indexer.get_resource_type(resource_type) + return ResourceSchema(resource_type.schema) + + +def ResourceUUID(value, creator): + try: + return utils.ResourceUUID(value, creator) + except ValueError as e: + raise voluptuous.Invalid(e) + + +def ResourceID(value, creator): + return (six.text_type(value), 
ResourceUUID(value, creator)) + + +class ResourcesController(rest.RestController): + def __init__(self, resource_type): + self._resource_type = resource_type + + @pecan.expose() + def _lookup(self, id, *remainder): + return ResourceController(self._resource_type, id), remainder + + @pecan.expose('json') + def post(self): + # NOTE(sileht): we need to copy the dict because when change it + # and we don't want that next patch call have the "id" + schema = dict(schema_for(self._resource_type)) + creator = pecan.request.auth_helper.get_current_user( + pecan.request) + schema["id"] = functools.partial(ResourceID, creator=creator) + + body = deserialize_and_validate(schema) + body["original_resource_id"], body["id"] = body["id"] + + target = { + "resource_type": self._resource_type, + } + target.update(body) + enforce("create resource", target) + rid = body['id'] + del body['id'] + try: + resource = pecan.request.indexer.create_resource( + self._resource_type, rid, creator, + **body) + except (ValueError, + indexer.NoSuchMetric, + indexer.NoSuchArchivePolicy) as e: + abort(400, e) + except indexer.ResourceAlreadyExists as e: + abort(409, e) + set_resp_location_hdr("/resource/" + + self._resource_type + "/" + + six.text_type(resource.id)) + etag_set_headers(resource) + pecan.response.status = 201 + return resource + + @pecan.expose('json') + def get_all(self, **kwargs): + details = get_details(kwargs) + history = get_history(kwargs) + pagination_opts = get_pagination_options( + kwargs, RESOURCE_DEFAULT_PAGINATION) + policy_filter = pecan.request.auth_helper.get_resource_policy_filter( + pecan.request, "list resource", self._resource_type) + + try: + # FIXME(sileht): next API version should returns + # {'resources': [...], 'links': [ ... 
pagination rel ...]} + return pecan.request.indexer.list_resources( + self._resource_type, + attribute_filter=policy_filter, + details=details, + history=history, + **pagination_opts + ) + except indexer.IndexerException as e: + abort(400, e) + + @pecan.expose('json') + def delete(self, **kwargs): + # NOTE(sileht): Don't allow empty filter, this is going to delete + # the entire database. + if pecan.request.body: + attr_filter = deserialize_and_validate(ResourceSearchSchema) + elif kwargs.get("filter"): + attr_filter = QueryStringSearchAttrFilter.parse(kwargs["filter"]) + else: + attr_filter = None + + # the voluptuous checks everything, but it is better to + # have this here. + if not attr_filter: + abort(400, "caution: the query can not be empty, or it will \ + delete entire database") + + policy_filter = pecan.request.auth_helper.get_resource_policy_filter( + pecan.request, + "delete resources", self._resource_type) + + if policy_filter: + attr_filter = {"and": [policy_filter, attr_filter]} + + try: + delete_num = pecan.request.indexer.delete_resources( + self._resource_type, attribute_filter=attr_filter) + except indexer.IndexerException as e: + abort(400, e) + + return {"deleted": delete_num} + + +class ResourcesByTypeController(rest.RestController): + @pecan.expose('json') + def get_all(self): + return dict( + (rt.name, + pecan.request.application_url + '/resource/' + rt.name) + for rt in pecan.request.indexer.list_resource_types()) + + @pecan.expose() + def _lookup(self, resource_type, *remainder): + try: + pecan.request.indexer.get_resource_type(resource_type) + except indexer.NoSuchResourceType as e: + abort(404, e) + return ResourcesController(resource_type), remainder + + +class QueryStringSearchAttrFilter(object): + uninary_operators = ("not", ) + binary_operator = (u">=", u"<=", u"!=", u">", u"<", u"=", u"==", u"eq", + u"ne", u"lt", u"gt", u"ge", u"le", u"in", u"like", u"≠", + u"≥", u"≤") + multiple_operators = (u"and", u"or", u"∧", u"∨") + + operator 
= pyparsing.Regex(u"|".join(binary_operator)) + null = pyparsing.Regex("None|none|null").setParseAction( + pyparsing.replaceWith(None)) + boolean = "False|True|false|true" + boolean = pyparsing.Regex(boolean).setParseAction( + lambda t: t[0].lower() == "true") + hex_string = lambda n: pyparsing.Word(pyparsing.hexnums, exact=n) + uuid_string = pyparsing.Combine( + hex_string(8) + (pyparsing.Optional("-") + hex_string(4)) * 3 + + pyparsing.Optional("-") + hex_string(12)) + number = r"[+-]?\d+(:?\.\d*)?(:?[eE][+-]?\d+)?" + number = pyparsing.Regex(number).setParseAction(lambda t: float(t[0])) + identifier = pyparsing.Word(pyparsing.alphas, pyparsing.alphanums + "_") + quoted_string = pyparsing.QuotedString('"') | pyparsing.QuotedString("'") + comparison_term = pyparsing.Forward() + in_list = pyparsing.Group( + pyparsing.Suppress('[') + + pyparsing.Optional(pyparsing.delimitedList(comparison_term)) + + pyparsing.Suppress(']'))("list") + comparison_term << (null | boolean | uuid_string | identifier | number | + quoted_string | in_list) + condition = pyparsing.Group(comparison_term + operator + comparison_term) + + expr = pyparsing.infixNotation(condition, [ + ("not", 1, pyparsing.opAssoc.RIGHT, ), + ("and", 2, pyparsing.opAssoc.LEFT, ), + ("∧", 2, pyparsing.opAssoc.LEFT, ), + ("or", 2, pyparsing.opAssoc.LEFT, ), + ("∨", 2, pyparsing.opAssoc.LEFT, ), + ]) + + @classmethod + def _parsed_query2dict(cls, parsed_query): + result = None + while parsed_query: + part = parsed_query.pop() + if part in cls.binary_operator: + result = {part: {parsed_query.pop(): result}} + + elif part in cls.multiple_operators: + if result.get(part): + result[part].append( + cls._parsed_query2dict(parsed_query.pop())) + else: + result = {part: [result]} + + elif part in cls.uninary_operators: + result = {part: result} + elif isinstance(part, pyparsing.ParseResults): + kind = part.getName() + if kind == "list": + res = part.asList() + else: + res = cls._parsed_query2dict(part) + if result is None: 
+ result = res + elif isinstance(result, dict): + list(result.values())[0].append(res) + else: + result = part + return result + + @classmethod + def _parse(cls, query): + try: + parsed_query = cls.expr.parseString(query, parseAll=True)[0] + except pyparsing.ParseException as e: + raise abort(400, "Invalid filter: %s" % six.text_type(e)) + return cls._parsed_query2dict(parsed_query) + + @classmethod + def parse(cls, query): + attr_filter = cls._parse(query) + return validate(ResourceSearchSchema, attr_filter, required=True) + + +def ResourceSearchSchema(v): + return _ResourceSearchSchema()(v) + + +# NOTE(sileht): indexer will cast this type to the real attribute +# type, here we just want to be sure this is not a dict or a list +ResourceSearchSchemaAttributeValue = voluptuous.Any( + six.text_type, float, int, bool, None) + + +def _ResourceSearchSchema(): + user = pecan.request.auth_helper.get_current_user( + pecan.request) + _ResourceUUID = functools.partial(ResourceUUID, creator=user) + + return voluptuous.Schema( + voluptuous.All( + voluptuous.Length(min=0, max=1), + { + voluptuous.Any( + u"=", u"==", u"eq", + u"<", u"lt", + u">", u"gt", + u"<=", u"≤", u"le", + u">=", u"≥", u"ge", + u"!=", u"≠", u"ne", + u"like" + ): voluptuous.All( + voluptuous.Length(min=1, max=1), + {"id": _ResourceUUID, + six.text_type: ResourceSearchSchemaAttributeValue}, + ), + voluptuous.Any( + u"in", + ): voluptuous.All( + voluptuous.Length(min=1, max=1), + {"id": voluptuous.All( + [_ResourceUUID], + voluptuous.Length(min=1)), + six.text_type: voluptuous.All( + [ResourceSearchSchemaAttributeValue], + voluptuous.Length(min=1))} + ), + voluptuous.Any( + u"and", u"∨", + u"or", u"∧", + ): voluptuous.All( + [ResourceSearchSchema], voluptuous.Length(min=1) + ), + u"not": ResourceSearchSchema, + } + ) + ) + + +class SearchResourceTypeController(rest.RestController): + def __init__(self, resource_type): + self._resource_type = resource_type + + def _search(self, **kwargs): + if 
pecan.request.body: + attr_filter = deserialize_and_validate(ResourceSearchSchema) + elif kwargs.get("filter"): + attr_filter = QueryStringSearchAttrFilter.parse(kwargs["filter"]) + else: + attr_filter = None + + details = get_details(kwargs) + history = get_history(kwargs) + pagination_opts = get_pagination_options( + kwargs, RESOURCE_DEFAULT_PAGINATION) + + policy_filter = pecan.request.auth_helper.get_resource_policy_filter( + pecan.request, "search resource", self._resource_type) + if policy_filter: + if attr_filter: + attr_filter = {"and": [ + policy_filter, + attr_filter + ]} + else: + attr_filter = policy_filter + + return pecan.request.indexer.list_resources( + self._resource_type, + attribute_filter=attr_filter, + details=details, + history=history, + **pagination_opts) + + @pecan.expose('json') + def post(self, **kwargs): + try: + return self._search(**kwargs) + except indexer.IndexerException as e: + abort(400, e) + + +class SearchResourceController(rest.RestController): + @pecan.expose() + def _lookup(self, resource_type, *remainder): + try: + pecan.request.indexer.get_resource_type(resource_type) + except indexer.NoSuchResourceType as e: + abort(404, e) + return SearchResourceTypeController(resource_type), remainder + + +def _MetricSearchSchema(v): + """Helper method to indirect the recursivity of the search schema""" + return SearchMetricController.MetricSearchSchema(v) + + +def _MetricSearchOperationSchema(v): + """Helper method to indirect the recursivity of the search schema""" + return SearchMetricController.MetricSearchOperationSchema(v) + + +class SearchMetricController(rest.RestController): + + MetricSearchOperationSchema = voluptuous.Schema( + voluptuous.All( + voluptuous.Length(min=1, max=1), + { + voluptuous.Any( + u"=", u"==", u"eq", + u"<", u"lt", + u">", u"gt", + u"<=", u"≤", u"le", + u">=", u"≥", u"ge", + u"!=", u"≠", u"ne", + u"%", u"mod", + u"+", u"add", + u"-", u"sub", + u"*", u"×", u"mul", + u"/", u"÷", u"div", + u"**", u"^", u"pow", 
+ ): voluptuous.Any( + float, int, + voluptuous.All( + [float, int, + voluptuous.Any(_MetricSearchOperationSchema)], + voluptuous.Length(min=2, max=2), + ), + ), + }, + ) + ) + + MetricSearchSchema = voluptuous.Schema( + voluptuous.Any( + MetricSearchOperationSchema, + voluptuous.All( + voluptuous.Length(min=1, max=1), + { + voluptuous.Any( + u"and", u"∨", + u"or", u"∧", + u"not", + ): [_MetricSearchSchema], + } + ) + ) + ) + + @pecan.expose('json') + def post(self, metric_id, start=None, stop=None, aggregation='mean', + granularity=None): + granularity = [utils.to_timespan(g) + for g in arg_to_list(granularity or [])] + metrics = pecan.request.indexer.list_metrics( + ids=arg_to_list(metric_id)) + + for metric in metrics: + enforce("search metric", metric) + + if not pecan.request.body: + abort(400, "No query specified in body") + + query = deserialize_and_validate(self.MetricSearchSchema) + + if start is not None: + try: + start = utils.to_timestamp(start) + except Exception: + abort(400, "Invalid value for start") + + if stop is not None: + try: + stop = utils.to_timestamp(stop) + except Exception: + abort(400, "Invalid value for stop") + + try: + return { + str(metric.id): values + for metric, values in six.iteritems( + pecan.request.storage.search_value( + metrics, query, start, stop, aggregation, + granularity + ) + ) + } + except storage.InvalidQuery as e: + abort(400, e) + except storage.GranularityDoesNotExist as e: + abort(400, e) + + +class ResourcesMetricsMeasuresBatchController(rest.RestController): + @pecan.expose('json') + def post(self, create_metrics=False): + creator = pecan.request.auth_helper.get_current_user( + pecan.request) + MeasuresBatchSchema = voluptuous.Schema( + {functools.partial(ResourceID, creator=creator): + {six.text_type: MeasuresListSchema}} + ) + + body = deserialize_and_validate(MeasuresBatchSchema) + + known_metrics = [] + unknown_metrics = [] + unknown_resources = [] + body_by_rid = {} + for original_resource_id, resource_id 
in body: + body_by_rid[resource_id] = body[(original_resource_id, + resource_id)] + names = body[(original_resource_id, resource_id)].keys() + metrics = pecan.request.indexer.list_metrics( + names=names, resource_id=resource_id) + + known_names = [m.name for m in metrics] + if strtobool("create_metrics", create_metrics): + already_exists_names = [] + for name in names: + if name not in known_names: + metric = MetricsController.MetricSchema({ + "name": name + }) + try: + m = pecan.request.indexer.create_metric( + uuid.uuid4(), + creator=creator, + resource_id=resource_id, + name=metric.get('name'), + unit=metric.get('unit'), + archive_policy_name=metric[ + 'archive_policy_name']) + except indexer.NamedMetricAlreadyExists as e: + already_exists_names.append(e.metric) + except indexer.NoSuchResource: + unknown_resources.append({ + 'resource_id': six.text_type(resource_id), + 'original_resource_id': original_resource_id}) + break + except indexer.IndexerException as e: + # This catch NoSuchArchivePolicy, which is unlikely + # be still possible + abort(400, e) + else: + known_metrics.append(m) + + if already_exists_names: + # Add metrics created in the meantime + known_names.extend(already_exists_names) + known_metrics.extend( + pecan.request.indexer.list_metrics( + names=already_exists_names, + resource_id=resource_id) + ) + + elif len(names) != len(metrics): + unknown_metrics.extend( + ["%s/%s" % (six.text_type(resource_id), m) + for m in names if m not in known_names]) + + known_metrics.extend(metrics) + + if unknown_resources: + abort(400, {"cause": "Unknown resources", + "detail": unknown_resources}) + + if unknown_metrics: + abort(400, "Unknown metrics: %s" % ", ".join( + sorted(unknown_metrics))) + + for metric in known_metrics: + enforce("post measures", metric) + + pecan.request.incoming.add_measures_batch( + dict((metric, + body_by_rid[metric.resource_id][metric.name]) + for metric in known_metrics)) + + pecan.response.status = 202 + + +class 
MetricsMeasuresBatchController(rest.RestController): + # NOTE(sileht): we don't allow to mix both formats + # to not have to deal with id collision that can + # occurs between a metric_id and a resource_id. + # Because while json allow duplicate keys in dict payload + # only the last key will be retain by json python module to + # build the python dict. + MeasuresBatchSchema = voluptuous.Schema( + {utils.UUID: MeasuresListSchema} + ) + + @pecan.expose() + def post(self): + body = deserialize_and_validate(self.MeasuresBatchSchema) + metrics = pecan.request.indexer.list_metrics(ids=body.keys()) + + if len(metrics) != len(body): + missing_metrics = sorted(set(body) - set(m.id for m in metrics)) + abort(400, "Unknown metrics: %s" % ", ".join( + six.moves.map(str, missing_metrics))) + + for metric in metrics: + enforce("post measures", metric) + + pecan.request.incoming.add_measures_batch( + dict((metric, body[metric.id]) for metric in + metrics)) + + pecan.response.status = 202 + + @staticmethod + @pecan.expose('json') + def get_all(**kwargs): + # Check RBAC policy + metric_ids = arg_to_list(kwargs.get('metric', [])) + metrics = pecan.request.indexer.list_metrics(ids=metric_ids) + missing_metric_ids = (set(metric_ids) + - set(six.text_type(m.id) for m in metrics)) + if missing_metric_ids: + abort(400, {"cause": "Unknown metrics", + "detail": list(missing_metric_ids)}) + + for metric in metrics: + enforce("get metric", metric) + + start = kwargs.get('start') + if start is not None: + try: + start = utils.to_timestamp(start) + except Exception: + abort(400, "Invalid value for start") + + stop = kwargs.get('stop') + if stop is not None: + try: + stop = utils.to_timestamp(stop) + except Exception: + abort(400, "Invalid value for stop") + + aggregation = kwargs.get('aggregation', 'mean') + if (aggregation + not in archive_policy.ArchivePolicy.VALID_AGGREGATION_METHODS): + abort( + 400, + 'Invalid aggregation value %s, must be one of %s' + % (aggregation, + 
archive_policy.ArchivePolicy.VALID_AGGREGATION_METHODS)) + + granularity = kwargs.get('granularity') + if granularity is not None: + try: + granularity = utils.to_timespan(granularity) + except ValueError as e: + abort(400, e) + + try: + return dict((str(metric.id), + pecan.request.storage.get_measures( + metric, start, stop, aggregation, granularity)) + for metric in metrics) + except (storage.GranularityDoesNotExist, + storage.AggregationDoesNotExist) as e: + abort(404, e) + + +class SearchController(object): + resource = SearchResourceController() + metric = SearchMetricController() + + +class AggregationResourceController(rest.RestController): + def __init__(self, resource_type, metric_name): + self.resource_type = resource_type + self.metric_name = metric_name + + @pecan.expose('json') + def post(self, start=None, stop=None, aggregation='mean', + reaggregation=None, granularity=None, needed_overlap=100.0, + groupby=None, fill=None, refresh=False, resample=None, + transform=None): + # First, set groupby in the right format: a sorted list of unique + # strings. + groupby = sorted(set(arg_to_list(groupby))) + + # NOTE(jd) Sort by groupby so we are sure we do not return multiple + # groups when using itertools.groupby later. 
+ try: + resources = SearchResourceTypeController( + self.resource_type)._search(sort=groupby) + except indexer.InvalidPagination: + abort(400, "Invalid groupby attribute") + except indexer.IndexerException as e: + abort(400, e) + + if resources is None: + return [] + + if not groupby: + metrics = list(filter(None, + (r.get_metric(self.metric_name) + for r in resources))) + return AggregationController.get_cross_metric_measures_from_objs( + metrics, start, stop, aggregation, reaggregation, + granularity, needed_overlap, fill, refresh, resample, + transform) + + def groupper(r): + return tuple((attr, r[attr]) for attr in groupby) + + results = [] + for key, resources in itertools.groupby(resources, groupper): + metrics = list(filter(None, + (r.get_metric(self.metric_name) + for r in resources))) + results.append({ + "group": dict(key), + "measures": AggregationController.get_cross_metric_measures_from_objs( # noqa + metrics, start, stop, aggregation, reaggregation, + granularity, needed_overlap, fill, refresh, resample, + transform) + }) + + return results + + +class AggregationController(rest.RestController): + _custom_actions = { + 'metric': ['POST', 'GET'], + } + + @pecan.expose() + def _lookup(self, object_type, resource_type, key, metric_name, + *remainder): + if object_type != "resource" or key != "metric": + # NOTE(sileht): we want the raw 404 message here + # so use directly pecan + pecan.abort(404) + try: + pecan.request.indexer.get_resource_type(resource_type) + except indexer.NoSuchResourceType as e: + abort(404, e) + return AggregationResourceController(resource_type, + metric_name), remainder + + @staticmethod + def get_cross_metric_measures_from_objs(metrics, start=None, stop=None, + aggregation='mean', + reaggregation=None, + granularity=None, + needed_overlap=100.0, fill=None, + refresh=False, resample=None, + transform=None): + try: + needed_overlap = float(needed_overlap) + except ValueError: + abort(400, 'needed_overlap must be a number') + if 
needed_overlap != 100.0 and start is None and stop is None: + abort(400, 'start and/or stop must be provided if specifying ' + 'needed_overlap') + + if start is not None: + try: + start = utils.to_timestamp(start) + except Exception: + abort(400, "Invalid value for start") + + if stop is not None: + try: + stop = utils.to_timestamp(stop) + except Exception: + abort(400, "Invalid value for stop") + + if (aggregation + not in archive_policy.ArchivePolicy.VALID_AGGREGATION_METHODS): + abort( + 400, + 'Invalid aggregation value %s, must be one of %s' + % (aggregation, + archive_policy.ArchivePolicy.VALID_AGGREGATION_METHODS)) + + for metric in metrics: + enforce("get metric", metric) + + number_of_metrics = len(metrics) + if number_of_metrics == 0: + return [] + if granularity is not None: + try: + granularity = utils.to_timespan(granularity) + except ValueError as e: + abort(400, e) + + if transform is not None: + transform = TransformSchema(transform) + + if resample: + # TODO(sileht): This have to be deprecated at some point + if transform: + abort(400, 'transform and resample are exclusive') + + if not granularity: + abort(400, 'A granularity must be specified to resample') + try: + resample = utils.to_timespan(resample) + except ValueError as e: + abort(400, e) + transform = [carbonara.Transformation("resample", (resample,))] + + if fill is not None: + if granularity is None: + abort(400, "Unable to fill without a granularity") + try: + fill = float(fill) + except ValueError as e: + if fill != 'null': + abort(400, "fill must be a float or \'null\': %s" % e) + + try: + if strtobool("refresh", refresh): + metrics_to_update = [ + m for m in metrics + if pecan.request.incoming.has_unprocessed(m)] + for m in metrics_to_update: + try: + pecan.request.storage.refresh_metric( + pecan.request.indexer, pecan.request.incoming, m, + pecan.request.conf.api.refresh_timeout) + except storage.SackLockTimeoutError as e: + abort(503, e) + if number_of_metrics == 1: + # 
NOTE(sileht): don't do the aggregation if we only have one + # metric + return pecan.request.storage.get_measures( + metrics[0], start, stop, aggregation, + granularity, transform) + return cross_metric.get_cross_metric_measures( + pecan.request.storage, + metrics, start, stop, aggregation, + reaggregation, granularity, needed_overlap, fill, + transform) + except cross_metric.MetricUnaggregatable as e: + abort(400, ("One of the metrics being aggregated doesn't have " + "matching granularity: %s") % str(e)) + except (storage.MetricDoesNotExist, + storage.GranularityDoesNotExist, + storage.AggregationDoesNotExist) as e: + abort(404, e) + + MetricIDsSchema = [utils.UUID] + + @pecan.expose('json') + def get_metric(self, metric=None, start=None, stop=None, + aggregation='mean', reaggregation=None, granularity=None, + needed_overlap=100.0, fill=None, + refresh=False, resample=None, transform=None): + if pecan.request.method == 'GET': + try: + metric_ids = voluptuous.Schema( + self.MetricIDsSchema, required=True)(arg_to_list(metric)) + except voluptuous.Error as e: + abort(400, "Invalid input: %s" % e) + else: + self._workaround_pecan_issue_88() + metric_ids = deserialize_and_validate(self.MetricIDsSchema) + + metric_ids = [six.text_type(m) for m in metric_ids] + # Check RBAC policy + metrics = pecan.request.indexer.list_metrics(ids=metric_ids) + missing_metric_ids = (set(metric_ids) + - set(six.text_type(m.id) for m in metrics)) + if missing_metric_ids: + # Return one of the missing one in the error + abort(404, storage.MetricDoesNotExist( + missing_metric_ids.pop())) + return self.get_cross_metric_measures_from_objs( + metrics, start, stop, aggregation, reaggregation, + granularity, needed_overlap, fill, refresh, resample, transform) + + post_metric = get_metric + + def _workaround_pecan_issue_88(self): + # FIXME(sileht): https://github.com/pecan/pecan/pull/88 + if pecan.request.path_info.startswith("/aggregation/resource"): + pecan.abort(405) + + +class 
CapabilityController(rest.RestController): + @staticmethod + @pecan.expose('json') + def get(): + aggregation_methods = set( + archive_policy.ArchivePolicy.VALID_AGGREGATION_METHODS) + return dict(aggregation_methods=aggregation_methods, + dynamic_aggregation_methods=[ + ext.name for ext in extension.ExtensionManager( + namespace='gnocchi.aggregates') + ]) + + +class StatusController(rest.RestController): + @staticmethod + @pecan.expose('json') + def get(details=True): + enforce("get status", {}) + try: + report = pecan.request.incoming.measures_report( + strtobool("details", details)) + except incoming.ReportGenerationError: + abort(503, 'Unable to generate status. Please retry.') + report_dict = {"storage": {"summary": report['summary']}} + if 'details' in report: + report_dict["storage"]["measures_to_process"] = report['details'] + return report_dict + + +class MetricsBatchController(object): + measures = MetricsMeasuresBatchController() + + +class ResourcesMetricsBatchController(object): + measures = ResourcesMetricsMeasuresBatchController() + + +class ResourcesBatchController(object): + metrics = ResourcesMetricsBatchController() + + +class BatchController(object): + metrics = MetricsBatchController() + resources = ResourcesBatchController() + + +class V1Controller(object): + + def __init__(self): + self.sub_controllers = { + "search": SearchController(), + "archive_policy": ArchivePoliciesController(), + "archive_policy_rule": ArchivePolicyRulesController(), + "metric": MetricsController(), + "batch": BatchController(), + "resource": ResourcesByTypeController(), + "resource_type": ResourceTypesController(), + "aggregation": AggregationController(), + "capabilities": CapabilityController(), + "status": StatusController(), + } + for name, ctrl in self.sub_controllers.items(): + setattr(self, name, ctrl) + + @pecan.expose('json') + def index(self): + return { + "version": "1.0", + "links": [ + {"rel": "self", + "href": pecan.request.application_url} + ] + [ + 
{"rel": name, + "href": pecan.request.application_url + "/" + name} + for name in sorted(self.sub_controllers) + ] + } + + +class VersionsController(object): + @staticmethod + @pecan.expose('json') + def index(): + return { + "versions": [ + { + "status": "CURRENT", + "links": [ + { + "rel": "self", + "href": pecan.request.application_url + "/v1/" + } + ], + "id": "v1.0", + "updated": "2015-03-19" + } + ] + } diff --git a/gnocchi/rest/auth_helper.py b/gnocchi/rest/auth_helper.py index 3cf3951c..f04c955e 100644 --- a/gnocchi/rest/auth_helper.py +++ b/gnocchi/rest/auth_helper.py @@ -17,7 +17,7 @@ import webob import werkzeug.http -from gnocchi import rest +from gnocchi.rest import api class KeystoneAuthHelper(object): @@ -44,7 +44,7 @@ class KeystoneAuthHelper(object): def get_resource_policy_filter(request, rule, resource_type): try: # Check if the policy allows the user to list any resource - rest.enforce(rule, { + api.enforce(rule, { "resource_type": resource_type, }) except webob.exc.HTTPForbidden: @@ -54,7 +54,7 @@ class KeystoneAuthHelper(object): try: # Check if the policy allows the user to list resources linked # to their project - rest.enforce(rule, { + api.enforce(rule, { "resource_type": resource_type, "project_id": project_id, }) @@ -66,7 +66,7 @@ class KeystoneAuthHelper(object): try: # Check if the policy allows the user to list resources linked # to their created_by_project - rest.enforce(rule, { + api.enforce(rule, { "resource_type": resource_type, "created_by_project_id": project_id, }) @@ -81,7 +81,7 @@ class KeystoneAuthHelper(object): if not policy_filter: # We need to have at least one policy filter in place - rest.abort(403, "Insufficient privileges") + api.abort(403, "Insufficient privileges") return {"or": policy_filter} @@ -92,7 +92,7 @@ class BasicAuthHelper(object): auth = werkzeug.http.parse_authorization_header( request.headers.get("Authorization")) if auth is None: - rest.abort(401) + api.abort(401) return auth.username def 
get_auth_info(self, request): @@ -115,7 +115,7 @@ class RemoteUserAuthHelper(object): def get_current_user(request): user = request.remote_user if user is None: - rest.abort(401) + api.abort(401) return user.decode('iso-8859-1') def get_auth_info(self, request): diff --git a/gnocchi/tests/test_rest.py b/gnocchi/tests/test_rest.py index b0599ef3..a2ddcc55 100644 --- a/gnocchi/tests/test_rest.py +++ b/gnocchi/tests/test_rest.py @@ -33,7 +33,7 @@ from testtools import testcase import webtest from gnocchi import archive_policy -from gnocchi import rest +from gnocchi.rest import api from gnocchi.rest import app from gnocchi.tests import base as tests_base from gnocchi.tests import utils as tests_utils @@ -1860,7 +1860,7 @@ class GenericResourceTest(RestTest): class QueryStringSearchAttrFilterTest(tests_base.TestCase): def _do_test(self, expr, expected): - req = rest.QueryStringSearchAttrFilter._parse(expr) + req = api.QueryStringSearchAttrFilter._parse(expr) self.assertEqual(expected, req) def test_search_query_builder(self): -- GitLab From b1ecfa80884b5a62830dc42a8f64f0380766800a Mon Sep 17 00:00:00 2001 From: gord chung Date: Wed, 6 Sep 2017 15:08:14 +0000 Subject: [PATCH 0958/1483] move api_opts so we don't load everything we shouldn't load entire api just to get opts --- gnocchi/cli.py | 4 ++-- gnocchi/opts.py | 13 +++++++++++-- gnocchi/rest/app.py | 11 ----------- 3 files changed, 13 insertions(+), 15 deletions(-) diff --git a/gnocchi/cli.py b/gnocchi/cli.py index 0dc776f5..00c68884 100644 --- a/gnocchi/cli.py +++ b/gnocchi/cli.py @@ -33,7 +33,7 @@ from gnocchi import archive_policy from gnocchi import genconfig from gnocchi import incoming from gnocchi import indexer -from gnocchi.rest import app +from gnocchi import opts from gnocchi import service from gnocchi import statsd as statsd_service from gnocchi import storage @@ -333,7 +333,7 @@ def api(): sys.argv.pop(double_dash) conf = cfg.ConfigOpts() - for opt in app.API_OPTS: + for opt in opts.API_OPTS: # 
NOTE(jd) Register the API options without a default, so they are only # used to override the one in the config file c = copy.copy(opt) diff --git a/gnocchi/opts.py b/gnocchi/opts.py index e0ec719b..2347123e 100644 --- a/gnocchi/opts.py +++ b/gnocchi/opts.py @@ -22,7 +22,6 @@ from oslo_middleware import cors import gnocchi.archive_policy import gnocchi.indexer -import gnocchi.rest.app import gnocchi.storage import gnocchi.storage.ceph import gnocchi.storage.file @@ -59,6 +58,16 @@ for opt in _INCOMING_OPTS: opt.default = '${storage.%s}' % opt.name +API_OPTS = ( + cfg.HostAddressOpt('host', + default="0.0.0.0", + help="Host to listen on"), + cfg.PortOpt('port', + default=8041, + help="Port to listen on"), +) + + _cli_options = ( cfg.BoolOpt( 'debug', @@ -156,7 +165,7 @@ def list_opts(): default=10, min=0, help='Number of seconds before timeout when attempting ' 'to force refresh of metric.'), - ) + gnocchi.rest.app.API_OPTS, + ) + API_OPTS, ), ("storage", _STORAGE_OPTS + gnocchi.storage._CARBONARA_OPTS), ("incoming", _INCOMING_OPTS), diff --git a/gnocchi/rest/app.py b/gnocchi/rest/app.py index e2bfbdbe..5581f322 100644 --- a/gnocchi/rest/app.py +++ b/gnocchi/rest/app.py @@ -18,7 +18,6 @@ import pkg_resources import uuid import daiquiri -from oslo_config import cfg from oslo_middleware import cors from oslo_policy import policy from paste import deploy @@ -37,16 +36,6 @@ from gnocchi import storage as gnocchi_storage LOG = daiquiri.getLogger(__name__) -API_OPTS = ( - cfg.HostAddressOpt('host', - default="0.0.0.0", - help="Host to listen on"), - cfg.PortOpt('port', - default=8041, - help="Port to listen on"), -) - - # Register our encoder by default for everything jsonify.jsonify.register(object)(json.to_primitive) -- GitLab From dacc8d513e778cc44110d83920cd6d926c667f47 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Mon, 11 Sep 2017 15:01:15 +0200 Subject: [PATCH 0959/1483] Add an ISSUE_TEMPLATE for users opening GitHub issues This will make sure they are reminded 
to provide accurate and complete information. --- .github/ISSUE_TEMPLATE.md | 10 ++++++++++ 1 file changed, 10 insertions(+) create mode 100644 .github/ISSUE_TEMPLATE.md diff --git a/.github/ISSUE_TEMPLATE.md b/.github/ISSUE_TEMPLATE.md new file mode 100644 index 00000000..df9cfe59 --- /dev/null +++ b/.github/ISSUE_TEMPLATE.md @@ -0,0 +1,10 @@ +Before reporting an issue on Gnocchi, please be sure to provide all necessary +information. + +### Which version of Gnocchi are you using + +### How to reproduce your problem + +### What is the result that you get + +### What is result that you expected -- GitLab From 65e65a64d064b4e378e8af053f34f77745cc8708 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Mon, 11 Sep 2017 10:27:21 +0200 Subject: [PATCH 0960/1483] rest: don't load useless resource We don't need to query the resource tables when we fetch measures. This change removes the query to the resource table in this case. --- gnocchi/rest/api.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/gnocchi/rest/api.py b/gnocchi/rest/api.py index 524ea503..1b878911 100644 --- a/gnocchi/rest/api.py +++ b/gnocchi/rest/api.py @@ -508,8 +508,11 @@ class MetricsController(rest.RestController): metric_id = uuid.UUID(id) except ValueError: abort(404, indexer.NoSuchMetric(id)) + + # NOTE(sileht): Don't get detail for measure + details = len(remainder) == 0 metrics = pecan.request.indexer.list_metrics( - id=metric_id, details=True) + id=metric_id, details=details) if not metrics: abort(404, indexer.NoSuchMetric(id)) return MetricController(metrics[0]), remainder @@ -652,7 +655,9 @@ class NamedMetricController(rest.RestController): @pecan.expose() def _lookup(self, name, *remainder): - details = True if pecan.request.method == 'GET' else False + # NOTE(sileht): We want detail only when we GET /metric/ + # and not for /metric//measures + details = pecan.request.method == 'GET' and len(remainder) == 0 m = pecan.request.indexer.list_metrics(details=details, 
name=name, resource_id=self.resource_id) -- GitLab From 086a2af4810933d2ce14198a6898783888fa2b03 Mon Sep 17 00:00:00 2001 From: gord chung Date: Mon, 11 Sep 2017 21:30:58 +0000 Subject: [PATCH 0961/1483] small spelling change --- doc/source/index.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/source/index.rst b/doc/source/index.rst index 17d29332..308683f6 100644 --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -67,7 +67,7 @@ Gnocchi vs Prometheus includes everything from polling the metrics to storing and archiving them. It offers advanced features such as alerting. -In comparison, Gnocchi does not offer polling has it prefers to leverage +In comparison, Gnocchi does not offer polling as it prefers to leverage existing solutions (e.g. `collectd `_). However, it provides high-availability and horizontal scalablity as well as multi-tenancy. -- GitLab From a2955f926c49915c28893549ab5d6c13a9db25b9 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Wed, 6 Sep 2017 15:55:32 +0200 Subject: [PATCH 0962/1483] swift: replace unused swift_preauthurl by swift_url MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit swift_preauthurl has actually never been used. Replace it with swift_url, and pass it to swiftclient to overrides Swift URL – which is what the preauthurl parameter actually does. 
--- gnocchi/common/swift.py | 1 + gnocchi/storage/swift.py | 5 +++-- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/gnocchi/common/swift.py b/gnocchi/common/swift.py index 5af262f0..ef57acdb 100644 --- a/gnocchi/common/swift.py +++ b/gnocchi/common/swift.py @@ -39,6 +39,7 @@ def get_connection(conf): os_options['region_name'] = conf.swift_region return swclient.Connection( + preauthurl=conf.swift_url, auth_version=conf.swift_auth_version, authurl=conf.swift_authurl, preauthtoken=conf.swift_preauthtoken, diff --git a/gnocchi/storage/swift.py b/gnocchi/storage/swift.py index c9ccb898..98f7cf51 100644 --- a/gnocchi/storage/swift.py +++ b/gnocchi/storage/swift.py @@ -31,8 +31,9 @@ OPTS = [ default=False, help='If True, swiftclient won\'t check for a valid SSL ' 'certificate when authenticating.'), - cfg.StrOpt('swift_preauthurl', - help='Swift pre-auth URL.'), + cfg.StrOpt('swift_url', + help='Swift URL. ' + 'If unset, it is obtained from the auth service.'), cfg.StrOpt('swift_authurl', default="http://localhost:8080/auth/v1.0", help='Swift auth URL.'), -- GitLab From 6007dca7c0a16672800aa0e9bf04e307208a2e24 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Tue, 12 Sep 2017 13:13:10 +0200 Subject: [PATCH 0963/1483] doc: enable logging By default, daiquiri output is hidden. The side effect is that some sphinx error are not printed. This change fixes that. 
--- tox.ini | 1 + 1 file changed, 1 insertion(+) diff --git a/tox.ini b/tox.ini index 8d4f495a..5fd9f048 100644 --- a/tox.ini +++ b/tox.ini @@ -137,6 +137,7 @@ basepython = python2.7 # .[postgresql,doc] # setenv = GNOCCHI_STORAGE_DEPS=file deps = .[test,file,postgresql,doc] +setenv = GNOCCHI_TEST_DEBUG=1 commands = doc8 --ignore-path doc/source/rest.rst,doc/source/comparison-table.rst doc/source pifpaf -g GNOCCHI_INDEXER_URL run postgresql -- python setup.py build_sphinx -W -- GitLab From ae767fcf142c89d9ccb27f17ca168c4f83d7b1b2 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Mon, 11 Sep 2017 15:20:32 +0200 Subject: [PATCH 0964/1483] doc: add rate of change in aggregations list This change adds 'rate:XXX' to aggregation list. Related #323 --- doc/source/rest.j2 | 3 +++ 1 file changed, 3 insertions(+) diff --git a/doc/source/rest.j2 b/doc/source/rest.j2 index ae00d42e..e005353b 100644 --- a/doc/source/rest.j2 +++ b/doc/source/rest.j2 @@ -107,6 +107,9 @@ method. It is possible to request for any other method by specifying the The list of aggregation method available is: *mean*, *sum*, *last*, *max*, *min*, *std*, *median*, *first*, *count* and *Npct* (with 0 < N < 100). +They can be prefixed by "rate:" (like rate:last) to compute the rate of change +before doing the aggregation. 
+ It's possible to provide the `granularity` argument to specify the granularity to retrieve, rather than all the granularities available: -- GitLab From 022807f7f3caecc4abf371955e23ddd544a9c629 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Mon, 11 Sep 2017 15:23:01 +0200 Subject: [PATCH 0965/1483] doc: Add subsection in many section Related #323 --- doc/source/rest.j2 | 43 ++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 40 insertions(+), 3 deletions(-) diff --git a/doc/source/rest.j2 b/doc/source/rest.j2 index e005353b..8733e505 100644 --- a/doc/source/rest.j2 +++ b/doc/source/rest.j2 @@ -57,6 +57,14 @@ using query parameters: {{ scenarios['list-metric-pagination']['doc'] }} +See also :ref:`Resource's named metrics `. + +Measures +======== + +Push and retrieve +----------------- + It is possible to send measures to the metric: {{ scenarios['post-measures']['doc'] }} @@ -70,7 +78,6 @@ status code. It is possible to provide any number of measures. needed to honor constraints defined by the archive policy used by the metric, such as the maximum timespan. - Once measures are sent, it is possible to retrieve them using *GET* on the same endpoint: @@ -115,6 +122,11 @@ to retrieve, rather than all the granularities available: {{ scenarios['get-measures-granularity']['doc'] }} +See also :ref:`Aggregation across metrics ` and :ref:`Resource's named metrics ` . + +Transformations +------------------------ + In addition to granularities defined by the archive policy, measures can be resampled to a new granularity. @@ -131,8 +143,9 @@ Supported transformations are `absolute`, `negative` and `resample(sampling-in-s Depending on the aggregation method and frequency of measures, resampled data may lack accuracy as it is working against previously aggregated data. -Measures batching -================= +Batching +-------- + It is also possible to batch measures sending, i.e. 
send several measures for different metrics in a simple call: @@ -283,6 +296,9 @@ It is possible to delete an archive policy rule: Resources ========= +Creation +-------- + Gnocchi provides the ability to store and index resources. Each resource has a type. The basic type of resources is *generic*, but more specialized subtypes also exist, especially to describe OpenStack resources. @@ -311,20 +327,32 @@ some requests: {{ scenarios['create-resource-with-new-metrics']['doc'] }} +Querying +-------- + To retrieve a resource by its URL provided by the `Location` header at creation time: {{ scenarios['get-resource-generic']['doc'] }} +Modification +------------ + It's possible to modify a resource by re-uploading it partially with the modified fields: {{ scenarios['patch-resource']['doc'] }} +History modification +-------------------- + And to retrieve its modification history: {{ scenarios['get-patched-instance-history']['doc'] }} +Deletion +-------- + It is possible to delete a resource altogether: {{ scenarios['delete-resource-generic']['doc'] }} @@ -348,6 +376,8 @@ or delete resources based on time: When a batch of resources are deleted, an attribute filter is required to avoid deletion of the entire database. +Listing +------- All resources can be listed, either by using the `generic` type that will list all types of resources, or by filtering on their resource type: @@ -374,6 +404,11 @@ revision_start time and started_at values: {{ scenarios['list-resource-generic-pagination']['doc'] }} +.. _resource-named-metrics: + +Named metrics +------------- + Each resource can be linked to any number of metrics. The `metrics` attributes is a key/value field where the key is the name of the relationship and the value is a metric: @@ -508,6 +543,8 @@ argument, and in this case the second argument passed is the value, or it. The operators or (`or` or `∨`), and (`and` or `∧`) and `not` are also supported, and take a list of arguments as parameters. +.. 
_aggregation-across-metrics: + Aggregation across metrics ========================== -- GitLab From 30b294a19895e3285dab7e393f31def4c91abfc6 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Wed, 13 Sep 2017 14:59:32 +0200 Subject: [PATCH 0966/1483] Fix old doc build The rest page of the stable branch is missing We remove retrying from our deps, but miss to put it in docs-gnocchi.xyz This change fixes that. --- tox.ini | 1 + 1 file changed, 1 insertion(+) diff --git a/tox.ini b/tox.ini index 5fd9f048..23f89071 100644 --- a/tox.ini +++ b/tox.ini @@ -149,6 +149,7 @@ deps = {[testenv:docs]deps} # for 3.x doc oslotest oslosphinx + retrying commands = pifpaf -g GNOCCHI_INDEXER_URL run postgresql -- sphinx-versioning build doc/source doc/build/html -- GitLab From 2be95704a18229e4d05e57dd3e96de489a16e61d Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Fri, 8 Sep 2017 10:27:19 +0200 Subject: [PATCH 0967/1483] rest: fix ACL enforcement for non-admin on metric list The current metric listing on /v1/metric does not enforce correctly the attribute filtering on the creator field when the user is non-admin. That means the attr_filter is empty and a non-admin can actually list all metrics for all users. This fixes that by setting the filter correctly. 
--- gnocchi/rest/api.py | 1 + gnocchi/tests/functional/gabbits/metric-list.yaml | 8 +++++++- gnocchi/tests/test_rest.py | 12 ++++++++++++ 3 files changed, 20 insertions(+), 1 deletion(-) diff --git a/gnocchi/rest/api.py b/gnocchi/rest/api.py index 1b878911..ca467a33 100644 --- a/gnocchi/rest/api.py +++ b/gnocchi/rest/api.py @@ -618,6 +618,7 @@ class MetricsController(rest.RestController): pecan.request) if provided_creator and creator != provided_creator: abort(403, "Insufficient privileges to filter by user/project") + provided_creator = creator attr_filter = {} if provided_creator is not None: attr_filter['creator'] = provided_creator diff --git a/gnocchi/tests/functional/gabbits/metric-list.yaml b/gnocchi/tests/functional/gabbits/metric-list.yaml index fe7a7ad9..7b0f8089 100644 --- a/gnocchi/tests/functional/gabbits/metric-list.yaml +++ b/gnocchi/tests/functional/gabbits/metric-list.yaml @@ -89,7 +89,7 @@ tests: - name: list metrics GET: /v1/metric response_json_paths: - $.`len`: 4 + $.`len`: 2 - name: list metrics by id GET: /v1/metric?id=$HISTORY['create metric 1'].$RESPONSE['id'] @@ -100,6 +100,9 @@ tests: - name: list metrics by name GET: /v1/metric?name=disk.io.rate + request_headers: + # User admin + authorization: "basic YWRtaW46" response_json_paths: $.`len`: 2 $[0].name: disk.io.rate @@ -116,6 +119,9 @@ tests: - name: list metrics by archive_policy GET: /v1/metric?archive_policy_name=first_archive&sort=name:desc + request_headers: + # User admin + authorization: "basic YWRtaW46" response_json_paths: $.`len`: 3 $[0].name: disk.io.rate diff --git a/gnocchi/tests/test_rest.py b/gnocchi/tests/test_rest.py index a2ddcc55..e662a7b4 100644 --- a/gnocchi/tests/test_rest.py +++ b/gnocchi/tests/test_rest.py @@ -315,6 +315,18 @@ class MetricTest(RestTest): with self.app.use_another_user(): self.app.get("/v1/metric/%s" % metric_id) + def test_list_metric_with_another_user(self): + metric_created = self.app.post_json( + "/v1/metric", + params={"archive_policy_name": 
"medium"}, + status=201) + + metric_id = metric_created.json["id"] + + with self.app.use_another_user(): + metric_list = self.app.get("/v1/metric") + self.assertNotIn(metric_id, [m["id"] for m in metric_list.json]) + def test_get_metric_with_another_user(self): result = self.app.post_json("/v1/metric", params={"archive_policy_name": "medium"}, -- GitLab From bdb9a9a94b487e2c25368359dcdb75e4e9da4511 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Wed, 13 Sep 2017 15:31:15 +0200 Subject: [PATCH 0968/1483] doc: fix formatting issues in default archive policies list --- doc/source/operating.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/doc/source/operating.rst b/doc/source/operating.rst index 9b964fae..8cf0481f 100644 --- a/doc/source/operating.rst +++ b/doc/source/operating.rst @@ -125,6 +125,7 @@ values are sent, the maximum pessimistic storage size is taken into account. * maximum estimated size per metric: 1 057 KiB - bool + * 1 second granularity over 1 year * aggregation methods used: *last* * maximum optimistic size per metric: 1 539 KiB -- GitLab From ab0c30d4a596c64f64b50f1a3c77d1534d1ca4d9 Mon Sep 17 00:00:00 2001 From: gord chung Date: Tue, 12 Sep 2017 12:57:09 +0000 Subject: [PATCH 0969/1483] add note regarding basic auth encoding basic auth is a very ambiguous term. link to what it is actually referring to. Closes #316 --- doc/source/rest.j2 | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/doc/source/rest.j2 b/doc/source/rest.j2 index 8733e505..c5dd322d 100644 --- a/doc/source/rest.j2 +++ b/doc/source/rest.j2 @@ -5,11 +5,13 @@ Authentication ============== -By default, the authentication is configured to the "basic" mode. You need to +By default, the authentication is configured to the `"basic" mode`_. You need to provide an `Authorization` header in your HTTP requests with a valid username (the password is not used). 
The "admin" username is granted all privileges, whereas any other username is recognize as having standard permissions. +.. _"basic" mode: https://tools.ietf.org/html/rfc7617 + You can customize permissions by specifying a different `policy_file` than the default one. -- GitLab From e49f31a2d33d2ba20e485fbe02a8c52f4302c539 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Wed, 13 Sep 2017 15:31:15 +0200 Subject: [PATCH 0970/1483] doc: fix formatting issues in default archive policies list (cherry picked from commit bdb9a9a94b487e2c25368359dcdb75e4e9da4511) --- doc/source/running.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/doc/source/running.rst b/doc/source/running.rst index 48c437ca..cd2d75e6 100644 --- a/doc/source/running.rst +++ b/doc/source/running.rst @@ -114,6 +114,7 @@ values are sent, the maximum pessimistic storage size is taken into account. * maximum estimated size per metric: 1 057 KiB - bool + * 1 second granularity over 1 year * aggregation methods used: *last* * maximum optimistic size per metric: 1 539 KiB -- GitLab From eca855fbaccfe75c2e4f027967774be82b3a3af0 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Fri, 8 Sep 2017 10:27:19 +0200 Subject: [PATCH 0971/1483] rest: fix ACL enforcement for non-admin on metric list The current metric listing on /v1/metric does not enforce correctly the attribute filtering on the creator field when the user is non-admin. That means the attr_filter is empty and a non-admin can actually list all metrics for all users. This fixes that by setting the filter correctly. 
(cherry picked from commit 2be95704a18229e4d05e57dd3e96de489a16e61d) --- gnocchi/rest/__init__.py | 1 + gnocchi/tests/functional/gabbits/metric-list.yaml | 8 +++++++- gnocchi/tests/test_rest.py | 12 ++++++++++++ 3 files changed, 20 insertions(+), 1 deletion(-) diff --git a/gnocchi/rest/__init__.py b/gnocchi/rest/__init__.py index e741c4b4..ada780c0 100644 --- a/gnocchi/rest/__init__.py +++ b/gnocchi/rest/__init__.py @@ -590,6 +590,7 @@ class MetricsController(rest.RestController): pecan.request) if provided_creator and creator != provided_creator: abort(403, "Insufficient privileges to filter by user/project") + provided_creator = creator attr_filter = {} if provided_creator is not None: attr_filter['creator'] = provided_creator diff --git a/gnocchi/tests/functional/gabbits/metric-list.yaml b/gnocchi/tests/functional/gabbits/metric-list.yaml index fe7a7ad9..7b0f8089 100644 --- a/gnocchi/tests/functional/gabbits/metric-list.yaml +++ b/gnocchi/tests/functional/gabbits/metric-list.yaml @@ -89,7 +89,7 @@ tests: - name: list metrics GET: /v1/metric response_json_paths: - $.`len`: 4 + $.`len`: 2 - name: list metrics by id GET: /v1/metric?id=$HISTORY['create metric 1'].$RESPONSE['id'] @@ -100,6 +100,9 @@ tests: - name: list metrics by name GET: /v1/metric?name=disk.io.rate + request_headers: + # User admin + authorization: "basic YWRtaW46" response_json_paths: $.`len`: 2 $[0].name: disk.io.rate @@ -116,6 +119,9 @@ tests: - name: list metrics by archive_policy GET: /v1/metric?archive_policy_name=first_archive&sort=name:desc + request_headers: + # User admin + authorization: "basic YWRtaW46" response_json_paths: $.`len`: 3 $[0].name: disk.io.rate diff --git a/gnocchi/tests/test_rest.py b/gnocchi/tests/test_rest.py index d6b78538..4e8a8b18 100644 --- a/gnocchi/tests/test_rest.py +++ b/gnocchi/tests/test_rest.py @@ -317,6 +317,18 @@ class MetricTest(RestTest): with self.app.use_another_user(): self.app.get("/v1/metric/%s" % metric_id) + def 
test_list_metric_with_another_user(self): + metric_created = self.app.post_json( + "/v1/metric", + params={"archive_policy_name": "medium"}, + status=201) + + metric_id = metric_created.json["id"] + + with self.app.use_another_user(): + metric_list = self.app.get("/v1/metric") + self.assertNotIn(metric_id, [m["id"] for m in metric_list.json]) + def test_get_metric_with_another_user(self): result = self.app.post_json("/v1/metric", params={"archive_policy_name": "medium"}, -- GitLab From 1364f26c81fa6c30d22f0311e4da1cf4e327688b Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Wed, 13 Sep 2017 18:21:03 +0200 Subject: [PATCH 0972/1483] doc: Enable warning as error on multiversion To enable warning as error, this changes * Remove empty 2.1 and 2.2 release notes * Add a hack to make pkg_resource find local entry_point * Add a script to check all versions are linked on the main page * Ensure logo is the latest --- doc/source/conf.py | 11 ++++---- doc/source/releasenotes/2.1.rst | 6 ----- doc/source/releasenotes/2.2.rst | 6 ----- doc/source/releasenotes/index.rst | 2 -- gnocchi/gendoc.py | 43 ++++++++++++++++++++++--------- tools/validate_docs.sh | 16 ++++++++++++ tox.ini | 3 +++ 7 files changed, 55 insertions(+), 32 deletions(-) delete mode 100644 doc/source/releasenotes/2.1.rst delete mode 100644 doc/source/releasenotes/2.2.rst create mode 100755 tools/validate_docs.sh diff --git a/doc/source/conf.py b/doc/source/conf.py index 9d5e962b..f852686e 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -187,11 +187,10 @@ scv_whitelist_tags = ("^$",) here = os.path.dirname(os.path.realpath(__file__)) html_static_path_abs = ",".join([os.path.join(here, p) for p in html_static_path]) -# NOTE(sileht): Override some conf for old version. Also, warning as error have -# been enable in version > 3.1. so we can remove all of this when we don't -# publish version <= 3.1.X anymore +# NOTE(sileht): Override some conf for old version. 
scv_overflow = ("-D", "html_theme=sphinx_rtd_theme", "-D", "html_theme_options.logo_only=True", - "-D", "html_logo=gnocchi-logo.png", - "-D", "html_favicon=gnocchi-icon.ico", - "-D", "html_static_path=%s" % html_static_path_abs) + "-D", "html_logo=_static/gnocchi-logo.png", + "-D", "html_favicon=_static/gnocchi-icon.ico", + "-D", "html_static_path=%s" % html_static_path_abs, + "-W") diff --git a/doc/source/releasenotes/2.1.rst b/doc/source/releasenotes/2.1.rst deleted file mode 100644 index 75b12881..00000000 --- a/doc/source/releasenotes/2.1.rst +++ /dev/null @@ -1,6 +0,0 @@ -=================================== - 2.1 Series Release Notes -=================================== - -.. release-notes:: - :branch: origin/stable/2.1 diff --git a/doc/source/releasenotes/2.2.rst b/doc/source/releasenotes/2.2.rst deleted file mode 100644 index fea024d6..00000000 --- a/doc/source/releasenotes/2.2.rst +++ /dev/null @@ -1,6 +0,0 @@ -=================================== - 2.2 Series Release Notes -=================================== - -.. release-notes:: - :branch: origin/stable/2.2 diff --git a/doc/source/releasenotes/index.rst b/doc/source/releasenotes/index.rst index 44677b75..c74aac00 100644 --- a/doc/source/releasenotes/index.rst +++ b/doc/source/releasenotes/index.rst @@ -8,5 +8,3 @@ Release Notes 4.0 3.1 3.0 - 2.2 - 2.1 diff --git a/gnocchi/gendoc.py b/gnocchi/gendoc.py index 1d27c60c..3f864362 100644 --- a/gnocchi/gendoc.py +++ b/gnocchi/gendoc.py @@ -99,28 +99,37 @@ class ScenarioList(list): multiversion_hack = """ +import shutil import subprocess import sys import os +local_branch_path = os.getcwd() srcdir = os.path.join("%s", "..", "..") os.chdir(srcdir) sys.path.insert(0, srcdir) version = sys.argv[1] -# NOTE(sileht): We delete releasenotes from old documentation -# only master will have it. 
-if os.path.exists("doc/source/releasenotes/index.rst.backup"): - os.remove("doc/source/releasenotes/index.rst") - os.rename("doc/source/releasenotes/index.rst.backup", - "doc/source/releasenotes/index.rst") - -if version not in ["", "master"] and os.path.exists("releasenotes"): - os.rename("doc/source/releasenotes/index.rst", - "doc/source/releasenotes/index.rst.backup") - with open("doc/source/releasenotes/index.rst", "w") as f: - f.write(\"\"\" +if version not in ["", "master"]: + # NOTE(sileht): Update _static files (mainly logos) + if not os.path.exists("doc/source/_static"): + os.makedirs("doc/source/_static") + for f in ("doc/source/_static/gnocchi-icon.ico", + "doc/source/_static/gnocchi-logo.png"): + if os.path.exists(f): + os.remove(f) + shutil.copy(local_branch_path + "/" + f, f) + + # NOTE(sileht): We delete releasenotes from old documentation + # only master will have it. + if (os.path.exists("releasenotes") + and os.path.exists("doc/source/releasenotes/unreleased.rst")): + shutil.rmtree("releasenotes") + shutil.move("doc/source/releasenotes", "backup") + os.makedirs("doc/source/releasenotes") + with open("doc/source/releasenotes/index.rst", "w") as f: + f.write(\"\"\" Release Notes ============= @@ -133,6 +142,16 @@ Releases notes can be found `here `_ \"\"\") + + +# NOTE(sileht): entry_points have old and new location mixed, +# We create symlink to fool pkg_resource so it will find them even +# if the new location is here. +try: + os.symlink("storage/incoming", "gnocchi/incoming") +except OSError: + pass + class FakeApp(object): def info(self, *args, **kwasrgs): pass diff --git a/tools/validate_docs.sh b/tools/validate_docs.sh new file mode 100755 index 00000000..98527ea2 --- /dev/null +++ b/tools/validate_docs.sh @@ -0,0 +1,16 @@ +#!/bin/bash + +set -e + +# NOTE(sileht): The flags -W with sphinx-versionning does not return 1 +# but when a build fail and the flags is present, the failed version does not appear +# in the version selector. 
This testchecks this. +ret=0 +for path in doc/build/html/stable*; do + version=$(basename $path) # stable_XXX + if ! grep -q $version doc/build/html/index.html ; then + echo "Version $version is missing" + ret=1 + fi +done +exit $ret diff --git a/tox.ini b/tox.ini index 23f89071..cc5ef38d 100644 --- a/tox.ini +++ b/tox.ini @@ -143,7 +143,9 @@ commands = doc8 --ignore-path doc/source/rest.rst,doc/source/comparison-table.rs [testenv:docs-gnocchi.xyz] basepython = python2.7 +whitelist_externals = bash setenv = GNOCCHI_STORAGE_DEPS=file + GNOCCHI_TEST_DEBUG=1 deps = {[testenv:docs]deps} sphinxcontrib-versioning # for 3.x doc @@ -152,6 +154,7 @@ deps = {[testenv:docs]deps} retrying commands = pifpaf -g GNOCCHI_INDEXER_URL run postgresql -- sphinx-versioning build doc/source doc/build/html + bash tools/validate_docs.sh [doc8] ignore-path = doc/source/rest.rst,doc/source/comparison-table.rst -- GitLab From 3c313cca2a7dd6bf532f8dfdd5e2ba9ca26d567f Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Sat, 16 Sep 2017 08:44:50 +0200 Subject: [PATCH 0973/1483] doc: Ensure doc/build is empty --- tox.ini | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tox.ini b/tox.ini index cc5ef38d..48a830c0 100644 --- a/tox.ini +++ b/tox.ini @@ -143,7 +143,7 @@ commands = doc8 --ignore-path doc/source/rest.rst,doc/source/comparison-table.rs [testenv:docs-gnocchi.xyz] basepython = python2.7 -whitelist_externals = bash +whitelist_externals = bash rm setenv = GNOCCHI_STORAGE_DEPS=file GNOCCHI_TEST_DEBUG=1 deps = {[testenv:docs]deps} @@ -153,6 +153,7 @@ deps = {[testenv:docs]deps} oslosphinx retrying commands = + rm -rf doc/build/html pifpaf -g GNOCCHI_INDEXER_URL run postgresql -- sphinx-versioning build doc/source doc/build/html bash tools/validate_docs.sh -- GitLab From 369d84c4467f8a435aefc1ca66b650dfd868fddf Mon Sep 17 00:00:00 2001 From: gord chung Date: Fri, 15 Sep 2017 16:06:27 +0000 Subject: [PATCH 0974/1483] split gnocchi commands we don't necessarily need to load 
the same stuff for each service. this way we can customise how we prepare each service and what libs they need. --- gnocchi/cli/__init__.py | 0 gnocchi/cli/api.py | 81 ++++++++++++++++++ gnocchi/cli/manage.py | 100 ++++++++++++++++++++++ gnocchi/{cli.py => cli/metricd.py} | 133 ----------------------------- gnocchi/cli/statsd.py | 20 +++++ gnocchi/rest/gnocchi-api | 4 +- setup.cfg | 10 +-- 7 files changed, 208 insertions(+), 140 deletions(-) create mode 100644 gnocchi/cli/__init__.py create mode 100644 gnocchi/cli/api.py create mode 100644 gnocchi/cli/manage.py rename gnocchi/{cli.py => cli/metricd.py} (68%) create mode 100644 gnocchi/cli/statsd.py diff --git a/gnocchi/cli/__init__.py b/gnocchi/cli/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/gnocchi/cli/api.py b/gnocchi/cli/api.py new file mode 100644 index 00000000..3ca9965a --- /dev/null +++ b/gnocchi/cli/api.py @@ -0,0 +1,81 @@ +# Copyright (c) 2013 Mirantis Inc. +# Copyright (c) 2015-2017 Red Hat +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+import copy +from distutils import spawn +import math +import os +import sys + +import daiquiri +from oslo_config import cfg + +from gnocchi import opts +from gnocchi import service +from gnocchi import utils + + +LOG = daiquiri.getLogger(__name__) + + +def api(): + # Compat with previous pbr script + try: + double_dash = sys.argv.index("--") + except ValueError: + double_dash = None + else: + sys.argv.pop(double_dash) + + conf = cfg.ConfigOpts() + for opt in opts.API_OPTS: + # NOTE(jd) Register the API options without a default, so they are only + # used to override the one in the config file + c = copy.copy(opt) + c.default = None + conf.register_cli_opt(c) + conf = service.prepare_service(conf=conf) + + if double_dash is not None: + # NOTE(jd) Wait to this stage to log so we're sure the logging system + # is in place + LOG.warning( + "No need to pass `--' in gnocchi-api command line anymore, " + "please remove") + + uwsgi = spawn.find_executable("uwsgi") + if not uwsgi: + LOG.error("Unable to find `uwsgi'.\n" + "Be sure it is installed and in $PATH.") + return 1 + + workers = utils.get_default_workers() + + return os.execl( + uwsgi, uwsgi, + "--http", "%s:%d" % (conf.host or conf.api.host, + conf.port or conf.api.port), + "--master", + "--enable-threads", + "--die-on-term", + # NOTE(jd) See https://github.com/gnocchixyz/gnocchi/issues/156 + "--add-header", "Connection: close", + "--processes", str(math.floor(workers * 1.5)), + "--threads", str(workers), + "--lazy-apps", + "--chdir", "/", + "--wsgi", "gnocchi.rest.wsgi", + "--pyargv", " ".join(sys.argv[1:]), + ) diff --git a/gnocchi/cli/manage.py b/gnocchi/cli/manage.py new file mode 100644 index 00000000..2f11f5f6 --- /dev/null +++ b/gnocchi/cli/manage.py @@ -0,0 +1,100 @@ +# Copyright (c) 2013 Mirantis Inc. +# Copyright (c) 2015-2017 Red Hat +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import copy +import sys + +import daiquiri +from oslo_config import cfg +import six + +from gnocchi import archive_policy +from gnocchi import genconfig +from gnocchi import incoming +from gnocchi import indexer +from gnocchi import service +from gnocchi import storage + + +LOG = daiquiri.getLogger(__name__) + + +def config_generator(): + return genconfig.prehook(None, sys.argv[1:]) + + +_SACK_NUMBER_OPT = cfg.IntOpt( + "sacks-number", min=1, max=65535, required=True, + help="Number of incoming storage sacks to create.") + + +def upgrade(): + conf = cfg.ConfigOpts() + sack_number_opt = copy.copy(_SACK_NUMBER_OPT) + sack_number_opt.default = 128 + conf.register_cli_opts([ + cfg.BoolOpt("skip-index", default=False, + help="Skip index upgrade."), + cfg.BoolOpt("skip-storage", default=False, + help="Skip storage upgrade."), + cfg.BoolOpt("skip-incoming", default=False, + help="Skip incoming storage upgrade."), + cfg.BoolOpt("skip-archive-policies-creation", default=False, + help="Skip default archive policies creation."), + sack_number_opt, + ]) + conf = service.prepare_service(conf=conf, log_to_std=True) + if not conf.skip_index: + index = indexer.get_driver(conf) + LOG.info("Upgrading indexer %s", index) + index.upgrade() + if not conf.skip_storage: + s = storage.get_driver(conf) + LOG.info("Upgrading storage %s", s) + s.upgrade() + if not conf.skip_incoming: + i = incoming.get_driver(conf) + LOG.info("Upgrading incoming storage %s", i) + i.upgrade(conf.sacks_number) + + if (not conf.skip_archive_policies_creation + and not index.list_archive_policies() + 
and not index.list_archive_policy_rules()): + if conf.skip_index: + index = indexer.get_driver(conf) + for name, ap in six.iteritems(archive_policy.DEFAULT_ARCHIVE_POLICIES): + index.create_archive_policy(ap) + index.create_archive_policy_rule("default", "*", "low") + + +def change_sack_size(): + conf = cfg.ConfigOpts() + conf.register_cli_opts([_SACK_NUMBER_OPT]) + conf = service.prepare_service(conf=conf, log_to_std=True) + s = incoming.get_driver(conf) + try: + report = s.measures_report(details=False) + except incoming.SackDetectionError: + # issue is already logged by NUM_SACKS, abort. + return + remainder = report['summary']['measures'] + if remainder: + LOG.error('Cannot change sack when non-empty backlog. Process ' + 'remaining %s measures and try again', remainder) + return + LOG.info("Changing sack size to: %s", conf.sacks_number) + old_num_sacks = s.get_storage_sacks() + s.set_storage_settings(conf.sacks_number) + s.remove_sack_group(old_num_sacks) diff --git a/gnocchi/cli.py b/gnocchi/cli/metricd.py similarity index 68% rename from gnocchi/cli.py rename to gnocchi/cli/metricd.py index 00c68884..0d82d0af 100644 --- a/gnocchi/cli.py +++ b/gnocchi/cli/metricd.py @@ -13,11 +13,6 @@ # implied. # See the License for the specific language governing permissions and # limitations under the License. 
-import copy -from distutils import spawn -import math -import os -import sys import threading import time @@ -29,13 +24,9 @@ import six import tenacity import tooz -from gnocchi import archive_policy -from gnocchi import genconfig from gnocchi import incoming from gnocchi import indexer -from gnocchi import opts from gnocchi import service -from gnocchi import statsd as statsd_service from gnocchi import storage from gnocchi import utils @@ -43,79 +34,6 @@ from gnocchi import utils LOG = daiquiri.getLogger(__name__) -def config_generator(): - return genconfig.prehook(None, sys.argv[1:]) - - -_SACK_NUMBER_OPT = cfg.IntOpt( - "sacks-number", min=1, max=65535, required=True, - help="Number of incoming storage sacks to create.") - - -def upgrade(): - conf = cfg.ConfigOpts() - sack_number_opt = copy.copy(_SACK_NUMBER_OPT) - sack_number_opt.default = 128 - conf.register_cli_opts([ - cfg.BoolOpt("skip-index", default=False, - help="Skip index upgrade."), - cfg.BoolOpt("skip-storage", default=False, - help="Skip storage upgrade."), - cfg.BoolOpt("skip-incoming", default=False, - help="Skip incoming storage upgrade."), - cfg.BoolOpt("skip-archive-policies-creation", default=False, - help="Skip default archive policies creation."), - sack_number_opt, - ]) - conf = service.prepare_service(conf=conf, log_to_std=True) - if not conf.skip_index: - index = indexer.get_driver(conf) - LOG.info("Upgrading indexer %s", index) - index.upgrade() - if not conf.skip_storage: - s = storage.get_driver(conf) - LOG.info("Upgrading storage %s", s) - s.upgrade() - if not conf.skip_incoming: - i = incoming.get_driver(conf) - LOG.info("Upgrading incoming storage %s", i) - i.upgrade(conf.sacks_number) - - if (not conf.skip_archive_policies_creation - and not index.list_archive_policies() - and not index.list_archive_policy_rules()): - if conf.skip_index: - index = indexer.get_driver(conf) - for name, ap in six.iteritems(archive_policy.DEFAULT_ARCHIVE_POLICIES): - index.create_archive_policy(ap) - 
index.create_archive_policy_rule("default", "*", "low") - - -def change_sack_size(): - conf = cfg.ConfigOpts() - conf.register_cli_opts([_SACK_NUMBER_OPT]) - conf = service.prepare_service(conf=conf, log_to_std=True) - s = incoming.get_driver(conf) - try: - report = s.measures_report(details=False) - except incoming.SackDetectionError: - # issue is already logged by NUM_SACKS, abort. - return - remainder = report['summary']['measures'] - if remainder: - LOG.error('Cannot change sack when non-empty backlog. Process ' - 'remaining %s measures and try again', remainder) - return - LOG.info("Changing sack size to: %s", conf.sacks_number) - old_num_sacks = s.get_storage_sacks() - s.set_storage_settings(conf.sacks_number) - s.remove_sack_group(old_num_sacks) - - -def statsd(): - statsd_service.start() - - # Retry with exponential backoff for up to 1 minute _wait_exponential = tenacity.wait_exponential(multiplier=0.5, max=60) @@ -323,57 +241,6 @@ def metricd_tester(conf): list(metrics)[:conf.stop_after_processing_metrics], True) -def api(): - # Compat with previous pbr script - try: - double_dash = sys.argv.index("--") - except ValueError: - double_dash = None - else: - sys.argv.pop(double_dash) - - conf = cfg.ConfigOpts() - for opt in opts.API_OPTS: - # NOTE(jd) Register the API options without a default, so they are only - # used to override the one in the config file - c = copy.copy(opt) - c.default = None - conf.register_cli_opt(c) - conf = service.prepare_service(conf=conf) - - if double_dash is not None: - # NOTE(jd) Wait to this stage to log so we're sure the logging system - # is in place - LOG.warning( - "No need to pass `--' in gnocchi-api command line anymore, " - "please remove") - - uwsgi = spawn.find_executable("uwsgi") - if not uwsgi: - LOG.error("Unable to find `uwsgi'.\n" - "Be sure it is installed and in $PATH.") - return 1 - - workers = utils.get_default_workers() - - return os.execl( - uwsgi, uwsgi, - "--http", "%s:%d" % (conf.host or conf.api.host, - 
conf.port or conf.api.port), - "--master", - "--enable-threads", - "--die-on-term", - # NOTE(jd) See https://github.com/gnocchixyz/gnocchi/issues/156 - "--add-header", "Connection: close", - "--processes", str(math.floor(workers * 1.5)), - "--threads", str(workers), - "--lazy-apps", - "--chdir", "/", - "--wsgi", "gnocchi.rest.wsgi", - "--pyargv", " ".join(sys.argv[1:]), - ) - - def metricd(): conf = cfg.ConfigOpts() conf.register_cli_opts([ diff --git a/gnocchi/cli/statsd.py b/gnocchi/cli/statsd.py new file mode 100644 index 00000000..40f2deef --- /dev/null +++ b/gnocchi/cli/statsd.py @@ -0,0 +1,20 @@ +# Copyright (c) 2013 Mirantis Inc. +# Copyright (c) 2015-2017 Red Hat +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from gnocchi import statsd as statsd_service + + +def statsd(): + statsd_service.start() diff --git a/gnocchi/rest/gnocchi-api b/gnocchi/rest/gnocchi-api index 1752dc7d..8709331e 100755 --- a/gnocchi/rest/gnocchi-api +++ b/gnocchi/rest/gnocchi-api @@ -14,8 +14,8 @@ # limitations under the License. 
if __name__ == '__main__': import sys - from gnocchi import cli - sys.exit(cli.api()) + from gnocchi.cli import api + sys.exit(api.api()) else: from gnocchi.rest import app from gnocchi import service diff --git a/setup.cfg b/setup.cfg index ad74825f..ac32c04c 100644 --- a/setup.cfg +++ b/setup.cfg @@ -120,11 +120,11 @@ gnocchi.rest.auth_helper = remoteuser = gnocchi.rest.auth_helper:RemoteUserAuthHelper console_scripts = - gnocchi-config-generator = gnocchi.cli:config_generator - gnocchi-upgrade = gnocchi.cli:upgrade - gnocchi-change-sack-size = gnocchi.cli:change_sack_size - gnocchi-statsd = gnocchi.cli:statsd - gnocchi-metricd = gnocchi.cli:metricd + gnocchi-config-generator = gnocchi.cli.manage:config_generator + gnocchi-upgrade = gnocchi.cli.manage:upgrade + gnocchi-change-sack-size = gnocchi.cli.manage:change_sack_size + gnocchi-statsd = gnocchi.cli.statsd:statsd + gnocchi-metricd = gnocchi.cli.metricd:metricd oslo.config.opts = gnocchi = gnocchi.opts:list_opts -- GitLab From 4af81145582532ade08d82e1b2ce567935517952 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Fri, 15 Sep 2017 08:42:36 +0200 Subject: [PATCH 0975/1483] travis: don't build merge push This is a hack to quickly exists useless jobs. 
--- .travis.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.travis.yml b/.travis.yml index 4902bf59..62cd9e87 100644 --- a/.travis.yml +++ b/.travis.yml @@ -24,7 +24,7 @@ env: before_script: # Travis We need to fetch all tags/branches for documentation target - - case $TARGET in + - \[ "$TRAVIS_PULL_REQUEST" != "false" -o -n "$TRAVIS_TAG" \] && case $TARGET in docs*) git config --get-all remote.origin.fetch; git config --unset-all remote.origin.fetch; @@ -34,9 +34,9 @@ before_script: ;; esac install: - - docker pull gnocchixyz/ci-tools:latest + - \[ "$TRAVIS_PULL_REQUEST" != "false" -o -n "$TRAVIS_TAG" \] && docker pull gnocchixyz/ci-tools:latest script: - - docker run -v ~/.cache/pip:/home/tester/.cache/pip -v $(pwd):/home/tester/src gnocchixyz/ci-tools:latest tox -e ${TARGET} + - \[ "$TRAVIS_PULL_REQUEST" != "false" -o -n "$TRAVIS_TAG" \] && docker run -v ~/.cache/pip:/home/tester/.cache/pip -v $(pwd):/home/tester/src gnocchixyz/ci-tools:latest tox -e ${TARGET} notifications: email: false -- GitLab From 3d5c5c5b93ef0868a3122afc09049c97bdbeea82 Mon Sep 17 00:00:00 2001 From: gord chung Date: Mon, 18 Sep 2017 20:10:41 +0000 Subject: [PATCH 0976/1483] load api modules only when needed. we should only load and setup policy and CORS when preparing the API service. 
--- gnocchi/cli/api.py | 20 +++++++++++++++++++- gnocchi/opts.py | 2 +- gnocchi/rest/gnocchi-api | 4 ++-- gnocchi/rest/wsgi.py | 4 ++-- gnocchi/service.py | 12 ------------ gnocchi/tests/base.py | 3 ++- gnocchi/tests/functional/fixtures.py | 2 +- gnocchi/tests/utils.py | 12 ++++++++++++ 8 files changed, 39 insertions(+), 20 deletions(-) diff --git a/gnocchi/cli/api.py b/gnocchi/cli/api.py index 3ca9965a..170379a1 100644 --- a/gnocchi/cli/api.py +++ b/gnocchi/cli/api.py @@ -21,6 +21,7 @@ import sys import daiquiri from oslo_config import cfg +from oslo_policy import opts as policy_opts from gnocchi import opts from gnocchi import service @@ -30,6 +31,23 @@ from gnocchi import utils LOG = daiquiri.getLogger(__name__) +def prepare_service(conf=None): + if conf is None: + conf = cfg.ConfigOpts() + + opts.set_defaults() + policy_opts.set_defaults(conf) + conf = service.prepare_service(conf=conf) + cfg_path = conf.oslo_policy.policy_file + if not os.path.isabs(cfg_path): + cfg_path = conf.find_file(cfg_path) + if cfg_path is None or not os.path.exists(cfg_path): + cfg_path = os.path.abspath(os.path.join(os.path.dirname(__file__), + '..', 'rest', 'policy.json')) + conf.set_default('policy_file', cfg_path, group='oslo_policy') + return conf + + def api(): # Compat with previous pbr script try: @@ -46,7 +64,7 @@ def api(): c = copy.copy(opt) c.default = None conf.register_cli_opt(c) - conf = service.prepare_service(conf=conf) + conf = prepare_service(conf) if double_dash is not None: # NOTE(jd) Wait to this stage to log so we're sure the logging system diff --git a/gnocchi/opts.py b/gnocchi/opts.py index 2347123e..5a1e13de 100644 --- a/gnocchi/opts.py +++ b/gnocchi/opts.py @@ -18,7 +18,6 @@ import pkg_resources import uuid from oslo_config import cfg -from oslo_middleware import cors import gnocchi.archive_policy import gnocchi.indexer @@ -196,6 +195,7 @@ def list_opts(): def set_defaults(): + from oslo_middleware import cors cfg.set_defaults(cors.CORS_OPTS, allow_headers=[ 
'Authorization', diff --git a/gnocchi/rest/gnocchi-api b/gnocchi/rest/gnocchi-api index 8709331e..0663d1a5 100755 --- a/gnocchi/rest/gnocchi-api +++ b/gnocchi/rest/gnocchi-api @@ -17,6 +17,6 @@ if __name__ == '__main__': from gnocchi.cli import api sys.exit(api.api()) else: + from gnocchi.cli import api from gnocchi.rest import app - from gnocchi import service - application = app.load_app(service.prepare_service()) + application = app.load_app(api.prepare_service()) diff --git a/gnocchi/rest/wsgi.py b/gnocchi/rest/wsgi.py index b28cc452..0ebe7533 100644 --- a/gnocchi/rest/wsgi.py +++ b/gnocchi/rest/wsgi.py @@ -11,6 +11,6 @@ # See the License for the specific language governing permissions and # limitations under the License. """This file is loaded by gnocchi-api when executing uwsgi""" +from gnocchi.cli import api from gnocchi.rest import app -from gnocchi import service -application = app.load_app(service.prepare_service()) +application = app.load_app(api.prepare_service()) diff --git a/gnocchi/service.py b/gnocchi/service.py index 53ab54f5..503e088d 100644 --- a/gnocchi/service.py +++ b/gnocchi/service.py @@ -15,12 +15,10 @@ # See the License for the specific language governing permissions and # limitations under the License. 
import logging -import os import daiquiri from oslo_config import cfg from oslo_db import options as db_options -from oslo_policy import opts as policy_opts import pbr.version from six.moves.urllib import parse as urlparse @@ -36,10 +34,8 @@ def prepare_service(args=None, conf=None, log_to_std=False, logging_level=None): if conf is None: conf = cfg.ConfigOpts() - opts.set_defaults() # FIXME(jd) Use the pkg_entry info to register the options of these libs db_options.set_defaults(conf) - policy_opts.set_defaults(conf) # Register our own Gnocchi options for group, options in opts.list_opts(): @@ -103,14 +99,6 @@ def prepare_service(args=None, conf=None, urlparse.urlunparse(parsed), "storage") - cfg_path = conf.oslo_policy.policy_file - if not os.path.isabs(cfg_path): - cfg_path = conf.find_file(cfg_path) - if cfg_path is None or not os.path.exists(cfg_path): - cfg_path = os.path.abspath(os.path.join(os.path.dirname(__file__), - 'rest', 'policy.json')) - conf.set_default('policy_file', cfg_path, group='oslo_policy') - conf.log_opt_values(LOG, logging.DEBUG) return conf diff --git a/gnocchi/tests/base.py b/gnocchi/tests/base.py index d5cb4def..919fa48f 100644 --- a/gnocchi/tests/base.py +++ b/gnocchi/tests/base.py @@ -39,6 +39,7 @@ from gnocchi import incoming from gnocchi import indexer from gnocchi import service from gnocchi import storage +from gnocchi.tests import utils class SkipNotImplementedMeta(type): @@ -271,7 +272,7 @@ class TestCase(BaseTestCase): super(TestCase, self).setUpClass() self.conf = service.prepare_service( - [], + [], conf=utils.prepare_conf(), default_config_files=[], logging_level=logging.DEBUG) diff --git a/gnocchi/tests/functional/fixtures.py b/gnocchi/tests/functional/fixtures.py index 5b6cbbf9..988c5365 100644 --- a/gnocchi/tests/functional/fixtures.py +++ b/gnocchi/tests/functional/fixtures.py @@ -80,7 +80,7 @@ class ConfigFixture(fixture.GabbiFixture): dcf = None else: dcf = [] - conf = service.prepare_service([], + conf = 
service.prepare_service([], conf=utils.prepare_conf(), default_config_files=dcf) if not os.getenv("GNOCCHI_TEST_DEBUG"): daiquiri.setup(outputs=[]) diff --git a/gnocchi/tests/utils.py b/gnocchi/tests/utils.py index e9b0b339..413264a8 100644 --- a/gnocchi/tests/utils.py +++ b/gnocchi/tests/utils.py @@ -11,9 +11,21 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. +from oslo_config import cfg +from oslo_policy import opts as policy_opts import six +from gnocchi import opts + def list_all_incoming_metrics(incoming): return set.union(*[incoming.list_metric_with_measures_to_process(i) for i in six.moves.range(incoming.NUM_SACKS)]) + + +def prepare_conf(): + conf = cfg.ConfigOpts() + + opts.set_defaults() + policy_opts.set_defaults(conf) + return conf -- GitLab From c783538d48da3eadd13ab29170ef4b3ede251e57 Mon Sep 17 00:00:00 2001 From: Olivier Destras Date: Thu, 21 Sep 2017 11:15:38 +0200 Subject: [PATCH 0977/1483] Doc: update deprecated grafana plugin source --- doc/source/grafana.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/source/grafana.rst b/doc/source/grafana.rst index 840b4aa5..6c31108c 100644 --- a/doc/source/grafana.rst +++ b/doc/source/grafana.rst @@ -5,7 +5,7 @@ Grafana support `Grafana`_ has support for Gnocchi through a plugin. It can be installed with grafana-cli:: - sudo grafana-cli plugins install sileht-gnocchi-datasource + sudo grafana-cli plugins install gnocchixyz-gnocchi-datasource `Source`_ and `Documentation`_ are also available. 
-- GitLab From 46a933f61fa134c8ecfd4a805206ace1635b4c12 Mon Sep 17 00:00:00 2001 From: Olivier Destras Date: Thu, 21 Sep 2017 11:15:38 +0200 Subject: [PATCH 0978/1483] Doc: update deprecated grafana plugin source (cherry picked from commit 609cc2ae17ed6bde09adb7dd40768e2d73c9b9e6) --- doc/source/grafana.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/source/grafana.rst b/doc/source/grafana.rst index d731e613..1eaeec54 100644 --- a/doc/source/grafana.rst +++ b/doc/source/grafana.rst @@ -5,7 +5,7 @@ Grafana support `Grafana`_ has support for Gnocchi through a plugin. It can be installed with grafana-cli:: - sudo grafana-cli plugins install sileht-gnocchi-datasource + sudo grafana-cli plugins install gnocchixyz-gnocchi-datasource `Source`_ and `Documentation`_ are also available. -- GitLab From 7c6b402e7ce82b1d74526821416db458997a9bcf Mon Sep 17 00:00:00 2001 From: Asu4ni Date: Thu, 21 Sep 2017 00:07:14 +0800 Subject: [PATCH 0979/1483] Glossary: add 'aggregate' and modify the doc 1. add item 'aggregate': differentiate aggregates from measures 2. add item 'timespan' 3. modify measures to aggregates if needed 4. glossary minor fixes 5. 
Change architecture image --- doc/source/architecture.png | Bin 60234 -> 0 bytes doc/source/architecture.rst | 6 ++++-- doc/source/architecture.svg | 4 ++++ doc/source/glossary.rst | 37 +++++++++++++++++++++++------------- doc/source/index.rst | 8 ++++---- doc/source/operating.rst | 4 ++-- doc/source/rest.j2 | 17 +++++++++-------- 7 files changed, 47 insertions(+), 29 deletions(-) delete mode 100644 doc/source/architecture.png create mode 100644 doc/source/architecture.svg diff --git a/doc/source/architecture.png b/doc/source/architecture.png deleted file mode 100644 index a54f873fb11a5010eec2e5322b3207bebabb6c67..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 60234 zcmZ_0Wk8f$_dZOggoKC;a-@+)I*)V+NOz}F14Fk642{x_G)RMV4Im&bJ#;g4NcaEd z9Q{4#{qTO_!!Y;0_gec}Yh5e$guYdh!NYlmgM@^HCnqbZiiCs;LqbCNiG=}tV&q2t z4GGB~Nlx;$y8Fy-I}>3!2q^(sas zJtHE0W7YEb)bH3eR)Bs?kv?T@^C$N21wvfq6)m4u`n=-7W+>Z@*cS52zG5abn{9p# zulA;OEo*r)I~m9x4C(x0WPjRKLII^{GBWX0Bc)T!aaWku)_Htb!BA zyD@a%C(->@CG}dw54qR#Q*7-EvLOyjKR20t-cAYN)qtNl{LvQSUV+WayP{YE86!~C zjBI~7pECaU41KDo6WJdA+k`)7@-5sjImsv~%4|7hXLbh6=<54YNk1*Je5>xyyRsGa zo9=->Q50M}40ngsGxR8*ygM4E&Q3P*bggf$prIe;$`4?i5Fm!{`pyp|DX^5Wrum6S9M18!&F;4}vc_dw0Y-CEWSBEAVU}cfT4nEJ zG=k%2?YH=e`)*0|bFxaKj}oXstu2@tQx1B+aZHqy?F_OV9+i=Pvy=JExwgVLkLp>l ztGExvx!cujVwNb_uk#l;TSS2xZ$HrP%I>#aNd`EXA+r_0Y70yzI#DYQR&2-Bbd8!Y z3ZCFg`KVwFdAEVQW~$m89>MN>r}>F1+Jd{*?Ho#K)}_@E7e5bc!c__v8-Ht%_AfJ> zLvOD}u_ljikGupVr*}seg^2NR`zxE$uV)d{Mk37hdp;4n7(O!W-V$vtf8_nUY&$@M z;IyaJUH7`KVQ!v+DuTU(;6#t?UwDL|nQK44yE$1p)37n4BZxD-muXzCyDI%70g96x zZ>?kcoFw6r$Hxl9SGJ2{-<{vc`s`1VzxtCRr-Jn{n)@`?E+{FS@;^_;d=9{Hl~qDR zHdG5u3FmT@{okhE+=TwsHW0j1!6Srzx*?#Id~s6ze;cA^B`$QxP^F;sSR|0j*?{yZOn^yg^IQ+q&94)r~Fh&#{o8q2}vY7 z@qbLfvHPcYiMKn_8C?L<;ut2T!)9<7LI8A04|%iwCZodW|NJN>hFgc*!G0tG(QAJh zvL$ONt+~a3PF3@&ALi_7ec)h2rr)}IeEsO9d{3x*qLFLkMoN#hfkxecIeMBvN9?KU z2aw?1Y=!C4y~) zS@ySHOtEM}sG2!(W>-}F6C+_mF)X%tM=$z0lko~vCiVZbAcoYI`^Mz+qiH-(Nh?qb 
zKZVgwp@db{&#=Qc|NEAt$Ylvi-8b?{iQCG9h8J+k(B*;X1d{_G%#FqDJvejwt;o)J zA4=~V`DN#bGR|b8;OqbM!j`B?QWK=g}r#l>TE6-#(`X z=qXfz&3pKp7ga~R-B@bp06;)|_!U(wT=+>=nt<3HwHHi7vdezn`GJ)$yOAfl7wh<_ zMEz0p8ajGx5m-=~J?PaN-9IS&20*#3v4{{;kGbSv-v7O3;I;XRdK*jH%1j4y+4rb~ zK5q2HQLh6je)ucY)4XZ^-pkg~&yICm1{qMAu4H#!d3+`cvj)l>L@LRq2K{eGsX}d= zu6A^GOJ-)IS6oN3_>Qh{Re1TtJdHdloOrcqmBYb|X9~~4P5(ty{j6gYyMx2KS7pW~ zd^`JF?mRfrHlIn2!wy;gbBxfE^{a}Vt@gcxlYu@qs1(Gg?84~>x7b72YEH*1)79I; zd$M1yCwPZ!mA+sdB$1WaPX4SkYUB^#(c>)gml%s1Dk~P)6D|6Vmyy!sZCE%^lww(U zw^P0(rvApMO(E%h4jlZCQaZD2-Xus+=KQ8D)TV3Ba$UtAgj&a9cnB{atmb{J=9tZadT;k!Mh69CgCAu0UdsL1 zIOm_Er|w7sw2fF{0U7E?8Bl6Gw(LsGo_~ef<7u%Hz2VzWe5idaP%SrU`JYz=pOAQ` z*&JBrFRk^2gF|+%T7_zTjsyAhp`{OmXs2nJ<6JK;s;!EqCW^N^#g;?IMzyH(k01M- zRcIsF>a6nQjzNUHpzj!Lu8Y0RSXDw;UIIG<;`2l)Np)3!dBf1*u|(xgr9$QTLO8gB zn?j~q6l@8Z4oMpTQ&E^wsFeol*27bvLE1 z_BGlx-o^6*`m7H|WL7Kp1LupInUO{S*Yeiz)O$o>Ht0Xb>G}A7I+0B4#JSXu738PP z(ct_Y60jamk)*P47I7xyyF@(sb3BL&vy+#=*BG|JzfCDH#H|~)^|WQkm+cR?`N%k^ z)Gi{mb>Z4(%fj5-P3386tcFnEH9IaYD}oP}PwzEwQ?Q@*!B#D=o0cD~lV+K50d7-1 z_O&4B_lrh+`4P^x(T`ZRW5Ns!#=y4TZlKn3c6?!Br0WcdfcbtIQv?tLBm1dJ0m^;> zSe0*~*qLK*E(NY5e-D&+eykE8fWzW8^T9Z-5oKIOL9djx{|}VecySaaw+6agO8EPM zA0Of`;Mme_ZfoiE8Io*J)za99I+i0yKum9XoUB`B-@@=mAWO~>8L0oKiI68M@4q`s zA=E)8LFe<8@KMbz5hGpz4@Ub?)O2tDnOCUn;uHOEVP-$%IhZ?29$9UE`mM0<4RLGN z6~fHuJ&gGeDMA!L(VN{+zZRUlHju_qa#rXh7+7WeS)M%)R^mWlK%x%KDe-?}sUkOj zX-t5L-}}A`F7pBpdV#T#zEioL>1pQYih~b;;iE6}3zJ7__9GNu{y2dUw4TJ&FhoS2ofxTLr@YA1%SfvV%Tzwj1E%NfkU^TwJV8=}As_|Siy0rd#u_FCsvA6wz9lbQL~ zRzxv|Q9`(cf!a}+N1DKuM$m>SwAVTI8UJx$4NLq#&&Wz{lF@;sWx3RBEoWJd%<*Dl zGOj$GbY~W0)l}aP$?MKU#YIHu^4$n7UHd5?T~0P$SL%?0sc`phmFl9+O5}okBAl`a zRT7c|d_ccIiYpXw9#2!{@%21Y=JCIn*Mby1b7Wwd z^4VoA4O3CO_nxwxf>1(2qE_N!ow-c=((75T=oWjMdpS3I3a-6{`Kgecd15qM>-9w=WR!eHTyN17Ap=~z8A+-w@2Yn52^v4#grDWC98b( zM4xTqgD26gu8{M7Xxn^|^1*pkIL`fk|G->)T$=aj4`;iW;d27!pZ%rW!~U;EtMw1I zhLu$`an8hmBrIFQw`EzkBYo^`X)#rnA0?c)!|=K%=l4NTKcP*;Gh%?BSA@sJ#26}{ 
zUjv-TdFoPN*XCsg>ggAZ-}iYvF9~&e{s3&Od9(b$o2bUR8?z}qwh*O?3)CH8%!tLb{qD@v@Qx-&+7sc=V~=SIowVR zZ%Yqc^v!lnY2GpA*i5z2(J>rE*)R<$ceTYMvZA=iZBPa(RJ0s1qDXRAiv`7;89_=N zQ6p(af9xhZhq1b&sqY7TB!bPr^`vp*>v>tyWlalcTj5=7v#YKB^=X&^jitKXrl#o0 z;P~#Yovi$fl`oZoBK?c*f-jmt+WJ|x^oZhX2~}jNhbTxQAli&zjFf+fys*X(NpRsG zM@LO^z$q{a{``1>vbU&6#!FrQ=q|})QS?ID&w%|!WPtb8u%EEw1fNxl2FGF5RIIA1 zYs%updwz+ZX8o9rxh7*{A1j3t)~5zh^WcjN#P@l}GTO#wCDWRL0^H*B>;D{{q)k!9 zXpGgJ7smYyrl{(k01 zi6bCeh;npq`wNeO%cou!5mV6~HG=!#2eJRyAp+WIRY1eEm3}feH{aRny?C9efBiL= zEBH8c%2#XlK5BN7{#y0@68A`HL^qkCbOwP}LiG0`_s7PT3xDl0$@eU}7-9>7u3)v> z-Xqm!-x{6%rF`rlPX1JdUu$Pu)sS7ty{lyWLcPN6riXHC%t*s4nf0pB$-qsI)4XL0 zGaPV5Y4$@UpeYXv3Xr(*bP>l*z$xa5tUT+xHHe8c3nH)ALnMX3OjvAVYDUbP@nyu- z(onq{gbt0+VPXS_>oNb>4cE48Nowy~7Zm(dHzP0HL4m$C=$UX{%~}v)wwZ~Exu>dp zUq<%lL@niu^Y}&;^75<&$Qx#;lMbS~5*CsR+scp% zTE$2Tg4eT5zzk_Cglo0?<^04z>Ww$IcqlxcdYf*&bN3|COiPY_VWNRP`%MMW8}jE_ zouA*n89=W!e>PnxsoF{QY0dA)8WZg`S<1~@6>5^!yNzUF%U}K;ib~u}s7gt076nyl zk6A3Pj!DHsKB<|0KPNC?XPzB!v8D?SO~ht4^cCV^xVGcJx51@86=yk!|FCLTTA|c} zg()^kiD$#?*f*$Q0Nz->E41mq%RlKJiGH&3&CPGh`Sop%1$=h00XEH47`=x4gvxMl z6>QDhr;Fhd5#{zW;-qzon4fnnBq&fJG+x-|mmF<6Z~0=*7VJDLtb$NHFs#o!)4^!n zEP6i;U5V-W`UwH?)+eo|EM<;S-Nq?*1Wxy0MERc#M- z0o#9)`_gDffGj`i>k`{(CMK=moMhpTtb%hk%0g*dWsya#)|TP7L`HPo%->061x;z) z!`a~2FC?+n_;_6Id;HE~&4~W)8svdIzcrJjg<~PkNVQC5^wvjrJMOn96s!80g^5l{ zrr+*IXU|Xe{K!kcLM)XFo=cLVYx*;FRA)tvIKF;z==}n`#)KV5#L$2#S1{)9^YfGS zi$H7U%eci^zvJSZ7*RTooYq@@3UfR0r>PqWu1a0KXWS)Rrn+Jde<`^KoigA4A{q3oS-`?D6C#9yIiS}iHA&nPD!oaiCVmu z&Mx3itsEA)B{){qp-doKtMI=0m30T7S~zu`{t^~%T&*CM$mZ#5eQnM&er>i7Oc^nt&^pBGHN9h|EBLR)^Mf~nlWnI&3>(dYQ$jT!E@Fxe){V(JQ)i`*n#jY;zy4s zu-;%P$3sFA^tNZSke0a6OVTNqR<`W*C?^$%jNc`&FDK;Il`Ox6O{T$(l~7{*Kt+Gt zQSc#9%uK*SX|_dk#s6WaKP3MBc}eYnlvD< zqw|Z1-Jclj7{=~0A=W01ZU+u_X4cK&Xi4dxr(XyFl^sR8_nuZOUxr?DDu(tF0gSug zHDwNs65rCI{p6exUHd38+|2OK%msr`JR|`djPNU)N$_)Qd*#+vqcmFJoupiHnJ(61 zGZpK8CUjSU&2hl^cr9@nOj&E0$IUyr5+f9wC7@sVNJzR`ZO$?_R1owzhw$S14R%G0 zaRa3sbg9G)p!3=2tsmO%Mi#gFjpja2l^<+{wiz&8s@?Fl-I)0We_qV>h~_yweawrj 
zq|m)Zxq9eJjdciQc%?FeQ5`xqu6c5lX-b`GX1e(hwKp9&z{?=-VmJjs%M4`vxp~oX zlDO3(6mO#zLVLv8Uo)$a$QAVZ0H*Hze~w*%w6 z26*9qLNLvajF}BJ7DuX&K|;7eM#1o^@G39J{A%xFn+9_v~Rzo3Z+J5KMWx1w9VNvbCOz2bFBvv_Dlb}pFg z+j&a0bP5nD1K^n1d!-erT|Trn;__x7z>m0+EvlXFQy{{A`=)q{VRzU{}HPp8Y z9p}o5;<>cCPf}=w`dWhZXLuEEWPfo%1 z=fr2{yc8)rtjL;i_*fnA5_}*&4M{ngRrb%b@ zvQfsq^5BsD4-n^#HW7tEI>Lpp_P&_ZpyFy78J>uR;;l>tYHkoReBWteTCmKi`IWbw z{Hz;i70p^8!#e+|B4ti7K#+)xr9LA8I3CBYNL@2(^QJzanVQIV*;Y65oDF=%*HSNg z9B9UJe=>D?rvk}5JpbB!70Aw9ENa(rVRmt;zqWM*zt}*d)@{*ZipyV!u*XfK zO|BxG#-o%4jGbQ}$yn95zCxTmF%&HkWujYub45xMwQ@8a+18_ZyU=;+cN)C^be4g* zV8y_9dP{H$I(BXVa+0$^PwGEpsfw4@4AEhFo)cy^-oDuDnF;{)kDMep6?_h@8_F5y z+6HHved$FF4F#U+`i=pVxixXB=BYaKSH+o^GB?Ls%82+gp@g(#N5Q?6Vx^_e(3^`+ zhnc%3e8l~P{X70Xx&WZ8Xw(Q2!rU-*dx~#`p5{>y$D#?H=Yirn?}tCrMz`ci(pYw) z#K*44mh}%Jq?SjAr`Ang9o<^pY}G}S_iuWQnBZ6L@odjXXx;RSW#)fU8K5nsR9mLJ zKDghYx=3a!_=TzO#4W&c>ioVf#;iB<>O0e&sQYJX+O|dTol#38^WDw|)!k?-JbYpXsfWTn5-;piApqabNQ-6-wNKeama5yxGdX& zKT^Gi9>xHGl=B5l-_5-$ex8j!XAoh;;Q^4SCdtT`aq_f@vMYk^(aFNbhOxw-og1kt z#L)RO9^DY?7VBfZK;Ew}@tz*OEa=UwzojjXi>o`aNAJNY2Q{?q?l+x9rCeXAgmI?w z(ETE`#DCGY>@|EPBYFV~Z6y{pOw^jZ|HRIRP*;`ql<4zUP+BPyx`~uXF}llA^C0Ls zU>l$*_6F#bAHvf%ROZq^=ffVE#md{bbgT=EkO^bAK@)ET)$-8Z0y_# z`QE2?%kLL9D1#bq+R2*~>{~pP5x=rriVT|4)(&VIAc)|#7awoxZ=Z-+&K+PTGU}cl z|G3NO5UH_t7rjHYw(=83qj)Pk#`nu56pQ}#(tIhU6S6eG@MO7-qy5EB8jY|Nl*SV+ z=@$oF*1-?r2ccDykI)KXN54xzYsM{o{udyJ3~$L(_Oyc@iB~s~8pElgE6UFd4KwEo z&WqM=n$>z61PVsOy(otoN^Fg$ipxs&99Q8p;bJ~or!aPJd&ESvNF(HA407&w(xY(Z zZ95%}@ARCjD|z`@y zt*zgYUI4`B9B8uP*A48qVjqa<2Dcnzm?$gM`mMhocLo^Y@VSIxMA~2cd^JQv;JHZa zlpH~oiGT{OZu9hB41S3zDKzv8IbwX5)@iImTp~s~B~zvCgQwUFY}il4W&Lx6%gaty z6Z>y2+N*tqPgx6MZn}z;PU8aPMo#h%qzZnvREYM{CQ~($a({`xr5zZ)8mWfHnnR5JN6l(AbWX#;o?iq z2I2A1+$1U#XY}Xc8Z1G>H+h|=vf4`!pT>7`b)6y?8@%y#5~c%jXZyARmgzJT!c&~M zAfk*p#NvR4opD@evRrDc@LLR7XQB(nW4)k-g`x4I08}oZavq$^6L7V!ZQD>lfd9-x z^mm^?;lsLFma$(06~1l5$<35@2V>;sSj*5mIb0+Kh1{!>HKSR3=AnpzECa#GY z=%oU4Dm|WAq$aMvlun{XD5=qR4+rdrbw_380_VbVDZ$c7Kmid=`!fi 
zgPqN3!irESnoCJXKEvt&Oh4`EA|=#=VtxF-O$MvuueA&3;hie8Ja|2DIP+^|uKkqG zAt7Sja-6ewq_TbRQBP+~iMv<9}Z(6I^BM{S_z% zNN|=X#v9;=`+9P?Hm&r(7`wUno9XP>wa-=WWy=R0wJm3#)+${4pAGq2Q}4Js&GFK< zD<(nstfXgf`wB90TG$I~?|755CT-JP+P7CI*8_`V?&+#Qgg~@K0lfhFhiyPVHz$j5 z3>*mg>}9pGR&gUeP29fRJRt|31TEO)McUwNZU6j6!OJ*nZo&UqP#UU_20z(;ui+OT zxPz7MEJ*6ETcG-T*PmLGFK#1)_~&LY)IWbRqG|2HaOog$iF(HgD1l@tQZpn``}HJ8 zb1jvc=F~K2iefL3NyArs@;iMxWKAL#f}|;LtEFMVNSuG+)#0!i{^~F@abw9^h7~ft z09ITwBeoqhkn^#I#l3l7-~ub&9h}nwD_Ug8v4Ld^6n{&N9rUh#{>R2=M6j_N_m2ah z56(Uel}Pug*F?nQ7tGM?O=>Z}@o#i3Td4hdpLF%b3D!Q2#{Jt>Ok(RN72vH``Jk;mdfIo;1SdrWlgue18 zBn5v`y2G0Cv0Q)+lmHGa8km-_w8&lhVbBGqC~IgZ**}uv{}dlU@Jsbbz?#vX8@Biw z2pAyV!R0YPNRMtk?EFVsvi@|iFlyxA26qzcAbHY|MgqE172Dlnd~7K8Oz*yk@64)T z=`90QD_v#Sd;amn$ZGiC;S(mj0b9CXuu-d2y^m=&0G6 z^E+LxsI%er{qlHIH!rBnL*JKi!l$Z2OoZH}p~(HYlEjP0LSQC3wxaKO@o=;-Qsq(s zZ?@(yug0F|pmSabCjMth?25W*D%TPcQI@#bQ}$k*dq4m#XeRK z;6sNKwuB#_gJrp)(&d4#)IB48@3v*VA?n33#3=2w;cGr2CgV{S<4E$HV5S9_h5 z`of4lnyuqa>NEWt8EK~_n4P7IW(`UHgjKv(VaWY-)B2PO( zF5vo*xxJG0FFJxk3^3YtG~cnNZF5jaR_dr{44zW#-Vwv1;5we@?&gw7Wc>gHb&)tJ3kau zkn@Ef@s7?e&Mks^SC%UFgbXVi^PLyBjS z!c8dUwRP13qt3CRjRCAFA0Q?@uw>?aKz?zQ7E6BGG)Uh04B|PS#!(W<5`!ZD_@yNc zj9vCi9DgqfZ8`@-pU6L@qJMlgo*N~+Mh^B_G}-+YnCG~q6gg@l&vUmp;<&k+usQ6` zls4;hDDTe9pbFEXV(j{==Vf{tpI(`-g>m;iv)Z)e)T}GW&_)B0&BRcWNzNaU;lPeI z-oEvevR7$_lL2;Idb8lLrZt-TT`!+{tN!y+`WZDagz!(G!fYW&RE$R!$-!NutV#^H zAKkZH(_1y z3lFkcgudCj7|}3KK3bB5lvmQNa>C6^$FykbEmR1XyW;(`z<+(X?uDh7rxTHD^gz%3 zbyvK56KjW8>)UKUZ-G1AgpZLMzO^061TVh_75cv_@f87}lo4@Z07;_E(Lr9cWk-|s zPwhRG_AXBZ7SA9{!xiz`3*vn706?N8LXHuvt$Tx*k8Xx77^&i{t?j?6nGd5^-rN`w z9g^`wUI$rYm&7OqRc~-WA3>fAP8CG;2sqs|8TV|unA}i=k|SF}tz5k(V}OE8;PITA zKM?zul>ySPNeeT{Vx+_N+fE5C6(j#}|4(x*`Nf3{1C=Yq<2#;l4ux;{_&HRVR)fp$ zQ_(}(c%V9a3$}TFpU~jge~%46EBSTSi;?WxdK-N98m{IAO(jzwAdm1%Gs_$U z1^V)v1M8(48kBCA$M|oBb>nuKxrCAZ3yl8c`CjO)%}W(xMPMXv{c~Zjd^={X6N4lp z7zjBG@5=NBU%oFX7Z|{)n57enGngLlvQlEYAndvZ=rZgIS5SA-CElYDtDP!7bb3M&jIRxY{?T( zYN{F5^H=R8Xk*o)ZWGUXkt86A(0J(nHp5(24mKd+gqjxY2lQGV4j7R|O@l5oX&7!- 
z`B$4g(1=hH0_jFpO2Y3nmj*~T(oFu%!%f$Y)HZbRHUvA?k6ywyVKqUdbSc$8J#MGA z)LH*M*bSz>H|?ztlP%gYM*j>&Hjj7bQGJt)!Ma|=Pixp@-lb4-P25Q7{Fr^?SnV|U zVx9-Qflc-h6@3F34fRwWb`&RzTfsgBjWlBIYSoT2;&VIcXzy^oF+@;2ehoJd>pW^0 zs&E@zb_`jdlY_tTtc^c!u-56Z{#_XW;_CN&B(mI9+|FM|uUY!bo0P!ik3hr6%NXep z$+))|1G-*SlY0&-+^%1V<^yuh#8`p^E%>R2{n{4T(gaowe*?2??+r^pZ!NVlf z>mLjGo{y#^@zT}86#$F4vomidQh^!w6bb8p0___da3%{&Lvj7y#3C6q$JPCL)cL^Q z5>jL!Bb|{}lkvGlU|O=`T4*xk)?kcOrr+tXa7ct2qqlOj_p*NX=Kv3MI_>=%H1_R7 z--7lcb@urG#qa@aFauRo8PKt_dpz*Kod{^YE|LdcC9=ufehdHkDKYF-TYa`&KcmPi zG_SBx$15S!^jn`SG`!0q{+1Q_U27ZCdRkl|m;J|%Ff4w9l&NG8uTJofQh_uxs&GG0 z%(JOy|9RUuF1VFo>U6mM2pFvNvdX?2LAKV`3stjcY}az_?ycPgnneXv*ziTN>y>e! zM^jQtnHBkIibAeU7D4q`=E30WxCR6os|oLTptn?%weq(fdJ@4|d~u3-yvcek2c?yGb6aq& zXg4s_=I*Q|Ty`8Cq6SzSqBA+Ce|Z%5aRv{RV$oR4f@>5DOm=*V80aAb)&oe|hlmej zr;A=MVq2bDdfA%|L+L&^Oe{rQd=RNi+EDFzYcC@at zOsorPv2E&{)9`ECOg!w08JiTS!T~-kf!11 zYMF{2Fo?g;Tzp7|1}Pvb(K}=|*&hFZZ?)lc$3VMfCvi`9$7X72yd$d+n50cc1lN{+ zy*a%r1Z?8f!tLn~Sn@yZ7pm0+3YGNq0MB$JDU=SsyVxb}k{MhprW^r&w-5taG`M1X zym8+l&A(+wZI#k9EMvRByQXf_RO^Nezppi5zO4!k2r##?h@JN zDsEHT6A zepUD$@m}~`pwm~hTa@abKyeWby)rWrF1?-Ae`s6TZ!frWk%)6^(Bx0^Q9}3PPsy$4 zQ##4Kj|aQ8iS2S&TqArnQA!wzmL#7bBS3l|^Y8F%N;udmW?3qTc5&WE%B0M{FJ06W zn}Uxo&%I-#5Xlzmuovs@TYaYd6;;wJr<6;%=3L-~aT~F)(@yny`s(MQUdYn}(A{O9 zPl}1u-%dbqXjv9@6LHQxIB1+xTBS9a5(;9|DVz7xheD-lZi~t}Rwm(gE#1AT~kjN3E$Tj==+Sshs0rkB!x&6Do$EDoQ2~=-`=F4Yg zOurU5?NL?@T&G6nFApdPlOf5M?3UMR%zIx$>~qR2GH!<|XW35FJElAX#d@BX<6Ngr zP9p?{`rMIIHuw^^*BKs&#%4k=!NSuZJ7Ac)U)mUvCjiffd?GB(hqX31O@Uk~fSR%l z3j!WBA~GQQ^?%yWfK3l3;go=$c)YB7C9;^g|GDK-6Ky^`&K^=M_j=!|r1Y$D;A{>m z^vzvh$UcOBnr77EITL}oXo}z~zt)^cIDCH}{zPGB18*Yww3^ylQEEt9;BD}u1Pq+zR?mB=g8Ys+*ozn05stAE4S zzm&L;QMPj#7|P^%jFvRm+UHbJjf!tpNebtRR#!gxQN|2A7Ex`U3Hv1&txk0&TjMp0 z5v|T%Bi8h@dU)$hl-_HG#w%(6Z0s|BP*G2o@7yP_s|k&Z(;+2^Q+@Yvkron z>eM;nF5@5E0?8-XOBE>2(gX{6tS1gSoF6Bq)OuY)iN@TCmlUchS=$@`zNf=bA`*>; zC3t)~X+Si?G`%w@ zQ3#Vny*HGphV2^U@KKQPODqp=+GRMNbNo6S4 
z+?=bL=)|{qf{gg2?o=d#{lK~mikPQ6NMbh*CEafY}tqC~Tm+{l)~$iM+{;F{G`2|qG=UV5HlzO#ah&* zV+>QSpP}}(Y>UT0;Ps}1Au9P<&0&M(#{JMYiLdOu9)+am6znL?EN$U_8taVr2i^LJ zd&RDaJ1T^8%o5YT0`s8UTzaKNGaHGzrIL{eE;b$4ZUCbNZI1Sc!_B%$UO&Gb!_!p; zcK+G2E05z@kMr{ln=FsA=0)T4Uc|t);3DCnJK2&=ic<{Dc=2is_-Y7Zf%2(-}1Zf+m18so8e3L z_s225k&DF!$0-jTQd0werj(nDyy48I!TH!UoTD^{G-lXC`yhDdCgVKGm&EyWb;}`> zxwX0cp4SJ4bu&rCJ+mGbm8&%xC7Q-sI@?OC(m zfzOfEM+3AFH)OU!NvVT3X%TLh76m&X;93sp7a+=D2W_ai36sBr^;(JFSk4WPi}SjI+u?yGjIx_zjMOYNHZm*U~ZC<8r9gNtOM2j#Q%l}BD) zlcmS^KN9JIVqu6ov9a%S+o#;c4J5jnA*kp{@zbNezPY&!n623s2V{)Wjax-~0&2L=Z0-FXo)6I`zua+@~OV3oP4Jz}%sw_RxLg#lf2CHH_+HbX?cp_dDnn6rI?KK)|PzLOn zMi=%NCrI{SCd2!!ZTvh(XzsEMH$4D(pc+Q;wo(Q)W>GxZ8K=#T)6P^jAnAJvRxpnz zUAo>&>GW_NT&~b@9_%yX3nR-r@}8xfX`XI#xV@?le$&Q#6h$EB zXEVw7Ea*7ClIoqS{n3uv{=2zg@xjZk!FjUt<^5=l%|x!Mq3Pg`3(EF0$pF)pm=OT;l`yb!lNW0oXKp;$Q``%`KZWWhpc11{- zID*A(aofhx;j;uR3c|?ync;fS^;KaQfzbc+^_pKt+TjAvG$@th~V=3LQ5pXVt#qbu>E!?&Ckwz4iqqO zdqlGuR>BOM)w-=cYUTen+oCKwWE6MiFa|OfB@kcUm^v7^RjMW?EnM|ZtaRyrM0yoKAn>dbNkRTTjm{$a3g_3PBj59Ae-o%ZKLu#BE76-;x zaP<>q9(%0ewqUP^mULv_dENMP75^p~xb*dr{S`XHV#9O26%}7CO=GX&H@j2s70?zIzQ3dpYOCt4qaxfi76EaH>E);tb5wyH`x zuQzUV>SE-NXh^KxGAH_dXWS{)e=?`X8T3_YiPy2*PeWB(o9mTV@0c3X3U+ZSge0fa zt(tthr-t4cPCP^2Fk3QnM2H`I5R2|lVsS_;JsjdKKmc#KhwTs8zU>ldlEC0?@pFXIWh(nK?Xr>@+fU7{p5eHmU~>Sy zE{FY{aMUA4SRKhS*yH(=UZcQGuzVyP)s|Dgz-a@s`f~Y9jvVD`gX(iF45qWD1#ob_9*OgD%SKbG~mkGLLQAJ zAVXV(NK^>TaW!4jH{V;6pAAt2im*=K#TTo@B04#_#h8q+9_zjYO1wsrqghdDsB?2f z@=Uc=0|M=QdRc}~b0oe9v?YLL9hc4-yV}9f>qeA;ZJP=V^)e(iLV+88_$`Ey(`0IE zV%1*IK@GU=x`^vyCu&4o^7k^}E}ACHL?>bRGl9n`dR32zikH(Q25uYTw=N2il)2ww zZR9`|<(+4U!~?AZWm*UIW6jRKA2Via_|KdCOhfHo?ji8a^Fa3Xb_G{jU|MKtGv$$J z*q@Kr4aJ+gF><3PiOF!TbIE>)j_u5EPR+I8j_#%$au{H;J4XL%y(l1K6w@5@ma zj6l-JmQo%jpi%fc5$~>MV|#eFt5!xh&&R6?12ZTZ%i_`OQB$h>P-c}v;~K?xIZ zO!oG>8+Go7ONP$lAXCwdSl5NAcM^zq)CiF zjL*w5XE(P_iJy^#GLY|*$w3u8d!E-s;s22!Wtc}?9_z6@w(rc^w_fsC52^(aUO?Bj zU4=kZbEN7{OZuXMHNJ^!s&%F5U{6=iuFo8(gR;IP#*GRTu&0@I`B||s+u{O(hW@mL>#%eOM^_Ww 
z_4xPP!^RajCA)7M%=kSKMW~h{>TN~a906wST*&t1jrTnWbdVT1ys1trDY-l6J?_HZFICbM8k)$;%tX!o-G(#lQ2Ah1}MCw71r z)WAsTH&!GYQkXT~ii}WI*udk9S{05griCfhce5eJ8x?cr_6O}RYM3iY-qfu^6wu+b zpZc2C9;cc*`Yl{G+bVSlRPFoJQ)Wzahpp_S^X;ZX6v*?vo&0tMDdhogX&H1Q9y4%o ze8*IU%>vWcOWXNKh0NyF?Onpj#I^X$tg74mW3PMq*FZwMxJByNL0>D2knZK5Rz5-t zXxgF7$n+M;O#FQUb#|o-s@?_hU!sl!$6_5Hek~y4AgZ%D(rG)mN~nxbW^RwaC$X;D zLqqT2x@);`@O1ghakK%}=%eZk`Rn>1YqD;_jujMUZok7Dr;75AE`1xVC`|^Kdh`Itvv~^1Y>!fg zELSd_k-bNGg1Up0h%T50rk~l1ylTKjZXdXqBk& z`3Q+BLz0?sO}E7@xG2mmiZ6i2Fe2K3Oyyl(Rqji=T>t|7KVf7T0Cj^zXEY8o#Bk|v z+u7Tq_QeBS!aTld$!d7iW}99)iVhNmR;X2YCUEx8v*V-T0nxX?`qN4!cGt+++da;> zQYm0rYvJyuH=4lGP#Zz5cs~pD+}Mtc8y4Tz8%cYA9x;%jO}y1f`{gpcqI0Jwf-=vZ zqNwrgHDut037+kT&DC~(Ia#^TAkAQhl(!a!K&@1As8R%#$~;sB7H1;;8IVkR*t=b9tLg*>w5NyM?((7YJW6*Fh<4A{@H@0DF3MH zRg14_#P8n&bESUS71~Q`H@3##8e@m@s{S-2MesB%49A7K6R}}cTRYtgcmL#kkp1ql zt?%~srTDVJf)F_HjzHY_&F}sp@w4^enUNsz#fQO^5bVmWI}>Zl(!c}z%kY(qhrlbc zCU!A4F)>u<-z#NlEJ|uU$iS`F9qxdmEy~J1zy0`S-HM)@>_0K$UTV_ybBB%ss)CX3 zXn53|KMqb8&umjiK>T}se&ukcq>4Mro58UXFj+>sVx3!I%_YBbB zBz>i-#)+S{Je{7U6OPGju7Gf#da24-Ac1_4fcMj(+3B(ANQSj(#GhX^G$NZF)^^RuIpyZSN_MnmN&DT50 zXdLMdfQplsNTdE z1~3*TzR?M6E*GYsTQE!0Hd>Zr+}`(J^cRWtS|MBe8Bo(qWSJ5dCEe;{xDxfAf8z6_ z{n?INe{elzH77O}!iqz|sBm&(ZH&cY$5 z>lCCTojq@sKBP0qkJ2_6WG^i^@I#8s(G6TW_gIP@2a>;HgCRNHwxv7$3T85a(d3+; zK5fC5!{25q?77Q?GU=PX9K5^^R!&7V}@X;KB@+Us3b21$i4rN1MpDDP+HmK z7aXc*rWQJS2iS8wABcfRb}++r{ALI|dn-nkH0knU>=&EP%xXHpkuv5OJ%^)`E1{yv(K#EP_gW>r4;D;%W-$_ z*hV>8<#=e+)OM|kyg<-l<$k}K%K)=b*4~{oOkVYYb=uDBNB$v0`Qg81ebGj0dr75%$GQ|8 zlsXfXH#4$HdZ@aZ0)t*IZ8jL5y({!{|B(t*F{=OWmUE@(uM$y{Z1DH9dGPSBVavWR ze7dVtmPiNh!LAUIh%i`7r`V+)YLz*fjIuueLzM7AuQl+<*GhENle15WP59O52kxJK z$*72anAGf~Qe(9KbVI7ia(%r(Lh)!yD8jpiRK+?#hqw}*z-ds5?Dz`IpSk;aydTQF zVgpIrd@)udNrf=k>|)etRPRqgGGCI6)$v68kw%uWVqF;p2vKU;g}l5Q-$3T$Sx2C7 zOt9~#u0aN5nkHsnBkRnX>wxoYDXcr>9?LG_dA@Cvf z;^a9XEZZ+|`br8<<7DmPnu;|FF%RY~F4M_0ZFofvybF@qT71OX$G}S3x3zS6u%jUK zQc+`#H(O@nqZTYc=G8epju@Mc+_wCHpBq3~Rw9q~_{yk$R*c%8$*#iuVdgiBvoC#P 
zlL8_EFRA@2`V5yGiET|Uuzz7Qb};AQo8r+^v#?CGQolZ;^?tHNY9&h&=M4&4#k9mG6yc1O4|$VcMvfK~ zbb=UsnafZ#&Y*h7>s$)Y;2T(L8g*r7cDkKHr<}4vI_KXiG0)(<^Y9YQP&#~fLuNHU znliV(T5H08=~sf{+AR@rI^guB{5CdjKAfRj^C@t+DAfl4Vg97`5RIVpaaF|9mGLvl zNoJ&N9&;yi+3hAm{L-HnjkT@@c3g~TE48}wyrKMcu9&O-%vBwbZ6mnCHP!3QHdCGG zl-Wb4P|arMdXto0d{zU7PFc^9!(T;#sdyr?aKPJl>XLF^rZz~g#jz9vKUU${t@)Mv zEWHC4q6HV?TvOGDl>Ox6zf3(1>_=$MOnN<%nMs^~xJ0jqYCQRYH}tf8tfnO04ADCw z$wcXp<;HJ)rgYCC!QGaSHF1S3;XH8L1Ahry`(MW?VPFE^_qGW;{B~07dKRPQ9D4-R z6SQjbWrU*kcU=m69G1-|X4=$ZuBI<#Pq541%EDWeZ6MKX!?}v|1a@)>y~2%ArdO@* z&CwO;%`?P6%+Ac2!`KzbU~(cDIS5rnT}CgoNycG~e~JcXDk5)Cfo49yFN$c_xlB(a zU{l6jU<(3zjf`hCIWj!Z*pZ|2*?xry9Mh!g$_1PN)kMA34Is(h7$eUb{+4FveBivV z#N4gk=IFRSp{U)UK8;uCq7$_mhSu0JbltUVHdO{I$2bw*M1;o-C+tS>E*@|VvggrE zSbXws92a>P?h{Y4aZ!HNP(me^@T(f8O&!rC{@-OMH8-5;GS!^FpfB)#KK2J05iY%| zb=jfy!d$&}&=dL?(>-lKWo#a&U-d^|TeEekrhj+yW!dcrul{b_a{kC;O9B9nGE!aq zXK(K5deLx7gG-EGQrzCXs`}h->(~(>`YYFFq?EapG$evDziQf8>*K}cxg6{cUBnKh z@m;_Q*UcM2`6iwpmsSf=h)JG?ccQ2yQb_}Jo!m|$o)iuL0A^?ejb2|mmX$G0xJ!+B zJw~-=CJuSO7IQ67n6K=g(_XQmi+X=cW}=*qb6qjb8W7}!Fx~kJD<*4sFhAkF;;Hw| z?b09@;uqyN)j1<-3wAi7Zk@7M8(uj^2Sox~6fOUa;?m~S!PxTVG|6c&*02~i=ese^ zbKDo-)Auf6@oT1425p=TY1S_My}!#vm_~E!JD7;$ue4Ld=Q1GcsBQN?H=wl0MQwWXIjG%8 z%*LKMD0OO?^TI8=Jp#6mB|1qVZR~7*-Zt;3S&*W;IOy~xX=N}q0PSCgp@ycuhuJlu zjgtuAw+u#-vnM-WIRz644mww1jVa@{NxQPN9cG(8<<`!nt0FrDU?QAFwS_XgR<{!6 zeObgyVgh|dioBFJKZl&E70bnKX9gY<4j>-j97`gNRBY)`d*@P68KC9waindrYfKmD zALkSE^A{#9ur_{_McpFl(cHg70D)Vwu_lwTzdmgINXWTo8V*f_KJT{nB7@ zMqL=B4D?HBsE|mzfm$PjsOS`?9~LT&&%IPr&gT|*&dSlEiPVnJNBgznCi?n=S{dX z4|1PPM4^I>O>5>U#kVc~37IIk1V@-PLj92n97~ZEH%Qqh{eNE0v{;7m_bDweeOf?~RdqAjrcCQOR9 zOjVhWm~C3@xv*9>YQCSDv-pAZCWE;R8kmurVx~24h@UWg4^#fB_)kzpaRFb^*=B6p zX$Q#EsoR?5=2KJ~IUtS%>~HA>rxVM(*XN`lH3|PNK2ky1HlqKkX)3yRu6IZf+54kt z?36W$fg0yB0&5aJ>xuAk9+WaW*Ok`mA5_tUkrsau!k|X_no;dLVEK8riqTn&Mfimi z%86S>zpPX!VOD$(c;EFo3@4xFA zC}x6e5ov_E)=-=--v72?pIEdL_~^5W#-B{=r5&W@dpfi0!5oWE6~UeQGnRu`~;)Mu^ z4W5n9r*)y_L3Z)vS%;FZa{BuOuzb3qyN_;SDgTD5?R;8T%>7$(JSvjaPbQvG%ih6J 
zlIUZ+6oC_(zT@C#e5X~* zIr;K2y{J!GwO{`@rR7=UdeZ1VHMiq#WwsU6K{STTLW7?w3{T;*G23t1!GU~vYs4Zg>V<}cwf-~*8=)+-ekG)zR`A{# z8#wM3j;Uq*9IoIOcj})Ds@hli9lsl~`w`9(d6R^7TV90q_F}+YUbR{z0FFWxRKO4! zS^sD1YH%0?mh&%WK<`U<{T4MW@zZpt46I%--2ceD|sFD-VEkFWIX6nO?U6|?V? zplzPSG$&(f(!Li)M=4fOb>b4C&5ftc0=cWIgCYBBe*oOACkf@`y09}!7mq4vL?JxS z#aalzqC)B7lV+lcXk=1Gu{WxPqRVNg4g|F-bj?vx(;95zhfpV!VJWZmSFmb-b}F@C z*Sv(bef+BFc#EY#O=&9m_a{A?OcDidO2VVQ*LFiIf4a83)Uhf^zS%uAU0_r z6;J_cm)6FwQs^tj2`Jq+a~8;f=W)@k1OB=TALC$zzyaZyZNhd!ek$KkhXoeRfM6(Q zs_i79uPT-s%B8|Wt~5O3&Yi31zMJ81)Hp9FPT+1nYzp*n_jK?em1mG*0c*E6xfJ+G za-ed4gilA_nV_8rP*vEP3PMqrz%O378K;Qq@_+>U{#cX!%}s?7sy5v%oRJ1iQ2a`{ z>)qHjn8=Ooc8mRd-b{GuozDtvFCL$C1;upef!fT%3Iihy?z-997x{ffQ7lM{8B9J zH3nOD9sh~waq97wd3FDs7BmUi?$Z*+iuGe&<=elmPIF4ksUhsDU1-FJ*Uy_oV#R8l z2bESi^pvOc0<7s^N)yNKv=(lqw{TjZ|sYk(K(mQL5M zyj-i4&u&U62U<37(>=X}{NFktfBgA4_=H_L4=)G1}f)O8aoF9#{w# zhC}%YXzBv^izI5`?tmIcboSnl3oJjW7TMQByu8=LY6*pC>?8jM>NiaLEHaQ2JLNU; znT4%LuJgOgjD-I?kSd7o*1RN%gbz`66EiRlrEZP$1MV!G{sMx%1Ae(`eq0ea0~__H zCz=TVfM={wSJFW{ACqFtlH-&@zO`2L^52z`66BIe$F-oy90fMHdSfDkaMjfHx*+qFg3t0Mh#fRo zp!X_SC#JeTUa^n0Yd^*2l_(4DYB0+-j*c2VybyhZz|c+JGq{z`h{4X6t3?z#0 z$<%k3z(hNeDq@QH!3lk+NZ!}~ZIH4S6rgaSjG`7qI&E%0uMXg6savWvG2<9%V7*NATd;Mb zVxfHQn;^q4q~oSCVf=&jHRLp#aGf#G@H*Y!QE4U3TR+EbFDY@)et*2ba{}6`z=vO+ zJdNq=yg&bUQG2{dCQ^^mfyqp6)o_P%Q72r6<&-sEUC;_HC-1q#oa+~HZzqZ^962P2 zFn?91(hh{Bzh3Zn+j4~xPXJzt2r1=Qcj0~{J1FSlc{w5L#2@j47f0h36w4*rA;BwF z9Zg%*FypWbI40j?Ae{{hn3J{$9=GHl1=U1s#rM%FHT!m*GOZh z(ot@`??IG7CeKWWiNDx?8@n7zN-yWb$dn;vl~fKFb7q;2)GE|_iCt1{lWZS(?&t*7y^5<7FQi>XpV^jq>r{ihTaVR- zd9Gm~jG*I}Xb|ED#!oqo!0#wWVXo0T;_iAoTPe^saayK zBH1Jc)!3Y(j8Fw?>PC255qozO9A3m(LWrl$92(1#q*bXC`(5U&oy@Ca*Lh2F~X7IHuW=fN+123AAR2NO=%O?;BUaC+g?ogGS# z3+Kd&xNnpip{$`#w$92;?Qu5x|IVbvr&mcmUOfa`!GVNlA`KUHgy)W6tZlNfOkdSV zYhXmTH7$kuh}AF`bi@f>^OvB3x-9(ikhzE={KhZ71+Q@yDQ)eFXu7*PgNd@VE*pAy z6RIPA8yEVWfrRl;*Cc-#1%6u&h2Qx_v#9anr1^cX!!2xC z;7}OQKAT6Ob?hR3jYXBADQR-}eEH(sQ!BrcLe-!`V(j8 zah*?b#Kt?5dx%w@fmx|TI5swbL6tWkSBS6>Lc&XQ1ar?aV6 
zg&Pil8?l4iBRZ56y1+AjpW(E-N7j?e74EEGSA4x%k@8-brAUtrv`1p0h)aGyYg;Z< z@yqr=#HnnjoO1e4ZRffqC$sYQJA7K{zt^H(X?z{OcHKco#@_MBZ6O7BCd-*CyMzD0 zQS7%x7yn_gN5-kqaTpW2qKoTKP6b~vX{pm1`gyWIjIP5Mjx)l zZR!KkbxeE>N|D8OJYLk9lxg3QRo<~V&XinwmQ&nj=!^!?pH{aam_B0UR;PoU)c36_ zCMn5hkvY^kPq7p#feLd zEhuWfVGCKtcG;)j)sy zaaqt=q>lJJL12@t!PSBI35W<5hXu-Gqw%=4pG4xeZTHiPu9O?+gs?HWcfB&~tCSfHtjlTh+DP1eQCt$d)RzP5ohk z504_W(bmqNYV$G3^nnsH80vlC?8A<3p{#Pe$e)+bhPXex*HgfcZ6#M$MHyNGzUm!_{+=X zIRifB6fGif*y>nJ(w2J~_X@1sfg8f$bmb9jmwwPdgVl7%Nh+xfRICRd99Hn2lZr4| zgrQGExB7j@t}us3qP;=RbaLwS5bBAbY`(Fpu|wPX3*2&@C5x_8&vIX}4N12J*(`Q0 zN85=Km-ubjHJeH3R~}7^pZnBvSPeFp!FNlh7k=x*%PxyAUq@NM&jxNLO~^Vwfd(Du z)tEt_(g?mRfn0F`2i%Wnq*m3t7Xw^A>9V1 zyLBSePYAFvoA9+O#>gBcW?@Xc0)8p9Gv6e`uxkpiHN-vFeJ?+REt*`6_DGfBA{oaG zr84QbyH|1>NA9~Wb!WT1@}sZC;)BHha^~;uCK`Kwo1pyj`tQ$#_HD?tzvRezX=29> z^yf_H9$CKEt>fRBppHp+`rDT>c#xqt(s&-<7$TYIzY!B^;R1Bp*&TG%{K_w0 zOI!g?hh2m_=o5TZ;A;2J>AQ36#ELHkXDjWW1IcBINA2iDfN?c`n|W#WrQvTM&!u2gXc4C%O-eWHqM_9mGaCkk2Uc339BVRSE(SE0FtY6 z?&ZpUXt?Inc9VmUWZ>xTh90&ov19k$WGujg=-W&I7mP~cZiiRr;gqL24N=>14$sC~ z?Gi49s0YP;2K=JWY=P(|YJWH%)vmP#WR8`0eKUz!{QXf97TL3O7az~;`DOo(kV3N_ zjs%^AXDO&>v6r@jd5n9j3*P(eJzMB+UT#s@S*Uz&xMgzLs~b~4`BFKF!e`JA`y%|b z>(t$0kAidro*XP7w$LH)N@nF)XoMgW`{9JXc(Mn#yz{W2ny))Bz9aT>c7K7dF%wlF zXkl7_zm9$WcaZ-KH^n;f{(0ZZ!{5y6faMk8V?E~sc2O0OS}q%JU>a^@PLb;e)3?Q@ ziwB&%g!{YkmXfyk7|iV2gdA1wafJsPM4;YS6(gt_>OX*CWb~Z7}Hwcpq{J!gC2Mo~U+NgeY|hPa~SbYq)e+ zzj8LYU8a@4^>m zFgJ=|+|PUAMPYApSpQ|;^aY4s^BU`HfT zsZ0v?5TEW3PjKsfb!TQxhHP?V(9kTM8wzEE8%jFmhS_>%+NAcAZEJyvCD0&<_~1~S zVuaTQ#7G<9ci_N-8&ETZUPjhoV>*Bwv8+m!l)OP*d(Y1D^7}gI``qSL)OKJOT%P`? 
zdfd&<`K^8Aulp6~Fa&Eg&jXMVS*xF;9ukV@Om86YznSoW>jG!%adzW!44pkwEJ&Nw zA{KKpZ#h1m-|ejMu`Vu!lOylPPC-3-xzKx?vhF<*VDs*WH(E&%nIpx9-_uhkSL#3H2WM~b8g~5^;#3RSmqaenEFQXQ zA`$QB&~7u7pS<^7F+cANyr=xt{~%ZUn&vU2$UleNeMNo&k5ByvIaLWUv;wSA485Rs zMH#CZL$~mQ2tdr3i0c=?I0Z~yh!kW znDEyxdiuUV+kejw_^w))yG^-tg--#^P2^FfkciSdH7@h z#8~*w@RW9!#*)P1z2J*R{SqMA@V3)8W ziQ06fR#jCo#0lGAQ%?>;qQINCExGT$2%+wb$mXfgV2^@4SleXCmG{J#NE0& z*e>FCK~&D#okBVyWFCb&r-842I$I6%s210#4SCm;^q$&qmMC^WF3|yDzNeTMhJBRj zs8XvceGx)OT@q;hy?Xw*<=))*K+l|X|3bGz4u96@!#-6Q$)}HQG6)Mn7}n?y4WLy8 zn+x4P7umu&wX+3bKH`fx77~heLfQy(1q!W*V^ykk{6gd!1S(MsAexivX`hkm*8p)> zWH7JqX~ftW+J(1=uZqgi12IHDEV0GKtJZqpp-|u`&>;{ZKjkN=r+m2_IyFy{=+yUV z>POrfgXaKYzDAvu#3x$!1;>JTgpt{^Xp~S;ph6)2HpkpjkcbHK*ZCRTJ}tbydVl{QzdwA!t%Y__e&xeK{&S|heRgZ1|#M)hi!u)#`0Wlec>L9X0QF^~vh3nzB#^xaI zSc0mL4s+E*gv3i{=0SfwOL$z!NQK~z(nR^lgh51^C<2siu@>^k;CV`1BlJs6?OnOF zmd?L*tr*(}4Y{oE-c6=!%6`(!SF;d>)N~5yA%L6ae4=}W&R^D(AgqGhn!kWQ!q6167t^a7vL!vZ*JZ|Ynf$F{o%~y~{S=1dhHR^1` z`fKnU!vt_eE8t%}8&}{XB2EKnn%9@GATFM>FCSGcll>e&Rv{q@qR#OZ1GnhK6l>VPt!u z<_GTJ{g!bzG8)hs2NpwkkFp~;HCE<~$Y$Z*Ct4b#tB9@Tg9$G1@!-myq&%c;1M21hfFra=J34!E% zmplVs0-m+W()EW8JhOjtXF&5hT7k`$;jMuHj?E}(fGNh2XhmC(VX=noEw*UL~-ubSM5qA*h!d?d|O>Qb6`{f(SQtO(sei$yT}h~znCTQ4cQ%3}}(7G8k< z9K2kj$C;SJQ1VG?d(Gv#LKeY0o29=_RuYP^eluSn4DXnZ7#NOFAUr8i70g^8nL>qg zTfrrk6UEo)>WZ~`uEFVBswV1uF4ZB^- z0Esj8^`$}OO0}YDYp0U^`X7H;+7h4NUi@&EfB7Rq3DEQPw{Vt!2!{Ss5+Kva<-p%! 
zerK470O71C>i1dh3i1JXRt~xa1ov3hV~d8VkuyKy>SD-3D^xC+P9`W$c;y%sx#^qd z=CI=8fHEiJZ3Fska#Lnqv!|~fbq{=o^pi@D+C_Exg`TBm3%Y0 zl)T2n|2t#22meB3`>UY~1~d>qvl9!ktU5k{5!I$o)Un@5qj56TOOoO-WdF=>I*``f zNJQ*AK>F}kh?AWj+D8%2GdPo6H#X*c42En%AZ+o1v!rq*_N>Y?pJAmqk^Hl8p~U6- z0lz!LIHrIBjeMe!^PKGLPP;X}ted(C=2m*P&O^HV?B*H2+d}DO&wWpd7S)}g>t~1c zl%2oHLmuuZJCt@ZNArwh*IRDnocye^xj2ER-da~Dj&FJw8!pZ}=L^pEXBYzJr4AR5 zuP8hCFaADURe0TcR|W*!_*TZCR3IY4Vr>1Xm-hE``Z`}Hq(HycR7}I`cnL$ceB7Yy(rFwG=UbG_cN4wK7WUXT-JVrO5{%P#?Sc7D zMtt{o5beF)&Ooa*S2D`4WX{q1W;X&}t!$>mrgoQ?z1z?ivlOIV#Jj5=8I+(!3iyVQ z^#K2*OJXGZnVEc~03j=o=j6Oj@uKD*Aiftl+t!|-nKzF`nTHeGMf3dDA_(K;ouJaL zW9qj=??hc+norkZQjza404$VX9XN0=9&i&e2*VqJ7Vh=ZX2h&PD)2t@=)o?;BnXWo zCx7-*{BD&v56-z78^Y5yj$H%MlwhQQ7XfzzKhbC#4O-O00wFcE41rG~&@6&LRu8)o z?iN7;CS8?5Wb6%g3)s$gJt6BR@^vKr>n3X3h!FlGlN6vniZ_ecMRgJdr|*|YwAtr| zFDdd>gJ_8aA0>8Q%X_Qeeb&2hq?|#&I97}bu_jijyX<9kt3QHB8t9ET07)DgDR)D- zs$|KNPD`|wHEY3RF zmt6NA*gFc&aw$(6tX7vu4#A1=pTR%wXI$E>#X({XNq8O?kqWN^&PA{Tm2LF;g?tV;f7YsG z{ghoEK}da*e7By+Zx`f#2lX)}p&`>OnZZqhu2>SOEwYcG$8WongWQdwAev1F)`u7% zyEkY5iQP8o)1l}yZlV>MWuN-93gTwZgpp1;UaxV+B$YYPqW+ZM8f~!CfaB;uL=+I* z0K~oPNeK9J#I5<8N>87-GZHa)m3TpuXs z)ujcx$Ms}!d;@URhN0?4s3CmK2=umXEA9n+2Y3VWCY@;?*+?dWT{yhU00Cs&7i1Bc z!=U7&ZmRVjy4aOUs0eKFXyr zVsXs_!q{LLJe~a^!&VSB&1#ESRDaWZCVPL}H%mE!TxroMAokEaJ99kaiIb=Fybv9& zi+HpzatyGwVCZ28Sgcz}+SE-`PbABAevi-4h5fjK6U1x0a; zOhIZMn$nLEU9-6Fs_5j8Qt0!*XIxX`IP5!3$;F(;^aQ!#?PwE^`2%^h|&FdFmUVxU+6xkoGSB+S7vTd ze3A^UAMqdft@{;tmLPDE@3a*1*y|x<7ado`g`|ACKFc~uUVr4enJ?RFc@KkM_V4-M z6@xf5GSaTKC4fy*rBZq6UPMMUd&>btcFz;8&2|r61s?B4BJ!sq3@}PKDa441*Ef8v z32<-*rut8ub(I-`^grX)@D%+H);AgTm@TkY>w&}Yd;XJ3<*|X~aik1>k5jwgr#VT( zb-!f)99rE04UfL@(T|*1OO^HpB(`OZo_}!viA131XhV%|)4l@1gAD91|NUi%sg@7l zvQt}pO5X4CIG{9*5Xzru`u}$6J<8FLIW=(7T1+U=MBfEv1R$;$K^MFkm3&E;>_u3L z$KhoGisCH)Ppd)`OzQB!Hqa8Fkm}UY@N!INuTeqXdLV)Bq1S|sljTrhpmg+y`u{EB zjWaK4<+?5saCDm?un_2O6iO#)jEmRM`fNN+~LluI+QV0-LpPKbAAHYB9rBp z<5L#9qUYvEccu%4S@Cl%GmF3Xw|8lt=?C-?kP_<=U)Jx?9Hf}1)_RBCv_whj+q-en?XLKWQ6UC7Sl 
zSKQ}FWHe0aXpm!Ldn?J@l&#y`8$ZX>I)kt$x#$%S25@+lFdnf2yuCLiQh(BbQslbzx{44L#yRqn*>RV{XfaX_l zm!9X0!C`ax-#uhgnZRQ*T$9T?{>3%chc)GY2ZKkm2r;~_2LPKu*-YUlTM}K3UErdQ zV&AqLAHmCPw4YQW3htr0 z*6$*Y)N~L@Y({63#h0E}Zga6dd(3txIgv~|aWE^kXK;Wuu^<3VboBm53l1VBZbc-j zvVT3vfr?zRU?@Lu&DI&7Ln9fS2Qe0QVyEt5K2*l+A~dbW>~t99-onnpfUFi5NW$I1 z73m{H0Ut&Hia03cYr2o$Eeln7ktc;Mc(lNFv@o#st)R)FpRA`h#x~Q@8J#~26S6u~ zL7^NXM@w88;301dLfYs7O}jVj$D@PRIhS8OfjQ$Hz|Pz45+>qSzMV}GF^F1y@Dv@&s=~aCg1kzIiDo z^R!~ed!Tx9KcK86erc00{ui82G_2WT7CT|&p!6^zpR{}tF>%wt(0*V@IN z1d`hz_!P}!Z76`wI7(YAtewTFjTdP)cbUWFVUdthu|FdCR5o>g z{V++%)c)0Yr#3C|z_8$K`%vre%`2umlMcRR&QO!0IdAmNEC8mcQTd+YPNMGFpCWTK z9^U)YY6EE4y4y+Yu`XR1A>!ja?t3(gx4=*y#RgcColN*r$uP*|hD3r71=Jgo-e#_M zAohtQ=FSb{QAKb&ZWJ#)(=LA&oN|<|(IlyKwEGx(@gUqRrU}j+>$s=~?{>f6*9P0d zoQOk=h2H903~KixS)@w+KvYfdiMG7|!IWju95hcYA}_>MKno{wqnT7*O?Gs<$9E{0 zGvpXLN230AuU~v0ccK>DGdsF&lIBhJV|6@XcUp}YQ#!Os8q zTe9fRS;d77CJ;dP`nPTRG3Pos)2D%GHTJPfZHME!pPgxGQQ>M>BURWtVi+JL1WaQ) z2|KaLD%DImLGq~G@ed+jyQYx9CMx0Dl-|!C=`O&nde=vnzMb_~(%f!+l}YP=WC7RR zYK8bbwnIm(b}`t+XFx%;EgP}1Ybq8)NajD`s~fmq)i;V0c={rmO1LN$4?tDzimplO zs%LdwfCA(UZ^+4&9fpcn;W|(Dn4kbtJW>hb$(7p?_X|Esr$63`0JI&E;6n9@EM3cH zR1B}VLOQ~qIX&lsCT@#5b)=zxQl@VwFf6zAg-7S-Y95b`oR9p`A zBM5H|t%s*ghtvI+mgR25842^<;jZ!@+Uj(p+dPt#YsBo(yymT3Q>w2vGAbxo{pX|g zUE2PjS@8$9r(IV+Tbx@ylMh{ak4_-NU-KL=#z-s)nl*ngb3^TdcWSxe1v%w4gEIj( z=paa@G~JOt4-W`y?q`MKLhEmPoZ>?%wjKH4fxT0wvR26HypEkVC*y#^yOFe=v>jZt zJpLh6@QoV{Rnp_0=kAZL)HYK^nWw{_&D#)T$!~qWnL!Oue1=@FS%8i#r1wla@SWbcx&*F}kzjzo- z{fZ5L+r_jTwK>an4+p7s5Xs{04>8?)h-51-DQVkW@pZ5#g_!S^Zo)aWD_5LN_j-bx zJw@#&58!d4J;1}>hw{45AOPS=r(;^6C z7SX+Bh{AUwgcE2Mhr8S3b9$ar>gZt&Vt{xq-&SveOo=4CZmltF!PWQH>^Xf0_u$E5 zjjDZLHjssh0DTbtJly}#^_F2(by2(U+H?rgDN;&zhlEH80wUcY(%lUkq*J7n7U>k} z-hhO3cPY}b>5gx0p7)&h{P}+ArPp3-&M`-i`=0B>WtkQLGt7{;cD6`hz=6H3mLAJg zocxBd`BY<|l;y;Y!#%L%0BkE10=k0ia}sOu)ROp>T|DQ0Q>^e~!|R*lRR|frnTHs( z!k~x$U0UTGm_%mMe~#Law>v9vpr5mWkZnz9S6t^u%>QaVprD&X0TbZDw?)4_NkP0j 
z6AK;@ze0OS&2w=g7~H8Vc0fvh^&N3NmLNbcGlLuOYJ%ibRAr-pLK-e2U$Yv*i%5AM>n1(6OAhF6EcUHi2&X8f7Go8xmb8tYXTV=(2d!u zP%Gcbd}0TT+-uHnlBksz(26zZ?eq>fEP9_K&S|YQ8159L1Sa)vNS|vlfs|Db`g~c? z-whS3eNP9~loYCphZR4m4hvr^+)u}aazDpUK9%4(Y?0^fwlP|ae+u36Gzfrj%plc^ zabQne?37zzJI&f5f8x$3$2YwvGnW(?E40%UKyV3I2&CLb?QB7seBr;0f3mB$@_`(y zozl8z5DoAqapmdiC!pX+q1<;Q+1Omws@iN-j!{xt_LXb^_f+SJbok9*kfbJl6pfw^ zyjAz^_EB{c)=2%S2g6chFj*!^+-p@Orcb4#Of11tPK6))6tV4lk(o{vM@zV*wZfvo`3hr8CcByb z3QIGvOC7dDF;3aQ+~W>*Gx4m)^SJG&?+sxU*GsOLD|-hWN8G>cnTm{WdZ3EIAeOMrWKo*Ies@8o|T| z+gTmG6#d-GoJ()l|FyyKDD{;x7USZQ+}}uFI<5@Dj%C+}-jY+V zqHUGJYIKcgE)3Z5Z>U5>s2YN2^rPHp^T@ZR$wt?^863PC!~a|6lr_ISNH-zU zV37Jnh??zAS#j{vI73pid)4E)2FXY@%fG!K7m4gXi=p@T@Lv0kt2KVAS^cK+PbBqp z0BI*#Ta>GXhdw8i%=F85=hz455QMp9^AVwT3QW?T(`0qboQvcujSCmyFW@*}yQdqB zsxrBDz}=UpR@skYQHzGcf0^0}h^VUO+DeNsoiSB->Xfg{)2V5M)WC=#^kReLvO zDDr88Y`wkwsBCaO!wP@Y`~O{CC0BRJSHy`!g2VrEc-gW(g_De6@!dW~ly_s0vAkDo zFt{azmW(D+c_ap@);$Z(?4a)^CF!emc0#)KKQSG&fcLbM_E5xwo=%oF7eyS9jt*$* z;Qr9%7!r!K*~in8C#vL@wX4^ssm}xEyBGT9ExPNMI)|L2ulKYJ6Fmz0a@?igVCg>$ z%|JU2p6Ep=wj8Z@cx7oX*GsnzWS0JFvw=fanFQVUGqLWwuHx^);{V!y-`Gt5vFcap zC;ANSdUg-$_-Tiz;D48e&;QT)J2*&L!vJbjT2(^TNi@<1GfdKml((auMCo3kTlF=# zE9W#;#ccjfeUbq8)lcD{RWoG6{=X}c*62j61o}yLe*1+S#U8^Y$1MIo(XItz0mV5N z>NI{~4pO`)C>-;7$VkjX;df_!r;ZgkFwzWc9VL*r^S?_`{U)y@2!^x5N2t9bn@|p# z-InZS`p?QyL0UGmxr}_zB-38wb4k0a{w7y7&HYx_%TMlBKIo3_tA}iuRZj|90yzEy z-E4RyKKKU@{#XXLgl!dc4ixjj91Tr6v;HKew%vy+NToCf;$ zq(Q3OSJ~A~8HQL}gPqP)2c%tPw(%wGEZW5O2=n}JQQAwM{8eDCd1TdgV@HgK=199o zqPjBK9{$pfcS#ABder1uK@s&+5c}f2FV|1^rO#%V`~v3n1~5UI_DawI5l~JrCZ3@^ zQVm4|c`#ju4j3rZ+=dm+g$^sl9qSy99}oRAX33`O`(qDf8ZWV|&dsfSV24mP3APLZ z?YieHr^f$Rt8<>XRbqmjawI&QJ|lp2EZuH`go}jlXr5E1Lg?}3gf43aZh*~AK%!ti z>0W?jd5^W%qi-E9by&&!1OHb*GD_dXyFkMwCUwJr(*E>RkI|r5VmoLOrExq}$gK=f zN>iPmS2xcUy&t%MCkft7`=fvNB*QYRLER1hf0lx@kXTbm&cs48#|ew-g_{JHc|$57 z#J1(j%H&3KL*^ML*KQwO`DnId!yQtN@}++!r3YBuO!rZeJ}LAT_`hmMl$7Rp+!P8L zSo|dZrNr{L?woeAC<=`9XMZs+_DmA;HACXn}f8TT(%0|fl(B(JOf 
zSG-<$bgyVXqQNV(GQh&0@#ig|OQ!%yr}HuO-N4}?ZeL<)~=ec$?Q zH92WYDMhE&Vcz8U;V+8qH`f}>`Nlvwung=DTiO(t##8h6bpET2z%;fH30u%Pn(M8) zXUJd85-N;N&k6THE~55r8~|+Jw#+GhjcuweiReOZS__>~<~|Rlprnp*U)pk2$W_$Y zPuLmzTP^y(qS8Q`WF4YCVYja{)<3^Gg&lZr&HEqem>|JmnOaBn9ASPF$GG)>N&$hslfQwEJgDLzl=;YJ(; z55ZLV@fGTdDy4#YGlkP-^roAOCw4+}mwnWVD*hj#uhA|gs;YZo;s5eTOu1{;w;$V8 zw;-FsV~Wwti3_VnA8{gmmk>V}z4d+RUD0^|YPm>~_iL|l&C11xF$1DT zs&sti{+63US{1~SQ_BV)L1A2UupX3rcNCPL#5|MA^lALe(qndc=IR3~NoZmeM7oU3 zjvg2Dz#W(a#R;C3mlVSxdReUwoTy=IXd516iAE)lj3C9w4lULW)@VG!ow= zk|hbKv*}VW{>|=M!+-&#i8h*(hf$m}dyITD{@%a@2+`R!%ViVML?>>x0(N?)&4y!r zg6K?+AvtEWMf$vof0Xp!}dp|WcOVHd*y~fWMQubIMN{Gg7MhjfP=NG7et0u|h_di1| z|A|jG7M4DQ3-%1c9zo;9e-`EB9BIW+)jgrz1*cPyk+}l>gU`Sw0NmC&WOMDu@e6rZ z9+PIZxPXB4;N$&xc(J*asISLfqPyX9t|utI1uTw9*eG#*+^Q(XBGQ1qk*daNfm>9{ z-D+Ok9pq0W=ycUxYe4ejK}SO{GmGfAgRyJ}^1I>P2{{P-Y*Ju#QA9i`6w*Yd76pz% zH^Oh8CU022k`7S4^)6wXPqqq^f+15+N)y5M zhgbMc6m=K()%M1i5vGOYAeFp?4nEczWHFX;Kklq;_iV$}W)}I?;W2Eu1=ah!E{d_z zL}x5&uD8OXSA$*XakLx=yh#xR7+DA=1N@JY{Mu;Vjv|DS(b!dQwakhC#V_($@11)7 zwlmB&tX0|rHp6>tWp7zbd!)vBqqo8&0(WT9RkH&jE>-ec-FMZe?ru0hH$_(3()__#)kFC0?nk-tZP%3qm^6N>vIM1eU zbS;BCujPH*Ui-`unL*)a&^@if2a!YN9eM9HK#`L}9A)0m?GzYzz~@nz!9JL3CFvNB zs;ha+(HJg%3rxFKRscsPZvuBsX;_UuuIXREX8jE{YKS1rL0vQh57U61E4(fI2|~aX zWB0)Qf}_G-Kt-CKpRcanWYdxGx2+t?thUC6@ugyi(#x)!b@Bk8qp?PbFB%ei^ep0C zF*$6|Df7TujWMU(iJ3&PuN|Dget7IYkq8-ei^`mJ)(bgw%CU5OX1Ua#8QVp#%*Y^| zF9S<#YYfHdZ?YJHaWepBv!3B|T7jVog4ZIUSLT^~dvKd=0=VH>e8O_uPQ&r($NBkF zLLZUSssOJt|HJ#Hzt)Gf}CT5V%5@FZe0uKgySp$yguzG(?mTO zp-QLLY_Hy@NS`%6@AG(r`o>Q-$zt`R_urG(s$;a&H~jL63$>Hx5qD@-bzm2wPxBSh zeK3g2GEe}NCnom9e7XpOnZR=h(V^n6D!#w&;5Bc(h4t^DaES0Nog~IL~ z2T}$1%$CT)WWks@o`~Q6#5LPq2m_$knD(qVTVYa=y0&$sYv{?C{=~7wsA?P?x5&Ad z-1}p&?;EIJ9bWsIr9V`Fd_{Da20yRA{~-)cm;;W0i4`<7G!_N@%>PcG+ctcXlnE1& z^4(&O@>dgOJycjlR__|D_Q+|F3%|Ep8)}+mTRH3Imkpo z#E*iCE0la5#LkB&4eqMSVY2%r=&Sf11Q(u7om&vIoVQu|sOol*d_SvvbBRKBPvH_}fE%Dq3?E558tF5{L!feEDL^L!t#f93u)++?=tJ7Ab<@!8c%op2O zJok^cQjcay1subkZ%0|eeU#<8!li{j!Z;=OKgG(<`GiTbLvdY}_fOlGM32arN6}II 
z3CY0*1eU(pcagvq+B=(vT0;rkN6`fT_tvXvCKhSqobQ{N*DCjRi_KO6T>+;*RliI7 zaC@+iWr-%GEcxi?WTYYHv62Sr`CVJYy)`ZuPt{CS?QWVGbqHrhAW^d`b!G z_dbPR6yC`AoCw*W9mv$I@{`vp5n+LD@psUB6Tcge|GR}%K0Db-&Zk~AAEDAc9a_Gw zhu8d71ZfvpM(Q(4#1S@xBQ`(3G9mqn7=X`dROOz8QaL*=U8isCpt!S>q3;oe>jswS z(~vQrF5U=&EdKxGqPcgRYQ4-rHd+~pG+cO<7b%;x@&n)+^Y&3|p5k0d!SL@Xr^sE= zAnNw!P+cu~;lW(7Nj$eN4;6@T1|7j9GZlM=E!O`LK%TWq>e=pTmhByTdw6U@ny&dR z_ID{6;xVuI*tzt-{L!dSAGz0+Z-01UGedVN6c7zp(kR4sxtJ678`b%t@zg>j%i#&A z7$x=*C@w5~NL(2)M%?;uRbM{b$H3=0mXq4%zp~~og-^x0+`M0UUR=y7Y-D*z2cvw} z;YA};U8jr+2;?49!1ZFx9c6aF7CW1r;wAjeR98fN-;H^EKj1smXQR z2>D-*6RT-NO}jfvc-xiYx|W{s(5Hw@ z#E^E~z1cM~EGa2wy-&KolNoi72+}oK*e}G=4j#CnDT~k1GJ%i+``l|94c!gzm5tP0 z%Tr>Z-U4H1tma{hYe4e%RcRUPwSR}=!syMkatg9_0>U->)HaE z*sw3|uEj?LAek9dZ_qnUNd5DOkmiMS$a~mp8ur>Nb>k|_Pc@!OJ5OI z#<)?hJ2Z}JChcvb4m}(K1xY|l5U0NRjjXO+XrwaJ2i_^-^s=d3s7|Q_hZ7I)`*wbpR}K4>2+QV~*Xi4XPI zb`muK#dl+(7duUgCFHzrr#c%9*ak{x!vAxY>5LMU1!~K+f1daq9?AMmt+|}ujGJ%H z&0=0(e&5RR0WIWwdtI}Q-@;@t%_YtHe!diSysfC2NJ}$X39RAUGmruoky_?aNzRLb z_&3sGG3VQd_>=Fjx{9yHn*~;+aqU>_;?i9#LJ^0*_Dwe8=rJBt&F8LwdU|}WVvN|W zsz%rSqh?${#A=<{svGqA- z(`gLPR`hAe#5|zk-kPBD6`L(2DZuD3HHyY{I040jM2gWY)33u11 zDr025@*I6aAt$zek_Aa;VdQfCPBhTlcsRLIY;3?R4MK#Ai8qxvD}3H73~|&_OdL^m zs#eKq>*swTM^ew?P$OR;n_jL&gGYhh`LV4)rSaBYtYy#HKvhsttJ-i^W|>ZX|2ea%*l5~tuBb#=od4^4J+I%yjU8;rpPsizLv_Dk zz+b6sM?Usb*UEABaH~;f6OkEiw$(ZE&Q74Co=*AC8#n?L_w5?L)i-voRi-(gG-l$- z$aU2Dx^GEnYHHlD$>K#bZN6kYicvQrDzYZh)@H>EXF7<49?UbMW>fT^ETsA38>TN) zvLJ_>1(u8ytBH8cVZwA|!>Dhi2w>)Jes95VrTH3g+r*aJT2CddH|QRJ(j2xoopTi+ zPRsIE)mrdsD{roUr8Q7KO&nou;Mcd+xZh`U`nqmW%;YM^x1kX5-C7B6&AuzPx1)J!U%rW~{deAKm+~2~Wuy)T~75Ak_;@nIA}b z68+jUtBuV)nw6G9ENn#bUHKIy=ZiHv7U?9I!`R`(!JjH$cS5E%3 zW{shYiV_aXX$`@%t;lP1F)rE4e4zp^`jDG=3)i*2YuCx2PYt}T-WhPaB$s~?Fwn~K zEzX?v*Wk%(__Gw25P$w&zbOYe(D94g`Wj;qTk=Nd>z8~|eb-k6k^MNyj`~8~icjY< zef|6RJs7gGJkHmH_DQLbH)&{o5I~ELkd1RAIbN8W|6Cz9iHI#8zJ>WpE+>ZvsGMM7 z@Cg|oB*XTtlg$22-Sk!~C^(Y6Q*w$eA;Q-0Qeu79`7Hv_C*$-yi1?7}84FqYnqp8^ 
zkmXihp1NGfYHhJu+*PnujLkW&F@3(>H~DxoGAwgq`)h@lRhvnfTiM8_nr2O zj{aKCjQlPtQw8n;Qk<8)->(MJ&`%bUSrhtgD3wQ3#IMjR(O0nH%f+lLC7<_R8H1z6 zHFQ`8tIF%tPttOJZ+;MUM|vk>5P#wxBK>g{DtSnua~6B_E*nfTtEA=V)ojC2%jrr2 ziOBI#K$vv&MtQT!@Yba~&$f<_eOjjb+*gs&A8)m$2B(O5cU(1A-)9I_$uK@*)AVrN z`W0qB9-#-Hi14AFNRYd|}r_8o98WAPpngqF0aA_9Otme;I(m-={dMZRMd2fq+0o8x=ny6aD41N+AD($l{iOUn zs)jvKPIye|HksaFnrT4o+3MNc(;6_xT#z;6ZnyKi@hnI&;X2C=bXft%JtwdIJf6ql z^k^3zHhe={>7Q=PC**f)Y_zvOa%wy^XS1A|Q%dmM_Z?@8-EFWX4~)_=VvY3iO{=)+CGu7Cn@nM==5+pF$F{T^mV8LUE4LJ?4?m1_;5y0}0uxc3jC5Eab1$$s(_D zmWUt+px5!l)WN%^y9jt{4dlPVg#oqy!VZp>UQbuO-RfAn!ERzaQKku@c)MF3WywsV zciByX>}*i~%Q=@@OQ}Sx@@X>i2MhiT-u>HgbHUBof?DNNq?p^k!qBNj27QmCF({*BbNiG#7sL2PWAcpW$V(lH_HW#UW=?PRWB_GK)NBk`0!eGC z_;&?FYTSWOMVaN~bX%bw>e3#nPevHwG#qI1I9?SThOlN6yI&yIkCnA(fZs$)w=$ZC zY>e#|?rxsozi<*5mj(QG(X@MkuT%@ORSSjuAUVM!&lvapQ-7-q)XLQ1k=H<@Cv0HP z=mx{b-p|LREb&34#vxp_m>AtobU8M0RHMc|;!q>$`r$S>D`WB>--D&CQ*0Giq!rIS zpGu=AkC#5Kb_!WRt7F8os!yCB!v>w2pkZ!K$}_F=n38eG#=gJ5hok2=!Sv(qS^8}* zMCAcxGoqHpY7hKx;ju{$-jC>9C!SL{@gLrLc~cxMI<+3-fXlM}(k%@z#$8eq&GMKzXoUp)EH^kmdL zppj?nCojYpc2sYb`6&Ip@X%uB<48Zq=i3d_D5#PE+D!S!3A+GYK@ns}Wsa3U&Et8I zgwO=)ll$yvrGnlNQtOK<)ym7MN| zTHW_mBw_*$`rUoC!MpX~;3ZXIb3{0|!)P!aDwhHxK+K8ZG8Az2`1ddx$)TXGJy9ysK@PMkh`VG;O4DDA?7> z0l4ZY*fdc^oJ+*@ezO-@{|-egT1e1sDSba-14}v0PQ2Iu8YUSSbDCe%4foCB()ZCK z8Ee2ALaZ9oH;^;IUWu50Ui8OaWk_%ACn`Z~ebtvVWK{Pyh}u#}ar4Wpr#Zo2w%p5@ z4pl%pmzQ3QOeX7x1S})KAQzXlpqTGnIAo}(yZQYi&g^ljot>~(O`JsYM{UcR--d{x zc8S81-tu;tSkO^_%Pe&lCi+{-J#!;^0KFUu2Se6-xnbxh19f^U?WY^HwU`*I-h+Np zL9)f}lIM{NyUhZ-(ut$vbNe5yDOK$mn7~WH8WT0s#fL(ANdkMp??>>0i=D;DxR=xL z>6`C@wA>qMJrt_ey)fhx+s8z;~Wmw;UpB4KGq)+n6E2;UO#iN~U zAeG#AkwTt+emZ75z5U&B?l{Hkc}C#EXzz%BO?6+ zp{5+xNj_N|h}vSPgUJ#VquU*?MrMvDC&isSdoboN9Z%FId-OCOPVMn#BTaa30jgy# zCjDm{T5^2w28jYB<0ei(kggwU{zA@e%@^P*7Myp{aqTMmy$9$XE(}T7{+Z7x$BZ`; zCH^eNn9e$c&%EYt{VZ1-gz{&i3lZ{PdP`BF${XRo75l5#h9@HGNRr@%H8?;D<$0C0cCil7 zjZrX=PPppqimk^0X`-QyMEMyGNiwS*`@SC;S<@N3)BFHYPtfhk1+dG`n)Rl&N2=P- z-h^1y@(6pbiIb7WLq-?{2GOtPlfRkMLhIe4X0( 
zRoCmhV=S0|&FjRoFNXsB;?HrjFO!ef$(G*!qQ<3E-9M8lt`osuJNI>Lo{eCt2*;oS zl?9}~aAvrg+50nHE{r0)CzW?~*a&JD*!mQBId zw?Av=?!HX!AAFrfsQOfOwMU5BnXa$1by}oPCQFysmq@!2ujLsT`@QRNiMnR`3D4;!CsD&yAf? zdgd_$#aUs?Lj z%~Zmv!EiIK6PshJqQw0|Z+t1%Qr2=12MBO|S<`*4zSIg_xcsmw>_%U3Sjk%<+Xy+uKVw z=p^5FTtpD<5qJas7uP9_?WBlaAQCARg~Qcv{;TAkr~0mzrASNy^YTc3}PE3wR}(ePj7te>`q zAGKCqzdUU{tVl|6Hy}g>3@<~DPe3MQ!l)4vG1WST)AeA~62eJRYlL^_(Iw4X)N_2t zs0H_i2Ra|K>@sJGcjcz5n4VzIFTWoHdW&yTlKGmh(>{@?!r|>IJ>f?Js+jp^*qu{E z+b0YsOGy0=VZoM+;?8I0p=MSL4bxvtb%;O ziTqb7E(8L4-1RHcj}lo%9^W^pl+z*HonUud#8ugnKn7cRgrAbs=X3z=KDo!4h$X{~eGKH27Q`7qP%c?Mw^8^8fQd4(Sh`Wgu6|>cdQh zd!RMZlvLN+9mXwL7#0-nS<5e6$4MIz+c;h;q+;1Vy>;jcIxx7 z=2!j$3r`ohq#31J)%7;mXI}n2)6B5XHLOHEFdlT#6a!}efy`>Gbj=^l;-f=!bRrFJSd}@g~nlqKLLkjQ*jcT8bPsw~YWl zSd87QQaQ9Fu2ZIwpaDA47j}tGbtO?hI6=iKJf!%feDMB%tO`tV=qE+vO^8NVx^vIM zqiGDTs35Dp19G6N-{SG8Y>fjsZ%|i1e7XVfv{Z{~MlYZ&hLdjpXAQ=*6ubG{9wbaW zyRtz(6Bk8^qNO-xXH#yxv4`JX|9uLi#~2AWB+ zkm+2qJdt<7QSs}m4dDlQd-4b^(waIFqRxPTBvp0Cm~g_7yK_d6Ol~Z4*6Mp})A-Z- z>iTByw<6+C@w96E z)VkU3uwzw4>Hm8V&$@ZV75U!ozX27;r(x|oXr^0V!}Ud)nz~ye6Hj_` zyDSP>{0b?qLBQX9O38(WBcj^~QV#hcne8^_WyD+;bkwKFoOOtGf(dhB>7i3bK|?o~ zZ0BIHJTee@ma*~B3?08bOXua7;#D?uejMPSKte9N(V0xQ$Ge6jDCv1n!+KJ-qvtx0 zPPz@6YWl(4o=QduD@#&&k>ciKamRH`PlWsRLjEzUT>^8kcv{p4<)?`GVJ7 zkDEP$$$KQ{sQY=%kRXoOX_5|aA*IKT0O6si9I~7TGrVP2$=?KUEcbGa<<2W$!US#` za(@0r0rZCLzsMr=6RZlsFFH?3#?FI&DUv0_@iBA@_xQ9;4v_%3`uc5s56CXbpKh%< z(#8v6!FbeUx+AbgM!wD`&qsv}bQ8&egzWVvwID>1NaHEeP3$^m&_f`LEvT6Jp$AsI zM*&$4D<^w52ADAft-X!{tD%T5DM^9D@)Rm!}Q?4mu97jA2=Bp zXx-=k9aKu3_#+*vDp`^N{^$_d7@{3LU50%jj`)=$##iGnbVczG!As<0)%^Y zoA0K4?I|xP31)k_{GX&;CqD;E7%Dq}B)rRWu^Rw^fUJ>NS!k$1RbSgbA`*kG&8F3e zX*ZW)Mz_fma=3Du7@WAPdZXSdijZ8KJAf0k;P1$=b)5d^LGz#gTp=95pQ%qrJZ(Y$ z<2^=?*LIA?A6h*i1flU;&CW7QNkN;Qn9Y71@%S7WxxliP%DpF|9H2*mnXlolj=rI* zM?5E~X;ktG!++wP%CaPA`I@26hh!e#`vxxFp&EGX@zM74Y_Hr+h?T;g|q|fI?FN zo#ANU6hkJ<>2(Vt!36CA7avgMBV``2$=3$GG!x4lUh}jk>q9plq@qJ$GNtp?Hf?FSHMWU{(AB? 
z&4bMVT{OfJHX2sJ{f4=9nXM6Q#-8X|EJk0 z9&xJ*DHXI%QN*RAE*!hPBxWSq!elCshf#AkCo&ss4 z0+jIap$SHWkQ3?Ogt$-C$367&L8$$@;PAyz%;U_jDQT|Kp^#1zb$tAxyi`|VB(9;g zcrDS-%qs`r2qKuEPyn2hWW4@Iy$Y**$`UMh`lM>mmkAZHF+vSE_Jv)1a{5>WmJ&>n z3aITp(LVpYsQ!~4v6dT$c+>te7Jd;b7nok4*VToQKp{}EEOB}-jy}ObZ9h{bf}2j1 zu#{zQXW6|7o6ysJ&)OQcSjuUL>FBXn>{svozjLLm-#1nRuvGQG&ezFE)~QI=z0L&D zjWVhUcT1FU6cP+v`u`1LWx7-&tZFnIz`0tLaZ#VUbza(2UFY|+ApXW~rPr|tAdCNn zNHwp~0SFo-lxqmvICvW9RYf+Iu6G&&5fYz&3hlcj^FNyxO>mRA>DBQ_Hf- zxcv?0<$ND#-ZH2oya+C*k>o@4V2+H;+9sWN;dNJYyv(`|UOx9d=F#B(nHS(w1BUAQ0jTP)?Eq^y_YESRAW+`yN3j(B z#viyjEWvlJ$hU$owO{?)<4vt(}-v%`QKA%9OL}A^S7ic{Ht{_%W6wRuOD(rRrMRbCX>111d{1y zo4`DEQ#S}@O%=g!yL9*VC%0>Dt>w8ubhY{JbkN?1u1erF$G{&yXdmbn82^BsEa?atN=X%|pUJ@s16UhsUhp4QoZp-iBv^PC~bLZe(HkXcF$g%gu2kF-exl8e>X@IW`*pxF> zu3XM88YF?5NE6tl(RCF2;+sj6C*^9-pW;!~SxpKMM4G}}pp=GnD>xUO*X-)6Zq?sJ z?MZ<+w;!CO(8|X?_Tfu^TeM?)J>CMLDT_|q5wXC$7u2*C-gBtCS=)1caL%bx{!yS3 z8heU2*yU{>SgFoD;{Hfr4$||GY_u*vhSly>2Beqx3|VqY$C$j3EeNzTE&XV!#)H;O z1l+V&7Q%(7)o|v@00WUNk=1pc9}Pv*24;UXx=Z?DJJEuPfr6BeQv&NK;p~Le0^;v< z+pgOZbzdQ3GUa128BrzsvvsC?osFDb>CKtG&eZj%#Q(qN}Nce11CL;5_6p8d%%B>GO>S^5)x!!H|cMhz@s=!mR3 z`;)8j0;d~o-0(+Ou<*IY3`W3)P2#%Gi*T%K(eCff!GeSter*WG&-u^<$d7y89du(? 
z^XxMAn?_Z881Ecz(X{7dafc!iJ_S@5&Nf4f{OzvMAZ>G2Xo%vThe7~-K*t)AplBb; zMQ9%i6um$9m9ZPS+vn;;xgJ$9L}H{yMC^c(Sy#Wl1J3gU6865VNCjFm17xrStnznx z{~Dnm0)OXk<6=Cs1*APrvuMt8CtiG;Xk(FMZ77vgl29RdoMu?n3#H;VH`uFL*se#R z!QjUO)Py=7nKPa)=IuD#X&ctfI>I}DgG|yZ)DAGt=M>gkE-~|G6=o!PQcB|S-3lrI zw%-%(3QJI5OwR%bent*yL|piRwXiVPwIUW8Km_FUsRb6@ zKjDp7X`E)5w8=wVAv`a}ToqrXCID%pk)nXs;9+asal@cae=~d$^|5d40T|GjSj>xc z7I=0pQyYIZh5wq3?1aYu?#7hEgznAgM9U5Sy9h%15%MycP?;1`JF3+D*s6Og|NYN# z#Udi+=2{*^=o)K>oX;wMOk@V=y)%nb>-1m)B=m}>m$E1IF96R4KO5dQFcY8yIn)nm z^h=HRPCL76ilXax1z>DgO0ROfYAWj9Zj8kOkfVWbLd)voI1NB54q*C%r8efUs3#J5>`!S(T`<5To$Hl*WB1wM?pxqz^-C8Zn zpZ0iv3HnX4e`I7+&Ap`O529p=WWdV?9%!Emccv7?Kqw^1efx=YyPcb{UOl#|mMCgm zh7^+;5NfCKLC>=-7t=;NC#%}1L+r+ z#5k+Nz)3>KPtiEXe83jl_uIP~%3B$q_i0Qbp{@a<#0h$A{qEv;;5pC%Agme0A21vE zwC8y*8Q8e_!Fx`t$A{jO^WL6Roq&_|4CiKty#_lGr*|LA4SiEHT}+L1K}M*tE&6j`~vU+86F}x8U-47^nT{^Yy~8#kyd_u$Rw}@Nf%h>-o9FhW-nimgb=o zjg4j*01XfN_#V(l`VeGxOby+F&HL}$ij-_MCV#7nV+0D0zf!W9=>};@SQ3DTD`!;ES8s=4*^sWL<;sYxy6ICYi9W7!_=ay_bWX)2A` z9Yz6Fn{}VHAJS>~=z14EirXh_WM=f!wsoitiHioHUV;dHPyxZt2Q`na`g^;3h!QbYHv{++yn=K7~!we{na*#p#pA$K0Tzh74Kk)5Sn zrj+nFXodPMZYzmdcTQ6R>ZyCr4S8E`&-lPQk^%8uWXCO%e7&!@@1@K--imz0e6tt( zfqX9A-YFSw&)Dw)ze*q4NE5$W*n%ZBiEW6jH)4Ar*{@qP1lPZW$B%qDm-pbM6>4u- z932q(xc?$!Bz0?TwsUbBz3Ea@@Y%2%?)sd0kdttE@O<#tzSp2v-`lm|`5V1*pQl74 z8zS9rhOgF+Dfz0!u9N+K^I&ewZa7<5L&zS)pAKuBVO8ux&7lZmZe`}!l(I9-8^b=TPYQ|xoQSqkj$814NOJl`X!O8S6 zo7B_pwAJRk+5I{5<$j(fza}i03)+gg_Hj0)X_D7-(20NA4xVi&7K;|i_3x=&5T2-S*p)vFkf}saspR z{?j9Y0)J4n9-iuP9k> z-m^inO}?)(zhW<2ZEnEqwcvw$Ssk}}@NML$M~SuCp{~4y)a$pU**kbN5LfswmI5!t zTpLL(AG>8azbY$jyU-clT3)HF6N8W$hsP*I`_soI{I}16S_a$2F_f~fw3XT#%vj8h z0={-nwc|!>skvfvU8%)AC7`(k^M?kZl*4|r(Yjwn>J}EElCfA8YYbIL_>npqS9RZI-R6Sa{00{4t*i8RSA0BINO5Xs~=v zu!|*0RICo9U^55__NLbCYEWmjVxPFC)cJv*0Q}j2>*;+r+M~RT!aDr~)XrDdzN5X= zhDY-Spz&eCI@gaFqKPq$r)xW$g0!#Ebgt95++7G$&jpP(mLDCql-HCgikeiTOiS3b zw0=)*D4JXwJo3uobYBvj7YFw(=}{5i(llGqP+qn;v%9*e#09@HXC;E$gbb7^!CJC0 zoA>Jkn(Sc`uYO*37Xd=r&1c-dGfT(?|` 
z_V|x1u~5fMJ`nikb1?al65z#g!LJC>uP|&H5^0v(3{}pq|=i zMERteHZ-O-e4E~z99Ya26u1`On%4v}S%HFAdW}ta&Segm-Fq8=MXzg+ozHCXi~Yie z?z)BBhTGCVXP>qo#ekJ|==gfXbJOKv7ncWjM(eC~x+!;&xNk0{FH&u$eYmHDw>XM2 zM1VW4vA7hx-js${-)bhWgoVtHEpVr*OZ)n_|M<`9Ei^n!U}fIu05x<>mIZPCmS4wmv>p3wDEE4c|_~ zf_YO-`^DE%*Wmo~%0g{LI`HnP;a_*_r^)N}_LfsE4TKZqV?cWBZfc!A&kmZp*CKAk zo|z)L$JB{OOTbmPR>_c8zbmPrV0E8BBh`HKkWy8C%Hr+1_o9zD7wa2~jKJY*_G8^^ zW&ruo*NESvqDx-4zma~tLptD^6*ry>wKgYnl3GGlsu7KwPtC3?!nA%KZEv6NP-@;- zXt(A)btJC0K9f>5=sq_;GxH{Go2{vGHtxMe)j}v_eqLix|N9HeTW5p)>|6H0^JeR_ z#SBhMaKL3zH4r+~SWumxHLC03QFT{r?%hyz5;91Q%#e1uCRTTyOmzp8q8LLvyJCwb z37387;ArdsDwX2x{BW6Nol5BJ?H!1K)J{1pLzObc<-q zMZ*p)JT8yA7^$2lKKmHf+q}As=WKb0P;@jD5LxHKB$4~rU$y8@8SOnPuh#-|iG5}| z#r^169rbnnM=&~}7z2oxJqWYec>!CK%4e)!b2*jf|7-7B!;;GWHKmO)m0HeJR%)F| zZ89}A?$mo^cIL{H3OGsI|b(5zC!YbTD01H!q^{-gu zg&#ikca4syJ7a;HRg%&IKUH!=>FnNYMzbHIi4G9NTd5`K=o!D`S?{JAQwC74MBuycRD| zlx@?FfW4Z?om=G%@|{C0Ls`W|l3@g;MVRXsf zubyo17vw`_QcW4-|61nQm+jVp$#lC$g1+WvPh!*yeejgn2@^P1b>_c`~B`lfzwG%LfAJE)+5#=K1a&EI!aEc{s;Tvd_(Ro?5DEHBDJFFf<-1rj+yVTt&# zCYZGtb{=WI_WhLyH`_{)AbdgU<7>&p|- zI|U;ZVECnpSA5X2Ik3Z}BYNjSWM5AhWxMZ9^OY9Sj@4eQ3g&!7?%2bILuyAu zUE#g+_E-y;19HgwtU7V>){VAG$);6&$5+Qra<6?yv&~tNu5jy*(Uc9`9oP)N5^6>j zw{?~!@iGDptlf20-;#4?`EKdY&CPKykcoXmUxVnUUdfG}$SO^UiJ|71)1HYXx#mRG zb0cD@3bE46z?>ClGUlb4pxo7^P)lm~wPZ2ZAAwq9a23GR{4Nk&@Ox-6H07DN3WONu zSma@@oYw@7+Mn*vkE)peZNP-5m^U;BXJyd5StFV6yoWFk-~Su^(X9BYWkxBvC7|&L zHy$iBw;$CR;eL0PnKvXzxso72jaTUea<>!#v{&bswbr{%uNT&r02G0^ z4WGq&STgXIE?`$6aE)&SU#_?pWz;{_pe;SZ#t*}SMp+e{DwZKVcB9?eC{6!abFwA$Z+_2j zcvWwtv77jJ$FtHvYreIF$&lhWiVS}x6^kLO(1=>DrD_PdbH!LKfGtaO$kecY^@}zE zLvc;8EasHTg~iK~)136jwr=ch)-WTM=h~*8p0u^37Q}^6OXLIBcmKh)sucq-@P zxnOTe_}iRs2lF8TOrldW9#^>F+Aq|HeE|Arb^he9d{w2C4yId4tX&(`lt4#XILLeP zgp)PfO*!mOj>aPoal*P!iLfpY+mwgU!fTzSAXifa5Zr~xHEAk&uqV`zH&)5 zJZ?FDfVTg9L`m9SFwAEBzy*u4gX^lbiQDehs`^6?@oQq$xC$4&2NoK z+sT+u#hD%{l%U_A=r&FP8yqyqW9KwuYG(rs>7|THrB2>#0uV0*9loQRF0%_A%Pd@4 z_P53WpDy*)_zK_C>N3-Y8=Kv4$ZWoB_l!Hk6u;Zvd|cjD^4&k`bv~+ze>i?K)aS9H 
z=(|*r#d3~KhvPnC%F4DA2G49UO(e4R!`N>GW{F#NwG=1bjWat3<3++d!#iU~zwu~l zO>q6HaIf1Xr)1JAL!l_L`Gs?tB|A!ozU{AMU4xCau`^eAO&`2+P$XMR#h=;394e+u zTOjIx@r4h~L2nW>Wp@d7jm@h>E$m^Kl63T{z>XxP&Tnb*mXuC|ayDmvPh^Ym8YLkq z-k1@&51@BFC6Q#rxq;cPnLtbd3iV+6)@<6T5$(~_GaJqC7Mwz7-`N{9)WhQJWl>6< zI;wX+Y6+!o7RFJ2H1-EZ1)7yvg-*p`@UOMPgM8Ut#p)y+{pS{Gxx33 z-J5NtJhl^_z-?TUk6so4tW%d2Fy&DI7&h7-9KXL{tl+z1*Y)O48%V`7E@r2~ufz@c zz69^JEOQ8@COMAx`F8hFE+VdmHs8KXuWHoHjBO}jVol>@D5t=ZTV&7;!1aj^mt7fW`pdd2+sB;91G$t<$SW+^XUvg27bC@QK)8;intctQsE zozw^ax_jXdZlA9{m{mhE0TvC%4y0@u49awcx|(l?y);-k+d0_c5Fq&at4RzcGV;3MX1R4H2o# zlsOOCPB>#3_v6!mny(cLbr(9^Z)BD)d3J?#jcph+fBo2B9mILJOpgJ4a>cK2{{(zK zgNJq109FiWiIZtwFeovt0m8HSSMkbQt$$pC@8!pvRxWaZ@NQ%W=Cstr+AoAvu_>77 z2wQLSk2j7!7t@XCcvPGL5lBlRLRrk_wa9-1u(0PMIxcQZR+#w#NvsA4?tR8D(BQRd z8q@P+X?wOonr4m669|Hd0rnf#{DH>kMME}>h1@0)CCf=|zpUXsEI9z+MI4_rtjYIJ ze*`YwlHr7UI7_`xHB2?BJu*GuqglW2W%P00!b^0;MpdlleN1p$+NY{c0AEYPmCa)w z_0jxl?6)HXUi>|UbBp>-uiYi&a<~q3swP<>_1n;J#y4|Ta+^1mhUHY0HUL^1rKyB* za+MudKLVg}Vw&uhDZqTu3}Dh*16?}l?+f;mDxp{2H7akvE>824>Tu`Z9K@CfFn z$Tc&AUV4>4FGY0e;Fli<>ru!uhgW4jfPR+n)ZP2%8r0`5xOmp8$68b$IOo$>Ex}Sg zqqHzCVpzMcvbeo;Y!qT9F3Rd<&46Kzq2*h4Z69?Xm94~D^FBNZTZTq9%jN`|?W}oa zuCW<}xWabKOp*Fr7kR^26fR}GNXVRx6pHk(d|c3Y8fP0U6Rw0IH%ew0qFm}`U+@lT zR>ECiIgj!H&mt1|0hke}gSs&BjWHM#B}NGQY`usT{4kc>6LJhn0m~gFl2l;P41y3i z3^i9rm`r_{W3W{Odj~j-+jiJ_)VN}tJZ#3BQSU1yXCfCoC-09_i?Dmq$^)VFm10pc5GCwI z?Ek7fIY?tKwr!N8u8)veLUEoWy3{AA;muxj_bn}HP8aMwnr4F=3KH3+g)ylHowP*3 z%H{joqCQ3)Cbx#O$GfF{GF1jrD1M_w(4z{~H_3MFJ{KWlKXPxkZ8}zaR3;r9P8$l$ zUf0AKAh+C8PM2+RS+{dd+jGZs+kw|+9u>|FQOX_JKXjiQ+w;|S0~dqtZDYNO@3f~- zlcKpv)TWfm8zNBjPbe6tUOjVZC->+Dxl%%a8suf1CZw;llf$ z3R08f;tau<^G>Xq8F=A^7r-l%y?!SWZ^=Vd-kwji460kcDTErm`-(`f;V z=UKYuFJWk(0H-qduDiUq@>kbIqjMHKiwliL_t*+-Twxh6*nwSv2M{D5u_^tV8Bs^FGdI{9<(sjwmx0irP8zm{imip zj9Ay)9vmonm zti*|fCwGA?bJGD}oK4Aa8v00wO%5LQG-VgbQ!@llplg)fWtp5TRXjoJ6WP@2MTyr_ zYto2<0E1mR8#pLxN)($j_$r4!H=br*zxjPUD8n(w#>0+19Z$*(lZNE3)l>pTRh}G zb=TmyIeKNT8>`@Tmcsq2GK>pV0is 
z1YvGQmk-+HuO@B|1KP_1TbG8#MxiaU`n`duwR*Y2zN&QqxG1rC{U1GgB91AIqv@PUx;I$+OZ(pX^@XWg7?Vwd_0 zyb{+k{DAhWz|LYvaY&u&Z&&j?XVo|}!)p7s+`1vCYTO03&Lp&Ep={vEBy7Qbc`#GH zX?0gXQ>X6I^WO)ltSa@5>TiaQ)+O+Xu;!{jN z0y|P@4T{R-*r_gEoS*RYZlKIjE3BKkQ#&P=3ly=Zv^SK;GAMQc9k$iHqg(xC(oyC< zTt#VK%liScJIY@a5;NtC{Cc$=3il zdo;2M_aOwjmQMtXT`sCLGou?po0o5VbHhw^VZer(1%mx4JMb7Mk2o8{MFf&p3S3Q$ zgUK?|H(_C*oq;v8`EV@rdvcH<`0QHV6tGC$EoSCbKlxhcINC~aTyrqs9(kpr5SuRR z0$v10tmoez91l&U2lF4};xZC5`&-0*4ivvHPC9ttMcHjNcl;t;r`azW=DY zH7~6N&?J&pXM>L`+W-BfX7d1`@${Ol0>DQv`#-;A9wUOdP^QA_3EvKSPWe>shG;ZN z2CO;|&!3li~UgBNI}Uq+)r&(% + + + diff --git a/doc/source/glossary.rst b/doc/source/glossary.rst index 4dcb0b45..33fc26ef 100644 --- a/doc/source/glossary.rst +++ b/doc/source/glossary.rst @@ -3,31 +3,42 @@ Glossary ======== .. glossary:: + :sorted: Resource An entity representing anything in your infrastructure that you will - associate metric(s) with. It is identified by a unique ID and can contain - attributes. + associate metric(s) with. It is identified by a unique ID and can + contain attributes. Metric - An entity storing measures identified by an UUID. It can be attached to a - resource using a name. How a metric stores its measure is defined by the - archive policy it is associated to. + An entity storing aggregates identified by an UUID. It can be attached + to a resource using a name. How a metric stores its aggregates is + defined by the archive policy it is associated to. Measure - A datapoint tuple composed of timestamp and a value. + An incoming datapoint tuple sent to Gnocchi by the api. It is composed + of a timestamp and a value. Archive policy - A measure storage policy attached to a metric. It determines how long - measures will be kept in a metric and how they will be aggregated. + An aggregate storage policy attached to a metric. It determines how long + aggregates will be kept in a metric and how they will be aggregated. 
Granularity - The time between two measures in an aggregated timeseries of a metric. + The time between two aggregates in an aggregated time series of a metric. Timeseries - A list of measures. + A list of aggregates ordered by time. Aggregation method - Function used to aggregate multiple measures in one. For example, the - `min` aggregation method will aggregate the values of different measures - to the minimum value of all the measures in time range. + Function used to aggregate multiple measures into an aggregate. For + example, the `min` aggregation method will aggregate the values of + different measures to the minimum value of all the measures in the time + range. + + Aggregate + A datapoint tuple generated from several measures according to the + archive policy definition. It is composed of a timestamp and a value. + + Timespan + The time period for which a metric keeps its aggregates. It is used in + the context of archive policy. diff --git a/doc/source/index.rst b/doc/source/index.rst index 308683f6..822263f8 100644 --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -38,10 +38,10 @@ Why Gnocchi? Gnocchi has been created to fulfill the need of a time series database usable in the context of cloud computing: providing the ability to store large -quantities of metrics. It has been designed to handle large amount of measures -being stored, while being performant, scalable and fault-tolerant. While doing -this, the goal was to be sure to not build any hard dependency on any complex -storage system. +quantities of metrics. It has been designed to handle large amount of +aggregates being stored, while being performant, scalable and fault-tolerant. +While doing this, the goal was to be sure to not build any hard dependency on +any complex storage system. 
The Gnocchi project was started in 2014 as a spin-off of the `OpenStack Ceilometer`_ project to address the performance issues that Ceilometer diff --git a/doc/source/operating.rst b/doc/source/operating.rst index 8cf0481f..bc80e58b 100644 --- a/doc/source/operating.rst +++ b/doc/source/operating.rst @@ -135,7 +135,7 @@ How to plan for Gnocchi’s storage ================================= Gnocchi uses a custom file format based on its library *Carbonara*. In Gnocchi, -a time series is a collection of points, where a point is a given measure, or +a time series is a collection of points, where a point is a given aggregate, or sample, in the lifespan of a time series. The storage format is compressed using various techniques, therefore the computing of a time series' size can be estimated based on its **worst** case scenario with the following formula:: @@ -170,7 +170,7 @@ maximize CPU utilisation when computing metric aggregation. You can use the metric processing. It’ll show you the number of metric to process, known as the processing backlog for `gnocchi-metricd`. As long as this backlog is not continuously increasing, that means that `gnocchi-metricd` is able to cope with -the amount of metric that are being sent. In case this number of measure to +the amount of metric that are being sent. In case this number of measures to process is continuously increasing, you will need to (maybe temporarily) increase the number of `gnocchi-metricd` daemons. You can run any number of metricd daemon on any number of servers. diff --git a/doc/source/rest.j2 b/doc/source/rest.j2 index c5dd322d..316afa16 100644 --- a/doc/source/rest.j2 +++ b/doc/source/rest.j2 @@ -80,14 +80,15 @@ status code. It is possible to provide any number of measures. needed to honor constraints defined by the archive policy used by the metric, such as the maximum timespan. 
-Once measures are sent, it is possible to retrieve them using *GET* on the same -endpoint: +Once measures are sent, it is possible to retrieve aggregates using *GET* on +the same endpoint: {{ scenarios['get-measures']['doc'] }} Depending on the driver, there may be some lag after POSTing measures before -they are processed and queryable. To ensure your query returns all measures -that have been POSTed, you can force any unprocessed measures to be handled: +they are processed and queryable. To ensure your query returns all aggregates +that have been POSTed and processed, you can force any unprocessed measures to +be handled: {{ scenarios['get-measures-refresh']['doc'] }} @@ -100,7 +101,7 @@ The list of points returned is composed of tuples with (timestamp, granularity, value) sorted by timestamp. The granularity is the timespan covered by aggregation for this point. -It is possible to filter the measures over a time range by specifying the +It is possible to filter the aggregates over a time range by specifying the *start* and/or *stop* parameters to the query with timestamp. The timestamp format can be either a floating number (UNIX epoch) or an ISO8601 formated timestamp: @@ -129,7 +130,7 @@ See also :ref:`Aggregation across metrics ` and :ref Transformations ------------------------ -In addition to granularities defined by the archive policy, measures can be +In addition to granularities defined by the archive policy, aggregates can be resampled to a new granularity. {{ scenarios['get-measures-resample']['doc'] }} @@ -232,7 +233,7 @@ It is also possible to list archive policies: {{ scenarios['list-archive-policy']['doc'] }} Existing archive policies can be modified to retain more or less data depending -on requirements. If the policy coverage is expanded, measures are not +on requirements. 
If the policy coverage is expanded, aggregates are not retroactively calculated as backfill to accommodate the new timespan: {{ scenarios['update-archive-policy']['doc'] }} @@ -581,7 +582,7 @@ requested resource type, and then compute the aggregation: {{ scenarios['get-across-metrics-measures-by-attributes-lookup-groupby']['doc'] }} -Similar to retrieving measures for a single metric, the `refresh` parameter +Similar to retrieving aggregates for a single metric, the `refresh` parameter can be provided to force all POSTed measures to be processed across all metrics before computing the result. The `resample` parameter may be used as well. -- GitLab From 954b7b9427a00b47b8759a3f7ba91b4981579ced Mon Sep 17 00:00:00 2001 From: gord chung Date: Fri, 15 Sep 2017 21:37:33 +0000 Subject: [PATCH 0980/1483] catch transformation errors - add a transformerror exception in case something goes wrong during transform --- gnocchi/carbonara.py | 11 ++++++++++- gnocchi/rest/api.py | 7 ++++++- gnocchi/tests/test_carbonara.py | 10 ++++++++++ 3 files changed, 26 insertions(+), 2 deletions(-) diff --git a/gnocchi/carbonara.py b/gnocchi/carbonara.py index 7caa36d3..ff880d70 100644 --- a/gnocchi/carbonara.py +++ b/gnocchi/carbonara.py @@ -38,6 +38,14 @@ ONE_SECOND = numpy.timedelta64(1, 's') Transformation = collections.namedtuple('Transformation', ["method", "args"]) +class TransformError(Exception): + """Error raised when transforming series fails""" + + def __init__(self, msg): + super(TransformError, self).__init__( + "Failed to transform series: %s" % msg) + + class BeforeEpochError(Exception): """Error raised when a timestamp before Epoch is used.""" @@ -571,7 +579,8 @@ class AggregatedTimeSerie(TimeSerie): values = ts["values"] else: - raise ValueError("Transformation '%s' doesn't exists" % trans) + raise TransformError("Transformation '%s' doesn't exists" % + trans.method) return AggregatedTimeSerie(sampling, self.aggregation_method, ts=make_timeseries(timestamps, values), diff 
--git a/gnocchi/rest/api.py b/gnocchi/rest/api.py index ca467a33..b2649fa6 100644 --- a/gnocchi/rest/api.py +++ b/gnocchi/rest/api.py @@ -488,7 +488,8 @@ class MetricController(rest.RestController): storage.GranularityDoesNotExist, storage.AggregationDoesNotExist) as e: abort(404, e) - except aggregates.CustomAggFailure as e: + except (aggregates.CustomAggFailure, + carbonara.TransformError) as e: abort(400, e) @pecan.expose() @@ -1591,6 +1592,8 @@ class MetricsMeasuresBatchController(rest.RestController): except (storage.GranularityDoesNotExist, storage.AggregationDoesNotExist) as e: abort(404, e) + except carbonara.TransformError as e: + abort(400, e) class SearchController(object): @@ -1775,6 +1778,8 @@ class AggregationController(rest.RestController): storage.GranularityDoesNotExist, storage.AggregationDoesNotExist) as e: abort(404, e) + except carbonara.TransformError as e: + abort(400, e) MetricIDsSchema = [utils.UUID] diff --git a/gnocchi/tests/test_carbonara.py b/gnocchi/tests/test_carbonara.py index d3ade51a..82f06433 100644 --- a/gnocchi/tests/test_carbonara.py +++ b/gnocchi/tests/test_carbonara.py @@ -310,6 +310,16 @@ class TestAggregatedTimeSerie(base.BaseTestCase): self.assertEqual(1, len(ts)) self.assertEqual(6, ts[datetime64(2014, 1, 1, 12, 0, 0)][1]) + def test_unknown_transform(self): + ts = carbonara.TimeSerie.from_tuples( + [(datetime64(2014, 1, 1, 12, 0, 0), -3), + (datetime64(2014, 1, 1, 12, 1, 0), 5), + (datetime64(2014, 1, 1, 12, 2, 0), -6)]) + ts = carbonara.AggregatedTimeSerie.from_timeseries( + [ts], sampling=60, aggregation_method="last") + self.assertRaises(carbonara.TransformError, ts.transform, + [carbonara.Transformation("rubbish", tuple())]) + def _do_test_aggregation(self, name, v1, v2): ts = carbonara.TimeSerie.from_tuples( [(datetime64(2014, 1, 1, 12, 0, 0), 3), -- GitLab From ba14f8290d6aeef2d8502664443ad82cf449b4f4 Mon Sep 17 00:00:00 2001 From: Asu4ni Date: Thu, 21 Sep 2017 22:40:28 +0800 Subject: [PATCH 0981/1483] Fix doc word 
usage inconsistency - 'time series' Both 'timeseries' and 'time series' are used. And after some research, 'time series' is used more frequently. All 'timeseries' are changed to 'time series'. --- doc/source/glossary.rst | 2 +- doc/source/index.rst | 2 +- doc/source/rest.j2 | 8 ++++---- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/doc/source/glossary.rst b/doc/source/glossary.rst index 33fc26ef..0cf580d3 100644 --- a/doc/source/glossary.rst +++ b/doc/source/glossary.rst @@ -26,7 +26,7 @@ Glossary Granularity The time between two aggregates in an aggregated time series of a metric. - Timeseries + Time series A list of aggregates ordered by time. Aggregation method diff --git a/doc/source/index.rst b/doc/source/index.rst index 822263f8..adff356b 100644 --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -102,7 +102,7 @@ Gnocchi vs Graphite `Graphite `_ is essentially a data metric storage composed of flat files (Whisper), and focuses on rendering those -timeseries. Each timeseries stored is composed of points that are stored +time series. Each time series stored is composed of points that are stored regularly and are related to the current date and time. In comparison, Gnocchi offers much more scalability, a better file format and diff --git a/doc/source/rest.j2 b/doc/source/rest.j2 index 316afa16..c66a4717 100644 --- a/doc/source/rest.j2 +++ b/doc/source/rest.j2 @@ -597,7 +597,7 @@ on whether boundary values are set ('start' and 'stop') and if 'needed_overlap' is set. When a boundary is set, Gnocchi expects that we have certain percent of -timestamps common between timeseries. This percent is controlled by +timestamps common between time series. This percent is controlled by needed_overlap, which by default expects 100% overlap. If this percent is not reached, an error is returned. @@ -605,12 +605,12 @@ reached, an error is returned. 
If no boundaries are set, Gnocchi requires 100% overlap across all series -The ability to fill in points missing from a subset of timeseries is supported +The ability to fill in points missing from a subset of time series is supported by specifying a `fill` value. Valid fill values include any valid float or `null` which will compute aggregation with only the points that exist. The `fill` parameter will not backfill timestamps which contain no points in any -of the timeseries. Only timestamps which have datapoints in at least one of -the timeseries is returned. +of the time series. Only timestamps which have datapoints in at least one of +the time series is returned. .. note:: -- GitLab From 71beadb653df37613e161db96706bd078b279b4d Mon Sep 17 00:00:00 2001 From: Leandro Reox Date: Tue, 29 Aug 2017 15:01:59 -0300 Subject: [PATCH 0982/1483] Added support for max_pool_connections on s3 driver --- gnocchi/common/s3.py | 5 ++++- gnocchi/storage/s3.py | 5 +++++ 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/gnocchi/common/s3.py b/gnocchi/common/s3.py index d7969f2a..4981a8a3 100644 --- a/gnocchi/common/s3.py +++ b/gnocchi/common/s3.py @@ -18,6 +18,7 @@ import daiquiri import tenacity try: import boto3 + import botocore.config as boto_config import botocore.exceptions except ImportError: boto3 = None @@ -41,7 +42,9 @@ def get_connection(conf): endpoint_url=conf.s3_endpoint_url, region_name=conf.s3_region_name, aws_access_key_id=conf.s3_access_key_id, - aws_secret_access_key=conf.s3_secret_access_key) + aws_secret_access_key=conf.s3_secret_access_key, + config=boto_config.Config( + max_pool_connections=conf.s3_max_pool_connections)) return conn, conf.s3_region_name, conf.s3_bucket_prefix diff --git a/gnocchi/storage/s3.py b/gnocchi/storage/s3.py index 32b7cbd0..ac9593ef 100644 --- a/gnocchi/storage/s3.py +++ b/gnocchi/storage/s3.py @@ -49,6 +49,11 @@ OPTS = [ help="Maximum time to wait checking data consistency when " "writing to S3. 
Set to 0 to disable data consistency " "validation."), + cfg.IntOpt('s3_max_pool_connections', + min=1, + default=50, + help="The maximum number of connections to keep in a " + "connection pool."), ] -- GitLab From 0035279e9fc0d438ac3021ea815fdb467a6e4521 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Fri, 22 Sep 2017 07:49:37 +0200 Subject: [PATCH 0983/1483] Fix fake metricd used in gabbi storage.process_background_tasks() was originally locks each metrics it processes. But since we move to the sack system the locks are handled by the caller. So, the fake-metricd thread of gabbi does not lock metric is process anymore. This can lead to race when an API call use refresh=true and metricd also try to process the same metric. This change fixes that by also using refresh_metric in the fake metricd used in gabbi tests. Closes-bug: #244 --- gnocchi/tests/functional/fixtures.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/gnocchi/tests/functional/fixtures.py b/gnocchi/tests/functional/fixtures.py index 988c5365..dfb1cf02 100644 --- a/gnocchi/tests/functional/fixtures.py +++ b/gnocchi/tests/functional/fixtures.py @@ -179,8 +179,12 @@ class MetricdThread(threading.Thread): def run(self): while self.flag: metrics = utils.list_all_incoming_metrics(self.incoming) - self.storage.process_background_tasks( - self.index, self.incoming, metrics) + metrics = self.index.list_metrics(ids=metrics) + for metric in metrics: + self.storage.refresh_metric(self.index, + self.incoming, + metric, + timeout=None) time.sleep(0.1) def stop(self): -- GitLab From 4da010d52b27a7a61ab8c535c09e3cac0e8a71b5 Mon Sep 17 00:00:00 2001 From: Haikel Guemar Date: Thu, 21 Sep 2017 15:23:08 +0200 Subject: [PATCH 0984/1483] Add documentation for delete-metric API Closes #371 --- doc/source/rest.j2 | 4 ++++ doc/source/rest.yaml | 12 ++++++++++++ 2 files changed, 16 insertions(+) diff --git a/doc/source/rest.j2 b/doc/source/rest.j2 index c66a4717..11e62288 100644 --- 
a/doc/source/rest.j2 +++ b/doc/source/rest.j2 @@ -46,6 +46,10 @@ To retrieve the list of all the metrics created, use the following request: {{ scenarios['list-metric']['doc'] }} +Metrics can be deleted through a request: + +{{ scenarios['delete-metric']['doc'] }} + .. note:: Considering the large volume of metrics Gnocchi will store, query results are diff --git a/doc/source/rest.yaml b/doc/source/rest.yaml index 34b31eac..eeda2b6a 100644 --- a/doc/source/rest.yaml +++ b/doc/source/rest.yaml @@ -104,6 +104,18 @@ "archive_policy_name": "low" } +- name: create-metric-3 + request: | + POST /v1/metric HTTP/1.1 + Content-Type: application/json + + { + "archive_policy_name": "medium" + } + +- name: delete-metric + request: DELETE /v1/metric/{{ scenarios['create-metric-3']['response'].json['id'] }} HTTP/1.1 + - name: create-archive-policy-rule request: | POST /v1/archive_policy_rule HTTP/1.1 -- GitLab From a9e5815297da9dc21ea6cdd6099ce792db687e16 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Mon, 11 Sep 2017 16:18:35 +0200 Subject: [PATCH 0985/1483] rest: implements pagination links This change implements pagination links according the RFC5988. It only implements the 'next' link, since other need extra mysql query that we don't really want for now. Related #3 --- gnocchi/rest/api.py | 43 ++++++++++++++++--- .../tests/functional/gabbits/metric-list.yaml | 33 ++++++++++++++ .../tests/functional/gabbits/pagination.yaml | 2 + .../tests/functional/gabbits/resource.yaml | 9 ++++ .../pagination-link-3cc64889ac414d28.yaml | 6 +++ 5 files changed, 88 insertions(+), 5 deletions(-) create mode 100644 releasenotes/notes/pagination-link-3cc64889ac414d28.yaml diff --git a/gnocchi/rest/api.py b/gnocchi/rest/api.py index b2649fa6..219dee1d 100644 --- a/gnocchi/rest/api.py +++ b/gnocchi/rest/api.py @@ -109,6 +109,23 @@ def set_resp_location_hdr(location): pecan.response.headers['Location'] = location +def set_resp_link_hdr(marker, *args): + # NOTE(sileht): This comes from rfc5988. 
+ # Setting prev, last is too costly/complicated, so just set next for now. + options = {} + for arg in args: + options.update(arg) + if "sorts" in options: + options["sort"] = options["sorts"] + del options["sorts"] + options["marker"] = marker + # NOTE(sileht): To always have the same orders + options = sorted(options.items()) + params = urllib_parse.urlencode(options, doseq=True) + pecan.response.headers.add("Link", '<%s?%s>; rel="next"' % + (pecan.request.path_url, params)) + + def deserialize(expected_content_types=None): if expected_content_types is None: expected_content_types = ("application/json", ) @@ -620,14 +637,19 @@ class MetricsController(rest.RestController): if provided_creator and creator != provided_creator: abort(403, "Insufficient privileges to filter by user/project") provided_creator = creator + + pagination_opts = get_pagination_options(kwargs, + METRIC_DEFAULT_PAGINATION) attr_filter = {} if provided_creator is not None: attr_filter['creator'] = provided_creator - attr_filter.update(get_pagination_options( - kwargs, METRIC_DEFAULT_PAGINATION)) + attr_filter.update(pagination_opts) attr_filter.update(kwargs) try: - return pecan.request.indexer.list_metrics(**attr_filter) + metrics = pecan.request.indexer.list_metrics(**attr_filter) + if metrics and len(metrics) >= pagination_opts['limit']: + set_resp_link_hdr(str(metrics[-1].id), kwargs, pagination_opts) + return metrics except indexer.IndexerException as e: abort(400, e) @@ -727,16 +749,23 @@ class ResourceHistoryController(rest.RestController): enforce("get resource", resource) + # FIXME(sileht): pagination doesn't work, the marker + # expected is currently only the resource id while it should be + # the resource_id+revision for the history try: # FIXME(sileht): next API version should returns # {'resources': [...], 'links': [ ... 
pagination rel ...]} - return pecan.request.indexer.list_resources( + resources = pecan.request.indexer.list_resources( self.resource_type, attribute_filter={"=": {"id": self.resource_id}}, details=details, history=True, **pagination_opts ) + if resources and len(resources) >= pagination_opts['limit']: + set_resp_link_hdr(str(resources[-1].id), kwargs, + pagination_opts) + return resources except indexer.IndexerException as e: abort(400, e) @@ -1068,13 +1097,17 @@ class ResourcesController(rest.RestController): try: # FIXME(sileht): next API version should returns # {'resources': [...], 'links': [ ... pagination rel ...]} - return pecan.request.indexer.list_resources( + resources = pecan.request.indexer.list_resources( self._resource_type, attribute_filter=policy_filter, details=details, history=history, **pagination_opts ) + if resources and len(resources) >= pagination_opts['limit']: + set_resp_link_hdr(str(resources[-1].id), kwargs, + pagination_opts) + return resources except indexer.IndexerException as e: abort(400, e) diff --git a/gnocchi/tests/functional/gabbits/metric-list.yaml b/gnocchi/tests/functional/gabbits/metric-list.yaml index 7b0f8089..347e3c87 100644 --- a/gnocchi/tests/functional/gabbits/metric-list.yaml +++ b/gnocchi/tests/functional/gabbits/metric-list.yaml @@ -131,6 +131,39 @@ tests: $[1].archive_policy.name: first_archive $[2].archive_policy.name: first_archive + - name: list metrics by archive_policy with limit and pagination links page 1 + GET: /v1/metric?archive_policy_name=first_archive&sort=name:desc&limit=2 + request_headers: + # User admin + authorization: "basic YWRtaW46" + response_headers: + Link: "<$SCHEME://$NETLOC/v1/metric?archive_policy_name=first_archive&limit=2&marker=$RESPONSE['$[1].id']&sort=name%3Adesc>; rel=\"next\"" + response_json_paths: + $.`len`: 2 + $[0].name: disk.io.rate + $[1].name: disk.io.rate + $[0].archive_policy.name: first_archive + $[1].archive_policy.name: first_archive + + - name: list metrics by 
archive_policy with limit and pagination links page 2 + GET: /v1/metric?archive_policy_name=first_archive&limit=2&marker=$RESPONSE['$[1].id']&sort=name:desc + request_headers: + # User admin + authorization: "basic YWRtaW46" + response_json_paths: + $.`len`: 1 + $[0].name: cpu_util + $[0].archive_policy.name: first_archive + + - name: list metrics ensure no Link header + GET: /v1/metric?archive_policy_name=first_archive&sort=name:desc + request_headers: + # User admin + authorization: "basic YWRtaW46" + xfail: true + response_headers: + Link: whatever + - name: list metrics by creator jd GET: /v1/metric?creator=jd request_headers: diff --git a/gnocchi/tests/functional/gabbits/pagination.yaml b/gnocchi/tests/functional/gabbits/pagination.yaml index 1b9bda97..d9e17af5 100644 --- a/gnocchi/tests/functional/gabbits/pagination.yaml +++ b/gnocchi/tests/functional/gabbits/pagination.yaml @@ -89,6 +89,8 @@ tests: - name: list first two items order by id GET: /v1/resource/generic?limit=2&sort=id:asc + response_headers: + link: "<$SCHEME://$NETLOC/v1/resource/generic?limit=2&marker=28593168-52bb-43b5-a6db-fc2343aac02a&sort=id%3Aasc>; rel=\"next\"" response_json_paths: $.`len`: 2 $[0].id: 1e3d5702-2cbf-46e0-ba13-0ddaa3c71150 diff --git a/gnocchi/tests/functional/gabbits/resource.yaml b/gnocchi/tests/functional/gabbits/resource.yaml index 0fda4398..da8d20f9 100644 --- a/gnocchi/tests/functional/gabbits/resource.yaml +++ b/gnocchi/tests/functional/gabbits/resource.yaml @@ -216,6 +216,15 @@ tests: $[1].revision_end: null $[1].metrics.'disk.iops': $RESPONSE["metrics.'disk.iops'"] + - name: get generic history with links + desc: Ensure we can get the history + xfail: true + GET: /v1/resource/generic/75C44741-CC60-4033-804E-2D3098C7D2E9/history?sort=revision_end:asc-nullslast&limit=1 + response_headers: + link: "<$SCHEME://$NETLOC/v1/resource/generic/75C44741-CC60-4033-804E-2D3098C7D2E9/history?limit=1&marker=&sort=revision_end%3Aasc-nullslast>; rel=\"next\"" + response_json_paths: 
+ $.`len`: 1 + - name: patch generic bad metric association PATCH: /v1/resource/generic/75C44741-CC60-4033-804E-2D3098C7D2E9 data: diff --git a/releasenotes/notes/pagination-link-3cc64889ac414d28.yaml b/releasenotes/notes/pagination-link-3cc64889ac414d28.yaml new file mode 100644 index 00000000..231179dc --- /dev/null +++ b/releasenotes/notes/pagination-link-3cc64889ac414d28.yaml @@ -0,0 +1,6 @@ +--- +features: + - | + All listing endpoints (/v1/metric, /v1/resource/, /v1/search/*, ...) + now returns a `Link` header as described by the RFC5988. For now, only the + next page link is provided. -- GitLab From ceb3e1bd965cffe51c702ce68fd0c4f5259c7dbb Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Mon, 11 Sep 2017 21:45:06 +0200 Subject: [PATCH 0986/1483] fix pagination for resource history --- gnocchi/indexer/sqlalchemy.py | 31 ++++++++++++++----- gnocchi/rest/api.py | 30 ++++++++++++------ .../tests/functional/gabbits/pagination.yaml | 28 +++++++++++++++++ .../tests/functional/gabbits/resource.yaml | 13 ++++++-- gnocchi/tests/functional/gabbits/search.yaml | 16 ++++++++++ .../pagination-link-3cc64889ac414d28.yaml | 2 +- 6 files changed, 99 insertions(+), 21 deletions(-) diff --git a/gnocchi/indexer/sqlalchemy.py b/gnocchi/indexer/sqlalchemy.py index 21a1136e..35b65dd2 100644 --- a/gnocchi/indexer/sqlalchemy.py +++ b/gnocchi/indexer/sqlalchemy.py @@ -696,7 +696,7 @@ class SQLAlchemyIndexer(indexer.IndexerDriver): if details: q = q.options(sqlalchemy.orm.joinedload('resource')) - sort_keys, sort_dirs = self._build_sort_keys(sorts) + sort_keys, sort_dirs = self._build_sort_keys(sorts, ['id']) if marker: metric_marker = self.list_metrics(ids=[marker]) @@ -981,9 +981,11 @@ class SQLAlchemyIndexer(indexer.IndexerDriver): if history: target_cls = self._get_history_result_mapper( session, resource_type) + unique_keys = ["id", "revision"] else: target_cls = self._resource_type_to_mappers( session, resource_type)["resource"] + unique_keys = ["id"] q = 
session.query(target_cls) @@ -1001,10 +1003,24 @@ class SQLAlchemyIndexer(indexer.IndexerDriver): q = q.filter(f) - sort_keys, sort_dirs = self._build_sort_keys(sorts) + sort_keys, sort_dirs = self._build_sort_keys(sorts, unique_keys) if marker: - resource_marker = self.get_resource(resource_type, marker) + marker_q = session.query(target_cls) + if history: + try: + rid, rrev = marker.split("@") + rrev = int(rrev) + except ValueError: + resource_marker = None + else: + resource_marker = marker_q.filter( + target_cls.id == rid, + target_cls.revision == rrev).first() + else: + resource_marker = marker_q.filter( + target_cls.id == marker).first() + if resource_marker is None: raise indexer.InvalidPagination( "Invalid marker: `%s'" % marker) @@ -1086,7 +1102,7 @@ class SQLAlchemyIndexer(indexer.IndexerDriver): raise indexer.NoSuchMetric(id) @staticmethod - def _build_sort_keys(sorts): + def _build_sort_keys(sorts, unique_keys): # transform the api-wg representation to the oslo.db one sort_keys = [] sort_dirs = [] @@ -1096,9 +1112,10 @@ class SQLAlchemyIndexer(indexer.IndexerDriver): sort_dirs.append(sort_dir or 'asc') # paginate_query require at list one uniq column - if 'id' not in sort_keys: - sort_keys.append('id') - sort_dirs.append('asc') + for key in unique_keys: + if key not in sort_keys: + sort_keys.append(key) + sort_dirs.append('asc') return sort_keys, sort_dirs diff --git a/gnocchi/rest/api.py b/gnocchi/rest/api.py index 219dee1d..01e1b5c6 100644 --- a/gnocchi/rest/api.py +++ b/gnocchi/rest/api.py @@ -738,6 +738,7 @@ class ResourceHistoryController(rest.RestController): @pecan.expose('json') def get(self, **kwargs): + initial_kwargs = kwargs.copy() details = get_details(kwargs) pagination_opts = get_pagination_options( kwargs, RESOURCE_DEFAULT_PAGINATION) @@ -749,12 +750,7 @@ class ResourceHistoryController(rest.RestController): enforce("get resource", resource) - # FIXME(sileht): pagination doesn't work, the marker - # expected is currently only the 
resource id while it should be - # the resource_id+revision for the history try: - # FIXME(sileht): next API version should returns - # {'resources': [...], 'links': [ ... pagination rel ...]} resources = pecan.request.indexer.list_resources( self.resource_type, attribute_filter={"=": {"id": self.resource_id}}, @@ -763,8 +759,8 @@ class ResourceHistoryController(rest.RestController): **pagination_opts ) if resources and len(resources) >= pagination_opts['limit']: - set_resp_link_hdr(str(resources[-1].id), kwargs, - pagination_opts) + marker = "%s@%s" % (resources[-1].id, resources[-1].revision) + set_resp_link_hdr(marker, initial_kwargs, pagination_opts) return resources except indexer.IndexerException as e: abort(400, e) @@ -1087,6 +1083,7 @@ class ResourcesController(rest.RestController): @pecan.expose('json') def get_all(self, **kwargs): + initial_kwargs = kwargs.copy() details = get_details(kwargs) history = get_history(kwargs) pagination_opts = get_pagination_options( @@ -1105,8 +1102,12 @@ class ResourcesController(rest.RestController): **pagination_opts ) if resources and len(resources) >= pagination_opts['limit']: - set_resp_link_hdr(str(resources[-1].id), kwargs, - pagination_opts) + if history: + marker = "%s@%s" % (resources[-1].id, + resources[-1].revision) + else: + marker = str(resources[-1].id) + set_resp_link_hdr(marker, initial_kwargs, pagination_opts) return resources except indexer.IndexerException as e: abort(400, e) @@ -1304,6 +1305,7 @@ class SearchResourceTypeController(rest.RestController): self._resource_type = resource_type def _search(self, **kwargs): + initial_kwargs = kwargs.copy() if pecan.request.body: attr_filter = deserialize_and_validate(ResourceSearchSchema) elif kwargs.get("filter"): @@ -1327,12 +1329,20 @@ class SearchResourceTypeController(rest.RestController): else: attr_filter = policy_filter - return pecan.request.indexer.list_resources( + resources = pecan.request.indexer.list_resources( self._resource_type, 
attribute_filter=attr_filter, details=details, history=history, **pagination_opts) + if resources and len(resources) >= pagination_opts['limit']: + if history: + marker = "%s@%s" % (resources[-1].id, + resources[-1].revision) + else: + marker = str(resources[-1].id) + set_resp_link_hdr(marker, initial_kwargs, pagination_opts) + return resources @pecan.expose('json') def post(self, **kwargs): diff --git a/gnocchi/tests/functional/gabbits/pagination.yaml b/gnocchi/tests/functional/gabbits/pagination.yaml index d9e17af5..567c5d87 100644 --- a/gnocchi/tests/functional/gabbits/pagination.yaml +++ b/gnocchi/tests/functional/gabbits/pagination.yaml @@ -199,6 +199,34 @@ tests: $[2].id: 1e3d5702-2cbf-46e0-ba13-0ddaa3c71150 $[2].ended_at: null + - name: limit with history and links page 1 + GET: /v1/resource/generic?history=true&sort=id:asc&sort=ended_at:asc-nullsfirst&limit=1 + response_headers: + link: "<$SCHEME://$NETLOC/v1/resource/generic?history=true&limit=1&marker=1e3d5702-2cbf-46e0-ba13-0ddaa3c71150%401&sort=id%3Aasc&sort=ended_at%3Aasc-nullsfirst>; rel=\"next\"" + response_json_paths: + $.`len`: 1 + $[0].id: 1e3d5702-2cbf-46e0-ba13-0ddaa3c71150 + $[0].ended_at: null + + - name: limit with history and links page 2 + xfail: https://bugs.launchpad.net/oslo.db/+bug/1615938 + GET: /v1/resource/generic?history=true&limit=1&marker=1e3d5702-2cbf-46e0-ba13-0ddaa3c71150@1&sort=id:asc&sort=ended_at:asc-nullsfirst + response_headers: + link: "<$SCHEME://$NETLOC/v1/resource/generic?history=true&limit=1&marker=1e3d5702-2cbf-46e0-ba13-0ddaa3c71150%402&sort=id%3Aasc&sort=ended_at%3Aasc-nullsfirst>; rel=\"next\"" + response_json_paths: + $.`len`: 1 + $[0].id: 1e3d5702-2cbf-46e0-ba13-0ddaa3c71150 + $[0].ended_at: "2014-01-30T02:02:02+00:00" + + - name: limit with history and links page 3 with no limit + GET: /v1/resource/generic?history=true&marker=1e3d5702-2cbf-46e0-ba13-0ddaa3c71150@2&sort=id:asc&sort=ended_at:asc-nullsfirst + response_headers: + link: 
"<$SCHEME://$NETLOC/v1/resource/generic?history=true&limit=7&marker=9b6af245-57df-4ed6-a8c0-f64b77d8867f%40-1&sort=id%3Aasc&sort=ended_at%3Aasc-nullsfirst>; rel=\"next\"" + response_json_paths: + $.`len`: 7 + $[0].id: 1e3d5702-2cbf-46e0-ba13-0ddaa3c71150 + $[0].ended_at: "2014-01-31T02:02:02+00:00" + # # Create metrics # diff --git a/gnocchi/tests/functional/gabbits/resource.yaml b/gnocchi/tests/functional/gabbits/resource.yaml index da8d20f9..878aff0d 100644 --- a/gnocchi/tests/functional/gabbits/resource.yaml +++ b/gnocchi/tests/functional/gabbits/resource.yaml @@ -216,15 +216,22 @@ tests: $[1].revision_end: null $[1].metrics.'disk.iops': $RESPONSE["metrics.'disk.iops'"] - - name: get generic history with links + - name: get generic history with links page 1 desc: Ensure we can get the history - xfail: true GET: /v1/resource/generic/75C44741-CC60-4033-804E-2D3098C7D2E9/history?sort=revision_end:asc-nullslast&limit=1 response_headers: - link: "<$SCHEME://$NETLOC/v1/resource/generic/75C44741-CC60-4033-804E-2D3098C7D2E9/history?limit=1&marker=&sort=revision_end%3Aasc-nullslast>; rel=\"next\"" + link: "<$SCHEME://$NETLOC/v1/resource/generic/75C44741-CC60-4033-804E-2D3098C7D2E9/history?limit=1&marker=75c44741-cc60-4033-804e-2d3098c7d2e9%401&sort=revision_end%3Aasc-nullslast>; rel=\"next\"" response_json_paths: $.`len`: 1 + - name: get generic history with links page 2 + xfail: https://bugs.launchpad.net/oslo.db/+bug/1615938 + desc: Ensure we can get the history + GET: /v1/resource/generic/75C44741-CC60-4033-804E-2D3098C7D2E9/history?limit=1&marker=75c44741-cc60-4033-804e-2d3098c7d2e9@1&sort=revision_end:asc-nullslast + response_json_paths: + $.`len`: 1 + $[0].revision_end: null + - name: patch generic bad metric association PATCH: /v1/resource/generic/75C44741-CC60-4033-804E-2D3098C7D2E9 data: diff --git a/gnocchi/tests/functional/gabbits/search.yaml b/gnocchi/tests/functional/gabbits/search.yaml index 59e9f963..6826d537 100644 --- 
a/gnocchi/tests/functional/gabbits/search.yaml +++ b/gnocchi/tests/functional/gabbits/search.yaml @@ -142,6 +142,22 @@ tests: response_json_paths: $.`len`: 2 + - name: search empty query page 1 + POST: /v1/search/resource/generic?limit=1 + data: {} + response_headers: + link: "<$SCHEME://$NETLOC/v1/search/resource/generic?limit=1&marker=faef212f-0bf4-4030-a461-2186fef79be0&sort=revision_start%3Aasc&sort=started_at%3Aasc>; rel=\"next\"" + response_json_paths: + $.`len`: 1 + + - name: search empty query last page + POST: /v1/search/resource/generic?marker=faef212f-0bf4-4030-a461-2186fef79be0&sort=revision_start:asc&sort=started_at:asc + data: {} + response_forbidden_headers: + - link + response_json_paths: + $.`len`: 1 + - name: post generic resource with project/user POST: /v1/resource/generic data: diff --git a/releasenotes/notes/pagination-link-3cc64889ac414d28.yaml b/releasenotes/notes/pagination-link-3cc64889ac414d28.yaml index 231179dc..db26fc33 100644 --- a/releasenotes/notes/pagination-link-3cc64889ac414d28.yaml +++ b/releasenotes/notes/pagination-link-3cc64889ac414d28.yaml @@ -1,6 +1,6 @@ --- features: - | - All listing endpoints (/v1/metric, /v1/resource/, /v1/search/*, ...) + All listing endpoints (/v1/metric, /v1/resource/, /v1/search/resource, ...) now returns a `Link` header as described by the RFC5988. For now, only the next page link is provided. -- GitLab From c4765d876f60bc0b6a4a9bfa42b5986b8fd99560 Mon Sep 17 00:00:00 2001 From: gord chung Date: Tue, 12 Sep 2017 19:58:54 +0000 Subject: [PATCH 0987/1483] supporting rolling aggregates this implements rolling aggregates which works similar to how pandas rolling works but more trivial. this differs from deprecated moving_average by correctly matching points to trailing timestamp rather than leading timestamp. it does not return the NaN values that result in shift in values. 
Closes: #186 --- doc/source/rest.j2 | 3 +- gnocchi/carbonara.py | 23 +++++++++++++ gnocchi/rest/transformation.py | 3 +- gnocchi/tests/functional/gabbits/metric.yaml | 20 ++++++++++++ gnocchi/tests/test_carbonara.py | 32 +++++++++++++++++++ gnocchi/tests/test_transformation.py | 6 ++-- ...rt-rolling-transfrom-a12ab4fb4aa8168f.yaml | 6 ++++ 7 files changed, 89 insertions(+), 4 deletions(-) create mode 100644 releasenotes/notes/support-rolling-transfrom-a12ab4fb4aa8168f.yaml diff --git a/doc/source/rest.j2 b/doc/source/rest.j2 index 11e62288..e7cf01e7 100644 --- a/doc/source/rest.j2 +++ b/doc/source/rest.j2 @@ -143,7 +143,8 @@ Or we can apply absolute to the values and then negate them for example. {{ scenarios['get-measures-transform']['doc'] }} -Supported transformations are `absolute`, `negative` and `resample(sampling-in-second)`. +Supported transformations are `absolute`, `negative`, +`rolling(aggregation-method, window-size)` and `resample(sampling-in-second)`. .. note:: diff --git a/gnocchi/carbonara.py b/gnocchi/carbonara.py index ff880d70..4728ea1b 100644 --- a/gnocchi/carbonara.py +++ b/gnocchi/carbonara.py @@ -28,6 +28,7 @@ import time import lz4.block import numpy import numpy.lib.recfunctions +from numpy.lib.stride_tricks import as_strided from scipy import ndimage import six @@ -578,6 +579,28 @@ class AggregatedTimeSerie(TimeSerie): timestamps = ts["timestamps"] values = ts["values"] + elif trans.method == "rolling": + agg = trans.args[0] + if agg not in ('min', 'max', 'mean', 'median', + 'std', 'sum', 'var'): + raise TransformError("'%s' aggregation is unsupported" % + agg) + agg = getattr(numpy, agg) + + window = int(trans.args[1]) + if window < 1: + raise TransformError("Window must be 1 or greater") + if window > len(values): + raise TransformError("Window is greater than serie: %s" % + self) + + # arogozhnikov.github.io/2015/09/30/NumpyTipsAndTricks2.html + stride = values.strides[0] + timestamps = timestamps[window - 1:] + values = agg(as_strided( 
+ values, shape=[len(values) - window + 1, window], + strides=[stride, stride]), axis=1) + else: raise TransformError("Transformation '%s' doesn't exists" % trans.method) diff --git a/gnocchi/rest/transformation.py b/gnocchi/rest/transformation.py index cf7ca641..c67c16ff 100644 --- a/gnocchi/rest/transformation.py +++ b/gnocchi/rest/transformation.py @@ -49,9 +49,10 @@ timespan = timespan.setParseAction(lambda t: utils.to_timespan(t[0])) absolute = transform("absolute") negative = transform("negative") resample = transform("resample", timespan) +rolling = transform("rolling", pp.Word(pp.alphas), pp.Word(pp.nums)) transform = pp.delimitedList( - absolute | negative | resample, + absolute | negative | resample | rolling, delim=":") parse = functools.partial(transform.parseString, parseAll=True) diff --git a/gnocchi/tests/functional/gabbits/metric.yaml b/gnocchi/tests/functional/gabbits/metric.yaml index 6d053460..35d5f6e4 100644 --- a/gnocchi/tests/functional/gabbits/metric.yaml +++ b/gnocchi/tests/functional/gabbits/metric.yaml @@ -242,6 +242,18 @@ tests: - ["2015-03-06T14:36:15+00:00", 1.0, 16.0] - ["2015-03-06T14:37:15+00:00", 1.0, 23.0] + - name: rolling-mean transform + GET: /v1/metric/$HISTORY['list valid metrics'].$RESPONSE['$[0].id']/measures?transform=rolling(mean,2) + status: 200 + response_json_paths: + $: + - ["2015-03-06T14:34:12+00:00", 1.0, 27.55] + - ["2015-03-06T14:34:15+00:00", 1.0, 14.0] + - ["2015-03-06T14:35:12+00:00", 1.0, 12.5] + - ["2015-03-06T14:35:15+00:00", 1.0, 10.0] + - ["2015-03-06T14:36:15+00:00", 1.0, -2.5] + - ["2015-03-06T14:37:15+00:00", 1.0, -19.5] + - name: get measurements from metric and two transforms GET: /v1/metric/$HISTORY['list valid metrics'].$RESPONSE['$[0].id']/measures?transform=absolute:negative response_json_paths: @@ -274,6 +286,14 @@ tests: response_strings: - 'transform and resample are exclusive' + - name: get rolling bad aggregate + GET: /v1/metric/$HISTORY['list valid 
metrics'].$RESPONSE['$[0].id']/measures?transform=rolling(blah,2) + status: 400 + + - name: get rolling-mean missing window + GET: /v1/metric/$HISTORY['list valid metrics'].$RESPONSE['$[0].id']/measures?transform=rolling(mean) + status: 400 + - name: create valid metric two POST: /v1/metric data: diff --git a/gnocchi/tests/test_carbonara.py b/gnocchi/tests/test_carbonara.py index 82f06433..7fcf6863 100644 --- a/gnocchi/tests/test_carbonara.py +++ b/gnocchi/tests/test_carbonara.py @@ -320,6 +320,38 @@ class TestAggregatedTimeSerie(base.BaseTestCase): self.assertRaises(carbonara.TransformError, ts.transform, [carbonara.Transformation("rubbish", tuple())]) + def test_transform_rolling(self): + ts = carbonara.TimeSerie.from_tuples( + [(datetime64(2014, 1, 1, 12, 0, 0), 1), + (datetime64(2014, 1, 1, 12, 1, 0), 3), + (datetime64(2014, 1, 1, 12, 2, 0), 5), + (datetime64(2014, 1, 1, 12, 3, 0), 7), + (datetime64(2014, 1, 1, 12, 4, 0), 9)]) + ts = carbonara.AggregatedTimeSerie.from_timeseries( + [ts], sampling=60, aggregation_method="last") + + ts1 = ts.transform([ + carbonara.Transformation("rolling", ('mean', '2'))]) + self.assertEqual(4, len(ts1)) + self.assertEqual([2.0, 4.0, 6.0, 8.0], [ + ts1[datetime64(2014, 1, 1, 12, 1, 0)][1], + ts1[datetime64(2014, 1, 1, 12, 2, 0)][1], + ts1[datetime64(2014, 1, 1, 12, 3, 0)][1], + ts1[datetime64(2014, 1, 1, 12, 4, 0)][1]]) + + ts1 = ts.transform([ + carbonara.Transformation("rolling", ('sum', '4'))]) + self.assertEqual(2, len(ts1)) + self.assertEqual([16.0, 24.0], [ + ts1[datetime64(2014, 1, 1, 12, 3, 0)][1], + ts1[datetime64(2014, 1, 1, 12, 4, 0)][1]]) + + self.assertRaises(carbonara.TransformError, ts.transform, + [carbonara.Transformation("rolling", ('sum', 0))]) + + self.assertRaises(carbonara.TransformError, ts.transform, + [carbonara.Transformation("rolling", ('sum', 10))]) + def _do_test_aggregation(self, name, v1, v2): ts = carbonara.TimeSerie.from_tuples( [(datetime64(2014, 1, 1, 12, 0, 0), 3), diff --git 
a/gnocchi/tests/test_transformation.py b/gnocchi/tests/test_transformation.py index 1cd07d6b..5b243149 100644 --- a/gnocchi/tests/test_transformation.py +++ b/gnocchi/tests/test_transformation.py @@ -36,7 +36,8 @@ class TestTransformParser(base.BaseTestCase): "resample(5):resample(10)": [("resample", (numpy.timedelta64(5, 's'),)), ("resample", - (numpy.timedelta64(10, 's'),))] + (numpy.timedelta64(10, 's'),))], + "rolling(mean, 2)": [("rolling", ("mean", '2'))], } for expr, expected in expressions.items(): try: @@ -67,7 +68,8 @@ class TestTransformParser(base.BaseTestCase): "resample(, 1.3)", "resample(a)", "resample(1.5, 1.3)", - + "rolling(mean)", + "rolling(mean, mean)", ] for expr in expressions: self.assertRaises(transformation.TransformationParserError, diff --git a/releasenotes/notes/support-rolling-transfrom-a12ab4fb4aa8168f.yaml b/releasenotes/notes/support-rolling-transfrom-a12ab4fb4aa8168f.yaml new file mode 100644 index 00000000..322d1dac --- /dev/null +++ b/releasenotes/notes/support-rolling-transfrom-a12ab4fb4aa8168f.yaml @@ -0,0 +1,6 @@ +--- +features: + - | + Added support for defining 'rolling' transform. This provides ability to + compute the mean, min, max, stdev and various other computations across + a rolling or moving window of a specified size. 
-- GitLab From 3395958ec51c4053c65160a53803781b1c851a77 Mon Sep 17 00:00:00 2001 From: astacksu Date: Fri, 22 Sep 2017 11:23:01 +0800 Subject: [PATCH 0988/1483] Move doc image into dedicated folder Move arch image to folder & change display size display size 80% -> 95% Move grafana image to folder --- doc/source/{ => _static}/architecture.svg | 0 doc/source/{ => _static}/grafana-screenshot.png | Bin doc/source/architecture.rst | 4 ++-- doc/source/grafana.rst | 2 +- 4 files changed, 3 insertions(+), 3 deletions(-) rename doc/source/{ => _static}/architecture.svg (100%) rename doc/source/{ => _static}/grafana-screenshot.png (100%) diff --git a/doc/source/architecture.svg b/doc/source/_static/architecture.svg similarity index 100% rename from doc/source/architecture.svg rename to doc/source/_static/architecture.svg diff --git a/doc/source/grafana-screenshot.png b/doc/source/_static/grafana-screenshot.png similarity index 100% rename from doc/source/grafana-screenshot.png rename to doc/source/_static/grafana-screenshot.png diff --git a/doc/source/architecture.rst b/doc/source/architecture.rst index 7c35357e..3f72e984 100755 --- a/doc/source/architecture.rst +++ b/doc/source/architecture.rst @@ -11,9 +11,9 @@ computing, metric cleanup, etc...) on the received data in the background. Both the HTTP REST API and the asynchronous processing daemon are stateless and are scalable. Additional workers can be added depending on load. -.. image:: architecture.svg +.. image:: _static/architecture.svg :align: center - :width: 80% + :width: 95% :alt: Gnocchi architecture .. image source: https://docs.google.com/drawings/d/1aHV86TPNFt7FlCLEjsTvV9FWoFYxXCaQOzfg7NdXVwM/edit?usp=sharing diff --git a/doc/source/grafana.rst b/doc/source/grafana.rst index 6c31108c..73842dc1 100644 --- a/doc/source/grafana.rst +++ b/doc/source/grafana.rst @@ -42,7 +42,7 @@ steps: project and a password. Your browser will query Keystone for a token, and then query Gnocchi based on what Grafana needs. -.. 
image:: grafana-screenshot.png +.. image:: _static/grafana-screenshot.png :align: center :alt: Grafana screenshot -- GitLab From a498c6a6377c37a5776305fefe9100b728f27b74 Mon Sep 17 00:00:00 2001 From: astacksu Date: Fri, 22 Sep 2017 13:08:08 +0800 Subject: [PATCH 0989/1483] Add glossary anchors for doc Add substitution rules for glossary anchor Add anchor into pages --- doc/source/architecture.rst | 24 +- doc/source/glossary.rst | 34 ++- doc/source/include/term-substitution.rst | 24 ++ doc/source/index.rst | 12 +- doc/source/install.rst | 4 +- doc/source/nagios.rst | 4 +- doc/source/operating.rst | 98 +++--- doc/source/rest.j2 | 367 ++++++++++++----------- doc/source/statsd.rst | 26 +- 9 files changed, 321 insertions(+), 272 deletions(-) create mode 100644 doc/source/include/term-substitution.rst diff --git a/doc/source/architecture.rst b/doc/source/architecture.rst index 3f72e984..761b921a 100755 --- a/doc/source/architecture.rst +++ b/doc/source/architecture.rst @@ -6,7 +6,7 @@ Gnocchi consists of several services: a HTTP REST API (see :doc:`rest`), an optional statsd-compatible daemon (see :doc:`statsd`), and an asynchronous processing daemon (named `gnocchi-metricd`). Data is received via the HTTP REST API or statsd daemon. `gnocchi-metricd` performs operations (statistics -computing, metric cleanup, etc...) on the received data in the background. +computing, |metric| cleanup, etc...) on the received data in the background. Both the HTTP REST API and the asynchronous processing daemon are stateless and are scalable. Additional workers can be added depending on load. @@ -23,19 +23,21 @@ Back-ends --------- Gnocchi uses three different back-ends for storing data: one for storing new -incoming measures (the incoming driver), one for storing the time series (the +incoming |measures| (the incoming driver), one for storing the time series (the storage driver) and one for indexing the data (the index driver). 
-The *incoming* storage is responsible for storing new measures sent to metrics. -It is by default – and usually – the same driver as the *storage* one. +The *incoming* storage is responsible for storing new |measures| sent to +|metrics|. It is by default – and usually – the same driver as the *storage* +one. -The *storage* is responsible for storing aggregates of created metrics. It +The *storage* is responsible for storing |aggregates| of created |metrics|. It receives timestamps and values, and pre-computes aggregations according to the -defined archive policies. +defined |archive policies|. -The *indexer* is responsible for storing the index of all resources, archive -policies and metrics, along with their definitions, types and properties. The -indexer is also responsible for linking resources with metrics. +The *indexer* is responsible for storing the index of all |resources|, +|archive policies| and |metrics|, along with their definitions, types and +properties. The indexer is also responsible for linking |resources| with +|metrics|. Available incoming and storage back-ends ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -77,8 +79,10 @@ Gnocchi currently offers different index drivers: * `MySQL`_ (at least version 5.6.4) Those drivers offer almost the same performance and features, though PostgreSQL -tends to be more performant and has some additional features (e.g. resource +tends to be more performant and has some additional features (e.g. |resource| duration computing). .. _PostgreSQL: http://postgresql.org .. _MySQL: http://mysql.org + +.. include:: include/term-substitution.rst diff --git a/doc/source/glossary.rst b/doc/source/glossary.rst index 0cf580d3..7f26d2fc 100644 --- a/doc/source/glossary.rst +++ b/doc/source/glossary.rst @@ -7,38 +7,42 @@ Glossary Resource An entity representing anything in your infrastructure that you will - associate metric(s) with. It is identified by a unique ID and can + associate |metric|\ (s) with. 
It is identified by a unique ID and can contain attributes. Metric - An entity storing aggregates identified by an UUID. It can be attached - to a resource using a name. How a metric stores its aggregates is - defined by the archive policy it is associated to. + An entity storing |aggregates| identified by an UUID. It can be attached + to a |resource| using a name. How a metric stores its |aggregates| is + defined by the |archive policy| it is associated to. Measure An incoming datapoint tuple sent to Gnocchi by the api. It is composed of a timestamp and a value. Archive policy - An aggregate storage policy attached to a metric. It determines how long - aggregates will be kept in a metric and how they will be aggregated. + An |aggregate| storage policy attached to a |metric|. It determines how + long |aggregates| will be kept in a |metric| and + :term:`how they will be aggregated`\ . Granularity - The time between two aggregates in an aggregated time series of a metric. + The time between two |aggregates| in an aggregated |time series| of a + |metric|. Time series - A list of aggregates ordered by time. + A list of |aggregates| ordered by time. Aggregation method - Function used to aggregate multiple measures into an aggregate. For + Function used to aggregate multiple |measures| into an |aggregate|. For example, the `min` aggregation method will aggregate the values of - different measures to the minimum value of all the measures in the time - range. + different |measures| to the minimum value of all the |measures| in the + time range. Aggregate - A datapoint tuple generated from several measures according to the - archive policy definition. It is composed of a timestamp and a value. + A datapoint tuple generated from several |measures| according to the + |archive policy| definition. It is composed of a timestamp and a value. Timespan - The time period for which a metric keeps its aggregates. It is used in - the context of archive policy. 
+ The time period for which a |metric| keeps its |aggregates|. It is used in + the context of |archive policy|. + +.. include:: include/term-substitution.rst diff --git a/doc/source/include/term-substitution.rst b/doc/source/include/term-substitution.rst new file mode 100644 index 00000000..37b8020b --- /dev/null +++ b/doc/source/include/term-substitution.rst @@ -0,0 +1,24 @@ +.. |resource| replace:: :term:`resource` +.. |resources| replace:: :term:`resources` + +.. |metric| replace:: :term:`metric` +.. |metrics| replace:: :term:`metrics` + +.. |measure| replace:: :term:`measure` +.. |measures| replace:: :term:`measures` + +.. |archive policy| replace:: :term:`archive policy` +.. |archive policies| replace:: :term:`archive policies` + +.. |granularity| replace:: :term:`granularity` +.. |granularities| replace:: :term:`granularities` + +.. |time series| replace:: :term:`time series` -- GitLab From 0b08406f07314424e2d54a736cfb57f7132b421f Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Mon, 9 Oct 2017 11:09:26 +0200 Subject: [PATCH 1033/1483] aggregates: fetch api This adds the /v1/aggregates/fetch API It can only retrieve metrics in batch and/or aggregate them for now. More operations will come in further PR. 
Related #419 --- gnocchi/rest/aggregates/api.py | 175 ++++++++ gnocchi/rest/aggregates/processor.py | 49 ++- gnocchi/rest/api.py | 100 +++-- .../functional/gabbits/aggregates-fetch.yaml | 397 ++++++++++++++++++ .../tests/functional/gabbits/resource.yaml | 4 +- gnocchi/tests/test_aggregates.py | 6 +- gnocchi/tests/test_rest.py | 3 +- 7 files changed, 669 insertions(+), 65 deletions(-) create mode 100644 gnocchi/rest/aggregates/api.py create mode 100644 gnocchi/tests/functional/gabbits/aggregates-fetch.yaml diff --git a/gnocchi/rest/aggregates/api.py b/gnocchi/rest/aggregates/api.py new file mode 100644 index 00000000..a9b013ed --- /dev/null +++ b/gnocchi/rest/aggregates/api.py @@ -0,0 +1,175 @@ +# -*- encoding: utf-8 -*- +# +# Copyright © 2016-2017 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import pecan +from pecan import rest +import pyparsing +import six +import voluptuous + +from gnocchi.rest.aggregates import operations as agg_operations +from gnocchi.rest.aggregates import processor +from gnocchi.rest import api +from gnocchi import storage +from gnocchi import utils + + +def _OperationsSubNodeSchema(v): + return OperationsSubNodeSchema(v) + + +def MetricSchema(v): + """metric keyword schema + + It could be: + + ["metric", "metric-ref", "aggregation"] + + or + + ["metric, ["metric-ref", "aggregation"], ["metric-ref", "aggregation"]] + """ + if not isinstance(v, (list, tuple)) or len(v) <= 2 or v[0] != u"metric": + raise voluptuous.Invalid("'metric' is invalid") + return [u"metric"] + voluptuous.Schema(voluptuous.Any( + voluptuous.ExactSequence([six.text_type, six.text_type]), + voluptuous.All( + voluptuous.Length(min=1), + [voluptuous.ExactSequence([six.text_type, six.text_type])], + )), required=True)(v[1:]) + + +OperationsSchemaBase = [ + MetricSchema, + voluptuous.ExactSequence( + [u"aggregate", + voluptuous.Any(*list(agg_operations.AGG_MAP.keys())), + _OperationsSubNodeSchema] + ), +] + + +OperationsSubNodeSchema = voluptuous.Schema(voluptuous.Any(*tuple( + OperationsSchemaBase + [voluptuous.Coerce(float)] +)), required=True) + + +def OperationsSchema(v): + if isinstance(v, six.text_type): + try: + v = pyparsing.OneOrMore( + pyparsing.nestedExpr()).parseString(v).asList()[0] + except pyparsing.ParseException as e: + api.abort(400, {"cause": "Invalid operations", + "reason": "Fail to parse the operations string", + "detail": six.text_type(e)}) + return voluptuous.Schema(voluptuous.Any(*OperationsSchemaBase), + required=True)(v) + + +def extract_references(nodes): + references = set() + if nodes[0] == "metric": + if isinstance(nodes[1], list): + for subnodes in nodes[1:]: + references.add(tuple(subnodes)) + else: + references.add(tuple(nodes[1:])) + else: + for subnodes in nodes[1:]: + if isinstance(subnodes, list): + references |= 
extract_references(subnodes) + return references + + +def get_measures_or_abort(metrics_and_aggregations, operations, start, + stop, granularity, needed_overlap, fill, + ref_identifier): + try: + return processor.get_measures( + pecan.request.storage, + metrics_and_aggregations, + operations, + start, stop, + granularity, needed_overlap, fill, + ref_identifier=ref_identifier) + except processor.UnAggregableTimeseries as e: + api.abort(400, e) + # TODO(sileht): We currently got only one metric for these exceptions but + # we can improve processor to returns all missing metrics at once, so we + # returns a list for the future + except storage.MetricDoesNotExist as e: + api.abort(404, {"cause": "Unknown metrics", + "detail": [str(e.metric.id)]}) + except storage.AggregationDoesNotExist as e: + api.abort(404, {"cause": "Metrics with unknown aggregation", + "detail": [(str(e.metric.id), e.method)]}) + + +class FetchController(rest.RestController): + + FetchSchema = { + "operations": OperationsSchema + } + + @pecan.expose("json") + def post(self, start=None, stop=None, granularity=None, + needed_overlap=100.0, fill=None): + start, stop, granularity, needed_overlap, fill = api.validate_qs( + start, stop, granularity, needed_overlap, fill) + + body = api.deserialize_and_validate(self.FetchSchema) + + references = list(extract_references(body["operations"])) + if not references: + api.abort(400, {"cause": "operations is invalid", + "reason": "at least one 'metric' is required", + "detail": body["operations"]}) + + try: + metric_ids = [six.text_type(utils.UUID(m)) + for (m, a) in references] + except ValueError as e: + api.abort(400, {"cause": "Invalid metric references", + "reason": six.text_type(e), + "detail": references}) + + metrics = pecan.request.indexer.list_metrics(ids=metric_ids) + missing_metric_ids = (set(metric_ids) + - set(six.text_type(m.id) for m in metrics)) + if missing_metric_ids: + api.abort(404, {"cause": "Unknown metrics", + "reason": "Provided metrics 
don't exists", + "detail": missing_metric_ids}) + + number_of_metrics = len(metrics) + if number_of_metrics == 0: + return [] + + for metric in metrics: + api.enforce("get metric", metric) + + metrics_by_ids = dict((six.text_type(m.id), m) for m in metrics) + metrics_and_aggregations = [(metrics_by_ids[m], a) + for (m, a) in references] + return get_measures_or_abort( + metrics_and_aggregations, body["operations"], + start, stop, granularity, needed_overlap, fill, + ref_identifier="id") + + +class AggregatesController(object): + fetch = FetchController() diff --git a/gnocchi/rest/aggregates/processor.py b/gnocchi/rest/aggregates/processor.py index 73997159..af35df7c 100644 --- a/gnocchi/rest/aggregates/processor.py +++ b/gnocchi/rest/aggregates/processor.py @@ -24,6 +24,7 @@ import six from gnocchi import carbonara from gnocchi.rest.aggregates import operations as agg_operations from gnocchi import storage as gnocchi_storage +from gnocchi import utils LOG = daiquiri.getLogger(__name__) @@ -31,21 +32,17 @@ LOG = daiquiri.getLogger(__name__) class UnAggregableTimeseries(Exception): """Error raised when timeseries cannot be aggregated.""" - def __init__(self, reason): + def __init__(self, references, reason): + self.references = references self.reason = reason super(UnAggregableTimeseries, self).__init__(reason) - -class MetricUnaggregatable(Exception): - """Error raised when metrics can't be aggregated.""" - - def __init__(self, metrics_and_aggregations, reason): - self.metrics_and_aggregations = metrics_and_aggregations - self.reason = reason - metrics = ("%s/%s" % (m.id, a) for (m, a) in metrics_and_aggregations) - super(MetricUnaggregatable, self).__init__( - "Metrics %s can't be aggregated: %s" % ( - ", ".join(metrics), reason)) + def jsonify(self): + return { + "cause": "Metrics can't being aggregated", + "reason": self.reason, + "detail": self.references + } def _get_measures_timeserie(storage, metric, aggregation, ref_identifier, @@ -71,6 +68,8 @@ def 
get_measures(storage, metrics_and_aggregations, :param fill: The value to use to fill in missing data in series. :param resample: The granularity to resample to. """ + + references_with_missing_granularity = [] for (metric, aggregation) in metrics_and_aggregations: if aggregation not in metric.archive_policy.aggregation_methods: raise gnocchi_storage.AggregationDoesNotExist(metric, aggregation) @@ -79,8 +78,14 @@ def get_measures(storage, metrics_and_aggregations, if d.granularity == granularity: break else: - raise gnocchi_storage.GranularityDoesNotExist( - metric, granularity) + references_with_missing_granularity.append( + (getattr(metric, ref_identifier), aggregation)) + + if references_with_missing_granularity: + raise UnAggregableTimeseries( + references_with_missing_granularity, + "granularity '%d' is missing" % + utils.timespan_total_seconds(granularity)) if granularity is None: granularities = ( @@ -96,8 +101,10 @@ def get_measures(storage, metrics_and_aggregations, ] if not granularities_in_common: - raise MetricUnaggregatable( - metrics_and_aggregations, 'No granularity match') + raise UnAggregableTimeseries( + list((str(getattr(m, ref_identifier)), a) + for (m, a) in metrics_and_aggregations), + 'No granularity match') else: granularities_in_common = [granularity] @@ -113,11 +120,8 @@ def get_measures(storage, metrics_and_aggregations, tss = list(map(lambda ref_and_ts: ( ref_and_ts[0], ref_and_ts[1].resample(resample)), tss)) - try: - return aggregated(tss, operations, from_timestamp, to_timestamp, - needed_overlap, fill) - except (UnAggregableTimeseries, carbonara.UnknownAggregationMethod) as e: - raise MetricUnaggregatable(metrics_and_aggregations, e.reason) + return aggregated(tss, operations, from_timestamp, to_timestamp, + needed_overlap, fill) def aggregated(refs_and_timeseries, operations, from_timestamp=None, @@ -154,7 +158,7 @@ def aggregated(refs_and_timeseries, operations, from_timestamp=None, overlap = 
numpy.flatnonzero(~numpy.any(numpy.isnan(values), axis=1)) if overlap.size == 0 and needed_percent_of_overlap > 0: - raise UnAggregableTimeseries('No overlap') + raise UnAggregableTimeseries(references[key], 'No overlap') # if no boundary set, use first/last timestamp which overlap if to_timestamp is None and overlap.size: times = times[:overlap[-1] + 1] @@ -165,6 +169,7 @@ def aggregated(refs_and_timeseries, operations, from_timestamp=None, percent_of_overlap = overlap.size * 100.0 / times.size if percent_of_overlap < needed_percent_of_overlap: raise UnAggregableTimeseries( + references[key], 'Less than %f%% of datapoints overlap in this ' 'timespan (%.2f%%)' % (needed_percent_of_overlap, percent_of_overlap)) diff --git a/gnocchi/rest/api.py b/gnocchi/rest/api.py index cd773318..1f2c13df 100644 --- a/gnocchi/rest/api.py +++ b/gnocchi/rest/api.py @@ -1651,6 +1651,62 @@ class AggregationResourceController(rest.RestController): return results +# FIXME(sileht): should be in aggregates.api but we need to split all +# controllers to do this +def validate_qs(start, stop, granularity, needed_overlap, fill): + try: + needed_overlap = float(needed_overlap) + except ValueError: + abort(400, {"cause": "Argument value error", + "detail": "needed_overlap", + "reason": "Must be a number"}) + if needed_overlap != 100.0 and start is None and stop is None: + abort(400, {"cause": "Argument value error", + "detail": "needed_overlap", + "reason": "start and/or stop must be provided " + "if specifying needed_overlap"}) + + if start is not None: + try: + start = utils.to_timestamp(start) + except Exception: + abort(400, {"cause": "Argument value error", + "detail": "start", + "reason": "Must be a datetime or a timestamp"}) + + if stop is not None: + try: + stop = utils.to_timestamp(stop) + except Exception: + abort(400, {"cause": "Argument value error", + "detail": "stop", + "reason": "Must be a datetime or a timestamp"}) + + if granularity is not None: + try: + granularity = 
utils.to_timespan(granularity) + except ValueError as e: + abort(400, {"cause": "Argument value error", + "detail": "granularity", + "reason": six.text_type(e)}) + + if fill is not None: + if granularity is None: + abort(400, {"cause": "Argument value error", + "detail": "granularity", + "reason": "Unable to fill without a granularity"}) + if fill != "null": + try: + fill = float(fill) + except ValueError: + abort(400, + {"cause": "Argument value error", + "detail": "fill", + "reason": "Must be a float or \'null\', got '%s'" % + fill}) + return start, stop, granularity, needed_overlap, fill + + class AggregationController(rest.RestController): _custom_actions = { 'metric': ['POST', 'GET'], @@ -1677,25 +1733,8 @@ class AggregationController(rest.RestController): granularity=None, needed_overlap=100.0, fill=None, refresh=False, resample=None): - try: - needed_overlap = float(needed_overlap) - except ValueError: - abort(400, 'needed_overlap must be a number') - if needed_overlap != 100.0 and start is None and stop is None: - abort(400, 'start and/or stop must be provided if specifying ' - 'needed_overlap') - - if start is not None: - try: - start = utils.to_timestamp(start) - except Exception: - abort(400, "Invalid value for start") - - if stop is not None: - try: - stop = utils.to_timestamp(stop) - except Exception: - abort(400, "Invalid value for stop") + start, stop, granularity, needed_overlap, fill = validate_qs( + start, stop, granularity, needed_overlap, fill) if (aggregation not in archive_policy.ArchivePolicy.VALID_AGGREGATION_METHODS): @@ -1718,11 +1757,6 @@ class AggregationController(rest.RestController): number_of_metrics = len(metrics) if number_of_metrics == 0: return [] - if granularity is not None: - try: - granularity = utils.to_timespan(granularity) - except ValueError as e: - abort(400, six.text_type(e)) if resample: if not granularity: @@ -1732,15 +1766,6 @@ class AggregationController(rest.RestController): except ValueError as e: abort(400, 
six.text_type(e)) - if fill is not None: - if granularity is None: - abort(400, "Unable to fill without a granularity") - try: - fill = float(fill) - except ValueError as e: - if fill != 'null': - abort(400, "fill must be a float or \'null\': %s" % e) - try: if strtobool("refresh", refresh): metrics_to_update = [ @@ -1765,9 +1790,8 @@ class AggregationController(rest.RestController): operations, start, stop, granularity, needed_overlap, fill, resample)["aggregated"] - except processor.MetricUnaggregatable as e: - abort(400, ("One of the metrics being aggregated doesn't have " - "matching granularity: %s") % str(e)) + except processor.UnAggregableTimeseries as e: + abort(400, e) except (storage.MetricDoesNotExist, storage.GranularityDoesNotExist, storage.AggregationDoesNotExist) as e: @@ -1862,6 +1886,9 @@ class BatchController(object): class V1Controller(object): def __init__(self): + # FIXME(sileht): split controllers to avoid lazy loading + from gnocchi.rest.aggregates import api as agg_api + self.sub_controllers = { "search": SearchController(), "archive_policy": ArchivePoliciesController(), @@ -1873,6 +1900,7 @@ class V1Controller(object): "aggregation": AggregationController(), "capabilities": CapabilityController(), "status": StatusController(), + "aggregates": agg_api.AggregatesController(), } for name, ctrl in self.sub_controllers.items(): setattr(self, name, ctrl) diff --git a/gnocchi/tests/functional/gabbits/aggregates-fetch.yaml b/gnocchi/tests/functional/gabbits/aggregates-fetch.yaml new file mode 100644 index 00000000..0285f3bf --- /dev/null +++ b/gnocchi/tests/functional/gabbits/aggregates-fetch.yaml @@ -0,0 +1,397 @@ +fixtures: + - ConfigFixture + +defaults: + request_headers: + content-type: application/json + # User foobar + authorization: "basic Zm9vYmFyOg==" + +tests: + - name: create archive policy + desc: for later use + POST: /v1/archive_policy + request_headers: + # User admin + authorization: "basic YWRtaW46" + data: + name: cookies + 
definition: + - granularity: 1 second + - granularity: 60 second + status: 201 + + - name: create second archive policy + desc: for later use + POST: /v1/archive_policy + request_headers: + # User admin + authorization: "basic YWRtaW46" + data: + name: cake + definition: + - granularity: 5 second + status: 201 + + - name: create metric1 + POST: /v1/metric + data: + archive_policy_name: cookies + status: 201 + + - name: create metric3 + POST: /v1/metric + data: + archive_policy_name: cake + status: 201 + + - name: create metric2 + POST: /v1/metric + data: + archive_policy_name: cookies + status: 201 + + - name: push measurements to metric1 + POST: /v1/metric/$HISTORY['create metric1'].$RESPONSE['$.id']/measures + data: + - timestamp: "2015-03-06T14:33:57" + value: 43.1 + - timestamp: "2015-03-06T14:34:12" + value: 12 + - timestamp: "2015-03-06T14:34:15" + value: -16 + - timestamp: "2015-03-06T14:35:12" + value: 9 + - timestamp: "2015-03-06T14:35:15" + value: 11 + status: 202 + + - name: push measurements to metric2 + POST: /v1/metric/$HISTORY['create metric2'].$RESPONSE['$.id']/measures + data: + - timestamp: "2015-03-06T14:33:57" + value: 2 + - timestamp: "2015-03-06T14:34:12" + value: 4 + - timestamp: "2015-03-06T14:34:15" + value: 5 + - timestamp: "2015-03-06T14:35:12" + value: 10 + - timestamp: "2015-03-06T14:35:15" + value: 15 + status: 202 + + - name: get measurements from metric1 + GET: /v1/metric/$HISTORY['create metric1'].$RESPONSE['$.id']/measures?refresh=true + response_json_paths: + $: + - ["2015-03-06T14:33:00+00:00", 60.0, 43.1] + - ["2015-03-06T14:34:00+00:00", 60.0, -2.0] + - ["2015-03-06T14:35:00+00:00", 60.0, 10.0] + - ["2015-03-06T14:33:57+00:00", 1.0, 43.1] + - ["2015-03-06T14:34:12+00:00", 1.0, 12.0] + - ["2015-03-06T14:34:15+00:00", 1.0, -16.0] + - ["2015-03-06T14:35:12+00:00", 1.0, 9.0] + - ["2015-03-06T14:35:15+00:00", 1.0, 11.0] + + + - name: get measurements from metric2 + GET: /v1/metric/$HISTORY['create 
metric2'].$RESPONSE['$.id']/measures?refresh=true + response_json_paths: + $: + - ["2015-03-06T14:33:00+00:00", 60.0, 2.0] + - ["2015-03-06T14:34:00+00:00", 60.0, 4.5] + - ["2015-03-06T14:35:00+00:00", 60.0, 12.5] + - ["2015-03-06T14:33:57+00:00", 1.0, 2.0] + - ["2015-03-06T14:34:12+00:00", 1.0, 4.0] + - ["2015-03-06T14:34:15+00:00", 1.0, 5.0] + - ["2015-03-06T14:35:12+00:00", 1.0, 10.0] + - ["2015-03-06T14:35:15+00:00", 1.0, 15.0] + + - name: get aggregates + POST: /v1/aggregates/fetch + data: + operations: ["metric", ["$HISTORY['create metric1'].$RESPONSE['$.id']", "mean"], ["$HISTORY['create metric2'].$RESPONSE['$.id']", "mean"]] + response_json_paths: + $.`len`: 2 + $."$HISTORY['create metric1'].$RESPONSE['$.id']_mean": + - ["2015-03-06T14:33:00+00:00", 60.0, 43.1] + - ["2015-03-06T14:34:00+00:00", 60.0, -2.0] + - ["2015-03-06T14:35:00+00:00", 60.0, 10.0] + - ["2015-03-06T14:33:57+00:00", 1.0, 43.1] + - ["2015-03-06T14:34:12+00:00", 1.0, 12.0] + - ["2015-03-06T14:34:15+00:00", 1.0, -16.0] + - ["2015-03-06T14:35:12+00:00", 1.0, 9.0] + - ["2015-03-06T14:35:15+00:00", 1.0, 11.0] + $."$HISTORY['create metric2'].$RESPONSE['$.id']_mean": + - ["2015-03-06T14:33:00+00:00", 60.0, 2.0] + - ["2015-03-06T14:34:00+00:00", 60.0, 4.5] + - ["2015-03-06T14:35:00+00:00", 60.0, 12.5] + - ["2015-03-06T14:33:57+00:00", 1.0, 2.0] + - ["2015-03-06T14:34:12+00:00", 1.0, 4.0] + - ["2015-03-06T14:34:15+00:00", 1.0, 5.0] + - ["2015-03-06T14:35:12+00:00", 1.0, 10.0] + - ["2015-03-06T14:35:15+00:00", 1.0, 15.0] + + - name: get aggregates start and stop + POST: /v1/aggregates/fetch + query_parameters: + start: "2015-03-06T14:34:00" + stop: "2015-03-06T14:35:13" + data: + operations: ["metric", ["$HISTORY['create metric1'].$RESPONSE['$.id']", "mean"], ["$HISTORY['create metric2'].$RESPONSE['$.id']", "mean"]] + response_json_paths: + $.`len`: 2 + $."$HISTORY['create metric1'].$RESPONSE['$.id']_mean": + - ["2015-03-06T14:34:00+00:00", 60.0, -2.0] + - ["2015-03-06T14:35:00+00:00", 60.0, 10.0] + 
- ["2015-03-06T14:34:12+00:00", 1.0, 12.0] + - ["2015-03-06T14:34:15+00:00", 1.0, -16.0] + - ["2015-03-06T14:35:12+00:00", 1.0, 9.0] + $."$HISTORY['create metric2'].$RESPONSE['$.id']_mean": + - ["2015-03-06T14:34:00+00:00", 60.0, 4.5] + - ["2015-03-06T14:35:00+00:00", 60.0, 12.5] + - ["2015-03-06T14:34:12+00:00", 1.0, 4.0] + - ["2015-03-06T14:34:15+00:00", 1.0, 5.0] + - ["2015-03-06T14:35:12+00:00", 1.0, 10.0] + + - name: get aggregates granularity + POST: /v1/aggregates/fetch?granularity=60 + data: + operations: ["metric", ["$HISTORY['create metric1'].$RESPONSE['$.id']", "max"], ["$HISTORY['create metric2'].$RESPONSE['$.id']", "min"]] + response_json_paths: + $.`len`: 2 + $."$HISTORY['create metric1'].$RESPONSE['$.id']_max": + - ["2015-03-06T14:33:00+00:00", 60.0, 43.1] + - ["2015-03-06T14:34:00+00:00", 60.0, 12.0] + - ["2015-03-06T14:35:00+00:00", 60.0, 11.0] + $."$HISTORY['create metric2'].$RESPONSE['$.id']_min": + - ["2015-03-06T14:33:00+00:00", 60.0, 2.0] + - ["2015-03-06T14:34:00+00:00", 60.0, 4.0] + - ["2015-03-06T14:35:00+00:00", 60.0, 10.0] + + - name: get aggregates mean aggregate + POST: /v1/aggregates/fetch + data: + operations: "(aggregate mean (metric ($HISTORY['create metric1'].$RESPONSE['$.id'] mean) ($HISTORY['create metric2'].$RESPONSE['$.id'] mean)))" + response_json_paths: + $.`len`: 1 + $."aggregated": + - ["2015-03-06T14:33:00+00:00", 60.0, 22.55] + - ["2015-03-06T14:34:00+00:00", 60.0, 1.25] + - ["2015-03-06T14:35:00+00:00", 60.0, 11.25] + - ["2015-03-06T14:33:57+00:00", 1.0, 22.55] + - ["2015-03-06T14:34:12+00:00", 1.0, 8.0] + - ["2015-03-06T14:34:15+00:00", 1.0, -5.5] + - ["2015-03-06T14:35:12+00:00", 1.0, 9.5] + - ["2015-03-06T14:35:15+00:00", 1.0, 13.0] + +# Negative tests + + - name: get no operations + POST: /v1/aggregates/fetch + request_headers: + accept: application/json + content-type: application/json + authorization: "basic Zm9vYmFyOg==" + data: + operations: [] + status: 400 + response_strings: + - "'metric' is invalid" + + - 
name: invalid operations string + POST: /v1/aggregates/fetch + request_headers: + accept: application/json + content-type: application/json + authorization: "basic Zm9vYmFyOg==" + data: + operations: "(metroc foo bar" + status: 400 + response_json_paths: + $.code: 400 + $.description.cause: "Invalid operations" + $.description.reason: "Fail to parse the operations string" + $.description.detail: "Expected \")\" (at char 15), (line:1, col:16)" + + - name: get invalid metric operations + POST: /v1/aggregates/fetch + request_headers: + accept: application/json + content-type: application/json + authorization: "basic Zm9vYmFyOg==" + data: + operations: ["metric"] + status: 400 + response_strings: + - "'metric' is invalid" + + - name: get unknown metrics + POST: /v1/aggregates/fetch + request_headers: + accept: application/json + content-type: application/json + authorization: "basic Zm9vYmFyOg==" + data: + operations: + - metric + - ["$HISTORY['create metric1'].$RESPONSE['$.id']", "mean"] + - ["$HISTORY['create metric2'].$RESPONSE['$.id']", "mean"] + - ["8c062a7e-9f9f-4b1c-9996-9d0328512ab7", "mean"] + - ["e4864464-1b27-4622-9fbb-dc900e06c192", "mean"] + status: 404 + response_json_paths: + $.code: 404 + $.description.cause: "Unknown metrics" + $.description.detail.`sorted`: + - "8c062a7e-9f9f-4b1c-9996-9d0328512ab7" + - "e4864464-1b27-4622-9fbb-dc900e06c192" + + - name: get not matching granularity + POST: /v1/aggregates/fetch + request_headers: + accept: application/json + content-type: application/json + authorization: "basic Zm9vYmFyOg==" + data: + operations: + - metric + - ["$HISTORY['create metric1'].$RESPONSE['$.id']", "mean"] + - ["$HISTORY['create metric2'].$RESPONSE['$.id']", "mean"] + - ["$HISTORY['create metric3'].$RESPONSE['$.id']", "mean"] + + status: 400 + response_json_paths: + $.code: 400 + $.description.cause: "Metrics can't being aggregated" + $.description.reason: "No granularity match" + $.description.detail.`len`: 3 + + - name: get unknown 
granularity + POST: /v1/aggregates/fetch?granularity=123 + request_headers: + accept: application/json + content-type: application/json + authorization: "basic Zm9vYmFyOg==" + data: + operations: + - metric + - "$HISTORY['create metric1'].$RESPONSE['$.id']" + - "mean" + status: 400 + response_json_paths: + $.code: 400 + $.description.cause: "Metrics can't being aggregated" + $.description.reason: "granularity '123' is missing" + $.description.detail: + - ["$HISTORY['create metric1'].$RESPONSE['$.id']", mean] + + - name: get unknown aggregation + POST: /v1/aggregates/fetch + request_headers: + accept: application/json + content-type: application/json + authorization: "basic Zm9vYmFyOg==" + data: + operations: + - metric + - ["$HISTORY['create metric1'].$RESPONSE['$.id']", "what?"] + - ["$HISTORY['create metric2'].$RESPONSE['$.id']", "mean"] + status: 404 + response_json_paths: + $.code: 404 + $.description.cause: "Metrics with unknown aggregation" + $.description.detail: + - ["$HISTORY['create metric1'].$RESPONSE['$.id']", "what?"] + + - name: invalid start + POST: /v1/aggregates/fetch?start=notadate + request_headers: + accept: application/json + content-type: application/json + authorization: "basic Zm9vYmFyOg==" + status: 400 + response_json_paths: + $.code: 400 + $.description.cause: "Argument value error" + $.description.detail: "start" + $.description.reason: "Must be a datetime or a timestamp" + + - name: invalid stop + POST: /v1/aggregates/fetch?stop=notadate + request_headers: + accept: application/json + content-type: application/json + authorization: "basic Zm9vYmFyOg==" + status: 400 + response_json_paths: + $.code: 400 + $.description.cause: "Argument value error" + $.description.detail: "stop" + $.description.reason: "Must be a datetime or a timestamp" + + - name: invalid needed_overlap + POST: /v1/aggregates/fetch?needed_overlap=notnumber + request_headers: + accept: application/json + content-type: application/json + authorization: "basic 
Zm9vYmFyOg==" + status: 400 + response_json_paths: + $.code: 400 + $.description.cause: "Argument value error" + $.description.detail: "needed_overlap" + $.description.reason: "Must be a number" + + - name: incomplete needed_overlap + POST: /v1/aggregates/fetch?needed_overlap=50 + request_headers: + accept: application/json + content-type: application/json + authorization: "basic Zm9vYmFyOg==" + status: 400 + response_json_paths: + $.code: 400 + $.description.cause: "Argument value error" + $.description.detail: "needed_overlap" + $.description.reason: "start and/or stop must be provided if specifying needed_overlap" + + - name: invalid granularity + POST: /v1/aggregates/fetch?granularity=foobar + request_headers: + accept: application/json + content-type: application/json + authorization: "basic Zm9vYmFyOg==" + status: 400 + response_json_paths: + $.code: 400 + $.description.cause: "Argument value error" + $.description.detail: "granularity" + $.description.reason: "Unable to parse timespan" + + - name: incomplete fill + POST: /v1/aggregates/fetch?fill=123 + request_headers: + accept: application/json + content-type: application/json + authorization: "basic Zm9vYmFyOg==" + status: 400 + response_json_paths: + $.code: 400 + $.description.cause: "Argument value error" + $.description.detail: "granularity" + $.description.reason: "Unable to fill without a granularity" + + - name: invalid fill + POST: /v1/aggregates/fetch?fill=foobar&granularity=5 + request_headers: + accept: application/json + content-type: application/json + authorization: "basic Zm9vYmFyOg==" + status: 400 + response_json_paths: + $.code: 400 + $.description.cause: "Argument value error" + $.description.detail: "fill" + $.description.reason: "Must be a float or 'null', got 'foobar'" diff --git a/gnocchi/tests/functional/gabbits/resource.yaml b/gnocchi/tests/functional/gabbits/resource.yaml index 878aff0d..ae62f6f9 100644 --- a/gnocchi/tests/functional/gabbits/resource.yaml +++ 
b/gnocchi/tests/functional/gabbits/resource.yaml @@ -58,9 +58,9 @@ tests: redirects: true response_json_paths: $.version: "1.0" - $.links.`len`: 11 + $.links.`len`: 12 $.links[0].href: $SCHEME://$NETLOC/v1 - $.links[7].href: $SCHEME://$NETLOC/v1/resource + $.links[8].href: $SCHEME://$NETLOC/v1/resource - name: root of resource GET: /v1/resource diff --git a/gnocchi/tests/test_aggregates.py b/gnocchi/tests/test_aggregates.py index 3accd412..9243b5d0 100644 --- a/gnocchi/tests/test_aggregates.py +++ b/gnocchi/tests/test_aggregates.py @@ -815,7 +815,7 @@ class CrossMetricAggregated(base.TestCase): def test_get_measures_empty_metrics_no_overlap(self): self.assertRaises( - processor.MetricUnaggregatable, + processor.UnAggregableTimeseries, processor.get_measures, self.storage, [(indexer.Metric(uuid.uuid4(), self.archive_policies['low']), 'mean'), @@ -865,7 +865,7 @@ class CrossMetricAggregated(base.TestCase): incoming.Measure(datetime64(2014, 1, 1, 12, 9, 31), 4), incoming.Measure(datetime64(2014, 1, 1, 12, 12, 45), 44), ]) - self.assertRaises(storage.GranularityDoesNotExist, + self.assertRaises(processor.UnAggregableTimeseries, processor.get_measures, self.storage, [(self.metric, "mean"), (metric2, "mean")], @@ -892,7 +892,7 @@ class CrossMetricAggregated(base.TestCase): incoming.Measure(datetime64(2014, 1, 1, 12, 12, 45), 44), ]) - self.assertRaises(processor.MetricUnaggregatable, + self.assertRaises(processor.UnAggregableTimeseries, processor.get_measures, self.storage, [(self.metric, "mean"), (metric2, "mean")], diff --git a/gnocchi/tests/test_rest.py b/gnocchi/tests/test_rest.py index 964c8b65..afc7eb5a 100644 --- a/gnocchi/tests/test_rest.py +++ b/gnocchi/tests/test_rest.py @@ -1636,8 +1636,7 @@ class ResourceTest(RestTest): + self.resource_type + "/metric/foo?aggregation=max", params={"=": {"name": name}}, status=400) - self.assertIn(b"One of the metrics being aggregated doesn't have " - b"matching granularity", + self.assertIn(b"Metrics can't being aggregated", 
result.body) def test_get_res_named_metric_measure_aggregation_nooverlap(self): -- GitLab From 00b06f365f32cfeb563b27c245ca020144049d6e Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Thu, 12 Oct 2017 09:27:39 +0200 Subject: [PATCH 1034/1483] Rename storage.aggregation_workers_number to parallel_operations The option storage.aggregation_workers_number is not only used by storage, but also by API cross aggregation. Make it generic and move the map_in_thread method into a generic parallel_map function in gnocchi.utils. --- gnocchi/opts.py | 8 +++++ gnocchi/rest/aggregates/processor.py | 14 ++++----- gnocchi/service.py | 4 ++- gnocchi/storage/__init__.py | 31 +++---------------- gnocchi/tests/test_utils.py | 14 +++++++++ gnocchi/utils.py | 17 ++++++++++ ...tion_workers_numbers-cb3a8cf62211bd5b.yaml | 7 +++++ 7 files changed, 60 insertions(+), 35 deletions(-) create mode 100644 releasenotes/notes/parallel_operations_replaces_aggregation_workers_numbers-cb3a8cf62211bd5b.yaml diff --git a/gnocchi/opts.py b/gnocchi/opts.py index 5a1e13de..608294ca 100644 --- a/gnocchi/opts.py +++ b/gnocchi/opts.py @@ -94,6 +94,14 @@ _cli_options = ( def list_opts(): return [ ("DEFAULT", _cli_options + ( + cfg.IntOpt( + 'parallel_operations', + min=1, + deprecated_name='aggregation_workers_number', + deprecated_group='storage', + help='Number of threads to use to parallelize ' + 'some operations. 
' + 'Default is set to the number of CPU available.'), cfg.BoolOpt( 'use-syslog', default=False, diff --git a/gnocchi/rest/aggregates/processor.py b/gnocchi/rest/aggregates/processor.py index af35df7c..f7116bcf 100644 --- a/gnocchi/rest/aggregates/processor.py +++ b/gnocchi/rest/aggregates/processor.py @@ -108,13 +108,13 @@ def get_measures(storage, metrics_and_aggregations, else: granularities_in_common = [granularity] - tss = storage._map_in_thread(_get_measures_timeserie, - [(storage, metric, aggregation, - ref_identifier, - g, from_timestamp, to_timestamp) - for (metric, aggregation) - in metrics_and_aggregations - for g in granularities_in_common]) + tss = utils.parallel_map(_get_measures_timeserie, + [(storage, metric, aggregation, + ref_identifier, + g, from_timestamp, to_timestamp) + for (metric, aggregation) + in metrics_and_aggregations + for g in granularities_in_common]) if resample and granularity: tss = list(map(lambda ref_and_ts: ( diff --git a/gnocchi/service.py b/gnocchi/service.py index 5644ae11..3c580e78 100644 --- a/gnocchi/service.py +++ b/gnocchi/service.py @@ -46,12 +46,14 @@ def prepare_service(args=None, conf=None, workers = utils.get_default_workers() conf.set_default("workers", workers, group="metricd") - conf.set_default("aggregation_workers_number", workers, group="storage") + conf.set_default("parallel_operations", workers) conf(args, project='gnocchi', validate_default_values=True, default_config_files=default_config_files, version=pbr.version.VersionInfo('gnocchi').version_string()) + utils.parallel_map.NUM_WORKERS = conf.parallel_operations + if not log_to_std and (conf.log_dir or conf.log_file): outputs = [daiquiri.output.File(filename=conf.log_file, directory=conf.log_dir)] diff --git a/gnocchi/storage/__init__.py b/gnocchi/storage/__init__.py index 97008cb3..f0806f67 100644 --- a/gnocchi/storage/__init__.py +++ b/gnocchi/storage/__init__.py @@ -19,7 +19,6 @@ import functools import itertools import operator -from concurrent 
import futures import daiquiri import numpy from oslo_config import cfg @@ -37,11 +36,6 @@ OPTS = [ ] _CARBONARA_OPTS = [ - cfg.IntOpt('aggregation_workers_number', - min=1, - help='Number of threads to process and store aggregates. ' - 'Set value roughly equal to number of aggregates to be ' - 'computed per metric'), cfg.StrOpt('coordination_url', secret=True, help='Coordination driver URL'), @@ -126,12 +120,6 @@ def get_driver(conf, coord=None): class StorageDriver(object): def __init__(self, conf, coord=None): - self.aggregation_workers_number = conf.aggregation_workers_number - if self.aggregation_workers_number == 1: - # NOTE(jd) Avoid using futures at all if we don't want any threads. - self._map_in_thread = self._map_no_thread - else: - self._map_in_thread = self._map_in_futures_threads self.coord = (coord if coord else utils.get_coordinator_and_start(conf.coordination_url)) self.shared_coord = bool(coord) @@ -224,7 +212,7 @@ class StorageDriver(object): raise AggregationDoesNotExist(metric, aggregation) if granularity is None: - agg_timeseries = self._map_in_thread( + agg_timeseries = utils.parallel_map( self._get_measures_timeserie, ((metric, aggregation, ap.granularity, from_timestamp, to_timestamp) @@ -287,7 +275,7 @@ class StorageDriver(object): timeseries = list(filter( lambda x: x is not None, - self._map_in_thread( + utils.parallel_map( self._get_measures_and_unserialize, ((metric, key, aggregation) for key in sorted(all_keys) @@ -554,7 +542,7 @@ class StorageDriver(object): d.granularity, carbonara.round_timestamp( tstamp, d.granularity)) - self._map_in_thread( + utils.parallel_map( self._add_measures, ((aggregation, d, metric, ts, current_first_block_timestamp, @@ -607,7 +595,7 @@ class StorageDriver(object): granularity = granularity or [] predicate = MeasureQuery(query) - results = self._map_in_thread( + results = utils.parallel_map( self._find_measure, [(metric, aggregation, gran, predicate, @@ -628,17 +616,6 @@ class StorageDriver(object): 
return result - @staticmethod - def _map_no_thread(method, list_of_args): - return list(itertools.starmap(method, list_of_args)) - - def _map_in_futures_threads(self, method, list_of_args): - with futures.ThreadPoolExecutor( - max_workers=self.aggregation_workers_number) as executor: - # We use 'list' to iterate all threads here to raise the first - # exception now, not much choice - return list(executor.map(lambda args: method(*args), list_of_args)) - class MeasureQuery(object): binary_operators = { diff --git a/gnocchi/tests/test_utils.py b/gnocchi/tests/test_utils.py index 2e07b25d..b3caebe8 100644 --- a/gnocchi/tests/test_utils.py +++ b/gnocchi/tests/test_utils.py @@ -115,3 +115,17 @@ class StopWatchTest(tests_base.TestCase): with utils.StopWatch() as watch: pass self.assertGreater(watch.elapsed(), 0) + + +class ParallelMap(tests_base.TestCase): + def test_parallel_map_one(self): + utils.parallel_map.NUM_WORKERS = 1 + self.assertEqual([1, 2, 3], + utils.parallel_map(lambda x: x, + [[1], [2], [3]])) + + def test_parallel_map_four(self): + utils.parallel_map.NUM_WORKERS = 4 + self.assertEqual([1, 2, 3], + utils.parallel_map(lambda x: x, + [[1], [2], [3]])) diff --git a/gnocchi/utils.py b/gnocchi/utils.py index df424666..c86b7889 100644 --- a/gnocchi/utils.py +++ b/gnocchi/utils.py @@ -22,6 +22,7 @@ import multiprocessing import os import uuid +from concurrent import futures import daiquiri import iso8601 import monotonic @@ -303,3 +304,19 @@ def get_driver_class(namespace, conf): """ return driver.DriverManager(namespace, conf.driver).driver + + +def parallel_map(fn, list_of_args): + """Run a function in parallel.""" + + if parallel_map.MAX_WORKERS == 1: + return list(itertools.starmap(fn, list_of_args)) + + with futures.ThreadPoolExecutor( + max_workers=parallel_map.MAX_WORKERS) as executor: + # We use 'list' to iterate all threads here to raise the first + # exception now, not much choice + return list(executor.map(lambda args: fn(*args), list_of_args)) + + 
+parallel_map.MAX_WORKERS = get_default_workers() diff --git a/releasenotes/notes/parallel_operations_replaces_aggregation_workers_numbers-cb3a8cf62211bd5b.yaml b/releasenotes/notes/parallel_operations_replaces_aggregation_workers_numbers-cb3a8cf62211bd5b.yaml new file mode 100644 index 00000000..b31838db --- /dev/null +++ b/releasenotes/notes/parallel_operations_replaces_aggregation_workers_numbers-cb3a8cf62211bd5b.yaml @@ -0,0 +1,7 @@ +--- +upgrade: + - | + The `storage.aggregation_workers_number` parameter has been replaced by a + more general `parallel_operations` option. It controls the number of + parallel jobs that can be run by a worker using threads in various code + paths. -- GitLab From 28d917c5963bfcbd24f628bc20acd7ebbbae1534 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Fri, 13 Oct 2017 00:12:39 +0200 Subject: [PATCH 1035/1483] aggregates: implements more operations This changes implements the following operations for the aggregates API: - rolling - resample - math (*/+-%...) - abosolute/negative It also re-add transformation tests related to metrics endpoint but with new aggregates API. 
Related #419 --- doc/source/rest.j2 | 143 ++++- doc/source/rest.yaml | 43 ++ gnocchi/rest/aggregates/api.py | 42 +- gnocchi/rest/aggregates/exceptions.py | 30 + gnocchi/rest/aggregates/operations.py | 203 ++++++- gnocchi/rest/aggregates/processor.py | 25 +- gnocchi/rest/api.py | 3 +- .../functional/gabbits/aggregates-fetch.yaml | 187 +++++- .../tests/functional/gabbits/aggregation.yaml | 9 + gnocchi/tests/functional/gabbits/metric.yaml | 61 ++ gnocchi/tests/test_aggregates.py | 530 ++++++++++++++++-- .../aggregates-API-d31db66e674cbf60.yaml | 8 + 12 files changed, 1180 insertions(+), 104 deletions(-) create mode 100644 gnocchi/rest/aggregates/exceptions.py create mode 100644 releasenotes/notes/aggregates-API-d31db66e674cbf60.yaml diff --git a/doc/source/rest.j2 b/doc/source/rest.j2 index ddeb6e84..a9034d8e 100644 --- a/doc/source/rest.j2 +++ b/doc/source/rest.j2 @@ -551,15 +551,134 @@ argument, and in this case the second argument passed is the value, or it. The operators or (`or` or `∨`), and (`and` or `∧`) and `not` are also supported, and take a list of arguments as parameters. +.. _aggregates: + +Aggregates: on the fly, measurements modification and aggregation +=================================================================== + +Gnocchi allows to do on-the-fly aggregation and modification of already +aggregated data of |metrics|. + +It can be done by providing the list of |metrics| to aggregate: + +{{ scenarios['get-aggregates-by-metric-ids']['doc'] }} + +This example computes the mean aggregates with `all` metrics listed in +`metrics` and then multiples it by `4`. + +.. 
note:: + + `operations` can also be passed as a string, for example: + `"operations": "(aggregate mean (metric (metric-id aggregation) (metric-id aggregation))"` + +Operations between metrics can also be done, such as: + +{{ scenarios['get-aggregates-between-metrics']['doc'] }} + +Aggregation across |metrics| have different behavior depending +on whether boundary values are set (`start` and `stop`) and if `needed_overlap` +is set. + +Gnocchi expects that time series have a certain percentage of timestamps in +common. This percent is controlled by the `needed_overlap` needed_overlap, +which by default expects 100% overlap. If this percentage is not reached, an +error is returned. + +.. note:: + + If `start` or `stop` boundary is not set, Gnocchi will set the missing + boundary to the first or last timestamp common across all series. + +The ability to fill in missing points from a subset of time series is supported +by specifying a `fill` value. Valid fill values include any float or `null`. In +the case of `null`, Gnocchi will compute the aggregation using only the +existing points. The `fill` parameter will not backfill timestamps which contain no +points in any of the time series. Only timestamps which have datapoints in at +least one of the time series is returned. + +.. note:: + + A |granularity| must be specified when using the `fill` parameter. + +{{ scenarios['get-aggregates-by-metric-ids-fill']['doc'] }} + + + +List of supported +------------------------------ + +getting one or more metrics:: + + (metric ) + (metric (( ), ( ), ...)) + + metric-id: the id of a metric to retrieve + aggregation: the aggregation method to retrieve + +rolling window aggregation:: + + (rolling ()) + + aggregation method: the aggregation method to use to compute the rolling window. 
+ (mean, median, std, min, max, sum, var, count) + rolling window: number of previous values to aggregate + + +aggregation across metrics:: + + aggregate ((), (), ...)) + + aggregation method: the aggregation method to use to compute the aggregate between metrics + (mean, median, std, min, max, sum, var, count) + +resampling metrics:: + + (resample ()) + + aggregation method: the aggregation method to use to compute the aggregate between metrics + (mean, median, std, min, max, sum, var, count) + + granularity: the granularity (e.g.: 1d, 60s, ...) + +math operations:: + + ( ) + + operator: %, mod, +, add, -, sub, *, ×, mul, /, ÷, div, **, ^, pow + +boolean operations:: + + ( ) + + operator: =, ==, eq, <, lt, >, gt, <=, ≤, le, =, ≥, ge, !=, ≠, ne + +function operations:: + + (abs ()) + (absolute ()) + (neg ()) + (negative ()) + (cos ()) + (sin ()) + (tan ()) + (floor ()) + (ceil ()) + + .. _aggregation-across-metrics: -Aggregation across metrics -========================== +Aggregation across metrics (deprecated) +======================================= + +.. Note:: + + This API have been replaced by the more flexible :ref:`aggregates API ` + -Gnocchi allows to do on-the-fly aggregation of already aggregated data of +Gnocchi supports on-the-fly aggregation of previously aggregated data of |metrics|. -It can also be done by providing the list of |metrics| to aggregate: +It can be done by providing the list of |metrics| to aggregate: {{ scenarios['get-across-metrics-measures-by-metric-ids']['doc'] }} @@ -604,19 +723,19 @@ is set. Gnocchi expects that we have a certain percent of timestamps common between time series. This percent is controlled by needed_overlap, which by default -expects 100% overlap. If this percent is not reached, an error is returned. +expects 100% overlap. If this percentage is not reached, an error is returned. .. 
note:: - If a start and end boundary are not set, Gnocchi will set the missing + If `start` or `stop` boundary is not set, Gnocchi will set the missing boundary to the first or last timestamp common across all series. -The ability to fill in points missing from a subset of time series is supported -by specifying a `fill` value. Valid fill values include any valid float or -`null` which will compute aggregation with only the points that exist. The -`fill` parameter will not backfill timestamps which contain no points in any -of the time series. Only timestamps which have datapoints in at least one of -the time series is returned. +The ability to fill in missing points from a subset of time series is supported +by specifying a `fill` value. Valid fill values include any float or `null`. In +the case of `null`, Gnocchi will compute the aggregation using only the +existing points. The `fill` parameter will not backfill timestamps which contain no +points in any of the time series. Only timestamps which have datapoints in at +least one of the time series is returned. .. 
note:: diff --git a/doc/source/rest.yaml b/doc/source/rest.yaml index 07d0ddd6..f7749b14 100644 --- a/doc/source/rest.yaml +++ b/doc/source/rest.yaml @@ -773,6 +773,49 @@ {"memory": {"archive_policy_name": "low"}} +- name: get-aggregates-by-metric-ids + request: | + POST /v1/aggregates/fetch?start=2014-10-06T14:34&stop=2017-10-06T14:34 HTTP/1.1 + Content-Type: application/json + + { + "operations": [ + "*", + ["aggregate", "mean", [ + "metric", + ["{{ scenarios['create-resource-instance-with-metrics']['response'].json['metrics']['cpu.util'] }}", "mean"], + ["{{ scenarios['create-resource-instance-with-dynamic-metrics']['response'].json['metrics']['cpu.util'] }}", "mean"] + ]], + 4 + ] + } + +- name: get-aggregates-between-metrics + request: | + POST /v1/aggregates/fetch?start=2014-10-06T14:34&stop=2017-10-06T14:34 HTTP/1.1 + Content-Type: application/json + + { + "operations": [ + "absolute", + [ + "**", + ["metric", "{{ scenarios['create-resource-instance-with-metrics']['response'].json['metrics']['cpu.util'] }}", "mean"], + ["metric", "{{ scenarios['create-resource-instance-with-dynamic-metrics']['response'].json['metrics']['cpu.util'] }}", "mean"] + ] + ] + } + + +- name: get-aggregates-by-metric-ids-fill + request: | + POST /v1/aggregates/fetch?fill=0&granularity=1 HTTP/1.1 + Content-Type: application/json + + { + "operations": "(* (aggregate mean (metric {{ scenarios['create-resource-instance-with-metrics']['response'].json['metrics']['cpu.util'] }} mean)) 4)" + } + - name: get-capabilities request: GET /v1/capabilities HTTP/1.1 diff --git a/gnocchi/rest/aggregates/api.py b/gnocchi/rest/aggregates/api.py index a9b013ed..59696a93 100644 --- a/gnocchi/rest/aggregates/api.py +++ b/gnocchi/rest/aggregates/api.py @@ -20,6 +20,7 @@ import pyparsing import six import voluptuous +from gnocchi.rest.aggregates import exceptions from gnocchi.rest.aggregates import operations as agg_operations from gnocchi.rest.aggregates import processor from gnocchi.rest import api @@ 
-42,8 +43,19 @@ def MetricSchema(v): ["metric, ["metric-ref", "aggregation"], ["metric-ref", "aggregation"]] """ - if not isinstance(v, (list, tuple)) or len(v) <= 2 or v[0] != u"metric": - raise voluptuous.Invalid("'metric' is invalid") + if not isinstance(v, (list, tuple)): + raise voluptuous.Invalid("Expected a tuple/list, got a %s" % type(v)) + elif not v: + raise voluptuous.Invalid("Operation must not be empty") + elif len(v) < 2: + raise voluptuous.Invalid("Operation need at least one argument") + elif v[0] != u"metric": + # NOTE(sileht): this error message doesn't looks related to "metric", + # but because that the last schema validated by voluptuous, we have + # good chance (voluptuous.Any is not predictable) to print this + # message even if it's an other operation that invalid. + raise voluptuous.Invalid("'%s' operation invalid" % v[0]) + return [u"metric"] + voluptuous.Schema(voluptuous.Any( voluptuous.ExactSequence([six.text_type, six.text_type]), voluptuous.All( @@ -54,11 +66,35 @@ def MetricSchema(v): OperationsSchemaBase = [ MetricSchema, + voluptuous.ExactSequence( + [voluptuous.Any(*list( + agg_operations.binary_operators.keys())), + _OperationsSubNodeSchema, _OperationsSubNodeSchema] + ), + voluptuous.ExactSequence( + [voluptuous.Any(*list( + agg_operations.unary_operators.keys())), + _OperationsSubNodeSchema] + ), voluptuous.ExactSequence( [u"aggregate", voluptuous.Any(*list(agg_operations.AGG_MAP.keys())), _OperationsSubNodeSchema] ), + voluptuous.ExactSequence( + [u"resample", + voluptuous.Any(*list(agg_operations.AGG_MAP.keys())), + utils.to_timespan, _OperationsSubNodeSchema] + ), + voluptuous.ExactSequence( + [u"rolling", + voluptuous.Any(*list(agg_operations.AGG_MAP.keys())), + voluptuous.All( + voluptuous.Coerce(int), + voluptuous.Range(min=1), + ), + _OperationsSubNodeSchema] + ) ] @@ -106,7 +142,7 @@ def get_measures_or_abort(metrics_and_aggregations, operations, start, start, stop, granularity, needed_overlap, fill, 
ref_identifier=ref_identifier) - except processor.UnAggregableTimeseries as e: + except exceptions.UnAggregableTimeseries as e: api.abort(400, e) # TODO(sileht): We currently got only one metric for these exceptions but # we can improve processor to returns all missing metrics at once, so we diff --git a/gnocchi/rest/aggregates/exceptions.py b/gnocchi/rest/aggregates/exceptions.py new file mode 100644 index 00000000..00387d7f --- /dev/null +++ b/gnocchi/rest/aggregates/exceptions.py @@ -0,0 +1,30 @@ +# -*- encoding: utf-8 -*- +# +# Copyright © 2016-2017 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +class UnAggregableTimeseries(Exception): + """Error raised when timeseries cannot be aggregated.""" + def __init__(self, references, reason): + self.references = references + self.reason = reason + super(UnAggregableTimeseries, self).__init__(reason) + + def jsonify(self): + return { + "cause": "Metrics can't being aggregated", + "reason": self.reason, + "detail": self.references + } diff --git a/gnocchi/rest/aggregates/operations.py b/gnocchi/rest/aggregates/operations.py index 5981a698..242235d5 100644 --- a/gnocchi/rest/aggregates/operations.py +++ b/gnocchi/rest/aggregates/operations.py @@ -14,9 +14,13 @@ # License for the specific language governing permissions and limitations # under the License. 
+import numbers + import numpy +from numpy.lib.stride_tricks import as_strided from gnocchi import carbonara +from gnocchi.rest.aggregates import exceptions AGG_MAP = { @@ -32,7 +36,113 @@ AGG_MAP = { } -def handle_aggregate(agg, granularity, timestamps, values, is_aggregated): +# TODO(sileht): expose all operators in capability API +binary_operators = { + u"=": numpy.equal, + u"==": numpy.equal, + u"eq": numpy.equal, + + u"<": numpy.less, + u"lt": numpy.less, + + u">": numpy.greater, + u"gt": numpy.greater, + + u"<=": numpy.less_equal, + u"≤": numpy.less_equal, + u"le": numpy.less_equal, + + u">=": numpy.greater_equal, + u"≥": numpy.greater_equal, + u"ge": numpy.greater_equal, + + u"!=": numpy.not_equal, + u"≠": numpy.not_equal, + u"ne": numpy.not_equal, + + u"%": numpy.mod, + u"mod": numpy.mod, + + u"+": numpy.add, + u"add": numpy.add, + + u"-": numpy.subtract, + u"sub": numpy.subtract, + + u"*": numpy.multiply, + u"×": numpy.multiply, + u"mul": numpy.multiply, + + u"/": numpy.true_divide, + u"÷": numpy.true_divide, + u"div": numpy.true_divide, + + u"**": numpy.power, + u"^": numpy.power, + u"pow": numpy.power, + +} + +# TODO(sileht): adds, numpy.around, but it take a decimal argument to handle +unary_operators = { + u"abs": numpy.absolute, + u"absolute": numpy.absolute, + + u"neg": numpy.negative, + u"negative": numpy.negative, + + u"cos": numpy.cos, + u"sin": numpy.sin, + u"tan": numpy.tan, + u"floor": numpy.floor, + u"ceil": numpy.ceil, +} + + +def handle_unary_operator(nodes, granularity, timestamps, initial_values, + is_aggregated, references): + op = nodes[0] + granularity, timestamps, values, is_aggregated = evaluate( + nodes[1], granularity, timestamps, initial_values, + is_aggregated, references) + + values = unary_operators[op](values) + return granularity, timestamps, values, is_aggregated + + +def handle_binary_operator(nodes, granularity, timestamps, + initial_values, is_aggregated, references): + op = nodes[0] + g1, t1, v1, is_a1 = 
evaluate(nodes[1], granularity, timestamps, + initial_values, is_aggregated, references) + g2, t2, v2, is_a2 = evaluate(nodes[2], granularity, timestamps, + initial_values, is_aggregated, references) + + is_aggregated = is_a1 or is_a2 + # We keep the computed timeseries + if isinstance(v1, numpy.ndarray) and isinstance(v2, numpy.ndarray): + if not numpy.array_equal(t1, t2) or g1 != g2: + raise exceptions.UnAggregableTimeseries( + references, + "Can't compute timeseries with different " + "granularity %s <> %s" % (nodes[1], nodes[2])) + timestamps = t1 + granularity = g1 + is_aggregated = True + + elif isinstance(v2, numpy.ndarray): + timestamps = t2 + granularity = g2 + else: + timestamps = t1 + granularity = g1 + + values = binary_operators[op](v1, v2) + return granularity, timestamps, values, is_aggregated + + +def handle_aggregate(agg, granularity, timestamps, values, is_aggregated, + references): values = numpy.array([AGG_MAP[agg](values, axis=1)]).T if values.shape[1] != 1: raise RuntimeError("Unexpected resulting aggregated array shape: %s" % @@ -40,31 +150,114 @@ def handle_aggregate(agg, granularity, timestamps, values, is_aggregated): return (granularity, timestamps, values, True) +def handle_rolling(agg, granularity, timestamps, values, is_aggregated, + references, window): + if window > len(values): + raise exceptions.UnAggregableTimeseries( + references, + "Rolling window '%d' is greater than serie length '%d'" % + (window, len(values)) + ) + + # TODO(sileht): make a more optimised version that + # compute the data across the whole matrix + new_values = None + timestamps = timestamps[window - 1:] + for ts in values.T: + # arogozhnikov.github.io/2015/09/30/NumpyTipsAndTricks2.html + stride = ts.strides[0] + ts = AGG_MAP[agg](as_strided( + ts, shape=[len(ts) - window + 1, window], + strides=[stride, stride]), axis=1) + if new_values is None: + new_values = numpy.array([ts]) + else: + new_values = numpy.append(new_values, [ts], axis=0) + return 
granularity, timestamps, new_values.T, is_aggregated + + +def handle_resample(agg, granularity, timestamps, values, is_aggregated, + references, sampling): + # TODO(sileht): make a more optimised version that + # compute the data across the whole matrix + new_values = None + result_timestamps = timestamps + for ts in values.T: + ts = carbonara.AggregatedTimeSerie.from_data(None, agg, timestamps, ts) + ts = ts.resample(sampling) + result_timestamps = ts["timestamps"] + if new_values is None: + new_values = numpy.array([ts["values"]]) + else: + new_values = numpy.append(new_values, [ts["values"]], axis=0) + return sampling, result_timestamps, new_values.T, is_aggregated + + def handle_aggregation_operator(nodes, granularity, timestamps, initial_values, is_aggregated, references): op = aggregation_operators[nodes[0]] agg = nodes[1] subnodes = nodes[-1] args = nodes[2:-1] - if agg not in AGG_MAP: - raise carbonara.UnknownAggregationMethod(agg) granularity, timestamps, values, is_aggregated = evaluate( subnodes, granularity, timestamps, initial_values, is_aggregated, references) - return op(agg, granularity, timestamps, values, is_aggregated, *args) + return op(agg, granularity, timestamps, values, is_aggregated, + references, *args) aggregation_operators = { u"aggregate": handle_aggregate, + u"rolling": handle_rolling, + u"resample": handle_resample, } +def sanity_check(method): + # NOTE(sileht): This is important checks, because caller may use zip and + # build an incomplete timeseries without we notice the result is + # unexpected. 
+ + def inner(*args, **kwargs): + granularity, timestamps, values, is_aggregated = method( + *args, **kwargs) + + t_len = len(timestamps) + if t_len > 2 and not ((timestamps[1] - timestamps[0]) / + granularity).is_integer(): + # NOTE(sileht): numpy.mod is not possible with timedelta64, + # we don't really care about the remainder value, instead we just + # check we don't have remainder, by using floor_divide and checking + # the result is an integer. + raise RuntimeError("timestamps and granularity doesn't match: " + "%s vs %s" % (timestamps[1] - timestamps[0], + granularity)) + + elif isinstance(values, numpy.ndarray) and t_len != len(values): + raise RuntimeError("timestamps and values length are different: " + "%s vs %s" % (t_len, len(values))) + + return granularity, timestamps, values, is_aggregated + return inner + + +@sanity_check def evaluate(nodes, granularity, timestamps, initial_values, is_aggregated, references): - if nodes[0] in aggregation_operators: + if isinstance(nodes, numbers.Number): + return granularity, timestamps, nodes, is_aggregated + elif nodes[0] in aggregation_operators: return handle_aggregation_operator(nodes, granularity, timestamps, initial_values, is_aggregated, references) + elif nodes[0] in binary_operators: + return handle_binary_operator(nodes, granularity, timestamps, + initial_values, is_aggregated, + references) + elif nodes[0] in unary_operators: + return handle_unary_operator(nodes, granularity, timestamps, + initial_values, is_aggregated, + references) elif nodes[0] == "metric": if isinstance(nodes[1], list): predicat = lambda r: r in nodes[1:] diff --git a/gnocchi/rest/aggregates/processor.py b/gnocchi/rest/aggregates/processor.py index f7116bcf..aaa522c3 100644 --- a/gnocchi/rest/aggregates/processor.py +++ b/gnocchi/rest/aggregates/processor.py @@ -22,6 +22,7 @@ import numpy import six from gnocchi import carbonara +from gnocchi.rest.aggregates import exceptions from gnocchi.rest.aggregates import operations as 
agg_operations from gnocchi import storage as gnocchi_storage from gnocchi import utils @@ -30,21 +31,6 @@ from gnocchi import utils LOG = daiquiri.getLogger(__name__) -class UnAggregableTimeseries(Exception): - """Error raised when timeseries cannot be aggregated.""" - def __init__(self, references, reason): - self.references = references - self.reason = reason - super(UnAggregableTimeseries, self).__init__(reason) - - def jsonify(self): - return { - "cause": "Metrics can't being aggregated", - "reason": self.reason, - "detail": self.references - } - - def _get_measures_timeserie(storage, metric, aggregation, ref_identifier, *args, **kwargs): return ([str(getattr(metric, ref_identifier)), aggregation], @@ -82,7 +68,7 @@ def get_measures(storage, metrics_and_aggregations, (getattr(metric, ref_identifier), aggregation)) if references_with_missing_granularity: - raise UnAggregableTimeseries( + raise exceptions.UnAggregableTimeseries( references_with_missing_granularity, "granularity '%d' is missing" % utils.timespan_total_seconds(granularity)) @@ -101,7 +87,7 @@ def get_measures(storage, metrics_and_aggregations, ] if not granularities_in_common: - raise UnAggregableTimeseries( + raise exceptions.UnAggregableTimeseries( list((str(getattr(m, ref_identifier)), a) for (m, a) in metrics_and_aggregations), 'No granularity match') @@ -158,7 +144,8 @@ def aggregated(refs_and_timeseries, operations, from_timestamp=None, overlap = numpy.flatnonzero(~numpy.any(numpy.isnan(values), axis=1)) if overlap.size == 0 and needed_percent_of_overlap > 0: - raise UnAggregableTimeseries(references[key], 'No overlap') + raise exceptions.UnAggregableTimeseries(references[key], + 'No overlap') # if no boundary set, use first/last timestamp which overlap if to_timestamp is None and overlap.size: times = times[:overlap[-1] + 1] @@ -168,7 +155,7 @@ def aggregated(refs_and_timeseries, operations, from_timestamp=None, values = values[overlap[0]:] percent_of_overlap = overlap.size * 100.0 / 
times.size if percent_of_overlap < needed_percent_of_overlap: - raise UnAggregableTimeseries( + raise exceptions.UnAggregableTimeseries( references[key], 'Less than %f%% of datapoints overlap in this ' 'timespan (%.2f%%)' % (needed_percent_of_overlap, diff --git a/gnocchi/rest/api.py b/gnocchi/rest/api.py index 1f2c13df..1daa1ba0 100644 --- a/gnocchi/rest/api.py +++ b/gnocchi/rest/api.py @@ -37,6 +37,7 @@ from gnocchi import incoming from gnocchi import indexer from gnocchi import json from gnocchi import resource_type +from gnocchi.rest.aggregates import exceptions from gnocchi.rest.aggregates import processor from gnocchi import storage from gnocchi import utils @@ -1790,7 +1791,7 @@ class AggregationController(rest.RestController): operations, start, stop, granularity, needed_overlap, fill, resample)["aggregated"] - except processor.UnAggregableTimeseries as e: + except exceptions.UnAggregableTimeseries as e: abort(400, e) except (storage.MetricDoesNotExist, storage.GranularityDoesNotExist, diff --git a/gnocchi/tests/functional/gabbits/aggregates-fetch.yaml b/gnocchi/tests/functional/gabbits/aggregates-fetch.yaml index 0285f3bf..815b1f58 100644 --- a/gnocchi/tests/functional/gabbits/aggregates-fetch.yaml +++ b/gnocchi/tests/functional/gabbits/aggregates-fetch.yaml @@ -170,6 +170,112 @@ tests: - ["2015-03-06T14:34:00+00:00", 60.0, 4.0] - ["2015-03-06T14:35:00+00:00", 60.0, 10.0] + - name: get aggregates simple with array + POST: /v1/aggregates/fetch + data: + operations: ["+", ["metric", ["$HISTORY['create metric1'].$RESPONSE['$.id']", "mean"], ["$HISTORY['create metric2'].$RESPONSE['$.id']", "mean"]], 2.0] + response_json_paths: + $.`len`: 2 + $."$HISTORY['create metric1'].$RESPONSE['$.id']_mean": + - ["2015-03-06T14:33:00+00:00", 60.0, 45.1] + - ["2015-03-06T14:34:00+00:00", 60.0, 0.0] + - ["2015-03-06T14:35:00+00:00", 60.0, 12.0] + - ["2015-03-06T14:33:57+00:00", 1.0, 45.1] + - ["2015-03-06T14:34:12+00:00", 1.0, 14.0] + - ["2015-03-06T14:34:15+00:00", 1.0, 
-14.0] + - ["2015-03-06T14:35:12+00:00", 1.0, 11.0] + - ["2015-03-06T14:35:15+00:00", 1.0, 13.0] + $."$HISTORY['create metric2'].$RESPONSE['$.id']_mean": + - ["2015-03-06T14:33:00+00:00", 60.0, 4.0] + - ["2015-03-06T14:34:00+00:00", 60.0, 6.5] + - ["2015-03-06T14:35:00+00:00", 60.0, 14.5] + - ["2015-03-06T14:33:57+00:00", 1.0, 4.0] + - ["2015-03-06T14:34:12+00:00", 1.0, 6.0] + - ["2015-03-06T14:34:15+00:00", 1.0, 7.0] + - ["2015-03-06T14:35:12+00:00", 1.0, 12.0] + - ["2015-03-06T14:35:15+00:00", 1.0, 17.0] + + - name: get aggregates resample + POST: /v1/aggregates/fetch?granularity=1 + data: + operations: + - resample + - mean + - 60 + - ["metric", ["$HISTORY['create metric1'].$RESPONSE['$.id']", "mean"], ["$HISTORY['create metric2'].$RESPONSE['$.id']", "mean"]] + response_json_paths: + $.`len`: 2 + $."$HISTORY['create metric1'].$RESPONSE['$.id']_mean": + - ["2015-03-06T14:33:00+00:00", 60.0, 43.1] + - ["2015-03-06T14:34:00+00:00", 60.0, -2.0] + - ["2015-03-06T14:35:00+00:00", 60.0, 10.0] + $."$HISTORY['create metric2'].$RESPONSE['$.id']_mean": + - ["2015-03-06T14:33:00+00:00", 60.0, 2.0] + - ["2015-03-06T14:34:00+00:00", 60.0, 4.5] + - ["2015-03-06T14:35:00+00:00", 60.0, 12.5] + + - name: get aggregates rolling + POST: /v1/aggregates/fetch?granularity=1 + data: + operations: + - rolling + - mean + - 2 + - ["metric", ["$HISTORY['create metric1'].$RESPONSE['$.id']", "mean"], ["$HISTORY['create metric2'].$RESPONSE['$.id']", "mean"]] + response_json_paths: + $.`len`: 2 + $."$HISTORY['create metric1'].$RESPONSE['$.id']_mean": + - ["2015-03-06T14:34:12+00:00", 1.0, 27.55] + - ["2015-03-06T14:34:15+00:00", 1.0, -2.0] + - ["2015-03-06T14:35:12+00:00", 1.0, -3.5] + - ["2015-03-06T14:35:15+00:00", 1.0, 10.0] + $."$HISTORY['create metric2'].$RESPONSE['$.id']_mean": + - ["2015-03-06T14:34:12+00:00", 1.0, 3.0] + - ["2015-03-06T14:34:15+00:00", 1.0, 4.5] + - ["2015-03-06T14:35:12+00:00", 1.0, 7.5] + - ["2015-03-06T14:35:15+00:00", 1.0, 12.5] + + - name: get aggregates math with 
string + POST: /v1/aggregates/fetch + data: + operations: "(+ (metric ($HISTORY['create metric1'].$RESPONSE['$.id'] mean) ($HISTORY['create metric2'].$RESPONSE['$.id'] mean)) 2.0)" + response_json_paths: + $.`len`: 2 + $."$HISTORY['create metric1'].$RESPONSE['$.id']_mean": + - ["2015-03-06T14:33:00+00:00", 60.0, 45.1] + - ["2015-03-06T14:34:00+00:00", 60.0, 0.0] + - ["2015-03-06T14:35:00+00:00", 60.0, 12.0] + - ["2015-03-06T14:33:57+00:00", 1.0, 45.1] + - ["2015-03-06T14:34:12+00:00", 1.0, 14.0] + - ["2015-03-06T14:34:15+00:00", 1.0, -14.0] + - ["2015-03-06T14:35:12+00:00", 1.0, 11.0] + - ["2015-03-06T14:35:15+00:00", 1.0, 13.0] + $."$HISTORY['create metric2'].$RESPONSE['$.id']_mean": + - ["2015-03-06T14:33:00+00:00", 60.0, 4.0] + - ["2015-03-06T14:34:00+00:00", 60.0, 6.5] + - ["2015-03-06T14:35:00+00:00", 60.0, 14.5] + - ["2015-03-06T14:33:57+00:00", 1.0, 4.0] + - ["2015-03-06T14:34:12+00:00", 1.0, 6.0] + - ["2015-03-06T14:34:15+00:00", 1.0, 7.0] + - ["2015-03-06T14:35:12+00:00", 1.0, 12.0] + - ["2015-03-06T14:35:15+00:00", 1.0, 17.0] + + - name: get aggregates substact + POST: /v1/aggregates/fetch + data: + operations: "(- (metric $HISTORY['create metric1'].$RESPONSE['$.id'] mean) (metric $HISTORY['create metric2'].$RESPONSE['$.id'] mean)))" + response_json_paths: + $.`len`: 1 + $."aggregated": + - ["2015-03-06T14:33:00+00:00", 60.0, 41.1] + - ["2015-03-06T14:34:00+00:00", 60.0, -6.5] + - ["2015-03-06T14:35:00+00:00", 60.0, -2.5] + - ["2015-03-06T14:33:57+00:00", 1.0, 41.1] + - ["2015-03-06T14:34:12+00:00", 1.0, 8.0] + - ["2015-03-06T14:34:15+00:00", 1.0, -21.0] + - ["2015-03-06T14:35:12+00:00", 1.0, -1.0] + - ["2015-03-06T14:35:15+00:00", 1.0, -4.0] + - name: get aggregates mean aggregate POST: /v1/aggregates/fetch data: @@ -186,6 +292,22 @@ tests: - ["2015-03-06T14:35:12+00:00", 1.0, 9.5] - ["2015-03-06T14:35:15+00:00", 1.0, 13.0] + - name: get aggregates negative absolute + POST: /v1/aggregates/fetch + data: + operations: "(negative (absolute (aggregate mean 
(metric ($HISTORY['create metric1'].$RESPONSE['$.id'] mean) ($HISTORY['create metric2'].$RESPONSE['$.id'] mean)))))" + response_json_paths: + $.`len`: 1 + $."aggregated": + - ["2015-03-06T14:33:00+00:00", 60.0, -22.55] + - ["2015-03-06T14:34:00+00:00", 60.0, -1.25] + - ["2015-03-06T14:35:00+00:00", 60.0, -11.25] + - ["2015-03-06T14:33:57+00:00", 1.0, -22.55] + - ["2015-03-06T14:34:12+00:00", 1.0, -8.0] + - ["2015-03-06T14:34:15+00:00", 1.0, -5.5] + - ["2015-03-06T14:35:12+00:00", 1.0, -9.5] + - ["2015-03-06T14:35:15+00:00", 1.0, -13.0] + # Negative tests - name: get no operations @@ -198,7 +320,20 @@ tests: operations: [] status: 400 response_strings: - - "'metric' is invalid" + - "Invalid input: Operation must not be empty" + + - name: get operations without list + POST: /v1/aggregates/fetch + request_headers: + accept: application/json + content-type: application/json + authorization: "basic Zm9vYmFyOg==" + data: + operations: + foo: bar + status: 400 + response_strings: + - "Invalid input: Expected a tuple/list, got a " - name: invalid operations string POST: /v1/aggregates/fetch @@ -225,7 +360,7 @@ tests: operations: ["metric"] status: 400 response_strings: - - "'metric' is invalid" + - "Invalid input: Operation need at least one argument for dictionary value" - name: get unknown metrics POST: /v1/aggregates/fetch @@ -395,3 +530,51 @@ tests: $.description.cause: "Argument value error" $.description.detail: "fill" $.description.reason: "Must be a float or 'null', got 'foobar'" + + - name: get rolling bad aggregate + POST: /v1/aggregates/fetch + request_headers: + accept: application/json + content-type: application/json + authorization: "basic Zm9vYmFyOg==" + data: + operations: "(rolling blah 2 (metric $HISTORY['create metric1'].$RESPONSE['$.id'] mean))" + status: 400 + response_strings: + - "Invalid input: 'rolling' operation invalid for dictionary value" + + - name: get rolling-mean missing window + POST: /v1/aggregates/fetch + request_headers: + accept: 
application/json + content-type: application/json + authorization: "basic Zm9vYmFyOg==" + data: + operations: "(rolling mean (metric $HISTORY['create metric1'].$RESPONSE['$.id'] mean))" + status: 400 + response_strings: + - "Invalid input: 'rolling' operation invalid for dictionary value" + + - name: get measurements from metric and invalid operations + POST: /v1/aggregates/fetch + request_headers: + accept: application/json + content-type: application/json + authorization: "basic Zm9vYmFyOg==" + data: + operations: "(notexist (absolute (metric $HISTORY['create metric1'].$RESPONSE['$.id'] mean)))" + status: 400 + response_strings: + - "Invalid input: 'notexist' operation invalid for dictionary value" + + - name: invalid resample + POST: /v1/aggregates/fetch + request_headers: + accept: application/json + content-type: application/json + authorization: "basic Zm9vYmFyOg==" + data: + operations: "(resample mean invalid (metric ($HISTORY['create metric1'].$RESPONSE['$.id'] mean) ($HISTORY['create metric2'].$RESPONSE['$.id'] mean)))" + status: 400 + response_strings: + - "Invalid input: 'resample' operation invalid for dictionary value" diff --git a/gnocchi/tests/functional/gabbits/aggregation.yaml b/gnocchi/tests/functional/gabbits/aggregation.yaml index 84cc7dfa..0d626a94 100644 --- a/gnocchi/tests/functional/gabbits/aggregation.yaml +++ b/gnocchi/tests/functional/gabbits/aggregation.yaml @@ -131,6 +131,15 @@ tests: - ['2015-03-06T14:33:00+00:00', 60.0, 23.1] - ['2015-03-06T14:34:00+00:00', 60.0, 7.0] + - name: get measure aggregates and operations + POST: /v1/aggregates/fetch?granularity=1 + data: + operations: "(aggregate mean (resample mean 60 (metric ($HISTORY['get metric list'].$RESPONSE['$[0].id'] mean) ($HISTORY['get metric list'].$RESPONSE['$[1].id'] mean))))" + response_json_paths: + $.aggregated: + - ['2015-03-06T14:33:00+00:00', 60.0, 23.1] + - ['2015-03-06T14:34:00+00:00', 60.0, 7.0] + - name: get measure aggregates with fill zero GET: 
/v1/aggregation/metric?metric=$HISTORY['get metric list'].$RESPONSE['$[0].id']&metric=$HISTORY['get metric list'].$RESPONSE['$[1].id']&granularity=1&fill=0 response_json_paths: diff --git a/gnocchi/tests/functional/gabbits/metric.yaml b/gnocchi/tests/functional/gabbits/metric.yaml index a5329540..1fe0a894 100644 --- a/gnocchi/tests/functional/gabbits/metric.yaml +++ b/gnocchi/tests/functional/gabbits/metric.yaml @@ -213,6 +213,16 @@ tests: - ["2015-03-06T14:34:00+00:00", 60.0, 14.0] - ["2015-03-06T14:35:00+00:00", 60.0, 10.0] + - name: get measurements from metric and resample and negative + POST: /v1/aggregates/fetch?granularity=1 + data: + operations: "(negative (resample mean 60 (metric $HISTORY['list valid metrics'].$RESPONSE['$[0].id'] mean)))" + response_json_paths: + $."$HISTORY['list valid metrics'].$RESPONSE['$[0].id']_mean": + - ["2015-03-06T14:33:00+00:00", 60.0, -43.1] + - ["2015-03-06T14:34:00+00:00", 60.0, -14.0] + - ["2015-03-06T14:35:00+00:00", 60.0, -10.0] + - name: push negative measurements to metric again POST: /v1/metric/$HISTORY['list valid metrics'].$RESPONSE['$[0].id']/measures data: @@ -222,6 +232,57 @@ tests: value: -23 status: 202 + - name: refresh metric + GET: /v1/metric/$HISTORY['list valid metrics'].$RESPONSE['$[0].id']/measures?refresh=true + + - name: get absolute measurements from metric + POST: /v1/aggregates/fetch + data: + operations: "(absolute (metric $HISTORY['list valid metrics'].$RESPONSE['$[0].id'] mean))" + response_json_paths: + $."$HISTORY['list valid metrics'].$RESPONSE['$[0].id']_mean": + - ["2015-03-06T14:33:57+00:00", 1.0, 43.1] + - ["2015-03-06T14:34:12+00:00", 1.0, 12.0] + - ["2015-03-06T14:34:15+00:00", 1.0, 16.0] + - ["2015-03-06T14:35:12+00:00", 1.0, 9.0] + - ["2015-03-06T14:35:15+00:00", 1.0, 11.0] + - ["2015-03-06T14:36:15+00:00", 1.0, 16.0] + - ["2015-03-06T14:37:15+00:00", 1.0, 23.0] + + - name: rolling-mean + POST: /v1/aggregates/fetch + data: + operations: "(rolling mean 2 (metric $HISTORY['list valid 
metrics'].$RESPONSE['$[0].id'] mean))" + status: 200 + response_json_paths: + $."$HISTORY['list valid metrics'].$RESPONSE['$[0].id']_mean": + - ["2015-03-06T14:34:12+00:00", 1.0, 27.55] + - ["2015-03-06T14:34:15+00:00", 1.0, 14.0] + - ["2015-03-06T14:35:12+00:00", 1.0, 12.5] + - ["2015-03-06T14:35:15+00:00", 1.0, 10.0] + - ["2015-03-06T14:36:15+00:00", 1.0, -2.5] + - ["2015-03-06T14:37:15+00:00", 1.0, -19.5] + + - name: get measurements from metric and two operations + POST: /v1/aggregates/fetch + data: + operations: "(negative (absolute (metric $HISTORY['list valid metrics'].$RESPONSE['$[0].id'] mean)))" + response_json_paths: + $."$HISTORY['list valid metrics'].$RESPONSE['$[0].id']_mean": + - ["2015-03-06T14:33:57+00:00", 1.0, -43.1] + - ["2015-03-06T14:34:12+00:00", 1.0, -12.0] + - ["2015-03-06T14:34:15+00:00", 1.0, -16.0] + - ["2015-03-06T14:35:12+00:00", 1.0, -9.0] + - ["2015-03-06T14:35:15+00:00", 1.0, -11.0] + - ["2015-03-06T14:36:15+00:00", 1.0, -16.0] + - ["2015-03-06T14:37:15+00:00", 1.0, -23.0] + + - name: get measurements from metric and invalid operations + POST: /v1/aggregates/fetch + data: + operations: "(notexist (absolute (metric $HISTORY['list valid metrics'].$RESPONSE['$[0].id'] mean)))" + status: 400 + - name: get measurements from metric and resample no granularity GET: /v1/metric/$HISTORY['list valid metrics'].$RESPONSE['$[0].id']/measures?resample=60 status: 400 diff --git a/gnocchi/tests/test_aggregates.py b/gnocchi/tests/test_aggregates.py index 9243b5d0..8c1c274d 100644 --- a/gnocchi/tests/test_aggregates.py +++ b/gnocchi/tests/test_aggregates.py @@ -22,11 +22,20 @@ import numpy from gnocchi import carbonara from gnocchi import incoming from gnocchi import indexer +from gnocchi.rest.aggregates import exceptions from gnocchi.rest.aggregates import processor from gnocchi import storage from gnocchi.tests import base +class EqNan(object): + def __eq__(self, other): + return numpy.isnan(other) + + +eq_nan = EqNan() + + def datetime64(*args): 
return numpy.datetime64(datetime.datetime(*args)) @@ -38,7 +47,7 @@ class TestAggregatedTimeseries(base.BaseTestCase): grouped = ts.group_serie(agg_dict['sampling']) existing = agg_dict.get('return') agg_dict['return'] = ( - ["foo", 'mean'], + [agg_dict.get("name", "all"), 'mean'], carbonara.AggregatedTimeSerie.from_grouped_serie( grouped, agg_dict['sampling'], agg_dict['agg'], max_size=agg_dict.get('size'), truncate=True)) @@ -64,12 +73,12 @@ class TestAggregatedTimeseries(base.BaseTestCase): self._resample_and_merge, agg_dict=tsc2)) dtfrom = datetime64(2014, 1, 1, 11, 0, 0) - self.assertRaises(processor.UnAggregableTimeseries, + self.assertRaises(exceptions.UnAggregableTimeseries, processor.aggregated, [tsc1['return'], tsc2['return']], from_timestamp=dtfrom, operations=["aggregate", "mean", [ - "metric", ["foo", "mean"]]]) + "metric", ["all", "mean"]]]) def test_aggregated_different_archive_no_overlap2(self): tsc1 = {'sampling': numpy.timedelta64(60, 's'), @@ -84,13 +93,11 @@ class TestAggregatedTimeseries(base.BaseTestCase): dtype=carbonara.TIMESERIES_ARRAY_DTYPE), before_truncate_callback=functools.partial( self._resample_and_merge, agg_dict=tsc1)) - self.assertRaises(processor.UnAggregableTimeseries, + self.assertRaises(exceptions.UnAggregableTimeseries, processor.aggregated, - [tsc1['return'], (("bar", "mean"), tsc2)], + [tsc1['return'], (("all", "mean"), tsc2)], operations=["aggregate", "mean", - ["metric", - ["foo", "mean"], - ["bar", "mean"]]]) + ["metric", "all", "mean"]]) def test_aggregated_different_archive_overlap(self): tsc1 = {'sampling': numpy.timedelta64(60, 's'), @@ -136,7 +143,7 @@ class TestAggregatedTimeseries(base.BaseTestCase): # By default we require 100% of point that overlap # so that fail - self.assertRaises(processor.UnAggregableTimeseries, + self.assertRaises(exceptions.UnAggregableTimeseries, processor.aggregated, [tsc1['return'], tsc2['return']], from_timestamp=dtfrom, @@ -369,10 +376,10 @@ class 
TestAggregatedTimeseries(base.BaseTestCase): def test_aggregated_some_overlap_with_fill_zero(self): tsc1 = {'sampling': numpy.timedelta64(60, 's'), - 'size': 10, 'agg': 'mean'} + 'size': 10, 'agg': 'mean', "name": "foo"} tsb1 = carbonara.BoundTimeSerie(block_size=tsc1['sampling']) tsc2 = {'sampling': numpy.timedelta64(60, 's'), - 'size': 10, 'agg': 'mean'} + 'size': 10, 'agg': 'mean', "name": "bar"} tsb2 = carbonara.BoundTimeSerie(block_size=tsc2['sampling']) tsb1.set_values(numpy.array([ @@ -425,12 +432,39 @@ class TestAggregatedTimeseries(base.BaseTestCase): numpy.timedelta64(60000000000, 'ns'), 1.5), ], list(output)) + output = processor.aggregated([ + tsc1['return'], tsc2['return']], + operations=["-", ["metric"] + tsc1['return'][0], + ["metric"] + tsc2['return'][0] + ], fill=0)["aggregated"] + + self.assertEqual([ + (datetime64(2014, 1, 1, 12, 0, 0), + numpy.timedelta64(60000000000, 'ns'), -6.0), + (datetime64(2014, 1, 1, 12, 1, 0), + numpy.timedelta64(60000000000, 'ns'), -2.0), + (datetime64(2014, 1, 1, 12, 2, 0), + numpy.timedelta64(60000000000, 'ns'), -13), + (datetime64(2014, 1, 1, 12, 3, 0), + numpy.timedelta64(60000000000, 'ns'), -15), + (datetime64(2014, 1, 1, 12, 4, 0), + numpy.timedelta64(60000000000, 'ns'), -3), + (datetime64(2014, 1, 1, 12, 5, 0), + numpy.timedelta64(60000000000, 'ns'), -14), + (datetime64(2014, 1, 1, 12, 6, 0), + numpy.timedelta64(60000000000, 'ns'), -5), + (datetime64(2014, 1, 1, 12, 7, 0), + numpy.timedelta64(60000000000, 'ns'), 5), + (datetime64(2014, 1, 1, 12, 8, 0), + numpy.timedelta64(60000000000, 'ns'), 3), + ], list(output)) + def test_aggregated_some_overlap_with_fill_null(self): tsc1 = {'sampling': numpy.timedelta64(60, 's'), - 'size': 10, 'agg': 'mean'} + 'size': 10, 'agg': 'mean', 'name': 'foo'} tsb1 = carbonara.BoundTimeSerie(block_size=tsc1['sampling']) tsc2 = {'sampling': numpy.timedelta64(60, 's'), - 'size': 10, 'agg': 'mean'} + 'size': 10, 'agg': 'mean', 'name': 'bar'} tsb2 = 
carbonara.BoundTimeSerie(block_size=tsc2['sampling']) tsb1.set_values(numpy.array([ @@ -483,12 +517,39 @@ class TestAggregatedTimeseries(base.BaseTestCase): numpy.timedelta64(60000000000, 'ns'), 3.0), ], list(output)) + output = processor.aggregated([ + tsc1['return'], tsc2['return']], + operations=["-", ["metric"] + tsc1['return'][0], + ["metric"] + tsc2['return'][0] + ], fill='null')["aggregated"] + + self.assertEqual([ + (datetime64(2014, 1, 1, 12, 0, 0), + numpy.timedelta64(60000000000, 'ns'), eq_nan), + (datetime64(2014, 1, 1, 12, 1, 0), + numpy.timedelta64(60000000000, 'ns'), eq_nan), + (datetime64(2014, 1, 1, 12, 2, 0), + numpy.timedelta64(60000000000, 'ns'), eq_nan), + (datetime64(2014, 1, 1, 12, 3, 0), + numpy.timedelta64(60000000000, 'ns'), -15.0), + (datetime64(2014, 1, 1, 12, 4, 0), + numpy.timedelta64(60000000000, 'ns'), -3.0), + (datetime64(2014, 1, 1, 12, 5, 0), + numpy.timedelta64(60000000000, 'ns'), -14.0), + (datetime64(2014, 1, 1, 12, 6, 0), + numpy.timedelta64(60000000000, 'ns'), -5.0), + (datetime64(2014, 1, 1, 12, 7, 0), + numpy.timedelta64(60000000000, 'ns'), eq_nan), + (datetime64(2014, 1, 1, 12, 8, 0), + numpy.timedelta64(60000000000, 'ns'), eq_nan), + ], list(output)) + def test_aggregate_no_points_with_fill_zero(self): tsc1 = {'sampling': numpy.timedelta64(60, 's'), - 'size': 10, 'agg': 'mean'} + 'size': 10, 'agg': 'mean', 'name': 'foo'} tsb1 = carbonara.BoundTimeSerie(block_size=tsc1['sampling']) tsc2 = {'sampling': numpy.timedelta64(60, 's'), - 'size': 10, 'agg': 'mean'} + 'size': 10, 'agg': 'mean', 'name': 'bar'} tsb2 = carbonara.BoundTimeSerie(block_size=tsc2['sampling']) tsb1.set_values(numpy.array([ @@ -533,57 +594,49 @@ class TestAggregatedTimeseries(base.BaseTestCase): numpy.timedelta64(60000000000, 'ns'), 1.5), ], list(output)) + output = processor.aggregated([ + tsc1['return'], tsc2['return']], + operations=["-", ["metric"] + tsc1['return'][0], + ["metric"] + tsc2['return'][0] + ], fill=0)["aggregated"] + + self.assertEqual([ + 
(datetime64(2014, 1, 1, 12, 0, 0), + numpy.timedelta64(60000000000, 'ns'), -6.0), + (datetime64(2014, 1, 1, 12, 1, 0), + numpy.timedelta64(60000000000, 'ns'), -2.0), + (datetime64(2014, 1, 1, 12, 2, 0), + numpy.timedelta64(60000000000, 'ns'), -13), + (datetime64(2014, 1, 1, 12, 3, 0), + numpy.timedelta64(60000000000, 'ns'), -15), + (datetime64(2014, 1, 1, 12, 4, 0), + numpy.timedelta64(60000000000, 'ns'), -3), + (datetime64(2014, 1, 1, 12, 7, 0), + numpy.timedelta64(60000000000, 'ns'), 5), + (datetime64(2014, 1, 1, 12, 8, 0), + numpy.timedelta64(60000000000, 'ns'), 3), + ], list(output)) + def test_aggregated_nominal(self): tsc1 = {'sampling': numpy.timedelta64(60, 's'), - 'size': 10, 'agg': 'mean'} + 'size': 10, 'agg': 'mean', 'name': '1'} tsc12 = {'sampling': numpy.timedelta64(300, 's'), - 'size': 6, 'agg': 'mean'} + 'size': 6, 'agg': 'mean', 'name': '12'} tsb1 = carbonara.BoundTimeSerie(block_size=tsc12['sampling']) tsc2 = {'sampling': numpy.timedelta64(60, 's'), - 'size': 10, 'agg': 'mean'} + 'size': 10, 'agg': 'mean', 'name': '2'} tsc22 = {'sampling': numpy.timedelta64(300, 's'), - 'size': 6, 'agg': 'mean'} + 'size': 6, 'agg': 'mean', 'name': '22'} tsb2 = carbonara.BoundTimeSerie(block_size=tsc22['sampling']) def ts1_update(ts): - grouped = ts.group_serie(tsc1['sampling']) - existing = tsc1.get('return') - tsc1['return'] = ( - ['foobar', 'mean'], - carbonara.AggregatedTimeSerie.from_grouped_serie( - grouped, tsc1['sampling'], tsc1['agg'], - max_size=tsc1['size'], truncate=True)) - if existing: - existing[1].merge(tsc1['return'][1]) - grouped = ts.group_serie(tsc12['sampling']) - existing = tsc12.get('return') - tsc12['return'] = ( - ['foobar', 'mean'], - carbonara.AggregatedTimeSerie.from_grouped_serie( - grouped, tsc12['sampling'], tsc12['agg'], - max_size=tsc12['size'], truncate=True)) - if existing: - existing[1].merge(tsc12['return'][1]) + self._resample_and_merge(ts, tsc1) + self._resample_and_merge(ts, tsc12) def ts2_update(ts): - grouped = 
ts.group_serie(tsc2['sampling']) - existing = tsc2.get('return') - tsc2['return'] = ( - ['foobar', 'mean'], - carbonara.AggregatedTimeSerie.from_grouped_serie( - grouped, tsc2['sampling'], tsc2['agg'], - max_size=tsc2['size'], truncate=True)) - if existing: - existing[1].merge(tsc2['return'][1]) - grouped = ts.group_serie(tsc22['sampling']) - existing = tsc22.get('return') - tsc22['return'] = ( - ['foobar', 'mean'], - carbonara.AggregatedTimeSerie.from_grouped_serie( - grouped, tsc22['sampling'], tsc22['agg'], - max_size=tsc22['size'], truncate=True)) - if existing: - existing[1].merge(tsc22['return'][1]) + self._resample_and_merge(ts, tsc2) + self._resample_and_merge(ts, tsc22) + tsb1.set_values(numpy.array([ (datetime64(2014, 1, 1, 11, 46, 4), 4), (datetime64(2014, 1, 1, 11, 47, 34), 8), @@ -665,10 +718,10 @@ class TestAggregatedTimeseries(base.BaseTestCase): def test_aggregated_partial_overlap(self): tsc1 = {'sampling': numpy.timedelta64(1, 's'), - 'size': 86400, 'agg': 'mean'} + 'size': 86400, 'agg': 'mean', 'name': 'foo'} tsb1 = carbonara.BoundTimeSerie(block_size=tsc1['sampling']) tsc2 = {'sampling': numpy.timedelta64(1, 's'), - 'size': 60, 'agg': 'mean'} + 'size': 60, 'agg': 'mean', 'name': 'bar'} tsb2 = carbonara.BoundTimeSerie(block_size=tsc2['sampling']) tsb1.set_values(numpy.array([ @@ -751,14 +804,14 @@ class TestAggregatedTimeseries(base.BaseTestCase): # By default we require 100% of point that overlap # so that fail if from or to is set - self.assertRaises(processor.UnAggregableTimeseries, + self.assertRaises(exceptions.UnAggregableTimeseries, processor.aggregated, [tsc1['return'], tsc2['return']], to_timestamp=dtto, operations=["aggregate", "sum", [ "metric", tsc1['return'][0], tsc2['return'][0] ]]) - self.assertRaises(processor.UnAggregableTimeseries, + self.assertRaises(exceptions.UnAggregableTimeseries, processor.aggregated, [tsc1['return'], tsc2['return']], from_timestamp=dtfrom, @@ -815,7 +868,7 @@ class CrossMetricAggregated(base.TestCase): def 
test_get_measures_empty_metrics_no_overlap(self): self.assertRaises( - processor.UnAggregableTimeseries, + exceptions.UnAggregableTimeseries, processor.get_measures, self.storage, [(indexer.Metric(uuid.uuid4(), self.archive_policies['low']), 'mean'), @@ -865,7 +918,7 @@ class CrossMetricAggregated(base.TestCase): incoming.Measure(datetime64(2014, 1, 1, 12, 9, 31), 4), incoming.Measure(datetime64(2014, 1, 1, 12, 12, 45), 44), ]) - self.assertRaises(processor.UnAggregableTimeseries, + self.assertRaises(exceptions.UnAggregableTimeseries, processor.get_measures, self.storage, [(self.metric, "mean"), (metric2, "mean")], @@ -892,7 +945,7 @@ class CrossMetricAggregated(base.TestCase): incoming.Measure(datetime64(2014, 1, 1, 12, 12, 45), 44), ]) - self.assertRaises(processor.UnAggregableTimeseries, + self.assertRaises(exceptions.UnAggregableTimeseries, processor.get_measures, self.storage, [(self.metric, "mean"), (metric2, "mean")], @@ -1082,3 +1135,356 @@ class CrossMetricAggregated(base.TestCase): (datetime64(2014, 1, 1, 12, 10, 0), numpy.timedelta64(5, 'm'), 22.0) ], values) + + def test_resample(self): + metric2, __ = self._create_metric() + self.incoming.add_measures(self.metric, [ + incoming.Measure(datetime64(2014, 1, 1, 12, 0, 1), 69), + incoming.Measure(datetime64(2014, 1, 1, 13, 1, 31), 42), + incoming.Measure(datetime64(2014, 1, 1, 14, 2, 31), 4), + incoming.Measure(datetime64(2014, 1, 1, 15, 3, 45), 44), + ]) + self.incoming.add_measures(metric2, [ + incoming.Measure(datetime64(2014, 1, 1, 12, 0, 5), 9), + incoming.Measure(datetime64(2014, 1, 1, 13, 1, 41), 2), + incoming.Measure(datetime64(2014, 1, 1, 14, 2, 31), 4), + incoming.Measure(datetime64(2014, 1, 1, 15, 3, 10), 4), + ]) + self.trigger_processing([str(self.metric.id), str(metric2.id)]) + + values = processor.get_measures( + self.storage, [(self.metric, "mean"), (metric2, "mean")], + ["resample", "mean", numpy.timedelta64(1, 'D'), + ["metric", + [str(self.metric.id), "mean"], + [str(metric2.id), 
"mean"]]], + granularity=numpy.timedelta64(1, 'h')) + + self.assertEqual({ + "%s_%s" % (self.metric.id, "mean"): [ + (datetime64(2014, 1, 1, 0, 0, 0), + numpy.timedelta64(1, 'D'), 39.75)], + "%s_%s" % (metric2.id, "mean"): [ + (datetime64(2014, 1, 1, 0, 0, 0), + numpy.timedelta64(1, 'D'), 4.75)], + }, values) + + def test_resample_minus_2_on_right(self): + metric2, __ = self._create_metric() + self.incoming.add_measures(self.metric, [ + incoming.Measure(datetime64(2014, 1, 1, 12, 0, 1), 69), + incoming.Measure(datetime64(2014, 1, 1, 13, 1, 31), 42), + incoming.Measure(datetime64(2014, 1, 1, 14, 2, 31), 4), + incoming.Measure(datetime64(2014, 1, 1, 15, 3, 45), 44), + ]) + self.incoming.add_measures(metric2, [ + incoming.Measure(datetime64(2014, 1, 1, 12, 0, 5), 9), + incoming.Measure(datetime64(2014, 1, 1, 13, 1, 41), 2), + incoming.Measure(datetime64(2014, 1, 1, 14, 2, 31), 4), + incoming.Measure(datetime64(2014, 1, 1, 15, 3, 10), 4), + ]) + self.trigger_processing([str(self.metric.id), str(metric2.id)]) + + values = processor.get_measures( + self.storage, [(self.metric, "mean"), (metric2, "mean")], + ["-", ["resample", "mean", numpy.timedelta64(1, 'D'), + ["metric", + [str(self.metric.id), "mean"], + [str(metric2.id), "mean"]]], 2], + granularity=numpy.timedelta64(1, 'h')) + + self.assertEqual({ + "%s_%s" % (self.metric.id, "mean"): [ + (datetime64(2014, 1, 1, 0, 0, 0), + numpy.timedelta64(1, 'D'), 37.75)], + "%s_%s" % (metric2.id, "mean"): [ + (datetime64(2014, 1, 1, 0, 0, 0), + numpy.timedelta64(1, 'D'), 2.75)], + }, values) + + def test_resample_minus_2_on_left(self): + metric2, __ = self._create_metric() + self.incoming.add_measures(self.metric, [ + incoming.Measure(datetime64(2014, 1, 1, 12, 0, 1), 69), + incoming.Measure(datetime64(2014, 1, 1, 13, 1, 31), 42), + incoming.Measure(datetime64(2014, 1, 1, 14, 2, 31), 4), + incoming.Measure(datetime64(2014, 1, 1, 15, 3, 45), 44), + ]) + self.incoming.add_measures(metric2, [ + incoming.Measure(datetime64(2014, 1, 
1, 12, 0, 5), 9), + incoming.Measure(datetime64(2014, 1, 1, 13, 1, 41), 2), + incoming.Measure(datetime64(2014, 1, 1, 14, 2, 31), 4), + incoming.Measure(datetime64(2014, 1, 1, 15, 3, 10), 4), + ]) + self.trigger_processing([str(self.metric.id), str(metric2.id)]) + + values = processor.get_measures( + self.storage, [(self.metric, "mean"), (metric2, "mean")], + ["-", + 2, + ["resample", "mean", numpy.timedelta64(1, 'D'), + ["metric", + [str(self.metric.id), "mean"], + [str(metric2.id), "mean"]]]], + granularity=numpy.timedelta64(1, 'h')) + + self.assertEqual({ + "%s_%s" % (self.metric.id, "mean"): [ + (datetime64(2014, 1, 1, 0, 0, 0), + numpy.timedelta64(1, 'D'), -37.75)], + "%s_%s" % (metric2.id, "mean"): [ + (datetime64(2014, 1, 1, 0, 0, 0), + numpy.timedelta64(1, 'D'), -2.75)], + }, values) + + def test_rolling(self): + metric2, __ = self._create_metric() + self.incoming.add_measures(self.metric, [ + incoming.Measure(datetime64(2014, 1, 1, 12, 0, 1), 69), + incoming.Measure(datetime64(2014, 1, 1, 12, 5, 31), 42), + incoming.Measure(datetime64(2014, 1, 1, 12, 10, 31), 4), + incoming.Measure(datetime64(2014, 1, 1, 12, 15, 45), 44), + ]) + self.incoming.add_measures(metric2, [ + incoming.Measure(datetime64(2014, 1, 1, 12, 0, 5), 9), + incoming.Measure(datetime64(2014, 1, 1, 12, 5, 41), 2), + incoming.Measure(datetime64(2014, 1, 1, 12, 10, 31), 4), + incoming.Measure(datetime64(2014, 1, 1, 12, 15, 10), 4), + ]) + self.trigger_processing([str(self.metric.id), str(metric2.id)]) + + values = processor.get_measures( + self.storage, [(self.metric, "mean"), (metric2, "mean")], + ["/", ["rolling", "sum", 2, + ["metric", [str(self.metric.id), "mean"], + [str(metric2.id), "mean"]]], 2], + granularity=numpy.timedelta64(5, 'm')) + + self.assertEqual({ + "%s_%s" % (self.metric.id, "mean"): [ + (datetime64(2014, 1, 1, 12, 5, 0), + numpy.timedelta64(5, 'm'), 55.5), + (datetime64(2014, 1, 1, 12, 10, 0), + numpy.timedelta64(5, 'm'), 23), + (datetime64(2014, 1, 1, 12, 15, 0), + 
numpy.timedelta64(5, 'm'), 24) + ], + "%s_%s" % (metric2.id, "mean"): [ + (datetime64(2014, 1, 1, 12, 5, 0), + numpy.timedelta64(5, 'm'), 5.5), + (datetime64(2014, 1, 1, 12, 10, 0), + numpy.timedelta64(5, 'm'), 3), + (datetime64(2014, 1, 1, 12, 15, 0), + numpy.timedelta64(5, 'm'), 4), + ], + }, values) + + def test_binary_operator_with_two_references(self): + metric2, __ = self._create_metric() + self.incoming.add_measures(self.metric, [ + incoming.Measure(datetime64(2014, 1, 1, 12, 0, 1), 69), + incoming.Measure(datetime64(2014, 1, 1, 13, 1, 31), 42), + incoming.Measure(datetime64(2014, 1, 1, 14, 2, 31), 4), + incoming.Measure(datetime64(2014, 1, 1, 15, 3, 45), 44), + ]) + self.incoming.add_measures(metric2, [ + incoming.Measure(datetime64(2014, 1, 1, 12, 0, 5), 9), + incoming.Measure(datetime64(2014, 1, 1, 13, 1, 41), 2), + incoming.Measure(datetime64(2014, 1, 1, 14, 2, 31), 4), + incoming.Measure(datetime64(2014, 1, 1, 15, 3, 10), 4), + ]) + self.trigger_processing([str(self.metric.id), str(metric2.id)]) + + values = processor.get_measures( + self.storage, [(self.metric, "mean"), (metric2, "mean")], + ["*", ["metric", str(self.metric.id), "mean"], + ["metric", str(metric2.id), "mean"]], + granularity=numpy.timedelta64(1, 'h'))["aggregated"] + + self.assertEqual([ + (datetime64(2014, 1, 1, 12, 0, 0), + numpy.timedelta64(1, 'h'), 621), + (datetime64(2014, 1, 1, 13, 0, 0), + numpy.timedelta64(1, 'h'), 84), + (datetime64(2014, 1, 1, 14, 0, 0), + numpy.timedelta64(1, 'h'), 16), + (datetime64(2014, 1, 1, 15, 0, 0), + numpy.timedelta64(1, 'h'), 176), + ], values) + + def test_binary_operator_ts_on_left(self): + metric2, __ = self._create_metric() + self.incoming.add_measures(self.metric, [ + incoming.Measure(datetime64(2014, 1, 1, 12, 0, 1), 69), + incoming.Measure(datetime64(2014, 1, 1, 13, 1, 31), 42), + incoming.Measure(datetime64(2014, 1, 1, 14, 2, 31), 4), + incoming.Measure(datetime64(2014, 1, 1, 15, 3, 45), 44), + ]) + 
self.trigger_processing([str(self.metric.id)]) + + values = processor.get_measures( + self.storage, [(self.metric, "mean")], + ["*", ["metric", str(self.metric.id), "mean"], 2], + granularity=numpy.timedelta64(1, 'h')) + + self.assertEqual([ + (datetime64(2014, 1, 1, 12, 0, 0), + numpy.timedelta64(1, 'h'), 138), + (datetime64(2014, 1, 1, 13, 0, 0), + numpy.timedelta64(1, 'h'), 84), + (datetime64(2014, 1, 1, 14, 0, 0), + numpy.timedelta64(1, 'h'), 8), + (datetime64(2014, 1, 1, 15, 0, 0), + numpy.timedelta64(1, 'h'), 88), + ], values["%s_mean" % self.metric.id]) + + def test_binary_operator_ts_on_right(self): + metric2, __ = self._create_metric() + self.incoming.add_measures(self.metric, [ + incoming.Measure(datetime64(2014, 1, 1, 12, 0, 1), 69), + incoming.Measure(datetime64(2014, 1, 1, 13, 1, 31), 42), + incoming.Measure(datetime64(2014, 1, 1, 14, 2, 31), 4), + incoming.Measure(datetime64(2014, 1, 1, 15, 3, 45), 44), + ]) + self.trigger_processing([str(self.metric.id)]) + + values = processor.get_measures( + self.storage, [(self.metric, "mean")], + ["*", 2, ["metric", str(self.metric.id), "mean"]], + granularity=numpy.timedelta64(1, 'h')) + + self.assertEqual([ + (datetime64(2014, 1, 1, 12, 0, 0), + numpy.timedelta64(1, 'h'), 138), + (datetime64(2014, 1, 1, 13, 0, 0), + numpy.timedelta64(1, 'h'), 84), + (datetime64(2014, 1, 1, 14, 0, 0), + numpy.timedelta64(1, 'h'), 8), + (datetime64(2014, 1, 1, 15, 0, 0), + numpy.timedelta64(1, 'h'), 88), + ], values["%s_mean" % self.metric.id]) + + def test_mix(self): + metric2, __ = self._create_metric() + self.incoming.add_measures(self.metric, [ + incoming.Measure(datetime64(2014, 1, 1, 12, 0, 1), 69), + incoming.Measure(datetime64(2014, 1, 1, 13, 1, 31), 42), + incoming.Measure(datetime64(2014, 1, 1, 14, 2, 31), 4), + incoming.Measure(datetime64(2014, 1, 1, 15, 3, 45), 44), + ]) + self.incoming.add_measures(metric2, [ + incoming.Measure(datetime64(2014, 1, 1, 12, 0, 5), 9), + incoming.Measure(datetime64(2014, 1, 1, 13, 1, 
41), 2), + incoming.Measure(datetime64(2014, 1, 1, 14, 2, 31), 4), + incoming.Measure(datetime64(2014, 1, 1, 15, 3, 10), 4), + ]) + self.trigger_processing([str(self.metric.id), str(metric2.id)]) + + values = processor.get_measures( + self.storage, [(self.metric, "mean"), (metric2, "mean")], + [ + "rolling", + "sum", + 2, + ["*", ["metric", str(self.metric.id), "mean"], + ["metric", str(metric2.id), "mean"]], + ], + granularity=numpy.timedelta64(1, 'h'))["aggregated"] + + self.assertEqual([ + (datetime64(2014, 1, 1, 13, 0, 0), + numpy.timedelta64(1, 'h'), 705), + (datetime64(2014, 1, 1, 14, 0, 0), + numpy.timedelta64(1, 'h'), 100), + (datetime64(2014, 1, 1, 15, 0, 0), + numpy.timedelta64(1, 'h'), 192), + ], values) + + def test_bool(self): + metric2, __ = self._create_metric() + self.incoming.add_measures(self.metric, [ + incoming.Measure(datetime64(2014, 1, 1, 12, 0, 1), 69), + incoming.Measure(datetime64(2014, 1, 1, 13, 1, 31), 42), + incoming.Measure(datetime64(2014, 1, 1, 14, 2, 31), 4), + incoming.Measure(datetime64(2014, 1, 1, 15, 3, 45), 44), + ]) + self.incoming.add_measures(metric2, [ + incoming.Measure(datetime64(2014, 1, 1, 12, 0, 5), 9), + incoming.Measure(datetime64(2014, 1, 1, 13, 1, 41), 2), + incoming.Measure(datetime64(2014, 1, 1, 14, 2, 31), 4), + incoming.Measure(datetime64(2014, 1, 1, 15, 3, 10), 4), + ]) + self.trigger_processing([str(self.metric.id), str(metric2.id)]) + + values = processor.get_measures( + self.storage, [(self.metric, "mean"), (metric2, "mean")], + [ + "gt", + [ + "/", + [ + "*", + ["*", ["metric", str(self.metric.id), "mean"], + ["metric", str(metric2.id), "mean"]], + 100, + ], + 1000 + ], + 10 + ], + granularity=numpy.timedelta64(1, 'h'))["aggregated"] + self.assertEqual([ + (datetime64(2014, 1, 1, 12, 0, 0), + numpy.timedelta64(1, 'h'), 1), + (datetime64(2014, 1, 1, 13, 0, 0), + numpy.timedelta64(1, 'h'), 0), + (datetime64(2014, 1, 1, 14, 0, 0), + numpy.timedelta64(1, 'h'), 0), + (datetime64(2014, 1, 1, 15, 0, 0), + 
numpy.timedelta64(1, 'h'), 1), + ], values) + + def test_unary_operator(self): + metric2, __ = self._create_metric() + self.incoming.add_measures(self.metric, [ + incoming.Measure(datetime64(2014, 1, 1, 12, 0, 1), -69), + incoming.Measure(datetime64(2014, 1, 1, 13, 1, 31), 42), + incoming.Measure(datetime64(2014, 1, 1, 14, 2, 31), -4), + incoming.Measure(datetime64(2014, 1, 1, 15, 3, 45), 44), + ]) + self.incoming.add_measures(metric2, [ + incoming.Measure(datetime64(2014, 1, 1, 12, 0, 5), -9), + incoming.Measure(datetime64(2014, 1, 1, 13, 1, 41), -2), + incoming.Measure(datetime64(2014, 1, 1, 14, 2, 31), 4), + incoming.Measure(datetime64(2014, 1, 1, 15, 3, 10), -4), + ]) + self.trigger_processing([str(self.metric.id), str(metric2.id)]) + + values = processor.get_measures( + self.storage, [(self.metric, "mean"), (metric2, "mean")], + ["abs", ["metric", [str(self.metric.id), "mean"], + [str(metric2.id), "mean"]]], + granularity=numpy.timedelta64(1, 'h')) + + self.assertEqual({ + "%s_%s" % (self.metric.id, "mean"): [ + (datetime64(2014, 1, 1, 12, 0, 0), + numpy.timedelta64(1, 'h'), 69), + (datetime64(2014, 1, 1, 13, 0, 0), + numpy.timedelta64(1, 'h'), 42), + (datetime64(2014, 1, 1, 14, 0, 0), + numpy.timedelta64(1, 'h'), 4), + (datetime64(2014, 1, 1, 15, 0, 0), + numpy.timedelta64(1, 'h'), 44) + ], + "%s_%s" % (metric2.id, "mean"): [ + (datetime64(2014, 1, 1, 12, 0, 0), + numpy.timedelta64(1, 'h'), 9), + (datetime64(2014, 1, 1, 13, 0, 0), + numpy.timedelta64(1, 'h'), 2), + (datetime64(2014, 1, 1, 14, 0, 0), + numpy.timedelta64(1, 'h'), 4), + (datetime64(2014, 1, 1, 15, 0, 0), + numpy.timedelta64(1, 'h'), 4), + ], + }, values) diff --git a/releasenotes/notes/aggregates-API-d31db66e674cbf60.yaml b/releasenotes/notes/aggregates-API-d31db66e674cbf60.yaml new file mode 100644 index 00000000..6eeb87bc --- /dev/null +++ b/releasenotes/notes/aggregates-API-d31db66e674cbf60.yaml @@ -0,0 +1,8 @@ +--- +features: + - | + New API endpoint allows to retrieve, transform, aggregates 
measurements on the + fly in an flexible way. The endpoint location is `/v1/aggregates/fetch`. + This endpoint allows to describe `operations` to be done on a metrics + list. Example: `(* 5 (rolling mean 3 (aggregate sum (metric (metric1 mean) + (metric2 mean)))))`. More details are available in the documentation. -- GitLab From fb8774406dc35059848040c58593ba2fa43c7b43 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Mon, 16 Oct 2017 10:06:20 +0200 Subject: [PATCH 1036/1483] aggregated: remove useless resample argument "resample" can be done within the "operations". This change does that. --- gnocchi/rest/aggregates/processor.py | 7 +------ gnocchi/rest/api.py | 20 ++++++++++++++------ 2 files changed, 15 insertions(+), 12 deletions(-) diff --git a/gnocchi/rest/aggregates/processor.py b/gnocchi/rest/aggregates/processor.py index aaa522c3..39dc7c85 100644 --- a/gnocchi/rest/aggregates/processor.py +++ b/gnocchi/rest/aggregates/processor.py @@ -42,7 +42,7 @@ def get_measures(storage, metrics_and_aggregations, operations, from_timestamp=None, to_timestamp=None, granularity=None, needed_overlap=100.0, - fill=None, resample=None, ref_identifier="id"): + fill=None, ref_identifier="id"): """Get aggregated measures of multiple entities. :param storage: The storage driver. @@ -52,7 +52,6 @@ def get_measures(storage, metrics_and_aggregations, :param to timestamp: The timestamp to get the measure to. :param granularity: The granularity to retrieve. :param fill: The value to use to fill in missing data in series. - :param resample: The granularity to resample to. 
""" references_with_missing_granularity = [] @@ -102,10 +101,6 @@ def get_measures(storage, metrics_and_aggregations, in metrics_and_aggregations for g in granularities_in_common]) - if resample and granularity: - tss = list(map(lambda ref_and_ts: ( - ref_and_ts[0], ref_and_ts[1].resample(resample)), tss)) - return aggregated(tss, operations, from_timestamp, to_timestamp, needed_overlap, fill) diff --git a/gnocchi/rest/api.py b/gnocchi/rest/api.py index 1daa1ba0..816bbafd 100644 --- a/gnocchi/rest/api.py +++ b/gnocchi/rest/api.py @@ -1748,10 +1748,6 @@ class AggregationController(rest.RestController): if reaggregation is None: reaggregation = aggregation - metrics_and_aggregations = [[str(m.id), aggregation] for m in metrics] - operations = ["aggregate", reaggregation, - ["metric"] + metrics_and_aggregations] - for metric in metrics: enforce("get metric", metric) @@ -1767,6 +1763,19 @@ class AggregationController(rest.RestController): except ValueError as e: abort(400, six.text_type(e)) + operations = ["aggregate", reaggregation, []] + if resample: + operations[2].extend( + ["resample", aggregation, resample, + ["metric"] + [[str(m.id), aggregation] + for m in metrics]] + ) + else: + operations[2].extend( + ["metric"] + [[str(m.id), aggregation] + for m in metrics] + ) + try: if strtobool("refresh", refresh): metrics_to_update = [ @@ -1789,8 +1798,7 @@ class AggregationController(rest.RestController): pecan.request.storage, [(m, aggregation) for m in metrics], operations, start, stop, - granularity, needed_overlap, fill, - resample)["aggregated"] + granularity, needed_overlap, fill)["aggregated"] except exceptions.UnAggregableTimeseries as e: abort(400, e) except (storage.MetricDoesNotExist, -- GitLab From c8170ffba2a375d1ef05c7bded83ee30174452da Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Tue, 24 Oct 2017 08:30:25 +0200 Subject: [PATCH 1037/1483] aggregates: more tests --- .../functional/gabbits/aggregates-fetch.yaml | 33 +++++++++++++++++++ 1 file 
changed, 33 insertions(+) diff --git a/gnocchi/tests/functional/gabbits/aggregates-fetch.yaml b/gnocchi/tests/functional/gabbits/aggregates-fetch.yaml index 815b1f58..7044a21d 100644 --- a/gnocchi/tests/functional/gabbits/aggregates-fetch.yaml +++ b/gnocchi/tests/functional/gabbits/aggregates-fetch.yaml @@ -235,6 +235,39 @@ tests: - ["2015-03-06T14:35:12+00:00", 1.0, 7.5] - ["2015-03-06T14:35:15+00:00", 1.0, 12.5] + + - name: get one metric + POST: /v1/aggregates/fetch + data: + operations: "(metric $HISTORY['create metric1'].$RESPONSE['$.id'] mean)" + response_json_paths: + $.`len`: 1 + $."$HISTORY['create metric1'].$RESPONSE['$.id']_mean": + - ["2015-03-06T14:33:00+00:00", 60.0, 43.1] + - ["2015-03-06T14:34:00+00:00", 60.0, -2.0] + - ["2015-03-06T14:35:00+00:00", 60.0, 10.0] + - ["2015-03-06T14:33:57+00:00", 1.0, 43.1] + - ["2015-03-06T14:34:12+00:00", 1.0, 12.0] + - ["2015-03-06T14:34:15+00:00", 1.0, -16.0] + - ["2015-03-06T14:35:12+00:00", 1.0, 9.0] + - ["2015-03-06T14:35:15+00:00", 1.0, 11.0] + + - name: get aggregates one metric + POST: /v1/aggregates/fetch + data: + operations: "(aggregate mean (metric $HISTORY['create metric1'].$RESPONSE['$.id'] mean))" + response_json_paths: + $.`len`: 1 + $."aggregated": + - ["2015-03-06T14:33:00+00:00", 60.0, 43.1] + - ["2015-03-06T14:34:00+00:00", 60.0, -2.0] + - ["2015-03-06T14:35:00+00:00", 60.0, 10.0] + - ["2015-03-06T14:33:57+00:00", 1.0, 43.1] + - ["2015-03-06T14:34:12+00:00", 1.0, 12.0] + - ["2015-03-06T14:34:15+00:00", 1.0, -16.0] + - ["2015-03-06T14:35:12+00:00", 1.0, 9.0] + - ["2015-03-06T14:35:15+00:00", 1.0, 11.0] + - name: get aggregates math with string POST: /v1/aggregates/fetch data: -- GitLab From f8b8b64f6ffe82ca703d4195fcaa127f8a1be7ba Mon Sep 17 00:00:00 2001 From: gord chung Date: Tue, 24 Oct 2017 12:54:49 +0000 Subject: [PATCH 1038/1483] transpose once instead of for every single metric (even though they are in same matrix) --- gnocchi/rest/aggregates/processor.py | 5 +++-- 1 file changed, 3 
insertions(+), 2 deletions(-) diff --git a/gnocchi/rest/aggregates/processor.py b/gnocchi/rest/aggregates/processor.py index 39dc7c85..fc74ca08 100644 --- a/gnocchi/rest/aggregates/processor.py +++ b/gnocchi/rest/aggregates/processor.py @@ -160,17 +160,18 @@ def aggregated(refs_and_timeseries, operations, from_timestamp=None, agg_operations.evaluate(operations, key, times, values, False, references[key])) + values = values.T if is_aggregated: result["aggregated"]["timestamps"].extend(times) result["aggregated"]['granularity'].extend([granularity] * len(times)) - result["aggregated"]['values'].extend(values.T[0]) + result["aggregated"]['values'].extend(values[0]) else: for i, ref in enumerate(references[key]): ident = "%s_%s" % tuple(ref) result[ident]["timestamps"].extend(times) result[ident]['granularity'].extend([granularity] * len(times)) - result[ident]['values'].extend(values.T[i]) + result[ident]['values'].extend(values[i]) return dict(((ident, list(six.moves.zip(result[ident]['timestamps'], result[ident]['granularity'], -- GitLab From 6da105a755e23560db769b6cb48e5511128b786f Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Tue, 10 Oct 2017 08:50:55 +0200 Subject: [PATCH 1039/1483] aggregates: resources search api This change allows to search for metrics with the same format as resource search and cross metric aggregation. And then it applies operations. It also re-add transformation tests related to cross metric aggregation endpoint but with new aggregates API. 
Related #419 --- doc/source/rest.j2 | 9 + doc/source/rest.yaml | 28 ++- gnocchi/rest/aggregates/api.py | 145 +++++++++---- ...h.yaml => aggregates-with-metric-ids.yaml} | 62 +++--- .../gabbits/aggregates-with-resources.yaml | 190 ++++++++++++++++++ .../tests/functional/gabbits/aggregation.yaml | 60 +++++- gnocchi/tests/functional/gabbits/metric.yaml | 10 +- .../aggregates-API-d31db66e674cbf60.yaml | 10 +- 8 files changed, 430 insertions(+), 84 deletions(-) rename gnocchi/tests/functional/gabbits/{aggregates-fetch.yaml => aggregates-with-metric-ids.yaml} (94%) create mode 100644 gnocchi/tests/functional/gabbits/aggregates-with-resources.yaml diff --git a/doc/source/rest.j2 b/doc/source/rest.j2 index a9034d8e..f5d23429 100644 --- a/doc/source/rest.j2 +++ b/doc/source/rest.j2 @@ -602,7 +602,16 @@ least one of the time series is returned. {{ scenarios['get-aggregates-by-metric-ids-fill']['doc'] }} +It's also possible to do that aggregation on |metrics| linked to |resources|. +In order to select these |resources|, the following endpoint accepts a query +such as the one described in `Searching for resources`_. 
+ +{{ scenarios['get-aggregates-by-attributes-lookup']['doc'] }} + +It is possible to group the |resource| search results by any attribute of the +requested |resource| type, and then compute the aggregation: +{{ scenarios['get-aggregates-by-attributes-lookup-groupby']['doc'] }} List of supported ------------------------------ diff --git a/doc/source/rest.yaml b/doc/source/rest.yaml index f7749b14..f7510589 100644 --- a/doc/source/rest.yaml +++ b/doc/source/rest.yaml @@ -775,7 +775,7 @@ - name: get-aggregates-by-metric-ids request: | - POST /v1/aggregates/fetch?start=2014-10-06T14:34&stop=2017-10-06T14:34 HTTP/1.1 + POST /v1/aggregates?start=2014-10-06T14:34&stop=2017-10-06T14:34 HTTP/1.1 Content-Type: application/json { @@ -792,7 +792,7 @@ - name: get-aggregates-between-metrics request: | - POST /v1/aggregates/fetch?start=2014-10-06T14:34&stop=2017-10-06T14:34 HTTP/1.1 + POST /v1/aggregates?start=2014-10-06T14:34&stop=2017-10-06T14:34 HTTP/1.1 Content-Type: application/json { @@ -809,13 +809,35 @@ - name: get-aggregates-by-metric-ids-fill request: | - POST /v1/aggregates/fetch?fill=0&granularity=1 HTTP/1.1 + POST /v1/aggregates?fill=0&granularity=1 HTTP/1.1 Content-Type: application/json { "operations": "(* (aggregate mean (metric {{ scenarios['create-resource-instance-with-metrics']['response'].json['metrics']['cpu.util'] }} mean)) 4)" } +- name: get-aggregates-by-attributes-lookup + request: | + POST /v1/aggregates?start=2014-10-06T14:34 HTTP/1.1 + Content-Type: application/json + + { + "resource_type": "instance", + "search": {"=": {"server_group": "my_autoscaling_group"}}, + "operations": ["*", ["aggregate", "mean", ["metric", "cpu.util", "mean"]], 4] + } + +- name: get-aggregates-by-attributes-lookup-groupby + request: | + POST /v1/aggregates?start=2014-10-06T14:34&groupby=host&groupby=flavor_id HTTP/1.1 + Content-Type: application/json + + { + "resource_type": "instance", + "search": "server_group='my_autoscaling_group'", + "operations": "(* (aggregate mean 
(metric cpu.util mean)) 4)" + } + - name: get-capabilities request: GET /v1/capabilities HTTP/1.1 diff --git a/gnocchi/rest/aggregates/api.py b/gnocchi/rest/aggregates/api.py index 59696a93..15b2341f 100644 --- a/gnocchi/rest/aggregates/api.py +++ b/gnocchi/rest/aggregates/api.py @@ -14,12 +14,15 @@ # License for the specific language governing permissions and limitations # under the License. +import itertools + import pecan from pecan import rest import pyparsing import six import voluptuous +from gnocchi import indexer from gnocchi.rest.aggregates import exceptions from gnocchi.rest.aggregates import operations as agg_operations from gnocchi.rest.aggregates import processor @@ -155,15 +158,28 @@ def get_measures_or_abort(metrics_and_aggregations, operations, start, "detail": [(str(e.metric.id), e.method)]}) -class FetchController(rest.RestController): +def ResourceTypeSchema(resource_type): + try: + pecan.request.indexer.get_resource_type(resource_type) + except indexer.NoSuchResourceType as e: + api.abort(400, e) + return resource_type + + +class AggregatesController(rest.RestController): - FetchSchema = { + FetchSchema = voluptuous.Any({ "operations": OperationsSchema - } + }, { + "operations": OperationsSchema, + "resource_type": ResourceTypeSchema, + "search": voluptuous.Any(api.ResourceSearchSchema, + api.QueryStringSearchAttrFilter.parse), + }) @pecan.expose("json") def post(self, start=None, stop=None, granularity=None, - needed_overlap=100.0, fill=None): + needed_overlap=100.0, fill=None, groupby=None): start, stop, granularity, needed_overlap, fill = api.validate_qs( start, stop, granularity, needed_overlap, fill) @@ -171,41 +187,90 @@ class FetchController(rest.RestController): references = list(extract_references(body["operations"])) if not references: - api.abort(400, {"cause": "operations is invalid", - "reason": "at least one 'metric' is required", + api.abort(400, {"cause": "Operations is invalid", + "reason": "At least one 'metric' is required", 
"detail": body["operations"]}) - try: - metric_ids = [six.text_type(utils.UUID(m)) - for (m, a) in references] - except ValueError as e: - api.abort(400, {"cause": "Invalid metric references", - "reason": six.text_type(e), - "detail": references}) - - metrics = pecan.request.indexer.list_metrics(ids=metric_ids) - missing_metric_ids = (set(metric_ids) - - set(six.text_type(m.id) for m in metrics)) - if missing_metric_ids: - api.abort(404, {"cause": "Unknown metrics", - "reason": "Provided metrics don't exists", - "detail": missing_metric_ids}) - - number_of_metrics = len(metrics) - if number_of_metrics == 0: - return [] - - for metric in metrics: - api.enforce("get metric", metric) - - metrics_by_ids = dict((six.text_type(m.id), m) for m in metrics) - metrics_and_aggregations = [(metrics_by_ids[m], a) - for (m, a) in references] - return get_measures_or_abort( - metrics_and_aggregations, body["operations"], - start, stop, granularity, needed_overlap, fill, - ref_identifier="id") - - -class AggregatesController(object): - fetch = FetchController() + if "resource_type" in body: + attr_filter = body["search"] + policy_filter = ( + pecan.request.auth_helper.get_resource_policy_filter( + pecan.request, "search resource", body["resource_type"])) + if policy_filter: + if attr_filter: + attr_filter = {"and": [ + policy_filter, + attr_filter + ]} + else: + attr_filter = policy_filter + + groupby = sorted(set(api.arg_to_list(groupby))) + resources = pecan.request.indexer.list_resources( + body["resource_type"], + attribute_filter=attr_filter, + sorts=groupby) + if not groupby: + return self._get_measures_by_name( + resources, references, body["operations"], start, stop, + granularity, needed_overlap, fill) + + def groupper(r): + return tuple((attr, r[attr]) for attr in groupby) + + results = [] + for key, resources in itertools.groupby(resources, groupper): + results.append({ + "group": dict(key), + "measures": self._get_measures_by_name( + resources, references, 
body["operations"], start, stop, + granularity, needed_overlap, fill) + }) + return results + + else: + try: + metric_ids = [six.text_type(utils.UUID(m)) + for (m, a) in references] + except ValueError as e: + api.abort(400, {"cause": "Invalid metric references", + "reason": six.text_type(e), + "detail": references}) + + metrics = pecan.request.indexer.list_metrics(ids=metric_ids) + missing_metric_ids = (set(metric_ids) + - set(six.text_type(m.id) for m in metrics)) + if missing_metric_ids: + api.abort(404, {"cause": "Unknown metrics", + "reason": "Provided metrics don't exists", + "detail": missing_metric_ids}) + + number_of_metrics = len(metrics) + if number_of_metrics == 0: + return [] + + for metric in metrics: + api.enforce("get metric", metric) + + metrics_by_ids = dict((six.text_type(m.id), m) for m in metrics) + metrics_and_aggregations = [(metrics_by_ids[m], a) + for (m, a) in references] + return get_measures_or_abort( + metrics_and_aggregations, body["operations"], + start, stop, granularity, needed_overlap, fill, + ref_identifier="id") + + def _get_measures_by_name(self, resources, metric_names, operations, + start, stop, granularity, needed_overlap, fill): + + metrics_and_aggregations = list(filter( + lambda x: x[0] is not None, ([r.get_metric(metric_name), agg] + for (metric_name, agg) in metric_names + for r in resources))) + if not metrics_and_aggregations: + api.abort(400, {"cause": "Metrics not found", + "detail": set((m for (m, a) in metric_names))}) + + return get_measures_or_abort(metrics_and_aggregations, operations, + start, stop, granularity, needed_overlap, + fill, ref_identifier="name") diff --git a/gnocchi/tests/functional/gabbits/aggregates-fetch.yaml b/gnocchi/tests/functional/gabbits/aggregates-with-metric-ids.yaml similarity index 94% rename from gnocchi/tests/functional/gabbits/aggregates-fetch.yaml rename to gnocchi/tests/functional/gabbits/aggregates-with-metric-ids.yaml index 7044a21d..15878185 100644 --- 
a/gnocchi/tests/functional/gabbits/aggregates-fetch.yaml +++ b/gnocchi/tests/functional/gabbits/aggregates-with-metric-ids.yaml @@ -109,7 +109,7 @@ tests: - ["2015-03-06T14:35:15+00:00", 1.0, 15.0] - name: get aggregates - POST: /v1/aggregates/fetch + POST: /v1/aggregates data: operations: ["metric", ["$HISTORY['create metric1'].$RESPONSE['$.id']", "mean"], ["$HISTORY['create metric2'].$RESPONSE['$.id']", "mean"]] response_json_paths: @@ -134,7 +134,7 @@ tests: - ["2015-03-06T14:35:15+00:00", 1.0, 15.0] - name: get aggregates start and stop - POST: /v1/aggregates/fetch + POST: /v1/aggregates query_parameters: start: "2015-03-06T14:34:00" stop: "2015-03-06T14:35:13" @@ -156,7 +156,7 @@ tests: - ["2015-03-06T14:35:12+00:00", 1.0, 10.0] - name: get aggregates granularity - POST: /v1/aggregates/fetch?granularity=60 + POST: /v1/aggregates?granularity=60 data: operations: ["metric", ["$HISTORY['create metric1'].$RESPONSE['$.id']", "max"], ["$HISTORY['create metric2'].$RESPONSE['$.id']", "min"]] response_json_paths: @@ -171,7 +171,7 @@ tests: - ["2015-03-06T14:35:00+00:00", 60.0, 10.0] - name: get aggregates simple with array - POST: /v1/aggregates/fetch + POST: /v1/aggregates data: operations: ["+", ["metric", ["$HISTORY['create metric1'].$RESPONSE['$.id']", "mean"], ["$HISTORY['create metric2'].$RESPONSE['$.id']", "mean"]], 2.0] response_json_paths: @@ -196,7 +196,7 @@ tests: - ["2015-03-06T14:35:15+00:00", 1.0, 17.0] - name: get aggregates resample - POST: /v1/aggregates/fetch?granularity=1 + POST: /v1/aggregates?granularity=1 data: operations: - resample @@ -215,7 +215,7 @@ tests: - ["2015-03-06T14:35:00+00:00", 60.0, 12.5] - name: get aggregates rolling - POST: /v1/aggregates/fetch?granularity=1 + POST: /v1/aggregates?granularity=1 data: operations: - rolling @@ -237,7 +237,7 @@ tests: - name: get one metric - POST: /v1/aggregates/fetch + POST: /v1/aggregates data: operations: "(metric $HISTORY['create metric1'].$RESPONSE['$.id'] mean)" response_json_paths: @@ -253,7 
+253,7 @@ tests: - ["2015-03-06T14:35:15+00:00", 1.0, 11.0] - name: get aggregates one metric - POST: /v1/aggregates/fetch + POST: /v1/aggregates data: operations: "(aggregate mean (metric $HISTORY['create metric1'].$RESPONSE['$.id'] mean))" response_json_paths: @@ -269,7 +269,7 @@ tests: - ["2015-03-06T14:35:15+00:00", 1.0, 11.0] - name: get aggregates math with string - POST: /v1/aggregates/fetch + POST: /v1/aggregates data: operations: "(+ (metric ($HISTORY['create metric1'].$RESPONSE['$.id'] mean) ($HISTORY['create metric2'].$RESPONSE['$.id'] mean)) 2.0)" response_json_paths: @@ -294,7 +294,7 @@ tests: - ["2015-03-06T14:35:15+00:00", 1.0, 17.0] - name: get aggregates substact - POST: /v1/aggregates/fetch + POST: /v1/aggregates data: operations: "(- (metric $HISTORY['create metric1'].$RESPONSE['$.id'] mean) (metric $HISTORY['create metric2'].$RESPONSE['$.id'] mean)))" response_json_paths: @@ -310,7 +310,7 @@ tests: - ["2015-03-06T14:35:15+00:00", 1.0, -4.0] - name: get aggregates mean aggregate - POST: /v1/aggregates/fetch + POST: /v1/aggregates data: operations: "(aggregate mean (metric ($HISTORY['create metric1'].$RESPONSE['$.id'] mean) ($HISTORY['create metric2'].$RESPONSE['$.id'] mean)))" response_json_paths: @@ -326,7 +326,7 @@ tests: - ["2015-03-06T14:35:15+00:00", 1.0, 13.0] - name: get aggregates negative absolute - POST: /v1/aggregates/fetch + POST: /v1/aggregates data: operations: "(negative (absolute (aggregate mean (metric ($HISTORY['create metric1'].$RESPONSE['$.id'] mean) ($HISTORY['create metric2'].$RESPONSE['$.id'] mean)))))" response_json_paths: @@ -344,7 +344,7 @@ tests: # Negative tests - name: get no operations - POST: /v1/aggregates/fetch + POST: /v1/aggregates request_headers: accept: application/json content-type: application/json @@ -356,7 +356,7 @@ tests: - "Invalid input: Operation must not be empty" - name: get operations without list - POST: /v1/aggregates/fetch + POST: /v1/aggregates request_headers: accept: application/json 
content-type: application/json @@ -369,7 +369,7 @@ tests: - "Invalid input: Expected a tuple/list, got a " - name: invalid operations string - POST: /v1/aggregates/fetch + POST: /v1/aggregates request_headers: accept: application/json content-type: application/json @@ -384,7 +384,7 @@ tests: $.description.detail: "Expected \")\" (at char 15), (line:1, col:16)" - name: get invalid metric operations - POST: /v1/aggregates/fetch + POST: /v1/aggregates request_headers: accept: application/json content-type: application/json @@ -396,7 +396,7 @@ tests: - "Invalid input: Operation need at least one argument for dictionary value" - name: get unknown metrics - POST: /v1/aggregates/fetch + POST: /v1/aggregates request_headers: accept: application/json content-type: application/json @@ -417,7 +417,7 @@ tests: - "e4864464-1b27-4622-9fbb-dc900e06c192" - name: get not matching granularity - POST: /v1/aggregates/fetch + POST: /v1/aggregates request_headers: accept: application/json content-type: application/json @@ -437,7 +437,7 @@ tests: $.description.detail.`len`: 3 - name: get unknown granularity - POST: /v1/aggregates/fetch?granularity=123 + POST: /v1/aggregates?granularity=123 request_headers: accept: application/json content-type: application/json @@ -456,7 +456,7 @@ tests: - ["$HISTORY['create metric1'].$RESPONSE['$.id']", mean] - name: get unknown aggregation - POST: /v1/aggregates/fetch + POST: /v1/aggregates request_headers: accept: application/json content-type: application/json @@ -474,7 +474,7 @@ tests: - ["$HISTORY['create metric1'].$RESPONSE['$.id']", "what?"] - name: invalid start - POST: /v1/aggregates/fetch?start=notadate + POST: /v1/aggregates?start=notadate request_headers: accept: application/json content-type: application/json @@ -487,7 +487,7 @@ tests: $.description.reason: "Must be a datetime or a timestamp" - name: invalid stop - POST: /v1/aggregates/fetch?stop=notadate + POST: /v1/aggregates?stop=notadate request_headers: accept: application/json 
content-type: application/json @@ -500,7 +500,7 @@ tests: $.description.reason: "Must be a datetime or a timestamp" - name: invalid needed_overlap - POST: /v1/aggregates/fetch?needed_overlap=notnumber + POST: /v1/aggregates?needed_overlap=notnumber request_headers: accept: application/json content-type: application/json @@ -513,7 +513,7 @@ tests: $.description.reason: "Must be a number" - name: incomplete needed_overlap - POST: /v1/aggregates/fetch?needed_overlap=50 + POST: /v1/aggregates?needed_overlap=50 request_headers: accept: application/json content-type: application/json @@ -526,7 +526,7 @@ tests: $.description.reason: "start and/or stop must be provided if specifying needed_overlap" - name: invalid granularity - POST: /v1/aggregates/fetch?granularity=foobar + POST: /v1/aggregates?granularity=foobar request_headers: accept: application/json content-type: application/json @@ -539,7 +539,7 @@ tests: $.description.reason: "Unable to parse timespan" - name: incomplete fill - POST: /v1/aggregates/fetch?fill=123 + POST: /v1/aggregates?fill=123 request_headers: accept: application/json content-type: application/json @@ -552,7 +552,7 @@ tests: $.description.reason: "Unable to fill without a granularity" - name: invalid fill - POST: /v1/aggregates/fetch?fill=foobar&granularity=5 + POST: /v1/aggregates?fill=foobar&granularity=5 request_headers: accept: application/json content-type: application/json @@ -565,7 +565,7 @@ tests: $.description.reason: "Must be a float or 'null', got 'foobar'" - name: get rolling bad aggregate - POST: /v1/aggregates/fetch + POST: /v1/aggregates request_headers: accept: application/json content-type: application/json @@ -577,7 +577,7 @@ tests: - "Invalid input: 'rolling' operation invalid for dictionary value" - name: get rolling-mean missing window - POST: /v1/aggregates/fetch + POST: /v1/aggregates request_headers: accept: application/json content-type: application/json @@ -589,7 +589,7 @@ tests: - "Invalid input: 'rolling' operation 
invalid for dictionary value" - name: get measurements from metric and invalid operations - POST: /v1/aggregates/fetch + POST: /v1/aggregates request_headers: accept: application/json content-type: application/json @@ -601,7 +601,7 @@ tests: - "Invalid input: 'notexist' operation invalid for dictionary value" - name: invalid resample - POST: /v1/aggregates/fetch + POST: /v1/aggregates request_headers: accept: application/json content-type: application/json diff --git a/gnocchi/tests/functional/gabbits/aggregates-with-resources.yaml b/gnocchi/tests/functional/gabbits/aggregates-with-resources.yaml new file mode 100644 index 00000000..9dce6125 --- /dev/null +++ b/gnocchi/tests/functional/gabbits/aggregates-with-resources.yaml @@ -0,0 +1,190 @@ +fixtures: + - ConfigFixture + +defaults: + request_headers: + # User foobar + authorization: "basic Zm9vYmFyOg==" + content-type: application/json + +tests: + - name: create archive policy + desc: for later use + POST: /v1/archive_policy + request_headers: + # User admin + authorization: "basic YWRtaW46" + data: + name: low + definition: + - granularity: 1 second + - granularity: 300 seconds + status: 201 + + - name: create another archive policy + desc: for later use + POST: /v1/archive_policy + request_headers: + # User admin + authorization: "basic YWRtaW46" + data: + name: unrelated + definition: + - granularity: 5 second + status: 201 + + - name: create resource 1 + POST: /v1/resource/generic + data: + id: 4ed9c196-4c9f-4ba8-a5be-c9a71a82aac4 + user_id: 6c865dd0-7945-4e08-8b27-d0d7f1c2b667 + project_id: c7f32f1f-c5ef-427a-8ecd-915b219c66e8 + metrics: + cpu.util: + archive_policy_name: low + status: 201 + + - name: post cpuutil measures 1 + POST: /v1/resource/generic/4ed9c196-4c9f-4ba8-a5be-c9a71a82aac4/metric/cpu.util/measures + data: + - timestamp: "2015-03-06T14:33:57" + value: 43.1 + - timestamp: "2015-03-06T14:34:12" + value: 12 + status: 202 + + - name: create resource 2 + POST: /v1/resource/generic + data: + id: 
1447CD7E-48A6-4C50-A991-6677CC0D00E6 + user_id: 6c865dd0-7945-4e08-8b27-d0d7f1c2b667 + project_id: c7f32f1f-c5ef-427a-8ecd-915b219c66e8 + metrics: + cpu.util: + archive_policy_name: low + status: 201 + + - name: post cpuutil measures 2 + POST: /v1/resource/generic/1447CD7E-48A6-4C50-A991-6677CC0D00E6/metric/cpu.util/measures + data: + - timestamp: "2015-03-06T14:33:57" + value: 23 + - timestamp: "2015-03-06T14:34:12" + value: 8 + status: 202 + + - name: create resource 3 + POST: /v1/resource/generic + data: + id: 33333BC5-5948-4F29-B7DF-7DE607660452 + user_id: 6c865dd0-7945-4e08-8b27-d0d7f1c2b667 + project_id: ee4cfc41-1cdc-4d2f-9a08-f94111d80171 + metrics: + cpu.util: + archive_policy_name: low + status: 201 + + - name: post cpuutil measures 3 + POST: /v1/resource/generic/33333BC5-5948-4F29-B7DF-7DE607660452/metric/cpu.util/measures + data: + - timestamp: "2015-03-06T14:33:57" + value: 230 + - timestamp: "2015-03-06T14:34:12" + value: 45.41 + status: 202 + + - name: create resource 4 + POST: /v1/resource/generic + data: + id: b1409ec6-3909-4b37-bbff-f9a5448fe328 + user_id: 70b5b732-9d81-4dfb-a8a1-a424ef3eae6b + project_id: ee4cfc41-1cdc-4d2f-9a08-f94111d80171 + metrics: + cpu.util: + archive_policy_name: unrelated + status: 201 + + - name: post cpuutil measures 4 + POST: /v1/resource/generic/b1409ec6-3909-4b37-bbff-f9a5448fe328/metric/cpu.util/measures + data: + - timestamp: "2015-03-06T14:33:57" + value: 230 + - timestamp: "2015-03-06T14:34:12" + value: 45.41 + status: 202 + + - name: aggregate metric + POST: /v1/aggregates + data: + resource_type: generic + search: "user_id = '6c865dd0-7945-4e08-8b27-d0d7f1c2b667'" + operations: "(aggregate mean (metric cpu.util mean))" + poll: + count: 10 + delay: 1 + response_json_paths: + $.aggregated: + - ['2015-03-06T14:30:00+00:00', 300.0, 60.251666666666665] + - ['2015-03-06T14:33:57+00:00', 1.0, 98.7] + - ['2015-03-06T14:34:12+00:00', 1.0, 21.80333333333333] + + - name: aggregate metric with groupby on project_id and 
user_id with aggregates API + POST: /v1/aggregates?groupby=project_id&groupby=user_id + data: + resource_type: generic + search: "user_id = '6c865dd0-7945-4e08-8b27-d0d7f1c2b667'" + operations: "(aggregate mean (metric cpu.util mean))" + response_json_paths: + $: + - measures: + aggregated: + - ['2015-03-06T14:30:00+00:00', 300.0, 21.525] + - ['2015-03-06T14:33:57+00:00', 1.0, 33.05] + - ['2015-03-06T14:34:12+00:00', 1.0, 10.0] + group: + user_id: 6c865dd0-7945-4e08-8b27-d0d7f1c2b667 + project_id: c7f32f1f-c5ef-427a-8ecd-915b219c66e8 + - measures: + aggregated: + - ['2015-03-06T14:30:00+00:00', 300.0, 137.70499999999998] + - ['2015-03-06T14:33:57+00:00', 1.0, 230.0] + - ['2015-03-06T14:34:12+00:00', 1.0, 45.41] + group: + user_id: 6c865dd0-7945-4e08-8b27-d0d7f1c2b667 + project_id: ee4cfc41-1cdc-4d2f-9a08-f94111d80171 + +# Negative tests + + - name: not matching granularity + POST: /v1/aggregates + request_headers: + accept: application/json + content-type: application/json + authorization: "basic Zm9vYmFyOg==" + data: + resource_type: generic + search: {} + operations: "(aggregate mean (metric cpu.util mean))" + status: 400 + response_json_paths: + $.code: 400 + $.description.cause: "Metrics can't being aggregated" + $.description.detail.`len`: 4 + + - name: not matching metrics + POST: /v1/aggregates + request_headers: + accept: application/json + content-type: application/json + authorization: "basic Zm9vYmFyOg==" + data: + resource_type: generic + search: "user_id = '6c865dd0-7945-4e08-8b27-d0d7f1c2b667'" + operations: "(aggregate mean (metric (notexists mean) (foobar mean)))" + status: 400 + response_json_paths: + $.code: 400 + $.description.cause: "Metrics not found" + $.description.detail.`sorted`: + - foobar + - notexists diff --git a/gnocchi/tests/functional/gabbits/aggregation.yaml b/gnocchi/tests/functional/gabbits/aggregation.yaml index 0d626a94..1121f90a 100644 --- a/gnocchi/tests/functional/gabbits/aggregation.yaml +++ 
b/gnocchi/tests/functional/gabbits/aggregation.yaml @@ -132,7 +132,7 @@ tests: - ['2015-03-06T14:34:00+00:00', 60.0, 7.0] - name: get measure aggregates and operations - POST: /v1/aggregates/fetch?granularity=1 + POST: /v1/aggregates?granularity=1 data: operations: "(aggregate mean (resample mean 60 (metric ($HISTORY['get metric list'].$RESPONSE['$[0].id'] mean) ($HISTORY['get metric list'].$RESPONSE['$[1].id'] mean))))" response_json_paths: @@ -235,6 +235,17 @@ tests: - ['2015-03-06T14:33:57+00:00', 1.0, 23.1] - ['2015-03-06T14:34:12+00:00', 1.0, 7.0] + - name: get measure aggregates by granularity from aggregates API + POST: /v1/aggregates?granularity=1 + data: + resource_type: generic + search: {} + operations: '(aggregate mean (metric agg_meter mean))' + response_json_paths: + $.aggregated: + - ['2015-03-06T14:33:57+00:00', 1.0, 23.1] + - ['2015-03-06T14:34:12+00:00', 1.0, 7.0] + - name: get measure aggregates by granularity from resources and resample POST: /v1/aggregation/resource/generic/metric/agg_meter?granularity=1&resample=60 response_json_paths: @@ -242,6 +253,28 @@ tests: - ['2015-03-06T14:33:00+00:00', 60.0, 23.1] - ['2015-03-06T14:34:00+00:00', 60.0, 7.0] + - name: get measure aggregates by granularity from aggregates API and resample + POST: /v1/aggregates?granularity=1 + data: + resource_type: generic + search: {} + operations: '(aggregate mean (resample mean 60 (metric agg_meter mean)))' + response_json_paths: + $.aggregated: + - ['2015-03-06T14:33:00+00:00', 60.0, 23.1] + - ['2015-03-06T14:34:00+00:00', 60.0, 7.0] + + - name: get measure aggregates by granularity from resources and operations + POST: /v1/aggregates?granularity=1 + data: + resource_type: generic + search: {} + operations: '(aggregate mean (resample mean 60 (metric agg_meter mean)))' + response_json_paths: + $.aggregated: + - ['2015-03-06T14:33:00+00:00', 60.0, 23.1] + - ['2015-03-06T14:34:00+00:00', 60.0, 7.0] + - name: get measure aggregates by granularity from resources and bad 
resample POST: /v1/aggregation/resource/generic/metric/agg_meter?resample=abc status: 400 @@ -262,6 +295,20 @@ tests: - ['2015-03-06T14:30:00+00:00', 300.0, 15.05] - ['2015-03-06T14:33:57+00:00', 1.0, 23.1] + - name: get measure aggregates by granularity with timestamps from aggregates API + POST: /v1/aggregates?start=2015-03-06T15:33:57%2B01:00&stop=2015-03-06T15:34:00%2B01:00 + data: + resource_type: generic + search: {} + operations: '(aggregate mean (metric agg_meter mean))' + poll: + count: 10 + delay: 1 + response_json_paths: + $.aggregated: + - ['2015-03-06T14:30:00+00:00', 300.0, 15.05] + - ['2015-03-06T14:33:57+00:00', 1.0, 23.1] + - name: get measure aggregates by granularity from resources and reaggregate POST: /v1/aggregation/resource/generic/metric/agg_meter?granularity=1&reaggregation=min poll: @@ -280,6 +327,17 @@ tests: - ['2015-03-06T14:34:12+00:00', 1.0, 7.0] - ['2015-03-06T14:35:12+00:00', 1.0, 2.5] + - name: get measure aggregates from aggregates API with fill zero + POST: /v1/aggregates?granularity=1&fill=0 + data: + resource_type: generic + search: {} + operations: '(aggregate mean (metric agg_meter mean))' + response_json_paths: + $.aggregated: + - ['2015-03-06T14:33:57+00:00', 1.0, 23.1] + - ['2015-03-06T14:34:12+00:00', 1.0, 7.0] + - ['2015-03-06T14:35:12+00:00', 1.0, 2.5] # Some negative tests diff --git a/gnocchi/tests/functional/gabbits/metric.yaml b/gnocchi/tests/functional/gabbits/metric.yaml index 1fe0a894..9a277d0c 100644 --- a/gnocchi/tests/functional/gabbits/metric.yaml +++ b/gnocchi/tests/functional/gabbits/metric.yaml @@ -214,7 +214,7 @@ tests: - ["2015-03-06T14:35:00+00:00", 60.0, 10.0] - name: get measurements from metric and resample and negative - POST: /v1/aggregates/fetch?granularity=1 + POST: /v1/aggregates?granularity=1 data: operations: "(negative (resample mean 60 (metric $HISTORY['list valid metrics'].$RESPONSE['$[0].id'] mean)))" response_json_paths: @@ -236,7 +236,7 @@ tests: GET: /v1/metric/$HISTORY['list valid 
metrics'].$RESPONSE['$[0].id']/measures?refresh=true - name: get absolute measurements from metric - POST: /v1/aggregates/fetch + POST: /v1/aggregates data: operations: "(absolute (metric $HISTORY['list valid metrics'].$RESPONSE['$[0].id'] mean))" response_json_paths: @@ -250,7 +250,7 @@ tests: - ["2015-03-06T14:37:15+00:00", 1.0, 23.0] - name: rolling-mean - POST: /v1/aggregates/fetch + POST: /v1/aggregates data: operations: "(rolling mean 2 (metric $HISTORY['list valid metrics'].$RESPONSE['$[0].id'] mean))" status: 200 @@ -264,7 +264,7 @@ tests: - ["2015-03-06T14:37:15+00:00", 1.0, -19.5] - name: get measurements from metric and two operations - POST: /v1/aggregates/fetch + POST: /v1/aggregates data: operations: "(negative (absolute (metric $HISTORY['list valid metrics'].$RESPONSE['$[0].id'] mean)))" response_json_paths: @@ -278,7 +278,7 @@ tests: - ["2015-03-06T14:37:15+00:00", 1.0, -23.0] - name: get measurements from metric and invalid operations - POST: /v1/aggregates/fetch + POST: /v1/aggregates data: operations: "(notexist (absolute (metric $HISTORY['list valid metrics'].$RESPONSE['$[0].id'] mean)))" status: 400 diff --git a/releasenotes/notes/aggregates-API-d31db66e674cbf60.yaml b/releasenotes/notes/aggregates-API-d31db66e674cbf60.yaml index 6eeb87bc..5b15939b 100644 --- a/releasenotes/notes/aggregates-API-d31db66e674cbf60.yaml +++ b/releasenotes/notes/aggregates-API-d31db66e674cbf60.yaml @@ -2,7 +2,9 @@ features: - | New API endpoint allows to retrieve, transform, aggregates measurements on the - fly in an flexible way. The endpoint location is `/v1/aggregates/fetch`. - This endpoint allows to describe `operations` to be done on a metrics - list. Example: `(* 5 (rolling mean 3 (aggregate sum (metric (metric1 mean) - (metric2 mean)))))`. More details are available in the documentation. + fly in an flexible way. The endpoint location is `/v1/aggregates`. + This endpoint allows to describe `operations` to be done on a metrics list. 
+ Example: `(* 5 (rolling mean 3 (aggregate sum (metric (metric1 mean) + (metric2 mean)))))`. The metrics list can be retrieved by searching in + resources by setting 'resource_type' and 'search'. More details are + available in the documentation. -- GitLab From 900c3f0c984b5a50f9c7a1c2a508f599e9072d76 Mon Sep 17 00:00:00 2001 From: gord chung Date: Tue, 24 Oct 2017 14:47:51 +0000 Subject: [PATCH 1040/1483] rolling window over matrix support rolling window over 2d array --- gnocchi/rest/aggregates/operations.py | 19 ++++++------------- 1 file changed, 6 insertions(+), 13 deletions(-) diff --git a/gnocchi/rest/aggregates/operations.py b/gnocchi/rest/aggregates/operations.py index 242235d5..b322c2d5 100644 --- a/gnocchi/rest/aggregates/operations.py +++ b/gnocchi/rest/aggregates/operations.py @@ -159,20 +159,13 @@ def handle_rolling(agg, granularity, timestamps, values, is_aggregated, (window, len(values)) ) - # TODO(sileht): make a more optimised version that - # compute the data across the whole matrix - new_values = None timestamps = timestamps[window - 1:] - for ts in values.T: - # arogozhnikov.github.io/2015/09/30/NumpyTipsAndTricks2.html - stride = ts.strides[0] - ts = AGG_MAP[agg](as_strided( - ts, shape=[len(ts) - window + 1, window], - strides=[stride, stride]), axis=1) - if new_values is None: - new_values = numpy.array([ts]) - else: - new_values = numpy.append(new_values, [ts], axis=0) + values = values.T + # rigtorp.se/2011/01/01/rolling-statistics-numpy.html + shape = values.shape[:-1] + (values.shape[-1] - window + 1, window) + strides = values.strides + (values.strides[-1],) + new_values = AGG_MAP[agg](as_strided(values, shape=shape, strides=strides), + axis=-1) return granularity, timestamps, new_values.T, is_aggregated -- GitLab From 29bb5bfbe13261059415c259c192807d8ebc07fb Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Wed, 18 Oct 2017 18:15:58 +0200 Subject: [PATCH 1041/1483] storage: do not create coordinator oneself This forces the storage 
driver to receive a coordinator as parameter. --- gnocchi/cli/manage.py | 5 ++++- gnocchi/cli/metricd.py | 21 +++++++++++---------- gnocchi/rest/app.py | 13 +++++++++---- gnocchi/storage/__init__.py | 12 +++--------- gnocchi/tests/base.py | 21 ++++++++++----------- gnocchi/tests/functional/fixtures.py | 9 ++++++++- gnocchi/tests/test_storage.py | 4 ++-- 7 files changed, 47 insertions(+), 38 deletions(-) diff --git a/gnocchi/cli/manage.py b/gnocchi/cli/manage.py index 7ecb1ef5..dd94ae03 100644 --- a/gnocchi/cli/manage.py +++ b/gnocchi/cli/manage.py @@ -61,7 +61,10 @@ def upgrade(): LOG.info("Upgrading indexer %s", index) index.upgrade() if not conf.skip_storage: - s = storage.get_driver(conf) + # FIXME(jd) Pass None as coordinator because it's not needed in this + # case. This will be removed when the storage will stop requiring a + # coordinator object. + s = storage.get_driver(conf, None) LOG.info("Upgrading storage %s", s) s.upgrade() if not conf.skip_incoming: diff --git a/gnocchi/cli/metricd.py b/gnocchi/cli/metricd.py index 046b483c..4f9d0b14 100644 --- a/gnocchi/cli/metricd.py +++ b/gnocchi/cli/metricd.py @@ -57,7 +57,10 @@ class MetricProcessBase(cotyledon.Service): self._wake_up.set() def _configure(self): - self.store = retry_on_exception(storage.get_driver, self.conf) + self.coord = retry_on_exception(utils.get_coordinator_and_start, + self.conf.storage.coordination_url) + self.store = retry_on_exception( + storage.get_driver, self.conf, self.coord) self.incoming = retry_on_exception(incoming.get_driver, self.conf) self.index = retry_on_exception(indexer.get_driver, self.conf) @@ -85,9 +88,8 @@ class MetricProcessBase(cotyledon.Service): self._shutdown_done.wait() self.close_services() - @staticmethod - def close_services(): - pass + def close_services(self): + self.coord.stop() @staticmethod def _run_job(): @@ -104,6 +106,10 @@ class MetricReporting(MetricProcessBase): def _configure(self): self.incoming = retry_on_exception(incoming.get_driver, 
self.conf) + @staticmethod + def close_services(): + pass + def _run_job(self): try: report = self.incoming.measures_report(details=False) @@ -140,12 +146,7 @@ class MetricProcessor(MetricProcessBase): # Never retry except when explicitly asked by raising TryAgain retry=tenacity.retry_never) def _configure(self): - self.coord = retry_on_exception(utils.get_coordinator_and_start, - self.conf.storage.coordination_url) - self.store = retry_on_exception(storage.get_driver, - self.conf, self.coord) - self.incoming = retry_on_exception(incoming.get_driver, self.conf) - self.index = retry_on_exception(indexer.get_driver, self.conf) + super(MetricProcessor, self)._configure() # create fallback in case paritioning fails or assigned no tasks self.fallback_tasks = list( diff --git a/gnocchi/rest/app.py b/gnocchi/rest/app.py index 5581f322..dc6e8c71 100644 --- a/gnocchi/rest/app.py +++ b/gnocchi/rest/app.py @@ -31,6 +31,7 @@ from gnocchi import incoming as gnocchi_incoming from gnocchi import indexer as gnocchi_indexer from gnocchi import json from gnocchi import storage as gnocchi_storage +from gnocchi import utils LOG = daiquiri.getLogger(__name__) @@ -83,14 +84,18 @@ global APPCONFIGS APPCONFIGS = {} -def load_app(conf, indexer=None, storage=None, incoming=None, +def load_app(conf, indexer=None, storage=None, incoming=None, coord=None, not_implemented_middleware=True): global APPCONFIGS - # NOTE(sileht): We load config, storage and indexer, - # so all if not storage: - storage = gnocchi_storage.get_driver(conf) + if not coord: + # NOTE(jd) This coordinator is never stop. I don't think it's a + # real problem since the Web app can never really be stopped + # anyway, except by quitting it entirely. 
+ coord = utils.get_coordinator_and_start( + conf.storage.coordination_url) + storage = gnocchi_storage.get_driver(conf, coord) if not incoming: incoming = gnocchi_incoming.get_driver(conf) if not indexer: diff --git a/gnocchi/storage/__init__.py b/gnocchi/storage/__init__.py index f0806f67..d792c2a1 100644 --- a/gnocchi/storage/__init__.py +++ b/gnocchi/storage/__init__.py @@ -111,7 +111,7 @@ class SackLockTimeoutError(StorageError): pass -def get_driver(conf, coord=None): +def get_driver(conf, coord): """Return the configured driver.""" return utils.get_driver_class('gnocchi.storage', conf.storage)( conf.storage, coord) @@ -119,14 +119,8 @@ def get_driver(conf, coord=None): class StorageDriver(object): - def __init__(self, conf, coord=None): - self.coord = (coord if coord else - utils.get_coordinator_and_start(conf.coordination_url)) - self.shared_coord = bool(coord) - - def stop(self): - if not self.shared_coord: - self.coord.stop() + def __init__(self, conf, coord): + self.coord = coord @staticmethod def upgrade(): diff --git a/gnocchi/tests/base.py b/gnocchi/tests/base.py index b6daed2e..9c4a881f 100644 --- a/gnocchi/tests/base.py +++ b/gnocchi/tests/base.py @@ -31,7 +31,6 @@ try: except ImportError: swexc = None from testtools import testcase -from tooz import coordination from gnocchi import archive_policy from gnocchi import exceptions @@ -40,6 +39,7 @@ from gnocchi import indexer from gnocchi import service from gnocchi import storage from gnocchi.tests import utils +from gnocchi import utils as g_utils class SkipNotImplementedMeta(type): @@ -300,22 +300,17 @@ class TestCase(BaseTestCase): self.index = indexer.get_driver(self.conf) + self.coord = g_utils.get_coordinator_and_start( + self.conf.storage.coordination_url) + # NOTE(jd) So, some driver, at least SQLAlchemy, can't create all # their tables in a single transaction even with the # checkfirst=True, so what we do here is we force the upgrade code # path to be sequential to avoid race conditions as 
the tests run # in parallel. - self.coord = coordination.get_coordinator( - self.conf.storage.coordination_url, - str(uuid.uuid4()).encode('ascii')) - - self.coord.start(start_heart=True) - with self.coord.get_lock(b"gnocchi-tests-db-lock"): self.index.upgrade() - self.coord.stop() - self.archive_policies = self.ARCHIVE_POLICIES.copy() for name, ap in six.iteritems(self.archive_policies): # Create basic archive policies @@ -356,7 +351,7 @@ class TestCase(BaseTestCase): self.conf.set_override("s3_bucket_prefix", str(uuid.uuid4())[:26], "storage") - self.storage = storage.get_driver(self.conf) + self.storage = storage.get_driver(self.conf, self.coord) self.incoming = incoming.get_driver(self.conf) if self.conf.storage.driver == 'redis': @@ -371,9 +366,13 @@ class TestCase(BaseTestCase): def tearDown(self): self.index.disconnect() - self.storage.stop() super(TestCase, self).tearDown() + @classmethod + def tearDownClass(cls): + cls.coord.stop() + super(TestCase, cls).tearDownClass() + def _create_metric(self, archive_policy_name="low"): """Create a metric and return it""" m = indexer.Metric(uuid.uuid4(), diff --git a/gnocchi/tests/functional/fixtures.py b/gnocchi/tests/functional/fixtures.py index dfb1cf02..30b874b0 100644 --- a/gnocchi/tests/functional/fixtures.py +++ b/gnocchi/tests/functional/fixtures.py @@ -36,6 +36,7 @@ from gnocchi import service from gnocchi import storage from gnocchi.tests import base from gnocchi.tests import utils +from gnocchi import utils as g_utils # NOTE(chdent): Hack to restore semblance of global configuration to # pass to the WSGI app used per test suite. 
LOAD_APP_KWARGS are the olso @@ -126,12 +127,15 @@ class ConfigFixture(fixture.GabbiFixture): self.index = index - s = storage.get_driver(conf) + self.coord = g_utils.get_coordinator_and_start( + conf.storage.coordination_url) + s = storage.get_driver(conf, self.coord) s.upgrade() i = incoming.get_driver(conf) i.upgrade(128) LOAD_APP_KWARGS = { + 'coord': self.coord, 'storage': s, 'indexer': index, 'incoming': i, @@ -161,6 +165,9 @@ class ConfigFixture(fixture.GabbiFixture): if self.tmp_dir: shutil.rmtree(self.tmp_dir) + if hasattr(self, 'coord'): + self.coord.stop() + self.conf.reset() if not os.getenv("GNOCCHI_TEST_DEBUG"): self.output.cleanUp() diff --git a/gnocchi/tests/test_storage.py b/gnocchi/tests/test_storage.py index 3dfb5c82..837e66b6 100644 --- a/gnocchi/tests/test_storage.py +++ b/gnocchi/tests/test_storage.py @@ -45,7 +45,7 @@ class TestStorageDriver(tests_base.TestCase): self.metric, __ = self._create_metric() def test_driver_str(self): - driver = storage.get_driver(self.conf) + driver = storage.get_driver(self.conf, None) if isinstance(driver, file.FileStorage): s = driver.basepath @@ -62,7 +62,7 @@ class TestStorageDriver(tests_base.TestCase): driver.__class__.__name__, s)) def test_get_driver(self): - driver = storage.get_driver(self.conf) + driver = storage.get_driver(self.conf, None) self.assertIsInstance(driver, storage.StorageDriver) def test_corrupted_data(self): -- GitLab From 78770f73e2be64f02fe9e3b0181d2bb07b76973a Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Wed, 18 Oct 2017 18:38:30 +0200 Subject: [PATCH 1042/1483] Move coordinator out of utils This makes sure tooz is not loaded by any piece of code that would like to use gnocchi.utils. 
--- gnocchi/cli/metricd.py | 10 +++++++++- gnocchi/rest/app.py | 4 ++-- gnocchi/tests/base.py | 4 ++-- gnocchi/tests/functional/fixtures.py | 4 ++-- gnocchi/utils.py | 7 ------- 5 files changed, 15 insertions(+), 14 deletions(-) diff --git a/gnocchi/cli/metricd.py b/gnocchi/cli/metricd.py index 4f9d0b14..6e705471 100644 --- a/gnocchi/cli/metricd.py +++ b/gnocchi/cli/metricd.py @@ -15,6 +15,7 @@ # limitations under the License. import threading import time +import uuid import cachetools.func import cotyledon @@ -24,6 +25,7 @@ from oslo_config import cfg import six import tenacity import tooz +from tooz import coordination from gnocchi import exceptions from gnocchi import incoming @@ -43,6 +45,12 @@ _wait_exponential = tenacity.wait_exponential(multiplier=0.5, max=60) retry_on_exception = tenacity.Retrying(wait=_wait_exponential) +def get_coordinator_and_start(url): + coord = coordination.get_coordinator(url, str(uuid.uuid4()).encode()) + coord.start(start_heart=True) + return coord + + class MetricProcessBase(cotyledon.Service): def __init__(self, worker_id, conf, interval_delay=0): super(MetricProcessBase, self).__init__(worker_id) @@ -57,7 +65,7 @@ class MetricProcessBase(cotyledon.Service): self._wake_up.set() def _configure(self): - self.coord = retry_on_exception(utils.get_coordinator_and_start, + self.coord = retry_on_exception(get_coordinator_and_start, self.conf.storage.coordination_url) self.store = retry_on_exception( storage.get_driver, self.conf, self.coord) diff --git a/gnocchi/rest/app.py b/gnocchi/rest/app.py index dc6e8c71..720e516a 100644 --- a/gnocchi/rest/app.py +++ b/gnocchi/rest/app.py @@ -26,12 +26,12 @@ from pecan import jsonify from stevedore import driver import webob.exc +from gnocchi.cli import metricd from gnocchi import exceptions from gnocchi import incoming as gnocchi_incoming from gnocchi import indexer as gnocchi_indexer from gnocchi import json from gnocchi import storage as gnocchi_storage -from gnocchi import utils LOG = 
daiquiri.getLogger(__name__) @@ -93,7 +93,7 @@ def load_app(conf, indexer=None, storage=None, incoming=None, coord=None, # NOTE(jd) This coordinator is never stop. I don't think it's a # real problem since the Web app can never really be stopped # anyway, except by quitting it entirely. - coord = utils.get_coordinator_and_start( + coord = metricd.get_coordinator_and_start( conf.storage.coordination_url) storage = gnocchi_storage.get_driver(conf, coord) if not incoming: diff --git a/gnocchi/tests/base.py b/gnocchi/tests/base.py index 9c4a881f..ea93d9cd 100644 --- a/gnocchi/tests/base.py +++ b/gnocchi/tests/base.py @@ -33,13 +33,13 @@ except ImportError: from testtools import testcase from gnocchi import archive_policy +from gnocchi.cli import metricd from gnocchi import exceptions from gnocchi import incoming from gnocchi import indexer from gnocchi import service from gnocchi import storage from gnocchi.tests import utils -from gnocchi import utils as g_utils class SkipNotImplementedMeta(type): @@ -300,7 +300,7 @@ class TestCase(BaseTestCase): self.index = indexer.get_driver(self.conf) - self.coord = g_utils.get_coordinator_and_start( + self.coord = metricd.get_coordinator_and_start( self.conf.storage.coordination_url) # NOTE(jd) So, some driver, at least SQLAlchemy, can't create all diff --git a/gnocchi/tests/functional/fixtures.py b/gnocchi/tests/functional/fixtures.py index 30b874b0..9d7f8446 100644 --- a/gnocchi/tests/functional/fixtures.py +++ b/gnocchi/tests/functional/fixtures.py @@ -28,6 +28,7 @@ from oslo_config import cfg from oslo_middleware import cors import sqlalchemy_utils +from gnocchi.cli import metricd from gnocchi import incoming from gnocchi import indexer from gnocchi.indexer import sqlalchemy @@ -36,7 +37,6 @@ from gnocchi import service from gnocchi import storage from gnocchi.tests import base from gnocchi.tests import utils -from gnocchi import utils as g_utils # NOTE(chdent): Hack to restore semblance of global configuration to # pass to 
the WSGI app used per test suite. LOAD_APP_KWARGS are the olso @@ -127,7 +127,7 @@ class ConfigFixture(fixture.GabbiFixture): self.index = index - self.coord = g_utils.get_coordinator_and_start( + self.coord = metricd.get_coordinator_and_start( conf.storage.coordination_url) s = storage.get_driver(conf, self.coord) s.upgrade() diff --git a/gnocchi/utils.py b/gnocchi/utils.py index c86b7889..d4da8b16 100644 --- a/gnocchi/utils.py +++ b/gnocchi/utils.py @@ -30,7 +30,6 @@ import numpy import pytimeparse import six from stevedore import driver -from tooz import coordination LOG = daiquiri.getLogger(__name__) @@ -71,12 +70,6 @@ def UUID(value): raise ValueError(e) -def get_coordinator_and_start(url): - coord = coordination.get_coordinator(url, str(uuid.uuid4()).encode()) - coord.start(start_heart=True) - return coord - - unix_universal_start64 = numpy.datetime64("1970") -- GitLab From 5fae6d6322c4297a836d535a0b8f3928cca16e42 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Wed, 18 Oct 2017 11:09:11 +0200 Subject: [PATCH 1043/1483] incoming: use utils.parallel_map rather than futures --- gnocchi/incoming/__init__.py | 14 +++++--------- 1 file changed, 5 insertions(+), 9 deletions(-) diff --git a/gnocchi/incoming/__init__.py b/gnocchi/incoming/__init__.py index 61088a25..0e76a2b1 100644 --- a/gnocchi/incoming/__init__.py +++ b/gnocchi/incoming/__init__.py @@ -15,7 +15,6 @@ # License for the specific language governing permissions and limitations # under the License. import collections -from concurrent import futures import daiquiri import numpy @@ -27,8 +26,6 @@ from gnocchi import utils LOG = daiquiri.getLogger(__name__) -_NUM_WORKERS = utils.get_default_workers() - Measure = collections.namedtuple("Measure", ['timestamp', 'value']) @@ -124,12 +121,11 @@ class IncomingDriver(object): :param metrics_and_measures: A dict where keys are metrics and value are measure. 
""" - with futures.ThreadPoolExecutor(max_workers=_NUM_WORKERS) as executor: - list(executor.map( - lambda args: self._store_new_measures(*args), - ((metric, self._encode_measures(measures)) - for metric, measures - in six.iteritems(metrics_and_measures)))) + utils.parallel_map( + self._store_new_measures, + ((metric, self._encode_measures(measures)) + for metric, measures + in six.iteritems(metrics_and_measures))) @staticmethod def _store_new_measures(metric, data): -- GitLab From 1e9dd7b756b9b6fbddd549ef018429c90163c537 Mon Sep 17 00:00:00 2001 From: Thomas Goirand Date: Thu, 26 Oct 2017 06:30:26 +0200 Subject: [PATCH 1044/1483] Re-add debian/pike branch. --- debian/changelog | 67 +++++-- debian/compat | 2 +- debian/control | 297 ++++++++++++++++-------------- debian/copyright | 2 +- debian/gnocchi-api.postinst.in | 0 debian/gnocchi-common.install | 2 +- debian/gnocchi-common.postinst.in | 0 debian/gnocchi-common.postrm | 0 debian/po/de.po | 151 +++------------ debian/po/it.po | 45 ++--- debian/rules | 4 - debian/source/options | 1 + 12 files changed, 254 insertions(+), 317 deletions(-) mode change 100755 => 100644 debian/gnocchi-api.postinst.in mode change 100755 => 100644 debian/gnocchi-common.postinst.in mode change 100755 => 100644 debian/gnocchi-common.postrm diff --git a/debian/changelog b/debian/changelog index 764f01b4..b97e7f98 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,17 +1,56 @@ -gnocchi (2.0.2-8) UNRELEASED; urgency=medium - - * Updating vcs fields. - * Updating copyright format url. - * Updating maintainer field. - * Running wrap-and-sort -bast. - * Updating standards version to 4.0.0. - * Removing gbp.conf, not used anymore or should be specified in the - developers dotfiles. - * Correcting permissions in debian packaging files. - * Updating standards version to 4.0.1. - * Updating standards version to 4.1.0. 
- - -- Daniel Baumann Fri, 04 Aug 2017 21:59:18 +0200 +gnocchi (3.0.4-4) unstable; urgency=medium + + * German debconf translation update (Closes: #842481). + + -- Thomas Goirand Mon, 03 Apr 2017 18:13:32 +0200 + +gnocchi (3.0.4-3) unstable; urgency=medium + + * Team upload. + * Revert net-tools dependency. + * Bump build dependency on openstack-pkg-tools (Closes: #858697). + + -- David Rabel Sat, 01 Apr 2017 11:35:30 +0200 + +gnocchi (3.0.4-2) unstable; urgency=medium + + * Team upload. + * Add missing dependency net-tools (Closes: #858697) + + -- David Rabel Sat, 25 Mar 2017 12:06:03 +0100 + +gnocchi (3.0.4-1) unstable; urgency=medium + + * Team upload. + + [ Ondřej Nový ] + * Bumped debhelper compat version to 10 + * Added lsb-base to depends + + [ Ondřej Kobližek ] + * New upstream release (Closes: #852991) + + -- Ondřej Kobližek Thu, 02 Feb 2017 14:44:31 +0100 + +gnocchi (3.0.0-2) unstable; urgency=medium + + [ Ondřej Nový ] + * d/s/options: extend-diff-ignore of .gitreview + + [ Thomas Goirand ] + * Uploading to unstable. + * Debconf translation: + - it (Closes: #839198). + + -- Thomas Goirand Tue, 04 Oct 2016 09:14:26 +0200 + +gnocchi (3.0.0-1) experimental; urgency=medium + + * New upstream release. + * Fixed (build-)depends for this release. + * Using OpenStack's Gerrit as VCS URLs. 
+ + -- Thomas Goirand Fri, 23 Sep 2016 16:38:32 +0200 gnocchi (2.0.2-7) unstable; urgency=medium diff --git a/debian/compat b/debian/compat index ec635144..f599e28b 100644 --- a/debian/compat +++ b/debian/compat @@ -1 +1 @@ -9 +10 diff --git a/debian/control b/debian/control index 70a83854..9c398ef2 100644 --- a/debian/control +++ b/debian/control @@ -1,105 +1,141 @@ Source: gnocchi Section: net Priority: optional -Maintainer: Debian OpenStack -Uploaders: - Thomas Goirand , -Build-Depends: - debhelper (>= 9), - dh-python, - openstack-pkg-tools (>= 40~), - python-all, - python-pbr, - python-setuptools, - python-sphinx, -Build-Depends-Indep: - alembic (>= 0.7.6), - libpq-dev, - postgresql, - postgresql-server-dev-all, - python-concurrent.futures (>= 2.1.6), - python-coverage (>= 3.6), - python-doc8, - python-fixtures, - python-future (>= 0.15), - python-gabbi (>= 1), - python-jsonpatch (>= 1.9), - python-keystoneclient (>= 1:1.6.0), - python-keystonemiddleware (>= 4.0.0), - python-lz4, - python-mock, - python-msgpack, - python-mysqldb, - python-numpy, - python-oslo.config (>= 1:2.6.0), - python-oslo.db (>= 1.8.0), - python-oslo.log (>= 1.0.0), - python-oslo.middleware, - python-oslo.policy (>= 0.3.0), - python-oslo.serialization (>= 1.4.0), - python-oslo.utils (>= 1.6.0), - python-oslosphinx (>= 2.2.0.0), - python-oslotest, - python-pandas (>= 0.17), - python-paste, - python-pastedeploy, - python-pecan (>= 0.9), - python-prettytable, - python-psycopg2, - python-pymysql, - python-pytimeparse (>= 1.1.5), - python-requests, - python-retrying, - python-six, - python-sphinx-bootstrap-theme, - python-sphinxcontrib.httpdomain, - python-sqlalchemy, - python-sqlalchemy-utils, - python-stevedore, - python-swiftclient (>= 2.5.0), - python-sysv-ipc, - python-tempest-lib (>= 0.2.0), - python-testscenarios, - python-testtools (>= 0.9.38), - python-tooz (>= 1.34), - python-trollius, - python-voluptuous, - python-webob (>= 1.4.1), - python-webtest (>= 2.0.16), - python-werkzeug, - 
python-yaml, - subunit (>= 0.0.18), - testrepository, -Standards-Version: 4.1.0 -Vcs-Browser: https://anonscm.debian.org/cgit/openstack/services/gnocchi.git -Vcs-Git: https://anonscm.debian.org/git/openstack/services/gnocchi.git +Maintainer: PKG OpenStack +Uploaders: Thomas Goirand , +Build-Depends: debhelper (>= 10), + dh-python, + openstack-pkg-tools (>= 54~), + python-all, + python-pbr, + python-setuptools, + python-sphinx, +Build-Depends-Indep: alembic (>= 0.7.6), + libpq-dev, + postgresql, + postgresql-server-dev-all, + python-concurrent.futures (>= 2.1.6), + python-cotyledon (>= 1.2.2), + python-coverage (>= 3.6), + python-doc8, + python-fixtures, + python-future (>= 0.15), + python-gabbi (>= 1.21), + python-iso8601, + python-jsonpatch (>= 1.9), + python-keystoneclient (>= 1:1.6.0), + python-keystonemiddleware (>= 4.0.0), + python-lz4, + python-mock, + python-msgpack, + python-mysqldb, + python-numpy, + python-os-testr, + python-oslo.config (>= 1:2.6.0), + python-oslo.db (>= 4.8.0), + python-oslo.log (>= 2.3.0), + python-oslo.middleware (>= 3.11.0), + python-oslo.policy (>= 0.3.0), + python-oslo.serialization (>= 1.4.0), + python-oslo.utils (>= 3.3.0), + python-oslosphinx (>= 2.2.0.0), + python-oslotest, + python-pandas (>= 0.17), + python-paste, + python-pastedeploy, + python-pecan (>= 0.9), + python-prettytable, + python-psycopg2, + python-pymysql, + python-pytimeparse (>= 1.1.5), + python-requests, + python-retrying, + python-six, + python-sphinx-bootstrap-theme, + python-sphinxcontrib.httpdomain, + python-sqlalchemy, + python-sqlalchemy-utils, + python-stevedore, + python-swiftclient (>= 3.1.0), + python-sysv-ipc, + python-testscenarios, + python-testtools (>= 0.9.38), + python-tooz (>= 1.38), + python-trollius, + python-voluptuous, + python-webob (>= 1.4.1), + python-webtest (>= 2.0.16), + python-werkzeug, + python-yaml, + subunit (>= 0.0.18), + testrepository, +Standards-Version: 3.9.8 +Vcs-Browser: https://git.openstack.org/cgit/openstack/deb-gnocchi 
+Vcs-Git: https://git.openstack.org/openstack/deb-gnocchi -b debian/newton Homepage: https://github.com/openstack/gnocchi -Package: gnocchi-api +Package: python-gnocchi +Section: python Architecture: all -Depends: - adduser, - gnocchi-common (= ${binary:Version}), - python-openstackclient, - q-text-as-data, - ${misc:Depends}, - ${python:Depends}, -Description: Metric as a Service - API daemon +Depends: alembic (>= 0.7.6), + python-concurrent.futures (>= 2.1.6), + python-cotyledon (>= 1.2.2), + python-future (>= 0.15), + python-iso8601, + python-jsonpatch (>= 1.9), + python-keystoneclient (>= 1:1.6.0), + python-keystonemiddleware (>= 4.0.0), + python-lz4, + python-msgpack, + python-numpy, + python-oslo.config (>= 1:2.6.0), + python-oslo.db (>= 4.8.0), + python-oslo.log (>= 2.3.0), + python-oslo.middleware (>= 3.11.0), + python-oslo.policy (>= 0.3.0), + python-oslo.serialization (>= 1.4.0), + python-oslo.utils (>= 3.3.0), + python-oslosphinx (>= 2.2.0.0), + python-pandas (>= 0.17), + python-paste, + python-pastedeploy, + python-pbr, + python-pecan (>= 0.9), + python-prettytable, + python-psycopg2, + python-pymysql, + python-pytimeparse (>= 1.1.5), + python-requests, + python-retrying, + python-six, + python-sqlalchemy, + python-sqlalchemy-utils, + python-stevedore, + python-swiftclient (>= 3.1.0), + python-tooz (>= 1.38), + python-trollius, + python-voluptuous, + python-webob (>= 1.4.1), + python-werkzeug, + python-yaml, + ${misc:Depends}, + ${python:Depends}, +Suggests: python-gnocchi-doc, +Description: Metric as a Service - Python 2.x Gnocchi is a service for managing a set of resources and storing metrics about them, in a scalable and resilient way. Its functionalities are exposed over an HTTP REST API. . - This package contains the API server. + This package contains the Python 2.x module. 
Package: gnocchi-common Architecture: all -Depends: - adduser, - dbconfig-common, - debconf, - python-gnocchi (= ${binary:Version}), - ${misc:Depends}, - ${python:Depends}, +Depends: adduser, + dbconfig-common, + debconf, + python-gnocchi (= ${binary:Version}), + ${misc:Depends}, + ${python:Depends}, Description: Metric as a Service - common files Gnocchi is a service for managing a set of resources and storing metrics about them, in a scalable and resilient way. Its functionalities are exposed over an @@ -107,68 +143,43 @@ Description: Metric as a Service - common files . This package contains the common files. -Package: gnocchi-metricd +Package: gnocchi-api Architecture: all -Depends: - gnocchi-common (= ${binary:Version}), - ${misc:Depends}, - ${python:Depends}, -Description: Metric as a Service - metric daemon +Depends: adduser, + gnocchi-common (= ${binary:Version}), + python-openstackclient, + lsb-base, + q-text-as-data, + ${misc:Depends}, + ${python:Depends}, +Description: Metric as a Service - API daemon Gnocchi is a service for managing a set of resources and storing metrics about them, in a scalable and resilient way. Its functionalities are exposed over an HTTP REST API. . - This package contains the metric daemon. + This package contains the API server. 
-Package: python-gnocchi -Section: python +Package: gnocchi-metricd Architecture: all -Depends: - alembic (>= 0.7.6), - python-concurrent.futures (>= 2.1.6), - python-future (>= 0.15), - python-jsonpatch (>= 1.9), - python-keystoneclient (>= 1:1.6.0), - python-keystonemiddleware (>= 4.0.0), - python-lz4, - python-msgpack, - python-numpy, - python-oslo.config (>= 1:2.6.0), - python-oslo.db (>= 1.8.0), - python-oslo.log (>= 1.0.0), - python-oslo.middleware, - python-oslo.policy (>= 0.3.0), - python-oslo.serialization (>= 1.4.0), - python-oslo.utils (>= 1.6.0), - python-oslosphinx (>= 2.2.0.0), - python-pandas (>= 0.17), - python-paste, - python-pastedeploy, - python-pecan (>= 0.9), - python-prettytable, - python-psycopg2, - python-pymysql, - python-pytimeparse (>= 1.1.5), - python-requests, - python-retrying, - python-six, - python-sqlalchemy, - python-sqlalchemy-utils, - python-stevedore, - python-swiftclient (>= 2.5.0), - python-tooz (>= 1.34), - python-trollius, - python-voluptuous, - python-webob (>= 1.4.1), - python-werkzeug, - python-yaml, - ${misc:Depends}, - ${python:Depends}, -Suggests: - python-gnocchi-doc, -Description: Metric as a Service - Python 2.x +Depends: gnocchi-common (= ${binary:Version}), + lsb-base, + ${misc:Depends}, + ${python:Depends}, +Description: Metric as a Service - metric daemon Gnocchi is a service for managing a set of resources and storing metrics about them, in a scalable and resilient way. Its functionalities are exposed over an HTTP REST API. . - This package contains the Python 2.x module. + This package contains the metric daemon. + +#Package: gnocchi-doc +#Section: doc +#Architecture: all +#Depends: ${misc:Depends}, +# ${sphinxdoc:Depends}, +#Description: Metric as a Service - doc +# Gnocchi is a service for managing a set of resources and storing metrics about +# them, in a scalable and resilient way. Its functionalities are exposed over an +# HTTP REST API. +# . +# This package contains the documentation. 
diff --git a/debian/copyright b/debian/copyright index 3deff357..5d11d0d6 100644 --- a/debian/copyright +++ b/debian/copyright @@ -1,4 +1,4 @@ -Format: https://www.debian.org/doc/packaging-manuals/copyright-format/1.0/ +Format: http://www.debian.org/doc/packaging-manuals/copyright-format/1.0/ Upstream-Name: gnocchi Upstream-Contact: Julien Danjou Source: https://github.com/openstack/gnocchi diff --git a/debian/gnocchi-api.postinst.in b/debian/gnocchi-api.postinst.in old mode 100755 new mode 100644 diff --git a/debian/gnocchi-common.install b/debian/gnocchi-common.install index 10da746c..339258a5 100644 --- a/debian/gnocchi-common.install +++ b/debian/gnocchi-common.install @@ -1,3 +1,3 @@ -etc/gnocchi/api-paste.ini /usr/share/gnocchi-common etc/gnocchi/policy.json /usr/share/gnocchi-common +etc/gnocchi/api-paste.ini /usr/share/gnocchi-common gnocchi/rest/app.wsgi /usr/share/gnocchi-common diff --git a/debian/gnocchi-common.postinst.in b/debian/gnocchi-common.postinst.in old mode 100755 new mode 100644 diff --git a/debian/gnocchi-common.postrm b/debian/gnocchi-common.postrm old mode 100755 new mode 100644 diff --git a/debian/po/de.po b/debian/po/de.po index 0d4a16fd..77e37f0d 100644 --- a/debian/po/de.po +++ b/debian/po/de.po @@ -1,14 +1,14 @@ -# German debconf translation of glance. -# This file is distributed under the same license as the glance package. +# German debconf translation of gnocchi. +# This file is distributed under the same license as the gnocchi package. # Copyright (C) 2010 United States Government,2010-2011 OpenStack LLC. -# Copyright (C) of this file 2012-2014 Chris Leick . +# Copyright (C) of this file 2012-2016 Chris Leick . 
# msgid "" msgstr "" -"Project-Id-Version: glance 2013.2.1-1\n" +"Project-Id-Version: gnocchi 3.0.0-2\n" "Report-Msgid-Bugs-To: gnocchi@packages.debian.org\n" "POT-Creation-Date: 2016-03-29 13:10+0000\n" -"PO-Revision-Date: 2014-01-09 22:51+0100\n" +"PO-Revision-Date: 2016-10-29 18:15+0100\n" "Last-Translator: Chris Leick \n" "Language-Team: German \n" "Language: de\n" @@ -25,17 +25,12 @@ msgstr "Rechnername des Authentifizierungsservers:" #. Type: string #. Description #: ../gnocchi-common.templates:2001 -#, fuzzy -#| msgid "" -#| "Please specify the hostname of the authentication server for Glance. " -#| "Typically this is also the hostname of the OpenStack Identity Service " -#| "(Keystone)." msgid "" "Please specify the hostname of the authentication server for Gnocchi. " "Typically this is also the hostname of the OpenStack Identity Service " "(Keystone)." msgstr "" -"Bitte geben Sie den Rechnernamen des Glance-Authentifizierungsservers an. " +"Bitte geben Sie den Rechnernamen des Gnocci-Authentifizierungsservers an. " "Typischerweise ist das gleichzeitig der Rechnername Ihres OpenStack-" "Identitätsdienstes (Keystone)." @@ -96,26 +91,19 @@ msgstr "" #. Type: boolean #. Description #: ../gnocchi-common.templates:6001 -#, fuzzy -#| msgid "Set up a database for Glance?" msgid "Set up a database for Gnocchi?" -msgstr "Eine Datenbank für Glance einrichten?" +msgstr "Eine Datenbank für Gnocci einrichten?" #. Type: boolean #. Description #: ../gnocchi-common.templates:6001 -#, fuzzy -#| msgid "" -#| "No database has been set up for glance-registry or glance-api to use. " -#| "Before continuing, you should make sure you have the following " -#| "information:" msgid "" "No database has been set up for Gnocchi to use. Before continuing, you " "should make sure you have the following information:" msgstr "" -"Es wurde keine Datenbank für die Benutzung mit der Glance-Registry oder das " -"Glance-API eingerichtet. 
Bevor Sie fortfahren, sollten Sie sicherstellen, " -"dass Sie die folgenden Informationen haben:" +"Es wurde keine Datenbank für die Benutzung mit Gnocci eingerichtet. Bevor Sie " +"fortfahren, sollten Sie sicherstellen, dass Sie die folgenden Informationen " +"haben:" #. Type: boolean #. Description @@ -146,24 +134,18 @@ msgstr "" #. Type: boolean #. Description #: ../gnocchi-common.templates:6001 -#, fuzzy -#| msgid "" -#| "You can change this setting later on by running \"dpkg-reconfigure -plow " -#| "glance-common\"." msgid "" "You can change this setting later on by running \"dpkg-reconfigure -plow " "gnocchi-common\"." msgstr "" "Sie können diese Einstellung später ändern, indem Sie »dpkg-reconfigure -" -"plow glance-common« ausführen." +"plow gnocci-common« ausführen." #. Type: boolean #. Description #: ../gnocchi-api.templates:2001 -#, fuzzy -#| msgid "Register Glance in the Keystone endpoint catalog?" msgid "Register Gnocchi in the Keystone endpoint catalog?" -msgstr "Glance im Keystone-Endpunktkatalog registrieren?" +msgstr "Gnocci im Keystone-Endpunktkatalog registrieren?" #. Type: boolean #. Description @@ -180,18 +162,15 @@ msgstr "" #. Type: boolean #. Description #: ../gnocchi-api.templates:2001 -#, fuzzy -#| msgid "" -#| "Note that you will need to have an up and running Keystone server on " -#| "which to connect using the Keystone authentication token." msgid "" "Note that you will need to have an up and running Keystone server on which " "to connect using a known admin project name, admin username and password. " "The admin auth token is not used anymore." msgstr "" -"Beachten Sie, dass Sie einen gestarteten und laufenden Keystone-Server haben " -"müssen, mit dem Sie sich anhand des Keystone-Authentifizierungs-Tokens " -"verbinden." 
+"Beachten Sie, dass Sie einen konfigurierten und laufenden Keystone-Server " +"haben müssen, mit dem Sie sich anhand eines bekannten " +"Administratorprojektnamens, Administratorbenutzernamens und Passworts " +"verbinden. Das Administratorauthentifizierungs-Token wird nicht mehr benutzt." #. Type: string #. Description @@ -202,25 +181,19 @@ msgstr "IP-Adresse des Keystone-Servers:" #. Type: string #. Description #: ../gnocchi-api.templates:3001 -#, fuzzy -#| msgid "" -#| "Please enter the IP address of the Keystone server, so that glance-api " -#| "can contact Keystone to do the Glance service and endpoint creation." msgid "" "Please enter the IP address of the Keystone server, so that gnocchi-api can " "contact Keystone to do the Gnocchi service and endpoint creation." msgstr "" -"Bitte geben Sie die IP-Adresse des Keystone-Servers an, so dass Glance-API " -"Keystone kontaktieren kann, um den Glance-Dienst und den Endpunkt zu " +"Bitte geben Sie die IP-Adresse des Keystone-Servers an, so dass Gnocci-API " +"Keystone kontaktieren kann, um den Gnocci-Dienst und den Endpunkt zu " "erstellen." #. Type: string #. Description #: ../gnocchi-api.templates:4001 -#, fuzzy -#| msgid "Keystone authentication token:" msgid "Keystone admin name:" -msgstr "Keystone-Authentifizierungs-Token:" +msgstr "Keystone-Administratorname:" #. Type: string #. Description @@ -234,35 +207,34 @@ msgid "" "To register the service endpoint, this package needs to know the Admin " "login, name, project name, and password to the Keystone server." msgstr "" +"Um den Dienstendpunkt zu registrieren, muss dieses Paket den " +"Administratoranmeldenamen, Namen, Projektnamen und das Passwort für den " +"Keystone-Server kennen." #. Type: string #. Description #: ../gnocchi-api.templates:5001 msgid "Keystone admin project name:" -msgstr "" +msgstr "Keystone-Administratorprojektname:" #. Type: password #. 
Description #: ../gnocchi-api.templates:6001 msgid "Keystone admin password:" -msgstr "" +msgstr "Keystone-Administratorpasswort:" #. Type: string #. Description #: ../gnocchi-api.templates:7001 -#, fuzzy -#| msgid "Glance endpoint IP address:" msgid "Gnocchi endpoint IP address:" -msgstr "IP-Adresse des Glance-Endpunkts" +msgstr "IP-Adresse des Gnocci-Endpunkts" #. Type: string #. Description #: ../gnocchi-api.templates:7001 -#, fuzzy -#| msgid "Please enter the IP address that will be used to contact Glance." msgid "Please enter the IP address that will be used to contact Gnocchi." msgstr "" -"Bitte geben Sie die IP-Adresse ein, die zum Kontaktieren von Glance benutzt " +"Bitte geben Sie die IP-Adresse ein, die zum Kontaktieren von Gnocci benutzt " "wird." #. Type: string @@ -294,74 +266,3 @@ msgstr "" "OpenStack unterstützt die Verwendung von Verfügbarkeitszonen, bei der jede " "Region einen Ort repräsentiert. Bitte geben Sie die Zone, die Sie benutzen " "möchten, bei der Registrierung des Endpunkts an." - -#, fuzzy -#~| msgid "" -#~| "To configure its endpoint in Keystone, glance-api needs the Keystone " -#~| "authentication token." -#~ msgid "" -#~ "To configure its endpoint in Keystone, gnocchi-api needs the Keystone " -#~ "authentication token." -#~ msgstr "" -#~ "Glance-API benötigt das Keystone-Authentifizierungs-Token, um seinen " -#~ "Endpunkt in Keystone zu konfigurieren." - -#~ msgid "keystone" -#~ msgstr "Keystone" - -#~ msgid "caching" -#~ msgstr "Zwischenspeichern" - -#~ msgid "keystone+caching" -#~ msgstr "Keystone+Zwischenspeichern" - -#~ msgid "cachemanagement" -#~ msgstr "Zwischenspeicherverwaltung" - -#~ msgid "keystone+cachemanagement" -#~ msgstr "Keystone+Zwischenspeicherverwaltung" - -#~ msgid "Pipeline flavor:" -#~ msgstr "Pipeline-Variante:" - -#~ msgid "Please specify the flavor of the pipeline to be used by Glance." -#~ msgstr "" -#~ "Bitte geben Sie die Variante der von Glance zu benutzenden Pipeline an." 
- -#~ msgid "" -#~ "If you use the OpenStack Identity Service (Keystone), you might want to " -#~ "select \"keystone\". If you don't use this service, you can safely choose " -#~ "\"caching\" only." -#~ msgstr "" -#~ "Falls Sie den OpenStack-Identitätsdienst (Keystone) verwenden, möchten " -#~ "Sie möglicherweise »Keystone« auswählen. Falls Sie diesen Dienst nicht " -#~ "nutzen, können Sie problemlos »Zwischenspeichern« auswählen." - -#~ msgid "IP address of your RabbitMQ host:" -#~ msgstr "IP-Adresse Ihres RabbitMQ-Rechners:" - -#~ msgid "" -#~ "In order to interoperate with other components of OpenStack, this package " -#~ "needs to connect to a central RabbitMQ server." -#~ msgstr "" -#~ "Um mit weiteren Bestandteilen von OpenStack zusammenzuarbeiten, muss sich " -#~ "dieses Paket mit einem zentralen RabbitMQ-Server verbinden." - -#~ msgid "Please specify the IP address of that server." -#~ msgstr "Bitte geben Sie die IP-Adresse dieses Servers an." - -#~ msgid "Username for connection to the RabbitMQ server:" -#~ msgstr "Benutzername für die Verbindung mit dem RabbitMQ-Server:" - -#~ msgid "Please specify the username used to connect to the RabbitMQ server." -#~ msgstr "" -#~ "Bitte geben Sie den Benutzernamen ein, den Sie zum Verbinden mit dem " -#~ "RabbitMQ-Server verwenden." - -#~ msgid "Password for connection to the RabbitMQ server:" -#~ msgstr "Passwort für die Verbindung mit dem RabbitMQ-Server:" - -#~ msgid "Please specify the password used to connect to the RabbitMQ server." -#~ msgstr "" -#~ "Bitte geben Sie das Passwort ein, das Sie zum Verbinden mit dem RabbitMQ-" -#~ "Server verwenden." diff --git a/debian/po/it.po b/debian/po/it.po index 06e65766..7055f7ce 100644 --- a/debian/po/it.po +++ b/debian/po/it.po @@ -1,13 +1,13 @@ -# Italian description of glance debconf messages. -# Copyright (C) 2012, glance package copyright holder. -# This file is distributed under the same license as the glance package. -# Beatrice Torracca , 2012, 2013, 2014. 
+# Italian description of gnocchi debconf messages. +# Copyright (C) 2016, gnocchi package copyright holder. +# This file is distributed under the same license as the gnocchi package. +# Beatrice Torracca , 2012, 2013, 2014, 2016. msgid "" msgstr "" "Project-Id-Version: glance\n" "Report-Msgid-Bugs-To: gnocchi@packages.debian.org\n" "POT-Creation-Date: 2016-03-29 13:10+0000\n" -"PO-Revision-Date: 2014-04-21 10:03+0200\n" +"PO-Revision-Date: 2016-09-30 07:28+0200\n" "Last-Translator: Beatrice Torracca \n" "Language-Team: Italian \n" "Language: it\n" @@ -94,7 +94,6 @@ msgstr "Impostare un database per Gnocchi?" #. Type: boolean #. Description #: ../gnocchi-common.templates:6001 -#, fuzzy #| msgid "" #| "No database has been set up for glance-registry or glance-api to use. " #| "Before continuing, you should make sure you have the following " @@ -103,9 +102,8 @@ msgid "" "No database has been set up for Gnocchi to use. Before continuing, you " "should make sure you have the following information:" msgstr "" -"Non è stato impostato alcun database per essere usato da glance-registry o " -"glance-api. Prima di continuare assicurarsi di avere le seguenti " -"informazioni:" +"Non è stato impostato alcun database per l'uso da parte di Gnocchi. Prima di " +"continuare, assicurarsi di avere le seguenti informazioni:" #. Type: boolean #. Description @@ -135,7 +133,6 @@ msgstr "" #. Type: boolean #. Description #: ../gnocchi-common.templates:6001 -#, fuzzy #| msgid "" #| "You can change this setting later on by running \"dpkg-reconfigure -plow " #| "glance-common\"." @@ -144,7 +141,7 @@ msgid "" "gnocchi-common\"." msgstr "" "È possibile cambiare questa impostazione successivamente eseguendo «dpkg-" -"reconfigure -plow glance-common»." +"reconfigure -plow gnocchi-common»." #. Type: boolean #. Description @@ -167,7 +164,6 @@ msgstr "" #. Type: boolean #. 
Description #: ../gnocchi-api.templates:2001 -#, fuzzy #| msgid "" #| "Note that you will need to have an up and running Keystone server on " #| "which to connect using the Keystone authentication token." @@ -177,7 +173,9 @@ msgid "" "The admin auth token is not used anymore." msgstr "" "Notare che sarà necessario avere un server Keystone in funzione a cui " -"connettersi usando il token di autenticazione Keystone." +"connettersi usando un nome di progetto di amministrazione conosciuto, un " +"nome utente e password di amministratore. Il token di autenticazione di " +"amministratore non è più usato." #. Type: string #. Description @@ -199,10 +197,9 @@ msgstr "" #. Type: string #. Description #: ../gnocchi-api.templates:4001 -#, fuzzy #| msgid "Keystone authentication token:" msgid "Keystone admin name:" -msgstr "Token di autenticazione Keystone:" +msgstr "Nome amministratore Keystone:" #. Type: string #. Description @@ -216,18 +213,21 @@ msgid "" "To register the service endpoint, this package needs to know the Admin " "login, name, project name, and password to the Keystone server." msgstr "" +"Per registrare il punto terminale del servizio questo pacchetto deve " +"conoscere il login, il nome, il nome del progetto e la password " +"dell'amministratore per il server Keystone." #. Type: string #. Description #: ../gnocchi-api.templates:5001 msgid "Keystone admin project name:" -msgstr "" +msgstr "Nome del progetto di amministrazione Keystone:" #. Type: password #. Description #: ../gnocchi-api.templates:6001 msgid "Keystone admin password:" -msgstr "" +msgstr "Password amministratore Keystone:" #. Type: string #. Description @@ -270,14 +270,3 @@ msgstr "" "OpenStack gestisce le zone di disponibilità, con ogni regione che " "rappresenta una posizione. Inserire la zona che si desidera usare durante la " "registrazione del punto terminale." 
- -#, fuzzy -#~| msgid "" -#~| "To configure its endpoint in Keystone, glance-api needs the Keystone " -#~| "authentication token." -#~ msgid "" -#~ "To configure its endpoint in Keystone, gnocchi-api needs the Keystone " -#~ "authentication token." -#~ msgstr "" -#~ "Per configurare il proprio punto terminale in Keystone, glance-api ha " -#~ "bisogno del token di autenticazione Keystone." diff --git a/debian/rules b/debian/rules index 082d2b44..39739adf 100755 --- a/debian/rules +++ b/debian/rules @@ -1,11 +1,7 @@ #!/usr/bin/make -f -PYTHONS:=$(shell pyversions -vr) -#PYTHON3S:=$(shell py3versions -vr) - include /usr/share/openstack-pkg-tools/pkgos.make -export OSLO_PACKAGE_VERSION=$(VERSION) UNIT_TEST_BLACKLIST = test_carbonara.CarbonaraCmd.* %: diff --git a/debian/source/options b/debian/source/options index cb61fa52..91222451 100644 --- a/debian/source/options +++ b/debian/source/options @@ -1 +1,2 @@ extend-diff-ignore = "^[^/]*[.]egg-info/" +extend-diff-ignore = "^[.]gitreview$" -- GitLab From 9d380a0365b488bffb8c836053337866a6b21dcb Mon Sep 17 00:00:00 2001 From: Thomas Goirand Date: Thu, 26 Oct 2017 06:34:19 +0200 Subject: [PATCH 1045/1483] Updating vcs fields. --- debian/changelog | 6 ++++++ debian/control | 4 ++-- debian/rules | 1 + 3 files changed, 9 insertions(+), 2 deletions(-) diff --git a/debian/changelog b/debian/changelog index b97e7f98..e9901d61 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +gnocchi (3.0.4-5) UNRELEASED; urgency=medium + + * Updating vcs fields. + + -- Thomas Goirand Thu, 26 Oct 2017 06:33:31 +0200 + gnocchi (3.0.4-4) unstable; urgency=medium * German debconf translation update (Closes: #842481). 
diff --git a/debian/control b/debian/control index 9c398ef2..1c5cd9fb 100644 --- a/debian/control +++ b/debian/control @@ -70,8 +70,8 @@ Build-Depends-Indep: alembic (>= 0.7.6), subunit (>= 0.0.18), testrepository, Standards-Version: 3.9.8 -Vcs-Browser: https://git.openstack.org/cgit/openstack/deb-gnocchi -Vcs-Git: https://git.openstack.org/openstack/deb-gnocchi -b debian/newton +Vcs-Browser: https://anonscm.debian.org/cgit/openstack/services/gnocchi.git +Vcs-Git: https://anonscm.debian.org/git/openstack/services/gnocchi.git Homepage: https://github.com/openstack/gnocchi Package: python-gnocchi diff --git a/debian/rules b/debian/rules index 39739adf..bb8e5243 100755 --- a/debian/rules +++ b/debian/rules @@ -1,5 +1,6 @@ #!/usr/bin/make -f +UPSTREAM_GIT:=https://github.com/gnocchixyz/gnocchi include /usr/share/openstack-pkg-tools/pkgos.make UNIT_TEST_BLACKLIST = test_carbonara.CarbonaraCmd.* -- GitLab From 9950743ff88af98a1685745850d2b25eb6b2cb50 Mon Sep 17 00:00:00 2001 From: Thomas Goirand Date: Thu, 26 Oct 2017 06:34:41 +0200 Subject: [PATCH 1046/1483] Updating copyright format url. --- debian/changelog | 1 + debian/copyright | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/debian/changelog b/debian/changelog index e9901d61..944b4444 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,6 +1,7 @@ gnocchi (3.0.4-5) UNRELEASED; urgency=medium * Updating vcs fields. + * Updating copyright format url. 
-- Thomas Goirand Thu, 26 Oct 2017 06:33:31 +0200 diff --git a/debian/copyright b/debian/copyright index 5d11d0d6..3deff357 100644 --- a/debian/copyright +++ b/debian/copyright @@ -1,4 +1,4 @@ -Format: http://www.debian.org/doc/packaging-manuals/copyright-format/1.0/ +Format: https://www.debian.org/doc/packaging-manuals/copyright-format/1.0/ Upstream-Name: gnocchi Upstream-Contact: Julien Danjou Source: https://github.com/openstack/gnocchi -- GitLab From 7be0f142b7ff0d9e4b5f32e97f98350cde8373d3 Mon Sep 17 00:00:00 2001 From: Thomas Goirand Date: Thu, 26 Oct 2017 06:35:07 +0200 Subject: [PATCH 1047/1483] Updating maintainer field. --- debian/changelog | 1 + debian/control | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/debian/changelog b/debian/changelog index 944b4444..d851a1f7 100644 --- a/debian/changelog +++ b/debian/changelog @@ -2,6 +2,7 @@ gnocchi (3.0.4-5) UNRELEASED; urgency=medium * Updating vcs fields. * Updating copyright format url. + * Updating maintainer field. -- Thomas Goirand Thu, 26 Oct 2017 06:33:31 +0200 diff --git a/debian/control b/debian/control index 1c5cd9fb..e4a975bb 100644 --- a/debian/control +++ b/debian/control @@ -1,7 +1,7 @@ Source: gnocchi Section: net Priority: optional -Maintainer: PKG OpenStack +Maintainer: Debian OpenStack Uploaders: Thomas Goirand , Build-Depends: debhelper (>= 10), dh-python, -- GitLab From afd55bff46283a650d9e743a383065268d400a56 Mon Sep 17 00:00:00 2001 From: Thomas Goirand Date: Thu, 26 Oct 2017 06:35:25 +0200 Subject: [PATCH 1048/1483] Running wrap-and-sort -bast. --- debian/changelog | 1 + debian/control | 296 +++++++++++++++++----------------- debian/gnocchi-common.install | 2 +- 3 files changed, 148 insertions(+), 151 deletions(-) diff --git a/debian/changelog b/debian/changelog index d851a1f7..c411f6c6 100644 --- a/debian/changelog +++ b/debian/changelog @@ -3,6 +3,7 @@ gnocchi (3.0.4-5) UNRELEASED; urgency=medium * Updating vcs fields. * Updating copyright format url. 
* Updating maintainer field. + * Running wrap-and-sort -bast. -- Thomas Goirand Thu, 26 Oct 2017 06:33:31 +0200 diff --git a/debian/control b/debian/control index e4a975bb..ea159808 100644 --- a/debian/control +++ b/debian/control @@ -2,140 +2,107 @@ Source: gnocchi Section: net Priority: optional Maintainer: Debian OpenStack -Uploaders: Thomas Goirand , -Build-Depends: debhelper (>= 10), - dh-python, - openstack-pkg-tools (>= 54~), - python-all, - python-pbr, - python-setuptools, - python-sphinx, -Build-Depends-Indep: alembic (>= 0.7.6), - libpq-dev, - postgresql, - postgresql-server-dev-all, - python-concurrent.futures (>= 2.1.6), - python-cotyledon (>= 1.2.2), - python-coverage (>= 3.6), - python-doc8, - python-fixtures, - python-future (>= 0.15), - python-gabbi (>= 1.21), - python-iso8601, - python-jsonpatch (>= 1.9), - python-keystoneclient (>= 1:1.6.0), - python-keystonemiddleware (>= 4.0.0), - python-lz4, - python-mock, - python-msgpack, - python-mysqldb, - python-numpy, - python-os-testr, - python-oslo.config (>= 1:2.6.0), - python-oslo.db (>= 4.8.0), - python-oslo.log (>= 2.3.0), - python-oslo.middleware (>= 3.11.0), - python-oslo.policy (>= 0.3.0), - python-oslo.serialization (>= 1.4.0), - python-oslo.utils (>= 3.3.0), - python-oslosphinx (>= 2.2.0.0), - python-oslotest, - python-pandas (>= 0.17), - python-paste, - python-pastedeploy, - python-pecan (>= 0.9), - python-prettytable, - python-psycopg2, - python-pymysql, - python-pytimeparse (>= 1.1.5), - python-requests, - python-retrying, - python-six, - python-sphinx-bootstrap-theme, - python-sphinxcontrib.httpdomain, - python-sqlalchemy, - python-sqlalchemy-utils, - python-stevedore, - python-swiftclient (>= 3.1.0), - python-sysv-ipc, - python-testscenarios, - python-testtools (>= 0.9.38), - python-tooz (>= 1.38), - python-trollius, - python-voluptuous, - python-webob (>= 1.4.1), - python-webtest (>= 2.0.16), - python-werkzeug, - python-yaml, - subunit (>= 0.0.18), - testrepository, +Uploaders: + Thomas 
Goirand , +Build-Depends: + debhelper (>= 10), + dh-python, + openstack-pkg-tools (>= 54~), + python-all, + python-pbr, + python-setuptools, + python-sphinx, +Build-Depends-Indep: + alembic (>= 0.7.6), + libpq-dev, + postgresql, + postgresql-server-dev-all, + python-concurrent.futures (>= 2.1.6), + python-cotyledon (>= 1.2.2), + python-coverage (>= 3.6), + python-doc8, + python-fixtures, + python-future (>= 0.15), + python-gabbi (>= 1.21), + python-iso8601, + python-jsonpatch (>= 1.9), + python-keystoneclient (>= 1:1.6.0), + python-keystonemiddleware (>= 4.0.0), + python-lz4, + python-mock, + python-msgpack, + python-mysqldb, + python-numpy, + python-os-testr, + python-oslo.config (>= 1:2.6.0), + python-oslo.db (>= 4.8.0), + python-oslo.log (>= 2.3.0), + python-oslo.middleware (>= 3.11.0), + python-oslo.policy (>= 0.3.0), + python-oslo.serialization (>= 1.4.0), + python-oslo.utils (>= 3.3.0), + python-oslosphinx (>= 2.2.0.0), + python-oslotest, + python-pandas (>= 0.17), + python-paste, + python-pastedeploy, + python-pecan (>= 0.9), + python-prettytable, + python-psycopg2, + python-pymysql, + python-pytimeparse (>= 1.1.5), + python-requests, + python-retrying, + python-six, + python-sphinx-bootstrap-theme, + python-sphinxcontrib.httpdomain, + python-sqlalchemy, + python-sqlalchemy-utils, + python-stevedore, + python-swiftclient (>= 3.1.0), + python-sysv-ipc, + python-testscenarios, + python-testtools (>= 0.9.38), + python-tooz (>= 1.38), + python-trollius, + python-voluptuous, + python-webob (>= 1.4.1), + python-webtest (>= 2.0.16), + python-werkzeug, + python-yaml, + subunit (>= 0.0.18), + testrepository, Standards-Version: 3.9.8 Vcs-Browser: https://anonscm.debian.org/cgit/openstack/services/gnocchi.git Vcs-Git: https://anonscm.debian.org/git/openstack/services/gnocchi.git Homepage: https://github.com/openstack/gnocchi -Package: python-gnocchi -Section: python +Package: gnocchi-api Architecture: all -Depends: alembic (>= 0.7.6), - python-concurrent.futures (>= 
2.1.6), - python-cotyledon (>= 1.2.2), - python-future (>= 0.15), - python-iso8601, - python-jsonpatch (>= 1.9), - python-keystoneclient (>= 1:1.6.0), - python-keystonemiddleware (>= 4.0.0), - python-lz4, - python-msgpack, - python-numpy, - python-oslo.config (>= 1:2.6.0), - python-oslo.db (>= 4.8.0), - python-oslo.log (>= 2.3.0), - python-oslo.middleware (>= 3.11.0), - python-oslo.policy (>= 0.3.0), - python-oslo.serialization (>= 1.4.0), - python-oslo.utils (>= 3.3.0), - python-oslosphinx (>= 2.2.0.0), - python-pandas (>= 0.17), - python-paste, - python-pastedeploy, - python-pbr, - python-pecan (>= 0.9), - python-prettytable, - python-psycopg2, - python-pymysql, - python-pytimeparse (>= 1.1.5), - python-requests, - python-retrying, - python-six, - python-sqlalchemy, - python-sqlalchemy-utils, - python-stevedore, - python-swiftclient (>= 3.1.0), - python-tooz (>= 1.38), - python-trollius, - python-voluptuous, - python-webob (>= 1.4.1), - python-werkzeug, - python-yaml, - ${misc:Depends}, - ${python:Depends}, -Suggests: python-gnocchi-doc, -Description: Metric as a Service - Python 2.x +Depends: + adduser, + gnocchi-common (= ${binary:Version}), + lsb-base, + python-openstackclient, + q-text-as-data, + ${misc:Depends}, + ${python:Depends}, +Description: Metric as a Service - API daemon Gnocchi is a service for managing a set of resources and storing metrics about them, in a scalable and resilient way. Its functionalities are exposed over an HTTP REST API. . - This package contains the Python 2.x module. + This package contains the API server. 
Package: gnocchi-common Architecture: all -Depends: adduser, - dbconfig-common, - debconf, - python-gnocchi (= ${binary:Version}), - ${misc:Depends}, - ${python:Depends}, +Depends: + adduser, + dbconfig-common, + debconf, + python-gnocchi (= ${binary:Version}), + ${misc:Depends}, + ${python:Depends}, Description: Metric as a Service - common files Gnocchi is a service for managing a set of resources and storing metrics about them, in a scalable and resilient way. Its functionalities are exposed over an @@ -143,43 +110,72 @@ Description: Metric as a Service - common files . This package contains the common files. -Package: gnocchi-api +Package: gnocchi-metricd Architecture: all -Depends: adduser, - gnocchi-common (= ${binary:Version}), - python-openstackclient, - lsb-base, - q-text-as-data, - ${misc:Depends}, - ${python:Depends}, -Description: Metric as a Service - API daemon +Depends: + gnocchi-common (= ${binary:Version}), + lsb-base, + ${misc:Depends}, + ${python:Depends}, +Description: Metric as a Service - metric daemon Gnocchi is a service for managing a set of resources and storing metrics about them, in a scalable and resilient way. Its functionalities are exposed over an HTTP REST API. . - This package contains the API server. + This package contains the metric daemon. 
-Package: gnocchi-metricd +Package: python-gnocchi +Section: python Architecture: all -Depends: gnocchi-common (= ${binary:Version}), - lsb-base, - ${misc:Depends}, - ${python:Depends}, -Description: Metric as a Service - metric daemon +Depends: + alembic (>= 0.7.6), + python-concurrent.futures (>= 2.1.6), + python-cotyledon (>= 1.2.2), + python-future (>= 0.15), + python-iso8601, + python-jsonpatch (>= 1.9), + python-keystoneclient (>= 1:1.6.0), + python-keystonemiddleware (>= 4.0.0), + python-lz4, + python-msgpack, + python-numpy, + python-oslo.config (>= 1:2.6.0), + python-oslo.db (>= 4.8.0), + python-oslo.log (>= 2.3.0), + python-oslo.middleware (>= 3.11.0), + python-oslo.policy (>= 0.3.0), + python-oslo.serialization (>= 1.4.0), + python-oslo.utils (>= 3.3.0), + python-oslosphinx (>= 2.2.0.0), + python-pandas (>= 0.17), + python-paste, + python-pastedeploy, + python-pbr, + python-pecan (>= 0.9), + python-prettytable, + python-psycopg2, + python-pymysql, + python-pytimeparse (>= 1.1.5), + python-requests, + python-retrying, + python-six, + python-sqlalchemy, + python-sqlalchemy-utils, + python-stevedore, + python-swiftclient (>= 3.1.0), + python-tooz (>= 1.38), + python-trollius, + python-voluptuous, + python-webob (>= 1.4.1), + python-werkzeug, + python-yaml, + ${misc:Depends}, + ${python:Depends}, +Suggests: + python-gnocchi-doc, +Description: Metric as a Service - Python 2.x Gnocchi is a service for managing a set of resources and storing metrics about them, in a scalable and resilient way. Its functionalities are exposed over an HTTP REST API. . - This package contains the metric daemon. - -#Package: gnocchi-doc -#Section: doc -#Architecture: all -#Depends: ${misc:Depends}, -# ${sphinxdoc:Depends}, -#Description: Metric as a Service - doc -# Gnocchi is a service for managing a set of resources and storing metrics about -# them, in a scalable and resilient way. Its functionalities are exposed over an -# HTTP REST API. -# . 
-# This package contains the documentation. + This package contains the Python 2.x module. diff --git a/debian/gnocchi-common.install b/debian/gnocchi-common.install index 339258a5..10da746c 100644 --- a/debian/gnocchi-common.install +++ b/debian/gnocchi-common.install @@ -1,3 +1,3 @@ -etc/gnocchi/policy.json /usr/share/gnocchi-common etc/gnocchi/api-paste.ini /usr/share/gnocchi-common +etc/gnocchi/policy.json /usr/share/gnocchi-common gnocchi/rest/app.wsgi /usr/share/gnocchi-common -- GitLab From c88fa33ab2e116c3491c9273febb8de22506cfe3 Mon Sep 17 00:00:00 2001 From: Thomas Goirand Date: Thu, 26 Oct 2017 06:36:08 +0200 Subject: [PATCH 1049/1483] Standards-Version is now 4.1.1. --- debian/changelog | 1 + debian/control | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/debian/changelog b/debian/changelog index c411f6c6..8c270a44 100644 --- a/debian/changelog +++ b/debian/changelog @@ -4,6 +4,7 @@ gnocchi (3.0.4-5) UNRELEASED; urgency=medium * Updating copyright format url. * Updating maintainer field. * Running wrap-and-sort -bast. + * Standards-Version is now 4.1.1. 
-- Thomas Goirand Thu, 26 Oct 2017 06:33:31 +0200 diff --git a/debian/control b/debian/control index ea159808..b8686cdf 100644 --- a/debian/control +++ b/debian/control @@ -72,7 +72,7 @@ Build-Depends-Indep: python-yaml, subunit (>= 0.0.18), testrepository, -Standards-Version: 3.9.8 +Standards-Version: 4.1.1 Vcs-Browser: https://anonscm.debian.org/cgit/openstack/services/gnocchi.git Vcs-Git: https://anonscm.debian.org/git/openstack/services/gnocchi.git Homepage: https://github.com/openstack/gnocchi -- GitLab From b1801b98f7cec8dbab70324969a99e1661eb4b14 Mon Sep 17 00:00:00 2001 From: Thomas Goirand Date: Thu, 26 Oct 2017 06:55:45 +0200 Subject: [PATCH 1050/1483] Now packaging 4.0.3 --- debian/changelog | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/debian/changelog b/debian/changelog index 8c270a44..73b96630 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,5 +1,6 @@ -gnocchi (3.0.4-5) UNRELEASED; urgency=medium +gnocchi (4.0.3-1) experimental; urgency=medium + * New upstream release. * Updating vcs fields. * Updating copyright format url. * Updating maintainer field. -- GitLab From d4c42932bbd0d1393c0184a1feacd7b3f72a3cc2 Mon Sep 17 00:00:00 2001 From: Thomas Goirand Date: Thu, 26 Oct 2017 06:56:08 +0200 Subject: [PATCH 1051/1483] Fixed (build-)depends for this release. --- debian/changelog | 1 + debian/control | 80 +++++++++++++++++++++++++----------------------- 2 files changed, 42 insertions(+), 39 deletions(-) diff --git a/debian/changelog b/debian/changelog index 73b96630..df28f0b5 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,6 +1,7 @@ gnocchi (4.0.3-1) experimental; urgency=medium * New upstream release. + * Fixed (build-)depends for this release. * Updating vcs fields. * Updating copyright format url. * Updating maintainer field. 
diff --git a/debian/control b/debian/control index b8686cdf..ed1e5057 100644 --- a/debian/control +++ b/debian/control @@ -13,62 +13,63 @@ Build-Depends: python-setuptools, python-sphinx, Build-Depends-Indep: - alembic (>= 0.7.6), + alembic, libpq-dev, postgresql, postgresql-server-dev-all, - python-concurrent.futures (>= 2.1.6), - python-cotyledon (>= 1.2.2), + python-boto3, + python-botocore (>= 1.5), + python-concurrent.futures, + python-cotyledon (>= 1.5.0), python-coverage (>= 3.6), + python-daiquiri, python-doc8, python-fixtures, python-future (>= 0.15), - python-gabbi (>= 1.21), + python-gabbi (>= 1.30), python-iso8601, - python-jsonpatch (>= 1.9), + python-jsonpatch, python-keystoneclient (>= 1:1.6.0), python-keystonemiddleware (>= 4.0.0), - python-lz4, + python-lz4 (>= 0.9.0), python-mock, - python-msgpack, + python-monotonic, python-mysqldb, python-numpy, python-os-testr, - python-oslo.config (>= 1:2.6.0), + python-oslo.config (>= 1:3.22.0), python-oslo.db (>= 4.8.0), - python-oslo.log (>= 2.3.0), - python-oslo.middleware (>= 3.11.0), - python-oslo.policy (>= 0.3.0), - python-oslo.serialization (>= 1.4.0), - python-oslo.utils (>= 3.3.0), - python-oslosphinx (>= 2.2.0.0), - python-oslotest, - python-pandas (>= 0.17), + python-oslo.middleware (>= 3.22.0), + python-oslo.policy, + python-pandas, python-paste, python-pastedeploy, - python-pecan (>= 0.9), + python-pecan, python-prettytable, python-psycopg2, python-pymysql, - python-pytimeparse (>= 1.1.5), - python-requests, - python-retrying, + python-redis, + python-scipy, python-six, python-sphinx-bootstrap-theme, + python-sphinx-rtd-theme, python-sphinxcontrib.httpdomain, python-sqlalchemy, python-sqlalchemy-utils, python-stevedore, python-swiftclient (>= 3.1.0), python-sysv-ipc, + python-tenacity (>= 3.1.0), python-testscenarios, python-testtools (>= 0.9.38), python-tooz (>= 1.38), python-trollius, + python-ujson, python-voluptuous, - python-webob (>= 1.4.1), + python-webob, python-webtest (>= 2.0.16), 
python-werkzeug, + python-wsgi-intercept (>= 1.4.1), python-yaml, subunit (>= 0.0.18), testrepository, @@ -128,45 +129,46 @@ Package: python-gnocchi Section: python Architecture: all Depends: - alembic (>= 0.7.6), - python-concurrent.futures (>= 2.1.6), - python-cotyledon (>= 1.2.2), + alembic, + python-boto3, + python-botocore (>= 1.5), + python-concurrent.futures, + python-cotyledon (>= 1.5.0), + python-daiquiri, python-future (>= 0.15), python-iso8601, - python-jsonpatch (>= 1.9), + python-jsonpatch, python-keystoneclient (>= 1:1.6.0), python-keystonemiddleware (>= 4.0.0), - python-lz4, - python-msgpack, + python-lz4 (>= 0.9.0), + python-monotonic, python-numpy, - python-oslo.config (>= 1:2.6.0), + python-oslo.config (>= 1:3.22.0), python-oslo.db (>= 4.8.0), - python-oslo.log (>= 2.3.0), - python-oslo.middleware (>= 3.11.0), - python-oslo.policy (>= 0.3.0), - python-oslo.serialization (>= 1.4.0), - python-oslo.utils (>= 3.3.0), + python-oslo.middleware (>= 3.22.0), + python-oslo.policy, python-oslosphinx (>= 2.2.0.0), - python-pandas (>= 0.17), + python-pandas, python-paste, python-pastedeploy, python-pbr, - python-pecan (>= 0.9), + python-pecan, python-prettytable, python-psycopg2, python-pymysql, - python-pytimeparse (>= 1.1.5), - python-requests, - python-retrying, + python-redis, + python-scipy, python-six, python-sqlalchemy, python-sqlalchemy-utils, python-stevedore, python-swiftclient (>= 3.1.0), + python-tenacity (>= 3.1.0), python-tooz (>= 1.38), python-trollius, + python-ujson, python-voluptuous, - python-webob (>= 1.4.1), + python-webob, python-werkzeug, python-yaml, ${misc:Depends}, -- GitLab From fd1678eed5bfd8a8c1241de0756878a1245d9aaf Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Mon, 2 Oct 2017 17:14:29 +0200 Subject: [PATCH 1052/1483] gnocchi-api: add support for VIRTUAL_ENV uwsgi does not honor VIRTUAL_ENV by default, which leads to problem when testing gnocchi-api in a virtual env. 
--- gnocchi/cli/api.py | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/gnocchi/cli/api.py b/gnocchi/cli/api.py index 170379a1..932edd9f 100644 --- a/gnocchi/cli/api.py +++ b/gnocchi/cli/api.py @@ -81,8 +81,7 @@ def api(): workers = utils.get_default_workers() - return os.execl( - uwsgi, uwsgi, + args = [ "--http", "%s:%d" % (conf.host or conf.api.host, conf.port or conf.api.port), "--master", @@ -96,4 +95,10 @@ def api(): "--chdir", "/", "--wsgi", "gnocchi.rest.wsgi", "--pyargv", " ".join(sys.argv[1:]), - ) + ] + + virtual_env = os.getenv("VIRTUAL_ENV") + if virtual_env is not None: + args.extend(["-H", os.getenv("VIRTUAL_ENV", ".")]) + + return os.execl(uwsgi, uwsgi, *args) -- GitLab From fa030fe231f6881b57d74e0a7932b5abea8ed2f3 Mon Sep 17 00:00:00 2001 From: gord chung Date: Thu, 26 Oct 2017 12:48:44 +0000 Subject: [PATCH 1053/1483] remove boundary requirements we explicitly take first and last timstamps common across all series as the boundaries if missing. 
--- gnocchi/rest/api.py | 5 ----- .../gabbits/aggregates-with-metric-ids.yaml | 13 ------------- gnocchi/tests/functional/gabbits/aggregation.yaml | 7 ------- 3 files changed, 25 deletions(-) diff --git a/gnocchi/rest/api.py b/gnocchi/rest/api.py index 816bbafd..01d14f1b 100644 --- a/gnocchi/rest/api.py +++ b/gnocchi/rest/api.py @@ -1661,11 +1661,6 @@ def validate_qs(start, stop, granularity, needed_overlap, fill): abort(400, {"cause": "Argument value error", "detail": "needed_overlap", "reason": "Must be a number"}) - if needed_overlap != 100.0 and start is None and stop is None: - abort(400, {"cause": "Argument value error", - "detail": "needed_overlap", - "reason": "start and/or stop must be provided " - "if specifying needed_overlap"}) if start is not None: try: diff --git a/gnocchi/tests/functional/gabbits/aggregates-with-metric-ids.yaml b/gnocchi/tests/functional/gabbits/aggregates-with-metric-ids.yaml index 15878185..db9a698f 100644 --- a/gnocchi/tests/functional/gabbits/aggregates-with-metric-ids.yaml +++ b/gnocchi/tests/functional/gabbits/aggregates-with-metric-ids.yaml @@ -512,19 +512,6 @@ tests: $.description.detail: "needed_overlap" $.description.reason: "Must be a number" - - name: incomplete needed_overlap - POST: /v1/aggregates?needed_overlap=50 - request_headers: - accept: application/json - content-type: application/json - authorization: "basic Zm9vYmFyOg==" - status: 400 - response_json_paths: - $.code: 400 - $.description.cause: "Argument value error" - $.description.detail: "needed_overlap" - $.description.reason: "start and/or stop must be provided if specifying needed_overlap" - - name: invalid granularity POST: /v1/aggregates?granularity=foobar request_headers: diff --git a/gnocchi/tests/functional/gabbits/aggregation.yaml b/gnocchi/tests/functional/gabbits/aggregation.yaml index 1121f90a..8a65f41a 100644 --- a/gnocchi/tests/functional/gabbits/aggregation.yaml +++ b/gnocchi/tests/functional/gabbits/aggregation.yaml @@ -171,13 +171,6 @@ tests: 
response_strings: - Granularity '42.0' for metric - - name: get measure aggregates no boundary custom overlap - desc: https://github.com/gnocchixyz/gnocchi/issues/17 - GET: /v1/aggregation/metric?metric=$HISTORY['get metric list'].$RESPONSE['$[0].id']&metric=$HISTORY['get metric list'].$RESPONSE['$[1].id']&needed_overlap=50 - status: 400 - response_strings: - - start and/or stop must be provided if specifying needed_overlap - # Aggregation by resource and metric_name - name: post a resource -- GitLab From 49015c26c935b42a86c4992ee2f25f12e6e30c1f Mon Sep 17 00:00:00 2001 From: Thomas Goirand Date: Thu, 26 Oct 2017 18:11:02 +0200 Subject: [PATCH 1054/1483] Switch the package to Python 3 only, drop Python 2. --- debian/changelog | 1 + debian/control | 218 +++++++++++++++++++++++------------------------ debian/rules | 20 ++--- 3 files changed, 118 insertions(+), 121 deletions(-) diff --git a/debian/changelog b/debian/changelog index df28f0b5..d07d7e83 100644 --- a/debian/changelog +++ b/debian/changelog @@ -7,6 +7,7 @@ gnocchi (4.0.3-1) experimental; urgency=medium * Updating maintainer field. * Running wrap-and-sort -bast. * Standards-Version is now 4.1.1. + * Switch the package to Python 3 only, drop Python 2. 
-- Thomas Goirand Thu, 26 Oct 2017 06:33:31 +0200 diff --git a/debian/control b/debian/control index ed1e5057..58cc9dd6 100644 --- a/debian/control +++ b/debian/control @@ -8,69 +8,69 @@ Build-Depends: debhelper (>= 10), dh-python, openstack-pkg-tools (>= 54~), - python-all, - python-pbr, - python-setuptools, - python-sphinx, + python3-all, + python3-pbr, + python3-setuptools, + python3-sphinx, Build-Depends-Indep: alembic, libpq-dev, postgresql, postgresql-server-dev-all, - python-boto3, - python-botocore (>= 1.5), - python-concurrent.futures, - python-cotyledon (>= 1.5.0), - python-coverage (>= 3.6), - python-daiquiri, - python-doc8, - python-fixtures, - python-future (>= 0.15), - python-gabbi (>= 1.30), - python-iso8601, - python-jsonpatch, - python-keystoneclient (>= 1:1.6.0), - python-keystonemiddleware (>= 4.0.0), - python-lz4 (>= 0.9.0), - python-mock, - python-monotonic, - python-mysqldb, - python-numpy, - python-os-testr, - python-oslo.config (>= 1:3.22.0), - python-oslo.db (>= 4.8.0), - python-oslo.middleware (>= 3.22.0), - python-oslo.policy, - python-pandas, - python-paste, - python-pastedeploy, - python-pecan, - python-prettytable, - python-psycopg2, - python-pymysql, - python-redis, - python-scipy, - python-six, - python-sphinx-bootstrap-theme, - python-sphinx-rtd-theme, - python-sphinxcontrib.httpdomain, - python-sqlalchemy, - python-sqlalchemy-utils, - python-stevedore, - python-swiftclient (>= 3.1.0), - python-sysv-ipc, - python-tenacity (>= 3.1.0), - python-testscenarios, - python-testtools (>= 0.9.38), - python-tooz (>= 1.38), - python-trollius, - python-ujson, - python-voluptuous, - python-webob, - python-webtest (>= 2.0.16), - python-werkzeug, - python-wsgi-intercept (>= 1.4.1), - python-yaml, + python3-boto3, + python3-botocore (>= 1.5), + python3-concurrent.futures, + python3-cotyledon (>= 1.5.0), + python3-coverage (>= 3.6), + python3-daiquiri, + python3-doc8, + python3-fixtures, + python3-future (>= 0.15), + python3-gabbi (>= 1.30), + 
python3-iso8601, + python3-jsonpatch, + python3-keystoneclient (>= 1:1.6.0), + python3-keystonemiddleware (>= 4.0.0), + python3-lz4 (>= 0.9.0), + python3-mock, + python3-monotonic, + python3-mysqldb, + python3-numpy, + python3-os-testr, + python3-oslo.config (>= 1:3.22.0), + python3-oslo.db (>= 4.8.0), + python3-oslo.middleware (>= 3.22.0), + python3-oslo.policy, + python3-pandas, + python3-paste, + python3-pastedeploy, + python3-pecan, + python3-prettytable, + python3-psycopg2, + python3-pymysql, + python3-redis, + python3-scipy, + python3-six, + python3-sphinx-bootstrap-theme, + python3-sphinx-rtd-theme, + python3-sphinxcontrib.httpdomain, + python3-sqlalchemy, + python3-sqlalchemy-utils, + python3-stevedore, + python3-swiftclient (>= 3.1.0), + python3-sysv-ipc, + python3-tenacity (>= 3.1.0), + python3-testscenarios, + python3-testtools (>= 0.9.38), + python3-tooz (>= 1.38), + python3-trollius, + python3-ujson, + python3-voluptuous, + python3-webob, + python3-webtest (>= 2.0.16), + python3-werkzeug, + python3-wsgi-intercept (>= 1.4.1), + python3-yaml, subunit (>= 0.0.18), testrepository, Standards-Version: 4.1.1 @@ -84,10 +84,10 @@ Depends: adduser, gnocchi-common (= ${binary:Version}), lsb-base, - python-openstackclient, + python3-openstackclient, q-text-as-data, ${misc:Depends}, - ${python:Depends}, + ${python3:Depends}, Description: Metric as a Service - API daemon Gnocchi is a service for managing a set of resources and storing metrics about them, in a scalable and resilient way. Its functionalities are exposed over an @@ -101,9 +101,9 @@ Depends: adduser, dbconfig-common, debconf, - python-gnocchi (= ${binary:Version}), + python3-gnocchi (= ${binary:Version}), ${misc:Depends}, - ${python:Depends}, + ${python3:Depends}, Description: Metric as a Service - common files Gnocchi is a service for managing a set of resources and storing metrics about them, in a scalable and resilient way. 
Its functionalities are exposed over an @@ -117,7 +117,7 @@ Depends: gnocchi-common (= ${binary:Version}), lsb-base, ${misc:Depends}, - ${python:Depends}, + ${python3:Depends}, Description: Metric as a Service - metric daemon Gnocchi is a service for managing a set of resources and storing metrics about them, in a scalable and resilient way. Its functionalities are exposed over an @@ -125,59 +125,59 @@ Description: Metric as a Service - metric daemon . This package contains the metric daemon. -Package: python-gnocchi +Package: python3-gnocchi Section: python Architecture: all Depends: alembic, - python-boto3, - python-botocore (>= 1.5), - python-concurrent.futures, - python-cotyledon (>= 1.5.0), - python-daiquiri, - python-future (>= 0.15), - python-iso8601, - python-jsonpatch, - python-keystoneclient (>= 1:1.6.0), - python-keystonemiddleware (>= 4.0.0), - python-lz4 (>= 0.9.0), - python-monotonic, - python-numpy, - python-oslo.config (>= 1:3.22.0), - python-oslo.db (>= 4.8.0), - python-oslo.middleware (>= 3.22.0), - python-oslo.policy, - python-oslosphinx (>= 2.2.0.0), - python-pandas, - python-paste, - python-pastedeploy, - python-pbr, - python-pecan, - python-prettytable, - python-psycopg2, - python-pymysql, - python-redis, - python-scipy, - python-six, - python-sqlalchemy, - python-sqlalchemy-utils, - python-stevedore, - python-swiftclient (>= 3.1.0), - python-tenacity (>= 3.1.0), - python-tooz (>= 1.38), - python-trollius, - python-ujson, - python-voluptuous, - python-webob, - python-werkzeug, - python-yaml, + python3-boto3, + python3-botocore (>= 1.5), + python3-concurrent.futures, + python3-cotyledon (>= 1.5.0), + python3-daiquiri, + python3-future (>= 0.15), + python3-iso8601, + python3-jsonpatch, + python3-keystoneclient (>= 1:1.6.0), + python3-keystonemiddleware (>= 4.0.0), + python3-lz4 (>= 0.9.0), + python3-monotonic, + python3-numpy, + python3-oslo.config (>= 1:3.22.0), + python3-oslo.db (>= 4.8.0), + python3-oslo.middleware (>= 3.22.0), + 
python3-oslo.policy, + python3-oslosphinx (>= 2.2.0.0), + python3-pandas, + python3-paste, + python3-pastedeploy, + python3-pbr, + python3-pecan, + python3-prettytable, + python3-psycopg2, + python3-pymysql, + python3-redis, + python3-scipy, + python3-six, + python3-sqlalchemy, + python3-sqlalchemy-utils, + python3-stevedore, + python3-swiftclient (>= 3.1.0), + python3-tenacity (>= 3.1.0), + python3-tooz (>= 1.38), + python3-trollius, + python3-ujson, + python3-voluptuous, + python3-webob, + python3-werkzeug, + python3-yaml, ${misc:Depends}, - ${python:Depends}, + ${python3:Depends}, Suggests: - python-gnocchi-doc, -Description: Metric as a Service - Python 2.x + gnocchi-doc, +Description: Metric as a Service - Python 3.x Gnocchi is a service for managing a set of resources and storing metrics about them, in a scalable and resilient way. Its functionalities are exposed over an HTTP REST API. . - This package contains the Python 2.x module. + This package contains the Python 3.x module. diff --git a/debian/rules b/debian/rules index bb8e5243..f34539f9 100755 --- a/debian/rules +++ b/debian/rules @@ -6,24 +6,20 @@ include /usr/share/openstack-pkg-tools/pkgos.make UNIT_TEST_BLACKLIST = test_carbonara.CarbonaraCmd.* %: - dh $@ --buildsystem=python_distutils --with python2,sphinxdoc + dh $@ --buildsystem=python_distutils --with python3,sphinxdoc override_dh_auto_install: - set -e ; for pyvers in $(PYTHONS); do \ + set -e ; for pyvers in $(PYTHON3S); do \ python$$pyvers setup.py install --install-layout=deb \ - --root $(CURDIR)/debian/python-gnocchi; \ + --root $(CURDIR)/debian/python3-gnocchi; \ done - mkdir -p $(CURDIR)/debian/python-gnocchi/usr/lib/python2.7/dist-packages/gnocchi/indexer - cp -auxf gnocchi/indexer/alembic $(CURDIR)/debian/python-gnocchi/usr/lib/python2.7/dist-packages/gnocchi/indexer -# set -e ; for pyvers in $(PYTHON3S); do \ -# python$$pyvers setup.py install --install-layout=deb \ -# --root $(CURDIR)/debian/python3-gnocchi; \ -# done + mkdir -p 
$(CURDIR)/debian/python3-gnocchi/usr/lib/python3/dist-packages/gnocchi/indexer + cp -auxf gnocchi/indexer/alembic $(CURDIR)/debian/python3-gnocchi/usr/lib/python3/dist-packages/gnocchi/indexer rm -rf $(CURDIR)/debian/python*-gnocchi/usr/lib/python*/dist-packages/*.pth rm -rf $(CURDIR)/debian/python*-gnocchi/usr/etc mkdir -p $(CURDIR)/debian/gnocchi-common/usr/share/gnocchi-common - PYTHONPATH=$(CURDIR)/debian/python-gnocchi/usr/lib/python2.7/dist-packages oslo-config-generator \ + PYTHONPATH=$(CURDIR)/debian/python3-gnocchi/usr/lib/python3/dist-packages oslo-config-generator \ --output-file $(CURDIR)/debian/gnocchi-common/usr/share/gnocchi-common/gnocchi.conf \ --wrap-width 140 \ --namespace gnocchi \ @@ -43,7 +39,7 @@ override_dh_auto_install: override_dh_auto_test: ifeq (,$(findstring nocheck, $(DEB_BUILD_OPTIONS))) @echo "===> Running tests" - set -e ; set -x ; for i in 2.7 ; do \ + set -e ; set -x ; for i in $(PYTHON3S) ; do \ PYMAJOR=`echo $$i | cut -d'.' -f1` ; \ echo "===> Starting PGSQL" ; \ BINDIR=`pg_config --bindir` ; \ @@ -76,7 +72,7 @@ override_dh_sphinxdoc: # chmod +x debian/start_pg.sh ; \ # debian/start_pg.sh $$PG_MYTMPDIR ; \ # export GNOCCHI_TEST_INDEXER_URL="postgresql:///template1?host=$$PG_MYTMPDIR&port=9823" ; \ -# PYTHONPATH=. sphinx-build -b html doc/source debian/python-gnocchi-doc/usr/share/doc/python-gnocchi-doc/html ; \ +# PYTHONPATH=. sphinx-build -b html doc/source debian/python3-gnocchi-doc/usr/share/doc/python3-gnocchi-doc/html ; \ # dh_sphinxdoc -O--buildsystem=python_distutils ; \ # $$BINDIR/pg_ctl stop -D $$PG_MYTMPDIR echo "Do nothing" -- GitLab From 91156fedda34159493789cdce22e6a6294b00e08 Mon Sep 17 00:00:00 2001 From: Thomas Goirand Date: Thu, 26 Oct 2017 16:12:50 +0000 Subject: [PATCH 1055/1483] Remove futures & trollius, which are py2 only. 
--- debian/control | 4 ---- 1 file changed, 4 deletions(-) diff --git a/debian/control b/debian/control index 58cc9dd6..2075f2e8 100644 --- a/debian/control +++ b/debian/control @@ -19,7 +19,6 @@ Build-Depends-Indep: postgresql-server-dev-all, python3-boto3, python3-botocore (>= 1.5), - python3-concurrent.futures, python3-cotyledon (>= 1.5.0), python3-coverage (>= 3.6), python3-daiquiri, @@ -63,7 +62,6 @@ Build-Depends-Indep: python3-testscenarios, python3-testtools (>= 0.9.38), python3-tooz (>= 1.38), - python3-trollius, python3-ujson, python3-voluptuous, python3-webob, @@ -132,7 +130,6 @@ Depends: alembic, python3-boto3, python3-botocore (>= 1.5), - python3-concurrent.futures, python3-cotyledon (>= 1.5.0), python3-daiquiri, python3-future (>= 0.15), @@ -165,7 +162,6 @@ Depends: python3-swiftclient (>= 3.1.0), python3-tenacity (>= 3.1.0), python3-tooz (>= 1.38), - python3-trollius, python3-ujson, python3-voluptuous, python3-webob, -- GitLab From 6f2ea7781606ba42c6a4a323f5029bc55376de3e Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Tue, 24 Oct 2017 08:05:06 +0200 Subject: [PATCH 1056/1483] aggregates: remove useless checks It's not necessary useful to forbid fill without granularity. Of course, some "operations" doesn't make sense. Like resample without granularity. But it's up to the user to understand that. 
--- gnocchi/rest/api.py | 4 -- .../gabbits/aggregates-with-metric-ids.yaml | 59 +++++++++++++++---- .../tests/functional/gabbits/aggregation.yaml | 10 +++- 3 files changed, 54 insertions(+), 19 deletions(-) diff --git a/gnocchi/rest/api.py b/gnocchi/rest/api.py index 01d14f1b..bad70f12 100644 --- a/gnocchi/rest/api.py +++ b/gnocchi/rest/api.py @@ -1687,10 +1687,6 @@ def validate_qs(start, stop, granularity, needed_overlap, fill): "reason": six.text_type(e)}) if fill is not None: - if granularity is None: - abort(400, {"cause": "Argument value error", - "detail": "granularity", - "reason": "Unable to fill without a granularity"}) if fill != "null": try: fill = float(fill) diff --git a/gnocchi/tests/functional/gabbits/aggregates-with-metric-ids.yaml b/gnocchi/tests/functional/gabbits/aggregates-with-metric-ids.yaml index db9a698f..ad476fad 100644 --- a/gnocchi/tests/functional/gabbits/aggregates-with-metric-ids.yaml +++ b/gnocchi/tests/functional/gabbits/aggregates-with-metric-ids.yaml @@ -341,6 +341,52 @@ tests: - ["2015-03-06T14:35:12+00:00", 1.0, -9.5] - ["2015-03-06T14:35:15+00:00", 1.0, -13.0] + + - name: push new measurements to metric1 + POST: /v1/metric/$HISTORY['create metric1'].$RESPONSE['$.id']/measures + data: + - timestamp: "2015-03-06T14:37:00" + value: 15 + - timestamp: "2015-03-06T14:38:00" + value: 15 + status: 202 + + - name: refresh metric1 + GET: /v1/metric/$HISTORY['create metric1'].$RESPONSE['$.id']/measures?refresh=true + + - name: fill and no granularity + POST: /v1/aggregates?fill=123 + data: + operations: "(metric ($HISTORY['create metric1'].$RESPONSE['$.id'] mean) ($HISTORY['create metric2'].$RESPONSE['$.id'] mean))" + response_json_paths: + $.`len`: 2 + $."$HISTORY['create metric1'].$RESPONSE['$.id']_mean": + - ["2015-03-06T14:33:00+00:00", 60.0, 43.1] + - ["2015-03-06T14:34:00+00:00", 60.0, -2.0] + - ["2015-03-06T14:35:00+00:00", 60.0, 10.0] + - ['2015-03-06T14:37:00+00:00', 60.0, 15.0] + - ['2015-03-06T14:38:00+00:00', 60.0, 15.0] + - 
["2015-03-06T14:33:57+00:00", 1.0, 43.1] + - ["2015-03-06T14:34:12+00:00", 1.0, 12.0] + - ["2015-03-06T14:34:15+00:00", 1.0, -16.0] + - ["2015-03-06T14:35:12+00:00", 1.0, 9.0] + - ["2015-03-06T14:35:15+00:00", 1.0, 11.0] + - ['2015-03-06T14:37:00+00:00', 1.0, 15.0] + - ['2015-03-06T14:38:00+00:00', 1.0, 15.0] + $."$HISTORY['create metric2'].$RESPONSE['$.id']_mean": + - ["2015-03-06T14:33:00+00:00", 60.0, 2.0] + - ["2015-03-06T14:34:00+00:00", 60.0, 4.5] + - ["2015-03-06T14:35:00+00:00", 60.0, 12.5] + - ['2015-03-06T14:37:00+00:00', 60.0, 123.0] + - ['2015-03-06T14:38:00+00:00', 60.0, 123.0] + - ["2015-03-06T14:33:57+00:00", 1.0, 2.0] + - ["2015-03-06T14:34:12+00:00", 1.0, 4.0] + - ["2015-03-06T14:34:15+00:00", 1.0, 5.0] + - ["2015-03-06T14:35:12+00:00", 1.0, 10.0] + - ["2015-03-06T14:35:15+00:00", 1.0, 15.0] + - ['2015-03-06T14:37:00+00:00', 1.0, 123.0] + - ['2015-03-06T14:38:00+00:00', 1.0, 123.0] + # Negative tests - name: get no operations @@ -525,19 +571,6 @@ tests: $.description.detail: "granularity" $.description.reason: "Unable to parse timespan" - - name: incomplete fill - POST: /v1/aggregates?fill=123 - request_headers: - accept: application/json - content-type: application/json - authorization: "basic Zm9vYmFyOg==" - status: 400 - response_json_paths: - $.code: 400 - $.description.cause: "Argument value error" - $.description.detail: "granularity" - $.description.reason: "Unable to fill without a granularity" - - name: invalid fill POST: /v1/aggregates?fill=foobar&granularity=5 request_headers: diff --git a/gnocchi/tests/functional/gabbits/aggregation.yaml b/gnocchi/tests/functional/gabbits/aggregation.yaml index 8a65f41a..ee8a571f 100644 --- a/gnocchi/tests/functional/gabbits/aggregation.yaml +++ b/gnocchi/tests/functional/gabbits/aggregation.yaml @@ -156,9 +156,15 @@ tests: - ['2015-03-06T14:34:12+00:00', 1.0, 7.0] - ['2015-03-06T14:35:12+00:00', 1.0, 5.0] - - name: get measure aggregates with fill missing granularity + - name: get measure aggregates 
with fill all granularities GET: /v1/aggregation/metric?metric=$HISTORY['get metric list'].$RESPONSE['$[0].id']&metric=$HISTORY['get metric list'].$RESPONSE['$[1].id']&fill=0 - status: 400 + response_json_paths: + $: + - ['2015-03-06T14:30:00+00:00', 300.0, 15.05] + - ['2015-03-06T14:35:00+00:00', 300.0, 2.5] + - ['2015-03-06T14:33:57+00:00', 1.0, 23.1] + - ['2015-03-06T14:34:12+00:00', 1.0, 7.0] + - ['2015-03-06T14:35:12+00:00', 1.0, 2.5] - name: get measure aggregates with bad fill GET: /v1/aggregation/metric?metric=$HISTORY['get metric list'].$RESPONSE['$[0].id']&metric=$HISTORY['get metric list'].$RESPONSE['$[1].id']&granularity=1&fill=asdf -- GitLab From 1385867c4cea42c101f192eccd913c6a977523ab Mon Sep 17 00:00:00 2001 From: Thomas Goirand Date: Thu, 26 Oct 2017 16:16:44 +0000 Subject: [PATCH 1057/1483] Fix diff with upstream tag. --- gnocchi/tests/functional/fixtures.py | 7 ------- gnocchi/tests/gabbi/gabbits/cors.yaml | 21 --------------------- 2 files changed, 28 deletions(-) delete mode 100644 gnocchi/tests/gabbi/gabbits/cors.yaml diff --git a/gnocchi/tests/functional/fixtures.py b/gnocchi/tests/functional/fixtures.py index 41814ddc..13b7ebbd 100644 --- a/gnocchi/tests/functional/fixtures.py +++ b/gnocchi/tests/functional/fixtures.py @@ -100,13 +100,6 @@ class ConfigFixture(fixture.GabbiFixture): # options making impossible to override them properly... cfg.set_defaults(cors.CORS_OPTS, allowed_origin="http://foobar.com") - # NOTE(sileht): This is not concurrency safe, but only this tests file - # deal with cors, so we are fine. set_override don't work because cors - # group doesn't yet exists, and we the CORS middleware is created it - # register the option and directly copy value of all configurations - # options making impossible to override them properly... 
- cfg.set_defaults(cors.CORS_OPTS, allowed_origin="http://foobar.com") - self.conf = conf self.tmp_dir = data_tmp_dir diff --git a/gnocchi/tests/gabbi/gabbits/cors.yaml b/gnocchi/tests/gabbi/gabbits/cors.yaml deleted file mode 100644 index bd2395d5..00000000 --- a/gnocchi/tests/gabbi/gabbits/cors.yaml +++ /dev/null @@ -1,21 +0,0 @@ -fixtures: - - ConfigFixture - -tests: - - name: get CORS headers for non-allowed - OPTIONS: /v1/status - request_headers: - Origin: http://notallowed.com - Access-Control-Request-Method: GET - response_forbidden_headers: - - Access-Control-Allow-Origin - - Access-Control-Allow-Methods - - - name: get CORS headers for allowed - OPTIONS: /v1/status - request_headers: - Origin: http://foobar.com - Access-Control-Request-Method: GET - response_headers: - Access-Control-Allow-Origin: http://foobar.com - Access-Control-Allow-Methods: GET -- GitLab From 7755e31171ccb8b85eccea59132841d80109701d Mon Sep 17 00:00:00 2001 From: Thomas Goirand Date: Thu, 26 Oct 2017 19:47:39 +0000 Subject: [PATCH 1058/1483] Add debian/bin with gnocchi-config-generator entrypoint. --- debian/bin/gnocchi-config-generator | 8 ++++++++ debian/changelog | 1 + debian/rules | 2 +- 3 files changed, 10 insertions(+), 1 deletion(-) create mode 100755 debian/bin/gnocchi-config-generator diff --git a/debian/bin/gnocchi-config-generator b/debian/bin/gnocchi-config-generator new file mode 100755 index 00000000..f825aef6 --- /dev/null +++ b/debian/bin/gnocchi-config-generator @@ -0,0 +1,8 @@ +#!/usr/bin/python3 + +import sys + +from gnocchi.cli import config_generator + +if __name__ == "__main__": + sys.exit(config_generator()) diff --git a/debian/changelog b/debian/changelog index d07d7e83..714894a9 100644 --- a/debian/changelog +++ b/debian/changelog @@ -8,6 +8,7 @@ gnocchi (4.0.3-1) experimental; urgency=medium * Running wrap-and-sort -bast. * Standards-Version is now 4.1.1. * Switch the package to Python 3 only, drop Python 2. 
+ * Add debian/bin with gnocchi-config-generator entrypoint. -- Thomas Goirand Thu, 26 Oct 2017 06:33:31 +0200 diff --git a/debian/rules b/debian/rules index f34539f9..2b8bf5a5 100755 --- a/debian/rules +++ b/debian/rules @@ -54,7 +54,7 @@ ifeq (,$(findstring nocheck, $(DEB_BUILD_OPTIONS))) rm -rf .testrepository ; \ testr-python$$PYMAJOR init ; \ TEMP_REZ=`mktemp -t` ; \ - PYTHONPATH=$(CURDIR) PYTHON=python$$i testr-python$$PYMAJOR run --subunit 'gnocchi\.tests\.(?!.*('"$(UNIT_TEST_BLACKLIST)"'))' | tee $$TEMP_REZ | subunit2pyunit ; \ + export PATH=$(PATH):$(CURDIR)/debian/bin && PYTHONPATH=$(CURDIR):$(CURDIR)/debian/bin PYTHON=python$$i testr-python$$PYMAJOR run --subunit 'gnocchi\.tests\.(?!.*('"$(UNIT_TEST_BLACKLIST)"'))' | tee $$TEMP_REZ | subunit2pyunit ; \ cat $$TEMP_REZ | subunit-filter -s --no-passthrough | subunit-stats ; \ rm -f $$TEMP_REZ ; \ testr-python$$PYMAJOR slowest ; \ -- GitLab From d4eed3217ca4acb94c1d064d6467abef7f791789 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Tue, 24 Oct 2017 21:27:52 +0200 Subject: [PATCH 1059/1483] aggregates: new fill=drop-na dropna is like null but nan values are not returned. This is the default for /v1/aggregates API. --- doc/source/rest.j2 | 32 +++--- gnocchi/rest/aggregates/api.py | 4 +- gnocchi/rest/aggregates/processor.py | 25 +++-- gnocchi/rest/api.py | 34 ++++--- .../gabbits/aggregates-with-metric-ids.yaml | 97 ++++++++++++++++++- .../tests/functional/gabbits/aggregation.yaml | 4 + .../notes/fill=dropna-9e055895e7bff778.yaml | 6 ++ 7 files changed, 156 insertions(+), 46 deletions(-) create mode 100644 releasenotes/notes/fill=dropna-9e055895e7bff778.yaml diff --git a/doc/source/rest.j2 b/doc/source/rest.j2 index f5d23429..5b3eaf85 100644 --- a/doc/source/rest.j2 +++ b/doc/source/rest.j2 @@ -590,15 +590,12 @@ error is returned. boundary to the first or last timestamp common across all series. The ability to fill in missing points from a subset of time series is supported -by specifying a `fill` value. 
Valid fill values include any float or `null`. In -the case of `null`, Gnocchi will compute the aggregation using only the -existing points. The `fill` parameter will not backfill timestamps which contain no -points in any of the time series. Only timestamps which have datapoints in at -least one of the time series is returned. - -.. note:: - - A |granularity| must be specified when using the `fill` parameter. +by specifying a `fill` value. Valid fill values include any float, `dropna` or +`null`. In the case of `null`, Gnocchi will compute the aggregation using only +the existing points. `dropna` is like `null` but remove NaN from the result. +The `fill` parameter will not backfill timestamps which contain no points in +any of the time series. Only timestamps which have datapoints in at least one +of the time series is returned. {{ scenarios['get-aggregates-by-metric-ids-fill']['doc'] }} @@ -739,16 +736,13 @@ expects 100% overlap. If this percentage is not reached, an error is returned. If `start` or `stop` boundary is not set, Gnocchi will set the missing boundary to the first or last timestamp common across all series. -The ability to fill in missing points from a subset of time series is supported -by specifying a `fill` value. Valid fill values include any float or `null`. In -the case of `null`, Gnocchi will compute the aggregation using only the -existing points. The `fill` parameter will not backfill timestamps which contain no -points in any of the time series. Only timestamps which have datapoints in at -least one of the time series is returned. - -.. note:: - - A |granularity| must be specified when using the `fill` parameter. +The ability to fill in missing points from a subset of time series is supported +by specifying a `fill` value. Valid fill values include any float, `dropna` or +`null`. In the case of `null`, Gnocchi will compute the aggregation using only +the existing points. `dropna` is like `null` but remove NaN from the result. 
+The `fill` parameter will not backfill timestamps which contain no points in +any of the time series. Only timestamps which have datapoints in at least one +of the time series is returned. {{ scenarios['get-across-metrics-measures-by-metric-ids-fill']['doc'] }} diff --git a/gnocchi/rest/aggregates/api.py b/gnocchi/rest/aggregates/api.py index 15b2341f..233c1130 100644 --- a/gnocchi/rest/aggregates/api.py +++ b/gnocchi/rest/aggregates/api.py @@ -179,7 +179,9 @@ class AggregatesController(rest.RestController): @pecan.expose("json") def post(self, start=None, stop=None, granularity=None, - needed_overlap=100.0, fill=None, groupby=None): + needed_overlap=None, fill=None, groupby=None): + if fill is None and needed_overlap is None: + fill = "dropna" start, stop, granularity, needed_overlap, fill = api.validate_qs( start, stop, granularity, needed_overlap, fill) diff --git a/gnocchi/rest/aggregates/processor.py b/gnocchi/rest/aggregates/processor.py index fc74ca08..2c178c37 100644 --- a/gnocchi/rest/aggregates/processor.py +++ b/gnocchi/rest/aggregates/processor.py @@ -126,7 +126,8 @@ def aggregated(refs_and_timeseries, operations, from_timestamp=None, return_inverse=True) # create nd-array (unique series x unique times) and fill - filler = fill if fill is not None and fill != 'null' else numpy.NaN + filler = (numpy.NaN if fill in [None, 'null', 'dropna'] + else fill) val_grid = numpy.full((len(series[key]), len(times)), filler) start = 0 for i, split in enumerate(series[key]): @@ -162,16 +163,20 @@ def aggregated(refs_and_timeseries, operations, from_timestamp=None, values = values.T if is_aggregated: - result["aggregated"]["timestamps"].extend(times) - result["aggregated"]['granularity'].extend([granularity] * - len(times)) - result["aggregated"]['values'].extend(values[0]) + idents = ["aggregated"] else: - for i, ref in enumerate(references[key]): - ident = "%s_%s" % tuple(ref) - result[ident]["timestamps"].extend(times) - 
result[ident]['granularity'].extend([granularity] * len(times)) - result[ident]['values'].extend(values[i]) + idents = ["%s_%s" % tuple(ref) for ref in references[key]] + for i, ident in enumerate(idents): + if fill == "dropna": + pos = ~numpy.isnan(values[i]) + v = values[i][pos] + t = times[pos] + else: + v = values[i] + t = times + result[ident]["timestamps"].extend(t) + result[ident]['granularity'].extend([granularity] * len(t)) + result[ident]['values'].extend(v) return dict(((ident, list(six.moves.zip(result[ident]['timestamps'], result[ident]['granularity'], diff --git a/gnocchi/rest/api.py b/gnocchi/rest/api.py index bad70f12..12bd1105 100644 --- a/gnocchi/rest/api.py +++ b/gnocchi/rest/api.py @@ -1652,15 +1652,21 @@ class AggregationResourceController(rest.RestController): return results +FillSchema = voluptuous.Schema( + voluptuous.Any(voluptuous.Coerce(float), "null", "dropna", + msg="Must be a float, 'dropna' or 'null'")) + + # FIXME(sileht): should be in aggregates.api but we need to split all # controllers to do this def validate_qs(start, stop, granularity, needed_overlap, fill): - try: - needed_overlap = float(needed_overlap) - except ValueError: - abort(400, {"cause": "Argument value error", - "detail": "needed_overlap", - "reason": "Must be a number"}) + if needed_overlap is not None: + try: + needed_overlap = float(needed_overlap) + except ValueError: + abort(400, {"cause": "Argument value error", + "detail": "needed_overlap", + "reason": "Must be a number"}) if start is not None: try: @@ -1687,15 +1693,13 @@ def validate_qs(start, stop, granularity, needed_overlap, fill): "reason": six.text_type(e)}) if fill is not None: - if fill != "null": - try: - fill = float(fill) - except ValueError: - abort(400, - {"cause": "Argument value error", - "detail": "fill", - "reason": "Must be a float or \'null\', got '%s'" % - fill}) + try: + fill = FillSchema(fill) + except voluptuous.Error as e: + abort(400, {"cause": "Argument value error", + "detail": 
"fill", + "reason": str(e)}) + return start, stop, granularity, needed_overlap, fill diff --git a/gnocchi/tests/functional/gabbits/aggregates-with-metric-ids.yaml b/gnocchi/tests/functional/gabbits/aggregates-with-metric-ids.yaml index ad476fad..251c71b2 100644 --- a/gnocchi/tests/functional/gabbits/aggregates-with-metric-ids.yaml +++ b/gnocchi/tests/functional/gabbits/aggregates-with-metric-ids.yaml @@ -51,6 +51,12 @@ tests: archive_policy_name: cookies status: 201 + - name: create metric4 + POST: /v1/metric + data: + archive_policy_name: cookies + status: 201 + - name: push measurements to metric1 POST: /v1/metric/$HISTORY['create metric1'].$RESPONSE['$.id']/measures data: @@ -81,6 +87,15 @@ tests: value: 15 status: 202 + - name: push measurements to metric4 + POST: /v1/metric/$HISTORY['create metric4'].$RESPONSE['$.id']/measures + data: + - timestamp: "2017-04-06T14:33:57" + value: 20 + - timestamp: "2017-04-06T14:34:12" + value: 10 + status: 202 + - name: get measurements from metric1 GET: /v1/metric/$HISTORY['create metric1'].$RESPONSE['$.id']/measures?refresh=true response_json_paths: @@ -387,6 +402,86 @@ tests: - ['2015-03-06T14:37:00+00:00', 1.0, 123.0] - ['2015-03-06T14:38:00+00:00', 1.0, 123.0] + - name: no overlap dropna + POST: /v1/aggregates + data: + operations: "(metric ($HISTORY['create metric1'].$RESPONSE['$.id'] mean) ($HISTORY['create metric4'].$RESPONSE['$.id'] mean))" + response_json_paths: + $.`len`: 2 + $."$HISTORY['create metric1'].$RESPONSE['$.id']_mean": + - ["2015-03-06T14:33:00+00:00", 60.0, 43.1] + - ["2015-03-06T14:34:00+00:00", 60.0, -2.0] + - ["2015-03-06T14:35:00+00:00", 60.0, 10.0] + - ['2015-03-06T14:37:00+00:00', 60.0, 15.0] + - ['2015-03-06T14:38:00+00:00', 60.0, 15.0] + - ["2015-03-06T14:33:57+00:00", 1.0, 43.1] + - ["2015-03-06T14:34:12+00:00", 1.0, 12.0] + - ["2015-03-06T14:34:15+00:00", 1.0, -16.0] + - ["2015-03-06T14:35:12+00:00", 1.0, 9.0] + - ["2015-03-06T14:35:15+00:00", 1.0, 11.0] + - ['2015-03-06T14:37:00+00:00', 1.0, 
15.0] + - ['2015-03-06T14:38:00+00:00', 1.0, 15.0] + $."$HISTORY['create metric4'].$RESPONSE['$.id']_mean": + - ["2017-04-06T14:33:00+00:00", 60.0, 20.0] + - ["2017-04-06T14:34:00+00:00", 60.0, 10.0] + - ["2017-04-06T14:33:57+00:00", 1.0, 20.0] + - ["2017-04-06T14:34:12+00:00", 1.0, 10.0] + + - name: no overlap null + POST: /v1/aggregates?fill=null + xfail: gabbi use assertEqual to compare .NAN which is always false + data: + operations: "(metric ($HISTORY['create metric1'].$RESPONSE['$.id'] mean) ($HISTORY['create metric4'].$RESPONSE['$.id'] mean))" + response_json_paths: + $.`len`: 2 + $."$HISTORY['create metric1'].$RESPONSE['$.id']_mean": + - ["2015-03-06T14:33:00+00:00", 60.0, 43.1] + - ["2015-03-06T14:34:00+00:00", 60.0, -2.0] + - ["2015-03-06T14:35:00+00:00", 60.0, 10.0] + - ['2015-03-06T14:37:00+00:00', 60.0, 15.0] + - ['2015-03-06T14:38:00+00:00', 60.0, 15.0] + - ["2017-04-06T14:33:00+00:00", 60.0, .NAN] + - ["2017-04-06T14:34:00+00:00", 60.0, .NAN] + - ["2015-03-06T14:33:57+00:00", 1.0, 43.1] + - ["2015-03-06T14:34:12+00:00", 1.0, 12.0] + - ["2015-03-06T14:34:15+00:00", 1.0, -16.0] + - ["2015-03-06T14:35:12+00:00", 1.0, 9.0] + - ["2015-03-06T14:35:15+00:00", 1.0, 11.0] + - ['2015-03-06T14:37:00+00:00', 1.0, 15.0] + - ['2015-03-06T14:38:00+00:00', 1.0, 15.0] + - ["2017-04-06T14:33:57+00:00", 1.0, .NAN] + - ["2017-04-06T14:34:12+00:00", 1.0, .NAN] + $."$HISTORY['create metric4'].$RESPONSE['$.id']_mean": + - ["2015-03-06T14:33:00+00:00", 60.0, .NAN] + - ["2015-03-06T14:34:00+00:00", 60.0, .NAN] + - ["2015-03-06T14:35:00+00:00", 60.0, .NAN] + - ['2015-03-06T14:37:00+00:00', 60.0, .NAN] + - ['2015-03-06T14:38:00+00:00', 60.0, .NAN] + - ["2017-04-06T14:33:00+00:00", 60.0, 20.0] + - ["2017-04-06T14:34:00+00:00", 60.0, 10.0] + - ["2015-03-06T14:33:57+00:00", 1.0, .NAN] + - ["2015-03-06T14:34:12+00:00", 1.0, .NAN] + - ["2015-03-06T14:34:15+00:00", 1.0, .NAN] + - ["2015-03-06T14:35:12+00:00", 1.0, .NAN] + - ["2015-03-06T14:35:15+00:00", 1.0, .NAN] + - 
['2015-03-06T14:37:00+00:00', 1.0, .NAN] + - ['2015-03-06T14:38:00+00:00', 1.0, .NAN] + - ["2017-04-06T14:33:57+00:00", 1.0, 20.0] + - ["2017-04-06T14:34:12+00:00", 1.0, 10.0] + + - name: no overlap null light check due to previous xfail + POST: /v1/aggregates?fill=null + data: + operations: "(metric ($HISTORY['create metric1'].$RESPONSE['$.id'] mean) ($HISTORY['create metric4'].$RESPONSE['$.id'] mean))" + response_json_paths: + $.`len`: 2 + $."$HISTORY['create metric1'].$RESPONSE['$.id']_mean".`len`: 16 + $."$HISTORY['create metric4'].$RESPONSE['$.id']_mean".`len`: 16 + $."$HISTORY['create metric1'].$RESPONSE['$.id']_mean"[0]: ["2015-03-06T14:33:00+00:00", 60.0, 43.1] + $."$HISTORY['create metric1'].$RESPONSE['$.id']_mean"[7]: ["2015-03-06T14:33:57+00:00", 1.0, 43.1] + $."$HISTORY['create metric4'].$RESPONSE['$.id']_mean"[5]: ["2017-04-06T14:33:00+00:00", 60.0, 20.0] + $."$HISTORY['create metric4'].$RESPONSE['$.id']_mean"[14]: ["2017-04-06T14:33:57+00:00", 1.0, 20.0] + # Negative tests - name: get no operations @@ -582,7 +677,7 @@ tests: $.code: 400 $.description.cause: "Argument value error" $.description.detail: "fill" - $.description.reason: "Must be a float or 'null', got 'foobar'" + $.description.reason: "Must be a float, 'dropna' or 'null'" - name: get rolling bad aggregate POST: /v1/aggregates diff --git a/gnocchi/tests/functional/gabbits/aggregation.yaml b/gnocchi/tests/functional/gabbits/aggregation.yaml index ee8a571f..212223e0 100644 --- a/gnocchi/tests/functional/gabbits/aggregation.yaml +++ b/gnocchi/tests/functional/gabbits/aggregation.yaml @@ -139,6 +139,7 @@ tests: $.aggregated: - ['2015-03-06T14:33:00+00:00', 60.0, 23.1] - ['2015-03-06T14:34:00+00:00', 60.0, 7.0] + - ['2015-03-06T14:35:00+00:00', 60.0, 5.0] - name: get measure aggregates with fill zero GET: /v1/aggregation/metric?metric=$HISTORY['get metric list'].$RESPONSE['$[0].id']&metric=$HISTORY['get metric list'].$RESPONSE['$[1].id']&granularity=1&fill=0 @@ -244,6 +245,7 @@ tests: 
$.aggregated: - ['2015-03-06T14:33:57+00:00', 1.0, 23.1] - ['2015-03-06T14:34:12+00:00', 1.0, 7.0] + - ['2015-03-06T14:35:12+00:00', 1.0, 5.0] - name: get measure aggregates by granularity from resources and resample POST: /v1/aggregation/resource/generic/metric/agg_meter?granularity=1&resample=60 @@ -262,6 +264,7 @@ tests: $.aggregated: - ['2015-03-06T14:33:00+00:00', 60.0, 23.1] - ['2015-03-06T14:34:00+00:00', 60.0, 7.0] + - ['2015-03-06T14:35:00+00:00', 60.0, 5.0] - name: get measure aggregates by granularity from resources and operations POST: /v1/aggregates?granularity=1 @@ -273,6 +276,7 @@ tests: $.aggregated: - ['2015-03-06T14:33:00+00:00', 60.0, 23.1] - ['2015-03-06T14:34:00+00:00', 60.0, 7.0] + - ['2015-03-06T14:35:00+00:00', 60.0, 5.0] - name: get measure aggregates by granularity from resources and bad resample POST: /v1/aggregation/resource/generic/metric/agg_meter?resample=abc diff --git a/releasenotes/notes/fill=dropna-9e055895e7bff778.yaml b/releasenotes/notes/fill=dropna-9e055895e7bff778.yaml new file mode 100644 index 00000000..16e17d3d --- /dev/null +++ b/releasenotes/notes/fill=dropna-9e055895e7bff778.yaml @@ -0,0 +1,6 @@ +--- +features: + - | + Aggregates API and cross metrics aggregation API can take `dropna` for the + `fill` parameter. This acts like `null`, but NaN values are removed from + the result. -- GitLab From 96e77170b08a3c234df238c9f01a77b342dc14f4 Mon Sep 17 00:00:00 2001 From: Thomas Goirand Date: Thu, 26 Oct 2017 20:21:54 +0000 Subject: [PATCH 1060/1483] Fixed oslo-config-generator namespace list for this release. --- debian/changelog | 1 + debian/rules | 8 +++++--- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/debian/changelog b/debian/changelog index 714894a9..ae4999d8 100644 --- a/debian/changelog +++ b/debian/changelog @@ -9,6 +9,7 @@ gnocchi (4.0.3-1) experimental; urgency=medium * Standards-Version is now 4.1.1. * Switch the package to Python 3 only, drop Python 2. 
* Add debian/bin with gnocchi-config-generator entrypoint. + * Fixed oslo-config-generator namespace list for this release. -- Thomas Goirand Thu, 26 Oct 2017 06:33:31 +0200 diff --git a/debian/rules b/debian/rules index 2b8bf5a5..75830e05 100755 --- a/debian/rules +++ b/debian/rules @@ -19,14 +19,16 @@ override_dh_auto_install: rm -rf $(CURDIR)/debian/python*-gnocchi/usr/etc mkdir -p $(CURDIR)/debian/gnocchi-common/usr/share/gnocchi-common - PYTHONPATH=$(CURDIR)/debian/python3-gnocchi/usr/lib/python3/dist-packages oslo-config-generator \ + PYTHONPATH=$(CURDIR)/debian/python3-gnocchi/usr/lib/python3/dist-packages python3-oslo-config-generator \ --output-file $(CURDIR)/debian/gnocchi-common/usr/share/gnocchi-common/gnocchi.conf \ --wrap-width 140 \ --namespace gnocchi \ --namespace oslo.db \ - --namespace oslo.log \ - --namespace oslo.middleware \ + --namespace oslo.middleware.cors \ + --namespace oslo.middleware.healthcheck \ + --namespace oslo.middleware.http_proxy_to_wsgi \ --namespace oslo.policy \ + --namespace cotyledon \ --namespace keystonemiddleware.auth_token sed -i 's|^[ \t#]*url[ \t#]*=.*|url = sqlite:////var/lib/gnocchi/gnocchidb|' $(CURDIR)/debian/gnocchi-common/usr/share/gnocchi-common/gnocchi.conf sed -i 's|^[# \t]*auth_protocol[\t #]*=.*|auth_protocol = http|' $(CURDIR)/debian/gnocchi-common/usr/share/gnocchi-common/gnocchi.conf -- GitLab From 7d3204f742b8654a38f6f76d6da5cb8b76c6c677 Mon Sep 17 00:00:00 2001 From: Thomas Goirand Date: Thu, 26 Oct 2017 20:36:19 +0000 Subject: [PATCH 1061/1483] Fixed source location for policy.json and api-paste.ini. --- debian/changelog | 1 + debian/gnocchi-common.install | 6 +++--- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/debian/changelog b/debian/changelog index ae4999d8..10b715e1 100644 --- a/debian/changelog +++ b/debian/changelog @@ -10,6 +10,7 @@ gnocchi (4.0.3-1) experimental; urgency=medium * Switch the package to Python 3 only, drop Python 2. 
* Add debian/bin with gnocchi-config-generator entrypoint. * Fixed oslo-config-generator namespace list for this release. + * Fixed source location for policy.json and api-paste.ini. -- Thomas Goirand Thu, 26 Oct 2017 06:33:31 +0200 diff --git a/debian/gnocchi-common.install b/debian/gnocchi-common.install index 10da746c..a74fd66d 100644 --- a/debian/gnocchi-common.install +++ b/debian/gnocchi-common.install @@ -1,3 +1,3 @@ -etc/gnocchi/api-paste.ini /usr/share/gnocchi-common -etc/gnocchi/policy.json /usr/share/gnocchi-common -gnocchi/rest/app.wsgi /usr/share/gnocchi-common +gnocchi/rest/api-paste.ini /usr/share/gnocchi-common +gnocchi/rest/app.wsgi /usr/share/gnocchi-common +gnocchi/rest/policy.json /usr/share/gnocchi-common -- GitLab From 1cefef07a708a57e1b11b6bbffa1a4b1b0b8c5f6 Mon Sep 17 00:00:00 2001 From: gord chung Date: Thu, 26 Oct 2017 23:13:27 +0000 Subject: [PATCH 1062/1483] return empty series if empty series given don't try to compute overlap if empty --- gnocchi/rest/aggregates/processor.py | 29 ++++++++++++++-------------- gnocchi/tests/test_aggregates.py | 7 +++++++ 2 files changed, 22 insertions(+), 14 deletions(-) diff --git a/gnocchi/rest/aggregates/processor.py b/gnocchi/rest/aggregates/processor.py index 2c178c37..70e10337 100644 --- a/gnocchi/rest/aggregates/processor.py +++ b/gnocchi/rest/aggregates/processor.py @@ -142,20 +142,21 @@ def aggregated(refs_and_timeseries, operations, from_timestamp=None, if overlap.size == 0 and needed_percent_of_overlap > 0: raise exceptions.UnAggregableTimeseries(references[key], 'No overlap') - # if no boundary set, use first/last timestamp which overlap - if to_timestamp is None and overlap.size: - times = times[:overlap[-1] + 1] - values = values[:overlap[-1] + 1] - if from_timestamp is None and overlap.size: - times = times[overlap[0]:] - values = values[overlap[0]:] - percent_of_overlap = overlap.size * 100.0 / times.size - if percent_of_overlap < needed_percent_of_overlap: - raise 
exceptions.UnAggregableTimeseries( - references[key], - 'Less than %f%% of datapoints overlap in this ' - 'timespan (%.2f%%)' % (needed_percent_of_overlap, - percent_of_overlap)) + if times.size: + # if no boundary set, use first/last timestamp which overlap + if to_timestamp is None and overlap.size: + times = times[:overlap[-1] + 1] + values = values[:overlap[-1] + 1] + if from_timestamp is None and overlap.size: + times = times[overlap[0]:] + values = values[overlap[0]:] + percent_of_overlap = overlap.size * 100.0 / times.size + if percent_of_overlap < needed_percent_of_overlap: + raise exceptions.UnAggregableTimeseries( + references[key], + 'Less than %f%% of datapoints overlap in this ' + 'timespan (%.2f%%)' % (needed_percent_of_overlap, + percent_of_overlap)) granularity, times, values, is_aggregated = ( agg_operations.evaluate(operations, key, times, values, diff --git a/gnocchi/tests/test_aggregates.py b/gnocchi/tests/test_aggregates.py index 8c1c274d..a295c7c3 100644 --- a/gnocchi/tests/test_aggregates.py +++ b/gnocchi/tests/test_aggregates.py @@ -878,6 +878,13 @@ class CrossMetricAggregated(base.TestCase): "metric", ["whatever", "mean"], ["everwhat", "mean"], ]]) + def test_get_measures_empty_metric_needed_overlap_zero(self): + m_id = str(self.metric.id) + result = processor.get_measures( + self.storage, [(self.metric, "mean")], + operations=["metric", m_id, "mean"], needed_overlap=0) + self.assertEqual({'%s_mean' % m_id: []}, result) + def test_get_measures_unknown_aggregation(self): metric2 = indexer.Metric(uuid.uuid4(), self.archive_policies['low']) -- GitLab From 70b9ca427ba7710f5b45dda0b2e1490c08219f37 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Fri, 27 Oct 2017 09:53:36 +0200 Subject: [PATCH 1063/1483] Move coordination_url option out of the storage section The coordination url is more used by the incoming driver for sacks than for the storage. It has been there fore historical purpose. Move it out to DEFAULT. 
--- devstack/plugin.sh | 2 +- gnocchi/cli/metricd.py | 2 +- gnocchi/opts.py | 7 ++++++- gnocchi/rest/app.py | 3 +-- gnocchi/service.py | 11 ++++------- gnocchi/storage/__init__.py | 5 ----- gnocchi/tests/base.py | 2 +- gnocchi/tests/functional/fixtures.py | 3 +-- 8 files changed, 15 insertions(+), 20 deletions(-) diff --git a/devstack/plugin.sh b/devstack/plugin.sh index 9abe9a29..4bd189c1 100644 --- a/devstack/plugin.sh +++ b/devstack/plugin.sh @@ -215,7 +215,7 @@ function configure_gnocchi { fi if [ -n "$GNOCCHI_COORDINATOR_URL" ]; then - iniset $GNOCCHI_CONF storage coordination_url "$GNOCCHI_COORDINATOR_URL" + iniset $GNOCCHI_CONF coordination_url "$GNOCCHI_COORDINATOR_URL" fi if is_service_enabled gnocchi-statsd ; then diff --git a/gnocchi/cli/metricd.py b/gnocchi/cli/metricd.py index 6e705471..c59c7764 100644 --- a/gnocchi/cli/metricd.py +++ b/gnocchi/cli/metricd.py @@ -66,7 +66,7 @@ class MetricProcessBase(cotyledon.Service): def _configure(self): self.coord = retry_on_exception(get_coordinator_and_start, - self.conf.storage.coordination_url) + self.conf.coordination_url) self.store = retry_on_exception( storage.get_driver, self.conf, self.coord) self.incoming = retry_on_exception(incoming.get_driver, self.conf) diff --git a/gnocchi/opts.py b/gnocchi/opts.py index 608294ca..7bfba932 100644 --- a/gnocchi/opts.py +++ b/gnocchi/opts.py @@ -94,6 +94,11 @@ _cli_options = ( def list_opts(): return [ ("DEFAULT", _cli_options + ( + cfg.StrOpt( + 'coordination_url', + secret=True, + deprecated_group="storage", + help='Coordination driver URL'), cfg.IntOpt( 'parallel_operations', min=1, @@ -174,7 +179,7 @@ def list_opts(): 'to force refresh of metric.'), ) + API_OPTS, ), - ("storage", _STORAGE_OPTS + gnocchi.storage._CARBONARA_OPTS), + ("storage", _STORAGE_OPTS), ("incoming", _INCOMING_OPTS), ("statsd", ( cfg.HostAddressOpt('host', diff --git a/gnocchi/rest/app.py b/gnocchi/rest/app.py index 720e516a..4b808c98 100644 --- a/gnocchi/rest/app.py +++ b/gnocchi/rest/app.py 
@@ -93,8 +93,7 @@ def load_app(conf, indexer=None, storage=None, incoming=None, coord=None, # NOTE(jd) This coordinator is never stop. I don't think it's a # real problem since the Web app can never really be stopped # anyway, except by quitting it entirely. - coord = metricd.get_coordinator_and_start( - conf.storage.coordination_url) + coord = metricd.get_coordinator_and_start(conf.coordination_url) storage = gnocchi_storage.get_driver(conf, coord) if not incoming: incoming = gnocchi_incoming.get_driver(conf) diff --git a/gnocchi/service.py b/gnocchi/service.py index 3c580e78..4075b082 100644 --- a/gnocchi/service.py +++ b/gnocchi/service.py @@ -84,15 +84,13 @@ def prepare_service(args=None, conf=None, # If no coordination URL is provided, default to using the indexer as # coordinator - if conf.storage.coordination_url is None: + if conf.coordination_url is None: if conf.storage.driver == "redis": conf.set_default("coordination_url", - conf.storage.redis_url, - "storage") + conf.storage.redis_url) elif conf.incoming.driver == "redis": conf.set_default("coordination_url", - conf.incoming.redis_url, - "storage") + conf.incoming.redis_url) else: parsed = urlparse.urlparse(conf.indexer.url) proto, _, _ = parsed.scheme.partition("+") @@ -100,8 +98,7 @@ def prepare_service(args=None, conf=None, # Set proto without the + part parsed[0] = proto conf.set_default("coordination_url", - urlparse.urlunparse(parsed), - "storage") + urlparse.urlunparse(parsed)) conf.log_opt_values(LOG, logging.DEBUG) diff --git a/gnocchi/storage/__init__.py b/gnocchi/storage/__init__.py index d792c2a1..90b784ae 100644 --- a/gnocchi/storage/__init__.py +++ b/gnocchi/storage/__init__.py @@ -35,11 +35,6 @@ OPTS = [ help='Storage driver to use'), ] -_CARBONARA_OPTS = [ - cfg.StrOpt('coordination_url', - secret=True, - help='Coordination driver URL'), -] LOG = daiquiri.getLogger(__name__) diff --git a/gnocchi/tests/base.py b/gnocchi/tests/base.py index ea93d9cd..b1dcd17f 100644 --- 
a/gnocchi/tests/base.py +++ b/gnocchi/tests/base.py @@ -301,7 +301,7 @@ class TestCase(BaseTestCase): self.index = indexer.get_driver(self.conf) self.coord = metricd.get_coordinator_and_start( - self.conf.storage.coordination_url) + self.conf.coordination_url) # NOTE(jd) So, some driver, at least SQLAlchemy, can't create all # their tables in a single transaction even with the diff --git a/gnocchi/tests/functional/fixtures.py b/gnocchi/tests/functional/fixtures.py index 9d7f8446..bf9e301e 100644 --- a/gnocchi/tests/functional/fixtures.py +++ b/gnocchi/tests/functional/fixtures.py @@ -127,8 +127,7 @@ class ConfigFixture(fixture.GabbiFixture): self.index = index - self.coord = metricd.get_coordinator_and_start( - conf.storage.coordination_url) + self.coord = metricd.get_coordinator_and_start(conf.coordination_url) s = storage.get_driver(conf, self.coord) s.upgrade() i = incoming.get_driver(conf) -- GitLab From fdc048a0407ebb19c99320d22056cc4e0a165c55 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Wed, 11 Oct 2017 11:25:52 +0200 Subject: [PATCH 1064/1483] rest: allow to pass resource_id in the metric creation payload --- gnocchi/indexer/__init__.py | 10 ++- gnocchi/rest/api.py | 50 +++++++++++---- .../create-metric-with-resource-id.yaml | 62 +++++++++++++++++++ 3 files changed, 107 insertions(+), 15 deletions(-) create mode 100644 gnocchi/tests/functional/gabbits/create-metric-with-resource-id.yaml diff --git a/gnocchi/indexer/__init__.py b/gnocchi/indexer/__init__.py index f7e3be1c..9163bf13 100644 --- a/gnocchi/indexer/__init__.py +++ b/gnocchi/indexer/__init__.py @@ -207,10 +207,14 @@ class UnsupportedArchivePolicyRuleChange(IndexerException): class NamedMetricAlreadyExists(IndexerException): """Error raised when a named metric already exists.""" - def __init__(self, metric): + def __init__(self, metric_name): super(NamedMetricAlreadyExists, self).__init__( - "Named metric %s already exists" % metric) - self.metric = metric + "Named metric %s already exists" % 
metric_name) + self.metric_name = metric_name + + def jsonify(self): + return {"cause": "Named metric already exists", + "detail": self.metric_name} class ResourceAlreadyExists(IndexerException): diff --git a/gnocchi/rest/api.py b/gnocchi/rest/api.py index 12bd1105..b0a8cecc 100644 --- a/gnocchi/rest/api.py +++ b/gnocchi/rest/api.py @@ -547,19 +547,22 @@ class MetricsController(rest.RestController): abort(404, six.text_type(indexer.NoSuchMetric(id))) return MetricController(metrics[0]), remainder - _MetricSchema = voluptuous.Schema({ - "archive_policy_name": six.text_type, - "name": six.text_type, - voluptuous.Optional("unit"): - voluptuous.All(six.text_type, voluptuous.Length(max=31)), - }) - # NOTE(jd) Define this method as it was a voluptuous schema – it's just a # smarter version of a voluptuous schema, no? - @classmethod - def MetricSchema(cls, definition): + @staticmethod + def MetricSchema(definition): + creator = pecan.request.auth_helper.get_current_user( + pecan.request) + # First basic validation - definition = cls._MetricSchema(definition) + schema = voluptuous.Schema({ + "archive_policy_name": six.text_type, + "resource_id": functools.partial(ResourceID, creator=creator), + "name": six.text_type, + voluptuous.Optional("unit"): + voluptuous.All(six.text_type, voluptuous.Length(max=31)), + }) + definition = schema(definition) archive_policy_name = definition.get('archive_policy_name') name = definition.get('name') @@ -580,12 +583,23 @@ class MetricsController(rest.RestController): else: definition['archive_policy_name'] = ap.name - creator = pecan.request.auth_helper.get_current_user( - pecan.request) + resource_id = definition.get('resource_id') + if resource_id is None: + original_resource_id = None + else: + if name is None: + abort(400, + {"cause": "Attribute value error", + "detail": "name", + "reason": "Name cannot be null " + "if resource_id is not null"}) + original_resource_id, resource_id = resource_id enforce("create metric", { "creator": 
creator, "archive_policy_name": archive_policy_name, + "resource_id": resource_id, + "original_resource_id": original_resource_id, "name": name, "unit": definition.get('unit'), }) @@ -597,15 +611,23 @@ class MetricsController(rest.RestController): creator = pecan.request.auth_helper.get_current_user( pecan.request) body = deserialize_and_validate(self.MetricSchema) + + resource_id = body.get('resource_id') + if resource_id is not None: + resource_id = resource_id[1] + try: m = pecan.request.indexer.create_metric( uuid.uuid4(), creator, + resource_id=resource_id, name=body.get('name'), unit=body.get('unit'), archive_policy_name=body['archive_policy_name']) except indexer.NoSuchArchivePolicy as e: abort(400, six.text_type(e)) + except indexer.NamedMetricAlreadyExists as e: + abort(400, e) set_resp_location_hdr("/metric/" + str(m.id)) pecan.response.status = 201 return m @@ -1046,6 +1068,10 @@ def ResourceUUID(value, creator): def ResourceID(value, creator): + """Convert value to a resource ID. 
+ + :return: A tuple (original_resource_id, resource_id) + """ return (six.text_type(value), ResourceUUID(value, creator)) diff --git a/gnocchi/tests/functional/gabbits/create-metric-with-resource-id.yaml b/gnocchi/tests/functional/gabbits/create-metric-with-resource-id.yaml new file mode 100644 index 00000000..0adb02ef --- /dev/null +++ b/gnocchi/tests/functional/gabbits/create-metric-with-resource-id.yaml @@ -0,0 +1,62 @@ +fixtures: + - ConfigFixture + +defaults: + request_headers: + # User foobar + authorization: "basic Zm9vYmFyOg==" + content-type: application/json + accept: application/json + +tests: + - name: create archive policy + POST: /v1/archive_policy + request_headers: + # User admin + authorization: "basic YWRtaW46" + data: + name: medium + definition: + - granularity: 1 second + status: 201 + + - name: create resource + POST: /v1/resource/generic + data: + id: foobar + status: 201 + + - name: create metric with a resource id + POST: /v1/metric + data: + resource_id: foobar + archive_policy_name: medium + name: cpu + status: 201 + response_json_paths: + $.archive_policy_name: medium + $.resource_id: 2fbfbb20-8d56-5e1e-afb9-b3007da11fdf + $.creator: foobar + $.name: cpu + + - name: create metric with a resource id and an already existing name + POST: /v1/metric + data: + resource_id: foobar + archive_policy_name: medium + name: cpu + status: 400 + response_json_paths: + $.description.cause: Named metric already exists + $.description.detail: cpu + + - name: create metric with a resource id but no name + POST: /v1/metric + data: + resource_id: foobar + archive_policy_name: medium + status: 400 + response_json_paths: + $.description.cause: Attribute value error + $.description.detail: name + $.description.reason: Name cannot be null if resource_id is not null -- GitLab From 52e8d7e1985b93b0cab049629575d37abb8c5732 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Fri, 27 Oct 2017 17:20:30 +0200 Subject: [PATCH 1065/1483] doc: add reno for 4.1 and switch 
stable to 4.1 --- doc/source/conf.py | 2 +- doc/source/releasenotes/4.1.rst | 6 ++++++ doc/source/releasenotes/index.rst | 1 + 3 files changed, 8 insertions(+), 1 deletion(-) create mode 100644 doc/source/releasenotes/4.1.rst diff --git a/doc/source/conf.py b/doc/source/conf.py index f852686e..3d3ae310 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -180,7 +180,7 @@ html_theme_options = { # Multiversion docs scv_sort = ('semver',) scv_show_banner = True -scv_banner_main_ref = 'stable/4.0' +scv_banner_main_ref = 'stable/4.1' scv_priority = 'branches' scv_whitelist_branches = ('master', '^stable/([3-9]\.)') scv_whitelist_tags = ("^$",) diff --git a/doc/source/releasenotes/4.1.rst b/doc/source/releasenotes/4.1.rst new file mode 100644 index 00000000..d33a607f --- /dev/null +++ b/doc/source/releasenotes/4.1.rst @@ -0,0 +1,6 @@ +=================================== + 4.1 Series Release Notes +=================================== + +.. release-notes:: + :branch: origin/stable/4.1 diff --git a/doc/source/releasenotes/index.rst b/doc/source/releasenotes/index.rst index c74aac00..b7a4a627 100644 --- a/doc/source/releasenotes/index.rst +++ b/doc/source/releasenotes/index.rst @@ -5,6 +5,7 @@ Release Notes :maxdepth: 2 unreleased + 4.1 4.0 3.1 3.0 -- GitLab From c84d1c4e7ea84346428450fb79e838bc604f32bc Mon Sep 17 00:00:00 2001 From: Thomas Goirand Date: Fri, 27 Oct 2017 19:32:36 +0000 Subject: [PATCH 1066/1483] Fixed version of python-sqlalchemy-utils (>= 0.32.14). --- debian/changelog | 6 ++++++ debian/control | 4 ++-- 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/debian/changelog b/debian/changelog index 10b715e1..384dd56a 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +gnocchi (4.0.3-2) UNRELEASED; urgency=medium + + * Fixed version of python-sqlalchemy-utils (>= 0.32.14). + + -- Thomas Goirand Fri, 27 Oct 2017 19:32:06 +0000 + gnocchi (4.0.3-1) experimental; urgency=medium * New upstream release. 
diff --git a/debian/control b/debian/control index 2075f2e8..a44c48ae 100644 --- a/debian/control +++ b/debian/control @@ -54,7 +54,7 @@ Build-Depends-Indep: python3-sphinx-rtd-theme, python3-sphinxcontrib.httpdomain, python3-sqlalchemy, - python3-sqlalchemy-utils, + python3-sqlalchemy-utils (>= 0.32.14), python3-stevedore, python3-swiftclient (>= 3.1.0), python3-sysv-ipc, @@ -157,7 +157,7 @@ Depends: python3-scipy, python3-six, python3-sqlalchemy, - python3-sqlalchemy-utils, + python3-sqlalchemy-utils (>= 0.32.14), python3-stevedore, python3-swiftclient (>= 3.1.0), python3-tenacity (>= 3.1.0), -- GitLab From dde9730fdf0d453088ecf104529d3312dd610593 Mon Sep 17 00:00:00 2001 From: Thomas Goirand Date: Sat, 28 Oct 2017 10:09:08 +0000 Subject: [PATCH 1067/1483] Updated pt.po (Closes: #876172). --- debian/changelog | 5 ++- debian/po/pt.po | 111 +++++++++++++++++++---------------------------- 2 files changed, 47 insertions(+), 69 deletions(-) diff --git a/debian/changelog b/debian/changelog index 384dd56a..041a208f 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,8 +1,9 @@ gnocchi (4.0.3-2) UNRELEASED; urgency=medium - * Fixed version of python-sqlalchemy-utils (>= 0.32.14). + * Fixed version of python-sqlalchemy-utils (>= 0.32.14). + * Updated pt.po (Closes: #876172). - -- Thomas Goirand Fri, 27 Oct 2017 19:32:06 +0000 + -- Thomas Goirand Sat, 28 Oct 2017 10:08:35 +0000 gnocchi (4.0.3-1) experimental; urgency=medium diff --git a/debian/po/pt.po b/debian/po/pt.po index 2905fde1..b4ac73fa 100644 --- a/debian/po/pt.po +++ b/debian/po/pt.po @@ -1,17 +1,17 @@ # glance debconf portuguese messages # Copyright (C) 2012 the glance'S COPYRIGHT HOLDER # This file is distributed under the same license as the glance package. 
-# Pedro Ribeiro , 2012 +# Pedro Ribeiro , 2012, 2017 # msgid "" msgstr "" -"Project-Id-Version: glance\n" +"Project-Id-Version: gnocchi_3.0.4-4\n" "Report-Msgid-Bugs-To: gnocchi@packages.debian.org\n" "POT-Creation-Date: 2016-03-29 13:10+0000\n" -"PO-Revision-Date: 2013-10-20 23:43+0100\n" +"PO-Revision-Date: 2017-09-11 10:43+0100\n" "Last-Translator: Pedro Ribeiro \n" "Language-Team: Potuguese \n" -"Language: \n" +"Language: pt\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" @@ -20,7 +20,7 @@ msgstr "" #. Description #: ../gnocchi-common.templates:2001 msgid "Authentication server hostname:" -msgstr "Nome do servidor de autenticação:" +msgstr "Nome do servidor de autenticao:" #. Type: string #. Description @@ -30,8 +30,8 @@ msgid "" "Typically this is also the hostname of the OpenStack Identity Service " "(Keystone)." msgstr "" -"Indique o nome do seu servidor de autenticação para o Gnocchi. Normalmente, " -"é o nome do seu Serviço de Identidade OpenStack (Keystone)." +"Indique o nome do seu servidor de autenticao para o Gnocchi. Normalmente, " +" o nome do seu Servio de Identidade OpenStack (Keystone)." #. Type: string #. Description @@ -44,7 +44,7 @@ msgstr "" #. locataire ("tenant") #: ../gnocchi-common.templates:3001 msgid "Authentication server tenant name:" -msgstr "Nome do 'tenant' do servidor de autenticação:" +msgstr "Nome do 'tenant' do servidor de autenticao:" #. Type: string #. Description @@ -57,33 +57,33 @@ msgstr "Nome do 'tenant' do servidor de autenticação:" #. locataire ("tenant") #: ../gnocchi-common.templates:3001 msgid "Please specify the authentication server tenant name." -msgstr "Indique, por favor, o nome do 'tenant' do servidor de autenticação." +msgstr "Indique, por favor, o nome do 'tenant' do servidor de autenticao." #. Type: string #. 
Description #: ../gnocchi-common.templates:4001 msgid "Authentication server username:" -msgstr "Nome de utilizador para o servidor de autenticação:" +msgstr "Nome de utilizador para o servidor de autenticao:" #. Type: string #. Description #: ../gnocchi-common.templates:4001 msgid "Please specify the username to use with the authentication server." msgstr "" -"Indique, por favor, o nome de utilizador para o servidor de autenticação." +"Indique, por favor, o nome de utilizador para o servidor de autenticao." #. Type: password #. Description #: ../gnocchi-common.templates:5001 msgid "Authentication server password:" -msgstr "Palavra chave do servidor de autenticação:" +msgstr "Palavra chave do servidor de autenticao:" #. Type: password #. Description #: ../gnocchi-common.templates:5001 msgid "Please specify the password to use with the authentication server." msgstr "" -"Indique, por favor, a palavra-chave para usar no servidor de autenticação." +"Indique, por favor, a palavra-chave para usar no servidor de autenticao." #. Type: boolean #. Description @@ -94,17 +94,12 @@ msgstr "Configurar uma base de dados para o Gnocchi?" #. Type: boolean #. Description #: ../gnocchi-common.templates:6001 -#, fuzzy -#| msgid "" -#| "No database has been set up for glance-registry or glance-api to use. " -#| "Before continuing, you should make sure you have the following " -#| "information:" msgid "" "No database has been set up for Gnocchi to use. Before continuing, you " "should make sure you have the following information:" msgstr "" -"Não foi definida nenhuma base de dados para ser usada pelo glance-registry " -"ou glance-api. Antes de continuar, certifique-se que tem:" +"No foi definida nenhuma base de dados para ser usada pelo Gnocchi. Antes de " +"continuar, certifique-se que tem a seguinte informao:" #. Type: boolean #. Description @@ -117,9 +112,9 @@ msgid "" " * a username and password to access the database." 
msgstr "" " * o tipo de base de dados que quer usar;\n" -" * o nome do servidor (esse servidor deve aceitar ligações TCP a partir\n" -"desta máquina);\n" -" * o nome de utilizador e palavra passe para aceder à base de dados." +" * o nome do servidor (esse servidor deve aceitar ligaes TCP a partir\n" +"desta mquina);\n" +" * o nome de utilizador e palavra passe para aceder base de dados." #. Type: boolean #. Description @@ -128,28 +123,24 @@ msgid "" "If some of these requirements are missing, do not choose this option and run " "with regular SQLite support." msgstr "" -"Se algum destes requisitos estiver em falta, rejeite esta opção e execute " +"Se algum destes requisitos estiver em falta, rejeite esta opo e execute " "com o suporte SQLite normal." #. Type: boolean #. Description #: ../gnocchi-common.templates:6001 -#, fuzzy -#| msgid "" -#| "You can change this setting later on by running \"dpkg-reconfigure -plow " -#| "glance-common\"." msgid "" "You can change this setting later on by running \"dpkg-reconfigure -plow " "gnocchi-common\"." msgstr "" -"Pode mudar esta definição mais tarde ao executar \"dpkg-reconfigure -plow " -"glance-common\"." +"Pode mudar esta definio mais tarde ao executar \"dpkg-reconfigure -plow " +"gnocchi-common\"." #. Type: boolean #. Description #: ../gnocchi-api.templates:2001 msgid "Register Gnocchi in the Keystone endpoint catalog?" -msgstr "Registar o Gnocchi no catálogo de pontos finais do Keystone?" +msgstr "Registar o Gnocchi no catlogo de pontos finais do Keystone?" #. Type: boolean #. Description @@ -159,30 +150,27 @@ msgid "" "accessible. This is done using \"keystone service-create\" and \"keystone " "endpoint-create\". This can be done automatically now." msgstr "" -"Cada serviço Openstack (cada API) deve estar registado para que seja " -"acessível. Isto é feito com \"keystone service-create\" e \"keystone " +"Cada servio Openstack (cada API) deve estar registado para que seja " +"acessvel. 
Isto feito com \"keystone service-create\" e \"keystone " "endpoint-create\". Pode correr estes comandos agora." #. Type: boolean #. Description #: ../gnocchi-api.templates:2001 -#, fuzzy -#| msgid "" -#| "Note that you will need to have an up and running Keystone server on " -#| "which to connect using the Keystone authentication token." msgid "" "Note that you will need to have an up and running Keystone server on which " "to connect using a known admin project name, admin username and password. " "The admin auth token is not used anymore." msgstr "" -"Note que irá necessitar de ter um servidor keystone a correr e pronto para " -"receber ligações autenticadas com o token de autenticação Keystone." +"Note que ir necessitar de ter um servidor keystone a correr e pronto para " +"receber ligaes autenticadas com um nome de administrador de projecto, nome " +"de utilizador e password. O token de autorizao de admin j no usado." #. Type: string #. Description #: ../gnocchi-api.templates:3001 msgid "Keystone server IP address:" -msgstr "Endereço IP do keystone:" +msgstr "Endereo IP do keystone:" #. Type: string #. Description @@ -191,16 +179,14 @@ msgid "" "Please enter the IP address of the Keystone server, so that gnocchi-api can " "contact Keystone to do the Gnocchi service and endpoint creation." msgstr "" -"Indique o endereço IP do seu servidor keystone, de modo a que o glance-api " -"possa contactar o Keystone para criar o serviço e ponto final Gnocchi." +"Indique o endereo IP do seu servidor keystone, de modo a que o glance-api " +"possa contactar o Keystone para criar o servio e ponto final Gnocchi." #. Type: string #. Description #: ../gnocchi-api.templates:4001 -#, fuzzy -#| msgid "Keystone authentication token:" msgid "Keystone admin name:" -msgstr "Token de Autenticação Keystone:" +msgstr "Nome de administrador Keystone:" #. Type: string #. 
Description @@ -214,30 +200,32 @@ msgid "" "To register the service endpoint, this package needs to know the Admin " "login, name, project name, and password to the Keystone server." msgstr "" +"Para registar o endpoint do servio, este pacote necessita de saber o nome " +"de utilizador, nome, nome do projecto e password para o servidor Keystone." #. Type: string #. Description #: ../gnocchi-api.templates:5001 msgid "Keystone admin project name:" -msgstr "" +msgstr "Nome de projecto do administrador Keystone:" #. Type: password #. Description #: ../gnocchi-api.templates:6001 msgid "Keystone admin password:" -msgstr "" +msgstr "Password de administrador Keystone:" #. Type: string #. Description #: ../gnocchi-api.templates:7001 msgid "Gnocchi endpoint IP address:" -msgstr "Endereço IP do ponto final Gnocchi:" +msgstr "Endereo IP do ponto final Gnocchi:" #. Type: string #. Description #: ../gnocchi-api.templates:7001 msgid "Please enter the IP address that will be used to contact Gnocchi." -msgstr "Indique o endereço IP que irá ser usado para contactar o Gnocchi." +msgstr "Indique o endereo IP que ir ser usado para contactar o Gnocchi." #. Type: string #. Description @@ -247,15 +235,15 @@ msgid "" "service, so if you are installing a public cloud, this should be a public IP " "address." msgstr "" -"Este endereço IP deve ser acessível a partir dos clientes que irão usar este " -"serviço, portanto se está a instalar uma cloud pública, este deve ser um " -"endereço IP público." +"Este endereo IP deve ser acessvel a partir dos clientes que iro usar este " +"servio, portanto se est a instalar uma cloud pblica, este deve ser um " +"endereo IP pblico." #. Type: string #. Description #: ../gnocchi-api.templates:8001 msgid "Name of the region to register:" -msgstr "Nome da região a registar:" +msgstr "Nome da regio a registar:" #. Type: string #. Description @@ -265,17 +253,6 @@ msgid "" "location. Please enter the zone that you wish to use when registering the " "endpoint." 
msgstr "" -"O Openstack pode ser usado com zonas de disponibilidade, com cada região a " -"representar uma localização. Por favor, indique a zona que quer usar ao " -"registar um ponto final." - -#, fuzzy -#~| msgid "" -#~| "To configure its endpoint in Keystone, glance-api needs the Keystone " -#~| "authentication token." -#~ msgid "" -#~ "To configure its endpoint in Keystone, gnocchi-api needs the Keystone " -#~ "authentication token." -#~ msgstr "" -#~ "Para configurar o seu ponto final no Keystone, o glance-api precisa do " -#~ "token de autenticação do Keystone." +"O Openstack suporta a utilizao de zonas de disponibilidade, com cada " +"regio a representar uma localizao. Por favor, indique a zona que quer " +"user ao registar um ponto final." -- GitLab From 5fcc1931891cf3959545295db9afad7f906a20bb Mon Sep 17 00:00:00 2001 From: Thomas Goirand Date: Sat, 28 Oct 2017 10:11:55 +0000 Subject: [PATCH 1068/1483] Running gnocchi-upgrade instead of dbsync (Closes: #853121). --- debian/changelog | 1 + debian/gnocchi-common.postinst.in | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/debian/changelog b/debian/changelog index 041a208f..cab91b9e 100644 --- a/debian/changelog +++ b/debian/changelog @@ -2,6 +2,7 @@ gnocchi (4.0.3-2) UNRELEASED; urgency=medium * Fixed version of python-sqlalchemy-utils (>= 0.32.14). * Updated pt.po (Closes: #876172). + * Running gnocchi-upgrade instead of dbsync (Closes: #853121). -- Thomas Goirand Sat, 28 Oct 2017 10:08:35 +0000 diff --git a/debian/gnocchi-common.postinst.in b/debian/gnocchi-common.postinst.in index ca04648d..e0b1e0d7 100644 --- a/debian/gnocchi-common.postinst.in +++ b/debian/gnocchi-common.postinst.in @@ -20,7 +20,7 @@ if [ "$1" = "configure" ] || [ "$1" = "reconfigure" ] ; then if [ "$RET" = "true" ] ; then pkgos_dbc_postinst ${CONF} database connection gnocchi $@ echo "Now calling gnocchi-dbsync: this may take a while..." 
- su -s /bin/sh -c 'gnocchi-dbsync' gnocchi + su -s /bin/sh -c 'gnocchi-upgrade' gnocchi fi pkgos_write_admin_creds ${CONF} keystone_authtoken gnocchi fi -- GitLab From 08d14895d4cd077baed1f78268e13cdb9f62eebd Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Sun, 29 Oct 2017 10:59:28 +0100 Subject: [PATCH 1069/1483] doc: add a note about issues with good first issue in contributing --- doc/source/contributing.rst | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/doc/source/contributing.rst b/doc/source/contributing.rst index 249f6ec2..4d076845 100644 --- a/doc/source/contributing.rst +++ b/doc/source/contributing.rst @@ -14,7 +14,11 @@ Be sure to include a title and clear description, as much relevant information as possible, and a code sample or an executable test case demonstrating the expected behavior that is not occurring. +If you are looking to contribute for the first time, some issues are tagged +with the "`good first issue`_" label and are easy targets for newcomers. + .. _`GitHub issue tracker`: https://github.com/gnocchixyz/gnocchi/issues +.. 
_`good first issue`: https://github.com/gnocchixyz/gnocchi/issues?q=is%3Aissue+is%3Aopen+label%3A%22good+first+issue%22 Pull-requests -- GitLab From df5e4b9d8e8f29664c2ef5525bbf3cd3ccde34a4 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Fri, 27 Oct 2017 18:17:08 +0200 Subject: [PATCH 1070/1483] tests: make sure upgrade from 4.1 is supported --- .travis.yml | 2 ++ tox.ini | 24 ++++++++++++++++++++++++ 2 files changed, 26 insertions(+) diff --git a/.travis.yml b/.travis.yml index fb8400f7..84f554f7 100644 --- a/.travis.yml +++ b/.travis.yml @@ -16,6 +16,8 @@ env: - TARGET: py35-postgresql-file-upgrade-from-3.1 - TARGET: py27-mysql-ceph-upgrade-from-4.0 - TARGET: py35-postgresql-file-upgrade-from-4.0 + - TARGET: py27-mysql-ceph-upgrade-from-4.1 + - TARGET: py35-postgresql-file-upgrade-from-4.1 - TARGET: py27-mysql - TARGET: py35-mysql diff --git a/tox.ini b/tox.ini index 48a830c0..84f0e391 100644 --- a/tox.ini +++ b/tox.ini @@ -95,6 +95,30 @@ deps = gnocchi[{env:GNOCCHI_VARIANT}]>=4.0,<4.1 pifpaf[ceph,gnocchi]>=0.13 commands = pifpaf --env-prefix INDEXER run mysql -- pifpaf --env-prefix STORAGE run ceph {toxinidir}/run-upgrade-tests.sh {posargs} +[testenv:py35-postgresql-file-upgrade-from-4.1] +# We should always recreate since the script upgrade +# Gnocchi we can't reuse the virtualenv +recreate = True +skip_install = True +usedevelop = False +setenv = GNOCCHI_VARIANT=test,postgresql,file +deps = gnocchi[{env:GNOCCHI_VARIANT}]>=4.1,<4.2 + pifpaf[gnocchi]>=0.13 + gnocchiclient>=2.8.0 +commands = pifpaf --env-prefix INDEXER run postgresql {toxinidir}/run-upgrade-tests.sh {posargs} + +[testenv:py27-mysql-ceph-upgrade-from-4.1] +# We should always recreate since the script upgrade +# Gnocchi we can't reuse the virtualenv +recreate = True +skip_install = True +usedevelop = False +setenv = GNOCCHI_VARIANT=test,mysql,ceph,ceph_recommended_lib +deps = gnocchi[{env:GNOCCHI_VARIANT}]>=4.1,<4.2 + gnocchiclient>=2.8.0 + pifpaf[ceph,gnocchi]>=0.13 +commands = pifpaf 
--env-prefix INDEXER run mysql -- pifpaf --env-prefix STORAGE run ceph {toxinidir}/run-upgrade-tests.sh {posargs} + [testenv:pep8] deps = hacking>=0.12,<0.13 bashate -- GitLab From 680144d64d422b3043214d8496bfc4d2e823b5a8 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Fri, 27 Oct 2017 15:28:45 +0200 Subject: [PATCH 1071/1483] Remove deprecated dynamic aggregation --- gnocchi/deprecated_aggregates/__init__.py | 50 ------ gnocchi/deprecated_aggregates/moving_stats.py | 142 ------------------ gnocchi/rest/api.py | 33 +--- gnocchi/tests/test_deprecated_aggregates.py | 109 -------------- gnocchi/tests/test_rest.py | 71 +-------- ...-dynamic-aggregation-e14ece1d0fcaf313.yaml | 4 + requirements.txt | 1 - setup.cfg | 3 - tox.ini | 2 + 9 files changed, 15 insertions(+), 400 deletions(-) delete mode 100644 gnocchi/deprecated_aggregates/__init__.py delete mode 100644 gnocchi/deprecated_aggregates/moving_stats.py delete mode 100644 gnocchi/tests/test_deprecated_aggregates.py create mode 100644 releasenotes/notes/remove-deprecated-dynamic-aggregation-e14ece1d0fcaf313.yaml diff --git a/gnocchi/deprecated_aggregates/__init__.py b/gnocchi/deprecated_aggregates/__init__.py deleted file mode 100644 index 4d54f470..00000000 --- a/gnocchi/deprecated_aggregates/__init__.py +++ /dev/null @@ -1,50 +0,0 @@ -# -*- encoding: utf-8 -*- -# -# Copyright 2014 OpenStack Foundation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-import abc - -import six - -from gnocchi import exceptions - - -class CustomAggFailure(Exception): - """Error raised when custom aggregation functions fail for any reason.""" - - def __init__(self, msg): - self.msg = msg - super(CustomAggFailure, self).__init__(msg) - - -@six.add_metaclass(abc.ABCMeta) -class CustomAggregator(object): - - @abc.abstractmethod - def compute(self, storage_obj, metric, start, stop, **param): - """Returns list of (timestamp, window, aggregate value) tuples. - - :param storage_obj: storage object for retrieving the data - :param metric: metric - :param start: start timestamp - :param stop: stop timestamp - :param **param: parameters are window and optionally center. - 'window' is the granularity over which to compute the moving - aggregate. - 'center=True' returns the aggregated data indexed by the central - time in the sampling window, 'False' (default) indexes aggregates - by the oldest time in the window. center is not supported for EWMA. - - """ - raise exceptions.NotImplementedError diff --git a/gnocchi/deprecated_aggregates/moving_stats.py b/gnocchi/deprecated_aggregates/moving_stats.py deleted file mode 100644 index a7c526ba..00000000 --- a/gnocchi/deprecated_aggregates/moving_stats.py +++ /dev/null @@ -1,142 +0,0 @@ -# -*- encoding: utf-8 -*- -# -# Copyright 2014-2015 OpenStack Foundation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-import numpy -import pandas -import six - -from gnocchi import deprecated_aggregates -from gnocchi import utils - - -class MovingAverage(deprecated_aggregates.CustomAggregator): - - @staticmethod - def retrieve_data(storage_obj, metric, start, stop, window): - """Retrieves finest-res data available from storage.""" - window_seconds = utils.timespan_total_seconds(window) - try: - min_grain = min( - ap.granularity for ap in metric.archive_policy.definition - if (window_seconds % utils.timespan_total_seconds( - ap.granularity) == 0)) - except ValueError: - msg = ("No data available that is either full-res or " - "of a granularity that factors into the window size " - "you specified.") - raise deprecated_aggregates.CustomAggFailure(msg) - - data = list(zip(*storage_obj.get_measures(metric, start, stop, - granularity=min_grain))) - - return (min_grain, - pandas.Series(data[2], data[0]) if data else pandas.Series()) - - @staticmethod - def aggregate_data(data, func, window, min_grain, center=False, - min_size=1): - """Calculates moving func of data with sampling width of window. - - :param data: Series of timestamp, value pairs - :param func: the function to use when aggregating - :param window: (float) range of data to use in each aggregation. - :param min_grain: granularity of the data being passed in. - :param center: whether to index the aggregated values by the first - timestamp of the values picked up by the window or by the central - timestamp. - :param min_size: if the number of points in the window is less than - min_size, the aggregate is not computed and nan is returned for - that iteration. 
- """ - - if center: - center = utils.strtobool(center) - - def moving_window(x): - msec = numpy.timedelta64(1, 'ms') - zero = numpy.timedelta64(0, 's') - half_span = window / 2 - start = utils.normalize_time(data.index[0]) - stop = utils.normalize_time(data.index[-1] + min_grain) - # min_grain addition necessary since each bin of rolled-up data - # is indexed by leftmost timestamp of bin. - - left = half_span if center else zero - right = 2 * half_span - left - msec - # msec subtraction is so we don't include right endpoint in slice. - - x = utils.normalize_time(x) - - if x - left >= start and x + right <= stop: - dslice = data[x - left: x + right] - - if center and dslice.size % 2 == 0: - return func([func(data[x - msec - left: x - msec + right]), - func(data[x + msec - left: x + msec + right]) - ]) - - # (NOTE) atmalagon: the msec shift here is so that we have two - # consecutive windows; one centered at time x - msec, - # and one centered at time x + msec. We then average the - # aggregates from the two windows; this result is centered - # at time x. Doing this double average is a way to return a - # centered average indexed by a timestamp that existed in - # the input data (which wouldn't be the case for an even number - # of points if we did only one centered average). - - else: - return numpy.nan - if dslice.size < min_size: - return numpy.nan - return func(dslice) - try: - result = pandas.Series(data.index).apply(moving_window) - - # change from integer index to timestamp index - result.index = data.index - - return [(t.to_datetime64(), window, r) for t, r - in six.iteritems(result[~result.isnull()])] - except Exception as e: - raise deprecated_aggregates.CustomAggFailure(str(e)) - - def compute(self, storage_obj, metric, start, stop, window=None, - center=False): - """Returns list of (timestamp, window, aggregated value) tuples. - - :param storage_obj: a call is placed to the storage object to retrieve - the stored data. 
- :param metric: the metric - :param start: start timestamp - :param stop: stop timestamp - :param window: format string specifying the size over which to - aggregate the retrieved data - :param center: how to index the aggregated data (central timestamp or - leftmost timestamp) - """ - if window is None: - raise deprecated_aggregates.CustomAggFailure( - 'Moving aggregate must have window specified.' - ) - try: - window = utils.to_timespan(window) - except ValueError: - raise deprecated_aggregates.CustomAggFailure( - 'Invalid value for window') - - min_grain, data = self.retrieve_data(storage_obj, metric, start, - stop, window) - return self.aggregate_data(data, numpy.mean, window, min_grain, center, - min_size=1) diff --git a/gnocchi/rest/api.py b/gnocchi/rest/api.py index b0a8cecc..5195d353 100644 --- a/gnocchi/rest/api.py +++ b/gnocchi/rest/api.py @@ -17,7 +17,6 @@ import functools import itertools import uuid -import warnings import jsonpatch import pbr.version @@ -26,13 +25,11 @@ from pecan import rest import pyparsing import six from six.moves.urllib import parse as urllib_parse -from stevedore import extension import voluptuous import webob.exc import werkzeug.http from gnocchi import archive_policy -from gnocchi import deprecated_aggregates from gnocchi import incoming from gnocchi import indexer from gnocchi import json @@ -436,9 +433,6 @@ class MetricController(rest.RestController): def __init__(self, metric): self.metric = metric - mgr = extension.ExtensionManager(namespace='gnocchi.aggregates', - invoke_on_load=True) - self.custom_agg = dict((x.name, x.obj) for x in mgr) def enforce_metric(self, rule): enforce(rule, json.to_primitive(self.metric)) @@ -464,15 +458,13 @@ class MetricController(rest.RestController): granularity=None, resample=None, refresh=False, **param): self.enforce_metric("get measures") - if not (aggregation - in archive_policy.ArchivePolicy.VALID_AGGREGATION_METHODS - or aggregation in self.custom_agg): + if (aggregation not in + 
archive_policy.ArchivePolicy.VALID_AGGREGATION_METHODS): msg = '''Invalid aggregation value %(agg)s, must be one of %(std)s or %(custom)s''' abort(400, msg % dict( agg=aggregation, - std=archive_policy.ArchivePolicy.VALID_AGGREGATION_METHODS, - custom=str(self.custom_agg.keys()))) + std=archive_policy.ArchivePolicy.VALID_AGGREGATION_METHODS)) if start is not None: try: @@ -503,12 +495,6 @@ class MetricController(rest.RestController): except storage.SackLockTimeoutError as e: abort(503, six.text_type(e)) try: - if aggregation in self.custom_agg: - warnings.warn("moving_average aggregation is deprecated.", - category=DeprecationWarning) - return self.custom_agg[aggregation].compute( - pecan.request.storage, self.metric, - start, stop, **param) return pecan.request.storage.get_measures( self.metric, start, stop, aggregation, utils.to_timespan(granularity) @@ -518,8 +504,6 @@ class MetricController(rest.RestController): storage.GranularityDoesNotExist, storage.AggregationDoesNotExist) as e: abort(404, six.text_type(e)) - except deprecated_aggregates.CustomAggFailure as e: - abort(400, six.text_type(e)) @pecan.expose() def delete(self): @@ -1869,15 +1853,8 @@ class CapabilityController(rest.RestController): @staticmethod @pecan.expose('json') def get(): - aggregation_methods = set( - archive_policy.ArchivePolicy.VALID_AGGREGATION_METHODS) - return dict(aggregation_methods=aggregation_methods, - dynamic_aggregation_methods=[ - ext.name for ext in extension.ExtensionManager( - # NOTE(sileht): Known as deprecated_aggregates - # but we can't change the namespace - namespace='gnocchi.aggregates') - ]) + return dict(aggregation_methods=set( + archive_policy.ArchivePolicy.VALID_AGGREGATION_METHODS)) class StatusController(rest.RestController): diff --git a/gnocchi/tests/test_deprecated_aggregates.py b/gnocchi/tests/test_deprecated_aggregates.py deleted file mode 100644 index 75116003..00000000 --- a/gnocchi/tests/test_deprecated_aggregates.py +++ /dev/null @@ -1,109 +0,0 @@ -# 
-*- encoding: utf-8 -*- -# -# Copyright 2014-2015 OpenStack Foundation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -import datetime -import uuid - -import numpy -from stevedore import extension - -from gnocchi import deprecated_aggregates -from gnocchi.deprecated_aggregates import moving_stats -from gnocchi import incoming -from gnocchi import indexer -from gnocchi.tests import base as tests_base -from gnocchi.tests import utils as tests_utils -from gnocchi import utils - - -class TestAggregates(tests_base.TestCase): - - def setUp(self): - super(TestAggregates, self).setUp() - mgr = extension.ExtensionManager('gnocchi.aggregates', - invoke_on_load=True) - self.custom_agg = dict((x.name, x.obj) for x in mgr) - - def test_extension_dict(self): - self.assertIsInstance(self.custom_agg['moving-average'], - moving_stats.MovingAverage) - - def _test_create_metric_and_data(self, data, spacing): - metric = indexer.Metric( - uuid.uuid4(), self.archive_policies['medium']) - start_time = utils.datetime_utc(2014, 1, 1, 12) - incr = datetime.timedelta(seconds=spacing) - measures = [incoming.Measure( - utils.dt_in_unix_ns(start_time + incr * n), val) - for n, val in enumerate(data)] - self.index.create_metric(metric.id, str(uuid.uuid4()), 'medium') - self.incoming.add_measures(metric, measures) - metrics = tests_utils.list_all_incoming_metrics(self.incoming) - self.storage.process_new_measures( - self.index, self.incoming, metrics, sync=True) - - return metric - - def 
test_retrieve_data(self): - metric = self._test_create_metric_and_data([69, 42, 6, 44, 7], - spacing=20) - for agg_method in self.custom_agg: - agg_obj = self.custom_agg[agg_method] - window = numpy.timedelta64(90, 's') - self.assertRaises(deprecated_aggregates.CustomAggFailure, - agg_obj.retrieve_data, - self.storage, metric, - start=None, stop=None, - window=window) - - window = numpy.timedelta64(120, 's') - grain, result = agg_obj.retrieve_data(self.storage, metric, - start=None, stop=None, - window=window) - self.assertEqual(numpy.timedelta64(1, 'm'), grain) - self.assertEqual(39.0, result[datetime.datetime(2014, 1, 1, 12)]) - self.assertEqual(25.5, - result[datetime.datetime(2014, 1, 1, 12, 1)]) - self.storage.delete_metric(self.incoming, metric) - - def test_compute_moving_average(self): - metric = self._test_create_metric_and_data([69, 42, 6, 44, 7], - spacing=20) - agg_obj = self.custom_agg['moving-average'] - window = '120s' - - center = 'False' - result = agg_obj.compute(self.storage, metric, - start=None, stop=None, - window=window, center=center) - self.assertEqual([(numpy.datetime64("2014-01-01 12:00"), - numpy.timedelta64(120, 's'), - 32.25)], - result) - - center = 'True' - result = agg_obj.compute(self.storage, metric, - start=None, stop=None, - window=window, center=center) - - self.assertEqual([(numpy.datetime64("2014-01-01 12:01"), - numpy.timedelta64(120, 's'), - 28.875)], - result) - # (FIXME) atmalagon: doing a centered average when - # there are only two points in the retrieved data seems weird. - # better to raise an error or return nan in this case? 
- - self.storage.delete_metric(self.incoming, metric) diff --git a/gnocchi/tests/test_rest.py b/gnocchi/tests/test_rest.py index afc7eb5a..7931f234 100644 --- a/gnocchi/tests/test_rest.py +++ b/gnocchi/tests/test_rest.py @@ -28,7 +28,6 @@ from keystonemiddleware import fixture as ksm_fixture import mock import pbr.version import six -from stevedore import extension import testscenarios from testtools import testcase import webtest @@ -198,21 +197,12 @@ class RootTest(RestTest): status=415) def test_capabilities(self): - custom_agg = extension.Extension('test_aggregation', None, None, None) - mgr = extension.ExtensionManager.make_test_instance( - [custom_agg], 'gnocchi.aggregates') aggregation_methods = set( archive_policy.ArchivePolicy.VALID_AGGREGATION_METHODS) - - with mock.patch.object(extension, 'ExtensionManager', - return_value=mgr): - result = self.app.get("/v1/capabilities").json - self.assertEqual( - sorted(aggregation_methods), - sorted(result['aggregation_methods'])) - self.assertEqual( - ['test_aggregation'], - result['dynamic_aggregation_methods']) + result = self.app.get("/v1/capabilities").json + self.assertEqual( + sorted(aggregation_methods), + sorted(result['aggregation_methods'])) def test_version(self): with self.app.use_admin_user(): @@ -512,59 +502,6 @@ class MetricTest(RestTest): [u'2013-01-01T12:00:00+00:00', 60.0, 12345.2]], result) - def test_get_moving_average(self): - result = self.app.post_json("/v1/metric", - params={"archive_policy_name": "medium"}) - metric = json.loads(result.text) - self.app.post_json("/v1/metric/%s/measures" % metric['id'], - params=[{"timestamp": '2013-01-01 12:00:00', - "value": 69}, - {"timestamp": '2013-01-01 12:00:20', - "value": 42}, - {"timestamp": '2013-01-01 12:00:40', - "value": 6}, - {"timestamp": '2013-01-01 12:01:00', - "value": 44}, - {"timestamp": '2013-01-01 12:01:20', - "value": 7}]) - - path = "/v1/metric/%s/measures?aggregation=%s&window=%ds" - ret = self.app.get(path % (metric['id'], 
'moving-average', 120), - status=200) - result = json.loads(ret.text) - expected = [[u'2013-01-01T12:00:00+00:00', 120.0, 32.25]] - self.assertEqual(expected, result) - ret = self.app.get(path % (metric['id'], 'moving-average', 90), - status=400) - self.assertIn('No data available that is either full-res', - ret.text) - path = "/v1/metric/%s/measures?aggregation=%s" - ret = self.app.get(path % (metric['id'], 'moving-average'), - status=400) - self.assertIn('Moving aggregate must have window specified', - ret.text) - - def test_get_moving_average_invalid_window(self): - result = self.app.post_json("/v1/metric", - params={"archive_policy_name": "medium"}) - metric = json.loads(result.text) - self.app.post_json("/v1/metric/%s/measures" % metric['id'], - params=[{"timestamp": '2013-01-01 12:00:00', - "value": 69}, - {"timestamp": '2013-01-01 12:00:20', - "value": 42}, - {"timestamp": '2013-01-01 12:00:40', - "value": 6}, - {"timestamp": '2013-01-01 12:01:00', - "value": 44}, - {"timestamp": '2013-01-01 12:01:20', - "value": 7}]) - - path = "/v1/metric/%s/measures?aggregation=%s&window=foobar" - ret = self.app.get(path % (metric['id'], 'moving-average'), - status=400) - self.assertIn('Invalid value for window', ret.text) - def test_get_resource_missing_named_metric_measure_aggregation(self): mgr = self.index.get_resource_type_schema() resource_type = str(uuid.uuid4()) diff --git a/releasenotes/notes/remove-deprecated-dynamic-aggregation-e14ece1d0fcaf313.yaml b/releasenotes/notes/remove-deprecated-dynamic-aggregation-e14ece1d0fcaf313.yaml new file mode 100644 index 00000000..12da6f20 --- /dev/null +++ b/releasenotes/notes/remove-deprecated-dynamic-aggregation-e14ece1d0fcaf313.yaml @@ -0,0 +1,4 @@ +--- +features: + - | + The deprecated dynamic aggregation (moving average) has been removed. 
diff --git a/requirements.txt b/requirements.txt index 1dc7f130..1a936099 100644 --- a/requirements.txt +++ b/requirements.txt @@ -4,7 +4,6 @@ iso8601 oslo.config>=3.22.0 oslo.policy>=0.3.0 oslo.middleware>=3.22.0 -pandas>=0.18.0 pytimeparse scipy>=0.18.1 # BSD pecan>=0.9 diff --git a/setup.cfg b/setup.cfg index 49067208..b9ca87ce 100644 --- a/setup.cfg +++ b/setup.cfg @@ -111,9 +111,6 @@ gnocchi.indexer = postgresql = gnocchi.indexer.sqlalchemy:SQLAlchemyIndexer postgresql+psycopg2 = gnocchi.indexer.sqlalchemy:SQLAlchemyIndexer -gnocchi.aggregates = - moving-average = gnocchi.deprecated_aggregates.moving_stats:MovingAverage - gnocchi.rest.auth_helper = keystone = gnocchi.rest.auth_helper:KeystoneAuthHelper basic = gnocchi.rest.auth_helper:BasicAuthHelper diff --git a/tox.ini b/tox.ini index 84f0e391..c829d82a 100644 --- a/tox.ini +++ b/tox.ini @@ -172,6 +172,8 @@ setenv = GNOCCHI_STORAGE_DEPS=file GNOCCHI_TEST_DEBUG=1 deps = {[testenv:docs]deps} sphinxcontrib-versioning +# fox <= 4.1 doc + pandas # for 3.x doc oslotest oslosphinx -- GitLab From b705a8d0dfefcf8c57afae1165ec10f7a36078ed Mon Sep 17 00:00:00 2001 From: gord chung Date: Fri, 27 Oct 2017 13:55:16 +0000 Subject: [PATCH 1072/1483] make rest docs more consumable - divide metric section into crud tasks - divide measures section into crud tasks - divide archive-policy section into crud tasks - divide archive-policy-rule section into crud tasks - divide resources section into crud tasks - divide resource-type section into crud tasks - create common search section - create anchors for each functionality - better highlight aggregates API - remove redundant create-resource-with-new-metrics example --- doc/source/rest.j2 | 593 +++++++++++++++++++++++++++---------------- doc/source/rest.yaml | 20 +- 2 files changed, 381 insertions(+), 232 deletions(-) diff --git a/doc/source/rest.j2 b/doc/source/rest.j2 index 5b3eaf85..2af28d2d 100644 --- a/doc/source/rest.j2 +++ b/doc/source/rest.j2 @@ -35,6 +35,9 @@ temperature 
of a room or the number of bytes sent by a network interface. A |metric| only has a few properties: a UUID to identify it, a name, the |archive policy| that will be used to store and aggregate the |measures|. +Create +------ + To create a |metric|, the following API request should be used: {{ scenarios['create-metric']['doc'] }} @@ -45,38 +48,49 @@ To create a |metric|, the following API request should be used: unchangeable. The definition of the |archive policy| can be changed through the :ref:`archive_policy endpoint` though. +Read +---- + Once created, you can retrieve the |metric| information: {{ scenarios['get-metric']['doc'] }} +List +---- + To retrieve the list of all the |metrics| created, use the following request: {{ scenarios['list-metric']['doc'] }} -Metrics can be deleted through a request: - -{{ scenarios['delete-metric']['doc'] }} - -.. note:: +Pagination +~~~~~~~~~~ - Considering the large volume of |metrics| Gnocchi will store, query results - are limited to `max_limit` value set in the configuration file. Returned - results are ordered by |metrics|' id values. To retrieve the next page of - results, the id of a |metric| should be given as `marker` for the beginning - of the next page of results. +Considering the large volume of |metrics| Gnocchi will store, query results +are limited to `max_limit` value set in the configuration file. Returned +results are ordered by |metrics|' id values. To retrieve the next page of +results, the id of a |metric| should be given as `marker` for the beginning +of the next page of results. Default ordering and limits as well as page start can be modified using query parameters: {{ scenarios['list-metric-pagination']['doc'] }} -See also :ref:`Resource's named metrics `. +Delete +------ + +Metrics can be deleted through a request: + +{{ scenarios['delete-metric']['doc'] }} + +See also :ref:`Resources ` for similar operations specific +to metrics associated with a |resource|. 
Measures ======== -Push and retrieve ------------------ +Push +---- It is possible to send |measures| to the |metric|: @@ -87,18 +101,45 @@ status code. It is possible to provide any number of |measures|. .. IMPORTANT:: - While it is possible to send any number of (timestamp, value), it is still - needed to honor constraints defined by the |archive policy| used by the + While it is possible to send any number of (timestamp, value), they still + need to honor constraints defined by the |archive policy| used by the |metric|, such as the maximum |timespan|. +Batch +~~~~~ + +It is also possible to batch |measures| sending, i.e. send several |measures| +for different |metrics| in a simple call: + +{{ scenarios['post-measures-batch']['doc'] }} + +Or using named |metrics| of |resources|: + +{{ scenarios['post-measures-batch-named']['doc'] }} + +If some named |metrics| specified in the batch request do not exist, Gnocchi +can try to create them as long as an |archive policy| rule matches: + +{{ scenarios['post-measures-batch-named-create']['doc'] }} + +Read +---- + Once |measures| are sent, it is possible to retrieve |aggregates| using *GET* on the same endpoint: {{ scenarios['get-measures']['doc'] }} -Depending on the driver, there may be some lag after POSTing |measures| before +The list of points returned is composed of tuples with (timestamp, +|granularity|, value) sorted by timestamp. The |granularity| is the |timespan| +covered by aggregation for this point. + +Refresh +~~~~~~~ + +Depending on the driver, there may be some lag after pushing |measures| before they are processed and queryable. 
To ensure your query returns all |aggregates| -that have been POSTed and processed, you can force any unprocessed |measures| +that have been pushed and processed, you can force any unprocessed |measures| to be handled: {{ scenarios['get-measures-refresh']['doc'] }} @@ -108,9 +149,11 @@ to be handled: Depending on the amount of data that is unprocessed, `refresh` may add some overhead to your query. -The list of points returned is composed of tuples with (timestamp, -|granularity|, value) sorted by timestamp. The |granularity| is the |timespan| -covered by aggregation for this point. +Filter +~~~~~~ + +Time range +`````````` It is possible to filter the |aggregates| over a time range by specifying the *start* and/or *stop* parameters to the query with timestamp. The timestamp @@ -119,24 +162,25 @@ timestamp: {{ scenarios['get-measures-from']['doc'] }} +Aggregation +``````````` + By default, the aggregated values that are returned use the *mean* -|aggregation method|. It is possible to request for any other method by -specifying the *aggregation* query parameter: +|aggregation method|. It is possible to request for any other method defined +by the policy by specifying the *aggregation* query parameter: {{ scenarios['get-measures-max']['doc'] }} -The list of |aggregation method| available is: *mean*, *sum*, *last*, *max*, -*min*, *std*, *median*, *first*, *count* and *Npct* (with 0 < N < 100). - -They can be prefixed by "rate:" (like rate:last) to compute the rate of change -before doing the aggregation. +Granularity +``````````` It's possible to provide the |granularity| argument to specify the |granularity| to retrieve, rather than all the |granularities| available: {{ scenarios['get-measures-granularity']['doc'] }} -See also :ref:`Aggregation across metrics ` and :ref:`Resource's named metrics ` . +Resample +~~~~~~~~ In addition to |granularities| defined by the |archive policy|, |aggregates| can be resampled to a new |granularity|. 
@@ -148,22 +192,11 @@ can be resampled to a new |granularity|. Depending on the |aggregation method| and frequency of |measures|, resampled data may lack accuracy as it is working against previously aggregated data. -Batching -------- - -It is also possible to batch |measures| sending, i.e. send several |measures| -for different |metrics| in a simple call: - -{{ scenarios['post-measures-batch']['doc'] }} - -Or using named |metrics| of |resources|: +.. note:: -{{ scenarios['post-measures-batch-named']['doc'] }} + Gnocchi has an :ref:`aggregates ` endpoint which provides + resampling as well as additional capabilities. -If some named |metrics| specified in the batch request do not exist, Gnocchi -can try to create them as long as an |archive policy| rule matches: - -{{ scenarios['post-measures-batch-named-create']['doc'] }} Archive Policy ============== @@ -172,13 +205,15 @@ When sending |measures| for a |metric| to Gnocchi, the values are dynamically aggregated. That means that Gnocchi does not store all sent |measures|, but aggregates them over a certain period of time. -Gnocchi provides several |aggregation methods| (mean, min, max, sum…) that are -builtin. Those can be prefix by `rate:` to compute the rate of change before -doing the aggregation. +Gnocchi provides several |aggregation methods| that are builtin. The list of +|aggregation method| available is: *mean*, *sum*, *last*, *max*, *min*, *std*, +*median*, *first*, *count* and *Npct* (with 0 < N < 100). Those can be prefixed +by `rate:` to compute the rate of change before doing the aggregation. An |archive policy| is defined by a list of items in the `definition` field. -Each item is composed of the |timespan| and the level of precision that must be -kept when aggregating data, determined using at least 2 of the points, +Each item is composed of: the |timespan|; the |granularity|, which is the level +of precision that must be kept when aggregating data; and the number of points. 
+The |archive policy| is determined using at least 2 of the points, |granularity| and |timespan| fields. For example, an item might be defined as 12 points over 1 hour (one point every 5 minutes), or 1 point every 1 hour over 1 day (24 points). @@ -197,6 +232,9 @@ process |measures| back to 14:00 with a `back_window` of 0. If the `back_window` is set to 2, it will be possible to send |measures| with timestamp back to 12:00 (14:00 minus 2 times 1 hour). +Create +------ + The REST API allows to create |archive policies| in this way: {{ scenarios['create-archive-policy']['doc'] }} @@ -219,18 +257,27 @@ The list of |aggregation methods| can either be: If `*` is included in the list, it's substituted by the list of all supported |aggregation methods|. +Read +---- + Once the |archive policy| is created, the complete set of properties is computed and returned, with the URL of the |archive policy|. This URL can be used to retrieve the details of the |archive policy| later: {{ scenarios['get-archive-policy']['doc'] }} +List +---- + It is also possible to list |archive policies|: {{ scenarios['list-archive-policy']['doc'] }} .. _archive-policy-patch: +Update +------ + Existing |archive policies| can be modified to retain more or less data depending on requirements. If the policy coverage is expanded, |aggregates| are not retroactively calculated as backfill to accommodate the new |timespan|: @@ -242,6 +289,9 @@ not retroactively calculated as backfill to accommodate the new |timespan|: |Granularities| cannot be changed to a different rate. Also, |granularities| cannot be added or dropped from a policy. +Delete +------ + It is possible to delete an |archive policy| if it is not used by any |metric|: {{ scenarios['delete-archive-policy']['doc'] }} @@ -272,6 +322,9 @@ multiple rules match, the longest matching rule is taken. For example, if two rules exists which match `*` and `disk.*`, a `disk.io.rate` |metric| would match the `disk.*` rule rather than `*` rule. 
+Create +------ + To create a rule, the following API request should be used: {{ scenarios['create-archive-policy-rule']['doc'] }} @@ -282,44 +335,61 @@ The `metric_pattern` is used to pattern match so as some examples, - `disk.*` matches disk.io - `disk.io.*` matches disk.io.rate +Read +---- + Once created, you can retrieve the rule information: {{ scenarios['get-archive-policy-rule']['doc'] }} +List +---- + It is also possible to list |archive policy| rules. The result set is ordered by the `metric_pattern`, in reverse alphabetical order: {{ scenarios['list-archive-policy-rule']['doc'] }} -It is possible to delete an |archive policy| rule: - -{{ scenarios['delete-archive-policy-rule']['doc'] }} +Update +------ It is possible to rename an archive policy rule: {{ scenarios['rename-archive-policy-rule']['doc'] }} +Delete +------ + +It is possible to delete an |archive policy| rule: + +{{ scenarios['delete-archive-policy-rule']['doc'] }} + +.. _resources-endpoint: + Resources ========= -Creation --------- - Gnocchi provides the ability to store and index |resources|. Each |resource| has a type. The basic type of |resources| is *generic*, but more specialized subtypes also exist, especially to describe OpenStack resources. -The REST API allows to manipulate |resources|. To create a generic |resource|: +Create +------ + +To create a generic |resource|: {{ scenarios['create-resource-generic']['doc'] }} -The *id*, *user_id* and *project_id* attributes must be UUID. The timestamp +The *id*, *user_id* and *project_id* attributes must be an UUID. The timestamp describing the lifespan of the |resource| are optional, and *started_at* is by default set to the current timestamp. It's possible to retrieve the |resource| by the URL provided in the `Location` header. +Non-generic resources +~~~~~~~~~~~~~~~~~~~~~ + More specialized |resources| can be created. For example, the *instance* is used to describe an OpenStack instance as managed by Nova_. 
@@ -328,36 +398,92 @@ used to describe an OpenStack instance as managed by Nova_. All specialized types have their own optional and mandatory attributes, but they all include attributes from the generic type as well. -It is possible to create |metrics| at the same time you create a |resource| to -save some requests: +.. _Nova: http://launchpad.net/nova + +With metrics +~~~~~~~~~~~~ -{{ scenarios['create-resource-with-new-metrics']['doc'] }} +Each |resource| can be linked to any number of |metrics| on creation: -Querying --------- +{{ scenarios['create-resource-instance-with-metrics']['doc'] }} + +It is also possible to create |metrics| at the same time you create a |resource| +to save some requests: + +{{ scenarios['create-resource-instance-with-dynamic-metrics']['doc'] }} + +Read +---- To retrieve a |resource| by its URL provided by the `Location` header at creation time: {{ scenarios['get-resource-generic']['doc'] }} -Modification ------------- +List +---- + +All |resources| can be listed, either by using the `generic` type that will +list all types of |resources|, or by filtering on their |resource| type: + +{{ scenarios['list-resource-generic']['doc'] }} + +Specific resource type +~~~~~~~~~~~~~~~~~~~~~~ + +No attributes specific to the |resource| type are retrieved when using the +`generic` endpoint. To retrieve the details, either list using the specific +|resource| type endpoint: + +{{ scenarios['list-resource-instance']['doc'] }} + +With details +~~~~~~~~~~~~ + +To retrieve a more detailed view of the resources, use `details=true` in the +query parameter: + +{{ scenarios['list-resource-generic-details']['doc'] }} + +Pagination +~~~~~~~~~~ + +Similar to |metric| list, query results are limited to `max_limit` value set +in the configuration file. 
Returned results represent a single page of data and +are ordered by resouces' revision_start time and started_at values: + +{{ scenarios['list-resource-generic-pagination']['doc'] }} + +List resource metrics +--------------------- + +The |metrics| associated with a |resource| can be accessed and manipulated +using the usual `/v1/metric` endpoint or using the named relationship with the +|resource|: + +{{ scenarios['get-resource-named-metrics-measures']['doc'] }} + +Update +------ It's possible to modify a |resource| by re-uploading it partially with the modified fields: {{ scenarios['patch-resource']['doc'] }} -History modification --------------------- +It is also possible to associate additional |metrics| with a |resource|: + +{{ scenarios['append-metrics-to-resource']['doc'] }} + +History +------- -And to retrieve its modification history: +And to retrieve a |resource|'s modification history: {{ scenarios['get-patched-instance-history']['doc'] }} -Deletion --------- +Delete +------ It is possible to delete a |resource| altogether: @@ -366,6 +492,9 @@ It is possible to delete a |resource| altogether: It is also possible to delete a batch of |resources| based on attribute values, and returns a number of deleted |resources|. +Batch +~~~~~ + To delete |resources| based on ids: {{ scenarios['delete-resources-by-ids']['doc'] }} @@ -382,105 +511,99 @@ or delete |resources| based on time: When a batch of |resources| are deleted, an attribute filter is required to avoid deletion of the entire database. -Listing -------- - -All |resources| can be listed, either by using the `generic` type that will -list all types of |resources|, or by filtering on their |resource| type: - -{{ scenarios['list-resource-generic']['doc'] }} -No attributes specific to the |resource| type are retrieved when using the -`generic` endpoint. 
To retrieve the details, either list using the specific -|resource| type endpoint: - -{{ scenarios['list-resource-instance']['doc'] }} - -or using `details=true` in the query parameter: - -{{ scenarios['list-resource-generic-details']['doc'] }} - -.. note:: +Resource Types +============== - Similar to |metric| list, query results are limited to `max_limit` value set - in the configuration file. +Gnocchi is able to manage |resource| types with custom attributes. -Returned results represent a single page of data and are ordered by resouces' -revision_start time and started_at values: +Create +------ -{{ scenarios['list-resource-generic-pagination']['doc'] }} +To create a new |resource| type: -.. _resource-named-metrics: +{{ scenarios['create-resource-type']['doc'] }} -Named metrics -------------- +Read +---- -Each |resource| can be linked to any number of |metrics|. The |metrics| -attributes is a key/value field where the key is the name of the relationship -and the value is a |metric|: +Then to retrieve its description: -{{ scenarios['create-resource-instance-with-metrics']['doc'] }} +{{ scenarios['get-resource-type']['doc'] }} -It's also possible to create |metrics| dynamically while creating a |resource|: +List +---- -{{ scenarios['create-resource-instance-with-dynamic-metrics']['doc'] }} +All |resource| types can be listed like this: -The |metric| associated with a |resource| can be accessed and manipulated using -the usual `/v1/metric` endpoint or using the named relationship with the -|resource|: +{{ scenarios['list-resource-type']['doc'] }} -{{ scenarios['get-resource-named-metrics-measures']['doc'] }} +Update +------ -The same endpoint can be used to append |metrics| to a |resource|: +Attributes can be added or removed: -{{ scenarios['append-metrics-to-resource']['doc'] }} +{{ scenarios['patch-resource-type']['doc'] }} -.. 
_Nova: http://launchpad.net/nova +Delete +------ -Resource Types -============== +It can also be deleted if no more |resources| are associated to it: -Gnocchi is able to manage |resource| types with custom attributes. +{{ scenarios['delete-resource-type']['doc'] }} -To create a new |resource| type: +.. note:: -{{ scenarios['create-resource-type']['doc'] }} + Creating |resource| type means creation of new tables on the indexer + backend. This is a heavy operation that will lock some tables for a short + amount of time. When the |resource| type is created, its initial `state` is + `creating`. When the new tables have been created, the state switches to + `active` and the new |resource| type is ready to be used. If something + unexpected occurs during this step, the state switches to `creation_error`. -Then to retrieve its description: + The same behavior occurs when the |resource| type is deleted. The state + starts to switch to `deleting`, the |resource| type is no longer usable. + Then the tables are removed and then finally the resource_type is really + deleted from the database. If some unexpected error occurs the state + switches to `deletion_error`. -{{ scenarios['get-resource-type']['doc'] }} -All |resource| types can be listed like this: -{{ scenarios['list-resource-type']['doc'] }} +Search +====== -It can also be deleted if no more |resources| are associated to it: +Gnocchi's search API supports the ability to execute a query across +|resources| or |metrics|. This API provides a language to construct more +complex matching constraints beyond basic filtering. -{{ scenarios['delete-resource-type']['doc'] }} +Usage +----- -Attributes can be added or removed: +You can specify a time range to look for by specifying the `start` and/or +`stop` query parameter, and the |aggregation method| to use by specifying the +`aggregation` query parameter. 
-{{ scenarios['patch-resource-type']['doc'] }} +The supported operators are: equal to (`=`, `==` or `eq`), lesser than (`<` or +`lt`), greater than (`>` or `gt`), less than or equal to (`<=`, `le` or `≤`) +greater than or equal to (`>=`, `ge` or `≥`) not equal to (`!=`, `ne` or `≠`), +addition (`+` or `add`), substraction (`-` or `sub`), multiplication (`*`, +`mul` or `×`), division (`/`, `div` or `÷`). These operations take either one +argument, and in this case the second argument passed is the value, or it. -Creating |resource| type means creation of new tables on the indexer backend. -This is heavy operation that will lock some tables for a short amount of times. -When the |resource| type is created, its initial `state` is `creating`. When -the new tables have been created, the state switches to `active` and the new -|resource| type is ready to be used. If something unexpected occurs during this -step, the state switches to `creation_error`. +The operators or (`or` or `∨`), and (`and` or `∧`) and `not` are also +supported, and take a list of arguments as parameters. -The same behavior occurs when the |resource| type is deleted. The state starts -to switch to `deleting`, the |resource| type is no longer usable. Then the -tables are removed and then finally the resource_type is really deleted from -the database. If some unexpected error occurs the state switches to -`deletion_error`. +.. _search-resource: -Searching for resources -======================= +Resource +-------- It's possible to search for |resources| using a query mechanism, using the `POST` method and uploading a JSON formatted query. 
+Single filter +~~~~~~~~~~~~~ + When listing |resources|, it is possible to filter |resources| based on attributes values: @@ -490,14 +613,23 @@ Or even: {{ scenarios['search-resource-for-host-like']['doc'] }} +Multiple filters +~~~~~~~~~~~~~~~~ + Complex operators such as `and` and `or` are also available: {{ scenarios['search-resource-for-user-after-timestamp']['doc'] }} +With details +~~~~~~~~~~~~ + Details about the |resource| can also be retrieved at the same time: {{ scenarios['search-resource-for-user-details']['doc'] }} +History +~~~~~~~ + It's possible to search for old revisions of |resources| in the same ways: {{ scenarios['search-resource-history']['doc'] }} @@ -506,24 +638,23 @@ It is also possible to send the *history* parameter in the *Accept* header: {{ scenarios['search-resource-history-in-accept']['doc'] }} +Time range +`````````` + The timerange of the history can be set, too: {{ scenarios['search-resource-history-partial']['doc'] }} -The supported operators are: equal to (`=`, `==` or `eq`), less than (`<` or -`lt`), greater than (`>` or `gt`), less than or equal to (`<=`, `le` or `≤`) -greater than or equal to (`>=`, `ge` or `≥`) not equal to (`!=`, `ne` or `≠`), -value is in (`in`), value is like (`like`), or (`or` or `∨`), and (`and` or -`∧`) and negation (`not`). +Magic +~~~~~ The special attribute `lifespan` which is equivalent to `ended_at - started_at` is also available in the filtering queries. {{ scenarios['search-resource-lifespan']['doc'] }} - -Searching for values in metrics -=============================== +Metric +------ It is possible to search for values in |metrics|. For example, this will look for all values that are greater than or equal to 50 if we add 23 to them and @@ -532,88 +663,34 @@ into by using the `metric_id` query parameter several times. 
{{ scenarios['search-value-in-metric']['doc'] }} -And it is possible to search for values in |metrics| by using one or multiple +And it is possible to search for values in |metrics| by using one or more |granularities|: {{ scenarios['search-value-in-metrics-by-granularity']['doc'] }} -You can specify a time range to look for by specifying the `start` and/or -`stop` query parameter, and the |aggregation method| to use by specifying the -`aggregation` query parameter. - -The supported operators are: equal to (`=`, `==` or `eq`), lesser than (`<` or -`lt`), greater than (`>` or `gt`), less than or equal to (`<=`, `le` or `≤`) -greater than or equal to (`>=`, `ge` or `≥`) not equal to (`!=`, `ne` or `≠`), -addition (`+` or `add`), substraction (`-` or `sub`), multiplication (`*`, -`mul` or `×`), division (`/`, `div` or `÷`). These operations take either one -argument, and in this case the second argument passed is the value, or it. - -The operators or (`or` or `∨`), and (`and` or `∧`) and `not` are also -supported, and take a list of arguments as parameters. .. _aggregates: -Aggregates: on the fly, measurements modification and aggregation -=================================================================== +Dynamic Aggregates +================== -Gnocchi allows to do on-the-fly aggregation and modification of already -aggregated data of |metrics|. - -It can be done by providing the list of |metrics| to aggregate: - -{{ scenarios['get-aggregates-by-metric-ids']['doc'] }} - -This example computes the mean aggregates with `all` metrics listed in -`metrics` and then multiples it by `4`. +Gnocchi supports the ability to make on-the-fly reaggregations of existing +|metrics| and the ability to manipulate and transform |metrics| as required. +This is accomplished by passing an `operations` value describing the actions +to apply to the |metrics|. .. 
note:: `operations` can also be passed as a string, for example: `"operations": "(aggregate mean (metric (metric-id aggregation) (metric-id aggregation))"` -Operations between metrics can also be done, such as: - -{{ scenarios['get-aggregates-between-metrics']['doc'] }} - -Aggregation across |metrics| have different behavior depending -on whether boundary values are set (`start` and `stop`) and if `needed_overlap` -is set. - -Gnocchi expects that time series have a certain percentage of timestamps in -common. This percent is controlled by the `needed_overlap` needed_overlap, -which by default expects 100% overlap. If this percentage is not reached, an -error is returned. - -.. note:: - - If `start` or `stop` boundary is not set, Gnocchi will set the missing - boundary to the first or last timestamp common across all series. - -The ability to fill in missing points from a subset of time series is supported -by specifying a `fill` value. Valid fill values include any float, `dropna` or -`null`. In the case of `null`, Gnocchi will compute the aggregation using only -the existing points. `dropna` is like `null` but remove NaN from the result. -The `fill` parameter will not backfill timestamps which contain no points in -any of the time series. Only timestamps which have datapoints in at least one -of the time series is returned. - -{{ scenarios['get-aggregates-by-metric-ids-fill']['doc'] }} - -It's also possible to do that aggregation on |metrics| linked to |resources|. -In order to select these |resources|, the following endpoint accepts a query -such as the one described in `Searching for resources`_. 
- -{{ scenarios['get-aggregates-by-attributes-lookup']['doc'] }} - -It is possible to group the |resource| search results by any attribute of the -requested |resource| type, and then compute the aggregation: - -{{ scenarios['get-aggregates-by-attributes-lookup-groupby']['doc'] }} - List of supported ------------------------------ -getting one or more metrics:: +Get one or more metrics +~~~~~~~~~~~~~~~~~~~~~~~ + +:: (metric ) (metric (( ), ( ), ...)) @@ -621,7 +698,15 @@ getting one or more metrics:: metric-id: the id of a metric to retrieve aggregation: the aggregation method to retrieve -rolling window aggregation:: +.. note:: + + When used alone, this provides the ability to retrieve multiple |metrics| in a + single request. + +Rolling window aggregation +~~~~~~~~~~~~~~~~~~~~~~~~~~ + +:: (rolling ()) @@ -629,15 +714,20 @@ rolling window aggregation:: (mean, median, std, min, max, sum, var, count) rolling window: number of previous values to aggregate +Aggregation across metrics +~~~~~~~~~~~~~~~~~~~~~~~~~~ -aggregation across metrics:: +:: aggregate ((), (), ...)) aggregation method: the aggregation method to use to compute the aggregate between metrics (mean, median, std, min, max, sum, var, count) -resampling metrics:: +Resample +~~~~~~~~ + +:: (resample ()) @@ -646,19 +736,28 @@ resampling metrics:: granularity: the granularity (e.g.: 1d, 60s, ...) 
-math operations:: +Math operations +~~~~~~~~~~~~~~~ + +:: ( ) operator: %, mod, +, add, -, sub, *, ×, mul, /, ÷, div, **, ^, pow -boolean operations:: +Boolean operations +~~~~~~~~~~~~~~~~~~ + +:: ( ) operator: =, ==, eq, <, lt, >, gt, <=, ≤, le, =, ≥, ge, !=, ≠, ne -function operations:: +Function operations +~~~~~~~~~~~~~~~~~~~ + +:: (abs ()) (absolute ()) @@ -670,8 +769,75 @@ function operations:: (floor ()) (ceil ()) +Cross-metric Usage +------------------ + +Aggregation across multiple |metrics| has different behavior depending +on whether boundary values are set (`start` and `stop`) and if `needed_overlap` +is set. + +Overlap percentage +~~~~~~~~~~~~~~~~~~ + +Gnocchi expects that time series have a certain percentage of timestamps in +common. This percent is controlled by the `needed_overlap` parameter, +which by default expects 100% overlap. If this percentage is not reached, an +error is returned. + +.. note:: + + If `start` or `stop` boundary is not set, Gnocchi will set the missing + boundary to the first or last timestamp common across all series. + +Backfill +~~~~~~~~ + +The ability to fill in missing points from a subset of time series is supported +by specifying a `fill` value. Valid fill values include any float, `dropna` or +`null`. In the case of `null`, Gnocchi will compute the aggregation using only +the existing points. `dropna` is like `null` but removes NaN from the result. +The `fill` parameter will not backfill timestamps which contain no points in +any of the time series. Only timestamps which have datapoints in at least one +of the time series are returned. + +{{ scenarios['get-aggregates-by-metric-ids-fill']['doc'] }} + + +Search and aggregate +-------------------- + +It's also possible to do that aggregation on |metrics| linked to |resources|. +In order to select these |resources|, the following endpoint accepts a query +such as the one described in the :ref:`resource search API `. 
+ +{{ scenarios['get-aggregates-by-attributes-lookup']['doc'] }} + +Groupby +~~~~~~~ + +It is possible to group the |resource| search results by any attribute of the +requested |resource| type, and then compute the aggregation: + +{{ scenarios['get-aggregates-by-attributes-lookup-groupby']['doc'] }} + +Examples +-------- + +Aggregate then math +~~~~~~~~~~~~~~~~~~~ + +The following computes the mean aggregates with `all` metrics listed in +`metrics` and then multiples it by `4`. + +{{ scenarios['get-aggregates-by-metric-ids']['doc'] }} + +Between metrics +~~~~~~~~~~~~~~~ + +Operations between metrics can also be done, such as: + +{{ scenarios['get-aggregates-between-metrics']['doc'] }} -.. _aggregation-across-metrics: Aggregation across metrics (deprecated) ======================================= @@ -704,7 +870,7 @@ parameter: It's also possible to do that aggregation on |metrics| linked to |resources|. In order to select these |resources|, the following endpoint accepts a query -such as the one described in `Searching for resources`_. +such as the one described in the :ref:`resource search API `. {{ scenarios['get-across-metrics-measures-by-attributes-lookup']['doc'] }} @@ -758,6 +924,7 @@ can differ between deployments. 
It is possible to get the supported list of Status ====== + The overall status of the Gnocchi installation can be retrieved via an API call reporting values such as the number of new |measures| to process for each |metric|: diff --git a/doc/source/rest.yaml b/doc/source/rest.yaml index f7510589..61ffc557 100644 --- a/doc/source/rest.yaml +++ b/doc/source/rest.yaml @@ -314,18 +314,6 @@ "project_id": "BD3A1E52-1C62-44CB-BF04-660BD88CD74D" } -- name: create-resource-with-new-metrics - request: | - POST /v1/resource/generic HTTP/1.1 - Content-Type: application/json - - { - "id": "AB68DA77-FA82-4E67-ABA9-270C5A98CBCB", - "user_id": "BD3A1E52-1C62-44CB-BF04-660BD88CD74D", - "project_id": "BD3A1E52-1C62-44CB-BF04-660BD88CD74D", - "metrics": {"temperature": {"archive_policy_name": "low"}} - } - - name: create-resource-type-instance request: | POST /v1/resource_type HTTP/1.1 @@ -569,12 +557,6 @@ Content-Type: application/json { - "{{ scenarios['create-resource-with-new-metrics']['response'].json['id'] }}": { - "temperature": [ - { "timestamp": "2014-10-06T14:34:12", "value": 17 }, - { "timestamp": "2014-10-06T14:34:20", "value": 18 } - ] - }, "{{ scenarios['create-resource-instance-with-dynamic-metrics']['response'].json['id'] }}": { "cpu.util": [ { "timestamp": "2014-10-06T14:34:12", "value": 12 }, @@ -595,7 +577,7 @@ Content-Type: application/json { - "{{ scenarios['create-resource-with-new-metrics']['response'].json['id'] }}": { + "{{ scenarios['create-resource-instance-with-dynamic-metrics']['response'].json['id'] }}": { "disk.io.test": [ { "timestamp": "2014-10-06T14:34:12", "value": 71 }, { "timestamp": "2014-10-06T14:34:20", "value": 81 } -- GitLab From c79d1a6c8d8af3e645a7e188191d425b51d034a4 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Tue, 7 Nov 2017 15:12:10 +0100 Subject: [PATCH 1073/1483] Remove oslo.db from config generator The oslo.db template included by `gnocchi-config-generator` contains many variable and confused people configure Gnocchi for the 
first time, especially because the [database]connection field is there and not used. Do not expose those values in the template to keep things simple. --- gnocchi/gnocchi-config-generator.conf | 1 - 1 file changed, 1 deletion(-) diff --git a/gnocchi/gnocchi-config-generator.conf b/gnocchi/gnocchi-config-generator.conf index faf947e0..ab1752dd 100644 --- a/gnocchi/gnocchi-config-generator.conf +++ b/gnocchi/gnocchi-config-generator.conf @@ -1,7 +1,6 @@ [DEFAULT] wrap_width = 79 namespace = gnocchi -namespace = oslo.db namespace = oslo.middleware.cors namespace = oslo.middleware.healthcheck namespace = oslo.middleware.http_proxy_to_wsgi -- GitLab From dfdb68f4a1b45aee803f4f386c92af27cfa11db1 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Wed, 8 Nov 2017 12:21:38 +0100 Subject: [PATCH 1074/1483] doc: add some note about resample People should leverage archive policy system. This change adds some notes about that. --- doc/source/rest.j2 | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/doc/source/rest.j2 b/doc/source/rest.j2 index 2af28d2d..512c25ba 100644 --- a/doc/source/rest.j2 +++ b/doc/source/rest.j2 @@ -187,6 +187,13 @@ can be resampled to a new |granularity|. {{ scenarios['get-measures-resample']['doc'] }} + +.. note:: + + If you plan to execute the query often, it is recommended for performance + to leverage an |archive policy| with the needed |granularity| instead of + resampling the time series on each query. + .. note:: Depending on the |aggregation method| and frequency of |measures|, resampled @@ -736,6 +743,13 @@ Resample granularity: the granularity (e.g.: 1d, 60s, ...) +.. note:: + + If you plan to execute the query often, it is recommended for performance + to leverage an |archive policy| with the needed |granularity| instead of + resampling the time series on each query. 
+ + Math operations ~~~~~~~~~~~~~~~ -- GitLab From 02b6787e6a4958b8b9f2ace9f57d2bc82847d775 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Thu, 9 Nov 2017 11:21:41 +0100 Subject: [PATCH 1075/1483] tests: Assert on NAN gabbi allows to write custom value checker since 1.37.0. This change uses that to check that .NAN is NAN. --- gnocchi/tests/functional/fixtures.py | 13 ++++++++ .../gabbits/aggregates-with-metric-ids.yaml | 33 +++++++++---------- gnocchi/tests/functional/test_gabbi.py | 3 +- setup.cfg | 2 +- 4 files changed, 32 insertions(+), 19 deletions(-) diff --git a/gnocchi/tests/functional/fixtures.py b/gnocchi/tests/functional/fixtures.py index bf9e301e..6bb1ed9b 100644 --- a/gnocchi/tests/functional/fixtures.py +++ b/gnocchi/tests/functional/fixtures.py @@ -24,9 +24,11 @@ import warnings import daiquiri from gabbi import fixture +import numpy from oslo_config import cfg from oslo_middleware import cors import sqlalchemy_utils +import yaml from gnocchi.cli import metricd from gnocchi import incoming @@ -50,6 +52,17 @@ def setup_app(): return app.load_app(**LOAD_APP_KWARGS) +class AssertNAN(yaml.YAMLObject): + def __eq__(self, other): + try: + return numpy.isnan(other) + except TypeError: + return False + + +yaml.add_constructor(u'!AssertNAN', lambda loader, node: AssertNAN()) + + class ConfigFixture(fixture.GabbiFixture): """Establish the relevant configuration fixture, per test file. 
diff --git a/gnocchi/tests/functional/gabbits/aggregates-with-metric-ids.yaml b/gnocchi/tests/functional/gabbits/aggregates-with-metric-ids.yaml index 251c71b2..aea68d65 100644 --- a/gnocchi/tests/functional/gabbits/aggregates-with-metric-ids.yaml +++ b/gnocchi/tests/functional/gabbits/aggregates-with-metric-ids.yaml @@ -429,7 +429,6 @@ tests: - name: no overlap null POST: /v1/aggregates?fill=null - xfail: gabbi use assertEqual to compare .NAN which is always false data: operations: "(metric ($HISTORY['create metric1'].$RESPONSE['$.id'] mean) ($HISTORY['create metric4'].$RESPONSE['$.id'] mean))" response_json_paths: @@ -440,8 +439,8 @@ tests: - ["2015-03-06T14:35:00+00:00", 60.0, 10.0] - ['2015-03-06T14:37:00+00:00', 60.0, 15.0] - ['2015-03-06T14:38:00+00:00', 60.0, 15.0] - - ["2017-04-06T14:33:00+00:00", 60.0, .NAN] - - ["2017-04-06T14:34:00+00:00", 60.0, .NAN] + - ["2017-04-06T14:33:00+00:00", 60.0, !AssertNAN ] + - ["2017-04-06T14:34:00+00:00", 60.0, !AssertNAN ] - ["2015-03-06T14:33:57+00:00", 1.0, 43.1] - ["2015-03-06T14:34:12+00:00", 1.0, 12.0] - ["2015-03-06T14:34:15+00:00", 1.0, -16.0] @@ -449,23 +448,23 @@ tests: - ["2015-03-06T14:35:15+00:00", 1.0, 11.0] - ['2015-03-06T14:37:00+00:00', 1.0, 15.0] - ['2015-03-06T14:38:00+00:00', 1.0, 15.0] - - ["2017-04-06T14:33:57+00:00", 1.0, .NAN] - - ["2017-04-06T14:34:12+00:00", 1.0, .NAN] + - ["2017-04-06T14:33:57+00:00", 1.0, !AssertNAN ] + - ["2017-04-06T14:34:12+00:00", 1.0, !AssertNAN ] $."$HISTORY['create metric4'].$RESPONSE['$.id']_mean": - - ["2015-03-06T14:33:00+00:00", 60.0, .NAN] - - ["2015-03-06T14:34:00+00:00", 60.0, .NAN] - - ["2015-03-06T14:35:00+00:00", 60.0, .NAN] - - ['2015-03-06T14:37:00+00:00', 60.0, .NAN] - - ['2015-03-06T14:38:00+00:00', 60.0, .NAN] + - ["2015-03-06T14:33:00+00:00", 60.0, !AssertNAN ] + - ["2015-03-06T14:34:00+00:00", 60.0, !AssertNAN ] + - ["2015-03-06T14:35:00+00:00", 60.0, !AssertNAN ] + - ['2015-03-06T14:37:00+00:00', 60.0, !AssertNAN ] + - ['2015-03-06T14:38:00+00:00', 60.0, 
!AssertNAN ] - ["2017-04-06T14:33:00+00:00", 60.0, 20.0] - ["2017-04-06T14:34:00+00:00", 60.0, 10.0] - - ["2015-03-06T14:33:57+00:00", 1.0, .NAN] - - ["2015-03-06T14:34:12+00:00", 1.0, .NAN] - - ["2015-03-06T14:34:15+00:00", 1.0, .NAN] - - ["2015-03-06T14:35:12+00:00", 1.0, .NAN] - - ["2015-03-06T14:35:15+00:00", 1.0, .NAN] - - ['2015-03-06T14:37:00+00:00', 1.0, .NAN] - - ['2015-03-06T14:38:00+00:00', 1.0, .NAN] + - ["2015-03-06T14:33:57+00:00", 1.0, !AssertNAN ] + - ["2015-03-06T14:34:12+00:00", 1.0, !AssertNAN ] + - ["2015-03-06T14:34:15+00:00", 1.0, !AssertNAN ] + - ["2015-03-06T14:35:12+00:00", 1.0, !AssertNAN ] + - ["2015-03-06T14:35:15+00:00", 1.0, !AssertNAN ] + - ['2015-03-06T14:37:00+00:00', 1.0, !AssertNAN ] + - ['2015-03-06T14:38:00+00:00', 1.0, !AssertNAN ] - ["2017-04-06T14:33:57+00:00", 1.0, 20.0] - ["2017-04-06T14:34:12+00:00", 1.0, 10.0] diff --git a/gnocchi/tests/functional/test_gabbi.py b/gnocchi/tests/functional/test_gabbi.py index 0273dee1..02525615 100644 --- a/gnocchi/tests/functional/test_gabbi.py +++ b/gnocchi/tests/functional/test_gabbi.py @@ -33,4 +33,5 @@ def load_tests(loader, tests, pattern): test_dir = os.path.join(os.path.dirname(__file__), TESTS_DIR) return driver.build_tests(test_dir, loader, host=None, prefix=PREFIX, intercept=fixtures.setup_app, - fixture_module=fixtures) + fixture_module=fixtures, + safe_yaml=False) diff --git a/setup.cfg b/setup.cfg index b9ca87ce..ce02ef17 100644 --- a/setup.cfg +++ b/setup.cfg @@ -53,7 +53,7 @@ doc = reno>=1.6.2 test = pifpaf[ceph,gnocchi]>=1.0.1 - gabbi>=1.30.0 + gabbi>=1.37.0 coverage>=3.6 fixtures mock -- GitLab From a3c81e0ef5715999eeec8b5d350b1e017a8cc8e1 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Wed, 8 Nov 2017 16:42:33 +0100 Subject: [PATCH 1076/1483] doc: introduces a REST use-cases section We should list all REST API use-case that people is asking for, to not get issue opened for new feature when it's already possible to get the data. 
It's not always clear what API call I need to do to get all informations I need for a use-cases. This change starts a new section and add one example. Related #465. --- doc/source/rest.j2 | 12 +++++++++++- doc/source/rest.yaml | 22 ++++++++++++++++++++++ 2 files changed, 33 insertions(+), 1 deletion(-) diff --git a/doc/source/rest.j2 b/doc/source/rest.j2 index 512c25ba..ade8999d 100644 --- a/doc/source/rest.j2 +++ b/doc/source/rest.j2 @@ -852,6 +852,17 @@ Operations between metrics can also be done, such as: {{ scenarios['get-aggregates-between-metrics']['doc'] }} +List the top N resources that consume the most CPU during the last hour +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +The following is configured so that `stop` - `start` = `granularity` will get +only one point per instance. + +This will give all information needed to order by `cpu.util` timeseries which +can be filtered down to N results. + + +{{ scenarios['use-case1-top-cpuutil-per-instances']['doc'] }} Aggregation across metrics (deprecated) ======================================= @@ -945,7 +956,6 @@ reporting values such as the number of new |measures| to process for each {{ scenarios['get-status']['doc'] }} - Timestamp format ================ diff --git a/doc/source/rest.yaml b/doc/source/rest.yaml index 61ffc557..f1e469c2 100644 --- a/doc/source/rest.yaml +++ b/doc/source/rest.yaml @@ -7,6 +7,10 @@ "name": "short", "back_window": 0, "definition": [ + { + "granularity": "1h", + "timespan": "7 day" + }, { "granularity": "1s", "timespan": "1 hour" @@ -52,6 +56,10 @@ { "definition": [ + { + "granularity": "1h", + "timespan": "7 day" + }, { "granularity": "1s", "timespan": "1 hour" @@ -825,3 +833,17 @@ - name: get-status request: GET /v1/status HTTP/1.1 + + +- name: use-case1-top-cpuutil-per-instances + request: | + POST /v1/aggregates?start=2014-10-06T14:00&stop=2014-10-06T15:00&groupby=original_resource_id&groupby=display_name&granularity=3600.0 HTTP/1.1 + Content-Type: 
application/json + + { + "resource_type": "instance", + "search": "server_group='my_autoscaling_group'", + "operations": "(metric cpu.util mean)" + } + + -- GitLab From 2c3f4d94dcad7f4a24472ed53abda724a6b49993 Mon Sep 17 00:00:00 2001 From: Thomas Goirand Date: Wed, 15 Nov 2017 11:31:45 +0000 Subject: [PATCH 1077/1483] Uploading to unstable. --- debian/changelog | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/debian/changelog b/debian/changelog index cab91b9e..a9159aff 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,10 +1,11 @@ -gnocchi (4.0.3-2) UNRELEASED; urgency=medium +gnocchi (4.0.3-2) unstable; urgency=medium * Fixed version of python-sqlalchemy-utils (>= 0.32.14). * Updated pt.po (Closes: #876172). * Running gnocchi-upgrade instead of dbsync (Closes: #853121). + * Uploading to unstable. - -- Thomas Goirand Sat, 28 Oct 2017 10:08:35 +0000 + -- Thomas Goirand Wed, 15 Nov 2017 11:31:24 +0000 gnocchi (4.0.3-1) experimental; urgency=medium -- GitLab From f6e5485460dab4e4689a7c7979fdab89c85972fb Mon Sep 17 00:00:00 2001 From: Thomas Goirand Date: Wed, 15 Nov 2017 11:36:02 +0000 Subject: [PATCH 1078/1483] * Add missing build-depends: python-all, python-pbr, python-setuptools, python3-testresources. --- debian/changelog | 2 ++ debian/control | 4 ++++ 2 files changed, 6 insertions(+) diff --git a/debian/changelog b/debian/changelog index a9159aff..c094dfdf 100644 --- a/debian/changelog +++ b/debian/changelog @@ -4,6 +4,8 @@ gnocchi (4.0.3-2) unstable; urgency=medium * Updated pt.po (Closes: #876172). * Running gnocchi-upgrade instead of dbsync (Closes: #853121). * Uploading to unstable. + * Add missing build-depends: python-all, python-pbr, python-setuptools, + python3-testresources. 
-- Thomas Goirand Wed, 15 Nov 2017 11:31:24 +0000 diff --git a/debian/control b/debian/control index a44c48ae..6ed11051 100644 --- a/debian/control +++ b/debian/control @@ -8,6 +8,9 @@ Build-Depends: debhelper (>= 10), dh-python, openstack-pkg-tools (>= 54~), + python-all, + python-pbr, + python-setuptools, python3-all, python3-pbr, python3-setuptools, @@ -59,6 +62,7 @@ Build-Depends-Indep: python3-swiftclient (>= 3.1.0), python3-sysv-ipc, python3-tenacity (>= 3.1.0), + python3-testresources, python3-testscenarios, python3-testtools (>= 0.9.38), python3-tooz (>= 1.38), -- GitLab From 032e8b13c3002d9a9658541051a3e40e76b69d91 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Wed, 15 Nov 2017 11:32:37 +0100 Subject: [PATCH 1079/1483] cli: load uwsgi plugins if needed Some distro build uwsgi with dynamic plugins. This change makes our cli working with and without uwsgi dynamic plugins. Closes #478 --- gnocchi/cli/api.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/gnocchi/cli/api.py b/gnocchi/cli/api.py index 932edd9f..f501be88 100644 --- a/gnocchi/cli/api.py +++ b/gnocchi/cli/api.py @@ -82,6 +82,8 @@ def api(): workers = utils.get_default_workers() args = [ + "--if-not-plugin", "python", "--plugin", "python", "--endif", + "--if-not-plugin", "http", "--plugin", "http", "--endif", "--http", "%s:%d" % (conf.host or conf.api.host, conf.port or conf.api.port), "--master", -- GitLab From dbc45898a284921d89b104d2a636e1d285edf2a1 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Wed, 15 Nov 2017 11:53:15 +0100 Subject: [PATCH 1080/1483] Depends on oslo.db 4.29.0 which fixes 2 bugs we hit in tests --- gnocchi/tests/functional/gabbits/pagination.yaml | 1 - gnocchi/tests/functional/gabbits/resource.yaml | 1 - setup.cfg | 4 ++-- 3 files changed, 2 insertions(+), 4 deletions(-) diff --git a/gnocchi/tests/functional/gabbits/pagination.yaml b/gnocchi/tests/functional/gabbits/pagination.yaml index 567c5d87..0e3a7f4c 100644 --- 
a/gnocchi/tests/functional/gabbits/pagination.yaml +++ b/gnocchi/tests/functional/gabbits/pagination.yaml @@ -209,7 +209,6 @@ tests: $[0].ended_at: null - name: limit with history and links page 2 - xfail: https://bugs.launchpad.net/oslo.db/+bug/1615938 GET: /v1/resource/generic?history=true&limit=1&marker=1e3d5702-2cbf-46e0-ba13-0ddaa3c71150@1&sort=id:asc&sort=ended_at:asc-nullsfirst response_headers: link: "<$SCHEME://$NETLOC/v1/resource/generic?history=true&limit=1&marker=1e3d5702-2cbf-46e0-ba13-0ddaa3c71150%402&sort=id%3Aasc&sort=ended_at%3Aasc-nullsfirst>; rel=\"next\"" diff --git a/gnocchi/tests/functional/gabbits/resource.yaml b/gnocchi/tests/functional/gabbits/resource.yaml index ae62f6f9..d8267f1a 100644 --- a/gnocchi/tests/functional/gabbits/resource.yaml +++ b/gnocchi/tests/functional/gabbits/resource.yaml @@ -225,7 +225,6 @@ tests: $.`len`: 1 - name: get generic history with links page 2 - xfail: https://bugs.launchpad.net/oslo.db/+bug/1615938 desc: Ensure we can get the history GET: /v1/resource/generic/75C44741-CC60-4033-804E-2D3098C7D2E9/history?limit=1&marker=75c44741-cc60-4033-804e-2d3098c7d2e9@1&sort=revision_end:asc-nullslast response_json_paths: diff --git a/setup.cfg b/setup.cfg index ce02ef17..7220641e 100644 --- a/setup.cfg +++ b/setup.cfg @@ -23,13 +23,13 @@ keystone = keystonemiddleware>=4.0.0 mysql = pymysql - oslo.db>=4.8.0,!=4.13.1,!=4.13.2,!=4.15.0 + oslo.db>=4.29.0 sqlalchemy sqlalchemy-utils alembic>=0.7.6,!=0.8.1,!=0.9.0 postgresql = psycopg2 - oslo.db>=4.8.0,!=4.13.1,!=4.13.2,!=4.15.0 + oslo.db>=4.29.0 sqlalchemy sqlalchemy-utils alembic>=0.7.6,!=0.8.1,!=0.9.0 -- GitLab From e100051759c1182247d78787cb1d39b2e0a66887 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Fri, 10 Nov 2017 18:56:49 +0100 Subject: [PATCH 1081/1483] aggregates: rework API output This design have some issue when two timeseries have the same names, only the last one is dumped. 
Or if the same metric is asked twice for whatever reason, measures have all timestamps duplicated. This change the result to a new format: For resources: { "measures": { resource_id: {metric_name: {aggregation: MEASURES}}}, "references": [RESOURCES_DETAILS] } For metrics { "measures": {metric_id: {aggregation: MEASURES}}, "references": [METRICS_DETAILS] } When the result is aggregtated we got: For resources: { "measures": { "aggregated": MEASURES}, "references": [RESOURCES_DETAILS] } For metrics { "measures": { "aggregated": MEASURES}, "references": [METRICS_DETAILS] } Closes-bug: #477 --- gnocchi/rest/aggregates/api.py | 70 ++-- gnocchi/rest/aggregates/processor.py | 148 ++++--- gnocchi/rest/api.py | 2 +- .../gabbits/aggregates-with-metric-ids.yaml | 135 ++++--- .../gabbits/aggregates-with-resources.yaml | 70 +++- .../tests/functional/gabbits/aggregation.yaml | 12 +- gnocchi/tests/functional/gabbits/metric.yaml | 8 +- gnocchi/tests/test_aggregates.py | 360 +++++++++++------- ...es-api-output-change-2bc6620c7f595925.yaml | 6 + 9 files changed, 520 insertions(+), 291 deletions(-) create mode 100644 releasenotes/notes/aggregates-api-output-change-2bc6620c7f595925.yaml diff --git a/gnocchi/rest/aggregates/api.py b/gnocchi/rest/aggregates/api.py index 233c1130..af4f4221 100644 --- a/gnocchi/rest/aggregates/api.py +++ b/gnocchi/rest/aggregates/api.py @@ -119,32 +119,42 @@ def OperationsSchema(v): required=True)(v) +class ReferencesList(list): + "A very simplified OrderedSet with list interface" + + def append(self, ref): + if ref not in self: + super(ReferencesList, self).append(ref) + + def extend(self, refs): + for ref in refs: + self.append(ref) + + def extract_references(nodes): - references = set() + references = ReferencesList() if nodes[0] == "metric": if isinstance(nodes[1], list): for subnodes in nodes[1:]: - references.add(tuple(subnodes)) + references.append(tuple(subnodes)) else: - references.add(tuple(nodes[1:])) + references.append(tuple(nodes[1:])) else: 
for subnodes in nodes[1:]: if isinstance(subnodes, list): - references |= extract_references(subnodes) + references.extend(extract_references(subnodes)) return references -def get_measures_or_abort(metrics_and_aggregations, operations, start, - stop, granularity, needed_overlap, fill, - ref_identifier): +def get_measures_or_abort(references, operations, start, + stop, granularity, needed_overlap, fill): try: return processor.get_measures( pecan.request.storage, - metrics_and_aggregations, + references, operations, start, stop, - granularity, needed_overlap, fill, - ref_identifier=ref_identifier) + granularity, needed_overlap, fill) except exceptions.UnAggregableTimeseries as e: api.abort(400, e) # TODO(sileht): We currently got only one metric for these exceptions but @@ -187,7 +197,7 @@ class AggregatesController(rest.RestController): body = api.deserialize_and_validate(self.FetchSchema) - references = list(extract_references(body["operations"])) + references = extract_references(body["operations"]) if not references: api.abort(400, {"cause": "Operations is invalid", "reason": "At least one 'metric' is required", @@ -208,10 +218,11 @@ class AggregatesController(rest.RestController): attr_filter = policy_filter groupby = sorted(set(api.arg_to_list(groupby))) + sorts = groupby if groupby else api.RESOURCE_DEFAULT_PAGINATION resources = pecan.request.indexer.list_resources( body["resource_type"], attribute_filter=attr_filter, - sorts=groupby) + sorts=sorts) if not groupby: return self._get_measures_by_name( resources, references, body["operations"], start, stop, @@ -232,8 +243,8 @@ class AggregatesController(rest.RestController): else: try: - metric_ids = [six.text_type(utils.UUID(m)) - for (m, a) in references] + metric_ids = set(six.text_type(utils.UUID(m)) + for (m, a) in references) except ValueError as e: api.abort(400, {"cause": "Invalid metric references", "reason": six.text_type(e), @@ -255,24 +266,27 @@ class AggregatesController(rest.RestController): 
api.enforce("get metric", metric) metrics_by_ids = dict((six.text_type(m.id), m) for m in metrics) - metrics_and_aggregations = [(metrics_by_ids[m], a) - for (m, a) in references] - return get_measures_or_abort( - metrics_and_aggregations, body["operations"], - start, stop, granularity, needed_overlap, fill, - ref_identifier="id") + references = [processor.MetricReference(metrics_by_ids[m], a) + for (m, a) in references] + return {"references": references, + "measures": get_measures_or_abort( + references, body["operations"], + start, stop, granularity, needed_overlap, fill)} def _get_measures_by_name(self, resources, metric_names, operations, start, stop, granularity, needed_overlap, fill): - metrics_and_aggregations = list(filter( - lambda x: x[0] is not None, ([r.get_metric(metric_name), agg] - for (metric_name, agg) in metric_names - for r in resources))) - if not metrics_and_aggregations: + references = [ + processor.MetricReference(r.get_metric(metric_name), agg, r) + for (metric_name, agg) in metric_names + for r in resources if r.get_metric(metric_name) is not None + ] + + if not references: api.abort(400, {"cause": "Metrics not found", "detail": set((m for (m, a) in metric_names))}) - return get_measures_or_abort(metrics_and_aggregations, operations, - start, stop, granularity, needed_overlap, - fill, ref_identifier="name") + return {"references": references, + "measures": get_measures_or_abort( + references, operations, start, stop, granularity, + needed_overlap, fill)} diff --git a/gnocchi/rest/aggregates/processor.py b/gnocchi/rest/aggregates/processor.py index 70e10337..f170a71e 100644 --- a/gnocchi/rest/aggregates/processor.py +++ b/gnocchi/rest/aggregates/processor.py @@ -31,18 +31,41 @@ from gnocchi import utils LOG = daiquiri.getLogger(__name__) -def _get_measures_timeserie(storage, metric, aggregation, ref_identifier, - *args, **kwargs): - return ([str(getattr(metric, ref_identifier)), aggregation], - storage._get_measures_timeserie(metric, 
aggregation, *args, - **kwargs)) +class MetricReference(object): + def __init__(self, metric, aggregation, resource=None): + self.metric = metric + self.aggregation = aggregation + self.resource = resource + self.timeseries = {} + + if self.resource is None: + self.name = str(self.metric.id) + else: + self.name = self.metric.name + + self.lookup_key = [self.name, self.aggregation] + + def jsonify(self): + if self.resource: + return self.resource + else: + return self.metric + + def __eq__(self, other): + return (self.metric == other.metric and + self.resource == other.resource and + self.aggregation == other.aggregation) -def get_measures(storage, metrics_and_aggregations, - operations, +def _get_measures_timeserie(storage, ref, *args, **kwargs): + return (ref, storage._get_measures_timeserie( + ref.metric, ref.aggregation, *args, **kwargs)) + + +def get_measures(storage, references, operations, from_timestamp=None, to_timestamp=None, granularity=None, needed_overlap=100.0, - fill=None, ref_identifier="id"): + fill=None): """Get aggregated measures of multiple entities. :param storage: The storage driver. 
@@ -55,16 +78,18 @@ def get_measures(storage, metrics_and_aggregations, """ references_with_missing_granularity = [] - for (metric, aggregation) in metrics_and_aggregations: - if aggregation not in metric.archive_policy.aggregation_methods: - raise gnocchi_storage.AggregationDoesNotExist(metric, aggregation) + for ref in references: + if (ref.aggregation not in + ref.metric.archive_policy.aggregation_methods): + raise gnocchi_storage.AggregationDoesNotExist(ref.metric, + ref.aggregation) if granularity is not None: - for d in metric.archive_policy.definition: + for d in ref.metric.archive_policy.definition: if d.granularity == granularity: break else: references_with_missing_granularity.append( - (getattr(metric, ref_identifier), aggregation)) + (ref.name, ref.aggregation)) if references_with_missing_granularity: raise exceptions.UnAggregableTimeseries( @@ -75,30 +100,27 @@ def get_measures(storage, metrics_and_aggregations, if granularity is None: granularities = ( definition.granularity - for (metric, aggregation) in metrics_and_aggregations - for definition in metric.archive_policy.definition + for ref in references + for definition in ref.metric.archive_policy.definition ) granularities_in_common = [ g for g, occurrence in six.iteritems( collections.Counter(granularities)) - if occurrence == len(metrics_and_aggregations) + if occurrence == len(references) ] if not granularities_in_common: raise exceptions.UnAggregableTimeseries( - list((str(getattr(m, ref_identifier)), a) - for (m, a) in metrics_and_aggregations), + list((ref.name, ref.aggregation) + for ref in references), 'No granularity match') else: granularities_in_common = [granularity] tss = utils.parallel_map(_get_measures_timeserie, - [(storage, metric, aggregation, - ref_identifier, - g, from_timestamp, to_timestamp) - for (metric, aggregation) - in metrics_and_aggregations + [(storage, ref, g, from_timestamp, to_timestamp) + for ref in references for g in granularities_in_common]) return 
aggregated(tss, operations, from_timestamp, to_timestamp, @@ -110,17 +132,19 @@ def aggregated(refs_and_timeseries, operations, from_timestamp=None, series = collections.defaultdict(list) references = collections.defaultdict(list) - for (reference, timeserie) in refs_and_timeseries: + lookup_keys = collections.defaultdict(list) + for (ref, timeserie) in refs_and_timeseries: from_ = (None if from_timestamp is None else carbonara.round_timestamp(from_timestamp, timeserie.sampling)) - references[timeserie.sampling].append(reference) + references[timeserie.sampling].append(ref) + lookup_keys[timeserie.sampling].append(ref.lookup_key) series[timeserie.sampling].append(timeserie[from_:to_timestamp]) - result = collections.defaultdict(lambda: {'timestamps': [], - 'granularity': [], - 'values': []}) - for key in sorted(series, reverse=True): - combine = numpy.concatenate(series[key]) + result = [] + is_aggregated = False + result = {} + for sampling in sorted(series, reverse=True): + combine = numpy.concatenate(series[sampling]) # np.unique sorts results for us times, indices = numpy.unique(combine['timestamps'], return_inverse=True) @@ -128,9 +152,9 @@ def aggregated(refs_and_timeseries, operations, from_timestamp=None, # create nd-array (unique series x unique times) and fill filler = (numpy.NaN if fill in [None, 'null', 'dropna'] else fill) - val_grid = numpy.full((len(series[key]), len(times)), filler) + val_grid = numpy.full((len(series[sampling]), len(times)), filler) start = 0 - for i, split in enumerate(series[key]): + for i, split in enumerate(series[sampling]): size = len(split) val_grid[i][indices[start:start + size]] = split['values'] start += size @@ -140,7 +164,7 @@ def aggregated(refs_and_timeseries, operations, from_timestamp=None, overlap = numpy.flatnonzero(~numpy.any(numpy.isnan(values), axis=1)) if overlap.size == 0 and needed_percent_of_overlap > 0: - raise exceptions.UnAggregableTimeseries(references[key], + raise 
exceptions.UnAggregableTimeseries(lookup_keys[sampling], 'No overlap') if times.size: # if no boundary set, use first/last timestamp which overlap @@ -153,33 +177,53 @@ def aggregated(refs_and_timeseries, operations, from_timestamp=None, percent_of_overlap = overlap.size * 100.0 / times.size if percent_of_overlap < needed_percent_of_overlap: raise exceptions.UnAggregableTimeseries( - references[key], + lookup_keys[sampling], 'Less than %f%% of datapoints overlap in this ' 'timespan (%.2f%%)' % (needed_percent_of_overlap, percent_of_overlap)) granularity, times, values, is_aggregated = ( - agg_operations.evaluate(operations, key, times, values, - False, references[key])) + agg_operations.evaluate(operations, sampling, times, values, + False, lookup_keys[sampling])) values = values.T - if is_aggregated: - idents = ["aggregated"] - else: - idents = ["%s_%s" % tuple(ref) for ref in references[key]] - for i, ident in enumerate(idents): + result[sampling] = (granularity, times, values, references[sampling]) + + if is_aggregated: + output = {"aggregated": []} + for sampling in sorted(result, reverse=True): + granularity, times, values, references = result[sampling] if fill == "dropna": - pos = ~numpy.isnan(values[i]) - v = values[i][pos] + pos = ~numpy.isnan(values[0]) + v = values[0][pos] t = times[pos] else: - v = values[i] + v = values[0] t = times - result[ident]["timestamps"].extend(t) - result[ident]['granularity'].extend([granularity] * len(t)) - result[ident]['values'].extend(v) - - return dict(((ident, list(six.moves.zip(result[ident]['timestamps'], - result[ident]['granularity'], - result[ident]['values']))) - for ident in result)) + g = [granularity] * len(t) + output["aggregated"].extend(six.moves.zip(t, g, v)) + return output + else: + r_output = collections.defaultdict( + lambda: collections.defaultdict( + lambda: collections.defaultdict(list))) + m_output = collections.defaultdict( + lambda: collections.defaultdict(list)) + for sampling in sorted(result, 
reverse=True): + granularity, times, values, references = result[sampling] + for i, ref in enumerate(references): + if fill == "dropna": + pos = ~numpy.isnan(values[i]) + v = values[i][pos] + t = times[pos] + else: + v = values[i] + t = times + g = [granularity] * len(t) + measures = six.moves.zip(t, g, v) + if ref.resource is None: + m_output[ref.name][ref.aggregation].extend(measures) + else: + r_output[str(ref.resource.id)][ + ref.metric.name][ref.aggregation].extend(measures) + return r_output if r_output else m_output diff --git a/gnocchi/rest/api.py b/gnocchi/rest/api.py index 5195d353..528e465d 100644 --- a/gnocchi/rest/api.py +++ b/gnocchi/rest/api.py @@ -1801,7 +1801,7 @@ class AggregationController(rest.RestController): granularity, resample) return processor.get_measures( pecan.request.storage, - [(m, aggregation) for m in metrics], + [processor.MetricReference(m, aggregation) for m in metrics], operations, start, stop, granularity, needed_overlap, fill)["aggregated"] except exceptions.UnAggregableTimeseries as e: diff --git a/gnocchi/tests/functional/gabbits/aggregates-with-metric-ids.yaml b/gnocchi/tests/functional/gabbits/aggregates-with-metric-ids.yaml index aea68d65..8237fc47 100644 --- a/gnocchi/tests/functional/gabbits/aggregates-with-metric-ids.yaml +++ b/gnocchi/tests/functional/gabbits/aggregates-with-metric-ids.yaml @@ -124,12 +124,17 @@ tests: - ["2015-03-06T14:35:15+00:00", 1.0, 15.0] - name: get aggregates + desc: we put metric2 twice to ensure we retrieve it once POST: /v1/aggregates data: - operations: ["metric", ["$HISTORY['create metric1'].$RESPONSE['$.id']", "mean"], ["$HISTORY['create metric2'].$RESPONSE['$.id']", "mean"]] + operations: ["metric", ["$HISTORY['create metric1'].$RESPONSE['$.id']", "mean"], ["$HISTORY['create metric2'].$RESPONSE['$.id']", "mean"], ["$HISTORY['create metric2'].$RESPONSE['$.id']", "mean"]] response_json_paths: - $.`len`: 2 - $."$HISTORY['create metric1'].$RESPONSE['$.id']_mean": + $.references.`len`: 2 + 
$.references[0].id: $HISTORY['create metric1'].$RESPONSE['$.id'] + $.references[1].id: $HISTORY['create metric2'].$RESPONSE['$.id'] + $.references[0].archive_policy.name: cookies + $.references[1].archive_policy.name: cookies + $.measures."$HISTORY['create metric1'].$RESPONSE['$.id']".mean: - ["2015-03-06T14:33:00+00:00", 60.0, 43.1] - ["2015-03-06T14:34:00+00:00", 60.0, -2.0] - ["2015-03-06T14:35:00+00:00", 60.0, 10.0] @@ -138,7 +143,7 @@ tests: - ["2015-03-06T14:34:15+00:00", 1.0, -16.0] - ["2015-03-06T14:35:12+00:00", 1.0, 9.0] - ["2015-03-06T14:35:15+00:00", 1.0, 11.0] - $."$HISTORY['create metric2'].$RESPONSE['$.id']_mean": + $.measures."$HISTORY['create metric2'].$RESPONSE['$.id']".mean: - ["2015-03-06T14:33:00+00:00", 60.0, 2.0] - ["2015-03-06T14:34:00+00:00", 60.0, 4.5] - ["2015-03-06T14:35:00+00:00", 60.0, 12.5] @@ -156,14 +161,18 @@ tests: data: operations: ["metric", ["$HISTORY['create metric1'].$RESPONSE['$.id']", "mean"], ["$HISTORY['create metric2'].$RESPONSE['$.id']", "mean"]] response_json_paths: - $.`len`: 2 - $."$HISTORY['create metric1'].$RESPONSE['$.id']_mean": + $.references.`len`: 2 + $.references[0].id: $HISTORY['create metric1'].$RESPONSE['$.id'] + $.references[1].id: $HISTORY['create metric2'].$RESPONSE['$.id'] + $.references[0].archive_policy.name: cookies + $.references[1].archive_policy.name: cookies + $.measures."$HISTORY['create metric1'].$RESPONSE['$.id']".mean: - ["2015-03-06T14:34:00+00:00", 60.0, -2.0] - ["2015-03-06T14:35:00+00:00", 60.0, 10.0] - ["2015-03-06T14:34:12+00:00", 1.0, 12.0] - ["2015-03-06T14:34:15+00:00", 1.0, -16.0] - ["2015-03-06T14:35:12+00:00", 1.0, 9.0] - $."$HISTORY['create metric2'].$RESPONSE['$.id']_mean": + $.measures."$HISTORY['create metric2'].$RESPONSE['$.id']".mean: - ["2015-03-06T14:34:00+00:00", 60.0, 4.5] - ["2015-03-06T14:35:00+00:00", 60.0, 12.5] - ["2015-03-06T14:34:12+00:00", 1.0, 4.0] @@ -175,12 +184,16 @@ tests: data: operations: ["metric", ["$HISTORY['create metric1'].$RESPONSE['$.id']", "max"], 
["$HISTORY['create metric2'].$RESPONSE['$.id']", "min"]] response_json_paths: - $.`len`: 2 - $."$HISTORY['create metric1'].$RESPONSE['$.id']_max": + $.references.`len`: 2 + $.references[0].id: $HISTORY['create metric1'].$RESPONSE['$.id'] + $.references[1].id: $HISTORY['create metric2'].$RESPONSE['$.id'] + $.references[0].archive_policy.name: cookies + $.references[1].archive_policy.name: cookies + $.measures."$HISTORY['create metric1'].$RESPONSE['$.id']".max: - ["2015-03-06T14:33:00+00:00", 60.0, 43.1] - ["2015-03-06T14:34:00+00:00", 60.0, 12.0] - ["2015-03-06T14:35:00+00:00", 60.0, 11.0] - $."$HISTORY['create metric2'].$RESPONSE['$.id']_min": + $.measures."$HISTORY['create metric2'].$RESPONSE['$.id']".min: - ["2015-03-06T14:33:00+00:00", 60.0, 2.0] - ["2015-03-06T14:34:00+00:00", 60.0, 4.0] - ["2015-03-06T14:35:00+00:00", 60.0, 10.0] @@ -190,8 +203,11 @@ tests: data: operations: ["+", ["metric", ["$HISTORY['create metric1'].$RESPONSE['$.id']", "mean"], ["$HISTORY['create metric2'].$RESPONSE['$.id']", "mean"]], 2.0] response_json_paths: - $.`len`: 2 - $."$HISTORY['create metric1'].$RESPONSE['$.id']_mean": + $.references[0].id: $HISTORY['create metric1'].$RESPONSE['$.id'] + $.references[1].id: $HISTORY['create metric2'].$RESPONSE['$.id'] + $.references[0].archive_policy.name: cookies + $.references[1].archive_policy.name: cookies + $.measures."$HISTORY['create metric1'].$RESPONSE['$.id']".mean: - ["2015-03-06T14:33:00+00:00", 60.0, 45.1] - ["2015-03-06T14:34:00+00:00", 60.0, 0.0] - ["2015-03-06T14:35:00+00:00", 60.0, 12.0] @@ -200,7 +216,7 @@ tests: - ["2015-03-06T14:34:15+00:00", 1.0, -14.0] - ["2015-03-06T14:35:12+00:00", 1.0, 11.0] - ["2015-03-06T14:35:15+00:00", 1.0, 13.0] - $."$HISTORY['create metric2'].$RESPONSE['$.id']_mean": + $.measures."$HISTORY['create metric2'].$RESPONSE['$.id']".mean: - ["2015-03-06T14:33:00+00:00", 60.0, 4.0] - ["2015-03-06T14:34:00+00:00", 60.0, 6.5] - ["2015-03-06T14:35:00+00:00", 60.0, 14.5] @@ -219,12 +235,16 @@ tests: - 60 - 
["metric", ["$HISTORY['create metric1'].$RESPONSE['$.id']", "mean"], ["$HISTORY['create metric2'].$RESPONSE['$.id']", "mean"]] response_json_paths: - $.`len`: 2 - $."$HISTORY['create metric1'].$RESPONSE['$.id']_mean": + $.references.`len`: 2 + $.references[0].id: $HISTORY['create metric1'].$RESPONSE['$.id'] + $.references[1].id: $HISTORY['create metric2'].$RESPONSE['$.id'] + $.references[0].archive_policy.name: cookies + $.references[1].archive_policy.name: cookies + $.measures."$HISTORY['create metric1'].$RESPONSE['$.id']".mean: - ["2015-03-06T14:33:00+00:00", 60.0, 43.1] - ["2015-03-06T14:34:00+00:00", 60.0, -2.0] - ["2015-03-06T14:35:00+00:00", 60.0, 10.0] - $."$HISTORY['create metric2'].$RESPONSE['$.id']_mean": + $.measures."$HISTORY['create metric2'].$RESPONSE['$.id']".mean: - ["2015-03-06T14:33:00+00:00", 60.0, 2.0] - ["2015-03-06T14:34:00+00:00", 60.0, 4.5] - ["2015-03-06T14:35:00+00:00", 60.0, 12.5] @@ -238,13 +258,17 @@ tests: - 2 - ["metric", ["$HISTORY['create metric1'].$RESPONSE['$.id']", "mean"], ["$HISTORY['create metric2'].$RESPONSE['$.id']", "mean"]] response_json_paths: - $.`len`: 2 - $."$HISTORY['create metric1'].$RESPONSE['$.id']_mean": + $.references.`len`: 2 + $.references[0].id: $HISTORY['create metric1'].$RESPONSE['$.id'] + $.references[1].id: $HISTORY['create metric2'].$RESPONSE['$.id'] + $.references[0].archive_policy.name: cookies + $.references[1].archive_policy.name: cookies + $.measures."$HISTORY['create metric1'].$RESPONSE['$.id']".mean: - ["2015-03-06T14:34:12+00:00", 1.0, 27.55] - ["2015-03-06T14:34:15+00:00", 1.0, -2.0] - ["2015-03-06T14:35:12+00:00", 1.0, -3.5] - ["2015-03-06T14:35:15+00:00", 1.0, 10.0] - $."$HISTORY['create metric2'].$RESPONSE['$.id']_mean": + $.measures."$HISTORY['create metric2'].$RESPONSE['$.id']".mean: - ["2015-03-06T14:34:12+00:00", 1.0, 3.0] - ["2015-03-06T14:34:15+00:00", 1.0, 4.5] - ["2015-03-06T14:35:12+00:00", 1.0, 7.5] @@ -256,8 +280,9 @@ tests: data: operations: "(metric $HISTORY['create 
metric1'].$RESPONSE['$.id'] mean)" response_json_paths: - $.`len`: 1 - $."$HISTORY['create metric1'].$RESPONSE['$.id']_mean": + $.references.`len`: 1 + $.references[0].id: $HISTORY['create metric1'].$RESPONSE['$.id'] + $.measures."$HISTORY['create metric1'].$RESPONSE['$.id']".mean: - ["2015-03-06T14:33:00+00:00", 60.0, 43.1] - ["2015-03-06T14:34:00+00:00", 60.0, -2.0] - ["2015-03-06T14:35:00+00:00", 60.0, 10.0] @@ -272,8 +297,9 @@ tests: data: operations: "(aggregate mean (metric $HISTORY['create metric1'].$RESPONSE['$.id'] mean))" response_json_paths: - $.`len`: 1 - $."aggregated": + $.references.`len`: 1 + $.references[0].id: $HISTORY['create metric1'].$RESPONSE['$.id'] + $.measures.aggregated: - ["2015-03-06T14:33:00+00:00", 60.0, 43.1] - ["2015-03-06T14:34:00+00:00", 60.0, -2.0] - ["2015-03-06T14:35:00+00:00", 60.0, 10.0] @@ -288,8 +314,10 @@ tests: data: operations: "(+ (metric ($HISTORY['create metric1'].$RESPONSE['$.id'] mean) ($HISTORY['create metric2'].$RESPONSE['$.id'] mean)) 2.0)" response_json_paths: - $.`len`: 2 - $."$HISTORY['create metric1'].$RESPONSE['$.id']_mean": + $.references.`len`: 2 + $.references[0].id: $HISTORY['create metric1'].$RESPONSE['$.id'] + $.references[1].id: $HISTORY['create metric2'].$RESPONSE['$.id'] + $.measures."$HISTORY['create metric1'].$RESPONSE['$.id']".mean: - ["2015-03-06T14:33:00+00:00", 60.0, 45.1] - ["2015-03-06T14:34:00+00:00", 60.0, 0.0] - ["2015-03-06T14:35:00+00:00", 60.0, 12.0] @@ -298,7 +326,7 @@ tests: - ["2015-03-06T14:34:15+00:00", 1.0, -14.0] - ["2015-03-06T14:35:12+00:00", 1.0, 11.0] - ["2015-03-06T14:35:15+00:00", 1.0, 13.0] - $."$HISTORY['create metric2'].$RESPONSE['$.id']_mean": + $.measures."$HISTORY['create metric2'].$RESPONSE['$.id']".mean: - ["2015-03-06T14:33:00+00:00", 60.0, 4.0] - ["2015-03-06T14:34:00+00:00", 60.0, 6.5] - ["2015-03-06T14:35:00+00:00", 60.0, 14.5] @@ -313,8 +341,10 @@ tests: data: operations: "(- (metric $HISTORY['create metric1'].$RESPONSE['$.id'] mean) (metric $HISTORY['create 
metric2'].$RESPONSE['$.id'] mean)))" response_json_paths: - $.`len`: 1 - $."aggregated": + $.references.`len`: 2 + $.references[0].id: $HISTORY['create metric1'].$RESPONSE['$.id'] + $.references[1].id: $HISTORY['create metric2'].$RESPONSE['$.id'] + $.measures.aggregated: - ["2015-03-06T14:33:00+00:00", 60.0, 41.1] - ["2015-03-06T14:34:00+00:00", 60.0, -6.5] - ["2015-03-06T14:35:00+00:00", 60.0, -2.5] @@ -329,8 +359,10 @@ tests: data: operations: "(aggregate mean (metric ($HISTORY['create metric1'].$RESPONSE['$.id'] mean) ($HISTORY['create metric2'].$RESPONSE['$.id'] mean)))" response_json_paths: - $.`len`: 1 - $."aggregated": + $.references.`len`: 2 + $.references[0].id: $HISTORY['create metric1'].$RESPONSE['$.id'] + $.references[1].id: $HISTORY['create metric2'].$RESPONSE['$.id'] + $.measures.aggregated: - ["2015-03-06T14:33:00+00:00", 60.0, 22.55] - ["2015-03-06T14:34:00+00:00", 60.0, 1.25] - ["2015-03-06T14:35:00+00:00", 60.0, 11.25] @@ -345,8 +377,10 @@ tests: data: operations: "(negative (absolute (aggregate mean (metric ($HISTORY['create metric1'].$RESPONSE['$.id'] mean) ($HISTORY['create metric2'].$RESPONSE['$.id'] mean)))))" response_json_paths: - $.`len`: 1 - $."aggregated": + $.references.`len`: 2 + $.references[0].id: $HISTORY['create metric1'].$RESPONSE['$.id'] + $.references[1].id: $HISTORY['create metric2'].$RESPONSE['$.id'] + $.measures.aggregated: - ["2015-03-06T14:33:00+00:00", 60.0, -22.55] - ["2015-03-06T14:34:00+00:00", 60.0, -1.25] - ["2015-03-06T14:35:00+00:00", 60.0, -11.25] @@ -374,8 +408,10 @@ tests: data: operations: "(metric ($HISTORY['create metric1'].$RESPONSE['$.id'] mean) ($HISTORY['create metric2'].$RESPONSE['$.id'] mean))" response_json_paths: - $.`len`: 2 - $."$HISTORY['create metric1'].$RESPONSE['$.id']_mean": + $.references.`len`: 2 + $.references[0].id: $HISTORY['create metric1'].$RESPONSE['$.id'] + $.references[1].id: $HISTORY['create metric2'].$RESPONSE['$.id'] + $.measures."$HISTORY['create metric1'].$RESPONSE['$.id']".mean: 
- ["2015-03-06T14:33:00+00:00", 60.0, 43.1] - ["2015-03-06T14:34:00+00:00", 60.0, -2.0] - ["2015-03-06T14:35:00+00:00", 60.0, 10.0] @@ -388,7 +424,7 @@ tests: - ["2015-03-06T14:35:15+00:00", 1.0, 11.0] - ['2015-03-06T14:37:00+00:00', 1.0, 15.0] - ['2015-03-06T14:38:00+00:00', 1.0, 15.0] - $."$HISTORY['create metric2'].$RESPONSE['$.id']_mean": + $.measures."$HISTORY['create metric2'].$RESPONSE['$.id']".mean: - ["2015-03-06T14:33:00+00:00", 60.0, 2.0] - ["2015-03-06T14:34:00+00:00", 60.0, 4.5] - ["2015-03-06T14:35:00+00:00", 60.0, 12.5] @@ -407,8 +443,10 @@ tests: data: operations: "(metric ($HISTORY['create metric1'].$RESPONSE['$.id'] mean) ($HISTORY['create metric4'].$RESPONSE['$.id'] mean))" response_json_paths: - $.`len`: 2 - $."$HISTORY['create metric1'].$RESPONSE['$.id']_mean": + $.references.`len`: 2 + $.references[0].id: $HISTORY['create metric1'].$RESPONSE['$.id'] + $.references[1].id: $HISTORY['create metric4'].$RESPONSE['$.id'] + $.measures."$HISTORY['create metric1'].$RESPONSE['$.id']".mean: - ["2015-03-06T14:33:00+00:00", 60.0, 43.1] - ["2015-03-06T14:34:00+00:00", 60.0, -2.0] - ["2015-03-06T14:35:00+00:00", 60.0, 10.0] @@ -421,7 +459,7 @@ tests: - ["2015-03-06T14:35:15+00:00", 1.0, 11.0] - ['2015-03-06T14:37:00+00:00', 1.0, 15.0] - ['2015-03-06T14:38:00+00:00', 1.0, 15.0] - $."$HISTORY['create metric4'].$RESPONSE['$.id']_mean": + $.measures."$HISTORY['create metric4'].$RESPONSE['$.id']".mean: - ["2017-04-06T14:33:00+00:00", 60.0, 20.0] - ["2017-04-06T14:34:00+00:00", 60.0, 10.0] - ["2017-04-06T14:33:57+00:00", 1.0, 20.0] @@ -432,8 +470,10 @@ tests: data: operations: "(metric ($HISTORY['create metric1'].$RESPONSE['$.id'] mean) ($HISTORY['create metric4'].$RESPONSE['$.id'] mean))" response_json_paths: - $.`len`: 2 - $."$HISTORY['create metric1'].$RESPONSE['$.id']_mean": + $.references.`len`: 2 + $.references[0].id: $HISTORY['create metric1'].$RESPONSE['$.id'] + $.references[1].id: $HISTORY['create metric4'].$RESPONSE['$.id'] + $.measures."$HISTORY['create 
metric1'].$RESPONSE['$.id']".mean: - ["2015-03-06T14:33:00+00:00", 60.0, 43.1] - ["2015-03-06T14:34:00+00:00", 60.0, -2.0] - ["2015-03-06T14:35:00+00:00", 60.0, 10.0] @@ -450,7 +490,7 @@ tests: - ['2015-03-06T14:38:00+00:00', 1.0, 15.0] - ["2017-04-06T14:33:57+00:00", 1.0, !AssertNAN ] - ["2017-04-06T14:34:12+00:00", 1.0, !AssertNAN ] - $."$HISTORY['create metric4'].$RESPONSE['$.id']_mean": + $.measures."$HISTORY['create metric4'].$RESPONSE['$.id']".mean: - ["2015-03-06T14:33:00+00:00", 60.0, !AssertNAN ] - ["2015-03-06T14:34:00+00:00", 60.0, !AssertNAN ] - ["2015-03-06T14:35:00+00:00", 60.0, !AssertNAN ] @@ -468,19 +508,6 @@ tests: - ["2017-04-06T14:33:57+00:00", 1.0, 20.0] - ["2017-04-06T14:34:12+00:00", 1.0, 10.0] - - name: no overlap null light check due to previous xfail - POST: /v1/aggregates?fill=null - data: - operations: "(metric ($HISTORY['create metric1'].$RESPONSE['$.id'] mean) ($HISTORY['create metric4'].$RESPONSE['$.id'] mean))" - response_json_paths: - $.`len`: 2 - $."$HISTORY['create metric1'].$RESPONSE['$.id']_mean".`len`: 16 - $."$HISTORY['create metric4'].$RESPONSE['$.id']_mean".`len`: 16 - $."$HISTORY['create metric1'].$RESPONSE['$.id']_mean"[0]: ["2015-03-06T14:33:00+00:00", 60.0, 43.1] - $."$HISTORY['create metric1'].$RESPONSE['$.id']_mean"[7]: ["2015-03-06T14:33:57+00:00", 1.0, 43.1] - $."$HISTORY['create metric4'].$RESPONSE['$.id']_mean"[5]: ["2017-04-06T14:33:00+00:00", 60.0, 20.0] - $."$HISTORY['create metric4'].$RESPONSE['$.id']_mean"[14]: ["2017-04-06T14:33:57+00:00", 1.0, 20.0] - # Negative tests - name: get no operations diff --git a/gnocchi/tests/functional/gabbits/aggregates-with-resources.yaml b/gnocchi/tests/functional/gabbits/aggregates-with-resources.yaml index 9dce6125..d9fb5d4a 100644 --- a/gnocchi/tests/functional/gabbits/aggregates-with-resources.yaml +++ b/gnocchi/tests/functional/gabbits/aggregates-with-resources.yaml @@ -113,6 +113,9 @@ tests: value: 45.41 status: 202 + - name: list resources + GET: /v1/resource/generic + 
- name: aggregate metric POST: /v1/aggregates data: @@ -123,11 +126,59 @@ tests: count: 10 delay: 1 response_json_paths: - $.aggregated: + $.references.`len`: 3 + $.references[0]: $HISTORY['list resources'].$RESPONSE['$[0]'] + $.references[1]: $HISTORY['list resources'].$RESPONSE['$[1]'] + $.references[2]: $HISTORY['list resources'].$RESPONSE['$[2]'] + $.measures.aggregated: - ['2015-03-06T14:30:00+00:00', 300.0, 60.251666666666665] - ['2015-03-06T14:33:57+00:00', 1.0, 98.7] - ['2015-03-06T14:34:12+00:00', 1.0, 21.80333333333333] + - name: batch get + POST: /v1/aggregates + data: + resource_type: generic + search: "user_id = '6c865dd0-7945-4e08-8b27-d0d7f1c2b667'" + operations: "(metric cpu.util mean)" + poll: + count: 10 + delay: 1 + response_json_paths: + $.references.`len`: 3 + $.references[0]: $HISTORY['list resources'].$RESPONSE['$[0]'] + $.references[1]: $HISTORY['list resources'].$RESPONSE['$[1]'] + $.references[2]: $HISTORY['list resources'].$RESPONSE['$[2]'] + $.measures."$HISTORY['list resources'].$RESPONSE['$[0].id']"."cpu.util".mean: + - ['2015-03-06T14:30:00+00:00', 300.0, 27.55] + - ['2015-03-06T14:33:57+00:00', 1.0, 43.1] + - ['2015-03-06T14:34:12+00:00', 1.0, 12.0] + $.measures."$HISTORY['list resources'].$RESPONSE['$[1].id']"."cpu.util".mean: + - ['2015-03-06T14:30:00+00:00', 300.0, 15.5] + - ['2015-03-06T14:33:57+00:00', 1.0, 23.0] + - ['2015-03-06T14:34:12+00:00', 1.0, 8.0] + $.measures."$HISTORY['list resources'].$RESPONSE['$[2].id']"."cpu.util".mean: + - ['2015-03-06T14:30:00+00:00', 300.0, 137.70499999999998] + - ['2015-03-06T14:33:57+00:00', 1.0, 230.0] + - ['2015-03-06T14:34:12+00:00', 1.0, 45.41] + + - name: stupid but valid batch get + POST: /v1/aggregates + data: + resource_type: generic + search: "id = '4ed9c196-4c9f-4ba8-a5be-c9a71a82aac4'" + operations: "(metric (cpu.util mean) (cpu.util mean))" + poll: + count: 10 + delay: 1 + response_json_paths: + $.references.`len`: 1 + $.references[0]: $HISTORY['list resources'].$RESPONSE['$[0]'] 
+ $.measures."$HISTORY['list resources'].$RESPONSE['$[0].id']"."cpu.util".mean: + - ['2015-03-06T14:30:00+00:00', 300.0, 27.55] + - ['2015-03-06T14:33:57+00:00', 1.0, 43.1] + - ['2015-03-06T14:34:12+00:00', 1.0, 12.0] + - name: aggregate metric with groupby on project_id and user_id with aggregates API POST: /v1/aggregates?groupby=project_id&groupby=user_id data: @@ -135,21 +186,24 @@ tests: search: "user_id = '6c865dd0-7945-4e08-8b27-d0d7f1c2b667'" operations: "(aggregate mean (metric cpu.util mean))" response_json_paths: - $: - - measures: - aggregated: + $.`len`: 2 + $[0].measures.references.`len`: 2 + $[0].measures.references[0]: $HISTORY['list resources'].$RESPONSE['$[1]'] + $[0].measures.references[1]: $HISTORY['list resources'].$RESPONSE['$[0]'] + $[0].measures.measures.aggregated: - ['2015-03-06T14:30:00+00:00', 300.0, 21.525] - ['2015-03-06T14:33:57+00:00', 1.0, 33.05] - ['2015-03-06T14:34:12+00:00', 1.0, 10.0] - group: + $[0].group: user_id: 6c865dd0-7945-4e08-8b27-d0d7f1c2b667 project_id: c7f32f1f-c5ef-427a-8ecd-915b219c66e8 - - measures: - aggregated: + $[1].measures.references.`len`: 1 + $[1].measures.references[0]: $HISTORY['list resources'].$RESPONSE['$[2]'] + $[1].measures.measures.aggregated: - ['2015-03-06T14:30:00+00:00', 300.0, 137.70499999999998] - ['2015-03-06T14:33:57+00:00', 1.0, 230.0] - ['2015-03-06T14:34:12+00:00', 1.0, 45.41] - group: + $[1].group: user_id: 6c865dd0-7945-4e08-8b27-d0d7f1c2b667 project_id: ee4cfc41-1cdc-4d2f-9a08-f94111d80171 diff --git a/gnocchi/tests/functional/gabbits/aggregation.yaml b/gnocchi/tests/functional/gabbits/aggregation.yaml index 212223e0..4c3dbe47 100644 --- a/gnocchi/tests/functional/gabbits/aggregation.yaml +++ b/gnocchi/tests/functional/gabbits/aggregation.yaml @@ -136,7 +136,7 @@ tests: data: operations: "(aggregate mean (resample mean 60 (metric ($HISTORY['get metric list'].$RESPONSE['$[0].id'] mean) ($HISTORY['get metric list'].$RESPONSE['$[1].id'] mean))))" response_json_paths: - $.aggregated: + 
$.measures.aggregated: - ['2015-03-06T14:33:00+00:00', 60.0, 23.1] - ['2015-03-06T14:34:00+00:00', 60.0, 7.0] - ['2015-03-06T14:35:00+00:00', 60.0, 5.0] @@ -242,7 +242,7 @@ tests: search: {} operations: '(aggregate mean (metric agg_meter mean))' response_json_paths: - $.aggregated: + $.measures.aggregated: - ['2015-03-06T14:33:57+00:00', 1.0, 23.1] - ['2015-03-06T14:34:12+00:00', 1.0, 7.0] - ['2015-03-06T14:35:12+00:00', 1.0, 5.0] @@ -261,7 +261,7 @@ tests: search: {} operations: '(aggregate mean (resample mean 60 (metric agg_meter mean)))' response_json_paths: - $.aggregated: + $.measures.aggregated: - ['2015-03-06T14:33:00+00:00', 60.0, 23.1] - ['2015-03-06T14:34:00+00:00', 60.0, 7.0] - ['2015-03-06T14:35:00+00:00', 60.0, 5.0] @@ -273,7 +273,7 @@ tests: search: {} operations: '(aggregate mean (resample mean 60 (metric agg_meter mean)))' response_json_paths: - $.aggregated: + $.measures.aggregated: - ['2015-03-06T14:33:00+00:00', 60.0, 23.1] - ['2015-03-06T14:34:00+00:00', 60.0, 7.0] - ['2015-03-06T14:35:00+00:00', 60.0, 5.0] @@ -308,7 +308,7 @@ tests: count: 10 delay: 1 response_json_paths: - $.aggregated: + $.measures.aggregated: - ['2015-03-06T14:30:00+00:00', 300.0, 15.05] - ['2015-03-06T14:33:57+00:00', 1.0, 23.1] @@ -337,7 +337,7 @@ tests: search: {} operations: '(aggregate mean (metric agg_meter mean))' response_json_paths: - $.aggregated: + $.measures.aggregated: - ['2015-03-06T14:33:57+00:00', 1.0, 23.1] - ['2015-03-06T14:34:12+00:00', 1.0, 7.0] - ['2015-03-06T14:35:12+00:00', 1.0, 2.5] diff --git a/gnocchi/tests/functional/gabbits/metric.yaml b/gnocchi/tests/functional/gabbits/metric.yaml index 9a277d0c..e824cfdd 100644 --- a/gnocchi/tests/functional/gabbits/metric.yaml +++ b/gnocchi/tests/functional/gabbits/metric.yaml @@ -218,7 +218,7 @@ tests: data: operations: "(negative (resample mean 60 (metric $HISTORY['list valid metrics'].$RESPONSE['$[0].id'] mean)))" response_json_paths: - $."$HISTORY['list valid metrics'].$RESPONSE['$[0].id']_mean": + 
$.measures."$HISTORY['list valid metrics'].$RESPONSE['$[0].id']".mean: - ["2015-03-06T14:33:00+00:00", 60.0, -43.1] - ["2015-03-06T14:34:00+00:00", 60.0, -14.0] - ["2015-03-06T14:35:00+00:00", 60.0, -10.0] @@ -240,7 +240,7 @@ tests: data: operations: "(absolute (metric $HISTORY['list valid metrics'].$RESPONSE['$[0].id'] mean))" response_json_paths: - $."$HISTORY['list valid metrics'].$RESPONSE['$[0].id']_mean": + $.measures."$HISTORY['list valid metrics'].$RESPONSE['$[0].id']".mean: - ["2015-03-06T14:33:57+00:00", 1.0, 43.1] - ["2015-03-06T14:34:12+00:00", 1.0, 12.0] - ["2015-03-06T14:34:15+00:00", 1.0, 16.0] @@ -255,7 +255,7 @@ tests: operations: "(rolling mean 2 (metric $HISTORY['list valid metrics'].$RESPONSE['$[0].id'] mean))" status: 200 response_json_paths: - $."$HISTORY['list valid metrics'].$RESPONSE['$[0].id']_mean": + $.measures."$HISTORY['list valid metrics'].$RESPONSE['$[0].id']".mean: - ["2015-03-06T14:34:12+00:00", 1.0, 27.55] - ["2015-03-06T14:34:15+00:00", 1.0, 14.0] - ["2015-03-06T14:35:12+00:00", 1.0, 12.5] @@ -268,7 +268,7 @@ tests: data: operations: "(negative (absolute (metric $HISTORY['list valid metrics'].$RESPONSE['$[0].id'] mean)))" response_json_paths: - $."$HISTORY['list valid metrics'].$RESPONSE['$[0].id']_mean": + $.measures."$HISTORY['list valid metrics'].$RESPONSE['$[0].id']".mean: - ["2015-03-06T14:33:57+00:00", 1.0, -43.1] - ["2015-03-06T14:34:12+00:00", 1.0, -12.0] - ["2015-03-06T14:34:15+00:00", 1.0, -16.0] diff --git a/gnocchi/tests/test_aggregates.py b/gnocchi/tests/test_aggregates.py index a295c7c3..87bf03c8 100644 --- a/gnocchi/tests/test_aggregates.py +++ b/gnocchi/tests/test_aggregates.py @@ -17,6 +17,7 @@ import datetime import functools import uuid +import mock import numpy from gnocchi import carbonara @@ -46,21 +47,24 @@ class TestAggregatedTimeseries(base.BaseTestCase): """Helper method that mimics _add_measures workflow.""" grouped = ts.group_serie(agg_dict['sampling']) existing = agg_dict.get('return') + name = 
agg_dict.get("name") + resource = None if name is None else mock.Mock(id=str(uuid.uuid4())) + metric = mock.Mock(id=str(uuid.uuid4()), name=name) agg_dict['return'] = ( - [agg_dict.get("name", "all"), 'mean'], + processor.MetricReference(metric, "mean", resource), carbonara.AggregatedTimeSerie.from_grouped_serie( grouped, agg_dict['sampling'], agg_dict['agg'], max_size=agg_dict.get('size'), truncate=True)) if existing: - existing[1].merge(agg_dict['return'][1]) + existing[2].merge(agg_dict['return'][2]) agg_dict['return'] = existing def test_aggregated_different_archive_no_overlap(self): tsc1 = {'sampling': numpy.timedelta64(60, 's'), - 'size': 50, 'agg': 'mean'} + 'size': 50, 'agg': 'mean', "name": "all"} tsb1 = carbonara.BoundTimeSerie(block_size=tsc1['sampling']) tsc2 = {'sampling': numpy.timedelta64(60, 's'), - 'size': 50, 'agg': 'mean'} + 'size': 50, 'agg': 'mean', "name": "all"} tsb2 = carbonara.BoundTimeSerie(block_size=tsc2['sampling']) tsb1.set_values(numpy.array([(datetime64(2014, 1, 1, 11, 46, 4), 4)], @@ -93,11 +97,14 @@ class TestAggregatedTimeseries(base.BaseTestCase): dtype=carbonara.TIMESERIES_ARRAY_DTYPE), before_truncate_callback=functools.partial( self._resample_and_merge, agg_dict=tsc1)) + metric = mock.Mock(id=str(uuid.uuid4())) + ref = processor.MetricReference(metric, "mean") self.assertRaises(exceptions.UnAggregableTimeseries, processor.aggregated, - [tsc1['return'], (("all", "mean"), tsc2)], + [tsc1['return'], (ref, tsc2)], operations=["aggregate", "mean", - ["metric", "all", "mean"]]) + ["metric", tsc1['return'][0].lookup_key, + ref.lookup_key]]) def test_aggregated_different_archive_overlap(self): tsc1 = {'sampling': numpy.timedelta64(60, 's'), @@ -149,7 +156,9 @@ class TestAggregatedTimeseries(base.BaseTestCase): from_timestamp=dtfrom, to_timestamp=dtto, operations=["aggregate", "mean", [ - "metric", tsc1['return'][0], tsc2['return'][0], + "metric", + tsc1['return'][0].lookup_key, + tsc2['return'][0].lookup_key, ]]) # Retry with 80% and 
it works @@ -157,7 +166,9 @@ class TestAggregatedTimeseries(base.BaseTestCase): tsc1['return'], tsc2['return']], from_timestamp=dtfrom, to_timestamp=dtto, operations=["aggregate", "mean", [ - "metric", tsc1['return'][0], tsc2['return'][0], + "metric", + tsc1['return'][0].lookup_key, + tsc2['return'][0].lookup_key, ]], needed_percent_of_overlap=80.0)["aggregated"] @@ -217,7 +228,9 @@ class TestAggregatedTimeseries(base.BaseTestCase): output = processor.aggregated([ tsc1['return'], tsc2['return']], operations=["aggregate", "sum", [ - "metric", tsc1['return'][0], tsc2['return'][0] + "metric", + tsc1['return'][0].lookup_key, + tsc2['return'][0].lookup_key ]])["aggregated"] self.assertEqual([ @@ -253,7 +266,9 @@ class TestAggregatedTimeseries(base.BaseTestCase): output = processor.aggregated( [tsc1['return'], tsc2['return']], operations=["aggregate", "mean", [ - "metric", tsc1['return'][0], tsc2['return'][0] + "metric", + tsc1['return'][0].lookup_key, + tsc2['return'][0].lookup_key ]])["aggregated"] self.assertEqual([ (datetime64( @@ -291,7 +306,9 @@ class TestAggregatedTimeseries(base.BaseTestCase): output = processor.aggregated([ tsc1['return'], tsc2['return']], operations=["aggregate", "mean", [ - "metric", tsc1['return'][0], tsc2['return'][0] + "metric", + tsc1['return'][0].lookup_key, + tsc2['return'][0].lookup_key ]], needed_percent_of_overlap=50.0)["aggregated"] self.assertEqual([ @@ -335,7 +352,9 @@ class TestAggregatedTimeseries(base.BaseTestCase): output = processor.aggregated([ tsc1['return'], tsc2['return']], operations=["aggregate", "mean", [ - "metric", tsc1['return'][0], tsc2['return'][0] + "metric", + tsc1['return'][0].lookup_key, + tsc2['return'][0].lookup_key ]], from_timestamp=datetime64(2014, 1, 1, 12, 0, 0), needed_percent_of_overlap=50.0)["aggregated"] @@ -356,7 +375,9 @@ class TestAggregatedTimeseries(base.BaseTestCase): output = processor.aggregated([ tsc1['return'], tsc2['return']], operations=["aggregate", "mean", [ - "metric", 
tsc1['return'][0], tsc2['return'][0], + "metric", + tsc1['return'][0].lookup_key, + tsc2['return'][0].lookup_key, ]], to_timestamp=datetime64(2014, 1, 1, 12, 7, 0), needed_percent_of_overlap=50.0)["aggregated"] @@ -408,7 +429,9 @@ class TestAggregatedTimeseries(base.BaseTestCase): output = processor.aggregated([ tsc1['return'], tsc2['return']], operations=["aggregate", "mean", [ - "metric", tsc1['return'][0], tsc2['return'][0] + "metric", + tsc1['return'][0].lookup_key, + tsc2['return'][0].lookup_key ]], fill=0)["aggregated"] self.assertEqual([ @@ -434,8 +457,8 @@ class TestAggregatedTimeseries(base.BaseTestCase): output = processor.aggregated([ tsc1['return'], tsc2['return']], - operations=["-", ["metric"] + tsc1['return'][0], - ["metric"] + tsc2['return'][0] + operations=["-", ["metric"] + tsc1['return'][0].lookup_key, + ["metric"] + tsc2['return'][0].lookup_key ], fill=0)["aggregated"] self.assertEqual([ @@ -493,7 +516,9 @@ class TestAggregatedTimeseries(base.BaseTestCase): output = processor.aggregated([ tsc1['return'], tsc2['return']], operations=["aggregate", "mean", [ - "metric", tsc1['return'][0], tsc2['return'][0] + "metric", + tsc1['return'][0].lookup_key, + tsc2['return'][0].lookup_key ]], fill='null')["aggregated"] self.assertEqual([ @@ -519,8 +544,8 @@ class TestAggregatedTimeseries(base.BaseTestCase): output = processor.aggregated([ tsc1['return'], tsc2['return']], - operations=["-", ["metric"] + tsc1['return'][0], - ["metric"] + tsc2['return'][0] + operations=["-", ["metric"] + tsc1['return'][0].lookup_key, + ["metric"] + tsc2['return'][0].lookup_key ], fill='null')["aggregated"] self.assertEqual([ @@ -574,7 +599,9 @@ class TestAggregatedTimeseries(base.BaseTestCase): output = processor.aggregated([ tsc1['return'], tsc2['return']], operations=["aggregate", "mean", [ - "metric", tsc1['return'][0], tsc2['return'][0] + "metric", + tsc1['return'][0].lookup_key, + tsc2['return'][0].lookup_key ]], fill=0)["aggregated"] self.assertEqual([ @@ -596,8 +623,8 
@@ class TestAggregatedTimeseries(base.BaseTestCase): output = processor.aggregated([ tsc1['return'], tsc2['return']], - operations=["-", ["metric"] + tsc1['return'][0], - ["metric"] + tsc2['return'][0] + operations=["-", ["metric"] + tsc1['return'][0].lookup_key, + ["metric"] + tsc2['return'][0].lookup_key ], fill=0)["aggregated"] self.assertEqual([ @@ -619,14 +646,14 @@ class TestAggregatedTimeseries(base.BaseTestCase): def test_aggregated_nominal(self): tsc1 = {'sampling': numpy.timedelta64(60, 's'), - 'size': 10, 'agg': 'mean', 'name': '1'} + 'size': 10, 'agg': 'mean'} tsc12 = {'sampling': numpy.timedelta64(300, 's'), - 'size': 6, 'agg': 'mean', 'name': '12'} + 'size': 6, 'agg': 'mean'} tsb1 = carbonara.BoundTimeSerie(block_size=tsc12['sampling']) tsc2 = {'sampling': numpy.timedelta64(60, 's'), - 'size': 10, 'agg': 'mean', 'name': '2'} + 'size': 10, 'agg': 'mean'} tsc22 = {'sampling': numpy.timedelta64(300, 's'), - 'size': 6, 'agg': 'mean', 'name': '22'} + 'size': 6, 'agg': 'mean'} tsb2 = carbonara.BoundTimeSerie(block_size=tsc22['sampling']) def ts1_update(ts): @@ -680,8 +707,8 @@ class TestAggregatedTimeseries(base.BaseTestCase): [tsc1['return'], tsc12['return'], tsc2['return'], tsc22['return']], operations=["aggregate", "mean", [ "metric", - tsc1['return'][0], tsc12['return'][0], - tsc2['return'][0], tsc22['return'][0] + tsc1['return'][0].lookup_key, tsc12['return'][0].lookup_key, + tsc2['return'][0].lookup_key, tsc22['return'][0].lookup_key ]])["aggregated"] self.assertEqual([ (datetime64(2014, 1, 1, 11, 45), @@ -745,7 +772,9 @@ class TestAggregatedTimeseries(base.BaseTestCase): output = processor.aggregated( [tsc1['return'], tsc2['return']], operations=["aggregate", "sum", [ - "metric", tsc1['return'][0], tsc2['return'][0] + "metric", + tsc1['return'][0].lookup_key, + tsc2['return'][0].lookup_key ]])["aggregated"] self.assertEqual([ @@ -764,7 +793,9 @@ class TestAggregatedTimeseries(base.BaseTestCase): [tsc1['return'], tsc2['return']], 
from_timestamp=dtfrom, to_timestamp=dtto, operations=["aggregate", "sum", [ - "metric", tsc1['return'][0], tsc2['return'][0] + "metric", + tsc1['return'][0].lookup_key, + tsc2['return'][0].lookup_key ]], needed_percent_of_overlap=0)["aggregated"] self.assertEqual([ (datetime64( @@ -791,7 +822,9 @@ class TestAggregatedTimeseries(base.BaseTestCase): output = processor.aggregated( [tsc1['return'], tsc2['return']], operations=["aggregate", "sum", [ - "metric", tsc1['return'][0], tsc2['return'][0] + "metric", + tsc1['return'][0].lookup_key, + tsc2['return'][0].lookup_key ]], needed_percent_of_overlap=0)["aggregated"] self.assertEqual([ (datetime64( @@ -809,20 +842,26 @@ class TestAggregatedTimeseries(base.BaseTestCase): [tsc1['return'], tsc2['return']], to_timestamp=dtto, operations=["aggregate", "sum", [ - "metric", tsc1['return'][0], tsc2['return'][0] + "metric", + tsc1['return'][0].lookup_key, + tsc2['return'][0].lookup_key ]]) self.assertRaises(exceptions.UnAggregableTimeseries, processor.aggregated, [tsc1['return'], tsc2['return']], from_timestamp=dtfrom, operations=["aggregate", "sum", [ - "metric", tsc1['return'][0], tsc2['return'][0] + "metric", + tsc1['return'][0].lookup_key, + tsc2['return'][0].lookup_key ]]) # Retry with 50% and it works output = processor.aggregated( [tsc1['return'], tsc2['return']], from_timestamp=dtfrom, operations=["aggregate", "sum", [ - "metric", tsc1['return'][0], tsc2['return'][0] + "metric", + tsc1['return'][0].lookup_key, + tsc2['return'][0].lookup_key ]], needed_percent_of_overlap=50.0)["aggregated"] self.assertEqual([ (datetime64( @@ -842,7 +881,9 @@ class TestAggregatedTimeseries(base.BaseTestCase): output = processor.aggregated( [tsc1['return'], tsc2['return']], to_timestamp=dtto, operations=["aggregate", "sum", [ - "metric", tsc1['return'][0], tsc2['return'][0] + "metric", + tsc1['return'][0].lookup_key, + tsc2['return'][0].lookup_key ]], needed_percent_of_overlap=50.0)["aggregated"] self.assertEqual([ (datetime64( @@ -867,13 
+908,15 @@ class CrossMetricAggregated(base.TestCase): self.metric, __ = self._create_metric() def test_get_measures_empty_metrics_no_overlap(self): + references = [ + processor.MetricReference(indexer.Metric( + uuid.uuid4(), self.archive_policies['low']), 'mean'), + processor.MetricReference(indexer.Metric( + uuid.uuid4(), self.archive_policies['low']), 'mean'), + ] self.assertRaises( exceptions.UnAggregableTimeseries, - processor.get_measures, self.storage, - [(indexer.Metric(uuid.uuid4(), - self.archive_policies['low']), 'mean'), - (indexer.Metric(uuid.uuid4(), - self.archive_policies['low']), 'mean')], + processor.get_measures, self.storage, references, operations=["aggregate", "mean", [ "metric", ["whatever", "mean"], ["everwhat", "mean"], ]]) @@ -881,9 +924,9 @@ class CrossMetricAggregated(base.TestCase): def test_get_measures_empty_metric_needed_overlap_zero(self): m_id = str(self.metric.id) result = processor.get_measures( - self.storage, [(self.metric, "mean")], + self.storage, [processor.MetricReference(self.metric, "mean")], operations=["metric", m_id, "mean"], needed_overlap=0) - self.assertEqual({'%s_mean' % m_id: []}, result) + self.assertEqual({m_id: {"mean": []}}, result) def test_get_measures_unknown_aggregation(self): metric2 = indexer.Metric(uuid.uuid4(), @@ -903,7 +946,8 @@ class CrossMetricAggregated(base.TestCase): self.assertRaises(storage.AggregationDoesNotExist, processor.get_measures, self.storage, - [(self.metric, 'last'), (metric2, 'last')], + [processor.MetricReference(self.metric, 'last'), + processor.MetricReference(metric2, 'last')], operations=["aggregate", "mean", [ "metric", [str(self.metric.id), "last"], @@ -928,7 +972,8 @@ class CrossMetricAggregated(base.TestCase): self.assertRaises(exceptions.UnAggregableTimeseries, processor.get_measures, self.storage, - [(self.metric, "mean"), (metric2, "mean")], + [processor.MetricReference(self.metric, "mean"), + processor.MetricReference(metric2, "mean")], operations=["aggregate", "mean", 
[ "metric", [str(self.metric.id), "mean"], @@ -955,7 +1000,8 @@ class CrossMetricAggregated(base.TestCase): self.assertRaises(exceptions.UnAggregableTimeseries, processor.get_measures, self.storage, - [(self.metric, "mean"), (metric2, "mean")], + [processor.MetricReference(self.metric, "mean"), + processor.MetricReference(metric2, "mean")], operations=["aggregate", "mean", [ "metric", [str(self.metric.id), "mean"], @@ -979,7 +1025,9 @@ class CrossMetricAggregated(base.TestCase): self.trigger_processing([str(self.metric.id), str(metric2.id)]) values = processor.get_measures( - self.storage, [(self.metric, "mean"), (metric2, "mean")], + self.storage, + [processor.MetricReference(self.metric, "mean"), + processor.MetricReference(metric2, "mean")], operations=["aggregate", "mean", [ "metric", [str(self.metric.id), "mean"], @@ -999,7 +1047,9 @@ class CrossMetricAggregated(base.TestCase): ], values) values = processor.get_measures( - self.storage, [(self.metric, "mean"), (metric2, "mean")], + self.storage, + [processor.MetricReference(self.metric, "mean"), + processor.MetricReference(metric2, "mean")], operations=["aggregate", "max", [ "metric", [str(self.metric.id), "mean"], @@ -1019,7 +1069,9 @@ class CrossMetricAggregated(base.TestCase): ], values) values = processor.get_measures( - self.storage, [(self.metric, "mean"), (metric2, "mean")], + self.storage, + [processor.MetricReference(self.metric, "mean"), + processor.MetricReference(metric2, "mean")], operations=["aggregate", "mean", [ "metric", [str(self.metric.id), "mean"], @@ -1036,7 +1088,9 @@ class CrossMetricAggregated(base.TestCase): ], values) values = processor.get_measures( - self.storage, [(self.metric, "mean"), (metric2, "mean")], + self.storage, + [processor.MetricReference(self.metric, "mean"), + processor.MetricReference(metric2, "mean")], operations=["aggregate", "mean", [ "metric", [str(self.metric.id), "mean"], @@ -1054,7 +1108,9 @@ class CrossMetricAggregated(base.TestCase): ], values) values = 
processor.get_measures( - self.storage, [(self.metric, "mean"), (metric2, "mean")], + self.storage, + [processor.MetricReference(self.metric, "mean"), + processor.MetricReference(metric2, "mean")], operations=["aggregate", "mean", [ "metric", [str(self.metric.id), "mean"], @@ -1072,7 +1128,9 @@ class CrossMetricAggregated(base.TestCase): ], values) values = processor.get_measures( - self.storage, [(self.metric, "mean"), (metric2, "mean")], + self.storage, + [processor.MetricReference(self.metric, "mean"), + processor.MetricReference(metric2, "mean")], operations=["aggregate", "mean", [ "metric", [str(self.metric.id), "mean"], @@ -1091,7 +1149,9 @@ class CrossMetricAggregated(base.TestCase): ], values) values = processor.get_measures( - self.storage, [(self.metric, "mean"), (metric2, "mean")], + self.storage, + [processor.MetricReference(self.metric, "mean"), + processor.MetricReference(metric2, "mean")], operations=["aggregate", "mean", [ "metric", [str(self.metric.id), "mean"], @@ -1124,7 +1184,9 @@ class CrossMetricAggregated(base.TestCase): self.trigger_processing([str(self.metric.id), str(metric2.id)]) values = processor.get_measures( - self.storage, [(self.metric, 'mean'), (metric2, 'mean')], + self.storage, + [processor.MetricReference(self.metric, 'mean'), + processor.MetricReference(metric2, 'mean')], operations=["aggregate", "mean", [ "metric", [str(self.metric.id), "mean"], @@ -1160,7 +1222,9 @@ class CrossMetricAggregated(base.TestCase): self.trigger_processing([str(self.metric.id), str(metric2.id)]) values = processor.get_measures( - self.storage, [(self.metric, "mean"), (metric2, "mean")], + self.storage, + [processor.MetricReference(self.metric, "mean"), + processor.MetricReference(metric2, "mean")], ["resample", "mean", numpy.timedelta64(1, 'D'), ["metric", [str(self.metric.id), "mean"], @@ -1168,12 +1232,14 @@ class CrossMetricAggregated(base.TestCase): granularity=numpy.timedelta64(1, 'h')) self.assertEqual({ - "%s_%s" % (self.metric.id, "mean"): [ 
- (datetime64(2014, 1, 1, 0, 0, 0), - numpy.timedelta64(1, 'D'), 39.75)], - "%s_%s" % (metric2.id, "mean"): [ - (datetime64(2014, 1, 1, 0, 0, 0), - numpy.timedelta64(1, 'D'), 4.75)], + str(self.metric.id): { + "mean": [(datetime64(2014, 1, 1, 0, 0, 0), + numpy.timedelta64(1, 'D'), 39.75)] + }, + str(metric2.id): { + "mean": [(datetime64(2014, 1, 1, 0, 0, 0), + numpy.timedelta64(1, 'D'), 4.75)] + } }, values) def test_resample_minus_2_on_right(self): @@ -1193,7 +1259,9 @@ class CrossMetricAggregated(base.TestCase): self.trigger_processing([str(self.metric.id), str(metric2.id)]) values = processor.get_measures( - self.storage, [(self.metric, "mean"), (metric2, "mean")], + self.storage, + [processor.MetricReference(self.metric, "mean"), + processor.MetricReference(metric2, "mean")], ["-", ["resample", "mean", numpy.timedelta64(1, 'D'), ["metric", [str(self.metric.id), "mean"], @@ -1201,12 +1269,14 @@ class CrossMetricAggregated(base.TestCase): granularity=numpy.timedelta64(1, 'h')) self.assertEqual({ - "%s_%s" % (self.metric.id, "mean"): [ - (datetime64(2014, 1, 1, 0, 0, 0), - numpy.timedelta64(1, 'D'), 37.75)], - "%s_%s" % (metric2.id, "mean"): [ - (datetime64(2014, 1, 1, 0, 0, 0), - numpy.timedelta64(1, 'D'), 2.75)], + str(self.metric.id): { + "mean": [(datetime64(2014, 1, 1, 0, 0, 0), + numpy.timedelta64(1, 'D'), 37.75)] + }, + str(metric2.id): { + "mean": [(datetime64(2014, 1, 1, 0, 0, 0), + numpy.timedelta64(1, 'D'), 2.75)] + } }, values) def test_resample_minus_2_on_left(self): @@ -1226,7 +1296,9 @@ class CrossMetricAggregated(base.TestCase): self.trigger_processing([str(self.metric.id), str(metric2.id)]) values = processor.get_measures( - self.storage, [(self.metric, "mean"), (metric2, "mean")], + self.storage, + [processor.MetricReference(self.metric, "mean"), + processor.MetricReference(metric2, "mean")], ["-", 2, ["resample", "mean", numpy.timedelta64(1, 'D'), @@ -1236,12 +1308,14 @@ class CrossMetricAggregated(base.TestCase): 
granularity=numpy.timedelta64(1, 'h')) self.assertEqual({ - "%s_%s" % (self.metric.id, "mean"): [ - (datetime64(2014, 1, 1, 0, 0, 0), - numpy.timedelta64(1, 'D'), -37.75)], - "%s_%s" % (metric2.id, "mean"): [ - (datetime64(2014, 1, 1, 0, 0, 0), - numpy.timedelta64(1, 'D'), -2.75)], + str(self.metric.id): { + "mean": [(datetime64(2014, 1, 1, 0, 0, 0), + numpy.timedelta64(1, 'D'), -37.75)] + }, + str(metric2.id): { + "mean": [(datetime64(2014, 1, 1, 0, 0, 0), + numpy.timedelta64(1, 'D'), -2.75)] + } }, values) def test_rolling(self): @@ -1261,29 +1335,31 @@ class CrossMetricAggregated(base.TestCase): self.trigger_processing([str(self.metric.id), str(metric2.id)]) values = processor.get_measures( - self.storage, [(self.metric, "mean"), (metric2, "mean")], + self.storage, + [processor.MetricReference(self.metric, "mean"), + processor.MetricReference(metric2, "mean")], ["/", ["rolling", "sum", 2, ["metric", [str(self.metric.id), "mean"], [str(metric2.id), "mean"]]], 2], granularity=numpy.timedelta64(5, 'm')) self.assertEqual({ - "%s_%s" % (self.metric.id, "mean"): [ - (datetime64(2014, 1, 1, 12, 5, 0), - numpy.timedelta64(5, 'm'), 55.5), - (datetime64(2014, 1, 1, 12, 10, 0), - numpy.timedelta64(5, 'm'), 23), - (datetime64(2014, 1, 1, 12, 15, 0), - numpy.timedelta64(5, 'm'), 24) - ], - "%s_%s" % (metric2.id, "mean"): [ - (datetime64(2014, 1, 1, 12, 5, 0), - numpy.timedelta64(5, 'm'), 5.5), - (datetime64(2014, 1, 1, 12, 10, 0), - numpy.timedelta64(5, 'm'), 3), - (datetime64(2014, 1, 1, 12, 15, 0), - numpy.timedelta64(5, 'm'), 4), - ], + str(self.metric.id): { + "mean": [(datetime64(2014, 1, 1, 12, 5, 0), + numpy.timedelta64(5, 'm'), 55.5), + (datetime64(2014, 1, 1, 12, 10, 0), + numpy.timedelta64(5, 'm'), 23), + (datetime64(2014, 1, 1, 12, 15, 0), + numpy.timedelta64(5, 'm'), 24)] + }, + str(metric2.id): { + "mean": [(datetime64(2014, 1, 1, 12, 5, 0), + numpy.timedelta64(5, 'm'), 5.5), + (datetime64(2014, 1, 1, 12, 10, 0), + numpy.timedelta64(5, 'm'), 3), + 
(datetime64(2014, 1, 1, 12, 15, 0), + numpy.timedelta64(5, 'm'), 4)] + } }, values) def test_binary_operator_with_two_references(self): @@ -1303,7 +1379,9 @@ class CrossMetricAggregated(base.TestCase): self.trigger_processing([str(self.metric.id), str(metric2.id)]) values = processor.get_measures( - self.storage, [(self.metric, "mean"), (metric2, "mean")], + self.storage, + [processor.MetricReference(self.metric, "mean"), + processor.MetricReference(metric2, "mean")], ["*", ["metric", str(self.metric.id), "mean"], ["metric", str(metric2.id), "mean"]], granularity=numpy.timedelta64(1, 'h'))["aggregated"] @@ -1330,20 +1408,21 @@ class CrossMetricAggregated(base.TestCase): self.trigger_processing([str(self.metric.id)]) values = processor.get_measures( - self.storage, [(self.metric, "mean")], + self.storage, [processor.MetricReference(self.metric, "mean")], ["*", ["metric", str(self.metric.id), "mean"], 2], granularity=numpy.timedelta64(1, 'h')) - self.assertEqual([ - (datetime64(2014, 1, 1, 12, 0, 0), - numpy.timedelta64(1, 'h'), 138), - (datetime64(2014, 1, 1, 13, 0, 0), - numpy.timedelta64(1, 'h'), 84), - (datetime64(2014, 1, 1, 14, 0, 0), - numpy.timedelta64(1, 'h'), 8), - (datetime64(2014, 1, 1, 15, 0, 0), - numpy.timedelta64(1, 'h'), 88), - ], values["%s_mean" % self.metric.id]) + self.assertEqual({str(self.metric.id): { + "mean": [ + (datetime64(2014, 1, 1, 12, 0, 0), + numpy.timedelta64(1, 'h'), 138), + (datetime64(2014, 1, 1, 13, 0, 0), + numpy.timedelta64(1, 'h'), 84), + (datetime64(2014, 1, 1, 14, 0, 0), + numpy.timedelta64(1, 'h'), 8), + (datetime64(2014, 1, 1, 15, 0, 0), + numpy.timedelta64(1, 'h'), 88)] + }}, values) def test_binary_operator_ts_on_right(self): metric2, __ = self._create_metric() @@ -1356,20 +1435,20 @@ class CrossMetricAggregated(base.TestCase): self.trigger_processing([str(self.metric.id)]) values = processor.get_measures( - self.storage, [(self.metric, "mean")], + self.storage, [processor.MetricReference(self.metric, "mean")], ["*", 2, 
["metric", str(self.metric.id), "mean"]], granularity=numpy.timedelta64(1, 'h')) - self.assertEqual([ - (datetime64(2014, 1, 1, 12, 0, 0), - numpy.timedelta64(1, 'h'), 138), - (datetime64(2014, 1, 1, 13, 0, 0), - numpy.timedelta64(1, 'h'), 84), - (datetime64(2014, 1, 1, 14, 0, 0), - numpy.timedelta64(1, 'h'), 8), - (datetime64(2014, 1, 1, 15, 0, 0), - numpy.timedelta64(1, 'h'), 88), - ], values["%s_mean" % self.metric.id]) + self.assertEqual({str(self.metric.id): { + "mean": [(datetime64(2014, 1, 1, 12, 0, 0), + numpy.timedelta64(1, 'h'), 138), + (datetime64(2014, 1, 1, 13, 0, 0), + numpy.timedelta64(1, 'h'), 84), + (datetime64(2014, 1, 1, 14, 0, 0), + numpy.timedelta64(1, 'h'), 8), + (datetime64(2014, 1, 1, 15, 0, 0), + numpy.timedelta64(1, 'h'), 88)] + }}, values) def test_mix(self): metric2, __ = self._create_metric() @@ -1388,7 +1467,9 @@ class CrossMetricAggregated(base.TestCase): self.trigger_processing([str(self.metric.id), str(metric2.id)]) values = processor.get_measures( - self.storage, [(self.metric, "mean"), (metric2, "mean")], + self.storage, + [processor.MetricReference(self.metric, "mean"), + processor.MetricReference(metric2, "mean")], [ "rolling", "sum", @@ -1424,7 +1505,9 @@ class CrossMetricAggregated(base.TestCase): self.trigger_processing([str(self.metric.id), str(metric2.id)]) values = processor.get_measures( - self.storage, [(self.metric, "mean"), (metric2, "mean")], + self.storage, + [processor.MetricReference(self.metric, "mean"), + processor.MetricReference(metric2, "mean")], [ "gt", [ @@ -1452,7 +1535,7 @@ class CrossMetricAggregated(base.TestCase): ], values) def test_unary_operator(self): - metric2, __ = self._create_metric() + metric2, _ = self._create_metric() self.incoming.add_measures(self.metric, [ incoming.Measure(datetime64(2014, 1, 1, 12, 0, 1), -69), incoming.Measure(datetime64(2014, 1, 1, 13, 1, 31), 42), @@ -1468,30 +1551,31 @@ class CrossMetricAggregated(base.TestCase): self.trigger_processing([str(self.metric.id), 
str(metric2.id)]) values = processor.get_measures( - self.storage, [(self.metric, "mean"), (metric2, "mean")], + self.storage, + [processor.MetricReference(self.metric, "mean"), + processor.MetricReference(metric2, "mean")], ["abs", ["metric", [str(self.metric.id), "mean"], [str(metric2.id), "mean"]]], granularity=numpy.timedelta64(1, 'h')) self.assertEqual({ - "%s_%s" % (self.metric.id, "mean"): [ - (datetime64(2014, 1, 1, 12, 0, 0), - numpy.timedelta64(1, 'h'), 69), - (datetime64(2014, 1, 1, 13, 0, 0), - numpy.timedelta64(1, 'h'), 42), - (datetime64(2014, 1, 1, 14, 0, 0), - numpy.timedelta64(1, 'h'), 4), - (datetime64(2014, 1, 1, 15, 0, 0), - numpy.timedelta64(1, 'h'), 44) - ], - "%s_%s" % (metric2.id, "mean"): [ - (datetime64(2014, 1, 1, 12, 0, 0), - numpy.timedelta64(1, 'h'), 9), - (datetime64(2014, 1, 1, 13, 0, 0), - numpy.timedelta64(1, 'h'), 2), - (datetime64(2014, 1, 1, 14, 0, 0), - numpy.timedelta64(1, 'h'), 4), - (datetime64(2014, 1, 1, 15, 0, 0), - numpy.timedelta64(1, 'h'), 4), - ], + str(self.metric.id): { + "mean": [(datetime64(2014, 1, 1, 12, 0, 0), + numpy.timedelta64(1, 'h'), 69), + (datetime64(2014, 1, 1, 13, 0, 0), + numpy.timedelta64(1, 'h'), 42), + (datetime64(2014, 1, 1, 14, 0, 0), + numpy.timedelta64(1, 'h'), 4), + (datetime64(2014, 1, 1, 15, 0, 0), + numpy.timedelta64(1, 'h'), 44)]}, + str(metric2.id): { + "mean": [(datetime64(2014, 1, 1, 12, 0, 0), + numpy.timedelta64(1, 'h'), 9), + (datetime64(2014, 1, 1, 13, 0, 0), + numpy.timedelta64(1, 'h'), 2), + (datetime64(2014, 1, 1, 14, 0, 0), + numpy.timedelta64(1, 'h'), 4), + (datetime64(2014, 1, 1, 15, 0, 0), + numpy.timedelta64(1, 'h'), 4), + ]} }, values) diff --git a/releasenotes/notes/aggregates-api-output-change-2bc6620c7f595925.yaml b/releasenotes/notes/aggregates-api-output-change-2bc6620c7f595925.yaml new file mode 100644 index 00000000..c9a7115c --- /dev/null +++ b/releasenotes/notes/aggregates-api-output-change-2bc6620c7f595925.yaml @@ -0,0 +1,6 @@ +--- +fixes: + - | + Aggregates API 
output introduced in 4.1.0 doesn't allow for easy identification of which timeseries + is associated with what metrics/resources that have been queried. This have been fixed, but + the new output format is not backwards compatible with the format released in 4.1.0. -- GitLab From 51ce9cfc9cde7d1173ff31a86c9e95f82d7969f6 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Wed, 15 Nov 2017 10:03:01 +0100 Subject: [PATCH 1082/1483] aggregates: make references optional The output of "references", can be large. The client application can have already this references somewhere, so by default don't return it, client have to pass details=True trought query string or header to get them. --- gnocchi/rest/aggregates/api.py | 36 ++++++---- .../gabbits/aggregates-with-metric-ids.yaml | 71 +++++++++++++++---- .../gabbits/aggregates-with-resources.yaml | 8 +-- 3 files changed, 84 insertions(+), 31 deletions(-) diff --git a/gnocchi/rest/aggregates/api.py b/gnocchi/rest/aggregates/api.py index af4f4221..abbf18ee 100644 --- a/gnocchi/rest/aggregates/api.py +++ b/gnocchi/rest/aggregates/api.py @@ -189,7 +189,9 @@ class AggregatesController(rest.RestController): @pecan.expose("json") def post(self, start=None, stop=None, granularity=None, - needed_overlap=None, fill=None, groupby=None): + needed_overlap=None, fill=None, groupby=None, **kwargs): + details = api.get_details(kwargs) + if fill is None and needed_overlap is None: fill = "dropna" start, stop, granularity, needed_overlap, fill = api.validate_qs( @@ -226,7 +228,7 @@ class AggregatesController(rest.RestController): if not groupby: return self._get_measures_by_name( resources, references, body["operations"], start, stop, - granularity, needed_overlap, fill) + granularity, needed_overlap, fill, details=details) def groupper(r): return tuple((attr, r[attr]) for attr in groupby) @@ -237,7 +239,7 @@ class AggregatesController(rest.RestController): "group": dict(key), "measures": self._get_measures_by_name( resources, references, 
body["operations"], start, stop, - granularity, needed_overlap, fill) + granularity, needed_overlap, fill, details=details) }) return results @@ -268,13 +270,19 @@ class AggregatesController(rest.RestController): metrics_by_ids = dict((six.text_type(m.id), m) for m in metrics) references = [processor.MetricReference(metrics_by_ids[m], a) for (m, a) in references] - return {"references": references, - "measures": get_measures_or_abort( - references, body["operations"], - start, stop, granularity, needed_overlap, fill)} + + response = { + "measures": get_measures_or_abort( + references, body["operations"], + start, stop, granularity, needed_overlap, fill) + } + if details: + response["references"] = references + return response def _get_measures_by_name(self, resources, metric_names, operations, - start, stop, granularity, needed_overlap, fill): + start, stop, granularity, needed_overlap, fill, + details): references = [ processor.MetricReference(r.get_metric(metric_name), agg, r) @@ -286,7 +294,11 @@ class AggregatesController(rest.RestController): api.abort(400, {"cause": "Metrics not found", "detail": set((m for (m, a) in metric_names))}) - return {"references": references, - "measures": get_measures_or_abort( - references, operations, start, stop, granularity, - needed_overlap, fill)} + response = { + "measures": get_measures_or_abort( + references, operations, start, stop, granularity, + needed_overlap, fill) + } + if details: + response["references"] = references + return response diff --git a/gnocchi/tests/functional/gabbits/aggregates-with-metric-ids.yaml b/gnocchi/tests/functional/gabbits/aggregates-with-metric-ids.yaml index 8237fc47..8d8fd209 100644 --- a/gnocchi/tests/functional/gabbits/aggregates-with-metric-ids.yaml +++ b/gnocchi/tests/functional/gabbits/aggregates-with-metric-ids.yaml @@ -123,12 +123,52 @@ tests: - ["2015-03-06T14:35:12+00:00", 1.0, 10.0] - ["2015-03-06T14:35:15+00:00", 1.0, 15.0] - - name: get aggregates - desc: we put metric2 twice 
to ensure we retrieve it once + - name: get measurements from metric3 + GET: /v1/metric/$HISTORY['create metric3'].$RESPONSE['$.id']/measures?refresh=true + response_json_paths: + $: [] + + - name: get measurements from metric4 + GET: /v1/metric/$HISTORY['create metric4'].$RESPONSE['$.id']/measures?refresh=true + response_json_paths: + $: + - ["2017-04-06T14:33:00+00:00", 60.0, 20.0] + - ["2017-04-06T14:34:00+00:00", 60.0, 10.0] + - ["2017-04-06T14:33:57+00:00", 1.0, 20.0] + - ["2017-04-06T14:34:12+00:00", 1.0, 10.0] + + - name: get aggregates, no references POST: /v1/aggregates + data: + operations: ["metric", ["$HISTORY['create metric1'].$RESPONSE['$.id']", "mean"], ["$HISTORY['create metric2'].$RESPONSE['$.id']", "mean"]] + response_json_paths: + $.`len`: 1 + $.measures."$HISTORY['create metric1'].$RESPONSE['$.id']".mean: + - ["2015-03-06T14:33:00+00:00", 60.0, 43.1] + - ["2015-03-06T14:34:00+00:00", 60.0, -2.0] + - ["2015-03-06T14:35:00+00:00", 60.0, 10.0] + - ["2015-03-06T14:33:57+00:00", 1.0, 43.1] + - ["2015-03-06T14:34:12+00:00", 1.0, 12.0] + - ["2015-03-06T14:34:15+00:00", 1.0, -16.0] + - ["2015-03-06T14:35:12+00:00", 1.0, 9.0] + - ["2015-03-06T14:35:15+00:00", 1.0, 11.0] + $.measures."$HISTORY['create metric2'].$RESPONSE['$.id']".mean: + - ["2015-03-06T14:33:00+00:00", 60.0, 2.0] + - ["2015-03-06T14:34:00+00:00", 60.0, 4.5] + - ["2015-03-06T14:35:00+00:00", 60.0, 12.5] + - ["2015-03-06T14:33:57+00:00", 1.0, 2.0] + - ["2015-03-06T14:34:12+00:00", 1.0, 4.0] + - ["2015-03-06T14:34:15+00:00", 1.0, 5.0] + - ["2015-03-06T14:35:12+00:00", 1.0, 10.0] + - ["2015-03-06T14:35:15+00:00", 1.0, 15.0] + + - name: get aggregates with references + desc: we put metric2 twice to ensure we retrieve it once + POST: /v1/aggregates?details=true data: operations: ["metric", ["$HISTORY['create metric1'].$RESPONSE['$.id']", "mean"], ["$HISTORY['create metric2'].$RESPONSE['$.id']", "mean"], ["$HISTORY['create metric2'].$RESPONSE['$.id']", "mean"]] response_json_paths: + $.`len`: 2 
$.references.`len`: 2 $.references[0].id: $HISTORY['create metric1'].$RESPONSE['$.id'] $.references[1].id: $HISTORY['create metric2'].$RESPONSE['$.id'] @@ -156,6 +196,7 @@ tests: - name: get aggregates start and stop POST: /v1/aggregates query_parameters: + details: true start: "2015-03-06T14:34:00" stop: "2015-03-06T14:35:13" data: @@ -180,7 +221,7 @@ tests: - ["2015-03-06T14:35:12+00:00", 1.0, 10.0] - name: get aggregates granularity - POST: /v1/aggregates?granularity=60 + POST: /v1/aggregates?granularity=60&details=true data: operations: ["metric", ["$HISTORY['create metric1'].$RESPONSE['$.id']", "max"], ["$HISTORY['create metric2'].$RESPONSE['$.id']", "min"]] response_json_paths: @@ -199,7 +240,7 @@ tests: - ["2015-03-06T14:35:00+00:00", 60.0, 10.0] - name: get aggregates simple with array - POST: /v1/aggregates + POST: /v1/aggregates?details=true data: operations: ["+", ["metric", ["$HISTORY['create metric1'].$RESPONSE['$.id']", "mean"], ["$HISTORY['create metric2'].$RESPONSE['$.id']", "mean"]], 2.0] response_json_paths: @@ -227,7 +268,7 @@ tests: - ["2015-03-06T14:35:15+00:00", 1.0, 17.0] - name: get aggregates resample - POST: /v1/aggregates?granularity=1 + POST: /v1/aggregates?granularity=1&details=true data: operations: - resample @@ -250,7 +291,7 @@ tests: - ["2015-03-06T14:35:00+00:00", 60.0, 12.5] - name: get aggregates rolling - POST: /v1/aggregates?granularity=1 + POST: /v1/aggregates?granularity=1&details=true data: operations: - rolling @@ -276,7 +317,7 @@ tests: - name: get one metric - POST: /v1/aggregates + POST: /v1/aggregates?details=true data: operations: "(metric $HISTORY['create metric1'].$RESPONSE['$.id'] mean)" response_json_paths: @@ -293,7 +334,7 @@ tests: - ["2015-03-06T14:35:15+00:00", 1.0, 11.0] - name: get aggregates one metric - POST: /v1/aggregates + POST: /v1/aggregates?details=true data: operations: "(aggregate mean (metric $HISTORY['create metric1'].$RESPONSE['$.id'] mean))" response_json_paths: @@ -310,7 +351,7 @@ tests: - 
["2015-03-06T14:35:15+00:00", 1.0, 11.0] - name: get aggregates math with string - POST: /v1/aggregates + POST: /v1/aggregates?details=true data: operations: "(+ (metric ($HISTORY['create metric1'].$RESPONSE['$.id'] mean) ($HISTORY['create metric2'].$RESPONSE['$.id'] mean)) 2.0)" response_json_paths: @@ -337,7 +378,7 @@ tests: - ["2015-03-06T14:35:15+00:00", 1.0, 17.0] - name: get aggregates substact - POST: /v1/aggregates + POST: /v1/aggregates?details=true data: operations: "(- (metric $HISTORY['create metric1'].$RESPONSE['$.id'] mean) (metric $HISTORY['create metric2'].$RESPONSE['$.id'] mean)))" response_json_paths: @@ -355,7 +396,7 @@ tests: - ["2015-03-06T14:35:15+00:00", 1.0, -4.0] - name: get aggregates mean aggregate - POST: /v1/aggregates + POST: /v1/aggregates?details=true data: operations: "(aggregate mean (metric ($HISTORY['create metric1'].$RESPONSE['$.id'] mean) ($HISTORY['create metric2'].$RESPONSE['$.id'] mean)))" response_json_paths: @@ -373,7 +414,7 @@ tests: - ["2015-03-06T14:35:15+00:00", 1.0, 13.0] - name: get aggregates negative absolute - POST: /v1/aggregates + POST: /v1/aggregates?details=true data: operations: "(negative (absolute (aggregate mean (metric ($HISTORY['create metric1'].$RESPONSE['$.id'] mean) ($HISTORY['create metric2'].$RESPONSE['$.id'] mean)))))" response_json_paths: @@ -404,7 +445,7 @@ tests: GET: /v1/metric/$HISTORY['create metric1'].$RESPONSE['$.id']/measures?refresh=true - name: fill and no granularity - POST: /v1/aggregates?fill=123 + POST: /v1/aggregates?fill=123&details=true data: operations: "(metric ($HISTORY['create metric1'].$RESPONSE['$.id'] mean) ($HISTORY['create metric2'].$RESPONSE['$.id'] mean))" response_json_paths: @@ -439,7 +480,7 @@ tests: - ['2015-03-06T14:38:00+00:00', 1.0, 123.0] - name: no overlap dropna - POST: /v1/aggregates + POST: /v1/aggregates?details=true data: operations: "(metric ($HISTORY['create metric1'].$RESPONSE['$.id'] mean) ($HISTORY['create metric4'].$RESPONSE['$.id'] mean))" 
response_json_paths: @@ -466,7 +507,7 @@ tests: - ["2017-04-06T14:34:12+00:00", 1.0, 10.0] - name: no overlap null - POST: /v1/aggregates?fill=null + POST: /v1/aggregates?fill=null&details=true data: operations: "(metric ($HISTORY['create metric1'].$RESPONSE['$.id'] mean) ($HISTORY['create metric4'].$RESPONSE['$.id'] mean))" response_json_paths: diff --git a/gnocchi/tests/functional/gabbits/aggregates-with-resources.yaml b/gnocchi/tests/functional/gabbits/aggregates-with-resources.yaml index d9fb5d4a..384f1f32 100644 --- a/gnocchi/tests/functional/gabbits/aggregates-with-resources.yaml +++ b/gnocchi/tests/functional/gabbits/aggregates-with-resources.yaml @@ -117,7 +117,7 @@ tests: GET: /v1/resource/generic - name: aggregate metric - POST: /v1/aggregates + POST: /v1/aggregates?details=true data: resource_type: generic search: "user_id = '6c865dd0-7945-4e08-8b27-d0d7f1c2b667'" @@ -136,7 +136,7 @@ tests: - ['2015-03-06T14:34:12+00:00', 1.0, 21.80333333333333] - name: batch get - POST: /v1/aggregates + POST: /v1/aggregates?details=true data: resource_type: generic search: "user_id = '6c865dd0-7945-4e08-8b27-d0d7f1c2b667'" @@ -163,7 +163,7 @@ tests: - ['2015-03-06T14:34:12+00:00', 1.0, 45.41] - name: stupid but valid batch get - POST: /v1/aggregates + POST: /v1/aggregates?details=true data: resource_type: generic search: "id = '4ed9c196-4c9f-4ba8-a5be-c9a71a82aac4'" @@ -180,7 +180,7 @@ tests: - ['2015-03-06T14:34:12+00:00', 1.0, 12.0] - name: aggregate metric with groupby on project_id and user_id with aggregates API - POST: /v1/aggregates?groupby=project_id&groupby=user_id + POST: /v1/aggregates?groupby=project_id&groupby=user_id&details=true data: resource_type: generic search: "user_id = '6c865dd0-7945-4e08-8b27-d0d7f1c2b667'" -- GitLab From 9e2f8a8b464e7999ba0507974384c0739966eceb Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Tue, 7 Nov 2017 13:21:50 +0100 Subject: [PATCH 1083/1483] doc: rewrite intro --- README.rst | 25 ++++++++--- doc/source/architecture.rst 
| 88 ------------------------------------- doc/source/index.rst | 22 +--------- doc/source/install.rst | 75 ++++++++++++++++--------------- doc/source/intro.rst | 85 +++++++++++++++++++++++++++++++++++ doc/source/operating.rst | 37 +++++++++------- 6 files changed, 165 insertions(+), 167 deletions(-) delete mode 100755 doc/source/architecture.rst create mode 100644 doc/source/intro.rst diff --git a/README.rst b/README.rst index 55040f83..ee52b3e5 100644 --- a/README.rst +++ b/README.rst @@ -11,11 +11,24 @@ .. image:: doc/source/_static/gnocchi-logo.png -Gnocchi is an open-source, multi-tenant timeseries, metrics and resources -database. It provides an `HTTP REST`_ interface to create and manipulate the -data. It is designed to store metrics at a very large scale while providing -access to metrics and resources information and history. +Gnocchi is an open-source |time series| database. -You can read the full documentation online at http://gnocchi.xyz. +The problem that Gnocchi solves is the storage and indexing of |time series| +data and resources at a large scale. This is useful in modern cloud platforms +which are not only huge but also are dynamic and potentially multi-tenant. +Gnocchi takes all of that into account. + +Gnocchi has been designed to handle large amounts of |aggregates| being stored +while being performant, scalable and fault-tolerant. While doing this, the goal +was to be sure to not build any hard dependency on any complex storage system. + +Gnocchi takes a unique approach to |time series| storage: rather than storing +raw data points, it aggregates them before storing them. This built-in feature +is different from most other |time series| databases, which usually support +this mechanism as an option and compute aggregation (average, minimum, etc.) at +query time. -.. 
_`HTTP REST`: https://en.wikipedia.org/wiki/Representational_state_transfer +Because Gnocchi computes all the aggregations at ingestion, getting the data +back is extremely fast, as it just needs to read back the pre-computed results. + +You can read the full documentation online at http://gnocchi.xyz. diff --git a/doc/source/architecture.rst b/doc/source/architecture.rst deleted file mode 100755 index 761b921a..00000000 --- a/doc/source/architecture.rst +++ /dev/null @@ -1,88 +0,0 @@ -====================== - Project Architecture -====================== - -Gnocchi consists of several services: a HTTP REST API (see :doc:`rest`), an -optional statsd-compatible daemon (see :doc:`statsd`), and an asynchronous -processing daemon (named `gnocchi-metricd`). Data is received via the HTTP REST -API or statsd daemon. `gnocchi-metricd` performs operations (statistics -computing, |metric| cleanup, etc...) on the received data in the background. - -Both the HTTP REST API and the asynchronous processing daemon are stateless and -are scalable. Additional workers can be added depending on load. - -.. image:: _static/architecture.svg - :align: center - :width: 95% - :alt: Gnocchi architecture - -.. image source: https://docs.google.com/drawings/d/1aHV86TPNFt7FlCLEjsTvV9FWoFYxXCaQOzfg7NdXVwM/edit?usp=sharing - - -Back-ends ---------- - -Gnocchi uses three different back-ends for storing data: one for storing new -incoming |measures| (the incoming driver), one for storing the time series (the -storage driver) and one for indexing the data (the index driver). - -The *incoming* storage is responsible for storing new |measures| sent to -|metrics|. It is by default – and usually – the same driver as the *storage* -one. - -The *storage* is responsible for storing |aggregates| of created |metrics|. It -receives timestamps and values, and pre-computes aggregations according to the -defined |archive policies|. 
- -The *indexer* is responsible for storing the index of all |resources|, -|archive policies| and |metrics|, along with their definitions, types and -properties. The indexer is also responsible for linking |resources| with -|metrics|. - -Available incoming and storage back-ends -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Gnocchi currently offers different incoming and storage drivers: - -* File (default) -* `Ceph`_ (preferred) -* `OpenStack Swift`_ -* `S3`_ -* `Redis`_ - -The drivers are based on an intermediate library, named *Carbonara*, which -handles the time series manipulation, since none of these storage technologies -handle time series natively. - -The four *Carbonara* based drivers are working well and are as scalable as -their back-end technology permits. Ceph and Swift are inherently more scalable -than the file driver. - -Depending on the size of your architecture, using the file driver and storing -your data on a disk might be enough. If you need to scale the number of server -with the file driver, you can export and share the data via NFS among all -Gnocchi processes. In any case, it is obvious that S3, Ceph and Swift drivers -are largely more scalable. Ceph also offers better consistency, and hence is -the recommended driver. - -.. _OpenStack Swift: http://docs.openstack.org/developer/swift/ -.. _Ceph: https://ceph.com -.. _`S3`: https://aws.amazon.com/s3/ -.. _`Redis`: https://redis.io - -Available index back-ends -~~~~~~~~~~~~~~~~~~~~~~~~~ - -Gnocchi currently offers different index drivers: - -* `PostgreSQL`_ (preferred) -* `MySQL`_ (at least version 5.6.4) - -Those drivers offer almost the same performance and features, though PostgreSQL -tends to be more performant and has some additional features (e.g. |resource| -duration computing). - -.. _PostgreSQL: http://postgresql.org -.. _MySQL: http://mysql.org - -.. 
include:: include/term-substitution.rst diff --git a/doc/source/index.rst b/doc/source/index.rst index b700ea7b..7cc6505e 100644 --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -33,26 +33,6 @@ You can join Gnocchi's community via the following channels: - Bug tracker: https://github.com/gnocchixyz/gnocchi/issues - IRC: #gnocchi on `Freenode `_ -Why Gnocchi? ------------- - -Gnocchi has been created to fulfill the need of a |time series| database usable -in the context of cloud computing: providing the ability to store large -quantities of |metrics|. It has been designed to handle large amount of -|aggregates| being stored, while being performant, scalable and fault-tolerant. -While doing this, the goal was to be sure to not build any hard dependency on -any complex storage system. - -The Gnocchi project was started in 2014 as a spin-off of the `OpenStack -Ceilometer`_ project to address the performance issues that Ceilometer -encountered while using standard databases as a storage backends for |metrics|. -More information are available on `Julien's blog post on Gnocchi -`_. - -.. _`OpenStack Ceilometer`: https://docs.openstack.org/developer/ceilometer/ - - - Comparisons To Alternatives --------------------------- @@ -114,7 +94,7 @@ Documentation .. toctree:: :maxdepth: 1 - architecture + intro install operating client diff --git a/doc/source/install.rst b/doc/source/install.rst index dc972635..34085cfe 100644 --- a/doc/source/install.rst +++ b/doc/source/install.rst @@ -8,8 +8,8 @@ Installation ============ Gnocchi can be installed using `pip`. Depending on the drivers and features you -want to use (see :doc:`architecture` for which driver to pick), you need to -specify the extra variants you need. For example:: +want to use (see :doc:`intro` for which driver to pick), you need to specify +the extra variants you need. 
For example:: pip install gnocchi[postgresql,ceph,keystone] @@ -18,16 +18,16 @@ storage, and Keystone support for authentication and authorization. The list of variants available is: -* keystone – provides Keystone authentication support -* mysql - provides MySQL indexer support -* postgresql – provides PostgreSQL indexer support -* swift – provides OpenStack Swift storage support -* s3 – provides Amazon S3 storage support -* ceph_recommended_lib – provides Ceph (>=0.80) storage support -* ceph_alternative_lib – provides Ceph (>=12.2.0) storage support -* redis – provides Redis storage support -* doc – documentation building support -* test – unit and functional tests support +* `keystone` – provides Keystone authentication support +* `mysql` - provides MySQL indexer support +* `postgresql` – provides PostgreSQL indexer support +* `swift` – provides OpenStack Swift storage support +* `s3` – provides Amazon S3 storage support +* `ceph_recommended_lib` – provides Ceph (>= 0.80) storage support +* `ceph_alternative_lib` – provides Ceph (>= 12.2.0) storage support +* `redis` – provides Redis storage support +* `doc` – documentation building support +* `test` – unit and functional tests support To install Gnocchi from source, run the standard Python installation procedure:: @@ -43,7 +43,7 @@ install extra variants using, for example:: Ceph requirements ----------------- -The ceph driver needs to have a Ceph user and a pool already created. They can +The Ceph driver needs to have a Ceph user and a pool already created. They can be created for example with: :: @@ -52,13 +52,13 @@ be created for example with: ceph auth get-or-create client.gnocchi mon "allow r" osd "allow rwx pool=metrics" -Gnocchi leverages some librados features (omap, async, operation context) -available in python binding only since python-rados >= 12.2.0. To handle this, -Gnocchi uses 'cradox' python library which has exactly the same API but works -with Ceph >= 0.80.0. 
+Gnocchi leverages some _librados_ features (omap, async, operation context) +available in the Python binding only since python-rados >= 12.2.0. To handle +this, Gnocchi uses _cradox_ python library which has exactly the same API but +works with Ceph >= 0.80.0. -If Ceph and python-rados are >= 12.2.0, cradox python library becomes optional -but is still recommended. +If Ceph and python-rados are >= 12.2.0, the cradox Python library becomes +optional but is still recommended. Configuration @@ -67,6 +67,14 @@ Configuration Configuration file ------------------- +No config file is provided with the source code; it will be created during the +installation. In the case where no configuration file is installed, one can be +easily created by running: + +:: + + gnocchi-config-generator > /path/to/gnocchi.conf + By default, gnocchi looks for its configuration file in the following places, in order: @@ -79,15 +87,6 @@ in order: * ``/etc/gnocchi/gnocchi.conf.d`` * ``/etc/gnocchi.conf.d`` - -No config file is provided with the source code; it will be created during the -installation. In case where no configuration file was installed, one can be -easily created by running: - -:: - - gnocchi-config-generator > /path/to/gnocchi.conf - Configure Gnocchi by editing the appropriate file. The configuration file should be pretty explicit, but here are some of the base @@ -123,13 +122,20 @@ to use the configured storage driver. Configuring authentication ----------------------------- -The API server supports different authentication methods: `basic` (the default) -which uses the standard HTTP `Authorization` header or `keystone` to use -`OpenStack Keystone`_. If you successfully installed the `keystone` flavor -using `pip` (see :ref:`installation`), you can set `api.auth_mode` to -`keystone` to enable Keystone authentication. +The API server supports different authentication methods: + +* `basic` (the default) which uses the standard HTTP `Authorization` header. 
+ +* `keystone` to use `OpenStack Keystone`_. If you successfully installed the + `keystone` flavor using `pip` (see :ref:`installation`), you can set + `api.auth_mode` to `keystone` to enable Keystone authentication. + You also need to configure the `keystone_authtoken` section in `gnocchi.conf` + with the proper value so Gnocchi is able to validate tokens. + +* `remoteuser` Gnocchi will look at the HTTP server REMOTE_USER environment + variable to get the username. Then the permissions model is the same as the + `basic` mode. -.. _`Paste Deployment`: http://pythonpaste.org/deploy/ .. _`OpenStack Keystone`: http://launchpad.net/keystone Initialization @@ -142,7 +148,6 @@ and storage: gnocchi-upgrade - Upgrading ========= In order to upgrade from a previous version of Gnocchi, you need to make sure diff --git a/doc/source/intro.rst b/doc/source/intro.rst new file mode 100644 index 00000000..2314a784 --- /dev/null +++ b/doc/source/intro.rst @@ -0,0 +1,85 @@ +Getting started +--------------- +Gnocchi uses three different back-ends for storing data: one for storing new +incoming |measures| (the *incoming* driver), one for storing the |time series| +|aggregates| (the *storage* driver) and one for indexing the data (the *index* +driver). By default, the *incoming* driver is configured to use the same value +as the *storage* driver. + +Incoming and storage drivers +~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Gnocchi can leverage different storage systems, such as: + +* File (default) +* `Ceph`_ (preferred) +* `OpenStack Swift`_ +* `Amazon S3`_ +* `Redis`_ + +Depending on the size of your architecture, using the file driver and storing +your data on a disk might be enough. If you need to scale the number of server +with the file driver, you can export and share the data via NFS among all +Gnocchi processes. Ultimately, the S3, Ceph, and Swift drivers are more +scalable storage options. Ceph also offers better consistency, and hence is the +recommended driver. + +.. 
_`OpenStack Swift`: http://docs.openstack.org/developer/swift/ +.. _`Ceph`: https://ceph.com +.. _`Amazon S3`: https://aws.amazon.com/s3/ +.. _`Redis`: https://redis.io + +Indexer driver +~~~~~~~~~~~~~~ + +You also need a database to index the resources and metrics that Gnocchi will +handle. The supported drivers are: + +* `PostgreSQL`_ (preferred) +* `MySQL`_ (at least version 5.6.4) + +The *indexer* is responsible for storing the index of all |resources|, |archive +policies| and |metrics|, along with their definitions, types and properties. +The indexer is also responsible for linking |resources| with |metrics| and the +relationships of |resources|.. + +.. _PostgreSQL: http://postgresql.org +.. _MySQL: http://mysql.org + +Architecture overview +--------------------- + +Gnocchi consists of several services: a HTTP REST API (see :doc:`rest`), an +optional statsd-compatible daemon (see :doc:`statsd`), and an asynchronous +processing daemon (named `gnocchi-metricd`). Data is received via the HTTP REST +API or statsd daemon. `gnocchi-metricd` performs operations (statistics +computing, |metric| cleanup, etc...) on the received data in the background. + +.. image:: _static/architecture.svg + :align: center + :width: 95% + :alt: Gnocchi architecture + +.. image source: https://docs.google.com/drawings/d/1aHV86TPNFt7FlCLEjsTvV9FWoFYxXCaQOzfg7NdXVwM/edit?usp=sharing + +All those services are stateless and therefore horizontally scalable. Contrary +to many time series databases, there is no limit on the number of +`gnocchi-metricd` daemons or `gnocchi-api` endpoints that you can run. If your +load starts to increase, you just need to spawn more daemons to handle the flow +of new requests. The same applies if you want to handle high-availability +scenarios: just start more Gnocchi daemons on independent servers. + + +Understanding aggregation +------------------------- + +The way data points are aggregated is configurable on a per-metric basis, using +an archive policy. 
+ +An archive policy defines which aggregations to compute and how many aggregates +to keep. Gnocchi supports a variety of aggregation methods, such as minimum, +maximum, average, Nth percentile, standard deviation, etc. Those aggregations +are computed over a period of time (called granularity) and are kept for a +defined timespan. + +.. include:: include/term-substitution.rst diff --git a/doc/source/operating.rst b/doc/source/operating.rst index 351075ca..fad37a95 100644 --- a/doc/source/operating.rst +++ b/doc/source/operating.rst @@ -2,13 +2,15 @@ Running Gnocchi =============== -To run Gnocchi, simply run the HTTP server and metric daemon: +Once Gnocchi is properly installed, you need to launch Gnocchi. Simply run the +HTTP server and metric daemon: :: gnocchi-api gnocchi-metricd +You can run these services as background daemons. Running API As A WSGI Application ================================= @@ -53,9 +55,9 @@ Once written to `/etc/gnocchi/uwsgi.ini`, it can be launched this way:: How to define archive policies ============================== -In Gnocchi, the |archive policies| define how the |metrics| are aggregated and -how long they are stored. Each |archive policy| definition is expressed as the -number of points over a |timespan|. +The |archive policies| define how the |metrics| are aggregated and how long +they are stored. Each |archive policy| definition is expressed as the number of +points over a |timespan|. If your |archive policy| defines a policy of 10 points with a |granularity| of 1 second, the |time series| archive will keep up to 10 seconds, each @@ -64,7 +66,8 @@ maximum retain 10 seconds of data (sometimes a bit more) between the more recent point and the oldest point. That does not mean it will be 10 consecutive seconds: there might be a gap if data is fed irregularly. -There is no expiry of data relative to the current timestamp. +**There is no expiry of data relative to the current timestamp. 
Data is only +expired according to timespan.** Each |archive policy| also defines which |aggregation methods| will be used. The default is set to `default_aggregation_methods` which is by default set to @@ -93,18 +96,18 @@ consume twice CPU than just one definition (e.g. just 1 second granularity for 1 day). Default archive policies -======================== - -By default, 3 |archive policies| are created when calling `gnocchi-upgrade`: -*low*, *medium* and *high*. The name both describes the storage space and CPU -usage needs. - -A fourth |archive policy| named `bool` is also provided by default and is -designed to store only boolean values (i.e. 0 and 1). It only stores one data -point for each second (using the `last` |aggregation method|), with a one year -retention period. The maximum optimistic storage size is estimated based on the -assumption that no other value than 0 and 1 are sent as |measures|. If other -values are sent, the maximum pessimistic storage size is taken into account. +------------------------ + +By default, 4 |archive policies| are created when calling `gnocchi-upgrade`: +*bool*, *low*, *medium* and *high*. The name both describes the storage space +and CPU usage needs. + +The `bool` |archive policy| is designed to store only boolean values (i.e. 0 +and 1). It only stores one data point for each second (using the `last` +|aggregation method|), with a one year retention period. The maximum optimistic +storage size is estimated based on the assumption that no other value than 0 +and 1 are sent as |measures|. If other values are sent, the maximum pessimistic +storage size is taken into account. - low -- GitLab From acc762ab61129b249d56a829efc7a30b1b5b3872 Mon Sep 17 00:00:00 2001 From: Thomas Goirand Date: Fri, 17 Nov 2017 22:41:37 +0000 Subject: [PATCH 1084/1483] * python3-gnocchi breaks+replaces python-gnocchi, therefore allowing upgrades from older Gnocchi that was using Python 2.7 (Closes: #881247). 
--- debian/changelog | 7 +++++++ debian/control | 4 ++++ 2 files changed, 11 insertions(+) diff --git a/debian/changelog b/debian/changelog index c094dfdf..6471351d 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,10 @@ +gnocchi (4.0.3-3) unstable; urgency=medium + + * python3-gnocchi breaks+replaces python-gnocchi, therefore allowing + upgrades from older Gnocchi that was using Python 2.7 (Closes: #881247). + + -- Thomas Goirand Fri, 17 Nov 2017 22:40:08 +0000 + gnocchi (4.0.3-2) unstable; urgency=medium * Fixed version of python-sqlalchemy-utils (>= 0.32.14). diff --git a/debian/control b/debian/control index 6ed11051..0f4ff6f8 100644 --- a/debian/control +++ b/debian/control @@ -173,6 +173,10 @@ Depends: python3-yaml, ${misc:Depends}, ${python3:Depends}, +Breaks: + python-gnocchi, +Replaces: + python-gnocchi, Suggests: gnocchi-doc, Description: Metric as a Service - Python 3.x -- GitLab From 333bafea4953a68e83d16a7c80756ab8b9fa41f6 Mon Sep 17 00:00:00 2001 From: Thomas Goirand Date: Fri, 17 Nov 2017 22:49:55 +0000 Subject: [PATCH 1085/1483] Fixed encoding of pt.po. --- debian/changelog | 1 + debian/po/pt.po | 68 ++++++++++++++++++++++++------------------------ 2 files changed, 35 insertions(+), 34 deletions(-) diff --git a/debian/changelog b/debian/changelog index 6471351d..5dba90cf 100644 --- a/debian/changelog +++ b/debian/changelog @@ -2,6 +2,7 @@ gnocchi (4.0.3-3) unstable; urgency=medium * python3-gnocchi breaks+replaces python-gnocchi, therefore allowing upgrades from older Gnocchi that was using Python 2.7 (Closes: #881247). + * Fixed encoding of pt.po. -- Thomas Goirand Fri, 17 Nov 2017 22:40:08 +0000 diff --git a/debian/po/pt.po b/debian/po/pt.po index b4ac73fa..ddf0b979 100644 --- a/debian/po/pt.po +++ b/debian/po/pt.po @@ -20,7 +20,7 @@ msgstr "" #. Description #: ../gnocchi-common.templates:2001 msgid "Authentication server hostname:" -msgstr "Nome do servidor de autenticao:" +msgstr "Nome do servidor de autenticação:" #. 
Type: string #. Description @@ -30,8 +30,8 @@ msgid "" "Typically this is also the hostname of the OpenStack Identity Service " "(Keystone)." msgstr "" -"Indique o nome do seu servidor de autenticao para o Gnocchi. Normalmente, " -" o nome do seu Servio de Identidade OpenStack (Keystone)." +"Indique o nome do seu servidor de autenticação para o Gnocchi. Normalmente, " +"é o nome do seu Serviço de Identidade OpenStack (Keystone)." #. Type: string #. Description @@ -44,7 +44,7 @@ msgstr "" #. locataire ("tenant") #: ../gnocchi-common.templates:3001 msgid "Authentication server tenant name:" -msgstr "Nome do 'tenant' do servidor de autenticao:" +msgstr "Nome do 'tenant' do servidor de autenticação:" #. Type: string #. Description @@ -57,33 +57,33 @@ msgstr "Nome do 'tenant' do servidor de autentica #. locataire ("tenant") #: ../gnocchi-common.templates:3001 msgid "Please specify the authentication server tenant name." -msgstr "Indique, por favor, o nome do 'tenant' do servidor de autenticao." +msgstr "Indique, por favor, o nome do 'tenant' do servidor de autenticação." #. Type: string #. Description #: ../gnocchi-common.templates:4001 msgid "Authentication server username:" -msgstr "Nome de utilizador para o servidor de autenticao:" +msgstr "Nome de utilizador para o servidor de autenticação:" #. Type: string #. Description #: ../gnocchi-common.templates:4001 msgid "Please specify the username to use with the authentication server." msgstr "" -"Indique, por favor, o nome de utilizador para o servidor de autenticao." +"Indique, por favor, o nome de utilizador para o servidor de autenticação." #. Type: password #. Description #: ../gnocchi-common.templates:5001 msgid "Authentication server password:" -msgstr "Palavra chave do servidor de autenticao:" +msgstr "Palavra chave do servidor de autenticação:" #. Type: password #. Description #: ../gnocchi-common.templates:5001 msgid "Please specify the password to use with the authentication server." 
msgstr "" -"Indique, por favor, a palavra-chave para usar no servidor de autenticao." +"Indique, por favor, a palavra-chave para usar no servidor de autenticação." #. Type: boolean #. Description @@ -98,8 +98,8 @@ msgid "" "No database has been set up for Gnocchi to use. Before continuing, you " "should make sure you have the following information:" msgstr "" -"No foi definida nenhuma base de dados para ser usada pelo Gnocchi. Antes de " -"continuar, certifique-se que tem a seguinte informao:" +"Não foi definida nenhuma base de dados para ser usada pelo Gnocchi. Antes de " +"continuar, certifique-se que tem a seguinte informação:" #. Type: boolean #. Description @@ -112,9 +112,9 @@ msgid "" " * a username and password to access the database." msgstr "" " * o tipo de base de dados que quer usar;\n" -" * o nome do servidor (esse servidor deve aceitar ligaes TCP a partir\n" -"desta mquina);\n" -" * o nome de utilizador e palavra passe para aceder base de dados." +" * o nome do servidor (esse servidor deve aceitar ligações TCP a partir\n" +"desta máquina);\n" +" * o nome de utilizador e palavra passe para aceder à base de dados." #. Type: boolean #. Description @@ -123,7 +123,7 @@ msgid "" "If some of these requirements are missing, do not choose this option and run " "with regular SQLite support." msgstr "" -"Se algum destes requisitos estiver em falta, rejeite esta opo e execute " +"Se algum destes requisitos estiver em falta, rejeite esta opção e execute " "com o suporte SQLite normal." #. Type: boolean @@ -133,14 +133,14 @@ msgid "" "You can change this setting later on by running \"dpkg-reconfigure -plow " "gnocchi-common\"." msgstr "" -"Pode mudar esta definio mais tarde ao executar \"dpkg-reconfigure -plow " +"Pode mudar esta definição mais tarde ao executar \"dpkg-reconfigure -plow " "gnocchi-common\"." #. Type: boolean #. Description #: ../gnocchi-api.templates:2001 msgid "Register Gnocchi in the Keystone endpoint catalog?" 
-msgstr "Registar o Gnocchi no catlogo de pontos finais do Keystone?" +msgstr "Registar o Gnocchi no catálogo de pontos finais do Keystone?" #. Type: boolean #. Description @@ -150,8 +150,8 @@ msgid "" "accessible. This is done using \"keystone service-create\" and \"keystone " "endpoint-create\". This can be done automatically now." msgstr "" -"Cada servio Openstack (cada API) deve estar registado para que seja " -"acessvel. Isto feito com \"keystone service-create\" e \"keystone " +"Cada serviço Openstack (cada API) deve estar registado para que seja " +"acessível. Isto é feito com \"keystone service-create\" e \"keystone " "endpoint-create\". Pode correr estes comandos agora." #. Type: boolean @@ -162,15 +162,15 @@ msgid "" "to connect using a known admin project name, admin username and password. " "The admin auth token is not used anymore." msgstr "" -"Note que ir necessitar de ter um servidor keystone a correr e pronto para " -"receber ligaes autenticadas com um nome de administrador de projecto, nome " -"de utilizador e password. O token de autorizao de admin j no usado." +"Note que irá necessitar de ter um servidor keystone a correr e pronto para " +"receber ligações autenticadas com um nome de administrador de projecto, nome " +"de utilizador e password. O token de autorização de admin já não é usado." #. Type: string #. Description #: ../gnocchi-api.templates:3001 msgid "Keystone server IP address:" -msgstr "Endereo IP do keystone:" +msgstr "Endereço IP do keystone:" #. Type: string #. Description @@ -179,8 +179,8 @@ msgid "" "Please enter the IP address of the Keystone server, so that gnocchi-api can " "contact Keystone to do the Gnocchi service and endpoint creation." msgstr "" -"Indique o endereo IP do seu servidor keystone, de modo a que o glance-api " -"possa contactar o Keystone para criar o servio e ponto final Gnocchi." 
+"Indique o endereço IP do seu servidor keystone, de modo a que o glance-api " +"possa contactar o Keystone para criar o serviço e ponto final Gnocchi." #. Type: string #. Description @@ -200,7 +200,7 @@ msgid "" "To register the service endpoint, this package needs to know the Admin " "login, name, project name, and password to the Keystone server." msgstr "" -"Para registar o endpoint do servio, este pacote necessita de saber o nome " +"Para registar o endpoint do serviço, este pacote necessita de saber o nome " "de utilizador, nome, nome do projecto e password para o servidor Keystone." #. Type: string @@ -219,13 +219,13 @@ msgstr "Password de administrador Keystone:" #. Description #: ../gnocchi-api.templates:7001 msgid "Gnocchi endpoint IP address:" -msgstr "Endereo IP do ponto final Gnocchi:" +msgstr "Endereço IP do ponto final Gnocchi:" #. Type: string #. Description #: ../gnocchi-api.templates:7001 msgid "Please enter the IP address that will be used to contact Gnocchi." -msgstr "Indique o endereo IP que ir ser usado para contactar o Gnocchi." +msgstr "Indique o endereço IP que irá ser usado para contactar o Gnocchi." #. Type: string #. Description @@ -235,15 +235,15 @@ msgid "" "service, so if you are installing a public cloud, this should be a public IP " "address." msgstr "" -"Este endereo IP deve ser acessvel a partir dos clientes que iro usar este " -"servio, portanto se est a instalar uma cloud pblica, este deve ser um " -"endereo IP pblico." +"Este endereço IP deve ser acessível a partir dos clientes que irão usar este " +"serviço, portanto se está a instalar uma cloud pública, este deve ser um " +"endereço IP público." #. Type: string #. Description #: ../gnocchi-api.templates:8001 msgid "Name of the region to register:" -msgstr "Nome da regio a registar:" +msgstr "Nome da região a registar:" #. Type: string #. Description @@ -253,6 +253,6 @@ msgid "" "location. Please enter the zone that you wish to use when registering the " "endpoint." 
msgstr "" -"O Openstack suporta a utilizao de zonas de disponibilidade, com cada " -"regio a representar uma localizao. Por favor, indique a zona que quer " +"O Openstack suporta a utilização de zonas de disponibilidade, com cada " +"região a representar uma localização. Por favor, indique a zona que quer " "user ao registar um ponto final." -- GitLab From 5769501a780e1d5005cf9d6597ec1dcf7fbb393f Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Fri, 22 Sep 2017 17:13:19 +0200 Subject: [PATCH 1086/1483] Add promeutheus storage adapter --- doc/source/index.rst | 2 + doc/source/install.rst | 1 + doc/source/prometheus.rst | 33 ++ gnocchi/rest/api.py | 141 ++++++ gnocchi/rest/prometheus/README | 6 + gnocchi/rest/prometheus/__init__.py | 0 gnocchi/rest/prometheus/remote.proto | 68 +++ gnocchi/rest/prometheus/remote_pb2.py | 475 ++++++++++++++++++ .../tests/functional/gabbits/prometheus.yaml | 79 +++ .../031b586e-ebe1-4737-812e-cf0ddf26f5ad.dump | Bin 0 -> 2538 bytes .../1ea8f6f7-eebe-49d5-8276-ceb2d56c5ba4.dump | Bin 0 -> 2180 bytes .../a0c06674-a5ef-4621-883c-e94880a2de02.dump | Bin 0 -> 2217 bytes .../indexer/sqlalchemy/test_migrations.py | 41 +- .../notes/prometheus-bc2153962b9a237a.yaml | 6 + setup.cfg | 3 + tox.ini | 5 +- 16 files changed, 833 insertions(+), 27 deletions(-) create mode 100644 doc/source/prometheus.rst create mode 100644 gnocchi/rest/prometheus/README create mode 100644 gnocchi/rest/prometheus/__init__.py create mode 100644 gnocchi/rest/prometheus/remote.proto create mode 100644 gnocchi/rest/prometheus/remote_pb2.py create mode 100644 gnocchi/tests/functional/gabbits/prometheus.yaml create mode 100644 gnocchi/tests/functional/gabbits/prometheus_fixtures/031b586e-ebe1-4737-812e-cf0ddf26f5ad.dump create mode 100644 gnocchi/tests/functional/gabbits/prometheus_fixtures/1ea8f6f7-eebe-49d5-8276-ceb2d56c5ba4.dump create mode 100644 gnocchi/tests/functional/gabbits/prometheus_fixtures/a0c06674-a5ef-4621-883c-e94880a2de02.dump create mode 100644 
releasenotes/notes/prometheus-bc2153962b9a237a.yaml diff --git a/doc/source/index.rst b/doc/source/index.rst index 7cc6505e..d28cd99e 100644 --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -21,6 +21,7 @@ Gnocchi's main features are: - Queryable resource indexer - Multi-tenant - Grafana support +- Prometheus Remote Write support - Nagios/Icinga support - Statsd protocol support - Collectd plugin support @@ -101,6 +102,7 @@ Documentation rest statsd grafana + prometheus nagios collectd glossary diff --git a/doc/source/install.rst b/doc/source/install.rst index 34085cfe..a219bea2 100644 --- a/doc/source/install.rst +++ b/doc/source/install.rst @@ -26,6 +26,7 @@ The list of variants available is: * `ceph_recommended_lib` – provides Ceph (>= 0.80) storage support * `ceph_alternative_lib` – provides Ceph (>= 12.2.0) storage support * `redis` – provides Redis storage support +* `prometheus` – provides Prometheus Remote Write support * `doc` – documentation building support * `test` – unit and functional tests support diff --git a/doc/source/prometheus.rst b/doc/source/prometheus.rst new file mode 100644 index 00000000..df4c3262 --- /dev/null +++ b/doc/source/prometheus.rst @@ -0,0 +1,33 @@ +==================== + Prometheus support +==================== + +`Prometheus`_ can use Gnocchi to store its data through `Remote Write +Adapter`_. Gnocchi needs to be installed with the `prometheus` flavor. + +Example of Prometheus configuration:: + + remote_write: + - url: "http://localhost:8041/v1/prometheus/write" + basic_auth: + username: "admin" + password: "whatever" + + +The `/v1/prometheus/write` endpoint handles the `WriteRequest` protobuf +message. + +Gnocchi maps Prometheus metrics to its data model. + +For each metric sent by Prometheus, Gnocchi maintains a corresponding resource +based on each `job` and `instance` pair. This resource is created with the +`prometheus` resource type and contains two attributes, `job` and `instance`. 
+The metrics sent by Prometheus with this pair are attached to that resource and +filled with the provided measures. + +.. note:: + + `/` is forbidden in Gnocchi metric name, they are replaced by `_` + +.. _`Prometheus`: https://prometheus.io/ +.. _`Remote Write Adapter`: https://prometheus.io/docs/operating/configuration/# diff --git a/gnocchi/rest/api.py b/gnocchi/rest/api.py index 528e465d..aff8ff7e 100644 --- a/gnocchi/rest/api.py +++ b/gnocchi/rest/api.py @@ -14,6 +14,7 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. +import collections import functools import itertools import uuid @@ -25,6 +26,7 @@ from pecan import rest import pyparsing import six from six.moves.urllib import parse as urllib_parse +import tenacity import voluptuous import webob.exc import werkzeug.http @@ -39,6 +41,13 @@ from gnocchi.rest.aggregates import processor from gnocchi import storage from gnocchi import utils +try: + from gnocchi.rest.prometheus import remote_pb2 + import snappy + PROMETHEUS_SUPPORTED = True +except ImportError: + PROMETHEUS_SUPPORTED = False + def arg_to_list(value): if isinstance(value, list): @@ -1890,6 +1899,136 @@ class BatchController(object): resources = ResourcesBatchController() +class PrometheusWriteController(rest.RestController): + + PROMETHEUS_RESOURCE_TYPE = { + "instance": {"type": "string", + "min_length": 1, + "max_length": 512, + "required": True}, + "job": {"type": "string", + "min_length": 1, + "max_length": 512, + "required": True} + } + + # Retry with exponential backoff for up to 1 minute + @classmethod + @tenacity.retry( + wait=tenacity.wait_exponential(multiplier=0.5, max=60), + retry=tenacity.retry_if_exception_type( + (indexer.NoSuchResource, indexer.ResourceAlreadyExists, + indexer.ResourceTypeAlreadyExists, + indexer.NamedMetricAlreadyExists))) + def get_or_create_resource_and_metrics(cls, creator, rid, + 
original_resource_id, + job, instance, metric_names): + try: + r = pecan.request.indexer.get_resource('prometheus', rid, + with_metrics=True) + except indexer.NoSuchResourceType: + enforce("create resource type", { + 'name': 'prometheus', + 'state': 'creating', + 'attributes': cls.PROMETHEUS_RESOURCE_TYPE + }) + + schema = pecan.request.indexer.get_resource_type_schema() + rt = schema.resource_type_from_dict( + 'prometheus', cls.PROMETHEUS_RESOURCE_TYPE, 'creating') + pecan.request.indexer.create_resource_type(rt) + raise tenacity.TryAgain + except indexer.UnexpectedResourceTypeState as e: + # NOTE(sileht): Currently created by another thread + if not e.state.endswith("_error"): + raise tenacity.TryAgain + + if r: + enforce("update resource", r) + exists_metric_names = [m.name for m in r.metrics] + metrics = MetricsSchema(dict( + (m, {}) for m in metric_names + if m not in exists_metric_names + )) + if metrics: + return pecan.request.indexer.update_resource( + 'prometheus', rid, + metrics=metrics, + append_metrics=True, + create_revision=False + ).metrics + else: + return r.metrics + else: + metrics = MetricsSchema(dict((m, {}) for m in metric_names)) + target = { + "id": rid, + "resource_type": "prometheus", + "creator": creator, + "original_resource_id": original_resource_id, + "job": job, + "instance": instance, + "metrics": metrics, + } + enforce("create resource", target) + + try: + return pecan.request.indexer.create_resource( + 'prometheus', rid, creator, + original_resource_id=original_resource_id, + job=job, + instance=instance, + metrics=metrics + ).metrics + except indexer.ResourceAlreadyExists: + # NOTE(sileht): ensure the rid is not registered whitin another + # resource type. 
+ r = pecan.request.indexer.get_resource('generic', rid) + if r.type != 'prometheus': + abort(409, six.text_type(e)) + raise + + @pecan.expose() + def post(self): + buf = snappy.uncompress(pecan.request.body) + f = remote_pb2.WriteRequest() + f.ParseFromString(buf) + measures_by_rid = collections.defaultdict(dict) + for ts in f.timeseries: + attrs = dict((l.name, l.value) for l in ts.labels) + original_rid = (attrs["job"], attrs["instance"]) + name = attrs['__name__'].replace('/', '_') + if ts.samples: + measures_by_rid[original_rid][name] = ( + MeasuresListSchema([{'timestamp': s.timestamp_ms / 1000.0, + 'value': s.value} + for s in ts.samples])) + + creator = pecan.request.auth_helper.get_current_user(pecan.request) + + measures_to_batch = {} + for (job, instance), measures in measures_by_rid.items(): + original_rid = '%s@%s' % (job, instance) + rid = ResourceUUID(original_rid, creator=creator) + metric_names = list(measures.keys()) + metrics = self.get_or_create_resource_and_metrics( + creator, rid, original_rid, job, instance, metric_names) + + for metric in metrics: + enforce("post measures", metric) + + measures_to_batch.update( + dict((metric, measures[metric.name]) for metric in + metrics if metric.name in measures)) + + pecan.request.incoming.add_measures_batch(measures_to_batch) + pecan.response.status = 202 + + +class PrometheusController(object): + write = PrometheusWriteController() + + class V1Controller(object): def __init__(self): @@ -1911,6 +2050,8 @@ class V1Controller(object): } for name, ctrl in self.sub_controllers.items(): setattr(self, name, ctrl) + if PROMETHEUS_SUPPORTED: + setattr(self, "prometheus", PrometheusController()) @pecan.expose('json') def index(self): diff --git a/gnocchi/rest/prometheus/README b/gnocchi/rest/prometheus/README new file mode 100644 index 00000000..0c79cb4b --- /dev/null +++ b/gnocchi/rest/prometheus/README @@ -0,0 +1,6 @@ +# remote.proto come from + 
+https://raw.githubusercontent.com/prometheus/prometheus/master/storage/remote/remote.proto + +# remote_pb2.py is generated with: +protoc --proto_path=. --python_out=. remote.proto diff --git a/gnocchi/rest/prometheus/__init__.py b/gnocchi/rest/prometheus/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/gnocchi/rest/prometheus/remote.proto b/gnocchi/rest/prometheus/remote.proto new file mode 100644 index 00000000..6f09c9ef --- /dev/null +++ b/gnocchi/rest/prometheus/remote.proto @@ -0,0 +1,68 @@ +// Copyright 2016 Prometheus Team +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package remote; + +message Sample { + double value = 1; + int64 timestamp_ms = 2; +} + +message LabelPair { + string name = 1; + string value = 2; +} + +message TimeSeries { + repeated LabelPair labels = 1; + // Sorted by time, oldest sample first. + repeated Sample samples = 2; +} + +message WriteRequest { + repeated TimeSeries timeseries = 1; +} + +message ReadRequest { + repeated Query queries = 1; +} + +message ReadResponse { + // In same order as the request's queries. 
+ repeated QueryResult results = 1; +} + +message Query { + int64 start_timestamp_ms = 1; + int64 end_timestamp_ms = 2; + repeated LabelMatcher matchers = 3; +} + +enum MatchType { + EQUAL = 0; + NOT_EQUAL = 1; + REGEX_MATCH = 2; + REGEX_NO_MATCH = 3; +} + +message LabelMatcher { + MatchType type = 1; + string name = 2; + string value = 3; +} + +message QueryResult { + repeated TimeSeries timeseries = 1; +} diff --git a/gnocchi/rest/prometheus/remote_pb2.py b/gnocchi/rest/prometheus/remote_pb2.py new file mode 100644 index 00000000..7eeae947 --- /dev/null +++ b/gnocchi/rest/prometheus/remote_pb2.py @@ -0,0 +1,475 @@ +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: remote.proto + +import sys +_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) +from google.protobuf.internal import enum_type_wrapper +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from google.protobuf import reflection as _reflection +from google.protobuf import symbol_database as _symbol_database +from google.protobuf import descriptor_pb2 +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + + + +DESCRIPTOR = _descriptor.FileDescriptor( + name='remote.proto', + package='remote', + syntax='proto3', + serialized_pb=_b('\n\x0cremote.proto\x12\x06remote\"-\n\x06Sample\x12\r\n\x05value\x18\x01 \x01(\x01\x12\x14\n\x0ctimestamp_ms\x18\x02 \x01(\x03\"(\n\tLabelPair\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t\"P\n\nTimeSeries\x12!\n\x06labels\x18\x01 \x03(\x0b\x32\x11.remote.LabelPair\x12\x1f\n\x07samples\x18\x02 \x03(\x0b\x32\x0e.remote.Sample\"6\n\x0cWriteRequest\x12&\n\ntimeseries\x18\x01 \x03(\x0b\x32\x12.remote.TimeSeries\"-\n\x0bReadRequest\x12\x1e\n\x07queries\x18\x01 \x03(\x0b\x32\r.remote.Query\"4\n\x0cReadResponse\x12$\n\x07results\x18\x01 \x03(\x0b\x32\x13.remote.QueryResult\"e\n\x05Query\x12\x1a\n\x12start_timestamp_ms\x18\x01 
\x01(\x03\x12\x18\n\x10\x65nd_timestamp_ms\x18\x02 \x01(\x03\x12&\n\x08matchers\x18\x03 \x03(\x0b\x32\x14.remote.LabelMatcher\"L\n\x0cLabelMatcher\x12\x1f\n\x04type\x18\x01 \x01(\x0e\x32\x11.remote.MatchType\x12\x0c\n\x04name\x18\x02 \x01(\t\x12\r\n\x05value\x18\x03 \x01(\t\"5\n\x0bQueryResult\x12&\n\ntimeseries\x18\x01 \x03(\x0b\x32\x12.remote.TimeSeries*J\n\tMatchType\x12\t\n\x05\x45QUAL\x10\x00\x12\r\n\tNOT_EQUAL\x10\x01\x12\x0f\n\x0bREGEX_MATCH\x10\x02\x12\x12\n\x0eREGEX_NO_MATCH\x10\x03\x62\x06proto3') +) +_sym_db.RegisterFileDescriptor(DESCRIPTOR) + +_MATCHTYPE = _descriptor.EnumDescriptor( + name='MatchType', + full_name='remote.MatchType', + filename=None, + file=DESCRIPTOR, + values=[ + _descriptor.EnumValueDescriptor( + name='EQUAL', index=0, number=0, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='NOT_EQUAL', index=1, number=1, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='REGEX_MATCH', index=2, number=2, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='REGEX_NO_MATCH', index=3, number=3, + options=None, + type=None), + ], + containing_type=None, + options=None, + serialized_start=588, + serialized_end=662, +) +_sym_db.RegisterEnumDescriptor(_MATCHTYPE) + +MatchType = enum_type_wrapper.EnumTypeWrapper(_MATCHTYPE) +EQUAL = 0 +NOT_EQUAL = 1 +REGEX_MATCH = 2 +REGEX_NO_MATCH = 3 + + + +_SAMPLE = _descriptor.Descriptor( + name='Sample', + full_name='remote.Sample', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='value', full_name='remote.Sample.value', index=0, + number=1, type=1, cpp_type=5, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='timestamp_ms', full_name='remote.Sample.timestamp_ms', index=1, + number=2, type=3, cpp_type=2, label=1, + 
has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=24, + serialized_end=69, +) + + +_LABELPAIR = _descriptor.Descriptor( + name='LabelPair', + full_name='remote.LabelPair', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='name', full_name='remote.LabelPair.name', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='value', full_name='remote.LabelPair.value', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=71, + serialized_end=111, +) + + +_TIMESERIES = _descriptor.Descriptor( + name='TimeSeries', + full_name='remote.TimeSeries', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='labels', full_name='remote.TimeSeries.labels', index=0, + number=1, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='samples', full_name='remote.TimeSeries.samples', index=1, + number=2, type=11, cpp_type=10, label=3, + 
has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=113, + serialized_end=193, +) + + +_WRITEREQUEST = _descriptor.Descriptor( + name='WriteRequest', + full_name='remote.WriteRequest', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='timeseries', full_name='remote.WriteRequest.timeseries', index=0, + number=1, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=195, + serialized_end=249, +) + + +_READREQUEST = _descriptor.Descriptor( + name='ReadRequest', + full_name='remote.ReadRequest', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='queries', full_name='remote.ReadRequest.queries', index=0, + number=1, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=251, + serialized_end=296, +) + + +_READRESPONSE = _descriptor.Descriptor( + name='ReadResponse', + full_name='remote.ReadResponse', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='results', 
full_name='remote.ReadResponse.results', index=0, + number=1, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=298, + serialized_end=350, +) + + +_QUERY = _descriptor.Descriptor( + name='Query', + full_name='remote.Query', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='start_timestamp_ms', full_name='remote.Query.start_timestamp_ms', index=0, + number=1, type=3, cpp_type=2, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='end_timestamp_ms', full_name='remote.Query.end_timestamp_ms', index=1, + number=2, type=3, cpp_type=2, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='matchers', full_name='remote.Query.matchers', index=2, + number=3, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=352, + serialized_end=453, +) + + +_LABELMATCHER = _descriptor.Descriptor( + name='LabelMatcher', + full_name='remote.LabelMatcher', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='type', 
full_name='remote.LabelMatcher.type', index=0, + number=1, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='name', full_name='remote.LabelMatcher.name', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='value', full_name='remote.LabelMatcher.value', index=2, + number=3, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=455, + serialized_end=531, +) + + +_QUERYRESULT = _descriptor.Descriptor( + name='QueryResult', + full_name='remote.QueryResult', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='timeseries', full_name='remote.QueryResult.timeseries', index=0, + number=1, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=533, + serialized_end=586, +) + +_TIMESERIES.fields_by_name['labels'].message_type = _LABELPAIR +_TIMESERIES.fields_by_name['samples'].message_type = _SAMPLE +_WRITEREQUEST.fields_by_name['timeseries'].message_type = _TIMESERIES 
+_READREQUEST.fields_by_name['queries'].message_type = _QUERY +_READRESPONSE.fields_by_name['results'].message_type = _QUERYRESULT +_QUERY.fields_by_name['matchers'].message_type = _LABELMATCHER +_LABELMATCHER.fields_by_name['type'].enum_type = _MATCHTYPE +_QUERYRESULT.fields_by_name['timeseries'].message_type = _TIMESERIES +DESCRIPTOR.message_types_by_name['Sample'] = _SAMPLE +DESCRIPTOR.message_types_by_name['LabelPair'] = _LABELPAIR +DESCRIPTOR.message_types_by_name['TimeSeries'] = _TIMESERIES +DESCRIPTOR.message_types_by_name['WriteRequest'] = _WRITEREQUEST +DESCRIPTOR.message_types_by_name['ReadRequest'] = _READREQUEST +DESCRIPTOR.message_types_by_name['ReadResponse'] = _READRESPONSE +DESCRIPTOR.message_types_by_name['Query'] = _QUERY +DESCRIPTOR.message_types_by_name['LabelMatcher'] = _LABELMATCHER +DESCRIPTOR.message_types_by_name['QueryResult'] = _QUERYRESULT +DESCRIPTOR.enum_types_by_name['MatchType'] = _MATCHTYPE + +Sample = _reflection.GeneratedProtocolMessageType('Sample', (_message.Message,), dict( + DESCRIPTOR = _SAMPLE, + __module__ = 'remote_pb2' + # @@protoc_insertion_point(class_scope:remote.Sample) + )) +_sym_db.RegisterMessage(Sample) + +LabelPair = _reflection.GeneratedProtocolMessageType('LabelPair', (_message.Message,), dict( + DESCRIPTOR = _LABELPAIR, + __module__ = 'remote_pb2' + # @@protoc_insertion_point(class_scope:remote.LabelPair) + )) +_sym_db.RegisterMessage(LabelPair) + +TimeSeries = _reflection.GeneratedProtocolMessageType('TimeSeries', (_message.Message,), dict( + DESCRIPTOR = _TIMESERIES, + __module__ = 'remote_pb2' + # @@protoc_insertion_point(class_scope:remote.TimeSeries) + )) +_sym_db.RegisterMessage(TimeSeries) + +WriteRequest = _reflection.GeneratedProtocolMessageType('WriteRequest', (_message.Message,), dict( + DESCRIPTOR = _WRITEREQUEST, + __module__ = 'remote_pb2' + # @@protoc_insertion_point(class_scope:remote.WriteRequest) + )) +_sym_db.RegisterMessage(WriteRequest) + +ReadRequest = 
_reflection.GeneratedProtocolMessageType('ReadRequest', (_message.Message,), dict( + DESCRIPTOR = _READREQUEST, + __module__ = 'remote_pb2' + # @@protoc_insertion_point(class_scope:remote.ReadRequest) + )) +_sym_db.RegisterMessage(ReadRequest) + +ReadResponse = _reflection.GeneratedProtocolMessageType('ReadResponse', (_message.Message,), dict( + DESCRIPTOR = _READRESPONSE, + __module__ = 'remote_pb2' + # @@protoc_insertion_point(class_scope:remote.ReadResponse) + )) +_sym_db.RegisterMessage(ReadResponse) + +Query = _reflection.GeneratedProtocolMessageType('Query', (_message.Message,), dict( + DESCRIPTOR = _QUERY, + __module__ = 'remote_pb2' + # @@protoc_insertion_point(class_scope:remote.Query) + )) +_sym_db.RegisterMessage(Query) + +LabelMatcher = _reflection.GeneratedProtocolMessageType('LabelMatcher', (_message.Message,), dict( + DESCRIPTOR = _LABELMATCHER, + __module__ = 'remote_pb2' + # @@protoc_insertion_point(class_scope:remote.LabelMatcher) + )) +_sym_db.RegisterMessage(LabelMatcher) + +QueryResult = _reflection.GeneratedProtocolMessageType('QueryResult', (_message.Message,), dict( + DESCRIPTOR = _QUERYRESULT, + __module__ = 'remote_pb2' + # @@protoc_insertion_point(class_scope:remote.QueryResult) + )) +_sym_db.RegisterMessage(QueryResult) + + +# @@protoc_insertion_point(module_scope) diff --git a/gnocchi/tests/functional/gabbits/prometheus.yaml b/gnocchi/tests/functional/gabbits/prometheus.yaml new file mode 100644 index 00000000..ee6bed90 --- /dev/null +++ b/gnocchi/tests/functional/gabbits/prometheus.yaml @@ -0,0 +1,79 @@ +fixtures: + - ConfigFixture + +defaults: + request_headers: + content-type: application/json + authorization: "basic YWRtaW46" + +tests: + - name: create archive policy + desc: for later use + POST: /v1/archive_policy + data: + name: space + definition: + - granularity: 1 second + status: 201 + + - name: create archive policy rule + POST: /v1/archive_policy_rule + data: + name: test_prom + metric_pattern: "*" + archive_policy_name: 
space + status: 201 + + - name: post some measures + POST: /v1/prometheus/write + request_headers: + authorization: "basic YWRtaW46" + content-type: application/x-protobuf + content-encoding: snappy + data: <@prometheus_fixtures/031b586e-ebe1-4737-812e-cf0ddf26f5ad.dump + status: 202 + + - name: ensure resource has been created + GET: /v1/resource/prometheus + response_json_paths: + $.`len`: 1 + $[0].job: "prometheus" + $[0].instance: "localhost:9090" + + - name: ensure one resource have all metrics created + GET: /v1/resource/prometheus/prometheus@localhost:9090 + response_json_paths: + $.metrics.`len`: 56 + + - name: check metrics + GET: /v1/resource/prometheus/prometheus@localhost:9090/metric + response_json_paths: + $[\name].[24].name: 'prometheus_sd_marathon_refresh_duration_seconds_sum' + + - name: check measures + GET: /v1/resource/prometheus/prometheus@localhost:9090/metric/scrape_samples_scraped/measures?refresh=true + response_json_paths: + $[0]: ['2017-09-23T06:02:58+00:00', 1.0, 558.0] + + - name: post some measures second + POST: /v1/prometheus/write + request_headers: + authorization: "basic YWRtaW46" + content-type: application/x-protobuf + content-encoding: snappy + data: <@prometheus_fixtures/a0c06674-a5ef-4621-883c-e94880a2de02.dump + status: 202 + + - name: post some measures third + POST: /v1/prometheus/write + request_headers: + authorization: "basic YWRtaW46" + content-type: application/x-protobuf + content-encoding: snappy + data: <@prometheus_fixtures/1ea8f6f7-eebe-49d5-8276-ceb2d56c5ba4.dump + status: 202 + + - name: check measures second + GET: /v1/resource/prometheus/prometheus@localhost:9090/metric/scrape_samples_scraped/measures?refresh=true + response_json_paths: + $[0]: ['2017-09-23T06:02:58+00:00', 1.0, 558.0] diff --git a/gnocchi/tests/functional/gabbits/prometheus_fixtures/031b586e-ebe1-4737-812e-cf0ddf26f5ad.dump b/gnocchi/tests/functional/gabbits/prometheus_fixtures/031b586e-ebe1-4737-812e-cf0ddf26f5ad.dump new file mode 100644 
index 0000000000000000000000000000000000000000..01b67dfa6fcc2b6b200259cf06da9f16d3ca7daa GIT binary patch literal 2538 zcmXw53vg3a8vf3`H|@=B(x!nv!V(JQU1&)o2_&T{M2UKJy6OmsJLGbc+_nkHz2W92 z&?4Oh*CM6mQE?S4P*K+LL1B@t&>gE5K_0WLD_RD|h0*aX3qEGib@Z~g&g_{p=bt(M z%y-WD{{R2})0h0_0q!W_ia9!=MC@EzRZ^1HB4sn&IF60W8BLUSxiZSV6St8zX=Jd zlu|TF$Y_cxHXBxwH6bEu(H0?=RYfhX$i`k&kz*Mls$^x&ZZl(aWCs@74qv_S)mO8) zw+N@dhRADhaU7MD?98mYB_vbC@ihM?xw3@@Wht=-_GauL8Rs zHWuHCE>wHFsKg~AZT%@lmcNQ;&Ym=FJw>6;`61@~`9t_m&^SE27qD6Pf=$JcSz1E{ zJM%`WDgNIi+(-FU1Lv}ep?y@)Mhs}?FGTOgm2NEELxSgM{wSE;Xc*t%AQ(|&PNGi&zY+6@PtUwwp zCk!{M-iivZl{of}tF|~s(Aj=1-~Tn2Xk}R`>W+NL@j1d5(o84eXFz31a2s>E4}3SU zWs&aCOMl`n3mCizek-6g-VXTdzekPtdm?v}_I3HoC#Z6P8pWqn<548O&v&ea{A(=d z)^nKA#1U@+<4Cg!&gEQ$Dc(h3o8rmBVe5D?CP=EPsHxir_rsCI4b)xEi^Xr zT@-58bEx!kG}9av&E9jiq3;mtphAz0f<1;)hJCd050qX&h6@OrFoGhA(b1XH~cvS3vZ8D%Qv z3?Ui$+GOD@oXON+v(sWM=Gew;BHnREf?KS6Y>ceLB>ic0JPk)DI)+=!?hA&iz|3b=NLHs??J=Dh@ zirGx`XGrHW1Vw=xs98n%9O!Nzu=qX3PW3ywM0q93vE5xqS+ePvsMk@(^wv?<9>ZR* zqkNaF-8GkpzMSeWU8e2YAM!Mxnf zuHzOt1$c%8+;m%zkq?gzehB@HdE6N@wu?vIIirN-{PC=kmI`|&G0|1}R5~%04m~k? z))O!$b_|c0gZoQwo&Y~cqfMPoW8sR^nCm@Fb-YbB&Y;;xxS(?-yu&MTb0$6Xi(Q5- zWF_9^^se#fR_`mfXlh*3>+&wu1=g3q!-LxRqS-w{!zZs`PQ4C(JFvC!^PdpjPcXmukh*Rp7BTvn7gme{E;xO+DOg#2Q^V`mNdZ-Ez;4X02sHe7d|EjeS62;iMU6p0!h+ zMNgJ@_KOYQ-WpT+0#gy#ZGx+*VUm6j?t>U_DDvRT>^;9Y_q45>?!SWISmugE zLU)x+F+BIZwD6({V?93XYWmLERz^Ys)QhC;GZbES+8&)e@n4*?x8nl>UsHXBJ_PsB zuYenVXLy}^?Bm89Iad{+NO-O(H;4^KMj>B`tnJEw^C{11Y|W{bz#|A(Slf*C-Tcb; zXChOwo!IX+=p2nN~SkdVXff&S@=GO+OU4d031R zTI-%zHC9N*Q*q6_Z16|wQcY8`qIkV!RXm+8+*vi%G~z39C`=zo+)w-i+$fC=w&LDN z%ls!z1BBs^=c8{}E@+xe1>OTZXWs+=9I%g{XAOr-60=A`%x}DtL_WvT{m^&ds~rd& z%zO5o0A`^t=wlh5@BC2=q^To|33~KSYDH&w;k2d->fnX)FZyo1*ng|6vUCo!lSDo> zC+BCwIrn_wSz5iWO4sTwUv!-mdd8p0154)_($^4q#BAlx$1lY%Fd9j2Pck_ ze(vYR`nz)v3Nj; zwZ)T4V?%XAHEUw&HbDx;MA<@xVnR5n-7p;~K~f?y(Lz?+8yeV^EW`7X&@S@4rKC+! 
z61*%X6LBdi^2tcI$osn#F=;XB5iG8+`b_VB@!Pq%Z?Xzox%kr9$dt7y+Z(~aCsXb7 zmw~!WT4-|`(o>IttYU3I34P1N0FK@w zY@KjKi^+~)vVc9g$wan^LV{y3)$dvj?u}TzjJICKTt7hOS{2oF7t|AY*9k~^c0n$H zB^q-I_LV})lY)tT_|mrs+g`(Q#1{Lgb%vPW7h`-{h^55ky6I*rswcR&fiq(AIFg@( z?7Q&b3g%u1av0qaF!v=)e*xS{fR{oBbnZQTrx8PMs29{pJUEFB{fM>rCQImeLW#sB z^?m%{eGL8AJ|Z($Ve}g2N-+$mLqH9I=`iHfP(p&!y>Rb5mTNpa!J%sllTKEu8 z`d~<%qqB1~Si5vpA%j8F2@qoF51K17dE^jkSqJI@om-&MaS~SAhT)i6oYzvEw|xon z+`x`iu*P-*4)|9g^#b-^z-!MNZr-YY#&I5A@gIVPES8Lf@l^&Oxy^C?q~7k%Gj*gy zxvOa>9(~2gO_PvGTbTphFuIxI3TRC#II59snZ%y!;66xkgW%N{NPk6K7;=Bar|uqj z*0>2xT*T2vR@3X4>Z7tg3W_u$dDN$cyevpzk$V}zvGn(G%O+?lBh(yr<`(Ph-wW>V zVD-K+d*2A%{u7Yv!`vBS#D$^9-vbk8@tt*f3mnDBso3nGk(AN$L{~ zoXSyy97Xkb{)shYpr{$_&tT4os-Cc3+JQZrVdAIQ)@jifu0R%(E-A{9*)}}&F>pO7 z$KvQ}o%b++`M^E!vYKM*EZs6oYnj}~D6Skq>ij7@zYMKf=34=KySIS*X}qw7vUO=S zxld>B)6QeaqzFnR!0ki1F+BBi!qW@xLS_+5e_CR=ZQ%Cf*?-X75KN5yKb7|pEc9XN zZ`jtQj^Wuc3@PtE)bt5u(6Xh}n+4}?767tL7m#1|RZQlqE^63C>F>Ee!&>;lV)*GR zp2g#0j-FaW_{P*e)Y^yal}id0K@N+GJAk2%>q85L)|Lp2en@HnV8G=pGVgY8!n5U+ z+)R3`6|O?7c}UGO9?mmD=hojKbptiHfzs+QLuY4b`+K+~Ey_u)r8NZ?#<6jS`yxHE z5o^gB$FX_0I*#YY@zf7#E{)FJz^AT5r`idq^ZX5L>t@>6RaN^|1Z|qL1;k{M4gsLs#my+7@jRF z-wh`7ZbLjFO7jB12J1&mj9ptUK)G3{*X%746eWdk7u(}{A zlcCFzK)q=eY~Z7|+unjgJk zZHLd&zs%cVVs(D4j{|KMUAvG}M42sQ_2KwHX|sh6#~t>1dktI5GP0PC9Qw<6WsMN> z*MveLp+;~v1Zsj}y{k@iIMzEu{`IxhwY5&MAz1CGsmb@4%v>4eu&=i}*yT@)U|oH2 zbtUglMPfldB8B4IZuH_E1|Dy$pjkMaz6J1Xc^KB0hrg>8F%${&F%6C8?XkEJ)T}xn z9$fxslg6?=5q*THBk&XFuTXwQ55&a&HBehV29?8Oz=TpUstch83_Q0MNlO`b7g_pb zWf?C@;fN&i`TL8=i0BNmt?z)#3Cp(`0x4M*UkBmpPRjaws7-whr_-M!u12?c2}>no z5p7Jd5lIqdUepY=$wfom;0-A3;)5w!$bY!>MIGm%$Fv(gwo-8vYWHBMg;-P=JpCULPhBN6?#E1{nVxp^<%K?l}=RlpqTjsbU zi2DJ+k@y};E;L68p5pk}FunofawI7pEWWG{s)H*B`Q+Oev z^YK!1EvV0dUWw?&xyO^wc_?vrgyR zt9>j|cahY!d=9pu>sM(g?9f48yUvh|%Zf@8A(9}oXha6}5jFWpN@K27qtSX6)Q42) zAvL9uc&$|wWR=qr~>_VYNc%s?ga642Kcxs2bA4D*KQ=-)T1pN?dm5@~*y3 ZHtO)^YKJyS>K|Mky?yXf)Bh&^e*s7dwS@ox literal 0 HcmV?d00001 diff --git 
a/gnocchi/tests/functional/gabbits/prometheus_fixtures/a0c06674-a5ef-4621-883c-e94880a2de02.dump b/gnocchi/tests/functional/gabbits/prometheus_fixtures/a0c06674-a5ef-4621-883c-e94880a2de02.dump new file mode 100644 index 0000000000000000000000000000000000000000..06c2b9750ac766d4a009de5ca213ecfbcbfbe33c GIT binary patch literal 2217 zcmXX|4Nz3q6+ZXA_weoG?YgWie_=&8{$P|1FoJ-E;;Nhanrojabu46Kn9NmCR(M+H_{y>9mT?^rg?VGiT1s zojd2=bH4AK@7_Mo-~>v>D`0my?Df$QAMpev zdsuJ;r1~(E#1LYjPR9g80lyG&3(+u>!O$Kl9N{I0pwsvQ4&LVugd-)zxy89UtqO1a zF#C0D`1{&lwBP*t>(z{hSz&NNMwJU#dpPQ!@CfzUqYBZ1?2nC`Mk0gYZduqv6USA8&OSJ&c8fv$v zDh2ojzmiuZZ0CJSUiP}CNRnGy{-CgZL(&xOV=|o6h-6k7D#1`)3CZ?IAj12&^Prkx zA39df=p|ZWRvRiH%Tximby0`c?&ia8d$=jAI?rtfX&6*N7J?Vo>4V^OrXtoE5(MYR z(C!AdJpYlPche%X(r_D|G2Mo21x-gVYIlZs2mi{{O-M@_Is9o`M{4C9xU3rimKjU}~y1+(yjx(!?Z z30`RctiOe_DzcD9tVfE5)dP7JQrfTggUdO9s5{AlI#F;$X3U6%limOB$p^tY4s`*7 zdOb5F_yj&I)aTfwQ3MO}O2K1wfmouTy<*3)Q*vP`syv>zo`kmw*3$uIjp2K+#B>X@ zK3+liRW{=B3x7VBko;sm)!f{0bdYv4%MA^Xp5Fl4K)@Rf#`8$!(!O}X2cihzAG#11 zfo*+twX;&Yx# z(E7tcUNXH8GZN4fY5D?Ae4A8B>31b90ZHbqD*atZ*ZaWDtpw+BOiRLRf@W?`dIMA< zd!Xt0PQ573D0wOE8#2sT#-ZH@EF5^DMDG(vkX9+k;f;@=RUj1s*z`a^O6Nxwrr zCXHF(<|T!)kWS4jKH}#k-X(;>%);lkvB^ZPsemyV-d<~TM!zTMZ zj!+<6$Fr=`=7>5;&bNHts2pqk89LnyP#o(8F-CBM0LN#`5NJ!wSTfgCJarZ2E?$#WpO?D-)5_L_vZ;#bvxj z=H5rp)A%fE-v{z0T)hcH@6z0!gghiqV(TQ9_8~40L5npR$rtgKi=j?1E=nYKBY6;4 z4oVHcJ%IGRWDk-%DQhQn`XD4LI^y)WxZ_B0+6qR1#T!PYbHE(|vKPe`C0ImC zia`D_Qa?855ZqBHoW|;6c^d!Eny0bqfo+=YL(C#uVJ5Ad7!D}25TMQm#y>&aAc8)* z2)7mCt|wqE#&AULhnarpx`|9t^J zoqd;*DiXL)DDz^Je+`=!gVq(|gYI~c8VXwXg7|ab`q8eP#IE~LF$MB9tk~2wth)Nt z6)B7HYuIAJ@*!&M5Cw;0-;l8I;t-G;+7N$?H$UcngW<5|FNwx^ZG{VBN3d1Kv08#N z6XGVYtU=~pe5s4(auB>>ae;gV4`0F1Vrc>SG9J5(;h0hwq}pn-6~(C&eD+4(7mXW} zGK@*w38b)bIsh>>9zTe=695BpnqO>MK%3jq7XQY7EU!}l@lmqT=ah^~7E8BM9s zdnyBH%l1<-~ImO!_BZ{g^@ zg<3^WLQUqE2(Fual$i9Cgbd=l&d|I|8L(g^5Y3Z{-PUc!Gjf zYZ{Wr@Yb==LDG1LE}2hTPHOETUUJ2Er=;voiJwHPPFnIaVqJKAALP%bma4fgu-ucb zSffJ5-ono^3_0>wc=Rg_?XkH8lRe!u1~*a(+e7Fs^b%w!DsRiuZOe+CqKlC7-~-IJ 
I{QtY~UwVhq*#H0l literal 0 HcmV?d00001 diff --git a/gnocchi/tests/indexer/sqlalchemy/test_migrations.py b/gnocchi/tests/indexer/sqlalchemy/test_migrations.py index c29fcc90..23712347 100644 --- a/gnocchi/tests/indexer/sqlalchemy/test_migrations.py +++ b/gnocchi/tests/indexer/sqlalchemy/test_migrations.py @@ -19,7 +19,7 @@ import mock import oslo_db.exception from oslo_db.sqlalchemy import test_migrations import six -import sqlalchemy as sa +import sqlalchemy.schema import sqlalchemy_utils from gnocchi import indexer @@ -50,6 +50,21 @@ class ModelsMigrationsSync( self.index.upgrade(nocreate=True) self.addCleanup(self._drop_database) + # NOTE(sileht): remove tables dynamically created by other tests + valid_resource_type_tables = [] + for rt in self.index.list_resource_types(): + valid_resource_type_tables.append(rt.tablename) + valid_resource_type_tables.append("%s_history" % rt.tablename) + # NOTE(sileht): load it in sqlalchemy metadata + self.index._RESOURCE_TYPE_MANAGER.get_classes(rt) + + for table in sqlalchemy_base.Base.metadata.sorted_tables: + if (table.name.startswith("rt_") and + table.name not in valid_resource_type_tables): + sqlalchemy_base.Base.metadata.remove(table) + self.index._RESOURCE_TYPE_MANAGER._cache.pop( + table.name.replace('_history', ''), None) + def _drop_database(self): try: sqlalchemy_utils.drop_database(self.conf.indexer.url) @@ -63,27 +78,3 @@ class ModelsMigrationsSync( def get_engine(self): return self.index.get_engine() - - def db_sync(self, engine): - # NOTE(sileht): We ensure all resource type sqlalchemy model are loaded - # in this process - for rt in self.index.list_resource_types(): - if rt.state == "active": - self.index._RESOURCE_TYPE_MANAGER.get_classes(rt) - - def filter_metadata_diff(self, diff): - tables_to_keep = [] - for rt in self.index.list_resource_types(): - if rt.name.startswith("indexer_test"): - tables_to_keep.extend([rt.tablename, - "%s_history" % rt.tablename]) - new_diff = [] - for line in diff: - if 
len(line) >= 2: - item = line[1] - # NOTE(sileht): skip resource types created for tests - if (isinstance(item, sa.Table) - and item.name in tables_to_keep): - continue - new_diff.append(line) - return new_diff diff --git a/releasenotes/notes/prometheus-bc2153962b9a237a.yaml b/releasenotes/notes/prometheus-bc2153962b9a237a.yaml new file mode 100644 index 00000000..7000a45b --- /dev/null +++ b/releasenotes/notes/prometheus-bc2153962b9a237a.yaml @@ -0,0 +1,6 @@ +--- +features: + - | + Gnocchi API can act as Prometheus Remote Write Adapter to receive + Prometheus metrics. The endpoint to configure in Prometheus configuration + is https:///v1/prometheus/write. diff --git a/setup.cfg b/setup.cfg index 7220641e..acec68c2 100644 --- a/setup.cfg +++ b/setup.cfg @@ -44,6 +44,9 @@ ceph_recommended_lib = cradox>=1.2.0 ceph_alternative_lib = python-rados>=12.2.0 # not available on pypi +prometheus = + python-snappy + protobuf doc = sphinx<1.6.0 sphinx_rtd_theme diff --git a/tox.ini b/tox.ini index c829d82a..8f6a313e 100644 --- a/tox.ini +++ b/tox.ini @@ -40,8 +40,9 @@ setenv = # NOTE(jd) Install redis as a test dependency since it is used as a # coordination driver in functional tests (--coordination-driver is passed to # pifpaf) -deps = .[test,redis,{env:GNOCCHI_STORAGE_DEPS:},{env:GNOCCHI_INDEXER_DEPS:}] +deps = .[test,redis,prometheus,{env:GNOCCHI_STORAGE_DEPS:},{env:GNOCCHI_INDEXER_DEPS:}] {env:GNOCCHI_TEST_TARBALLS:} + cliff!=2.9.0 commands = doc8 doc/source {toxinidir}/run-tests.sh {posargs} @@ -150,7 +151,7 @@ deps = {[testenv:docs]deps} commands = pifpaf -g GNOCCHI_INDEXER_URL run postgresql -- {posargs} [flake8] -exclude = .tox,.eggs,doc +exclude = .tox,.eggs,doc,gnocchi/rest/prometheus/remote_pb2.py show-source = true enable-extensions = H904 -- GitLab From c89ebb5fb4b06297f8374df86b3e75d51afec494 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Wed, 22 Nov 2017 17:16:44 +0100 Subject: [PATCH 1087/1483] redis: enhance error message is redis is not installed There's 
is a python-redis on PyPI, and that's not what we want. Let's not be confusing. --- gnocchi/common/redis.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/gnocchi/common/redis.py b/gnocchi/common/redis.py index e43cee45..114fc252 100644 --- a/gnocchi/common/redis.py +++ b/gnocchi/common/redis.py @@ -73,7 +73,7 @@ CLIENT_INT_ARGS = frozenset([ def get_client(conf): if redis is None: - raise RuntimeError("python-redis unavailable") + raise RuntimeError("Redis Python module is unavailable") parsed_url = parse.urlparse(conf.redis_url) options = parse.parse_qs(parsed_url.query) -- GitLab From b512947487bd4fb23488f8f74a3de1df2bf22857 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Wed, 22 Nov 2017 14:00:03 +0100 Subject: [PATCH 1088/1483] Simplify Ceph flavor Now that the `ceph` flavor is gone, let's use `ceph` for the recommended approach and `ceph_alternative` for the other one. --- doc/source/install.rst | 4 ++-- setup.cfg | 4 ++-- tox.ini | 4 ++-- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/doc/source/install.rst b/doc/source/install.rst index a219bea2..efe45ab2 100644 --- a/doc/source/install.rst +++ b/doc/source/install.rst @@ -23,8 +23,8 @@ The list of variants available is: * `postgresql` – provides PostgreSQL indexer support * `swift` – provides OpenStack Swift storage support * `s3` – provides Amazon S3 storage support -* `ceph_recommended_lib` – provides Ceph (>= 0.80) storage support -* `ceph_alternative_lib` – provides Ceph (>= 12.2.0) storage support +* `ceph` – provides Ceph (>= 0.80) storage support +* `ceph_alternative` – provides Ceph (>= 12.2.0) storage support * `redis` – provides Redis storage support * `prometheus` – provides Prometheus Remote Write support * `doc` – documentation building support diff --git a/setup.cfg b/setup.cfg index acec68c2..3d4ddbfd 100644 --- a/setup.cfg +++ b/setup.cfg @@ -40,9 +40,9 @@ redis = redis>=2.10.0 # MIT swift = python-swiftclient>=3.1.0 -ceph_recommended_lib = +ceph = 
cradox>=1.2.0 -ceph_alternative_lib = +ceph_alternative = python-rados>=12.2.0 # not available on pypi prometheus = python-snappy diff --git a/tox.ini b/tox.ini index 8f6a313e..d0dbd2d7 100644 --- a/tox.ini +++ b/tox.ini @@ -19,8 +19,8 @@ setenv = postgresql: GNOCCHI_TEST_INDEXER_DRIVERS=postgresql mysql: GNOCCHI_TEST_INDEXER_DRIVERS=mysql - GNOCCHI_STORAGE_DEPS=file,swift,test-swift,s3,ceph,ceph_recommended_lib,redis - ceph: GNOCCHI_STORAGE_DEPS=ceph,ceph_recommended_lib + GNOCCHI_STORAGE_DEPS=file,swift,test-swift,s3,ceph,redis + ceph: GNOCCHI_STORAGE_DEPS=ceph swift: GNOCCHI_STORAGE_DEPS=swift,test-swift file: GNOCCHI_STORAGE_DEPS=file redis: GNOCCHI_STORAGE_DEPS=redis -- GitLab From fa065ac313ea9573782e89b4a1983afeeec4f059 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Wed, 22 Nov 2017 10:32:38 +0100 Subject: [PATCH 1089/1483] docs: add some note about grafana config The allowed_origin example is invalid. This change fixes it and add some note about it, because even oslo.config generates a comprehensive documentation about how to set it, people prefer copying this bad example instead of reading the doc... --- doc/source/grafana.rst | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/doc/source/grafana.rst b/doc/source/grafana.rst index 73842dc1..16349ff5 100644 --- a/doc/source/grafana.rst +++ b/doc/source/grafana.rst @@ -31,17 +31,22 @@ steps: Grafana:: [cors] - allowed_origin = http://example.com/grafana + allowed_origin = http://grafana.fqdn 2. Configure the CORS middleware in Keystone to allow request from Grafana too:: [cors] - allowed_origin = http://example.com/grafana + allowed_origin = http://grafana.fqdn 3. Configure a new datasource in Grafana with the Keystone URL, a user, a project and a password. Your browser will query Keystone for a token, and then query Gnocchi based on what Grafana needs. +.. note:: + + `allowed_origin` format is format: `://[:]`. No path, + no query string and no trailing `/`. + .. 
image:: _static/grafana-screenshot.png :align: center :alt: Grafana screenshot -- GitLab From d5a8bca2d75eb2e52c2899084a5f006f4e0659cb Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Thu, 16 Nov 2017 11:57:42 +0100 Subject: [PATCH 1090/1483] rest: fix some id search issue id key and like operator have some issue, due to the binary nature of uuid. This change avoids it. Closes #450 Closes #491 --- gnocchi/rest/api.py | 16 +++++++++----- gnocchi/tests/functional/gabbits/search.yaml | 23 ++++++++++++++++++++ 2 files changed, 33 insertions(+), 6 deletions(-) diff --git a/gnocchi/rest/api.py b/gnocchi/rest/api.py index aff8ff7e..8a69c1a0 100644 --- a/gnocchi/rest/api.py +++ b/gnocchi/rest/api.py @@ -1286,6 +1286,9 @@ ResourceSearchSchemaAttributeValue = voluptuous.Any( six.text_type, float, int, bool, None) +NotIDKey = voluptuous.All(six.text_type, voluptuous.NotIn(["id"])) + + def _ResourceSearchSchema(): user = pecan.request.auth_helper.get_current_user( pecan.request) @@ -1302,20 +1305,21 @@ def _ResourceSearchSchema(): u"<=", u"≤", u"le", u">=", u"≥", u"ge", u"!=", u"≠", u"ne", - u"like" ): voluptuous.All( voluptuous.Length(min=1, max=1), {"id": _ResourceUUID, - six.text_type: ResourceSearchSchemaAttributeValue}, + NotIDKey: ResourceSearchSchemaAttributeValue}, ), - voluptuous.Any( - u"in", - ): voluptuous.All( + u"like": voluptuous.All( + voluptuous.Length(min=1, max=1), + {NotIDKey: ResourceSearchSchemaAttributeValue}, + ), + u"in": voluptuous.All( voluptuous.Length(min=1, max=1), {"id": voluptuous.All( [_ResourceUUID], voluptuous.Length(min=1)), - six.text_type: voluptuous.All( + NotIDKey: voluptuous.All( [ResourceSearchSchemaAttributeValue], voluptuous.Length(min=1))} ), diff --git a/gnocchi/tests/functional/gabbits/search.yaml b/gnocchi/tests/functional/gabbits/search.yaml index 6826d537..1e7e1bdb 100644 --- a/gnocchi/tests/functional/gabbits/search.yaml +++ b/gnocchi/tests/functional/gabbits/search.yaml @@ -40,6 +40,29 @@ tests: - "expected a list for 
dictionary value @ data[" - "'and']" + - name: search like id + POST: /v1/search/resource/generic + data: + like: + id: fa% + status: 400 + response_strings: + - "Invalid input: extra keys not allowed @ data[" + - "'like'][" + - "'id']" + + - name: search like list id + POST: /v1/search/resource/generic + data: + like: + id: + - fa% + status: 400 + response_strings: + - "Invalid input: extra keys not allowed @ data[" + - "'like'][" + - "'id']" + - name: search invalid ne value desc: attribute value for binary operator must not be dict or list POST: /v1/search/resource/generic -- GitLab From d9c099b7f53750159a5537ed37edf289db22fee5 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Wed, 22 Nov 2017 15:07:47 +0100 Subject: [PATCH 1091/1483] doc: add an example configuration file for mod_wsgi --- doc/source/operating.rst | 33 ++++++++++++++++++++++++++++----- 1 file changed, 28 insertions(+), 5 deletions(-) diff --git a/doc/source/operating.rst b/doc/source/operating.rst index fad37a95..bc8c2a81 100644 --- a/doc/source/operating.rst +++ b/doc/source/operating.rst @@ -16,14 +16,18 @@ Running API As A WSGI Application ================================= To run Gnocchi API, you can use the provided `gnocchi-api`. It wraps around -`uwsgi` – makes sure that `uwsgi`_ is installed. If one Gnocchi API server is +`uwsgi` – makes sure that `uWSGI`_ is installed. If one Gnocchi API server is not enough, you can spawn any number of new API server to scale Gnocchi out, even on different machines. Since Gnocchi API tier runs using WSGI, it can alternatively be run using -`Apache httpd`_ and `mod_wsgi`_, or any other HTTP daemon. If you want to -deploy using `uwsgi`_ yourself, the following uwsgi configuration file can be -used as a base:: +`Apache httpd`_ and `mod_wsgi`_, or any other HTTP daemon. 
+ +uWSGI +----- + +If you want to deploy using `uWSGI`_ yourself, the following uWSGI +configuration file can be used as a base:: [uwsgi] http = localhost:8041 @@ -48,9 +52,28 @@ Once written to `/etc/gnocchi/uwsgi.ini`, it can be launched this way:: uwsgi /etc/gnocchi/uwsgi.ini +Apache mod_wsgi +--------------- + +If you want to use Apache httpd `mod_wsgi`_, here's an example configuration +file:: + + + WSGIDaemonProcess gnocchi user=gnocchi processes=4 threads=32 display-name=%{GROUP} + WSGIProcessGroup gnocchi + WSGIScriptAlias / /usr/local/bin/gnocchi-api + WSGIPassAuthorization On + WSGIApplicationGroup %{GLOBAL} + + + Require all granted + + + + .. _Apache httpd: http://httpd.apache.org/ .. _mod_wsgi: https://modwsgi.readthedocs.org/ -.. _uwsgi: https://uwsgi-docs.readthedocs.org/ +.. _uWSGI: https://uwsgi-docs.readthedocs.org/ How to define archive policies ============================== -- GitLab From f157e4faecb9859116b19843aa1d763209fdb1ad Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Wed, 8 Nov 2017 12:28:04 +0100 Subject: [PATCH 1092/1483] doc: add a procedure to clean out Gnocchi data --- doc/source/operating.rst | 48 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 48 insertions(+) diff --git a/doc/source/operating.rst b/doc/source/operating.rst index bc8c2a81..818bef82 100644 --- a/doc/source/operating.rst +++ b/doc/source/operating.rst @@ -289,4 +289,52 @@ your file system). The procedure to restore is no more complicated than initial deployment: restore your index and storage backups, reinstall Gnocchi if necessary, and restart it. +How to clear Gnocchi data +========================= + +If you ever want to start fresh or need to clean Gnocchi data, this can be +easily done. You need to clean the measures (incoming), aggregates (storage) +and indexer data storage. + +Once that is done, if you want to re-initialize Gnocchi, you need to call +`gnocchi-upgrade` so it re-initialize the different drivers. 
+ +Index storage +------------- + +Both MySQL and PostgreSQL drivers uses a single database. Delete the database. +If you want to install Gnocchi again, create back that database with the same +name before calling `gnocchi-upgrade`. + +Incoming data +------------- + +Depending on the driver you use, the data are stored in different places: + +* **Ceph**: delete the `gnocchi-config` object and the objects whose names + start with `incoming` in the Ceph pool. Alternatively you can delete the Ceph + pool (and recreate it if needed). +* **OpenStack Swift**: delete the `gnocchi-config` container and containers + whose names start with `incoming` in the Swift account. +* **Redis**: delete the `gnocchi-config` key and the keys whose names start + with `incoming`. +* **File**: delete `${incoming.file_basepath}/tmp` and the directories whose + names start with `${incoming.file_basepath}/incoming`. +* **Amazon S3**: delete the bucket whose name start with `incoming`. + +Storage data +------------ + +Depending on the driver you use, the data are stored in different places: + +* **Ceph**: delete the objects whose names start with `gnocchi_` in the Ceph + pool. Alternatively you can delete the Ceph pool (and recreate it if needed). +* **OpenStack Swift**: delete the containers whose names start with + `$storage.swift_container_prefix` in the Swift account. +* **Redis**: delete the keys whose names start with `timeseries`. +* **File**: delete the directories whose names are UUIDs under + `$incoming.file_basepath`. +* **Amazon S3**: delete the bucket whose name start with + `$storage.s3_bucket_prefix`. + .. 
include:: include/term-substitution.rst -- GitLab From 598e9e2a9cd16833b410de6b87fb4df889bb3799 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Fri, 24 Nov 2017 14:05:53 +0100 Subject: [PATCH 1093/1483] doc: move comparison in intro --- doc/source/alternatives.rst | 56 +++++++++++++++++++++++++++++++++++++ doc/source/index.rst | 56 +------------------------------------ 2 files changed, 57 insertions(+), 55 deletions(-) create mode 100644 doc/source/alternatives.rst diff --git a/doc/source/alternatives.rst b/doc/source/alternatives.rst new file mode 100644 index 00000000..9fe9cad4 --- /dev/null +++ b/doc/source/alternatives.rst @@ -0,0 +1,56 @@ +Comparisons To Alternatives +--------------------------- + +The following table summarises feature comparison between different existing +open source time series database. More details are written below, if needed. + +.. include:: comparison-table.rst + +Gnocchi vs Prometheus +~~~~~~~~~~~~~~~~~~~~~ +`Prometheus `_ is a full-featured solution that +includes everything from polling the metrics to storing and archiving them. It +offers advanced features such as alerting. + +In comparison, Gnocchi does not offer polling as it prefers to leverage +existing solutions (e.g. `collectd `_). However, it +provides high-availability and horizontal scalablity as well as multi-tenancy. + + +Gnocchi vs InfluxDB +~~~~~~~~~~~~~~~~~~~ + +`InfluxDB `_ is a time series database storing metrics +into local files. It offers a variety of input protocol support and created its +own query language, InfluxQL, inspired from SQL. The HTTP API it offers is just +a way to pass InfluxQL over the wire. Horizontal scalability is only provided +in the commercial version. The data model is based on time series with labels +associated to it. + +In comparison, Gnocchi offers scalability and multi-tenancy. Its data model +differs as it does not provide labels, but |resources| to attach to |metrics|. 
+ +Gnocchi vs OpenTSDB +~~~~~~~~~~~~~~~~~~~ + +`OpenTSDB `_ is a distributed time series database that +uses `Hadoop `_ and `HBase +`_ to store its data. That makes it easy to scale +horizontally. However, its querying feature are rather simple. + +In comparison, Gnocchi offers a proper query language with more features. The +usage of Hadoop might be a show-stopper for many as it's quite heavy to deploy +and operate. + +Gnocchi vs Graphite +~~~~~~~~~~~~~~~~~~~ + +`Graphite `_ is essentially a data +metric storage composed of flat files (Whisper), and focuses on rendering those +time series. Each time series stored is composed of points that are stored +regularly and are related to the current date and time. + +In comparison, Gnocchi offers much more scalability, a better file format and +no relativity to the current time and date. + +.. include:: include/term-substitution.rst diff --git a/doc/source/index.rst b/doc/source/index.rst index d28cd99e..fc44e6a1 100644 --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -34,61 +34,6 @@ You can join Gnocchi's community via the following channels: - Bug tracker: https://github.com/gnocchixyz/gnocchi/issues - IRC: #gnocchi on `Freenode `_ -Comparisons To Alternatives ---------------------------- - -The following table summarises feature comparison between different existing -open source time series database. More details are written below, if needed. - -.. include:: comparison-table.rst - -Gnocchi vs Prometheus -~~~~~~~~~~~~~~~~~~~~~ -`Prometheus `_ is a full-featured solution that -includes everything from polling the metrics to storing and archiving them. It -offers advanced features such as alerting. - -In comparison, Gnocchi does not offer polling as it prefers to leverage -existing solutions (e.g. `collectd `_). However, it -provides high-availability and horizontal scalablity as well as multi-tenancy. 
- - -Gnocchi vs InfluxDB -~~~~~~~~~~~~~~~~~~~ - -`InfluxDB `_ is a time series database storing metrics -into local files. It offers a variety of input protocol support and created its -own query language, InfluxQL, inspired from SQL. The HTTP API it offers is just -a way to pass InfluxQL over the wire. Horizontal scalability is only provided -in the commercial version. The data model is based on time series with labels -associated to it. - -In comparison, Gnocchi offers scalability and multi-tenancy. Its data model -differs as it does not provide labels, but |resources| to attach to |metrics|. - -Gnocchi vs OpenTSDB -~~~~~~~~~~~~~~~~~~~ - -`OpenTSDB `_ is a distributed time series database that -uses `Hadoop `_ and `HBase -`_ to store its data. That makes it easy to scale -horizontally. However, its querying feature are rather simple. - -In comparison, Gnocchi offers a proper query language with more features. The -usage of Hadoop might be a show-stopper for many as it's quite heavy to deploy -and operate. - -Gnocchi vs Graphite -~~~~~~~~~~~~~~~~~~~ - -`Graphite `_ is essentially a data -metric storage composed of flat files (Whisper), and focuses on rendering those -time series. Each time series stored is composed of points that are stored -regularly and are related to the current date and time. - -In comparison, Gnocchi offers much more scalability, a better file format and -no relativity to the current time and date. 
- Documentation ------------- @@ -105,6 +50,7 @@ Documentation prometheus nagios collectd + alternatives glossary releasenotes/index.rst contributing -- GitLab From e0f7c7535e5705b352a74a0875afe80917796413 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Fri, 24 Nov 2017 18:39:09 +0100 Subject: [PATCH 1094/1483] doc: fix heading level for titles in intro There are at level 1 where they should be level 2 --- doc/source/intro.rst | 80 ++++++++++++++++++++++++++------------------ 1 file changed, 48 insertions(+), 32 deletions(-) diff --git a/doc/source/intro.rst b/doc/source/intro.rst index 2314a784..61997840 100644 --- a/doc/source/intro.rst +++ b/doc/source/intro.rst @@ -1,15 +1,44 @@ Getting started --------------- -Gnocchi uses three different back-ends for storing data: one for storing new -incoming |measures| (the *incoming* driver), one for storing the |time series| -|aggregates| (the *storage* driver) and one for indexing the data (the *index* -driver). By default, the *incoming* driver is configured to use the same value -as the *storage* driver. + +Architecture overview +~~~~~~~~~~~~~~~~~~~~~ + +Gnocchi consists of several services: a HTTP REST API (see :doc:`rest`), an +optional statsd-compatible daemon (see :doc:`statsd`), and an asynchronous +processing daemon (named `gnocchi-metricd`). Data is received via the HTTP REST +API or statsd daemon. `gnocchi-metricd` performs operations (statistics +computing, |metric| cleanup, etc...) on the received data in the background. + +.. image:: _static/architecture.svg + :align: center + :width: 95% + :alt: Gnocchi architecture + +.. image source: https://docs.google.com/drawings/d/1aHV86TPNFt7FlCLEjsTvV9FWoFYxXCaQOzfg7NdXVwM/edit?usp=sharing + +All those services are stateless and therefore horizontally scalable. Contrary +to many time series databases, there is no limit on the number of +`gnocchi-metricd` daemons or `gnocchi-api` endpoints that you can run. 
If your +load starts to increase, you just need to spawn more daemons to handle the flow +of new requests. The same applies if you want to handle high-availability +scenarios: just start more Gnocchi daemons on independent servers. + +As you can see on the architecture diagram above, there are three external +components that Gnocchi needs to work correctly: + +- An incoming measure storage +- An aggregated metric storage +- An index + +Those three parts are provided by drivers. Gnocchi is entirely pluggable and +offer different options for those services. Incoming and storage drivers -~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +++++++++++++++++++++++++++++ -Gnocchi can leverage different storage systems, such as: +Gnocchi can leverage different storage systems for its incoming |measures| and +aggregated |metrics|, such as: * File (default) * `Ceph`_ (preferred) @@ -24,13 +53,16 @@ Gnocchi processes. Ultimately, the S3, Ceph, and Swift drivers are more scalable storage options. Ceph also offers better consistency, and hence is the recommended driver. +A typical recommendation for medium to large scale deployment is to use +`Redis`_ as an incoming measure storage and `Ceph`_ as an aggregate storage. + .. _`OpenStack Swift`: http://docs.openstack.org/developer/swift/ .. _`Ceph`: https://ceph.com .. _`Amazon S3`: https://aws.amazon.com/s3/ .. _`Redis`: https://redis.io Indexer driver -~~~~~~~~~~~~~~ +++++++++++++++ You also need a database to index the resources and metrics that Gnocchi will handle. The supported drivers are: @@ -46,32 +78,9 @@ relationships of |resources|.. .. _PostgreSQL: http://postgresql.org .. _MySQL: http://mysql.org -Architecture overview ---------------------- - -Gnocchi consists of several services: a HTTP REST API (see :doc:`rest`), an -optional statsd-compatible daemon (see :doc:`statsd`), and an asynchronous -processing daemon (named `gnocchi-metricd`). Data is received via the HTTP REST -API or statsd daemon. 
`gnocchi-metricd` performs operations (statistics -computing, |metric| cleanup, etc...) on the received data in the background. - -.. image:: _static/architecture.svg - :align: center - :width: 95% - :alt: Gnocchi architecture - -.. image source: https://docs.google.com/drawings/d/1aHV86TPNFt7FlCLEjsTvV9FWoFYxXCaQOzfg7NdXVwM/edit?usp=sharing - -All those services are stateless and therefore horizontally scalable. Contrary -to many time series databases, there is no limit on the number of -`gnocchi-metricd` daemons or `gnocchi-api` endpoints that you can run. If your -load starts to increase, you just need to spawn more daemons to handle the flow -of new requests. The same applies if you want to handle high-availability -scenarios: just start more Gnocchi daemons on independent servers. - Understanding aggregation -------------------------- +~~~~~~~~~~~~~~~~~~~~~~~~~ The way data points are aggregated is configurable on a per-metric basis, using an archive policy. @@ -82,4 +91,11 @@ maximum, average, Nth percentile, standard deviation, etc. Those aggregations are computed over a period of time (called granularity) and are kept for a defined timespan. + +Gnocchi uses three different back-ends for storing data: one for storing new +incoming |measures| (the *incoming* driver), one for storing the |time series| +|aggregates| (the *storage* driver) and one for indexing the data (the *index* +driver). By default, the *incoming* driver is configured to use the same value +as the *storage* driver. + .. include:: include/term-substitution.rst -- GitLab From dc6287f829cd2f80207912c22a716f0b609a0834 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Tue, 21 Nov 2017 15:19:36 +0100 Subject: [PATCH 1095/1483] indexer: replace kwargs filtering by regular attribute filter This leverages the attribute filter used in resource filtering for metrics too, allowing more generic and finer grained filtering. 
--- gnocchi/indexer/__init__.py | 3 ++- gnocchi/indexer/sqlalchemy.py | 21 ++++++++++---------- gnocchi/rest/api.py | 37 +++++++++++++++++++++++------------ gnocchi/tests/test_indexer.py | 14 +++++++------ 4 files changed, 45 insertions(+), 30 deletions(-) diff --git a/gnocchi/indexer/__init__.py b/gnocchi/indexer/__init__.py index 9163bf13..6b014d12 100644 --- a/gnocchi/indexer/__init__.py +++ b/gnocchi/indexer/__init__.py @@ -376,7 +376,8 @@ class IndexerDriver(object): @staticmethod def list_metrics(names=None, ids=None, details=False, status='active', - limit=None, marker=None, sorts=None, **kwargs): + limit=None, marker=None, sorts=None, + attribute_filter=None): raise exceptions.NotImplementedError @staticmethod diff --git a/gnocchi/indexer/sqlalchemy.py b/gnocchi/indexer/sqlalchemy.py index 1b97ff81..5f1172d7 100644 --- a/gnocchi/indexer/sqlalchemy.py +++ b/gnocchi/indexer/sqlalchemy.py @@ -688,7 +688,7 @@ class SQLAlchemyIndexer(indexer.IndexerDriver): @retry_on_deadlock def list_metrics(self, names=None, ids=None, details=False, status='active', limit=None, marker=None, sorts=None, - creator=None, **kwargs): + attribute_filter=None): sorts = sorts or [] if ids is not None and not ids: return [] @@ -701,17 +701,18 @@ class SQLAlchemyIndexer(indexer.IndexerDriver): q = q.filter(Metric.name.in_(names)) if ids is not None: q = q.filter(Metric.id.in_(ids)) - if creator is not None: - if creator[0] == ":": - q = q.filter(Metric.creator.like("%%%s" % creator)) - elif creator[-1] == ":": - q = q.filter(Metric.creator.like("%s%%" % creator)) - else: - q = q.filter(Metric.creator == creator) - for attr in kwargs: - q = q.filter(getattr(Metric, attr) == kwargs[attr]) if details: q = q.options(sqlalchemy.orm.joinedload('resource')) + if attribute_filter: + engine = session.connection() + # We don't catch the indexer.QueryAttributeError error here + # since we expect any user input on this function. 
If the + # caller screws it, it's its problem: no need to convert the + # exception to another type. + f = QueryTransformer.build_filter( + engine.dialect.name, + Metric, attribute_filter) + q = q.filter(f) sort_keys, sort_dirs = self._build_sort_keys(sorts, ['id']) diff --git a/gnocchi/rest/api.py b/gnocchi/rest/api.py index 8a69c1a0..5adc1439 100644 --- a/gnocchi/rest/api.py +++ b/gnocchi/rest/api.py @@ -535,7 +535,7 @@ class MetricsController(rest.RestController): # NOTE(sileht): Don't get detail for measure details = len(remainder) == 0 metrics = pecan.request.indexer.list_metrics( - id=metric_id, details=details) + attribute_filter={"=": {"id": metric_id}}, details=details) if not metrics: abort(404, six.text_type(indexer.NoSuchMetric(id))) return MetricController(metrics[0]), remainder @@ -667,13 +667,17 @@ class MetricsController(rest.RestController): pagination_opts = get_pagination_options(kwargs, METRIC_DEFAULT_PAGINATION) - attr_filter = {} + attr_filters = [] if provided_creator is not None: - attr_filter['creator'] = provided_creator - attr_filter.update(pagination_opts) - attr_filter.update(kwargs) + attr_filters.append({"=": {"creator": provided_creator}}) + + for k, v in six.iteritems(kwargs): + attr_filters.append({"=": {k: v}}) + try: - metrics = pecan.request.indexer.list_metrics(**attr_filter) + metrics = pecan.request.indexer.list_metrics( + attribute_filter={"and": attr_filters}, + **pagination_opts) if metrics and len(metrics) >= pagination_opts['limit']: set_resp_link_hdr(str(metrics[-1].id), kwargs, pagination_opts) return metrics @@ -709,9 +713,12 @@ class NamedMetricController(rest.RestController): # NOTE(sileht): We want detail only when we GET /metric/ # and not for /metric//measures details = pecan.request.method == 'GET' and len(remainder) == 0 - m = pecan.request.indexer.list_metrics(details=details, - name=name, - resource_id=self.resource_id) + m = pecan.request.indexer.list_metrics( + details=details, + attribute_filter={"and": [ 
+ {"=": {"name": name}}, + {"=": {"resource_id": self.resource_id}}, + ]}) if m: return MetricController(m[0]), remainder @@ -755,7 +762,8 @@ class NamedMetricController(rest.RestController): if not resource: abort(404, six.text_type(indexer.NoSuchResource(self.resource_id))) enforce("get resource", resource) - return pecan.request.indexer.list_metrics(resource_id=self.resource_id) + return pecan.request.indexer.list_metrics( + attribute_filter={"=": {"resource_id": self.resource_id}}) class ResourceHistoryController(rest.RestController): @@ -1519,7 +1527,9 @@ class ResourcesMetricsMeasuresBatchController(rest.RestController): resource_id)] names = body[(original_resource_id, resource_id)].keys() metrics = pecan.request.indexer.list_metrics( - names=names, resource_id=resource_id) + names=names, + attribute_filter={"=": {"resource_id": resource_id}}, + ) known_names = [m.name for m in metrics] if strtobool("create_metrics", create_metrics): @@ -1558,8 +1568,9 @@ class ResourcesMetricsMeasuresBatchController(rest.RestController): known_metrics.extend( pecan.request.indexer.list_metrics( names=already_exists_names, - resource_id=resource_id) - ) + attribute_filter={"=": + {"resource_id": resource_id}}, + )) elif len(names) != len(metrics): unknown_metrics.extend( diff --git a/gnocchi/tests/test_indexer.py b/gnocchi/tests/test_indexer.py index 545b6507..6f661d09 100644 --- a/gnocchi/tests/test_indexer.py +++ b/gnocchi/tests/test_indexer.py @@ -179,7 +179,7 @@ class TestIndexerDriver(tests_base.TestCase): self.assertIsNone(m.name) self.assertIsNone(m.unit) self.assertIsNone(m.resource_id) - m2 = self.index.list_metrics(id=r1) + m2 = self.index.list_metrics(attribute_filter={"=": {"id": r1}}) self.assertEqual([m], m2) def test_create_named_metric_duplicate(self): @@ -195,7 +195,7 @@ class TestIndexerDriver(tests_base.TestCase): self.assertEqual(m.creator, creator) self.assertEqual(name, m.name) self.assertEqual(r1, m.resource_id) - m2 = self.index.list_metrics(id=m1) + 
m2 = self.index.list_metrics(attribute_filter={"=": {"id": m1}}) self.assertEqual([m], m2) self.assertRaises(indexer.NamedMetricAlreadyExists, @@ -322,7 +322,8 @@ class TestIndexerDriver(tests_base.TestCase): 'generic', r1, creator, metrics={"foobar": {"archive_policy_name": "low"}}) self.assertEqual(1, len(rc.metrics)) - m = self.index.list_metrics(id=rc.metrics[0].id) + m = self.index.list_metrics( + attribute_filter={"=": {"id": rc.metrics[0].id}}) self.assertEqual(m[0], rc.metrics[0]) def test_delete_resource(self): @@ -1091,7 +1092,7 @@ class TestIndexerDriver(tests_base.TestCase): creator = str(uuid.uuid4()) self.index.create_metric(e1, creator, archive_policy_name="low") - metric = self.index.list_metrics(id=e1) + metric = self.index.list_metrics(attribute_filter={"=": {"id": e1}}) self.assertEqual(1, len(metric)) metric = metric[0] self.assertEqual(e1, metric.id) @@ -1106,7 +1107,7 @@ class TestIndexerDriver(tests_base.TestCase): creator, archive_policy_name="low") - metric = self.index.list_metrics(id=e1) + metric = self.index.list_metrics(attribute_filter={"=": {"id": e1}}) self.assertEqual(1, len(metric)) metric = metric[0] self.assertEqual(e1, metric.id) @@ -1117,7 +1118,8 @@ class TestIndexerDriver(tests_base.TestCase): def test_get_metric_with_bad_uuid(self): e1 = uuid.uuid4() - self.assertEqual([], self.index.list_metrics(id=e1)) + self.assertEqual([], self.index.list_metrics( + attribute_filter={"=": {"id": e1}})) def test_get_metric_empty_list_uuids(self): self.assertEqual([], self.index.list_metrics(ids=[])) -- GitLab From 59f20e09df77cd336efe976ceeb2ed6121b99220 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Tue, 21 Nov 2017 15:48:56 +0100 Subject: [PATCH 1096/1483] indexer: replace names filter in list_metric by attribute_filter --- gnocchi/indexer/__init__.py | 2 +- gnocchi/indexer/sqlalchemy.py | 6 +----- gnocchi/rest/api.py | 17 +++++++++-------- 3 files changed, 11 insertions(+), 14 deletions(-) diff --git a/gnocchi/indexer/__init__.py 
b/gnocchi/indexer/__init__.py index 6b014d12..5cb11d1e 100644 --- a/gnocchi/indexer/__init__.py +++ b/gnocchi/indexer/__init__.py @@ -375,7 +375,7 @@ class IndexerDriver(object): raise exceptions.NotImplementedError @staticmethod - def list_metrics(names=None, ids=None, details=False, status='active', + def list_metrics(ids=None, details=False, status='active', limit=None, marker=None, sorts=None, attribute_filter=None): raise exceptions.NotImplementedError diff --git a/gnocchi/indexer/sqlalchemy.py b/gnocchi/indexer/sqlalchemy.py index 5f1172d7..147a1e21 100644 --- a/gnocchi/indexer/sqlalchemy.py +++ b/gnocchi/indexer/sqlalchemy.py @@ -686,19 +686,15 @@ class SQLAlchemyIndexer(indexer.IndexerDriver): return m @retry_on_deadlock - def list_metrics(self, names=None, ids=None, details=False, + def list_metrics(self, ids=None, details=False, status='active', limit=None, marker=None, sorts=None, attribute_filter=None): sorts = sorts or [] if ids is not None and not ids: return [] - if names is not None and not names: - return [] with self.facade.independent_reader() as session: q = session.query(Metric).filter( Metric.status == status) - if names is not None: - q = q.filter(Metric.name.in_(names)) if ids is not None: q = q.filter(Metric.id.in_(ids)) if details: diff --git a/gnocchi/rest/api.py b/gnocchi/rest/api.py index 5adc1439..2677c67a 100644 --- a/gnocchi/rest/api.py +++ b/gnocchi/rest/api.py @@ -1525,11 +1525,12 @@ class ResourcesMetricsMeasuresBatchController(rest.RestController): for original_resource_id, resource_id in body: body_by_rid[resource_id] = body[(original_resource_id, resource_id)] - names = body[(original_resource_id, resource_id)].keys() + names = list(body[(original_resource_id, resource_id)].keys()) metrics = pecan.request.indexer.list_metrics( - names=names, - attribute_filter={"=": {"resource_id": resource_id}}, - ) + attribute_filter={"and": [ + {"=": {"resource_id": resource_id}}, + {"in": {"name": names}}, + ]}) known_names = [m.name for m 
in metrics] if strtobool("create_metrics", create_metrics): @@ -1567,10 +1568,10 @@ class ResourcesMetricsMeasuresBatchController(rest.RestController): known_names.extend(already_exists_names) known_metrics.extend( pecan.request.indexer.list_metrics( - names=already_exists_names, - attribute_filter={"=": - {"resource_id": resource_id}}, - )) + attribute_filter={"and": [ + {"=": {"resource_id": resource_id}}, + {"in": {"name": already_exists_names}}, + ]})) elif len(names) != len(metrics): unknown_metrics.extend( -- GitLab From c145a5ba182b1c8bea57f35b57d28938f2e1c6e4 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Tue, 21 Nov 2017 16:03:35 +0100 Subject: [PATCH 1097/1483] indexer: replace ids argument with attribute filter in list_metrics() --- gnocchi/indexer/__init__.py | 2 +- gnocchi/indexer/sqlalchemy.py | 11 ++++------- gnocchi/rest/aggregates/api.py | 3 ++- gnocchi/rest/api.py | 8 +++++--- gnocchi/storage/__init__.py | 3 ++- gnocchi/tests/functional/fixtures.py | 3 ++- gnocchi/tests/test_indexer.py | 18 +++++++++++------- gnocchi/tests/test_storage.py | 6 +++--- 8 files changed, 30 insertions(+), 24 deletions(-) diff --git a/gnocchi/indexer/__init__.py b/gnocchi/indexer/__init__.py index 5cb11d1e..0018e98f 100644 --- a/gnocchi/indexer/__init__.py +++ b/gnocchi/indexer/__init__.py @@ -375,7 +375,7 @@ class IndexerDriver(object): raise exceptions.NotImplementedError @staticmethod - def list_metrics(ids=None, details=False, status='active', + def list_metrics(details=False, status='active', limit=None, marker=None, sorts=None, attribute_filter=None): raise exceptions.NotImplementedError diff --git a/gnocchi/indexer/sqlalchemy.py b/gnocchi/indexer/sqlalchemy.py index 147a1e21..32654654 100644 --- a/gnocchi/indexer/sqlalchemy.py +++ b/gnocchi/indexer/sqlalchemy.py @@ -686,17 +686,13 @@ class SQLAlchemyIndexer(indexer.IndexerDriver): return m @retry_on_deadlock - def list_metrics(self, ids=None, details=False, - status='active', limit=None, marker=None, 
sorts=None, + def list_metrics(self, details=False, status='active', + limit=None, marker=None, sorts=None, attribute_filter=None): sorts = sorts or [] - if ids is not None and not ids: - return [] with self.facade.independent_reader() as session: q = session.query(Metric).filter( Metric.status == status) - if ids is not None: - q = q.filter(Metric.id.in_(ids)) if details: q = q.options(sqlalchemy.orm.joinedload('resource')) if attribute_filter: @@ -713,7 +709,8 @@ class SQLAlchemyIndexer(indexer.IndexerDriver): sort_keys, sort_dirs = self._build_sort_keys(sorts, ['id']) if marker: - metric_marker = self.list_metrics(ids=[marker]) + metric_marker = self.list_metrics( + attribute_filter={"in": {"id": [marker]}}) if metric_marker: metric_marker = metric_marker[0] else: diff --git a/gnocchi/rest/aggregates/api.py b/gnocchi/rest/aggregates/api.py index abbf18ee..4e88566a 100644 --- a/gnocchi/rest/aggregates/api.py +++ b/gnocchi/rest/aggregates/api.py @@ -252,7 +252,8 @@ class AggregatesController(rest.RestController): "reason": six.text_type(e), "detail": references}) - metrics = pecan.request.indexer.list_metrics(ids=metric_ids) + metrics = pecan.request.indexer.list_metrics( + attribute_filter={"in": {"id": metric_ids}}) missing_metric_ids = (set(metric_ids) - set(six.text_type(m.id) for m in metrics)) if missing_metric_ids: diff --git a/gnocchi/rest/api.py b/gnocchi/rest/api.py index 2677c67a..e3c877ea 100644 --- a/gnocchi/rest/api.py +++ b/gnocchi/rest/api.py @@ -1468,7 +1468,7 @@ class SearchMetricController(rest.RestController): granularity = [utils.to_timespan(g) for g in arg_to_list(granularity or [])] metrics = pecan.request.indexer.list_metrics( - ids=arg_to_list(metric_id)) + attribute_filter={"in": {"id": arg_to_list(metric_id)}}) for metric in metrics: enforce("search metric", metric) @@ -1613,7 +1613,8 @@ class MetricsMeasuresBatchController(rest.RestController): @pecan.expose() def post(self): body = deserialize_and_validate(self.MeasuresBatchSchema) - 
metrics = pecan.request.indexer.list_metrics(ids=body.keys()) + metrics = pecan.request.indexer.list_metrics( + attribute_filter={"in": {"id": list(body.keys())}}) if len(metrics) != len(body): missing_metrics = sorted(set(body) - set(m.id for m in metrics)) @@ -1855,7 +1856,8 @@ class AggregationController(rest.RestController): metric_ids = [six.text_type(m) for m in metric_ids] # Check RBAC policy - metrics = pecan.request.indexer.list_metrics(ids=metric_ids) + metrics = pecan.request.indexer.list_metrics( + attribute_filter={"in": {"id": metric_ids}}) missing_metric_ids = (set(metric_ids) - set(six.text_type(m.id) for m in metrics)) if missing_metric_ids: diff --git a/gnocchi/storage/__init__.py b/gnocchi/storage/__init__.py index 90b784ae..67f68a20 100644 --- a/gnocchi/storage/__init__.py +++ b/gnocchi/storage/__init__.py @@ -458,7 +458,8 @@ class StorageDriver(object): """ # process only active metrics. deleted metrics with unprocessed # measures will be skipped until cleaned by janitor. 
- metrics = indexer.list_metrics(ids=metrics_to_process) + metrics = indexer.list_metrics( + attribute_filter={"in": {"id": metrics_to_process}}) for metric in metrics: # NOTE(gordc): must lock at sack level try: diff --git a/gnocchi/tests/functional/fixtures.py b/gnocchi/tests/functional/fixtures.py index 6bb1ed9b..97acbcb9 100644 --- a/gnocchi/tests/functional/fixtures.py +++ b/gnocchi/tests/functional/fixtures.py @@ -198,7 +198,8 @@ class MetricdThread(threading.Thread): def run(self): while self.flag: metrics = utils.list_all_incoming_metrics(self.incoming) - metrics = self.index.list_metrics(ids=metrics) + metrics = self.index.list_metrics( + attribute_filter={"in": {"id": metrics}}) for metric in metrics: self.storage.refresh_metric(self.index, self.incoming, diff --git a/gnocchi/tests/test_indexer.py b/gnocchi/tests/test_indexer.py index 6f661d09..a514d0aa 100644 --- a/gnocchi/tests/test_indexer.py +++ b/gnocchi/tests/test_indexer.py @@ -348,7 +348,8 @@ class TestIndexerDriver(tests_base.TestCase): self.assertRaises(indexer.NoSuchResource, self.index.delete_resource, r1) - metrics = self.index.list_metrics(ids=[e1, e2]) + metrics = self.index.list_metrics( + attribute_filter={"in": {"id": [e1, e2]}}) self.assertEqual([], metrics) def test_delete_resource_non_existent(self): @@ -1064,8 +1065,9 @@ class TestIndexerDriver(tests_base.TestCase): g2 = self.index.create_resource('generic', r2, creator, user, project, metrics=metrics) - metrics = self.index.list_metrics(ids=[g1['metrics'][0]['id'], - g2['metrics'][0]['id']]) + metrics = self.index.list_metrics( + attribute_filter={"in": {"id": [g1['metrics'][0]['id'], + g2['metrics'][0]['id']]}}) self.assertEqual(2, len(metrics)) for m in metrics: self.assertEqual('active', m['status']) @@ -1080,9 +1082,10 @@ class TestIndexerDriver(tests_base.TestCase): attribute_filter={"=": {"user_id": user}}) self.assertEqual(0, len(resources)) - metrics = self.index.list_metrics(ids=[g1['metrics'][0]['id'], - 
g2['metrics'][0]['id']], - status='delete') + metrics = self.index.list_metrics( + attribute_filter={"in": {"id": [g1['metrics'][0]['id'], + g2['metrics'][0]['id']]}}, + status='delete') self.assertEqual(2, len(metrics)) for m in metrics: self.assertEqual('delete', m['status']) @@ -1122,7 +1125,8 @@ class TestIndexerDriver(tests_base.TestCase): attribute_filter={"=": {"id": e1}})) def test_get_metric_empty_list_uuids(self): - self.assertEqual([], self.index.list_metrics(ids=[])) + self.assertEqual([], self.index.list_metrics( + attribute_filter={"in": {"id": []}})) def test_list_metrics(self): e1 = uuid.uuid4() diff --git a/gnocchi/tests/test_storage.py b/gnocchi/tests/test_storage.py index 837e66b6..41d55254 100644 --- a/gnocchi/tests/test_storage.py +++ b/gnocchi/tests/test_storage.py @@ -884,7 +884,7 @@ class TestStorageDriver(tests_base.TestCase): ap = archive_policy.ArchivePolicy(name, 0, [(3, 5)]) self.index.create_archive_policy(ap) m = self.index.create_metric(uuid.uuid4(), str(uuid.uuid4()), name) - m = self.index.list_metrics(ids=[m.id])[0] + m = self.index.list_metrics(attribute_filter={"=": {"id": m.id}})[0] self.incoming.add_measures(m, [ incoming.Measure(datetime64(2014, 1, 1, 12, 0, 0), 1), incoming.Measure(datetime64(2014, 1, 1, 12, 0, 5), 1), @@ -899,7 +899,7 @@ class TestStorageDriver(tests_base.TestCase): # expand to more points self.index.update_archive_policy( name, [archive_policy.ArchivePolicyItem(granularity=5, points=6)]) - m = self.index.list_metrics(ids=[m.id])[0] + m = self.index.list_metrics(attribute_filter={"=": {"id": m.id}})[0] self.incoming.add_measures(m, [ incoming.Measure(datetime64(2014, 1, 1, 12, 0, 15), 1), ]) @@ -913,7 +913,7 @@ class TestStorageDriver(tests_base.TestCase): # shrink timespan self.index.update_archive_policy( name, [archive_policy.ArchivePolicyItem(granularity=5, points=2)]) - m = self.index.list_metrics(ids=[m.id])[0] + m = self.index.list_metrics(attribute_filter={"=": {"id": m.id}})[0] self.assertEqual([ 
(datetime64(2014, 1, 1, 12, 0, 10), numpy.timedelta64(5, 's'), 1), (datetime64(2014, 1, 1, 12, 0, 15), numpy.timedelta64(5, 's'), 1), -- GitLab From 9beb7cf75969a5c496c7fafd7f4ee03ce3c05af7 Mon Sep 17 00:00:00 2001 From: James Page Date: Tue, 28 Nov 2017 14:58:22 +0000 Subject: [PATCH 1098/1483] py3: pass a bytes type object to tooz coordinator Ensure that a bytes type id is passed into tooz otherwise metricd will fail to join the coordination group due to an error concatenating the GROUP_ID with a str in tooz when used with the memcached driver. --- gnocchi/cli/metricd.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/gnocchi/cli/metricd.py b/gnocchi/cli/metricd.py index c59c7764..b3e2b5de 100644 --- a/gnocchi/cli/metricd.py +++ b/gnocchi/cli/metricd.py @@ -132,7 +132,7 @@ class MetricReporting(MetricProcessBase): class MetricProcessor(MetricProcessBase): name = "processing" - GROUP_ID = "gnocchi-processing" + GROUP_ID = b"gnocchi-processing" def __init__(self, worker_id, conf): super(MetricProcessor, self).__init__( @@ -162,7 +162,8 @@ class MetricProcessor(MetricProcessBase): try: self.partitioner = self.coord.join_partitioned_group( self.GROUP_ID, partitions=200) - LOG.info('Joined coordination group: %s', self.GROUP_ID) + LOG.info('Joined coordination group: %s', + self.GROUP_ID.decode()) except tooz.NotImplemented: LOG.warning('Coordinator does not support partitioning. Worker ' 'will battle against other workers for jobs.') -- GitLab From 70c4246aa96be45ee9118411c106c932b8468124 Mon Sep 17 00:00:00 2001 From: James Page Date: Tue, 28 Nov 2017 14:58:22 +0000 Subject: [PATCH 1099/1483] py3: pass a bytes type object to tooz coordinator Ensure that a bytes type id is passed into tooz otherwise metricd will fail to join the coordination group due to an error concatenating the GROUP_ID with a str in tooz when used with the memcached driver. 
(cherry picked from commit 9beb7cf75969a5c496c7fafd7f4ee03ce3c05af7) --- gnocchi/cli.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/gnocchi/cli.py b/gnocchi/cli.py index a79a74f5..7b14607f 100644 --- a/gnocchi/cli.py +++ b/gnocchi/cli.py @@ -181,7 +181,7 @@ class MetricReporting(MetricProcessBase): class MetricProcessor(MetricProcessBase): name = "processing" - GROUP_ID = "gnocchi-processing" + GROUP_ID = b"gnocchi-processing" def __init__(self, worker_id, conf): super(MetricProcessor, self).__init__( @@ -207,7 +207,7 @@ class MetricProcessor(MetricProcessBase): try: self.partitioner = self.coord.join_partitioned_group( self.GROUP_ID, partitions=200) - LOG.info('Joined coordination group: %s', self.GROUP_ID) + LOG.info('Joined coordination group: %s', self.GROUP_ID.decode()) @periodics.periodic(spacing=self.conf.metricd.worker_sync_rate, run_immediately=True) -- GitLab From cd19a5f3c750bb8c9471ef2f4450f09d839bbedc Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Wed, 29 Nov 2017 16:41:31 +0100 Subject: [PATCH 1100/1483] redis: add documentation for the url Closes #520 --- gnocchi/common/redis.py | 49 ++++++++++++++++++++++++++++++++++------ gnocchi/opts.py | 4 ++-- gnocchi/storage/redis.py | 8 ------- 3 files changed, 44 insertions(+), 17 deletions(-) diff --git a/gnocchi/common/redis.py b/gnocchi/common/redis.py index 114fc252..d7980405 100644 --- a/gnocchi/common/redis.py +++ b/gnocchi/common/redis.py @@ -16,6 +16,7 @@ from __future__ import absolute_import +from oslo_config import cfg from six.moves.urllib import parse try: @@ -43,13 +44,6 @@ CLIENT_ARGS = frozenset([ 'sentinel_fallback', ]) """ -Keys that we allow to proxy from the coordinator configuration into the -redis client (used to configure the redis client internals so that -it works as you expect/want it to). 
- -See: http://redis-py.readthedocs.org/en/latest/#redis.Redis - -See: https://github.com/andymccurdy/redis-py/blob/2.10.3/redis/client.py """ #: Client arguments that are expected/allowed to be lists. @@ -70,6 +64,47 @@ CLIENT_INT_ARGS = frozenset([ 'socket_timeout', ]) +OPTS = [ + cfg.StrOpt('redis_url', + default='redis://localhost:6379/', + help="""Redis URL + + For example:: + + redis://[:password]@localhost:6379?db=0 + + We proxy some options to the redis client (used to configure the redis client + internals so that it works as you expect/want it to): `%s` + + Further resources/links: + + - http://redis-py.readthedocs.org/en/latest/#redis.Redis + - https://github.com/andymccurdy/redis-py/blob/2.10.3/redis/client.py + + To use a `sentinel`_ the connection URI must point to the sentinel server. + At connection time the sentinel will be asked for the current IP and port + of the master and then connect there. The connection URI for sentinel + should be written as follows:: + + redis://:?sentinel= + + Additional sentinel hosts are listed with multiple ``sentinel_fallback`` + parameters as follows:: + + redis://:?sentinel=& + sentinel_fallback=:& + sentinel_fallback=:& + sentinel_fallback=: + + Further resources/links: + + - http://redis.io/ + - http://redis.io/topics/sentinel + - http://redis.io/topics/cluster-spec + +""" % "`, `".join(CLIENT_ARGS)), +] + def get_client(conf): if redis is None: diff --git a/gnocchi/opts.py b/gnocchi/opts.py index 7bfba932..aaad32ff 100644 --- a/gnocchi/opts.py +++ b/gnocchi/opts.py @@ -20,11 +20,11 @@ import uuid from oslo_config import cfg import gnocchi.archive_policy +import gnocchi.common.redis import gnocchi.indexer import gnocchi.storage import gnocchi.storage.ceph import gnocchi.storage.file -import gnocchi.storage.redis import gnocchi.storage.s3 import gnocchi.storage.swift @@ -48,7 +48,7 @@ _STORAGE_OPTS = list(itertools.chain(gnocchi.storage.OPTS, gnocchi.storage.ceph.OPTS, gnocchi.storage.file.OPTS, 
gnocchi.storage.swift.OPTS, - gnocchi.storage.redis.OPTS, + gnocchi.common.redis.OPTS, gnocchi.storage.s3.OPTS)) diff --git a/gnocchi/storage/redis.py b/gnocchi/storage/redis.py index 43414ecf..eeeb4dd6 100644 --- a/gnocchi/storage/redis.py +++ b/gnocchi/storage/redis.py @@ -13,20 +13,12 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. -from oslo_config import cfg from gnocchi.common import redis from gnocchi import storage from gnocchi import utils -OPTS = [ - cfg.StrOpt('redis_url', - default='redis://localhost:6379/', - help='Redis URL'), -] - - class RedisStorage(storage.StorageDriver): WRITE_FULL = True -- GitLab From 697bc413fbbe0a9a5fc320374a625b0fb5b3406e Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Tue, 28 Nov 2017 18:01:01 +0100 Subject: [PATCH 1101/1483] rest: load details on metric to apply ACL correctly The details on metrics need to be retrieved so the ACL can be correctly applied. 
Fixes #464 --- gnocchi/rest/api.py | 5 +---- gnocchi/tests/test_rest.py | 38 ++++++++++++++++++++++++++++++++++++++ 2 files changed, 39 insertions(+), 4 deletions(-) diff --git a/gnocchi/rest/api.py b/gnocchi/rest/api.py index e3c877ea..0955a708 100644 --- a/gnocchi/rest/api.py +++ b/gnocchi/rest/api.py @@ -710,11 +710,8 @@ class NamedMetricController(rest.RestController): @pecan.expose() def _lookup(self, name, *remainder): - # NOTE(sileht): We want detail only when we GET /metric/ - # and not for /metric//measures - details = pecan.request.method == 'GET' and len(remainder) == 0 m = pecan.request.indexer.list_metrics( - details=details, + details=True, attribute_filter={"and": [ {"=": {"name": name}}, {"=": {"resource_id": self.resource_id}}, diff --git a/gnocchi/tests/test_rest.py b/gnocchi/tests/test_rest.py index 7931f234..fc75b102 100644 --- a/gnocchi/tests/test_rest.py +++ b/gnocchi/tests/test_rest.py @@ -435,6 +435,44 @@ class MetricTest(RestTest): self.app.get("/v1/metric/%s/measures" % metric['id'], status=403) + def test_get_measures_with_another_user_allowed(self): + rid = str(uuid.uuid4()) + self.app.post_json("/v1/resource/generic", + params={ + "id": rid, + "project_id": TestingApp.PROJECT_ID_2, + "metrics": { + "disk": {"archive_policy_name": "low"}, + } + }) + measures_url = "/v1/resource/generic/%s/metric/disk/measures" % rid + self.app.post_json(measures_url, + params=[{"timestamp": '2013-01-01 23:23:23', + "value": 1234.2}]) + with self.app.use_another_user(): + result = self.app.get(measures_url) + self.assertEqual( + [['2013-01-01T00:00:00+00:00', 86400.0, 1234.2], + ['2013-01-01T23:00:00+00:00', 3600.0, 1234.2], + ['2013-01-01T23:20:00+00:00', 300.0, 1234.2]], + result.json) + + def test_get_measures_with_another_user_disallowed(self): + rid = str(uuid.uuid4()) + self.app.post_json("/v1/resource/generic", + params={ + "id": rid, + "metrics": { + "disk": {"archive_policy_name": "low"}, + } + }) + measures_url = 
"/v1/resource/generic/%s/metric/disk/measures" % rid + self.app.post_json(measures_url, + params=[{"timestamp": '2013-01-01 23:23:23', + "value": 1234.2}]) + with self.app.use_another_user(): + self.app.get(measures_url, status=403) + @mock.patch.object(utils, 'utcnow') def test_get_measure_start_relative(self, utcnow): """Make sure the timestamps can be relative to now.""" -- GitLab From 4da657622fc28d92786dfca3e8bf01d025984308 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Mon, 27 Nov 2017 13:46:15 +0100 Subject: [PATCH 1102/1483] api: add a metric policy filter mechanism Use the auth_helper plugin mechanism to filter metric listing rather than an hardcoded rule. The Keystone rule is identical than before and filters based on created_by_project_id fake-attribute. The basic/remoteuser rule is changed to mimic the one for resources, i.e., no filtering is enforced. --- gnocchi/rest/api.py | 16 ++++----- gnocchi/rest/auth_helper.py | 34 +++++++++++++++++++ gnocchi/rest/policy.json | 3 +- .../tests/functional/gabbits/metric-list.yaml | 2 +- 4 files changed, 42 insertions(+), 13 deletions(-) diff --git a/gnocchi/rest/api.py b/gnocchi/rest/api.py index 0955a708..f460ae3d 100644 --- a/gnocchi/rest/api.py +++ b/gnocchi/rest/api.py @@ -28,7 +28,6 @@ import six from six.moves.urllib import parse as urllib_parse import tenacity import voluptuous -import webob.exc import werkzeug.http from gnocchi import archive_policy @@ -655,15 +654,6 @@ class MetricsController(rest.RestController): + ":" + (provided_project_id or "") ) - try: - enforce("list all metric", {}) - except webob.exc.HTTPForbidden: - enforce("list metric", {}) - creator = pecan.request.auth_helper.get_current_user( - pecan.request) - if provided_creator and creator != provided_creator: - abort(403, "Insufficient privileges to filter by user/project") - provided_creator = creator pagination_opts = get_pagination_options(kwargs, METRIC_DEFAULT_PAGINATION) @@ -674,6 +664,12 @@ class 
MetricsController(rest.RestController): for k, v in six.iteritems(kwargs): attr_filters.append({"=": {k: v}}) + policy_filter = pecan.request.auth_helper.get_metric_policy_filter( + pecan.request, "list metric") + + if policy_filter: + attr_filters.append(policy_filter) + try: metrics = pecan.request.indexer.list_metrics( attribute_filter={"and": attr_filters}, diff --git a/gnocchi/rest/auth_helper.py b/gnocchi/rest/auth_helper.py index f04c955e..1752222d 100644 --- a/gnocchi/rest/auth_helper.py +++ b/gnocchi/rest/auth_helper.py @@ -85,6 +85,32 @@ class KeystoneAuthHelper(object): return {"or": policy_filter} + @staticmethod + def get_metric_policy_filter(request, rule): + try: + # Check if the policy allows the user to list any metric + api.enforce(rule, {}) + except webob.exc.HTTPForbidden: + policy_filter = [] + project_id = request.headers.get("X-Project-Id") + try: + # Check if the policy allows the user to list metrics linked + # to their created_by_project + api.enforce(rule, { + "created_by_project_id": project_id, + }) + except webob.exc.HTTPForbidden: + pass + else: + policy_filter.append( + {"like": {"creator": "%:" + project_id}}) + + if not policy_filter: + # We need to have at least one policy filter in place + api.abort(403, "Insufficient privileges") + + return {"or": policy_filter} + class BasicAuthHelper(object): @staticmethod @@ -109,6 +135,10 @@ class BasicAuthHelper(object): def get_resource_policy_filter(request, rule, resource_type): return None + @staticmethod + def get_metric_policy_filter(request, rule): + return None + class RemoteUserAuthHelper(object): @staticmethod @@ -131,3 +161,7 @@ class RemoteUserAuthHelper(object): @staticmethod def get_resource_policy_filter(request, rule, resource_type): return None + + @staticmethod + def get_metric_policy_filter(request, rule): + return None diff --git a/gnocchi/rest/policy.json b/gnocchi/rest/policy.json index b8ee9a5d..b747354d 100644 --- a/gnocchi/rest/policy.json +++ 
b/gnocchi/rest/policy.json @@ -35,8 +35,7 @@ "delete metric": "rule:admin_or_creator", "get metric": "rule:admin_or_creator or rule:metric_owner", "search metric": "rule:admin_or_creator or rule:metric_owner", - "list metric": "", - "list all metric": "role:admin", + "list metric": "rule:admin_or_creator or rule:metric_owner", "get measures": "rule:admin_or_creator or rule:metric_owner", "post measures": "rule:admin_or_creator" diff --git a/gnocchi/tests/functional/gabbits/metric-list.yaml b/gnocchi/tests/functional/gabbits/metric-list.yaml index 347e3c87..f71b2d10 100644 --- a/gnocchi/tests/functional/gabbits/metric-list.yaml +++ b/gnocchi/tests/functional/gabbits/metric-list.yaml @@ -89,7 +89,7 @@ tests: - name: list metrics GET: /v1/metric response_json_paths: - $.`len`: 2 + $.`len`: 4 - name: list metrics by id GET: /v1/metric?id=$HISTORY['create metric 1'].$RESPONSE['id'] -- GitLab From 5da88c648e338d21b782a8a36a69e873da6c04ae Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Thu, 9 Nov 2017 16:43:39 +0100 Subject: [PATCH 1103/1483] cli: use --http-socket rather than --http for uwsgi MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Using --http enables the HTTP router – which is not used here. That router prevents the chunked tranfer encoding to work. 
--- gnocchi/cli/api.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/gnocchi/cli/api.py b/gnocchi/cli/api.py index f501be88..f6bfb066 100644 --- a/gnocchi/cli/api.py +++ b/gnocchi/cli/api.py @@ -83,9 +83,8 @@ def api(): args = [ "--if-not-plugin", "python", "--plugin", "python", "--endif", - "--if-not-plugin", "http", "--plugin", "http", "--endif", - "--http", "%s:%d" % (conf.host or conf.api.host, - conf.port or conf.api.port), + "--http-socket", "%s:%d" % (conf.host or conf.api.host, + conf.port or conf.api.port), "--master", "--enable-threads", "--die-on-term", -- GitLab From f100dc29eb1b9875535a8e981bed1d175c464181 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Thu, 23 Nov 2017 09:45:25 +0100 Subject: [PATCH 1104/1483] rest: make Prometheus get_or_create_resource_and_metrics() static --- gnocchi/rest/api.py | 27 ++++++++++++++------------- 1 file changed, 14 insertions(+), 13 deletions(-) diff --git a/gnocchi/rest/api.py b/gnocchi/rest/api.py index f460ae3d..8f1ef968 100644 --- a/gnocchi/rest/api.py +++ b/gnocchi/rest/api.py @@ -1924,29 +1924,29 @@ class PrometheusWriteController(rest.RestController): } # Retry with exponential backoff for up to 1 minute - @classmethod + @staticmethod @tenacity.retry( wait=tenacity.wait_exponential(multiplier=0.5, max=60), retry=tenacity.retry_if_exception_type( (indexer.NoSuchResource, indexer.ResourceAlreadyExists, indexer.ResourceTypeAlreadyExists, indexer.NamedMetricAlreadyExists))) - def get_or_create_resource_and_metrics(cls, creator, rid, - original_resource_id, - job, instance, metric_names): + def get_or_create_resource_and_metrics( + creator, rid, original_resource_id, job, instance, metric_names, + resource_type, resource_type_attributes): try: - r = pecan.request.indexer.get_resource('prometheus', rid, + r = pecan.request.indexer.get_resource(resource_type, rid, with_metrics=True) except indexer.NoSuchResourceType: enforce("create resource type", { - 'name': 'prometheus', + 'name': 
resource_type, 'state': 'creating', - 'attributes': cls.PROMETHEUS_RESOURCE_TYPE + 'attributes': resource_type_attributes, }) schema = pecan.request.indexer.get_resource_type_schema() rt = schema.resource_type_from_dict( - 'prometheus', cls.PROMETHEUS_RESOURCE_TYPE, 'creating') + resource_type, resource_type_attributes, 'creating') pecan.request.indexer.create_resource_type(rt) raise tenacity.TryAgain except indexer.UnexpectedResourceTypeState as e: @@ -1963,7 +1963,7 @@ class PrometheusWriteController(rest.RestController): )) if metrics: return pecan.request.indexer.update_resource( - 'prometheus', rid, + resource_type, rid, metrics=metrics, append_metrics=True, create_revision=False @@ -1974,7 +1974,7 @@ class PrometheusWriteController(rest.RestController): metrics = MetricsSchema(dict((m, {}) for m in metric_names)) target = { "id": rid, - "resource_type": "prometheus", + "resource_type": resource_type, "creator": creator, "original_resource_id": original_resource_id, "job": job, @@ -1985,7 +1985,7 @@ class PrometheusWriteController(rest.RestController): try: return pecan.request.indexer.create_resource( - 'prometheus', rid, creator, + resource_type, rid, creator, original_resource_id=original_resource_id, job=job, instance=instance, @@ -1995,7 +1995,7 @@ class PrometheusWriteController(rest.RestController): # NOTE(sileht): ensure the rid is not registered whitin another # resource type. 
r = pecan.request.indexer.get_resource('generic', rid) - if r.type != 'prometheus': + if r.type != resource_type: abort(409, six.text_type(e)) raise @@ -2023,7 +2023,8 @@ class PrometheusWriteController(rest.RestController): rid = ResourceUUID(original_rid, creator=creator) metric_names = list(measures.keys()) metrics = self.get_or_create_resource_and_metrics( - creator, rid, original_rid, job, instance, metric_names) + creator, rid, original_rid, job, instance, metric_names, + "prometheus", self.PROMETHEUS_RESOURCE_TYPE) for metric in metrics: enforce("post measures", metric) -- GitLab From ff7ce7e361e748851f0ac36c4d399d1fa6a8ccae Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Thu, 23 Nov 2017 09:49:57 +0100 Subject: [PATCH 1105/1483] rest: make get_or_create_resource_and_metrics() resource type creation optional --- gnocchi/rest/api.py | 27 +++++++++++++++------------ 1 file changed, 15 insertions(+), 12 deletions(-) diff --git a/gnocchi/rest/api.py b/gnocchi/rest/api.py index 8f1ef968..655dc097 100644 --- a/gnocchi/rest/api.py +++ b/gnocchi/rest/api.py @@ -1933,22 +1933,25 @@ class PrometheusWriteController(rest.RestController): indexer.NamedMetricAlreadyExists))) def get_or_create_resource_and_metrics( creator, rid, original_resource_id, job, instance, metric_names, - resource_type, resource_type_attributes): + resource_type, resource_type_attributes=None): try: r = pecan.request.indexer.get_resource(resource_type, rid, with_metrics=True) except indexer.NoSuchResourceType: - enforce("create resource type", { - 'name': resource_type, - 'state': 'creating', - 'attributes': resource_type_attributes, - }) - - schema = pecan.request.indexer.get_resource_type_schema() - rt = schema.resource_type_from_dict( - resource_type, resource_type_attributes, 'creating') - pecan.request.indexer.create_resource_type(rt) - raise tenacity.TryAgain + if resource_type_attributes: + enforce("create resource type", { + 'name': resource_type, + 'state': 'creating', + 'attributes': 
resource_type_attributes, + }) + + schema = pecan.request.indexer.get_resource_type_schema() + rt = schema.resource_type_from_dict( + resource_type, resource_type_attributes, 'creating') + pecan.request.indexer.create_resource_type(rt) + raise tenacity.TryAgain + else: + raise except indexer.UnexpectedResourceTypeState as e: # NOTE(sileht): Currently created by another thread if not e.state.endswith("_error"): -- GitLab From 5abb349da774f0543c64ff1dce282e39f9bdef84 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Thu, 23 Nov 2017 10:35:43 +0100 Subject: [PATCH 1106/1483] rest: move get_or_create_resource_and_metrics out of the Prometheus class This is going to be used by other code. --- gnocchi/rest/api.py | 160 ++++++++++++++++++++++---------------------- 1 file changed, 80 insertions(+), 80 deletions(-) diff --git a/gnocchi/rest/api.py b/gnocchi/rest/api.py index 655dc097..76a0ab80 100644 --- a/gnocchi/rest/api.py +++ b/gnocchi/rest/api.py @@ -1910,6 +1910,85 @@ class BatchController(object): resources = ResourcesBatchController() +# Retry with exponential backoff for up to 1 minute +@tenacity.retry( + wait=tenacity.wait_exponential(multiplier=0.5, max=60), + retry=tenacity.retry_if_exception_type( + (indexer.NoSuchResource, indexer.ResourceAlreadyExists, + indexer.ResourceTypeAlreadyExists, + indexer.NamedMetricAlreadyExists))) +def get_or_create_resource_and_metrics( + creator, rid, original_resource_id, job, instance, metric_names, + resource_type, resource_type_attributes=None): + try: + r = pecan.request.indexer.get_resource(resource_type, rid, + with_metrics=True) + except indexer.NoSuchResourceType: + if resource_type_attributes: + enforce("create resource type", { + 'name': resource_type, + 'state': 'creating', + 'attributes': resource_type_attributes, + }) + + schema = pecan.request.indexer.get_resource_type_schema() + rt = schema.resource_type_from_dict( + resource_type, resource_type_attributes, 'creating') + 
pecan.request.indexer.create_resource_type(rt) + raise tenacity.TryAgain + else: + raise + except indexer.UnexpectedResourceTypeState as e: + # NOTE(sileht): Currently created by another thread + if not e.state.endswith("_error"): + raise tenacity.TryAgain + + if r: + enforce("update resource", r) + exists_metric_names = [m.name for m in r.metrics] + metrics = MetricsSchema(dict( + (m, {}) for m in metric_names + if m not in exists_metric_names + )) + if metrics: + return pecan.request.indexer.update_resource( + resource_type, rid, + metrics=metrics, + append_metrics=True, + create_revision=False + ).metrics + else: + return r.metrics + else: + metrics = MetricsSchema(dict((m, {}) for m in metric_names)) + target = { + "id": rid, + "resource_type": resource_type, + "creator": creator, + "original_resource_id": original_resource_id, + "job": job, + "instance": instance, + "metrics": metrics, + } + enforce("create resource", target) + + try: + return pecan.request.indexer.create_resource( + resource_type, rid, creator, + original_resource_id=original_resource_id, + job=job, + instance=instance, + metrics=metrics + ).metrics + except indexer.ResourceAlreadyExists: + # NOTE(sileht): ensure the rid is not registered whitin another + # resource type. 
+ r = pecan.request.indexer.get_resource('generic', rid) + if r.type != resource_type: + abort(409, six.text_type(e)) + raise + + class PrometheusWriteController(rest.RestController): PROMETHEUS_RESOURCE_TYPE = { @@ -1923,85 +2002,6 @@ class PrometheusWriteController(rest.RestController): "required": True} } - # Retry with exponential backoff for up to 1 minute - @staticmethod - @tenacity.retry( - wait=tenacity.wait_exponential(multiplier=0.5, max=60), - retry=tenacity.retry_if_exception_type( - (indexer.NoSuchResource, indexer.ResourceAlreadyExists, - indexer.ResourceTypeAlreadyExists, - indexer.NamedMetricAlreadyExists))) - def get_or_create_resource_and_metrics( - creator, rid, original_resource_id, job, instance, metric_names, - resource_type, resource_type_attributes=None): - try: - r = pecan.request.indexer.get_resource(resource_type, rid, - with_metrics=True) - except indexer.NoSuchResourceType: - if resource_type_attributes: - enforce("create resource type", { - 'name': resource_type, - 'state': 'creating', - 'attributes': resource_type_attributes, - }) - - schema = pecan.request.indexer.get_resource_type_schema() - rt = schema.resource_type_from_dict( - resource_type, resource_type_attributes, 'creating') - pecan.request.indexer.create_resource_type(rt) - raise tenacity.TryAgain - else: - raise - except indexer.UnexpectedResourceTypeState as e: - # NOTE(sileht): Currently created by another thread - if not e.state.endswith("_error"): - raise tenacity.TryAgain - - if r: - enforce("update resource", r) - exists_metric_names = [m.name for m in r.metrics] - metrics = MetricsSchema(dict( - (m, {}) for m in metric_names - if m not in exists_metric_names - )) - if metrics: - return pecan.request.indexer.update_resource( - resource_type, rid, - metrics=metrics, - append_metrics=True, - create_revision=False - ).metrics - else: - return r.metrics - else: - metrics = MetricsSchema(dict((m, {}) for m in metric_names)) - target = { - "id": rid, - "resource_type": 
resource_type, - "creator": creator, - "original_resource_id": original_resource_id, - "job": job, - "instance": instance, - "metrics": metrics, - } - enforce("create resource", target) - - try: - return pecan.request.indexer.create_resource( - resource_type, rid, creator, - original_resource_id=original_resource_id, - job=job, - instance=instance, - metrics=metrics - ).metrics - except indexer.ResourceAlreadyExists: - # NOTE(sileht): ensure the rid is not registered whitin another - # resource type. - r = pecan.request.indexer.get_resource('generic', rid) - if r.type != resource_type: - abort(409, six.text_type(e)) - raise - @pecan.expose() def post(self): buf = snappy.uncompress(pecan.request.body) @@ -2025,7 +2025,7 @@ class PrometheusWriteController(rest.RestController): original_rid = '%s@%s' % (job, instance) rid = ResourceUUID(original_rid, creator=creator) metric_names = list(measures.keys()) - metrics = self.get_or_create_resource_and_metrics( + metrics = get_or_create_resource_and_metrics( creator, rid, original_rid, job, instance, metric_names, "prometheus", self.PROMETHEUS_RESOURCE_TYPE) -- GitLab From b76c8fd2537a6dad374bea1f5eca28e63602b608 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Thu, 23 Nov 2017 10:40:06 +0100 Subject: [PATCH 1107/1483] rest: make specific attributes generic in get_or_create_resource_and_metrics --- gnocchi/rest/api.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/gnocchi/rest/api.py b/gnocchi/rest/api.py index 76a0ab80..d9005522 100644 --- a/gnocchi/rest/api.py +++ b/gnocchi/rest/api.py @@ -1918,7 +1918,8 @@ class BatchController(object): indexer.ResourceTypeAlreadyExists, indexer.NamedMetricAlreadyExists))) def get_or_create_resource_and_metrics( - creator, rid, original_resource_id, job, instance, metric_names, + creator, rid, original_resource_id, metric_names, + resource_attributes, resource_type, resource_type_attributes=None): try: r = 
pecan.request.indexer.get_resource(resource_type, rid, @@ -1966,19 +1967,17 @@ def get_or_create_resource_and_metrics( "resource_type": resource_type, "creator": creator, "original_resource_id": original_resource_id, - "job": job, - "instance": instance, "metrics": metrics, } + target.update(resource_attributes) enforce("create resource", target) try: return pecan.request.indexer.create_resource( resource_type, rid, creator, original_resource_id=original_resource_id, - job=job, - instance=instance, - metrics=metrics + metrics=metrics, + **resource_attributes, ).metrics except indexer.ResourceAlreadyExists: # NOTE(sileht): ensure the rid is not registered whitin another @@ -2026,7 +2025,8 @@ class PrometheusWriteController(rest.RestController): rid = ResourceUUID(original_rid, creator=creator) metric_names = list(measures.keys()) metrics = get_or_create_resource_and_metrics( - creator, rid, original_rid, job, instance, metric_names, + creator, rid, original_rid, metric_names, + dict(job=job, instance=instance), "prometheus", self.PROMETHEUS_RESOURCE_TYPE) for metric in metrics: -- GitLab From 136c05322f5ed11401f0cb06078acdbea7aa67e8 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Thu, 9 Nov 2017 16:47:35 +0100 Subject: [PATCH 1108/1483] api: rename refresh_timeout option to operation_timeout This timeout is going to be used as a more general timeout for operations that the API needs to do and that block for a long time. 
--- gnocchi/opts.py | 5 +++-- gnocchi/rest/api.py | 4 ++-- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/gnocchi/opts.py b/gnocchi/opts.py index aaad32ff..5b59d511 100644 --- a/gnocchi/opts.py +++ b/gnocchi/opts.py @@ -173,10 +173,11 @@ def list_opts(): required=True, help=('The maximum number of items returned in a ' 'single response from a collection resource')), - cfg.IntOpt('refresh_timeout', + cfg.IntOpt('operation_timeout', + deprecated_name="refresh_timeout", default=10, min=0, help='Number of seconds before timeout when attempting ' - 'to force refresh of metric.'), + 'to do some operations.'), ) + API_OPTS, ), ("storage", _STORAGE_OPTS), diff --git a/gnocchi/rest/api.py b/gnocchi/rest/api.py index d9005522..94685f36 100644 --- a/gnocchi/rest/api.py +++ b/gnocchi/rest/api.py @@ -499,7 +499,7 @@ class MetricController(rest.RestController): try: pecan.request.storage.refresh_metric( pecan.request.indexer, pecan.request.incoming, self.metric, - pecan.request.conf.api.refresh_timeout) + pecan.request.conf.api.operation_timeout) except storage.SackLockTimeoutError as e: abort(503, six.text_type(e)) try: @@ -1809,7 +1809,7 @@ class AggregationController(rest.RestController): try: pecan.request.storage.refresh_metric( pecan.request.indexer, pecan.request.incoming, m, - pecan.request.conf.api.refresh_timeout) + pecan.request.conf.api.operation_timeout) except storage.SackLockTimeoutError as e: abort(503, six.text_type(e)) if number_of_metrics == 1: -- GitLab From 3a51b86e384ff4ec625314259abe3298c5c8cbf3 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Thu, 9 Nov 2017 16:49:15 +0100 Subject: [PATCH 1109/1483] api: add an InfluxDB API endpoint This provides a new InfluxDB-compatible endpoint to write data to Gnocchi at `/v1/influxdb'. 
--- doc/source/index.rst | 2 + doc/source/influxdb.rst | 43 +++ gnocchi/incoming/__init__.py | 5 +- gnocchi/indexer/__init__.py | 10 + gnocchi/rest/api.py | 11 +- gnocchi/rest/influxdb.py | 267 ++++++++++++++++++ .../tests/functional/gabbits/influxdb.yaml | 111 ++++++++ .../tests/functional/gabbits/resource.yaml | 4 +- gnocchi/tests/test_influxdb.py | 198 +++++++++++++ .../influxdb-endpoint-13cbd82cf287d91c.yaml | 6 + requirements.txt | 2 +- 11 files changed, 650 insertions(+), 9 deletions(-) create mode 100644 doc/source/influxdb.rst create mode 100644 gnocchi/rest/influxdb.py create mode 100644 gnocchi/tests/functional/gabbits/influxdb.yaml create mode 100644 gnocchi/tests/test_influxdb.py create mode 100644 releasenotes/notes/influxdb-endpoint-13cbd82cf287d91c.yaml diff --git a/doc/source/index.rst b/doc/source/index.rst index fc44e6a1..bd285924 100644 --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -25,6 +25,7 @@ Gnocchi's main features are: - Nagios/Icinga support - Statsd protocol support - Collectd plugin support +- InfluxDB line protocol ingestion support Community --------- @@ -48,6 +49,7 @@ Documentation statsd grafana prometheus + influxdb nagios collectd alternatives diff --git a/doc/source/influxdb.rst b/doc/source/influxdb.rst new file mode 100644 index 00000000..10cb13d8 --- /dev/null +++ b/doc/source/influxdb.rst @@ -0,0 +1,43 @@ +============================ + InfluxDB ingestion support +============================ + +Gnocchi implements some part of the InfluxDB REST API. That allows tool that +are used to write to InfluxDB to write directly to Gnocchi instead, such as +`Telegraf`_. + +The endpoint is available at `/v1/influxdb`. It supports: + +* `GET /v1/influxdb/ping` +* `POST /v1/influxdb/query` where the only query that is handled is `CREATE + DATABASE `. That will create a new resource type named after the database + handle. +* `POST /v1/influxdb/write?db=`. 
The `db` parameter should be an existing + resource type that does not require any attributes to be set. The body should + follow the `InfluxDB line protocol`_. + +In order to map InfluxDB data to Gnocchi data model, the following +transformation happen when writing metrics: + +* For each measure sent, one of the tag value is used as the original resource + id. By default the `host` tag is used. This can be overriden by passing the + `X-Gnocchi-InfluxDB-Tag-Resource-ID` HTTP header. + +* The metric names associated to the resource have the format: + `.[@=,…]`. The tag are sorted + by keys. + + +Telegraf configuration +====================== + +In order to use `Telegraf`_ with Gnocchi, you can use the following +configuration example:: + + [[outputs.influxdb]] + urls = ["http://admin:localhost:8041/v1/influxdb"] + http_headers = {"X-Gnocchi-InfluxDB-Tag-Resource-ID" = "host"} + + +.. _`Telegraf`: https://github.com/influxdata/telegraf +.. _`InfluxDB line protocol`: https://docs.influxdata.com/influxdb/v1.3/write_protocols/line_protocol_reference/ diff --git a/gnocchi/incoming/__init__.py b/gnocchi/incoming/__init__.py index 0e76a2b1..f3c66c58 100644 --- a/gnocchi/incoming/__init__.py +++ b/gnocchi/incoming/__init__.py @@ -118,8 +118,9 @@ class IncomingDriver(object): def add_measures_batch(self, metrics_and_measures): """Add a batch of measures for some metrics. - :param metrics_and_measures: A dict where keys - are metrics and value are measure. + :param metrics_and_measures: A dict where keys are metric objects + and values are a list of + :py:class:`gnocchi.incoming.Measure`. 
""" utils.parallel_map( self._store_new_measures, diff --git a/gnocchi/indexer/__init__.py b/gnocchi/indexer/__init__.py index 0018e98f..20a18940 100644 --- a/gnocchi/indexer/__init__.py +++ b/gnocchi/indexer/__init__.py @@ -115,6 +115,12 @@ class NoSuchResourceType(IndexerException): "Resource type %s does not exist" % type) self.type = type + def jsonify(self): + return { + "cause": "Resource type does not exist", + "detail": self.type, + } + class NoSuchMetric(IndexerException): """Error raised when a metric does not exist.""" @@ -224,6 +230,10 @@ class ResourceAlreadyExists(IndexerException): "Resource %s already exists" % resource) self.resource = resource + def jsonify(self): + return {"cause": "Resource already exists", + "detail": self.resource} + class ResourceTypeAlreadyExists(IndexerException): """Error raised when a resource type already exists.""" diff --git a/gnocchi/rest/api.py b/gnocchi/rest/api.py index 94685f36..02052229 100644 --- a/gnocchi/rest/api.py +++ b/gnocchi/rest/api.py @@ -1972,12 +1972,13 @@ def get_or_create_resource_and_metrics( target.update(resource_attributes) enforce("create resource", target) + kwargs = resource_attributes # no copy used since not used after + kwargs['metrics'] = metrics + kwargs['original_resource_id'] = original_resource_id + try: return pecan.request.indexer.create_resource( - resource_type, rid, creator, - original_resource_id=original_resource_id, - metrics=metrics, - **resource_attributes, + resource_type, rid, creator, **kwargs ).metrics except indexer.ResourceAlreadyExists: # NOTE(sileht): ensure the rid is not registered whitin another @@ -2049,6 +2050,7 @@ class V1Controller(object): def __init__(self): # FIXME(sileht): split controllers to avoid lazy loading from gnocchi.rest.aggregates import api as agg_api + from gnocchi.rest import influxdb self.sub_controllers = { "search": SearchController(), @@ -2062,6 +2064,7 @@ class V1Controller(object): "capabilities": CapabilityController(), "status": 
StatusController(), "aggregates": agg_api.AggregatesController(), + "influxdb": influxdb.InfluxDBController(), } for name, ctrl in self.sub_controllers.items(): setattr(self, name, ctrl) diff --git a/gnocchi/rest/influxdb.py b/gnocchi/rest/influxdb.py new file mode 100644 index 00000000..37d00c53 --- /dev/null +++ b/gnocchi/rest/influxdb.py @@ -0,0 +1,267 @@ +# -*- encoding: utf-8 -*- +# +# Copyright © 2017 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +import collections +import time + +from gnocchi import incoming +from gnocchi import indexer +from gnocchi.rest import api +from gnocchi import utils + +import daiquiri +import numpy +import pbr.version +import pecan +from pecan import rest +import pyparsing +import six +import tenacity +try: + import uwsgi +except ImportError: + uwsgi = None + + +LOG = daiquiri.getLogger(__name__) + + +boolean = "False|True|false|true|FALSE|TRUE|F|T|f|t" +boolean = pyparsing.Regex(boolean).setParseAction( + lambda t: t[0].lower()[0] == "t") + +quoted_string = pyparsing.QuotedString('"', escChar="\\") +unquoted_string = pyparsing.OneOrMore( + pyparsing.CharsNotIn(" ,=\\") + + pyparsing.Optional( + pyparsing.OneOrMore( + (pyparsing.Literal("\\ ") | + pyparsing.Literal("\\,") | + pyparsing.Literal("\\=") | + pyparsing.Literal("\\")).setParseAction( + lambda s, loc, tok: tok[0][-1])))).setParseAction( + lambda s, loc, tok: "".join(list(tok))) +measurement = tag_key = tag_value = field_key = quoted_string | unquoted_string 
+number = r"[+-]?\d+(:?\.\d*)?(:?[eE][+-]?\d+)?" +number = pyparsing.Regex(number).setParseAction( + lambda s, loc, tok: float(tok[0])) +integer = ( + pyparsing.Word(pyparsing.nums).setParseAction( + lambda s, loc, tok: int(tok[0])) + + pyparsing.Suppress("i") + ) +field_value = integer | number | quoted_string +timestamp = pyparsing.Word(pyparsing.nums).setParseAction( + lambda s, loc, tok: numpy.datetime64(int(tok[0]), 'ns')) + +line_protocol = ( + measurement + + # Tags + pyparsing.Optional(pyparsing.Suppress(",") + + pyparsing.delimitedList( + pyparsing.OneOrMore( + pyparsing.Group( + tag_key + + pyparsing.Suppress("=") + + tag_value), ",")).setParseAction( + lambda s, loc, tok: dict(list(tok))), + default={}) + + pyparsing.Suppress(" ") + + # Fields + pyparsing.delimitedList( + pyparsing.OneOrMore( + pyparsing.Group(field_key + + pyparsing.Suppress("=") + + field_value), ",")).setParseAction( + lambda s, loc, tok: dict(list(tok))) + + # Timestamp + pyparsing.Optional(pyparsing.Suppress(" ") + timestamp, default=None) +).leaveWhitespace() + + +query_parser = ( + pyparsing.Suppress(pyparsing.CaselessLiteral("create")) + + pyparsing.Suppress(pyparsing.CaselessLiteral("database")) + + pyparsing.Suppress(pyparsing.White()) + + (pyparsing.QuotedString('"', escChar="\\") | + pyparsing.Word(pyparsing.alphas + "_", + pyparsing.alphanums + "_")) + + pyparsing.Suppress( + pyparsing.Optional(pyparsing.Optional(pyparsing.White()) + + pyparsing.Optional(pyparsing.Literal(";")))) +) + + +class InfluxDBController(rest.RestController): + _custom_actions = { + 'ping': ['HEAD', 'GET'], + 'query': ['POST'], + 'write': ['POST'], + } + + DEFAULT_TAG_RESOURCE_ID = "host" + + @pecan.expose() + def ping(self): + pecan.response.headers['X-Influxdb-Version'] = ( + "Gnocchi " + pbr.version.VersionInfo('gnocchi').version_string() + ) + + @pecan.expose('json') + def post_query(self, q=None): + if q is not None: + try: + query = query_parser.parseString(q) + except pyparsing.ParseException: 
+ api.abort(501, {"cause": "Not implemented error", + "detail": "q", + "reason": "Query not implemented"}) + resource_type = query[0] + api.enforce("create resource type", {"name": resource_type}) + schema = pecan.request.indexer.get_resource_type_schema() + rt = schema.resource_type_from_dict(resource_type, {}, 'creating') + try: + pecan.request.indexer.create_resource_type(rt) + except indexer.ResourceTypeAlreadyExists: + pass + pecan.response.status = 204 + + @staticmethod + def _write_get_lines(): + encoding = pecan.request.headers.get('Transfer-Encoding', "").lower() + if encoding == "chunked": + if uwsgi is None: + api.abort( + 501, {"cause": "Not implemented error", + "reason": "This server is not running with uwsgi"}) + return encoding, uwsgi.chunked_read() + return None, pecan.request.body + + @pecan.expose('json') + def post_write(self, db="influxdb"): + + creator = pecan.request.auth_helper.get_current_user(pecan.request) + tag_to_rid = pecan.request.headers.get( + "X-Gnocchi-InfluxDB-Tag-Resource-ID", + self.DEFAULT_TAG_RESOURCE_ID) + + while True: + encoding, chunk = self._write_get_lines() + + # If chunk is empty then this is over. 
+ if not chunk: + break + + # Compute now on a per-chunk basis + now = numpy.datetime64(int(time.time() * 10e8), 'ns') + + # resources = { resource_id: { + # metric_name: [ incoming.Measure(t, v), …], … + # }, … + # } + resources = collections.defaultdict( + lambda: collections.defaultdict(list)) + for line_number, line in enumerate(chunk.split(b"\n")): + # Ignore empty lines + if not line: + continue + + try: + measurement, tags, fields, timestamp = ( + line_protocol.parseString(line.decode()) + ) + except (UnicodeDecodeError, SyntaxError, + pyparsing.ParseException): + api.abort(400, { + "cause": "Value error", + "detail": "line", + "reason": "Unable to parse line %d" % ( + line_number + 1), + }) + + if timestamp is None: + timestamp = now + + try: + resource_id = tags.pop(tag_to_rid) + except KeyError: + api.abort(400, { + "cause": "Value error", + "detail": "key", + "reason": "Unable to find key `%s' in tags" % ( + tag_to_rid), + }) + + tags_str = (("@" if tags else "") + + ",".join(("%s=%s" % (k, tags[k])) + for k in sorted(tags))) + + for field_name, field_value in six.iteritems(fields): + if isinstance(field_value, str): + # We do not support field value that are not numerical + continue + + # Metric name is the: + # .@=,… + # with tag ordered + # Replace "/" with "_" because Gnocchi does not support / + # in metric names + metric_name = ( + measurement + "." 
+ field_name + tags_str + ).replace("/", "_") + + resources[resource_id][metric_name].append( + incoming.Measure(timestamp, field_value)) + + measures_to_batch = {} + for resource_name, metrics_and_measures in six.iteritems( + resources): + resource_name = resource_name + resource_id = utils.ResourceUUID( + resource_name, creator=creator) + LOG.debug("Getting metrics from resource `%s'", resource_name) + timeout = pecan.request.conf.api.operation_timeout + try: + metrics = ( + api.get_or_create_resource_and_metrics.retry_with( + stop=tenacity.stop_after_delay(timeout))( + creator, resource_id, resource_name, + metrics_and_measures.keys(), + {}, db) + ) + except indexer.ResourceAlreadyExists as e: + # If this function raises ResourceAlreadyExists it means + # the resource might already exist as another type, we + # can't continue. + LOG.error("Unable to create resource `%s' for InfluxDB, " + "it might already exists as another " + "resource type than `%s'", resource_name, db) + api.abort(400, e) + + for metric in metrics: + api.enforce("post measures", metric) + + measures_to_batch.update( + dict((metric, metrics_and_measures[metric.name]) + for metric in metrics + if metric.name in metrics_and_measures)) + + LOG.debug("Add measures batch for %d metrics", + len(measures_to_batch)) + pecan.request.incoming.add_measures_batch(measures_to_batch) + pecan.response.status = 204 + + if encoding != "chunked": + return diff --git a/gnocchi/tests/functional/gabbits/influxdb.yaml b/gnocchi/tests/functional/gabbits/influxdb.yaml new file mode 100644 index 00000000..8c6b32ac --- /dev/null +++ b/gnocchi/tests/functional/gabbits/influxdb.yaml @@ -0,0 +1,111 @@ +# Tests for the InfluxDB compatibility layer + +fixtures: + - ConfigFixture + +defaults: + request_headers: + # User admin + authorization: "basic YWRtaW46" + content-type: application/json + +tests: + - name: ping influxdb status with head + desc: test HEAD on ping – xfails because Pecan does not honor HEAD correctly yet + 
xfail: true + HEAD: /v1/influxdb/ping + status: 204 + + - name: ping influxdb status with get + GET: /v1/influxdb/ping + status: 204 + + - name: create a database + POST: /v1/influxdb/query?q=create+database+influxdbtest + status: 204 + + - name: check the resource type now exists + GET: /v1/resource_type/influxdbtest + status: 200 + response_json_paths: + $: + name: influxdbtest + attributes: {} + state: active + + - name: do an unrecognized query + POST: /v1/influxdb/query?q=select+metrics+plz + request_headers: + # This is useful to get the error in JSON format + accept: application/json + status: 501 + response_json_paths: + $.description.cause: Not implemented error + $.description.detail: q + $.description.reason: Query not implemented + + - name: create archive policy + POST: /v1/archive_policy + data: + name: low + definition: + - granularity: 1 hour + status: 201 + + - name: create archive policy for influxdb + POST: /v1/archive_policy_rule + data: + name: influxdb + metric_pattern: "*" + archive_policy_name: low + status: 201 + + - name: write a line + POST: /v1/influxdb/write?db=influxdbtest + request_headers: + content-type: text/plain + data: + "mymetric,host=foobar,mytag=myvalue field=123 1510581804179554816" + status: 204 + + - name: check resource created + GET: /v1/resource/influxdbtest/foobar + status: 200 + response_json_paths: + $.original_resource_id: foobar + $.id: b4d568e4-7af1-5aec-ac3f-9c09fa3685a9 + $.type: influxdbtest + $.creator: admin + + - name: check metric created + GET: /v1/resource/influxdbtest/foobar/metric/mymetric.field@mytag=myvalue + + - name: check measures processed + GET: /v1/resource/influxdbtest/foobar/metric/mymetric.field@mytag=myvalue/measures?refresh=true + response_json_paths: + $: + - ["2017-11-13T14:00:00+00:00", 3600.0, 123.0] + + - name: write lines with different tag resource id + POST: /v1/influxdb/write?db=influxdbtest + request_headers: + content-type: text/plain + X-Gnocchi-InfluxDB-Tag-Resource-ID: mytag + 
data: + "mymetric,host=foobar,mytag=myvalue field=123 1510581804179554816\ncpu,path=/foobar,mytag=myvalue field=43i 1510581804179554816" + status: 204 + + - name: check resource created with different resource id + GET: /v1/resource/influxdbtest/myvalue + status: 200 + response_json_paths: + $.original_resource_id: myvalue + $.id: 6b9e2039-98d0-5d8d-9153-2d7491cf13e5 + $.type: influxdbtest + $.creator: admin + + - name: check metric created different tag resource id + GET: /v1/resource/influxdbtest/myvalue/metric/mymetric.field@host=foobar + + - name: check metric created different tag resource id and slash replaced + GET: /v1/resource/influxdbtest/myvalue/metric/cpu.field@path=_foobar diff --git a/gnocchi/tests/functional/gabbits/resource.yaml b/gnocchi/tests/functional/gabbits/resource.yaml index d8267f1a..d3d01231 100644 --- a/gnocchi/tests/functional/gabbits/resource.yaml +++ b/gnocchi/tests/functional/gabbits/resource.yaml @@ -58,9 +58,9 @@ tests: redirects: true response_json_paths: $.version: "1.0" - $.links.`len`: 12 + $.links.`len`: 13 $.links[0].href: $SCHEME://$NETLOC/v1 - $.links[8].href: $SCHEME://$NETLOC/v1/resource + $.links[9].href: $SCHEME://$NETLOC/v1/resource - name: root of resource GET: /v1/resource diff --git a/gnocchi/tests/test_influxdb.py b/gnocchi/tests/test_influxdb.py new file mode 100644 index 00000000..5bc801bb --- /dev/null +++ b/gnocchi/tests/test_influxdb.py @@ -0,0 +1,198 @@ +# -*- encoding: utf-8 -*- +# +# Copyright © 2017 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. +import numpy +import pyparsing + +from gnocchi.rest import influxdb +from gnocchi.tests import base + + +class TestInfluxDBLineProtocol(base.BaseTestCase): + def test_line_protocol_parser_ok(self): + lines = ( + ('cpu,cpu=cpu2,host=abydos usage_system=11.1,usage_idle=73.2,usage_nice=0,usage_irq=0,usage_user=15.7,usage_softirq=0,usage_steal=0,usage_guest=0,usage_guest_nice=0,usage_iowait=0 1510150170000000000', # noqa + ['cpu', + {'host': 'abydos', + 'cpu': 'cpu2'}, + {'usage_guest': 0.0, + 'usage_nice': 0.0, + 'usage_steal': 0.0, + 'usage_iowait': 0.0, + 'usage_user': 15.7, + 'usage_idle': 73.2, + 'usage_softirq': 0.0, + 'usage_guest_nice': 0.0, + 'usage_irq': 0.0, + 'usage_system': 11.1}, + numpy.datetime64('2017-11-08T14:09:30.000000000')]), + ('cpu,cpu=cpu-total,host=abydos usage_idle=79.2198049512378,usage_nice=0,usage_iowait=0,usage_steal=0,usage_guest=0,usage_guest_nice=0,usage_system=9.202300575143786,usage_irq=0,usage_softirq=0,usage_user=11.577894473618404 1510150170000000000', # noqa + ['cpu', + {'cpu': 'cpu-total', + 'host': 'abydos'}, + {'usage_guest': 0.0, + 'usage_guest_nice': 0.0, + 'usage_idle': 79.2198049512378, + 'usage_iowait': 0.0, + 'usage_irq': 0.0, + 'usage_nice': 0.0, + 'usage_softirq': 0.0, + 'usage_steal': 0.0, + 'usage_system': 9.202300575143786, + 'usage_user': 11.577894473618404}, + numpy.datetime64('2017-11-08T14:09:30.000000000')]), + ('diskio,name=disk0,host=abydos io_time=11020501i,iops_in_progress=0i,read_bytes=413847966208i,read_time=9816308i,write_time=1204193i,weighted_io_time=0i,reads=33523907i,writes=7321123i,write_bytes=141510539264i 1510150170000000000', # noqa + ['diskio', + {'host': 'abydos', + 'name': 'disk0'}, + {'io_time': 11020501, + 'iops_in_progress': 0, + 'read_bytes': 413847966208, + 'read_time': 9816308, + 'reads': 33523907, + 'weighted_io_time': 0, + 'write_bytes': 141510539264, + 'write_time': 1204193, + 
'writes': 7321123}, + numpy.datetime64('2017-11-08T14:09:30.000000000')]), + ('disk,path=/,device=disk1s1,fstype=apfs,host=abydos total=250140434432i,free=28950695936i,used=216213557248i,used_percent=88.19130621205531,inodes_total=9223372036854775807i,inodes_free=9223372036850748963i,inodes_used=4026844i 1510150170000000000', # noqa + ['disk', + {'device': 'disk1s1', 'fstype': 'apfs', + 'host': 'abydos', 'path': '/'}, + {'free': 28950695936, + 'inodes_free': 9223372036850748963, + 'inodes_total': 9223372036854775807, + 'inodes_used': 4026844, + 'total': 250140434432, + 'used': 216213557248, + 'used_percent': 88.19130621205531}, + numpy.datetime64('2017-11-08T14:09:30.000000000')]), + ('mem,host=abydos free=16195584i,available_percent=24.886322021484375,used=6452215808i,cached=0i,buffered=0i,active=2122153984i,inactive=2121523200i,used_percent=75.11367797851562,total=8589934592i,available=2137718784i 1510150170000000000', # noqa + ['mem', + {'host': 'abydos'}, + {'active': 2122153984, + 'available': 2137718784, + 'available_percent': 24.886322021484375, + 'buffered': 0, + 'cached': 0, + 'free': 16195584, + 'inactive': 2121523200, + 'total': 8589934592, + 'used': 6452215808, + 'used_percent': 75.11367797851562}, + numpy.datetime64('2017-11-08T14:09:30.000000000')]), + ('disk,path=/private/var/vm,device=disk1s4,fstype=apfs,host=abydos inodes_total=9223372036854775807i,inodes_free=9223372036854775803i,inodes_used=4i,total=250140434432i,free=28950695936i,used=4296265728i,used_percent=12.922280752806417 1510150170000000000', # noqa + ['disk', + {'device': 'disk1s4', + 'fstype': 'apfs', + 'host': 'abydos', + 'path': '/private/var/vm'}, + {'free': 28950695936, + 'inodes_free': 9223372036854775803, + 'inodes_total': 9223372036854775807, + 'inodes_used': 4, + 'total': 250140434432, + 'used': 4296265728, + 'used_percent': 12.922280752806417}, + numpy.datetime64('2017-11-08T14:09:30.000000000')]), + ('swap,host=abydos 
used=2689073152i,free=532152320i,used_percent=83.47981770833334,total=3221225472i 1510150170000000000', # noqa + ['swap', + {'host': 'abydos'}, + {'free': 532152320, + 'total': 3221225472, + 'used': 2689073152, + 'used_percent': 83.47981770833334}, + numpy.datetime64('2017-11-08T14:09:30.000000000')]), + ('swap,host=abydos in=0i,out=0i 1510150170000000000', + ['swap', + {'host': 'abydos'}, + {'in': 0, 'out': 0}, + numpy.datetime64('2017-11-08T14:09:30.000000000')]), + ('processes,host=abydos stopped=0i,running=2i,sleeping=379i,total=382i,unknown=0i,idle=0i,blocked=1i,zombies=0i 1510150170000000000', # noqa + ['processes', + {'host': 'abydos'}, + {'blocked': 1, + 'idle': 0, + 'running': 2, + 'sleeping': 379, + 'stopped': 0, + 'total': 382, + 'unknown': 0, + 'zombies': 0}, + numpy.datetime64('2017-11-08T14:09:30.000000000')]), + ('system,host=abydos load5=3.02,load15=3.31,n_users=1i,n_cpus=4i,load1=2.18 1510150170000000000', # noqa + ['system', + {'host': 'abydos'}, + {'load1': 2.18, + 'load15': 3.31, + 'load5': 3.02, + 'n_cpus': 4, + 'n_users': 1}, + numpy.datetime64('2017-11-08T14:09:30.000000000')]), + ('system,host=abydos uptime=337369i,uptime_format="3 days, 21:42" 1510150170000000000', # noqa + ['system', + {'host': 'abydos'}, + {'uptime': 337369, 'uptime_format': '3 days, 21:42'}, + numpy.datetime64('2017-11-08T14:09:30.000000000')]), + ('notag up=1 123234', + ['notag', + {}, + {'up': 1.0}, + numpy.datetime64('1970-01-01T00:00:00.000123234')]), + ('notag up=3 ', ['notag', {}, {'up': 3.0}, None]), + ) + for line, result in lines: + parsed = list(influxdb.line_protocol.parseString(line)) + self.assertEqual(result, parsed) + + def test_line_protocol_parser_fail(self): + lines = ( + "measurement, field=1", + "measurement, field=1 123", + "measurement,tag=value 123", + "measurement,tag=value , 123", + "measurement,tag=value 123", + ",tag=value 123", + "foobar,tag=value field=string 123", + ) + for line in lines: + self.assertRaises(pyparsing.ParseException, + 
influxdb.line_protocol.parseString, + line) + + def test_query_parser_ok(self): + lines = ( + "CREATE DATABASE foobar;", + "CREATE DATABASE foobar ;", + "CREATE DATABASE foobar ;;;", + "CrEaTe Database foobar", + "create Database foobar", + ) + for line in lines: + parsed = list(influxdb.query_parser.parseString(line))[0] + self.assertEqual("foobar", parsed) + + def test_query_parser_fail(self): + lines = ( + "SELECT", + "hey yo foobar;", + "help database foobar;", + "something weird", + "create stuff foobar", + ) + for line in lines: + self.assertRaises(pyparsing.ParseException, + influxdb.query_parser.parseString, + line) diff --git a/releasenotes/notes/influxdb-endpoint-13cbd82cf287d91c.yaml b/releasenotes/notes/influxdb-endpoint-13cbd82cf287d91c.yaml new file mode 100644 index 00000000..75bb6d2f --- /dev/null +++ b/releasenotes/notes/influxdb-endpoint-13cbd82cf287d91c.yaml @@ -0,0 +1,6 @@ +--- +features: + - | + Gnocchi now provides a new `/v1/influxdb` endpoint that allows to ingest + data from InfluxDB clients. Only write is implemented. This should ease + transition of users coming from InfluxDB tools such as Telegraf. diff --git a/requirements.txt b/requirements.txt index 1a936099..233f14e4 100644 --- a/requirements.txt +++ b/requirements.txt @@ -16,7 +16,7 @@ ujson voluptuous werkzeug trollius; python_version < '3.4' -tenacity>=4.2.0 # Apache-2.0 +tenacity>=4.6.0 WebOb>=1.4.1 Paste PasteDeploy -- GitLab From 9e7bd2aa56ccb9e4f7705d2b869660e1f4d40662 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Wed, 22 Nov 2017 09:12:02 +0100 Subject: [PATCH 1110/1483] aggregates: return resources/metrics only once We currently returns all references, but when many of them point to the same resource, we output each time the resource. This change removes this resource duplication from the output. 
--- gnocchi/indexer/__init__.py | 2 + gnocchi/rest/aggregates/api.py | 5 +- gnocchi/rest/aggregates/processor.py | 6 -- .../gabbits/aggregates-with-metric-ids.yaml | 92 ++++++++++--------- .../gabbits/aggregates-with-resources.yaml | 66 +++++++++---- 5 files changed, 101 insertions(+), 70 deletions(-) diff --git a/gnocchi/indexer/__init__.py b/gnocchi/indexer/__init__.py index 20a18940..8c69af64 100644 --- a/gnocchi/indexer/__init__.py +++ b/gnocchi/indexer/__init__.py @@ -69,6 +69,8 @@ class Resource(object): return self.revision_start.replace(microsecond=0, tzinfo=iso8601.iso8601.UTC) + __hash__ = object.__hash__ + class Metric(object): def __init__(self, id, archive_policy, creator=None, diff --git a/gnocchi/rest/aggregates/api.py b/gnocchi/rest/aggregates/api.py index 4e88566a..7246399a 100644 --- a/gnocchi/rest/aggregates/api.py +++ b/gnocchi/rest/aggregates/api.py @@ -278,7 +278,8 @@ class AggregatesController(rest.RestController): start, stop, granularity, needed_overlap, fill) } if details: - response["references"] = references + response["references"] = metrics + return response def _get_measures_by_name(self, resources, metric_names, operations, @@ -301,5 +302,5 @@ class AggregatesController(rest.RestController): needed_overlap, fill) } if details: - response["references"] = references + response["references"] = set((r.resource for r in references)) return response diff --git a/gnocchi/rest/aggregates/processor.py b/gnocchi/rest/aggregates/processor.py index f170a71e..128a9996 100644 --- a/gnocchi/rest/aggregates/processor.py +++ b/gnocchi/rest/aggregates/processor.py @@ -45,12 +45,6 @@ class MetricReference(object): self.lookup_key = [self.name, self.aggregation] - def jsonify(self): - if self.resource: - return self.resource - else: - return self.metric - def __eq__(self, other): return (self.metric == other.metric and self.resource == other.resource and diff --git a/gnocchi/tests/functional/gabbits/aggregates-with-metric-ids.yaml 
b/gnocchi/tests/functional/gabbits/aggregates-with-metric-ids.yaml index 8d8fd209..fe739c0a 100644 --- a/gnocchi/tests/functional/gabbits/aggregates-with-metric-ids.yaml +++ b/gnocchi/tests/functional/gabbits/aggregates-with-metric-ids.yaml @@ -36,24 +36,28 @@ tests: - name: create metric1 POST: /v1/metric data: + name: metric1 archive_policy_name: cookies status: 201 - - name: create metric3 + - name: create metric2 POST: /v1/metric data: - archive_policy_name: cake + name: metric2 + archive_policy_name: cookies status: 201 - - name: create metric2 + - name: create metric3 POST: /v1/metric data: - archive_policy_name: cookies + name: metric3 + archive_policy_name: cake status: 201 - name: create metric4 POST: /v1/metric data: + name: metric4 archive_policy_name: cookies status: 201 @@ -170,10 +174,10 @@ tests: response_json_paths: $.`len`: 2 $.references.`len`: 2 - $.references[0].id: $HISTORY['create metric1'].$RESPONSE['$.id'] - $.references[1].id: $HISTORY['create metric2'].$RESPONSE['$.id'] - $.references[0].archive_policy.name: cookies - $.references[1].archive_policy.name: cookies + $.references[/name][0].id: $HISTORY['create metric1'].$RESPONSE['$.id'] + $.references[/name][1].id: $HISTORY['create metric2'].$RESPONSE['$.id'] + $.references[/name][0].archive_policy.name: cookies + $.references[/name][1].archive_policy.name: cookies $.measures."$HISTORY['create metric1'].$RESPONSE['$.id']".mean: - ["2015-03-06T14:33:00+00:00", 60.0, 43.1] - ["2015-03-06T14:34:00+00:00", 60.0, -2.0] @@ -203,10 +207,10 @@ tests: operations: ["metric", ["$HISTORY['create metric1'].$RESPONSE['$.id']", "mean"], ["$HISTORY['create metric2'].$RESPONSE['$.id']", "mean"]] response_json_paths: $.references.`len`: 2 - $.references[0].id: $HISTORY['create metric1'].$RESPONSE['$.id'] - $.references[1].id: $HISTORY['create metric2'].$RESPONSE['$.id'] - $.references[0].archive_policy.name: cookies - $.references[1].archive_policy.name: cookies + $.references[/name][0].id: $HISTORY['create 
metric1'].$RESPONSE['$.id'] + $.references[/name][1].id: $HISTORY['create metric2'].$RESPONSE['$.id'] + $.references[/name][0].archive_policy.name: cookies + $.references[/name][1].archive_policy.name: cookies $.measures."$HISTORY['create metric1'].$RESPONSE['$.id']".mean: - ["2015-03-06T14:34:00+00:00", 60.0, -2.0] - ["2015-03-06T14:35:00+00:00", 60.0, 10.0] @@ -226,10 +230,10 @@ tests: operations: ["metric", ["$HISTORY['create metric1'].$RESPONSE['$.id']", "max"], ["$HISTORY['create metric2'].$RESPONSE['$.id']", "min"]] response_json_paths: $.references.`len`: 2 - $.references[0].id: $HISTORY['create metric1'].$RESPONSE['$.id'] - $.references[1].id: $HISTORY['create metric2'].$RESPONSE['$.id'] - $.references[0].archive_policy.name: cookies - $.references[1].archive_policy.name: cookies + $.references[/name][0].id: $HISTORY['create metric1'].$RESPONSE['$.id'] + $.references[/name][1].id: $HISTORY['create metric2'].$RESPONSE['$.id'] + $.references[/name][0].archive_policy.name: cookies + $.references[/name][1].archive_policy.name: cookies $.measures."$HISTORY['create metric1'].$RESPONSE['$.id']".max: - ["2015-03-06T14:33:00+00:00", 60.0, 43.1] - ["2015-03-06T14:34:00+00:00", 60.0, 12.0] @@ -244,10 +248,10 @@ tests: data: operations: ["+", ["metric", ["$HISTORY['create metric1'].$RESPONSE['$.id']", "mean"], ["$HISTORY['create metric2'].$RESPONSE['$.id']", "mean"]], 2.0] response_json_paths: - $.references[0].id: $HISTORY['create metric1'].$RESPONSE['$.id'] - $.references[1].id: $HISTORY['create metric2'].$RESPONSE['$.id'] - $.references[0].archive_policy.name: cookies - $.references[1].archive_policy.name: cookies + $.references[/name][0].id: $HISTORY['create metric1'].$RESPONSE['$.id'] + $.references[/name][1].id: $HISTORY['create metric2'].$RESPONSE['$.id'] + $.references[/name][0].archive_policy.name: cookies + $.references[/name][1].archive_policy.name: cookies $.measures."$HISTORY['create metric1'].$RESPONSE['$.id']".mean: - ["2015-03-06T14:33:00+00:00", 60.0, 
45.1] - ["2015-03-06T14:34:00+00:00", 60.0, 0.0] @@ -277,10 +281,10 @@ tests: - ["metric", ["$HISTORY['create metric1'].$RESPONSE['$.id']", "mean"], ["$HISTORY['create metric2'].$RESPONSE['$.id']", "mean"]] response_json_paths: $.references.`len`: 2 - $.references[0].id: $HISTORY['create metric1'].$RESPONSE['$.id'] - $.references[1].id: $HISTORY['create metric2'].$RESPONSE['$.id'] - $.references[0].archive_policy.name: cookies - $.references[1].archive_policy.name: cookies + $.references[/name][0].id: $HISTORY['create metric1'].$RESPONSE['$.id'] + $.references[/name][1].id: $HISTORY['create metric2'].$RESPONSE['$.id'] + $.references[/name][0].archive_policy.name: cookies + $.references[/name][1].archive_policy.name: cookies $.measures."$HISTORY['create metric1'].$RESPONSE['$.id']".mean: - ["2015-03-06T14:33:00+00:00", 60.0, 43.1] - ["2015-03-06T14:34:00+00:00", 60.0, -2.0] @@ -300,10 +304,10 @@ tests: - ["metric", ["$HISTORY['create metric1'].$RESPONSE['$.id']", "mean"], ["$HISTORY['create metric2'].$RESPONSE['$.id']", "mean"]] response_json_paths: $.references.`len`: 2 - $.references[0].id: $HISTORY['create metric1'].$RESPONSE['$.id'] - $.references[1].id: $HISTORY['create metric2'].$RESPONSE['$.id'] - $.references[0].archive_policy.name: cookies - $.references[1].archive_policy.name: cookies + $.references[/name][0].id: $HISTORY['create metric1'].$RESPONSE['$.id'] + $.references[/name][1].id: $HISTORY['create metric2'].$RESPONSE['$.id'] + $.references[/name][0].archive_policy.name: cookies + $.references[/name][1].archive_policy.name: cookies $.measures."$HISTORY['create metric1'].$RESPONSE['$.id']".mean: - ["2015-03-06T14:34:12+00:00", 1.0, 27.55] - ["2015-03-06T14:34:15+00:00", 1.0, -2.0] @@ -322,7 +326,7 @@ tests: operations: "(metric $HISTORY['create metric1'].$RESPONSE['$.id'] mean)" response_json_paths: $.references.`len`: 1 - $.references[0].id: $HISTORY['create metric1'].$RESPONSE['$.id'] + $.references[/name][0].id: $HISTORY['create 
metric1'].$RESPONSE['$.id'] $.measures."$HISTORY['create metric1'].$RESPONSE['$.id']".mean: - ["2015-03-06T14:33:00+00:00", 60.0, 43.1] - ["2015-03-06T14:34:00+00:00", 60.0, -2.0] @@ -339,7 +343,7 @@ tests: operations: "(aggregate mean (metric $HISTORY['create metric1'].$RESPONSE['$.id'] mean))" response_json_paths: $.references.`len`: 1 - $.references[0].id: $HISTORY['create metric1'].$RESPONSE['$.id'] + $.references[/name][0].id: $HISTORY['create metric1'].$RESPONSE['$.id'] $.measures.aggregated: - ["2015-03-06T14:33:00+00:00", 60.0, 43.1] - ["2015-03-06T14:34:00+00:00", 60.0, -2.0] @@ -356,8 +360,8 @@ tests: operations: "(+ (metric ($HISTORY['create metric1'].$RESPONSE['$.id'] mean) ($HISTORY['create metric2'].$RESPONSE['$.id'] mean)) 2.0)" response_json_paths: $.references.`len`: 2 - $.references[0].id: $HISTORY['create metric1'].$RESPONSE['$.id'] - $.references[1].id: $HISTORY['create metric2'].$RESPONSE['$.id'] + $.references[/name][0].id: $HISTORY['create metric1'].$RESPONSE['$.id'] + $.references[/name][1].id: $HISTORY['create metric2'].$RESPONSE['$.id'] $.measures."$HISTORY['create metric1'].$RESPONSE['$.id']".mean: - ["2015-03-06T14:33:00+00:00", 60.0, 45.1] - ["2015-03-06T14:34:00+00:00", 60.0, 0.0] @@ -383,8 +387,8 @@ tests: operations: "(- (metric $HISTORY['create metric1'].$RESPONSE['$.id'] mean) (metric $HISTORY['create metric2'].$RESPONSE['$.id'] mean)))" response_json_paths: $.references.`len`: 2 - $.references[0].id: $HISTORY['create metric1'].$RESPONSE['$.id'] - $.references[1].id: $HISTORY['create metric2'].$RESPONSE['$.id'] + $.references[/name][0].id: $HISTORY['create metric1'].$RESPONSE['$.id'] + $.references[/name][1].id: $HISTORY['create metric2'].$RESPONSE['$.id'] $.measures.aggregated: - ["2015-03-06T14:33:00+00:00", 60.0, 41.1] - ["2015-03-06T14:34:00+00:00", 60.0, -6.5] @@ -401,8 +405,8 @@ tests: operations: "(aggregate mean (metric ($HISTORY['create metric1'].$RESPONSE['$.id'] mean) ($HISTORY['create metric2'].$RESPONSE['$.id'] 
mean)))" response_json_paths: $.references.`len`: 2 - $.references[0].id: $HISTORY['create metric1'].$RESPONSE['$.id'] - $.references[1].id: $HISTORY['create metric2'].$RESPONSE['$.id'] + $.references[/name][0].id: $HISTORY['create metric1'].$RESPONSE['$.id'] + $.references[/name][1].id: $HISTORY['create metric2'].$RESPONSE['$.id'] $.measures.aggregated: - ["2015-03-06T14:33:00+00:00", 60.0, 22.55] - ["2015-03-06T14:34:00+00:00", 60.0, 1.25] @@ -419,8 +423,8 @@ tests: operations: "(negative (absolute (aggregate mean (metric ($HISTORY['create metric1'].$RESPONSE['$.id'] mean) ($HISTORY['create metric2'].$RESPONSE['$.id'] mean)))))" response_json_paths: $.references.`len`: 2 - $.references[0].id: $HISTORY['create metric1'].$RESPONSE['$.id'] - $.references[1].id: $HISTORY['create metric2'].$RESPONSE['$.id'] + $.references[/name][0].id: $HISTORY['create metric1'].$RESPONSE['$.id'] + $.references[/name][1].id: $HISTORY['create metric2'].$RESPONSE['$.id'] $.measures.aggregated: - ["2015-03-06T14:33:00+00:00", 60.0, -22.55] - ["2015-03-06T14:34:00+00:00", 60.0, -1.25] @@ -450,8 +454,8 @@ tests: operations: "(metric ($HISTORY['create metric1'].$RESPONSE['$.id'] mean) ($HISTORY['create metric2'].$RESPONSE['$.id'] mean))" response_json_paths: $.references.`len`: 2 - $.references[0].id: $HISTORY['create metric1'].$RESPONSE['$.id'] - $.references[1].id: $HISTORY['create metric2'].$RESPONSE['$.id'] + $.references[/name][0].id: $HISTORY['create metric1'].$RESPONSE['$.id'] + $.references[/name][1].id: $HISTORY['create metric2'].$RESPONSE['$.id'] $.measures."$HISTORY['create metric1'].$RESPONSE['$.id']".mean: - ["2015-03-06T14:33:00+00:00", 60.0, 43.1] - ["2015-03-06T14:34:00+00:00", 60.0, -2.0] @@ -485,8 +489,8 @@ tests: operations: "(metric ($HISTORY['create metric1'].$RESPONSE['$.id'] mean) ($HISTORY['create metric4'].$RESPONSE['$.id'] mean))" response_json_paths: $.references.`len`: 2 - $.references[0].id: $HISTORY['create metric1'].$RESPONSE['$.id'] - $.references[1].id: 
$HISTORY['create metric4'].$RESPONSE['$.id'] + $.references[/name][0].id: $HISTORY['create metric1'].$RESPONSE['$.id'] + $.references[/name][1].id: $HISTORY['create metric4'].$RESPONSE['$.id'] $.measures."$HISTORY['create metric1'].$RESPONSE['$.id']".mean: - ["2015-03-06T14:33:00+00:00", 60.0, 43.1] - ["2015-03-06T14:34:00+00:00", 60.0, -2.0] @@ -512,8 +516,8 @@ tests: operations: "(metric ($HISTORY['create metric1'].$RESPONSE['$.id'] mean) ($HISTORY['create metric4'].$RESPONSE['$.id'] mean))" response_json_paths: $.references.`len`: 2 - $.references[0].id: $HISTORY['create metric1'].$RESPONSE['$.id'] - $.references[1].id: $HISTORY['create metric4'].$RESPONSE['$.id'] + $.references[/name][0].id: $HISTORY['create metric1'].$RESPONSE['$.id'] + $.references[/name][1].id: $HISTORY['create metric4'].$RESPONSE['$.id'] $.measures."$HISTORY['create metric1'].$RESPONSE['$.id']".mean: - ["2015-03-06T14:33:00+00:00", 60.0, 43.1] - ["2015-03-06T14:34:00+00:00", 60.0, -2.0] diff --git a/gnocchi/tests/functional/gabbits/aggregates-with-resources.yaml b/gnocchi/tests/functional/gabbits/aggregates-with-resources.yaml index 384f1f32..715c92f2 100644 --- a/gnocchi/tests/functional/gabbits/aggregates-with-resources.yaml +++ b/gnocchi/tests/functional/gabbits/aggregates-with-resources.yaml @@ -36,16 +36,18 @@ tests: - name: create resource 1 POST: /v1/resource/generic data: - id: 4ed9c196-4c9f-4ba8-a5be-c9a71a82aac4 + id: 1ed9c196-4c9f-4ba8-a5be-c9a71a82aac4 user_id: 6c865dd0-7945-4e08-8b27-d0d7f1c2b667 project_id: c7f32f1f-c5ef-427a-8ecd-915b219c66e8 metrics: cpu.util: archive_policy_name: low + cpu.idle: + archive_policy_name: low status: 201 - name: post cpuutil measures 1 - POST: /v1/resource/generic/4ed9c196-4c9f-4ba8-a5be-c9a71a82aac4/metric/cpu.util/measures + POST: /v1/resource/generic/1ed9c196-4c9f-4ba8-a5be-c9a71a82aac4/metric/cpu.util/measures data: - timestamp: "2015-03-06T14:33:57" value: 43.1 @@ -56,7 +58,7 @@ tests: - name: create resource 2 POST: /v1/resource/generic 
data: - id: 1447CD7E-48A6-4C50-A991-6677CC0D00E6 + id: 2447CD7E-48A6-4C50-A991-6677CC0D00E6 user_id: 6c865dd0-7945-4e08-8b27-d0d7f1c2b667 project_id: c7f32f1f-c5ef-427a-8ecd-915b219c66e8 metrics: @@ -65,7 +67,7 @@ tests: status: 201 - name: post cpuutil measures 2 - POST: /v1/resource/generic/1447CD7E-48A6-4C50-A991-6677CC0D00E6/metric/cpu.util/measures + POST: /v1/resource/generic/2447CD7E-48A6-4C50-A991-6677CC0D00E6/metric/cpu.util/measures data: - timestamp: "2015-03-06T14:33:57" value: 23 @@ -96,7 +98,7 @@ tests: - name: create resource 4 POST: /v1/resource/generic data: - id: b1409ec6-3909-4b37-bbff-f9a5448fe328 + id: 41409ec6-3909-4b37-bbff-f9a5448fe328 user_id: 70b5b732-9d81-4dfb-a8a1-a424ef3eae6b project_id: ee4cfc41-1cdc-4d2f-9a08-f94111d80171 metrics: @@ -105,7 +107,7 @@ tests: status: 201 - name: post cpuutil measures 4 - POST: /v1/resource/generic/b1409ec6-3909-4b37-bbff-f9a5448fe328/metric/cpu.util/measures + POST: /v1/resource/generic/41409ec6-3909-4b37-bbff-f9a5448fe328/metric/cpu.util/measures data: - timestamp: "2015-03-06T14:33:57" value: 230 @@ -127,15 +129,43 @@ tests: delay: 1 response_json_paths: $.references.`len`: 3 - $.references[0]: $HISTORY['list resources'].$RESPONSE['$[0]'] - $.references[1]: $HISTORY['list resources'].$RESPONSE['$[1]'] - $.references[2]: $HISTORY['list resources'].$RESPONSE['$[2]'] + $.references[/id].[0]: $HISTORY['list resources'].$RESPONSE['$[0]'] + $.references[/id].[1]: $HISTORY['list resources'].$RESPONSE['$[1]'] + $.references[/id].[2]: $HISTORY['list resources'].$RESPONSE['$[2]'] $.measures.aggregated: - ['2015-03-06T14:30:00+00:00', 300.0, 60.251666666666665] - ['2015-03-06T14:33:57+00:00', 1.0, 98.7] - ['2015-03-06T14:34:12+00:00', 1.0, 21.80333333333333] - - name: batch get + - name: batch get list + POST: /v1/aggregates?details=true + data: + resource_type: generic + search: "user_id = '6c865dd0-7945-4e08-8b27-d0d7f1c2b667'" + operations: "(metric (cpu.util mean) (cpu.idle mean))" + poll: + count: 10 + 
delay: 1 + response_json_paths: + $.references.`len`: 3 + $.references[/id].[0]: $HISTORY['list resources'].$RESPONSE['$[0]'] + $.references[/id].[1]: $HISTORY['list resources'].$RESPONSE['$[1]'] + $.references[/id].[2]: $HISTORY['list resources'].$RESPONSE['$[2]'] + $.measures."$HISTORY['list resources'].$RESPONSE['$[0].id']"."cpu.idle".mean: [] + $.measures."$HISTORY['list resources'].$RESPONSE['$[0].id']"."cpu.util".mean: + - ['2015-03-06T14:30:00+00:00', 300.0, 27.55] + - ['2015-03-06T14:33:57+00:00', 1.0, 43.1] + - ['2015-03-06T14:34:12+00:00', 1.0, 12.0] + $.measures."$HISTORY['list resources'].$RESPONSE['$[1].id']"."cpu.util".mean: + - ['2015-03-06T14:30:00+00:00', 300.0, 15.5] + - ['2015-03-06T14:33:57+00:00', 1.0, 23.0] + - ['2015-03-06T14:34:12+00:00', 1.0, 8.0] + $.measures."$HISTORY['list resources'].$RESPONSE['$[2].id']"."cpu.util".mean: + - ['2015-03-06T14:30:00+00:00', 300.0, 137.70499999999998] + - ['2015-03-06T14:33:57+00:00', 1.0, 230.0] + - ['2015-03-06T14:34:12+00:00', 1.0, 45.41] + + - name: batch get solo POST: /v1/aggregates?details=true data: resource_type: generic @@ -146,9 +176,9 @@ tests: delay: 1 response_json_paths: $.references.`len`: 3 - $.references[0]: $HISTORY['list resources'].$RESPONSE['$[0]'] - $.references[1]: $HISTORY['list resources'].$RESPONSE['$[1]'] - $.references[2]: $HISTORY['list resources'].$RESPONSE['$[2]'] + $.references[/id].[0]: $HISTORY['list resources'].$RESPONSE['$[0]'] + $.references[/id].[1]: $HISTORY['list resources'].$RESPONSE['$[1]'] + $.references[/id].[2]: $HISTORY['list resources'].$RESPONSE['$[2]'] $.measures."$HISTORY['list resources'].$RESPONSE['$[0].id']"."cpu.util".mean: - ['2015-03-06T14:30:00+00:00', 300.0, 27.55] - ['2015-03-06T14:33:57+00:00', 1.0, 43.1] @@ -166,14 +196,14 @@ tests: POST: /v1/aggregates?details=true data: resource_type: generic - search: "id = '4ed9c196-4c9f-4ba8-a5be-c9a71a82aac4'" + search: "id = '1ed9c196-4c9f-4ba8-a5be-c9a71a82aac4'" operations: "(metric (cpu.util mean) 
(cpu.util mean))" poll: count: 10 delay: 1 response_json_paths: $.references.`len`: 1 - $.references[0]: $HISTORY['list resources'].$RESPONSE['$[0]'] + $.references[/id].[0]: $HISTORY['list resources'].$RESPONSE['$[0]'] $.measures."$HISTORY['list resources'].$RESPONSE['$[0].id']"."cpu.util".mean: - ['2015-03-06T14:30:00+00:00', 300.0, 27.55] - ['2015-03-06T14:33:57+00:00', 1.0, 43.1] @@ -188,8 +218,8 @@ tests: response_json_paths: $.`len`: 2 $[0].measures.references.`len`: 2 - $[0].measures.references[0]: $HISTORY['list resources'].$RESPONSE['$[1]'] - $[0].measures.references[1]: $HISTORY['list resources'].$RESPONSE['$[0]'] + $[0].measures.references[/id].[0]: $HISTORY['list resources'].$RESPONSE['$[0]'] + $[0].measures.references[/id].[1]: $HISTORY['list resources'].$RESPONSE['$[1]'] $[0].measures.measures.aggregated: - ['2015-03-06T14:30:00+00:00', 300.0, 21.525] - ['2015-03-06T14:33:57+00:00', 1.0, 33.05] @@ -198,7 +228,7 @@ tests: user_id: 6c865dd0-7945-4e08-8b27-d0d7f1c2b667 project_id: c7f32f1f-c5ef-427a-8ecd-915b219c66e8 $[1].measures.references.`len`: 1 - $[1].measures.references[0]: $HISTORY['list resources'].$RESPONSE['$[2]'] + $[1].measures.references[/id].[0]: $HISTORY['list resources'].$RESPONSE['$[2]'] $[1].measures.measures.aggregated: - ['2015-03-06T14:30:00+00:00', 300.0, 137.70499999999998] - ['2015-03-06T14:33:57+00:00', 1.0, 230.0] -- GitLab From 545820845c7dce731eed42030dd008336b244fc8 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Fri, 1 Dec 2017 15:57:38 +0100 Subject: [PATCH 1111/1483] aggregates: catch indexer exception Closes #521 --- gnocchi/rest/aggregates/api.py | 11 +++++++---- .../gabbits/aggregates-with-resources.yaml | 15 +++++++++++++++ 2 files changed, 22 insertions(+), 4 deletions(-) diff --git a/gnocchi/rest/aggregates/api.py b/gnocchi/rest/aggregates/api.py index 7246399a..ffad342f 100644 --- a/gnocchi/rest/aggregates/api.py +++ b/gnocchi/rest/aggregates/api.py @@ -221,10 +221,13 @@ class 
AggregatesController(rest.RestController): groupby = sorted(set(api.arg_to_list(groupby))) sorts = groupby if groupby else api.RESOURCE_DEFAULT_PAGINATION - resources = pecan.request.indexer.list_resources( - body["resource_type"], - attribute_filter=attr_filter, - sorts=sorts) + try: + resources = pecan.request.indexer.list_resources( + body["resource_type"], + attribute_filter=attr_filter, + sorts=sorts) + except indexer.IndexerException as e: + api.abort(400, six.text_type(e)) if not groupby: return self._get_measures_by_name( resources, references, body["operations"], start, stop, diff --git a/gnocchi/tests/functional/gabbits/aggregates-with-resources.yaml b/gnocchi/tests/functional/gabbits/aggregates-with-resources.yaml index 715c92f2..b2ab79bb 100644 --- a/gnocchi/tests/functional/gabbits/aggregates-with-resources.yaml +++ b/gnocchi/tests/functional/gabbits/aggregates-with-resources.yaml @@ -272,3 +272,18 @@ tests: $.description.detail.`sorted`: - foobar - notexists + + - name: invalid groupby attribute + POST: /v1/aggregates?groupby=unit + request_headers: + accept: application/json + content-type: application/json + authorization: "basic Zm9vYmFyOg==" + data: + resource_type: generic + search: "id = '1ed9c196-4c9f-4ba8-a5be-c9a71a82aac4'" + operations: "(metric (cpu.util mean) (cpu.util mean))" + status: 400 + response_json_paths: + $.code: 400 + $.description: "Invalid pagination: `Sort key supplied is invalid: unit'" -- GitLab From 77d1f3b29af325c7f07ba0371ea789d0bccb2311 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Fri, 1 Dec 2017 10:19:03 +0100 Subject: [PATCH 1112/1483] Only install futures on Python 2 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit New futures release breaks entirely on Python 3: futures requires Python '>=2.6, <3' but the running Python is 3.5.2 Anyway it's a good idea to not install on Python 3. 
--- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 233f14e4..a31ccc30 100644 --- a/requirements.txt +++ b/requirements.txt @@ -7,7 +7,7 @@ oslo.middleware>=3.22.0 pytimeparse scipy>=0.18.1 # BSD pecan>=0.9 -futures +futures; python_version < '3' jsonpatch cotyledon>=1.5.0 six -- GitLab From 4ca1f172edc58e1b1ecdd7653cfb028d321f33ba Mon Sep 17 00:00:00 2001 From: bobuhiro11 Date: Fri, 1 Dec 2017 21:37:16 +0900 Subject: [PATCH 1113/1483] rest: reduce number of index queries --- gnocchi/rest/api.py | 19 ++++++++++++++----- 1 file changed, 14 insertions(+), 5 deletions(-) diff --git a/gnocchi/rest/api.py b/gnocchi/rest/api.py index 02052229..05911113 100644 --- a/gnocchi/rest/api.py +++ b/gnocchi/rest/api.py @@ -1515,15 +1515,24 @@ class ResourcesMetricsMeasuresBatchController(rest.RestController): unknown_metrics = [] unknown_resources = [] body_by_rid = {} + + attribute_filter = {"or": []} + for original_resource_id, resource_id in body: + names = list(body[(original_resource_id, resource_id)].keys()) + attribute_filter["or"].append({"and": [ + {"=": {"resource_id": resource_id}}, + {"in": {"name": names}}]}) + + all_metrics = collections.defaultdict(list) + for metric in pecan.request.indexer.list_metrics( + attribute_filter=attribute_filter): + all_metrics[metric.resource_id].append(metric) + for original_resource_id, resource_id in body: body_by_rid[resource_id] = body[(original_resource_id, resource_id)] names = list(body[(original_resource_id, resource_id)].keys()) - metrics = pecan.request.indexer.list_metrics( - attribute_filter={"and": [ - {"=": {"resource_id": resource_id}}, - {"in": {"name": names}}, - ]}) + metrics = all_metrics[resource_id] known_names = [m.name for m in metrics] if strtobool("create_metrics", create_metrics): -- GitLab From e874e17490bf9c781368032d67c8255da1b528ae Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Fri, 1 Dec 2017 10:19:03 +0100 Subject: [PATCH 
1114/1483] Only install futures on Python 2 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit New futures release breaks entirely on Python 3: futures requires Python '>=2.6, <3' but the running Python is 3.5.2 Anyway it's a good idea to not install on Python 3. (cherry picked from commit 77d1f3b29af325c7f07ba0371ea789d0bccb2311) --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 4083e8e0..3a98e3c9 100644 --- a/requirements.txt +++ b/requirements.txt @@ -7,7 +7,7 @@ oslo.middleware>=3.22.0 pandas>=0.18.0 scipy>=0.18.1 # BSD pecan>=0.9 -futures +futures; python_version < '3' jsonpatch cotyledon>=1.5.0 six -- GitLab From f3fa952409f820a7476235dc7975bef5e0241128 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Wed, 22 Nov 2017 08:09:10 +0100 Subject: [PATCH 1115/1483] aggregates: allow wildcard as metric name --- doc/source/rest.j2 | 4 ++ doc/source/rest.yaml | 11 ++++ gnocchi/rest/aggregates/api.py | 17 +++--- gnocchi/rest/aggregates/processor.py | 4 +- .../gabbits/aggregates-with-resources.yaml | 59 +++++++++++++++++++ ...ates-metric-wildcard-d489260c685c5727.yaml | 4 ++ 6 files changed, 90 insertions(+), 9 deletions(-) create mode 100644 releasenotes/notes/aggregates-metric-wildcard-d489260c685c5727.yaml diff --git a/doc/source/rest.j2 b/doc/source/rest.j2 index ade8999d..11edf77b 100644 --- a/doc/source/rest.j2 +++ b/doc/source/rest.j2 @@ -826,6 +826,10 @@ such as the one described in the :ref:`resource search API `. {{ scenarios['get-aggregates-by-attributes-lookup']['doc'] }} +And metric name can be `wildcard` too. 
+ +{{ scenarios['get-aggregates-by-attributes-lookup-wildcard']['doc'] }} + Groupby ~~~~~~~ diff --git a/doc/source/rest.yaml b/doc/source/rest.yaml index f1e469c2..e8862047 100644 --- a/doc/source/rest.yaml +++ b/doc/source/rest.yaml @@ -817,6 +817,17 @@ "operations": ["*", ["aggregate", "mean", ["metric", "cpu.util", "mean"]], 4] } +- name: get-aggregates-by-attributes-lookup-wildcard + request: | + POST /v1/aggregates?start=2014-10-06T14:34 HTTP/1.1 + Content-Type: application/json + + { + "resource_type": "instance", + "search": {"=": {"server_group": "my_autoscaling_group"}}, + "operations": ["*", ["aggregate", "mean", ["metric", "cpu*", "mean"]], 4] + } + - name: get-aggregates-by-attributes-lookup-groupby request: | POST /v1/aggregates?start=2014-10-06T14:34&groupby=host&groupby=flavor_id HTTP/1.1 diff --git a/gnocchi/rest/aggregates/api.py b/gnocchi/rest/aggregates/api.py index ffad342f..f37a0926 100644 --- a/gnocchi/rest/aggregates/api.py +++ b/gnocchi/rest/aggregates/api.py @@ -14,6 +14,7 @@ # License for the specific language governing permissions and limitations # under the License. 
+import fnmatch import itertools import pecan @@ -285,19 +286,21 @@ class AggregatesController(rest.RestController): return response - def _get_measures_by_name(self, resources, metric_names, operations, + def _get_measures_by_name(self, resources, metric_wildcards, operations, start, stop, granularity, needed_overlap, fill, details): - references = [ - processor.MetricReference(r.get_metric(metric_name), agg, r) - for (metric_name, agg) in metric_names - for r in resources if r.get_metric(metric_name) is not None - ] + references = [] + for r in resources: + references.extend([ + processor.MetricReference(m, agg, r, wildcard) + for wildcard, agg in metric_wildcards + for m in r.metrics if fnmatch.fnmatch(m.name, wildcard) + ]) if not references: api.abort(400, {"cause": "Metrics not found", - "detail": set((m for (m, a) in metric_names))}) + "detail": set((m for (m, a) in metric_wildcards))}) response = { "measures": get_measures_or_abort( diff --git a/gnocchi/rest/aggregates/processor.py b/gnocchi/rest/aggregates/processor.py index 128a9996..e48eff14 100644 --- a/gnocchi/rest/aggregates/processor.py +++ b/gnocchi/rest/aggregates/processor.py @@ -32,7 +32,7 @@ LOG = daiquiri.getLogger(__name__) class MetricReference(object): - def __init__(self, metric, aggregation, resource=None): + def __init__(self, metric, aggregation, resource=None, wildcard=None): self.metric = metric self.aggregation = aggregation self.resource = resource @@ -43,7 +43,7 @@ class MetricReference(object): else: self.name = self.metric.name - self.lookup_key = [self.name, self.aggregation] + self.lookup_key = [wildcard or self.name, self.aggregation] def __eq__(self, other): return (self.metric == other.metric and diff --git a/gnocchi/tests/functional/gabbits/aggregates-with-resources.yaml b/gnocchi/tests/functional/gabbits/aggregates-with-resources.yaml index b2ab79bb..cb8652e0 100644 --- a/gnocchi/tests/functional/gabbits/aggregates-with-resources.yaml +++ 
b/gnocchi/tests/functional/gabbits/aggregates-with-resources.yaml @@ -44,6 +44,8 @@ tests: archive_policy_name: low cpu.idle: archive_policy_name: low + noway: + archive_policy_name: low status: 201 - name: post cpuutil measures 1 @@ -192,6 +194,63 @@ tests: - ['2015-03-06T14:33:57+00:00', 1.0, 230.0] - ['2015-03-06T14:34:12+00:00', 1.0, 45.41] + - name: batch get wildcard list + POST: /v1/aggregates?details=true + data: + resource_type: generic + search: "user_id = '6c865dd0-7945-4e08-8b27-d0d7f1c2b667'" + operations: "(metric (cpu.* mean) (*way mean))" + poll: + count: 10 + delay: 1 + response_json_paths: + $.references.`len`: 3 + $.references[/id].[0]: $HISTORY['list resources'].$RESPONSE['$[0]'] + $.references[/id].[1]: $HISTORY['list resources'].$RESPONSE['$[1]'] + $.references[/id].[2]: $HISTORY['list resources'].$RESPONSE['$[2]'] + $.measures."$HISTORY['list resources'].$RESPONSE['$[0].id']"."cpu.util".mean: + - ['2015-03-06T14:30:00+00:00', 300.0, 27.55] + - ['2015-03-06T14:33:57+00:00', 1.0, 43.1] + - ['2015-03-06T14:34:12+00:00', 1.0, 12.0] + $.measures."$HISTORY['list resources'].$RESPONSE['$[0].id']"."cpu.idle".mean: [] + $.measures."$HISTORY['list resources'].$RESPONSE['$[0].id']"."noway".mean: [] + $.measures."$HISTORY['list resources'].$RESPONSE['$[1].id']"."cpu.util".mean: + - ['2015-03-06T14:30:00+00:00', 300.0, 15.5] + - ['2015-03-06T14:33:57+00:00', 1.0, 23.0] + - ['2015-03-06T14:34:12+00:00', 1.0, 8.0] + $.measures."$HISTORY['list resources'].$RESPONSE['$[2].id']"."cpu.util".mean: + - ['2015-03-06T14:30:00+00:00', 300.0, 137.70499999999998] + - ['2015-03-06T14:33:57+00:00', 1.0, 230.0] + - ['2015-03-06T14:34:12+00:00', 1.0, 45.41] + + - name: batch get wildcard solo + POST: /v1/aggregates?details=true + data: + resource_type: generic + search: "user_id = '6c865dd0-7945-4e08-8b27-d0d7f1c2b667'" + operations: "(metric cpu.* mean)" + poll: + count: 10 + delay: 1 + response_json_paths: + $.references.`len`: 3 + $.references[/id].[0]: $HISTORY['list 
resources'].$RESPONSE['$[0]'] + $.references[/id].[1]: $HISTORY['list resources'].$RESPONSE['$[1]'] + $.references[/id].[2]: $HISTORY['list resources'].$RESPONSE['$[2]'] + $.measures."$HISTORY['list resources'].$RESPONSE['$[0].id']"."cpu.util".mean: + - ['2015-03-06T14:30:00+00:00', 300.0, 27.55] + - ['2015-03-06T14:33:57+00:00', 1.0, 43.1] + - ['2015-03-06T14:34:12+00:00', 1.0, 12.0] + $.measures."$HISTORY['list resources'].$RESPONSE['$[0].id']"."cpu.idle".mean: [] + $.measures."$HISTORY['list resources'].$RESPONSE['$[1].id']"."cpu.util".mean: + - ['2015-03-06T14:30:00+00:00', 300.0, 15.5] + - ['2015-03-06T14:33:57+00:00', 1.0, 23.0] + - ['2015-03-06T14:34:12+00:00', 1.0, 8.0] + $.measures."$HISTORY['list resources'].$RESPONSE['$[2].id']"."cpu.util".mean: + - ['2015-03-06T14:30:00+00:00', 300.0, 137.70499999999998] + - ['2015-03-06T14:33:57+00:00', 1.0, 230.0] + - ['2015-03-06T14:34:12+00:00', 1.0, 45.41] + - name: stupid but valid batch get POST: /v1/aggregates?details=true data: diff --git a/releasenotes/notes/aggregates-metric-wildcard-d489260c685c5727.yaml b/releasenotes/notes/aggregates-metric-wildcard-d489260c685c5727.yaml new file mode 100644 index 00000000..62800603 --- /dev/null +++ b/releasenotes/notes/aggregates-metric-wildcard-d489260c685c5727.yaml @@ -0,0 +1,4 @@ +--- +features: + - | + Wildcard can be used instead of metric name in Dynamic aggregates API. -- GitLab From e40e847fe5fa11c5ce3ee8954c6d97fe0a5f1554 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Mon, 3 Jul 2017 14:00:03 +0200 Subject: [PATCH 1116/1483] Make command line tools log to stderr `gnocchi-upgrade' and `gnocchi-change-sack-size' are both command line tools and not daemons. That means they should always log to stderr, and not use the configuration file instructions as where to log. 
Fixes #16 (cherry picked from commit 0b6cd96310bc9238a6492eab01b67ca2b1d0ccf8) --- gnocchi/cli.py | 4 ++-- gnocchi/service.py | 5 +++-- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/gnocchi/cli.py b/gnocchi/cli.py index 7b14607f..9c44f057 100644 --- a/gnocchi/cli.py +++ b/gnocchi/cli.py @@ -56,7 +56,7 @@ def upgrade(): help="Number of storage sacks to create."), ]) - conf = service.prepare_service(conf=conf) + conf = service.prepare_service(conf=conf, log_to_std=True) if not conf.skip_index: index = indexer.get_driver(conf) index.connect() @@ -84,7 +84,7 @@ def change_sack_size(): cfg.IntOpt("sacks-number", required=True, min=1, help="Number of storage sacks."), ]) - conf = service.prepare_service(conf=conf) + conf = service.prepare_service(conf=conf, log_to_std=True) s = storage.get_incoming_driver(conf.incoming) try: report = s.measures_report(details=False) diff --git a/gnocchi/service.py b/gnocchi/service.py index fb221bd2..9e5c7ce1 100644 --- a/gnocchi/service.py +++ b/gnocchi/service.py @@ -32,7 +32,8 @@ LOG = daiquiri.getLogger(__name__) def prepare_service(args=None, conf=None, - default_config_files=None): + default_config_files=None, + log_to_std=False): if conf is None: conf = cfg.ConfigOpts() opts.set_defaults() @@ -53,7 +54,7 @@ def prepare_service(args=None, conf=None, default_config_files=default_config_files, version=pbr.version.VersionInfo('gnocchi').version_string()) - if conf.log_dir or conf.log_file: + if not log_to_std and (conf.log_dir or conf.log_file): outputs = [daiquiri.output.File(filename=conf.log_file, directory=conf.log_dir)] else: -- GitLab From 7a4666195d329ba8233210be4b277c705d1a3d08 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Sat, 2 Dec 2017 10:11:11 +0100 Subject: [PATCH 1117/1483] rest/influxdb: remove already caught exception ResourceAlreadyExists exception is actually caught and retried upon by get_or_create_resource_and_metrics(): it'll never be caught here. 
get_or_create_resource_and_metrics() will abort with a 409 if that happens. --- gnocchi/rest/influxdb.py | 23 +++++++---------------- 1 file changed, 7 insertions(+), 16 deletions(-) diff --git a/gnocchi/rest/influxdb.py b/gnocchi/rest/influxdb.py index 37d00c53..09e574d9 100644 --- a/gnocchi/rest/influxdb.py +++ b/gnocchi/rest/influxdb.py @@ -233,22 +233,13 @@ class InfluxDBController(rest.RestController): resource_name, creator=creator) LOG.debug("Getting metrics from resource `%s'", resource_name) timeout = pecan.request.conf.api.operation_timeout - try: - metrics = ( - api.get_or_create_resource_and_metrics.retry_with( - stop=tenacity.stop_after_delay(timeout))( - creator, resource_id, resource_name, - metrics_and_measures.keys(), - {}, db) - ) - except indexer.ResourceAlreadyExists as e: - # If this function raises ResourceAlreadyExists it means - # the resource might already exist as another type, we - # can't continue. - LOG.error("Unable to create resource `%s' for InfluxDB, " - "it might already exists as another " - "resource type than `%s'", resource_name, db) - api.abort(400, e) + metrics = ( + api.get_or_create_resource_and_metrics.retry_with( + stop=tenacity.stop_after_delay(timeout))( + creator, resource_id, resource_name, + metrics_and_measures.keys(), + {}, db) + ) for metric in metrics: api.enforce("post measures", metric) -- GitLab From 344e382c260374569eea31673c99245fe513201b Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Thu, 30 Nov 2017 17:15:07 +0100 Subject: [PATCH 1118/1483] indexer: fix upgrade when password contains a % oslo.config doesn't support %{} or %() variable interpolation while ConfigParser does. So when a password contains a '%', it's fine for oslo.config But the Python ConfigParser will raise the exception. Since alembic use ConfigParser and not oslo.config, we have to escape the url ourself. This is not a big deal since, we don't want alembic doing variable interpolation. 
--- gnocchi/indexer/sqlalchemy.py | 2 +- .../tests/indexer/sqlalchemy/test_utils.py | 25 +++++++++++++++++++ 2 files changed, 26 insertions(+), 1 deletion(-) create mode 100644 gnocchi/tests/indexer/sqlalchemy/test_utils.py diff --git a/gnocchi/indexer/sqlalchemy.py b/gnocchi/indexer/sqlalchemy.py index 32654654..c4a42c58 100644 --- a/gnocchi/indexer/sqlalchemy.py +++ b/gnocchi/indexer/sqlalchemy.py @@ -301,7 +301,7 @@ class SQLAlchemyIndexer(indexer.IndexerDriver): cfg = config.Config( "%s/alembic/alembic.ini" % os.path.dirname(__file__)) cfg.set_main_option('sqlalchemy.url', - self.conf.database.connection) + self.conf.database.connection.replace('%', '%%')) return cfg def get_engine(self): diff --git a/gnocchi/tests/indexer/sqlalchemy/test_utils.py b/gnocchi/tests/indexer/sqlalchemy/test_utils.py new file mode 100644 index 00000000..9d251ec4 --- /dev/null +++ b/gnocchi/tests/indexer/sqlalchemy/test_utils.py @@ -0,0 +1,25 @@ +# -*- encoding: utf-8 -*- +# +# Copyright © 2017 Red Hat +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+from gnocchi import indexer +from gnocchi.tests import base + + +class TestUtils(base.TestCase): + def test_percent_in_url(self): + url = 'mysql+pymysql://user:pass%word@localhost/foobar' + self.conf.set_override('url', url, 'indexer') + alembic = indexer.get_driver(self.conf)._get_alembic_config() + self.assertEqual(url, alembic.get_main_option("sqlalchemy.url")) -- GitLab From 7892684dde395dd3ccec67a9cb172659975d74a8 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Sat, 2 Dec 2017 10:09:42 +0100 Subject: [PATCH 1119/1483] rest/prometheus: use operation_timeout to stop retrying The current code makes it retry infinitely, which might be a bit too long. --- gnocchi/rest/api.py | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/gnocchi/rest/api.py b/gnocchi/rest/api.py index 05911113..77de08a2 100644 --- a/gnocchi/rest/api.py +++ b/gnocchi/rest/api.py @@ -2034,10 +2034,12 @@ class PrometheusWriteController(rest.RestController): original_rid = '%s@%s' % (job, instance) rid = ResourceUUID(original_rid, creator=creator) metric_names = list(measures.keys()) - metrics = get_or_create_resource_and_metrics( - creator, rid, original_rid, metric_names, - dict(job=job, instance=instance), - "prometheus", self.PROMETHEUS_RESOURCE_TYPE) + timeout = pecan.request.conf.api.operation_timeout + metrics = get_or_create_resource_and_metrics.retry_with( + stop=tenacity.stop_after_delay(timeout))( + creator, rid, original_rid, metric_names, + dict(job=job, instance=instance), + "prometheus", self.PROMETHEUS_RESOURCE_TYPE) for metric in metrics: enforce("post measures", metric) -- GitLab From 56db921bc185f5ab97680d4675fe665a6767b57d Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Thu, 16 Nov 2017 11:42:10 +0100 Subject: [PATCH 1120/1483] rest: use custom json renderer This change replaces the json renderer to ensure we never render None. It also adds tests to ensure the all 202 don't have body set. 
Closes #463 --- gnocchi/gendoc.py | 3 +++ gnocchi/rest/api.py | 2 +- gnocchi/rest/app.py | 13 +++++++++++++ .../tests/functional/gabbits/batch-measures.yaml | 8 ++++++++ gnocchi/tests/functional/gabbits/metric.yaml | 2 ++ gnocchi/tests/functional/gabbits/resource.yaml | 2 ++ 6 files changed, 29 insertions(+), 1 deletion(-) diff --git a/gnocchi/gendoc.py b/gnocchi/gendoc.py index 3f864362..6d5545c6 100644 --- a/gnocchi/gendoc.py +++ b/gnocchi/gendoc.py @@ -41,6 +41,9 @@ def _format_json(txt): def _extract_body(req_or_resp): # TODO(jd) Make this a Sphinx option + if not req_or_resp.body: + return "" + if req_or_resp.content_type == "application/json": body = _format_json(req_or_resp.body) else: diff --git a/gnocchi/rest/api.py b/gnocchi/rest/api.py index 77de08a2..8d16ffe3 100644 --- a/gnocchi/rest/api.py +++ b/gnocchi/rest/api.py @@ -1612,7 +1612,7 @@ class MetricsMeasuresBatchController(rest.RestController): {utils.UUID: MeasuresListSchema} ) - @pecan.expose() + @pecan.expose("json") def post(self): body = deserialize_and_validate(self.MeasuresBatchSchema) metrics = pecan.request.indexer.list_metrics( diff --git a/gnocchi/rest/app.py b/gnocchi/rest/app.py index 4b808c98..5fbd095b 100644 --- a/gnocchi/rest/app.py +++ b/gnocchi/rest/app.py @@ -23,6 +23,7 @@ from oslo_policy import policy from paste import deploy import pecan from pecan import jsonify +from pecan import templating from stevedore import driver import webob.exc @@ -74,6 +75,17 @@ class NotImplementedMiddleware(object): "Sorry, this Gnocchi server does " "not implement this feature 😞") + +class JsonRenderer(templating.JsonRenderer): + def render(self, template_path, namespace): + # NOTE(sileht): Unlike the builtin renderer of pecan + # we don't want to return "null" for None. Our API + # returns only empty, list or dict. 
+ if namespace is None: + return "" + return super(JsonRenderer, self).render(template_path, namespace) + + # NOTE(sileht): pastedeploy uses ConfigParser to handle # global_conf, since python 3 ConfigParser doesn't # allow to store object as config value, only strings are @@ -130,6 +142,7 @@ def _setup_app(root, conf, indexer, storage, incoming, root, hooks=(GnocchiHook(storage, indexer, incoming, conf),), guess_content_type_from_ext=False, + custom_renderers={"json": JsonRenderer} ) if not_implemented_middleware: diff --git a/gnocchi/tests/functional/gabbits/batch-measures.yaml b/gnocchi/tests/functional/gabbits/batch-measures.yaml index b09473ea..4a0ef279 100644 --- a/gnocchi/tests/functional/gabbits/batch-measures.yaml +++ b/gnocchi/tests/functional/gabbits/batch-measures.yaml @@ -36,6 +36,8 @@ tests: - timestamp: "2015-03-06T14:34:12" value: 12 status: 202 + response_headers: + content-length: 0 - name: push measurements to unknown metrics POST: /v1/batch/metrics/measures @@ -122,6 +124,8 @@ tests: - timestamp: "2015-03-06T14:34:12" value: 12 status: 202 + response_headers: + content-length: 0 - name: push measurements to two named metrics POST: /v1/batch/resources/metrics/measures @@ -149,6 +153,8 @@ tests: - timestamp: "2015-03-06T14:34:12" value: 12 status: 202 + response_headers: + content-length: 0 - name: create archive policy rule for auto POST: /v1/archive_policy_rule @@ -171,6 +177,8 @@ tests: - timestamp: "2015-03-06T14:34:12" value: 12 status: 202 + response_headers: + content-length: 0 - name: get created metric to check creation GET: /v1/resource/generic/46c9418d-d63b-4cdd-be89-8f57ffc5952e/metric/auto.test diff --git a/gnocchi/tests/functional/gabbits/metric.yaml b/gnocchi/tests/functional/gabbits/metric.yaml index e824cfdd..8503c855 100644 --- a/gnocchi/tests/functional/gabbits/metric.yaml +++ b/gnocchi/tests/functional/gabbits/metric.yaml @@ -167,6 +167,8 @@ tests: - timestamp: 1425652437.0 value: 43.1 status: 202 + response_headers: + 
content-length: 0 - name: push measurements to metric POST: /v1/metric/$HISTORY['list valid metrics'].$RESPONSE['$[0].id']/measures diff --git a/gnocchi/tests/functional/gabbits/resource.yaml b/gnocchi/tests/functional/gabbits/resource.yaml index d3d01231..132fc09f 100644 --- a/gnocchi/tests/functional/gabbits/resource.yaml +++ b/gnocchi/tests/functional/gabbits/resource.yaml @@ -481,6 +481,8 @@ tests: - timestamp: "2015-03-06T14:34:12" value: 12 status: 202 + response_headers: + content-length: 0 - name: request cpuutil measures again GET: $LAST_URL -- GitLab From c89a0ef696a8bee5a95a336c599104c979c01b5f Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Sat, 2 Dec 2017 13:27:57 +0100 Subject: [PATCH 1121/1483] indexer: do not return empty IN statement in QueryFilter Fixes #530 --- gnocchi/indexer/sqlalchemy.py | 9 ++++++++- gnocchi/tests/test_indexer.py | 8 ++++++++ 2 files changed, 16 insertions(+), 1 deletion(-) diff --git a/gnocchi/indexer/sqlalchemy.py b/gnocchi/indexer/sqlalchemy.py index c4a42c58..76f844b7 100644 --- a/gnocchi/indexer/sqlalchemy.py +++ b/gnocchi/indexer/sqlalchemy.py @@ -1131,6 +1131,13 @@ class SQLAlchemyIndexer(indexer.IndexerDriver): return sort_keys, sort_dirs +def _operator_in(field_name, value): + # Do not generate empty IN comparison + # https://github.com/gnocchixyz/gnocchi/issues/530 + if len(value): + return field_name.in_(value) + + class QueryTransformer(object): unary_operators = { @@ -1160,7 +1167,7 @@ class QueryTransformer(object): u"≠": operator.ne, u"ne": operator.ne, - u"in": lambda field_name, values: field_name.in_(values), + u"in": _operator_in, u"like": lambda field, value: field.like(value), } diff --git a/gnocchi/tests/test_indexer.py b/gnocchi/tests/test_indexer.py index a514d0aa..3160080e 100644 --- a/gnocchi/tests/test_indexer.py +++ b/gnocchi/tests/test_indexer.py @@ -863,6 +863,14 @@ class TestIndexerDriver(tests_base.TestCase): resource_type, attribute_filter={"=": {"flavor_id": 1.0}}) self.assertEqual(0, 
len(r)) + def test_list_resource_empty_in(self): + self.index.create_resource('generic', str(uuid.uuid4()), + str(uuid.uuid4()), str(uuid.uuid4())) + self.assertEqual( + [], + self.index.list_resources( + attribute_filter={"in": {"id": []}})) + def test_list_resource_weird_date(self): self.assertRaises( indexer.QueryValueError, -- GitLab From 57b9945aa4df2831b181526513ef43bdced5a198 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Sat, 2 Dec 2017 10:36:59 +0100 Subject: [PATCH 1122/1483] rest: fix error handling on resource conflict The current code convert an nonexistent variable into a string. Facepalm. Let's return the correct exception into the correct format, and test it for real using InfluxDB. The same will apply to Prometheus anyway. --- gnocchi/rest/api.py | 4 ++-- .../tests/functional/gabbits/influxdb.yaml | 19 +++++++++++++++++++ 2 files changed, 21 insertions(+), 2 deletions(-) diff --git a/gnocchi/rest/api.py b/gnocchi/rest/api.py index 8d16ffe3..9860119f 100644 --- a/gnocchi/rest/api.py +++ b/gnocchi/rest/api.py @@ -1989,12 +1989,12 @@ def get_or_create_resource_and_metrics( return pecan.request.indexer.create_resource( resource_type, rid, creator, **kwargs ).metrics - except indexer.ResourceAlreadyExists: + except indexer.ResourceAlreadyExists as e: # NOTE(sileht): ensure the rid is not registered whitin another # resource type. 
r = pecan.request.indexer.get_resource('generic', rid) if r.type != resource_type: - abort(409, six.text_type(e)) + abort(409, e) raise diff --git a/gnocchi/tests/functional/gabbits/influxdb.yaml b/gnocchi/tests/functional/gabbits/influxdb.yaml index 8c6b32ac..c5a2da16 100644 --- a/gnocchi/tests/functional/gabbits/influxdb.yaml +++ b/gnocchi/tests/functional/gabbits/influxdb.yaml @@ -109,3 +109,22 @@ tests: - name: check metric created different tag resource id and slash replaced GET: /v1/resource/influxdbtest/myvalue/metric/cpu.field@path=_foobar + + - name: create a generic resource for conflict + POST: /v1/resource/generic + data: + id: conflict + status: 201 + + - name: write lines with conflicting resource + POST: /v1/influxdb/write?db=influxdbtest + request_headers: + content-type: text/plain + accept: application/json + data: + "mymetric,host=conflict,mytag=myvalue field=123 1510581804179554816" + status: 409 + response_json_paths: + $.title: "Conflict" + $.description.cause: "Resource already exists" + $.description.detail: "da19a545-af76-5081-9a88-f370baab66c6" -- GitLab From af9a58f0a0c151f500b32ae94d815d4a35b9b036 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Tue, 5 Dec 2017 16:35:41 +0100 Subject: [PATCH 1123/1483] indexer: add a policy_filter on list_metrics This splits the filtering done for attribute and policy in 2 different filter, allowing them to be combined differently. 
--- gnocchi/indexer/__init__.py | 2 +- gnocchi/indexer/sqlalchemy.py | 29 ++++++++++++++++++++--------- gnocchi/rest/api.py | 4 +--- 3 files changed, 22 insertions(+), 13 deletions(-) diff --git a/gnocchi/indexer/__init__.py b/gnocchi/indexer/__init__.py index 8c69af64..33726559 100644 --- a/gnocchi/indexer/__init__.py +++ b/gnocchi/indexer/__init__.py @@ -389,7 +389,7 @@ class IndexerDriver(object): @staticmethod def list_metrics(details=False, status='active', limit=None, marker=None, sorts=None, - attribute_filter=None): + attribute_filter=None, policy_filter=None): raise exceptions.NotImplementedError @staticmethod diff --git a/gnocchi/indexer/sqlalchemy.py b/gnocchi/indexer/sqlalchemy.py index 76f844b7..b9334715 100644 --- a/gnocchi/indexer/sqlalchemy.py +++ b/gnocchi/indexer/sqlalchemy.py @@ -688,6 +688,7 @@ class SQLAlchemyIndexer(indexer.IndexerDriver): @retry_on_deadlock def list_metrics(self, details=False, status='active', limit=None, marker=None, sorts=None, + policy_filter=None, attribute_filter=None): sorts = sorts or [] with self.facade.independent_reader() as session: @@ -695,16 +696,26 @@ class SQLAlchemyIndexer(indexer.IndexerDriver): Metric.status == status) if details: q = q.options(sqlalchemy.orm.joinedload('resource')) - if attribute_filter: + if policy_filter or attribute_filter: engine = session.connection() - # We don't catch the indexer.QueryAttributeError error here - # since we expect any user input on this function. If the - # caller screws it, it's its problem: no need to convert the - # exception to another type. - f = QueryTransformer.build_filter( - engine.dialect.name, - Metric, attribute_filter) - q = q.filter(f) + if attribute_filter: + # We don't catch the indexer.QueryAttributeError error here + # since we expect any user input on this function. If the + # caller screws it, it's its problem: no need to convert + # the exception to another type. 
+ attribute_f = QueryTransformer.build_filter( + engine.dialect.name, + Metric, attribute_filter) + q = q.filter(attribute_f) + if policy_filter: + # We don't catch the indexer.QueryAttributeError error here + # since we expect any user input on this function. If the + # caller screws it, it's its problem: no need to convert the + # exception to another type. + policy_f = QueryTransformer.build_filter( + engine.dialect.name, + Metric, policy_filter) + q = q.filter(policy_f) sort_keys, sort_dirs = self._build_sort_keys(sorts, ['id']) diff --git a/gnocchi/rest/api.py b/gnocchi/rest/api.py index 9860119f..658da4bf 100644 --- a/gnocchi/rest/api.py +++ b/gnocchi/rest/api.py @@ -667,12 +667,10 @@ class MetricsController(rest.RestController): policy_filter = pecan.request.auth_helper.get_metric_policy_filter( pecan.request, "list metric") - if policy_filter: - attr_filters.append(policy_filter) - try: metrics = pecan.request.indexer.list_metrics( attribute_filter={"and": attr_filters}, + policy_filter=policy_filter, **pagination_opts) if metrics and len(metrics) >= pagination_opts['limit']: set_resp_link_hdr(str(metrics[-1].id), kwargs, pagination_opts) -- GitLab From ca201f5901d6807c3a92f77e0749ecc2a6e900d2 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Tue, 21 Nov 2017 13:38:54 +0100 Subject: [PATCH 1124/1483] indexer: add support for resource_filter in list_metrics This allows to filter metrics based on the resource they are linked to. 
--- gnocchi/indexer/__init__.py | 3 ++- gnocchi/indexer/sqlalchemy.py | 28 +++++++++++++++++++++++----- gnocchi/tests/test_indexer.py | 17 +++++++++++++++++ 3 files changed, 42 insertions(+), 6 deletions(-) diff --git a/gnocchi/indexer/__init__.py b/gnocchi/indexer/__init__.py index 33726559..92356815 100644 --- a/gnocchi/indexer/__init__.py +++ b/gnocchi/indexer/__init__.py @@ -389,7 +389,8 @@ class IndexerDriver(object): @staticmethod def list_metrics(details=False, status='active', limit=None, marker=None, sorts=None, - attribute_filter=None, policy_filter=None): + attribute_filter=None, policy_filter=None, + resource_policy_filter=None): raise exceptions.NotImplementedError @staticmethod diff --git a/gnocchi/indexer/sqlalchemy.py b/gnocchi/indexer/sqlalchemy.py index b9334715..99e026bf 100644 --- a/gnocchi/indexer/sqlalchemy.py +++ b/gnocchi/indexer/sqlalchemy.py @@ -688,7 +688,7 @@ class SQLAlchemyIndexer(indexer.IndexerDriver): @retry_on_deadlock def list_metrics(self, details=False, status='active', limit=None, marker=None, sorts=None, - policy_filter=None, + policy_filter=None, resource_policy_filter=None, attribute_filter=None): sorts = sorts or [] with self.facade.independent_reader() as session: @@ -696,7 +696,7 @@ class SQLAlchemyIndexer(indexer.IndexerDriver): Metric.status == status) if details: q = q.options(sqlalchemy.orm.joinedload('resource')) - if policy_filter or attribute_filter: + if policy_filter or resource_policy_filter or attribute_filter: engine = session.connection() if attribute_filter: # We don't catch the indexer.QueryAttributeError error here @@ -710,12 +710,30 @@ class SQLAlchemyIndexer(indexer.IndexerDriver): if policy_filter: # We don't catch the indexer.QueryAttributeError error here # since we expect any user input on this function. If the - # caller screws it, it's its problem: no need to convert the - # exception to another type. + # caller screws it, it's its problem: no need to convert + # the exception to another type. 
policy_f = QueryTransformer.build_filter( engine.dialect.name, Metric, policy_filter) - q = q.filter(policy_f) + else: + policy_f = None + if resource_policy_filter: + q = q.join(Metric.resource) + try: + resource_policy_f = QueryTransformer.build_filter( + engine.dialect.name, + Resource, + resource_policy_filter) + except indexer.QueryAttributeError as e: + # NOTE(jd) The QueryAttributeError does not know about + # resource_type, so convert it + raise indexer.ResourceAttributeError("generic", + e.attribute) + else: + resource_policy_f = None + + if policy_filter or resource_policy_filter: + q = q.filter(sqlalchemy.or_(policy_f, resource_policy_f)) sort_keys, sort_dirs = self._build_sort_keys(sorts, ['id']) diff --git a/gnocchi/tests/test_indexer.py b/gnocchi/tests/test_indexer.py index 3160080e..c9d1d08d 100644 --- a/gnocchi/tests/test_indexer.py +++ b/gnocchi/tests/test_indexer.py @@ -1151,6 +1151,23 @@ class TestIndexerDriver(tests_base.TestCase): else: self.assertLess(id_list.index(e2), id_list.index(e1)) + def test_list_metrics_resource_filter(self): + r1 = uuid.uuid4() + creator = str(uuid.uuid4()) + m1 = uuid.uuid4() + m2 = uuid.uuid4() + project_id = str(uuid.uuid4()) + self.index.create_resource("generic", r1, creator, + project_id=project_id) + self.index.create_metric(m1, creator, archive_policy_name="low", + resource_id=r1) + self.index.create_metric(m2, creator, archive_policy_name="low") + metrics = self.index.list_metrics( + resource_policy_filter={"=": {"project_id": project_id}}) + id_list = [m.id for m in metrics] + self.assertIn(m1, id_list) + self.assertNotIn(m2, id_list) + def test_list_metrics_delete_status(self): e1 = uuid.uuid4() self.index.create_metric(e1, str(uuid.uuid4()), -- GitLab From 466eb83a38239b9e0a144a394390635389047304 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Tue, 5 Dec 2017 17:30:17 +0100 Subject: [PATCH 1125/1483] rest: use resource_policy_filter when listing metrics This leverages the resource policy filter from 
auth_helper to use as a filter for metric when listing them. In the case of Keystone auth, that allows a tenant whose metrics are linked to a resource with its project_id to see those metrics listed. Fixes #314 --- gnocchi/rest/api.py | 6 ++++++ gnocchi/rest/auth_helper.py | 25 ++++++++++++++----------- gnocchi/tests/test_rest.py | 16 ++++++++++++++++ 3 files changed, 36 insertions(+), 11 deletions(-) diff --git a/gnocchi/rest/api.py b/gnocchi/rest/api.py index 658da4bf..22532cce 100644 --- a/gnocchi/rest/api.py +++ b/gnocchi/rest/api.py @@ -666,11 +666,17 @@ class MetricsController(rest.RestController): policy_filter = pecan.request.auth_helper.get_metric_policy_filter( pecan.request, "list metric") + resource_policy_filter = ( + pecan.request.auth_helper.get_resource_policy_filter( + pecan.request, "list metric", resource_type=None, + prefix="resource") + ) try: metrics = pecan.request.indexer.list_metrics( attribute_filter={"and": attr_filters}, policy_filter=policy_filter, + resource_policy_filter=resource_policy_filter, **pagination_opts) if metrics and len(metrics) >= pagination_opts['limit']: set_resp_link_hdr(str(metrics[-1].id), kwargs, pagination_opts) diff --git a/gnocchi/rest/auth_helper.py b/gnocchi/rest/auth_helper.py index 1752222d..1362f83a 100644 --- a/gnocchi/rest/auth_helper.py +++ b/gnocchi/rest/auth_helper.py @@ -41,7 +41,7 @@ class KeystoneAuthHelper(object): } @staticmethod - def get_resource_policy_filter(request, rule, resource_type): + def get_resource_policy_filter(request, rule, resource_type, prefix=None): try: # Check if the policy allows the user to list any resource api.enforce(rule, { @@ -50,26 +50,29 @@ class KeystoneAuthHelper(object): except webob.exc.HTTPForbidden: policy_filter = [] project_id = request.headers.get("X-Project-Id") + target = {} + if prefix: + resource = target[prefix] = {} + else: + resource = target + resource["resource_type"] = resource_type + resource["project_id"] = project_id try: # Check if the policy 
allows the user to list resources linked # to their project - api.enforce(rule, { - "resource_type": resource_type, - "project_id": project_id, - }) + api.enforce(rule, target) except webob.exc.HTTPForbidden: pass else: policy_filter.append({"=": {"project_id": project_id}}) + del resource["project_id"] + resource["created_by_project_id"] = project_id try: # Check if the policy allows the user to list resources linked # to their created_by_project - api.enforce(rule, { - "resource_type": resource_type, - "created_by_project_id": project_id, - }) + api.enforce(rule, target) except webob.exc.HTTPForbidden: pass else: @@ -132,7 +135,7 @@ class BasicAuthHelper(object): } @staticmethod - def get_resource_policy_filter(request, rule, resource_type): + def get_resource_policy_filter(request, rule, resource_type, prefix=None): return None @staticmethod @@ -159,7 +162,7 @@ class RemoteUserAuthHelper(object): } @staticmethod - def get_resource_policy_filter(request, rule, resource_type): + def get_resource_policy_filter(request, rule, resource_type, prefix=None): return None @staticmethod diff --git a/gnocchi/tests/test_rest.py b/gnocchi/tests/test_rest.py index fc75b102..97db5a5b 100644 --- a/gnocchi/tests/test_rest.py +++ b/gnocchi/tests/test_rest.py @@ -325,6 +325,22 @@ class MetricTest(RestTest): metric_list = self.app.get("/v1/metric") self.assertNotIn(metric_id, [m["id"] for m in metric_list.json]) + def test_list_metric_with_another_user_allowed(self): + rid = str(uuid.uuid4()) + r = self.app.post_json("/v1/resource/generic", + params={ + "id": rid, + "project_id": TestingApp.PROJECT_ID_2, + "metrics": { + "disk": {"archive_policy_name": "low"}, + } + }) + metric_id = r.json['metrics']['disk'] + + with self.app.use_another_user(): + metric_list = self.app.get("/v1/metric") + self.assertIn(metric_id, [m["id"] for m in metric_list.json]) + def test_get_metric_with_another_user(self): result = self.app.post_json("/v1/metric", params={"archive_policy_name": "medium"}, -- 
GitLab From c10e224851d4bdcad8b2423cd50173999b92e315 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Fri, 1 Dec 2017 13:16:07 +0100 Subject: [PATCH 1126/1483] doc: add an intro about metricd --- doc/source/operating.rst | 23 +++++++++++++++++++---- 1 file changed, 19 insertions(+), 4 deletions(-) diff --git a/doc/source/operating.rst b/doc/source/operating.rst index 818bef82..d3f40535 100644 --- a/doc/source/operating.rst +++ b/doc/source/operating.rst @@ -190,8 +190,23 @@ uses the 6 default |aggregation methods| (mean, min, max, sum, std, count) with the same "one year, one minute aggregations" resolution, the space used will go up to a maximum of 6 × 4.1 MiB = 24.6 MiB. -How many metricd workers do we need to run -========================================== +Metricd +======= + +Metricd is the daemon responsible for processing measures, computing their +aggregates and storing them into the aggregate storage. It also handles a few +other cleanup tasks, such as deleting metrics marked for deletion. + +Metricd therefore is responsible for most of the CPU usage and I/O job in +Gnocchi. The archive policy of each metric will influence how fast it performs. + +In order to process new measures, metricd checks the incoming storage for new +measures from time to time. The delay between each check is can be configured +by changing the `[metricd]metric_processing_delay` configuration option. + + +How many metricd workers do I need to run +----------------------------------------- By default, `gnocchi-metricd` daemon spans all your CPU power in order to maximize CPU utilisation when computing |metric| aggregation. You can use the @@ -205,7 +220,7 @@ increase the number of `gnocchi-metricd` daemons. You can run any number of metricd daemon on any number of servers. How to scale measure processing -=============================== +------------------------------- Measurement data pushed to Gnocchi is divided into sacks for better distribution. 
The number of partitions is controlled by the `sacks` option @@ -269,7 +284,7 @@ How to monitor Gnocchi The `/v1/status` endpoint of the HTTP API returns various information, such as the number of |measures| to process (|measures| backlog), which you can easily -monitor (see `How many metricd workers do we need to run`_). The Gnocchi client +monitor (see `How many metricd workers do I need to run`_). The Gnocchi client can show this output by running `gnocchi status`. Making sure that the HTTP server and `gnocchi-metricd` daemon are running and -- GitLab From 586de48d281aabae88243282687cfc3e61c21ddc Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Mon, 27 Nov 2017 15:02:52 +0100 Subject: [PATCH 1127/1483] metricd: expose greedy option This allows to disable eager processing of new measures. Fixes #507 --- doc/source/operating.rst | 5 +++++ gnocchi/cli/metricd.py | 7 ++++--- gnocchi/incoming/__init__.py | 4 ++-- gnocchi/incoming/ceph.py | 2 +- gnocchi/incoming/file.py | 2 +- gnocchi/incoming/redis.py | 5 +++-- gnocchi/incoming/s3.py | 2 +- gnocchi/incoming/swift.py | 2 +- gnocchi/opts.py | 5 +++++ ...d-respect-processing-delay-option-b8cc9895dec75567.yaml | 6 ++++++ 10 files changed, 29 insertions(+), 11 deletions(-) create mode 100644 releasenotes/notes/metricd-respect-processing-delay-option-b8cc9895dec75567.yaml diff --git a/doc/source/operating.rst b/doc/source/operating.rst index d3f40535..50d3e0d5 100644 --- a/doc/source/operating.rst +++ b/doc/source/operating.rst @@ -204,6 +204,11 @@ In order to process new measures, metricd checks the incoming storage for new measures from time to time. The delay between each check is can be configured by changing the `[metricd]metric_processing_delay` configuration option. +Some incoming driver (only Redis currently) are able to inform metricd that new +measures are available for processing. 
In that case, metricd will not respect +the `[metricd]metric_processing_delay` parameter and start processing the new +measures right away. This behaviour can be disabled by turning off the +`[metricd]greedy` option. How many metricd workers do I need to run ----------------------------------------- diff --git a/gnocchi/cli/metricd.py b/gnocchi/cli/metricd.py index b3e2b5de..0ad70a2a 100644 --- a/gnocchi/cli/metricd.py +++ b/gnocchi/cli/metricd.py @@ -172,9 +172,10 @@ class MetricProcessor(MetricProcessBase): 'partitioning. Retrying: %s', e) raise tenacity.TryAgain(e) - filler = threading.Thread(target=self._fill_sacks_to_process) - filler.daemon = True - filler.start() + if self.conf.metricd.greedy: + filler = threading.Thread(target=self._fill_sacks_to_process) + filler.daemon = True + filler.start() @retry_on_exception.wraps def _fill_sacks_to_process(self): diff --git a/gnocchi/incoming/__init__.py b/gnocchi/incoming/__init__.py index f3c66c58..b972aa25 100644 --- a/gnocchi/incoming/__init__.py +++ b/gnocchi/incoming/__init__.py @@ -54,7 +54,7 @@ class IncomingDriver(object): return self._num_sacks @staticmethod - def __init__(conf): + def __init__(conf, greedy=True): pass def get_sack_prefix(self, num_sacks=None): @@ -189,4 +189,4 @@ def get_driver(conf): :param conf: incoming configuration only (not global) """ return utils.get_driver_class('gnocchi.incoming', conf.incoming)( - conf.incoming) + conf.incoming, conf.metricd.greedy) diff --git a/gnocchi/incoming/ceph.py b/gnocchi/incoming/ceph.py index b28c364f..8e5b588d 100644 --- a/gnocchi/incoming/ceph.py +++ b/gnocchi/incoming/ceph.py @@ -30,7 +30,7 @@ class CephStorage(incoming.IncomingDriver): Q_LIMIT = 1000 - def __init__(self, conf): + def __init__(self, conf, greedy=True): super(CephStorage, self).__init__(conf) self.rados, self.ioctx = ceph.create_rados_connection(conf) # NOTE(sileht): constants can't be class attributes because diff --git a/gnocchi/incoming/file.py b/gnocchi/incoming/file.py index 
ecbcfeba..2e7afa3b 100644 --- a/gnocchi/incoming/file.py +++ b/gnocchi/incoming/file.py @@ -28,7 +28,7 @@ from gnocchi import utils class FileStorage(incoming.IncomingDriver): - def __init__(self, conf): + def __init__(self, conf, greedy=True): super(FileStorage, self).__init__(conf) self.basepath = conf.file_basepath self.basepath_tmp = os.path.join(self.basepath, 'tmp') diff --git a/gnocchi/incoming/redis.py b/gnocchi/incoming/redis.py index 5fe67f03..55e2f9f7 100644 --- a/gnocchi/incoming/redis.py +++ b/gnocchi/incoming/redis.py @@ -23,9 +23,10 @@ from gnocchi import incoming class RedisStorage(incoming.IncomingDriver): - def __init__(self, conf): + def __init__(self, conf, greedy=True): super(RedisStorage, self).__init__(conf) self._client = redis.get_client(conf) + self.greedy = greedy def __str__(self): return "%s: %s" % (self.__class__.__name__, self._client) @@ -55,7 +56,7 @@ class RedisStorage(incoming.IncomingDriver): sack_name = self.get_sack_name(self.sack_for_metric(metric.id)) path = self._build_measure_path_with_sack(metric.id, sack_name) pipe.rpush(path, self._encode_measures(measures)) - if sack_name not in notified_sacks: + if self.greedy and sack_name not in notified_sacks: # value has no meaning, we just use this for notification pipe.setnx(sack_name, 1) notified_sacks.add(sack_name) diff --git a/gnocchi/incoming/s3.py b/gnocchi/incoming/s3.py index e8a58d6c..54c3b49f 100644 --- a/gnocchi/incoming/s3.py +++ b/gnocchi/incoming/s3.py @@ -31,7 +31,7 @@ botocore = s3.botocore class S3Storage(incoming.IncomingDriver): - def __init__(self, conf): + def __init__(self, conf, greedy=True): super(S3Storage, self).__init__(conf) self.s3, self._region_name, self._bucket_prefix = ( s3.get_connection(conf) diff --git a/gnocchi/incoming/swift.py b/gnocchi/incoming/swift.py index 980291c4..8358ad4a 100644 --- a/gnocchi/incoming/swift.py +++ b/gnocchi/incoming/swift.py @@ -27,7 +27,7 @@ swift_utils = swift.swift_utils class SwiftStorage(incoming.IncomingDriver): 
- def __init__(self, conf): + def __init__(self, conf, greedy=True): super(SwiftStorage, self).__init__(conf) self.swift = swift.get_connection(conf) diff --git a/gnocchi/opts.py b/gnocchi/opts.py index 5b59d511..75250838 100644 --- a/gnocchi/opts.py +++ b/gnocchi/opts.py @@ -136,6 +136,11 @@ def list_opts(): deprecated_group='storage', help="How many seconds to wait between " "scheduling new metrics to process"), + cfg.BoolOpt( + 'greedy', default=True, + help="Allow to bypass `metric_processing_delay` if metricd " + "is notified that measures are ready to be processed." + ), cfg.IntOpt('metric_reporting_delay', deprecated_group='storage', default=120, diff --git a/releasenotes/notes/metricd-respect-processing-delay-option-b8cc9895dec75567.yaml b/releasenotes/notes/metricd-respect-processing-delay-option-b8cc9895dec75567.yaml new file mode 100644 index 00000000..61a12ce1 --- /dev/null +++ b/releasenotes/notes/metricd-respect-processing-delay-option-b8cc9895dec75567.yaml @@ -0,0 +1,6 @@ +--- +features: + - | + Metricd exposes a new option called `greedy` (true by default) that allows + to control whether eager processing of new measures is enabled when + available. 
-- GitLab From 340d7e6e728aa180b6ffe9bc50763a5d6e0bdeb6 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Thu, 7 Dec 2017 12:28:47 +0100 Subject: [PATCH 1128/1483] doc: Update grafana screeshot --- doc/source/_static/grafana-screenshot.png | Bin 82601 -> 104530 bytes 1 file changed, 0 insertions(+), 0 deletions(-) diff --git a/doc/source/_static/grafana-screenshot.png b/doc/source/_static/grafana-screenshot.png index eff160321972884e6811d4dfa7fe7dd26fac2912..c2c07af9c62cc558489a6939fc2903c132a47ca2 100644 GIT binary patch literal 104530 zcma&Nb9CKX)HeEy?W9rTq_Nf5wynmt?KVl1#&AssgB+Z2Mf{*21jw%a=Wt=9g8it=BYLG|T7ampgPD zBpWT%T3#irt6RTOYl_gWQ;7ep5da4#B7{i+XNooQ@ePvlvx^Up*?+#=CYK%`O*1uN zy589t#U^H9K|ce5fPm=k?he#}GjfH6g*9SY(q8~23PKhH3u9lz;D`R_s~JB`i(XxJ zwm`WmE<7Z#dHo3JGbICE-Q5CI@vy|{{X<}fy3LU zf0CVA!otGdN@)HalWTb`d{aR|h_Ob!ruE_VWd9S4P51yO!Bb!*y^Ll-lJu8)&9NFQ zXJ_Y5RPEjRP0q)kOuGI~^8nw73vB-Q7LeD|+u$afmf4~H0zx>g0}ePx z9A+*Y`Lt!X=h+k)jz!z#6pC^2@GKwN+g^r9vEJU^3TDe!E`$r`@Rm|agbB;al9ocD z49(BZRs(sl;n3=e!ZjX*OD#A&E2fr0e>)S3KZRG%o%-0SwRa+|{KlOQKz<;}aU3TfpL`mh*Jdglt9Ko5P#FTQfu9zl>ywRt%p}(PRhhQR1}(}zE460A1=bMp zY{|2mxxiv8gJt0UIy!|rc34SIdw~)Q*p-yL#q={=?@zywi`h0QD@`zvO5AC%ieyAJ zOBePLG&Uw20E_vh@6Hw`a1pRpk&j zOF@j3@G%|@nozYJJ`+IwoK)q~-NQngFEMGZ%=t@PT&CL}ueEmM{LcbX{|twg zo)qoDt01!A!5bN@tS*jPd;{zCQ~SLX(;-GJ#S)k0amI!-PrAyL{%YDo5icI~oy=~do2YtM*hX2VcEW|xN|e=MN-+0rfJ`HVKe`YtKY}OGv6CU){`oJ zcxkEmnkmB|UctC0gl{md^<>ee(M9awC2`QVVm+OY`!0Xi_Koauh_8E&|NBU0(?PL8SXvI(9=>WAytI0!yGD>L z{~`$!dvR)ND!0$2)q9NIxZ1jBA`papM4z*{Q-A&iQtB~ysZ+c2Ua@ye;Ku~{SQ4+l zs?olx>-&lH6M5Oji)K^RSMuYyKVLXZ`k5!BaWy=itQsrX@A-y$r1&(s_}JXQX`Qgq zSiSe&g(IWoe823din^F@-%h*WGGtEJGJ~d^+xchW5_EYTh;GYu6Y#QqE6}ccNF1JC z>8G$AAj_70e&69G&%MeFcFjOUO_lyK5*zzGIC#nQv0YT;@nm(Y#ezeTad8yRGhZ~m zB*aZJ6~@%L7*_r0pU_GN_vG_>7DRyK4-=j&1@Hf&$VO}Mr}7wmO=?j13R86=*Z09~ zY&?}l-Gkv?M};%1v&wOM4Pvzx>sefky8YSCqyOYEpMWa>@3bOwk4SpySehf=D$KNY zq}US2z~@I*e?PQ8Ubc9;C}!JcPd@Xq*N~wTl}zQe3@WgQtC7lfkkA>e{j#tY%#)g) 
zqitTil4PDhB@tK{3|6GV0s+<>jUs5qpfRoIyVi$S%J+5tklsNzddR+;g%aU&?(&<5 z+(aY5|1>ZbMbLMoo}y@c%Aq_NV;S3h$=DJx`fcUf+<~pVIsV1&u*`Mg_j9SMYb}|Z z^7Pe`P4ltdij%4kZyUv9|54}X)YeN|Da@U=y2F$9#NB(Sg{EEG-pS^bMC)-H6A;p^ z5k6fER*X>rXO_)3pEa$uuM|*fC{ykaBk8HGyQ9 z?z>v?UOo7PbHrx2mO!@M+ok<%(H92R*WR*OOZJFS`$y#GA^jk?<+N;5$N|P&#?M3B zVAK~Q)Q5T%+^Uf{@MdD?zF?dYPL1|_O**tpog^$_CjLYE!qe5n4FR8hrpw^qOCM5s z`RmMdF1dB1ZsR|2aA<*M0L9~(Udn-(aYaph1}o+?FQ12_(d_SFUndYeEhR;W zUMxhy$t{0&#`+JIT2eRx;Nz6W6Jf9m2*$|L<;2{`-R&N8m1NH?&&;2-5!Spp(gTMa z)?1Qtm?$!}i8H)QiIq@OB=nVc(w@($(+L(4ne7Y$bf8(Adq{fmqi&I_kGlOb0O2mZ z7lTms9$N5dNmnGAPi^@(8tYbp!J>F-Au36JVwC2uhrT=|pRtK!ziX#0#&YrTdDOb| zg||@5dsJhR!l+X7Y~L@B#4uxAX$b5j`)2&&Tf>qqm5J`}Z=8LMP4B!@X|ch7w%euh zzg{8N?i|WF1G?mVM$(h2kxJ%!=L@|!m|CH*82~>uvnt#o`m;wC*3vBP9^R${g3^AY za=rG#s!(zg6`OGpcHd#HL)%hy=QI3I{F#o5c-yoAzcY*~VxUOKn-l$j@s@ zJFK>SQ3VH#TiL-H%~V#9qRYQgNCyfo?WA-?u>5iq2c7zyF+q-ytPW7TQNWQ>=(ODnFQssJ12#4jNp z(Yg?=Nbyj-TpQb{L1Hmkk`o69zz);UT+ zfUpEfki}Ya&l zdU>}Cq?LKMwGAXM_l9#C6oft~5U{YwDoDIbb6D9&h7qfiAC5y7fwOCO)zWR@GPiZV zPNYWK`d(F>_+d{~vC|XC%(xF5TWNNLr*YTb*C;BKkc!NZx;vBnO@l`)Y?R^VF|NSa zkGGybWUM0s66F`tg4ktA$f zrV+i7WQK(v+fT6mkdF#zn~bLBKkuDRV9+#4d+vj2wB|n!@klMtI{?WX?T+v$xzes=WZlk1o3|-kkr@-MO$urtt z*;uuLk2cvzV`AD)P30fPhoR!Q{He=Qtpf6JYbOh`x4)jqi)865h#)rL#FV+3ZwOn6UgUX?BpTVzNN6#sWFF#r} z1=rYk6nq&oa(((qHG;lwo$ccSB-gkaxl}#{GeA7^NlnZF0KffS!Tn5lxaXUJ+V07l zmaOgk`f}34;}0|nKxI7p#DRz3MpV`Q<$$rf_EU68cr*5cIBwA7q*{g}1-XxOoU7AI zP+2)gN=29yF&Y?sLH-dTgMswr`ssj^E=)Kv`GF{Vrwf%)xSYvnD%DOVJx7XMLZ>$` z-@!*b09b9y{<;zaUKED?gS_+c#P;EQ?oa71UO^`SI9-e;F`^eaGSyVNQZp0_=P1Qn z@}u(FntK&>ntaH@ZXeb6=xN}TjyrFmhxvCq*}85@&$CJtDP&;e)LTY!arlpvO&QhW z8fHfoAd7x*RSf`iVQi%x13}rS!iE;>DwE)K>b}O%ahqsZ984oHvPWWsfq|_#^{I_? 
z^ZgookY@dYT!XVgrQhB!?&x|>39Zv1-gsE6{i~NYU8C*U%5A14tli{GWGq8o9MwaI z{wM)1b?kZ%?tn_DDxYJ~uAI!>(|+v=#!(eQ6GyPONsL)rCy-UW(i_KrwBrm0#IZ7L za}P;j(3JL`QUtI)Tl22n~71`FWJ3}!%0KoeVX1C3p>1rp~<(R}c)~KT5S~U_1 zAoN!j7f)Wjy_)jUIMXJd9DokO$TBkY`g zV>OQF%I~U|m#3;6&Z$EH0FL6`%fW!6nc2B`3R_U1${9zMlvvC9DKdOXx2(S9T8zH# zDP;PKi}!$Yho!P=h}7(A%haRI$7boDJa8>UKHQ#hS-88%Rx8hWh0I|dGt`)s?p<ncZ3{7hkf~$9Z?0N8)m53Da5|h@br7`4T`46D zavBNn%Mq{f&Y6bAYqL8|!G3E=E2m~Ra%M(Qwq&X6OazB@P~5P!U?N(3S>w-dQ1R8I zf>14-oh@9 zp=v1xZ#GRA+$T50nDd$W;&H@n>ZVG8ot&v#d>Q92xU~-zraIPzF>Jxt9Pv``th>7A zVx}71U}NfYow?JSfnd$(&_AqJ&>T~C)wP`7sY+4GGw>XZ8Amgl{_=7fnf-ofeUe}!FazuCHMZY1!OXg!R91%-k%bntz^KBkoqd!;-pMs+{s$L=M7Dv+- z8TqG`%1lG1>oErx^}I_pL$!?d>7E&$=L)A5J7ntzZU?#xurQFBTh1HB-Mx@QL(PjE z%WvnUKUdAmAEfE@IodM`smO^1>8mS(_CYBkY3<*$h*)@Drhe~D9bNBul6T+E%-yT_ z8GYwPnbn-(87Kn@++XBt{Z1{YvCGW@TKNfsPQ;W@KPO|k6Ba(sE+b;6q+vlrNL-Wi z+t06$N<};FdsegToHVOfq%PdFMBbg5`N*h>KECw9p=b#c)Qc&@4H9CD#~Pk4CehhadOt_km5T!8Dg8p69Kq`6)SyHM z0HBJqV68syvV0!XIVFd7=$oN^jKZ!_9wy6MybQZazv>v*-Y1+#@vjg^u>@B2vp;V# zo82zqB}@ubRIE+AP=5p&YXlH}jVvwQ|HE7E%_jKOGC;_ysE5M#PcFxwM8)%+L|lR; z`E9NMHIET^fd0LEY9bt981o4F?5l1kBhSi(tMTzctF!q92dy_6*|n*cT#tsiXA($Z$iH?AYMawBuGtdeOp zI?Sn2L@X9O@f{}QWURH!Hqa4HE@i&}Z13n0bS@BPsDg7^rVs}u0~wP7QnPTHA*-Xe zmz?~-M|^@33aGpYI^bsnXy?+UX9RACa(}hCWklwwgu33p6e=`asC}VyFG$rymF`jT z!<9K6<{TaObv9^9QNc{3yHA3O{X&bfJfx3%07M6--peQ9{Quv|BrwBUP!fv)!ndvLd%jJ5T(A)*; zclI)V-=UDPn!ML&x9bT5?FFP(0fecTJo$jz_S>#;eY}}$a@eQVnkN9}T^OKmP1P>hP zkVPDyQ=H)ZQi*{~YJKCIqirKwj`-WjFe%?;HUE*^$;c#_-ygG~)p!AST-O>lxT<-^ zhBZQrHBEC&D=Slx5lOkll>FO*MSGK%95w+BLn$QRn(F(x8N`)~a!VHs0w8O3pU)JR zn5-RZD~MKF?|tEBeIaM)r?!be8lMEMF0KvFl5fgxLKD7f$;0H)J`#vqpG2FfIDLWj zhFa6>S?PCV!FwW^!o%~;Z=NY{9}2tguvD*=lZ26%$>htJMOZDL=QV%BZ8>>x>xSmq zi`X4T>55dDuIah<#)hiYN_-wS_x&nTGyi>zLHVpy4 z{>GanEmzG?9jv$jA#?6+%dirABh8&8{kbRQ_+nelLypa8L~n9*x4(Zxf&TkbT|{m9 zxhtulkdXCw`NFXZ)!gYyV|l1Z>Q&a};6{iRCYM5&ER2nnz*whETE|o5d>1aSS5s(c zs654qk1-r9Y(QWjmzUPfpe3c!&De*ptXMh3rG0=*81jpKA`u9dCNl*y`HI$ 
z-ny@2x*{A=;)8r<(sl}C$kW70bIel+q3b_pZ3f>N4ZgK#fnGH5+bw~S zSh8ev=G|l8g@-7fbBbjx_omVE39i^vC-%!8LD41LHzW$t;YBn|Nwcq6_wbkPc3GmI zdJFI!e)D_x0_=@elO_V$Oz9yQL7*a<87vTW~AV1FU`{CXJM!M=##Y ziHR0*V@!E)0XMg%Tl;;ee^_*)peN17jIfh~gM;JamC4C0)lvjVFczMR-$qO*;9HxU zgwf&*_y5fML+pZWjJrMwX4~xyM!%|I{97H8UoO)o_-{*-uL#Wge^ZV07{EmT*0~El z2(9+~*rQuW5e4`jco*7_n7}~~+ef(;wSO>E5De`9TM6QSl_WCh6C+GaB;la1s;8sF z(`EVjiY68PFOBZpD~fy3MV_1VM*Po!vOxmd3IC4qmv=Y(zwv`5%u&C8Wanc;|944S zAKW-wtXWEf0>iylfCe*?P@x&R=aNHx!q4-^HZ&z&#d^Ki9!B2lK^`1XVYQo z6Hy4^=<98n<}gG_mp3}#SNG-2;csFDpsOh9v346j8U3=5FSNvmjqL3-xvDFR(8oPv znEI`ckB1m3>Y_b;AgFCpaj&f;7MV2rQce~LpU?S7eC{!|TSqw;^Z%V~Kl7FqvC-Kc z0WZ4-KKtI&_e~^T{uakV*A#6%LIVA!n9?-qZ|koFbO-K=H>_Gr)jIxY{dyI*RW>(8 zgIK;y`&Q+ooV?Q`dYTyxTu=Z%DkppN_-Z=B7L2_v@o@XoPQ|F;CqS4{D$xT2^Pp{w zg)a{T4<{2iF{{MY=zFW+;9_F!9=bjN<8pKY0Ojk}XFh5yX8QF^1IpJv9`bw%7~L{i z=f}?Vt!UDsX=Vo%J08pXahhMfqfs<;gk)&5IQq#{KM@c3+1G}6Huzt(*@s);C;g`w zyLI2Duj=alxDb6F8Cy(*Gt$4}rhg(83H|aV+=)fSi?F_{lkD>t_(~<52GH14pGgDz z3Cz*@Je7JTdDE6cCyF5+@3Y@cX_5p{srxt!!^cj!+!MhwRYQk zBxHOJPzrgS;ZaWCON-07(TLTAe(iUx_dh4!^d0ZL+C1efzF=VPS5i`Bp(S?4@2uA? 
z_A}+>HUDN4vV;4X`7ViSom~2Gk>TBXj|nBpYl0!G4^;^Q@W&_Dc({)DMck_y{&TLN ziDu!}tid2(S!a-Z+1=lgBkY)KIIoj!aaRHf=In7k0ZrE;ZDQ|T-4G;jLFJ(BpFZ;m z)O&i`TqL=p10qLZth(KAMg~L-Ex%80gs1>Qu|85U$%?lUB1!`GAHRvR`Kh#ISI075V?}9A(3D=LDal&nBu#v1&hU`q=mKk~k0)i%ffEnx zLf^~otl-wP_#zmlyV(;?SjJeeoO+cPTz=Q&WPB`4BMDhueqG%U)XWS93Uq(jXYD&J zZyoQh4IAa(L(6mQLjPS%50x9>SK9L}ZEk~WEn2I48)v3q+3sPXY@FmcE{2AppB9>JS)JyfBXGv7?b%k zW)i}joQSu`9i=4gPZz8I67_ADZev^oJpRT_0n_z#Uh{RkI*!BLEOZ{>d>8^Iv`7Hscp1qbfG%Eip= z;7RuDXyD&9nv|bjt0rWC`8P{Q2uW0^hxEUCAUwE8Sh4@=Ca2EhOEkv+DR*DL&W(on zKUwcbyGqfF|21)6tovV76?&}@9NW_=c?lTcS(Xg{OM7DEX&=v8L7UIVmjOZP^F1V$9VQ@ zekdvFZUM)*bTgZJ=W@y}Dk{2gFznC0ZXCS zXn1;vw`Y|bA7@{6RvRctG>`tDy8uSn%Q?h8`bvzL8-$(1ziSNaZRvmQq_txsW3uNI z9nAY#L;Sl%e44ibg#TPWkdE(sbE;UU-t#pb^LRm#(3p>wN@1&Y_Ymy6Em-ebJ|1Ow zd*sT9?Ip}$IU0XVCCMd^0?q2OGhKwn^ef$GW{%3_v{C7*K`#9k89(~{a(2*uHuyMW zyGY?pUSV+~n&fYP&!kmvf!j^}_$UR-0X?Ax{Snp6Wr3Y>vyG|cIiUSN1K<`u@QWMv zOuNN71?Ll^&2&}ROrw5KU^emfgS}Me_aF9#XCGW7r_Pi%APNHbIaf#9U)pac9}mbz zeGkVSP&T^L>it(hFI{rd+PzV+-wq2@=-am1Ne>xL_spROk_L(YJ!%X%6PrixG zWbIP*)T5BCB*Ifeq0WkUuvUId5tdX`*f96a%{j|kOm8fU!D5~K^6UOqCFJ*-7B)3G zd6#VW>6TMxyR=}n;Tpc_x)J$^kGOhqINtBdu}7Ek1(hBsnheV6{Z<((sysF0{3 zpZ@VneKq4E6U0B{$;Z&Mv-26j&hlz2cM+`q{A&#gfgu5;s6WEP;i48iw7>7AVg1iQ zk%<96gP|>lfm`nd~3cXe#FS$!6$llV606Y7$4CW1=%gSeCgzT3B9b;{X{m6#eHxrJK~`OaUm zkzbgZ1`imXZtO>N2}u-N>PPYnD$De+1>zGJuQQt``@KOWW1{wM9&Uf4m6u zKCedAUfj!ECw=8Y(GCUg4^A_+_N`W7bmNn zo5ABB=Ef|iJl@BqRcG;;JCx>dv(y=Dg6&Vc+xxnjnF}rB<+6iX1bjyW<5;KrcLyV9OX-N)=d>EFDh%dCXe%{I(*8KPRErKQs&sT;vcsT*#T^(WbW zIkY36Y!rg}BFVaWY=&k>EHOtt@7uh(on9Jv&GJxUmoldjUj#+88bMu66Pv}zLwDL! 
zxouD7IKz&NAR0cgCJp`)3HSC{bt{7H;<1q^bJz|r)mw&}82d@cRZN@DRqwMtL+MU5 zAFPBk}F*$QdeZn28<=@v1Nh*3){Tn_zyBsnH~E>1z_kei&%Z0BA(U zk&;VAN$&O;K2pQU4M$STW-&Cua_C_G6>@dXuxVS=#G>D_m&gjIP$^o?Y1n!# zk=FSn8^Of9uSF_73p-F$wJ?q7)9LFN&}^)=^oFGgwcd(`b3}gT(0ryW+JOi-?IWdw zHMiYqN6U#LfC`wh^K6*s z%}o_UX5~8wI=W24n^RtOav4|a=-NOtjoc8Zma66oZ*`6tU>zEc!@ctWA>|cW_F4uD z@GpNBR48u^Wma}k*RnQF#!)XQQlxSXk@R!X+Ll~Sc!@7{?faBpq4BJ=LfXzJUkyB~EZQywMmysFCX`8(g% z&OYS5(rDVV(GXpf^SPhu!kK;rs9&32SWH1k5JN}UwtGiZ{qdE(9{32A3#}GeW=7q~ z%){tCQ`l-x4*6rktJ|ozhdP!Nfse48Wto=R1rxy`JlWeZ^K<#ymDe8>)+TZ*Z4M2O9eMIswB-|~^Ab&h<_K=x!|kor zG)2a)$?(fUm?k?>^M-)d; z5P{PMNQt+EmZM+-G+r%s=FP&>(+&v4EUMrDA66f){su3+U!+yQXtUfWmR%?|-cDfB zW*~Yw!LFp2z9rMc=e9U>-U}NJ$df~MG&?=Ip#tpg@m}N@l@cy>=Jp*)m=t+52}V6X zRB;+rD!Z>go9&41e4}x~GnKj~mGNv?rc1uu6X(2`dkGtdz131iyE|`GC)$45q6>n) zmtrX=SRF6XhAmO!$>cnN9P7-}%(7EpTj?0e{SHpGm>RM3Tz#=!kyeBB2uaQ71kvsKuF}@iI18*_9 z(PM65BRrdp&w4vb1pvv)&rv(|9M|!*xUQh`Gu+QFZM2T}j+IVP=j(f0;oUW# zvGVk>INVlH$QXB;%iRlp%`Vl57cx#fyZLUO_4%UcG6v5$&W zEKI8Fj@Q*NPjURCN<0{{IMS3->^9iWXdzT4n7(YKw}<0i1<85k>NS0}>}8*4p~k@j zq#Y}61Jx~u>)!01jtVm9q>ZH+8haZaHDnRbmBOK4pCMZ6ecv8QX`;^~55jWEjca0k zvuWk)exrSbD%bb`RT`6d#z4$OqcHVsf|X>Uv);x1tAwV`QgylSip$HGz&rY}Y~k*h z&dUi6?0980XHvch5J7jY9zB4%Rwz<7cWrrIslT?pu9vtY#kibvQ~yZyf%>95I`i@7 zBWanA<4U0XOU3ifA#CJ3&vZs3h(F0U%x*8sh_!q*5BuT9p4RZJs2jq&O5=1gKeo!? zxZoJp>n#lVEY0ihs<} zsSk7XfG1+zB+suK@k?f51D$-O#&v9p56!Nl`!XDt2P+jMB;4z#leP3;=WILAMiX}k z&m0)JL_+$nV&pJ3iB+!Pf`j9;x(eSh^Q|(*73G_KkK4H^v=etMLC%QXMx+?rHcQU%8~~ z)oRmt#($2(BoEksZu|e1twe!$f%u?WFJJ&}_cj!-4ibT{pxmUyV<4lpa|MuS2O6?# zT`CqF{oKl;ZCfC=;8E-mgrA}92zxtvPpHn0WZ>`K7U&l9sIKww)XKJ;Xv~h9k33`t z5M9VnTG;;?c#Yn=mk)!4Y+jMRKmYG-5CLGooFfuF1&dxOh#+bQMU?r%-7eMN(&D-= zwI42CJGi08p6J}T7g}(0qVNU6iw{I@HtE_PAuW99GZBN#^>6)9ld1@iH+1m}_b&TM z{T)f0RJp`tpHim8HSASQ5|+V#IOU~W7}Cc$aZ+*#<}TXU!@`gdDH_ias}5tPWOr^z z;0-|#*NHQPgLK{3x*tH!t3e zJ(@4EKbTX;Os?&Qxu<^K4?uyx$#U2GKT}1Gbr^JvJl$AU1wgsyU=eNgGOH#fbN4^! 
zp_wr*eiEWoN5Y%P%;}Ha0V9QntV|SRAz&~5v(cEkUT>%=_;L!c32RRK;~23&2KM2C1WQV>KcWg3 zkH92wAqC-C7IFK@aUzmLjaQOK#q{Z+?6PGqUfNOl$$q` zICyiiFXH2HVw_Yte8h^=_CCT7qmKKR6oBJ#XfxY51Tryl6)x z`*f`b<0$?sm3N5o)54l#H}S0RLu6#+*jQr%n|`*u852%wp`QvaL!FNS!iwKi79pi% zE|N5I%ef z^ZN&(AVh#&c2x;T!+T@5J_{dDB&-9G59MjCe(25;3 zB4uCgcqPcKb}bLY+q!fEjTxo>_+=LVMMrFG5Jtu062qSqyJI_3{7~Z7EX*Pa>AiEt!O_|?2MkAcPwqQKd+Re>{v@+uMs30>Aw5awDzG< z-FNYxREv`yp|q$!FryX-;@$iv^!U7*zN7K#C=S@KLXwkrkzyg3CxIa0+$U!mzj?7- z@8{~#CLp=!Amaq+<{dJEc3WE9$^;xNkPZb*?BlwB7+nawJ?ZePla35vuD1aU2MHJ(gi`T$rB*R;4sne zzAg!T^IImdeQ|fx9pQ57nq88(jNM@(Kw>nrES0>LEj)kUbBjKsOOSYy_95mVViVpw zqk$}FP<4MC&8}+~L)W24ThjyQX8JAgRkq7@`|`7Y>Hg@h=$@01clR11p-1JPXnVl{ zd611{KaX08p?Lxe{kcG$Vp$P|S(A-2rX@bdzlW`|J46f!c_u`Si$9)Bv}i z1@9J2-fsKj8)<17c6JZr`b{Gg$N+G7t$r0ioSUt-XouL!`0~J;8EG-khz}0MJ5naJ`?#K&z-ubla}GpqlrHT z$gxkrnAs3!rSSw1XF`X0vFxlF6Qs8^)ZNjWFn5zkBF8Xpkffcpkz*~7U{cS?7(kfty>DY~A(IJ5BD5kTsQw#UDo5n@ zX!#W`e&)vO76dqHvQ`gX(=cgNp-&~F_W3)A>W{6>uRT*2)P^0S(WtOTtz6sQ1Z6@< zDTe3A`jilD2Y0auV5Rq@BR3Em7^OU`e7_*X-YI|f8&m2Gd8IfyZv z8KZyFt2`9GH{G(_52)ED5qwKt;~;#ZKki+ID2-$(Af8?n8Djbn$kvXUl^e_u8+vGu zo6$Jy%q3YBfAr@;?>+!n&T&lMINk6)(53_f_#a8>+*b?LM!*kecX|O>ASovTNsY$K zyl?r{T@wJJnsyHH=+FSbHokoV>*uYQ%x6E1A-2!u3*IY}c0*RpSXU%Lh`$9YbTYSP z?9nx+H#7}fdfweIW)^VxRURL=E{U44rT(O`EUGj`fF$WjiV!{aE$h}V zs${PzTUsqo;PdDI<+7x`&_(NJMwo*2*)SNGw2MI@`^woNd}X8)M}X74JeQ%vX|~_;Eas zHIO{g@GDGpR5ktjY6_UTn)=lsmW~{Bc^<`xNK6rRjCUy~?BcDoo|ZDJ%sXcd^z*WN z?(I}FnVQ1Z#SgzIVSC>|wwQ+mFVlfZICs z51k1S*j~AttgQJ+oF+lC*!4?Z)G8E^b>pSGgs`W)6cY;CrFJGHh%E&XM-uNe8ZhU36r^S(L-n#ZkF4x+_iKA!HC&WE*q^zx= z2m;6oC=hFn1^7c{knACWN!@WZ7gZgPqhUEU@{pl4N?3|&v92ACbML#oT-uG6wZg)e z?V0!=C<6H7=n(urK5!0oWA)8=*y&!DT}Ui#7lk>|yXBF-S^)nWEEad=ZHUSf9Ke4; zvq5$&bJD)dn3Wjh<&XOjpJBwoiPd3_wQRPz0L^wWnpn5 zm#{El!udEgbgA%*&e-%7Uf8<&%XDKMJuluEn64pS%^^`_sn$l-l>uJ7ghP0aAj@AKt!GkTVZI5@hc z<*(!V`do+t{K#^!!E*p6d=3mcqfS#7Y@a=Z%Af!x zcdwBKJK2+zl%n)I7n zGJGBLDJe=H_aV0Xex1*U8phoHgnID8mMA!Kda)su5DhsL@ff!MY{}r1Omzt!;Jgq4 
zCC2p&>1ILB+Dk4&pR|*=Q~>J_;YE#P%va=yKXCgoFPr^8z>uw$D@_tnr@%`(LnO@* z?{pVy!9H1)iSMjko?~(QWg>*Iv*H+52CU^xdD${B;7vROneTkWQ+!UGw*LCxV&%5g04i^WtEaM}h#zGv9zPR4{;{ zEFqgjHw{zJpN;DlnLE11@x3$3^CN-f)PMf0 z2V6vZc4bomnFFXaubviBmEM-u?s&jUT(MszU&bN4vi!?7$YHUFj@Y74d?9|8)M70zO5gngvAQ*}kf0`-xwV-;fhI>650>CzRDzI`I@G6_fnFT)I} zHX9wO`i=8*9Go0|aKUV~K4|=;5+-)5thtOsQ&m{h_<8a_9dHrQBRN9)EjwoB<=uWsx6)J`Y@7231!JYbP8^HL?0-nWPCyXz7o$;eX`~%%it1^ zeDUz|6btDOmASu)gewU>D};WHEA`0b)Z`U|9I>11*6$C)CR2&Db1in)Q@fNBxO?4V z2H;ome5^CCFsu%j?wjw=e%CX)*ZM`b=eyv@*6=~_0EX-f-1avR|E^lT0FlXXlvi=! zFn2_?-ve-b43{}<@L*QPd%jHjN@A7Jn2;(L-lu@Qa;{Ig_bZujDIIy6U@bGRmI`x~;Fh?X1Lm)bcdb=bQ%{ z>?JPhV+*zUxem`(o2YJeq|L$HH{lt>){I-9>D5)sHPpFP2CKHl7Dz}~aPTgLK%(Jp z0b%4waFpJC`j7WSX$AWVadI=aD@IJt*X!@&_Mc6SxSokA2_=-8v4MvUZ{LDl&nu9x zLcAnyzevqK-Z+@<;6yU)J@H}RBw8n$ziQGZRp?N^WWzVnzSm#x67C>hfv`ACTUyMpCQb^1(c$ z=jVSxBSHtvqO2t(MrJ^YqO3F&q)wNS_oX4mWMMfT6e|0*CM}%d0SNvnrxALxJauW- zcH8HE9w5ux3TY+eNgd3T%53s6S~ltufTl{SY-I}SA1`A~rqPQ_e{3s2xpg1^nP@EK z;d+jBzS#w;oGh(CYJP}41ablMJ9ffXmGB{OD32wXx1_T2u$913M${0~aCqK9G+qK( zyr%Q^QbFJet_waWl2Gh2E6|pBkE&uzFcU#$+PI3HqoUW!Cn(3~=-8A(Jph8$iPy_E z(YkSs{h&K+c9VS1^#(*KMLM~K*K%Oc+N^2k083WkG>QbVEA)d}_^w#{h!XaMof$a= z7e7n|AORN_@u2EV2(Cz`BPwkrMKzeU5$P{gSRabYs#~u|a{>qXeb4hUv61h3xSOi4 z+ZhgtP4>$`=Tj6kBZIqMe#X2)wd(zl|NIdz$Mci*qVC5t^UQ$-UmFSEaKyIOQPC$U z&k>>ir~gCPJ4RR51dYNcnqXqvwrx#p+qP|EVr!C2l8J5GwllFa@x=FJp7(w~zPr}F z|Mp&|`&94ha#vNmv^%$~tj^&Hl~>yG)18!IztlwM6m`DZ4N~7o0W(2hqM)qHV6pCg zZQR&a-)JRZSBRs!A^fq`N_hE+sQq@P__5Q<;g{S(_2k z`_7WG0qi=#DT!WF!67+!fs8>i-1^5OPH|zO>z!8_m&jTC6KVkp)QWc&JC*~(KJ{haeI01i$)MxOwI+5vc5p0p+oTfp+qx0b_WDt z`_YxWO}(1K6*1zg(BF8I1hSPb-SV6nswr)8M+f*ws2UlkJXAw0#l|KH!_ffk65P$q zA_^a5j6qL8vS0Isg6g=h6RITJW2=cq<>viu(>S{U^Un1n2W=zoYJr-jxUAp9;war! 
zDxS2g`q=iaR?Py2v+^m(#6 z1|5LnhWG&h3ghg_uucaRa9}{!R301F4m1!a&ioes(WvlRMt^(Km_6gio_;tqeq3jt z(w<~)^6=(qyNcw%b@;7DsNzl^McBF>*x_aRs$QN4_l#*xH?vGdVIfbR{wzi(5;hye zx+eydv-}YM+4o+!B@sdgBB(5)l=R8&d)F^Mn-oc7u|^kGN?!K6>tBK><{>?5Wpo zN=SK6yCf=2`Xfb;hr35fke7V`OwN7pp`~mB4{}!^lcbPR^SlnOR_%&$>`fct&Tob9 zJ|Q-%kKdnR4QxQD_G^$)kTbL>ZQRVWqNxZ@ML*d-(}j)7T5=1}eG6t3w-p9oxkpb0$MxG<$4) zHj6_JAP6K(+lv7p^AoV_==~bosN;Fw8zeFs+xtzC%DchZdNY#BP*nri+2B7owo1kS zLD9*{AOpKrU)ny!Ry?FS+{N>_al?4Mu=tS;Zo~vgO1qAJgF~>X?j&qeaN~ZnAp?^+ zSoOy{H52dGTYb9;YJR^O;W8~cC;=DOF6H&MSl9mgF5vBsPoVC(9sTjbw|-tuDy?UN zGaewz;3jd7D5ETD*ZZV9QEi);C5&Owv-&p_hZ4>bJ2UxJ;v_mm+dOcl<_=?S>1SvB zMzkdu>);bw*5MjBcyFTZzHDmcEb|!`i*wUatAF+?n-*;x>)hWL_yqH*Z0AGIshC5b z!&&FtqbZ81`!_AR>$PQp`&CG{D}nv<6@_nGn;)yf`f=0Fu%rNglJH@09&(UsB%tB)R!LcAo6FpQy}3=ARRc!(`25FP}Dfn!fd70uhil zUYLVcodniRup9h>|8b-KkiA@_AdMnks0eiarnrcSjVMv)oI{;?h;McyzC|4S;2Lxa zBroVnjQHF>zZn}Fvwh{^=B`uLCutE~tfW#fjcyvb$%71br%1{}2(OO+IXWsP{#sS` zF48%vzIn&t|J&Q*C@FYn{qOb5<=bZL`${0e>w$>3QMvo;Du$h&`T5I~IsspLJjbS= z`%!MVUam_)x}UJF$OvTj@w{JRy`QE)%S3+N?qI>k-qu7?=0~%yl64~svcR1{cMBHK)5v|>m`d}S>aN@Jc^|&G@K2HZaETI_ z&yzPjSV&G{UTM;G#>%dK>^-_{xA_3`clp=?x9PHe?8otcjgn-$*X#@j z`LTTF%ZT`EUB`K%=_)rpUBp!TWMpW!2e-Zx_Q&7{`&5ytHXcKoPUnQecL1WW*0XsMo6>X&jcJ;1@!iuK-w7A`m!fB^h{^L4s!ukW)JjL_=c}k>R;Nzv+ef;WUOIJS6 zR>D*W@us6UEjX3h`!egf`J%&B*)>SFk&Aofh&A;7PH(fQY^d0}dCHN$;;Etnpv-pJ zpMQ!!y=cF7d8+;tOptv)o@pur`s*bRl#qzMqb`JX8DbS79dXW!^9aDaOL>$glYrlm z^sYi?-+p!C-TeGEBKvR|4R4O&P=@Hn1b53R9E+Jh41T^-D`O(j|(F0AlM3;S+~4L z%CXdp?sBHiEY%tp3qKq$%=w?sk6xsuiE+Gl!KOg~Jwq4EE}c&cB9Q_uXKQp_o%7SW z`pX?^OPK{FVAZ}j%N>>+Qq$^PV}{M#>uL9Jxqj|?c0O}K%*x8X0muq2+RiG~BTd7y8hg`6hW~~D>Z;qac~$7! 
zG=5FYN-r40`zA;3ah%UxZq6A3Ye5}7fWs*GWgnf!dnzV@K;XH+X&$e$=(4YAXI`17zpSeNaVAG)V!2m}zOUyJ0}YtRWSyU# zrnl_vw#==0@l%JTpZ@ZP__DuMCXCS@jkJWaA`Tym}i$Fajq0=S?f9s>=L38(O4jv!l-;FB!%TjDJ*b+QrOkF5k z4-rU1<1b(!GoiGDfKy;TIU#M94z=gv_iEq0?~C?)cdd|*6acr)ZVZ6nMYMi*Ee~VM z(JRdv1R$WaD`Y~pwN5#1f^jjFQv3rUm-o-dTeEf>DkDH}>o5MX_W(Fx#ba|(_q|HM z{}LEyin-AC^*+0Uuro4^Ge4JA%0XcTi%?N-I+Vq!_DwOruM=jWmX>h-@B2u(FhEcT z4A-l&!fgT?3pn4tZIw-4rG1J=K7>wmj%S2BE+nBXpWD>lO-OIMXWfrm{l9b33EKQ^ ztzP9P!<2#u0Ebj-lUuB5Vwe%J8fDmG3+iOGh19X;i|FkZ>$jdl1l~o29Hgt%>5mH( z{jJ(6^RxS`!qXiET)(1VTYmuDG3 zJcs=@y6>%b-h1J?!1cFgfj?1go-e6hl|8Z*Hkq|;sz!jcan((ItE~p9#d4j$V;pBPw=592Jc=#&FxUIycdOf#R2mFPIWGg#Qw`ceO5Iyk_7Y^VCS?ajxvotcyT{CP+{ zynsg?^=cAP6q*;B{&XKNbP&!X@}m^{y;F`#tv71>IKafAX{eb>YW!ir?99lV+i%!j zk;435nUpsx!WyN-cOBJkJa~6#P=G`P*3gj6OQVlpUKN6dT5!o+SwKV(B}{?FD!wIl z*p0vBjhNE&`l$**qH}r0qggX1THS{UxZ86GFn6DjF}44}5rc;QR;x5ICl{?DFHm zt*j@`bY&gg(}}3omwPaBy6^h0S%mFEPR^?ciJ)q^XIS6^kwOyL13q=D5TtOo((^vm z1&r%zSmPw$S0BoOISXNq1FRSZecn^8wwg_*s%B7MWL7CNzMZ*Ga1uN)N$VXMwoTq< zn{r7r)o-)HYd+M#p#y;NalQhiQ#Cb%bL{39Hr+gG(#Z^E%}a9wTAe?1mQtqjp3YoT z&M~o$?^gUDWmO#4w!Ho+od%BZJ2%eyJmB2D+oODjq96gmMI!9*YvB94>C*WLATL2# z6&}3`pyYH@Q{)*L-=ra>A=d#%4EUGvh&uExdPA+TD8YyVfvb^bY025u?lMVWe~~_z zQ*Hov+RCw%Y~4~=pa8}gfWaNxJFZ!-3H_I2p?d-qhkP~@B5Psp(h8drLcozgmeEki ztzub4!W4tXpB$S&nwOO8pYDzwu}9T)7^w8#J?len<1O7NuXsI}(?x`N`+LJ4{IL_@ zsDckcJONdJFR9MW1Y{f<+=sioIKwueH5SqWrU z3&D17j2%L2-Z?S|VXlSJIlr%2)JI88%`>fcktA@|r_(R}mdy3!9Z!lyu(b&>?W#^G z?o3}s-`0#`dT2FaW=9zAF0PC21yy;#q)mOE(l^z{N!!?o464Qh&Ec(y35N0tJ3!`j z{W8@RJRu4Yq;=sC0^c#P+E^fD>HDgGA3v_=00Ax!;O17iE<8V&2(STlf*u(nN*nNj zv3+)8#z+p3j4t<1@F>0F;i&g{{l(`7Zi4W&5^?G63nf0TpE9PWcdxV*wt?cH{;`PehDAg4F;Bjc)XvOrp=Wr?`#`196mDv z>Od=3e` z7NQRfYhv6+<%C2e1AvGk0V{dIQ9&SfaMPhDcAFw6hQ$6*z)SUnsC+ZDyRGeaJ7ukE z7axg%8vf{9Eg}htCzBr;1j+Vs!M}ml+mIxBg+`XJQ|S2R(c26wVi(Ux>D@wGm;gxv z))w^yn@)X<&7s1`+7NR)8nl77-{l5NyZ3Io1Mj@P-%2`7_LQL?SVi1SL(D*IP#88= zCN({k$CU3u*#gnZIX#%u4jb^5xdm(o#=xL;rU@UF0E29#geeVR3#9WI)qPk6fDrY& 
zC?cs~LNswf4+Jy~{y|Vg&_E6+jZ^?}6tX+0ong){oZyrVAS}kw4(zDM#(+Hn)WDG< zXaEq9VURUKTSIr`b}ayA1C}vqi;@n}1U(^a22#&i-@}~UJ~G`L&P!$>JSL@>j)l$h zP==>BS^b*)=d%0vK>H*;Hc<$zVGzm|_5!?SLNJzwCL`PB#EH5MI>c1#ImoCo)bJK1 zdS>iqz(_3ZF&3lBL`koLka}x=enRCs<0x95GXxG$_$*J^Mjy?`OJCOt<;s)Pk0oFr zDQeW{VHKh8mKE>z8P*b|?X-%v=3t5`fbKjLR^yRaD_%PZ0fqut_G4JSWCK6T7%IjQ zKJ;}g{uAJwDx)5^CDvw)%gf4frvN-Cgz`rCXKD^+Yyr3w02~?| zBve?08HH37}#ggh7G98qilQbXPk(Tlnq^-bw}WN_SA8l;flEa#no&!N$uC}AS?F1y-gEk`I6H{OFov(- zLrA1P{B(@eyHu{_31MhC38F@UBGP$DS!CQ~KvwQkWf^B7OIxKe6{u~XnS#JHl`vTr z3=X7yc>JyALsv1l%m^$ZAV&@fGOjK0O6JoyRn2XGJh3Xn3*^bP0qm)2UG{l<{n-viF}xvBsTAyW=5Di zA*0HZ^dz67ibbH)X~hz&J7s5wP+D*C5MO*a71N!+H%)zmTsa1Sb2|=tJKRHpoBh#& zafTi#A{Xs3Q1xIF32M(QUzb3E*-bn*UuI6*w#w3Iqf>(~S!*n#|=O8b|^PGv$HO1ELV%Bjxj`5Avg_E~vya zMH`^9s^WP4khJ{3=qB6i{+{#F*8|R-o`% zw?`B*L6W};@vnxD$V4bd8?;zW?_Xkb-NGXI4wrl2|NJERLl5!ag#A9^6l~wpaIjBm zf@(#CD%u&3&eSISFH!nXwG{(g%<$if6A@yuWLt7D^yf&4Gpc3~d4f=kpN#(lw|(_H zgb)!>g#s7R5ax^t7c0#8?>{X;KqpXbX$XoCW&UMVr*eak$Aa@Vr%U%kqkN8zC(AFx z0B3td{`ZoqF42?h{tw6qM1d8rK?#M(f&9M}2y4Y0-+Lh?IP0N#9taAu0vk*d{tePA z&cgjcj80=+^>jhA?B=H5&?GD1tk!r&e%QF)kKbrhTo9Ji&Iy5@we%yXf=4#oidC(P|l<{1CLl*k^1h3(p z6q$;y|CaA0gv^JJpmOK4lT8wYj?<{F+jU>Zwt2SvG=1rP607(XK#lF?99&pMr>?hE z-vsv^_r|XEZXzL4R=tAr?oeT8?tXW*+3))BBs=FWq%+*F$saY<{-e8^F;TPTB7!3g z{Q8fyz{_Gn&vRq=`%@9eHk?iunWM>!#Tv+e+m3-*dgw>Aw>C;5@WG|$eKEM!<^LWx z|HO?Whr8PS2-R~y@Ujw>mutCQ=O)eQ{Kr*+BKLzpYW-mOeXA(^!_b~;sqK!1;gW8{ zcl$v)2dB*t5C8(82b@w1{o7h}2_g`l<$f-KoKypY(SCuKYeCtM9pVo%bz99dP}qjE zaQ`xVHMQ5#HWY*_NR9Wdi>x&eRFXn46Xrl`Z-(vWj|+7})D zicOGhtgW-GC>xXr>8tq$0Z3^Z9we8m&L;dY68|7eh@_Yg{2J}8$A_!DNZg9E&E%B0 zPWwlUw9uHbz#?KKEaQ~^0xoF-H`|_h0nvZ6Ms^i*QL4!M)+wxafVR--{616=OjR?PauJ0O#SB>+MoEMfa>@R&8J*-Wtk=LykNSKSSjtD&C+GGasw=$VxSm)A0MjDucbPT_ltyCa^uH(lAFTym6qGHyiE(ra*l7`a$na9$0)`IXEJ?%NJLZJ1u=fs zT&A~OF%0U}t+?2A?>)N{tUW>IJriv_hp(H=Z26q$%Add9RrOY`bBxY-UeIN|bUBUN zB6L(xN!3S7K@ml(e+2gCT5|v@&Mxq|5tBf;o404Zj{|d>eGhhqa+X~|0ip)m{f|DU zrTLF{u$OIT^X<>gdfh^~cSNvG@^RM-CH7u_{IS=oF2gP>wHGv$?il}fE`YjT^rC0q 
zWu6);3MBuDiuHHY*UtwMPV-FO4s`e|Z@9AiI303*VG&(7mrC)4J5&5yNhN@kG8= z|7yOVbPr^F(c|_Q@01r{yHT?N2eb)1tNXik5)i)6amc}mmGQR;Df77d`9I2*vEPj~ ze{6qy^sCj0X{Llq3hQfXYRVM*?VvGzfH*7J6N(!XF&IdA+61E94^;9Kfy-9}I5=4y z9fgG?JQs+3A+%~YZR_1H87p~P?G%pRaoqf0*LhI%-%iJ&x7i;3-S=4XJZ#GDWJf%c z43vh$GHH3n4Wv6sK1JtOy(2h;_s2MzX@S3YtwZ{N2!y*A0q2kY!4VK!Lb%7PIXETH}90R;wgX4F5^Q_*BL-@c%3i3F&yL z=Hkq+{Y3g+d$t5IhdO}+)9+@}ztIKqZD&U_-bb=H`hIl;0OHPOyA$ty9?0nV zVLL_02i|9mJfFoQfmOKV0BEA06&qi8Wx*H$r2h9bS9tz6WbV&T-AfiO20)nbbZ%`P zuG#ub$k+f6|*KFsQ2fS8C*WL5oB zsrhWp@ea&od)fgx0Lyz?DdzC|<8~ktRq*%K&tgjhkv~s^*q!qJ48!(&K=|<}^M_Jnm;l%H?oJbXAO6HqvIe@QC3ukP9=JL-j zTZ~aCfr7^=l~iw)&tVwj>Lk`0F&6gX%OCrd+S?}Ece#&zg2A#TTch$o+VNEzw-fva z>;lTDf)%4mM!V+r6svuwR#Hv7BJ9Q$hJ^mQiXlJgj4o?185aB+7Fb4N^dT_%FB@W< z_}vIi0l=mcsip_FPoX|@YqK+_KQ*v{LH1A8-hhY=JOKNA($V-N zw|{)}73li{9}t}kf&SL#gD^eqCp~>C`cN^-*fcfoLYHQv3(GI+)=LT&|Llv9Y?MzK z^Z8d8=)lm@=fi#<^SWjNa2_rD%}|_AF-IBI`(Np}*Pq+qfPkrTyKc{DqjZ3XH-Uez z`(xF`so(}dpl_V7y}spc9hxmD0?8M$RB0AcBHN zil~Bs?+P2Xjvt_$+9_WH0HDNBgaBbfyzj#KXL+=dbun$1W*r5E2EQpC9llc=Tyr|p z;+gEkq*?259x5Dr9d8(-IV&;g9VZ`GjCneW!Q8l<4J z3GD&teJj%Qy@)B3^>xzi74U|v!NdgQ{)x?piC-`iotV zzhNhT$5&G=fB41y;Xtoat*)5lgx9Gjw5;2t>jA0is&z0(fZK8_tZR$F zPUUZ;g4YKs$G}{B!(VZ_%f=cwh6w_ld)@>3h7KcH%BxlJ-}_LsjGwz$r}D(p-q0EM zfynRWT>|<>4iNRS-<*3Kafi1GWo!Bc=8f-LA>Va+n|B`O1iDq%s~Y1BP>6`>h!@Mg zWv-4vlk=k*h&CdnuGFlBCp2J!AwKGXIr^ zhKxFtg>r9#%A2VgjQB`{|ETVUstJ)X+OjnMHT%|&3mtW8D6}ul3E=Lg0RLS>s-Xu7 zL<}4pi4$rtAf%@?Y4q!L5#kl1G!3eoz9U>LN=MZ1aGq)jsv)#EyXydqCaQNqQ9aLP zN#!C8q-bxj{i%;}c|EkT0I;C(Sg$QzAN>6y5jEc26qwpBpI@`10UGUoV#L&4Frl*4 z1^&Oc=kTSq(bE1HJ3u>Dj`y{8!z`yKGRLGq@mGy zTEImNQ0;(XQ_t<8=5AJFYKR!_0JZqnJRO%M!SH0Hz!>GUzUJ-8kT+ z0trB*hF23Cp?8O*^!48XfErLuVMJ?s1*?TTj7>+~sH6RQZ&rZcpE`nNusugrU!?kE z+>Az1{k*HHm##8WzZ(Uj8@C5cuxX2(b&q~o4!a1xmp>8yo8NPq@7-L>r~T`V`i)7@ z=p0ifbyFzX#+E9|>J6S)BU7Zy)t!;Ch|z;}4I%RMr;cRS>wYZMgbW5~&GM@#bxGlr z)k8CEI`5ju9`@WUZURP-s2{byJFAw1Kwh$fsWl{~7@3HDwJ#+F%DH74(>czQ1%2|T#KW(`=Z9BkUzQH4sb-U{ 
zciGK!tYzjruhgeNOf8coSoQf6Slv~u<9j5Vg}}cG38*DHXuH1|_kCmF#kzQ}AP`?L z_RjSvLs;7Mv$r?j{#x4?mXXcrU$on3K2T|x?y(TUQ8 z5=q-Z@)CC%^eW{j;X$mY&-8TrsXGQ(q+{eY70EsAW4%WN_L z&p_mtyuVZLr(%aQVFz$#E4^^eaF7Xd=XG#14S^Es;L)Qz%a>uF zuc5+bU+*Cq9NTpYNJpdQ&VyJn7p+GlL;4dbwPNv0icbzp^_onJ=1+z$lYo&X<4JZ9 z9LY%lpr7?%EnH?^rbJ0rriWfkvP5ap>*2|`WiyesJVU1b>zsi~HQ6`hK^5x7KMB}b zRjOye&Q&Jj^|Cz_dcH3L+g`&LU$Wm%9uqt6w5zRvbBF7` zX6nX&v0n57cHqjGt*8#c@x3p#zo3yp*WBDVbKkSKyKV|EN@zjLs+l6!u<65=ZJHv) z`==#;rm!0D?kE>ND1T%lK^$8pwGr=@!8?wVNqrN=j?^YWd63w^&lmg;P3W;Nl(vGtDE1TIw3gJ+&;-ut!OD6^9`t7MUDOC@qWz80&1IjvQ#r=(oi`i3 zeLshD)HnTZ(FYkp!XEhFgRU-h*B1j@1VCT#U$(kLouz@{jp(7E$*36fy@%BnxT{P6 zAp3JwwnpkI9lJ*A>MAj07qg@Uvh~-j1j>qtiii+G=nzO^l89nL@uEV)wJi$q;EJfC zlBlG2ExZ>eg7lR~b6)g0(?vJpsp53N#G|f}Mh?5{!EitiCH+aKtMoR>HrFq-OZ)%R z;{5>vjR}#=mq;pIK$@-WB&L`iKNu46h8{+&=8@qWI{uy7ZJ?i=6;uAU7Is;zXpTj`9r}F-4 z{pp5FnajKEWPH1Crr(-)tvjw+6JRJc7QRK>(hPBZI%piPJbkfkCbq2-7Gl*V(WPS5M{BzHX2L4R9f4>c)Pz(83;^3tJ2im(|1!;wsX*Lr&5-JBfD2g?PMEE` zAjhA6nlWOl6Gh5=&b4n@dLa|;Wv3grI<_7#dk&ik0O$21X7VGRU9nfM1&=0+X^wJF z3xzmpYa3st%CVgjox_f-Pk^>2HH?#4;L^*!wqeebJp>tJF>QH^;yJzv;SDr>H|4X< z{gJg=ff;ez+K?-MCjPxm9w^0QjH^35-=^BVU@_NjBkD=7$1VeTx;*6Vb%HHSEI0!E z`+klK5#yQij5bCsVz&CS$i~}hPi1!}4F=12ksiNDb7^d6rhvDlNz?L_vi)LsB zxeA@B*%GXoc7v_+=#rhlxE}CAwfzG;JiJv;Z=S~RLC>|@7~+tMlg_;ivB_d3XpstW%-&DfN?I>B91LxSNrq&p|)>RdfD|a z*^5A>^IKZEk7P;?E|dH~MUuJfkU7+W*%tGyl~;q$cOKZS1F^eb9$ zEJ%)N)m^s@{^vK)qDeD@EKB_I@L2G8ivR)SAQ@gbd5#xV#p&(|u-N59X;OTfP%f;0 z%QexKi9K5IsA>ptzJzY=6l$h8j;esFTB(^pamny3_leczPLn=M-Qw0C{HH3kv}s6p zq&|t`P9Jv;#K1JXrAhhuUN-mAbayn}*J*$kt9~jbPg_N7z(;Gkq`WFhl{b-M{~BDk zE26|#b_KYsC>8IvjwijFF#pi{7mtV^$`2(C%B?wBrok>7BQT-73k^+>;M=UgfbU$bxs!CyIWqX2u6VXd>wV!3`MwLQCG& zO_Ns93ED+QEy8rrEf_b&X{$VIGGr8KhRZo=vpsGyTnV!)!{W+YoU=?eK@}591eZ+Z zjUpTmqJ%DpNDN6FAobKhot8*6k^UX6YkQT#07F?PJ!N4=Zv1o3jNO`fe7DIoi@f>b z25#J>X_79Qi$i90KN%JYW+!d2WHHB+yA_*lYn$k_3$6d6FGfXX0Q%1Fdz@0F_)s3^HRZx?4 zLF*@d$VmhKPY_qVOGgOt5!QE#Lpxc6N10B?R_EINEn^jqz3TRLt 
zsPBhG*c7gxJNanf65$*b2m2Ve-V}#7n;hs1p_54fYL#YU*E9Q-NF5fEZ_?i zjRmU|2HVxXe><1XhRyUGPo&FupQ>gbt3Uv@M25B{E>GOc-)*}@4{^WXme%c!$A!(v zh{=#V8+5QSnG@$WwOmLEibp4Ve0|Wr2np}HOX^Fu|G!!kj)jYUwaHQ-Uq6C{B_O~Z z$VI5-!oi_>KoF6}vp5k9sdmua!8Ka70pRc&ExjPR_`P={G0;<5+0I9+*;z#zIv(Hw z7!<}%_y<$3vY|MGDM(xZhD#97Cw5M4!~5r=l3yDL$;%ArCA*3hYq(Je(8z6Kf6%9k z2EmkX5ma&+J?;yO!l}{Gb1R%^Dw=63-a*(97FWSRK%`yM3rGzyB=quC*5(9K{E-hL zRO3|n$If)`b-62&(qz4UIH1%vRC8-DzJSqHcO7h~lA?+MTU(}_zK5o%^9N{X|BBjN z>L_M8FFVYww}|RyTCN;4;k5M5cCpU<$@~;;zSs^Z*=VvGL^|d5{hg=8RkA_>8n6;l z-3zouyJ(uK`Q90->0eeL$@xMe3MkoY;WSXiBr~g}pOvuh@Qi(jT>zad#1$vWRUH#A z`qe4oL&7f(kRP>*1l{u$Xwh4Icg6{+~ETf57-sBT}pZ8^@}Ppu5$z`tmWC5F?3V5N{P^|>-K@)gGDxJ~!2%QTH;TQqUF2y7U1v2+ z#Ms9oCI{b`Bogb<5k5N-mnLlm3sWE)Um*J-OgI1F9j!Bm(!fhpc9_+*gEtz|L`jX@ zv{%6r1SaSlVI=U!KEiXM?c8aj6CveGaD+vfB})=Ji>30UMn*$v@uW;xy(h1~e3iPj z#iAET!1R%EOyeAE_IJ;+G+WIK8MDKC?B3>sA=lvPkB-|z+d;A%zM&o#?7!?=4)d`% zM$`PfyZX&QZ?;o+^YnJRVujI;@M!T3ku!B&79={bpe6*Pp4NT0s6GY7z@s)Fwfq5E zIr}PDNNE1lN3v&Ek{|~Ye#0_^wm}ku1;xlPGU*0!OtJb8B~(Pww|o~nYE}abV(MYx zACUV8M-Tv1m=}?q!6AEEJJmJZ+VpoaMN|{XLnm2hU8)0NDnxX9G*wm@r|b=zF&Z^$ zUT>x%v_4jJFbD>WW?Ve!Y7wy{b5*0gBDS&{k|V;PWK&`_Ue`rEic*^wkzeo3DzsdH z00li2&x!mFz*x9fnR)55f@X*IJF*hiCYq+OP;%fR{{d=BsqILfPS~1}O0{xA5!X_c z_U)v}DYMM|0u&(XTN5gVF&cwjr+zc{TA+d(az zW~!#~Iy~nQ@KCnCk5+!g80e85`VGv4+dcKHS5uVhV*_u;?zb=kRWsqru&{W@4mAOV z?>yVsw{G8O%h`QlNag9<9Ywkoo#1CP`c6LWp3VXcd`l*3+0#atfKK5;0vw+9N5&L> z@-+B3vpRC)2_(s7%Q>oD(W%)dx(lf#8~mEdBF0o&s{>kl=Ti>?CG+`;<1Pf4=4cUGz)XH>_NWY*@m(-kWR^oxB>e~di)f%J6y!pN^C{z zx>RfYstXg97(dxGwNltiXocN;ak9HP*C(1xDw%p9ewkDoM!FNUKkfHto@(uo!N9|4 zfSbukD33m}rdU~Po@~=0h2t1SJ|?|Hd?Ja7hyONw5BDUK%gMxNf24&FIsvWsxNPQ` zrhVR6%ZM|DGl$uU<<*OEendXzfMUohRa&na7q%T#hz#?FP9LlFj+qP51wur;*r78g zL+bP-`NV~J%Jitz4R~ZX==WX;=?9P4>UY%C1t!NLxSO&G=I(QyyVde#c4Wp5@DT<) zq**__x+8d2tmr5*Ww&L-ns1O9H<{1PNJSiKB^s6G&AshqjZrI02ml2G#)7OqPXuZ* zOfrjSXMXpca&W5mSg@Y5ZgcB8BQl}FoV4~ech3>z!Zhb}J>3&KpsWX^(P^`Dms|%N zrDz8N&89UuOW8D1>0?+*N27r;6Z02Y_bJ<`d^vv;RmwVI3oJJy&KyiR%cDovu7S~B 
zC&qo4uoS_WRw7EN>$3LbTsQF|3G9qJ6_V{nD{ag2rLO?u*<)mTno9eGh4Hh;xY$L% zuMh#i6sGfz&K!0(wz|rLY;X}GRaMP;1n)wq0-%&*IZ**dZ6tnMuwuo1I?jbHIt7%6 zb(QlP^G4mG0E4K@c@#50A+Lti2D9eDK|=eU-^z!-mB|6%ye-AjTAO4a&*TVuJFx6N zMgtR*rVR}lMMhTC#kn57D>zQy)IyhRFH-rGc3r6b$aA>8^)^v*@cS8gRaz0nB;mud zzZjDN%`c52@E|G10WSAv#dM(cC&gW;gjwmD`)PuXD2YOQ5Sci=st-@<<5lC^c$BM; zqkX~Q=Q6^&{j(fnL!Bpk5WXEbiceKXyyEKkQq=9J2!zIz{T-nseKoxcYojS{Y3WSHvO6CMcsS%^u#Xs=`%4ylkJ46 zajiIbYiO)f*`7LHtQ_Hq)6s)rGm3tMlu6Q@lKPya;e zFZD6gBR^FQ9((-Xxd6s(xj4}WLq=@lZ?o*~4U_1mWT^0!7mC7$WT;;Pgq0nsOG|R{ zsHAm&d_Da<8*&oL2j7s{lVs0jg~w+H{Gg$(I!SD@bMUQL_BY$$#;OP zQlxHKdt!i!B!zWOk}rc*Y4nTUjm$`>q|>6$-)Yt#iuWn_N%%*~e2kuCLg=Jo4Q1Yp z?1$1Ht?*Xd<3~9UUf7C>qMGqKyRhxqRsjnqQtXzSRN_0ku1lF6TQbJIYP^N#2|9ku z=P+wfw?0@2#G^r^%#Ov_Y=;BP2Mpi9^1eMt(9^FsalTn4Z9&&_&dkRqI$c6k-=gR4 zLZ^e*%%o8(f$GtU?*gNB?}J2m0JX;G@-*qIZkdxPsgLXLPlre(jmD^dkRP^Zsi zmD24z*R^*Fi88`HdePk7gM-)pH01WBeg&(^C5N$kgr-PgIL@Q*=Zc_uF>-P1 zCBql`_=66DdfmA0mr%L*^;?`}1Ohw^B6<7FURC~gHXpwzzs{=WKSUQ``2Q(bZ~*dS z5~f_n(_w6LUwUrDS)_-oIn*kFLuj89|Eeho6LW7bJ&h@uspTuF>dpH+Ue zKeQ#`sIot8MCo)q8wwt(3^(MmeNhFkA$3#?vpJikaWVTb_h|*|{LZ&}DAs%7L6>$k zaG_LD@{yufF3YySQJ`O7vN)QAF2mL>3Y`3-k!C7T{K zLwl{9C(L(`F_6T`Y?*hQHn)MhXp$O!x-8?~2PeJelE*f24Xfy^1C3&{>{d(0j|M$; zp-XJ);zfROYyx@8bbaUtc*n0&uJ=>zC-vLA#Fvzkw z_b@v500s1ogC(TotERbQYqB_UqY=m*C`BI_u_;fcTNRs0z|3Aa7Stfayr@@3egXPu zwA4|#GT0|S=M8LrDtl2|npyC|IORv@Y5l>Hhz<%2$Xra{B%VisD%iR8V4-7&HPVox z&@K_D7ZXyPI6`lWbbPM?G|U0Rl9(0Hj>*V><{b7LT=yH$?tXhSxNTnZ4 z5kGRI?b%9D0PMV!Ms(aF5cWZJYl1iM96owAv zyTHrOmCjDK`I+=&GDiz1o*;)+N@}0~_0D zPw@>MYXzgZzK>E0+io))Hc3CkjI~Kq7hKwK9Jt$vA)P>&i-&19IR>= zz&{nR>~d5QVBznc@5j`1-ar03GV3Tb05p(XOlywbHZ!2+hc|9N%P> zoA&RidgGmIb68}uXiz~!Npj|tp|Oi&X@$FDdHme9(~$2G`0LLOgqwf--_FAeS_eOq zrh<|GAfwCI=XK`3te&-wGbDc8zVOyT$tC^Crq}0#oH#`JU)+6lP@GNo<=_z9HMj?t z;1V>rySuwP1cFO&cXxLU5Fog_yZhj>!~6Zd)!NMwEremuJpQAs^$GyMXc5r;fv%PKZpj)FhitdXH%@;su9i3DhF&n5L?;~+o9`(ov zYWfye076aFBOrOHQYByY3FJ%FuLGTDv)#?E<2oqfTGK100c(z~CHFcKJTHW39Xm;U 
zcT9|7ejCXjSP6t@leA?hL@pK!L5>=>rKw|?ZR|RmwwkAqEk^4reK<;3%fU^-h9AE* z)|OL6oY{A6U(2l-|91C8o~?KH*s1fyxLWQG3iNjCNvFbIpO!c$4T^bZXD!D}E@898 zPc8qLG728&VGH3lUH?0k@(*e=MX3eRo2+qvaMp3!cMy`nNO$Kw5RRDp9w#D^pjjqX z36z2aL`v1IZbFXYP^jc6S#N2QSyWYAF+2Abec2Lg#PA?D5elvf#*?+hR z3A5hBf}xA4>GNk;n>L&CgCOj&R<5e_OwVSN&7VQ_f?;IqT|b&s%UwSiXK%DuNa>y= z`}qDni0|w&40IS6H65$4wD?15EYnyA&#XqI5&qTt&dRT)t^2)*$MY%6&+-BV@V^SN zQIY11w^|>PuOr`cTuQb}69LvN?Yx?QbOrIrZqCle z2Q~>y_Z9aOGg)RLJ7A2yx%Qli*2|$&hzp)OV9g)u5RNprt7$I!rS*!xPqOX%5KLP_ z=zIUW`|d{YLF4y=Se(Iy7@TS%W>uF^HSO)E%jZtnczZ>iNy6LXmZE5@;-~R82+0G{ zk!8kD)r+&^ToVKJmMqy(W^*F87EzQ__DMyW%L!Bpr3BGFbrTmB7qC^jC%28iI{0~# zS?kM6OB=U1%Tq0jEH-37=sA^`o&0*}6Vd$HjLo0}Wbl1wj$t<_#Yl*Kxd!TMB-UO1 zE`49O)TkC$2G2&p-&{)Sbeld|uqP7^gOQ79Tn1@p)wEmb^q@O}ELrw+{aL4h%{?mKP@L!58+b6=7Dq+x`=S8>nD)*vzmzc4Wt2q2!p(_f@%hvHP5%D z$y5`TK7CN9<8j3dJV@AhV%svZQX7rI*+BEW&s|KTQzbI@jBiY>tuDve9 zBXqJmnAzAYJ3T}M`0H8*EQcs>mW+K*-M6ZEHm^ljyF%UeOh!6T8ClO1@y01{#G-#F zLo7aaWKJCTXDexy694@{mrPohLp^R&`=E692nFhthCIf%IMF{Up31*WR@hPuSVpKR z_YFB2L@lWdl7BSjCX=!^V=bh+PN89_)o0;hVrwQ%(k9pDQ|D}>CA9i%(oM(H42S2m zr0awSe@p&B4I6Pf5qtoF*{r`bNekl1{#f92&TjKq9sZG{scAd!Z>nz_Ak?``-~G|w4ZT`uLAG?LIsA6@P9Ex~o!Pc2aSQ1y^X8C*Bt@lr zvK$sEW;vMyaN3mikKlwP;PkJr$q^7(;R) z7i}x2bg{Lf)@f5u{44o4a-qPGKCr+fJWmdi14=Z+yDNl78f5lg8B$_&!~hz4Bgd!RB*z z9{GyWuA;e)eLw zt{eA^vg~R#TJeL8#K;_lW{cS-8Zfg32T-HA>GkH+;t7;6 zNX+ZpKDgVciaeK?X)0e>snUJl@f&k8mK7q!;DV>(jwpT&bHKM_!(%^IMZ~ffBg2;p z3qb*_LN-h-yBV9#%WQ;q+RZuE6hV=WXN;{M|DZlei-Ns2#0phcj@)h2yNb zqaGfb53^q5JC5AKd?yMC5F-njIDm)z0k&4G07BIIC|>E}U)!gmh&X@; zdKwk}$ZBU3(`}us%00saWm?#Pqq5%zbqii}+^%Y4@s!uOih3&-cQ-&g+A_BC9o7xR|OIf^E1Kp(*zn9dyuBqav9qS!t+z zx+_1Ur^nN2*A;isEL&(kK``1zOt2-LN~y^FDzuFUDt+<(=CyFQV%uKn!seGs95neo z*r~@{|3xqU?LAEsUWy%TCL<{bcBKlR7US%|YH<7+`|PMO*edF@QOwD;cgrz|)GRin zyuO?@m3ous>QR=r%b&`A8v@2zx!Pvos8n~Qij`t;KgqQ-X?Zz>Y(-d()t>1N+`;2?%SZ2*DY&1n1&a!#J4qlK-adWAXI$9AnSLy^$=O)|wcGoX}h z=a~D~t=?2pdD8CQRD>I00^=qGJGQ!5DEu*(?%-MM_+KZ8lrpPyiu0jCO^CZJyJlwL 
zHJowpq?ERTunH_98>pZKaUM5yVsrBF$_2{JYfjg7{T(?062Vd}e^9t+%Cg z7|TQ$BQb9l4HwpBMv!cg$ddCt)J7PL*rei&54NY0V?{pnNJpJ)@!ZKWu+25INrxkn z<0_w7ifL~>2>W26+V4U>-~LIs_U4aBzjnj?QBCzKm`<5{q;zM@(9Rzqe6H?T(#3{v zS?xh4Qh63xoz_Xt^zhK^bEs)lu+^*)=V#4y15d2R6A0s=L84uuK z6W)4t=z7GPsAd8B+noGcS8&-E{n(sW?m@*1$1+Mo`_G9oxK5xk0~Q@TLXLaqVIJOU zN25lHkuAPKFyTdsZQA#kLu!>DrE26qJ}&2S()bK+;Xr9;P!h2P>Avi@EPyI&k39(k%33Y`wK&E zl5e7b4W`;tetK-e>ScGaN3#d`TSh5cb;u$rn-ys0XzsWuO zUL^b9JqLh^hEXuW!5$7qd=G+6rzmZPkO^jcuAYM*7Q#wqSuUyYV*XD8?klc(_!aQ> z5i@MM=a=~-M(aMOrE=_JX+T^+{r(EA8`ETC_+aSlNod2~%ElL%OZVHokWR}xZGxh{ zv!8@y;5Gkw2=jOF)`$tT;8}gFwTZPNR9XKueh6t*cp6Q}b^@D@k@MDzaU4z+^i>E8 zX<|HZoI!SH(3+VRMN0{|T6M-s@a&7474ehY<_mZ(&5IGZ2%5s}NljL5XOkn%!46zv z{4r#kPVT%Ymz>^&myZPe$$bX_f<2g*0TWRC*&_#m{)1aLT;7DR4+Q)%eFqMZMdL+S zGvG0OJ7C!p%y?X$oeE}v%=Houe^q2p2y>uVCz!J*Q>En$ej{}By8bL~MdmjXwh#t-)Y=K1lVP-U4zu@Yx?)y zD=skza~HAB-z9UnE}oM|GVhR?<2iHb^(vKw&fNK`cIL%JP?Mlx;scFlrpK53@h3Tm zLuWBEgn5y!bl74*@DI6Zt*f}3gKYIPm%DO^-Rjz?6V_AEn(RqnP~jD8%Obl@$*wV` z1cUZ{xlP~VpI&!FnbKI%6i-zBxQXYoP-4YNwZEe28W{#}Of+TCW)I6CZ}-JeIzq0^ z$4`tIl7jEeYk&%y;1u?YAwmPQJXddSED?5y5t|(A4VP@c?QrVGRukg0g$3y*?BG>( zqeBK1w-Op0Kx`8y3Q*L2iT_)lZG#P7j0x|z{!@GR;{YHUY;l7t$Sl zGQtMWd|k6yT*2~stO;E98d7K>*tI%O9w<_b$uE_FfiIc?H8ZpUf}6X$Y>~K3$s;By z%nu9FU?i)x?)WF>VIu&09pr@-z_RDLc~&YG3Sl-(gO1tA=jR@S`>Lo62KY60B=Uo+ z)w`2^efwKMQfzlfcYtgl+nxUQ&7)B6q=WXWJ`;|nmKKa_Hprt4t!l@rAC&uS;#p-FkNFRuk1;@O}i$fqcihb{7y|8S$eg$LPCLz{r9oWqO`LmDJ4- z9YUS~9|3_uD0qq=H$HY;t$$a9JDl%z0#ycE@3m?}c3*9A@ElP(+rKV$9bwYG2ZJwK zeLfY~={Jk;n};nHzYv|5AG{1hiu6w+GE`|whaJkd!t+1RxkSH5;#;vUp_pDW|# z@Omwnjmzo|!~_+tbYc~{B}iUdMq-YBk(z&QF^$SRnaeeC_6Y;>R~bA#p`mPFo>>Ux z%1epM{iOwUQAJD+`t-EIu3&rRt@_$5>Mkbx%a1K3Uj^CJV##r{fzCIX?4HE5V z5H>%c9$4w#RE(G;`XQVvAcpFOs4h_>p(r4#%qhRTgt)dKKL>k8mcNwQR8H(5#}rA; zpR_=}vm1?SS1W;HSF6U5h*%d!_^TwJk_}pQL4(5vZU2KqCd2EoZS}WOrTJv*U>iSQ zSd!lHj97j#bxF~cV@Pox-~QTQ)|2u~g+?|QlNGKO<1N+>K`AKrph{uL6b~d!M52< z<$pMR@$NN$BJn<+BUa+S7h$&8L&>gAo>kqHd}9h*xjkwegMn_^Ea-DsA&Jy@#PH)i 
zMDFzU(JVt*UudW@U2WF=OM7HwP_*vQjC3DsyJfZ7S&R3OPJMh(K3K$@8_&57IA^re zTk>vcW)#gP;33(f>B`yi^y~#%tmv$9uwS^iEwu!7HHq=qJwt9#qRcey#Ag(91f(>X zs&dln^g=OVyb6;&gsgeIMPmaK^zmGL(sW-7O70b6rl0-!_A9+pPB)ZnAEhXy!qQO$ z4=HmhUaxBicgK!s*rd>TJ{|nz9yGCI)2qwcX2B<07@22js!~f%Qt;>ml@t&6I_qg1 z%f>s`U!GBGE(0GjHq`lJvFr!TlxgF^x}-w*(~<#qp3wyT%8t!qmv-0l%)N0>HCi=i zj4b;Eug|gCf2Sm4tm?0?>fiN(VP_h;b)v>*mVUce?5T(T{ZbJ8e%2Cb1y`B;xnhq? zt>NW{NHfsmxBG@A7#nW2Le>R>ZOBbU(VhUV-_msPEwK~`4srfJDC&Spo8KhdwOEBeOXt|bk(yP@y}CIz zAplUs01N=C^;*)f+O2*DH4VT5ES;MI3`LOLuHRW=u@aD4mu#th{2D_>*{+x+V zJcMq&&sAIJPoBepyiu-u4@C=FRh15zO~qV0L;BJXPJtX6!7EM_`9FC5`~!f;VC zvGPyzUq%(o)cgn0k_(C-CT&YR+xM2!kPenS!cL^%R`r^6#}0vrIRDtbOAurxVH<{m z-H4p)dgnOq@-)<*X$0!s@*{p0%fDuT{jh>0Y%DL;cgYev@lD(}-=2Xd`J}H)L9BFv%_N*%= zmIlB{Y2G?Lt+(OZ4F?tSUOOF|t=*n_&;wB3Q$(%DlRp6f7oiE%df@#^lJ!~ipH>LK z8ZDb{BW@M%Q?PqiHG0|hid8)+m!7OkPDvoAR#Q>{;CT{=T=>zR>zho$$fz*lq8@RS zpZY%^BOPQ^;5QWq+d9{Js}+X27GxCwh{M$JFW>=l?O!n>`|G)>gJy`rGZ`sK_qVf< zyDLE_{>Hrx|9Fc**gL%6Io8@dwu{YR5#Ff2xnSjL`ahwsAi*zqxBRM9V70yU5qr8H z^TA!Vna`IpMMDY0zxQ)rR8IC84xA7mf$&e`BTDje*5FYV_A5_wwCjzyIa%m-E19bM zqaKRKsvk7jvjb`8h-ued*X-GXZ2arPu&z7X*st>nOyXSBz|zogKej*Tr7+2rdfBKR zO8@{4+*ORqyT*iJ$8VY)fBk?sg24I18pGK?McO0;}stiR;>Ki42 zAs7XbVTf=^(ofck5QT^`bWE(2wc^umf}>>;#??AR`rg>04_=aMR`qolIzrUcV}Gfk zw$pEaP&p+q1zbrm&b~%J_(ca%@upOYo_K!%z0noBynn{^t7h-~fRqON&PYPqc(8S?oyl=>MXUFFv?GeNoI1NGEH>!>H#w(+l|;TU_k8dDw|4d1 z)t}pP?@JhcomQ>dD&H6XdBpPF_QK6_DRYJd0#RO-LNHn0_rdc~elwr6TfnJo4_y~L zW{Fe!ZJutI_aUCvZB~#BMu&?ZcE0%?0#9ZXe_nPKxv=ka0VR|2CGXGY$BB-xx(3$p z%LBT@@_f5It?|&g!^n^}->&aK$4AQ`Wuh~GU1I1>+Vk;kJvE$Fou{YWx3ybq3+ZX) zgL0DmutN{EShXkayv(AO}~ZuuyEFcQSrE3?GshNm0qfd<5%w@aE)O!3#FgnJu^xOb49oUnraF4&6*A7I{mYj zWkVaU8jZJqF)V%x;n7jf1r#_ZH|mGDoJDHHdt)Bqe^UP&f6{APPB|$bm#kNYluBPd zRy(=^?(ZC{tndpNV|s%Q3T5zxPx8su7wTqM>mshG^P=X?Csd?A1v!QO5KSmRl9z+h zRe_l62?;(RXGm(I1dCo1yH*}gA5k2useh}ELfT8&gWRl|R61?Cfuq>bRx%tzwrOzL zX1k`(P;1C`4neDdBtI_vFZYSYbu@8eV%A%YmAKN&C;hnuRHSjiA{^rgpK>J*j2c1m 
z-)~9iDm;CwLn03=3`u4wXQS?E+z-_{2e!*G@VS;)fHZ0Y=%aYYg6kAFBClg5r)ZbA zU*3O}Vq|R-+?fU^E}H4n+dZe!`0!9<^_@kcRDh-SH6Kb~kuQ2Y)k)Qa_E5wPk zn9?)U?FiHU!%5leti$6hY&EWTuAX6LEfNJXrzPDj3rK8cGHI8l`p;j=EqcLNQqAeh zIc1)!ZHpOBvNz>8bKI1P8%~;T(|C$2zg4G;?aNp#y>*=f@2Lb8MtGv9^#J3LRtI5e zN-|aT;M9Q7_^pRv{}6caDbcUK_l-$_?P#WUYxp2 zhpLwA3C0#V-!J~z&I{V^U92qvZlNWeFH}1ZcH%-$_z$|XdX&z#v+s`Qn=sfsnJ({( zf-1+sihg=DpuANoC48@i1#NidQoz_XFBO1b2U%RNh(M-1Iye9j1dv~z1qYmA!QAI$6f2 zQd|WFkkHfs5af|M6RKUoMBgt7(24{zo8Bs1mKvIBv*<3dyuI#Tw&GlKeTsy#{@v2U!8lr0LN3Q2dBQMk?9EY()acJbGGpm_( z4yqYU6t5Y~psE()m$*3ZFdlla97|CJRg0cB%(w7Jh(c6}>dqWftC#}6)a)uqeNv$U zREhFl5owcoh0xBKHh7iTUbp;F8Tctu_s2+uxhf7zy3&A98p#DcMU3&ayjjqvzmRu zBcCKx_-JZUMa2R-vY&a8FSj@QuLTbfrRy#QgRE>t59FPgj9h+iVRO1>-Q0q2cq=XZ z!Y*>7!mNBLZGBqlzx!-KH3rgR{B-M|>=-wS=lzE&-<$vg2f0C*H@L`RZ=xG$-%`Bw zdINO@aTH!T2G3Z3VmDlkx6#9A3l1vG%SMCAq6ra^U)m;tD;^nAm?0rOpr~S`u=iW? zWX~Uo!s`5*W#Ie%3d&E?yymL)WyICSE%bzUy;TaW!p^Gmd~+dq{rD}GW;;7Uq)t&v zTnDj)#KbQtl#}xqOb(oZp}}}zeRy4Wz6&`B<(U2h(;CV7UyiAtJ^SUKms1U}>uA0I z`Cg?YK#Y5-e@F>|HKBt?G`jEeYhmyHlQFBY85|>w5C_}`a9boJmGk3V&t$*;b`LyB ztJkp$!ucj<{ZTF62xlE85(Up|_BOuw(ykW-o7)1}xBdakla){hAKr&qbXZ7^a}e5Y z#;H_YS)8Y%{-6;c_+mupSlX2X4Y&l&TvSY! 
z_qUc}2z;cY9S*AX9Udpj>L%?y+hOrFZ$(pv+N(Bx|rkRibL9*KAeQ&ni-4S5 z8K**EOltmbj=kRp**@;)d;wKJie!Shp!NU_#wtC+Lslc`>aOj&{eiX0RZ~8^X7eIH z^+iASBISEx^8u&8~y&lsB9C$1Yn2K$8ZdW|`&r&V|+3-m_amAPDi6(FnCH zCa<P=cxP4W(RYy2hg62QZ#5SWxcrHXRf9bniU0B| z)#F(5JvhtVZI;Xp)&Ayz-p&b3r-pj~2Eedu&F_f|?PZ=TCvwY+ygi^jyyQK*^LURX z*qK&2^xU%Bocp=W!O?!!8#yPrYor|KF!wnf45kYoh^^)y?bP!*Vyb>vQq;vpx6t;S ztmkVDSeGIvT;%&Od0hP#E9Q`5()V;)3_wrAQk;H%&eB; zedF{>zWdI%Mm_M2DQC8w$N1hgi)n-p`6jG($QrvnPmc?RXB)?yX= zS|%GMl9r!TA++U?d0dw4@@0zkJwyGcwWFCRnbeebKK`-v2>0ik>`FgP>q)S9;y_Ge2v`U1OaK5i_{^DY&@~!I&j4VSyPfmY20&ovbM{ z)hI5R!1-5@mN5SiotK1|isYk8>LJgGhT>20xHj$qIgx&84N7#3TQgt=HndTW;k;+= z2$lG|5t0f`%vlARxEs;it-XfI^1u#C8>Q@>OObb|0F5~D))`@1-shzn!g#_9gg`_xiI8VkVD27>8Oy^DYN!OM6KXS1O<@n`)O>W`802447T5N#B?N zYIrBU`U~IR^KY(W=ikR5D(~%KCa{4Dwf54^X1#%yc|+mT(D{U`-2=7W-sji`ON&cm z;{FCFa6tdydpFdVH(A=p@Sq9^R%C zHH$-_F9S60vj&m1c;5ZI*WBBy-R=w%6@5?j6J`4W<%Se2b^!gCBnnS#GeWiBw};>D zu+-VVlN*juHdJxC>D|c|(j6k@4ELT+%+#Hb+hR&y5Hnw82@jW9$D<}S*7@6~vas4{ zX)-|mT!)5KR)K++OB_zT4s0#9iAP=ES|2u&oJ=z5%1mnABMytawXG^M++KG#OI@tf zav};C^6WtpMwy6~^>x8DT9))+a?)A`HRv3m!#UHg;p$9UdaZumHEQ_j+NU-3PyYly zcP6g*=To7q?>J5F2Ti@(7pLQK$wZ}P@!u1cTK~}IRHU(tZXy`_ff8HDd`?*B3!XqJ zGX+y;|NOao2(j(UfiDLC+eWp(k;8rl#o#oXu4%i@$(AVNthwM}z4_w^qnI?5ei4K2 zoAdPuvbn6z@%!W^Dhlv4jVd+V2`|@QcM4#h)(F1!3jq;87(kL1$BK8B8na;L+i}LE zfx+w&8giF%>`KB&#Zk=6bX)~S)Yk3P^T+RfKay3jm}|3X{zS)fUbQd0fLw1|Rtiv} zsgR(8S7ZCLn{-FeLS)*Cv1DnP>_~6F_zPG{C<^FG8562fstbnUMZ>v}O2OsYp%-~9 zR2dr(0lsCWu5Mp&@1+1kZdg!(8@+>VVCJX7f^KPH-a}%%YoXaJnE&q8ucD6kTmUhW zY&aZokD!UFZ)HN&cgZ-wjC>pvv4-){%$9$95HXxo_%Cn+k;X^yp7`*-xGV&t)p9o< z7W#6fHvqJFPu zyN#Idllg-bJ)V+fS1hA?=Mw`lyBB{)*cjhF6NGE>g-jsg;|EzY^mIVVo$OC-aov! 
zh>BbHE3lDJg%2PNSl*nwX}{Le-6R?AE+o|D`uy) z+UC(gu3)}RiJm_{lTMelxL)GrU~}Jaew7XN!pY___{HYiK)a-`b({R12xDSk=og~>denaakkn0n`txm#jIQaEDD$zH$0BG56-%zX7e z=&UXx?X#LIpTD@gVFM8u^e)d#SqFOZW56dI>XkY4Ga|us~{V(!hF9-vX zvfiklal2}r8eB}ICU;4+0$QBM@`EEf7eAPw@;-SoD0EWD@bhq`78Pbc zdJm#q8fZ(<(M~*9h&V80G118LY&$Ymbo9UD4KvNR#{Gy(*%aHT7`U_~JxmRBUV3vq zm^sd&$A!TUlT_)5ftxalHtBgp&YK zcmRq}Cp+2)Upt&7O*~e6X~kvJks=5Niuw``!T$4$?-l^?RdGgnXtq z?*FBSkg&DsfAB%jZ*1v#&CJYB=sSw2wg#f`ix-Y6==JZ(37oY70K%As##&Z~hqT>E zff>DtcT3+gVVpFaQURy85WQUj0 z&OymF&>>xQJbulCs`@-q5-k%A-`Q-k(@mRC_(HZ0$YpZxdCBk(G}k+IyZ-^@r<(aR zcL5zTHNix0JVE}xr=5D-ltbb0&DZT^PqsUUN5`)_r;Dd_VPzf{Ac)pQN#m$H$-B^| zg#GL=3JVN=n48>=iZZ>tyxgcmjbYhd9_&9UgZdcE#~FWrVt4)<6>L12@ijVI?-1)N zFnL@xqtj;2JRP)?(GbA zb6MrtUQwHMz3$^K^JB!jPYG_%PVW7@+{XLu)w6*1(0mCl#{)mv>+?Q-lYZ>45RVuPa@xueIw35C% za`;yLIQkpAeoRZ@FG334dL$J-k9lw&AA!|{;Tw`c3tumg?DfvRw4xlT%O8?p;zlRN zc>iWqM<+<9WNQ18miPiJoLt$3R5pQuADR8;CRcr;9obcb((voi71y8h$u4&N0$Y`t zvGWQ^;ovQz@q+AEisZ!e-;Sl~_Mpo~NnTJweQzYLJ4UR1|7-pIudU`=Qd{sHuN7O; z$WhKM)^PRpM_WxOwrZvH`NEmflyWN%>?h6p6jRCqiYgRWfv1_rEhhS9j2kO9GWjXx zJrJq^;=;eRM+mJ!v!marY~^R?J55gXmxOiulO7bAzh;4PZ$jO|_>Pfo>Olz@nJ;TT z3eqXXvz=eR0LcOV=C+EwPQ*ZJNP*V=nV-^@oN-Sk>`(y8?Z$2|T3Rh$s?zR7Flqf$ z#T>TM-j2dq%K}GGrVB7sV;`_l<3e@RokUfV#)3vd;JC}Nl*$Bue3(zT7q-N8 zFQH5@HT&rse<-OUp}aDhf_^+6wT-!9Kq_+>UtAZ2p{fk2*Vz+br=7ZYL+MyIJ+Tm^ zdwjEy6{A7fD8S}T%@2Z$lyfw|&07R{HfOP9XS4@L<}zpGLEYmh3|G7aQiLCCV$0oU zvrNm?8Lp&q9k-5ugOLbg-(}QMtpXO(iUL*irW`bl?7Xnt1HTA`m6s9V4H6x7{G&`@GyrvucmhcuaHV2(<8n0~tD!alV%uZFqcYV@(< zTCG2(slKRVK-!xeP4Hr)YC8KdsOpKrWYS|qas0Lp({ZO0L)7H@_Wsaz701B$ns-ow zL`X-c1hI&j599RrdZM1gmPXm*Clx&7}QeE1ziJO5{-7Ax2s?#%SNJFRQ3DHbc3X_9{vncWo!XUoU$ z(!f2@*w}6EM7M&Lss~5A<~%3+E=u`B(BHHM$0gwKQcOAdH3*!@litHt;sM40j?5Z4#Ak-jMe7Duu_;)No zaWw`2=RHwkUGzv?g*ZZo)FvQ_^70l#>uauyD3_O;8I8Aa5xC0k#T%WATV-*H5D6j{ z`h}pPW%;nJ4v*2ip`NIxrPwhx3YsfEih0S<$a`*qrb1@2l4W}2FNQeUXj=cIqVCkR zw<$0*ObQh>Wu_~;&UQyd3n;6NkkAU4Qzpo()s%$pYGY|Fn(Jv+#d?e`1NgJvSeN?K 
zLlMQj*o@92DjMaBF$n<)@Pf&sQO;w1>^gjd#k`8hm%JTUv%f3Z%JZI{w616LieM)H zlHcZ;Dh{^Zm;TIa>*UJsVrTGbl*to}Rb#Cno2{q!uDof}-&M-WjD4S|ZhQ%KVV23M zkumZD|I;U>T<<40o;BbRGY$y=R72?wUog1ZzyQqo8L&4lC@#~^9Nyen0RUDY$i+_M z=k@FLZMz522PD@WE5(S>XPo_B9DkWaSo6NCxC~o(e@tk3sW`)5TMYeRIHT?`QL^Jm zXeZoUbW+j@sHpT1IhH0@A0RR5k*4`G9lV@3kV`4`x_2}J(0fUIaoKMsBq}{OZiM1f z?1EipSZLjOx@d_w8Bvs#>kF2Yh>fH=CMBID4;{{$n$uzq*H>~b73mj6qEBdr|#@=SsITywU4=X10F3QKA7svcs8RKM8L# zTS*z2(Rw96-WLglg$;BVLC}7yj#QlM5m{oD?`NGu>mpp*U7hf6|AL|p;y(_CPFeu* zI;w<0Ubburh~2P(+|)c!=5z|upWvKA!q@1oqSbLJpN@ito95T6AMyFoCUmZrr-`Cd zT-#ZsZRb>|wbBcw5w~Naq<5k)2L>plrw2@qFHe*&o`u6XeGnY{G-^PJwHX{aprNT` zS>PB|aivEj%<86GhZn|xcx108LjydA-A)G90o?qe1?G8c)GH9_V2nGU_dfi4o7(Nn zL;@26pap$-^Pyw_a(Oh(?}`!t=zWA2EpBFqXmPRTuzGtt9W72?EoRv08_$*nEJ}qm zU2wuHsdYJK4=(jZL9nvTW)J-Zzm!Y?cl8q{jLW_7bT)nDdFK^9_jn6S-P14|RGfVs z^WS*A*kH{7vR`+c_@55i|*|E%QX`0x|SvWM3 zzuBB9nfvu12`!p_mg%|znbc?dHm&nfv?1npl+!`(mcLj~TsRny3!GX$k6qUA0D#NS zOf3{V-sJAJL!9z>vA4_cmQ8oxpOPPsBmtD9B6zV4w9@^`rk{VRxBm?# z{dpm_K^8_QUs!6U-m{jJ$PPn$Kvn3j2pvA1x>?Aa#SqzcCrOX_r#hqI3tv7GfIKFI zZSNHs3gBR`C=CFErmerWDs&cAjDt)Gj-vzoC6&rXf?1`2lu%a;!hj_!8xsJ}QYJ{c zHOAS|?L5i<1k|+Ryvto;R8y)6nuKUwqZI$_R)SC;w0erRu}2r62=--vIx=GoAP6h} zNqSKq!MK$GHDLbcvXJ!hZLu(hLpn|n4q#)M2M|n8TW-f4@2cGlB_*X09cEFa9v+et zKn9>CH`0L8p_jRRKDx??vQ3Uw<4d{B!~XdstOY2dYZ3P`4{VZZPb_+2L+5#Rj}9J% z0qFmi!#lUksHXZ06{uaEie{mw(x>aIf@+mWz z*tkBJ>w%ZapZcBcAB%zMNOvz|pFm7T??Or$;k(N2@5xsPm3{%5Fyq3c?Q}>_!~b4G z3+SwH=R#@21+l3N6UBP;xuesl;*b}85ZPHm{SeJof8B%aYw;a9CzTuXHlQXuKo!@^D%sNA^_2*a>6%)h5sWq z1=7ZRdO(<;#=x&Ad9OPy`&hp>j&Le*qlGm7;<}+j(%~K$rZ1rRdpSyfsXB{HuUPIOosqgDB+dUG{{q!^ELNDTxdV|XUkl&cH8{@nOSFxuSd>AByPu3_=rzw z$NtxosTNVz*S_Aj@>-Fp16p9`^u{C|WKcs>6Zc}be92iAo)9)K;l+LofFx*uI`H@8 zS{X?TGET(QMhi}7s?ON`L>V7_elgZBSS<%v3+3XcYuhqiJJ4#icF`4M_qUr~wvhhz zncpHBI(h#k_wxZ*5f$$R#YJA`-#qu)}2$Fr5Es9{sII42qxn=@=+9|@6=z)y1yVA-fh(n!HT>`V+`lZ`v*VYQvTO1^PoT)A z^F2A*FMO{C3(-XnnEzr_XTfMNA&nWMy|>tih%p}bwZ;F5wzm$7t7+QC7k3Zt8X#zZ 
z;O-vWEx5b81Pku&?!jFX2<{Mcad&sVCHH;5&+~q-)K~SZU)BC`YR>H0*_l2)S5IHv z&DW7iprFKq>Fmg?tE-%u>E z5UF-iXYT26Fn~xM#%4;O`2gu&bu4;tsLH@pXPgoM737F16Vdo7_2XbUvwI{g3gUpi z^igyFSfTwqXOWRPb#PVLDl4Lyl|ol_#{CC9<@rThQ8648}%KzP`P&)k9^&~u!0{Y-$vs~3o<^PHBK5o zqW(gUryODK9SM^*QV*uCYf^S!>QxxGZ>KxW2!`*|^X}8qfc}W9Fm#9$CXtn@QGvEJ zHo`n8jm#y{TS-c6l+;0yPh0p@CUcc}O;_4WE~!Jf`JU`c5nVhS{6n3`7(yWPXJ!Nd zQS6|giuvQgn*_rr_(V%nm(vc@8}?TE*X3Gnv}>@D8xG0%%cCT#6k78jgPM%=Oou|w zrX3*BS6keY8kcWY#FXM+C6WBbFJ_g#CUl9qp?bm{5}N@DowFnchmk-l2p21j40 zfbd*-61049R`_IyfGDN(2(S6=&pm09gN8!BWBoGwE(`M-NI$qJJ4Vh8kTJE$E)`(- z+*9?&*TDlXk#bZ7dgXiAR8%(z57-^F>I5T;Z%M_rlts|1RW zHChQBhCALGpDGUpoB+z`MqYU6+vGM>bFYxmF-|!PDuzCR+?5{?V3TQBrW*^lg+A|D z3kaw<*PJPrYSZwicrJdzT_-%L`z3L@Q$|a566u;0M&HJEi}tq( zb+osomdspKnE=DmRCugBB!vC@D@#YZvH1Hq+UV_*VY(s`U~;;K%u{>rbgKPK55rbi zkij=sn-6p^V<&X(_pxPMVIrxSS{2|!Wi)+=V*)yxq&w1|DI59W)<3#yE;fz`9m=o@ zm!v^~7>-WQ``(sls~vYy-$LY_JDrEfnM9Nv#K=LxKX?GyI#vBDp_Bi`UxeqE%L|^D zrE^K4WG|P`dwKpa65T{d^^L3ud2ph!B*fnAk%ehZ_|_F!eWDPZ7jTB7FfT?Y=G*t> z>V^#%-6~yaNOK%sXG%k|s^ERUYGp}##2~%U(p%Tc#V4xiPO=I6(t?1*@Sj2qxaDSU zogJg^;Xov7{j~x4uKnrY_)}@O2?P*Wd);xBhhlZMVSnUdKmA^YrXi%0rS@D-B%(5!$qBQ`DHKw zDGe+DwIy-*N>xy96xZ+k3MpBzoJp4?0dRrC@NJ*i*aEqqBH@Sd+Yv^n&>sgJhImGW zVM(u(r0q0}ebFme$lFWT!$`?m0?>m&^)Pd32l`fMdBAsMGi8$dWzSPTZl5R-XSaX@ z*3g=Hgko;(!=PXCD4P(#x@B{$S*<73<%v>gIXtS0)(o2l!w^|(laYO&205Dz?}tg7 zUe(XE_(N(D~fC8++n_rM8>})AWK@=Rx*BEPOiw@-1O)AoNQDW2B9;o_ZYgvCk)dT zY>oN}ha^ZJ`{9&c>z@_?RMd&79=u-!!LD0r5=C;9S>z0l@~K;3)0DrynhlR3n7cXD z2N~ny()bwXU-RkP0s?Nq(q#~}_)Z6+K$@r`-6uJu4K`$>e>{e9kUG#uobFyvrXfL-hzb3yR>8v$AfiaQ@DQw0@OD!ky z{mZ!$$XD+@1GNa%$9AQluFnerV_DD6?1XX(abgQ?wKUm18+7<@w6|iM5|9(b>^G7G zG!G6YbE|J>A`*(%rtv3BR<*OBR@ZCZYG)Skxj9^E|}KHi~QM=jq+0f zHg^>-%&{93g>qxBLnZrMZBOqUiMRUejUksiX_GJt@~upy^kc8y6x&;?X*7y13HcxL zEDwO{A2boXxn<1E><3z!Te%Q&yxYIUe{XRn(Jh+mvtayn$&3gh627^IoGg3(Hi1?&-v;n@``?&=Q2)`*{xe$N=qM8KZM>jf{j>vY z>s3vSX;>TtQD}aKI-l@z(~YqI?#@cPri1$DS$lK;3di}EN9uoy+Cf6-nSs$;X!Shp zZ12!g8+$k6b$ZX= 
z+t(w&Y`=(IVn$mm*snR)o!MEd31{P{#l=QdnjG5+4$UYips}<5Sy2Nf_Zyjq=iTj5 zK+ddLeUga~Yz-Y@KLjWxhZfk_+gdYCI^AKXcI-?OG~PdTMn|#tIsr)f9F;-ShS2@T zoP>_t9Y-Mcx?>;(_z3_20`j&Vv^#^dugiM*U}mc3MI%>e09fO?qLRbeKF=D^Q0La( z%ughU;|1&2mz`^Dz86JUtIZd7T^^wQ^-phHQ@z*I@lGRw=gq9^5d4r`5|C6?Y=nxC zA%{Q!{+0Y<{-*KMr3LZ;Z1;Bt{Kz6G7;cA|&_4?^Ara^*2y#&V=aAC>eG$eN78V}x zU_ji?|8Nj`U6b7>x_c)I8u$sPT&kq+SYlG&=Dar%K@}?D?ilmS%)IWhw0%|Ti3sSOTv*67Hb&KnYu0rx1yVV@7ye= zF3Gp^NL})IV`KNDr(WZ!j$=Q9*u(ZYR(f2!zchY*f=9rH-Ccc7N$1g0^OW*g z;Ef>QjGMeUN8kX}i>{sr#%JjgUN7#p6FX8<7 zacbYm#fIHzqOM~8&{@*;&cHv*n3pJeX%p9qR0k@g8H|J=Y-(Iy467GP^ON=n(D zC;YvNEPcxzp7R)d^J}4@u$6KH?CdCwF*c}CY1ef4y7Z~v!X4VQD=RAl=?n0KGEE3J zp#AI!7q&BIwGjK$u8vL;zXh*scc1C67~NT%gO47L1Cj5@$qNCIvx4LYr7r zC6-Mbt!Bh3@=g#kw#)|^3Csasgwz*5`;yF zgf~gqQNl9jd&M5n&+fSninXfJiF{JvfFG0jp_AoG#MHj%5Qz|>Ia>^h;-x5`oxUAo zXD59K_vcF7+$RHwL`^h9O%+X2BV_?%(}eZ$sQ`Hr2{!1zBYNhmwmv}+HX3EKmkrV| zsPIk#vkHUZ$q_Z1lAECvlyB$!sAS;^@GGx0J@v#PCQwmn+-HJM`3o%{m6ZEJUm@=r zf4W!%Pnsb@TMu_gIHO@g0R7fJ7z(JMa*~t1!jQ~D-?b%neYn@^qz+faM@=*lZSPbr z#lPp5lI;Z^iJpOn=KT`=IEEECzlrG&2-Q_MP*r5d_VyNste$ZyRDCanFH-POVYK;5 zhyEIx%PK9I{sll5x~78oWPBdpsi{U-K5f~6*mqbDAOX+iey4+(N9~F`c^WPUe)b?v z5vy4E5fu*Xft*t7o%gu(wJG6|_N)r3(H^rM+L>B>^P~9Hc|u*VfqIg)HSR)_%_AmK zpI;K@SF@OY5A&4dUJmnOhwaZBdFZP=N<|=lxQ;=8zJQ}O2tBZZ*F@sRB2)xn-m#Ru|Ei`r@ zSieRSQAtjy9LkrNp#g)RB?6BsM34@kRJaL`rG`#I1E_bbZce%{a{*e>XaxnjpZ&a? 
zi1emTq|)|-j9Vx{Vu7!OK&2W zE`i_xoIM0^OR{422hA~pqf%Praj}siqLD9uajfGOX$j~jQ1MlZ5cs5k0$%or#OYFO z`zf^AU&??>0<3AOVEeabi!X^ao|!osFHyA<3**;KpH#(8$j})e9GFnSqQXgjNjh<< z@dNZb*n_c|AO5VyIeQd2%UjYR=&^TYnSk=?kBH>;qSVg{oosPqh~D0C3OZA>BBiZ< zZG^sA7hB+@Qej4sB=Z!!mqG>j^uYbAkZVx=1XhORC=2uhQbijBcqasSIizLsvZfg| zM2{{Jfd~dYyuwN}@)S>u{MkAj(pn`W$OLI?$t@qeHPRqov;d>wto>)ORZxzIy=(Y@ zu`cP(n;@nTn*@X(s!eyQvK$>GtsfAzA-#I%^5$>sAuEhoLB`m=4M&Hc?y#9y1c|I` z=8Vdm?~39v*hXPBzJQJ)IQWm3u5lt>sioy3r;y3bjcc;}S*}d$H~lTL=RX;~UU=V$ z(^t84`=%a4BOs%fRu-}35(#hzgOW|?`|NB^GpTOh6ls3P>W+z;I+DGISh+J-$U{T4 z1W+FR6`vWW_>$;z)13bep$`>l6ozmMT>Dq#tBJ8=5G9br`X>%K0W#$gLWbFAWeE%n z{{V=qdZkX6s&xvp58c8OM-Vt&9hx04bNjU2#d#<=gQMAJufecCk+b*lk)dTvI3u8`vN{BtUbvmB zd1>d^u6)Z;7k3%@Xhgu|` z8dt}(;PjJu!J|So++QQfxIrcx4h8@aD>uG=fbHnCUAg(NUXnU{c+HSJNBAk83zPt) zeAc5^oGGYs=geXd&7&iIe^a5P3yyn~;|CJJfJ+;M20%BS*|puk$bR^wT1SM2+Ffgv z@7);>cW6MNCF7d%KB3t4SiY(t|G2t))g-zd!WADAYE)2f7T~x&8Cc$0XkAo#K~*oG z{pd{yc37{OsXA=dw}0!kUZOF~v{=&$ZP}nnbwtaF$i{+*VG_`w34qOKZ2P}<+4#xO zyvAzm+J^~?2Ef{Wh{uW3YU{3s*qELL9dMG`cgG!+d2jnSpO(YwMX;e76>I>24q+|Y z?{uZD*!aPVkO=}HXRJ6}`41Psl%sMCS$woYZr3SznoB==Y1FJ?U1e~+WC0ZoO*a6n z*OrheDE&WSijR5J7xd2!8#Bi)t~?q`@>d+-MA;DYKZ%)?*XI%TTuM`QYb#tXdOC1WTXW* zqri;lr5FsDd6W*9k>-!$ZoWWDwd^v{zlIdEI@QOiPk0uIyh`~S-#z6?V&fk8y3^EE zo%ZxAfmv+eJIYb4y^;fw^e0NjjEY08hQ9_1JEi5p0dvIl_)t9t9-(o5bkV$7F|h|p z63LL%<}?;t7i0Ynive}4=&KyA>VB-J2=38P{b-0u$rULiSR{daiyCb+6gj9moNe+w>_==b;v z&z)QUd--iyQ^|KNXZ`#Z3u%SpJn=GD4Yn_!^Hs*wsk8O;-OUYn)ij0Y#iR)nZ4kOamt*5GsvO#n(+Fh)P33LtCfB0r1$kT52unm+uj+ z7yg-xzY%#x1a-KVbk_(Ngx}KfiCb-B_MQRc)589IQ+5(R;luH_5LnVqlz)>(PxpV8 zaA0kE;%_ln|Nkz6|2y_Uv(-AD)cpCuXqz+tgv*1HIv z-u61~8uCvedes}=)CHv=`M)iuA$}wti>p0Y9ot$2u>9F59A=^NW5cFrDGm!FT*692 zjp_lG-$Je7y49vQ{itZnpld-6i%)owf%Lha5jf1fo9Fcn4M=Y`jLRR(q1V0{Rve^h z2i-&!$Yc0i5dh?5j8i^EwN1~=fFNI@-OSOsTUvVh%DSU_=)@sz<=mU~#$^QhRe;^$&2dlC1QMB+ z7B5I(?nL5Iga2(*e!>|+)ZtBo4m?&;x-PFER)ffGQdbvh~BF zqHHY9&);K^fup*E04zQ!!PiAe78>S$ef>ve9y6TC9(}uubtm~!16GfY&Yuh5L7^nz 
zfY7xiRMA&tAkhX8%O6#*)(9q!@J>F=7bP|-@%83abk9%(&FlR)uDRr7TbM#<0IVMl zCffzkcpEQK3-cXv0x$Ki`_Px=XU}&(Usk60{R@+>A4Sk(xm2e_J|xqd_uAJQ2(W$2 zI~74EAyFX$E29c~U_2m><=X1R*{sxRN+hh6-v-d4JkTlN-ZzQ3i=Pp1qk@x6h0uad zYtaG0ualt*vs8__5U9nmv z#l^&eR|uG}Z5+Gv#Ut_BIrU?IS*KEWd}<4lW!RmAonVz;dRjbhu%`6gqb>IK>y>?tV%~!GV$F7&;^7U;8f5-=_IZ zYCTZUa!Uv(*4XRcp|Q3Gad1iaS>#c~Z14!`1 z@{c{6p5Z5%B2*aUO^KNe7{*$Nc~@X$hQK-Fsr`T6Z(`V)a|v*v{>hrXMpHq4__ z8CqfLC8@X1qYj^u0}3GOD%e9R>*ci(p2J-J;17mQh^7VStqB;))?+4AgNo)-WbHF+ zfb8CO2t;3zP=ljwO+H0UUYLko*FlPH%6jEU3zVslfL}Vx#+XD4nsm_;fzC|jt<#(jLsOOBfIrYqLYI{yw4V4fKZ3s3pM~!{v>MTFeSBYUdMOomo0u6kqddX3W;n4PT4fV z_{7rm1Xi=~ycr0g4DD@d`utf?bUZ9)Y8COI;sR;oC8ITAGo#(wfmnF0XbB-6G>)~t z-=u#$Dnz-*I)IYXOjPiZlJbj$ux5pCw%CkYyKd9)tgiD{JdOY);er+&gF3O2Ly#SD zRKv!(z&9q>XGTWeveV}JIA`639+f68CVuHvH$Tm2>GuGy(7Ay+QAd|=c*3dWwZ2zr@kmMxghQ;gKC0S~ z?01j(4v6U)BuXfCau-IcIAA~flk_n`2lni5iE>G(0P!cp@A;r))S-<93D^1MFyq0V zJ5slBN@7-$g+!c7#6!nPXosA~_nkc3Uu;L73@9;+!pa0bV+r`~i&~d(w?7;LG@Jy~ zr|H!@vJ6s^Ja0WtzI2121_OD^rWt`1=1R_MFvsl#GQgw1TeN$#ev|V!RF?~`nQf9O z4VHO>2t$rns?Y37_lC$&*=7VRVV~ud$QUv)#B(+y)T;}rj6*u%r6}Tgn$I6uP}w`r zg!qX#jAxiC$`6g~+#@u3OqA_n+9M$qD%)hC>r{Q8iBe99^$J>OHk&$N5A3W=dS)2$ z0T;!sN18|I^t28FVR6^rmLH>peGOkgWf}F1-d3L#2j`QyU@=%Ec6$#!#@;8{mK>9K zm{zEJD$6if-7|I*4QOw6))~CU;K|Tfyce6{H#?3iwqu|P`IoHEgvcgIeydtsXQh%a zCuwEf=Ope)mMt>pr98f*8$=i8De1g=YxCY-QjfOcC^{0$<6=Q2+`> zZP*coCPC?}8Mpn?_pF>3NmG&iecfE}c~J>&jXdnIfuGHb?1#T3_t>fg0NsW5z#!?K zQiD;@T;{Xpi&-@5wkb66BTYhB>bDZBhh!Myqq5>fM(v5+5q%JnX9-_h%FS|9ZWKg3 zsxR9{%|VnDsl9HyS_mCFij|18jDzVSu$U5V1wibHc1Z7{Jh;<=GqeS3m6G#x~}L$;;2L~JG+!E zQ@rtY&(J3z6GLg+J@$&L-NL_9pI*L#bRS-x{b8VZ)r1;so|`IeP-`nBwerzyNHEbh z1QPWv8j-M8P`ld)eK@gDfdphRh$B#DTs`y}@!5~cJ9!V}0y&i+RJ+HshE6^+Z*>h_ zGA6Chf8nVFl6G6;*<1ghUxrAzw(R@DGe7fA9lW);BIgi@qF2i;>c8ahqLL}Pj)fc4 zn3nZ?XpkXUYfQqUEUaEAT$O-TfiJxroX!O0_(gf}Z1wclObjmwbaSs|%PE##KvR)( zFxZ}yOpr_ELH&y74l4l!)ywNHi4746MGI(@H7cNbFXdChN5vN6hc@2zLsXYn5KcL$ zv{$^4x3nFMmjXRozX6G@ZrltfZ_!mv&etn18C3jVFdtia&X@^QN=%z+?+-MC#sWutv{_pWE#ERR-i#bG^{VBro6d 
z5BWaQKM!L+2X-Q>idyp`dtuX`AJ_JS7Z3~m(1cHw3Tc9Y}^yfx_ash#NIlK)O?!x{_?Z+qE_0Oskk!)-9Nnk$|5Qm&C zxx_$=OdjXg>gn%zz4eVN@qv?TTA6;%_~Dmer>=bJ`fNewn7#YM{(HX^WUIIXEA#M0 z{CW{x6cQJwD{xwG&TNLHoLqM~USt5B>&_A5Cby~`}RY{G^Pwfi!i)nIwmC>rG ze+SDnsw_o>*-h}QET71^Zt*<+S$W|J_kq3b@s2;4A2g1?69+weMk__Ym7kqQIQT#2 zN0xs&1QjBLCN#;9kq#O3|qEI)yoM@r~UlpwZh zkR_5&dfc`59Xo!$gW5bK#DX=V1PV13)jW4Qcom;3-UhRIm|v76Er#D?ztfa}La>a$ zSp`){1c_8cSRz!}!W*30G)wcn5V#BkorIlY+d$C7j+Ps`zKGxCeuHK|rpKgIOk=ka z@I~BMvXpvPu2LQ8SHKNno(Xt_I>@Ip>p{Ngeq{NcH*e({#B1tpJqUK%n(p!t!RA!l zy#X&dtcmYj5_AX=hpsX`Jb3?5$I!cWif1PU6vX7eQb%V>Vlr zL+k=YIAh-h6&Y++zxW7>;+w$Yev+uXQ}jjr1cI97lO>hiRH6U@VLvdON;&wW@ME+M zbgcRhGsacY$rCq>F&nkRH%-SaEi6?wE~*&P z_l0ht`o7DB7xJ65ICX4k^OT>cTMqcND4G0`G}mFORAW-2^^{2%U-(=J0R1k#3+qginFXIV-pO2N>N4-~z%3X&1Re=0Hya*ZM z6BeM3(`C~C9cE1km;V>oJ~=+9q%MeQxmrlTg!mp(rS(gQzRPhlY7^^m5tK$_ z`n#)^OpF4L9O0!d`B*G%vP=V#%jx?QW*GWF@y>GB?z2rdekFltVk2|(w1J=L7 z0uMuUxlOcF$=kb5@P z1l@Nv=rm90R ziBVDeu_VttZmScP|1fXY7}teZ?qh#kcD;_;*KW1R%bA-w5j+Z{@EJ>u^%_lM4j3JN z*Q9~YN@Ys=MzF9nGGmw~1`!c#@9bM<%S1K(WCqI=)ZE@t6%H0O`;n-+7{xUA;qg#i zOqdnGV9UMR;=173p&MCriao(1zzV6Y(_)44BxBm-8wrlsFm0lJ*0vy;cYX1LYG_&_8?EQd`4KY%~e zXBk0}yh+q(oKH=-fSl=l?W(=bi%)>Vm{2j{_Pi$I)|NG!Hn(Go>^4H}2NLid_1sA~ z0X!W#*ZmA~Q_>}BI&G2+&s#&A3oD&t8eiBajbZAL-vb8OuZ78htngPgY#88qrgpx8 zzgHeEDx>i$JWjx-hIt;vYxXD$PUW*-7ZleO2B+X(Jn`(it4%vv!a;gqlAA;=&t>1| zblUB)HtwYKpv&<6`zf*%j$7@E`IY|pEG~h;e4nH1MQ6pb_vc+h3*$I2Z!`uoU}1nd0tyrxvRTh z|MEkUbQGWz)mos;%J}Z2p7O;jdhVeEBJ@5MuJ<|aS|5>r7>!I`)XV+$cke(j&=J6e zT0h9sF5%?lr1kBB3zVD(F8LMHixfx@8NK-TQ9&=57diC*ex8XF*h9j&eq9L>6&iXb z5zTi)iDrQhZA@5W)4O5&$_jKk*?KQG+Ze6!`q0b1E1SiY*rE$c0e+wP9!pLqg~;>P z>bO$S`-G&B#bL-0N>4I7J(cglV-Qru9>BC7d+OtPzx?$wuR2*fvZ(Pi*nch9Ua(YH z_Y!o!wYeSJm+5{vcCqK#+hm@tNk+Y$%kyL=1TkChVHf0fu3bCa3>G^*RA=J#lfl8$ zspd`-EmLb$CX z#KZp9KVyY0dVJNm4~t(Vdj5qnXHT4MSTx22FV~~1bslz@OKCq_-DUqXgKtObzIeq$ z%Yom*W#yruwl=Tb-eN3hI4R^)^(NmAHofc1xb00w*suHWb(>``_qK}KeA9U(w4V1U znQ=Z+IrO(lx85e4_Bs 
z3keubPna2$rz1vx`)m6%xyz4cqkvf9yjy3Dv%Qf9;k-jZ;9N0aVmA?6-?jWC1i)}e{Nw4F~NERFe>KQ+A4`dBZ) zSdCpY_dD*AWj~$EciGVLv;Ri4L2ehPTN~{M}zM_#KTC_39v(=B(Bcq>T@}lIPvZU zn{YYnQ$m9}3Kzcph0psQ6G(5@f~IMuyuyVLE*f?dHzbqBJ;H^Dg6ym)MyH_lY5s85 z4q);&cDQHsdDzs;zie64lWWk2glRFTS8{eg-(#c#kwcJ z!fk2r6a)oSZ=-NOH>?v!DU)I&oUNPqg#aLdWpH7lEZdhji#hTpY?vioxX!72!6fR4 zmcxtdsLr<_h|@vgn&YAHBo3OYIKqu1tRe3auDx|8aWiw_-D{46slEL=NDB*CBa$NA zeH4;ixTy?iCThxE`|e(cg;x&bHae#;H6FiHybv+{d43@H@*p+I*|JyI8|IqI2r9i= zb=!S&x=rB$H#!|{+yEw4C`CA&0&tuO`;~p1* zWz2IU@G_9k&ABpbTsO>ZE8V@$B&GWG*ssw3Gxm+MtX(COk%!)6z%YKoj8;>y%=}?3 zXuhd$`J^0$b@pTSy>-gB`o}~5wD-Jq4N1FC9Ln-d==;3yr-QM4u_qrQb$S@L_Th2$ zLBGVQqspR@n3o(;{j`t$%insX?;GO;E$m;u5XgP-|MqnGzdy9jCA0$j=ZG8TDZ9mx z7>{s$sdM@FL*3NF4V^vm-`tCPGLn!k08`i^GW36Sy8N05p!0B_!H^8B{8EBm2MWj= z(d`UFn*W=ZfrytDF)ub|fhEpU(4w^-tZpsy+>!h;TKH{h?+SD)Zx=y#MSb*`fA#se zEo=4ODp1DedA@7gy1NUsbyHjE}K9$82(U?1roF zWyOg%KK7$eBaSw_HeW7vL3hr(J*CCscUH~LHeuPCdG9ssr6l76qryDQzn`5yIh9-KQo24vi@$YV5f-{)4=Um(IBj%j3HR_I z`AZ>B!I+(NdfqnqUde4OhG(~!X7@yyZc{isW)|tSA7wQwbJsTU4gSKJQao+J|2gLS z>-wa6S+Wa{vu&)U?QxwEux+8dw7+yzf!nIV!n0kRy5)2V^j+g|T~hMlVtRs_;B6Ol ze83%u{3n#6Z>`ZyO+T*4Liu_Pb%-&1-e*4j4ErA{46f11 zMV>SOo3Rl#?*qvom<<@9^|{bDM_#qbDzNB4?P=cwK*AMe2Bk2Jrh~ForX9BJd(BsG zAfo84)iUZ_CDgV76}Vn~TSnL;?vDk41o(Q%i|rX0MTnu3tk$?Kmm%3+pPs(B^kvS! 
ztbgtHKotZ+tg!wgY+mbV%d^;Q8eL&hCLL93d>#^jE{HY3ae03HN5?m(}=- z-o5M&+-LQe51wJBPb~RaWBao2q(Mb>NFf0t3KXEtG%r7YNmKJ_Fep^9Sr!&ebg~Jp zSD_^HA3veK*w*-5v(L4y&jg<>hoxJ9n}n9hH$x|dvyXWTt2;vu)f6P=-u6TQI5jS;1H!|CD{( zHm?9L64DGlD-Z-* zZ;R1BmXkIWd+&`Q?fixtRd(VkftE-apNsv(%c8L?<_ize!y_c&No!)`@3cOw&*zMD z+{eZr0ccFv*`V40{)>jicl*-G#yj*aKt&5reV5+ja)tY^@wN%Nn^@kJB;M=2#+!E+ z!`Yqyzu}k3@s9&|?@djkx)cUSbekyVI3CZp4Yg#4O*!Re;Y&r+F)ZX4AOZUCO{GfJ zltFWMPt@ z33)raS29tutUuk%(|?!l*L=CT#i*5yjJYz)lDA_sg2r0!-9EUuxZEWiT-dZ6Ubu1| zF6tjsuO->BFn26dBrp-&J06EvJsOqOt9v=87@Nd;ywlV}tBU!m=L`DT-~MIw?(w#d zHzY2vZtDI} z*^E9jFagxL>qD#FJ9vG1OPz$;34mB^QZVx zOtXoo(2_mB%s*TJ7FJ;JZ}oVmP<`&s-D4eYI>~*M0DaDyLwPNi*`IhH=-|G|SUvGH z*2g8&7tWN{ImFTZ)M~VLN0?$cg~2-ba6`zUqG(;Z+ElEZIwI@F+T-v8T*&-)sB8obZT@U@}z2e-vwwxIxFtR7a^>muT+T`zHs8%*1!+P=7imIzw8FWgH# zD#|*49_AjU9d!R%(Wrd7lt0GITG3UL*iMEsa@ntX`n$ne-nilCoWb0mcN~#+EEr!= zd)n8TCGKa3A1;$diTdn? zXO01;laIS#Pbx;Uz9L@A6THK zm7|<`EFlA|yj&ED&NrB04m}N7hrwZck4*q(7yvveI>TueP=-J3@qW|laaLU(6E#J1 z8zQ%J%<1J~pZc2_gY2d2)c$WUE}3?8Ls2v|s;XEZC>7IcBDOD|#oxo=QAN^F-y9cj zwKEkE_ziF~#j7QSV#Kc&qpY#Zt`DY}xFPUGVb40dvSJ7tU@geDgq4vWu*S$VT>EgS zXkdG3XuO%W*`#KxX_ zWKbc&)Umo}S(>U22X2l4p-W6)R#hK%gPWK92lJZHm@Hwx$20E*y3VQeDT#_oT19*Y zC*hzQa7`Gydr>M)vOY~R!G7NJMD9GXJNJYMuLNkjcnG;nPN#}hISinwv&gy_@Y_gw z(Cpihq6EyErg9nHAJxk39PnNL`8Rub(*mITN#xAC*{X4LN1#)N2oQ#n*j_?mD@IXl+7p9B>$G2s)Rv(4$KJ-VT%XYjl@=$yY> z&haVm&mmG`U{&hi;s4}37@{+*qcBYRw$2f{9;8mL3~dtmhPHlv-g=-kI1Wa_@U!H& zgaP1m8A@sZtoJD%RR7OJ?WR8;gRioOu% zr*pvmWZu;Qp{LbR-Y)!|9<~~CBhjBljq%wX*TC41*5h@nt&i3b<-$(7UqoAabQZ9&%IDdURSJZab4^#Y^=pPQPM4OX0G7d1* z|1W5X$mr1qFI0TV9}d3z54W}k@E@1|0~MkN2NBvGuKH91#3e!_YSLPTn5iQX=l}yC z_CcN^{zzzWH|*zF-ziLgNnRE;dAH*+rfYGat!LEVuaZ^!?O5F<10K+tpu(v=`W_kJ z*IW^K=rFy*sH?;@;L{!ddhu`{HeJNrSm$zCCTlrWR7juo+etM=qu_T&(P|!+a?qrs zD&|(fh<2+}#`D!^f}9Z|PiFv-)pQi>(7Ds`vB!@vzLAWd-gu+HoW^`QkH5>SV{lqJ z`f49JlE(6~AMx^TAEi;%S(kICVpO!{a(YL6Nlp(}>OD5dJiqVF!|yWdi_Ru+d@=lX z>3?NgH6+n-syZ9_yL})+jRAOpjNR1n>&wqT^8IMv@2lqH=KzszZEYKavFBud+Vx`I 
zc2;uN^XIvbOV2Eug!9d8hwoSD9uuS=EqoTA?xhFLP5JMaLAjAN#$PG=j4~1unNLhG zS?ct^FK{@DMwQMcuuR)O)W#?_)4Vu@$pQc_ioppU$D2v_K#lA~Q07T=?K>|`0|T+4 zdix3=O&2+`-DcHGc_O?@W6ENAy`7EqE1z!?VDo6c|+W=WyOK2RBh#8)334~1VlS1GY| zf_fPgT>EWO!(miA{pN_VRwnqaBEz0tx{Q=fm{$7B~N3pv?zPMWZ!wwSTFy4Xq& zjm+6w`jEhJ2y4*VeC13JcQB>GBVZpe9xP?)VoWTFmDgwy;#x7R{e)#wAcMc#G8b37 zs`pe*&(f9Z_{xUQ?x+yZDkn!B! z!4UNJws;5ktUb%Pt#v*)V(kW&>eUi-aP#>~x^Wmrj2+g`ten^@&JnEWZGUp;UUGN*!UM3QIfUPow|SjFR#M&WzwGAjw)!L)0^1nx`C98)%*Jd@ z20Itn_S){;G|JQ3i+5_dXZfXHv5Z4$aRIrCoR*PkoO$+Wxx?F5C305G7Gn7Py=$k(1fG0L9+FuP z+n?Y1Z~_3x(GL1!2=R_Y{eJ3e`T#VLVIlIF~%6h|XRM zpc~dA+(~fLkwt>_8l7AL70UtOC#$3wRB)c?do#m=HvND=7=R$)u42SOYM4oA)+Q@* zSTh-P-{#Mr?`2+YKGwQ>DIN8&Ts6!zN3iTY6`N#Cx3o&VbPn3`+i44rcKKgYWS26s z9ZWn`3XE9Nu?VywYqA&Gu19(*M(@K^6csf5R0}zn3Yo4N$5mCGb&l_+BnQeO%OmD$ z169q860MmoT|xi=6n|EV3<1i z=}0zq+3?HYcvVNSB)B0LcvShYu#vM5R@EF-tq!XG`rI1vap}X0N7_|`E;aor)CgwV z!p((Jg2h9W7tgUD=77*Pze&%q6h61aWlFc<>c!H758DIuji9*y?2p|Wtq+qghtrOZ zcu{(UjIaz7-vL$YJB_ckd)AO6k+9#DY+;!jtjhhJzkUs?P@slDC76rn9#+iKN&e#P z?5m-=ehIWcYs_B)@;5)j3mBka9m}t=NIUBFnM%78+L$!WAkjY8Few;` zL1#RzPSb0iq_eYg;7(5J8XB=jULs7_AJH>i?08ne2c!_oJ088-f&uyqFsq~t`~%A} zVXV)4TYH6A>~3l^0XuYqVw4;uuoxfV>o)wiBN_w6d+1U=!grFL3>dzk#u&1kU`I3 zv&F^kQ53-Bn9k0JKR)kuMe}R+rPFyVS#6u&hCMa+=%D8~8LZ!ERKcRpXDaC~!2Fbj zp&=zd6LXiKavA<`2e$fT1qr}~r424(LIbW_RHoqKxvwuf*KfLcpB8wUm>Q@E=2du; zeFA5Ge+ViKUZb;YJDwO0guRbT?Q-yQ(QRhV+8wFr`UT37XS!NS8-OhTR+L;?3Gy0~j`Hd%+n#jbXX7G_vYn05+ximehXg(SmRyiMLTldgd zIT(<(IUc6oP;qp@rhtNNK?=b$6C}UmbAAeoJ9?|Sv$^&Xfi@F94Y$F_?`jRF99Tx^ z>@q$#J~FC!y~6G>oC6ol7F({3h=EOHe}?h;biRImHGsStzP!Y#pZn9&77QoUwKvsd zcei-qw(Ch0*%!>OiCabkRKPxeQ1zyf_XW<})6}$|6bU*L`5ipc``Z`y&mzu{OD%AJ zj9LRZ5T-e={Pp(qH^7#2=-7QHhWH2YfT+TgODd=g$-X@lpoqc0x){6#5B_!e?;%A0 zc48L5jDvWAt_!RqAtNK>v1w|DDKQoF^9U@;ckV7a+Wll4g<&(ZeAV?kYMAaJysf>c z)X`ger|A81A@<_;BMwZ!@0rp&aX{t|&2zBhVpNdP^{h=^>I9#vU9h6MzwLXpc9PC8=4QC=ovkQalI)2~AN96RQv+sCzCTCN>!!bK^H)epG2V?ZKYpW{`kXYWted^N2jz{VPTGbHjwxgP;=wA+BOeC2y-R!f;8}>Yx@w@nXAOJNE 
z2T^*eGS91Nkb?T@(&5oRD)jI_oL%o)e3@p^sOkD%JcB$3q2=QZh*(G`YE-W6cW)vj zfxT{WUn`2*M#1hvF3%c@w7VhYPc{Xv)P&bRO^#fOJCu}AX%e@+@)*a zA>`m3G#ZcO$Lhm17(T#{^RXp> zqDR5K!saAivEkG}jjrtXMI-(GzHI+|QwFLsJa8M{H*pn>#nqNmV8A^@ZqdeswoztGb>tF3IJem_Eva0-ynC9(5@rP#Z9vnwyKci5~(2sUfxs@ zCKX)42-Xn%zAmP3JNOcbnG_^S^~r}wOOs~YVyILJbfBsc-ek3OfmL9-ib6M|cEoL0 zRv^kiV zI_2d%G4JmaLH!9Te`?=b{>5gF52OXP#&06459wCxje{s@9B`G>dYtS1LBT_a|LRD1 zVKeCelGyW8?O2AuD4wM&pjI~*#BFejka6Jx)c(p(eTMkJ|92r3oRiFb301vg^@g1 zIBe*{JdgS~q5q!$T z+99>;lntRILWK^lXxyxi(wK$JY~Dbj__{H-n0-6lr2d+saKtFI*e%*;jLpEI)5@Y^ zXCr76GCD$pNLhghVLBVE4+u+iks;-2GbGc56*8lD2%4(nJIMz~O8pdruQYB@Y>E_q z71@V)<|RB+ok;BA(KoKj-Ea2`BCxkx@5=>H)gobhaa0Nc1scz5O>?iplk1qh{r*JM z?b4uAjR@3j{ij7ZzgorLRBxg&#vb{@jzm^kxXnUBPT>3#>OV7Zs|*qEzrsSPY6Cr+N5f_XFrS?_t{x%NRvoQo z*7cLjp$15SU~)4sZZ#p(i%&(`8!S#4>ONNHfqH zxLDD=*ATsf?Yc0NcfX9}=Ww2IfVE=p5nsBm2&3cuh-Cij{bB`p_1Cx0v$Y>iHq@}P zv%kl1u9k1#i9x-KGI>_QByaDzEy*6L{2R!woRQ{1wGCX*Z#L6y^P4OYv1U&0F0uI{ zH;x+mP4>!cB5^fU&+_>L1o95J(CLxYO|W7d{{cQ-Br4i;0{{$aTv5mEtA?eKejkp!ciuXno#d* zFr}_<5^dn1;r30_7FBdyUHL~WtHb`J9e9dUktdqv&ozSgR2n;6>DI)ZJ@9HMw&~gBH)*xMNoeS| z0W2rRxAR%c^kq-X4X4p~?bajDfR(Xy0IKJNzHgJlh8aehp3cN5?`wk~UyNXEThlOU z@iA%L!fP;j$W1lQv^Uosl}#1&+2?vZq=p1ZNLWEPnMO(BvDK`7E2Rb|So9qDrsV@H zbt(2HJTkgi3J9FNx{OuuP#rQuKNKpwmRX;8K{W~{`O$Qjw;F^O%0 zIg7`^FFAFk+ZX%8T6}4%0CuTVa}3u`9S3kVGY$D;S7f@%xk2pZ$rBgNk`@|T;?C{s zxlf{csEj)0MO+>meuf&ejl-r3wSIXayK%ke5p!dIXujGd-DEWgA|qUKaaXW<+!uE( z$JJh@+Vd$JFDn=?Y5nn(8KN+pf)%if*gn{8#H{WN0^3#VeVJ{>3|S^L$oM7a%)%pC zo@_al@gR~gh>{u4c5D(-2j4;bG%EG6cDa4GMWJp94Y68dh>@Mr+j1)x(U_y+YZiZ= zZPke|Mz1ax`G=im?y`Z3Up5+e#azT^Q*mE|$3{69t*F?rt#`+cttFBdcx*!13ZOWyS+95|FihxTk%{{i08(!=<$D^+t)Eb<=-Q^USjTuh4>OGF50vc6AjvtJ6bxT6g# z7wdqjMDS}@FfQY6ZXdGV_H8)d?Qw-#SD3}@%UpiUBFrxxirD++;=5oKgM?}Yv4V`R zAP$>FVU$uhmnS!nO{$w?0K33wPv5xyaPhXfG&j4Qs7kie-K=WEaX@YO-8Wc|nMeNu zNy}39yQ>U&Du=C_%`&mFb=svzV$Fsp8^P36{53V@-kqmyY>-rpW_!fg2Sg5L$J-_INOxKHC$Xr(~C3TSKTNrlg~nVOYG-vfWON-}wsVqZ7C4HV5`G8U?cGcGIQNb@TarGHQqaMPpse8Ku&Y3H;h&mF_I?846 
zjtr4bgE2P#C$rK*&@^UCEo-^TsP*sQeI1$en*RL`hfd5;F|8uwMB>3K*@g;7P2Cmm zUsOO;@@rw+()D#{2m`a{H(!65B*bGaF5;V>r^nN|zBJ(aYt!!cG<`+QYU&s%MRHEF+iVl~PjSbQdjBYL0L2V{!xk8=$oeBvR8mftF1?zmHddr6lMFPdfQexXy z)t8-1GjoiV%U`OU%8aec0K8Wf=@}*Wdmnehdlulj{3LI^k@2H^eHKU6S&##qti9!V{&;B z`ozt2;-dXv_H?=5#Qna%A`<&G_HQ-ca)AT70qgEofC&ivARB`IlW7HC2W${ zYl`Vj*FB(o#D`FlKgf7UT^7PbLFCm3--VfJEIRa#=Jf2b_$B~<^c5U+gM;}^$+K@| zr3$|9-YI`&Nw*H#_vZ9t|Kk%Dl%ABvMll8!SK*@DFD#}~I{J?=rc6uldSfwGN`C&K z%%nNYbVO=nX1uN#BYreFe@bxEs!Xm2bW0wbrEE zl8h_ykAptyF|NTsN8Hnft?bHsP{vglUEVmOWxaAy+J5)}z?2W4@HA|4kaY$_)!juFS*is1NUR1Xmn;0sqw2Az4u(|^E)3B5B zlTiZMx^Xd{aYG`~tdX@RM@9wrwN5hk2h0vti>G~X-FuL{& z^=cyv?0m`!lhgeC3jsc9(xKb-IZxgCc6WGK?wflWM(qQh$UsCENXq2Qch0Qox1q*- zB=h-feRKd2bbYMw;quO@SO#akOy84e_`pCg4%RDY1iw9J4~HMxB>(Ph(t&95{{?5$ zv)P1V)cEU?)8N7xNezYX@>R#DR`jutyn=$GZBIK)_Sc9|ttK1aW!3y`;#F&XJ!7_c;AMJ`&WME4Vf=MzuI39VPWH0-UoBSGgDh8T{XTc~u@+J%Cu)h|1p2_-?C0vweKW#Jn!6_H zfuRCQ2K~E$HQb8xtbE({&gTI!bdu*d!#vJf{5+u{^Cl?9A+& z@Q+-8gov8VX6_txT$reebxy;YJ-^;Q7aKvG%6yTi3TfGNE;o0J+Y9;3nZ*=7;H7x_ zaiq&oVagAPpCba24}dK?mtKvOR}%tx{CC(?d}k{Frpy*wor_Dfz3}&zL>YE#WuYK+ z)m^-@x`x*7-XmX#_u&82=_3B0`DMmRrZXyU_c4pY5Hh_+6y=8)6zdJ=9&B&I2U(=| zQe!jXv{ne78m(+|y<6#Bg~x9DV;gxCclw8op5)r;O*-!y4=t_z2 z#0xhG%rnZBx(ZwCSe%>;+j}BHBY0{GjQbAjPj$U?(B!^C!emRk)~iXJmIuL0oPGSX z1!LYiw9)>Vr#)Mf`y9pqg}QGhTZzPp)i%lWbM;5xUeaCGd+zzX@4|d&38Nc^Y>V&D zGr%;@R>D4MY^K`redgLBH+u9rW;a8Zmwm2z*O!qm$;ni+-MWvp{iM3CO@vAGkQnpk zLhg5Tnr~0UGC0O6QvZj!lS-ohXYM4gZ-mNhIYO00tLLlb4F~A?_Up1`t7n^Zy+IWU zGmm+Z7>hE~lq2j_66Zf_mE__j72Y0U1aU;l8qg5eUdu?YPEZ=fzjjp1*=$i3 zl@_z^pz&YRI5n#~;hT-gK}rCpQiWm?@Im=#>NVMbVP56|hD*iF@f8>TOFq8g?dPm$ zO)e2Gm&50L{1s+9US{&QR2jBN`p)Z**wr^6L5EhXby!9SKs$@sQNc->T6oXHFJAD|WW zdc}U`3ZYIFzSC40QcfhuQPZ?sD$_lxuR~r)Nea*nZL>+iR8PT-mc>nhj`_(A!(+(c z-}{55Fzs}sqvIq>KX5}r9!D>2{BDMrbFXbrV9p8Iw2>!+JLn+veaLE%Hwb|zl(UKT3F z%X0V0-{?z$(zI>$IwWL?nB<{m#3+}IlCIbuuHCP*s-lC%Nlv8)7tPOd`j_q&6M+)8 zR{GT}gNsYsVzQAInaf&6a^^8&@~5e=Yyl87D>ZmV1jWn=nM2 
zzA=92!tH=-H|Myy&N1~puF1(m_WN7EPvNMS;BTfx{6`&!*%g?71fACQLPL0TU`gjw zT6CP$%W`@(tY0Idg@(Ki%lc43aeLY;A9$%L8B-_AZ?RG0vg4`r!;oS#X6s9A-S9{I zay|ejL*op0K+^z96_0`H8(F^z0R{92Ix*9k*)#J+`uGlcc}N}vvodCfdaY{tKx3W%y>QO;w&3tB6hrV34m^IF8-v{Y|}1bm7bn&-(1FN^>ZnOPf?C)@knaut8eeLoH5a7_vU`d%5=9 zyIw2nRz{|^o=uf}HbPkL#d}HaokW7=BVW2-=hauelw?Msjd8q6;jdnO-;6j(JU@nq z2oKcHnk+SFnCGoVS=|c1=MAVo1ZWFP-ruMVo`3kxp;Y;TL>+yQ<^l7}_RB=`Zcf5M zbIwh>(5gbu2Xy>$sXy+wpD)bd*1cuZ6m^~=Z#jxzZ)0}eW4-0 zJ6a$`cAs~g_#xmt|NrL!E6eX0n3;chTe2-9zw9dKUlByz>S}TT2Z~czP@-`!m6c6@ zsS5k+M7~$$e|7rbuk@d9HTeJ}fr45;kiLH4eM%DIi@ph~8!x|U*00=xhVqS+gABuk z;Kur}Waa}cJ)O%!=?~T^{}PiwpVg#B>Bi^s1X!q()ajniudtod;-uB?m(fE=m993} z+Mkm01g?5UK2;NFozJCpf3&+nl%!@B6{}|B{OYr3Rl!;8fmi=%JEcec9%`NC`96*C zhT*O{Br%b=@9ot;j4#;Uz0S6A_Scs^T4)+W3FL=$!ItH#tIYaCo&7I*Q^+ts4%Kgz6aR)04n>iul=?B+xZ` z)Ly!oM@a4FsNe&5%0tI(V~oXP%?uzob6%OwP5p*c77A+oJ7n(ke5bu0`y%%;TVesA#?bAp>-j5UFc!5nC#T{o3Weq z8%?zC$ZeyFVQxNWlO)a0FK?8s|Ed7$7c6#A;+jvr&-Oh{Btx1Kmo;T}AXOiCP)P6Q z2D#{kLpT`9h_z(qAToIcjs^K$k?`i$mV-RX8{QvuzDaO_)O}LI-S-bnI<01w=D3HS z%ApL1a58Xe=KK5S=C0DVxM7H(A*VI;EiW#*H)n02!MLkK>k@O{B*!^5hnp}_}AqVD07v*JhQzBqouKnrm$8>1v!Y&y*0DW{7ZE*@FN>6PwA^>8~WOo|rHqOg2OVg6k(ylDTaK`QJ~!8tY?q~r7QgrOdbq#8 z^)hKe#Aej8I00su8ci>^nB0dR=^9d^p`ub_%4miZW`)>|3AG7ivvhk_@XSiS{Sk`4 z!d7#4%`ylp8@7Nxr%oFk+PjIE{V2$#siEm5Y;!XOYD-$_|6*u!d1?+m%}ojs-=zco zI^1Q05s)d}fbJ7<%mFzKy+>*)UW_vkkS;DYJ+O}ITc+pZf zWAD!AVD%7~xb;ZQ@es)(AVnqERPdW;)UZgfY-~k6xo{?uTIEuwUh@m&dvH)tzr}!M zzCO$Cc2lZkcQnQmRWnV(!n||9+9|K-7b$4;5AqUvIrL}Lvm%G!ql*wMMEgQPt-@k5 zKmT{dR_b{EzpnjX9gz5}7MzDmJ@P-OkzB1YR2<)Y3l9E_#)kbJeWL2B+GVzp;nVXa ze7$Fu96DwVGXd_PvexnIR6xu^!*UFcGX=GW_Si-hq*O2#(er_y1VKmKdSqmH+%#hv zXOZ#Z!ovU5G-8mt)PlNe-jUJ4!Jb}TD}TT5AP_`ytVT!u2?sDgjlguH8&Z+&fNHjkP=Bc2C1P5PJ z{mn=B2q5aXCx))L*h7*YtHs>pN@xV;3t4n9CE7RwK16BBh9?J87*q+<6CHjifA^PO zQzDCYU(^py;*Ad0&{&p-mdrydQrgT&($r)^w*y?cpKl7TRIn1IKDDGqdV{M@{H8Ni z_Kr)XrY-u>OWc{3xJ&c$dM)a7yjuCsug)E&Mvna20#BMUm!9<$H4-AJ|e^B99hM;$+Z5Oe~7SBLHT*R)QP#EqB=MY8`*9$=0G 
zUx>{AeD?oK@yMGSH%M4ze&`IMeyZGE%Y2xwr=(zU5aL25^kDKZjqu2RL6zSCpa9%R zO50k^33cN828uw!!9krw2iL<9X3p0yeObqX9AWjaFJ>9^{-0vN33<15U&CH%Ix=qC zH}U2>(LvrPh&=QfRJCWW>A~5ZC(ik#uEIqdcM&W!*g7c;BqfZbIe2-5R zebKy%^9sd*`mv0n7W`CeP^Cr{%TiSPR6u26ZhmvKm&wGkexMo(Fz93@Lx@9g8jIja zJ=(jyEy*~aT;_a+8tC;s5M6?JsS0Q(2nqM^88NC^z{AH+AqDFHm3jWRSn;0^m<-wz3!*S# zd`q2ecw&4~y5HdYd_}0`gM)GCwSRq0Z&^rQ_B1r#I&}QYl)Q-~V6nDv6zrLSJ-@_c z)NzEs1v#c6tj}z9-_4ReI)o8tUX48U6Y;f;C0VAZaF3%VO608bEwM9+I z8k%qr#<1R&l$w&7i)P*~E2*{zO^znf82 zALMAKxjA(zaA*h#Cy?!&CTPiu#0bxXDQv8%#pZ>M%SFx#L!dKDh~YI|YwJ-}|~c@ojRn2m!Q&{O`&Z_3rMDmuaI;7w&Lx&(zY!*@iR`V z!uC*pJ(zOck+OL*24R^4BVt4;yl%H~sO-IK5w_-;gd;yZh#gkmYrDhApCsPWM>|Rl z6;t;St!f(sM@qj??w*r29Va{32jNhRpqj z_hI5R*zi}LDjtVXWan_eihfwoJ-g!y>-R@Iuy;EDwyb^-1I%@SZLo63<6}LKkljPn zPJdbXQAe^Y{bm~vJrOnocBx;}!LQT&RMYSYVAJV$R)K5b1s}M}NH0b^jt9@y)_Xbi z?nZ9X{&$=eYCT@P{iAh{Ybp2s-VVoSg1jchHQE~9MZAvh(vz-ExYiFD2$!AOVk#wv z$D?GEJ7?52A3X+`8-u(m3^@i&M{jy>YDQ*lYtQ--Gf*AkZA-)m0*y@E>kx1r<=Fk(@NkzLHkv;C-_u07pd-z?&`ZtZ!omT z!bYc;Dldd8WAI%%KZ5*KJVpr6@k)hfAym7m9*%CF0~onb=ORaT^m9&sm}(5T-_s9A z?~hp+rqfTrC#rL~(Q(;fg6at4YGh&%> z8D;?FNA@~kSn@+Yu;ISlpU9D^+uLV6GJCSx|IO)phC2an*58>bK+OYy%=PHK^-zzQ z@u(?mU|?rkMuiKDDtv|=PQpyx)#H7QDM4U4OA8q3QGdQ92~;e}w}s=Vy%(sMVfU`N zkNz4t&+t!-{`plcU_o7!$PTBu&(j-R^SsgHX|Uvx>Dr_JyLrq52?53#XEWr*Yp|}( z_XkoH0P^Q6@aca_&_r10UsxX0g@3yYk(0yEOrTfkMwjka3y9 zvW4z1n)fB_!|@L2I1K0TLtybwSn>q(+iMCAE!gJ^F`Id!t>Oq&RXNR%3Ya3Z$9_dq z^02uiPR)fDP|(f%&M7K|sgaxGd9cQFH>9Guw%aO>XEO*Bqi=M#y^qLgI{aIA)?v8m z=bX$QW`mCTFQKubS&nh{p}hxKmD?P?o$F+NPDr5_;ZU>8lPBlfcv)V-IoAun4N2l3 z;+9O?^g!Eld~!b>8&|;(dY3C*igYRYZ7h7*85mtROrkeIGqIn!5JuK3qV}deVWqC0X&%r|@*)X>NLQ)(q&? 
z6nCIGj<6YQCqOvcNft+F{qQ)`7T5mlN`QjW`;oq*nflsdQ*#`#Ogg=pewrR-0`n=y zXOBDCaaJKS)aO)p4lP=ID1_p3$#-;#3bJJ+#kvHqMf>wfebhSaVJYb#WQk+%iM6LG z2Oh~e)#&fRue0lKbv}+H2EyHNy)xrBuVi;ItNpFRdp9&W1vs?I8Uy|M+NO%y@V6Yy z@nxyIOjDancf6jR+@=Pe=%3ED3!c0A(jU<>7CNPe{yt2^K7hc1EK>&M#2=E0Kk^2> z813*`+PX8frk0jn_-EKU=fu(s{q3~@PYkx|*}tu>?#yUCN3m`U)^%42?MxGkMtbWf zhAT8V3Ye5j#TgUBgvrP(tgRuIBw4gsPcSO>cqRaZ6{117Qx9>KSzFg}HYXRhL_h`z zYMn6K*hl@>MTH=O&~SxhiQwI)hc?r9+Xj~y8^?#!xq!8+k7#S(Uo4uoJHXUcPN2E& zd7|9Zo-DHGjfcozw#=tB_3ux7B=4M2fZzYeew23uKoX2Wy2Qd_7i+Kf9;>wu^V*uI zy;eXo#6>ARtKDC-cC5dh=MfStk~g+{F35_4S`=HOK!(GjT=?TkE_yfKsJQ+Q<;hOm z>pX#C_)T0@-5=WJlaMku%HAOu+p&s@Ikf{T|Qj?%qZLQGxEJEzaXl=Vu9ISWa*-ZFL5T9WGw5J z`@K_;PL?mk*kW=FHUU}W1yp8<=GpyGCT-}) zY51M$ZBq{&vAuHk>mA>UedTy6M}G_I8~o@%tZS~OC6z*Vr?`^#x9I!OZJK%F5IKQ} zTBCRf4HBQ?4gf^$4-G{UR7r^wX)E__Vvph#gl4cX{BDWqD+D|COf>^CtDSVn6XTqU zMSL>w`8j@%M29I7IU8M+snxKu8E-b~Us3~-j*|AIq;^@QFRmvz9`j+tK2B_jEw%9y zy2~c6o>)ztye}>%x|BTTQSRuxKc8U17JIst=szz0f?fB%gmo#?KU;k4pwivM-lz

-@9Knwo_vm@{;px|QQ;sYV{OEm4j`l9E z*V%r|NrB+bvip8|VhpU-?N7xdQSTbbhX0wNd;ttk?D)}8wxsz&0j3Vi2G zz;|q^kLMib-n4X67XE;0Cofrhke#+-f}m^bVn)tB8~ZiSwpExJbEmzXW*i082OU1p zPa0qfC{`In#{Im7A;ljMA9p?$9l2_Uz9)=hUp;5qE_Rv%3Xwm`vWA5Vb$YW>e;)&$ zipZ+@^1kIgMcnp1C9mm`Nz{=@T8hJj`4gelXIoMKTR(>gvh07=1LB`OfGuvzVqmVoWrfU~tyC~Ovq~-#|W6Zy|+)#I`RZSq0{rB>M}7OfAyh;40k#1-`p$T z`@#RW`c$(aHe`7HZd_{Q4^Nhy45$5Qb`O;)6B6bF)YK{ayqGHzieV6 zYDT2FFs?<-i6Jq&kntBRB%rFQxt?Yen-Lr=MW|9?IE&5OyY4zXV$FhR{f({9QvwAK zKP32&@aU*sc(FkVbEabCYdn6aAX_cNu-!L10#sO*tor^uh=OjMJ$;3kfGYn}sqlQ! ze5ER^O{L@RsdWqACp%JAXnriS<(IKhT_l!e9V)%}2P!oB!0gtT-~HHPxU&ETiCKZm z!hnj^_;Yz7h?_?;5?OkdacZFHtaYV!U1bkrm+VW=L9jM1AH3+Dxb#H@LAtW$aDjP^ z(kn3$Zms1mC1ohnA^~lNfvLps#iW{5Vd0^>O;7`n1SOhGCKYcqIOnX@??6t!N~MIN zyq+scoO_8S|4WCqf}BO|U{(*8n^PyF0z5(lm=ljtyut2>boi?&PkB*Oz0)gh1>1v^ zYH!P?zG!*7vPxFasho~#Wu5R&Qh`=RN-4rR{R}iDcvkX?OY*WoQ&OSjLIrm_BsSpk zCN(&i1?}}_%#w@hv1{3RYcs2jzWv>Sxme0?6|vdRPGx-B_)7Cc-C~4mZ^N=Q>hw0e z>T=?gq<~aAeq51$TtbvgG!0B)ta`>ODi|y)ecHfGI0z8pDb{_-Z1lWwHBde&KYbqR z|1RjZ3G%H_8(#w$%4;wT4>O*|Q6FC}8n;i*aY~^k+6RTofAW=UJ$Z35n(FW2ZuJ1#aV-G>_0PMWn)?$(qB_lq!MV}rd$zY* zSY+8O63Jn^y)r;+p<4=vZwyB}l#1N|pt99%Uujxq9SL}Le#wUKJmL?X^<7fM67l*h zdq9W8`P829BQ4A6oAWtAVI8XQ&OSfG6Pd7x*3$${azg;2-GSODt|mZdx7nTFx=EoN zkW9~Nj)rT=Xm_G-Jc)4BT2!SdmeeM|;+TjHd3t+gFSC8^K#bQ0~po< zz&pO(g6o^&mu7>yg4yk(peR~+d=MNq9hH?U-pPLt0;8i28nR9{9)a3^&g+gV+DqxL z)*KrGKM7aoQfDgR(YfYzRU5knbsdoC@i3V!~F?Q!w$X`b?W?RP}=-iGj~< z1HoJN_)SV?mR`AGFk)srMv>C6G(HDTOYR@H{e(G53A~KxqHQp?O3_8-CB6Ck^(93?=)~jqVs20xG36cIQ{HQsy|UJVannQDLc#oz;NW z?rx4tJV}Oz=s3wPDKm<^+h{o4Or%er%^bNt-2X`3+J=p?f0oT(28@@;bd{Hoil76b_JYH}w#yNh4n!q3fOz?h#=e zk~on@)fWs=!DK>}e}pOC3Jb>LVOGK07tfE> zut)jl_OjovX-Nt{h;nHGuhF`-B&5v$)WPw7_8 zW12_9InWtJvatLG!Eg^>~cJ&^*hbTff5yU#!(Fzsg9U#VCFH5 zfM~Acx*w>e!#&8Z^ldLhe4V#l(AsjwL3IC#;qoa@3xER2qlr#~-z{f@h@;kdskBvY zc<`${X(t`tGG6D2IfUyIAD+6^%)%|Fj;@=8a@Ek3i`oz5pkOuTwE{t44?~8fCmIxaTdTf%aZv(g zs}CQG0z`0eziy7?;RnY_M6wXRI*$1){#i681&5jw^O!X6r})6N(<|yZfOlP2b=O~6 
zGeAhzOj>ikRuhn{Gg>}GT`l@1DP<1ZTdCM_M3FQzGZ%7W8WIlc=$I|~{G27pJ(qoc zleje&30JRS6S5dq3pDYB>9!Vxe#`W|nJ)%YF^j59hDPrr9E?_sScj)rw|N&0ub-c~ z3O=e#9^r0p)YL=EKwvb_>56WuO$97Pr>)Y5>!7T^Aw#BMPshqe9=+^rdn5#MV82^KcYK1N_pqcaTSnf9EKkJ zVEe};FWtWo&tpu^2Im-MXHDCHWM`D_v> z+>@lhuP~B|N@5n$(l$}aD;EzVCKlZ4V>A9NWpbAU z!#GX~it+QbKkz@#rW#9e^1taoo=Ai@tYi^c1|n|d*bwjv(0boH2`VR#&AVT+CO#X) z#=7>DFIJX&RJUGH*(qYtQ;;etKO~bXRRLx=B;&yHHd~T+u4H7phog6QS$0v3Z!urbs-@NN znhk!A@0cT+E!kubAHhGD95JFF@GkP*9#nV18A|s23JiFiRi8m^v;4tZbu2Zm*$TsB z&G?-iYE5j;OQ+ld8EHIKV%pojt9=K{V>YDPDNfcw^Yy>OpCdxBmM!qMzp~ce^T2Zv zS8wz%{;tPy=zebYn^jB{FxOWh6@@~-1FOj-<2<66J;|s9?`eIiL;5HK=@Hf*wnI(J z#V+WxqGw47O!kfxs$5!kZ-0;)l_g=za6!vbpfz0sc6sP_DN@PRHyME{Yr>J!Q#{e# zE)6HEAT6GDBV32So5F0Bn8xsAEYOUXuY<`Ho>vrB%cHgT#p6`Xyg6+f~e&4LUG?cnSa9Y<& zOBNU0SV*biBtK@u7QocVTN{lBlE_ow(jiL5fhm=5id>SthYF`Joa3s%Lg-zgm*V-q$rY05=k z{nFB+Y%h^)=PAE>`^p~zmoyG!jteETyN3ph=auyzhsU|`LXX{ds1r+Xr6Y zGI{v;Ghuhyp0#@ajKg5c-2S^}>zfGpbcg+mm_CW4XgFt2&h#TBM0V=WiRnOrf}e}0 z?Qfu^AUNg;>}Y(v6@j=j|8V`gk;)Ip_b4dBIV3WDO8ZuGW)T?GzP8`lERUF}5f0{zV zQyXG>uub`F(C&hDR0T0gHA<3NuZsfUoEHkTv!jU_=4;2%5($*jrXFQ4AaE(d5X|Vd zJLpAkW9+UZA;Z0g!?wV_UQY9)i9)a?m&DE3UD(ZO3FGpE`2*^tP{nrKU}v(r z49Zj_w5;ujqV4MqrGk80a(_;n#eNB~J3H{x&PBt(I|6*v`eC!R_$48J%u!3x!5Z}1 zK6ibZ-S}bFK7u@FQ$yc)b}Os3T+cFOh()%Ek6ryal@(?ai%|vEHsJu!kdW)VU&cCs zPK^jU=R`#{qF<6e5$ecRP|vCi>jd_!#Ov^#>t35X>Mq$PAK4rWqyq{&k*fS z(hp(y3t=?pYmvs>`fZm;up}emy*XLztIz!h#{F&E9`%>k^-{30gN`*gW9TIi#9XhZ4v}No_(ksPDlEG8L0a=?}~yB zxOcoeG3;nu3UOdxzwO0eW7=z3N@rzc*BK0cE$JroE#t(zm~OY3iL>B25me_WJr`fw zr?p2h`pzAxvDZQXK#3P@+C@XfBo$S%0-j`7eQeIuTTOfdw@F5^91X91WS)42%6H{$ zkWxySZ)HWE!P2*L>R*9Vzz1E1wvN*l=_Mw^_v$&D5OUDJr_5Bfx3{08>D&QH?(gp* zNhxCt+xARv59Ue8s??U93{69GF_Ymr-wG-{{&G%LSO46{x5OE5%1E3^PwLQCc43B0 z&a@&aJUMXV!1Z8kpMo6DcWdQrRDH4R2duBK=H6VwC2pX z5xtJD_ZpcyLbe5^d|uDJ*>@oGiBEF%zX1`GF`@VA8Fp5=6!eO&x(=5`S2aX(IhF2) zmvJ&VXah6q=bPG46I~S74A&2XCk5s007P(1qVUJCb`n zQt5oH`4@LJt31{fE_PSa-P%6)9j?s<=C`-fjXLZ9zeW1LW|8i0R=>hO9HfaQI!3!s 
zgaqAP1oTBxQW|VGw2Hihx?;NMX3HM^&{T2*d@@R0o&You%Sg^qa&sQ=bQSOAxO6sM z)$&v^#9QFh&-iAY_U->$mZm`U$O1l5I)=@N_SJ1O14gPXKMSj>71MpQFkl+6W?%VW zRL&z|y=<79Ke^mqHtglZo60G>A}8OO$A5lL7GSto&!X^`&$nk}+|wS_)Fk|*xweFa% zOVBJZL(|Ouz5@C`Ov?vVl>frCeB63?V)Ma~?=K-?e01F9%v3AkjSNo{9Bj35GB

  • kdWO~~enidgxj(Q=$y zNlj+IZfYA?UIiTlbRa*N6j-H!s&%GtBVp>1WsUM(@BMPJMiX{kKU8vGK zi)d_jE+ZrxN`=3y?l|vc03l&fm5C!(*z)ZrqH3GT26eGE1D5)Ea*FMqUA>%|?%sdW z*Lh?$Q|>Y?Ez>@_OnE5C3Z{TT@Tj~&jmPX z0QEZrt*=)nyaC2sR8*(ZkjjeoKp{N>F=2bEBqnxJ$Y`;TF7}8HK~ilNmG=I!Z_0dO zn(5p;)j10iq}?Hdf;zVSnI>9~<#G zuC@KrO6DtrcGnKlx?0z89u2NDc2v-0=EbOlMR|FmtSgdIqKh!N7Bh!?^G8+f(|R2j zDF&vnXgK!TY{@cPZ+@;xHt^GDaa#fIWSVDyRyUIQQGZJz84;%=ubj|c99^Z zASIWO4v$SmxcZ?WM`3i&^DP#{*{-3lz9H4m&OQfO{vw7TqpeuJtP{OHP}oCXI^Zca z3PKd5Rp!|(`Y|r60J$*+N@~6MpdHrK#8KXP%LNhJ&`A~~gKIsNse0gX4|DRfR|=}w!#ve8x&62sRv zZn6z=yS-I1N*-YfmM@6#(EA>BrW@wR6q)qDH%Oq00+U{2MI^E=Xo3I zWOA;AB(09)Z;TX!l-+o4{`Vy3$vKd(he&j*eMqtU-0YR)V#Z1DUlG-{u%$Kol+u<+ z)Ap6_0m&;g)UdDVqkBF^BxDNIrg_552E}ruCs|~pbc0IP8olE`ncv_m_T=RjssTUI2TNt{p0HgWB-odi%uOZ8 zFy2MhMumTkEM}N@*{5!!iB-)PLy5N8Djp9!K7&}0TELEwt7ZOg(OU9X8!56!<3k9^ zHvX9;!TyoJ0yY}F(pm;gZWYL8I>G2VAI3!lH*Oa^X5&&4rJf{_-onp0 znacg%Y?pUqtO+%3hj#^;PyHPw91=kx!rMHUrSL~|syFbhawT*nXk8z(#O&8T7Mg^! zdV+5R_HckGEWEu$m60FQ6|ccb<)w!MsKej4CzOrpy(dcTihtuy`L)rRhg-;s@s-_y zX*BoP?IqL;G%og@F%Y9S_TS9OaiPX=@&cTwg3t zQ!269W_^c*Wk~QCZ>M2xi4?nE(u&>htFrD{nU7XO$P>!B(H$@7AEDLDt8MwmtU(8{ zOunvB4QCf6tv*ASMWiE+G}7!pwHN{}GfR}+3cGj2A9$T++IJvg`einNm~MDDipUx# z@mjF|>(i33u;EFn~eb?fssm(<1aL?E3*_l-8={pG$RRt?FZ_ zy2B-8pF@JfG_sYkcMdreul-uS;1^Y8DA5LNQTi2qczTd_3k<$a6=%kto!Oc zfPz<`cR*8`yZnEZ_tjxhb#J>UDkxo&BhnJmIh4u>2nbTrUD7dh4T{3hBHhx`B@Er& zA>GZ;HAr(dukY_WUw`L2*LVIp^9R@LYj5^mv-VogbFb&Q?`NHLa+~c{20&@Cg(kPk z=F?sKP^~{s1}i?Hw}E|aw7lP31hfxMruxl3Ae3>q{+3kc%qsilVL27JHBsoJAmMBXi5BUoQIL67R`hP#*Y-%Pt zK;2zFzw`j8Oq{&;6>81Sx-lwCTi#y`{4;)A5z-vPmDH>L(NYO~uqG1%8n~nPGg;G0 za2Baokp5VX%dChcAQ0085`6F=h=nB*=}6DJ>1^#jF9(rJ${+0v+b!pmgg}A`9=`IT zBhVp+|F<=k9e=#AB37DgvYP{SkXGCi)o;rELhW+SIF*)G zal+-Zh#jLO#W<=eu+o|($H6=E|EqNC;7K%zm4!!FlUI_jhV{LAt_)@@QNkxcdPsY^xR`_ARQ)Z~ zl^tS9358p~Mi z3&8x@eaH0(6F*R(Ua#K;A#9>;QTvVHVf5jryP|;8_HyXrB`GUEsIK)S0)}XRL!|QD z&Qrj8`P6T13F$YDSyd3=wDK$xN04T9jM{i0<5^*|3Y>dAFpv4nj@Xk`TZ-5Z?u`js 
zQViJ{{0~BM+50JfNdy1CKrucJZQLPR%;^M<&=wv(Sn+PW{e~v-333vow-9G{d$7O< zVOyvsm`y=Hq2v#y!t1ot1r05h>lC^*ssg|onBvDt1s5d=k1A7Bn>0?pEuSy+^ndwp z3myNVsgxCPk-);n{y9H|{Ts*ulc8nHCv8?AoNpCTp8~C}h5S}%t+d-stfL_e&F+M zF}@UMa(rmHmzDTbU|sY##l;R04|2#@2_l`*nOvANoh%ho$xfb^}OC&bJb!n~17X5G!vj1TZsY|zk>7jnh@ z(UA)cH~WKncq~LlcYV#Cr-_p4 zF_J*l>(G7y{l?R~>bz4}@9fL7PuVx#eN#*}>-WZ%C4hiZ?YOEZ?on3!16oVa02vl` z__zv1LZ>FQnK1t=2I6Ysolz+!qzNScS$OAeW5K&SE$`@Y;-UymS)hpzz8;d;ULz3= zH}tMeNk(w^&E{E$IC+_sdF(GP-KQ@Z*E8s`6j{I_cr`aJO?sj?tzEZzx3h1nw%pv-IM@r*W$M0<)*@J+OjEr#5OGM&I9CbnJ4)gIdR4}^G6%bMW0W$UEa`@P{MCA- zN}bQ_u^PZ&5Vyse0RMs94=u$=cHMe=6b^=FAMkzjC+Qh4h*O-@r=g`~rLN`uopj-5 z*+*)5b22q2E_T(y{@7g}KM@YlDGSw|AsDR=8+tEl0cY5>nu1Q4CT6`rqr5zxa}*oD)T#_K%Lr_ymLz_&`wvK<9QJn$^(@k4h1lz>jx; z4S((w-aSo8k@0>4RhZg-s7TR=Q#UpmSJLlMRvTXuYS!>RtPiCtN-e+rzNW zmg&-ACb*SsIBHazEK>Upn%@&{2)>7}VuolCqQ_S8OZ(T$?{8Na5>O_E z6z;UF(;5QyoiYf7_aNX#gz$8AcbOE>Q(tyix+Kk-g9O6md0W{Urk&rpjP1Aq8&#Op zT`?x2Ce{-|gEVcuUwA}V4b+G6Q_&(P+VDc6Nj%RM6HMrFv~qF6haR#L+@HH%Xl*H~ zR~kEkYfSEJ;`f9W1r(jHl%xDJmCL+IU+JOt={bNj8a-p;#6epuC+)w?*;xFjtK%8C z1}XFq=z3{!TIXTKkr@^&1>7B_i3UkM;PXWDp;-#tz0c*Klvut3VpZHPQraDAE#}ZG zB?OS*`dlm7$!c!JHv!k+cc{WBV*}`Jig3lQFUnePWx=Kzid>o|p}FkfK)K)oFSS|I zW%yu6F!UkLds>SRhE!5J*6R~~9LIs=Mj04P_td+^qJ3tbECT6KoW{vM-4kTS0ucwo1v#-m%4YNl~%HwwTc(c>}MZ>PAGvvzK z%gb9^9{u)PGzu;unG(A~;0FZ51v?gAXwBUdcTC1(va$PF_O2oF&Q{1$bT zyBzPxnodm%|DKZK_dJjvJruuQbPkGI`N+2?$hgoxSLJxX%N{$@pf+v*w5VTOL;k9D zU_gO3>M$h4xAeC+@T}#Y3H>X>O7vm!(n=qnwX7^GQjx=&hpNKI0X;EcfV6Zsp5>8! 
zg^r(k53U22VSOi})Le9cNBu^eE?oM<{`I}%8cR0N;eL8LYX(mwjtqFtLtI~6Ts#2M ztjCI&u5BMC7jjrp5WnJ`P&}s0;4!wR8lRaFsf4|*J@IZN4ET(NDL88v;>oo_A?xqg z4loqI_P^tQhFq}ua638w6AkLiN_2T91#vRLXBQ+Fa4pRrQ5mj3)mhi$^hZD^+ZoteRk1eP*Zi!PQW<$?!rMllN z*DA7rciVvZ?OGeQBT|8imu1?Be^|mUtNKB*Jl9ha7QVgM7T0bsfm$+#pHk7hzLJPK(Ehudm!aoWGMLSF2Vl-DN8MgMrec^HPTR$`g~t|1xhlPEvTqGcNTmFO=!UM-%Qi&YYR2Tb7@bN|AF5rbvJy z*hN8^1p1=?HMHkjm$6lZ3S0OiK~pFcDk0(PgMJsyXTg1#2Pc8=^3)Q>q0eaC4=OpL z?Er+dlyT9S<$S%y<&G}qZn}lqaZO59PW&$J9aB!3#MQ2DX=%Jg71q3ET`T&yqDF*4 zNtI4bRT^N^vPo+PnvCV9S7q%O3Fgz}b zIm%2a8;Fdq6qK@$?oF$g99NkHfYH0qE%~R!S|xj>wRk_O7RSFW-;uPmvT&fkBNf|g z%4DNsw;L(1tQ;}D@=ChnO8~>StoE#SNr5*2Pqg|CeN4}5S^@$Mw#VVObtUfJy&GSs zZLYyar~!u*j64o7?cC_i=CRoPCG-dbhvHYGy%)7Y3UKfIhMU)Q2{U9%rbv8(o>In$oFN@{ zw^!i8US{Ni`Jaib@%I}xdkHvS;FEkG^oQR0*s<~Uc;SEG0{U4UMfW2R))VUATf#S& z=OP5*yvK8Dq$3F&o?UWoOR3Lj&&@{yXZgldN$i8DLrq-i6#K6EF5-m!Esw&U#We6l z!2Y5f)XWN+o#+cA=s8WxtPnW-8l3?iS@2$rQ`+`17)c^!;eiw1-=M-$~5l( z)g+sDh=Dr@auN4evBQGmsw6o|p_7KjcTFI~&sGPtNXN%GBSwd3GgsZJ984a_u8vL4 zAVHSnQA!(2s{w(-wNn*~cQJ4s1%s0HYwH*cx#*(6CR?ANWsVQ--HYurHIt$t%UCW2 z!kEE*k|~V3ja+?<%3fY|f6tJ?J1OFohc~!{_vLkoT@EL>821A*W&MeiOU%DGm3Ys$ z2WAdDm;FU9{Y1TN=!BWyP;x5mVg9F4t6Xb=nbn>cQ1|!j+7QVAb*q0GYtrQH8hXKV zK91V&fywCQH4@d%XodmcH7}X}<`6zSIC(5fZ!^MOtpOIefB#BXZGHRMJzq!dXgNk< zz4aB`i7*om?aD#>>+dgpDFh4mmXSPw4?_KJ_~8;kDKWzaqBYbQGv?;w=LZYp8$1Q6 ztwlQS^Zph0-e=qRQ+TE9cmb>r2)MzuQSK5) z48G~eVzI|JP3+;JsY7Xbw$66=dbSkLNCo)8;^~re4vT5&NKQhEB?=_j+s6g{LyoPw zyvD_{8AcU@wxm=I0#j+k;H0fxv)k$MWT&sgm-bby?Xhx^?I5gTV2BcLjh-9Ih+}Vu z|1y7G*26y=cjEHvlfpwOo#u0-~ej zVvv7X+L{TC94Es^YvciPlzktR{PUWNGa;1b{LS2b)ME-uq0_V8fClj_#j;{2Dh)0%GVdg&+77 zDJb^I=bm(jNxc<41gvC61tL9v1-R=Fu3yRJqTI12u5};bQ*(+6kaMs21>u&A4#{WT zzmasp)Gc{Z4KiMyp6aj;P012EeADX?qynOrph%gAG0MxTl`>IdHL>NG&AjNn@L{H{1euc$-YmA4H(emp$ z376(K^xs(3AP@mh1q0g!x48osC@-%%Tzg4gFk)!K6-+fQw?gRB%$Lv?nhkaHiR)z8 z{amKb7Me|0N)~Xho~UrrA*n%U`LAZY3ojzCcSI7&8nGPq?3hvphL#Y;4oBD=)&ubr zgImV^3)uS_qgh;9+T1SKUdW|$P~cc09j*IS4lQk3q7^N;A`Wix-g<*eX&flq? 
ze`C|5pXg$+QHq%vP)$E2Oqz@LP;$HRq9aor{5r)k*ClMCQwZ0(v}0?b46ryUnjDxB z(oTRusxFgqw7-PnWV8rP`h)6h{|YF5mvW!D-e#y|JBIW~RC0m?aJO7(DLK^Bf(=#(OgM&{Oe1HgK<&zV+_Of}e ziqL0PB}yu9GdRu-M-MJ@6hw&K=n;u<3(MFp3*_$bVXbx*w{K>&nX*H+OTLbpOlnXJ z6N}7|RhX4Pg4`>5hZZ@*5+VeHO&51y|g#ir>yJXWp2KVK86 z-_M&g{@tF|p2di<RaR^Bi?ra|U~depSqKBb6w^% zkC5BbtFaLe*q(2&%Ac%Tikh{C>6QaSLo>M>SgZl=9O6pxr@+Dgpnt!G5U#u&B^&3g zPEQS5Lv1zLkX>0GzMM#DomI75P9!E5{OP^PlENbsqBoyUli_T4v1#vxOZ$K%h6MILG#$`15ywn_&hZS`S{f8ZJ&N@^65psQxvEp%Qd?j#Klyye&=O+QFf97#SYi zsYTL$bCE-m2?5Fn{uiSjb(i6`^U&-h+-L6e*rX6xia5(a9KoZ7}t(c;gVPXiTh0mk*9C_#D3y4~2M)dnyY0-9uY^8|MM9#K7Oyni9u=;{GfC6Z)u z>IMdcDH0nKlQW^OKlBwRU|-M9&d3u&#NO8W7{$NMIzM;6g8?M4n^5`aPej~5xjz%X z38M*@8I9e8h8+-Pb%zyLixy~UwaC4fb%}{yU+s<}XN%G@gZ_)bZlU$Q-t(+9{mn5P zG4zZe2ebCm+I#1KQ}yn06>!XgoLfFghF0^Az{EaY$%Iv`3z9I?v0{DK1Pr# z2)fBBbZfY5IMEzENN(_X#atk!v%%W4o@P$CN;qbVAT@K)<0RjKxPXpvSZn5%g_lP zEYmrSk){hsdok=rhPFy=^CMj^xM-6SE`I^Fm42f%G4c}+TO|}BDhEhmpmrntne&;j za@8XcrL;e^6mQF#buJozN{h?!4dD0LjCtb{OP!kBG$4kOz$UOsAUG#kz%iq;53m{t zR$&7fKiL3KRf}|NQ`QRrEn?@WxZ8M;=-4|`efrA6lqt%Wx`rBe{#DT%0DuAX`0VRi z35ATJ+?kPs=_{T@+!yk9MuHNM2hEVKFX-~7*t#gn=834_35olPXcF>wB#X> ze+Hf{wtVt%Z?aZcBnN}Rp zxN9hq6{!8caW9&tT?*YchIje)bCM^eBegvtW#B8+aclie=}?b zimft2xRVMGH>E<*?}GJyeTk3HYVySpCQgt(HrH*Ld)eOr`U3x{Id|abykq^Wz%D>` z#mOloG)uxUf8y8>WJE&)1G{Hkm$vBfr==)p=w8ZgK0@+R^a8NpAT1^+!-$(tI61U4 z=$^v*;c8Z)+D5^!a(Gc(ts~OTkVVX^gFh@JHoW4)-8_B11dHjhOVs#L9WgqZPX`Y% z$Y?!u5;55*SJ_T+UkbK00^d$7s=EeXb)IRSFG`tV&?EvnrnKhCmzWzOR}NE?T$s7l zQST@OF<-Ot_vz^Tw`fYctjOUJ2C{HKvS&Lb%K(DV4}l>P#Ry6etUxIs%RA5%X? 
zB9+s6b$^?Uc}?v(GPOj3X>1h_M~5lN8LEAQk$q7mxSTE7&54sF+NH%9LGFfd)dL0E zHC(-s0y%TLCW-V{2x;Jw}X?hrrv4`CA zZx;h0sr{}r8J zOD}`#wl=d{S(gZ_=x+EoaEjLP+vGoZKrv)N^Gl2F)k&cxA9gHXLjo^iW{KV(AeouI zOlPt~X)Ls+Cb8aYDi*`PJ2LZ`g;7HZO$@1QkE+WPe<3f}hnX7|%vS`v zMdHXGE&c1aP01r(4gna!{htL40dFCyQ1hostO)29WCJOMe_ASj|07);`1Ch{80$Z| zpivdhSTDNY^ubK(jsWckn?|E9S4w=e?>@rHVKOBdYOn72u)8r7fbcygmt6AHt{xJ7 zk^a=rSBpwi+}vY8oycfX;&%wh`n!%^)m+yVn3_R`Azj-`;%_cHYnR*FwbVK{sQvsh ztp;<0s_sYYhGxHhXNk54R8(0&O=_cyGNqu7GV2hfZzLd1>9yWxDM5Rye7{{UQ=u3c zS8r%V$77SmT(_otz9Ud6_s<%fsBfA_+AGV^)3%<)Hk8`JZH+fsDGOo5X2TFb8K>!R?~8}-ijUr_ImquKGBk=VsN8*hGWmK|*fWMu4ci_QyBdsj7Q zk1Lox%zd4Sc_Phq=yNZYhFNVBfTHc>3yIPY^iL4A{kc0-Gi=K9 z+$)AsEPefva4@5DB(HO1)dt)Z^6E&S2Pf0%nQRap8XBqU8^}wQwKO_WTUbDbOQsu0 zBiDnbV9|<6wymCt{br}AeJ%Q-?2;1msK%z7{V`{evy@Z^>D7m`Rj;27Xvysw_h*o+ zniEUgGfKg@WQ}h&g@VGF=*+5`wSv{24~GXZrHP%dqw?qsGL%PZWv-h*LQ)V~X4Y@} zj~}vrJ9*2ihM~{gZCTIP$8oN$tXN)h{CTB*+{^2TqT4YvSb>msDL~WPJm9j{{KeOp z=P!FEWy*49A<)U(^WVelg|oAAYSbVsH(~sTXvYfuEDQ4Nw_V2+V+ceB>`3NRPSVx7 zg|F#iP1D1Un>yO0lBx3^b&R#)ZH^rh--V*&CHHmw-ZeC#B|C`{tR+rQ^e3U&X&VE^^>qCG*Oti6JLQ5yJSVq&HdIZn8fqFA4%6ehMd zHSn88lDlN;WtAouLkFNnlD~oh>DKIbJNx6qZ_=7B6{kdACWL zJS#Odb=fmc&UD2MwOYYLp<&$zX!H*@s zv*$z&jU5iDEwETnNdu`9tJGoyr?Uug0YYZe+KSD&^I1QG5=R+vQ>53Lk;%Ag6*~XU zql8`G^*d#oE9Q&6AALzb?IuT15k7HbbZL7@OM6xse6~GCGC$lZK1aGVfGvbDYl|@v zAt)KqJ(ZO0<&yqqUXsFm8Pp3NIalsF#B|cRXX|k-q{^fMKkoN3X;C9K$kVH5i28a@<^0} z>#Aa4s3}WN|Ed4ItJnyyuiJQ^`BoMxd{wEE`3l);T#Y3in<(|(BS0HQ66^IM$>Z;L zHC$*s7N;!rWzv z^|+S;%cTI46jdqlXoi72D!%dRA3k+8d>uVq z*30+){(%+ybGk8Z;|OeN+q^FPzo zUIxRQ#yK(^E zt|%>CyhG=S`q-D^m8eCYgfHW{+8-vaac~fae-_ZaAJ7!+E&}~oiL&i0ko9kq4=e1| z4KFh!VxY&P_3KZXs5rX#RN7r?od9gH_Y$)~xb|>bW+}|Hp_cYywxBm(W3ifuY2pk9 zb2|8%&Lnu@MCNAMn3RPtVk1AM_qc3KLDi0&{(HEfXQ+(w?KqeJLi?D>>U`eUlh*ii>L0n6wry#kmOUrS+?MD({h)(H-35 z1J;Vz8oBwDMJFl(wGN(qy7?C|nV1o3o(IS^1UVsJ%}IGiN@atbK%Z=Nh^`QNPqSEFtKc-x z6gwDsdzOdSNT_pTuq_i$Tm2l!EHqJAx5j#U{Js+eg59FJ`AX7{2hys?K7Dk+MBQ+( zD_`KQ*=}^ih|Aztg4>UO;5lnTYq1mnm?eLDdZxhu!%%aL5u2PkJ(v0en>&5(2D%U; 
zg7W-)T0J9Lkz3uSp_y zLY$E*deHc(*CuV#R}D_ccYs|PUlsS|FSm*jhkln6+nS>>>;E$FNDTVI1LZT`aumZs zq)0TrtEaPr3+b_N*5I6D8z5Tl(~gq0>`ls^cZf6*p)(7Zc=(Cql_$Q2&ueXacUiOOY!n4e$*FvMke*0`1iQd zjFN}2M(U2QGnX1-d-%R`mWc?z?b7$?FfQz69i!=K#Ls~t{!sc}9*d&y7Qj41O|iyL zT+kJX`b)zqdF?j60_29c?zk}X&u%3+AG5$EU|r(7vcImu`bSZIp=xbO%;|u1e@G5r z??vkt3*O2T5NbOuJW5C_Ouol;@eRnuRN*8>xGWDoVg7J&g!_qXbXi-~{P4`QmjB?B zAHm+N?h0Tj&nTz96~%OGEY^z03Z0#g7B#?M`{WY^m|9atld4?(Vg~d?R?@cBg-w}8eK8=xfLRcp`-*J(C8I{QOJdy0Zx9R$m zM~l7@JlC=L`}pa&7M4CRN(L9iu66oPki?@WIi6ekQL5Ji#wH>3S=;HNI$ka31tS+S zSs9`v?aF(8Ny-->lRi9MZ0XTMZ|cFmOC zDO#}@=my7FmHR4UPk zuBWv9p?Vf+Kwi{EfE0n5)H!oV1oRk~~@Tt9@?bR47)#9wR36Cw?OR24WtgOTTO)@w!G zl6m)$jJ*r$HFI=ZMsQ|s{8q5F+H>Ji1AoWNoAK%Tr`CCRJDZZC1VfEOWoud3VQ*y4 zC9DmiP+w}(V^-O5M2d?_!$0i*RGX_?A4bd+4>@XzpAUKTb?~7kentq)zGnUlFdDpJ zwpbfXonKWpW6Jq;xQSxLBOvJN$jC08R@+TK^*75^XH>6oSyiJ*h8r^vh!^FuEUGkB zKi>)f2@6NLmgk-)u#_(iYE3(FA>+3F5P0?wh}_mN>gHL1yJtoGI(eZTO82Z;g=D}S z_Drpfi7R8acr`X!en<1{g#D3Rms~DOcz4I9W9yM7NXklu6XAk#k578iJ5eNI_3Kw8 zS(K)3owt{&KXELnSLCn;5hDj6#--AJ4L6l>2g5;z#rF!mmCS`%%p}7t8#L zGvdpWH5^WVG=H8O52R*!ch2RY394D>0v7bBDAycOn7UpWMSgOdJvEk}Eh^=teYYDR( zC$^%3shWCv*OPJ{Ctvqz(%6dg!9trvKK!A!e$Oee+QOEE982wHpnG)B z*37hDWlL6O;#Z8W2ALIQg$XF9mTy5`8S@E`@X^_qe3Y{4v|5oyRXq!fd8rOKd^~oy zl9En2jh9ueP@^s{-WIH*lsQ>b$6)HWid&#pmM3E!Gh11Kv|r~{jo;C71bmQYX6{t+ z>epyj`HnKWLO%VTiaB=}O1Isn_hYNt&g|jYri6&O*nH_ZK2du8`3ro-8ei2iFRYaF zZY%E|wWK=UP;;Ss3{5A(sn2txN7ent?IDSwW)qYbYgLciPJtR6_UPmm15@=0?aS1) z_2<5PZoRW3uL__n&hJ0>C-6ZXw)Hh+O9MHtvmc>%(e8R1Pbyc8!Y2liVQih+&M}Lf z=J%E>nlQH~$KM5BXfWGMJ9!{H%MF6w&)STKl;uSeICCMQ-`CA@i4N&w74^u?X3m{( z*POqASUa_xbSRcR@j4IN4xk2X`*qEuwv2b?AGQ|)_m2PaW$Eia8V&NM?ibF(>K(mL z@^sGL8_20JO0;rnN6C|Vb9ToW^6|AesUq1-7usZSiDl#R)kx2?# zxT}SIx#oc-3Y6gBC(8sIH8D~u~Hu2JQvlj(WadTwNH}Qf#y{? 
zUkvZQ_?d4Sb3fhGEtyPGCYZ$pXbco$c0t_oYXY}Pqu@&Z!&_yl_;1Ni$5jrodkDi5 z7mKdT2Vu|J#kt>B@A%%8FvG&}8v|L0T+Bw}!Cz66%ZwSx1H?C-)L~oLkJEGC##AJm zsydLmI}$tH$EaQnH+kr*;O+IKsZx)#S%P`DD6=y2#JTj97X8VWnEV>YYuF>N8`;81 zRYRSw$GYnp2ϝ|*5}quC##B|Cad&I-CP4;1#mxbbyH!X5wcyJ#Y`LiB zoqa(fSq#g+Yz3$=S2#`GpP{*LqU0VJPxfB7&XObNbRpx|KSI6AwB>@KI$LX`M4``H zv*j-oTBph!7iSTwIzmHB%dH!we|GYXUdvf3ud2g(WWD^&;jiN+rb?}jL(2-Qr?kux zlF1p>c@M0V>t!Rpoy}Xwmu>YBmRlzVSy^Ov{LV#JoQrP5h>wk%IVT+t>q`V|%}{)h zrkI=aCi;{<2?@)x@}5#je&Kn?&X+RlfW1;#D5`K&oFF{V3(UjgN7mTzy7;Z#jb!H|op+*zyO(17`I={|ge4#5!lsOS4YcF+D zv->%eTPtt%q}f=jm*H@dnI(EuRc(8!JOQ3&xGnXp;eFN^)dj3>$LPdqS=MwHcsZZ*_<$!u1-pbzuX6F|TPOnZV>cF5WC(ZF=_=G&ovoqUodTFnKE zubmHOSD>qe9XkBFYh8IQ_iNG6279P>e6)bf=dSk^lez literal 82601 zcma&N1ys~s7dMKcgv5YI=g^%3l0&F;gLId4H_WKCv~&p44blzLNJ=*&-CYCBz#X4A zp6C6(yY9NoTCAD>*(dfs`|Pvh_YYT9mce;O_6!9D1xHTywK@vQ6F3UWV}A@Z&)RIHrB&smrD;@Movdu^Em2Tp!xNLyRpaN0`j4;kLxYm=J}3N2r+L$SK_=nb zfd2(g3KRI0Pz@hn+nFOQjHdb<8_lx{c|$C-&Uy^iIm6gDl7_@sI;uX@Cm6?VJ|a#l zsZHky*De2t3^%vqVGv5}+my$Wtn2^@GGlh^^UH7jJ$>UY9jNTz9?||niB?RK{Y~}! zImW#JmK`9cYuWZp{PubE7 zDwfKV3>s79^Gfn75Hd}ANGT4-IDZWzK=FPa&$97YLW<7EmoXw@C-wrb{&$U!e-Is> zrzr!^JO(jcD>ny6K$-}ifIN(t@~FdxQIiKhq0xeY(}=^Nom-zDVk|B@hkEoAj< z8@d@zitom*2#DWvd%d!%9)L@yx*%;En?T?r9w{^`|2ZskUC~JM7e$167wVRsXueN2 zz2bsf&Mz5Z^R)Nvf<$iZ=q~C(P=6<$392~?E+?BzYL;^1FtNO}Pg8g{VocN8X~dQx z3Q$5ibo=P|)O+B$FmM9#2%iR17KQrzBXkVO$Fk_f64*OGuueW;V8p5@exLyGSVwvL zVvMH%ZY=?hxu8p5QlPjt7icHa>t37{xKf6iyY#T>ROBy0E2n<8;hD4xaAdx`f? 
zJ^gklgpv?`lW`=n(RlFqw>?0Cb~s~1iCX`Iq+>J1x5rxFFx@}mbECfdLGTCaG``pfO6k5Lb48wL5;R7% z?%uM#4Rd*J8E&PNp%n39ZmY);y%XO4b|vP zUCVIke8s37In;mHPucyof4L85U3}H@M9-V86(Ei~*1x@`a3XWMebR8sbrS!?=aYD6 z9G;>H!OU~d=ePt5m<<@+L2@~d$76RCBWV~31o0KZae9V&`g^WCn~ujv7(X5(jnPy(5`f!32%J- zrc)iL15`3?=~4@}dq(&9)f?0|PH*bXGS`5;5uc@Bym*tmCTFT9pnoHS%CZkChSgRImcIGx z-{cNHCKM!$mj5W9E1xw;tkqo7VEcK`ahhnF$@V=zL7i&dv@QBHXK9;}gyHYn4|U&6 zI)6R|Oq+TTTykG}?1fG_mBiS+w}shayu7kAnDQ={n{=O4oME34EfgwwGwwRRWkao;eyA(!vW+F?wRP>aFrLGt(>P^p`1HPV3l9{xk z-e%wC+ZKBJB)|_rj|jLmyrss$#>v5{C#xliBq<@`#je5L$Dt;Vi!zAfjVy^eA~TOV zjM9i?i}XWoDtSD8ylcFE!S~{Fma*EdS}&kgcDK}h*Iw}Tu&mXQRjSpcRp8dkbBS|< zbE0#>hzp`#j)k@&yKyfp@(9g{ZlV}=LJkCn8^!U8EXp!p-zJf8uCfcYS`6s+r|ueTivE`J3irDCY57BIF>zP#@OD^f zHtNu`=Pm4DM)6B= z^LByueSZ-+H~0~_1#!gll7#&&rTH*)6w>yPkD@N&Eus6PMj~Fq)Z|XxyZB*3Oo1AG z7yUF)?~^1qowv$KJYIKfWUR4tRW>ktGkdI?y}P=*xr=4Bv@0e=<+b@M8pv7{I$uiF;}iajzO-{C$p7zWznKzV{jverIUr9MWcAUILZcA3#%1qrv(K= zSPc0Blzc0HXvT7p3^)(ZrSmXi<5A&D<`Cw*<00e$2{OF>!tte3F?slGSfw=gU6x+% z_mGaQExq`<#hLbjv!Ay7B{mQxyBUiAwWdJ?#3 z3>H9LIb0)LLH5xrOPPgn$?<}u|Jo=(x|_jL8QnAIdI`w@rF(cN)$a!VM8EhF-HoEH5kxaSnBx3!74m3{>H! 
z$0cpE@<-J74UpOUN=rG*fK95+DD)|Sy~s(`CqtLJZ;PEF@==3b zgIu6faI^8I{oAr#>F+j?{*JH-Fn=7whKnBzfvFLxwp3eXycx0 zDmfqTO^NX03O3qnr*OX6Bg|qWZdqy>HTG!t-AY@ptKYwx)pwj*dEaj1RT$yfl z|CAAm;fcY@900NSl-$?edaXmr(e2SkaeZ-C{E6-l#_{ZSrm1s4?YP@wOdu|w?8_X- z9@~xKqOtd5LzY~j#+XJ*?V}GGt7E86kt-SZLx+{=<~-)zPQgxvD-Mv#+xD&QHNe+( z;JOOa1S2De$X^|yc{hLWHk-V|#l@%%=?&mOSi#E?em}1JHEc9A9v8$4nLkw>_(VYP z$Xgo~MTqDWH>{FzaMX7z^@CNwJ@&_rbzNgIf_jvx#z%Oms1h`5YPhrtAnO@@75xGX zK@@#`g;|hv!g~k)(l=5WOk0yvQ+_xo&W9-e1jl@ZYM^oqohhB?c{1n!_WDWlbcrj z84V4MsH=sQu=?w_|L%_bB}V(+-Q8K3i_6Q)i_?pb)5+DEi$_REh>M$-iyL^Y97^i2l{#|7!Ys%71j# z{ky9G-~aUdN6Wu^igNwo;2#|QOI&|FMaoP3nJCwPWiS3rd_A-nz_3JFUBOlkFzaDl)FP#wRZ-!5Ep&V{9>p5XToWpeM^)?~ z(dYK=dsA}i>gx1#bb+z9fOagH^2waUzo(y;fK8X{YlpWi)Y-9RU*}RI2DrXsC-n+{ z=LCn*>sT23`?uCQF6%AS+Rz;?)Y0YX1s*?cCrAMQdnxL{!N%PoI9i@YUxhv3N&ZJa zn!hWTrBgE&%f^np9+wGYgDL-R=`?@xZ$W5Eu7*BeGKQ}amnaIVR;vVhm@TuPGZ13R zldyPWuyDdpmVfhn`nTXpQcnC1s`@wX?~bpT56KV*oK4&P`2-MeoK_-`rqrs8?1{vx z%)dF&QiMtV$es^jQWMN|p^R(4+%Q>0-%R#u8AGv$*QB}TuX9=aFt1x44V+zT1P67+ z;5`HVdqc8M*m8M|R8j-@kKm9T#MmZv8#VFnWn(K9;(P6Vp`NQ(6!p(B+Zk0+o&A2^ z`unysNIaF6*@=9Qbe|i)sMT#%YqBZ~AGx4^ujK{!(G#VY+!dLE^Ve9?OKo^J3O6#h z1ZN#TW5V;qI6IW!b!84N9S4qxZwGeg-$a*Bf%wuG_c1((EzCvcGL_5Q+nY`M@jl&QZNeH;>PL4WKUm0Hi$0J=tHVD~%)ZVtq3s~4F%CF9kmTH=>Nj4 z|G0h=XYvBfrd^TH)g=`pacjE5qEX=emH4lzJHCuF=4E4tRW9LV6)1kCU2gVRuS)rs za;STD9V1W2S#jx6m@g2qlh zxVs)2U+>S;S62^qbUNP}u{=xoX{#^B<2y`ZGhL$f>xkfKe8fukZ(yL$D1A^Yt2+YZ zJN!PC$F3kU>y_l~^)VL^2#mx5($jpC;bH0v znxzIDKI}Tm-A$`7Je+3zoI|_*nUn+=IF8>hv7#I}>V^gEOCKe_^LmoWw0ECZ5LxV1 z39qRdwtmZK5}&oa>1kw-MedEqrjX2;c--+|M8vLxJWsOx#9-5MvcZRga$v`zk;V+} zz0WTPXhxHp<=rSHuji5usf7h%t?vmjLEGI$Tr1q4l*cuw*+-|5g2>K~w_!={v zCJUm3XMU$lmg&|Evcb6>jYiV=4fFI@2tXS1-Vat5r-*6h{zOG{#rFHlDJVn$rebd2 zK8|z5!HDTc^2ynD^3FnT2UPsPw?C6)ynJr%mdR~S(E;#<>#ZpH_BhP0v>Y`@nBT;i zRUfw;x5r}=FlySZUc=AFuJ@DWuq}IBul-|f+zvNOaY)19wK@?6Jx!0*I=JR@4aE0`C7{wcH=4oLxTNn7kFnC z8bliJo&4%-ks4jU>k8?}e%QuratAJV^$nUh5JGhl1Rsapsh~jwsZ{KYUanC0U3Wil zP!}=Ct}jXaaqe+FP*yYYzO^{JHF73gOc_5 
zmy$2d78|A2uWaT8=b2|qju_hXV2FMQJhZ33UjwEF(-K)FULl(g03|Is#>lVtMQyC} zdkv+*fB!t2JS%fq?|V)~Rl)h9^{4mhXx z{u^|`#aESU#~Dio8n^k^hx;C1;~9xby2Y?vd~oN5UmdS8Qa^WDxUiXMR zuT`^G;E?-m* zqZHj4#J5M)%$#9i_?(k*%Y0B0R%T&gy_}pJ>{UU--a(tPigwZf%f+k1B`6LpHc&tF zELZz@rP>Q>bV+({;D`_IOG)Z%je@m0-om#$Zr==7p`!%N1iKIuXFy@lAUl z&g@jk*`3R!g=7z6;&MDowqNEb&R=?~Z3&9^Q6p-TiR`hnFRD=1)%U5k$|C>9eBd+V zod-6X3l;u|wyR|?XnB2Ne{H%mlj9@T~0f{vTa+wQ)WT#n+}#+v=Q?5%Z%*T!+c{!haswOu-`^g?D7JVI`2BK zOj^FT6B~I;_UeqeUF@jw17^Gq7x-^3CRL`-!^k(bt+N6cA-9uR8-g7-QO|o`fT@%R zfH)ibg|oPu0n|y_$#BhkmmL{i2E^*K zWb4M=31UnvA`VU6rsgT)#c~C-+Jn*}EZuzpP@PYFZeGv%ObQ)=SyvAxA>F5_h}RT! z7fL{h8%L-0f^W>{Cf``&W+s32ph68Z^)Z2#m#9IBQ&XBkCI;K9txhA&-QRJP`)P-h z=Qz}&2=lY-mYeJJ&>q%F%K1v-mLq+ZEw#&an$)B4?b<%}`o{7UrbA6BJwKvW6;!xH zn%l_B%LiLJuLta&)~*pex1Si%f~_KU*?sM60t~~LMzK(abc3n;yn9_6E2dmNHBOH; z5r7BSm%L0mC`A2bzXp~~JRXu(HzVsQzPwad*RU90d&9Zk(`0kd=(6Vo2hsR_umDgL zS-5NujM`!^6bd+~o0-J`TSIWk_XHfr4Rb9o)A(8GFhRe71W;C{?+Eg&g`#?g`f*k4 zW9bfoAN>2bQRGu+DS$5{yB-t1`Kjxyjyx5oRc+o>kI}!7n&F;G+KV1a&ry}3ALE=a zT9#SC!dCa%%(LcdeQKW9c(;eTEVp&eDMyypTg&F{lsiJQN?nk#S7mVxRh(K&$H@mhBHq#4d$C^P?!uih)T$p|(+mFC zi;*@k$F-W^Z(aN;MBtVBMO?Ctr12hOTw)&9E}6>j?*jjPEb{tK06lUK5e-r`VL4+t8&_ zBR(tzShqjQzLp24`$c#h^a_|RdIKfrwUa70V@|p|Ir;cBT%!W33FP{|-Ob$iUG5=W zJw>+gpz$sMMEW^mu>+CbL&n76)de7yz%zAfJq;nd2ACc1S9T;hbs*lYt*uRoQU_eb zrBx)$@!`U6jX$|mu|-8F!lk!>Mpz6(Fbd#US(j7uDHiL!VVkek2)g*q z1_8spb5)R!QNX1Z19LLFE-gV+v>aX@p^Yt?dtYDV~Ixk?OAU~%Vw$& zh>!Q4Oz8eu;dU@y5JkrHak30iImgqZ~UMMq89St8Y%2bk@8N5ma3PDxS&V@a-N`{qg-!|S0R%thXkfK1l-Pe{6f zI>A84BbSV<29x&NswZZ&LiuKcAAoAkts$-7trvtaLH2xG)Mjk6brEw6eQm*ptdbad zV{M4YQ=`-7qbG!BffMQ&+6Hu{Muuk3)N=aCdM^35`pv~O(XSr)h5UX(7eO^2YFmW8 zu4j_!q7*)1=KpR^V1V-FgH*$>VQ{Mat5^IvQW-SP-=klxhrS6j^e-4V>T#DPAHcE@ z=_6YValxx{L@dZY{s9hTG{iHFwNIfQ68gEj)SBaJYfo_H#gGf>rT)7M|CK9e_Q6qV~sz>>l;`Ge{x`lNq^@Fq6AiROMzg&V+1ev(^Gwa_zZXC~a9jOXKh9P5K;gIC(#L!6rb zO8j@g+Igpc7 z0V)ike3vGwsQ?yi_y~7#k-q7_WwKPclz!HtPai z27RHM2MmD(#91H$TZrYZ+lkxpmWV!-mpxFr>-)yJeY5nr#z3UQWjsc`N{p=&Os!0V 
zXMksY6a;k}6D!P%=Vt?aI)OeQpq7jE4tqTG3&!_n#2yoU>%myWth!HALn6gQL(g!_ zh*N!(7lU^Ps%x+$EnZ6I=>|g+*|eu_%NK(t4B$fo%6Z<{H(q;FEXf{}ubd?;$8>|U z5SKJ-CjjC|J=m_I#J+z75r_I%43GTdI?>)M{Y{p}fV=uZ-l;0dkd58R!icu5hx_A) zW(*L#`}|fYL3|BLwmz%9U+o<95MsRzer>mBmP6K^jdSLG3iF99*k)UIj zi(AEPghzdY1zw4HlZzn+&rYoaGD3YfFQ*674MyLH9d!>Za`qZ!1<+_XxAuANTz|Hw z=y}{V%nGl??u3)9bx!&oc8!4@1H(STe%J{aN=y8fG}7t6`DD`F^!+_~D2of$O+n#0 zUkH5=vokAZrVjVdH!w+N$@ez%DC0Syc_^fBCmsdGYB17SR3L-tmnF{_N%yt$heBTU zk%Y`#NA43K7E&rU6EdT(>-w_kDJRPq+Ptwz(XxZLJr2G*(Hbd#s@7K*WN0q03`Gcy ze6g_#5DR-_U#Yc18!;^m=rcXYnl_H=3`3AF<|s_z>$gERHn&MrlOiZV=3&U7r$2o% zHg2)M5&G*cre(Ir`qON!RfJU7D&Gl1`#_5HRgMQF=tpou_a*QO3-0hWdOd^)oQb^I z=(*|ZjI@)BIQMFLs<5)?#QFBYLF;G*(y*I5A#*CPiAq|_tjTL=^6=s3xs?x?pqKqc zYPsGG1{?=ckbnmG*h0uzs7KTrPyWQj;tpokrZ&D1Xk*3o5L7@gDfcGk)idd@r^NFo zg=fpvXlJt=eZE(M@w&^@?rfyf`{av34(m@}Up``zvyhjBWe-;c2)Tc}kb!NIpvxsB zHnWYt2_cua@JL$HP!aock9CXYXLRLK^n=qzR=twS?!tiRs|d z=oX^w-lsVfLawjFg*X~wEkiHYX6XtKyM)X4zsKZmJ7R+j=CYTrX3F(#eIolmZL2Xx z%!+{FjjWf!s_tanV`%~Cs%w#u<=vs zA;V)OFt-q`%w2MPuYBz{g4pL%Tz#F@BRlU!0{Yw=zLh;WE~E{n=43y`60A18=%s7E z&>LfD#A`^pquqZ@+9NvqTx#%H*b|FmT_il}rWP-95gFd2s~eh?W>pFSy{ ztm2YP`exvE^1E|SM#^hm_RXE%m*+{ZppwHT(5`cIT%wX{t9{t`g0oLx@iO4;s!r93#6;R*hpgi3qc9%ln zY`+KQOw<=c3mmDmHQ`5QhC^=tGCU)OSLE&F15<%C2ewPOJJ%b#{8bd<`geD;F>SrX zU1JL$P=|nSxOXqxBf@ZA*LXDRZAG7)*Dfp55RlCJ&OMw^)t-1Xn zmUu8B$DMm$`Q-vpR&L8W6)z7@j0TZQB0iXMZG&tyf1wZf$Uyww0}oEY2uNY$7kcN}Ay7&KgZGFL`cm0zd^PbQQ&m zA*xcl%z4!GvbqW&NKh8jd?Bgz61KTf!l-Zhthl3*Tu@dWoUp3q8EC1;?>1Cjvkf=W zt*~aBJA|^DE$(UPgt4gC!5q(}@KHS$!*^Kd7wfxPj<@sSS}@ZVRkAy-hchl){f?V0 za{jA79@e*rNO60tdsFe&`wx#$MFkFjbpblcY*s_7&NAvrbB_9=$XPX>KV&pBt|NX^ zuTWFbq2gyf;$;J3#);h=wHy->fJw4`HB-rI+qE99GpNIE7!lxxwt@|M@x50#%f48* zq7<54Di3G7HnC7GyLaj4zn^pq(6u}fd=%@W{5#od*f_3B#aPywEMH{CIMO9scs%xm zM8EI0b+l~N1R4}uJuLcmD-(fEHNe`kGoAxb2dwo9ka$9wiJ@BU4u`Jk-Pftq$}4D*ax8a-#RxTDynr@E-ckqE#Y z(G>xlxu5~vjD??Lnh{=*S{b$LIX{zY4Y>4e#{dmbwYCR{zr1$+QU{$8k?hO_M;Nd-Wx2cixz7k;dp`8ssrXeTC0-hvj 
zBHqnVvDMk~QQySx)t04pP6b~U#Gv%;4IR7{Q7FCk3vn`0{t(12>zWsdCL*Cce)#x#x*%PjAJ!DGOSMSzfNEeAJcE1X_ducKH zCkDmtu@)I-hWrp%6NE(B;JomElTVL;XRlAHGrb4sZ6TVs3w!779(OFMr)3ipVhc3!-(6r zW$U#qOx*qXcOQXec%#qkddCg&j#y~438knK!0zV;FVMK$=f=EE&O5)$+=+qJ^VlVZ`8_>LdbbW#j?JW{R{S zv>wxQV}h$=+XRgb3VYIzj>lRk=jVGHTD;b)Km z$g!^w$b0ipX7O1Nuavg1)*O7Cw>AE?AA}c3(SMmyINQTdu~Mpam0L!Xn%dqo8-LP$ zimFaNYdEuu=NIdVjG*tutFmZtZ{2!debj?xpJ8X^Udq^t76xZ^WYr+%UMLzSA-ykO z`?}BWaFkr~$OM^9)8LnfvY0K}_wL!L@Jlnk^@lCmvETs(yS<+T9qi9NKozIbj3(7Q z#|r!VL&wpAaSv_>a!s{G6}3+A_#89}uR%y5Y1c8ep#I>4QCG7R1Jf$Fp({p7g!zvK zYdu}Hc1U^TW5Kce1{JGtSTyo3bXWxOij7TyBW6OwqI`Ed&NOtJ<`MAek^?cgT)Hnk zz+WVT=6>wq25G{(pEBNO=gkXKS5wxVbj#uBx^;&Fyi(D-@E^EoDnVUkB1+@bY=)f^lp;S+%CgfKups|^z*_BK0!U9uIBaGRtFlT4%aR5tM| z{c#C?>t(Ff0Z`i7AfyxXgz z5DQ_Z^Z0VuIMDT!n$vZtWj~p#YizkSr80*PexY=pDU#|xaP*@$<#KIAPyEwZZjQMk zMsc$VjUPpzGrTs?9FQhTqwr*&4-Y@q=XQ(2+;&xH>GyH_B(S0AXUIdBN>*TOIO?Nt z>I53{xQ~HuaqaUVPdcMSqNA7@g2E!S&D=8IqLB$!k9-!ryv4qTe?F|Y zDyCf-AnCc5Gamb0)5Uv~rfWj#51`p{uOSKJ^N(q5W4xl4b&t#~B+DWl%WjG|N;>3d z&kV|PR77133y>E~mTaYgw<4oVd!}mEx?f!`Zxn3oMa231JySxiKU2T+Kr2Toc+12& zve4O_M-3YO?Hx6S3-uDC~BQD8%bcJcc>IN}EHWUHl+vZSrZ?j?J&DTu1Ebw5Dv&$G0wzIogrvn37k!@DkrDAj1E z6i+f59Hc`iHVCVq0;_5IR7A-lS5 z%}2a`7a_TGD)!?>s-K8>rcvjOOxI?gw~jB0ZKgIRaFYk4Rm7QnLRaT%vwVc+!2H9_ z*_@4j>}DZ3+gx%lPVdh=mkGb#SRZ8S#3o( zEo+%H%OdCVjQk?u+v0pb!;qa4jV;lNW&}7x-Q2k1{CS1VR|eTAireGqTj;wo)%}1i z6~<9GhNe_clZgc4Dp}AFo znc4EFJTd|qj#rzz{Er6f)jXqib%>Tfo;Jar8>@7w2;O(zM~vYvh9JQ0z(<$c+cqs< z(>zT;o$Vid?yLzdLo0M!X5bV+;!{2Eb`@W0kG7KIPQ*<{ASnjOU;7MwyL4aAtN-!A zy57>e9#1sI{}!s62I?0;#Xzn@-oe5Zdj^Q_ICEzJi{|~Uyu!`-Q1)5OwSKV;0Hibd z<`%f9^58IXFD=A%|B%n~P=1K>S{&H}%&&QWrdKq8UcNbZ*33C%vo}?oi_~uXe`)uZ zlkVWYtosWl;?+WO$DOJZ)pyT$-jBVU!~vZE&iA)}B@#c(R2n18S|{`0+zHhcI%aCd z#7py1>hg%A1h~#;xX{k?C}*kRIqdVaa}J*R`^=$z+{})2zyn;=- zO#}rWVUZa#;r1+s?eC4zq=xjsyUHiFv0A|uBQvALNA;w$j_*248LIDH^u5C08Tv;J zcjME^5cvTP*>zo4f~qI4*7-X9F6u=sh36-@cEhz& z4T)lune%J~$XZ+A?5|^6F!yb~&R2fpX#FF|IQMOorW-e>=ncN+?U 
zG5SN$6-O3G0G%eH&8Dw%GqWWkEnrFOJd|A=UilDmQ0h@=(?8G-7^H^U6wjOzo3}xH z`84mHMS5bv)rxQkhnLzX&Rz$3a*4K2;1hl-fTqJu&Sfqk8JdY10xG#9ozV~*% z1BJR0_|0>J!vmMPQKs^j8Cx0qR3$$*7v4BmPj>ynwCCF>se=)Mf^?FDv7mdcZF6%dpb}x$6s3*Y_xoOoN7B!X-lX*CZbBcK6SeyS|qhmA*`EWekt{%EACOl$(C7(BXBvgEswhkS)jL8D#ft!E4!rnA|hgYyyBoB$}9(Sa%XA zdrFVsj;2DIoE%M*^{J+G;z@T*X5dE@OLFFbiV`Ie+1}*br+zaOHMbbRyuGYk62QYR zhWr7OuDJ#_?Mn4!2&sbELo|@*$vRt32C?~voQuVUp4?^%>ra+`k6GbV5V%XRVN3lY z8X2gqFr?P*N~5WIn)xT9H3~i->j~lSTMH~}X>DrH>wkzBRW0-`g!UuDZvu32dSmkW zPiN@@XXv>J+Tu1GPUPmOjw$zcx}sb7i8o$e99>Y%P3G+OzhqhB(e&L ztCF2&rsHyWOIh3wdd$bDB>{U>SZ$QBbw9`3z7nsp#Nk9j=^d9HTD=6y-IyJixU1Bb z+DPLOhlbe1$aj7-9++x19-Ntv8N|9HCr$kjLh_RL?tYQv0Z@GsQ944;-qU6z!$?-o5Lz(7O1ixztxF{9&_QB%k0Waz2yA8cg9aG1{L{b(Iptf~qT{ZvPk9 z2wW}9&ni>|88^1PQ?oW!Ij5 z#zrC#w=CVV^*;bVB!(~{{si@X&d1RvkG(0(l!I<0rcz*s=5K<$jNmLZ%LahhMeJ0C zq5e_xK4%95by8=w4!VLj=D(qqNSs`;$*MKS`5RBF9?CR_rGV>Y_0o47#kw^Gtca&D z@-b7g>KksZB-0ztzq8R0LZ$oRi7WqKpS`VD*^VH~ss!zCgdnHO;muyhoKVO^+s_6lnC(v#ks<6+qG)v86HZVaaOW9wPJygcJ;TBZRQC4Rop{sB zOiTOepBvT1)ff!D*qvnH=hvE;n9$6PRZ16Nx$)W>CiU5flVOJc`TVU!tJ>d-GX3HPn`}FjbA+4h|nEDmuzO@xujNV#DCjOa8uRGDH z2Z)XRfr5@pv5}G3ZoaazVSgYf;_wscn-M24T{5SkdZj5bIG#~SJ-<5zT{j4cnPy{z zhhUTD%sia-Qx73aX$(9(tWSxI|;3(-Dox|txX3D zbqa~R>o?6F`-4bm_U5#~`K9Hc_7d5W2$x?=6Vmn9J=e#mHb`aA{q)az(T zh~eP*^XJ1iG-oGEtv=@#zrIOUJ3+<&xgQC}z7Qn4Pp`$NVC3^TP z^FHi4aSojF{}(PQdDoO+Nc$tN^dVu)JI*c@AkVQ z668E|C3V||ge=Kl`79c7z)$oB&L+(WtA2>amd|)li~G;L0Jpbl|HB5v%5%b1%@%5` zRi-bx!jW#jf~KZs_;w}6svsEd&2MPkquW5aW`~5G+WN*&i4B(tCNwIi^7guviC}af z5uiKR|-_T@Cu?K=kt6#$JtJBsGxk@$+~Idu_QjwXYcNkpZjl&C)%77X-i6(vwTU zd#;-6IR^QFBSUjDSSrCEqf--~n7BEa;*Ugnv;L((NgVnG^7@zN38O}>-fD>xQl7x| zQPy8+{B{L>UEPgd-c<9R%vd4LZnL%0SRL=m^-{Y4=IiqfPtA{aZ@xz(QC9VHqOl<< zg~$@WxC2)ZROGmIy&AXdZnrRtv)<7k_ucCDVVnA2Cf||?E|GQ7zEz7X%?T<`T^nGncqApbp+Eq=4Q)>!Vc&Ck5<_JGQ8>nT`Ja~a-FuB3<2*Q?#rcA ztmMy_9Z%MqP4=S=S=pTLD%vIrokD!u8{&HFAAaNWKlGNWlpeH<_QlfjS&wN!q`?kL ztyKJx~AN)h`IP@7a3UfG8d|qex{lij>>l?*z{K8dRRd_bNi?FGx)Z8eaQqJ z%L**7_PJWF2}@?7Z5^s3rOD*pOmui=1dHyggOlr 
zz1(BL_kdsMUf#wQbia+fXsm=i*H+;*A4fUjtzX2mNCp@ z4?1pr8nKap53ulG6VtucWm-j+{pnN1yxJE+GwNq2{jOd}i^d>`D?g4|FDefjAuq=l zU`}?O??0G6qPu8)xf#-j5x#g)WV53j(XS$QnP98FO27Y|3Zo@gCPtoo{nLo>Se@PM zFt3quFm?47(%aqPrLNxn3+yOJW+SoXakMmVnJMJ9Rb(%2@Y-+D2nnJ3G%a_)pFZPx3pDX{{UVJ_84`CL?ysK!KBzvNq2Rma!hYpJEf=0{rkKF1~^4SzPl}` z_(zqtkoXXQVc0agDB^yIo8NVcKSj9v6DIcs9MzxW#?+u$lrv#e@2C*%8k>IKmM1o7Cv~gP$d}&h?G-Mh+kSVU|2qB)f(k6 zY}WM1j%wnoN9SE}Txl)dl1|opSuHWoF1(OahL-Atyv9pfIC|3lBy#q$a~);Y&B&-< z_8oV-?bMg1d$*nqvh7`(o10Gu)2}^IYjoX|&#EU@63z&so+)p+qcz7P-q?8ozuC@8 z?*dr1to-LNdv;Zt3{KI0{352RM>dNWtAocjAb_mVV&E!H&RY!(cU^q-yDIKiW+>A$ z0lJk&Am5Y&%hoDTTa>b7^R~(sef#`-qRu+;z;*B}(4+^>#-STtIBKZ5h=v*3f4JzS zI)RlM)>uEDgTCLB;u|Y+xi@_+2Rjt;)^R}pK;Pw7<>E2{viopLevCgEn=fcOVL{4k z-?-=c{#EY583bopt`%0Ufr_E>m9*!J}{9mK(LK7%;8HY(H;dASJbm zpN5i1SIFWCsCPX(Zt*!MlF!i_QsOC~swd^~_g%K5~Ag5{Pi%3WiVSH6B@D`o><{&n<_VU7*WwCd>R=-ODb z86PzwKg6wv+sdpl#86Huj7PN3eL%!wde`TVgEHUbRw#N8TL)u;TJMopj%GZrV)v_0 zd{)EGx|~zYIGwlc_qTTZfDI~o>Y%Tci~IC&6y9s@D`eK$M@?MRevUhN802)E_?L^w zf_yz-xvn%7m|v5u+J!77#+naw1$s-}RHCKorW8sBqEQ7?zfKg{&E=bsiaHGpnD=Zs zk;Y`BA|@l%nz?bmqIi-|Bo^zS0(;e-VT3H|`BEh?tP9C9C7{spDJCD%PO=L^zO(#4 zyuD>qoLkc-9K{0!lHl$h+}+(ZNPx!O9l}9_yG!sOf#43oEx5b8djk!8caw9T_slcj ztob*8vRHxc-gi~)lB?>frdkYNg|!Qi`N?_yA!*h$N`1=pa2$s&qs`suFIj9OQf)9$hPx5IB3Yfa$R0~v8(q}SR^ig9)6CD=5WqyN8Jb1 z^^_VXMdPjLb!&chiB^2x*hy~$HzZAMl^X8RA16T8(9&dL!$xXB@;C$~?O2{A7hRg~Fy(C(c# zW#-fXVWkXW>t^xP_g3Wv39j_RpvzhQoI3C;R(T-<_T*aIXsQ=K?0<*w+g-jRk!KWR zOTVuaPo~YlVKlFzzHyp^dDETKtr659gKbDz+Higgv|$<#FE@ETLW!P2EdoD^ObSaM zh3|BMd?wnU{GZAPp_KqMQH50*0#D;1S!ONvrF`eYolvsMbL=mR>81NF|4h}-vw0{| z0Zx)d2^>-Ez1|KnUjQNklXS2IXkIX6l@l`Zwfy`efw#g%9(QqTJIXQqdc&@3XLnaB zRwU(db*kAJ&9^g8P%F_`Ra?O0aaC}#olO4xF!Vo3V?proI)K0w6yiJ|I;h2Am(}>&;MAOt`f|Sf7!26e9 zRK(25mNs$;qf|&CGC1;GiGr&wmUY8>V4nw; zzfgQY5Dbvv>{Sf&D8Tu;axTRDwu#RGaW$C9fGj6}SYbZMy4tmAMz+vFa&0g2L>T=8 zkC-NVio@r|izLAJC&9`poZxoHirbv=<@!VO?1WhduP z8kdT`WMdqF;tGK7pK4l(0C@gh!nkCi534~c6stip6maCc`M3>9LiRi7*xjjqxNTe^ 
zDTJMOq!5*axY3`&10Q>7;*0|WYc4dw`Zzz4Hq;e&PtU36WlhH715Sj06P`Kp{E9{| z_{%5hEwqnr3>te&N(SuJMNOr7U~)FUJyPb}chwq_P*$ zMYWPHsa+awN7iVN@M0hc325(Kf4`P&Zhl7WkU^Aq$@cAzuHqWiM`l`9Z=%Jpm+#*7 zp}mU?&3kF7vKG?-S0L>9cj*-8`UZaffA?np3n~OXG8RV}6mN}FLpmkn=}TD`qk=_~|l(mp19s8!amg;SoN(Ak%1fe*&0l=i_7aa zXQT_d2@rSOcpe2@uP3Mss=m0|6C8H^7@1_Y4$p-WP3C&%d$IDH1lmvf5-ojuHlWMH znPz#TdoogIW+~5Wk2t^oU~EsX&N&gz&+uMo5=9gS^Xgghg{`{m?7+{~I7Fhk_aeMP z_4c^JX!zY&hT6`sbb7fKzU>zcfk3`3*7Uzx@6}+62qV*{#S-t1u_Fy51O7ve2QWbu zhtj@XK{{VdmQU-x@Vsg_QpMIz_D-fO zdugB=WCwbBvyTa3B!+z)Xu#WoXp{e_50M{^n`II1ja4DOdC)Z)#P|Qsw@jljbsG~Vcni>e~o&2Ye`J$?&xw5 zEgV^?tf zsl6;47!bos<#s96iy6j~PM}qs^^KDSJQDPo{{K`-1)-JzhvRXx5mv}ETg|cblMAi( zyyRH#1+)Ns#(%5jJsAX`KcgzF*1BJs|KI=CcsK(O_*=)_F>|_PD3(_kNMULTKw(0s zVox6<@uc=YhRPK2=V=gH&*F(kM@K7s9v{B+wg70G1hAJO+1lp%XmRuIxRw3pYTjn^ za#}A@xj?ROrOoeN7rI;_5BPI)+Kw^n`QW!C8;7;a&a*8R)1`%&%D)uBr_YA|w%z|S zhOz}yfF~xC%p?lm7!%wA(Cd>q9r6K=f7b6>Rm4^Rh?F)rFUrl!qZ+L-8z(1w97^LP ziR3v89^MZP2~pY?PFx$a#K{D47cX`gknq z_9w~yZHr|SB4T1iOe&D@@UZZ3)nt1pCc5aM{zHol==>-%+1_0pnuoQxoo`7B3i=}9 zajAICaB(ANzj^V4=f7}T^`my1YSiuCIC%&GiT^LgR`kxrBy2G&S&$nc&`FH>%580vqD>_9#l!usbbJBql zf6y7UW8D}*z+WmAO5hFFPuxCr-RO-TtF};2w0j^_K23Roc8!~5Qvinjdk^cGpZfME zD}q2BP1_$(UTQRuPnAHDs1#606|j~Uj73jYY0yfEFnsHGd6qQ-^OHQl#-H7)fx|CUwW5E0FkP2 zJ8Cg0J)JI=LOx-b|53ZEYtcCzi&>1$;p#v$-fTw_Sx7*Ll=!L_GGA+x=?NgpF`1b* zL+5~T6(LFO+f$}`A68FKEW19sHbc2Cg|!aba+fJzzkW4eq>#^uBN7h$j%L^^$1}A* zTWyg@&5Y0Q-Q+$9ERvwqoCP8i>pg(cNx=}0JFNWj-QyL6WXU8$eZ?OeHSE(!2t1f= z-Y5(e-dHcTJ8QJS+dLmoJR6D^sdwbmiO|L-OPH@dXXP475B5QW~|PP zq&xDC^gqs#3gSrytls&i?OaoNAn$y4z1F}}ap{UdRf*ePT@k<9evStdoGzBb= zsmr6;${x`aPG7s%{I@$K)h$5RL(8aJFpSM~UDkoB=OW}=037+leHnBAr^EKpmSfXaaal_d7u)0QG{D_!xe=3;*$qHfL zrZfo$*PX@6X7ZBd=kImTb@@M zA|n3%8yy$G(hM+5AVELq?qZh&I7vQr^JR@!l!+<%Uk5z^2CVi22g@1Qkf1UiZ5O1Z z>2fR`>AO>WB1~12fvgFe9Aqi%p~;wwXYkQv2seH(XsAt1VG0TtFn6HL2aC}MWA*&! 
zfg~^o#=h9yB3z+F=Ok9hWVtxH;q)C$N>(VP_JJ2ZRM@%Gtk>7r8+v$@F`*KlRMRZR z!!1;`M#|_+DH|{o7azKQS)hOhmvT~QBv@4s8yQhJKW4L<%F_1SNAXBuVqdUcF>e+?n#HMGbV;SzgXWvS186{IXEhBTNa!(a1{6k zCvHQy2B-U`(P=+H9>AMPDRtEuLd%~Ky*NB4vYtWiTqamGIRu;#A5V~yk;^>5Y~Ydm z%)|fNZ%jAt-ur6SWX}zRpRF>N!YTm`CHhZsu$;7Igp5o(etW4Q#sGo4w>kKP!bm_Z z#_?!EW|S(s&>h)7h&3V3pPY%bvYs{_mAY}eHUVdTZh#$wm?z{HL#eYW(}{*Yie^16 zKt>`LYUgI#=KCiIV!?J-+yb;3 zLiDjb4bQXtYM7OZN-}&u_%eGV@qf9$w+K=;jQ9kkxc&>%+r*%$UBv@&qGRDX#(Qn4JfZ+_{jr8?&o-cZvCpcTnDT`l>(gpE^4`KbSw`PP>$bW{b(eGziPTuf3-rrwWuo0X`6y|?50 z0y&{T()IfFYkDl}C`VI1{o%TEQb-l5NF~C)Jtod@I?&5U&gR~kYXf`sqINC^cz=vDs;!h8_WV0n3d@t9Np%po%JRf`AwR&Q=by<>{8P^!J(+_;rG^1McEF>d zH?<(AIRSQ=YeaI$1|c!eP#C0OjxHH&2?U2_n|~rVP3wst5&~!(*)fm~WiiUFw!3ck8tcWP?tNi4(u6K&120Sz`N+!aewwoHhXUJ%AlPgidZuUU zrP^ktDE4p9ySlolID^j2KyTgAy`!8dg~iU%4s4q}t`oL1eRNqDM+~vD1!1R_fq9Jh zNuV72I0eHzfs;lBeX^RK%DqUH77!rdvguo|-S@PYjrYEGQa z#pV?43^2(ub}*N6C0;n2lH$#=$6e^s<$m@|I+(+x!$9foYfN0*sMW;|^0cMZF6En) z`}%A2K2_=%+>JI?!gnp{-3tGwFv-gtxR5lZ^$1_({OgiO%P4@Lx3^azyM!eh#jwT5 z#3b?4IPx11_NytNe;8`CnH7eSSMRRNQv8L~zplejCTOL-&YLo8QkDkj#phP6d>mHS z^U5Q|XF5N@G`y>yXA5Qq-D=hzr@JgECI?;Fp^w62#=JQfkP?y;g3*%pRJ;w{MRi5B zMW>LvpoS>4gjx!gDBHYbdKq1Bf>->u9i_bEVmrfQC~p+Df-oAZFi{-I`DkIcCO7q8 zcS`bEit#t}vSvCJVMdaXsBpEnn6dAu3tzSy^__750nGH7J-O!BU-G4$ zuJ3&HGN{meaSCcw07v^Xd%&+={_pI%=X1oSM^MKEr(<6d(CSqUb zKA6dpQdxxOgClg@!eh5i!`_xPY1zGejow=&OoDKaxqhZe!2P`bHDi9y$@m~`iP8`} zK>ONRmHuK(o`J1HXsq^jyR=YB(lEq{M0#}GY2RPDwgUigGHQ0a?iV*e{fb$R0|PzjBujNi8hHkW%NB)hy*> zn|p34--6d}NZqdO)27cp2mgwtnTzvq`* zOctQAgtD4XNPK08^T|e}?M`xt(Et`X6@|T%rJ~NkC00d2;LkvU7Fa-7Ho8Gg#o8T9 zTTeYN9po$#>2ZnDPqgj@yR_Op9~OK;zGnk$3in%2JpNx#ZW3BIBc|ck^1F%Zwpm+EmZJI_5==nAj6l`$h3)Xm$6QO|cula=4l21$xEMo5 zLYZExuZSoPJ7IM6-Z4SThHMzq&J}r97M&Z@#^2?)CYhi^L=zd7=pbjQoAuE1G4e?E z0NsaO8nj7fwcG7HX^^M*?mHZ!#lUUpN7;GM%;cx8ibpXXmrq8Im$=D#t7pTKFPx>V zXkPa83?z}6F?oGTpygr|`GCtP@>xIP_Z+*#6v6K(oq$4n1Dhu8x%X5I2cdALP+ReC z{iEwP%mFX49OSg|DLI5_XF8l-Sn<&AGe5Z86B^j4dUp2<)5*;mz*ESQk;^2l)P&Lj z3y8sk$+ZFqCkh 
z$6kibW@jCA!gXt}wyKV;%kBSvlwH2Kvt z&G#Z`*X7mH>cmsdtBLmeZsKj+Ce)k_*2A?9_>ITZ%b(p10%qfHhg7OTY^amj7?X`n zy;@sJH~~=%Z7;)ZoACrn4JM$noCVT<`52qWhx-cCQDQ6>Q-yNta(@-J)u1q53BI_W z>rlwcXmakdYinSTfR;wBjFrG;9SO5!_|G7Q=mBPp-p6c84;3o2v2D6^YT z-PEqN!wJ8Nh$1UMdh6=>lNtrTwI~vaIyliRmb)JUZ#6OaR%FG(gWGiwt|SBt9pcRl zWY|DCpf;qjchDjn`iW{5HoG0W=uDSKPdEGrC1knFXz1vw=g2HO zzZ|8=l;ZuDSF$=lZEJa3>py?ma(W#Zl;60M7xV(4o(h!>&}-co8AV6K$W(Atpy45f z|GEfLu+F%_{g-|bK&kY6r zE1j#%0;Lu2jvOQK0*LInx-PC+1!MtE)7`xec*@GAT16&YiasnnkU-fMQ-dBl*$Wf~ zGrD0bEAi8we=g^DfVQE6SumBdSTM`jnCFf!6a~P(O7!hE3;vSyWOcw0OsvJU4aZQU zWfwIkfQhBGda8b)*UZllR!%J*UW&@=u3}v89%v)!F&f(_#7Bv1lkoQT9Gynx`81 z5U%n8r{1rI+s(`9mX{uqTmFhMjn>WXc#RMo3%Isb*htLzN`Z=VcZ~U=&)<9=Mf!S! zV_Uoo^W-y2W7r#l!jeo~<`&6>Nn9Zyv>%A#}X1VjVVVyB; zsr^SCRna*;lzp{^*Sf4fnAnb1`l9{@W3xGXAm|-X*g`keAy*dvQMe;)TwO);2SY_! zlYgepgXrShFFNBMMF*{E!W5ybk0Y}_rVol(6^|c35N;6k`;5No)Vh-X&#f+RGDwb{ z5=l4YE1_#S>2|(E1N&@yu=UG zX;!7Zv*~oGRlU+1j=mA3fs{lN86U3MJgoBc;oy+%7X&InPBox#c^C;*tj1c}Uk(G= zii5J{7#~KJx|is0^{sZ2rRg=jQT9FQnvA9obL>i|?<95{eJD@t-C&@1>l#o^rn!J0Dg zq}BBi(-`Uovls{M!wCIx)z3OH19k*>Y+bSpuk;?iB2vqlg3xH&hhxE??9DN|2qNR3 z{7Q^c7gM&?%X3KdyUCF_ulo-&ird}rWvvvFFJnLp`S1%wp<1~77k^~dop$ziFWVYu)=oBl z1jx?-QNa-HQ$LVHb#*Rpaat8ugV)QUodGQIH6L1bIl36b=tqb?ZWh;V`_Hq(Vd}y;lTJU-SGc(2Em3dQ35h;WR zNZrytsu?|IEzHtCd)pJ1C;c+x9_SlahP2LD{{S@c*ZaZ>E5SSS|4{`PV<5`-a&v%r zwX5Rpqgp1{WAi1>*W=XhL&Wk3VbHgv|#i|RLXT%RyD{kMGZMvW3Vx2cUo6X|Bour9u4Y2 zGQnK@+RI~VbAeOr0m~cP!-eNMM?-`3+kF?Jp`oQT^vh9Hskg-rL$|Sh6I_JIM5s^( z1poqLJ@v+bT%BLQA!5}67Ce#G!KF*kFqz=o)B=oJ^RPY&ToLqVEdqkks}+o~XCBZ2 zYj)`+?rZ_NIQW58Vc{lHjq1ar27RM-2}tNu?dC?rekiGYwK>?wG;C9zT-Y@fPGa4Z zmA@*^kZ$yFl$X6KO8CDWfj|%%sV=9|I{1JSjKdfJCjcJ#jDfmb;+eDTabd+Hp>le~ zqdhY2CFdQ3Li;fNLq$(lA4J$~QtCg~{**Vt@D?My^}22JBXc84I7j|6k0Oina$85l zc86}IagyQUTAJVaV9WWexlU%rzRZ7_<*ksb=O@4-W_tF8#k9V4?*mJU((b3NTwD4d zgmIb1sWq|2P}+@ufw&L~&0>2nR6lGuCNR)DDeljE{Y)&9`u1P8x%j8fds)FiQ*Bnhg=@sC zO8~4SxG8u|Sn;T2ETcZ_gIebNJotb=PJ92LwF1pstqPo4ZSjrj;lshM-e%{Y+p2l> 
z4SoOH1bKy{WiTQ$4140zX?U|z4C7LrJdAf{XZcdbB1Yzf*4ENO&XB6#ux_j=%H(tR z9aPBoykS=9{2KNBPFCr*2cjY_-=|J}Pbk9Qb+TE<^cL2}A$eYkh;#SSN|PI3l*spoB;53yePQb)dD2oG z(Ak~#NR0P?dsehMiy_76^-D!?Jgz6c)-$oouy|kz6=>zC-C))SOe;NqTFbI#v)krt z?`q+g_u-G-8ubZG;Z+<*71p|VTez92d(^fguYcjItxiW1-A53Tb^dK#$knXX=BcJT zKi|evW1Cr0=Y`h(KC@#w(DQICOX4c;Dl+PZz2^J*Xz%v&$u59i{B^oDQ(Z^icC%TC zWi`>D$B9pOP9LURWjY@AM~a9ne{!kI2r~MR!$HmjIXF7{o^mePSUm&zm(GrG8N0)1CcWySsXl^eyx5xa<^!J40nwIsW(!b;d7LZd}D>tp1(8R8dB*tC&oN+mdCzZNKHj58NZBy{2fOeD0@7ukMgaVmV)xzI>l>3VRA{v{Yq0b@7x>^K{5M3;uIE#@>StHKpx& zh^Ji~108@$pa6to+uzO2fma?55h#&C z0BZgH`{^wuq@OQh&*2~y``wxiP|s+3+M5E4;i3L(k~QHWZiBc3*&apHBj=7a+FA!Y zE_uNLwgRS{jzYa^l}~f%9et=$Ie%4M{4FhUdxlES9TtP0%63`{%{15xA286%qtW(- zWpb;2r7;F7_Q3yAb%;@*;@;Xs8c7n=tCfnmj$q4M(2RjGjGI>9E!L9|afv@c z!xc0|10?3#{S*^RK9q~NNiW490TJmheW*2_MH)tfrS0DO)Ft<8G*k(7E(0DnAI$Ag z_etT;>54u`gAVmexP7GT3pQD4#$y%)%3U25KVbTkiKzTMh^0`kVKm&cVPE|Wdxy4! zfnD_3X9sOk=*{}a{Vruutj)%&M6kB;@EHyxp-S_Vdbg{0DDo!tZjd?5pUjD21X`tk zj8NrDi1KP-*@jDiDw3K$0^HP?Pf?6^6!OjoQ@UdLWarzTD;>|8FcqU6SMN^K7x2?g zJ?4|5rU%*ck}j2W++ZMH4G1jIsD>%$qa=i?%G@Y!17Bo@(FqwlTS(^aZG#sl|kfVYjKo;7~F(@a`(J z>9`%nHl$ro@*c`!!QbrBy)0)!XgzA4dvA2D!+=fyYCB;J89keMtg`l4Z9B9xY0Eqb zjg{?m6Q~Z_*XN;%1jbN${&6jnp=AA(l$ls5@RQAd3>2<#(D1t&ZKwde#d!(vm7Qu9G@=ZALCMU zR;ZYvU{`Hln`6R__v|5;w@jrpl_w>U{~ROR_@X4PJ6;Mc?HP)rf2n z)w~I(b98vyTTw&|xhmQF4qE;_fDwi%4A^=oX=4z-&9cSWnPLLiyrGBq#vni)65@EiLrjO@-zwfo=UY zZi;(uM<(&-cW-XOy%s+?L}cW^z#h7Kz4LCv5|7h+9SZK?@L5!vhbFM^$sb%FjBy9B zd8hC@-&M3Uv5vW#$x~F4VuZ64X-Qgt>)lsXk-FQ|+-5xQH_Y^}niQI9u-cg_=9kyw zLQscUb=j2x0zC=+d)7ILbWW1b+f~*t-2tJ$eCf{0&h#sE&2?7*>1j0W!AFtX5iYir zkr6+!Om7)XMm5I)77;6I)sb5xB0|H#D0tX@jm_Zr_1dnNaCgMWakWjbV%;D^yCIE! 
zSobK;yLntwhSYW?(PZH>Vd$5;&zoTHyScQNn@Mdv(2#H2qZry62L#uRa~rifaHyoE zrVPYO9T}Jux9Mgad0|>b}$Xl?&+dES1pp{ESHLGPd zxKDJXG!EMUqpDO_BDN3KBzf|cZ_AQx|Jh5w%K2WKGH12PnP6qGSu`MTI!Z) zSIR|X@=>I7Vc6U)0&`)r>aH4WmUG0PTn-X=?-27OW3zF#OP_D}9Tdf!(9 zE)G%0;JK!Y@^)um4R4|SgSL&^p_EJIu7K#-(x$8U64$)y5|2*jK1KFMd`G&h^&vca zeff`OZ1w_{6=MDDo^Q( zmAUrw@Jo3(-IVagmhxOl-q(}NkC@#d(^;$Trn;7-jCH^Bb^})Np9?R?TfFer6;4=A zQV->NB;UMr;AE%O9$$>}Zew8sRsa$S(n6xjVf%oE`=bo>!hvrRf$2evR0d%7yab>V zc{W*XY^=$wfxb&R&FLG)!)`voqHxTWY-{f;Kx3#~F1iFA15f zEo~SY!_lw1cYlB+j2$E;Es1#>#3fvVx~<_YmU{z`53!|!uJ}h;yf{n7c?J4no+u9}be^VWkg@)&pmJd(_#Zw~Hh^!&F+1Pl$y>1o(GG zyfHLZ^}iPfStP9;Z=tL)Jaz^cu_G?2A$# z&a>_qsXioipf34g&30ZuR%nTiL|@j1NuKvyXNcV9pa8R#Nyna?Jc%@~n)xf7M@1*b zuPGcD?cgq=b40u##LFko;a4X? znV&1(g}*TRn*;TX6w@8#dQ^{{D2u_l%8=%mxD8;%9||?zN&UCZnx-pBZRSPu)?{elW8%hj3BC~9uoJ1_- zZC}WNujHPwKIFULo)7nKVLA0(nVwc|3FQv$u;rol!MSJi&&u0_Amrf9>qgv`WfJwR z@8z^kkYTHx+!ltZVv|2bsM2tv&3X5(v^R_s`l5 zt6BPWYajC?pB#D~lKSzU#5+5zo~5_rV<6qEN^nQLd6V7IniFc7UsH4fu8r1`2bv2llx?JR|sBHASmPvcpq)EC~(Tz+s?lAn5gko|Z zX|ZB$V-+9IU2caCvVWvdcRrdelXA|b|zefZk6;@@3_fpoIy@Y&-r8`Vc$I^(ow|67q49;kVW$_)LDPrseCu&0tm z{9+`*S%sLRQy;i?jI8K&=!n5b&@5$cF=!u+K6%T0K64*nWua8{gkpB}!B z8!u2YnvZU&$o|0{r3nwpMkZz@GaaZuZ$;vWuJ|wRVf@=I99LiNJ3_%%lf4us~Q|;USfq% z7gN;jq@c+*N{Lt)M8tFAhZ}hHtG>QUIKcG6mdN>_BS@o-)t9?oBPC0eI@>p>#ahs0 zwqk)oQ7&Y<4VCJ_lH`iB;1tGB2R!JpAD(TD-2t08a3hv!Xp0BpJwm}HD7!lU>rJw; za9qg2KO*`+PH)>dp6N7M1~3NdjS9L=rMWR#ZP8w08nrBXT&N6F6yz3>69VJo>(GRX zMzm%r^p3m#9JaA{*Y9h^u`9u>Gl!1xAs%JrH3}0wpM%kD1T7X{YXJx;G$zSE!KHi7 z0?oD!7C_gajc}&V^Yp6pfjAOMTw-aBL0(NQrMtW|K-T}xTa`KDsFNUK`8^pFP1f#Q zwz)_3X+kdYoEF?%6v=7I#qXxHyvZ+nClLAlhqX2?m`IZx*O_>{mv4>=5uyu z2d}AkJXN5a<@m?FKzx8*td!T(d$m8xoQz@GH1ej%l2-y764LCV_{pt*=UbzxS;=qR z%l!UH=z@jsG?a#!9FZ^;7<5xP$oPZWmZfi7Eab=>!Ec3;h1Jmbw zsXm=32K4C*r*~gm>M_JtlOws-M;NTcCpMKJ=s-Cpt{p#K#HCDPNCC6 zf+d}pk~t5wOM$t7QmI}b2`PFnPWBMyRr#22Mc3UW)q6F!9Qs21AYb%jEAuA9J-~#4w_k;Y?(w11@^?)ylN`@SxI6FNL4<$v!!g+_ z&9ipWVuW>e$a_m1zmHFZs?Hu4U 
z89DH4y0Ykf$iyU;9!5&s+!p_ip4YO{8St&87pZEcLpC$PqnFEpA}6ZtRr#pND6wQF zi*7u9%vdz5VZ~lWvcnFj+%?jBQ`kv4gVCnqf?!%l=b&)_^OtPc5x%w#_XEt{T{>@L zqO?Oq&cM9NE)+E=632}XJ4&UV72EFq{MHARvL%Ecp*4hGZmmJ_@f-Z}HEB*L$TGj7zfck9o>5x+g171`FhIgh_C6G>uQAZs5_?YSG5Mm8VS#}pvt{>6%jWl> zItVhDs&sOTgYWb0H?R8;m`Fb;S0=rvVym54>sDv3xN$9^3v8`HF?GL}akY2()5|by zpSt-T{>HD#+7Q5Nw%FF(sWe)rqi;wiJW_7P5G%Z8ukCqj!#p2kdn%T|M^?X&u>%X* zT<#ZAro{Z(;GW8&U_2XQDg`hJ8c}1Kh=FRu2%T(A2EIeLRtj3`gw$Q6( zQkE;-5JPY*fuOZq#p+|Fft^kzKCB+Sn$_ z_nv0t?zX#7bWqTi*Lv^>$7{O*<7_z5Qj`~&@T6c=!HsFJetN=#%iXGlR%5YIdzeZy zwU#sC@`ZN6nnw~djgP!+N7-uP&~NM0dDJ47ol5zTHyToeH9&%(wcTTDx!#m-}5B-ttb z1N-E$HAwL{1>FN_>xM>KS)(!B{GVdN2qKkDh7-e_Qpt`tHUVV5CE;NYs+R_OI`M=I z)D_^_$`%=$R=(*8sAtMvW`zF(6A^!4(7hVc0lUP@|Q7Cld z7C?^F0|+vTiz3;zmb?X_pWs?9nk1as_DYLWTR7-KVQ^X!EOtU$2sU)r-u(S0Mi^myL)>-?mp-gQ?cPk&viM1KtXCTC#=2E#+u5m;XyIyeF&4^BI`Ze zR75NAJ!4BL4k|%Vc^TtAkDx<+c~emKDjeK#T6{c0&tzf|Sh5&RJ0Edz@0;Ug)EGcQ z5mwGFe@(k4W6vme6za12D^LQX)cQ^ACy#d73#Z_Ftxmj6P1d&(o>tk4x=JV=DJu=| z{9$WNd1dL;QhTa#CzZ&pYmo~;`=xBcemE5@eSmXF{P64h<}6z3f+!a@6CH6&X#Xv9 zB#D@@&NX9BKd?BziMIq@73u1L&ypl~H_^IZQhvUmZ|4e>9KorPeb)hTtiF$3eh|qY zI?Un=4MFv8C}H=wJ2JSlch|)@4>JDa$8iE>k8&UIFjUnDv-*{h)YziIV;ORM zM*EyP3*8jeYC4Lec#}HY6BXt^^$(Nw3k${EbT1U)R4j2rrK}vU^h(Sj04oY)J9mw^ zKr4XULn@y70+no|H^z4FRa3huCi5_+q5Vu#1`|lcza)9KRLK!?sSY-JcX2V3meUY6 z4V~G-{Jn;BejcwZ0vt{~V@^I{qWCeK(~G9o1@TAIoKDAD3Hh znpf^X6VohVJ)!C=jMTa9;8y}(JKm7}YyqiefrwAqSpJ!1hBS%SP8qcpQJpP1iy^Hi zLx-f)Ig!$Z4E>%tN@xS3;N3CDuvU}dqkYpX@ivgM6qi*ho~MiBrLgGZ$=7K;NPAwk z{gR)B(a1&1dvb`QCc~9ZftNOW+I}n!w4D_|U}w-as;|qCz2J-ZQ9F3H9I>;A{B633 zfmS4CrcZgdhqw$Xjf>sn8@3mm4;d{d=LLq=g#E0Puc&KLf#H}LSRfgXu{t2>y226u ztiLQ2_`Uj^;7WfHu5%9FwJRXHVq-$V=_QYsk&9++E+slSCierdB7G#yaO2D|a*(rV z9QYC(U=UDFge#aIgtgjJ5E4ame@D@9XuK!At+ZIE;umJ}?DUmm-}n67Y3(E1CYV%( z&TsIdcvG=TKEKAR!x`%Ch^k%efv=DDU6AC}Te47%argb;-O|sy4Uki|%fE@`@UQ=7R zGtNL?Z;wrvpmiD8ux$t*LDEL}@IsSM)5t$7$bwF<)URJ^PqP?@x^aSLkk71a=tTX< zsRpSP?O>ab?xDK`A0*YvDc&u0OeCp0;Q5jc;;62JG-)_^wnx((qC 
zYF?6djI>tC&ugxegEF0pP=O8x*N~reTcaKwwhODx{)bCGYAvuE_5LAH+!#Nj$gL`P z;)1{9{(1fN+TCu?(HG(3*~1dO+M=Y0o)impsK^9H!_?_@6YC7z1lXemxh4#e--mna zOe{g@m&JM{H(*)?w>X;u85NHue)Rk6Z(3en$^9A_!ah6JXkmO!jay>$H&I#hN`{oo!(U}$U?R|`fAjQFl9h&i+rEnTP%bC+8t zT($zMO;L}b`E?_MVUXAOh`=bM^{*eogG|dqeUkJ>Exkn^>)lab5L#nt=D%Oc;*2i+ zJ_>b+T`c%2b!cr&1bnIT)}^<>>COiuwYgL-CdDlsOcSPcutZ7=Co>0bAh!xpL$ig2os>l?k(i*Kf!;UW3e<^b8;vGwpt*Tb;VK#WG-_dti^Pos z@HbTN-B651y8YLfSc;}Sm+ej z1SV3(LU1teTEZ>yd#x5QXB(ZhUK6%h%&xB)9h@yOIUEaU+I`DR<4Rtme9V}Cue+B| z0IY5qOUyb~=4IRdbzRpG`6n2fo(4G>e(M5yUMs1YD(uMxL8_y|#h>||)O}YAsfE>z zk+fy&w~N91oP=`u2bf1nTl2gmYDRavV3=$k=S)S>Z?6_I^1O|%HQSgPihnv`exuDf z_1C%GJ$=#2(y3>bnn1SM{Vx6_1Cxqwjx{zvOI9YupNGn7o|j4wtIT09uUeGRV>N-c znPWCjQ%V$(UuTMY4mCgGT48ZsV(a_=$JblOMV-Fi!!`nv1|3Q&B_Ps_h;)N=i7+57 z-NT}Ubc3`?Nr`}TDKSHeG)Osg*AN5Dz;lnTyYA=reLa6qzVEp5T<7Ro4QAs=Y=B{I zE|~%KCq;xIZWW-O{3Q56Yml{B6@Y!QXKWeDz*=7atCmb|`eqEDiwcR}$DAlyi44{% z9_DLtehze-oQk=wTXoC9TIlOGUmg1@gz@ap6i+X&fPNpavSx20uQyO|x>ylkliLQh z8%O;NK2?&67cy4T*QumOT(wJ+GA6^EdVBe%i}3StHbiA0Q<70+`su_I^Uo2!L#JyJ z%8LQ)D53j@JH|-_Z*TH@$IDyse9*hA%@AOsBx6Z2mM7d$+2G|6H_h28w0@t{BqB-Z zsa0jS=zVVXfXu2g<8L2~%<>JYxPN6{EYiN)4^a={p^fd^7rj}Dd0Z4ncy*HOE=c`a zfq6%%C>mN9qHu=S`4_0{!_xqQtbN;PC%%Gkdssl?(b1f1;@g3z&;u}od$)a{_t+Y zu-B8^OH3uPzMCRef$$DiIH6MP288_SA-4Aex4IhxAMs5_%Qd>&Uq3V&x-&?jKDnJR6@?;anp`z)(ezp^#Q zE&p4uZqnYuHF{@^N|l4j3HM*WG9JshHcQ{ekN6j=qx;R0JkC>?sPOgEUB7*MFVJ3I zbU{1sY?3Eam(Wo^`usG0JnH6guh&b5$PP1~rf4kQ+l_n|254patyDQDi`PZE^jsrR z2&$cu@-WQtjJ1yEg1MmHCoxIa!g7+ME65QRmWdo;1WHsCcpJG?v5a5-M*9ugx${Eq zR$&IvYFb6o`mgd{x$f`VI-uLW0hb*%V;1eIXMnw~aBt!25ulBV^S_M}D5-JvTuY8k zK!8cE0=^%t4cjrjW4;ZS*lu?OmtHJ}mbO2n^?Wvv&T7G+Fp{BOkRL&hCnk|!5w_d4 zI@HR!zE>r3Q>?K#)|X#3Ja#x;Dy*Y%pDv|SXYtnH5nIEqm-_xht7D`i-CgFaW{82& zFk#xptcc6AhsZn}IWhJHUx*r7%V_7mgMeScAdud(F`h%e80hiPYNn7EUTNmnDl_AD zFsDL10eK$Lb$DT``(-{L9L8Y3B{q5C)-j2)fXF8Y_Ny|LNpS{D^8jf{=XZ1W?)~XX zvt;WuiOZkUZYR^HIzO+M0FZyg4^lr;(G1vyH(Q06W+0VK50{3Me1wEY7~bcLn%>;u zKn-QOT6LM3d>@*ko&Kun#=txcQV#B%wliQM#JVXHIMqYQtjC>*)9=xcn>NvxGb_tC 
zQHfrX10Y-Oeh0)_qV;$w-HEL`K7JcSiWjw{|q`S3sUqyW1RTeDQFRFoyQ_FD^eQa)E{$}rlpFFzr^>te&1}&J zkz>$o(HHU>BCg%*N;L>VBRKDyF~1I(eFpB=>(fy4F@WABdR}jNDiHILI3gR&k&ZPA zDNTuw)r?mAl^895_!ebJOv3wfK{k@Y%}8HCq&^-PX*mIFLLt^nqL%{YK{bna)-N$u z%gi2#-d)0;)pKUyijtOLEIZC^Tu+-!;=4PgzSA=SQIMA~r?Kv^Y&eNpPk3`7Rl-;r z5zt;_bUtC1rs8geuGJ=sA2)K;5e-BGW@}O8x;bQ=uqP6+n5XCs=<2R0o*Tov71sjp$_MqHZjCAY@+0q_ z4Z>VsKG269FPpNyjhP5(G{LUuz$CYSvLP27doWHhL$zTho#Mtze36pT*x*u>Mg+|R zcYr}XQ;poc7s)oqP= zITad;=gA|WoQskjF1Uq4W4Zm1f(NaS%6Z}Cmfq}rd;1;S3PGNAsv@=bHF6t_T)6t& zH7c*!(yi!zSS_3@kGt<2RFhs^xdeGvSW~>ejq%}Hrvua#vA!=C4%}=9w|5iYX-pB) zd{3TMzY?XUw=W;=zwvn3U_BsHu^#+{pW z6R?8)OE1sLFDva5kzsu1%Wr89XPo**8z@uV0W(A_C`)Hy2(7fI zyEjsICp`7MNm|u)DA~9j>-vm&#>Ipp zl`7wSI{YeAj&z+xoB4EoUoqImxwQ<71ROFUXN>KblI2td+AySm9)o7aEqQ~{df#Sn z#>N0s4nSj}+QRt3JnEHU8b6zT2Kd-O6Y)5WA0y#q!B&-rp8@*1>@o-v4;100J7G;b zH(+1bXBA%9%jHseE(^Qb4)O7HNG}9>#(UC^zn(BFA>r!yO7Fxv!H?>qIP@Uj9~AG_2FkS-r_Wv;U}mjx&S%L?f2^3xzz5y)fPqBrC@GvLYJT7#F$D_UtjgI>n_VxCw;m7{YeSv z!NqZjLq))d*W2q~rd3anJ@B5^qRGcM$63VnMdF;+GhUFNx;Mm^#vi-b+_G(^=!L#i= zz2iORx<@&I&e*sg&NseaLy*AQBxDVe8UGN~oklajTU z_vQ1zt-Og5>yY5uI6BzeyoEM~9YRqK$Id*B=yxd;UAnmE%2#MEkqW-MOnr6M?Jzo; z)N15I10mpVatGtBh8ZM8cJfF>7(b5NCZT8>&t7{L&!S)rGC}HnM}F4BcY=osp}=_( zzA4I_xVO+$PX)z%{=q!+{QRm4T(UJm20liUtVfgSX_J_XxQiRRb%#bW?eEdgBHG&* z{m}jnL&aBj#>(@{pl&on9hF*nr1JFiJWPV-iW{9b7knC=G&+?H4RWU6Xq9(wV`>kq z7jI!dI9L_VP1?vTOgS5jcMy8F%vZ~RB<($%(xyQ!G9MS|`L+*paB@B-@8w1pEiI#`2@>sw3a*1y z(P(z~e?VihH&XMSblw{)Z~Art#bs2gpbU5-!>`c`vjXl~HNaB#5(g^BaW8Rr&XUN9 zr(^SI{D2$Bd95J|zYO!VxK}9V+2^#kxo2+~zT%7sS0wnbCJg9LpdS%r(-b}kwdR7} zb@nmwy?*MB4TiVyYN$g2`hS9hOaMfjicA66T}N^(x#N5YUu+c6V?PxqUhj zFr$-H9BnT@sn@UQ;RpxELhByLBj>GN;yL|#((CaUv@zS>#zIwin8Fn$ZQ29*X5BIVFeixv9P1f5B%oM2WmERYJZZ0q4D0#m{IZW2h z?~LNR_ebf$vib1;mw`??s*98Nf{9Gi$-=B}U@-H|($p)RJ#l~#lmKGv z)4m8j2ma^RD7qGVB&lCJJlz?Ua(m4zDiT-0ocWjS~`yKWNQsW0-PLy{x za;tN7m!4?wwKQ}}m>(=}OV>qFYUCftd+RwQP{8&zjE&&ldn;NjXk!oL#d0x=&a{<>CSB zx?F?T-r(#dBI@=bONaM6Q-4gkUW5G}kNZX1h9joZej${*x54b)+Z%V=!EZ)m=5)8C 
zh@f|{LZJx^Ah4GvS;sTjAcKn_VuFs!CYfc&(L{OFaa1i-Xw3rJqT_7t9vScu*I^lD z$Gfk6-57U_efP8utNe}y9S=4kC$o^biVTG#|&Evnh+cDAoNpIrK-eY7KMNHQ%xGK3#-m-ufkpkI?( z;IEen+3P0W;>nbc>lwL!$N$rE&7_l$P462I24RlG8o8#NydcH>&&*W!Zz^{*=Hvy_ z9K6TpW8~=IIxlOkc+!87Z$VpqAa*TTv#?b{c&%N24;EBp#6?9lX}IB77{BG*xzW|x z>0}Ta=6H0n+H|5A($IumcL;4d#cl#>h|`lrzY|$w&-v>~<6(oKSdO zWeJ5nh;$~V`2gPB%z^r7Pl^%=E*ygmSpX7A#*Ips@RN7NLW3Cmq<`_U{jndBLK)ww zc`^+ZVma_?yVkGb^I0i9{#H-nojVQY&VXNDb98LuJH%l-s?>Kh8LEF8ETKk3e90Xb498G_y_W(V1}u`|Rk{loEY z14G`<6c`;-RC4;XOPkCC2BD%cS1AnVGA`wmJag(Bk#pOw;CFzIu>plIbB)JCS7G~3 z#(6Sy+R^F3;SG?J?I?u}Fi3y(^+_HCE1$5h&wpI>B<+EBqKRJ89?+yPX^4vDOifH= z-K<9uU!)4meEO5%kWR#_`PpYj-S4053Nv_K^9?P*>$=h$03%~Ezn^63ylfQfl2#@TC2+yELN&%>QJ+<>9>fzYi&k}{Gp*cWt0QNP1?zy|+*(Zv0lPs@j? zSpPag7P|$P<&=yfk(gDL*(E;Xg#8YGZ-TJeJ#KwN-+<}vWd1Pd+%j=}w9&AVMEaN| zLIjIFnAYjiAIU$3+h_%BwSWN!y&Dl!N4v{Xk}^ok{*R_oR(-d!TTNWdcM~i;fhkHO z`}u!2swI})3m3Rx5V3OG3_d=7!NgeY+8fE2kH>a`8DQ)Nm9{Ym zdhhUa#nkvsE5v~eJYR9Fg@CpZM;eBs|1zf))$y=2HdzujO=DI~W!*l1Oy$JW-97bk zf+K95WFP0eE7z!X!~-(vn|iqy8C$1d$LmjX>Q!cXMb;@?R-R)HBU zHJeX$XjDJvfI)AQTZXsnPdNAH?~N;Zm2zHK>WE{@0n}-+si_>9Km{b6S|IdXL^HS; zTQlL{0l>8E&x`SsMvHgp^5cDDtyekRtLrt-T=z&}UiIRblG08Qm+8HGL75waVV);j z!H{Z~m5L9lrzX_MU~&WVUA>oDy1MLu2~W*W%J_px(-brr(69kEfAj4j0Lo80neyK3 zD1&x&Bihk*KqDn<>v*NG>lV)1u`JpsD9DLf&cXT9GOuWGDlI}IZvo6 z_sGHx{2Mo3%LM0kAE}_U3dIu5+a(+Qgf|xka|G3zNuHc+OxAV9-_|srZuGq)O54yixdR#RBMO$I~lNsPn z1f(+#1<*ZbpxDy93A97BXZQ7zk|BN)fj@J=*^ua8&jhyn7vE;HkTy!!ZSDJ8YXS%t zwy4=*p*z`L(d*{c8NNl}BTuq)XNj?t1K+NkM}0@EmA@D%BZM>85M2AsKWV0wEe% z0WLW2>dBtTsn#xp>u5q8Uat$!Sk4pO!!ayAZYGg@D9ppm@MsFqN}3NB=@szv%+U_x z;Ri31Mn}H{6sLeR7e})!M)qsU8MkY{DOCYHjSW)-ZHTthUJl-<)E3TzPWxd;{BFk_ zj5M88=(Z4hqlm>5r#Jmhy)}>Yx;(e3wZ|;4!%iB1x{o)@h~cj_XI9~2*9^eqC16c@ z_>+|gM`gnI+vCTS7Tsv>&6p2fpcvLLY_Ih@J@6Bxan)_h__|bXaiEts%aB-+bUDjY z#!7UT{O8Ql)tkST;qSxs%fa5ph{f;e=P-yYy<5A{BZwpWPx?^rws!8@QPbC5sbsF< zK-ceesKo5FB%nD3IFc^^O=ZgRi~8=@Gb0ye6B*zmxa&31=xcDYlhpKj=j)x_H9rFy z_fgS}8EtKPQ>%fJRzWB$5`-4fg2$05eHdR*(B_s#?X_qd32pCx|t@uQ- 
zx-weI^nZT%Z`Me_6bMMMZg=(G!28_J5-tav0@$1Ro$Ab3%q<=j_ z2+8X==E4G=*twS^5?iCmBbtaQAbV%h+}x}*w)nX7pM&+k5ejZFJJ94zU~t@Sxy6w` z{#Dj?KnuF)?8dOr#^>efp6na%`+Fz-f0>xy@c{0!XKxXH+(NNpi)Kiu{6yW>qLYlh zI#^JfEhLb~MjpIGC|zj==-;K3_rEB2e;>7dNZ1n(t>ZDeN}VGduo>UB!s z4v{RF`ut_F|GrT_Kd6D$irzvAjQ3KoAN?QO`_B$hFykh=i^jb4F*O|X6}^i8|9_Q{ z47Se+NkowoG~EK?1-LmY`y`-*5Ch1L>qyC|SNOzn4Bx6q#P6g1b6I}#Mc>B29+S=2 z(;}kO(ARk_r_SLJuG$~;*bu6jaVqIdVLh66p($%)CeL0R0yUhRTqsd{;#K%8PUhwG zc(3=}7O+}>FEnnOev>;~gF$(YLHVv9_@PipC#weF8T&gCw9cj{p}KFVU2L@Jn9Qo2 z+Br;Dm*X2u+z<4ygx!}5ZvUPF%7$S9#OF5?)c>*ruYrov+^>p~{E7NeOeZ(y5(ynE z;SC-EhTy=!u4QKmaqe>SF@lbz5fkjBFrP+bhS)kEogNHHMt2+IaqIs+pocDV6=99Q zSgLI_92nOC(z$FP2(^>NfR*!a+NRAlrLRM0?J{Xeq9ao8%I7Mjno8IY7bP3k!CKZa z+xQ`6Hk)VjbxJb4(xOLuJnE;R84Y;E@`gU=0P#k^-&Xy<=b9t6TY{}G7y+abW2gnJ zU%%+3c}>yvMDOSC07eSsRNgM1zd6S@-A-I-1m*qAx+SobK&0wOZ*ffLGLmAG^Gd|x zKV<0|Zja@Wi^}_xey3Pihy>eq9{&kDRt`!5t2unBRZfJZWT|EEa80QK1int&D+4(F zE>z8{NV>3uGnqj4DB+S0+`i!->T`TPRWYm67dpy^;do3r>%kmZen^|03rvUuF_9QdZ73D zuTJDO*xBw4?;rkn6AHEO-?NW>vWl}EuT%y+#mjCIN-bhwo2yevO4>LlIEXD4Zk_i~204!wDQxs)YwNkya1m7wHE3e~ z{thtBEQ21(*F?-#`|QU~0V?<8r>ZX(eYln{di3-YGy$&TE}Yt--SE zC-&WaiLqF}%B{`^=n&!vk4}qwM@pt2++wPeTWpuxb9W7AnsNscAId->0wI*>lzD}a z@SqyPmzXg-`d!FG|3Dr`;{z`ZI5r3D+>gVbr_D*|b?Qdd@QhPBxnNrr4a7u|2Ve#KG0k+_F87~T*=Q1Pwl(AXE<5wic@{e@ZJ#CtF)!M zNE2wH#`jh487`9SmPSWjYDDgis3#b|Uk*V`#p5w$X)61ND8}HCLxI4Bg zkK{X3Y_ysN+fLLcIC7eD-aKEqKEae)X`eKCOaMjBhR59N?lk@++)oP3vMR;1bm`O@C6?~eA_7|Noz~{4u4hUiRN8lw z%c&6LaK3zR5?AF$pCRh&S@?0RVBOLGtUC}8Ihb&c%8EbkP=#qItw_$)T!pC&d?e0u zofSBL6Qibq`EFBkJ=F~`kqh+P!PL4e-vVU4aX58JiNm?834M6-Ay!vem7-Y`6W=#^ zA8c@e>?#3BCUx|r3ML4FQc;-L{-{~xcq!1T^jt``oCtknev_4zRiP`9YaSRQ_}Se2 zYd3Bblv#EU)HquTd+ucWfYt>r9qnjuj|L>AYMp@E$z&$*(DRsJe5owab9sGBLPS*ewkme={bss74&%ZQ#a>#bPf`Wl3C_vrxV_MW1w&UoYnt%BCf$wib24Z0G_Z)eZ^Q=#TuAT@j-BQ;gJ0uVZaelk#U3OCtPU5a z&Mho-=4Ha}0C9DviAZ3zy3&N5C+rtFO6qTc3(luuG~R(DmIXa)(E#1n%7^R^Rp4-V zC&;gdW`V3`oJM4Gi4v8V%WKY$=JLpyvw}uPjzApHl{AyjH^`H()g2$t8`Jq}(8m0! 
z?nsDV`v1~fuiP>`2aSM~7jeep(Rcu(J;kCQ; zmTw*`w4FrMuP9{|r5jwgho^w> z5;5$$@k^~`kVo=%zSROs2)|kp^`X=8k=GFj0-?L3jw@J)$x=n^yEdUC3dL5!G0nme zJGGp8_EqWckjVA$9KYl2OUH9S=cOAFo^@KZ~Ce>e>VfhTIsltlBBUG}dX8FshG0V3wlwGlHWQ@lyfY1(% zL-8fH&ow-sw~|+ppO?^kA+$r!_8jL4fJG9p8Ft~AR|`r>Pq$M@)+*G-sap&sU5q+= z0(X8Lg!mb;xvv(3S3U=raAf7uQu)f!w4s2~t{Svd8yUP<3Nyzl12K#J^>MaoA5;W+ zyb6FCcn*&=3;Ag0Kf6d31R8^`b?KuS$^4cn%{;emHK$$TC7^ni&QMxd&`h#(mG|Sz zgD{gbAd@**=<;fr26O|i1EHIJwGpY^gEp%j-w23*un&Eds45}F+wyQdBkL9fYjn?a z(R!hmKJ%)HDLfX2wV_afy;6;?w$5| zBd)$T|C||M{CpB#^@M9EkhbNu`{$H(yt^e;dl#**KqfC9tc{iP0iNuT$Vlz^YTXim za<_cMI$7Q;QJCUm4uy7PhTPXOpgtY37Mcg`^?w=45BK1p zbjOEE4$nJAS0&*;wY?E-6;a@`;@Y$PeYovPv7&J z4NZ?yIKB;{1n03$J?wTJClu$ixP6?RP(8lnlUeGK&ALYniZ1KxaZ@_jh_y4c9NV+( zX3Lp|{%Yy>JPVR925?URomoo6nWSC5#6|=eLb|Trl2(ybCpx*LJTJP#8$BvN)cUe? zmo2g$fp1>P{L`d*Z+*dQAP{lM-vcMi*kQHB=8^{5R95o9h}3M&17I?PXdIfbmB~g) zv9a&?Hg~Ii0Q>rn=0L(7P%ABf(|GWVhHR&p_+cx;>BaSXW|=YH+9-0`NE2OGhZ8=1 zx|i_weIUc|OS?yLF$JvH`wmlvrnrT?eEHhMUvI&jmX_n4ZD+wZT=ZEeYT|vl9(al1 z(ZdVV{4V;}P`UZ|ytcvOPj8=scyh+e@XYMY^?A@32JXxj65bUe!`EK@P4E&hsZJM_J@)Cu zIPs!rb;=xC-}8o>{S^q#oi}aYGeYP>^-1Xq&d2b+Q(y9P7fZjCULET*L#W~cZATW* zD9u{k9f>^h%McThy_M~Kp9EiO&RKikec}dxQrH7=ebq+f|_(S;9C z`U6zyy*J?md^y=&QVPKVK?SGl1K-e>9xn=cg_6uJol1n5R5Tb}xtoZwPqO6PF1!zy z2E>XCit)vcO`PJdc%tIw@jyK-(LEfEU69bDdfKY1{nr-iUZe>SiWak&1A453mSe%5 z;>Cm|THm*i6$onEbOTc>t~xM&_I?KX8ojH5-&$S=qiME3pW?zxe#@`8IFRh$EV9HL z+Mr+W*|{TSIYvZt-y^}7M$rB-%Bypuyvbg^?6`mc^f2ylkWNNy+bBCZmDQuZigG81 z{h2dr>7vDVze#Cm>%perJB51Xxz-Z?kCoU!!hsfbGw}pS6ONach%}TrzkQzROyDTU zPI`!&Hv9c2hUCyzoliNzcI+o!^SZ|TK@n51xg@*4H)Vd28G9~y*|KL1j3eNHz5o6YNNy8h!~MpquVER~o*w7B^bu=6 zYh}N4Nx-s+0#UM7E(1N2R4>DJuxIIj2T?T>eGyxrmm0W5tz+}O1lRZGmx_|y1z{H(HQnV{U3U|vxAr87pZ~ym8bG8WMaj(^3Pywz~ ztF`R7g6bbht^%k$aA3tkASTbrf0Z5oSwArF$q8_0O|h3Dq5oZzrZWUh{R)sLJ%a<) z9P_RzNlyW4^N-Ev`Ky^nEvKOySM&bo32!E-p&bDy@gIvzKG)o zz#{uZNgTab1@rU@|M?6kGm^qoSnA!z@}#Fg;gubj=MvCE64xly<~r<)f@Oc6_8=a( z8)bx1KO!S&irq$EjZ(x0?fGU&iX&VDf`Sx4iv8(X-$M;e%^cAB;T`}rRCeFv{e%xj 
zbvFzyo#tg{zlU!7n44$o%YyK`+jDiOkd2p@mz)9+H0>XV-?v{9W%2YY)~|?_@I8_P zLh;!kqk=U-Na^Xh#l83A;Sn^)*pn53XY6}40{bCsv@^jYwj&?Xj~K?k91yxm?Mf4IRp2(Rf1g<6dt8ar#aZF4cAO1LDFG2KF2fVp1l^1| zQ266ujT0+1Rd|ot(3i~=Zmh2`+2mnklLh@jI~?bGN~GPvBG!MXP^a)9(DjKCX#Pnw zHN84{X}*?ff|Ehm4tWcN(2nlsUnVV zX3~-~xu3{vR6nTQ9!8avsm2NL52?Qr$L4a|FelCu=KaVDA78(afyHCa<9o{E!Pl?CaP8jCuPw7cQLAHbbws=o z!im~OFr@BW&m1T=sOolo7#&H;`>^L2h=e7t+K-jx;4Gj8oayhoiK9U-0tX;`9L^`{3f0KPY0{mx7n_sCS@(c zH;02cu;8ACkb*lN*I4lJ3k(e00gHTGzuBttdLwC#=HTl{ztATwUZiD&)#JzUo$aXI zlaE&(TpS zx@aJ7H|wuk*I;eSqyzfoSA5L0PqM<3M2PnGT%wqPM^T=HK##TNm!nmpS0Vq;WG(e8|HUb!? zx`0fa?i$vVPVDyYlgDxuwxQs8K7DNQ_CW9V_LsO~XUUbW7n7GucXwSpk%1K5i#Mwn z{V+i9JXcwB7V%b$vX_a;9yBc_w@=^*vsdqC5m8CrhT>2(Fn!7b6%XXs zfY}2hZ|o-fdsyUVzGhy5E2DP%dnwtOC#0~9smzjQ59RW?Ocxvbb z4fJw?P{ib-gPVzlMl48(Wc~Nhsq=_Z%zr&w=L584y{VzN$s;2}!!)HdA$90J(2yLy z2BXv@powxU1)^96Go=^>CX8CpD6q>~v6OPR1MJh{&HnUSl3Wl8po<_Adr0S|qAeCkzK3qanU4@gKu zVW7^(=YxKZSdMnZRX6;2^gqdgCuU`m30HICaFKL$EV#Ir z%x^UGWI{|rlx$nTmBYDYS3QcEa9f#fev58O3S_X|mt$^#C@l#dd6^{_$AX2U^- zQl`YD;WQ{FuyAlFVu1s;y)M{Fh{ zfyV*BjZ(ujPSR0cIsOWh<`xdw^6tv)g@=4I6|R?%T|7;kz*`i=LP;}@4>onml$t2O zV}2^ml8s7Kl{kEa4+jpp3nZa)z`zRTnbf%{Cm{FN#xz63fu4^X7v>|Ehh2Nm>(`W? 
z0KGV5cKLxh3ZUTR?YwdE0|p3f@hWM33{<$^LcbGA` zD(Nd4m4w^S^pkKBZsw4t#=FRoh&u&t$BwgNK z8@W?g)J!s1sH56;R?k6~hnbe%ro+N6#ov>6QsjO{-bmT`w|M@+SEPz)R$qK<*lKv* z_FI798UK$%RF?ZNU>Zc+OIqJd4MoINGF|H>h~V8Vr|kIH!(P7)S6^IX3}Sxp*0gP9 zsIce9TiP4x*1Q&(qDTsQ`VY+1Df-XOqWcwjItY2{f3Rb=8lxxksz}TzpL&W{rh^CW z0tcv^)SPT(@6A{cJqhXd3(`l6PcTHKHK02okpAa7IFUa`;xR^9557lg^}lW5?DS&r z_;EG?H02o-j6q0Olw$s-CZKBXM@3OtNj6BMZMd@sLA$OH4nN-M;u)fR@hleLo0f|j ze(H9vNPd=l{q|;%y@%;_7`tZBt7G(RlvWKFwaXKw&$w=iys!gH!<2E?)Ti@Q-Z-Ok zhlyUq<$bJAyG846d%K+H$>HLVmWLU zhmpNy9>x9P(~B3FX*x|xjZKoQ2H(frv+8g42`|&LjT@*R{hG*SkTOtO+?Z{iI9Gua zas(-}qo*y`?9lvsj|>xnf9^NPQUihO*j`Xf%)B(a`?J{{_OLz(*3%!4BLDYo50REJ zFp)x)EMi=iNN8laoq;UYKwz^Gds|cJ#!iI z24KUxR27?W&1%Da({%lP-nxIYVIbxL;)ZM!Mt*EjvabrL>-_PW8v5evG-!+$qRYD= zs=7&~AHx3gJ5JX?nCG)T(@T-|2taE7{fjsM%#8p2r5BITRz=BP)b0Q@iQ19+V^ssF z{z!xYRSPZ={`Yh2n9vRhw`G<;b|IGV|4D-XeLr|2;e@U4(99d2BogR16W~NT^@<iZXVk* zf+XpmiUN4>gtD{|<4&5TZVA<7*su(J_s$=ybPbks4F(8X#4SwpIwT@ol^`Jc`*)qd z;d5QAuslGX_TO9D%8LoE2zU)5?|;9(ndkF-eS38wXdfhZNe~%_{pZzqT;%wm$Z8B= zQa#NN^vW?piGS`Xlo7)?dG|vGo8$<<-%tT9!?o`fiohFZJsbV_Hf1^5rKMVT-XFvA8252s1^>f29Ln>Om$GE7N*>|nL!FrT}sDkga-^M~~U(gy@hDFQaN z*PTJ&5ra!Bqsq(6q0IsKtN=uNH!u^@Ujkr3Ipi=9SZOue0*z+wUO}5-r5zMX%ZL7mXs%m9B~6=3!gx#MA6jrZ5IXzw3a>jat_ylrZD!9w1dn6t!}2UUca$91s5Ur-QXWn=3C5QYuDN#lOP77ybruX z7l`Qc;5S>*ey)HiP z1|Xsi@KVB~*4M8*#M?Unu7p77k2nlizml!-I`@(h8AebOW)Dv1w~VX@g&Bnm$&>Zz zCXfpiAM_%e8?4*>K9Yd#{+(MKcJd{3@#~S)0t)i?<413WeFbDUj8akSmOO-& z1WjNj&DjfJka0nPi%-L3oqHE3UWGwHvdd1GyM1= zelK|Y03OA#c?)wFHU(Nx2DbH(x~+1HGhXB{JQDi&1P&dML{c_pX0QaUp*Upf@9zS{ z2xqUu^pC)iZ8(@R*f8?iPH{u#L9=HLo&vtIddd?wEs^pZ8eJ-iB_Gq&ML) z`lbQN>{omZ&&pHQEhMuqWeSLk_zF^gKqBJ!$;@=E5mw+Px)TL=! 
zE7d>$=RffE(Fvy-rb0;dMm`)7<&x8;9--D5{&UQXuMf`J_(DtF4%lA^#&T&&Wufx?TYXAt*# zoUlAq?I}*V2dP$U1h!Aj+|w??l1D!8%|#UE86)gy7^wiiKrHI?;G?Jrgd8V ziDnI-iryt=C)xddu7Uc98hWhDt6~>5SSt7}r+z+RQdZ8Bu!&n;ozHv2=i*7FDgvcT ze_`f_KP#Uq1pA5lDkx9(@VP9h$tx+vudUe%Br<0p&znCV7p6OweinW!hgiUDL) z;SAjkdU`Of{|sr_vop~Dv#^%rtDBeTdp!pCGz1W_5|SAe)CGO?I!O8&F*}Mw&)`-^ z0Gbo;k}BvxADBs@cCNu{fK}kaXD|?uOTF`B6PFZki@bD9z3ELzKyYWSqE86_G*>lq zLkOE}nV~*_2&bAII0Aaope{NvyZF*?>57RS&+a!g!F0%{PoD_JgGNVnYuwgTtla`n z)lW9J{v~S>teKYSwP} zdzkxXjb%ZB=Hj44Q-K%XumEmozNM;huXt;$7j{3KM)1S37clScqo1raTjtH+;rN?P zrzd9>!+3mw()?nT zRDFDMH6ikIfL+f?U){^H{T7CkZvc zw%-b)fQ^LHUmdm^otSUl+@`CmeT42wuX=zSQAds_9l1iYP3XsTSdXv4-fgW40oP`l zW|tCVR6Z@f)6RtaP`6Rwr&2D%REo~9-$*j9J=W!^nT!1ha41^|B|bS*XEGpuC&uG- zrzgi*pPt;Xw#b4lrn@(ToOGspL>eh*-X8tYX})u7p2x37!g)`4F>PS|RuULD91@zn zPZoRIYx!Lmcc{h%^#M8t+GuOM0@IV-0oCA4?3$%I=2}v^THQ7lK=rTShAQ=Mz~aL! zmX$I-7H3d5xODR(9(4q<&r^%2A^fWi>fG$&Jq!_#&Ilgk4hvPuB)Al9yblNVoO%RC zT1_vos%A_)H~PTx;jKmUCV%E(H*N=D+hN=Fq^3$6ZKZrun~RXs!`kj_&L~>pvT@^* zcKH?S$QYXdImiZ48V%3g{>}D6^(-0JW3Qa7J;A;R{8oCh&1L@i{p>iEP@d1W&qH6e z`(J~pLJnr8B*_IMNygp9Q6ov^6~;0gIwi%@r@ptu4*C%-v2}?RRH?p8^DXb*dm|Hc zT7r5ETA~$$6A79?cPC(;MC@AbG@nh0uS%V#%;@$Y5S5E+x>4o0pHWE_r&&h))Tyg- z-LyYGU%7X=ASP?($kwv$CC6;dTyx1-O`2M&fxmC*+VY|aL0IC1o}XUGRw^FhjkbBw zOfPU6A#WqL%uEirn;>GxGhN%1GLf~lJ*7mA=5o5_Ff&8H$slE9nSSkjmqQ9nablyZ zq>-%Gwf$Sy3ZiW@Dqr}exApj!qvJ5}dKSLhOgz*I7AjW&na(7kk#4%wp zix3AU%&SL7{Yj12nq==W1)xugtENJyv1kM&(hBaL23ZyZQ<=KM(Y%igZ@uZ?kM4fN z4@^N5!Di#Jy?9FeZm!bF92*3`>13~@1>{d78qDL3=3s0PdAOJvwYXhT;#;gN=zR;> zj$%%^O$e$4UPi5`&oUIg%*2byvTHjA%=9rv*>v+IV*%nMVp|WoX*xw#OD8Q5SkR0N z1TF7)ZW`XFk6wdD$VTvJUsl#=p2RytHOuZ40ggzf6c_}CLBAduJX^2(8k8*0Qoxqc z&i0^0>=B+Vy7)3&Wu73Wm5{7IbyL%mStP-EhBjaGu*WP){6?hcMs@${-l$nrge$Kn zypgoTHq4jQR90Nwr;iVG6#LojR~!Sh)i00>bRA?1Yed(kuZFIxO@;DA3rPSnN*(GU z_n!XQ2RjX$zC4YiolSK!p&U=eqnWu~@jbhQ)eqD%xB2{S;(N2LxG&cj%a=1?mt=XH zcASg?FpGhdhhdzHq7s_J`B5HJ%`kQoPN{kx-H(h5JtVNLms{9PKgbjh$}aZnpetuq z;}b*cPJR8an|fc5CUp8Mr5|M6>-Q?ziIQeTZoTGjS)`v 
z>r|Nii-cKeH1{!`=%*>(lLNg?lpD2+jo+H~m3-&5m}VAJExT<;H|BLo9c>*}ln1iV zip?b}hC=tjw0{ih7Tu@}In2d9*I=_|ajyiwbGfb;uxz;^gSYBE;arZ>f-0{mqgdn{ z&YtH9&(zyYbM7U)%BC8zeDB#7%U=D12gFS~1R@XnD`Su9oUQxv5>K`?C|k}b{}2Yi%cdZKRJT^U zA6Aw{=^JAH@JBTFt2_%t#KaTZZv`kOhUn_G%-R6H6!+yEF(&3{*?27IWh3m|!LZRT z6Ewtzd$T|6*}MnU!?Thyjf{e3Rj>71nMQb^iO$eUmlcWrY$Y_AvQqMs<7~Wfc~6y; z`MwW^Oty7O`kGam^sf}MGk#Q8U+Q<7qaC5}RDnHTt8e{*d1c+5g92&fzQ|XLJtLq< zV&BXIeujCGfro*8+)%R#-Ny5d6E_c0RQP@zoi@Cvf75}i&Je*l`~NZb)?ra??cew@ z0TD?hC5BW=8l(hd#(GvKcD-vWYs4xbdNx`W5?sDb0z}XrX1BAgJ)Vqp=-3=5qwGU z7f5C|>fb~7kpM>#=91qv^jb;e=_^pKOVa7bFL3lfpXSNZ?Z+2Df9v=JS2(6^%WT3=7MgphVKMeH{Kk4xj(FB=RcAIYpQ&HviTgn>`>)I8;<4dlXW zwm9Z;tW4i3HL9=g>ppeiICUN(!0Vn@=(O#`9G)ieGj4$;lRB8re>FB%r9n|*Wt!Tp z?~}L;c7m&8a^aGwGuTfVx?6OZ?>HNQ(aom_iwD_dY-b66&X4dVAKpOHK2zURTaeF* zkkjXR$Qkw-vKA#7r6@sOUoiNS1MxoOHa+TL^i8RgK~D}qN}5 z$00Y*ui>Fx0O`S=Y7qJS&egY7a+ZViy*F+}IZ+MZN?!EeU{{agpID44WU&kC(biR4jqnZ*Lj1bjoGa-bJSwJ-!ir3jji@^L zZPX@QNc%c>b2bN`3@~Q?tPWL>v7~>FX460&uB@S&rD&Y)=XQxQDV0k}I4!oFP$8U+AuB6zn5_;6ZIS?Z7g9k>`iYI&e zK{AsRuw_To9W)O?ea>!JsUH98y`&Dx9eQM#2qC86jt?T~ybRPT}dY zC$asfg7D>~+&|oIx*5{;$r~PbN?!2Xk{rrhflmJz-=YJ_){T%du*Ornu9dhM=K3Yl zn?{wd!v%8;>*}AnSy6Z{$eLiK@-p$#+wu)e=rBl8dw(&aIiOGY~yP?e2}whtIxRGgVso7)F3xnHuq&PieO@r<}p^)wi-Gu+iF}gL(UcJuLgByP7n{o ze}2b=IkVuFQ%qZAf)vk}1RGZ>G#i|)ES81)-nYm)xVaG8rhJvKu0}GLE~>;d;U@h{ zD_o{bkR{O=L^t?{#jLjVH+p>dHxqH_wT*kh#yMZJy-XOQd+kn*Cz(HMww=1iteE3X^ZmTtP}&q-@)UR{$npUz3EZrd(tEv2B{ZaB|GF_~d%`7MnZ7vx>3?Vl}^ z$G@$5h@l|l-pZ@(+vlD*7a{ga>S$z7tIAn7SnO$;QttWsJTF+&cC7Ok@@I2}qG)(a znXq+dFu5U3FeEP4j2$s4?_q>mmz&gy55|OWp`honA7mGjuiGh6x4n!`smifil3h!V zXwJsi$~CDraav@tVNgaVxm;C>7yN-dji9_@6Q$N9wrf;d{Lr5cP;x1zr|KtzTUD+d z$kYkQ86!EYUDq2d2}`xq8Kf;yOJL19S#)Hgot1Ne2TGstbm)QBoDJER)pBhxFC@#y ze)|&~?Q9lfMS}?Firr?x#0 zs+W{g-ivwa#=@YTUA^Cpb7(k`ADEXhWsy=TlAD7!$na8oUUk$hd2=RFD$Gj8kQ$WX zYIG9~8d`KpQz1*+aB(}(jLx^czOmVBXb)CcD9jn00?bodfmoJAO4;x3%!w8F8-Bp9S7q3ph)qg9jC`<^Io7F%rHq|A?08bNa#1E^f3C$UsE=J)6`|n6GG)!R#X3BHYLm8eN~9 
ztQ-rWc-4;?O^9=rg;pTsut*U-7EFplgn#?j@GjA$JO$`BPrF$UBOlg2y(P&K`s4+% zeaJUc-YCoss#qt*ID20^{Y~%bVr6a;PQ>r}?LlEVqoNkV_n&O~P=d`S9FoHUMK%V_6U);~ve;{t8Vy%_7}(Ezp=HESx$SX=H*2wx`e7|Zx5nKZYI1^N zOEZ?i-E3k#kXuYJ?0=fi{zYscsj4<2NS-P$IDa4Tk zHgg^R;i23DkJJ@n3eA@2DrGa8AI_b%@1q(j_&4iUXl!YhQz>5`R86;G?poBpccI!-XG&QexKypP>f zjlaSlmuv0$h6$t5rt@x>aVpa3JbZfFp(WboJqItXp*L|{5M2vz^HBk}!Z1G<^;#wq z2a#;wp_0cI#+&(JnrlzzvlU_I1$ptWs7=y$b3bVS?vOd;^T({jKJG?^u6L{ot_7Mi z3-jRqD^0&~4)HrI3!TFL#c9TGi1^+!;`KtDTT+i0rtoUL)L?x`ASIcC zV-?niZfH_I7keLk6Ap)O{#F!Cxgi+vtiE3G-u+mgkyEo$I_mnZ4?lU4L&{m6oHG*s z$2wk1tg7LpCh2)xtdO0&%wH&?Z&l9EtQfPOAJ%nBF-+smCT0VjC6dQ z1HPtG`kH5VRoVuzZ||JXcBa$7T3j!Z#>{bI_G0F66aSEFpsG>0lS!~Ah;G(r4VG?s zmBs#N_)JcaapH+BVK~<9@L5PUCy7GI5fDOxyE}H-Z#9#(Yo+@JgjmmQ@n%)!2uCmx zFd=oR@`_ji>5RsjjFeKQ6_LiW`a~PrW@FPfq>$qTf0=znUgo}fcVD5>34?4*?yDV2 z9L7E|<(iMY3!$5gsA&6ZEs^uD4Q3TjuEd7AGWK-F_XN|;E}|WZk$md@zm6J$p&#i5 zHJFt77Lq$8pj`Vt-HsBpG}dzFHmP{Yt`|MV7Zut$p8lUF3=;w>s3ljzaXGuMsk^VG zW=F8D;9ARzAbaUM*^<*qR~{G^xiY-c_14K2`|wUGg6N7uW~`AN^{L9~5*4@H{0by7 z`HBLwxlJMWPYIO~@F_>LJ!0zkEKX<%XER>{evhla>>oTAG(-5OfmNz40_3pXXUddM z$JyMd-Dro{qZT>^ly!3*o;)VbNH)qwZX}}O=0Es78$g=|^QO_!4JIeNWxCx%fB%7e zLID4tg6E7a5oa8VEZs!s{q3AUk|n|`W}Llmz20)o=@Fh}sT4XwH^1(tqH(`ymQEg+ z{oe6AaVe{MAj}3M2f;|K2bee-WU+=eWbdT+%-SpUJA5l%9(wLR;`GIgkf9spJ~x3{OF54Ww+7Lk{IMm=f&%wWOau*i zV6309U*EP>+oBF5+e3h&D4`TTDQLA3vA zk8$;yM1Av<7=|ZQQAMH4%V$iyg3%7ks2|=;-t7H)TJ<05Zq_vsDRixoDTW}Wf6y1i zT!)GXhMOZJiLE=`*_P}`%VQyY`;@5ZJ4(I=Gpt^6y5>L35owPD4#A4GEIIpPQfT_m z()YGM!3ULFqBqs1HEQ$pypLlM?>76e~3J=50kFM7IU9Fg>$O3(CvP1|PvFr3xj zKm$#1%+^3Ys)L9Vj4PjD-1aNUbKT83HNUntY8vgtUB<{WFhl?g7@%UY$wWMX;yEHxJ!R40IE?mVu*D3D%smmhG zbIdH#qN+hyaOd5Z)rv`Y#Ons!_M_ZaLS(SMHe)_ArJ=MSV~;mVNmkTU-%g7g;H>7F zBz9n*3pm}k|Mn(lQ2H7dUON-Uil5|D49npYn@2EhTB*_DAY=7Nekwlp9j@6yYEG{) z8xM9U6TwJxQ~6`_vMVz_BsT$eC@$e)!hiN@6`0 zTcJ$3d{#p8Omn4GNR!lef`7BM_O4QBK+V;RNUgp5!27o9jbz7X4ewS4=ZAeYA9A{CdpT|WTWkN=rxkQNt@+hu(z>cpky#k zX;)+II7^N72g@yLP^f#fqN2xn>BK$K#&4r_gPUE3Ux3ILZGG4WC(7J&>2VYAJy1sf 
zG!yHq?0?)-MNCYre)0eVMd_U!x|6{mfBkMr5(m#qwE=F!2I*7qqRSsBeF02?%O7Ac zWx=tpyZn*Cu>X+t_j8xE`ujP^f8N0U!~cg8a2|vbP~5v%YyTa4Ge#nblz4+_L-ew~ zVF!QwQ%*&h`bpTP{K7He?_I)wz4iR|e^os)iR|6VMNrb^??0ae)yv)`{pZGiRbOn& z{(11&Qs#1`z%|FBAynu-!YY zeDVt@fGxpR-hwa_Y#yL+*k9L|i6c#H<^E$5URLQ}Bb~kb{6BAFFTuS1fB7_M=l_Ai zJYKtusLQ{*!+fwd%u3_^^Zf_xR|!|({th%X2#d5zZWU>lA^h2~ZMxO$uH`)-`Pu&6p#?|)2w!dCg-n9L`jn-yOvvp5f6Fw5 zkEE)?4C$^zit3(m-0a$=k>|@oP-YF0=8Llp6TmRAN0Lo5q9f-SIfd+}$lA$FNdpeT z)O@r1$^}HZ&BuU5p(z>dYRAz@Zy(^Nne0W8T35NO_CIppoCwi~Nh8}ppI12!CGPtI zWg0Wun7OEK*&J8So|;l&k7Z|=ov(2c`8Aw7k?Nk-`U<6h?-2}CBlt?eyQ)4|yzWkT zus74y`N?*CjG3}iL4*Mny(GMv?(*2Lw69y2Yt6DR_3pU;`Bu#m8l>I{f$Rz%xzpSV zmC2>|Z!o@xquQ<42*rqob5&06do9PBtPSPzm?pW{C$ZGby6Y!IB`_cPbx^+I=gfji z0p^!5Z7YBz@9r;F4XYM$_!QVlg1k?k#9%B=xmMnr!ox_5@1@nmo&bs~taH7z5 z+Dcb^oy%%Revx84OZyf`kvKivVz48+%OTGjlLTit3LANO4rqNvV%$SC?*b=YQf5t( z8>+0WE1Aw!?kjtU@LwB1KdHLoe36=G<0VjmtGZH zI0IrS#SF1_P$$U#st&oN1ISV&AlYC=2k?#={;5xVWOkD zkI$QEPycz6wTHMqvV39_Ra|)Oa^aYiKPS`BHp-;*>evbAY80zsslyQ~Tg}pS2m%{N z8E_7V?12(WIYdT-i1MvLmj&I&$oXCUu{EGRsv2-hFvS3cA)nnv??E#y!uW3Rk*2CJzH9)oJ z5_L`YLP9z__FiO=YXmhvKhPp!z_F8juiJ{b&i z&nZ#a88S|Ecv?+loc_v4A_ium{;9xj@BGDFAsSvG`Y4Ip2(H;S#6;})9t>9ICRcN5b&$$|M=;hcSy02tBe}xb%h%v3g^#(BxPzKeNQJ~* zEZ<~fHii_*xv7;fG$qiF>ow5rOd18t~z14SXsJ zHlBns!#&OQn>e!x>$LU{a#cPy*r}*8Q3EZXLNRH!A-{V83Q#7m#OubIleV%aiT9Pc znq3M@do11FU{`mxb7#Cx7n>q8ubRi!IE|{|v3|Y|Zdc{7+j5x*rvbl`y z`~+cv37L1d2%_^RefM_x2|Ybq{(LW^BUIf!&RJI%`JwkS$%hj1Y|x$@{%a|N#W@;wc~ z`Fpc^idm*-((i<2DnRm6^F>{vn5k$lS)bb%H(A3kJTDR(D=PY$To#h)@ZDr8D~#L> z1&b~}Af!jPM9A)1+5+nkW5;ezHe>irycYW3@T=b)o6IPirGi{nC}5bU<4W0` z@NY`$dH~vr{S9(QK80;7nFjI8kSq^AdEiXUBg^z$Vw*nk0Xpc{-n(1(b)P5D)a{xr zzBOnfJ8pj*)uzfv{KCx$EVLNGj8}nsBRZj_?=)M=bW!eFftndUj0?%DvS4OCTC#lS z81a=0?NC!^XEuL25&Pu$+lvd}3J9>j+PVvCcbMz)L}3Mhi4&$&nY*!yOa85mfg(7NRa?% z9*hKS0nqgSvi#!&r}iaiwt`c|Q)y{J1qO3uT59mN08q6LnCOXy92ZSa3OhFLt;%@{UlbfV4D8zF`w>}RNW-5z^?mEuY7q> zBtt1`q}S;m=!8_6i>wA&nHUDKY@qd%7`-RtK^>OaHa>D*4BSxik$qOVUkGB)NVtY{ 
zG1$p-_%nuki)q-UgZx$oP5Oe2HL-I@K-A;O$?IMqb5%wLdt)H&>r@Oc1&OQz9R3OH z{lQy#zP+-R+W5_uE5&S>6U4?fMOkmMV7h6G#e3H&r2nH3dg~PS=(-Z$xCEu$?4l+0N zXWolH>kPj8Bqe{n1B+0xve*owL9tYOWE^E`Ykw(sUuMxtES=q(c|KyqPDDAGIqwmI zNUBWty)B8fT~9Tb88Hl~sk;jQNHN1UP%mT^2iu0f>f7w_5aVgZ+IV><_&@Lb z)zC|?^zW8j`k|Me9cX6s-wnhDDnR#KMi>5aXa8OJ|3eX2&-T)dy!=1xP^e-195m{G zEaK7y{;L*rG~l~j0O9{zq4(W&qVK_gJ*`Ud==J9-_AgvR&0>C}J9j_$@D}dsgY}qR zFNU$YuL%Nvw>h@V%NII-#BTHJ!8uahzv+`r*(Bn%{QT0w&-w)LCDiVY55r|`Nd$xsSAUNk(i zJt`KQHWt=IE{XI5G_ zOB>b;U;lnf*yg&VM-4=>(pI!@H)khhDz~P(*_x%sYnLQ7TfWHfpxiVY8P(SQz`pjp z`ZiZH2AxzkdzWyAZrH+lORsN(7CF$X%h$?iJKpA6cUY}IoiKWYRMyqylBSK7E59@O zoIGi8=U{ftgZNN|lUry6@hd=Y&d6o!lYO-i$Wz$;E<-rBj{zQtOza#LL{My zAE2`$6`|$DfGya)hp~&kwdS;^fr&?Btj2z(NU;3wEp>(hEis8m3LXeHLILvt4Cmm<%2%P_P>iK=p2_&2 z`4(CHR^-?Csqj*tBY0`<78y)HN{-Q^{nv5mm1}y@W9+ooAa4c~Dw#6lc5?!nxB;xk z<4EJif4>`%AxFa-&@p{Vo=!*YiJRfIzuLK?5Y0p#BP_#ppG30e;V&X8g}Vq5C#@2b ziNf6?J%7uRFG|-a2a~DFBW$DkrQYI%29O(3P;cVfcRoAIg{Z)UWQA$)7Q zp&{q+PK3{H@=T{8BpaEZ+m|cDUDE0%H%Cu*^l7(f>Nvg5#dPvRBx9oGXsK7wjqI!{ zMbqn2-KXNU==XxFFpH;ljGsru6g} z@wn*Ry~G5&8HTPdXlh;*TDi{=1kbOJs<&z8vacm^zuhD4kbf^V;>bQuy(;G&%Vp+R z%g~)!#WnuO>$8^&=6GbCt;u!;Rs67rz(W1$2-7QNkgs;~3xJ-}8f~tX_VLltV?!w`2@L39 zQa3Dr`POqm)z`}Nn&VXhboX^5!+rEmI;w*%Sw!fPj(UXY*Y`C~?bo(DYBICIpvy&`mAyrwAoLY>s|17*6 z<6mHsB|oCtY$1oCrhFdxO=&|bv6)Q7^|-=fk4E@Tw+#Dg20AF85q&+@ z*4L%gwpQ0GuztI#{;5|(p%(XZ_aVnF`SrU)!8>)9!{wS=^*Pf&JVoT!N!RNJ&E%4y zGh2;2a$a3MqTS?fc!B7#X@{Z)L{Ya&rP#_;*Tza!#=-0f9_bP4*g#;h&~tWA2=b8v zuTMM*-qI{LOc;GAJkGtU<@+@kyC+|+UgNi#*=?d;2F-OCtwrfpaH9S0m>UB?5>rIUT)tCFmUz0R?m1b6t?nfDt#G2Fk^`uMIh;Sa-BA zKuw8XEGa2?KCZeVs)Ox3uY6|C>M?BKjQ`_vnfRMG43kr3F?XW_LewC#Fy@8O-uL*A zR@HJ4KP#&45YRyetZE}s-Y+oeR31FixnuD91H=Igr9}&DpQ$2X}1I4o^ z_B`1Hk>-LVR@Hm<<3xO=z-C@iCDRa|j^qGqgPYG--2+-`>yU$~1`B2Cx_1h^EWb+& zZ-09&PjQ!Vhwb+=Pp4c)Am&N+o`3gzAzEHU`U3-@h+q(oMaY5uO2b;F!U&5+AnKM^ zAkza_q-(nh`n-4QKL$XbY_WRP;W95Q4$8Vs8NBuNc@VZ_xF~;VN;A2<_zJ6}{%6Qa= zPrq){O+F4CZ4#}t?~k$#FHtc7`L zHhY)sraiNL*@!{+5F`^83OoT4TEB$f$tUjW8``$@77KWySD<@Qr2;QJf4&l}8D)B+ 
zCkSHowW?>4^yWSpTSIBlpGH+oiAB_JZg^A|S+IA;P%YM_OT8KNR54o}ROq4Ro?beA3d@IKpWew{*@qQ2$9t9ox)8^Mid+FHA48M)d6QL;8p>X?(`&45J@eRt{ z1d3GOFo0;vbCW)5K+dqg_NLX(dpW9>;}G*$>NUPa9P1cJ^-qV?-KX?@!1cM!$1fxhM`siTb`eqFN6;Lk{_Ux&~y_zmhed8Eh=!F#T4 zg$`zGbT$5QLx$&JjFl83VrLljhM%=BV%6K?D?Z^r{zs$gNfeC;Z^N&n>BJsFk>wbr zg-~IG8VAE#prpSpw%^OAzKrcwX}#p;u(J)2+=EeZHP-Z)d$`a!Vb@h%ICS18T8B!P|3OL6+F{JJ$9~f!oU(i@+M$H zM6lzI%oLd{P9V3e3(;1~gc&~#X~CCt2yw)$jL7(iVJf_F@#TI&tj;GG5L@xm5lR+R zz@f^v3xx}|*(gne@L3`|gojvP7B491VR*Oi?MEOHXJk@WBb;nZI0s{ zZq{RQv-2$*qjW(bCYPslNg^|-gX zG{mPaB-Sr#J#Kc<-+hLbSY2~XLoUa5|&Ax=VV zEdF2v0U(LSgZ4MbSVimwYpCLP_OoOgM<-*RC~-DBmle_Up9qOCx9j=FkvI$SLK zfXZzx!`LScB#&2~OMoU{W>3`6;^GJwTB%J}BzS+Ic=?;rt>TkuqJOSGIJ*Edsso!|TGkd>oU+z$cz zWa>0#HuS?EnU&aZbjPf2_VVZOthMx(^|5SORf8EY$XsjJ62^3eSYClV+wu*Zor%y( z=2~)yx6=>3HVT4qj3_GXUgwk&X~;J?^VL|}09+&>azKZ1Kj5?R{;?LY+!bu>G=$(s z=yUn^96g;>L<|4U+=jt9sWvx7=T+l)WxqaS!OjAW^!zT6e8 zO7up+{JkD>zJ09>p-yOmdZnUhA(|hLx&M=}15MyN3>Pgm67{A*PHu&i9Vx~hZr)8F zn7kdOxi(bHk+;Dcp^xL5?Jb55JUO)zS&fiwVLV*I{$=H)1@Vi*QJ-u{9t7_~GANgLa`h|y zJlv0xZFbS06zyfeu0RZN}&Pd^|kGUQN2`Y@-<}nuF&F$1zO6} zx$^=BWPC|r^57hDBfjgE1a05@K83hw_YNt$sf_?zXv1pxNro z&@xF^Qg%KvAU$c-tB=CaLf!pz!T@PV0>k>dBpk*#c**$9x;Gxs;QTDZgBSYc*{>}9 z%$?^oq8V&d$TKitJ$-WaA}p|w_ervGc3n8Q_wzn16vK9BLXAvc^Hhd}7;nlF9jP~| z6z|M0>e#Kc*Fdb_6>M-KpgcbDt0@Uq{5<_M*`X<2pG)I`&zk2D@`kKbLPt}ix}oli zN7D9SGs@{P^YB9|khTX<4G&VnY;fB7=1zo#jI$YhPtYkI0G|t7d>a zmcPviGT5X!rE5t>}#Ni+~R_a zFDDDtSALBLSI$tSwArWCfzm@!Q8rdW-sZ#flNHswLWzZqT8>wKMYCB!c_36~v`*kv|! 
zGgfab_>&6Ri^MBI6dUx?3N9$jo=D#_l{UuHxKhIFkj{7sA$ETQQQSGEhPp9|EIzy} z%hkT}*92(liysYZHmX!VuyDzTudlniiKcTlNw0KY)WRnHN#xRTxK&RiLd_QZQ9LB# zUq$xrpUNChF}+C5Ro2vHiHeP~)_F^20+`;=q`NtG@x{nHi~jxbX3#siJS)QT-LD?* zKwV<<5^hK^c2OipfiESFpCzNPy3bAiejFmhuMPL3_@IO4N|-%EibkWVD>HA_-nCQC zwSK2u^4lEBv!9BvQGjGT$#vKD&!$YqMP3tgJebtW4W{Fzd3`%NeO{h|E(XIAm{O(S zc60nnCxR*O_A_H=`wx`SULtS0EX0$dC-}~tl$8H%dl&(ZFQ;pPA7G|rBYglJAm_jb zuuV+OSblA1>b<~OM9kaG)bjL}H2SHvL{GY{UF}Wr=kN#QkL>H_x&T1PzAiLQi`FmM zd>A}`f3%{+WmZ>yGl`$z=DOn{SbL1Adb4g4L{VJ7X?XW}%?pV9&#AWKH6azN*fEO_ zx=B26pUR~_zZlT$m)x~d7}7DL1Jk3sYasZYVeCU1@@aV+WC(2(A5=C~HhByAm1dEN z=93jGl{ZJuURQ64R}gMo+zKvp66s2;$6ASAGaE)HTIz6;wlhfQ>0&|Qmxu}J1_ z-8rWm_s|NbZ1r6RNU)p})rq7Vj~p(sct>xo9Xb5$y`57eoF$0P^bK3y3pt`Mb<6hE zjziapr7p^!LZ29w)*V^*RvIQTY7IlLm%sxpE$9^~#u%d&&c)X66ukXo(PwOxW}h;! zsGClMKllEu+dgh_;XVr$kzyb*mwGzIO8epdW{H zCU!$5ya+qrg|NO9G3D6_nYNp@{^5*HUo!jU+uKx#4pNbxiNC;vG(5HLcVfU7F{ierusWvIA60Ecb$qy(ebftUngs_#)Y5)62uHLuxri zwN+7Mnh^t%mJ;twCmSx8_h5++(rh05lv;C7qVbTl}1*9qv!>Dsv)DGWE* zn){4%Z>-Oz!`If`)M??4ZJI!lWl;6{-S7hRPEDX_l zxw@3!Q@-ZOopyP9O;y!koQ$HX4c+GKe2ft|N~v11-vvdhW|vdontfBnagj`v=<%4@5`=FOJp4?9z3%iifS;Zc{QIgdvwv;dZdwWBxr8nL`ywve_wORWzg@bw|9wTF z;a3mya}Xr4s((Hq+R!83n99@>%55eT-Tau+4=|xBZrsOT7eeW1CC&z;JC0ITRHl45 zA7Tk-wd5VPx0ctdXd-tzbni|&s4x#=Sxg|7W2P+#DAToXS}Ji zg8g5()PKCE1Utb043eOoUwu{V@Zq9stdw#R@w7!`XXDhV7x%s;jx0cFQLrfzheJsb}OI$jd6rp=0;mX5wO6o!^z zXtZSqwNV=3w*FLTc4TQITA8bk={Ch9Ahpu>Nlz0bS9AXso&g zkY(Neh60(`o()aav`02E2q4`c=dgYjEA`5gDIsC(Np9snK} zE>M=yOZhkd@ss>X-TI~@jddVMuKgG1b*rw91E@F@8`f7}cmXMJ6F>)8I2<>|a!$hue_2y* zkT#;Rq(&f4o%572z8eF?l_G`SCL6;S!AS&kH^ ziVf;`9aser1rzh&IO&sV42#Qhtb7O%lIz$9YI@o!g7$iAUc)c3C&DcfIA9e3z&o7I z0UOaJZJzi_*KxZ1V+ zlV??@<(v!eL$y10VoLDuao*OimbjM*a2AL75au2oN3fcjEa$769Hz>*m`zuMUS4hI z-5akd1Zam&@{$t4vp-E9zuHd!Xx;N;w<}e^WDyxAk*-uj1oN12k}qbD=*h>?1^f<+ zL%eZjw?QybsB7AfE!v_FN;N?U~M=fa6m_>bFrv3q6Pz+>$5X-A^rg8xUU>462#)W*-agv2~Lw-0%!&6JI!*c8u2YS&w;*Z4THKvso-N{nqr z--j}PH-4J~(=WUfsWbkOmiu}BTl9+jeYY10BqO0C%E@EkoMwZs`!ei3<0bm!8()Gy 
zCBIYT-)*7!q99&f=bmxTxC<+bGPWv4I2>UK4ge@OGN)XX6IYtWPP4Yvo?%r{PA+}J z`lE>-tP*=+yKSUuBQ-*~?pqo{m65B524e~Ljb8;Bd*7t-TnJtWz@dW=xQ=Mg_;@BQ z?N1$UtPl8=h($cDx7M5ZXl>?eiOi&QZ@+7QG`i_kDf;HSas{`O;+9c0ckn!r5j4vj zo`X<5a;&89V|kiNwlklAmq{8$GC|p2JO^CzL=#hFYrYDGozh>z(NzQXx3p z=G4>QMWM8v?bJ*Xd1+*cqC`slQEzUp2I-jg4uxj`lsgph(@xn?PEmgi^? zB3ggelxF19k)0_UzU$M!0~i$WwHNNbfULJhx%(xNP}Yg}pyItcTAf@ai$^?@^iZ>q zx-heyg5}MjwP?Ekm(vA+u0;*_oiYmA7i`ZXU z^kyIsCSN`UBysZS;Hpv-?OkA@#Tv0kW8u~!Lbn|>PRqYptqA9jh$J<e?17O*h% z_R8t0%gYz1;-g_Fe5ZJF*Ridk5p_7G--AX%Og- zP88~+x#pN9f@q-Toi8hsZJT%*w@r`9r*j#UBcqcB{jYfPNh537(5idA+u&p`rspKU zVEB|mG|EWf88_)lhD6@h^D-BjhFLW?iH{klpKKqAr|f^slArqMKzsU>3=;LW@LbuE z7}oG;vToyRiq@Janp<{@0b|9H=X7A2z!$qMfY;SiFMVS)PtYN4p)>KLYOTN=sbuqiB_u}!@M}91`Cj^Jmqm>lm z(nHDyPtMA@4s}QOE!A=>TcX`bEgt^9vlbDfIu!XT%va=ih0J_#(+E)QG-vnq*M=f? zd#1xJ%uWPEj{r}$SlO|1Lmg{?t|hiaDjH>}&gN`T3mUBrxsPVnc*_2CPC~UZ!&#OmL~y8|I{z6=H!gc& z8pv?N^W{LWG5ew7yWO4*FxKiq8frCq+|Rg;UHSxa4|%hQ2f^gmgtwGF9WPJaWtXc#p1s4h4$jgUzc8Z~k>A1he#Mk88n3Xx zH}Jkald}L(q?ftQ2}@35gy#3`dq?7a0mp-)E{1Z1CD`4ZN$x!vp_oSAc{)qgb6si5 z=;A6XUerPFUxPSfdvTWSW|WdBTeadQ%RXS5H{SpD{I=UD;4~eRz@jpRe2R9JK@vL8 z;+^-(A_@>nYjtc&LHR3TSm-}Wi!_$f1N#NK@qPj0S=v&2w3 zD11A}Zum1NE$g!5hg3XCDn8eoB}Y_eu9FamG=K3W$Kxdr_o{h?FPHlia?kV5TuGA#Ry18rCUH+ z#(;E*6h(*-1JYaQ354*@O<-W&Z@stHTkpO6W%_Tb(oz<*92Vs{Ca!&u)Yx>uHFX}-wPmw z3L*~*KdV;<`Uk^PWBnNOmvAdl`!i3zBN0yd0Bi8 z^^%WVxZD@_3oUV4MzI&VhwRCerj2Q7;~8{c=iJ_nIXGWk`-NZDc9T-aY1oyI=JRGU&t%p|~Web0&+ z@Y`ew*eebVej94>9UU1q3sMtlO(|LRDZb8m;c#Go+fNP+tGTN8rcp-ycZQ$%-y9vr zDMsy1%CPg{1Q08Rzx=!4Tv@4aSt!UBR%EdvOUp|@)zm{FDr`VC;vHg=thk!twWFGyM}jFB^dzb{f9xWV5&!bbb2L=V{~Ziu_Wy~ zt=-YWw}Ol+qR5$M15;WY#baW}`OpPjGC-}Q=*V4C!`lzFXdR1Pv378oGHalvKB-yc z>;m_+qNr737kEkDjs-jP2i0dvHS@kE+3(4hoHTB}EFBfCII+Cp7L0HjI1_*RN^MlhBREutk}C9sDE}$o&y#8c)-C zc&tADdApGw&o^gPQp4q=Uvpaocy#urbZJLVy@u@ya6CkG@06?IX%^YjsdurTb67eg zthM2uAueX-ygJ)$L9OC!ck7PlZ)gc?xx3$8tUot4hC^CS?G;1Z@|{Sopmh9_-J+1@ za;}3S`Kd>kAl}!ZC*G&gL6HZ9>qAF;AN@S4&Jh>CgTG}mZrA*ZEPw7t$9Rjc_N}J` 
zOgj+4A}H>?#^MQ`N$Ky&uGu>2g_-PoP%cl%gMQ=pgzxuUtTMaFeVC_V-bJ6H+_eLB zCtNzKy@_(EYYJo|KTRuE>r>ndF=cKKSiQ&Jyg-o*QjB_Y@7?OrZn?=N zz5KyvhQcc}QLizeq*PE(jTIIwPLQ@eaXci=bEG2SWP!yoRibJ+ z4DZLnHZDJ|+^mCgeHx#6(>M9tpvGh6=bheNC0dF(M(muDEQykBRMYB*dDm8r&PZz* zb?b@$(zz5X)P))QU2S=HTd3<}yjFz7j?M@uiUo02-i);AGtTwl_m5_cZ4#YVQ^jfJ zWwVpX4z{mHYnE8Bhg6;Fjy2f@{Hh!_Z4&3)em>lzwobcJ-QEu>5h*ay8{o*uemTmIUNLuNILSK5=8nyoY}27+C*siX zUwVd5?UY0Ql`A(u-K(Cx;B;U|ogOQZFW1yONba>r!tA>T(IrI;(GIF95-G`DZ>$HH@O!(h>Ap4W?Q zGH&e4vAHLp{m1Mn_2s4a?(eSf2YFT^tS~5b?e8j@iX3M=Pn&T09y|O{^7PVdakI1V zK0r)Tzb(w)!xyBh8Ykc=8sqQlv-r|y&*WW~TTUjG^Tyu$(Q@G@k)L@&Y%c`hYE>6S z2MUcTJAV5nF=vTr`Awst|0=1$z5Q+IZuG7_GB1Ny;szbkpw|;vB`yj5AV2-Fg44pF zy3youd_N1e@+S4|{bViIMShf&Dn?b)UdNpabqpn}N{344K!_QjvO6_e=d7 zpfOHy7a8mw;XjKv+IWd)S+io7{`z!3biLG(Md!BHM`dFVyNiNI$v{KBfj2VyqwQV3 zUPuT{_sqPZ_+8%TQge)qNG-?MH~E#|HQwFM(=VbvFX>j0pFb0(JpJI7Nj0tVZ>xNB z;R9yMNjzq%UG#0bWZ!8DD!#DL{Df0Rx@M(NVL?^R%;e|ap=kp0Uw!@Ii`R4>h;S)I z@hAA3S9iyHw0adNW~+C-?RsH&lWv9*D`jgsrET@L>gH;f{l^iTuemi20SP6D`pP!v z;p|lWgoTdvmoHi|IkMQcuCuEuM1Jjv*=I@Stsh1%rV3RGL=Gk5WxpRHbvFk*4#Qe0 z$eK7VUq#0)Ul|YgyJbzB$Dmq}o|A&!egrSa?o1|HI+1-xLSo?2j>Y*ftW%uVLc*E6 zFu{W)nXhj_8N{3B#@_t4$lovRYN>jVtmn*Vw!W>N?n)T zUH1)FY!_m7F8c_;daXw)mb|{mR}b91&&1jZp`4^sCp5(7KB^lwp33o@nZ#HD>^gIyHcU{FD0+L!qs3jBx1j?=%p%i1!y_%r+!5+ z!Q_#hem=?xhh*?78^i3BY^{09gMa? 
z@`aIZ*(}q6G4necqz^`dgxU7LH@1oQZxDAFCtc)X=_#!ujU-;_V!+W-YsHB>q z7AB1PX`@s*cuLbdI0~a4ViYISop`i*qU{c#!G}tDE9{NAMc^-MFS5ldIgWP~+jZ6# z19%m}L~ku=pmCI9r@r1|m-%5O@Qxq>AkXSBG~0|1lwO)(BY6YCfl!1wj7QjK^n((p zw5|P|u2=ig;9_V9P!S(m!_SC#X}wo9*39~mG*MqioY!vQ@(!zRx33S5P^ggAS|B&e z{+=k0;D2!Cq$C7>%`O9Uby~PbT`b?vGoal6+mIbuDkxL*N~z~VOCts^2r%TJX|S~h z23EE!`+S!hd(~!l*n2=P#4SV^U}YrE?4%sqhZmhW zAwzG&s;+Q~(+c^OGw1}@!s_E^ScX?cf4u{Me&yyE)QDn|$Z%U_^}fC^gJ}NjOZzt# z3s`D^J9Pm!16cD%c+h~%&j7B0#9^V5-9WHX7H`V(jaxP#{!9A95r+hh((bmZPW}6H z@Jao5d&>6!gVp^V--eT^V50>G_jTJRD{Mxv0( zbHX7-*s=QF^3m-9zzEMRxJi?ut-Acqnlif#kwE%d5<&nTtr0y2Rh*ZwD-kZr|6`^J z4Y-+s(?Pi5qn8I91eESBeRMt=NlIrKx&?jukB zIjA9I*Wblw+q5R~?*R3O20VOl=zg)pvpYD}TTXL4|9He!jkZaBZ063kCZ4e^{iAW_ zh6v_wGj$5eshCdf40yU4rOsWYL$?mY?+hH7$2@Klb?@VGH46|80LuebU(nZuVNE#pLu23 zSfyP)Zw>86Oz9pMqa~h@pkH(R*wZiWjP>;+4>4Gl%=KU?xBB?fx$*_m)A14M87EYb z9EG&0{DrNiv-nzSu&b5WxZB1Oa$TkelgqSWft9)|TT9I25j(cDu~<@wPF!CNTwL!Y zdR%Lq7sW?hKW0l83dEG{wx;uDGq{q>Yj!9BrP##~PYMCMMlht837(kq&kJX927{tRVr>nZ(;{R6QNwikKU9;BcJ(@YIJmEP`u@k;G>#B11(W=*cS=d@uTdIF=tZ zb(9S@|IT@2+W>Wx0=x&6shM7yJ#q@2JxlCx;CN3qA!49$I1ii&7!?Z88qUTQxzTj8 z&*6D=9beZBr+p%%reP6k(cA948t8e(UNcTll?k@&muj+JD)S}G&zH%rG4k(>RTyhb zaz1bgKht4?PGE)2FZ~#Uc+Q*K!5c~1{rCaFZY z*us3;x|aX+-86P?Y12G|ASjOyRgRaKj6p1N3I;%sCm~kr;v(=<6{rC+=no)1J_%?w zdL$ilvBXS4^e+FQBeoA47z1ZVqs%Tk$~?QH`<=eNMqCd`qy3bzI3^TGn?;d!1IeE- z0-e5$6`8JConEN`o_1H_&DpT^5ZRDM4u$OjF2pDw$kYV*auMWVz!s$zB#~>qfh7qU zTvP=l$+?xQ?~>H-4!0%&%d;tDtqhFXbO-#G<2-2KXYC?y@N>DlX6daG5b7>RW6^6k zh?2UlGnN%Mjve_nH(Ea45A}jD0{I5z5On^DZJ>V*J=9g zcY(%AFr;0NC$9D{j}JDA4RGF{oJqKR8?X*g+4^5OH69pN0JcXhWZ>c~DcSrLk;yh6mQ^3 zT|Z=4?}WH#G|1uq<2R7|Akxm3aid*8Wmn+Tp-v8)grDC*wZwt>#W0EJJjFoZ|C{NS z0@CyGb&6P;2L*UL*z%#(`+e+NVekpy9hBh%r>TL8PwlzL#ikjE1`1h4%$5Vg%*i`I z-lq;L=S@}4ne+PY-tckI1~_iXn>XseYpAiI1Ens}Glled*!o@hS3vBD9c;bpgyDm- ze84=(5_;(|cjbG|iC5u*T-H?YNK7I-ESMwK#)S&(Lpk3eiNfpBDo0o=c5+=0)1pgT zM&PL);ww3($@`^*?ORenF~FiDB!S}t$`2=SMLuqD>e)1%YAI5;;R*`AGzFA63>Ge| 
zxvC97IkP;2zl2a-7Romjn!edbYe<87>4lvRKfG(^Q!2=Mg1J>~^F^^_Kj36u3u#Fm zCyG74%!3AnF6@v!&NBb`M*-3#gD%_02{NfZ*AK)Sv6Kb`1Dn3I{H?8Lp-rJ0DwH{J znCdOJU!E14@x7!@U64Pu6$AQ zp8lQ?Vr#L2vRYcKW5EK)XYJ-*VD;A!rv6-D63U8~Ka(|~52>hU`|U@==(92Q3$``S zWbpvinX`I!V+i;cyV+2xDFYECAeIIjpmR44A_FEaqe#|~{St@Wfn(O}43N%w04gs! zmA5wK>M|2W4+mgleygUEI*cK(U^C)@IM4N>6HX#r4saX5y;A{jd*}^?omPI*Q=?&I zla3z$oK z25O$M;Kj-yV+%}^bQ5pZ+qEWkIvxk;pqo1pzUTOUbbJ)ve^~Wajex3?DFXOLB+<=U z4&^F7QU57yai6*Zq`V%+HAs>|e;hG0)y zT9di;S=)V0JKA58^<Kw572 zM1+@3B)^*^ZB#lE?ohf<=QTpw(dcnZ$#T*{NC};?3QTJDt}oiIUrxpM_mvbPcD2sO+^Ae~iz7KUn%3Y)iuq7GO#Y~cfiB+nrl^xms%>iB4BkxiaBe6VQt zZrbVTmNTlX3tZCdP=LjOJp~^g%Eq zEJ)oR{9LB|X_+^|njY^RB~EK41W5!!qEXDFNURqttTI4Y5TV2EHKt9OQ^x`;`oF)d z212Q{o~ijCqaB!t^ec2Ic1-LaP}{b@;fky%QW{<09goSH(CZrr7iJtB-=PxbKi-*ZE+A&b3-wxSd#{f@L4Q>K;Gqn6;Ob+| z_JhLL3$Xzp6ivjY9Kvozud!G80n9i3w4P%yP^0)K^;~5C5w`R*D&196Jyb%^C_^)a z2{qjNliVVu+XDho(Zj|pJ%K-cAC2f+?BvN@0fTZRFrs6tRW6r6~(5(@Y zC$?5l>{Z#dcwKD+7PYw3;FeOb+tzSscG`kqE200%wtozpE8;T|?R>dxyzO@2@O_qB zLF?A3lCek_N5e22KHPx$C_~})#*~A2dE2SzAL!8*7{xr=F;1cz4g8N<7}^*lINZTD zbK^h-njF*Le8+=E8HJ|I>o6r$7!fJsc)EGXty86V#{^|v Date: Thu, 7 Dec 2017 00:26:38 +0100 Subject: [PATCH 1129/1483] rest: fix error string on invalid aggregation method --- gnocchi/rest/api.py | 3 +-- gnocchi/tests/functional/gabbits/metric.yaml | 7 ++++++- 2 files changed, 7 insertions(+), 3 deletions(-) diff --git a/gnocchi/rest/api.py b/gnocchi/rest/api.py index 22532cce..38dc315e 100644 --- a/gnocchi/rest/api.py +++ b/gnocchi/rest/api.py @@ -468,8 +468,7 @@ class MetricController(rest.RestController): self.enforce_metric("get measures") if (aggregation not in archive_policy.ArchivePolicy.VALID_AGGREGATION_METHODS): - msg = '''Invalid aggregation value %(agg)s, must be one of %(std)s - or %(custom)s''' + msg = "Invalid aggregation value %(agg)s, must be one of %(std)s" abort(400, msg % dict( agg=aggregation, std=archive_policy.ArchivePolicy.VALID_AGGREGATION_METHODS)) diff --git 
a/gnocchi/tests/functional/gabbits/metric.yaml b/gnocchi/tests/functional/gabbits/metric.yaml index 8503c855..10535d3a 100644 --- a/gnocchi/tests/functional/gabbits/metric.yaml +++ b/gnocchi/tests/functional/gabbits/metric.yaml @@ -6,7 +6,6 @@ defaults: content-type: application/json # User foobar authorization: "basic Zm9vYmFyOg==" - content-type: application/json tests: - name: wrong metric @@ -177,6 +176,12 @@ tests: value: 12 status: 202 + - name: get measurements invalid agg method + GET: /v1/metric/$HISTORY['list valid metrics'].$RESPONSE['$[0].id']/measures?aggregation=wtf + status: 400 + response_strings: + - Invalid aggregation value + - name: get measurements by start GET: /v1/metric/$HISTORY['list valid metrics'].$RESPONSE['$[0].id']/measures?refresh=true&start=2015-03-06T14:34 response_json_paths: -- GitLab From 63ea0aaa3a6b1c277f132afad157af5547b78b8b Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Thu, 7 Dec 2017 00:52:42 +0100 Subject: [PATCH 1130/1483] rest: fail properly if granularity is not a timespan The current code will fail with a 500 error. 
--- gnocchi/rest/api.py | 12 +++++++++--- gnocchi/tests/functional/gabbits/metric.yaml | 10 ++++++++++ 2 files changed, 19 insertions(+), 3 deletions(-) diff --git a/gnocchi/rest/api.py b/gnocchi/rest/api.py index 38dc315e..2022737a 100644 --- a/gnocchi/rest/api.py +++ b/gnocchi/rest/api.py @@ -485,6 +485,14 @@ class MetricController(rest.RestController): except Exception: abort(400, "Invalid value for stop") + if granularity is not None: + try: + granularity = utils.to_timespan(granularity) + except ValueError: + abort(400, {"cause": "Attribute value error", + "detail": "granularity", + "reason": "Invalid granularity"}) + if resample: if not granularity: abort(400, 'A granularity must be specified to resample') @@ -504,9 +512,7 @@ class MetricController(rest.RestController): try: return pecan.request.storage.get_measures( self.metric, start, stop, aggregation, - utils.to_timespan(granularity) - if granularity is not None else None, - resample) + granularity, resample) except (storage.MetricDoesNotExist, storage.GranularityDoesNotExist, storage.AggregationDoesNotExist) as e: diff --git a/gnocchi/tests/functional/gabbits/metric.yaml b/gnocchi/tests/functional/gabbits/metric.yaml index 10535d3a..fd3e7e86 100644 --- a/gnocchi/tests/functional/gabbits/metric.yaml +++ b/gnocchi/tests/functional/gabbits/metric.yaml @@ -201,6 +201,16 @@ tests: - ["2015-03-06T14:33:57+00:00", 1.0, 43.1] - ["2015-03-06T14:34:12+00:00", 1.0, 12.0] + - name: get measurements from metric invalid granularity + GET: /v1/metric/$HISTORY['list valid metrics'].$RESPONSE['$[0].id']/measures?granularity=foobar + request_headers: + accept: application/json + status: 400 + response_json_paths: + $.description.cause: Attribute value error + $.description.reason: Invalid granularity + $.description.detail: granularity + - name: push measurements to metric again POST: /v1/metric/$HISTORY['list valid metrics'].$RESPONSE['$[0].id']/measures data: -- GitLab From 6aaf6abacb56bc5e8021f94be58029b03903b160 Mon 
Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Fri, 8 Dec 2017 09:19:11 +0100 Subject: [PATCH 1131/1483] Ban keystonemiddleware 4.19 Last keystonemiddleware have some missing dependencies. https://review.openstack.org/526624 Ban this version for now --- setup.cfg | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/setup.cfg b/setup.cfg index 3d4ddbfd..ad8bd79e 100644 --- a/setup.cfg +++ b/setup.cfg @@ -20,7 +20,7 @@ classifier = [extras] keystone = - keystonemiddleware>=4.0.0 + keystonemiddleware>=4.0.0,!=4.19.0 mysql = pymysql oslo.db>=4.29.0 @@ -68,7 +68,7 @@ test = testtools>=0.9.38 WebTest>=2.0.16 doc8 - keystonemiddleware>=4.0.0 + keystonemiddleware>=4.0.0,!=4.19.0 wsgi_intercept>=1.4.1 test-swift = python-swiftclient -- GitLab From d1f88803e622ac223c9555d49bed0226a2304f8d Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Thu, 7 Dec 2017 12:04:17 +0100 Subject: [PATCH 1132/1483] rest: fix exception handling in batch measures This change have no tests, since the issue occurs only when the metric is created by 2 HTTP calls in parallel. 
Closes #552 --- gnocchi/rest/api.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/gnocchi/rest/api.py b/gnocchi/rest/api.py index 2022737a..125f36f5 100644 --- a/gnocchi/rest/api.py +++ b/gnocchi/rest/api.py @@ -1561,7 +1561,7 @@ class ResourcesMetricsMeasuresBatchController(rest.RestController): archive_policy_name=metric[ 'archive_policy_name']) except indexer.NamedMetricAlreadyExists as e: - already_exists_names.append(e.metric) + already_exists_names.append(e.metric_name) except indexer.NoSuchResource: unknown_resources.append({ 'resource_id': six.text_type(resource_id), -- GitLab From 676541ca28eb9fb951a36b62c9c56a9d5c8b835e Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Sat, 9 Dec 2017 13:53:56 +0100 Subject: [PATCH 1133/1483] rest: validate pagination options with voluptuous This validate pagination options with voluptuous and remove the side effect of modifying the query parametres. That avoids doing a copy of the dict each time. --- gnocchi/rest/api.py | 50 +++++++++++++++++++++++---------------------- 1 file changed, 26 insertions(+), 24 deletions(-) diff --git a/gnocchi/rest/api.py b/gnocchi/rest/api.py index 125f36f5..6d20bf38 100644 --- a/gnocchi/rest/api.py +++ b/gnocchi/rest/api.py @@ -185,7 +185,7 @@ def get_header_option(name, params): type, options = werkzeug.http.parse_options_header( pecan.request.headers.get('Accept')) return strtobool('Accept header' if name in options else name, - options.get(name, params.pop(name, 'false'))) + options.get(name, params.get(name, 'false'))) def get_history(params): @@ -211,23 +211,26 @@ METRIC_DEFAULT_PAGINATION = ['id:asc'] def get_pagination_options(params, default): - max_limit = pecan.request.conf.api.max_limit - limit = params.pop('limit', max_limit) - marker = params.pop('marker', None) - sorts = params.pop('sort', default) - if not isinstance(sorts, list): - sorts = [sorts] - try: - limit = PositiveNotNullInt(limit) - except ValueError: - abort(400, "Invalid 'limit' value: %s" % 
params.get('limit')) - - limit = min(limit, max_limit) - - return {'limit': limit, - 'marker': marker, - 'sorts': sorts} + opts = voluptuous.Schema({ + voluptuous.Required( + "limit", default=pecan.request.conf.api.max_limit): + voluptuous.All(voluptuous.Coerce(int), + voluptuous.Range(min=1), + voluptuous.Clamp( + min=1, max=pecan.request.conf.api.max_limit)), + "marker": six.text_type, + voluptuous.Required("sort", default=default): + voluptuous.All( + voluptuous.Coerce(arg_to_list), + [six.text_type]), + }, extra=voluptuous.REMOVE_EXTRA)(params) + except voluptuous.Invalid as e: + abort(400, {"cause": "Argument value error", + "reason": str(e)}) + opts['sorts'] = opts['sort'] + del opts['sort'] + return opts def ValidAggMethod(value): @@ -667,7 +670,9 @@ class MetricsController(rest.RestController): attr_filters.append({"=": {"creator": provided_creator}}) for k, v in six.iteritems(kwargs): - attr_filters.append({"=": {k: v}}) + # Ignore pagination option + if k not in ('limit', 'marker', 'sort'): + attr_filters.append({"=": {k: v}}) policy_filter = pecan.request.auth_helper.get_metric_policy_filter( pecan.request, "list metric") @@ -775,7 +780,6 @@ class ResourceHistoryController(rest.RestController): @pecan.expose('json') def get(self, **kwargs): - initial_kwargs = kwargs.copy() details = get_details(kwargs) pagination_opts = get_pagination_options( kwargs, RESOURCE_DEFAULT_PAGINATION) @@ -797,7 +801,7 @@ class ResourceHistoryController(rest.RestController): ) if resources and len(resources) >= pagination_opts['limit']: marker = "%s@%s" % (resources[-1].id, resources[-1].revision) - set_resp_link_hdr(marker, initial_kwargs, pagination_opts) + set_resp_link_hdr(marker, kwargs, pagination_opts) return resources except indexer.IndexerException as e: abort(400, six.text_type(e)) @@ -1124,7 +1128,6 @@ class ResourcesController(rest.RestController): @pecan.expose('json') def get_all(self, **kwargs): - initial_kwargs = kwargs.copy() details = get_details(kwargs) 
history = get_history(kwargs) pagination_opts = get_pagination_options( @@ -1148,7 +1151,7 @@ class ResourcesController(rest.RestController): resources[-1].revision) else: marker = str(resources[-1].id) - set_resp_link_hdr(marker, initial_kwargs, pagination_opts) + set_resp_link_hdr(marker, kwargs, pagination_opts) return resources except indexer.IndexerException as e: abort(400, six.text_type(e)) @@ -1350,7 +1353,6 @@ class SearchResourceTypeController(rest.RestController): self._resource_type = resource_type def _search(self, **kwargs): - initial_kwargs = kwargs.copy() if pecan.request.body: attr_filter = deserialize_and_validate(ResourceSearchSchema) elif kwargs.get("filter"): @@ -1386,7 +1388,7 @@ class SearchResourceTypeController(rest.RestController): resources[-1].revision) else: marker = str(resources[-1].id) - set_resp_link_hdr(marker, initial_kwargs, pagination_opts) + set_resp_link_hdr(marker, kwargs, pagination_opts) return resources @pecan.expose('json') -- GitLab From a92976fe4c3ad0a280a0e43f27d015e4d392a093 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Mon, 11 Dec 2017 11:48:30 +0100 Subject: [PATCH 1134/1483] rest: simplify archive policy aggregation method validation --- gnocchi/rest/api.py | 14 +++++--------- gnocchi/tests/functional/gabbits/archive.yaml | 14 ++++++++++++++ 2 files changed, 19 insertions(+), 9 deletions(-) diff --git a/gnocchi/rest/api.py b/gnocchi/rest/api.py index 6d20bf38..4feb6169 100644 --- a/gnocchi/rest/api.py +++ b/gnocchi/rest/api.py @@ -233,13 +233,6 @@ def get_pagination_options(params, default): return opts -def ValidAggMethod(value): - value = six.text_type(value) - if value in archive_policy.ArchivePolicy.VALID_AGGREGATION_METHODS_VALUES: - return value - raise ValueError("Invalid aggregation method") - - class ArchivePolicyController(rest.RestController): def __init__(self, archive_policy): self.archive_policy = archive_policy @@ -302,16 +295,19 @@ class ArchivePoliciesController(rest.RestController): 
@pecan.expose('json') def post(self): + enforce("create archive policy", {}) # NOTE(jd): Initialize this one at run-time because we rely on conf conf = pecan.request.conf - enforce("create archive policy", {}) + valid_agg_methods = ( + archive_policy.ArchivePolicy.VALID_AGGREGATION_METHODS_VALUES + ) ArchivePolicySchema = voluptuous.Schema({ voluptuous.Required("name"): six.text_type, voluptuous.Required("back_window", default=0): PositiveOrNullInt, voluptuous.Required( "aggregation_methods", default=set(conf.archive_policy.default_aggregation_methods)): - [ValidAggMethod], + voluptuous.All(list(valid_agg_methods), voluptuous.Coerce(set)), voluptuous.Required("definition"): voluptuous.All([{ "granularity": Timespan, diff --git a/gnocchi/tests/functional/gabbits/archive.yaml b/gnocchi/tests/functional/gabbits/archive.yaml index 55964a8f..28cfefc2 100644 --- a/gnocchi/tests/functional/gabbits/archive.yaml +++ b/gnocchi/tests/functional/gabbits/archive.yaml @@ -485,6 +485,20 @@ tests: timespan: "1 shenanigan" status: 400 + - name: policy invalid aggregation method + POST: /v1/archive_policy + request_headers: + # User admin + authorization: "basic YWRtaW46" + data: + name: invalid-agg-method + aggregation_methods: + - wtf + definition: + - granularity: 1 second + timespan: 1 hour + status: 400 + - name: create policy when granularity is larger than timespan POST: /v1/archive_policy request_headers: -- GitLab From 529344ed99e18976e5f613efba3099aaebfc416c Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Sun, 10 Dec 2017 12:01:44 +0100 Subject: [PATCH 1135/1483] rest: do not validate paging keys in metric listing The current code in metric listing tries to validate the paging keys with voluptuous, but they are actually and correctly validated later by get_pagination_options(). Let's stop trying to be smart here and ignore them. 
--- gnocchi/rest/api.py | 19 +++++++------------ 1 file changed, 7 insertions(+), 12 deletions(-) diff --git a/gnocchi/rest/api.py b/gnocchi/rest/api.py index 4feb6169..8238b470 100644 --- a/gnocchi/rest/api.py +++ b/gnocchi/rest/api.py @@ -632,26 +632,23 @@ class MetricsController(rest.RestController): "user_id": six.text_type, "project_id": six.text_type, "creator": six.text_type, - "limit": six.text_type, "name": six.text_type, "id": six.text_type, "unit": six.text_type, "archive_policy_name": six.text_type, "status": voluptuous.Any("active", "delete"), - "sort": voluptuous.Any([six.text_type], six.text_type), - "marker": six.text_type, - }) + }, extra=voluptuous.REMOVE_EXTRA) @classmethod @pecan.expose('json') def get_all(cls, **kwargs): - kwargs = cls.MetricListSchema(kwargs) + filtering = cls.MetricListSchema(kwargs) # Compat with old user/project API - provided_user_id = kwargs.pop('user_id', None) - provided_project_id = kwargs.pop('project_id', None) + provided_user_id = filtering.pop('user_id', None) + provided_project_id = filtering.pop('project_id', None) if provided_user_id is None and provided_project_id is None: - provided_creator = kwargs.pop('creator', None) + provided_creator = filtering.pop('creator', None) else: provided_creator = ( (provided_user_id or "") @@ -665,10 +662,8 @@ class MetricsController(rest.RestController): if provided_creator is not None: attr_filters.append({"=": {"creator": provided_creator}}) - for k, v in six.iteritems(kwargs): - # Ignore pagination option - if k not in ('limit', 'marker', 'sort'): - attr_filters.append({"=": {k: v}}) + for k, v in six.iteritems(filtering): + attr_filters.append({"=": {k: v}}) policy_filter = pecan.request.auth_helper.get_metric_policy_filter( pecan.request, "list metric") -- GitLab From 3489ddf865b562295099281b753819e8f809c468 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Mon, 11 Dec 2017 14:50:43 +0100 Subject: [PATCH 1136/1483] doc: fix formatting with `remoteuser`, enhance notes on 
basic --- doc/source/install.rst | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/doc/source/install.rst b/doc/source/install.rst index efe45ab2..41f1bc8a 100644 --- a/doc/source/install.rst +++ b/doc/source/install.rst @@ -125,7 +125,9 @@ Configuring authentication The API server supports different authentication methods: -* `basic` (the default) which uses the standard HTTP `Authorization` header. +* `basic` (the default) which uses the standard HTTP `Authorization` header. By + default, only the user `admin` has some special permissions (e.g. create + archive policies). The password of the authentication is not used. * `keystone` to use `OpenStack Keystone`_. If you successfully installed the `keystone` flavor using `pip` (see :ref:`installation`), you can set @@ -133,9 +135,9 @@ The API server supports different authentication methods: You also need to configure the `keystone_authtoken` section in `gnocchi.conf` with the proper value so Gnocchi is able to validate tokens. -* `remoteuser` Gnocchi will look at the HTTP server REMOTE_USER environment - variable to get the username. Then the permissions model is the same as the - `basic` mode. +* `remoteuser` where Gnocchi will look at the HTTP server `REMOTE_USER` + environment variable to get the username. Then the permissions model is the + same as the `basic` mode. .. _`OpenStack Keystone`: http://launchpad.net/keystone -- GitLab From 8242cc17a15d797ec3632a276105c119dd7ba8f8 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Fri, 8 Dec 2017 16:38:31 +0100 Subject: [PATCH 1137/1483] Remove unused tox targets py27-gate is not used anymore and I don't think anyone uses py27-gabbi, you can just filter the test by name easily. 
--- tox.ini | 15 --------------- 1 file changed, 15 deletions(-) diff --git a/tox.ini b/tox.ini index d0dbd2d7..e8170019 100644 --- a/tox.ini +++ b/tox.ini @@ -127,21 +127,6 @@ whitelist_externals = bash commands = flake8 bashate -v devstack/plugin.sh -[testenv:py27-gate] -setenv = GNOCCHI_TEST_PATH=gnocchi/tests/functional_live - GABBI_LIVE=1 -passenv = {[testenv]passenv} GNOCCHI_SERVICE* GNOCCHI_AUTHORIZATION -sitepackages = True -basepython = python2.7 -commands = {toxinidir}/tools/pretty_tox.sh '{posargs}' - -# This target provides a shortcut to running just the gabbi tests. -[testenv:py27-gabbi] -deps = .[test,postgresql,file] -setenv = GNOCCHI_TEST_PATH=gnocchi/tests/functional -basepython = python2.7 -commands = pifpaf -g GNOCCHI_INDEXER_URL run postgresql -- {toxinidir}/tools/pretty_tox.sh '{posargs}' - [testenv:py27-cover] commands = pifpaf -g GNOCCHI_INDEXER_URL run postgresql -- python setup.py testr --coverage --testr-args="{posargs}" -- GitLab From f8b10dafb6bd00e9831b242137694492cfd804a2 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Fri, 8 Dec 2017 16:32:02 +0100 Subject: [PATCH 1138/1483] doc: build documentation with Python 3 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit There's no reason to stick with Python 2 for this job. We cannot move docs-gnocchi.xyz yet to Python 3 because older branch do not work with Python 3 doc. 
--- gnocchi/gendoc.py | 15 +++++++-------- tox.ini | 2 +- 2 files changed, 8 insertions(+), 9 deletions(-) diff --git a/gnocchi/gendoc.py b/gnocchi/gendoc.py index 6d5545c6..d0169b64 100644 --- a/gnocchi/gendoc.py +++ b/gnocchi/gendoc.py @@ -41,13 +41,13 @@ def _format_json(txt): def _extract_body(req_or_resp): # TODO(jd) Make this a Sphinx option - if not req_or_resp.body: + if not req_or_resp.text: return "" - if req_or_resp.content_type == "application/json": - body = _format_json(req_or_resp.body) + if req_or_resp.content_type.startswith("application/json"): + body = _format_json(req_or_resp.text) else: - body = req_or_resp.body + body = req_or_resp.text return "\n ".join(body.split("\n")) @@ -200,8 +200,7 @@ def setup(app): for entry in scenarios: template = jinja2.Template(entry['request']) fake_file = six.moves.cStringIO() - fake_file.write(template.render( - scenarios=scenarios).encode('utf-8')) + fake_file.write(template.render(scenarios=scenarios)) fake_file.seek(0) request = webapp.RequestClass.from_file(fake_file) @@ -225,7 +224,7 @@ def setup(app): test.tearDown() test.tearDownClass() with open("doc/source/rest.j2", "r") as f: - template = jinja2.Template(f.read().decode('utf-8')) + template = jinja2.Template(f.read()) with open("doc/source/rest.rst", "w") as f: - f.write(template.render(scenarios=scenarios).encode('utf-8')) + f.write(template.render(scenarios=scenarios)) _RUN = True diff --git a/tox.ini b/tox.ini index e8170019..691abfad 100644 --- a/tox.ini +++ b/tox.ini @@ -141,7 +141,7 @@ show-source = true enable-extensions = H904 [testenv:docs] -basepython = python2.7 +basepython = python3 ## This does not work, see: https://github.com/tox-dev/tox/issues/509 # deps = {[testenv]deps} # .[postgresql,doc] -- GitLab From c8ad005b7157e80b614061930a16be56279b904c Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Tue, 12 Dec 2017 09:23:31 +0100 Subject: [PATCH 1139/1483] doc: fix py2/3 unicode issue This change fix the py2/py3 unicode issue. 
Also, it disables temporary the docs-gnocchi.xyz jobs, because we can't generate the master branch doc anymore. --- .travis.yml | 2 +- gnocchi/gendoc.py | 21 ++++++++++++++++----- 2 files changed, 17 insertions(+), 6 deletions(-) diff --git a/.travis.yml b/.travis.yml index 84f554f7..90c80dc7 100644 --- a/.travis.yml +++ b/.travis.yml @@ -10,7 +10,7 @@ cache: env: - TARGET: pep8 - TARGET: docs - - TARGET: docs-gnocchi.xyz +# - TARGET: docs-gnocchi.xyz - TARGET: py27-mysql-ceph-upgrade-from-3.1 - TARGET: py35-postgresql-file-upgrade-from-3.1 diff --git a/gnocchi/gendoc.py b/gnocchi/gendoc.py index d0169b64..4e3f598c 100644 --- a/gnocchi/gendoc.py +++ b/gnocchi/gendoc.py @@ -44,10 +44,12 @@ def _extract_body(req_or_resp): if not req_or_resp.text: return "" - if req_or_resp.content_type.startswith("application/json"): - body = _format_json(req_or_resp.text) + if six.PY2: + body = req_or_resp.body else: body = req_or_resp.text + if req_or_resp.content_type.startswith("application/json"): + body = _format_json(body) return "\n ".join(body.split("\n")) @@ -200,7 +202,10 @@ def setup(app): for entry in scenarios: template = jinja2.Template(entry['request']) fake_file = six.moves.cStringIO() - fake_file.write(template.render(scenarios=scenarios)) + content = template.render(scenarios=scenarios) + if six.PY2: + content = content.encode('utf-8') + fake_file.write(content) fake_file.seek(0) request = webapp.RequestClass.from_file(fake_file) @@ -224,7 +229,13 @@ def setup(app): test.tearDown() test.tearDownClass() with open("doc/source/rest.j2", "r") as f: - template = jinja2.Template(f.read()) + content = f.read() + if six.PY2: + content = content.decode("utf-8") + template = jinja2.Template(content) with open("doc/source/rest.rst", "w") as f: - f.write(template.render(scenarios=scenarios)) + content = template.render(scenarios=scenarios) + if six.PY2: + content = content.encode("utf-8") + f.write(content) _RUN = True -- GitLab From dc590f918c722f748cee7d7c88a2b3f210932de7 
Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Tue, 12 Dec 2017 09:27:52 +0100 Subject: [PATCH 1140/1483] doc: fail if something go wrong with the hack This change ensure the doc build fail if something goes wrong with the multiversion hack. --- gnocchi/gendoc.py | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/gnocchi/gendoc.py b/gnocchi/gendoc.py index 4e3f598c..71ac1c76 100644 --- a/gnocchi/gendoc.py +++ b/gnocchi/gendoc.py @@ -105,7 +105,6 @@ class ScenarioList(list): multiversion_hack = """ import shutil -import subprocess import sys import os @@ -177,14 +176,14 @@ def setup(app): # file of old version of the module. # It also drop the database before each run. if sys.argv[0].endswith("sphinx-versioning"): - subprocess.call(["dropdb", os.environ['PGDATABASE']]) - subprocess.call(["createdb", os.environ['PGDATABASE']]) + subprocess.check_call(["dropdb", os.environ['PGDATABASE']]) + subprocess.check_call(["createdb", os.environ['PGDATABASE']]) from sphinxcontrib.versioning import sphinx_ version = sphinx_.EventHandlers.CURRENT_VERSION with tempfile.NamedTemporaryFile() as f: f.write(multiversion_hack % app.confdir) f.flush() - subprocess.call(['python', f.name, version]) + subprocess.check_call(['python', f.name, version]) _RUN = True return -- GitLab From fce72971ad353549b9d4762b677a9d5112824e7a Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Tue, 12 Dec 2017 11:10:59 +0100 Subject: [PATCH 1141/1483] doc: reenable gnocchi.xyz job --- .travis.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index 90c80dc7..84f554f7 100644 --- a/.travis.yml +++ b/.travis.yml @@ -10,7 +10,7 @@ cache: env: - TARGET: pep8 - TARGET: docs -# - TARGET: docs-gnocchi.xyz + - TARGET: docs-gnocchi.xyz - TARGET: py27-mysql-ceph-upgrade-from-3.1 - TARGET: py35-postgresql-file-upgrade-from-3.1 -- GitLab From ca872ddba4c7c3dd9e354d18e557bef6e0256b9b Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Mon, 11 Dec 
2017 16:18:30 +0100 Subject: [PATCH 1142/1483] rest: do not catch too wide exception The only user-related problem that can be raised here is InvalidPagination. Any other indexer exception would be a programming error, and should raise a 500 (and should be fixed). Do not convert blindly all IndexerException as it was the user fault. --- gnocchi/rest/api.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/gnocchi/rest/api.py b/gnocchi/rest/api.py index 8238b470..03feb0e4 100644 --- a/gnocchi/rest/api.py +++ b/gnocchi/rest/api.py @@ -682,7 +682,7 @@ class MetricsController(rest.RestController): if metrics and len(metrics) >= pagination_opts['limit']: set_resp_link_hdr(str(metrics[-1].id), kwargs, pagination_opts) return metrics - except indexer.IndexerException as e: + except indexer.InvalidPagination as e: abort(400, six.text_type(e)) -- GitLab From c5466b7b01569d035377a124392e0dc3367e8ded Mon Sep 17 00:00:00 2001 From: gord chung Date: Mon, 11 Dec 2017 22:30:21 +0000 Subject: [PATCH 1143/1483] append once when merging this isn't that much faster based on benchmark but it's less work. --- gnocchi/carbonara.py | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/gnocchi/carbonara.py b/gnocchi/carbonara.py index 4dbc697d..03174534 100644 --- a/gnocchi/carbonara.py +++ b/gnocchi/carbonara.py @@ -97,10 +97,9 @@ def combine_timeseries(ts1, ts2): :param ts: The timeseries to combine. :return: A new timeseries. 
""" - _, index = numpy.unique( - numpy.append(ts1['timestamps'], ts2['timestamps']), - return_index=True) - return numpy.append(ts1, ts2)[index] + ts = numpy.append(ts1, ts2) + _, index = numpy.unique(ts['timestamps'], return_index=True) + return ts[index] class GroupedTimeSeries(object): -- GitLab From a3b91f6e0b7961504fb1d76b385b90b9655a860e Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Tue, 12 Dec 2017 14:23:57 +0100 Subject: [PATCH 1144/1483] prometheus: no need to sanitize / / is already forbidden in Prometheus, no need to sanitize them. --- doc/source/prometheus.rst | 4 ---- gnocchi/rest/api.py | 2 +- 2 files changed, 1 insertion(+), 5 deletions(-) diff --git a/doc/source/prometheus.rst b/doc/source/prometheus.rst index df4c3262..1c42949a 100644 --- a/doc/source/prometheus.rst +++ b/doc/source/prometheus.rst @@ -25,9 +25,5 @@ based on each `job` and `instance` pair. This resource is created with the The metrics sent by Prometheus with this pair are attached to that resource and filled with the provided measures. -.. note:: - - `/` is forbidden in Gnocchi metric name, they are replaced by `_` - .. _`Prometheus`: https://prometheus.io/ .. 
_`Remote Write Adapter`: https://prometheus.io/docs/operating/configuration/# diff --git a/gnocchi/rest/api.py b/gnocchi/rest/api.py index 03feb0e4..2263c160 100644 --- a/gnocchi/rest/api.py +++ b/gnocchi/rest/api.py @@ -2022,7 +2022,7 @@ class PrometheusWriteController(rest.RestController): for ts in f.timeseries: attrs = dict((l.name, l.value) for l in ts.labels) original_rid = (attrs["job"], attrs["instance"]) - name = attrs['__name__'].replace('/', '_') + name = attrs['__name__'] if ts.samples: measures_by_rid[original_rid][name] = ( MeasuresListSchema([{'timestamp': s.timestamp_ms / 1000.0, -- GitLab From adfe32ec7a466d8f03fa398a2cd880f45d1f3be1 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Tue, 12 Dec 2017 14:29:19 +0100 Subject: [PATCH 1145/1483] Prometheus: job and instance are not mandatory job and instance are not strictly mandatory from the protocol point of view, even is Prometheus always set it. --- gnocchi/rest/api.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/gnocchi/rest/api.py b/gnocchi/rest/api.py index 2263c160..f8b1850e 100644 --- a/gnocchi/rest/api.py +++ b/gnocchi/rest/api.py @@ -2021,7 +2021,8 @@ class PrometheusWriteController(rest.RestController): measures_by_rid = collections.defaultdict(dict) for ts in f.timeseries: attrs = dict((l.name, l.value) for l in ts.labels) - original_rid = (attrs["job"], attrs["instance"]) + original_rid = (attrs.get("job", "none"), + attrs.get("instance", "none")) name = attrs['__name__'] if ts.samples: measures_by_rid[original_rid][name] = ( -- GitLab From 1e78543959b10ddc024cc91fe964472c1f0a4224 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Tue, 12 Dec 2017 16:14:20 +0100 Subject: [PATCH 1146/1483] file: do not write the unaggregated time serie atomically This has some cost which seems unneeded anymore. When this file is used, the metric is locked by its sack, so there is no concurrent access to this file. We can just write it at once. 
--- gnocchi/storage/file.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/gnocchi/storage/file.py b/gnocchi/storage/file.py index 697fffff..375338fc 100644 --- a/gnocchi/storage/file.py +++ b/gnocchi/storage/file.py @@ -89,9 +89,9 @@ class FileStorage(storage.StorageDriver): raise def _store_unaggregated_timeserie(self, metric, data, version=3): - self._atomic_file_store( - self._build_unaggregated_timeserie_path(metric, version), - data) + dest = self._build_unaggregated_timeserie_path(metric, version) + with open(dest, "wb") as f: + f.write(data) def _get_unaggregated_timeserie(self, metric, version=3): path = self._build_unaggregated_timeserie_path(metric, version) -- GitLab From 259b5c5e45713a7ae55af7d5393841f56c6be36f Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Tue, 12 Dec 2017 11:39:42 +0100 Subject: [PATCH 1147/1483] incoming: only know about metric id The current code passes Metric objects to the incoming subsystem, but it actually knows only about metric ids. That's why it only uses metric.id everywhere. Let's simplify that and make the incoming subsystem completely agnostic with the representation model of the metric outside of it. 
--- gnocchi/incoming/__init__.py | 18 ++++----- gnocchi/incoming/ceph.py | 20 +++++----- gnocchi/incoming/file.py | 36 +++++++++--------- gnocchi/incoming/redis.py | 18 ++++----- gnocchi/incoming/s3.py | 24 ++++++------ gnocchi/incoming/swift.py | 24 ++++++------ gnocchi/rest/api.py | 12 +++--- gnocchi/rest/influxdb.py | 2 +- gnocchi/statsd.py | 2 +- gnocchi/storage/__init__.py | 4 +- gnocchi/tests/test_aggregates.py | 56 +++++++++++++-------------- gnocchi/tests/test_incoming.py | 2 +- gnocchi/tests/test_storage.py | 65 ++++++++++++++++---------------- 13 files changed, 140 insertions(+), 143 deletions(-) diff --git a/gnocchi/incoming/__init__.py b/gnocchi/incoming/__init__.py index b972aa25..4624b59c 100644 --- a/gnocchi/incoming/__init__.py +++ b/gnocchi/incoming/__init__.py @@ -107,13 +107,13 @@ class IncomingDriver(object): return numpy.array(list(measures), dtype=TIMESERIES_ARRAY_DTYPE).tobytes() - def add_measures(self, metric, measures): + def add_measures(self, metric_id, measures): """Add a measure to a metric. - :param metric: The metric measured. + :param metric_id: The metric measured. :param measures: The actual measures. """ - self.add_measures_batch({metric: measures}) + self.add_measures_batch({metric_id: measures}) def add_measures_batch(self, metrics_and_measures): """Add a batch of measures for some metrics. 
@@ -124,12 +124,12 @@ class IncomingDriver(object): """ utils.parallel_map( self._store_new_measures, - ((metric, self._encode_measures(measures)) - for metric, measures + ((metric_id, self._encode_measures(measures)) + for metric_id, measures in six.iteritems(metrics_and_measures))) @staticmethod - def _store_new_measures(metric, data): + def _store_new_measures(metric_id, data): raise exceptions.NotImplementedError def measures_report(self, details=True): @@ -155,15 +155,15 @@ class IncomingDriver(object): raise exceptions.NotImplementedError @staticmethod - def delete_unprocessed_measures_for_metric_id(metric_id): + def delete_unprocessed_measures_for_metric(metric_id): raise exceptions.NotImplementedError @staticmethod - def process_measure_for_metric(metric): + def process_measure_for_metric(metric_id): raise exceptions.NotImplementedError @staticmethod - def has_unprocessed(metric): + def has_unprocessed(metric_id): raise exceptions.NotImplementedError def sack_for_metric(self, metric_id): diff --git a/gnocchi/incoming/ceph.py b/gnocchi/incoming/ceph.py index 8e5b588d..f1bb1afa 100644 --- a/gnocchi/incoming/ceph.py +++ b/gnocchi/incoming/ceph.py @@ -80,13 +80,13 @@ class CephStorage(incoming.IncomingDriver): def add_measures_batch(self, metrics_and_measures): data_by_sack = defaultdict(lambda: defaultdict(list)) - for metric, measures in six.iteritems(metrics_and_measures): + for metric_id, measures in six.iteritems(metrics_and_measures): name = "_".join(( self.MEASURE_PREFIX, - str(metric.id), + str(metric_id), str(uuid.uuid4()), datetime.datetime.utcnow().strftime("%Y%m%d_%H:%M:%S"))) - sack = self.get_sack_name(self.sack_for_metric(metric.id)) + sack = self.get_sack_name(self.sack_for_metric(metric_id)) data_by_sack[sack]['names'].append(name) data_by_sack[sack]['measures'].append( self._encode_measures(measures)) @@ -168,7 +168,7 @@ class CephStorage(incoming.IncomingDriver): marker = obj_names[-1] return names - def 
delete_unprocessed_measures_for_metric_id(self, metric_id): + def delete_unprocessed_measures_for_metric(self, metric_id): sack = self.sack_for_metric(metric_id) key_prefix = self.MEASURE_PREFIX + "_" + str(metric_id) keys = tuple(self._list_keys_to_process(sack, key_prefix)) @@ -184,15 +184,15 @@ class CephStorage(incoming.IncomingDriver): self.ioctx.operate_write_op(op, self.get_sack_name(sack), flags=self.OMAP_WRITE_FLAGS) - def has_unprocessed(self, metric): - sack = self.sack_for_metric(metric.id) - object_prefix = self.MEASURE_PREFIX + "_" + str(metric.id) + def has_unprocessed(self, metric_id): + sack = self.sack_for_metric(metric_id) + object_prefix = self.MEASURE_PREFIX + "_" + str(metric_id) return bool(self._list_keys_to_process(sack, object_prefix)) @contextlib.contextmanager - def process_measure_for_metric(self, metric): - sack = self.sack_for_metric(metric.id) - key_prefix = self.MEASURE_PREFIX + "_" + str(metric.id) + def process_measure_for_metric(self, metric_id): + sack = self.sack_for_metric(metric_id) + key_prefix = self.MEASURE_PREFIX + "_" + str(metric_id) processed_keys = [] with rados.ReadOpCtx() as op: diff --git a/gnocchi/incoming/file.py b/gnocchi/incoming/file.py index 2e7afa3b..4dafd5dc 100644 --- a/gnocchi/incoming/file.py +++ b/gnocchi/incoming/file.py @@ -73,13 +73,13 @@ class FileStorage(incoming.IncomingDriver): return os.path.join(path, random_id) return path - def _store_new_measures(self, metric, data): + def _store_new_measures(self, metric_id, data): tmpfile = tempfile.NamedTemporaryFile( prefix='gnocchi', dir=self.basepath_tmp, delete=False) tmpfile.write(data) tmpfile.close() - path = self._build_measure_path(metric.id, True) + path = self._build_measure_path(metric_id, True) while True: try: os.rename(tmpfile.name, path) @@ -88,7 +88,7 @@ class FileStorage(incoming.IncomingDriver): if e.errno != errno.ENOENT: raise try: - os.mkdir(self._build_measure_path(metric.id)) + os.mkdir(self._build_measure_path(metric_id)) except 
OSError as e: # NOTE(jd) It's possible that another process created the # path just before us! In this case, good for us, let's do @@ -101,14 +101,12 @@ class FileStorage(incoming.IncomingDriver): if details: def build_metric_report(metric, sack): report_vars['metric_details'][metric] = len( - self._list_measures_container_for_metric_id_str(sack, - metric)) + self._list_measures_container_for_metric_str(sack, metric)) else: def build_metric_report(metric, sack): report_vars['metrics'] += 1 report_vars['measures'] += len( - self._list_measures_container_for_metric_id_str(sack, - metric)) + self._list_measures_container_for_metric_str(sack, metric)) for i in six.moves.range(self.NUM_SACKS): for metric in self.list_metric_with_measures_to_process(i): @@ -122,10 +120,10 @@ class FileStorage(incoming.IncomingDriver): def list_metric_with_measures_to_process(self, sack): return set(self._list_target(self._sack_path(sack))) - def _list_measures_container_for_metric_id_str(self, sack, metric_id): + def _list_measures_container_for_metric_str(self, sack, metric_id): return self._list_target(self._measure_path(sack, metric_id)) - def _list_measures_container_for_metric_id(self, metric_id): + def _list_measures_container_for_metric(self, metric_id): return self._list_target(self._build_measure_path(metric_id)) @staticmethod @@ -138,7 +136,7 @@ class FileStorage(incoming.IncomingDriver): return [] raise - def _delete_measures_files_for_metric_id(self, metric_id, files): + def _delete_measures_files_for_metric(self, metric_id, files): for f in files: try: os.unlink(self._build_measure_path(metric_id, f)) @@ -157,23 +155,23 @@ class FileStorage(incoming.IncomingDriver): if e.errno not in (errno.ENOENT, errno.ENOTEMPTY, errno.EEXIST): raise - def delete_unprocessed_measures_for_metric_id(self, metric_id): - files = self._list_measures_container_for_metric_id(metric_id) - self._delete_measures_files_for_metric_id(metric_id, files) + def delete_unprocessed_measures_for_metric(self, 
metric_id): + files = self._list_measures_container_for_metric(metric_id) + self._delete_measures_files_for_metric(metric_id, files) - def has_unprocessed(self, metric): - return os.path.isdir(self._build_measure_path(metric.id)) + def has_unprocessed(self, metric_id): + return os.path.isdir(self._build_measure_path(metric_id)) @contextlib.contextmanager - def process_measure_for_metric(self, metric): - files = self._list_measures_container_for_metric_id(metric.id) + def process_measure_for_metric(self, metric_id): + files = self._list_measures_container_for_metric(metric_id) measures = self._make_measures_array() for f in files: - abspath = self._build_measure_path(metric.id, f) + abspath = self._build_measure_path(metric_id, f) with open(abspath, "rb") as e: measures = numpy.append( measures, self._unserialize_measures(f, e.read())) yield measures - self._delete_measures_files_for_metric_id(metric.id, files) + self._delete_measures_files_for_metric(metric_id, files) diff --git a/gnocchi/incoming/redis.py b/gnocchi/incoming/redis.py index 55e2f9f7..c1684684 100644 --- a/gnocchi/incoming/redis.py +++ b/gnocchi/incoming/redis.py @@ -52,9 +52,9 @@ class RedisStorage(incoming.IncomingDriver): def add_measures_batch(self, metrics_and_measures): notified_sacks = set() pipe = self._client.pipeline(transaction=False) - for metric, measures in six.iteritems(metrics_and_measures): - sack_name = self.get_sack_name(self.sack_for_metric(metric.id)) - path = self._build_measure_path_with_sack(metric.id, sack_name) + for metric_id, measures in six.iteritems(metrics_and_measures): + sack_name = self.get_sack_name(self.sack_for_metric(metric_id)) + path = self._build_measure_path_with_sack(metric_id, sack_name) pipe.rpush(path, self._encode_measures(measures)) if self.greedy and sack_name not in notified_sacks: # value has no meaning, we just use this for notification @@ -97,21 +97,21 @@ class RedisStorage(incoming.IncomingDriver): keys = self._client.scan_iter(match=match, 
count=1000) return set([k.split(redis.SEP)[1].decode("utf8") for k in keys]) - def delete_unprocessed_measures_for_metric_id(self, metric_id): + def delete_unprocessed_measures_for_metric(self, metric_id): self._client.delete(self._build_measure_path(metric_id)) - def has_unprocessed(self, metric): - return bool(self._client.exists(self._build_measure_path(metric.id))) + def has_unprocessed(self, metric_id): + return bool(self._client.exists(self._build_measure_path(metric_id))) @contextlib.contextmanager - def process_measure_for_metric(self, metric): - key = self._build_measure_path(metric.id) + def process_measure_for_metric(self, metric_id): + key = self._build_measure_path(metric_id) item_len = self._client.llen(key) # lrange is inclusive on both ends, decrease to grab exactly n items item_len = item_len - 1 if item_len else item_len yield self._array_concatenate([ - self._unserialize_measures('%s-%s' % (metric.id, i), data) + self._unserialize_measures('%s-%s' % (metric_id, i), data) for i, data in enumerate(self._client.lrange(key, 0, item_len)) ]) diff --git a/gnocchi/incoming/s3.py b/gnocchi/incoming/s3.py index 54c3b49f..b687582b 100644 --- a/gnocchi/incoming/s3.py +++ b/gnocchi/incoming/s3.py @@ -76,12 +76,12 @@ class S3Storage(incoming.IncomingDriver): # need to create bucket first to store storage settings object super(S3Storage, self).upgrade(num_sacks) - def _store_new_measures(self, metric, data): + def _store_new_measures(self, metric_id, data): now = datetime.datetime.utcnow().strftime("_%Y%m%d_%H:%M:%S") self.s3.put_object( Bucket=self._bucket_name_measures, - Key=(self.get_sack_name(self.sack_for_metric(metric.id)) - + six.text_type(metric.id) + "/" + Key=(self.get_sack_name(self.sack_for_metric(metric_id)) + + six.text_type(metric_id) + "/" + six.text_type(uuid.uuid4()) + now), Body=data) @@ -127,7 +127,7 @@ class S3Storage(incoming.IncomingDriver): metrics.add(p['Prefix'].split('/', 2)[1]) return metrics - def 
_list_measure_files_for_metric_id(self, sack, metric_id): + def _list_measure_files_for_metric(self, sack, metric_id): files = set() response = {} while response.get('IsTruncated', True): @@ -148,19 +148,19 @@ class S3Storage(incoming.IncomingDriver): return files - def delete_unprocessed_measures_for_metric_id(self, metric_id): + def delete_unprocessed_measures_for_metric(self, metric_id): sack = self.sack_for_metric(metric_id) - files = self._list_measure_files_for_metric_id(sack, metric_id) + files = self._list_measure_files_for_metric(sack, metric_id) s3.bulk_delete(self.s3, self._bucket_name_measures, files) - def has_unprocessed(self, metric): - sack = self.sack_for_metric(metric.id) - return bool(self._list_measure_files_for_metric_id(sack, metric.id)) + def has_unprocessed(self, metric_id): + sack = self.sack_for_metric(metric_id) + return bool(self._list_measure_files_for_metric(sack, metric_id)) @contextlib.contextmanager - def process_measure_for_metric(self, metric): - sack = self.sack_for_metric(metric.id) - files = self._list_measure_files_for_metric_id(sack, metric.id) + def process_measure_for_metric(self, metric_id): + sack = self.sack_for_metric(metric_id) + files = self._list_measure_files_for_metric(sack, metric_id) measures = self._make_measures_array() for f in files: diff --git a/gnocchi/incoming/swift.py b/gnocchi/incoming/swift.py index 8358ad4a..66681a25 100644 --- a/gnocchi/incoming/swift.py +++ b/gnocchi/incoming/swift.py @@ -50,11 +50,11 @@ class SwiftStorage(incoming.IncomingDriver): for i in six.moves.xrange(num_sacks): self.swift.delete_container(prefix % i) - def _store_new_measures(self, metric, data): + def _store_new_measures(self, metric_id, data): now = datetime.datetime.utcnow().strftime("_%Y%m%d_%H:%M:%S") self.swift.put_object( - self.get_sack_name(self.sack_for_metric(metric.id)), - six.text_type(metric.id) + "/" + six.text_type(uuid.uuid4()) + now, + self.get_sack_name(self.sack_for_metric(metric_id)), + 
six.text_type(metric_id) + "/" + six.text_type(uuid.uuid4()) + now, data) def _build_report(self, details): @@ -81,26 +81,26 @@ class SwiftStorage(incoming.IncomingDriver): self.get_sack_name(sack), delimiter='/', full_listing=True) return set(f['subdir'][:-1] for f in files if 'subdir' in f) - def _list_measure_files_for_metric_id(self, sack, metric_id): + def _list_measure_files_for_metric(self, sack, metric_id): headers, files = self.swift.get_container( self.get_sack_name(sack), path=six.text_type(metric_id), full_listing=True) return files - def delete_unprocessed_measures_for_metric_id(self, metric_id): + def delete_unprocessed_measures_for_metric(self, metric_id): sack = self.sack_for_metric(metric_id) - files = self._list_measure_files_for_metric_id(sack, metric_id) + files = self._list_measure_files_for_metric(sack, metric_id) swift.bulk_delete(self.swift, self.get_sack_name(sack), files) - def has_unprocessed(self, metric): - sack = self.sack_for_metric(metric.id) - return bool(self._list_measure_files_for_metric_id(sack, metric.id)) + def has_unprocessed(self, metric_id): + sack = self.sack_for_metric(metric_id) + return bool(self._list_measure_files_for_metric(sack, metric_id)) @contextlib.contextmanager - def process_measure_for_metric(self, metric): - sack = self.sack_for_metric(metric.id) + def process_measure_for_metric(self, metric_id): + sack = self.sack_for_metric(metric_id) sack_name = self.get_sack_name(sack) - files = self._list_measure_files_for_metric_id(sack, metric.id) + files = self._list_measure_files_for_metric(sack, metric_id) yield self._array_concatenate([ self._unserialize_measures( diff --git a/gnocchi/rest/api.py b/gnocchi/rest/api.py index f8b1850e..d3742a4f 100644 --- a/gnocchi/rest/api.py +++ b/gnocchi/rest/api.py @@ -457,7 +457,7 @@ class MetricController(rest.RestController): abort(400, "Invalid input for measures") if params: pecan.request.incoming.add_measures( - self.metric, MeasuresListSchema(params)) + self.metric.id, 
MeasuresListSchema(params)) pecan.response.status = 202 @pecan.expose('json') @@ -501,7 +501,7 @@ class MetricController(rest.RestController): abort(400, six.text_type(e)) if (strtobool("refresh", refresh) and - pecan.request.incoming.has_unprocessed(self.metric)): + pecan.request.incoming.has_unprocessed(self.metric.id)): try: pecan.request.storage.refresh_metric( pecan.request.indexer, pecan.request.incoming, self.metric, @@ -1596,7 +1596,7 @@ class ResourcesMetricsMeasuresBatchController(rest.RestController): enforce("post measures", metric) pecan.request.incoming.add_measures_batch( - dict((metric, + dict((metric.id, body_by_rid[metric.resource_id][metric.name]) for metric in known_metrics)) @@ -1629,7 +1629,7 @@ class MetricsMeasuresBatchController(rest.RestController): enforce("post measures", metric) pecan.request.incoming.add_measures_batch( - dict((metric, body[metric.id]) for metric in + dict((metric.id, body[metric.id]) for metric in metrics)) pecan.response.status = 202 @@ -1815,7 +1815,7 @@ class AggregationController(rest.RestController): if strtobool("refresh", refresh): metrics_to_update = [ m for m in metrics - if pecan.request.incoming.has_unprocessed(m)] + if pecan.request.incoming.has_unprocessed(m.id)] for m in metrics_to_update: try: pecan.request.storage.refresh_metric( @@ -2048,7 +2048,7 @@ class PrometheusWriteController(rest.RestController): enforce("post measures", metric) measures_to_batch.update( - dict((metric, measures[metric.name]) for metric in + dict((metric.id, measures[metric.name]) for metric in metrics if metric.name in measures)) pecan.request.incoming.add_measures_batch(measures_to_batch) diff --git a/gnocchi/rest/influxdb.py b/gnocchi/rest/influxdb.py index 09e574d9..22130e2e 100644 --- a/gnocchi/rest/influxdb.py +++ b/gnocchi/rest/influxdb.py @@ -245,7 +245,7 @@ class InfluxDBController(rest.RestController): api.enforce("post measures", metric) measures_to_batch.update( - dict((metric, metrics_and_measures[metric.name]) + 
dict((metric.id, metrics_and_measures[metric.name]) for metric in metrics if metric.name in metrics_and_measures)) diff --git a/gnocchi/statsd.py b/gnocchi/statsd.py index 02f9888c..95bb3cc8 100644 --- a/gnocchi/statsd.py +++ b/gnocchi/statsd.py @@ -113,7 +113,7 @@ class Stats(object): name=metric_name, resource_id=self.conf.statsd.resource_id) self.metrics[metric_name] = metric - self.incoming.add_measures(metric, (measure,)) + self.incoming.add_measures(metric.id, (measure,)) except Exception as e: LOG.error("Unable to add measure %s: %s", metric_name, e) diff --git a/gnocchi/storage/__init__.py b/gnocchi/storage/__init__.py index 67f68a20..74ac6ab3 100644 --- a/gnocchi/storage/__init__.py +++ b/gnocchi/storage/__init__.py @@ -403,7 +403,7 @@ class StorageDriver(object): # is going to process it anymore. lock.release() self._delete_metric(metric) - incoming.delete_unprocessed_measures_for_metric_id(metric.id) + incoming.delete_unprocessed_measures_for_metric(metric.id) LOG.debug("Deleted metric %s", metric) @staticmethod @@ -464,7 +464,7 @@ class StorageDriver(object): # NOTE(gordc): must lock at sack level try: LOG.debug("Processing measures for %s", metric) - with incoming.process_measure_for_metric(metric) \ + with incoming.process_measure_for_metric(metric.id) \ as measures: self._compute_and_store_timeseries(metric, measures) LOG.debug("Measures for metric %s processed", metric) diff --git a/gnocchi/tests/test_aggregates.py b/gnocchi/tests/test_aggregates.py index 87bf03c8..117ceb61 100644 --- a/gnocchi/tests/test_aggregates.py +++ b/gnocchi/tests/test_aggregates.py @@ -931,13 +931,13 @@ class CrossMetricAggregated(base.TestCase): def test_get_measures_unknown_aggregation(self): metric2 = indexer.Metric(uuid.uuid4(), self.archive_policies['low']) - self.incoming.add_measures(self.metric, [ + self.incoming.add_measures(self.metric.id, [ incoming.Measure(datetime64(2014, 1, 1, 12, 0, 1), 69), incoming.Measure(datetime64(2014, 1, 1, 12, 7, 31), 42), 
incoming.Measure(datetime64(2014, 1, 1, 12, 9, 31), 4), incoming.Measure(datetime64(2014, 1, 1, 12, 12, 45), 44), ]) - self.incoming.add_measures(metric2, [ + self.incoming.add_measures(metric2.id, [ incoming.Measure(datetime64(2014, 1, 1, 12, 0, 1), 69), incoming.Measure(datetime64(2014, 1, 1, 12, 7, 31), 42), incoming.Measure(datetime64(2014, 1, 1, 12, 9, 31), 4), @@ -957,13 +957,13 @@ class CrossMetricAggregated(base.TestCase): def test_get_measures_unknown_granularity(self): metric2 = indexer.Metric(uuid.uuid4(), self.archive_policies['low']) - self.incoming.add_measures(self.metric, [ + self.incoming.add_measures(self.metric.id, [ incoming.Measure(datetime64(2014, 1, 1, 12, 0, 1), 69), incoming.Measure(datetime64(2014, 1, 1, 12, 7, 31), 42), incoming.Measure(datetime64(2014, 1, 1, 12, 9, 31), 4), incoming.Measure(datetime64(2014, 1, 1, 12, 12, 45), 44), ]) - self.incoming.add_measures(metric2, [ + self.incoming.add_measures(metric2.id, [ incoming.Measure(datetime64(2014, 1, 1, 12, 0, 1), 69), incoming.Measure(datetime64(2014, 1, 1, 12, 7, 31), 42), incoming.Measure(datetime64(2014, 1, 1, 12, 9, 31), 4), @@ -984,13 +984,13 @@ class CrossMetricAggregated(base.TestCase): def test_add_and_get_measures_different_archives(self): metric2 = indexer.Metric(uuid.uuid4(), self.archive_policies['no_granularity_match']) - self.incoming.add_measures(self.metric, [ + self.incoming.add_measures(self.metric.id, [ incoming.Measure(datetime64(2014, 1, 1, 12, 0, 1), 69), incoming.Measure(datetime64(2014, 1, 1, 12, 7, 31), 42), incoming.Measure(datetime64(2014, 1, 1, 12, 9, 31), 4), incoming.Measure(datetime64(2014, 1, 1, 12, 12, 45), 44), ]) - self.incoming.add_measures(metric2, [ + self.incoming.add_measures(metric2.id, [ incoming.Measure(datetime64(2014, 1, 1, 12, 0, 1), 69), incoming.Measure(datetime64(2014, 1, 1, 12, 7, 31), 42), incoming.Measure(datetime64(2014, 1, 1, 12, 9, 31), 4), @@ -1010,13 +1010,13 @@ class CrossMetricAggregated(base.TestCase): def 
test_add_and_get_measures(self): metric2, __ = self._create_metric() - self.incoming.add_measures(self.metric, [ + self.incoming.add_measures(self.metric.id, [ incoming.Measure(datetime64(2014, 1, 1, 12, 0, 1), 69), incoming.Measure(datetime64(2014, 1, 1, 12, 7, 31), 42), incoming.Measure(datetime64(2014, 1, 1, 12, 9, 31), 4), incoming.Measure(datetime64(2014, 1, 1, 12, 12, 45), 44), ]) - self.incoming.add_measures(metric2, [ + self.incoming.add_measures(metric2.id, [ incoming.Measure(datetime64(2014, 1, 1, 12, 0, 5), 9), incoming.Measure(datetime64(2014, 1, 1, 12, 7, 41), 2), incoming.Measure(datetime64(2014, 1, 1, 12, 10, 31), 4), @@ -1168,14 +1168,14 @@ class CrossMetricAggregated(base.TestCase): def test_add_and_get_measures_with_holes(self): metric2, __ = self._create_metric() - self.incoming.add_measures(self.metric, [ + self.incoming.add_measures(self.metric.id, [ incoming.Measure(datetime64(2014, 1, 1, 12, 0, 1), 69), incoming.Measure(datetime64(2014, 1, 1, 12, 7, 31), 42), incoming.Measure(datetime64(2014, 1, 1, 12, 5, 31), 8), incoming.Measure(datetime64(2014, 1, 1, 12, 9, 31), 4), incoming.Measure(datetime64(2014, 1, 1, 12, 12, 45), 42), ]) - self.incoming.add_measures(metric2, [ + self.incoming.add_measures(metric2.id, [ incoming.Measure(datetime64(2014, 1, 1, 12, 0, 5), 9), incoming.Measure(datetime64(2014, 1, 1, 12, 7, 31), 2), incoming.Measure(datetime64(2014, 1, 1, 12, 9, 31), 6), @@ -1207,13 +1207,13 @@ class CrossMetricAggregated(base.TestCase): def test_resample(self): metric2, __ = self._create_metric() - self.incoming.add_measures(self.metric, [ + self.incoming.add_measures(self.metric.id, [ incoming.Measure(datetime64(2014, 1, 1, 12, 0, 1), 69), incoming.Measure(datetime64(2014, 1, 1, 13, 1, 31), 42), incoming.Measure(datetime64(2014, 1, 1, 14, 2, 31), 4), incoming.Measure(datetime64(2014, 1, 1, 15, 3, 45), 44), ]) - self.incoming.add_measures(metric2, [ + self.incoming.add_measures(metric2.id, [ incoming.Measure(datetime64(2014, 1, 1, 12, 0, 
5), 9), incoming.Measure(datetime64(2014, 1, 1, 13, 1, 41), 2), incoming.Measure(datetime64(2014, 1, 1, 14, 2, 31), 4), @@ -1244,13 +1244,13 @@ class CrossMetricAggregated(base.TestCase): def test_resample_minus_2_on_right(self): metric2, __ = self._create_metric() - self.incoming.add_measures(self.metric, [ + self.incoming.add_measures(self.metric.id, [ incoming.Measure(datetime64(2014, 1, 1, 12, 0, 1), 69), incoming.Measure(datetime64(2014, 1, 1, 13, 1, 31), 42), incoming.Measure(datetime64(2014, 1, 1, 14, 2, 31), 4), incoming.Measure(datetime64(2014, 1, 1, 15, 3, 45), 44), ]) - self.incoming.add_measures(metric2, [ + self.incoming.add_measures(metric2.id, [ incoming.Measure(datetime64(2014, 1, 1, 12, 0, 5), 9), incoming.Measure(datetime64(2014, 1, 1, 13, 1, 41), 2), incoming.Measure(datetime64(2014, 1, 1, 14, 2, 31), 4), @@ -1281,13 +1281,13 @@ class CrossMetricAggregated(base.TestCase): def test_resample_minus_2_on_left(self): metric2, __ = self._create_metric() - self.incoming.add_measures(self.metric, [ + self.incoming.add_measures(self.metric.id, [ incoming.Measure(datetime64(2014, 1, 1, 12, 0, 1), 69), incoming.Measure(datetime64(2014, 1, 1, 13, 1, 31), 42), incoming.Measure(datetime64(2014, 1, 1, 14, 2, 31), 4), incoming.Measure(datetime64(2014, 1, 1, 15, 3, 45), 44), ]) - self.incoming.add_measures(metric2, [ + self.incoming.add_measures(metric2.id, [ incoming.Measure(datetime64(2014, 1, 1, 12, 0, 5), 9), incoming.Measure(datetime64(2014, 1, 1, 13, 1, 41), 2), incoming.Measure(datetime64(2014, 1, 1, 14, 2, 31), 4), @@ -1320,13 +1320,13 @@ class CrossMetricAggregated(base.TestCase): def test_rolling(self): metric2, __ = self._create_metric() - self.incoming.add_measures(self.metric, [ + self.incoming.add_measures(self.metric.id, [ incoming.Measure(datetime64(2014, 1, 1, 12, 0, 1), 69), incoming.Measure(datetime64(2014, 1, 1, 12, 5, 31), 42), incoming.Measure(datetime64(2014, 1, 1, 12, 10, 31), 4), incoming.Measure(datetime64(2014, 1, 1, 12, 15, 45), 44), 
]) - self.incoming.add_measures(metric2, [ + self.incoming.add_measures(metric2.id, [ incoming.Measure(datetime64(2014, 1, 1, 12, 0, 5), 9), incoming.Measure(datetime64(2014, 1, 1, 12, 5, 41), 2), incoming.Measure(datetime64(2014, 1, 1, 12, 10, 31), 4), @@ -1364,13 +1364,13 @@ class CrossMetricAggregated(base.TestCase): def test_binary_operator_with_two_references(self): metric2, __ = self._create_metric() - self.incoming.add_measures(self.metric, [ + self.incoming.add_measures(self.metric.id, [ incoming.Measure(datetime64(2014, 1, 1, 12, 0, 1), 69), incoming.Measure(datetime64(2014, 1, 1, 13, 1, 31), 42), incoming.Measure(datetime64(2014, 1, 1, 14, 2, 31), 4), incoming.Measure(datetime64(2014, 1, 1, 15, 3, 45), 44), ]) - self.incoming.add_measures(metric2, [ + self.incoming.add_measures(metric2.id, [ incoming.Measure(datetime64(2014, 1, 1, 12, 0, 5), 9), incoming.Measure(datetime64(2014, 1, 1, 13, 1, 41), 2), incoming.Measure(datetime64(2014, 1, 1, 14, 2, 31), 4), @@ -1399,7 +1399,7 @@ class CrossMetricAggregated(base.TestCase): def test_binary_operator_ts_on_left(self): metric2, __ = self._create_metric() - self.incoming.add_measures(self.metric, [ + self.incoming.add_measures(self.metric.id, [ incoming.Measure(datetime64(2014, 1, 1, 12, 0, 1), 69), incoming.Measure(datetime64(2014, 1, 1, 13, 1, 31), 42), incoming.Measure(datetime64(2014, 1, 1, 14, 2, 31), 4), @@ -1426,7 +1426,7 @@ class CrossMetricAggregated(base.TestCase): def test_binary_operator_ts_on_right(self): metric2, __ = self._create_metric() - self.incoming.add_measures(self.metric, [ + self.incoming.add_measures(self.metric.id, [ incoming.Measure(datetime64(2014, 1, 1, 12, 0, 1), 69), incoming.Measure(datetime64(2014, 1, 1, 13, 1, 31), 42), incoming.Measure(datetime64(2014, 1, 1, 14, 2, 31), 4), @@ -1452,13 +1452,13 @@ class CrossMetricAggregated(base.TestCase): def test_mix(self): metric2, __ = self._create_metric() - self.incoming.add_measures(self.metric, [ + 
self.incoming.add_measures(self.metric.id, [ incoming.Measure(datetime64(2014, 1, 1, 12, 0, 1), 69), incoming.Measure(datetime64(2014, 1, 1, 13, 1, 31), 42), incoming.Measure(datetime64(2014, 1, 1, 14, 2, 31), 4), incoming.Measure(datetime64(2014, 1, 1, 15, 3, 45), 44), ]) - self.incoming.add_measures(metric2, [ + self.incoming.add_measures(metric2.id, [ incoming.Measure(datetime64(2014, 1, 1, 12, 0, 5), 9), incoming.Measure(datetime64(2014, 1, 1, 13, 1, 41), 2), incoming.Measure(datetime64(2014, 1, 1, 14, 2, 31), 4), @@ -1490,13 +1490,13 @@ class CrossMetricAggregated(base.TestCase): def test_bool(self): metric2, __ = self._create_metric() - self.incoming.add_measures(self.metric, [ + self.incoming.add_measures(self.metric.id, [ incoming.Measure(datetime64(2014, 1, 1, 12, 0, 1), 69), incoming.Measure(datetime64(2014, 1, 1, 13, 1, 31), 42), incoming.Measure(datetime64(2014, 1, 1, 14, 2, 31), 4), incoming.Measure(datetime64(2014, 1, 1, 15, 3, 45), 44), ]) - self.incoming.add_measures(metric2, [ + self.incoming.add_measures(metric2.id, [ incoming.Measure(datetime64(2014, 1, 1, 12, 0, 5), 9), incoming.Measure(datetime64(2014, 1, 1, 13, 1, 41), 2), incoming.Measure(datetime64(2014, 1, 1, 14, 2, 31), 4), @@ -1536,13 +1536,13 @@ class CrossMetricAggregated(base.TestCase): def test_unary_operator(self): metric2, _ = self._create_metric() - self.incoming.add_measures(self.metric, [ + self.incoming.add_measures(self.metric.id, [ incoming.Measure(datetime64(2014, 1, 1, 12, 0, 1), -69), incoming.Measure(datetime64(2014, 1, 1, 13, 1, 31), 42), incoming.Measure(datetime64(2014, 1, 1, 14, 2, 31), -4), incoming.Measure(datetime64(2014, 1, 1, 15, 3, 45), 44), ]) - self.incoming.add_measures(metric2, [ + self.incoming.add_measures(metric2.id, [ incoming.Measure(datetime64(2014, 1, 1, 12, 0, 5), -9), incoming.Measure(datetime64(2014, 1, 1, 13, 1, 41), -2), incoming.Measure(datetime64(2014, 1, 1, 14, 2, 31), 4), diff --git a/gnocchi/tests/test_incoming.py 
b/gnocchi/tests/test_incoming.py index 7c5d0816..d67c0fac 100644 --- a/gnocchi/tests/test_incoming.py +++ b/gnocchi/tests/test_incoming.py @@ -59,7 +59,7 @@ class TestIncomingDriver(tests_base.TestCase): # NOTE(jd) Retry to send measures. It cannot be done only once as # there might be a race condition between the threads self.incoming.finish_sack_processing(sack_to_find) - self.incoming.add_measures(self.metric, [ + self.incoming.add_measures(self.metric.id, [ incoming.Measure(numpy.datetime64("2014-01-01 12:00:01"), 69), ]) else: diff --git a/gnocchi/tests/test_storage.py b/gnocchi/tests/test_storage.py index 41d55254..ba1a0710 100644 --- a/gnocchi/tests/test_storage.py +++ b/gnocchi/tests/test_storage.py @@ -66,12 +66,12 @@ class TestStorageDriver(tests_base.TestCase): self.assertIsInstance(driver, storage.StorageDriver) def test_corrupted_data(self): - self.incoming.add_measures(self.metric, [ + self.incoming.add_measures(self.metric.id, [ incoming.Measure(datetime64(2014, 1, 1, 12, 0, 1), 69), ]) self.trigger_processing() - self.incoming.add_measures(self.metric, [ + self.incoming.add_measures(self.metric.id, [ incoming.Measure(datetime64(2014, 1, 1, 13, 0, 1), 1), ]) @@ -90,7 +90,7 @@ class TestStorageDriver(tests_base.TestCase): numpy.timedelta64(5, 'm'), 1), m) def test_aborted_initial_processing(self): - self.incoming.add_measures(self.metric, [ + self.incoming.add_measures(self.metric.id, [ incoming.Measure(datetime64(2014, 1, 1, 12, 0, 1), 5), ]) with mock.patch.object(self.storage, '_store_unaggregated_timeserie', @@ -115,7 +115,7 @@ class TestStorageDriver(tests_base.TestCase): def test_list_metric_with_measures_to_process(self): metrics = tests_utils.list_all_incoming_metrics(self.incoming) self.assertEqual(set(), metrics) - self.incoming.add_measures(self.metric, [ + self.incoming.add_measures(self.metric.id, [ incoming.Measure(datetime64(2014, 1, 1, 12, 0, 1), 69), ]) metrics = tests_utils.list_all_incoming_metrics(self.incoming) @@ -125,7 +125,7 @@ 
class TestStorageDriver(tests_base.TestCase): self.assertEqual(set([]), metrics) def test_delete_nonempty_metric(self): - self.incoming.add_measures(self.metric, [ + self.incoming.add_measures(self.metric.id, [ incoming.Measure(datetime64(2014, 1, 1, 12, 0, 1), 69), ]) self.trigger_processing() @@ -137,7 +137,7 @@ class TestStorageDriver(tests_base.TestCase): self.metric) def test_delete_nonempty_metric_unprocessed(self): - self.incoming.add_measures(self.metric, [ + self.incoming.add_measures(self.metric.id, [ incoming.Measure(datetime64(2014, 1, 1, 12, 0, 1), 69), ]) self.index.delete_metric(self.metric.id) @@ -149,7 +149,7 @@ class TestStorageDriver(tests_base.TestCase): self.assertNotIn(str(self.metric.id), details) def test_delete_expunge_metric(self): - self.incoming.add_measures(self.metric, [ + self.incoming.add_measures(self.metric.id, [ incoming.Measure(datetime64(2014, 1, 1, 12, 0, 1), 69), ]) self.trigger_processing() @@ -176,10 +176,10 @@ class TestStorageDriver(tests_base.TestCase): def test_measures_reporting(self): m2, __ = self._create_metric('medium') for i in six.moves.range(60): - self.incoming.add_measures(self.metric, [ + self.incoming.add_measures(self.metric.id, [ incoming.Measure(datetime64(2014, 1, 1, 12, 0, i), 69), ]) - self.incoming.add_measures(m2, [ + self.incoming.add_measures(m2.id, [ incoming.Measure(datetime64(2014, 1, 1, 12, 0, i), 69), ]) report = self.incoming.measures_report(True) @@ -195,7 +195,7 @@ class TestStorageDriver(tests_base.TestCase): def test_add_measures_big(self): m, __ = self._create_metric('high') - self.incoming.add_measures(m, [ + self.incoming.add_measures(m.id, [ incoming.Measure(datetime64(2014, 1, 1, 12, i, j), 100) for i in six.moves.range(0, 60) for j in six.moves.range(0, 60)]) self.trigger_processing([str(m.id)]) @@ -208,11 +208,11 @@ class TestStorageDriver(tests_base.TestCase): measures = [ incoming.Measure(datetime64(2014, 1, 6, i, j, 0), 100) for i in six.moves.range(2) for j in six.moves.range(0, 
60, 2)] - self.incoming.add_measures(m, measures) + self.incoming.add_measures(m.id, measures) self.trigger_processing([str(m.id)]) # add measure to end, in same aggregate time as last point. - self.incoming.add_measures(m, [ + self.incoming.add_measures(m.id, [ incoming.Measure(datetime64(2014, 1, 6, 1, 58, 1), 100)]) with mock.patch.object(self.storage, '_store_metric_measures') as c: @@ -233,13 +233,12 @@ class TestStorageDriver(tests_base.TestCase): measures = [ incoming.Measure(datetime64(2014, 1, 6, i, j, 0), 100) for i in six.moves.range(2) for j in six.moves.range(0, 60, 2)] - self.incoming.add_measures(m, measures) + self.incoming.add_measures(m.id, measures) self.trigger_processing([str(m.id)]) # add measure to end, in same aggregate time as last point. new_point = datetime64(2014, 1, 6, 1, 58, 1) - self.incoming.add_measures( - m, [incoming.Measure(new_point, 100)]) + self.incoming.add_measures(m.id, [incoming.Measure(new_point, 100)]) with mock.patch.object(self.incoming, 'add_measures') as c: self.trigger_processing([str(m.id)]) @@ -249,7 +248,7 @@ class TestStorageDriver(tests_base.TestCase): new_point, args[1].granularity * 10e8)) def test_delete_old_measures(self): - self.incoming.add_measures(self.metric, [ + self.incoming.add_measures(self.metric.id, [ incoming.Measure(datetime64(2014, 1, 1, 12, 0, 1), 69), incoming.Measure(datetime64(2014, 1, 1, 12, 7, 31), 42), incoming.Measure(datetime64(2014, 1, 1, 12, 9, 31), 4), @@ -266,7 +265,7 @@ class TestStorageDriver(tests_base.TestCase): ], self.storage.get_measures(self.metric)) # One year later… - self.incoming.add_measures(self.metric, [ + self.incoming.add_measures(self.metric.id, [ incoming.Measure(datetime64(2015, 1, 1, 12, 0, 1), 69), ]) self.trigger_processing() @@ -305,7 +304,7 @@ class TestStorageDriver(tests_base.TestCase): apname) # First store some points scattered across different splits - self.incoming.add_measures(self.metric, [ + self.incoming.add_measures(self.metric.id, [ 
incoming.Measure(datetime64(2016, 1, 1, 12, 0, 1), 69), incoming.Measure(datetime64(2016, 1, 2, 13, 7, 31), 42), incoming.Measure(datetime64(2016, 1, 4, 14, 9, 31), 4), @@ -360,7 +359,7 @@ class TestStorageDriver(tests_base.TestCase): # split (keep in mind the back window size in one hour here). We move # the BoundTimeSerie processing timeserie far away from its current # range. - self.incoming.add_measures(self.metric, [ + self.incoming.add_measures(self.metric.id, [ incoming.Measure(datetime64(2016, 1, 10, 16, 18, 45), 45), incoming.Measure(datetime64(2016, 1, 10, 17, 12, 45), 46), ]) @@ -426,7 +425,7 @@ class TestStorageDriver(tests_base.TestCase): apname) # First store some points scattered across different splits - self.incoming.add_measures(self.metric, [ + self.incoming.add_measures(self.metric.id, [ incoming.Measure(datetime64(2016, 1, 1, 12, 0, 1), 69), incoming.Measure(datetime64(2016, 1, 2, 13, 7, 31), 42), incoming.Measure(datetime64(2016, 1, 4, 14, 9, 31), 4), @@ -484,7 +483,7 @@ class TestStorageDriver(tests_base.TestCase): # Here we test a special case where the oldest_mutable_timestamp will # be 2016-01-10TOO:OO:OO = 1452384000.0, our new split key. - self.incoming.add_measures(self.metric, [ + self.incoming.add_measures(self.metric.id, [ incoming.Measure(datetime64(2016, 1, 10, 0, 12), 45), ]) self.trigger_processing() @@ -547,7 +546,7 @@ class TestStorageDriver(tests_base.TestCase): apname) # First store some points scattered across different splits - self.incoming.add_measures(self.metric, [ + self.incoming.add_measures(self.metric.id, [ incoming.Measure(datetime64(2016, 1, 1, 12, 0, 1), 69), incoming.Measure(datetime64(2016, 1, 2, 13, 7, 31), 42), incoming.Measure(datetime64(2016, 1, 4, 14, 9, 31), 4), @@ -615,7 +614,7 @@ class TestStorageDriver(tests_base.TestCase): # split (keep in mind the back window size in one hour here). We move # the BoundTimeSerie processing timeserie far away from its current # range. 
- self.incoming.add_measures(self.metric, [ + self.incoming.add_measures(self.metric.id, [ incoming.Measure(datetime64(2016, 1, 10, 16, 18, 45), 45), incoming.Measure(datetime64(2016, 1, 10, 17, 12, 45), 46), ]) @@ -632,7 +631,7 @@ class TestStorageDriver(tests_base.TestCase): apname) # First store some points scattered across different splits - self.incoming.add_measures(self.metric, [ + self.incoming.add_measures(self.metric.id, [ incoming.Measure(datetime64(2016, 1, 1, 12, 0, 1), 69), incoming.Measure(datetime64(2016, 1, 2, 13, 7, 31), 42), incoming.Measure(datetime64(2016, 1, 4, 14, 9, 31), 4), @@ -695,14 +694,14 @@ class TestStorageDriver(tests_base.TestCase): # split (keep in mind the back window size in one hour here). We move # the BoundTimeSerie processing timeserie far away from its current # range. - self.incoming.add_measures(self.metric, [ + self.incoming.add_measures(self.metric.id, [ incoming.Measure(datetime64(2016, 1, 10, 16, 18, 45), 45), incoming.Measure(datetime64(2016, 1, 10, 17, 12, 45), 46), ]) self.trigger_processing() def test_updated_measures(self): - self.incoming.add_measures(self.metric, [ + self.incoming.add_measures(self.metric.id, [ incoming.Measure(datetime64(2014, 1, 1, 12, 0, 1), 69), incoming.Measure(datetime64(2014, 1, 1, 12, 7, 31), 42), ]) @@ -715,7 +714,7 @@ class TestStorageDriver(tests_base.TestCase): (datetime64(2014, 1, 1, 12, 5), numpy.timedelta64(5, 'm'), 42.0), ], self.storage.get_measures(self.metric)) - self.incoming.add_measures(self.metric, [ + self.incoming.add_measures(self.metric.id, [ incoming.Measure(datetime64(2014, 1, 1, 12, 9, 31), 4), incoming.Measure(datetime64(2014, 1, 1, 12, 12, 45), 44), ]) @@ -746,7 +745,7 @@ class TestStorageDriver(tests_base.TestCase): ], self.storage.get_measures(self.metric, aggregation='min')) def test_add_and_get_measures(self): - self.incoming.add_measures(self.metric, [ + self.incoming.add_measures(self.metric.id, [ incoming.Measure(datetime64(2014, 1, 1, 12, 0, 1), 69), 
incoming.Measure(datetime64(2014, 1, 1, 12, 7, 31), 42), incoming.Measure(datetime64(2014, 1, 1, 12, 9, 31), 4), @@ -828,7 +827,7 @@ class TestStorageDriver(tests_base.TestCase): granularity=numpy.timedelta64(42, 's')) def test_get_measure_unknown_aggregation(self): - self.incoming.add_measures(self.metric, [ + self.incoming.add_measures(self.metric.id, [ incoming.Measure(datetime64(2014, 1, 1, 12, 0, 1), 69), incoming.Measure(datetime64(2014, 1, 1, 12, 7, 31), 42), incoming.Measure(datetime64(2014, 1, 1, 12, 9, 31), 4), @@ -840,7 +839,7 @@ class TestStorageDriver(tests_base.TestCase): def test_search_value(self): metric2, __ = self._create_metric() - self.incoming.add_measures(self.metric, [ + self.incoming.add_measures(self.metric.id, [ incoming.Measure(datetime64(2014, 1, 1, 12, 0, 1,), 69), incoming.Measure(datetime64(2014, 1, 1, 12, 7, 31), 42), incoming.Measure(datetime64(2014, 1, 1, 12, 5, 31), 8), @@ -848,7 +847,7 @@ class TestStorageDriver(tests_base.TestCase): incoming.Measure(datetime64(2014, 1, 1, 12, 12, 45), 42), ]) - self.incoming.add_measures(metric2, [ + self.incoming.add_measures(metric2.id, [ incoming.Measure(datetime64(2014, 1, 1, 12, 0, 5), 9), incoming.Measure(datetime64(2014, 1, 1, 12, 7, 31), 2), incoming.Measure(datetime64(2014, 1, 1, 12, 9, 31), 6), @@ -885,7 +884,7 @@ class TestStorageDriver(tests_base.TestCase): self.index.create_archive_policy(ap) m = self.index.create_metric(uuid.uuid4(), str(uuid.uuid4()), name) m = self.index.list_metrics(attribute_filter={"=": {"id": m.id}})[0] - self.incoming.add_measures(m, [ + self.incoming.add_measures(m.id, [ incoming.Measure(datetime64(2014, 1, 1, 12, 0, 0), 1), incoming.Measure(datetime64(2014, 1, 1, 12, 0, 5), 1), incoming.Measure(datetime64(2014, 1, 1, 12, 0, 10), 1), @@ -900,7 +899,7 @@ class TestStorageDriver(tests_base.TestCase): self.index.update_archive_policy( name, [archive_policy.ArchivePolicyItem(granularity=5, points=6)]) m = self.index.list_metrics(attribute_filter={"=": {"id": 
m.id}})[0] - self.incoming.add_measures(m, [ + self.incoming.add_measures(m.id, [ incoming.Measure(datetime64(2014, 1, 1, 12, 0, 15), 1), ]) self.trigger_processing([str(m.id)]) -- GitLab From 30d865e2b2c94ebf7884927bf9c2d849d29eed91 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Tue, 12 Dec 2017 16:10:53 +0100 Subject: [PATCH 1148/1483] file: move directory creation in the upgrade path, not init --- gnocchi/storage/file.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/gnocchi/storage/file.py b/gnocchi/storage/file.py index 375338fc..1f7a0bba 100644 --- a/gnocchi/storage/file.py +++ b/gnocchi/storage/file.py @@ -39,6 +39,8 @@ class FileStorage(storage.StorageDriver): super(FileStorage, self).__init__(conf, coord) self.basepath = conf.file_basepath self.basepath_tmp = os.path.join(self.basepath, 'tmp') + + def upgrade(self): utils.ensure_paths([self.basepath_tmp]) def __str__(self): -- GitLab From 572f015db93c931a155a173f59c184803ac7d2d6 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Wed, 13 Dec 2017 15:29:50 +0100 Subject: [PATCH 1149/1483] storage: remove double check of granularity The storage engine check twice that the granularity matches. Do it once only. 
--- gnocchi/storage/__init__.py | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) diff --git a/gnocchi/storage/__init__.py b/gnocchi/storage/__init__.py index 74ac6ab3..b60cc901 100644 --- a/gnocchi/storage/__init__.py +++ b/gnocchi/storage/__init__.py @@ -241,18 +241,14 @@ class StorageDriver(object): else: raise GranularityDoesNotExist(metric, granularity) - all_keys = None try: all_keys = self._list_split_keys_for_metric( metric, aggregation, granularity) except MetricDoesNotExist: - for d in metric.archive_policy.definition: - if d.granularity == granularity: - return carbonara.AggregatedTimeSerie( - sampling=granularity, - aggregation_method=aggregation, - max_size=d.points) - raise GranularityDoesNotExist(metric, granularity) + return carbonara.AggregatedTimeSerie( + sampling=granularity, + aggregation_method=aggregation, + max_size=points) if from_timestamp: from_timestamp = carbonara.SplitKey.from_timestamp_and_sampling( -- GitLab From 965f908718dd4045337b60983ec3e4879a0c8f6e Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Wed, 13 Dec 2017 15:48:53 +0100 Subject: [PATCH 1150/1483] storage: merge GranularityDoesNotExist and AggregationDoesNotExist We often don't have enough information to distinguish the two and it does not matter in the end. Simplify the code base a bit by merging the two without losing information. 
--- gnocchi/rest/aggregates/processor.py | 8 +++-- gnocchi/rest/api.py | 6 ++-- gnocchi/storage/__init__.py | 36 ++++++++++--------- gnocchi/storage/ceph.py | 3 +- gnocchi/storage/file.py | 3 +- gnocchi/storage/redis.py | 3 +- gnocchi/storage/s3.py | 3 +- gnocchi/storage/swift.py | 3 +- .../tests/functional/gabbits/aggregation.yaml | 2 +- .../gabbits/metric-granularity.yaml | 2 +- gnocchi/tests/functional/gabbits/metric.yaml | 2 +- .../functional/gabbits/search-metric.yaml | 17 ++++++--- gnocchi/tests/test_storage.py | 2 +- 13 files changed, 55 insertions(+), 35 deletions(-) diff --git a/gnocchi/rest/aggregates/processor.py b/gnocchi/rest/aggregates/processor.py index e48eff14..3be1396b 100644 --- a/gnocchi/rest/aggregates/processor.py +++ b/gnocchi/rest/aggregates/processor.py @@ -75,8 +75,12 @@ def get_measures(storage, references, operations, for ref in references: if (ref.aggregation not in ref.metric.archive_policy.aggregation_methods): - raise gnocchi_storage.AggregationDoesNotExist(ref.metric, - ref.aggregation) + raise gnocchi_storage.AggregationDoesNotExist( + ref.metric, ref.aggregation, + # Use the first granularity, that should be good enough since + # they are all missing anyway + ref.metric.archive_policy.definition[0].granularity) + if granularity is not None: for d in ref.metric.archive_policy.definition: if d.granularity == granularity: diff --git a/gnocchi/rest/api.py b/gnocchi/rest/api.py index d3742a4f..d7ea6d3b 100644 --- a/gnocchi/rest/api.py +++ b/gnocchi/rest/api.py @@ -513,7 +513,6 @@ class MetricController(rest.RestController): self.metric, start, stop, aggregation, granularity, resample) except (storage.MetricDoesNotExist, - storage.GranularityDoesNotExist, storage.AggregationDoesNotExist) as e: abort(404, six.text_type(e)) @@ -1497,8 +1496,8 @@ class SearchMetricController(rest.RestController): } except storage.InvalidQuery as e: abort(400, six.text_type(e)) - except storage.GranularityDoesNotExist as e: - abort(400, six.text_type(e)) + 
except storage.AggregationDoesNotExist as e: + abort(400, e) class ResourcesMetricsMeasuresBatchController(rest.RestController): @@ -1837,7 +1836,6 @@ class AggregationController(rest.RestController): except exceptions.UnAggregableTimeseries as e: abort(400, e) except (storage.MetricDoesNotExist, - storage.GranularityDoesNotExist, storage.AggregationDoesNotExist) as e: abort(404, six.text_type(e)) diff --git a/gnocchi/storage/__init__.py b/gnocchi/storage/__init__.py index b60cc901..8e360d89 100644 --- a/gnocchi/storage/__init__.py +++ b/gnocchi/storage/__init__.py @@ -59,23 +59,25 @@ class MetricDoesNotExist(StorageError): class AggregationDoesNotExist(StorageError): """Error raised when the aggregation method doesn't exists for a metric.""" - def __init__(self, metric, method): + def __init__(self, metric, method, granularity): self.metric = metric self.method = method - super(AggregationDoesNotExist, self).__init__( - "Aggregation method '%s' for metric %s does not exist" % - (method, metric)) - - -class GranularityDoesNotExist(StorageError): - """Error raised when the granularity doesn't exist for a metric.""" - - def __init__(self, metric, granularity): - self.metric = metric self.granularity = granularity - super(GranularityDoesNotExist, self).__init__( - "Granularity '%s' for metric %s does not exist" % - (utils.timespan_total_seconds(granularity), metric)) + super(AggregationDoesNotExist, self).__init__( + "Aggregation method '%s' at granularity '%s' " + "for metric %s does not exist" % + (method, utils.timespan_total_seconds(granularity), metric)) + + def jsonify(self): + return { + "cause": "Aggregation does not exist", + "detail": { + # FIXME(jd) Pecan does not use our JSON renderer for errors + # So we need to convert this + "granularity": utils.timespan_total_seconds(self.granularity), + "aggregation_method": self.method, + }, + } class MetricAlreadyExists(StorageError): @@ -198,7 +200,9 @@ class StorageDriver(object): :param resample: The granularity 
to resample to. """ if aggregation not in metric.archive_policy.aggregation_methods: - raise AggregationDoesNotExist(metric, aggregation) + if granularity is None: + granularity = metric.archive_policy.definition[0].granularity + raise AggregationDoesNotExist(metric, aggregation, granularity) if granularity is None: agg_timeseries = utils.parallel_map( @@ -239,7 +243,7 @@ class StorageDriver(object): points = d.points break else: - raise GranularityDoesNotExist(metric, granularity) + raise AggregationDoesNotExist(metric, aggregation, granularity) try: all_keys = self._list_split_keys_for_metric( diff --git a/gnocchi/storage/ceph.py b/gnocchi/storage/ceph.py index 65660373..7846dd34 100644 --- a/gnocchi/storage/ceph.py +++ b/gnocchi/storage/ceph.py @@ -144,7 +144,8 @@ class CephStorage(storage.StorageDriver): except rados.ObjectNotFound: if self._object_exists( self._build_unaggregated_timeserie_path(metric, 3)): - raise storage.AggregationDoesNotExist(metric, aggregation) + raise storage.AggregationDoesNotExist( + metric, aggregation, key.sampling) else: raise storage.MetricDoesNotExist(metric) diff --git a/gnocchi/storage/file.py b/gnocchi/storage/file.py index 1f7a0bba..5d00da8b 100644 --- a/gnocchi/storage/file.py +++ b/gnocchi/storage/file.py @@ -150,6 +150,7 @@ class FileStorage(storage.StorageDriver): except IOError as e: if e.errno == errno.ENOENT: if os.path.exists(self._build_metric_dir(metric)): - raise storage.AggregationDoesNotExist(metric, aggregation) + raise storage.AggregationDoesNotExist( + metric, aggregation, key.sampling) raise storage.MetricDoesNotExist(metric) raise diff --git a/gnocchi/storage/redis.py b/gnocchi/storage/redis.py index eeeb4dd6..27554236 100644 --- a/gnocchi/storage/redis.py +++ b/gnocchi/storage/redis.py @@ -99,5 +99,6 @@ class RedisStorage(storage.StorageDriver): if data is None: if not self._client.exists(redis_key): raise storage.MetricDoesNotExist(metric) - raise storage.AggregationDoesNotExist(metric, aggregation) + 
raise storage.AggregationDoesNotExist( + metric, aggregation, key.sampling) return data diff --git a/gnocchi/storage/s3.py b/gnocchi/storage/s3.py index ac9593ef..98b6b3dd 100644 --- a/gnocchi/storage/s3.py +++ b/gnocchi/storage/s3.py @@ -173,7 +173,8 @@ class S3Storage(storage.StorageDriver): if e.response['Error'].get('Code') == 'NoSuchKey': raise storage.MetricDoesNotExist(metric) raise - raise storage.AggregationDoesNotExist(metric, aggregation) + raise storage.AggregationDoesNotExist( + metric, aggregation, key.sampling) raise return response['Body'].read() diff --git a/gnocchi/storage/swift.py b/gnocchi/storage/swift.py index 98f7cf51..b17e24fb 100644 --- a/gnocchi/storage/swift.py +++ b/gnocchi/storage/swift.py @@ -155,7 +155,8 @@ class SwiftStorage(storage.StorageDriver): if e.http_status == 404: raise storage.MetricDoesNotExist(metric) raise - raise storage.AggregationDoesNotExist(metric, aggregation) + raise storage.AggregationDoesNotExist( + metric, aggregation, key.sampling) raise return contents diff --git a/gnocchi/tests/functional/gabbits/aggregation.yaml b/gnocchi/tests/functional/gabbits/aggregation.yaml index 4c3dbe47..abce43dc 100644 --- a/gnocchi/tests/functional/gabbits/aggregation.yaml +++ b/gnocchi/tests/functional/gabbits/aggregation.yaml @@ -176,7 +176,7 @@ tests: GET: /v1/aggregation/metric?metric=$HISTORY['get metric list'].$RESPONSE['$[0].id']&granularity=42 status: 404 response_strings: - - Granularity '42.0' for metric + - Aggregation method 'mean' at granularity '42.0' for metric # Aggregation by resource and metric_name diff --git a/gnocchi/tests/functional/gabbits/metric-granularity.yaml b/gnocchi/tests/functional/gabbits/metric-granularity.yaml index c015e513..3cb3d1bb 100644 --- a/gnocchi/tests/functional/gabbits/metric-granularity.yaml +++ b/gnocchi/tests/functional/gabbits/metric-granularity.yaml @@ -43,7 +43,7 @@ tests: GET: /v1/metric/$RESPONSE['$[0].id']/measures?granularity=42 status: 404 response_strings: - - Granularity 
'42.0' for metric $RESPONSE['$[0].id'] does not exist + - Aggregation method 'mean' at granularity '42.0' for metric $RESPONSE['$[0].id'] does not exist - name: get measurements granularity GET: /v1/metric/$HISTORY['get metric list'].$RESPONSE['$[0].id']/measures?granularity=1 diff --git a/gnocchi/tests/functional/gabbits/metric.yaml b/gnocchi/tests/functional/gabbits/metric.yaml index fd3e7e86..9e9ce279 100644 --- a/gnocchi/tests/functional/gabbits/metric.yaml +++ b/gnocchi/tests/functional/gabbits/metric.yaml @@ -367,7 +367,7 @@ tests: GET: /v1/aggregation/metric?metric=$HISTORY['get metric list for aggregates'].$RESPONSE['$[0].id']&aggregation=last status: 404 response_strings: - - Aggregation method 'last' for metric $RESPONSE['$[0].id'] does not exist + - Aggregation method 'last' at granularity '1.0' for metric - name: aggregate measure unknown metric GET: /v1/aggregation/metric?metric=cee6ef1f-52cc-4a16-bbb5-648aedfd1c37 diff --git a/gnocchi/tests/functional/gabbits/search-metric.yaml b/gnocchi/tests/functional/gabbits/search-metric.yaml index 0a781e6f..812f1d9a 100644 --- a/gnocchi/tests/functional/gabbits/search-metric.yaml +++ b/gnocchi/tests/functional/gabbits/search-metric.yaml @@ -90,16 +90,25 @@ tests: data: "=": 12 status: 400 - response_strings: - - Granularity '300.0' for metric $HISTORY['get metric id'].$RESPONSE['$[0].id'] does not exist + request_headers: + accept: application/json + response_json_paths: + $.description.cause: Aggregation does not exist + $.description.detail.granularity: 300 + $.description.detail.aggregation_method: mean + - name: search with incorrect granularity POST: /v1/search/metric?metric_id=$HISTORY['get metric id'].$RESPONSE['$[0].id']&granularity=300 data: "=": 12 status: 400 - response_strings: - - Granularity '300.0' for metric $HISTORY['get metric id'].$RESPONSE['$[0].id'] does not exist + request_headers: + accept: application/json + response_json_paths: + $.description.cause: Aggregation does not exist + 
$.description.detail.granularity: 300 + $.description.detail.aggregation_method: mean - name: search measure with wrong start POST: /v1/search/metric?metric_id=$HISTORY['get metric id'].$RESPONSE['$[0].id']&start=foobar diff --git a/gnocchi/tests/test_storage.py b/gnocchi/tests/test_storage.py index ba1a0710..1f53aca6 100644 --- a/gnocchi/tests/test_storage.py +++ b/gnocchi/tests/test_storage.py @@ -821,7 +821,7 @@ class TestStorageDriver(tests_base.TestCase): to_timestamp=datetime64(2014, 1, 1, 12, 0, 2), granularity=numpy.timedelta64(5, 'm'))) - self.assertRaises(storage.GranularityDoesNotExist, + self.assertRaises(storage.AggregationDoesNotExist, self.storage.get_measures, self.metric, granularity=numpy.timedelta64(42, 's')) -- GitLab From d130e69e669c3470ce859cd3ab8c60c0523be56b Mon Sep 17 00:00:00 2001 From: gord chung Date: Tue, 12 Dec 2017 21:03:32 +0000 Subject: [PATCH 1151/1483] cleanup numpy+list usage - no real point creating a new list with tolist. zip can loop through array - to_timestamps requires a list so no point passing creating input as generator - numpy suggest to set 'count' when using fromiter. 
timeit shows no real diff on small sizes --- gnocchi/rest/api.py | 5 ++--- gnocchi/utils.py | 5 ++--- 2 files changed, 4 insertions(+), 6 deletions(-) diff --git a/gnocchi/rest/api.py b/gnocchi/rest/api.py index d7ea6d3b..a371ae8d 100644 --- a/gnocchi/rest/api.py +++ b/gnocchi/rest/api.py @@ -418,7 +418,7 @@ class ArchivePolicyRuleController(rest.RestController): def MeasuresListSchema(measures): try: - times = utils.to_timestamps((m['timestamp'] for m in measures)) + times = utils.to_timestamps([m['timestamp'] for m in measures]) except TypeError: abort(400, "Invalid format for measures") except ValueError as e: @@ -429,8 +429,7 @@ def MeasuresListSchema(measures): except Exception: abort(400, "Invalid input for a value") - return (incoming.Measure(t, v) for t, v in six.moves.zip( - times.tolist(), values)) + return (incoming.Measure(t, v) for t, v in six.moves.zip(times, values)) class MetricController(rest.RestController): diff --git a/gnocchi/utils.py b/gnocchi/utils.py index d4da8b16..1576ae7b 100644 --- a/gnocchi/utils.py +++ b/gnocchi/utils.py @@ -75,7 +75,6 @@ unix_universal_start64 = numpy.datetime64("1970") def to_timestamps(values): try: - values = list(values) if len(values) == 0: return [] if isinstance(values[0], (numpy.datetime64, datetime.datetime)): @@ -98,7 +97,7 @@ def to_timestamps(values): times = numpy.fromiter( numpy.add(numpy.datetime64(utcnow()), [to_timespan(v, True) for v in values]), - dtype='datetime64[ns]') + dtype='datetime64[ns]', count=len(values)) else: times = numpy.array(values, dtype='datetime64[ns]') else: @@ -115,7 +114,7 @@ def to_timestamps(values): def to_timestamp(value): - return to_timestamps((value,))[0] + return to_timestamps([value])[0] def to_datetime(value): -- GitLab From 7dae8e77f568d91fbe0b7bb2d2b5a246dad4a7eb Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Thu, 14 Dec 2017 16:34:03 +0100 Subject: [PATCH 1152/1483] storage: stop checking for archive policy aggregation methods match MIME-Version: 1.0 
Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The storage API is currently responsible to check that requesting an aggregation method for a metric is possible or not based on its archive policy. This is out of its job actually, and it should just return an empty series if it has nothing – or raise MetricDoesNotExist if the metric is unknown. The API will take the job of doing this validation before hitting the storage subsystem. --- gnocchi/rest/api.py | 22 +++++++++++++++++++- gnocchi/storage/__init__.py | 5 ----- gnocchi/tests/functional/gabbits/metric.yaml | 8 +++++-- gnocchi/tests/test_storage.py | 5 ++--- 4 files changed, 29 insertions(+), 11 deletions(-) diff --git a/gnocchi/rest/api.py b/gnocchi/rest/api.py index a371ae8d..7a0946c7 100644 --- a/gnocchi/rest/api.py +++ b/gnocchi/rest/api.py @@ -499,6 +499,15 @@ class MetricController(rest.RestController): except ValueError as e: abort(400, six.text_type(e)) + if aggregation not in self.metric.archive_policy.aggregation_methods: + abort(404, { + "cause": "Aggregation method does not exist for this metric", + "detail": { + "metric": self.metric.id, + "aggregation_method": aggregation, + }, + }) + if (strtobool("refresh", refresh) and pecan.request.incoming.has_unprocessed(self.metric.id)): try: @@ -1824,8 +1833,19 @@ class AggregationController(rest.RestController): if number_of_metrics == 1: # NOTE(sileht): don't do the aggregation if we only have one # metric + metric = metrics[0] + if (aggregation + not in metric.archive_policy.aggregation_methods): + abort(404, { + "cause": + "Aggregation method does not exist for this metric", + "detail": { + "metric": str(metric.id), + "aggregation_method": aggregation, + }, + }) return pecan.request.storage.get_measures( - metrics[0], start, stop, aggregation, + metric, start, stop, aggregation, granularity, resample) return processor.get_measures( pecan.request.storage, diff --git a/gnocchi/storage/__init__.py b/gnocchi/storage/__init__.py 
index 8e360d89..6328b465 100644 --- a/gnocchi/storage/__init__.py +++ b/gnocchi/storage/__init__.py @@ -199,11 +199,6 @@ class StorageDriver(object): :param granularity: The granularity to retrieve. :param resample: The granularity to resample to. """ - if aggregation not in metric.archive_policy.aggregation_methods: - if granularity is None: - granularity = metric.archive_policy.definition[0].granularity - raise AggregationDoesNotExist(metric, aggregation, granularity) - if granularity is None: agg_timeseries = utils.parallel_map( self._get_measures_timeserie, diff --git a/gnocchi/tests/functional/gabbits/metric.yaml b/gnocchi/tests/functional/gabbits/metric.yaml index 9e9ce279..89e7a3bc 100644 --- a/gnocchi/tests/functional/gabbits/metric.yaml +++ b/gnocchi/tests/functional/gabbits/metric.yaml @@ -366,8 +366,12 @@ tests: - name: get measure unknown aggregates GET: /v1/aggregation/metric?metric=$HISTORY['get metric list for aggregates'].$RESPONSE['$[0].id']&aggregation=last status: 404 - response_strings: - - Aggregation method 'last' at granularity '1.0' for metric + request_headers: + accept: application/json + response_json_paths: + $.description.cause: Aggregation method does not exist for this metric + $.description.detail.metric: $HISTORY['get metric list for aggregates'].$RESPONSE['$[0].id'] + $.description.detail.aggregation_method: last - name: aggregate measure unknown metric GET: /v1/aggregation/metric?metric=cee6ef1f-52cc-4a16-bbb5-648aedfd1c37 diff --git a/gnocchi/tests/test_storage.py b/gnocchi/tests/test_storage.py index 1f53aca6..6ab9dfb9 100644 --- a/gnocchi/tests/test_storage.py +++ b/gnocchi/tests/test_storage.py @@ -833,9 +833,8 @@ class TestStorageDriver(tests_base.TestCase): incoming.Measure(datetime64(2014, 1, 1, 12, 9, 31), 4), incoming.Measure(datetime64(2014, 1, 1, 12, 12, 45), 44), ]) - self.assertRaises(storage.AggregationDoesNotExist, - self.storage.get_measures, - self.metric, aggregation='last') + self.assertEqual( + [], 
self.storage.get_measures(self.metric, aggregation='last')) def test_search_value(self): metric2, __ = self._create_metric() -- GitLab From 040d1077057845395d884494ceb637502a9e9fd3 Mon Sep 17 00:00:00 2001 From: gord chung Date: Fri, 15 Dec 2017 16:29:35 +0000 Subject: [PATCH 1153/1483] bad internal link rest docs --- doc/source/rest.j2 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/source/rest.j2 b/doc/source/rest.j2 index 11edf77b..df3c8c2b 100644 --- a/doc/source/rest.j2 +++ b/doc/source/rest.j2 @@ -201,7 +201,7 @@ can be resampled to a new |granularity|. .. note:: - Gnocchi has an :ref:;`aggregates ` endpoint which provides + Gnocchi has an :ref:`aggregates ` endpoint which provides resampling as well as additional capabilities. -- GitLab From f3a550e46eb604f74ae4164540d6aefaa59fab23 Mon Sep 17 00:00:00 2001 From: gord chung Date: Mon, 18 Dec 2017 18:40:02 +0000 Subject: [PATCH 1154/1483] optimise aggregations - vectorize std. ~35x faster - use bincount+weights to compute sum. ~1.7x faster - use sum to get mean. ~1.6x faster - insert creates a copy. just flip array to get first. 
~1.1x faster --- gnocchi/carbonara.py | 46 +++++++++++++++++++++----------------------- 1 file changed, 22 insertions(+), 24 deletions(-) diff --git a/gnocchi/carbonara.py b/gnocchi/carbonara.py index 03174534..705175d4 100644 --- a/gnocchi/carbonara.py +++ b/gnocchi/carbonara.py @@ -124,10 +124,14 @@ class GroupedTimeSeries(object): return_counts=True) def mean(self): - return self._scipy_aggregate(ndimage.mean) + series = self.sum() + series['values'] /= self.counts + return series def sum(self): - return self._scipy_aggregate(ndimage.sum) + return make_timeseries(self.tstamps, numpy.bincount( + numpy.repeat(numpy.arange(self.counts.size), self.counts), + weights=self._ts['values'])) def min(self): return self._scipy_aggregate(ndimage.minimum) @@ -139,14 +143,15 @@ class GroupedTimeSeries(object): return self._scipy_aggregate(ndimage.median) def std(self): - # NOTE(sileht): ndimage.standard_deviation is really more performant - # but it use ddof=0, to get the same result as pandas we have to use - # ddof=1. If one day scipy allow to pass ddof, this should be changed. 
- return self._scipy_aggregate(ndimage.labeled_comprehension, - remove_unique=True, - func=functools.partial(numpy.std, ddof=1), - out_dtype='float64', - default=None) + mean_ts = self.mean() + diff_sq = numpy.square(self._ts['values'] - + numpy.repeat(mean_ts['values'], self.counts)) + bin_sum = numpy.bincount( + numpy.repeat(numpy.arange(self.counts.size), self.counts), + weights=diff_sq) + return make_timeseries(self.tstamps[self.counts > 1], + numpy.sqrt(bin_sum[self.counts > 1] / + (self.counts[self.counts > 1] - 1))) def count(self): return make_timeseries(self.tstamps, self.counts) @@ -154,14 +159,12 @@ class GroupedTimeSeries(object): def last(self): cumcounts = numpy.cumsum(self.counts) - 1 values = self._ts['values'][cumcounts] - return make_timeseries(self.tstamps, values) def first(self): - counts = numpy.insert(self.counts[:-1], 0, 0) - cumcounts = numpy.cumsum(counts) - values = self._ts['values'][cumcounts] - return make_timeseries(self.tstamps, values) + cumcounts = numpy.cumsum(self.counts[::-1]) - 1 + values = self._ts['values'][::-1][cumcounts] + return make_timeseries(self.tstamps, values[::-1]) def quantile(self, q): return self._scipy_aggregate(ndimage.labeled_comprehension, @@ -172,18 +175,13 @@ class GroupedTimeSeries(object): out_dtype='float64', default=None) - def _scipy_aggregate(self, method, remove_unique=False, *args, **kwargs): - if remove_unique: - tstamps = self.tstamps[self.counts > 1] - else: - tstamps = self.tstamps - - if len(tstamps) == 0: + def _scipy_aggregate(self, method, *args, **kwargs): + if len(self.tstamps) == 0: return make_timeseries([], []) - values = method(self._ts['values'], self.indexes, tstamps, + values = method(self._ts['values'], self.indexes, self.tstamps, *args, **kwargs) - return make_timeseries(tstamps, values) + return make_timeseries(self.tstamps, values) def derived(self): timestamps = self._ts_for_derive['timestamps'][1:] -- GitLab From 29ed321ccb9160cb6c703bb38c241208f8c044ee Mon Sep 17 00:00:00 
2001 From: Julien Danjou Date: Tue, 12 Dec 2017 21:45:36 +0100 Subject: [PATCH 1155/1483] metricd: group metric by sack when expunging The current code is pretty naive, and if you have S sacks and M metrics, it will lock M sacks. This patch groups metrics by sack and locks at most S sacks. --- gnocchi/storage/__init__.py | 65 +++++++++++++++++++++-------------- gnocchi/tests/test_storage.py | 2 +- 2 files changed, 41 insertions(+), 26 deletions(-) diff --git a/gnocchi/storage/__init__.py b/gnocchi/storage/__init__.py index 6328b465..334f72a2 100644 --- a/gnocchi/storage/__init__.py +++ b/gnocchi/storage/__init__.py @@ -39,6 +39,9 @@ OPTS = [ LOG = daiquiri.getLogger(__name__) +ITEMGETTER_1 = operator.itemgetter(1) + + class StorageError(Exception): pass @@ -387,20 +390,6 @@ class StorageDriver(object): def _delete_metric(metric): raise NotImplementedError - def delete_metric(self, incoming, metric, sync=False): - LOG.debug("Deleting metric %s", metric) - lock = incoming.get_sack_lock( - self.coord, incoming.sack_for_metric(metric.id)) - if not lock.acquire(blocking=sync): - raise LockedMetric(metric) - # NOTE(gordc): no need to hold lock because the metric has been already - # marked as "deleted" in the indexer so no measure worker - # is going to process it anymore. 
- lock.release() - self._delete_metric(metric) - incoming.delete_unprocessed_measures_for_metric(metric.id) - LOG.debug("Deleted metric %s", metric) - @staticmethod def _delete_metric_measures(metric, timestamp_key, aggregation, granularity, version=3): @@ -428,21 +417,47 @@ class StorageDriver(object): on error :type sync: bool """ - - metrics_to_expunge = index.list_metrics(status='delete') - for m in metrics_to_expunge: + # FIXME(jd) The indexer could return them sorted/grouped by directly + metrics_to_expunge = sorted( + ((m, incoming.sack_for_metric(m.id)) + for m in index.list_metrics(status='delete')), + key=ITEMGETTER_1) + for sack, metrics in itertools.groupby( + metrics_to_expunge, key=ITEMGETTER_1): try: - self.delete_metric(incoming, m, sync) - index.expunge_metric(m.id) - except (indexer.NoSuchMetric, LockedMetric): - # It's possible another process deleted or is deleting the - # metric, not a big deal - pass + lock = incoming.get_sack_lock(self.coord, sack) + if not lock.acquire(blocking=sync): + # Retry later + LOG.debug( + "Sack %s is locked, cannot expunge metrics", sack) + continue + # NOTE(gordc): no need to hold lock because the metric has been + # already marked as "deleted" in the indexer so no measure + # worker is going to process it anymore. 
+ lock.release() except Exception: if sync: raise - LOG.error("Unable to expunge metric %s from storage", m, - exc_info=True) + LOG.error("Unable to lock sack %s for expunging metrics", + sack, exc_info=True) + else: + for metric, sack in metrics: + LOG.debug("Deleting metric %s", metric) + try: + incoming.delete_unprocessed_measures_for_metric( + metric.id) + self._delete_metric(metric) + try: + index.expunge_metric(metric.id) + except indexer.NoSuchMetric: + # It's possible another process deleted or is + # deleting the metric, not a big deal + pass + except Exception: + if sync: + raise + LOG.error("Unable to expunge metric %s from storage", + metric, exc_info=True) def process_new_measures(self, indexer, incoming, metrics_to_process, sync=False): diff --git a/gnocchi/tests/test_storage.py b/gnocchi/tests/test_storage.py index 6ab9dfb9..3a0275a0 100644 --- a/gnocchi/tests/test_storage.py +++ b/gnocchi/tests/test_storage.py @@ -129,7 +129,7 @@ class TestStorageDriver(tests_base.TestCase): incoming.Measure(datetime64(2014, 1, 1, 12, 0, 1), 69), ]) self.trigger_processing() - self.storage.delete_metric(self.incoming, self.metric, sync=True) + self.storage._delete_metric(self.metric) self.trigger_processing() self.assertEqual([], self.storage.get_measures(self.metric)) self.assertRaises(storage.MetricDoesNotExist, -- GitLab From 14f0892c3b041514a9bb91f7903b6ebdd810bd60 Mon Sep 17 00:00:00 2001 From: gord chung Date: Tue, 19 Dec 2017 22:24:50 +0000 Subject: [PATCH 1156/1483] fix devstack coordination setup it's missing the storage section so incorrectly setting coordination_url as the section --- devstack/plugin.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/devstack/plugin.sh b/devstack/plugin.sh index 4bd189c1..4ba12128 100644 --- a/devstack/plugin.sh +++ b/devstack/plugin.sh @@ -215,7 +215,7 @@ function configure_gnocchi { fi if [ -n "$GNOCCHI_COORDINATOR_URL" ]; then - iniset $GNOCCHI_CONF coordination_url "$GNOCCHI_COORDINATOR_URL" + iniset 
$GNOCCHI_CONF DEFAULT coordination_url "$GNOCCHI_COORDINATOR_URL" fi if is_service_enabled gnocchi-statsd ; then -- GitLab From 1a1bbf97dabd49728f91486c7673a302c08ae06f Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Mon, 18 Dec 2017 09:33:58 +0100 Subject: [PATCH 1157/1483] rest: lazy load backends When one of the backends is unreachable, the error differs depending on whether the issue occurs at startup or at runtime. This change ensures any errors are raised at runtime. Closes-bug: #585 --- gnocchi/rest/app.py | 61 ++++++++++++++++------------ gnocchi/tests/functional/fixtures.py | 26 ++++++++++-- gnocchi/tests/test_rest.py | 17 ++++++-- 3 files changed, 71 insertions(+), 33 deletions(-) diff --git a/gnocchi/rest/app.py b/gnocchi/rest/app.py index 5fbd095b..66728285 100644 --- a/gnocchi/rest/app.py +++ b/gnocchi/rest/app.py @@ -15,6 +15,7 @@ # under the License. import os import pkg_resources +import threading import uuid import daiquiri @@ -42,12 +43,18 @@ LOG = daiquiri.getLogger(__name__) jsonify.jsonify.register(object)(json.to_primitive) +def get_storage_driver(conf): + # NOTE(jd) This coordinator is never stop. I don't think it's a + # real problem since the Web app can never really be stopped + # anyway, except by quitting it entirely. 
+ coord = metricd.get_coordinator_and_start(conf.coordination_url) + return gnocchi_storage.get_driver(conf, coord) + + class GnocchiHook(pecan.hooks.PecanHook): - def __init__(self, storage, indexer, incoming, conf): - self.storage = storage - self.indexer = indexer - self.incoming = incoming + def __init__(self, conf): + self.backends = {} self.conf = conf self.policy_enforcer = policy.Enforcer(conf) self.auth_helper = driver.DriverManager("gnocchi.rest.auth_helper", @@ -55,13 +62,30 @@ class GnocchiHook(pecan.hooks.PecanHook): invoke_on_load=True).driver def on_route(self, state): - state.request.storage = self.storage - state.request.indexer = self.indexer - state.request.incoming = self.incoming + state.request.storage = self._lazy_load('storage') + state.request.indexer = self._lazy_load('indexer') + state.request.incoming = self._lazy_load('incoming') state.request.conf = self.conf state.request.policy_enforcer = self.policy_enforcer state.request.auth_helper = self.auth_helper + BACKEND_LOADERS = { + 'storage': (threading.Lock(), get_storage_driver), + 'incoming': (threading.Lock(), gnocchi_incoming.get_driver), + 'indexer': (threading.Lock(), gnocchi_indexer.get_driver), + } + + def _lazy_load(self, name): + # NOTE(sileht): We don't care about raise error here, if something + # fail, this will just raise a 500, until the backend is ready. + if name not in self.backends: + lock, loader = self.BACKEND_LOADERS[name] + with lock: + # Recheck, maybe it have been created in the meantime. 
+ if name not in self.backends: + self.backends[name] = loader(self.conf) + return self.backends[name] + class NotImplementedMiddleware(object): def __init__(self, app): @@ -96,22 +120,9 @@ global APPCONFIGS APPCONFIGS = {} -def load_app(conf, indexer=None, storage=None, incoming=None, coord=None, - not_implemented_middleware=True): +def load_app(conf, not_implemented_middleware=True): global APPCONFIGS - if not storage: - if not coord: - # NOTE(jd) This coordinator is never stop. I don't think it's a - # real problem since the Web app can never really be stopped - # anyway, except by quitting it entirely. - coord = metricd.get_coordinator_and_start(conf.coordination_url) - storage = gnocchi_storage.get_driver(conf, coord) - if not incoming: - incoming = gnocchi_incoming.get_driver(conf) - if not indexer: - indexer = gnocchi_indexer.get_driver(conf) - # Build the WSGI app cfg_path = conf.api.paste_config if not os.path.isabs(cfg_path): @@ -122,8 +133,7 @@ def load_app(conf, indexer=None, storage=None, incoming=None, coord=None, cfg_path = os.path.abspath(pkg_resources.resource_filename( __name__, "api-paste.ini")) - config = dict(conf=conf, indexer=indexer, storage=storage, - incoming=incoming, + config = dict(conf=conf, not_implemented_middleware=not_implemented_middleware) configkey = str(uuid.uuid4()) APPCONFIGS[configkey] = config @@ -136,11 +146,10 @@ def load_app(conf, indexer=None, storage=None, incoming=None, coord=None, return cors.CORS(app, conf=conf) -def _setup_app(root, conf, indexer, storage, incoming, - not_implemented_middleware): +def _setup_app(root, conf, not_implemented_middleware): app = pecan.make_app( root, - hooks=(GnocchiHook(storage, indexer, incoming, conf),), + hooks=(GnocchiHook(conf),), guess_content_type_from_ext=False, custom_renderers={"json": JsonRenderer} ) diff --git a/gnocchi/tests/functional/fixtures.py b/gnocchi/tests/functional/fixtures.py index 97acbcb9..00092999 100644 --- a/gnocchi/tests/functional/fixtures.py +++ 
b/gnocchi/tests/functional/fixtures.py @@ -14,6 +14,8 @@ # under the License. """Fixtures for use with gabbi tests.""" +from __future__ import absolute_import + import os import shutil import tempfile @@ -23,6 +25,7 @@ from unittest import case import warnings import daiquiri +import fixtures from gabbi import fixture import numpy from oslo_config import cfg @@ -146,11 +149,21 @@ class ConfigFixture(fixture.GabbiFixture): i = incoming.get_driver(conf) i.upgrade(128) + self.fixtures = [ + fixtures.MockPatch("gnocchi.storage.get_driver", + return_value=s), + fixtures.MockPatch("gnocchi.incoming.get_driver", + return_value=i), + fixtures.MockPatch("gnocchi.indexer.get_driver", + return_value=self.index), + fixtures.MockPatch( + "gnocchi.cli.metricd.get_coordinator_and_start", + return_value=self.coord), + ] + for f in self.fixtures: + f.setUp() + LOAD_APP_KWARGS = { - 'coord': self.coord, - 'storage': s, - 'indexer': index, - 'incoming': i, 'conf': conf, } @@ -160,10 +173,15 @@ class ConfigFixture(fixture.GabbiFixture): def stop_fixture(self): """Clean up the config fixture and storage artifacts.""" + if hasattr(self, 'metricd_thread'): self.metricd_thread.stop() self.metricd_thread.join() + if hasattr(self, 'fixtures'): + for f in reversed(self.fixtures): + f.cleanUp() + if hasattr(self, 'index'): self.index.disconnect() diff --git a/gnocchi/tests/test_rest.py b/gnocchi/tests/test_rest.py index 97db5a5b..d1942bd9 100644 --- a/gnocchi/tests/test_rest.py +++ b/gnocchi/tests/test_rest.py @@ -23,6 +23,7 @@ import hashlib import json import uuid +import fixtures import iso8601 from keystonemiddleware import fixture as ksm_fixture import mock @@ -172,16 +173,26 @@ class RestTest(tests_base.TestCase, testscenarios.TestWithScenarios): self.conf.set_override("auth_mode", self.auth_mode, group="api") + self.useFixture(fixtures.MockPatchObject( + app.GnocchiHook, "_lazy_load", self._fake_lazy_load)) + self.app = TestingApp(app.load_app(conf=self.conf, - indexer=self.index, - 
storage=self.storage, - incoming=self.incoming, not_implemented_middleware=False), storage=self.storage, indexer=self.index, incoming=self.incoming, auth_mode=self.auth_mode) + def _fake_lazy_load(self, name): + if name == "storage": + return self.storage + elif name == "indexer": + return self.index + elif name == "incoming": + return self.incoming + else: + raise RuntimeError("Invalid driver type: %s" % name) + # NOTE(jd) Used at least by docs @staticmethod def runTest(): -- GitLab From e51a80a77ca4f65096a8a2177d8abf52874ce5c4 Mon Sep 17 00:00:00 2001 From: sum12 Date: Mon, 18 Dec 2017 14:38:45 +0100 Subject: [PATCH 1158/1483] devstack: redis on opensuse needs to have default config this patch adds a default config and uses template of version of unitfile to restart redis --- devstack/plugin.sh | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/devstack/plugin.sh b/devstack/plugin.sh index 4ba12128..3bf35931 100644 --- a/devstack/plugin.sh +++ b/devstack/plugin.sh @@ -116,7 +116,14 @@ function _gnocchi_install_redis { else # This will fail (correctly) where a redis package is unavailable install_package redis - restart_service redis + if is_suse; then + # opensuse intsall multi-instance version of redis + # and admin is expected to install the required conf + cp /etc/redis/default.conf.example /etc/redis/default.conf + restart_service redis@default + else + restart_service redis + fi fi pip_install_gr redis -- GitLab From 9a050c345756cdccaa3eb35a84c72db4e477cb10 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Thu, 14 Dec 2017 17:49:43 +0100 Subject: [PATCH 1159/1483] storage: rework search_value This cleans a bit the old search_value/find_measure storage API to do most of the API related work in the API, and only do the minimum work in the storage part. 
--- gnocchi/rest/api.py | 36 +++++++++++++++------- gnocchi/storage/__init__.py | 49 +++--------------------------- gnocchi/tests/test_storage.py | 57 +++++++++++++++++++++++------------ 3 files changed, 68 insertions(+), 74 deletions(-) diff --git a/gnocchi/rest/api.py b/gnocchi/rest/api.py index 7a0946c7..e0945f56 100644 --- a/gnocchi/rest/api.py +++ b/gnocchi/rest/api.py @@ -1467,8 +1467,6 @@ class SearchMetricController(rest.RestController): @pecan.expose('json') def post(self, metric_id, start=None, stop=None, aggregation='mean', granularity=None): - granularity = [utils.to_timespan(g) - for g in arg_to_list(granularity or [])] metrics = pecan.request.indexer.list_metrics( attribute_filter={"in": {"id": arg_to_list(metric_id)}}) @@ -1493,20 +1491,36 @@ class SearchMetricController(rest.RestController): abort(400, "Invalid value for stop") try: - return { - str(metric.id): values - for metric, values in six.iteritems( - pecan.request.storage.search_value( - metrics, query, start, stop, aggregation, - granularity - ) - ) - } + predicate = storage.MeasureQuery(query) except storage.InvalidQuery as e: abort(400, six.text_type(e)) + + if granularity is not None: + granularity = sorted( + map(utils.to_timespan, arg_to_list(granularity)), + reverse=True) + + results = {} + + try: + for metric in metrics: + if granularity is None: + granularity = sorted(( + d.granularity + for d in metric.archive_policy.definition), + reverse=True) + results[str(metric.id)] = [] + for r in utils.parallel_map( + pecan.request.storage.find_measure, + ((metric, predicate, g, aggregation, + start, stop) + for g in granularity)): + results[str(metric.id)].extend(r) except storage.AggregationDoesNotExist as e: abort(400, e) + return results + class ResourcesMetricsMeasuresBatchController(rest.RestController): @pecan.expose('json') diff --git a/gnocchi/storage/__init__.py b/gnocchi/storage/__init__.py index 334f72a2..865d0638 100644 --- a/gnocchi/storage/__init__.py +++ 
b/gnocchi/storage/__init__.py @@ -14,7 +14,6 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. -import collections import functools import itertools import operator @@ -568,53 +567,15 @@ class StorageDriver(object): self._store_unaggregated_timeserie(metric, ts.serialize()) - def _find_measure(self, metric, aggregation, granularity, predicate, - from_timestamp, to_timestamp): + def find_measure(self, metric, predicate, granularity, aggregation="mean", + from_timestamp=None, to_timestamp=None): timeserie = self._get_measures_timeserie( metric, aggregation, granularity, from_timestamp, to_timestamp) values = timeserie.fetch(from_timestamp, to_timestamp) - return {metric: - [(timestamp, g, value) - for timestamp, g, value in values - if predicate(value)]} - - def search_value(self, metrics, query, from_timestamp=None, - to_timestamp=None, aggregation='mean', - granularity=None): - """Search for an aggregated value that realizes a predicate. - - :param metrics: The list of metrics to look into. - :param query: The query being sent. - :param from_timestamp: The timestamp to get the measure from. - :param to_timestamp: The timestamp to get the measure to. - :param aggregation: The type of aggregation to retrieve. - :param granularity: The granularity to retrieve. 
- """ - - granularity = granularity or [] - predicate = MeasureQuery(query) - - results = utils.parallel_map( - self._find_measure, - [(metric, aggregation, - gran, predicate, - from_timestamp, to_timestamp) - for metric in metrics - for gran in granularity or - (defin.granularity - for defin in metric.archive_policy.definition)]) - result = collections.defaultdict(list) - for r in results: - for metric, metric_result in six.iteritems(r): - result[metric].extend(metric_result) - - # Sort the result - for metric, r in six.iteritems(result): - # Sort by timestamp asc, granularity desc - r.sort(key=lambda t: (t[0], - t[1])) - - return result + return [(timestamp, g, value) + for timestamp, g, value in values + if predicate(value)] class MeasureQuery(object): diff --git a/gnocchi/tests/test_storage.py b/gnocchi/tests/test_storage.py index 3a0275a0..6eab39ee 100644 --- a/gnocchi/tests/test_storage.py +++ b/gnocchi/tests/test_storage.py @@ -836,7 +836,7 @@ class TestStorageDriver(tests_base.TestCase): self.assertEqual( [], self.storage.get_measures(self.metric, aggregation='last')) - def test_search_value(self): + def test_find_measures(self): metric2, __ = self._create_metric() self.incoming.add_measures(self.metric.id, [ incoming.Measure(datetime64(2014, 1, 1, 12, 0, 1,), 69), @@ -855,27 +855,46 @@ class TestStorageDriver(tests_base.TestCase): self.trigger_processing([str(self.metric.id), str(metric2.id)]) self.assertEqual( - {metric2: [], - self.metric: [ - (datetime64(2014, 1, 1), - numpy.timedelta64(1, 'D'), 33), - (datetime64(2014, 1, 1, 12), - numpy.timedelta64(1, 'h'), 33), - (datetime64(2014, 1, 1, 12), - numpy.timedelta64(5, 'm'), 69), - (datetime64(2014, 1, 1, 12, 10), - numpy.timedelta64(5, 'm'), 42)]}, - self.storage.search_value( - [metric2, self.metric], - {u"≥": 30})) + [ + (datetime64(2014, 1, 1), + numpy.timedelta64(1, 'D'), 33), + ], + self.storage.find_measure( + self.metric, storage.MeasureQuery({u"≥": 30}), + numpy.timedelta64(1, 'D'))) 
self.assertEqual( - {metric2: [], self.metric: []}, - self.storage.search_value( - [metric2, self.metric], - {u"∧": [ + [ + (datetime64(2014, 1, 1, 12), + numpy.timedelta64(5, 'm'), 69), + (datetime64(2014, 1, 1, 12, 10), + numpy.timedelta64(5, 'm'), 42) + ], + self.storage.find_measure( + self.metric, storage.MeasureQuery({u"≥": 30}), + numpy.timedelta64(5, 'm'))) + + self.assertEqual( + [], + self.storage.find_measure( + metric2, storage.MeasureQuery({u"≥": 30}), + numpy.timedelta64(5, 'm'))) + + self.assertEqual( + [], + self.storage.find_measure( + self.metric, storage.MeasureQuery({u"∧": [ + {u"eq": 100}, + {u"≠": 50}]}), + numpy.timedelta64(5, 'm'))) + + self.assertEqual( + [], + self.storage.find_measure( + metric2, storage.MeasureQuery({u"∧": [ {u"eq": 100}, - {u"≠": 50}]})) + {u"≠": 50}]}), + numpy.timedelta64(5, 'm'))) def test_resize_policy(self): name = str(uuid.uuid4()) -- GitLab From 073a86eef0cae44d3951e3ef4ecf48f5fefca3e9 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Sun, 10 Dec 2017 22:49:42 +0100 Subject: [PATCH 1160/1483] Add datetime for resource attribute type This change adds a new type of attribute for resource: datetime. 
--- doc/source/rest.yaml | 12 +- .../versions/1c98ac614015_initial_base.py | 15 +-- ...205ff_add_updating_resource_type_states.py | 8 +- .../5c4f93e5bb4_mysql_float_to_timestamp.py | 6 +- gnocchi/indexer/sqlalchemy.py | 11 +- gnocchi/indexer/sqlalchemy_base.py | 99 +--------------- gnocchi/indexer/sqlalchemy_extension.py | 10 ++ gnocchi/indexer/sqlalchemy_types.py | 110 ++++++++++++++++++ gnocchi/resource_type.py | 11 ++ .../functional/gabbits/resource-type.yaml | 42 ++++++- ...ource-attribute-type-1e627a686568f72a.yaml | 4 + setup.cfg | 1 + 12 files changed, 214 insertions(+), 115 deletions(-) create mode 100644 gnocchi/indexer/sqlalchemy_types.py create mode 100644 releasenotes/notes/datetime-resource-attribute-type-1e627a686568f72a.yaml diff --git a/doc/source/rest.yaml b/doc/source/rest.yaml index e8862047..a0236f10 100644 --- a/doc/source/rest.yaml +++ b/doc/source/rest.yaml @@ -334,7 +334,8 @@ "flavor_id": {"type": "string", "required": true}, "image_ref": {"type": "string", "required": true}, "host": {"type": "string", "required": true}, - "server_group": {"type": "string", "required": false} + "server_group": {"type": "string", "required": false}, + "launched_at": {"type": "datetime", "required": false} } } @@ -350,6 +351,7 @@ "started_at": "2014-01-02 23:23:34", "ended_at": "2014-01-04 10:00:12", "flavor_id": "2", + "launched_at": "2017-12-10T08:10:42Z", "image_ref": "http://image", "host": "compute1", "display_name": "myvm", @@ -454,7 +456,8 @@ "display_name": {"type": "string", "required": true}, "prefix": {"type": "string", "required": false, "max_length": 8, "min_length": 3}, "size": {"type": "number", "min": 5, "max": 32.8}, - "enabled": {"type": "bool", "required": false} + "enabled": {"type": "bool", "required": false}, + "launched_at": {"type": "datetime", "required": false} } } @@ -487,6 +490,11 @@ "path": "/attributes/required-stuff", "value": {"type": "bool", "required": true, "options": {"fill": true}} }, + { + "op": "add", + "path": 
"/attributes/required-datetime", + "value": {"type": "datetime", "required": true, "options": {"fill": "2017-12-11T08:12:42Z"}} + }, { "op": "remove", "path": "/attributes/prefix" diff --git a/gnocchi/indexer/alembic/versions/1c98ac614015_initial_base.py b/gnocchi/indexer/alembic/versions/1c98ac614015_initial_base.py index ff04411f..f22def64 100644 --- a/gnocchi/indexer/alembic/versions/1c98ac614015_initial_base.py +++ b/gnocchi/indexer/alembic/versions/1c98ac614015_initial_base.py @@ -32,6 +32,7 @@ import sqlalchemy as sa import sqlalchemy_utils import gnocchi.indexer.sqlalchemy_base +import gnocchi.indexer.sqlalchemy_types def upgrade(): @@ -39,9 +40,9 @@ def upgrade(): sa.Column('type', sa.Enum('generic', 'instance', 'swift_account', 'volume', 'ceph_account', 'network', 'identity', 'ipmi', 'stack', 'image', name='resource_type_enum'), nullable=False), sa.Column('created_by_user_id', sqlalchemy_utils.types.uuid.UUIDType(binary=False), nullable=True), sa.Column('created_by_project_id', sqlalchemy_utils.types.uuid.UUIDType(binary=False), nullable=True), - sa.Column('started_at', gnocchi.indexer.sqlalchemy_base.PreciseTimestamp(), nullable=False), - sa.Column('revision_start', gnocchi.indexer.sqlalchemy_base.PreciseTimestamp(), nullable=False), - sa.Column('ended_at', gnocchi.indexer.sqlalchemy_base.PreciseTimestamp(), nullable=True), + sa.Column('started_at', gnocchi.indexer.sqlalchemy_types.PreciseTimestamp(), nullable=False), + sa.Column('revision_start', gnocchi.indexer.sqlalchemy_types.PreciseTimestamp(), nullable=False), + sa.Column('ended_at', gnocchi.indexer.sqlalchemy_types.PreciseTimestamp(), nullable=True), sa.Column('user_id', sqlalchemy_utils.types.uuid.UUIDType(binary=False), nullable=True), sa.Column('project_id', sqlalchemy_utils.types.uuid.UUIDType(binary=False), nullable=True), sa.Column('id', sqlalchemy_utils.types.uuid.UUIDType(binary=False), nullable=False), @@ -139,14 +140,14 @@ def upgrade(): sa.Column('type', sa.Enum('generic', 'instance', 
'swift_account', 'volume', 'ceph_account', 'network', 'identity', 'ipmi', 'stack', 'image', name='resource_type_enum'), nullable=False), sa.Column('created_by_user_id', sqlalchemy_utils.types.uuid.UUIDType(binary=False), nullable=True), sa.Column('created_by_project_id', sqlalchemy_utils.types.uuid.UUIDType(binary=False), nullable=True), - sa.Column('started_at', gnocchi.indexer.sqlalchemy_base.PreciseTimestamp(), nullable=False), - sa.Column('revision_start', gnocchi.indexer.sqlalchemy_base.PreciseTimestamp(), nullable=False), - sa.Column('ended_at', gnocchi.indexer.sqlalchemy_base.PreciseTimestamp(), nullable=True), + sa.Column('started_at', gnocchi.indexer.sqlalchemy_types.PreciseTimestamp(), nullable=False), + sa.Column('revision_start', gnocchi.indexer.sqlalchemy_types.PreciseTimestamp(), nullable=False), + sa.Column('ended_at', gnocchi.indexer.sqlalchemy_types.PreciseTimestamp(), nullable=True), sa.Column('user_id', sqlalchemy_utils.types.uuid.UUIDType(binary=False), nullable=True), sa.Column('project_id', sqlalchemy_utils.types.uuid.UUIDType(binary=False), nullable=True), sa.Column('revision', sa.Integer(), nullable=False), sa.Column('id', sqlalchemy_utils.types.uuid.UUIDType(binary=False), nullable=False), - sa.Column('revision_end', gnocchi.indexer.sqlalchemy_base.PreciseTimestamp(), nullable=False), + sa.Column('revision_end', gnocchi.indexer.sqlalchemy_types.PreciseTimestamp(), nullable=False), sa.ForeignKeyConstraint(['id'], ['resource.id'], name="fk_resource_history_id_resource_id", ondelete='CASCADE'), sa.PrimaryKeyConstraint('revision'), mysql_charset='utf8', diff --git a/gnocchi/indexer/alembic/versions/27d2a1d205ff_add_updating_resource_type_states.py b/gnocchi/indexer/alembic/versions/27d2a1d205ff_add_updating_resource_type_states.py index 21dc7e42..33d60f47 100644 --- a/gnocchi/indexer/alembic/versions/27d2a1d205ff_add_updating_resource_type_states.py +++ b/gnocchi/indexer/alembic/versions/27d2a1d205ff_add_updating_resource_type_states.py @@ 
-24,7 +24,7 @@ Create Date: 2016-08-31 14:05:34.316496 from alembic import op import sqlalchemy as sa -from gnocchi.indexer import sqlalchemy_base +from gnocchi.indexer import sqlalchemy_types from gnocchi import utils # revision identifiers, used by Alembic. @@ -36,7 +36,7 @@ depends_on = None resource_type = sa.sql.table( 'resource_type', - sa.sql.column('updated_at', sqlalchemy_base.PreciseTimestamp())) + sa.sql.column('updated_at', sqlalchemy_types.PreciseTimestamp())) state_enum = sa.Enum("active", "creating", "creation_error", "deleting", @@ -80,10 +80,10 @@ def upgrade(): server_default="creating") op.add_column("resource_type", sa.Column("updated_at", - sqlalchemy_base.PreciseTimestamp(), + sqlalchemy_types.PreciseTimestamp(), nullable=True)) op.execute(resource_type.update().values({'updated_at': utils.utcnow()})) op.alter_column("resource_type", "updated_at", - type_=sqlalchemy_base.PreciseTimestamp(), + type_=sqlalchemy_types.PreciseTimestamp(), nullable=False) diff --git a/gnocchi/indexer/alembic/versions/5c4f93e5bb4_mysql_float_to_timestamp.py b/gnocchi/indexer/alembic/versions/5c4f93e5bb4_mysql_float_to_timestamp.py index 824a3e93..6c73dd73 100644 --- a/gnocchi/indexer/alembic/versions/5c4f93e5bb4_mysql_float_to_timestamp.py +++ b/gnocchi/indexer/alembic/versions/5c4f93e5bb4_mysql_float_to_timestamp.py @@ -27,7 +27,7 @@ from alembic import op import sqlalchemy as sa from sqlalchemy.sql import func -from gnocchi.indexer import sqlalchemy_base +from gnocchi.indexer import sqlalchemy_types # revision identifiers, used by Alembic. 
revision = '5c4f93e5bb4' @@ -61,7 +61,7 @@ def upgrade(): nullable=nullable) temp_col = sa.Column( column_name + "_ts", - sqlalchemy_base.TimestampUTC(), + sqlalchemy_types.TimestampUTC(), nullable=True) op.add_column(table_name, temp_col) t = sa.sql.table(table_name, existing_col, temp_col) @@ -71,7 +71,7 @@ def upgrade(): op.alter_column(table_name, column_name + "_ts", nullable=nullable, - type_=sqlalchemy_base.TimestampUTC(), + type_=sqlalchemy_types.TimestampUTC(), existing_nullable=nullable, existing_type=existing_type, new_column_name=column_name) diff --git a/gnocchi/indexer/sqlalchemy.py b/gnocchi/indexer/sqlalchemy.py index 99e026bf..328ba944 100644 --- a/gnocchi/indexer/sqlalchemy.py +++ b/gnocchi/indexer/sqlalchemy.py @@ -41,12 +41,13 @@ import six import sqlalchemy from sqlalchemy.engine import url as sqlalchemy_url import sqlalchemy.exc -from sqlalchemy import types +from sqlalchemy import types as sa_types import sqlalchemy_utils from gnocchi import exceptions from gnocchi import indexer from gnocchi.indexer import sqlalchemy_base as base +from gnocchi.indexer import sqlalchemy_types as types from gnocchi import resource_type from gnocchi import utils @@ -1210,10 +1211,10 @@ class QueryTransformer(object): } converters = ( - (base.TimestampUTC, utils.to_datetime), - (types.String, six.text_type), - (types.Integer, int), - (types.Numeric, float), + (types.TimestampUTC, utils.to_datetime), + (sa_types.String, six.text_type), + (sa_types.Integer, int), + (sa_types.Numeric, float), ) @classmethod diff --git a/gnocchi/indexer/sqlalchemy_base.py b/gnocchi/indexer/sqlalchemy_base.py index 172bcb60..1a3590e3 100644 --- a/gnocchi/indexer/sqlalchemy_base.py +++ b/gnocchi/indexer/sqlalchemy_base.py @@ -15,21 +15,16 @@ # License for the specific language governing permissions and limitations # under the License. 
from __future__ import absolute_import -import calendar -import datetime -import decimal -import iso8601 from oslo_db.sqlalchemy import models import six import sqlalchemy -from sqlalchemy.dialects import mysql from sqlalchemy.ext import declarative -from sqlalchemy import types import sqlalchemy_utils from gnocchi import archive_policy from gnocchi import indexer +from gnocchi.indexer import sqlalchemy_types as types from gnocchi import resource_type from gnocchi import utils @@ -39,88 +34,6 @@ COMMON_TABLES_ARGS = {'mysql_charset': "utf8", 'mysql_engine': "InnoDB"} -class PreciseTimestamp(types.TypeDecorator): - """Represents a timestamp precise to the microsecond. - - Deprecated in favor of TimestampUTC. - Still used in alembic migrations. - """ - - impl = sqlalchemy.DateTime - - @staticmethod - def _decimal_to_dt(dec): - """Return a datetime from Decimal unixtime format.""" - if dec is None: - return None - - integer = int(dec) - micro = (dec - decimal.Decimal(integer)) * decimal.Decimal(1000000) - daittyme = datetime.datetime.utcfromtimestamp(integer) - return daittyme.replace(microsecond=int(round(micro))) - - @staticmethod - def _dt_to_decimal(utc): - """Datetime to Decimal. - - Some databases don't store microseconds in datetime - so we always store as Decimal unixtime. 
- """ - if utc is None: - return None - - decimal.getcontext().prec = 30 - return (decimal.Decimal(str(calendar.timegm(utc.utctimetuple()))) + - (decimal.Decimal(str(utc.microsecond)) / - decimal.Decimal("1000000.0"))) - - def load_dialect_impl(self, dialect): - if dialect.name == 'mysql': - return dialect.type_descriptor( - types.DECIMAL(precision=20, - scale=6, - asdecimal=True)) - return dialect.type_descriptor(self.impl) - - def compare_against_backend(self, dialect, conn_type): - if dialect.name == 'mysql': - return issubclass(type(conn_type), types.DECIMAL) - return issubclass(type(conn_type), type(self.impl)) - - def process_bind_param(self, value, dialect): - if value is not None: - value = utils.normalize_time(value) - if dialect.name == 'mysql': - return self._dt_to_decimal(value) - return value - - def process_result_value(self, value, dialect): - if dialect.name == 'mysql': - value = self._decimal_to_dt(value) - if value is not None: - return utils.normalize_time(value).replace( - tzinfo=iso8601.iso8601.UTC) - - -class TimestampUTC(types.TypeDecorator): - """Represents a timestamp precise to the microsecond.""" - - impl = sqlalchemy.DateTime - - def load_dialect_impl(self, dialect): - if dialect.name == 'mysql': - return dialect.type_descriptor(mysql.DATETIME(fsp=6)) - return self.impl - - def process_bind_param(self, value, dialect): - if value is not None: - return utils.normalize_time(value) - - def process_result_value(self, value, dialect): - if value is not None: - return value.replace(tzinfo=iso8601.iso8601.UTC) - - class GnocchiBase(models.ModelBase): __table_args__ = ( COMMON_TABLES_ARGS, @@ -267,7 +180,7 @@ class ResourceType(Base, GnocchiBase, resource_type.ResourceType): name="resource_type_state_enum"), nullable=False, server_default="creating") - updated_at = sqlalchemy.Column(TimestampUTC, nullable=False, + updated_at = sqlalchemy.Column(types.TimestampUTC, nullable=False, # NOTE(jd): We would like to use # sqlalchemy.func.now, but we 
can't # because the type of PreciseTimestamp in @@ -320,11 +233,11 @@ class ResourceMixin(ResourceJsonifier): nullable=False) creator = sqlalchemy.Column(sqlalchemy.String(255)) - started_at = sqlalchemy.Column(TimestampUTC, nullable=False, + started_at = sqlalchemy.Column(types.TimestampUTC, nullable=False, default=lambda: utils.utcnow()) - revision_start = sqlalchemy.Column(TimestampUTC, nullable=False, + revision_start = sqlalchemy.Column(types.TimestampUTC, nullable=False, default=lambda: utils.utcnow()) - ended_at = sqlalchemy.Column(TimestampUTC) + ended_at = sqlalchemy.Column(types.TimestampUTC) user_id = sqlalchemy.Column(sqlalchemy.String(255)) project_id = sqlalchemy.Column(sqlalchemy.String(255)) original_resource_id = sqlalchemy.Column(sqlalchemy.String(255), @@ -364,7 +277,7 @@ class ResourceHistory(ResourceMixin, Base, GnocchiBase): ondelete="CASCADE", name="fk_rh_id_resource_id"), nullable=False) - revision_end = sqlalchemy.Column(TimestampUTC, nullable=False, + revision_end = sqlalchemy.Column(types.TimestampUTC, nullable=False, default=lambda: utils.utcnow()) metrics = sqlalchemy.orm.relationship( Metric, primaryjoin="Metric.resource_id == ResourceHistory.id", diff --git a/gnocchi/indexer/sqlalchemy_extension.py b/gnocchi/indexer/sqlalchemy_extension.py index bc4d8418..ce81cf42 100644 --- a/gnocchi/indexer/sqlalchemy_extension.py +++ b/gnocchi/indexer/sqlalchemy_extension.py @@ -17,6 +17,7 @@ from __future__ import absolute_import import sqlalchemy import sqlalchemy_utils +from gnocchi.indexer import sqlalchemy_types from gnocchi import resource_type @@ -54,3 +55,12 @@ class NumberSchema(resource_type.NumberSchema, SchemaMixin): class BoolSchema(resource_type.BoolSchema, SchemaMixin): satype = sqlalchemy.Boolean + + +class DatetimeSchema(resource_type.DatetimeSchema, SchemaMixin): + satype = sqlalchemy_types.TimestampUTC() + + def for_filling(self, dialect): + if self.fill is None: + return None + return self.satype.process_bind_param(self.fill, 
dialect).isoformat() diff --git a/gnocchi/indexer/sqlalchemy_types.py b/gnocchi/indexer/sqlalchemy_types.py new file mode 100644 index 00000000..b566465a --- /dev/null +++ b/gnocchi/indexer/sqlalchemy_types.py @@ -0,0 +1,110 @@ +# -*- encoding: utf-8 -*- +# +# Copyright © 2016 Red Hat, Inc. +# Copyright © 2014-2015 eNovance +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +from __future__ import absolute_import + +import calendar +import datetime +import decimal + +import iso8601 +import sqlalchemy +from sqlalchemy.dialects import mysql +from sqlalchemy import types + +from gnocchi import utils + + +class PreciseTimestamp(types.TypeDecorator): + """Represents a timestamp precise to the microsecond. + + Deprecated in favor of TimestampUTC. + Still used in alembic migrations. + """ + + impl = sqlalchemy.DateTime + + @staticmethod + def _decimal_to_dt(dec): + """Return a datetime from Decimal unixtime format.""" + if dec is None: + return None + + integer = int(dec) + micro = (dec - decimal.Decimal(integer)) * decimal.Decimal(1000000) + daittyme = datetime.datetime.utcfromtimestamp(integer) + return daittyme.replace(microsecond=int(round(micro))) + + @staticmethod + def _dt_to_decimal(utc): + """Datetime to Decimal. + + Some databases don't store microseconds in datetime + so we always store as Decimal unixtime. 
+ """ + if utc is None: + return None + + decimal.getcontext().prec = 30 + return (decimal.Decimal(str(calendar.timegm(utc.utctimetuple()))) + + (decimal.Decimal(str(utc.microsecond)) / + decimal.Decimal("1000000.0"))) + + def load_dialect_impl(self, dialect): + if dialect.name == 'mysql': + return dialect.type_descriptor( + types.DECIMAL(precision=20, + scale=6, + asdecimal=True)) + return dialect.type_descriptor(self.impl) + + def compare_against_backend(self, dialect, conn_type): + if dialect.name == 'mysql': + return issubclass(type(conn_type), types.DECIMAL) + return issubclass(type(conn_type), type(self.impl)) + + def process_bind_param(self, value, dialect): + if value is not None: + value = utils.normalize_time(value) + if dialect.name == 'mysql': + return self._dt_to_decimal(value) + return value + + def process_result_value(self, value, dialect): + if dialect.name == 'mysql': + value = self._decimal_to_dt(value) + if value is not None: + return utils.normalize_time(value).replace( + tzinfo=iso8601.iso8601.UTC) + + +class TimestampUTC(types.TypeDecorator): + """Represents a timestamp precise to the microsecond.""" + + impl = sqlalchemy.DateTime + + def load_dialect_impl(self, dialect): + if dialect.name == 'mysql': + return dialect.type_descriptor(mysql.DATETIME(fsp=6)) + return self.impl + + def process_bind_param(self, value, dialect): + if value is not None: + return utils.normalize_time(value) + + def process_result_value(self, value, dialect): + if value is not None: + return value.replace(tzinfo=iso8601.iso8601.UTC) diff --git a/gnocchi/resource_type.py b/gnocchi/resource_type.py index 9c78565d..9daec8e6 100644 --- a/gnocchi/resource_type.py +++ b/gnocchi/resource_type.py @@ -172,6 +172,17 @@ class UUIDSchema(CommonAttributeSchema): raise voluptuous.Invalid(e) +class DatetimeSchema(CommonAttributeSchema): + typename = "datetime" + + @staticmethod + def schema_ext(value): + try: + return utils.to_datetime(value) + except ValueError as e: + raise 
voluptuous.Invalid(e) + + class NumberSchema(CommonAttributeSchema): typename = "number" diff --git a/gnocchi/tests/functional/gabbits/resource-type.yaml b/gnocchi/tests/functional/gabbits/resource-type.yaml index d536c036..548839e5 100644 --- a/gnocchi/tests/functional/gabbits/resource-type.yaml +++ b/gnocchi/tests/functional/gabbits/resource-type.yaml @@ -127,6 +127,9 @@ tests: bool: type: bool required: false + datetime: + type: datetime + required: false status: 201 response_json_paths: $.name: my_custom_resource @@ -163,7 +166,9 @@ tests: bool: type: bool required: false - + datetime: + type: datetime + required: false response_headers: location: $SCHEME://$NETLOC/v1/resource_type/my_custom_resource @@ -214,6 +219,9 @@ tests: bool: type: bool required: false + datetime: + type: datetime + required: false # Some bad case case on the type @@ -259,11 +267,13 @@ tests: foobar: what uuid: e495ebad-be64-46c0-81d6-b079beb48df9 int: 1 + datetime: "2017-05-02T11:11:11Z" status: 201 response_json_paths: $.id: d11edfca-4393-4fda-b94d-b05a3a1b3747 $.name: bar $.foobar: what + $.datetime: "2017-05-02T11:11:11+00:00" - name: patch custom resource PATCH: /v1/resource/my_custom_resource/d11edfca-4393-4fda-b94d-b05a3a1b3747 @@ -307,6 +317,7 @@ tests: $[0].id: d11edfca-4393-4fda-b94d-b05a3a1b3747 $[0].name: bar $[0].foobar: what + $[0].datetime: "2017-05-02T11:11:11+00:00" $[1].id: d11edfca-4393-4fda-b94d-b05a3a1b3747 $[1].name: foo $[1].foobar: what @@ -368,6 +379,13 @@ tests: required: True options: fill: "00000000-0000-0000-0000-000000000000" + - op: add + path: /attributes/newdatetime + value: + type: datetime + required: True + options: + fill: "2017-10-10T10:10:10Z" - op: remove path: /attributes/foobar status: 200 @@ -400,6 +418,9 @@ tests: bool: type: bool required: false + datetime: + type: datetime + required: False newstuff: type: string required: False @@ -426,6 +447,9 @@ tests: newuuid: type: uuid required: True + newdatetime: + type: datetime + required: True - 
name: post a new resource attribute with missing fill PATCH: /v1/resource_type/my_custom_resource @@ -494,6 +518,9 @@ tests: bool: type: bool required: false + datetime: + type: datetime + required: False newstuff: type: string required: False @@ -520,6 +547,9 @@ tests: newuuid: type: uuid required: True + newdatetime: + type: datetime + required: True - name: control new attributes of existing resource GET: /v1/resource/my_custom_resource/d11edfca-4393-4fda-b94d-b05a3a1b3747 @@ -533,6 +563,8 @@ tests: $.newint: 15 $.newstring: foobar $.newuuid: "00000000-0000-0000-0000-000000000000" + $.newdatetime: "2017-10-10T10:10:10+00:00" + - name: control new attributes of existing resource history GET: /v1/resource/my_custom_resource/d11edfca-4393-4fda-b94d-b05a3a1b3747/history?sort=revision_end:asc-nullslast @@ -546,6 +578,7 @@ tests: $[0].newint: 15 $[0].newstring: foobar $[0].newuuid: "00000000-0000-0000-0000-000000000000" + $[0].newdatetime: "2017-10-10T10:10:10+00:00" $[1].id: d11edfca-4393-4fda-b94d-b05a3a1b3747 $[1].name: foo $[1].newstuff: null @@ -554,6 +587,7 @@ tests: $[1].newint: 15 $[1].newstring: foobar $[1].newuuid: "00000000-0000-0000-0000-000000000000" + $[1].newdatetime: "2017-10-10T10:10:10+00:00" # Invalid patch @@ -603,6 +637,9 @@ tests: bool: type: bool required: false + datetime: + type: datetime + required: False newstuff: type: string required: False @@ -629,6 +666,9 @@ tests: newuuid: type: uuid required: True + newdatetime: + type: datetime + required: True - name: delete/add the same resource attribute PATCH: /v1/resource_type/my_custom_resource diff --git a/releasenotes/notes/datetime-resource-attribute-type-1e627a686568f72a.yaml b/releasenotes/notes/datetime-resource-attribute-type-1e627a686568f72a.yaml new file mode 100644 index 00000000..8d9f3912 --- /dev/null +++ b/releasenotes/notes/datetime-resource-attribute-type-1e627a686568f72a.yaml @@ -0,0 +1,4 @@ +--- +features: + - | + A new date type is available for resource type attribute: 
datetime. diff --git a/setup.cfg b/setup.cfg index ad8bd79e..d308df88 100644 --- a/setup.cfg +++ b/setup.cfg @@ -93,6 +93,7 @@ gnocchi.indexer.sqlalchemy.resource_type_attribute = uuid = gnocchi.indexer.sqlalchemy_extension:UUIDSchema number = gnocchi.indexer.sqlalchemy_extension:NumberSchema bool = gnocchi.indexer.sqlalchemy_extension:BoolSchema + datetime = gnocchi.indexer.sqlalchemy_extension:DatetimeSchema gnocchi.storage = swift = gnocchi.storage.swift:SwiftStorage -- GitLab From 3d0a8d44524377394caed0639a86274f9ddd175a Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Mon, 18 Dec 2017 16:21:20 +0100 Subject: [PATCH 1161/1483] storage: make _get_measures batched This changes the signature of _get_measures to be batched. That will allow drivers than can retrieve several keys at the same time to do so. --- gnocchi/storage/__init__.py | 51 ++++++++++++---------- gnocchi/storage/ceph.py | 2 +- gnocchi/storage/file.py | 2 +- gnocchi/storage/redis.py | 4 +- gnocchi/storage/s3.py | 2 +- gnocchi/storage/swift.py | 2 +- gnocchi/tests/test_storage.py | 80 +++++++++++++++++------------------ 7 files changed, 74 insertions(+), 69 deletions(-) diff --git a/gnocchi/storage/__init__.py b/gnocchi/storage/__init__.py index 865d0638..829f18f0 100644 --- a/gnocchi/storage/__init__.py +++ b/gnocchi/storage/__init__.py @@ -125,8 +125,14 @@ class StorageDriver(object): def upgrade(): pass + def _get_measures(self, metric, keys, aggregation, version=3): + return utils.parallel_map( + self._get_measures_unbatched, + ((metric, key, aggregation, version) + for key in keys)) + @staticmethod - def _get_measures(metric, timestamp_key, aggregation, version=3): + def _get_measures_unbatched(metric, timestamp_key, aggregation, version=3): raise NotImplementedError @staticmethod @@ -219,16 +225,19 @@ class StorageDriver(object): return list(itertools.chain(*[ts.fetch(from_timestamp, to_timestamp) for ts in agg_timeseries])) - def _get_measures_and_unserialize(self, metric, key, aggregation): 
- data = self._get_measures(metric, key, aggregation) - try: - return carbonara.AggregatedTimeSerie.unserialize( - data, key, aggregation) - except carbonara.InvalidData: - LOG.error("Data corruption detected for %s " - "aggregated `%s' timeserie, granularity `%s' " - "around time `%s', ignoring.", - metric.id, aggregation, key.sampling, key) + def _get_measures_and_unserialize(self, metric, keys, aggregation): + raw_measures = self._get_measures(metric, keys, aggregation) + results = [] + for key, raw in six.moves.zip(keys, raw_measures): + try: + results.append(carbonara.AggregatedTimeSerie.unserialize( + raw, key, aggregation)) + except carbonara.InvalidData: + LOG.error("Data corruption detected for %s " + "aggregated `%s' timeserie, granularity `%s' " + "around time `%s', ignoring.", + metric.id, aggregation, key.sampling, key) + return results def _get_measures_timeserie(self, metric, aggregation, granularity, @@ -259,15 +268,12 @@ class StorageDriver(object): to_timestamp = carbonara.SplitKey.from_timestamp_and_sampling( to_timestamp, granularity) - timeseries = list(filter( - lambda x: x is not None, - utils.parallel_map( - self._get_measures_and_unserialize, - ((metric, key, aggregation) - for key in sorted(all_keys) - if ((not from_timestamp or key >= from_timestamp) - and (not to_timestamp or key <= to_timestamp)))) - )) + keys = [key for key in sorted(all_keys) + if ((not from_timestamp or key >= from_timestamp) + and (not to_timestamp or key <= to_timestamp))] + + timeseries = self._get_measures_and_unserialize( + metric, keys, aggregation) return carbonara.AggregatedTimeSerie.from_timeseries( sampling=granularity, @@ -283,11 +289,12 @@ class StorageDriver(object): if write_full: try: existing = self._get_measures_and_unserialize( - metric, key, aggregation) + metric, [key], aggregation) except AggregationDoesNotExist: pass else: - if existing is not None: + if existing: + existing = existing[0] if split is not None: existing.merge(split) split = 
existing diff --git a/gnocchi/storage/ceph.py b/gnocchi/storage/ceph.py index 7846dd34..e0fac7c5 100644 --- a/gnocchi/storage/ceph.py +++ b/gnocchi/storage/ceph.py @@ -137,7 +137,7 @@ class CephStorage(storage.StorageDriver): # It's possible that the object does not exists pass - def _get_measures(self, metric, key, aggregation, version=3): + def _get_measures_unbatched(self, metric, key, aggregation, version=3): try: name = self._get_object_name(metric, key, aggregation, version) return self._get_object_content(name) diff --git a/gnocchi/storage/file.py b/gnocchi/storage/file.py index 5d00da8b..7f217e11 100644 --- a/gnocchi/storage/file.py +++ b/gnocchi/storage/file.py @@ -141,7 +141,7 @@ class FileStorage(storage.StorageDriver): # measures) raise - def _get_measures(self, metric, key, aggregation, version=3): + def _get_measures_unbatched(self, metric, key, aggregation, version=3): path = self._build_metric_path_for_split( metric, aggregation, key, version) try: diff --git a/gnocchi/storage/redis.py b/gnocchi/storage/redis.py index 27554236..bbcc445a 100644 --- a/gnocchi/storage/redis.py +++ b/gnocchi/storage/redis.py @@ -90,9 +90,7 @@ class RedisStorage(storage.StorageDriver): def _delete_metric(self, metric): self._client.delete(self._metric_key(metric)) - # Carbonara API - - def _get_measures(self, metric, key, aggregation, version=3): + def _get_measures_unbatched(self, metric, key, aggregation, version=3): redis_key = self._metric_key(metric) field = self._aggregated_field_for_split(aggregation, key, version) data = self._client.hget(redis_key, field) diff --git a/gnocchi/storage/s3.py b/gnocchi/storage/s3.py index 98b6b3dd..3e8c9d1b 100644 --- a/gnocchi/storage/s3.py +++ b/gnocchi/storage/s3.py @@ -158,7 +158,7 @@ class S3Storage(storage.StorageDriver): s3.bulk_delete(self.s3, bucket, [c['Key'] for c in response.get('Contents', ())]) - def _get_measures(self, metric, key, aggregation, version=3): + def _get_measures_unbatched(self, metric, key, aggregation, 
version=3): try: response = self.s3.get_object( Bucket=self._bucket_name, diff --git a/gnocchi/storage/swift.py b/gnocchi/storage/swift.py index b17e24fb..e13aa0de 100644 --- a/gnocchi/storage/swift.py +++ b/gnocchi/storage/swift.py @@ -142,7 +142,7 @@ class SwiftStorage(storage.StorageDriver): # Deleted in the meantime? Whatever. raise - def _get_measures(self, metric, key, aggregation, version=3): + def _get_measures_unbatched(self, metric, key, aggregation, version=3): try: headers, contents = self.swift.get_object( self._container_name(metric), self._object_name( diff --git a/gnocchi/tests/test_storage.py b/gnocchi/tests/test_storage.py index 6eab39ee..6d29e564 100644 --- a/gnocchi/tests/test_storage.py +++ b/gnocchi/tests/test_storage.py @@ -328,22 +328,22 @@ class TestStorageDriver(tests_base.TestCase): assertCompressedIfWriteFull = self.assertFalse data = self.storage._get_measures( - self.metric, carbonara.SplitKey( + self.metric, [carbonara.SplitKey( numpy.datetime64(1451520000, 's'), numpy.timedelta64(1, 'm'), - ), "mean") + )], "mean")[0] self.assertTrue(carbonara.AggregatedTimeSerie.is_compressed(data)) data = self.storage._get_measures( - self.metric, carbonara.SplitKey( + self.metric, [carbonara.SplitKey( numpy.datetime64(1451736000, 's'), numpy.timedelta64(60, 's'), - ), "mean") + )], "mean")[0] self.assertTrue(carbonara.AggregatedTimeSerie.is_compressed(data)) data = self.storage._get_measures( - self.metric, carbonara.SplitKey( + self.metric, [carbonara.SplitKey( numpy.datetime64(1451952000, 's'), numpy.timedelta64(60, 's'), - ), "mean") + )], "mean")[0] assertCompressedIfWriteFull( carbonara.AggregatedTimeSerie.is_compressed(data)) @@ -377,29 +377,29 @@ class TestStorageDriver(tests_base.TestCase): }, self.storage._list_split_keys_for_metric( self.metric, "mean", numpy.timedelta64(1, 'm'))) data = self.storage._get_measures( - self.metric, carbonara.SplitKey( + self.metric, [carbonara.SplitKey( numpy.datetime64(1451520000, 's'), 
numpy.timedelta64(60, 's'), - ), "mean") + )], "mean")[0] self.assertTrue(carbonara.AggregatedTimeSerie.is_compressed(data)) data = self.storage._get_measures( - self.metric, carbonara.SplitKey( + self.metric, [carbonara.SplitKey( numpy.datetime64(1451736000, 's'), numpy.timedelta64(60, 's'), - ), "mean") + )], "mean")[0] self.assertTrue(carbonara.AggregatedTimeSerie.is_compressed(data)) data = self.storage._get_measures( - self.metric, carbonara.SplitKey( + self.metric, [carbonara.SplitKey( numpy.datetime64(1451952000, 's'), numpy.timedelta64(1, 'm'), - ), "mean") + )], "mean")[0] # Now this one is compressed because it has been rewritten! self.assertTrue(carbonara.AggregatedTimeSerie.is_compressed(data)) data = self.storage._get_measures( - self.metric, carbonara.SplitKey( + self.metric, [carbonara.SplitKey( numpy.datetime64(1452384000, 's'), numpy.timedelta64(60, 's'), - ), "mean") + )], "mean")[0] assertCompressedIfWriteFull( carbonara.AggregatedTimeSerie.is_compressed(data)) @@ -449,22 +449,22 @@ class TestStorageDriver(tests_base.TestCase): assertCompressedIfWriteFull = self.assertFalse data = self.storage._get_measures( - self.metric, carbonara.SplitKey( + self.metric, [carbonara.SplitKey( numpy.datetime64(1451520000, 's'), numpy.timedelta64(1, 'm'), - ), "mean") + )], "mean")[0] self.assertTrue(carbonara.AggregatedTimeSerie.is_compressed(data)) data = self.storage._get_measures( - self.metric, carbonara.SplitKey( + self.metric, [carbonara.SplitKey( numpy.datetime64(1451736000, 's'), numpy.timedelta64(1, 'm'), - ), "mean") + )], "mean")[0] self.assertTrue(carbonara.AggregatedTimeSerie.is_compressed(data)) data = self.storage._get_measures( - self.metric, carbonara.SplitKey( + self.metric, [carbonara.SplitKey( numpy.datetime64(1451952000, 's'), numpy.timedelta64(1, 'm') - ), "mean") + )], "mean")[0] assertCompressedIfWriteFull( carbonara.AggregatedTimeSerie.is_compressed(data)) @@ -500,29 +500,29 @@ class TestStorageDriver(tests_base.TestCase): }, 
self.storage._list_split_keys_for_metric( self.metric, "mean", numpy.timedelta64(1, 'm'))) data = self.storage._get_measures( - self.metric, carbonara.SplitKey( + self.metric, [carbonara.SplitKey( numpy.datetime64(1451520000, 's'), numpy.timedelta64(1, 'm'), - ), "mean") + )], "mean")[0] self.assertTrue(carbonara.AggregatedTimeSerie.is_compressed(data)) data = self.storage._get_measures( - self.metric, carbonara.SplitKey( + self.metric, [carbonara.SplitKey( numpy.datetime64(1451736000, 's'), numpy.timedelta64(1, 'm'), - ), "mean") + )], "mean")[0] self.assertTrue(carbonara.AggregatedTimeSerie.is_compressed(data)) data = self.storage._get_measures( - self.metric, carbonara.SplitKey( + self.metric, [carbonara.SplitKey( numpy.datetime64(1451952000, 's'), numpy.timedelta64(60, 's') - ), "mean") + )], "mean")[0] # Now this one is compressed because it has been rewritten! self.assertTrue(carbonara.AggregatedTimeSerie.is_compressed(data)) data = self.storage._get_measures( - self.metric, carbonara.SplitKey( + self.metric, [carbonara.SplitKey( numpy.datetime64(1452384000, 's'), numpy.timedelta64(1, 'm'), - ), "mean") + )], "mean")[0] assertCompressedIfWriteFull( carbonara.AggregatedTimeSerie.is_compressed(data)) @@ -571,22 +571,22 @@ class TestStorageDriver(tests_base.TestCase): data = self.storage._get_measures( self.metric, - carbonara.SplitKey( + [carbonara.SplitKey( numpy.datetime64(1451520000, 's'), numpy.timedelta64(1, 'm'), - ), "mean") + )], "mean")[0] self.assertTrue(carbonara.AggregatedTimeSerie.is_compressed(data)) data = self.storage._get_measures( - self.metric, carbonara.SplitKey( + self.metric, [carbonara.SplitKey( numpy.datetime64(1451736000, 's'), numpy.timedelta64(1, 'm') - ), "mean") + )], "mean")[0] self.assertTrue(carbonara.AggregatedTimeSerie.is_compressed(data)) data = self.storage._get_measures( - self.metric, carbonara.SplitKey( + self.metric, [carbonara.SplitKey( numpy.datetime64(1451952000, 's'), numpy.timedelta64(1, 'm'), - ), "mean") + )], 
"mean")[0] assertCompressedIfWriteFull( carbonara.AggregatedTimeSerie.is_compressed(data)) @@ -655,22 +655,22 @@ class TestStorageDriver(tests_base.TestCase): assertCompressedIfWriteFull = self.assertFalse data = self.storage._get_measures( - self.metric, carbonara.SplitKey( + self.metric, [carbonara.SplitKey( numpy.datetime64(1451520000, 's'), numpy.timedelta64(60, 's'), - ), "mean") + )], "mean")[0] self.assertTrue(carbonara.AggregatedTimeSerie.is_compressed(data)) data = self.storage._get_measures( - self.metric, carbonara.SplitKey( + self.metric, [carbonara.SplitKey( numpy.datetime64(1451736000, 's'), numpy.timedelta64(1, 'm'), - ), "mean") + )], "mean")[0] self.assertTrue(carbonara.AggregatedTimeSerie.is_compressed(data)) data = self.storage._get_measures( - self.metric, carbonara.SplitKey( + self.metric, [carbonara.SplitKey( numpy.datetime64(1451952000, 's'), numpy.timedelta64(1, 'm'), - ), "mean") + )], "mean")[0] assertCompressedIfWriteFull( carbonara.AggregatedTimeSerie.is_compressed(data)) -- GitLab From 28b2e85180eddfce80932886bfba7becb4cdee1a Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Mon, 18 Dec 2017 16:27:04 +0100 Subject: [PATCH 1162/1483] redis: implement batched _get_measures This allows to retrieve several keys at the same time. --- gnocchi/storage/redis.py | 24 +++++++++++++++--------- 1 file changed, 15 insertions(+), 9 deletions(-) diff --git a/gnocchi/storage/redis.py b/gnocchi/storage/redis.py index bbcc445a..140bf7a4 100644 --- a/gnocchi/storage/redis.py +++ b/gnocchi/storage/redis.py @@ -14,6 +14,8 @@ # License for the specific language governing permissions and limitations # under the License. 
+import six + from gnocchi.common import redis from gnocchi import storage from gnocchi import utils @@ -90,13 +92,17 @@ class RedisStorage(storage.StorageDriver): def _delete_metric(self, metric): self._client.delete(self._metric_key(metric)) - def _get_measures_unbatched(self, metric, key, aggregation, version=3): + def _get_measures(self, metric, keys, aggregation, version=3): redis_key = self._metric_key(metric) - field = self._aggregated_field_for_split(aggregation, key, version) - data = self._client.hget(redis_key, field) - if data is None: - if not self._client.exists(redis_key): - raise storage.MetricDoesNotExist(metric) - raise storage.AggregationDoesNotExist( - metric, aggregation, key.sampling) - return data + fields = [ + self._aggregated_field_for_split(aggregation, key, version) + for key in keys + ] + results = self._client.hmget(redis_key, fields) + for key, data in six.moves.zip(keys, results): + if data is None: + if not self._client.exists(redis_key): + raise storage.MetricDoesNotExist(metric) + raise storage.AggregationDoesNotExist( + metric, aggregation, key.sampling) + return results -- GitLab From df37581081e739615c39090f6f66cb753a6bd2c6 Mon Sep 17 00:00:00 2001 From: gord chung Date: Tue, 19 Dec 2017 14:01:24 +0000 Subject: [PATCH 1163/1483] speed up min/max aggregates - sort values and set the appropriate index which effectively makes each index take the last value (min/max depending on sort order) - in theory, allows us to cache order so we can reuse computation if both min and max aggregates defined. 
--- gnocchi/carbonara.py | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/gnocchi/carbonara.py b/gnocchi/carbonara.py index 705175d4..090335b4 100644 --- a/gnocchi/carbonara.py +++ b/gnocchi/carbonara.py @@ -134,10 +134,18 @@ class GroupedTimeSeries(object): weights=self._ts['values'])) def min(self): - return self._scipy_aggregate(ndimage.minimum) + ordered = self._ts['values'].argsort() + uniq_inv = numpy.repeat(numpy.arange(self.counts.size), self.counts) + values = numpy.zeros(self.tstamps.size) + values[uniq_inv[ordered][::-1]] = self._ts['values'][ordered][::-1] + return make_timeseries(self.tstamps, values) def max(self): - return self._scipy_aggregate(ndimage.maximum) + ordered = self._ts['values'].argsort() + uniq_inv = numpy.repeat(numpy.arange(self.counts.size), self.counts) + values = numpy.zeros(self.tstamps.size) + values[uniq_inv[ordered]] = self._ts['values'][ordered] + return make_timeseries(self.tstamps, values) def median(self): return self._scipy_aggregate(ndimage.median) -- GitLab From 05b000babec14e9afdafd105f21b39b628e39f6f Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Fri, 22 Dec 2017 10:17:17 +0100 Subject: [PATCH 1164/1483] tests: remove useless response_headers field that breaks with Gabbi 1.39.0 --- gnocchi/tests/functional/gabbits/resource.yaml | 1 - 1 file changed, 1 deletion(-) diff --git a/gnocchi/tests/functional/gabbits/resource.yaml b/gnocchi/tests/functional/gabbits/resource.yaml index 132fc09f..f8606ef9 100644 --- a/gnocchi/tests/functional/gabbits/resource.yaml +++ b/gnocchi/tests/functional/gabbits/resource.yaml @@ -500,7 +500,6 @@ tests: data: electron.spin: archive_policy_name: medium - response_headers: response_json_paths: $[/name][1].name: electron.spin $[/name][1].resource_id: 85c44741-cc60-4033-804e-2d3098c7d2e9 -- GitLab From bf0504bbfb0ead5e709d610d52a1f7144911f57d Mon Sep 17 00:00:00 2001 From: Nobuhiro MIKI Date: Wed, 13 Dec 2017 00:12:11 +0900 Subject: [PATCH 1165/1483] rest: 
narrow down the response attributes --- doc/source/rest.j2 | 16 ++++++ doc/source/rest.yaml | 10 ++++ gnocchi/indexer/sqlalchemy_base.py | 7 ++- gnocchi/rest/api.py | 24 +++++++- .../tests/functional/gabbits/resource.yaml | 50 +++++++++++++++++ gnocchi/tests/functional/gabbits/search.yaml | 56 +++++++++++++++++++ 6 files changed, 159 insertions(+), 4 deletions(-) diff --git a/doc/source/rest.j2 b/doc/source/rest.j2 index df3c8c2b..4a5326a7 100644 --- a/doc/source/rest.j2 +++ b/doc/source/rest.j2 @@ -452,6 +452,14 @@ query parameter: {{ scenarios['list-resource-generic-details']['doc'] }} +Limit attributes +~~~~~~~~~~~~~~~~ + +To limit response attributes, use `attrs=id&attrs=started_at&attrs=user_id` in the query +parameter: + +{{ scenarios['list-resource-generic-limit-attrs']['doc'] }} + Pagination ~~~~~~~~~~ @@ -634,6 +642,14 @@ Details about the |resource| can also be retrieved at the same time: {{ scenarios['search-resource-for-user-details']['doc'] }} +Limit attributes +~~~~~~~~~~~~~~~~ + +To limit response attributes, use `attrs=id&attrs=started_at&attrs=user_id` in the query +parameter: + +{{ scenarios['search-resource-for-user-limit-attrs']['doc'] }} + History ~~~~~~~ diff --git a/doc/source/rest.yaml b/doc/source/rest.yaml index a0236f10..e95a3623 100644 --- a/doc/source/rest.yaml +++ b/doc/source/rest.yaml @@ -367,6 +367,9 @@ - name: list-resource-generic-details request: GET /v1/resource/generic?details=true HTTP/1.1 +- name: list-resource-generic-limit-attrs + request: GET /v1/resource/generic?attrs=id&attrs=started_at&attrs=user_id HTTP/1.1 + - name: list-resource-generic-pagination request: GET /v1/resource/generic?limit=2&sort=id:asc HTTP/1.1 @@ -391,6 +394,13 @@ {"=": {"user_id": "{{ scenarios['create-resource-instance']['response'].json['user_id'] }}"}} +- name: search-resource-for-user-limit-attrs + request: | + POST /v1/search/resource/generic?attrs=id&attrs=started_at&attrs=user_id HTTP/1.1 + Content-Type: application/json + + {"=": {"user_id": 
"{{ scenarios['create-resource-instance']['response'].json['user_id'] }}"}} + - name: search-resource-for-user-after-timestamp request: | POST /v1/search/resource/instance HTTP/1.1 diff --git a/gnocchi/indexer/sqlalchemy_base.py b/gnocchi/indexer/sqlalchemy_base.py index 1a3590e3..7def0cf4 100644 --- a/gnocchi/indexer/sqlalchemy_base.py +++ b/gnocchi/indexer/sqlalchemy_base.py @@ -198,7 +198,7 @@ class ResourceType(Base, GnocchiBase, resource_type.ResourceType): class ResourceJsonifier(indexer.Resource): - def jsonify(self): + def jsonify(self, attrs=None): d = dict(self) del d['revision'] if 'metrics' not in sqlalchemy.inspect(self).unloaded: @@ -212,7 +212,10 @@ class ResourceJsonifier(indexer.Resource): self.creator.partition(":") ) - return d + if attrs: + return {key: val for key, val in d.items() if key in attrs} + else: + return d class ResourceMixin(ResourceJsonifier): diff --git a/gnocchi/rest/api.py b/gnocchi/rest/api.py index e0945f56..2d331466 100644 --- a/gnocchi/rest/api.py +++ b/gnocchi/rest/api.py @@ -188,6 +188,20 @@ def get_header_option(name, params): options.get(name, params.get(name, 'false'))) +def get_header_option_array(name, params): + type, options = werkzeug.http.parse_options_header( + pecan.request.headers.get('Accept')) + header_option = options.get(name, None) + post_option = params.get(name, None) + + if post_option: + return arg_to_list(post_option) + elif header_option: + return header_option.split('+') + else: + return None + + def get_history(params): return get_header_option('history', params) @@ -196,6 +210,10 @@ def get_details(params): return get_header_option('details', params) +def get_json_attrs(params): + return get_header_option_array('attrs', params) + + def strtobool(varname, v): """Convert a string to a boolean.""" try: @@ -1130,6 +1148,7 @@ class ResourcesController(rest.RestController): history = get_history(kwargs) pagination_opts = get_pagination_options( kwargs, RESOURCE_DEFAULT_PAGINATION) + json_attrs = 
get_json_attrs(kwargs) policy_filter = pecan.request.auth_helper.get_resource_policy_filter( pecan.request, "list resource", self._resource_type) @@ -1150,7 +1169,7 @@ class ResourcesController(rest.RestController): else: marker = str(resources[-1].id) set_resp_link_hdr(marker, kwargs, pagination_opts) - return resources + return [r.jsonify(json_attrs) for r in resources] except indexer.IndexerException as e: abort(400, six.text_type(e)) @@ -1391,8 +1410,9 @@ class SearchResourceTypeController(rest.RestController): @pecan.expose('json') def post(self, **kwargs): + json_attrs = get_json_attrs(kwargs) try: - return self._search(**kwargs) + return [r.jsonify(json_attrs) for r in self._search(**kwargs)] except indexer.IndexerException as e: abort(400, six.text_type(e)) diff --git a/gnocchi/tests/functional/gabbits/resource.yaml b/gnocchi/tests/functional/gabbits/resource.yaml index f8606ef9..20fc4b48 100644 --- a/gnocchi/tests/functional/gabbits/resource.yaml +++ b/gnocchi/tests/functional/gabbits/resource.yaml @@ -361,6 +361,56 @@ tests: $[0].user_id: 0fbb231484614b1a80131fc22f6afc9c $[-1].user_id: foobar + - name: list generic resources with attrs param + GET: /v1/resource/generic?attrs=id&attrs=started_at&attrs=user_id + response_json_paths: + $[0].`len`: 3 + $[0].id: $RESPONSE['$[0].id'] + $[0].started_at: $RESPONSE['$[0].started_at'] + $[0].user_id: $RESPONSE['$[0].user_id'] + $[1].`len`: 3 + + - name: list generic resources with invalid attrs param + GET: /v1/resource/generic?attrs=id&attrs=foo&attrs=bar + response_json_paths: + $[0].`len`: 1 + $[0].id: $RESPONSE['$[0].id'] + $[1].`len`: 1 + + - name: list generic resources without attrs param + GET: /v1/resource/generic + response_json_paths: + $[0].`len`: 13 + $[1].`len`: 13 + + - name: list generic resources with attrs header + GET: /v1/resource/generic + request_headers: + Accept: "application/json; attrs=id+started_at+user_id" + response_json_paths: + $[0].`len`: 3 + $[0].id: $RESPONSE['$[0].id'] + 
$[0].started_at: $RESPONSE['$[0].started_at'] + $[0].user_id: $RESPONSE['$[0].user_id'] + $[1].`len`: 3 + + - name: list generic resources with invalid attrs header + GET: /v1/resource/generic + request_headers: + Accept: "application/json; attrs=id+foo+bar" + response_json_paths: + $[0].`len`: 1 + $[0].id: $RESPONSE['$[0].id'] + $[1].`len`: 1 + + - name: list generic resources without attrs header + GET: /v1/resource/generic + request_headers: + Accept: "application/json" + response_json_paths: + $[0].`len`: 13 + $[1].`len`: 13 + - name: list all resources GET: /v1/resource/generic response_strings: diff --git a/gnocchi/tests/functional/gabbits/search.yaml b/gnocchi/tests/functional/gabbits/search.yaml index 1e7e1bdb..9508411d 100644 --- a/gnocchi/tests/functional/gabbits/search.yaml +++ b/gnocchi/tests/functional/gabbits/search.yaml @@ -201,3 +201,59 @@ tests: project_id: foobar response_json_paths: $.`len`: 3 + + - name: search all resource with attrs param + POST: /v1/search/resource/generic?attrs=id&attrs=started_at&attrs=user_id + data: {} + response_json_paths: + $[0].`len`: 3 + $[0].id: $RESPONSE['$[0].id'] + $[0].started_at: $RESPONSE['$[0].started_at'] + $[0].user_id: $RESPONSE['$[0].user_id'] + $[1].`len`: 3 + + - name: search all resource with invalid attrs param + POST: /v1/search/resource/generic?attrs=id&attrs=foo&attrs=bar + data: {} + response_json_paths: + $[0].`len`: 1 + $[0].id: $RESPONSE['$[0].id'] + $[1].`len`: 1 + + - name: search all resource without attrs param + POST: /v1/search/resource/generic + data: {} + response_json_paths: + $[0].`len`: 13 + $[1].`len`: 13 + + - name: search all resource with attrs header + POST: /v1/search/resource/generic + data: {} + request_headers: + Accept: "application/json; attrs=id+started_at+user_id" + response_json_paths: + $[0].`len`: 3 + $[0].id: $RESPONSE['$[0].id'] + $[0].started_at: $RESPONSE['$[0].started_at'] + $[0].user_id: $RESPONSE['$[0].user_id'] + $[1].`len`: 3 + + - name: search all resource 
with invalid attrs header + POST: /v1/search/resource/generic + data: {} + request_headers: + Accept: "application/json; attrs=id+foo+bar" + response_json_paths: + $[0].`len`: 1 + $[0].id: $RESPONSE['$[0].id'] + $[1].`len`: 1 + + - name: search all resource without attrs header + POST: /v1/search/resource/generic + data: {} + request_headers: + Accept: "application/json" + response_json_paths: + $[0].`len`: 13 + $[1].`len`: 13 -- GitLab From 2dbb4bcb69ceb23e1d79baed28699da6b596e9f9 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Fri, 15 Dec 2017 10:46:06 +0100 Subject: [PATCH 1166/1483] Retry and log on drivers initialization failure This retries and logs all failure to connect to any of the driver (index, storage, incoming and coordinator). The wait time is not changed (exponential up to 1 minute), but now both the API and metricd uses it this mechanism. The error is also logged, so that any failure that is not connection related (e.g. Python module missing) is properly logged and the operator knows what he going wrong: 2017-12-15 10:37:49,075 [9070] ERROR gnocchi.utils: Unable to initialize coordination driver Traceback (most recent call last): File "/Users/jd/Source/tenacity/tenacity/__init__.py", line 298, in call result = fn(*args, **kwargs) File "./gnocchi/cli/metricd.py", line 44, in get_coordinator_and_start coord.start(start_heart=True) File "/Users/jd/Source/tooz/tooz/coordination.py", line 423, in start self._start() File "/Users/jd/Source/tooz/tooz/drivers/mysql.py", line 139, in _start self._options) File "/Users/jd/Source/tooz/tooz/drivers/mysql.py", line 198, in get_connection cause=e) File "/Users/jd/Source/tooz/tooz/utils.py", line 225, in raise_with_cause excutils.raise_with_cause(exc_cls, message, *args, **kwargs) File "/usr/local/opt/python/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages/oslo_utils/excutils.py", line 143, in raise_with_cause six.raise_from(exc_cls(message, *args, **kwargs), kwargs.get('cause')) File 
"/usr/local/opt/python/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages/six.py", line 737, in raise_from raise value ToozConnectionError: (2003, "Can't connect to MySQL server on 'localhost' ([Errno 61] Connection refused)") Fixes #585 --- gnocchi/cli/metricd.py | 24 ++++++++---------------- gnocchi/incoming/__init__.py | 1 + gnocchi/indexer/__init__.py | 2 ++ gnocchi/storage/__init__.py | 1 + gnocchi/utils.py | 22 ++++++++++++++++++++++ 5 files changed, 34 insertions(+), 16 deletions(-) diff --git a/gnocchi/cli/metricd.py b/gnocchi/cli/metricd.py index 0ad70a2a..70821888 100644 --- a/gnocchi/cli/metricd.py +++ b/gnocchi/cli/metricd.py @@ -38,13 +38,7 @@ from gnocchi import utils LOG = daiquiri.getLogger(__name__) -# Retry with exponential backoff for up to 1 minute -_wait_exponential = tenacity.wait_exponential(multiplier=0.5, max=60) - - -retry_on_exception = tenacity.Retrying(wait=_wait_exponential) - - +@utils.retry_on_exception_and_log("Unable to initialize coordination driver") def get_coordinator_and_start(url): coord = coordination.get_coordinator(url, str(uuid.uuid4()).encode()) coord.start(start_heart=True) @@ -65,12 +59,10 @@ class MetricProcessBase(cotyledon.Service): self._wake_up.set() def _configure(self): - self.coord = retry_on_exception(get_coordinator_and_start, - self.conf.coordination_url) - self.store = retry_on_exception( - storage.get_driver, self.conf, self.coord) - self.incoming = retry_on_exception(incoming.get_driver, self.conf) - self.index = retry_on_exception(indexer.get_driver, self.conf) + self.coord = get_coordinator_and_start(self.conf.coordination_url) + self.store = storage.get_driver(self.conf, self.coord) + self.incoming = incoming.get_driver(self.conf) + self.index = indexer.get_driver(self.conf) def run(self): self._configure() @@ -112,7 +104,7 @@ class MetricReporting(MetricProcessBase): worker_id, conf, conf.metricd.metric_reporting_delay) def _configure(self): - self.incoming = 
retry_on_exception(incoming.get_driver, self.conf) + self.incoming = incoming.get_driver(self.conf) @staticmethod def close_services(): @@ -150,7 +142,7 @@ class MetricProcessor(MetricProcessBase): )(self._get_sacks_to_process) @tenacity.retry( - wait=_wait_exponential, + wait=utils.wait_exponential, # Never retry except when explicitly asked by raising TryAgain retry=tenacity.retry_never) def _configure(self): @@ -177,7 +169,7 @@ class MetricProcessor(MetricProcessBase): filler.daemon = True filler.start() - @retry_on_exception.wraps + @utils.retry_on_exception.wraps def _fill_sacks_to_process(self): try: for sack in self.incoming.iter_on_sacks_to_process(): diff --git a/gnocchi/incoming/__init__.py b/gnocchi/incoming/__init__.py index 4624b59c..bc59f3ad 100644 --- a/gnocchi/incoming/__init__.py +++ b/gnocchi/incoming/__init__.py @@ -183,6 +183,7 @@ class IncomingDriver(object): pass +@utils.retry_on_exception_and_log("Unable to initialize incoming driver") def get_driver(conf): """Return configured incoming driver only diff --git a/gnocchi/indexer/__init__.py b/gnocchi/indexer/__init__.py index 92356815..5baf8536 100644 --- a/gnocchi/indexer/__init__.py +++ b/gnocchi/indexer/__init__.py @@ -24,6 +24,7 @@ from six.moves.urllib import parse from stevedore import driver from gnocchi import exceptions +from gnocchi import utils OPTS = [ cfg.StrOpt('url', @@ -98,6 +99,7 @@ class Metric(object): __hash__ = object.__hash__ +@utils.retry_on_exception_and_log("Unable to initialize indexer driver") def get_driver(conf): """Return the configured driver.""" split = parse.urlsplit(conf.indexer.url) diff --git a/gnocchi/storage/__init__.py b/gnocchi/storage/__init__.py index 829f18f0..c2568651 100644 --- a/gnocchi/storage/__init__.py +++ b/gnocchi/storage/__init__.py @@ -110,6 +110,7 @@ class SackLockTimeoutError(StorageError): pass +@utils.retry_on_exception_and_log("Unable to initialize storage driver") def get_driver(conf, coord): """Return the configured driver.""" return 
utils.get_driver_class('gnocchi.storage', conf.storage)( diff --git a/gnocchi/utils.py b/gnocchi/utils.py index 1576ae7b..0510d18c 100644 --- a/gnocchi/utils.py +++ b/gnocchi/utils.py @@ -30,6 +30,7 @@ import numpy import pytimeparse import six from stevedore import driver +import tenacity LOG = daiquiri.getLogger(__name__) @@ -312,3 +313,24 @@ def parallel_map(fn, list_of_args): parallel_map.MAX_WORKERS = get_default_workers() + +# Retry with exponential backoff for up to 1 minute +wait_exponential = tenacity.wait_exponential(multiplier=0.5, max=60) + +retry_on_exception = tenacity.Retrying(wait=wait_exponential) + + +class _retry_on_exception_and_log(tenacity.retry_if_exception_type): + def __init__(self, msg): + super(_retry_on_exception_and_log, self).__init__() + self.msg = msg + + def __call__(self, attempt): + if attempt.failed: + LOG.error(self.msg, exc_info=attempt.exception()) + return super(_retry_on_exception_and_log, self).__call__(attempt) + + +def retry_on_exception_and_log(msg): + return tenacity.Retrying( + wait=wait_exponential, retry=_retry_on_exception_and_log(msg)).wraps -- GitLab From cb05082e6e3b801107bb37d59492b8b3238d9f69 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Thu, 7 Dec 2017 00:52:42 +0100 Subject: [PATCH 1167/1483] rest: fail properly if granularity is not a timespan The current code will fail with a 500 error. 
(cherry picked from commit 63ea0aaa3a6b1c277f132afad157af5547b78b8b) --- gnocchi/rest/__init__.py | 11 +++++++++-- gnocchi/tests/functional/gabbits/metric.yaml | 10 ++++++++++ 2 files changed, 19 insertions(+), 2 deletions(-) diff --git a/gnocchi/rest/__init__.py b/gnocchi/rest/__init__.py index ada780c0..4898b975 100644 --- a/gnocchi/rest/__init__.py +++ b/gnocchi/rest/__init__.py @@ -430,6 +430,14 @@ class MetricController(rest.RestController): except Exception: abort(400, "Invalid value for stop") + if granularity is not None: + try: + granularity = Timespan(granularity) + except ValueError: + abort(400, {"cause": "Attribute value error", + "detail": "granularity", + "reason": "Invalid granularity"}) + if resample: if not granularity: abort(400, 'A granularity must be specified to resample') @@ -454,8 +462,7 @@ class MetricController(rest.RestController): else: measures = pecan.request.storage.get_measures( self.metric, start, stop, aggregation, - Timespan(granularity) if granularity is not None else None, - resample) + granularity, resample) # Replace timestamp keys by their string versions return [(timestamp.isoformat(), offset, v) for timestamp, offset, v in measures] diff --git a/gnocchi/tests/functional/gabbits/metric.yaml b/gnocchi/tests/functional/gabbits/metric.yaml index 987f9a51..98d5ddae 100644 --- a/gnocchi/tests/functional/gabbits/metric.yaml +++ b/gnocchi/tests/functional/gabbits/metric.yaml @@ -194,6 +194,16 @@ tests: - ["2015-03-06T14:33:57+00:00", 1.0, 43.1] - ["2015-03-06T14:34:12+00:00", 1.0, 12.0] + - name: get measurements from metric invalid granularity + GET: /v1/metric/$HISTORY['list valid metrics'].$RESPONSE['$[0].id']/measures?granularity=foobar + request_headers: + accept: application/json + status: 400 + response_json_paths: + $.description.cause: Attribute value error + $.description.reason: Invalid granularity + $.description.detail: granularity + - name: push measurements to metric again POST: /v1/metric/$HISTORY['list valid 
metrics'].$RESPONSE['$[0].id']/measures data: -- GitLab From 620efe50ee44a3a8b0af98588e538cf2cab11e6b Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Fri, 22 Dec 2017 10:17:17 +0100 Subject: [PATCH 1168/1483] tests: remove useless response_headers field that breaks with Gabbi 1.39.0 (cherry picked from commit 05b000babec14e9afdafd105f21b39b628e39f6f) --- gnocchi/tests/functional/gabbits/resource.yaml | 1 - 1 file changed, 1 deletion(-) diff --git a/gnocchi/tests/functional/gabbits/resource.yaml b/gnocchi/tests/functional/gabbits/resource.yaml index da423767..9cf5c35d 100644 --- a/gnocchi/tests/functional/gabbits/resource.yaml +++ b/gnocchi/tests/functional/gabbits/resource.yaml @@ -483,7 +483,6 @@ tests: data: electron.spin: archive_policy_name: medium - response_headers: response_json_paths: $[/name][1].name: electron.spin $[/name][1].resource_id: 85c44741-cc60-4033-804e-2d3098c7d2e9 -- GitLab From d94945f0b871972428e67767f1f28ebec7aa7110 Mon Sep 17 00:00:00 2001 From: gord chung Date: Tue, 2 Jan 2018 21:39:51 +0000 Subject: [PATCH 1169/1483] speed up median aggregate - find the position of max value of each range - calculate distance to middle based on range counts - get floor/ceiling of middle (same if odd count), add, and divide. 
- ~1.9x faster in random case --- gnocchi/carbonara.py | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/gnocchi/carbonara.py b/gnocchi/carbonara.py index 090335b4..ff6f1969 100644 --- a/gnocchi/carbonara.py +++ b/gnocchi/carbonara.py @@ -148,7 +148,18 @@ class GroupedTimeSeries(object): return make_timeseries(self.tstamps, values) def median(self): - return self._scipy_aggregate(ndimage.median) + ordered = self._ts['values'].argsort() + uniq_inv = numpy.repeat(numpy.arange(self.counts.size), self.counts) + max_pos = numpy.zeros(self.tstamps.size, dtype=numpy.int) + max_pos[uniq_inv[ordered]] = numpy.arange(self._ts.size) + # TODO(gordc): can use np.divmod when centos supports numpy 1.13 + mid_diff = numpy.floor_divide(self.counts, 2) + odd = numpy.mod(self.counts, 2) + mid_floor = max_pos - mid_diff + mid_ceil = mid_floor + (odd + 1) % 2 + return make_timeseries( + self.tstamps, (self._ts['values'][ordered][mid_floor] + + self._ts['values'][ordered][mid_ceil]) / 2.0) def std(self): mean_ts = self.mean() -- GitLab From 494e9211c8c734d2f9ca83c62f571d099a7832f6 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Thu, 21 Dec 2017 11:58:38 +0100 Subject: [PATCH 1170/1483] api: remove custom type for validation and leverage voluptuous --- gnocchi/rest/api.py | 29 ++++++++----------- gnocchi/tests/functional/gabbits/archive.yaml | 2 +- requirements.txt | 2 +- 3 files changed, 14 insertions(+), 19 deletions(-) diff --git a/gnocchi/rest/api.py b/gnocchi/rest/api.py index 2d331466..e7d59709 100644 --- a/gnocchi/rest/api.py +++ b/gnocchi/rest/api.py @@ -160,20 +160,6 @@ def deserialize_and_validate(schema, required=True, required) -def PositiveOrNullInt(value): - value = int(value) - if value < 0: - raise ValueError("Value must be positive") - return value - - -def PositiveNotNullInt(value): - value = int(value) - if value <= 0: - raise ValueError("Value must be positive and not null") - return value - - def Timespan(value): try: return 
utils.to_timespan(value) @@ -276,7 +262,10 @@ class ArchivePolicyController(rest.RestController): voluptuous.Required("definition"): voluptuous.All([{ "granularity": Timespan, - "points": PositiveNotNullInt, + "points": voluptuous.All( + voluptuous.Coerce(int), + voluptuous.Range(min=1), + ), "timespan": Timespan}], voluptuous.Length(min=1)), })) # Validate the data @@ -321,7 +310,10 @@ class ArchivePoliciesController(rest.RestController): ) ArchivePolicySchema = voluptuous.Schema({ voluptuous.Required("name"): six.text_type, - voluptuous.Required("back_window", default=0): PositiveOrNullInt, + voluptuous.Required("back_window", default=0): voluptuous.All( + voluptuous.Coerce(int), + voluptuous.Range(min=0), + ), voluptuous.Required( "aggregation_methods", default=set(conf.archive_policy.default_aggregation_methods)): @@ -329,7 +321,10 @@ class ArchivePoliciesController(rest.RestController): voluptuous.Required("definition"): voluptuous.All([{ "granularity": Timespan, - "points": PositiveNotNullInt, + "points": voluptuous.All( + voluptuous.Coerce(int), + voluptuous.Range(min=1), + ), "timespan": Timespan, }], voluptuous.Length(min=1)), }) diff --git a/gnocchi/tests/functional/gabbits/archive.yaml b/gnocchi/tests/functional/gabbits/archive.yaml index 28cfefc2..c81ed03e 100644 --- a/gnocchi/tests/functional/gabbits/archive.yaml +++ b/gnocchi/tests/functional/gabbits/archive.yaml @@ -455,7 +455,7 @@ tests: points: 0 status: 400 response_strings: - - "Invalid input: not a valid value for dictionary value" + - "Invalid input: value must be at least 1 for dictionary value " - name: create identical granularities policy POST: /v1/archive_policy diff --git a/requirements.txt b/requirements.txt index a31ccc30..921ac44f 100644 --- a/requirements.txt +++ b/requirements.txt @@ -13,7 +13,7 @@ cotyledon>=1.5.0 six stevedore ujson -voluptuous +voluptuous>=0.6 werkzeug trollius; python_version < '3.4' tenacity>=4.6.0 -- GitLab From 9d5d537354ab4f2a8782c59a32fa598ef05e4bf6 Mon Sep 
17 00:00:00 2001 From: Julien Danjou Date: Thu, 21 Dec 2017 21:50:11 +0100 Subject: [PATCH 1171/1483] api: share ArchivePolicyDefinitionSchema definition --- gnocchi/rest/api.py | 35 ++++++++++++++++------------------- 1 file changed, 16 insertions(+), 19 deletions(-) diff --git a/gnocchi/rest/api.py b/gnocchi/rest/api.py index e7d59709..56eff823 100644 --- a/gnocchi/rest/api.py +++ b/gnocchi/rest/api.py @@ -237,6 +237,18 @@ def get_pagination_options(params, default): return opts +ArchivePolicyDefinitionSchema = voluptuous.Schema( + voluptuous.All([{ + "granularity": Timespan, + "points": voluptuous.All( + voluptuous.Coerce(int), + voluptuous.Range(min=1), + ), + "timespan": Timespan, + }], voluptuous.Length(min=1)), +) + + class ArchivePolicyController(rest.RestController): def __init__(self, archive_policy): self.archive_policy = archive_policy @@ -259,15 +271,8 @@ class ArchivePolicyController(rest.RestController): enforce("update archive policy", ap) body = deserialize_and_validate(voluptuous.Schema({ - voluptuous.Required("definition"): - voluptuous.All([{ - "granularity": Timespan, - "points": voluptuous.All( - voluptuous.Coerce(int), - voluptuous.Range(min=1), - ), - "timespan": Timespan}], voluptuous.Length(min=1)), - })) + voluptuous.Required("definition"): ArchivePolicyDefinitionSchema, + })) # Validate the data try: ap_items = [archive_policy.ArchivePolicyItem(**item) for item in @@ -318,16 +323,8 @@ class ArchivePoliciesController(rest.RestController): "aggregation_methods", default=set(conf.archive_policy.default_aggregation_methods)): voluptuous.All(list(valid_agg_methods), voluptuous.Coerce(set)), - voluptuous.Required("definition"): - voluptuous.All([{ - "granularity": Timespan, - "points": voluptuous.All( - voluptuous.Coerce(int), - voluptuous.Range(min=1), - ), - "timespan": Timespan, - }], voluptuous.Length(min=1)), - }) + voluptuous.Required("definition"): ArchivePolicyDefinitionSchema, + }) body = deserialize_and_validate(ArchivePolicySchema) 
# Validate the data -- GitLab From 27b1e610dd20577d2bbbe63d7424bbe5b3021914 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Wed, 3 Jan 2018 14:15:09 +0100 Subject: [PATCH 1172/1483] doc: reorder Dynamic Aggregate chapters This change moves the list of API calls of Dynamic Aggregate before the long list of available operations. --- doc/source/rest.j2 | 108 +++++++++++++++++++++++---------------------- 1 file changed, 55 insertions(+), 53 deletions(-) diff --git a/doc/source/rest.j2 b/doc/source/rest.j2 index 4a5326a7..fee5c23e 100644 --- a/doc/source/rest.j2 +++ b/doc/source/rest.j2 @@ -707,6 +707,61 @@ to apply to the |metrics|. `operations` can also be passed as a string, for example: `"operations": "(aggregate mean (metric (metric-id aggregation) (metric-id aggregation))"` +Cross-metric Usage +------------------ + +Aggregation across multiple |metrics| have different behavior depending +on whether boundary values are set (`start` and `stop`) and if `needed_overlap` +is set. + +Overlap percentage +~~~~~~~~~~~~~~~~~~ + +Gnocchi expects that time series have a certain percentage of timestamps in +common. This percent is controlled by the `needed_overlap` needed_overlap, +which by default expects 100% overlap. If this percentage is not reached, an +error is returned. + +.. note:: + + If `start` or `stop` boundary is not set, Gnocchi will set the missing + boundary to the first or last timestamp common across all series. + +Backfill +~~~~~~~~ + +The ability to fill in missing points from a subset of time series is supported +by specifying a `fill` value. Valid fill values include any float, `dropna` or +`null`. In the case of `null`, Gnocchi will compute the aggregation using only +the existing points. `dropna` is like `null` but remove NaN from the result. +The `fill` parameter will not backfill timestamps which contain no points in +any of the time series. Only timestamps which have datapoints in at least one +of the time series is returned. 
+ +{{ scenarios['get-aggregates-by-metric-ids-fill']['doc'] }} + + +Search and aggregate +-------------------- + +It's also possible to do that aggregation on |metrics| linked to |resources|. +In order to select these |resources|, the following endpoint accepts a query +such as the one described in the :ref:`resource search API `. + +{{ scenarios['get-aggregates-by-attributes-lookup']['doc'] }} + +And metric name can be `wildcard` too. + +{{ scenarios['get-aggregates-by-attributes-lookup-wildcard']['doc'] }} + +Groupby +~~~~~~~ + +It is possible to group the |resource| search results by any attribute of the +requested |resource| type, and then compute the aggregation: + +{{ scenarios['get-aggregates-by-attributes-lookup-groupby']['doc'] }} + List of supported ------------------------------ @@ -799,60 +854,7 @@ Function operations (floor ()) (ceil ()) -Cross-metric Usage ------------------- - -Aggregation across multiple |metrics| have different behavior depending -on whether boundary values are set (`start` and `stop`) and if `needed_overlap` -is set. - -Overlap percentage -~~~~~~~~~~~~~~~~~~ - -Gnocchi expects that time series have a certain percentage of timestamps in -common. This percent is controlled by the `needed_overlap` needed_overlap, -which by default expects 100% overlap. If this percentage is not reached, an -error is returned. - -.. note:: - - If `start` or `stop` boundary is not set, Gnocchi will set the missing - boundary to the first or last timestamp common across all series. - -Backfill -~~~~~~~~ - -The ability to fill in missing points from a subset of time series is supported -by specifying a `fill` value. Valid fill values include any float, `dropna` or -`null`. In the case of `null`, Gnocchi will compute the aggregation using only -the existing points. `dropna` is like `null` but remove NaN from the result. -The `fill` parameter will not backfill timestamps which contain no points in -any of the time series. 
Only timestamps which have datapoints in at least one -of the time series is returned. - -{{ scenarios['get-aggregates-by-metric-ids-fill']['doc'] }} - - -Search and aggregate --------------------- - -It's also possible to do that aggregation on |metrics| linked to |resources|. -In order to select these |resources|, the following endpoint accepts a query -such as the one described in the :ref:`resource search API `. -{{ scenarios['get-aggregates-by-attributes-lookup']['doc'] }} - -And metric name can be `wildcard` too. - -{{ scenarios['get-aggregates-by-attributes-lookup-wildcard']['doc'] }} - -Groupby -~~~~~~~ - -It is possible to group the |resource| search results by any attribute of the -requested |resource| type, and then compute the aggregation: - -{{ scenarios['get-aggregates-by-attributes-lookup-groupby']['doc'] }} Examples -------- -- GitLab From 4211952e33750e7ff97f8cbb11091b7769311351 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Mon, 18 Dec 2017 19:01:29 +0100 Subject: [PATCH 1173/1483] storage: truncate aggregated time series before storing The current truncation code in AggregatedTimeSerie truncate on object initialize only. That means that split up to POINTS_PER_SPLIT points are stored, and that data is read and _truncated_ at this point. When new points are added (existing merge), it's possible that the number of points grows more than the number of points defined by the archive policy, storing too many points. That's hidden because when re-read, the AggregatedTimeSerie is truncated again. However, that makes the storage driver store too much data. This patches remove the max_size attribute from carbonara.AggregatedTimeSerie and simply expose a truncate() method that can be used to truncate each split based on the oldest point that is defined by the archive policy. 
--- doc/source/operating.rst | 6 +-- gnocchi/carbonara.py | 52 +++++++++---------- gnocchi/storage/__init__.py | 38 ++++++++++---- gnocchi/tests/test_aggregates.py | 13 +++-- gnocchi/tests/test_carbonara.py | 32 ++++++++---- gnocchi/tests/test_storage.py | 1 - ...ra-truncate-timespan-3694b96449709083.yaml | 9 ++++ run-upgrade-tests.sh | 10 +++- 8 files changed, 102 insertions(+), 59 deletions(-) create mode 100644 releasenotes/notes/carbonara-truncate-timespan-3694b96449709083.yaml diff --git a/doc/source/operating.rst b/doc/source/operating.rst index 50d3e0d5..31b5c905 100644 --- a/doc/source/operating.rst +++ b/doc/source/operating.rst @@ -85,9 +85,9 @@ points over a |timespan|. If your |archive policy| defines a policy of 10 points with a |granularity| of 1 second, the |time series| archive will keep up to 10 seconds, each representing an aggregation over 1 second. This means the |time series| will at -maximum retain 10 seconds of data (sometimes a bit more) between the more -recent point and the oldest point. That does not mean it will be 10 consecutive -seconds: there might be a gap if data is fed irregularly. +maximum retain 10 seconds of data between the more recent point and the oldest +point. That does not mean it will be 10 consecutive seconds: there might be a +gap if data is fed irregularly. **There is no expiry of data relative to the current timestamp. 
Data is only expired according to timespan.** diff --git a/gnocchi/carbonara.py b/gnocchi/carbonara.py index ff6f1969..b803fe77 100644 --- a/gnocchi/carbonara.py +++ b/gnocchi/carbonara.py @@ -542,8 +542,7 @@ class AggregatedTimeSerie(TimeSerie): COMPRESSED_SERIAL_LEN = struct.calcsize("" % ( + return "<%s 0x%x sampling=%s agg_method=%s>" % ( self.__class__.__name__, id(self), self.sampling, - self.max_size, self.aggregation_method, ) @@ -753,12 +755,6 @@ class AggregatedTimeSerie(TimeSerie): offset = int((first - start.key) / offset_div) * self.PADDED_SERIAL_LEN return offset, payload - def _truncate(self): - """Truncate the timeserie.""" - if self.max_size is not None: - # Remove empty points if any that could be added by aggregation - self.ts = self.ts[-self.max_size:] - @staticmethod def _resample_grouped(grouped_serie, agg_name, q=None): agg_func = getattr(grouped_serie, agg_name) diff --git a/gnocchi/storage/__init__.py b/gnocchi/storage/__init__.py index c2568651..8b89dc16 100644 --- a/gnocchi/storage/__init__.py +++ b/gnocchi/storage/__init__.py @@ -244,10 +244,10 @@ class StorageDriver(object): aggregation, granularity, from_timestamp=None, to_timestamp=None): - # Find the number of point + # Find the timespan for d in metric.archive_policy.definition: if d.granularity == granularity: - points = d.points + timespan = d.timespan break else: raise AggregationDoesNotExist(metric, aggregation, granularity) @@ -258,8 +258,7 @@ class StorageDriver(object): except MetricDoesNotExist: return carbonara.AggregatedTimeSerie( sampling=granularity, - aggregation_method=aggregation, - max_size=points) + aggregation_method=aggregation) if from_timestamp: from_timestamp = carbonara.SplitKey.from_timestamp_and_sampling( @@ -276,14 +275,25 @@ class StorageDriver(object): timeseries = self._get_measures_and_unserialize( metric, keys, aggregation) - return carbonara.AggregatedTimeSerie.from_timeseries( + ts = carbonara.AggregatedTimeSerie.from_timeseries( 
sampling=granularity, aggregation_method=aggregation, - timeseries=timeseries, - max_size=points) + timeseries=timeseries) + # We need to truncate because: + # - If the driver is not in WRITE_FULL mode, then it might read too + # much data that will be deleted once the split is rewritten. Just + # truncate so we don't return it. + # - If the driver is in WRITE_FULL but the archive policy has been + # resized, we might still have too much points stored, which will be + # deleted at a later point when new points will be procecessed. + # Truncate to be sure we don't return them. + if timespan is not None: + ts.truncate(timespan) + return ts def _store_timeserie_split(self, metric, key, split, - aggregation, oldest_mutable_timestamp): + aggregation, oldest_mutable_timestamp, + oldest_point_to_keep): # NOTE(jd) We write the full split only if the driver works that way # (self.WRITE_FULL) or if the oldest_mutable_timestamp is out of range. write_full = self.WRITE_FULL or next(key) <= oldest_mutable_timestamp @@ -314,6 +324,9 @@ class StorageDriver(object): aggregation, key) return + if oldest_point_to_keep is not None: + split.truncate(oldest_point_to_keep) + offset, data = split.serialize(key, compressed=write_full) return self._store_metric_measures(metric, key, aggregation, @@ -332,7 +345,7 @@ class StorageDriver(object): ts = carbonara.AggregatedTimeSerie.from_grouped_serie( grouped_serie, archive_policy_def.granularity, - aggregation_to_compute, max_size=archive_policy_def.points) + aggregation_to_compute) # Don't do anything if the timeserie is empty if not ts: @@ -362,6 +375,7 @@ class StorageDriver(object): self._delete_metric_measures(metric, key, aggregation) existing_keys.remove(key) else: + oldest_point_to_keep = None oldest_key_to_keep = None # Rewrite all read-only splits just for fun (and compression). This @@ -383,7 +397,8 @@ class StorageDriver(object): # compression). For that, we just pass None as split. 
self._store_timeserie_split( metric, key, - None, aggregation, oldest_mutable_timestamp) + None, aggregation, oldest_mutable_timestamp, + oldest_point_to_keep) for key, split in ts.split(): if oldest_key_to_keep is None or key >= oldest_key_to_keep: @@ -391,7 +406,8 @@ class StorageDriver(object): "Storing split %s (%s) for metric %s", key, aggregation, metric) self._store_timeserie_split( - metric, key, split, aggregation, oldest_mutable_timestamp) + metric, key, split, aggregation, oldest_mutable_timestamp, + oldest_point_to_keep) @staticmethod def _delete_metric(metric): diff --git a/gnocchi/tests/test_aggregates.py b/gnocchi/tests/test_aggregates.py index 117ceb61..9901c507 100644 --- a/gnocchi/tests/test_aggregates.py +++ b/gnocchi/tests/test_aggregates.py @@ -53,8 +53,7 @@ class TestAggregatedTimeseries(base.BaseTestCase): agg_dict['return'] = ( processor.MetricReference(metric, "mean", resource), carbonara.AggregatedTimeSerie.from_grouped_serie( - grouped, agg_dict['sampling'], agg_dict['agg'], - max_size=agg_dict.get('size'), truncate=True)) + grouped, agg_dict['sampling'], agg_dict['agg'])) if existing: existing[2].merge(agg_dict['return'][2]) agg_dict['return'] = existing @@ -90,7 +89,6 @@ class TestAggregatedTimeseries(base.BaseTestCase): tsb1 = carbonara.BoundTimeSerie(block_size=tsc1['sampling']) tsc2 = carbonara.AggregatedTimeSerie( sampling=numpy.timedelta64(60, 's'), - max_size=50, aggregation_method='mean') tsb1.set_values(numpy.array([(datetime64(2014, 1, 1, 12, 3, 0), 4)], @@ -721,6 +719,15 @@ class TestAggregatedTimeseries(base.BaseTestCase): numpy.timedelta64(300, 's'), 6.0), (datetime64(2014, 1, 1, 12, 5), numpy.timedelta64(300, 's'), 5.1666666666666661), + (numpy.datetime64('2014-01-01T11:46:00.000000000'), + numpy.timedelta64(60, 's'), + 5.0), + (numpy.datetime64('2014-01-01T11:47:00.000000000'), + numpy.timedelta64(60, 's'), + 6.5), + (numpy.datetime64('2014-01-01T11:50:00.000000000'), + numpy.timedelta64(60, 's'), + 50.5), 
(datetime64(2014, 1, 1, 11, 54), numpy.timedelta64(60, 's'), 4.5), (datetime64(2014, 1, 1, 11, 56), diff --git a/gnocchi/tests/test_carbonara.py b/gnocchi/tests/test_carbonara.py index 32e3e2d8..30974288 100644 --- a/gnocchi/tests/test_carbonara.py +++ b/gnocchi/tests/test_carbonara.py @@ -173,12 +173,12 @@ class TestAggregatedTimeSerie(base.BaseTestCase): ts.group_serie, 60) @staticmethod - def _resample(ts, sampling, agg, max_size=None, derived=False): + def _resample(ts, sampling, agg, derived=False): grouped = ts.group_serie(sampling) if derived: grouped = grouped.derived() return carbonara.AggregatedTimeSerie.from_grouped_serie( - grouped, sampling, agg, max_size=max_size, truncate=True) + grouped, sampling, agg) def test_derived_mean(self): ts = carbonara.TimeSerie.from_tuples( @@ -343,13 +343,15 @@ class TestAggregatedTimeSerie(base.BaseTestCase): datetime64(2014, 1, 1, 12, 0, 9)], [3, 5]) - def test_max_size(self): + def test_truncate(self): ts = carbonara.TimeSerie.from_data( [datetime64(2014, 1, 1, 12, 0, 0), datetime64(2014, 1, 1, 12, 0, 4), datetime64(2014, 1, 1, 12, 0, 9)], [3, 5, 6]) - ts = self._resample(ts, numpy.timedelta64(1, 's'), 'mean', max_size=2) + ts = self._resample(ts, numpy.timedelta64(1, 's'), 'mean') + + ts.truncate(datetime64(2014, 1, 1, 12, 0, 0)) self.assertEqual(2, len(ts)) self.assertEqual(5, ts[0][1]) @@ -366,27 +368,31 @@ class TestAggregatedTimeSerie(base.BaseTestCase): self.assertEqual(1, len(ts)) self.assertEqual(5, ts[datetime64(2014, 1, 1, 12, 0, 0)][1]) - def test_down_sampling_with_max_size(self): + def test_down_sampling_and_truncate(self): ts = carbonara.TimeSerie.from_data( [datetime64(2014, 1, 1, 12, 0, 0), datetime64(2014, 1, 1, 12, 1, 4), datetime64(2014, 1, 1, 12, 1, 9), datetime64(2014, 1, 1, 12, 2, 12)], [3, 5, 7, 1]) - ts = self._resample(ts, numpy.timedelta64(60, 's'), 'mean', max_size=2) + ts = self._resample(ts, numpy.timedelta64(60, 's'), 'mean') + + ts.truncate(datetime64(2014, 1, 1, 12, 0, 59)) 
self.assertEqual(2, len(ts)) self.assertEqual(6, ts[datetime64(2014, 1, 1, 12, 1, 0)][1]) self.assertEqual(1, ts[datetime64(2014, 1, 1, 12, 2, 0)][1]) - def test_down_sampling_with_max_size_and_method_max(self): + def test_down_sampling_and_truncate_and_method_max(self): ts = carbonara.TimeSerie.from_data( [datetime64(2014, 1, 1, 12, 0, 0), datetime64(2014, 1, 1, 12, 1, 4), datetime64(2014, 1, 1, 12, 1, 9), datetime64(2014, 1, 1, 12, 2, 12)], [3, 5, 70, 1]) - ts = self._resample(ts, numpy.timedelta64(60, 's'), 'max', max_size=2) + ts = self._resample(ts, numpy.timedelta64(60, 's'), 'max') + + ts.truncate(datetime64(2014, 1, 1, 12, 0, 59)) self.assertEqual(2, len(ts)) self.assertEqual(70, ts[datetime64(2014, 1, 1, 12, 1, 0)][1]) @@ -398,8 +404,7 @@ class TestAggregatedTimeSerie(base.BaseTestCase): grouped = ts.group_serie(agg_dict['sampling']) existing = agg_dict.get('return') agg_dict['return'] = carbonara.AggregatedTimeSerie.from_grouped_serie( - grouped, agg_dict['sampling'], agg_dict['agg'], - max_size=agg_dict.get('size'), truncate=True) + grouped, agg_dict['sampling'], agg_dict['agg']) if existing: existing.merge(agg_dict['return']) agg_dict['return'] = existing @@ -436,6 +441,12 @@ class TestAggregatedTimeSerie(base.BaseTestCase): self._resample_and_merge, agg_dict=ts)) self.assertEqual([ + (numpy.datetime64('2014-01-01T11:46:00.000000000'), + numpy.timedelta64(60, 's'), 4.0), + (numpy.datetime64('2014-01-01T11:47:00.000000000'), + numpy.timedelta64(60, 's'), 8.0), + (numpy.datetime64('2014-01-01T11:50:00.000000000'), + numpy.timedelta64(60, 's'), 50.0), (datetime64(2014, 1, 1, 11, 54), numpy.timedelta64(60000000000, 'ns'), 4.0), (datetime64(2014, 1, 1, 11, 56), @@ -836,7 +847,6 @@ class TestAggregatedTimeSerie(base.BaseTestCase): carbonara.AggregatedTimeSerie.from_timeseries( split, sampling=agg.sampling, - max_size=agg.max_size, aggregation_method=agg.aggregation_method)) def test_resample(self): diff --git a/gnocchi/tests/test_storage.py 
b/gnocchi/tests/test_storage.py index 6d29e564..480891d9 100644 --- a/gnocchi/tests/test_storage.py +++ b/gnocchi/tests/test_storage.py @@ -271,7 +271,6 @@ class TestStorageDriver(tests_base.TestCase): self.trigger_processing() self.assertEqual([ - (datetime64(2014, 1, 1), numpy.timedelta64(1, 'D'), 39.75), (datetime64(2015, 1, 1), numpy.timedelta64(1, 'D'), 69), (datetime64(2015, 1, 1, 12), numpy.timedelta64(1, 'h'), 69), (datetime64(2015, 1, 1, 12), numpy.timedelta64(5, 'm'), 69), diff --git a/releasenotes/notes/carbonara-truncate-timespan-3694b96449709083.yaml b/releasenotes/notes/carbonara-truncate-timespan-3694b96449709083.yaml new file mode 100644 index 00000000..a048a4de --- /dev/null +++ b/releasenotes/notes/carbonara-truncate-timespan-3694b96449709083.yaml @@ -0,0 +1,9 @@ +--- +features: + - | + Gnocchi now strictly respects the archive policy configured timespan when + storing aggregates. Before, it could keep up to the number of points + defined in the archive policy, keeping more than the configured timespan. + The timespan duration is now strictly respected. Gnocchi only keeps the + points between the last aggregated timestamp and the last aggregated + timestamp minus the duration of the archive policy timespan. 
diff --git a/run-upgrade-tests.sh b/run-upgrade-tests.sh index 726554cd..bd50bd1d 100755 --- a/run-upgrade-tests.sh +++ b/run-upgrade-tests.sh @@ -19,7 +19,7 @@ dump_data(){ gnocchi resource list -c id -c type -c project_id -c user_id -c original_resource_id -c started_at -c ended_at -c revision_start -c revision_end | tee $dir/resources.list for resource_id in ${RESOURCE_IDS[@]} $RESOURCE_ID_EXT; do for agg in min max mean sum ; do - gnocchi measures show --aggregation $agg --resource-id $resource_id metric > $dir/${agg}.txt + gnocchi measures show --aggregation $agg --resource-id $resource_id metric -f json > $dir/${agg}.json done done } @@ -90,4 +90,10 @@ gnocchi resource delete $GNOCCHI_STATSD_RESOURCE_ID dump_data $GNOCCHI_DATA/new echo "* Checking output difference between Gnocchi $old_version and $new_version" -diff -uNr $GNOCCHI_DATA/old $GNOCCHI_DATA/new +# This asserts we find the new measures in the old ones. Gnocchi > 4.1 will +# store less points because it uses the timespan and not the points of the +# archive policy +for old in $GNOCCHI_DATA/old/*.json; do + new=$GNOCCHI_DATA/new/$(basename $old) + python -c "import json; old = json.load(open('$old')); new = json.load(open('$new')); assert all(i in old for i in new)" +done -- GitLab From d960741c03d2ae899aa5b2c9d6d2c0ba89209a39 Mon Sep 17 00:00:00 2001 From: gord chung Date: Fri, 5 Jan 2018 15:04:06 +0000 Subject: [PATCH 1174/1483] remove unused from_tuples only used in tests. this hasn't been used since v1. 
--- gnocchi/carbonara.py | 4 -- gnocchi/tests/test_carbonara.py | 112 +++++++++++++++++--------------- 2 files changed, 60 insertions(+), 56 deletions(-) diff --git a/gnocchi/carbonara.py b/gnocchi/carbonara.py index b803fe77..ffb56a9d 100644 --- a/gnocchi/carbonara.py +++ b/gnocchi/carbonara.py @@ -227,10 +227,6 @@ class TimeSerie(object): def from_data(cls, timestamps=None, values=None): return cls(make_timeseries(timestamps, values)) - @classmethod - def from_tuples(cls, timestamps_values): - return cls.from_data(*zip(*timestamps_values)) - def __eq__(self, other): return (isinstance(other, TimeSerie) and numpy.all(self.ts == other.ts)) diff --git a/gnocchi/tests/test_carbonara.py b/gnocchi/tests/test_carbonara.py index 30974288..39cbc6b7 100644 --- a/gnocchi/tests/test_carbonara.py +++ b/gnocchi/tests/test_carbonara.py @@ -164,10 +164,11 @@ class TestAggregatedTimeSerie(base.BaseTestCase): "2014-01-01 13:00:04+01:00"))))) def test_before_epoch(self): - ts = carbonara.TimeSerie.from_tuples( - [(datetime64(1950, 1, 1, 12), 3), - (datetime64(2014, 1, 1, 12), 5), - (datetime64(2014, 1, 1, 12), 6)]) + ts = carbonara.TimeSerie.from_data( + [datetime64(1950, 1, 1, 12), + datetime64(2014, 1, 1, 12), + datetime64(2014, 1, 1, 12)], + [3, 5, 6]) self.assertRaises(carbonara.BeforeEpochError, ts.group_serie, 60) @@ -181,19 +182,20 @@ class TestAggregatedTimeSerie(base.BaseTestCase): grouped, sampling, agg) def test_derived_mean(self): - ts = carbonara.TimeSerie.from_tuples( - [(datetime.datetime(2014, 1, 1, 12, 0, 0), 50), - (datetime.datetime(2014, 1, 1, 12, 0, 4), 55), - (datetime.datetime(2014, 1, 1, 12, 1, 2), 65), - (datetime.datetime(2014, 1, 1, 12, 1, 14), 66), - (datetime.datetime(2014, 1, 1, 12, 1, 24), 70), - (datetime.datetime(2014, 1, 1, 12, 2, 4), 83), - (datetime.datetime(2014, 1, 1, 12, 2, 35), 92), - (datetime.datetime(2014, 1, 1, 12, 2, 42), 103), - (datetime.datetime(2014, 1, 1, 12, 3, 2), 105), - (datetime.datetime(2014, 1, 1, 12, 3, 22), 5), # Counter 
reset - (datetime.datetime(2014, 1, 1, 12, 3, 42), 7), - (datetime.datetime(2014, 1, 1, 12, 4, 9), 23)]) + ts = carbonara.TimeSerie.from_data( + [datetime.datetime(2014, 1, 1, 12, 0, 0), + datetime.datetime(2014, 1, 1, 12, 0, 4), + datetime.datetime(2014, 1, 1, 12, 1, 2), + datetime.datetime(2014, 1, 1, 12, 1, 14), + datetime.datetime(2014, 1, 1, 12, 1, 24), + datetime.datetime(2014, 1, 1, 12, 2, 4), + datetime.datetime(2014, 1, 1, 12, 2, 35), + datetime.datetime(2014, 1, 1, 12, 2, 42), + datetime.datetime(2014, 1, 1, 12, 3, 2), + datetime.datetime(2014, 1, 1, 12, 3, 22), # Counter reset + datetime.datetime(2014, 1, 1, 12, 3, 42), + datetime.datetime(2014, 1, 1, 12, 4, 9)], + [50, 55, 65, 66, 70, 83, 92, 103, 105, 5, 7, 23]) ts = self._resample(ts, numpy.timedelta64(60, 's'), 'mean', derived=True) @@ -213,16 +215,17 @@ class TestAggregatedTimeSerie(base.BaseTestCase): from_timestamp=datetime64(2014, 1, 1, 12)))) def test_derived_hole(self): - ts = carbonara.TimeSerie.from_tuples( - [(datetime.datetime(2014, 1, 1, 12, 0, 0), 50), - (datetime.datetime(2014, 1, 1, 12, 0, 4), 55), - (datetime.datetime(2014, 1, 1, 12, 1, 2), 65), - (datetime.datetime(2014, 1, 1, 12, 1, 14), 66), - (datetime.datetime(2014, 1, 1, 12, 1, 24), 70), - (datetime.datetime(2014, 1, 1, 12, 3, 2), 105), - (datetime.datetime(2014, 1, 1, 12, 3, 22), 108), - (datetime.datetime(2014, 1, 1, 12, 3, 42), 200), - (datetime.datetime(2014, 1, 1, 12, 4, 9), 202)]) + ts = carbonara.TimeSerie.from_data( + [datetime.datetime(2014, 1, 1, 12, 0, 0), + datetime.datetime(2014, 1, 1, 12, 0, 4), + datetime.datetime(2014, 1, 1, 12, 1, 2), + datetime.datetime(2014, 1, 1, 12, 1, 14), + datetime.datetime(2014, 1, 1, 12, 1, 24), + datetime.datetime(2014, 1, 1, 12, 3, 2), + datetime.datetime(2014, 1, 1, 12, 3, 22), + datetime.datetime(2014, 1, 1, 12, 3, 42), + datetime.datetime(2014, 1, 1, 12, 4, 9)], + [50, 55, 65, 66, 70, 105, 108, 200, 202]) ts = self._resample(ts, numpy.timedelta64(60, 's'), 'last', derived=True) @@ 
-240,10 +243,11 @@ class TestAggregatedTimeSerie(base.BaseTestCase): from_timestamp=datetime64(2014, 1, 1, 12)))) def test_74_percentile_serialized(self): - ts = carbonara.TimeSerie.from_tuples( - [(datetime64(2014, 1, 1, 12, 0, 0), 3), - (datetime64(2014, 1, 1, 12, 0, 4), 5), - (datetime64(2014, 1, 1, 12, 0, 9), 6)]) + ts = carbonara.TimeSerie.from_data( + [datetime64(2014, 1, 1, 12, 0, 0), + datetime64(2014, 1, 1, 12, 0, 4), + datetime64(2014, 1, 1, 12, 0, 9)], + [3, 5, 6]) ts = self._resample(ts, numpy.timedelta64(60, 's'), '74pct') self.assertEqual(1, len(ts)) @@ -255,10 +259,11 @@ class TestAggregatedTimeSerie(base.BaseTestCase): saved_ts = carbonara.AggregatedTimeSerie.unserialize( s, key, '74pct') - ts = carbonara.TimeSerie.from_tuples( - [(datetime64(2014, 1, 1, 12, 0, 0), 3), - (datetime64(2014, 1, 1, 12, 0, 4), 5), - (datetime64(2014, 1, 1, 12, 0, 9), 6)]) + ts = carbonara.TimeSerie.from_data( + [datetime64(2014, 1, 1, 12, 0, 0), + datetime64(2014, 1, 1, 12, 0, 4), + datetime64(2014, 1, 1, 12, 0, 9)], + [3, 5, 6]) ts = self._resample(ts, numpy.timedelta64(60, 's'), '74pct') saved_ts.merge(ts) @@ -266,10 +271,11 @@ class TestAggregatedTimeSerie(base.BaseTestCase): self.assertEqual(5.48, ts[datetime64(2014, 1, 1, 12, 0, 0)][1]) def test_95_percentile(self): - ts = carbonara.TimeSerie.from_tuples( - [(datetime64(2014, 1, 1, 12, 0, 0), 3), - (datetime64(2014, 1, 1, 12, 0, 4), 5), - (datetime64(2014, 1, 1, 12, 0, 9), 6)]) + ts = carbonara.TimeSerie.from_data( + [datetime64(2014, 1, 1, 12, 0, 0), + datetime64(2014, 1, 1, 12, 0, 4), + datetime64(2014, 1, 1, 12, 0, 9)], + [3, 5, 6]) ts = self._resample(ts, numpy.timedelta64(60, 's'), '95pct') self.assertEqual(1, len(ts)) @@ -277,12 +283,13 @@ class TestAggregatedTimeSerie(base.BaseTestCase): ts[datetime64(2014, 1, 1, 12, 0, 0)][1]) def _do_test_aggregation(self, name, v1, v2): - ts = carbonara.TimeSerie.from_tuples( - [(datetime64(2014, 1, 1, 12, 0, 0), 3), - (datetime64(2014, 1, 1, 12, 0, 4), 6), - 
(datetime64(2014, 1, 1, 12, 0, 9), 5), - (datetime64(2014, 1, 1, 12, 1, 4), 8), - (datetime64(2014, 1, 1, 12, 1, 6), 9)]) + ts = carbonara.TimeSerie.from_data( + [datetime64(2014, 1, 1, 12, 0, 0), + datetime64(2014, 1, 1, 12, 0, 4), + datetime64(2014, 1, 1, 12, 0, 9), + datetime64(2014, 1, 1, 12, 1, 4), + datetime64(2014, 1, 1, 12, 1, 6)], + [3, 6, 5, 8, 9]) ts = self._resample(ts, numpy.timedelta64(60, 's'), name) self.assertEqual(2, len(ts)) @@ -318,16 +325,17 @@ class TestAggregatedTimeSerie(base.BaseTestCase): 0.70710678118654757) def test_aggregation_std_with_unique(self): - ts = carbonara.TimeSerie.from_tuples( - [(datetime64(2014, 1, 1, 12, 0, 0), 3)]) + ts = carbonara.TimeSerie.from_data( + [datetime64(2014, 1, 1, 12, 0, 0)], [3]) ts = self._resample(ts, numpy.timedelta64(60, 's'), 'std') self.assertEqual(0, len(ts), ts.values) - ts = carbonara.TimeSerie.from_tuples( - [(datetime64(2014, 1, 1, 12, 0, 0), 3), - (datetime64(2014, 1, 1, 12, 0, 4), 6), - (datetime64(2014, 1, 1, 12, 0, 9), 5), - (datetime64(2014, 1, 1, 12, 1, 6), 9)]) + ts = carbonara.TimeSerie.from_data( + [datetime64(2014, 1, 1, 12, 0, 0), + datetime64(2014, 1, 1, 12, 0, 4), + datetime64(2014, 1, 1, 12, 0, 9), + datetime64(2014, 1, 1, 12, 1, 6)], + [3, 6, 5, 9]) ts = self._resample(ts, numpy.timedelta64(60, 's'), "std") self.assertEqual(1, len(ts)) -- GitLab From 56f8f036aaac11219d02492c6542d53b0966468c Mon Sep 17 00:00:00 2001 From: gord chung Date: Sat, 6 Jan 2018 23:42:31 +0000 Subject: [PATCH 1175/1483] declare expected array instead of insert MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit numpy.insert takes a created array and inserts accordingly in a new array. numpy is actually faster if you just declare things up front and set the type. 
this drops numpy.insert usage and improves: - compressed serialisation: 5%-20% - uncompressed serialisation: 5%-10% - bounded serialisation: 5%-10% timeit a = numpy.empty(arr.size + 1); a[1:] = arr; a[0] = 123*23/1 1000000 loops, best of 3: 921 ns per loop timeit a = numpy.insert(arr, 0, 123*23/1) 100000 loops, best of 3: 6.3 µs per loop timeit a = numpy.empty(arr.size + 1); a.fill(123*23/1); a[1:] = arr 1000000 loops, best of 3: 1.11 µs per loop timeit a = numpy.full(arr.size + 1, 123*23/1); a[1:] = arr 1000000 loops, best of 3: 1.78 µs per loop --- gnocchi/carbonara.py | 17 ++++++++--------- 1 file changed, 8 insertions(+), 9 deletions(-) diff --git a/gnocchi/carbonara.py b/gnocchi/carbonara.py index ffb56a9d..23216db7 100644 --- a/gnocchi/carbonara.py +++ b/gnocchi/carbonara.py @@ -380,8 +380,9 @@ class BoundTimeSerie(TimeSerie): def serialize(self): # NOTE(jd) Use a double delta encoding for timestamps - timestamps = numpy.insert(numpy.diff(self.timestamps), 0, self.first) - timestamps = timestamps.astype(dtype=' Date: Fri, 5 Jan 2018 23:26:55 +0000 Subject: [PATCH 1176/1483] simplify uncompressed serialization path we don't need to construct an extra array to store intermediate result. just zero'd ndarray we already built. this improves serialization by ~2x --- gnocchi/carbonara.py | 17 +++++------------ 1 file changed, 5 insertions(+), 12 deletions(-) diff --git a/gnocchi/carbonara.py b/gnocchi/carbonara.py index 23216db7..121958ff 100644 --- a/gnocchi/carbonara.py +++ b/gnocchi/carbonara.py @@ -735,20 +735,13 @@ class AggregatedTimeSerie(TimeSerie): locs = numpy.zeros(self.timestamps.size, dtype=numpy.int) locs[1:] = numpy.cumsum(numpy.diff(self.timestamps)) / offset_div - # Fill everything with zero - serial_dtype = [('b', ' Date: Mon, 8 Jan 2018 16:17:52 +0100 Subject: [PATCH 1177/1483] tests: Fix resource type with optional uuid This changes adds all missing tests for PATCH an optional attributes of a resource type (Only string type was having tests). 
And fix the bug where False is returned instead of None for uuid. Closes-bug: #616 --- gnocchi/indexer/sqlalchemy_extension.py | 2 +- .../functional/gabbits/resource-type.yaml | 77 ++++++++++++++++++- 2 files changed, 77 insertions(+), 2 deletions(-) diff --git a/gnocchi/indexer/sqlalchemy_extension.py b/gnocchi/indexer/sqlalchemy_extension.py index ce81cf42..eaba6163 100644 --- a/gnocchi/indexer/sqlalchemy_extension.py +++ b/gnocchi/indexer/sqlalchemy_extension.py @@ -44,7 +44,7 @@ class UUIDSchema(resource_type.UUIDSchema, SchemaMixin): def for_filling(self, dialect): if self.fill is None: - return False # Don't set any server_default + return None return sqlalchemy.literal( self.satype.process_bind_param(self.fill, dialect)) diff --git a/gnocchi/tests/functional/gabbits/resource-type.yaml b/gnocchi/tests/functional/gabbits/resource-type.yaml index 548839e5..8827ee62 100644 --- a/gnocchi/tests/functional/gabbits/resource-type.yaml +++ b/gnocchi/tests/functional/gabbits/resource-type.yaml @@ -331,6 +331,28 @@ tests: authorization: "basic YWRtaW46" content-type: application/json-patch+json data: + - op: add + path: /attributes/new-optional-bool + value: + type: bool + required: False + - op: add + path: /attributes/new-optional-int + value: + type: number + required: False + min: 0 + max: 255 + - op: add + path: /attributes/new-optional-uuid + value: + type: uuid + required: False + - op: add + path: /attributes/new-optional-datetime + value: + type: datetime + required: False - op: add path: /attributes/newstuff value: @@ -421,6 +443,20 @@ tests: datetime: type: datetime required: False + new-optional-bool: + type: bool + required: False + new-optional-int: + type: number + required: False + min: 0 + max: 255 + new-optional-uuid: + type: uuid + required: False + new-optional-datetime: + type: datetime + required: False newstuff: type: string required: False @@ -521,6 +557,20 @@ tests: datetime: type: datetime required: False + new-optional-bool: + type: bool + 
required: False + new-optional-int: + type: number + required: False + min: 0 + max: 255 + new-optional-uuid: + type: uuid + required: False + new-optional-datetime: + type: datetime + required: False newstuff: type: string required: False @@ -564,7 +614,10 @@ tests: $.newstring: foobar $.newuuid: "00000000-0000-0000-0000-000000000000" $.newdatetime: "2017-10-10T10:10:10+00:00" - + $.new-optional-bool: null + $.new-optional-int: null + $.new-optional-uuid: null + $.new-optional-datetime: null - name: control new attributes of existing resource history GET: /v1/resource/my_custom_resource/d11edfca-4393-4fda-b94d-b05a3a1b3747/history?sort=revision_end:asc-nullslast @@ -579,6 +632,10 @@ tests: $[0].newstring: foobar $[0].newuuid: "00000000-0000-0000-0000-000000000000" $[0].newdatetime: "2017-10-10T10:10:10+00:00" + $[0].new-optional-bool: null + $[0].new-optional-int: null + $[0].new-optional-uuid: null + $[0].new-optional-datetime: null $[1].id: d11edfca-4393-4fda-b94d-b05a3a1b3747 $[1].name: foo $[1].newstuff: null @@ -588,6 +645,10 @@ tests: $[1].newstring: foobar $[1].newuuid: "00000000-0000-0000-0000-000000000000" $[1].newdatetime: "2017-10-10T10:10:10+00:00" + $[1].new-optional-bool: null + $[1].new-optional-int: null + $[1].new-optional-uuid: null + $[1].new-optional-datetime: null # Invalid patch @@ -669,6 +730,20 @@ tests: newdatetime: type: datetime required: True + new-optional-bool: + type: bool + required: False + new-optional-int: + type: number + required: False + min: 0 + max: 255 + new-optional-uuid: + type: uuid + required: False + new-optional-datetime: + type: datetime + required: False - name: delete/add the same resource attribute PATCH: /v1/resource_type/my_custom_resource -- GitLab From 43838aae8fbf7dfe9f5be6894cb92968c99c31f7 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Mon, 8 Jan 2018 13:51:42 +0100 Subject: [PATCH 1178/1483] service: fix utils.parallel_map number of workers setting Naming variable is hard Fixes #605 --- 
gnocchi/service.py | 2 +- gnocchi/tests/test_utils.py | 25 +++++++++++++++++-------- 2 files changed, 18 insertions(+), 9 deletions(-) diff --git a/gnocchi/service.py b/gnocchi/service.py index 4075b082..3f0d1e5c 100644 --- a/gnocchi/service.py +++ b/gnocchi/service.py @@ -52,7 +52,7 @@ def prepare_service(args=None, conf=None, default_config_files=default_config_files, version=pbr.version.VersionInfo('gnocchi').version_string()) - utils.parallel_map.NUM_WORKERS = conf.parallel_operations + utils.parallel_map.MAX_WORKERS = conf.parallel_operations if not log_to_std and (conf.log_dir or conf.log_file): outputs = [daiquiri.output.File(filename=conf.log_file, diff --git a/gnocchi/tests/test_utils.py b/gnocchi/tests/test_utils.py index b3caebe8..db1a2b79 100644 --- a/gnocchi/tests/test_utils.py +++ b/gnocchi/tests/test_utils.py @@ -12,6 +12,7 @@ # License for the specific language governing permissions and limitations # under the License. import datetime +import itertools import os import uuid @@ -119,13 +120,21 @@ class StopWatchTest(tests_base.TestCase): class ParallelMap(tests_base.TestCase): def test_parallel_map_one(self): - utils.parallel_map.NUM_WORKERS = 1 - self.assertEqual([1, 2, 3], - utils.parallel_map(lambda x: x, - [[1], [2], [3]])) + utils.parallel_map.MAX_WORKERS = 1 + starmap = itertools.starmap + with mock.patch("itertools.starmap") as sm: + sm.side_effect = starmap + self.assertEqual([1, 2, 3], + utils.parallel_map(lambda x: x, + [[1], [2], [3]])) + sm.assert_called() def test_parallel_map_four(self): - utils.parallel_map.NUM_WORKERS = 4 - self.assertEqual([1, 2, 3], - utils.parallel_map(lambda x: x, - [[1], [2], [3]])) + utils.parallel_map.MAX_WORKERS = 4 + starmap = itertools.starmap + with mock.patch("itertools.starmap") as sm: + sm.side_effect = starmap + self.assertEqual([1, 2, 3], + utils.parallel_map(lambda x: x, + [[1], [2], [3]])) + sm.assert_not_called() -- GitLab From f5d1b472d7f23f473174359349a8bd18dc77efe1 Mon Sep 17 00:00:00 2001 From: 
gord chung Date: Mon, 8 Jan 2018 05:08:51 +0000 Subject: [PATCH 1179/1483] fix median aggregation i mucked up #602 and the tests didn't find it. basically, the ordered array i thought i build was not ordered by group as required to make the logic of finding max and finding diff to middle, it was just ordered with no regard to group. this fixes it and expands the test so it's a bit more coverage. this unfortunately is slower than what we merged. this is ~1.3x faster unlike what i claimed before as ~1.9x faster --- gnocchi/carbonara.py | 7 ++---- gnocchi/tests/test_carbonara.py | 43 ++++++++++++++++++++------------- 2 files changed, 28 insertions(+), 22 deletions(-) diff --git a/gnocchi/carbonara.py b/gnocchi/carbonara.py index 121958ff..56951b7c 100644 --- a/gnocchi/carbonara.py +++ b/gnocchi/carbonara.py @@ -148,14 +148,11 @@ class GroupedTimeSeries(object): return make_timeseries(self.tstamps, values) def median(self): - ordered = self._ts['values'].argsort() - uniq_inv = numpy.repeat(numpy.arange(self.counts.size), self.counts) - max_pos = numpy.zeros(self.tstamps.size, dtype=numpy.int) - max_pos[uniq_inv[ordered]] = numpy.arange(self._ts.size) + ordered = numpy.lexsort((self._ts['values'], self.indexes)) # TODO(gordc): can use np.divmod when centos supports numpy 1.13 mid_diff = numpy.floor_divide(self.counts, 2) odd = numpy.mod(self.counts, 2) - mid_floor = max_pos - mid_diff + mid_floor = (numpy.cumsum(self.counts) - 1) - mid_diff mid_ceil = mid_floor + (odd + 1) % 2 return make_timeseries( self.tstamps, (self._ts['values'][ordered][mid_floor] + diff --git a/gnocchi/tests/test_carbonara.py b/gnocchi/tests/test_carbonara.py index 39cbc6b7..e578dba6 100644 --- a/gnocchi/tests/test_carbonara.py +++ b/gnocchi/tests/test_carbonara.py @@ -282,47 +282,56 @@ class TestAggregatedTimeSerie(base.BaseTestCase): self.assertEqual(5.9000000000000004, ts[datetime64(2014, 1, 1, 12, 0, 0)][1]) - def _do_test_aggregation(self, name, v1, v2): + def _do_test_aggregation(self, 
name, v1, v2, v3): ts = carbonara.TimeSerie.from_data( [datetime64(2014, 1, 1, 12, 0, 0), - datetime64(2014, 1, 1, 12, 0, 4), - datetime64(2014, 1, 1, 12, 0, 9), - datetime64(2014, 1, 1, 12, 1, 4), - datetime64(2014, 1, 1, 12, 1, 6)], - [3, 6, 5, 8, 9]) + datetime64(2014, 1, 1, 12, 0, 10), + datetime64(2014, 1, 1, 12, 0, 20), + datetime64(2014, 1, 1, 12, 0, 30), + datetime64(2014, 1, 1, 12, 0, 40), + datetime64(2014, 1, 1, 12, 1, 0), + datetime64(2014, 1, 1, 12, 1, 10), + datetime64(2014, 1, 1, 12, 1, 20), + datetime64(2014, 1, 1, 12, 1, 30), + datetime64(2014, 1, 1, 12, 1, 40), + datetime64(2014, 1, 1, 12, 1, 50), + datetime64(2014, 1, 1, 12, 2, 0), + datetime64(2014, 1, 1, 12, 2, 10)], + [3, 5, 2, 3, 5, 8, 11, 22, 10, 42, 9, 4, 2]) ts = self._resample(ts, numpy.timedelta64(60, 's'), name) - self.assertEqual(2, len(ts)) + self.assertEqual(3, len(ts)) self.assertEqual(v1, ts[datetime64(2014, 1, 1, 12, 0, 0)][1]) self.assertEqual(v2, ts[datetime64(2014, 1, 1, 12, 1, 0)][1]) + self.assertEqual(v3, ts[datetime64(2014, 1, 1, 12, 2, 0)][1]) def test_aggregation_first(self): - self._do_test_aggregation('first', 3, 8) + self._do_test_aggregation('first', 3, 8, 4) def test_aggregation_last(self): - self._do_test_aggregation('last', 5, 9) + self._do_test_aggregation('last', 5, 9, 2) def test_aggregation_count(self): - self._do_test_aggregation('count', 3, 2) + self._do_test_aggregation('count', 5, 6, 2) def test_aggregation_sum(self): - self._do_test_aggregation('sum', 14, 17) + self._do_test_aggregation('sum', 18, 102, 6) def test_aggregation_mean(self): - self._do_test_aggregation('mean', 4.666666666666667, 8.5) + self._do_test_aggregation('mean', 3.6, 17, 3) def test_aggregation_median(self): - self._do_test_aggregation('median', 5.0, 8.5) + self._do_test_aggregation('median', 3.0, 10.5, 3) def test_aggregation_min(self): - self._do_test_aggregation('min', 3, 8) + self._do_test_aggregation('min', 2, 8, 2) def test_aggregation_max(self): - self._do_test_aggregation('max', 
6, 9) + self._do_test_aggregation('max', 5, 42, 4) def test_aggregation_std(self): - self._do_test_aggregation('std', 1.5275252316519465, - 0.70710678118654757) + self._do_test_aggregation('std', 1.3416407864998738, + 13.266499161421599, 1.4142135623730951) def test_aggregation_std_with_unique(self): ts = carbonara.TimeSerie.from_data( -- GitLab From 7eaaad039ca38e479aecd9fb9d507230253e80c6 Mon Sep 17 00:00:00 2001 From: gord chung Date: Sun, 7 Jan 2018 23:54:40 +0000 Subject: [PATCH 1180/1483] optimise percentile computation with pure numpy i believe i figured out how percentile is computed. this does something very similar to median (for obvious reasons). at highlevel: - get the index the percentile would land on and assume it falls between indices - take the values of the indices it falls between and figure out weight of each value. - handle the percentiles that fall on an exact index. this is ~30x better. it's actually a bit slower (3%-5%?) than the median computation so it's debatable if we want to remove median code. drop scipy. also remove numpy.lib.recfunctions as it doesn't seem to used anywhere. 
--- gnocchi/carbonara.py | 28 ++++++++++++---------------- gnocchi/tests/test_carbonara.py | 9 +++++++++ requirements.txt | 1 - tox.ini | 4 +++- 4 files changed, 24 insertions(+), 18 deletions(-) diff --git a/gnocchi/carbonara.py b/gnocchi/carbonara.py index 56951b7c..e4835ffc 100644 --- a/gnocchi/carbonara.py +++ b/gnocchi/carbonara.py @@ -26,8 +26,6 @@ import time import lz4.block import numpy -import numpy.lib.recfunctions -from scipy import ndimage import six @@ -183,20 +181,18 @@ class GroupedTimeSeries(object): return make_timeseries(self.tstamps, values[::-1]) def quantile(self, q): - return self._scipy_aggregate(ndimage.labeled_comprehension, - func=functools.partial( - numpy.percentile, - q=q, - ), - out_dtype='float64', - default=None) - - def _scipy_aggregate(self, method, *args, **kwargs): - if len(self.tstamps) == 0: - return make_timeseries([], []) - - values = method(self._ts['values'], self.indexes, self.tstamps, - *args, **kwargs) + ordered = numpy.lexsort((self._ts['values'], self.indexes)) + min_pos = (numpy.cumsum(self.counts) - 1) - (self.counts - 1) + real_pos = min_pos + (self.counts - 1) * (q / 100) + floor_pos = numpy.floor(real_pos).astype(numpy.int, copy=False) + ceil_pos = numpy.ceil(real_pos).astype(numpy.int, copy=False) + values = ( + self._ts['values'][ordered][floor_pos] * (ceil_pos - real_pos) + + self._ts['values'][ordered][ceil_pos] * (real_pos - floor_pos)) + # NOTE(gordc): above code doesn't compute proper value if pct lands on + # exact index, it sets it to 0. 
we need to set it properly here + exact_pos = numpy.equal(floor_pos, ceil_pos) + values[exact_pos] = self._ts['values'][ordered][floor_pos][exact_pos] return make_timeseries(self.tstamps, values) def derived(self): diff --git a/gnocchi/tests/test_carbonara.py b/gnocchi/tests/test_carbonara.py index e578dba6..a3f06a1b 100644 --- a/gnocchi/tests/test_carbonara.py +++ b/gnocchi/tests/test_carbonara.py @@ -283,6 +283,8 @@ class TestAggregatedTimeSerie(base.BaseTestCase): ts[datetime64(2014, 1, 1, 12, 0, 0)][1]) def _do_test_aggregation(self, name, v1, v2, v3): + # NOTE(gordc): test data must have a group of odd count to properly + # test 50pct test case. ts = carbonara.TimeSerie.from_data( [datetime64(2014, 1, 1, 12, 0, 0), datetime64(2014, 1, 1, 12, 0, 10), @@ -323,6 +325,13 @@ class TestAggregatedTimeSerie(base.BaseTestCase): def test_aggregation_median(self): self._do_test_aggregation('median', 3.0, 10.5, 3) + def test_aggregation_50pct(self): + self._do_test_aggregation('50pct', 3.0, 10.5, 3) + + def test_aggregation_56pct(self): + self._do_test_aggregation('56pct', 3.4800000000000004, + 10.8, 3.120000000000001) + def test_aggregation_min(self): self._do_test_aggregation('min', 2, 8, 2) diff --git a/requirements.txt b/requirements.txt index 921ac44f..37b7c413 100644 --- a/requirements.txt +++ b/requirements.txt @@ -5,7 +5,6 @@ oslo.config>=3.22.0 oslo.policy>=0.3.0 oslo.middleware>=3.22.0 pytimeparse -scipy>=0.18.1 # BSD pecan>=0.9 futures; python_version < '3' jsonpatch diff --git a/tox.ini b/tox.ini index 691abfad..dcf946e4 100644 --- a/tox.ini +++ b/tox.ini @@ -158,7 +158,9 @@ setenv = GNOCCHI_STORAGE_DEPS=file GNOCCHI_TEST_DEBUG=1 deps = {[testenv:docs]deps} sphinxcontrib-versioning -# fox <= 4.1 doc +# for <= 4.2 doc + scipy +# for <= 4.1 doc pandas # for 3.x doc oslotest -- GitLab From 93b68fcda62c530a764983f7d25d75f2dd2e6046 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Mon, 8 Jan 2018 14:18:03 +0100 Subject: [PATCH 1181/1483] test_rest: enhance error 
checking in negative aggregation test This checks for more details that just a string in the error returned by the API. --- gnocchi/tests/test_rest.py | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/gnocchi/tests/test_rest.py b/gnocchi/tests/test_rest.py index d1942bd9..884abeea 100644 --- a/gnocchi/tests/test_rest.py +++ b/gnocchi/tests/test_rest.py @@ -1637,9 +1637,15 @@ class ResourceTest(RestTest): "/v1/aggregation/resource/" + self.resource_type + "/metric/foo?aggregation=max", params={"=": {"name": name}}, - status=400) - self.assertIn(b"Metrics can't being aggregated", - result.body) + status=400, + headers={"Accept": "application/json"}) + self.assertEqual("Metrics can't being aggregated", + result.json['description']['cause']) + self.assertEqual("No granularity match", + result.json['description']['reason']) + self.assertEqual( + sorted([[metric1['id'], 'max'], [metric2['id'], 'max']]), + sorted(result.json['description']['detail'])) def test_get_res_named_metric_measure_aggregation_nooverlap(self): result = self.app.post_json("/v1/metric", -- GitLab From e5215de4f050b84f8d8cd53fa57c598c6d7373a3 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Mon, 8 Jan 2018 14:28:12 +0100 Subject: [PATCH 1182/1483] rest/aggregates: declare _get_measures_by_name as static --- gnocchi/rest/aggregates/api.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/gnocchi/rest/aggregates/api.py b/gnocchi/rest/aggregates/api.py index f37a0926..f6987ae3 100644 --- a/gnocchi/rest/aggregates/api.py +++ b/gnocchi/rest/aggregates/api.py @@ -286,7 +286,8 @@ class AggregatesController(rest.RestController): return response - def _get_measures_by_name(self, resources, metric_wildcards, operations, + @staticmethod + def _get_measures_by_name(resources, metric_wildcards, operations, start, stop, granularity, needed_overlap, fill, details): -- GitLab From 60653de31b30bce623aca68afa414274c06e5c03 Mon Sep 17 00:00:00 2001 From: Julien Danjou 
Date: Mon, 8 Jan 2018 13:40:05 +0100 Subject: [PATCH 1183/1483] storage: force granularities to be specified When getting measures, the storage driver does not look at archive policy anymore and wants the granularities to be retrieved to be specified. This is yet another step to unplug the archive policy concept from the storage concept. --- gnocchi/rest/aggregates/processor.py | 64 +++++----- gnocchi/rest/api.py | 49 ++++++-- gnocchi/storage/__init__.py | 24 ++-- .../gabbits/aggregates-with-metric-ids.yaml | 4 +- gnocchi/tests/test_aggregates.py | 24 ++-- gnocchi/tests/test_statsd.py | 10 +- gnocchi/tests/test_storage.py | 119 +++++++++++++----- 7 files changed, 186 insertions(+), 108 deletions(-) diff --git a/gnocchi/rest/aggregates/processor.py b/gnocchi/rest/aggregates/processor.py index 3be1396b..9971c00d 100644 --- a/gnocchi/rest/aggregates/processor.py +++ b/gnocchi/rest/aggregates/processor.py @@ -58,7 +58,7 @@ def _get_measures_timeserie(storage, ref, *args, **kwargs): def get_measures(storage, references, operations, from_timestamp=None, to_timestamp=None, - granularity=None, needed_overlap=100.0, + granularities=None, needed_overlap=100.0, fill=None): """Get aggregated measures of multiple entities. @@ -67,10 +67,30 @@ def get_measures(storage, references, operations, measured to aggregate. :param from timestamp: The timestamp to get the measure from. :param to timestamp: The timestamp to get the measure to. - :param granularity: The granularity to retrieve. + :param granularities: The granularities to retrieve. :param fill: The value to use to fill in missing data in series. 
""" + if granularities is None: + all_granularities = ( + definition.granularity + for ref in references + for definition in ref.metric.archive_policy.definition + ) + # granularities_in_common + granularities = [ + g + for g, occurrence in six.iteritems( + collections.Counter(all_granularities)) + if occurrence == len(references) + ] + + if not granularities: + raise exceptions.UnAggregableTimeseries( + list((ref.name, ref.aggregation) + for ref in references), + 'No granularity match') + references_with_missing_granularity = [] for ref in references: if (ref.aggregation not in @@ -81,45 +101,25 @@ def get_measures(storage, references, operations, # they are all missing anyway ref.metric.archive_policy.definition[0].granularity) - if granularity is not None: - for d in ref.metric.archive_policy.definition: - if d.granularity == granularity: - break - else: + available_granularities = [ + d.granularity + for d in ref.metric.archive_policy.definition + ] + for g in granularities: + if g not in available_granularities: references_with_missing_granularity.append( - (ref.name, ref.aggregation)) + (ref.name, ref.aggregation, g)) + break if references_with_missing_granularity: raise exceptions.UnAggregableTimeseries( references_with_missing_granularity, - "granularity '%d' is missing" % - utils.timespan_total_seconds(granularity)) - - if granularity is None: - granularities = ( - definition.granularity - for ref in references - for definition in ref.metric.archive_policy.definition - ) - granularities_in_common = [ - g - for g, occurrence in six.iteritems( - collections.Counter(granularities)) - if occurrence == len(references) - ] - - if not granularities_in_common: - raise exceptions.UnAggregableTimeseries( - list((ref.name, ref.aggregation) - for ref in references), - 'No granularity match') - else: - granularities_in_common = [granularity] + "Granularities are missing") tss = utils.parallel_map(_get_measures_timeserie, [(storage, ref, g, from_timestamp, to_timestamp) 
for ref in references - for g in granularities_in_common]) + for g in granularities]) return aggregated(tss, operations, from_timestamp, to_timestamp, needed_overlap, fill) diff --git a/gnocchi/rest/api.py b/gnocchi/rest/api.py index 56eff823..0312c139 100644 --- a/gnocchi/rest/api.py +++ b/gnocchi/rest/api.py @@ -493,14 +493,6 @@ class MetricController(rest.RestController): except Exception: abort(400, "Invalid value for stop") - if granularity is not None: - try: - granularity = utils.to_timespan(granularity) - except ValueError: - abort(400, {"cause": "Attribute value error", - "detail": "granularity", - "reason": "Invalid granularity"}) - if resample: if not granularity: abort(400, 'A granularity must be specified to resample') @@ -509,6 +501,17 @@ class MetricController(rest.RestController): except ValueError as e: abort(400, six.text_type(e)) + if granularity is None: + granularity = [d.granularity + for d in self.metric.archive_policy.definition] + else: + try: + granularity = [utils.to_timespan(granularity)] + except ValueError: + abort(400, {"cause": "Attribute value error", + "detail": "granularity", + "reason": "Invalid granularity"}) + if aggregation not in self.metric.archive_policy.aggregation_methods: abort(404, { "cause": "Aggregation method does not exist for this metric", @@ -528,8 +531,8 @@ class MetricController(rest.RestController): abort(503, six.text_type(e)) try: return pecan.request.storage.get_measures( - self.metric, start, stop, aggregation, - granularity, resample) + self.metric, granularity, start, stop, aggregation, + resample) except (storage.MetricDoesNotExist, storage.AggregationDoesNotExist) as e: abort(404, six.text_type(e)) @@ -1759,7 +1762,7 @@ def validate_qs(start, stop, granularity, needed_overlap, fill): if granularity is not None: try: - granularity = utils.to_timespan(granularity) + granularity = [utils.to_timespan(granularity)] except ValueError as e: abort(400, {"cause": "Argument value error", "detail": "granularity", 
@@ -1831,6 +1834,26 @@ class AggregationController(rest.RestController): except ValueError as e: abort(400, six.text_type(e)) + if granularity is None: + granularities = ( + definition.granularity + for m in metrics + for definition in m.archive_policy.definition + ) + # granularities_in_common + granularity = [ + g + for g, occurrence in six.iteritems( + collections.Counter(granularities)) + if occurrence == len(metrics) + ] + + if not granularity: + abort(400, exceptions.UnAggregableTimeseries( + list((metric.id, aggregation) + for metric in metrics), + 'No granularity match')) + operations = ["aggregate", reaggregation, []] if resample: operations[2].extend( @@ -1871,8 +1894,8 @@ class AggregationController(rest.RestController): }, }) return pecan.request.storage.get_measures( - metric, start, stop, aggregation, - granularity, resample) + metric, granularity, start, stop, aggregation, + resample) return processor.get_measures( pecan.request.storage, [processor.MetricReference(m, aggregation) for m in metrics], diff --git a/gnocchi/storage/__init__.py b/gnocchi/storage/__init__.py index 8b89dc16..53b61b3b 100644 --- a/gnocchi/storage/__init__.py +++ b/gnocchi/storage/__init__.py @@ -197,29 +197,25 @@ class StorageDriver(object): """ return name.split("_")[-1] == 'v%s' % v - def get_measures(self, metric, from_timestamp=None, to_timestamp=None, - aggregation='mean', granularity=None, resample=None): + def get_measures(self, metric, granularities, + from_timestamp=None, to_timestamp=None, + aggregation='mean', resample=None): """Get a measure to a metric. :param metric: The metric measured. + :param granularities: The granularities to retrieve. :param from timestamp: The timestamp to get the measure from. :param to timestamp: The timestamp to get the measure to. :param aggregation: The type of aggregation to retrieve. - :param granularity: The granularity to retrieve. :param resample: The granularity to resample to. 
""" - if granularity is None: - agg_timeseries = utils.parallel_map( - self._get_measures_timeserie, - ((metric, aggregation, ap.granularity, - from_timestamp, to_timestamp) - for ap in reversed(metric.archive_policy.definition))) - else: - agg_timeseries = [self._get_measures_timeserie( - metric, aggregation, granularity, - from_timestamp, to_timestamp)] + agg_timeseries = utils.parallel_map( + self._get_measures_timeserie, + ((metric, aggregation, granularity, + from_timestamp, to_timestamp) + for granularity in sorted(granularities, reverse=True))) - if resample and granularity: + if resample: agg_timeseries = list(map(lambda agg: agg.resample(resample), agg_timeseries)) diff --git a/gnocchi/tests/functional/gabbits/aggregates-with-metric-ids.yaml b/gnocchi/tests/functional/gabbits/aggregates-with-metric-ids.yaml index fe739c0a..b0aca8d6 100644 --- a/gnocchi/tests/functional/gabbits/aggregates-with-metric-ids.yaml +++ b/gnocchi/tests/functional/gabbits/aggregates-with-metric-ids.yaml @@ -663,9 +663,9 @@ tests: response_json_paths: $.code: 400 $.description.cause: "Metrics can't being aggregated" - $.description.reason: "granularity '123' is missing" + $.description.reason: "Granularities are missing" $.description.detail: - - ["$HISTORY['create metric1'].$RESPONSE['$.id']", mean] + - ["$HISTORY['create metric1'].$RESPONSE['$.id']", mean, 123] - name: get unknown aggregation POST: /v1/aggregates diff --git a/gnocchi/tests/test_aggregates.py b/gnocchi/tests/test_aggregates.py index 9901c507..6eda5b88 100644 --- a/gnocchi/tests/test_aggregates.py +++ b/gnocchi/tests/test_aggregates.py @@ -986,7 +986,7 @@ class CrossMetricAggregated(base.TestCase): [str(self.metric.id), "mean"], [str(metric2.id), "mean"], ]], - granularity=numpy.timedelta64(12345456, 'ms')) + granularities=[numpy.timedelta64(12345456, 'ms')]) def test_add_and_get_measures_different_archives(self): metric2 = indexer.Metric(uuid.uuid4(), @@ -1166,7 +1166,7 @@ class 
CrossMetricAggregated(base.TestCase): ]], from_timestamp=datetime64(2014, 1, 1, 12, 0, 0), to_timestamp=datetime64(2014, 1, 1, 12, 0, 1), - granularity=numpy.timedelta64(5, 'm'))["aggregated"] + granularities=[numpy.timedelta64(5, 'm')])["aggregated"] self.assertEqual([ (datetime64(2014, 1, 1, 12, 0, 0), @@ -1236,7 +1236,7 @@ class CrossMetricAggregated(base.TestCase): ["metric", [str(self.metric.id), "mean"], [str(metric2.id), "mean"]]], - granularity=numpy.timedelta64(1, 'h')) + granularities=[numpy.timedelta64(1, 'h')]) self.assertEqual({ str(self.metric.id): { @@ -1273,7 +1273,7 @@ class CrossMetricAggregated(base.TestCase): ["metric", [str(self.metric.id), "mean"], [str(metric2.id), "mean"]]], 2], - granularity=numpy.timedelta64(1, 'h')) + granularities=[numpy.timedelta64(1, 'h')]) self.assertEqual({ str(self.metric.id): { @@ -1312,7 +1312,7 @@ class CrossMetricAggregated(base.TestCase): ["metric", [str(self.metric.id), "mean"], [str(metric2.id), "mean"]]]], - granularity=numpy.timedelta64(1, 'h')) + granularities=[numpy.timedelta64(1, 'h')]) self.assertEqual({ str(self.metric.id): { @@ -1348,7 +1348,7 @@ class CrossMetricAggregated(base.TestCase): ["/", ["rolling", "sum", 2, ["metric", [str(self.metric.id), "mean"], [str(metric2.id), "mean"]]], 2], - granularity=numpy.timedelta64(5, 'm')) + granularities=[numpy.timedelta64(5, 'm')]) self.assertEqual({ str(self.metric.id): { @@ -1391,7 +1391,7 @@ class CrossMetricAggregated(base.TestCase): processor.MetricReference(metric2, "mean")], ["*", ["metric", str(self.metric.id), "mean"], ["metric", str(metric2.id), "mean"]], - granularity=numpy.timedelta64(1, 'h'))["aggregated"] + granularities=[numpy.timedelta64(1, 'h')])["aggregated"] self.assertEqual([ (datetime64(2014, 1, 1, 12, 0, 0), @@ -1417,7 +1417,7 @@ class CrossMetricAggregated(base.TestCase): values = processor.get_measures( self.storage, [processor.MetricReference(self.metric, "mean")], ["*", ["metric", str(self.metric.id), "mean"], 2], - 
granularity=numpy.timedelta64(1, 'h')) + granularities=[numpy.timedelta64(1, 'h')]) self.assertEqual({str(self.metric.id): { "mean": [ @@ -1444,7 +1444,7 @@ class CrossMetricAggregated(base.TestCase): values = processor.get_measures( self.storage, [processor.MetricReference(self.metric, "mean")], ["*", 2, ["metric", str(self.metric.id), "mean"]], - granularity=numpy.timedelta64(1, 'h')) + granularities=[numpy.timedelta64(1, 'h')]) self.assertEqual({str(self.metric.id): { "mean": [(datetime64(2014, 1, 1, 12, 0, 0), @@ -1484,7 +1484,7 @@ class CrossMetricAggregated(base.TestCase): ["*", ["metric", str(self.metric.id), "mean"], ["metric", str(metric2.id), "mean"]], ], - granularity=numpy.timedelta64(1, 'h'))["aggregated"] + granularities=[numpy.timedelta64(1, 'h')])["aggregated"] self.assertEqual([ (datetime64(2014, 1, 1, 13, 0, 0), @@ -1529,7 +1529,7 @@ class CrossMetricAggregated(base.TestCase): ], 10 ], - granularity=numpy.timedelta64(1, 'h'))["aggregated"] + granularities=[numpy.timedelta64(1, 'h')])["aggregated"] self.assertEqual([ (datetime64(2014, 1, 1, 12, 0, 0), numpy.timedelta64(1, 'h'), 1), @@ -1563,7 +1563,7 @@ class CrossMetricAggregated(base.TestCase): processor.MetricReference(metric2, "mean")], ["abs", ["metric", [str(self.metric.id), "mean"], [str(metric2.id), "mean"]]], - granularity=numpy.timedelta64(1, 'h')) + granularities=[numpy.timedelta64(1, 'h')]) self.assertEqual({ str(self.metric.id): { diff --git a/gnocchi/tests/test_statsd.py b/gnocchi/tests/test_statsd.py index c6404fe4..ddcbdd1e 100644 --- a/gnocchi/tests/test_statsd.py +++ b/gnocchi/tests/test_statsd.py @@ -45,6 +45,8 @@ class TestStatsd(tests_base.TestCase): self.STATSD_USER_ID, "statsd") self.conf.set_override("archive_policy_name", self.STATSD_ARCHIVE_POLICY_NAME, "statsd") + ap = self.ARCHIVE_POLICIES["medium"] + self.granularities = [d.granularity for d in ap.definition] self.stats = statsd.Stats(self.conf) # Replace storage/indexer with correct ones that have been upgraded @@ 
-75,7 +77,7 @@ class TestStatsd(tests_base.TestCase): self.stats.indexer, self.stats.incoming, [str(metric.id)], sync=True) - measures = self.storage.get_measures(metric) + measures = self.storage.get_measures(metric, self.granularities) self.assertEqual([ (datetime64(2015, 1, 7), numpy.timedelta64(1, 'D'), 1.0), (datetime64(2015, 1, 7, 13), numpy.timedelta64(1, 'h'), 1.0), @@ -96,7 +98,7 @@ class TestStatsd(tests_base.TestCase): self.stats.indexer, self.stats.incoming, [str(metric.id)], sync=True) - measures = self.storage.get_measures(metric) + measures = self.storage.get_measures(metric, self.granularities) self.assertEqual([ (datetime64(2015, 1, 7), numpy.timedelta64(1, 'D'), 1.5), (datetime64(2015, 1, 7, 13), numpy.timedelta64(1, 'h'), 1.5), @@ -130,7 +132,7 @@ class TestStatsd(tests_base.TestCase): self.stats.indexer, self.stats.incoming, [str(metric.id)], sync=True) - measures = self.storage.get_measures(metric) + measures = self.storage.get_measures(metric, self.granularities) self.assertEqual([ (datetime64(2015, 1, 7), numpy.timedelta64(1, 'D'), 1.0), (datetime64(2015, 1, 7, 13), numpy.timedelta64(1, 'h'), 1.0), @@ -150,7 +152,7 @@ class TestStatsd(tests_base.TestCase): self.stats.indexer, self.stats.incoming, [str(metric.id)], sync=True) - measures = self.storage.get_measures(metric) + measures = self.storage.get_measures(metric, self.granularities) self.assertEqual([ (datetime64(2015, 1, 7), numpy.timedelta64(1, 'D'), 28), (datetime64(2015, 1, 7, 13), numpy.timedelta64(1, 'h'), 28), diff --git a/gnocchi/tests/test_storage.py b/gnocchi/tests/test_storage.py index 480891d9..e16bd1eb 100644 --- a/gnocchi/tests/test_storage.py +++ b/gnocchi/tests/test_storage.py @@ -81,7 +81,13 @@ class TestStorageDriver(tests_base.TestCase): side_effect=carbonara.InvalidData()): self.trigger_processing() - m = self.storage.get_measures(self.metric) + granularities = [ + numpy.timedelta64(1, 'D'), + numpy.timedelta64(1, 'h'), + numpy.timedelta64(5, 'm'), + ] + + m = 
self.storage.get_measures(self.metric, granularities) self.assertIn((datetime64(2014, 1, 1), numpy.timedelta64(1, 'D'), 1), m) self.assertIn((datetime64(2014, 1, 1, 13), @@ -104,7 +110,13 @@ class TestStorageDriver(tests_base.TestCase): self.trigger_processing() self.assertFalse(LOG.error.called) - m = self.storage.get_measures(self.metric) + granularities = [ + numpy.timedelta64(1, 'D'), + numpy.timedelta64(1, 'h'), + numpy.timedelta64(5, 'm'), + ] + + m = self.storage.get_measures(self.metric, granularities) self.assertIn((datetime64(2014, 1, 1), numpy.timedelta64(1, 'D'), 5.0), m) self.assertIn((datetime64(2014, 1, 1, 12), @@ -131,7 +143,11 @@ class TestStorageDriver(tests_base.TestCase): self.trigger_processing() self.storage._delete_metric(self.metric) self.trigger_processing() - self.assertEqual([], self.storage.get_measures(self.metric)) + self.assertEqual([], self.storage.get_measures(self.metric, [ + numpy.timedelta64(1, 'D'), + numpy.timedelta64(1, 'h'), + numpy.timedelta64(5, 'm'), + ])) self.assertRaises(storage.MetricDoesNotExist, self.storage._get_unaggregated_timeserie, self.metric) @@ -200,7 +216,11 @@ class TestStorageDriver(tests_base.TestCase): for i in six.moves.range(0, 60) for j in six.moves.range(0, 60)]) self.trigger_processing([str(m.id)]) - self.assertEqual(3661, len(self.storage.get_measures(m))) + self.assertEqual(3661, len(self.storage.get_measures(m, [ + numpy.timedelta64(1, 'h'), + numpy.timedelta64(1, 'm'), + numpy.timedelta64(1, 's'), + ]))) @mock.patch('gnocchi.carbonara.SplitKey.POINTS_PER_SPLIT', 48) def test_add_measures_update_subset_split(self): @@ -256,13 +276,19 @@ class TestStorageDriver(tests_base.TestCase): ]) self.trigger_processing() + granularities = [ + numpy.timedelta64(1, 'D'), + numpy.timedelta64(1, 'h'), + numpy.timedelta64(5, 'm'), + ] + self.assertEqual([ (datetime64(2014, 1, 1), numpy.timedelta64(1, 'D'), 39.75), (datetime64(2014, 1, 1, 12), numpy.timedelta64(1, 'h'), 39.75), (datetime64(2014, 1, 1, 12), 
numpy.timedelta64(5, 'm'), 69.0), (datetime64(2014, 1, 1, 12, 5), numpy.timedelta64(5, 'm'), 23.0), (datetime64(2014, 1, 1, 12, 10), numpy.timedelta64(5, 'm'), 44.0), - ], self.storage.get_measures(self.metric)) + ], self.storage.get_measures(self.metric, granularities)) # One year later… self.incoming.add_measures(self.metric.id, [ @@ -274,7 +300,7 @@ class TestStorageDriver(tests_base.TestCase): (datetime64(2015, 1, 1), numpy.timedelta64(1, 'D'), 69), (datetime64(2015, 1, 1, 12), numpy.timedelta64(1, 'h'), 69), (datetime64(2015, 1, 1, 12), numpy.timedelta64(5, 'm'), 69), - ], self.storage.get_measures(self.metric)) + ], self.storage.get_measures(self.metric, granularities)) self.assertEqual({ carbonara.SplitKey(numpy.datetime64(1244160000, 's'), @@ -351,8 +377,9 @@ class TestStorageDriver(tests_base.TestCase): (datetime64(2016, 1, 2, 13, 7), numpy.timedelta64(1, 'm'), 42), (datetime64(2016, 1, 4, 14, 9), numpy.timedelta64(1, 'm'), 4), (datetime64(2016, 1, 6, 15, 12), numpy.timedelta64(1, 'm'), 44), - ], self.storage.get_measures(self.metric, - granularity=numpy.timedelta64(1, 'm'))) + ], self.storage.get_measures( + self.metric, + granularities=[numpy.timedelta64(1, 'm')])) # Now store brand new points that should force a rewrite of one of the # split (keep in mind the back window size in one hour here). 
We move @@ -409,8 +436,9 @@ class TestStorageDriver(tests_base.TestCase): (datetime64(2016, 1, 6, 15, 12), numpy.timedelta64(1, 'm'), 44), (datetime64(2016, 1, 10, 16, 18), numpy.timedelta64(1, 'm'), 45), (datetime64(2016, 1, 10, 17, 12), numpy.timedelta64(1, 'm'), 46), - ], self.storage.get_measures(self.metric, - granularity=numpy.timedelta64(1, 'm'))) + ], self.storage.get_measures( + self.metric, + granularities=[numpy.timedelta64(1, 'm')])) def test_rewrite_measures_oldest_mutable_timestamp_eq_next_key(self): """See LP#1655422""" @@ -472,8 +500,9 @@ class TestStorageDriver(tests_base.TestCase): (datetime64(2016, 1, 2, 13, 7), numpy.timedelta64(1, 'm'), 42), (datetime64(2016, 1, 4, 14, 9), numpy.timedelta64(1, 'm'), 4), (datetime64(2016, 1, 6, 15, 12), numpy.timedelta64(1, 'm'), 44), - ], self.storage.get_measures(self.metric, - granularity=numpy.timedelta64(60, 's'))) + ], self.storage.get_measures( + self.metric, + granularities=[numpy.timedelta64(60, 's')])) # Now store brand new points that should force a rewrite of one of the # split (keep in mind the back window size in one hour here). We move @@ -531,8 +560,9 @@ class TestStorageDriver(tests_base.TestCase): (datetime64(2016, 1, 4, 14, 9), numpy.timedelta64(1, 'm'), 4), (datetime64(2016, 1, 6, 15, 12), numpy.timedelta64(1, 'm'), 44), (datetime64(2016, 1, 10, 0, 12), numpy.timedelta64(1, 'm'), 45), - ], self.storage.get_measures(self.metric, - granularity=numpy.timedelta64(60, 's'))) + ], self.storage.get_measures( + self.metric, + granularities=[numpy.timedelta64(60, 's')])) def test_rewrite_measures_corruption_missing_file(self): # Create an archive policy that spans on several splits. 
Each split @@ -598,8 +628,9 @@ class TestStorageDriver(tests_base.TestCase): numpy.timedelta64(1, 'm'), 4), (datetime64(2016, 1, 6, 15, 12), numpy.timedelta64(1, 'm'), 44), - ], self.storage.get_measures(self.metric, - granularity=numpy.timedelta64(60, 's'))) + ], self.storage.get_measures( + self.metric, + granularities=[numpy.timedelta64(60, 's')])) # Test what happens if we delete the latest split and then need to # compress it! @@ -678,8 +709,9 @@ class TestStorageDriver(tests_base.TestCase): (datetime64(2016, 1, 2, 13, 7), numpy.timedelta64(1, 'm'), 42), (datetime64(2016, 1, 4, 14, 9), numpy.timedelta64(1, 'm'), 4), (datetime64(2016, 1, 6, 15, 12), numpy.timedelta64(1, 'm'), 44), - ], self.storage.get_measures(self.metric, - granularity=numpy.timedelta64(1, 'm'))) + ], self.storage.get_measures( + self.metric, + granularities=[numpy.timedelta64(1, 'm')])) # Test what happens if we write garbage self.storage._store_metric_measures( @@ -706,12 +738,18 @@ class TestStorageDriver(tests_base.TestCase): ]) self.trigger_processing() + granularities = [ + numpy.timedelta64(1, 'D'), + numpy.timedelta64(1, 'h'), + numpy.timedelta64(5, 'm'), + ] + self.assertEqual([ (datetime64(2014, 1, 1), numpy.timedelta64(1, 'D'), 55.5), (datetime64(2014, 1, 1, 12), numpy.timedelta64(1, 'h'), 55.5), (datetime64(2014, 1, 1, 12), numpy.timedelta64(5, 'm'), 69), (datetime64(2014, 1, 1, 12, 5), numpy.timedelta64(5, 'm'), 42.0), - ], self.storage.get_measures(self.metric)) + ], self.storage.get_measures(self.metric, granularities)) self.incoming.add_measures(self.metric.id, [ incoming.Measure(datetime64(2014, 1, 1, 12, 9, 31), 4), @@ -725,7 +763,7 @@ class TestStorageDriver(tests_base.TestCase): (datetime64(2014, 1, 1, 12), numpy.timedelta64(5, 'm'), 69.0), (datetime64(2014, 1, 1, 12, 5), numpy.timedelta64(5, 'm'), 23.0), (datetime64(2014, 1, 1, 12, 10), numpy.timedelta64(5, 'm'), 44.0), - ], self.storage.get_measures(self.metric)) + ], self.storage.get_measures(self.metric, 
granularities)) self.assertEqual([ (datetime64(2014, 1, 1), numpy.timedelta64(1, 'D'), 69), @@ -733,7 +771,8 @@ class TestStorageDriver(tests_base.TestCase): (datetime64(2014, 1, 1, 12), numpy.timedelta64(5, 'm'), 69.0), (datetime64(2014, 1, 1, 12, 5), numpy.timedelta64(5, 'm'), 42.0), (datetime64(2014, 1, 1, 12, 10), numpy.timedelta64(5, 'm'), 44.0), - ], self.storage.get_measures(self.metric, aggregation='max')) + ], self.storage.get_measures(self.metric, + granularities, aggregation='max')) self.assertEqual([ (datetime64(2014, 1, 1), numpy.timedelta64(1, 'D'), 4), @@ -741,7 +780,8 @@ class TestStorageDriver(tests_base.TestCase): (datetime64(2014, 1, 1, 12), numpy.timedelta64(5, 'm'), 69.0), (datetime64(2014, 1, 1, 12, 5), numpy.timedelta64(5, 'm'), 4.0), (datetime64(2014, 1, 1, 12, 10), numpy.timedelta64(5, 'm'), 44.0), - ], self.storage.get_measures(self.metric, aggregation='min')) + ], self.storage.get_measures(self.metric, + granularities, aggregation='min')) def test_add_and_get_measures(self): self.incoming.add_measures(self.metric.id, [ @@ -752,13 +792,19 @@ class TestStorageDriver(tests_base.TestCase): ]) self.trigger_processing() + granularities = [ + numpy.timedelta64(1, 'D'), + numpy.timedelta64(1, 'h'), + numpy.timedelta64(5, 'm'), + ] + self.assertEqual([ (datetime64(2014, 1, 1), numpy.timedelta64(1, 'D'), 39.75), (datetime64(2014, 1, 1, 12), numpy.timedelta64(1, 'h'), 39.75), (datetime64(2014, 1, 1, 12), numpy.timedelta64(5, 'm'), 69.0), (datetime64(2014, 1, 1, 12, 5), numpy.timedelta64(5, 'm'), 23.0), (datetime64(2014, 1, 1, 12, 10), numpy.timedelta64(5, 'm'), 44.0), - ], self.storage.get_measures(self.metric)) + ], self.storage.get_measures(self.metric, granularities)) self.assertEqual([ (datetime64(2014, 1, 1), numpy.timedelta64(1, 'D'), 39.75), @@ -766,6 +812,7 @@ class TestStorageDriver(tests_base.TestCase): (datetime64(2014, 1, 1, 12, 10), numpy.timedelta64(5, 'm'), 44.0), ], self.storage.get_measures( self.metric, + granularities, 
from_timestamp=datetime64(2014, 1, 1, 12, 10, 0))) self.assertEqual([ @@ -775,6 +822,7 @@ class TestStorageDriver(tests_base.TestCase): (datetime64(2014, 1, 1, 12, 5), numpy.timedelta64(5, 'm'), 23.0), ], self.storage.get_measures( self.metric, + granularities, to_timestamp=datetime64(2014, 1, 1, 12, 6, 0))) self.assertEqual([ @@ -783,6 +831,7 @@ class TestStorageDriver(tests_base.TestCase): (datetime64(2014, 1, 1, 12, 10), numpy.timedelta64(5, 'm'), 44.0), ], self.storage.get_measures( self.metric, + granularities, to_timestamp=datetime64(2014, 1, 1, 12, 10, 10), from_timestamp=datetime64(2014, 1, 1, 12, 10, 10))) @@ -792,6 +841,7 @@ class TestStorageDriver(tests_base.TestCase): (datetime64(2014, 1, 1, 12), numpy.timedelta64(5, 'm'), 69.0), ], self.storage.get_measures( self.metric, + granularities, from_timestamp=datetime64(2014, 1, 1, 12, 0, 0), to_timestamp=datetime64(2014, 1, 1, 12, 0, 2))) @@ -801,6 +851,7 @@ class TestStorageDriver(tests_base.TestCase): (datetime64(2014, 1, 1, 12), numpy.timedelta64(5, 'm'), 69.0), ], self.storage.get_measures( self.metric, + granularities, from_timestamp=datetime64(2014, 1, 1, 12), to_timestamp=datetime64(2014, 1, 1, 12, 0, 2))) @@ -810,7 +861,7 @@ class TestStorageDriver(tests_base.TestCase): self.metric, from_timestamp=datetime64(2014, 1, 1, 12, 0, 0), to_timestamp=datetime64(2014, 1, 1, 12, 0, 2), - granularity=numpy.timedelta64(1, 'h'))) + granularities=[numpy.timedelta64(1, 'h')])) self.assertEqual([ (datetime64(2014, 1, 1, 12), numpy.timedelta64(5, 'm'), 69.0), @@ -818,12 +869,12 @@ class TestStorageDriver(tests_base.TestCase): self.metric, from_timestamp=datetime64(2014, 1, 1, 12, 0, 0), to_timestamp=datetime64(2014, 1, 1, 12, 0, 2), - granularity=numpy.timedelta64(5, 'm'))) + granularities=[numpy.timedelta64(5, 'm')])) self.assertRaises(storage.AggregationDoesNotExist, self.storage.get_measures, self.metric, - granularity=numpy.timedelta64(42, 's')) + granularities=[numpy.timedelta64(42, 's')]) def 
test_get_measure_unknown_aggregation(self): self.incoming.add_measures(self.metric.id, [ @@ -832,8 +883,14 @@ class TestStorageDriver(tests_base.TestCase): incoming.Measure(datetime64(2014, 1, 1, 12, 9, 31), 4), incoming.Measure(datetime64(2014, 1, 1, 12, 12, 45), 44), ]) + granularities = [ + numpy.timedelta64(1, 'D'), + numpy.timedelta64(1, 'h'), + numpy.timedelta64(5, 'm'), + ] self.assertEqual( - [], self.storage.get_measures(self.metric, aggregation='last')) + [], self.storage.get_measures( + self.metric, granularities, aggregation='last')) def test_find_measures(self): metric2, __ = self._create_metric() @@ -911,7 +968,7 @@ class TestStorageDriver(tests_base.TestCase): (datetime64(2014, 1, 1, 12, 0, 0), numpy.timedelta64(5, 's'), 1), (datetime64(2014, 1, 1, 12, 0, 5), numpy.timedelta64(5, 's'), 1), (datetime64(2014, 1, 1, 12, 0, 10), numpy.timedelta64(5, 's'), 1), - ], self.storage.get_measures(m)) + ], self.storage.get_measures(m, [numpy.timedelta64(5, 's')])) # expand to more points self.index.update_archive_policy( name, [archive_policy.ArchivePolicyItem(granularity=5, points=6)]) @@ -925,7 +982,7 @@ class TestStorageDriver(tests_base.TestCase): (datetime64(2014, 1, 1, 12, 0, 5), numpy.timedelta64(5, 's'), 1), (datetime64(2014, 1, 1, 12, 0, 10), numpy.timedelta64(5, 's'), 1), (datetime64(2014, 1, 1, 12, 0, 15), numpy.timedelta64(5, 's'), 1), - ], self.storage.get_measures(m)) + ], self.storage.get_measures(m, [numpy.timedelta64(5, 's')])) # shrink timespan self.index.update_archive_policy( name, [archive_policy.ArchivePolicyItem(granularity=5, points=2)]) @@ -933,16 +990,16 @@ class TestStorageDriver(tests_base.TestCase): self.assertEqual([ (datetime64(2014, 1, 1, 12, 0, 10), numpy.timedelta64(5, 's'), 1), (datetime64(2014, 1, 1, 12, 0, 15), numpy.timedelta64(5, 's'), 1), - ], self.storage.get_measures(m)) + ], self.storage.get_measures(m, [numpy.timedelta64(5, 's')])) def test_resample_no_metric(self): """https://github.com/gnocchixyz/gnocchi/issues/69""" 
self.assertEqual([], self.storage.get_measures( self.metric, + [numpy.timedelta64(300, 's')], datetime64(2014, 1, 1), datetime64(2015, 1, 1), - granularity=numpy.timedelta64(300, 's'), resample=numpy.timedelta64(1, 'h'))) -- GitLab From ec44c23d673530db9c4ea4895db526a1765f74af Mon Sep 17 00:00:00 2001 From: gord chung Date: Wed, 10 Jan 2018 22:28:32 +0000 Subject: [PATCH 1184/1483] cleanup some numpy math and casting - math says (a - 1) - (b - 1) = (a - 1) - b + 1 = a - b - do the same thing for first aggregate rather than flipping array - remove cast to datetime64[ns] when unserialising. this is already enforced by the timeseries structure. all these changes have neglible improvements in performance but i think it's simpler to do less stuff. --- gnocchi/carbonara.py | 13 +++++-------- 1 file changed, 5 insertions(+), 8 deletions(-) diff --git a/gnocchi/carbonara.py b/gnocchi/carbonara.py index e4835ffc..7de6cf33 100644 --- a/gnocchi/carbonara.py +++ b/gnocchi/carbonara.py @@ -176,13 +176,13 @@ class GroupedTimeSeries(object): return make_timeseries(self.tstamps, values) def first(self): - cumcounts = numpy.cumsum(self.counts[::-1]) - 1 - values = self._ts['values'][::-1][cumcounts] - return make_timeseries(self.tstamps, values[::-1]) + cumcounts = numpy.cumsum(self.counts) - self.counts + values = self._ts['values'][cumcounts] + return make_timeseries(self.tstamps, values) def quantile(self, q): ordered = numpy.lexsort((self._ts['values'], self.indexes)) - min_pos = (numpy.cumsum(self.counts) - 1) - (self.counts - 1) + min_pos = numpy.cumsum(self.counts) - self.counts real_pos = min_pos + (self.counts - 1) * (q / 100) floor_pos = numpy.floor(real_pos).astype(numpy.int, copy=False) ceil_pos = numpy.ceil(real_pos).astype(numpy.int, copy=False) @@ -362,11 +362,8 @@ class BoundTimeSerie(TimeSerie): except ValueError: raise InvalidData - timestamps = numpy.cumsum(timestamps) - timestamps = timestamps.astype(dtype='datetime64[ns]', copy=False) - return cls.from_data( - 
timestamps, + numpy.cumsum(timestamps), values, block_size=block_size, back_window=back_window) -- GitLab From 51ff00bea20b306a149674b77abdb0d22edfab75 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Thu, 11 Jan 2018 16:06:07 +0100 Subject: [PATCH 1185/1483] doc: fix gnocchi.xyz for stable/3.? by setting a tiny range for lz4 Fixes: #614 --- tox.ini | 1 + 1 file changed, 1 insertion(+) diff --git a/tox.ini b/tox.ini index dcf946e4..44c727f7 100644 --- a/tox.ini +++ b/tox.ini @@ -163,6 +163,7 @@ deps = {[testenv:docs]deps} # for <= 4.1 doc pandas # for 3.x doc + lz4>=0.9,<=0.13 oslotest oslosphinx retrying -- GitLab From df2b64026c6ea697c47200a9fb7ec2b02622de92 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Thu, 11 Jan 2018 22:56:42 +0100 Subject: [PATCH 1186/1483] indexer: ap rule, catch invalid ap This changes catch oslo.db error when archive policy doesn't exists for an archive policy rule, and raise ArchivePolicyNotFound instead. Closes #627 --- gnocchi/indexer/__init__.py | 6 ++++++ gnocchi/indexer/sqlalchemy.py | 4 ++++ gnocchi/rest/api.py | 2 ++ .../tests/functional/gabbits/archive-rule.yaml | 17 +++++++++++++++++ 4 files changed, 29 insertions(+) diff --git a/gnocchi/indexer/__init__.py b/gnocchi/indexer/__init__.py index 5baf8536..9e11a832 100644 --- a/gnocchi/indexer/__init__.py +++ b/gnocchi/indexer/__init__.py @@ -149,6 +149,12 @@ class NoSuchArchivePolicy(IndexerException): "Archive policy %s does not exist" % archive_policy) self.archive_policy = archive_policy + def jsonify(self): + return { + "cause": "Archive policy does not exist", + "detail": self.archive_policy, + } + class UnsupportedArchivePolicyChange(IndexerException): """Error raised when modifying archive policy if not supported.""" diff --git a/gnocchi/indexer/sqlalchemy.py b/gnocchi/indexer/sqlalchemy.py index 328ba944..00881c1f 100644 --- a/gnocchi/indexer/sqlalchemy.py +++ b/gnocchi/indexer/sqlalchemy.py @@ -644,6 +644,10 @@ class SQLAlchemyIndexer(indexer.IndexerDriver): try: 
with self.facade.writer() as session: session.add(apr) + except exception.DBReferenceError as e: + if e.constraint == 'fk_apr_ap_name_ap_name': + raise indexer.NoSuchArchivePolicy(archive_policy_name) + raise except exception.DBDuplicateEntry: raise indexer.ArchivePolicyRuleAlreadyExists(name) return apr diff --git a/gnocchi/rest/api.py b/gnocchi/rest/api.py index 0312c139..0b8dcede 100644 --- a/gnocchi/rest/api.py +++ b/gnocchi/rest/api.py @@ -378,6 +378,8 @@ class ArchivePolicyRulesController(rest.RestController): ) except indexer.ArchivePolicyRuleAlreadyExists as e: abort(409, six.text_type(e)) + except indexer.NoSuchArchivePolicy as e: + abort(400, e) location = "/archive_policy_rule/" + ap.name set_resp_location_hdr(location) diff --git a/gnocchi/tests/functional/gabbits/archive-rule.yaml b/gnocchi/tests/functional/gabbits/archive-rule.yaml index 0c6c7915..90b0f43f 100644 --- a/gnocchi/tests/functional/gabbits/archive-rule.yaml +++ b/gnocchi/tests/functional/gabbits/archive-rule.yaml @@ -88,6 +88,23 @@ tests: metric_pattern: "disk.foo.*" status: 400 + - name: create archive policy rule with invalid archive policy + POST: /v1/archive_policy_rule + request_headers: + # User admin + authorization: "basic YWRtaW46" + accept: application/json + content-type: application/json + data: + name: test_rule + archive_policy_name: not-exists + metric_pattern: "disk.foo.*" + status: 400 + response_json_paths: + $.code: 400 + $.description.cause: "Archive policy does not exist" + $.description.detail: not-exists + - name: missing auth archive policy rule POST: /v1/archive_policy_rule request_headers: -- GitLab From 11fb962e750af885329d4d4399e119c3a6be59c3 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Thu, 11 Jan 2018 18:34:45 +0100 Subject: [PATCH 1187/1483] doc: do not test gnocchi.xyz build on stable branch (cherry picked from commit 3a6afc17fd3c46bc4c51a396d608da8a1e500fad) --- .travis.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/.travis.yml b/.travis.yml 
index 7a6d7ea1..3096a124 100644 --- a/.travis.yml +++ b/.travis.yml @@ -10,7 +10,6 @@ cache: env: - TARGET: pep8 - TARGET: docs - - TARGET: docs-gnocchi.xyz - TARGET: py27-mysql-ceph-upgrade-from-3.1 - TARGET: py35-postgresql-file-upgrade-from-3.1 -- GitLab From 09f13be07251da14d667853c34d8d1c702495bd8 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Mon, 8 Jan 2018 16:17:52 +0100 Subject: [PATCH 1188/1483] tests: Fix resource type with optional uuid This changes adds all missing tests for PATCH an optional attributes of a resource type (Only string type was having tests). And fix the bug where False is returned instead of None for uuid. Closes-bug: #616 (cherry picked from commit 13a0123fbc5087f9ed8c4c0ebb6348a85ab71a67) (cherry picked from commit d2ed3846b7e99b59de96827afc71a27b031225c6) --- gnocchi/indexer/sqlalchemy_extension.py | 2 +- .../functional/gabbits/resource-type.yaml | 64 +++++++++++++++++++ 2 files changed, 65 insertions(+), 1 deletion(-) diff --git a/gnocchi/indexer/sqlalchemy_extension.py b/gnocchi/indexer/sqlalchemy_extension.py index bc4d8418..cf3e1cf4 100644 --- a/gnocchi/indexer/sqlalchemy_extension.py +++ b/gnocchi/indexer/sqlalchemy_extension.py @@ -43,7 +43,7 @@ class UUIDSchema(resource_type.UUIDSchema, SchemaMixin): def for_filling(self, dialect): if self.fill is None: - return False # Don't set any server_default + return None return sqlalchemy.literal( self.satype.process_bind_param(self.fill, dialect)) diff --git a/gnocchi/tests/functional/gabbits/resource-type.yaml b/gnocchi/tests/functional/gabbits/resource-type.yaml index 90b9a8ba..3785e25f 100644 --- a/gnocchi/tests/functional/gabbits/resource-type.yaml +++ b/gnocchi/tests/functional/gabbits/resource-type.yaml @@ -320,6 +320,28 @@ tests: authorization: "basic YWRtaW46" content-type: application/json-patch+json data: + - op: add + path: /attributes/new-optional-bool + value: + type: bool + required: False + - op: add + path: /attributes/new-optional-int + value: + type: number + 
required: False + min: 0 + max: 255 + - op: add + path: /attributes/new-optional-uuid + value: + type: uuid + required: False + - op: add + path: /attributes/new-optional-datetime + value: + type: datetime + required: False - op: add path: /attributes/newstuff value: @@ -400,6 +422,17 @@ tests: bool: type: bool required: false + new-optional-bool: + type: bool + required: False + new-optional-int: + type: number + required: False + min: 0 + max: 255 + new-optional-uuid: + type: uuid + required: False newstuff: type: string required: False @@ -494,6 +527,17 @@ tests: bool: type: bool required: false + new-optional-bool: + type: bool + required: False + new-optional-int: + type: number + required: False + min: 0 + max: 255 + new-optional-uuid: + type: uuid + required: False newstuff: type: string required: False @@ -533,6 +577,9 @@ tests: $.newint: 15 $.newstring: foobar $.newuuid: "00000000-0000-0000-0000-000000000000" + $.new-optional-bool: null + $.new-optional-int: null + $.new-optional-uuid: null - name: control new attributes of existing resource history GET: /v1/resource/my_custom_resource/d11edfca-4393-4fda-b94d-b05a3a1b3747/history?sort=revision_end:asc-nullslast @@ -546,6 +593,9 @@ tests: $[0].newint: 15 $[0].newstring: foobar $[0].newuuid: "00000000-0000-0000-0000-000000000000" + $[0].new-optional-bool: null + $[0].new-optional-int: null + $[0].new-optional-uuid: null $[1].id: d11edfca-4393-4fda-b94d-b05a3a1b3747 $[1].name: foo $[1].newstuff: null @@ -554,6 +604,9 @@ tests: $[1].newint: 15 $[1].newstring: foobar $[1].newuuid: "00000000-0000-0000-0000-000000000000" + $[1].new-optional-bool: null + $[1].new-optional-int: null + $[1].new-optional-uuid: null # Invalid patch @@ -629,6 +682,17 @@ tests: newuuid: type: uuid required: True + new-optional-bool: + type: bool + required: False + new-optional-int: + type: number + required: False + min: 0 + max: 255 + new-optional-uuid: + type: uuid + required: False - name: delete/add the same resource attribute 
PATCH: /v1/resource_type/my_custom_resource -- GitLab From e3176cb77d0c2377a0504db065f9a75024493ccd Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Thu, 11 Jan 2018 22:56:42 +0100 Subject: [PATCH 1189/1483] indexer: ap rule, catch invalid ap This changes catch oslo.db error when archive policy doesn't exists for an archive policy rule, and raise ArchivePolicyNotFound instead. Closes #627 (cherry picked from commit df2b64026c6ea697c47200a9fb7ec2b02622de92) --- gnocchi/indexer/sqlalchemy.py | 4 ++++ gnocchi/rest/__init__.py | 2 ++ .../tests/functional/gabbits/archive-rule.yaml | 15 +++++++++++++++ 3 files changed, 21 insertions(+) diff --git a/gnocchi/indexer/sqlalchemy.py b/gnocchi/indexer/sqlalchemy.py index 7fd270c9..593a6c89 100644 --- a/gnocchi/indexer/sqlalchemy.py +++ b/gnocchi/indexer/sqlalchemy.py @@ -637,6 +637,10 @@ class SQLAlchemyIndexer(indexer.IndexerDriver): try: with self.facade.writer() as session: session.add(apr) + except exception.DBReferenceError as e: + if e.constraint == 'fk_apr_ap_name_ap_name': + raise indexer.NoSuchArchivePolicy(archive_policy_name) + raise except exception.DBDuplicateEntry: raise indexer.ArchivePolicyRuleAlreadyExists(name) return apr diff --git a/gnocchi/rest/__init__.py b/gnocchi/rest/__init__.py index 4898b975..c7695404 100644 --- a/gnocchi/rest/__init__.py +++ b/gnocchi/rest/__init__.py @@ -326,6 +326,8 @@ class ArchivePolicyRulesController(rest.RestController): ) except indexer.ArchivePolicyRuleAlreadyExists as e: abort(409, e) + except indexer.NoSuchArchivePolicy as e: + abort(400, e) location = "/archive_policy_rule/" + ap.name set_resp_location_hdr(location) diff --git a/gnocchi/tests/functional/gabbits/archive-rule.yaml b/gnocchi/tests/functional/gabbits/archive-rule.yaml index 1d130c11..7a1ea859 100644 --- a/gnocchi/tests/functional/gabbits/archive-rule.yaml +++ b/gnocchi/tests/functional/gabbits/archive-rule.yaml @@ -88,6 +88,21 @@ tests: metric_pattern: "disk.foo.*" status: 400 + - name: create archive 
policy rule with invalid archive policy + POST: /v1/archive_policy_rule + request_headers: + # User admin + authorization: "basic YWRtaW46" + accept: application/json + content-type: application/json + data: + name: test_rule + archive_policy_name: not-exists + metric_pattern: "disk.foo.*" + status: 400 + response_strings: + - "Archive policy does not exist" + - name: missing auth archive policy rule POST: /v1/archive_policy_rule request_headers: -- GitLab From 866d2d9db3406ab52a64c05ad19460547dc1b8ff Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Sat, 2 Dec 2017 13:27:57 +0100 Subject: [PATCH 1190/1483] indexer: do not return empty IN statement in QueryFilter Fixes #530 (cherry picked from commit c89a0ef696a8bee5a95a336c599104c979c01b5f) --- gnocchi/indexer/sqlalchemy.py | 9 ++++++++- gnocchi/tests/test_indexer.py | 8 ++++++++ 2 files changed, 16 insertions(+), 1 deletion(-) diff --git a/gnocchi/indexer/sqlalchemy.py b/gnocchi/indexer/sqlalchemy.py index 593a6c89..84c3d1bb 100644 --- a/gnocchi/indexer/sqlalchemy.py +++ b/gnocchi/indexer/sqlalchemy.py @@ -1103,6 +1103,13 @@ class SQLAlchemyIndexer(indexer.IndexerDriver): return sort_keys, sort_dirs +def _operator_in(field_name, value): + # Do not generate empty IN comparison + # https://github.com/gnocchixyz/gnocchi/issues/530 + if len(value): + return field_name.in_(value) + + class QueryTransformer(object): unary_operators = { u"not": sqlalchemy.not_, @@ -1131,7 +1138,7 @@ class QueryTransformer(object): u"≠": operator.ne, u"ne": operator.ne, - u"in": lambda field_name, values: field_name.in_(values), + u"in": _operator_in, u"like": lambda field, value: field.like(value), } diff --git a/gnocchi/tests/test_indexer.py b/gnocchi/tests/test_indexer.py index 769ed4d6..ac796587 100644 --- a/gnocchi/tests/test_indexer.py +++ b/gnocchi/tests/test_indexer.py @@ -838,6 +838,14 @@ class TestIndexerDriver(tests_base.TestCase): resource_type, attribute_filter={"=": {"flavor_id": 1.0}}) self.assertEqual(0, len(r)) + def 
test_list_resource_empty_in(self): + self.index.create_resource('generic', str(uuid.uuid4()), + str(uuid.uuid4()), str(uuid.uuid4())) + self.assertEqual( + [], + self.index.list_resources( + attribute_filter={"in": {"id": []}})) + def test_list_resource_weird_date(self): self.assertRaises( indexer.QueryValueError, -- GitLab From 546189429270c628a0ffc49b1f02ee23b550c9fb Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Thu, 30 Nov 2017 17:15:07 +0100 Subject: [PATCH 1191/1483] indexer: fix upgrade when password contains a % oslo.config doesn't support %{} or %() variable interpolation while ConfigParser does. So when a password contains a '%', it's fine for oslo.config But the Python ConfigParser will raise the exception. Since alembic use ConfigParser and not oslo.config, we have to escape the url ourself. This is not a big deal since, we don't want alembic doing variable interpolation. (cherry picked from commit 344e382c260374569eea31673c99245fe513201b) --- gnocchi/indexer/sqlalchemy.py | 2 +- .../tests/indexer/sqlalchemy/test_utils.py | 25 +++++++++++++++++++ 2 files changed, 26 insertions(+), 1 deletion(-) create mode 100644 gnocchi/tests/indexer/sqlalchemy/test_utils.py diff --git a/gnocchi/indexer/sqlalchemy.py b/gnocchi/indexer/sqlalchemy.py index 84c3d1bb..fc78ed4d 100644 --- a/gnocchi/indexer/sqlalchemy.py +++ b/gnocchi/indexer/sqlalchemy.py @@ -297,7 +297,7 @@ class SQLAlchemyIndexer(indexer.IndexerDriver): cfg = config.Config( "%s/alembic/alembic.ini" % os.path.dirname(__file__)) cfg.set_main_option('sqlalchemy.url', - self.conf.database.connection) + self.conf.database.connection.replace('%', '%%')) return cfg def get_engine(self): diff --git a/gnocchi/tests/indexer/sqlalchemy/test_utils.py b/gnocchi/tests/indexer/sqlalchemy/test_utils.py new file mode 100644 index 00000000..9d251ec4 --- /dev/null +++ b/gnocchi/tests/indexer/sqlalchemy/test_utils.py @@ -0,0 +1,25 @@ +# -*- encoding: utf-8 -*- +# +# Copyright © 2017 Red Hat +# +# Licensed under the 
Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +from gnocchi import indexer +from gnocchi.tests import base + + +class TestUtils(base.TestCase): + def test_percent_in_url(self): + url = 'mysql+pymysql://user:pass%word@localhost/foobar' + self.conf.set_override('url', url, 'indexer') + alembic = indexer.get_driver(self.conf)._get_alembic_config() + self.assertEqual(url, alembic.get_main_option("sqlalchemy.url")) -- GitLab From 71c0c31ccddeec660cba8a8c3b62cec96c76c41e Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Thu, 11 Jan 2018 16:06:07 +0100 Subject: [PATCH 1192/1483] aggregates: add rateofchange method This change adds rateofchange method to Dynamic Aggregate API. 
--- doc/source/rest.j2 | 1 + gnocchi/rest/aggregates/api.py | 5 +++++ gnocchi/rest/aggregates/operations.py | 14 ++++++++++++-- .../gabbits/aggregates-with-metric-ids.yaml | 15 +++++++++++++++ .../aggregates-rateofchange-94785a381b7bc3b5.yaml | 4 ++++ 5 files changed, 37 insertions(+), 2 deletions(-) create mode 100644 releasenotes/notes/aggregates-rateofchange-94785a381b7bc3b5.yaml diff --git a/doc/source/rest.j2 b/doc/source/rest.j2 index fee5c23e..6f113e50 100644 --- a/doc/source/rest.j2 +++ b/doc/source/rest.j2 @@ -853,6 +853,7 @@ Function operations (tan ()) (floor ()) (ceil ()) + (rateofchange ()) diff --git a/gnocchi/rest/aggregates/api.py b/gnocchi/rest/aggregates/api.py index f6987ae3..6a411377 100644 --- a/gnocchi/rest/aggregates/api.py +++ b/gnocchi/rest/aggregates/api.py @@ -80,6 +80,11 @@ OperationsSchemaBase = [ agg_operations.unary_operators.keys())), _OperationsSubNodeSchema] ), + voluptuous.ExactSequence( + [voluptuous.Any(*list( + agg_operations.unary_operators_with_timestamps.keys())), + _OperationsSubNodeSchema] + ), voluptuous.ExactSequence( [u"aggregate", voluptuous.Any(*list(agg_operations.AGG_MAP.keys())), diff --git a/gnocchi/rest/aggregates/operations.py b/gnocchi/rest/aggregates/operations.py index b322c2d5..26cd5f04 100644 --- a/gnocchi/rest/aggregates/operations.py +++ b/gnocchi/rest/aggregates/operations.py @@ -99,6 +99,11 @@ unary_operators = { } +unary_operators_with_timestamps = { + u"rateofchange": lambda t, v: (t[1:], numpy.diff(v.T).T) +} + + def handle_unary_operator(nodes, granularity, timestamps, initial_values, is_aggregated, references): op = nodes[0] @@ -106,7 +111,11 @@ def handle_unary_operator(nodes, granularity, timestamps, initial_values, nodes[1], granularity, timestamps, initial_values, is_aggregated, references) - values = unary_operators[op](values) + if op in unary_operators: + values = unary_operators[op](values) + else: + timestamps, values = unary_operators_with_timestamps[op]( + timestamps, values) return 
granularity, timestamps, values, is_aggregated @@ -247,7 +256,8 @@ def evaluate(nodes, granularity, timestamps, initial_values, is_aggregated, return handle_binary_operator(nodes, granularity, timestamps, initial_values, is_aggregated, references) - elif nodes[0] in unary_operators: + elif (nodes[0] in unary_operators or + nodes[0] in unary_operators_with_timestamps): return handle_unary_operator(nodes, granularity, timestamps, initial_values, is_aggregated, references) diff --git a/gnocchi/tests/functional/gabbits/aggregates-with-metric-ids.yaml b/gnocchi/tests/functional/gabbits/aggregates-with-metric-ids.yaml index b0aca8d6..81835c90 100644 --- a/gnocchi/tests/functional/gabbits/aggregates-with-metric-ids.yaml +++ b/gnocchi/tests/functional/gabbits/aggregates-with-metric-ids.yaml @@ -354,6 +354,21 @@ tests: - ["2015-03-06T14:35:12+00:00", 1.0, 9.0] - ["2015-03-06T14:35:15+00:00", 1.0, 11.0] + - name: get aggregates one metric rateofchange + POST: /v1/aggregates?details=true + data: + operations: "(rateofchange (metric $HISTORY['create metric1'].$RESPONSE['$.id'] mean))" + response_json_paths: + $.references.`len`: 1 + $.references[/name][0].id: $HISTORY['create metric1'].$RESPONSE['$.id'] + $.measures."$HISTORY['create metric1'].$RESPONSE['$.id']".mean: + - ["2015-03-06T14:34:00+00:00", 60.0, -45.1] + - ["2015-03-06T14:35:00+00:00", 60.0, 12.0] + - ["2015-03-06T14:34:12+00:00", 1.0, -31.1] + - ["2015-03-06T14:34:15+00:00", 1.0, -28.0] + - ["2015-03-06T14:35:12+00:00", 1.0, 25.0] + - ["2015-03-06T14:35:15+00:00", 1.0, 2.0] + - name: get aggregates math with string POST: /v1/aggregates?details=true data: diff --git a/releasenotes/notes/aggregates-rateofchange-94785a381b7bc3b5.yaml b/releasenotes/notes/aggregates-rateofchange-94785a381b7bc3b5.yaml new file mode 100644 index 00000000..38f5b8fd --- /dev/null +++ b/releasenotes/notes/aggregates-rateofchange-94785a381b7bc3b5.yaml @@ -0,0 +1,4 @@ +--- +features: + - | + Dynamic Aggregate API have a new method called 
'rateofchange'. -- GitLab From eff918e8cc0e3b363a6d575bb6058028e1e0cb18 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Thu, 11 Jan 2018 11:31:18 +0100 Subject: [PATCH 1193/1483] rest: store coordinator in pecan.request This will allow the API to use the coordinator directly. --- gnocchi/rest/app.py | 47 ++++++++++++++++++++++++++------------ gnocchi/tests/test_rest.py | 2 ++ 2 files changed, 34 insertions(+), 15 deletions(-) diff --git a/gnocchi/rest/app.py b/gnocchi/rest/app.py index 66728285..df484a88 100644 --- a/gnocchi/rest/app.py +++ b/gnocchi/rest/app.py @@ -43,14 +43,6 @@ LOG = daiquiri.getLogger(__name__) jsonify.jsonify.register(object)(json.to_primitive) -def get_storage_driver(conf): - # NOTE(jd) This coordinator is never stop. I don't think it's a - # real problem since the Web app can never really be stopped - # anyway, except by quitting it entirely. - coord = metricd.get_coordinator_and_start(conf.coordination_url) - return gnocchi_storage.get_driver(conf, coord) - - class GnocchiHook(pecan.hooks.PecanHook): def __init__(self, conf): @@ -62,6 +54,7 @@ class GnocchiHook(pecan.hooks.PecanHook): invoke_on_load=True).driver def on_route(self, state): + state.request.coordinator = self._lazy_load('coordinator') state.request.storage = self._lazy_load('storage') state.request.indexer = self._lazy_load('indexer') state.request.incoming = self._lazy_load('incoming') @@ -69,21 +62,45 @@ class GnocchiHook(pecan.hooks.PecanHook): state.request.policy_enforcer = self.policy_enforcer state.request.auth_helper = self.auth_helper - BACKEND_LOADERS = { - 'storage': (threading.Lock(), get_storage_driver), - 'incoming': (threading.Lock(), gnocchi_incoming.get_driver), - 'indexer': (threading.Lock(), gnocchi_indexer.get_driver), + BACKEND_LOCKS = { + 'coordinator': threading.Lock(), + 'storage': threading.Lock(), + 'incoming': threading.Lock(), + 'indexer': threading.Lock(), } def _lazy_load(self, name): # NOTE(sileht): We don't care about raise error here, if 
something # fail, this will just raise a 500, until the backend is ready. if name not in self.backends: - lock, loader = self.BACKEND_LOADERS[name] - with lock: + with self.BACKEND_LOCKS[name]: # Recheck, maybe it have been created in the meantime. if name not in self.backends: - self.backends[name] = loader(self.conf) + if name == "coordinator": + # NOTE(jd) This coordinator is never stop. I don't + # think it's a real problem since the Web app can never + # really be stopped anyway, except by quitting it + # entirely. + self.backends[name] = ( + metricd.get_coordinator_and_start( + self.conf.coordination_url) + ) + elif name == "storage": + coord = self._lazy_load("coordinator") + self.backends[name] = ( + gnocchi_storage.get_driver(self.conf, coord) + ) + elif name == "incoming": + self.backends[name] = ( + gnocchi_incoming.get_driver(self.conf) + ) + elif name == "indexer": + self.backends[name] = ( + gnocchi_indexer.get_driver(self.conf) + ) + else: + raise RuntimeError("Unknown driver %s" % name) + return self.backends[name] diff --git a/gnocchi/tests/test_rest.py b/gnocchi/tests/test_rest.py index 884abeea..20daf8c7 100644 --- a/gnocchi/tests/test_rest.py +++ b/gnocchi/tests/test_rest.py @@ -190,6 +190,8 @@ class RestTest(tests_base.TestCase, testscenarios.TestWithScenarios): return self.index elif name == "incoming": return self.incoming + elif name == "coordinator": + return self.coord else: raise RuntimeError("Invalid driver type: %s" % name) -- GitLab From 3f311f78cc62e8644e7fb26a3f36691db051301b Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Thu, 11 Jan 2018 13:39:47 +0100 Subject: [PATCH 1194/1483] rest: export the available metricd processors in status Fixes #390 --- gnocchi/rest/api.py | 12 ++++++++++++ gnocchi/tests/functional/gabbits/base.yaml | 2 ++ 2 files changed, 14 insertions(+) diff --git a/gnocchi/rest/api.py b/gnocchi/rest/api.py index 0b8dcede..6632915d 100644 --- a/gnocchi/rest/api.py +++ b/gnocchi/rest/api.py @@ -27,10 +27,12 @@ 
import pyparsing import six from six.moves.urllib import parse as urllib_parse import tenacity +import tooz import voluptuous import werkzeug.http from gnocchi import archive_policy +from gnocchi.cli import metricd from gnocchi import incoming from gnocchi import indexer from gnocchi import json @@ -1961,6 +1963,11 @@ class StatusController(rest.RestController): @pecan.expose('json') def get(details=True): enforce("get status", {}) + try: + members_req = pecan.request.coordinator.get_members( + metricd.MetricProcessor.GROUP_ID) + except tooz.NotImplemented: + members_req = None try: report = pecan.request.incoming.measures_report( strtobool("details", details)) @@ -1969,6 +1976,11 @@ class StatusController(rest.RestController): report_dict = {"storage": {"summary": report['summary']}} if 'details' in report: report_dict["storage"]["measures_to_process"] = report['details'] + report_dict['metricd'] = {} + if members_req: + report_dict['metricd']['processors'] = members_req.get() + else: + report_dict['metricd']['processors'] = None return report_dict diff --git a/gnocchi/tests/functional/gabbits/base.yaml b/gnocchi/tests/functional/gabbits/base.yaml index 6997d587..43b46954 100644 --- a/gnocchi/tests/functional/gabbits/base.yaml +++ b/gnocchi/tests/functional/gabbits/base.yaml @@ -132,6 +132,7 @@ tests: authorization: "basic YWRtaW46" response_json_paths: $.storage.`len`: 2 + $.metricd.`len`: 1 - name: get status, no details GET: /v1/status?details=False @@ -140,3 +141,4 @@ tests: authorization: "basic YWRtaW46" response_json_paths: $.storage.`len`: 1 + $.metricd.`len`: 1 -- GitLab From 65f97ee78fc573c6dc7dcbf52509cef26d9101d4 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Thu, 11 Jan 2018 13:57:23 +0100 Subject: [PATCH 1195/1483] metricd: use human readable member id This changes the format of the member id when using tooz to expose hostname and worker id. This makes it easier to have an idea of what node are connected and working. 
--- gnocchi/cli/metricd.py | 16 ++++++++++++---- gnocchi/rest/app.py | 1 + gnocchi/tests/base.py | 1 + gnocchi/tests/functional/fixtures.py | 4 +++- 4 files changed, 17 insertions(+), 5 deletions(-) diff --git a/gnocchi/cli/metricd.py b/gnocchi/cli/metricd.py index 70821888..36da079a 100644 --- a/gnocchi/cli/metricd.py +++ b/gnocchi/cli/metricd.py @@ -13,6 +13,7 @@ # implied. # See the License for the specific language governing permissions and # limitations under the License. +import socket import threading import time import uuid @@ -39,8 +40,8 @@ LOG = daiquiri.getLogger(__name__) @utils.retry_on_exception_and_log("Unable to initialize coordination driver") -def get_coordinator_and_start(url): - coord = coordination.get_coordinator(url, str(uuid.uuid4()).encode()) +def get_coordinator_and_start(member_id, url): + coord = coordination.get_coordinator(url, member_id) coord.start(start_heart=True) return coord @@ -49,7 +50,7 @@ class MetricProcessBase(cotyledon.Service): def __init__(self, worker_id, conf, interval_delay=0): super(MetricProcessBase, self).__init__(worker_id) self.conf = conf - self.startup_delay = worker_id + self.startup_delay = self.worker_id = worker_id self.interval_delay = interval_delay self._wake_up = threading.Event() self._shutdown = threading.Event() @@ -59,7 +60,14 @@ class MetricProcessBase(cotyledon.Service): self._wake_up.set() def _configure(self): - self.coord = get_coordinator_and_start(self.conf.coordination_url) + member_id = "%s.%s.%s" % (socket.gethostname(), + self.worker_id, + # NOTE(jd) Still use a uuid here so we're + # sure there's no conflict in case of + # crash/restart + str(uuid.uuid4())) + self.coord = get_coordinator_and_start(member_id, + self.conf.coordination_url) self.store = storage.get_driver(self.conf, self.coord) self.incoming = incoming.get_driver(self.conf) self.index = indexer.get_driver(self.conf) diff --git a/gnocchi/rest/app.py b/gnocchi/rest/app.py index df484a88..fd02b637 100644 --- 
a/gnocchi/rest/app.py +++ b/gnocchi/rest/app.py @@ -83,6 +83,7 @@ class GnocchiHook(pecan.hooks.PecanHook): # entirely. self.backends[name] = ( metricd.get_coordinator_and_start( + str(uuid.uuid4()), self.conf.coordination_url) ) elif name == "storage": diff --git a/gnocchi/tests/base.py b/gnocchi/tests/base.py index b1dcd17f..4b01611a 100644 --- a/gnocchi/tests/base.py +++ b/gnocchi/tests/base.py @@ -301,6 +301,7 @@ class TestCase(BaseTestCase): self.index = indexer.get_driver(self.conf) self.coord = metricd.get_coordinator_and_start( + str(uuid.uuid4()), self.conf.coordination_url) # NOTE(jd) So, some driver, at least SQLAlchemy, can't create all diff --git a/gnocchi/tests/functional/fixtures.py b/gnocchi/tests/functional/fixtures.py index 00092999..686af0f6 100644 --- a/gnocchi/tests/functional/fixtures.py +++ b/gnocchi/tests/functional/fixtures.py @@ -22,6 +22,7 @@ import tempfile import threading import time from unittest import case +import uuid import warnings import daiquiri @@ -143,7 +144,8 @@ class ConfigFixture(fixture.GabbiFixture): self.index = index - self.coord = metricd.get_coordinator_and_start(conf.coordination_url) + self.coord = metricd.get_coordinator_and_start(str(uuid.uuid4()), + conf.coordination_url) s = storage.get_driver(conf, self.coord) s.upgrade() i = incoming.get_driver(conf) -- GitLab From 53b97f2dcad44a7593fde2bfac24d507c9c92b7d Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Tue, 16 Jan 2018 11:18:23 +0100 Subject: [PATCH 1196/1483] service: log version number at startup Closes: #634 --- gnocchi/service.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/gnocchi/service.py b/gnocchi/service.py index 3f0d1e5c..ce60d08e 100644 --- a/gnocchi/service.py +++ b/gnocchi/service.py @@ -48,9 +48,10 @@ def prepare_service(args=None, conf=None, conf.set_default("workers", workers, group="metricd") conf.set_default("parallel_operations", workers) + version = pbr.version.VersionInfo('gnocchi').version_string() conf(args, 
project='gnocchi', validate_default_values=True, default_config_files=default_config_files, - version=pbr.version.VersionInfo('gnocchi').version_string()) + version=version) utils.parallel_map.MAX_WORKERS = conf.parallel_operations @@ -100,6 +101,7 @@ def prepare_service(args=None, conf=None, conf.set_default("coordination_url", urlparse.urlunparse(parsed)) + LOG.info("Gnocchi version %s", version) conf.log_opt_values(LOG, logging.DEBUG) return conf -- GitLab From 246dd263e91e240cdbf74d253e09f834233ef21d Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Tue, 16 Jan 2018 14:04:55 +0100 Subject: [PATCH 1197/1483] indexer: fix not operator The not operator was buggy. This change fix it. Closes: #649 --- gnocchi/indexer/sqlalchemy.py | 2 +- gnocchi/tests/functional/gabbits/search.yaml | 5 +++++ 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/gnocchi/indexer/sqlalchemy.py b/gnocchi/indexer/sqlalchemy.py index 00881c1f..50d8f133 100644 --- a/gnocchi/indexer/sqlalchemy.py +++ b/gnocchi/indexer/sqlalchemy.py @@ -1314,6 +1314,6 @@ class QueryTransformer(object): op = cls.unary_operators[operator] except KeyError: raise indexer.QueryInvalidOperator(operator) - return cls._handle_unary_op(engine, op, nodes) + return cls._handle_unary_op(engine, table, op, nodes) return cls._handle_binary_op(engine, table, op, nodes) return cls._handle_multiple_op(engine, table, op, nodes) diff --git a/gnocchi/tests/functional/gabbits/search.yaml b/gnocchi/tests/functional/gabbits/search.yaml index 9508411d..b3de8060 100644 --- a/gnocchi/tests/functional/gabbits/search.yaml +++ b/gnocchi/tests/functional/gabbits/search.yaml @@ -144,6 +144,11 @@ tests: response_json_paths: $.`len`: 2 + - name: search not in_ query string + POST: /v1/search/resource/generic?filter=not%20id%20in%20%5Bfaef212f-0bf4-4030-a461-2186fef79be0%2C%20df7e5e75-6a1d-4ff7-85cb-38eb9d75da7e%5D + response_json_paths: + $.`len`: 0 + - name: search empty in_ POST: /v1/search/resource/generic data: -- GitLab From 
52fe97abca2d23d60914818ede0db64e94e4bcc2 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Tue, 16 Jan 2018 11:17:48 +0100 Subject: [PATCH 1198/1483] doc: add a configuration sample --- .gitignore | 1 + doc/source/install.rst | 7 +++++++ gnocchi/gendoc.py | 10 ++++++++++ 3 files changed, 18 insertions(+) diff --git a/.gitignore b/.gitignore index 56381f3e..a773d3d4 100644 --- a/.gitignore +++ b/.gitignore @@ -7,6 +7,7 @@ ChangeLog etc/gnocchi/gnocchi.conf doc/build doc/source/rest.rst +doc/source/gnocchi.conf.sample releasenotes/build cover .coverage diff --git a/doc/source/install.rst b/doc/source/install.rst index 41f1bc8a..7c8e2f69 100644 --- a/doc/source/install.rst +++ b/doc/source/install.rst @@ -226,4 +226,11 @@ Then, you can start devstack: .. _devstack: http://devstack.org + +Gnocchi Configuration sample +============================ + +.. literalinclude:: gnocchi.conf.sample + + .. include:: include/term-substitution.rst diff --git a/gnocchi/gendoc.py b/gnocchi/gendoc.py index 71ac1c76..f38ac32e 100644 --- a/gnocchi/gendoc.py +++ b/gnocchi/gendoc.py @@ -21,6 +21,7 @@ import sys import tempfile import jinja2 +from oslo_config import generator import six import six.moves import webob.request @@ -237,4 +238,13 @@ def setup(app): if six.PY2: content = content.encode("utf-8") f.write(content) + + config_output_file = 'doc/source/gnocchi.conf.sample' + app.info("Generating %s" % config_output_file) + generator.main([ + '--config-file', + '%s/gnocchi-config-generator.conf' % os.path.dirname(__file__), + '--output-file', config_output_file, + ]) + _RUN = True -- GitLab From b6ee8efaf343fc8878f5e35bd8ead2c47daa5861 Mon Sep 17 00:00:00 2001 From: gord chung Date: Tue, 16 Jan 2018 14:41:59 +0000 Subject: [PATCH 1199/1483] bump voluptuous requirement NotIn was added in 0.8.10: https://github.com/alecthomas/voluptuous/commit/f2241ce5edf6e16b3f3456f08f9b5bd7afe64a8d --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/requirements.txt b/requirements.txt index 37b7c413..ed334ba5 100644 --- a/requirements.txt +++ b/requirements.txt @@ -12,7 +12,7 @@ cotyledon>=1.5.0 six stevedore ujson -voluptuous>=0.6 +voluptuous>=0.8.10 werkzeug trollius; python_version < '3.4' tenacity>=4.6.0 -- GitLab From 70cc23c4289b97c8fb3e0f12d5dadc00549516f3 Mon Sep 17 00:00:00 2001 From: gord chung Date: Wed, 17 Jan 2018 21:25:09 +0000 Subject: [PATCH 1200/1483] set __ne__ method neither total_ordering nor python2 sets a default __ne__ value behaviuor opposite to __eq__. this only happens in python3[1] so we need to set this. [1] https://docs.python.org/3/whatsnew/3.0.html#operators-and-special-methods Fixes: #658 --- gnocchi/carbonara.py | 24 ++++++------- gnocchi/tests/test_carbonara.py | 60 +++++++++++++++++++++++++++++++++ 2 files changed, 72 insertions(+), 12 deletions(-) diff --git a/gnocchi/carbonara.py b/gnocchi/carbonara.py index 7de6cf33..f003d63e 100644 --- a/gnocchi/carbonara.py +++ b/gnocchi/carbonara.py @@ -19,6 +19,7 @@ import functools import itertools import math +import operator import random import re import struct @@ -487,27 +488,26 @@ class SplitKey(object): return hash(str(self.key.astype('datetime64[ns]')) + str(self.sampling.astype('timedelta64[ns]'))) - def __lt__(self, other): + def _compare(self, op, other): if isinstance(other, SplitKey): if self.sampling != other.sampling: raise TypeError( "Cannot compare %s with different sampling" % self.__class__.__name__) - return self.key < other.key + return op(self.key, other.key) if isinstance(other, numpy.datetime64): - return self.key < other + return op(self.key, other) raise TypeError("Cannot compare %r with %r" % (self, other)) + def __lt__(self, other): + return self._compare(operator.lt, other) + def __eq__(self, other): - if isinstance(other, SplitKey): - if self.sampling != other.sampling: - raise TypeError( - "Cannot compare %s with different sampling" % - self.__class__.__name__) - return self.key == other.key - if 
isinstance(other, numpy.datetime64): - return self.key == other - raise TypeError("Cannot compare %r with %r" % (self, other)) + return self._compare(operator.eq, other) + + def __ne__(self, other): + # neither total_ordering nor py2 sets ne as the opposite of eq + return self._compare(operator.ne, other) def __str__(self): return str(float(self)) diff --git a/gnocchi/tests/test_carbonara.py b/gnocchi/tests/test_carbonara.py index a3f06a1b..48e0317f 100644 --- a/gnocchi/tests/test_carbonara.py +++ b/gnocchi/tests/test_carbonara.py @@ -823,6 +823,66 @@ class TestAggregatedTimeSerie(base.BaseTestCase): self.assertGreaterEqual(key, numpy.datetime64("1970")) + def test_split_key_cmp(self): + dt1 = numpy.datetime64("2015-01-01T15:03") + dt1_1 = numpy.datetime64("2015-01-01T15:03") + dt2 = numpy.datetime64("2015-01-05T15:03") + td = numpy.timedelta64(60, 's') + + self.assertEqual( + carbonara.SplitKey.from_timestamp_and_sampling(dt1, td), + carbonara.SplitKey.from_timestamp_and_sampling(dt1, td)) + self.assertEqual( + carbonara.SplitKey.from_timestamp_and_sampling(dt1, td), + carbonara.SplitKey.from_timestamp_and_sampling(dt1_1, td)) + self.assertNotEqual( + carbonara.SplitKey.from_timestamp_and_sampling(dt1, td), + carbonara.SplitKey.from_timestamp_and_sampling(dt2, td)) + + self.assertLess( + carbonara.SplitKey.from_timestamp_and_sampling(dt1, td), + carbonara.SplitKey.from_timestamp_and_sampling(dt2, td)) + self.assertLessEqual( + carbonara.SplitKey.from_timestamp_and_sampling(dt1, td), + carbonara.SplitKey.from_timestamp_and_sampling(dt1, td)) + + self.assertGreater( + carbonara.SplitKey.from_timestamp_and_sampling(dt2, td), + carbonara.SplitKey.from_timestamp_and_sampling(dt1, td)) + self.assertGreaterEqual( + carbonara.SplitKey.from_timestamp_and_sampling(dt2, td), + carbonara.SplitKey.from_timestamp_and_sampling(dt2, td)) + + def test_split_key_cmp_negative(self): + dt1 = numpy.datetime64("2015-01-01T15:03") + dt1_1 = numpy.datetime64("2015-01-01T15:03") + dt2 = 
numpy.datetime64("2015-01-05T15:03") + td = numpy.timedelta64(60, 's') + + self.assertFalse( + carbonara.SplitKey.from_timestamp_and_sampling(dt1, td) != + carbonara.SplitKey.from_timestamp_and_sampling(dt1, td)) + self.assertFalse( + carbonara.SplitKey.from_timestamp_and_sampling(dt1, td) != + carbonara.SplitKey.from_timestamp_and_sampling(dt1_1, td)) + self.assertFalse( + carbonara.SplitKey.from_timestamp_and_sampling(dt1, td) == + carbonara.SplitKey.from_timestamp_and_sampling(dt2, td)) + + self.assertFalse( + carbonara.SplitKey.from_timestamp_and_sampling(dt1, td) >= + carbonara.SplitKey.from_timestamp_and_sampling(dt2, td)) + self.assertFalse( + carbonara.SplitKey.from_timestamp_and_sampling(dt1, td) > + carbonara.SplitKey.from_timestamp_and_sampling(dt1, td)) + + self.assertFalse( + carbonara.SplitKey.from_timestamp_and_sampling(dt2, td) <= + carbonara.SplitKey.from_timestamp_and_sampling(dt1, td)) + self.assertFalse( + carbonara.SplitKey.from_timestamp_and_sampling(dt2, td) < + carbonara.SplitKey.from_timestamp_and_sampling(dt2, td)) + def test_split_key_next(self): self.assertEqual( numpy.datetime64("2015-03-06"), -- GitLab From da5811b289c5624d743c5985589cc9e1a94dae46 Mon Sep 17 00:00:00 2001 From: Chandan Kumar Date: Wed, 17 Jan 2018 18:46:12 +0530 Subject: [PATCH 1201/1483] Remove bundled in tree tempest plugin * https://review.openstack.org/532773 merged the gnocchi tempest plugin into telemetry tempest plugin. Let's remove it in the favor of same. * https://review.rdoproject.org/r/11161 packages the telemetry tempest plugin. * Keeping /gnocchi/tests/functional_live/gabbits{live.yaml, search-resource.yaml} as they are shared with tempest as well as for ensuring gnocchi-api/metricd. 
--- gnocchi/tempest/__init__.py | 0 gnocchi/tempest/config.py | 33 -------- gnocchi/tempest/plugin.py | 42 ---------- gnocchi/tempest/scenario/__init__.py | 110 --------------------------- setup.cfg | 3 - 5 files changed, 188 deletions(-) delete mode 100644 gnocchi/tempest/__init__.py delete mode 100644 gnocchi/tempest/config.py delete mode 100644 gnocchi/tempest/plugin.py delete mode 100644 gnocchi/tempest/scenario/__init__.py diff --git a/gnocchi/tempest/__init__.py b/gnocchi/tempest/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/gnocchi/tempest/config.py b/gnocchi/tempest/config.py deleted file mode 100644 index 74d7ef3e..00000000 --- a/gnocchi/tempest/config.py +++ /dev/null @@ -1,33 +0,0 @@ -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from oslo_config import cfg - -service_option = cfg.BoolOpt('gnocchi', - default=True, - help="Whether or not Gnocchi is expected to be" - "available") - -metric_group = cfg.OptGroup(name='metric', - title='Metric Service Options') - -metric_opts = [ - cfg.StrOpt('catalog_type', - default='metric', - help="Catalog type of the Metric service."), - cfg.StrOpt('endpoint_type', - default='publicURL', - choices=['public', 'admin', 'internal', - 'publicURL', 'adminURL', 'internalURL'], - help="The endpoint type to use for the metric service."), -] diff --git a/gnocchi/tempest/plugin.py b/gnocchi/tempest/plugin.py deleted file mode 100644 index 3410471f..00000000 --- a/gnocchi/tempest/plugin.py +++ /dev/null @@ -1,42 +0,0 @@ -# -*- encoding: utf-8 -*- -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from __future__ import absolute_import - -import os - -from tempest.test_discover import plugins - -import gnocchi -from gnocchi.tempest import config as tempest_config - - -class GnocchiTempestPlugin(plugins.TempestPlugin): - def load_tests(self): - base_path = os.path.split(os.path.dirname( - os.path.abspath(gnocchi.__file__)))[0] - test_dir = "gnocchi/tempest" - full_test_dir = os.path.join(base_path, test_dir) - return full_test_dir, base_path - - def register_opts(self, conf): - conf.register_opt(tempest_config.service_option, - group='service_available') - conf.register_group(tempest_config.metric_group) - conf.register_opts(tempest_config.metric_opts, group='metric') - - def get_opt_lists(self): - return [(tempest_config.metric_group.name, - tempest_config.metric_opts), - ('service_available', [tempest_config.service_option])] diff --git a/gnocchi/tempest/scenario/__init__.py b/gnocchi/tempest/scenario/__init__.py deleted file mode 100644 index f662dc7b..00000000 --- a/gnocchi/tempest/scenario/__init__.py +++ /dev/null @@ -1,110 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from __future__ import absolute_import - -import os -import unittest - -from gabbi import runner -from gabbi import suitemaker -from gabbi import utils -import six.moves.urllib.parse as urlparse -from tempest import config -import tempest.test - -CONF = config.CONF - -TEST_DIR = os.path.join(os.path.dirname(__file__), '..', '..', - 'tests', 'functional_live', 'gabbits') - - -class GnocchiGabbiTest(tempest.test.BaseTestCase): - credentials = ['admin'] - - TIMEOUT_SCALING_FACTOR = 5 - - @classmethod - def skip_checks(cls): - super(GnocchiGabbiTest, cls).skip_checks() - if not CONF.service_available.gnocchi: - raise cls.skipException("Gnocchi support is required") - - def _do_test(self, filename): - token = self.os_admin.auth_provider.get_token() - url = self.os_admin.auth_provider.base_url( - {'service': CONF.metric.catalog_type, - 'endpoint_type': CONF.metric.endpoint_type}) - - parsed_url = urlparse.urlsplit(url) - prefix = parsed_url.path.rstrip('/') # turn it into a prefix - if parsed_url.scheme == 'https': - port = 443 - require_ssl = True - else: - port = 80 - require_ssl = False - host = parsed_url.hostname - if parsed_url.port: - port = parsed_url.port - - os.environ["GNOCCHI_SERVICE_TOKEN"] = token - os.environ["GNOCCHI_AUTHORIZATION"] = "not used" - - with open(os.path.join(TEST_DIR, filename)) as f: - suite_dict = utils.load_yaml(f) - suite_dict.setdefault('defaults', {})['ssl'] = require_ssl - test_suite = suitemaker.test_suite_from_dict( - loader=unittest.defaultTestLoader, - test_base_name="gabbi", - suite_dict=suite_dict, - test_directory=TEST_DIR, - host=host, port=port, - fixture_module=None, - intercept=None, - prefix=prefix, - handlers=runner.initialize_handlers([]), - test_loader_name="tempest") - - # NOTE(sileht): We hide stdout/stderr and reraise the failure - # manually, tempest will print it itself. 
- with open(os.devnull, 'w') as stream: - result = unittest.TextTestRunner( - stream=stream, verbosity=0, failfast=True, - ).run(test_suite) - - if not result.wasSuccessful(): - failures = (result.errors + result.failures + - result.unexpectedSuccesses) - if failures: - test, bt = failures[0] - name = test.test_data.get('name', test.id()) - msg = 'From test "%s" :\n%s' % (name, bt) - self.fail(msg) - - self.assertTrue(result.wasSuccessful()) - - -def test_maker(name, filename): - def test(self): - self._do_test(filename) - test.__name__ = name - return test - - -# Create one scenario per yaml file -for filename in os.listdir(TEST_DIR): - if not filename.endswith('.yaml'): - continue - name = "test_%s" % filename[:-5].lower().replace("-", "_") - setattr(GnocchiGabbiTest, name, - test_maker(name, filename)) diff --git a/setup.cfg b/setup.cfg index d308df88..77a6bd69 100644 --- a/setup.cfg +++ b/setup.cfg @@ -133,9 +133,6 @@ oslo.config.opts = oslo.config.opts.defaults = gnocchi = gnocchi.opts:set_defaults -tempest.test_plugins = - gnocchi_tests = gnocchi.tempest.plugin:GnocchiTempestPlugin - [build_sphinx] all_files = 1 build-dir = doc/build -- GitLab From 7016ffe470db4842a5280ece633007ae202909c6 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Thu, 11 Jan 2018 15:42:20 +0100 Subject: [PATCH 1202/1483] Introduce Aggregation object This introduces a new Aggregation objects (a named tuple) that stores the information needed to retrieve any aggregation from the storage driver. The goal is make most methods accept this Aggregation object as argument instead of a list of kwargs and to replace the archive policy usage in the storage driver. 
--- gnocchi/aggregation.py | 22 ++++++++++++ gnocchi/archive_policy.py | 14 ++++++++ gnocchi/rest/aggregates/processor.py | 7 ++-- gnocchi/storage/__init__.py | 52 ++++++++++++++-------------- 4 files changed, 67 insertions(+), 28 deletions(-) create mode 100644 gnocchi/aggregation.py diff --git a/gnocchi/aggregation.py b/gnocchi/aggregation.py new file mode 100644 index 00000000..a1004364 --- /dev/null +++ b/gnocchi/aggregation.py @@ -0,0 +1,22 @@ +# -*- encoding: utf-8 -*- +# +# Copyright © 2018 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+import collections + + +Aggregation = collections.namedtuple( + "Aggregation", + ["method", "granularity", "timespan"], +) diff --git a/gnocchi/archive_policy.py b/gnocchi/archive_policy.py index d823d32a..afa248ff 100644 --- a/gnocchi/archive_policy.py +++ b/gnocchi/archive_policy.py @@ -23,6 +23,7 @@ from oslo_config import cfg from oslo_config import types import six +from gnocchi import aggregation from gnocchi import utils @@ -86,6 +87,19 @@ class ArchivePolicy(object): else: self.aggregation_methods = aggregation_methods + def get_aggregation(self, method, granularity): + # Find the timespan + for d in self.definition: + if d.granularity == granularity: + return aggregation.Aggregation( + method, d.granularity, d.timespan) + + @property + def aggregations(self): + return [aggregation.Aggregation(method, d.granularity, d.timespan) + for method in self.aggregation_methods + for d in self.definition] + @property def aggregation_methods(self): if '*' in self._aggregation_methods: diff --git a/gnocchi/rest/aggregates/processor.py b/gnocchi/rest/aggregates/processor.py index 9971c00d..8765e3db 100644 --- a/gnocchi/rest/aggregates/processor.py +++ b/gnocchi/rest/aggregates/processor.py @@ -51,9 +51,12 @@ class MetricReference(object): self.aggregation == other.aggregation) -def _get_measures_timeserie(storage, ref, *args, **kwargs): +def _get_measures_timeserie(storage, ref, granularity, *args, **kwargs): return (ref, storage._get_measures_timeserie( - ref.metric, ref.aggregation, *args, **kwargs)) + ref.metric, + ref.metric.archive_policy.get_aggregation( + ref.aggregation, granularity), + *args, **kwargs)) def get_measures(storage, references, operations, diff --git a/gnocchi/storage/__init__.py b/gnocchi/storage/__init__.py index 53b61b3b..3aa26901 100644 --- a/gnocchi/storage/__init__.py +++ b/gnocchi/storage/__init__.py @@ -209,11 +209,18 @@ class StorageDriver(object): :param aggregation: The type of aggregation to retrieve. 
:param resample: The granularity to resample to. """ + + aggregations = [] + for g in sorted(granularities, reverse=True): + agg = metric.archive_policy.get_aggregation(aggregation, g) + if agg is None: + raise AggregationDoesNotExist(metric, aggregation, g) + aggregations.append(agg) + agg_timeseries = utils.parallel_map( self._get_measures_timeserie, - ((metric, aggregation, granularity, - from_timestamp, to_timestamp) - for granularity in sorted(granularities, reverse=True))) + ((metric, ag, from_timestamp, to_timestamp) + for ag in aggregations)) if resample: agg_timeseries = list(map(lambda agg: agg.resample(resample), @@ -236,44 +243,34 @@ class StorageDriver(object): metric.id, aggregation, key.sampling, key) return results - def _get_measures_timeserie(self, metric, - aggregation, granularity, + def _get_measures_timeserie(self, metric, aggregation, from_timestamp=None, to_timestamp=None): - - # Find the timespan - for d in metric.archive_policy.definition: - if d.granularity == granularity: - timespan = d.timespan - break - else: - raise AggregationDoesNotExist(metric, aggregation, granularity) - try: all_keys = self._list_split_keys_for_metric( - metric, aggregation, granularity) + metric, aggregation.method, aggregation.granularity) except MetricDoesNotExist: return carbonara.AggregatedTimeSerie( - sampling=granularity, - aggregation_method=aggregation) + sampling=aggregation.granularity, + aggregation_method=aggregation.method) if from_timestamp: from_timestamp = carbonara.SplitKey.from_timestamp_and_sampling( - from_timestamp, granularity) + from_timestamp, aggregation.granularity) if to_timestamp: to_timestamp = carbonara.SplitKey.from_timestamp_and_sampling( - to_timestamp, granularity) + to_timestamp, aggregation.granularity) keys = [key for key in sorted(all_keys) if ((not from_timestamp or key >= from_timestamp) and (not to_timestamp or key <= to_timestamp))] timeseries = self._get_measures_and_unserialize( - metric, keys, aggregation) + metric, 
keys, aggregation.method) ts = carbonara.AggregatedTimeSerie.from_timeseries( - sampling=granularity, - aggregation_method=aggregation, + sampling=aggregation.granularity, + aggregation_method=aggregation.method, timeseries=timeseries) # We need to truncate because: # - If the driver is not in WRITE_FULL mode, then it might read too @@ -283,8 +280,8 @@ class StorageDriver(object): # resized, we might still have too much points stored, which will be # deleted at a later point when new points will be procecessed. # Truncate to be sure we don't return them. - if timespan is not None: - ts.truncate(timespan) + if aggregation.timespan is not None: + ts.truncate(aggregation.timespan) return ts def _store_timeserie_split(self, metric, key, split, @@ -589,9 +586,12 @@ class StorageDriver(object): def find_measure(self, metric, predicate, granularity, aggregation="mean", from_timestamp=None, to_timestamp=None): + agg = metric.archive_policy.get_aggregation(aggregation, granularity) + if agg is None: + raise AggregationDoesNotExist(metric, aggregation, granularity) + timeserie = self._get_measures_timeserie( - metric, aggregation, granularity, - from_timestamp, to_timestamp) + metric, agg, from_timestamp, to_timestamp) values = timeserie.fetch(from_timestamp, to_timestamp) return [(timestamp, g, value) for timestamp, g, value in values -- GitLab From 6bfe869bb9c90b9b9d68bd08f8117faa044c8bc6 Mon Sep 17 00:00:00 2001 From: gord chung Date: Wed, 17 Jan 2018 19:48:10 +0000 Subject: [PATCH 1203/1483] cleanup only when new object currently, we scan for objects to cleanup or to rewrite. this is unnecessary i/o as the policies requires, at least, 1hrs worth of data (at high policy), and at lower policies requires days worth of data. it does not make sense for us to check for objects to clean for every measure. this leverages the fact we can compute when we need to rewrite objects without scanning by computing new split vs old split. 
using this knowledge, we scan only if we know a new object is created. only in this case will we check to see what objects need to be deleted or rewritten. this will still end up scanning sometimes even though no work is needed for non-append drivers (see: not ceph) but this should lower the false scans significantly. Fixes #608 --- gnocchi/storage/__init__.py | 78 ++++++++++++++++++++----------------- 1 file changed, 43 insertions(+), 35 deletions(-) diff --git a/gnocchi/storage/__init__.py b/gnocchi/storage/__init__.py index 3aa26901..e4678883 100644 --- a/gnocchi/storage/__init__.py +++ b/gnocchi/storage/__init__.py @@ -325,8 +325,7 @@ class StorageDriver(object): return self._store_metric_measures(metric, key, aggregation, data, offset=offset) - def _add_measures(self, aggregation, archive_policy_def, - metric, grouped_serie, + def _add_measures(self, aggregation, ap_def, metric, grouped_serie, previous_oldest_mutable_timestamp, oldest_mutable_timestamp): @@ -337,8 +336,7 @@ class StorageDriver(object): aggregation_to_compute = aggregation ts = carbonara.AggregatedTimeSerie.from_grouped_serie( - grouped_serie, archive_policy_def.granularity, - aggregation_to_compute) + grouped_serie, ap_def.granularity, aggregation_to_compute) # Don't do anything if the timeserie is empty if not ts: @@ -351,47 +349,57 @@ class StorageDriver(object): and previous_oldest_mutable_timestamp is not None ) - if archive_policy_def.timespan or need_rewrite: - existing_keys = self._list_split_keys_for_metric( - metric, aggregation, archive_policy_def.granularity) - - # First delete old splits - if archive_policy_def.timespan: - oldest_point_to_keep = ts.last - archive_policy_def.timespan + if ap_def.timespan: + oldest_point_to_keep = ts.last - ap_def.timespan oldest_key_to_keep = ts.get_split_key(oldest_point_to_keep) - for key in list(existing_keys): - # NOTE(jd) Only delete if the key is strictly inferior to - # the timestamp; we don't delete any timeserie split that - # contains our 
timestamp, so we prefer to keep a bit more - # than deleting too much - if key < oldest_key_to_keep: - self._delete_metric_measures(metric, key, aggregation) - existing_keys.remove(key) else: oldest_point_to_keep = None oldest_key_to_keep = None - # Rewrite all read-only splits just for fun (and compression). This - # only happens if `previous_oldest_mutable_timestamp' exists, which - # means we already wrote some splits at some point – so this is not the - # first time we treat this timeserie. - if need_rewrite: + if previous_oldest_mutable_timestamp and (ap_def.timespan or + need_rewrite): previous_oldest_mutable_key = ts.get_split_key( previous_oldest_mutable_timestamp) oldest_mutable_key = ts.get_split_key(oldest_mutable_timestamp) + # only cleanup if there is a new object, as there must be a new + # object for an old object to be cleanup if previous_oldest_mutable_key != oldest_mutable_key: - for key in existing_keys: - if previous_oldest_mutable_key <= key < oldest_mutable_key: - LOG.debug( - "Compressing previous split %s (%s) for metric %s", - key, aggregation, metric) - # NOTE(jd) Rewrite it entirely for fun (and later for - # compression). For that, we just pass None as split. 
- self._store_timeserie_split( - metric, key, - None, aggregation, oldest_mutable_timestamp, - oldest_point_to_keep) + existing_keys = sorted(self._list_split_keys_for_metric( + metric, aggregation, ap_def.granularity)) + + # First, check for old splits to delete + if ap_def.timespan: + oldest_point_to_keep = ts.last - ap_def.timespan + oldest_key_to_keep = ts.get_split_key(oldest_point_to_keep) + for key in list(existing_keys): + # NOTE(jd) Only delete if the key is strictly inferior + # the timestamp; we don't delete any timeserie split + # that contains our timestamp, so we prefer to keep a + # bit more than deleting too much + if key >= oldest_key_to_keep: + break + self._delete_metric_measures(metric, key, aggregation) + existing_keys.remove(key) + + # Rewrite all read-only splits just for fun (and compression). + # This only happens if `previous_oldest_mutable_timestamp' + # exists, which means we already wrote some splits at some + # point – so this is not the first time we treat this + # timeserie. + if need_rewrite: + for key in existing_keys: + if previous_oldest_mutable_key <= key: + if key >= oldest_mutable_key: + break + LOG.debug("Compressing previous split %s (%s) for " + "metric %s", key, aggregation, metric) + # NOTE(jd) Rewrite it entirely for fun (and later + # for compression). For that, we just pass None as + # split. 
+ self._store_timeserie_split( + metric, key, None, aggregation, + oldest_mutable_timestamp, oldest_point_to_keep) for key, split in ts.split(): if oldest_key_to_keep is None or key >= oldest_key_to_keep: -- GitLab From 962eb864fc1a2af7520287f5ce573fb3a37a7475 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Fri, 19 Jan 2018 11:53:09 +0100 Subject: [PATCH 1204/1483] ceph: understandable error on misconfiguration Close #657 --- gnocchi/common/ceph.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/gnocchi/common/ceph.py b/gnocchi/common/ceph.py index b649cf00..407aa44a 100644 --- a/gnocchi/common/ceph.py +++ b/gnocchi/common/ceph.py @@ -65,7 +65,10 @@ def create_rados_connection(conf): conn = rados.Rados(conffile=conf.ceph_conffile, rados_id=conf.ceph_username, conf=options) - conn.connect() + try: + conn.connect() + except rados.InvalidArgumentError: + raise Exception("Unable to connect to ceph, check the configuration") ioctx = conn.open_ioctx(conf.ceph_pool) return conn, ioctx -- GitLab From 58610f6b861574f5abd90043ca516c3afd8066a5 Mon Sep 17 00:00:00 2001 From: gord chung Date: Mon, 22 Jan 2018 23:42:17 +0000 Subject: [PATCH 1205/1483] remove old leftover note all the xfail tests associated with original commit a952877a34a5027db0d1111dbfcca67ad47dc8ed have been fixed/removed. clear note to avoid noise. --- gnocchi/tests/functional/gabbits/resource.yaml | 5 ----- 1 file changed, 5 deletions(-) diff --git a/gnocchi/tests/functional/gabbits/resource.yaml b/gnocchi/tests/functional/gabbits/resource.yaml index 20fc4b48..ad23d462 100644 --- a/gnocchi/tests/functional/gabbits/resource.yaml +++ b/gnocchi/tests/functional/gabbits/resource.yaml @@ -41,11 +41,6 @@ tests: archive_policy_name: medium status: 201 -# The top of the API is a bit confusing and presents some URIs which -# are not very useful. This isn't strictly a bug but does represent -# a measure of unfriendliness that we may wish to address. Thus the -# xfails. 
- - name: root of all GET: / response_headers: -- GitLab From 77a27cfc3742b9028ee51015bed81106806a0fe4 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Thu, 25 Jan 2018 10:23:14 +0100 Subject: [PATCH 1206/1483] redis: make socket_timeout optional Related: #683 --- gnocchi/common/redis.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/gnocchi/common/redis.py b/gnocchi/common/redis.py index d7980405..275bd3ef 100644 --- a/gnocchi/common/redis.py +++ b/gnocchi/common/redis.py @@ -147,7 +147,7 @@ def get_client(conf): sentinel_hosts.insert(0, (kwargs['host'], kwargs['port'])) sentinel_server = sentinel.Sentinel( sentinel_hosts, - socket_timeout=kwargs['socket_timeout']) + socket_timeout=kwargs.get('socket_timeout')) sentinel_name = kwargs['sentinel'] del kwargs['sentinel'] if 'sentinel_fallback' in kwargs: -- GitLab From 0620a0a65ece2bef27dc68de59c3a1578a45c7eb Mon Sep 17 00:00:00 2001 From: Lars Kellogg-Stedman Date: Sat, 27 Jan 2018 17:02:53 -0500 Subject: [PATCH 1207/1483] docs: grammar fixes for statsd.rst this commit fixes some minor grammatic issues with the statsd daemon documentation. --- doc/source/statsd.rst | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/doc/source/statsd.rst b/doc/source/statsd.rst index 27629279..969ce179 100644 --- a/doc/source/statsd.rst +++ b/doc/source/statsd.rst @@ -12,8 +12,8 @@ listen to |metrics| sent over the network, named `gnocchi-statsd`. .. _`Statsd`: https://github.com/etsy/statsd/ -How It Works? -============= +How Does It Work? +================= In order to enable statsd support in Gnocchi, you need to configure the `[statsd]` option group in the configuration file. You need to provide a |resource| ID that will be used as the main generic |resource| where all the @@ -25,14 +25,14 @@ All the |metrics| will be created dynamically as the |metrics| are sent to `gnocchi-statsd`, and attached with the provided name to the |resource| ID you configured. 
-The `gnocchi-statsd` may be scaled, but trade-offs have to been made due to the +The `gnocchi-statsd` may be scaled, but trade-offs have been made due to the nature of the statsd protocol. That means that if you use |metrics| of type `counter`_ or sampling (`c` in the protocol), you should always send those |metrics| to the same daemon – or not use them at all. The other supported -types (`timing`_ and `gauges`_) does not suffer this limitation, but be aware -that you might have more |measures| that expected if you send the same |metric| -to different `gnocchi-statsd` server, as their cache nor their flush delay are -synchronized. +types (`timing`_ and `gauges`_) do not suffer this limitation, but be aware +that you might have more |measures| than expected if you send the same |metric| +to different `gnocchi-statsd` servers, as neither their cache nor their flush +delay are synchronized. .. _`counter`: https://github.com/etsy/statsd/blob/master/docs/metric_types.md#counting .. _`timing`: https://github.com/etsy/statsd/blob/master/docs/metric_types.md#timing -- GitLab From 02936ff4a950c7c3151dc7f70793f91e2225d926 Mon Sep 17 00:00:00 2001 From: Lars Kellogg-Stedman Date: Mon, 29 Jan 2018 08:40:25 -0500 Subject: [PATCH 1208/1483] update documentation regarding resource ids Add information about the special handling of resource ids. Closes #698 --- doc/source/rest.j2 | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/doc/source/rest.j2 b/doc/source/rest.j2 index 6f113e50..4d895307 100644 --- a/doc/source/rest.j2 +++ b/doc/source/rest.j2 @@ -387,12 +387,19 @@ To create a generic |resource|: {{ scenarios['create-resource-generic']['doc'] }} -The *id*, *user_id* and *project_id* attributes must be an UUID. The timestamp +The *user_id* and *project_id* attributes must be UUIDs. The timestamp describing the lifespan of the |resource| are optional, and *started_at* is by default set to the current timestamp. 
-It's possible to retrieve the |resource| by the URL provided in the `Location` -header. +The *id* attribute may be a UUID or some other arbitrary string. If it is +not a UUID, the original value will be stored in the *original_resource_id* +attribute and Gnocchi will generate a new UUID that is unique for the user. +That is, if two users create resources with the same non-UUID *id* field, +the resources will have different UUIDs. + +You may use either of the *id* or the *original_resource_id* attributes to +refer to the |resource|. The value returned by the create operation +includes a `Location` header referencing the *id*. Non-generic resources ~~~~~~~~~~~~~~~~~~~~~ -- GitLab From f39195455542bd46c76709e4f2f11ec73e2b6470 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Tue, 16 Jan 2018 11:17:48 +0100 Subject: [PATCH 1209/1483] api: don't raise 400 in MeasuresListSchema voluptous schema must not abort() but raise an exception otherwise, we can't mix them in voluptous.Any/voluptous.All/... 
--- gnocchi/rest/api.py | 40 ++++++++++--------- .../functional/gabbits/batch-measures.yaml | 2 +- gnocchi/tests/functional/gabbits/metric.yaml | 12 ++++++ 3 files changed, 35 insertions(+), 19 deletions(-) diff --git a/gnocchi/rest/api.py b/gnocchi/rest/api.py index 6632915d..c6cbaf2f 100644 --- a/gnocchi/rest/api.py +++ b/gnocchi/rest/api.py @@ -148,18 +148,24 @@ def deserialize(expected_content_types=None): return params -def validate(schema, data, required=True): +def validate(schema, data, required=True, detailed_exc=False): try: return voluptuous.Schema(schema, required=required)(data) except voluptuous.Error as e: - abort(400, "Invalid input: %s" % e) + if detailed_exc: + abort(400, {"cause": "Invalid input", + "detail": str(e)}) + else: + abort(400, "Invalid input: %s" % e) def deserialize_and_validate(schema, required=True, - expected_content_types=None): + expected_content_types=None, + detailed_exc=False): return validate(schema, deserialize(expected_content_types=expected_content_types), - required) + required, + detailed_exc) def Timespan(value): @@ -434,14 +440,14 @@ def MeasuresListSchema(measures): try: times = utils.to_timestamps([m['timestamp'] for m in measures]) except TypeError: - abort(400, "Invalid format for measures") + raise voluptuous.Invalid("unexpected measures format") except ValueError as e: - abort(400, "Invalid input for timestamp: %s" % e) + raise voluptuous.Invalid("unexpected timestamp '%s'" % e) try: values = [float(i['value']) for i in measures] except Exception: - abort(400, "Invalid input for a value") + raise voluptuous.Invalid("unexpected measures value") return (incoming.Measure(t, v) for t, v in six.moves.zip(times, values)) @@ -462,15 +468,13 @@ class MetricController(rest.RestController): self.enforce_metric("get metric") return self.metric - @pecan.expose() + @pecan.expose('json') def post_measures(self): self.enforce_metric("post measures") - params = deserialize() - if not isinstance(params, list): - abort(400, 
"Invalid input for measures") - if params: - pecan.request.incoming.add_measures( - self.metric.id, MeasuresListSchema(params)) + measures = deserialize_and_validate(MeasuresListSchema, + detailed_exc=True) + if measures: + pecan.request.incoming.add_measures(self.metric.id, measures) pecan.response.status = 202 @pecan.expose('json') @@ -2105,10 +2109,10 @@ class PrometheusWriteController(rest.RestController): attrs.get("instance", "none")) name = attrs['__name__'] if ts.samples: - measures_by_rid[original_rid][name] = ( - MeasuresListSchema([{'timestamp': s.timestamp_ms / 1000.0, - 'value': s.value} - for s in ts.samples])) + data = [{'timestamp': s.timestamp_ms / 1000.0, + 'value': s.value} for s in ts.samples] + measures_by_rid[original_rid][name] = validate( + MeasuresListSchema, data, detailed_exc=True) creator = pecan.request.auth_helper.get_current_user(pecan.request) diff --git a/gnocchi/tests/functional/gabbits/batch-measures.yaml b/gnocchi/tests/functional/gabbits/batch-measures.yaml index 4a0ef279..61af5dd8 100644 --- a/gnocchi/tests/functional/gabbits/batch-measures.yaml +++ b/gnocchi/tests/functional/gabbits/batch-measures.yaml @@ -275,4 +275,4 @@ tests: - [ "2015-03-06T14:34:12", 12] status: 400 response_strings: - - "Invalid format for measures" + - "unexpected measures format" diff --git a/gnocchi/tests/functional/gabbits/metric.yaml b/gnocchi/tests/functional/gabbits/metric.yaml index 89e7a3bc..6f59706d 100644 --- a/gnocchi/tests/functional/gabbits/metric.yaml +++ b/gnocchi/tests/functional/gabbits/metric.yaml @@ -249,6 +249,18 @@ tests: value: -23 status: 202 + - name: push measurements with wrong measure objects + POST: /v1/metric/$HISTORY['list valid metrics'].$RESPONSE['$[0].id']/measures + request_headers: + accept: application/json + data: + - [ "2015-03-06T14:33:57", 43.1] + - [ "2015-03-06T14:34:12", 12] + status: 400 + response_json_paths: + $.description.cause: "Invalid input" + $.description.detail: "unexpected measures format" + - name: 
refresh metric GET: /v1/metric/$HISTORY['list valid metrics'].$RESPONSE['$[0].id']/measures?refresh=true -- GitLab From 081e0513e68fb39630f2abf22fac0e617bce6af6 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Wed, 17 Jan 2018 10:16:42 +0100 Subject: [PATCH 1210/1483] batch: allow to pass archive policy/unit This change allow to pass the archive policy/unit when we batch measures for metrics. --- doc/source/rest.yaml | 22 +++++--- gnocchi/rest/api.py | 47 +++++++++++----- .../functional/gabbits/batch-measures.yaml | 56 +++++++++++++++++-- gnocchi/tests/functional/gabbits/metric.yaml | 5 +- .../notes/ap-in-batch-d83f6aa163d200e9.yaml | 5 ++ 5 files changed, 107 insertions(+), 28 deletions(-) create mode 100644 releasenotes/notes/ap-in-batch-d83f6aa163d200e9.yaml diff --git a/doc/source/rest.yaml b/doc/source/rest.yaml index e95a3623..b54c56fc 100644 --- a/doc/source/rest.yaml +++ b/doc/source/rest.yaml @@ -584,16 +584,22 @@ { "{{ scenarios['create-resource-instance-with-dynamic-metrics']['response'].json['id'] }}": { - "cpu.util": [ - { "timestamp": "2014-10-06T14:34:12", "value": 12 }, - { "timestamp": "2014-10-06T14:34:20", "value": 2 } - ] + "cpu.util": { + "archive_policy_name": "{{ scenarios['create-archive-policy']['response'].json['name'] }}", + "measures": [ + { "timestamp": "2014-10-06T14:34:12", "value": 12 }, + { "timestamp": "2014-10-06T14:34:20", "value": 2 } + ] + } }, "{{ scenarios['create-resource-instance-with-metrics']['response'].json['id'] }}": { - "cpu.util": [ - { "timestamp": "2014-10-06T14:34:12", "value": 6 }, - { "timestamp": "2014-10-06T14:34:20", "value": 25 } - ] + "cpu.util": { + "archive_policy_name": "{{ scenarios['create-archive-policy']['response'].json['name'] }}", + "measures": [ + { "timestamp": "2014-10-06T14:34:12", "value": 6 }, + { "timestamp": "2014-10-06T14:34:20", "value": 25 } + ] + } } } diff --git a/gnocchi/rest/api.py b/gnocchi/rest/api.py index c6cbaf2f..01d4fc2f 100644 --- a/gnocchi/rest/api.py +++ 
b/gnocchi/rest/api.py @@ -151,10 +151,11 @@ def deserialize(expected_content_types=None): def validate(schema, data, required=True, detailed_exc=False): try: return voluptuous.Schema(schema, required=required)(data) - except voluptuous.Error as e: + except voluptuous.Invalid as e: if detailed_exc: - abort(400, {"cause": "Invalid input", - "detail": str(e)}) + abort(400, {"cause": "Attribute value error", + "reason": str(e), + "detail": e.path}) else: abort(400, "Invalid input: %s" % e) @@ -1546,16 +1547,32 @@ class SearchMetricController(rest.RestController): class ResourcesMetricsMeasuresBatchController(rest.RestController): + + @staticmethod + def BackwardCompatibleMeasuresList(v): + v = voluptuous.Schema( + voluptuous.Any(MeasuresListSchema, + {voluptuous.Optional("archive_policy_name"): + six.text_type, + voluptuous.Optional("unit"): + six.text_type, + "measures": MeasuresListSchema}), + required=True)(v) + if isinstance(v, dict): + return v + else: + # Old format + return {"measures": v} + @pecan.expose('json') def post(self, create_metrics=False): creator = pecan.request.auth_helper.get_current_user( pecan.request) MeasuresBatchSchema = voluptuous.Schema( {functools.partial(ResourceID, creator=creator): - {six.text_type: MeasuresListSchema}} - ) - - body = deserialize_and_validate(MeasuresBatchSchema) + {six.text_type: self.BackwardCompatibleMeasuresList}}) + body = deserialize_and_validate(MeasuresBatchSchema, + detailed_exc=True) known_metrics = [] unknown_metrics = [] @@ -1575,9 +1592,9 @@ class ResourcesMetricsMeasuresBatchController(rest.RestController): all_metrics[metric.resource_id].append(metric) for original_resource_id, resource_id in body: - body_by_rid[resource_id] = body[(original_resource_id, - resource_id)] - names = list(body[(original_resource_id, resource_id)].keys()) + r = body[(original_resource_id, resource_id)] + body_by_rid[resource_id] = r + names = list(r.keys()) metrics = all_metrics[resource_id] known_names = [m.name for m in 
metrics] @@ -1585,9 +1602,11 @@ class ResourcesMetricsMeasuresBatchController(rest.RestController): already_exists_names = [] for name in names: if name not in known_names: - metric = MetricsController.MetricSchema({ - "name": name - }) + metric_data = {"name": name} + for attr in ["archive_policy_name", "unit"]: + if attr in r[name]: + metric_data[attr] = r[name][attr] + metric = MetricsController.MetricSchema(metric_data) try: m = pecan.request.indexer.create_metric( uuid.uuid4(), @@ -1641,7 +1660,7 @@ class ResourcesMetricsMeasuresBatchController(rest.RestController): pecan.request.incoming.add_measures_batch( dict((metric.id, - body_by_rid[metric.resource_id][metric.name]) + body_by_rid[metric.resource_id][metric.name]["measures"]) for metric in known_metrics)) pecan.response.status = 202 diff --git a/gnocchi/tests/functional/gabbits/batch-measures.yaml b/gnocchi/tests/functional/gabbits/batch-measures.yaml index 61af5dd8..ce91accc 100644 --- a/gnocchi/tests/functional/gabbits/batch-measures.yaml +++ b/gnocchi/tests/functional/gabbits/batch-measures.yaml @@ -180,6 +180,34 @@ tests: response_headers: content-length: 0 + - name: push measurements to unknown named metrics and create it with new format + POST: /v1/batch/resources/metrics/measures?create_metrics=true + data: + 46c9418d-d63b-4cdd-be89-8f57ffc5952e: + newformat: + archive_policy_name: simple + unit: "ks" + measures: + - timestamp: "2015-03-06T14:33:57" + value: 43.1 + - timestamp: "2015-03-06T14:34:12" + value: 42 + auto.newformat: + measures: + - timestamp: "2015-03-06T14:33:57" + value: 43.1 + - timestamp: "2015-03-06T14:34:12" + value: 24 + auto.test: + measures: + - timestamp: "2015-03-06T14:33:57" + value: 43.1 + - timestamp: "2015-03-06T14:35:12" + value: 24 + status: 202 + response_headers: + content-length: 0 + - name: get created metric to check creation GET: /v1/resource/generic/46c9418d-d63b-4cdd-be89-8f57ffc5952e/metric/auto.test @@ -188,6 +216,25 @@ tests: response_json_paths: $: - 
["2015-03-06T14:34:12+00:00", 1.0, 12.0] + - ["2015-03-06T14:35:12+00:00", 1.0, 24.0] + + - name: get created metric to check creation with newformat + GET: /v1/resource/generic/46c9418d-d63b-4cdd-be89-8f57ffc5952e/metric/newformat + + - name: get created metric to check creation with newformat2 + GET: /v1/resource/generic/46c9418d-d63b-4cdd-be89-8f57ffc5952e/metric/auto.newformat + + - name: ensure measure have been posted with newformat + GET: /v1/resource/generic/46c9418d-d63b-4cdd-be89-8f57ffc5952e/metric/newformat/measures?refresh=true&start=2015-03-06T14:34 + response_json_paths: + $: + - ["2015-03-06T14:34:12+00:00", 1.0, 42.0] + + - name: ensure measure have been posted with newformat2 + GET: /v1/resource/generic/46c9418d-d63b-4cdd-be89-8f57ffc5952e/metric/auto.newformat/measures?refresh=true&start=2015-03-06T14:34 + response_json_paths: + $: + - ["2015-03-06T14:34:12+00:00", 1.0, 24.0] - name: push measurements to unknown named metrics and resource with create_metrics with uuid resource id POST: /v1/batch/resources/metrics/measures?create_metrics=true @@ -202,10 +249,11 @@ tests: value: 12 bbbbbbbb-d63b-4cdd-be89-111111111111: auto.test: - - timestamp: "2015-03-06T14:33:57" - value: 43.1 - - timestamp: "2015-03-06T14:34:12" - value: 12 + measures: + - timestamp: "2015-03-06T14:33:57" + value: 43.1 + - timestamp: "2015-03-06T14:34:12" + value: 12 status: 400 response_json_paths: $.description.cause: "Unknown resources" diff --git a/gnocchi/tests/functional/gabbits/metric.yaml b/gnocchi/tests/functional/gabbits/metric.yaml index 6f59706d..fe934100 100644 --- a/gnocchi/tests/functional/gabbits/metric.yaml +++ b/gnocchi/tests/functional/gabbits/metric.yaml @@ -258,8 +258,9 @@ tests: - [ "2015-03-06T14:34:12", 12] status: 400 response_json_paths: - $.description.cause: "Invalid input" - $.description.detail: "unexpected measures format" + $.description.cause: "Attribute value error" + $.description.detail: [] + $.description.reason: "unexpected measures format" 
- name: refresh metric GET: /v1/metric/$HISTORY['list valid metrics'].$RESPONSE['$[0].id']/measures?refresh=true diff --git a/releasenotes/notes/ap-in-batch-d83f6aa163d200e9.yaml b/releasenotes/notes/ap-in-batch-d83f6aa163d200e9.yaml new file mode 100644 index 00000000..4752566f --- /dev/null +++ b/releasenotes/notes/ap-in-batch-d83f6aa163d200e9.yaml @@ -0,0 +1,5 @@ +--- +features: + - | + A new format for the batch payload is available to allow to pass + the archive policy description -- GitLab From d3a93519e55fa10bb1de3eac8821e26b2c6581e3 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Tue, 30 Jan 2018 15:47:37 +0100 Subject: [PATCH 1211/1483] indexer: detach metric from resource when marked as 'deleted' This allows to re associate another metric using the same name as used previously, even if the metric is not expunged yet. Closes: #702 --- gnocchi/indexer/sqlalchemy.py | 2 +- gnocchi/tests/test_indexer.py | 26 ++++++++++++++++++++++++++ 2 files changed, 27 insertions(+), 1 deletion(-) diff --git a/gnocchi/indexer/sqlalchemy.py b/gnocchi/indexer/sqlalchemy.py index 50d8f133..bd0ab812 100644 --- a/gnocchi/indexer/sqlalchemy.py +++ b/gnocchi/indexer/sqlalchemy.py @@ -1143,7 +1143,7 @@ class SQLAlchemyIndexer(indexer.IndexerDriver): with self.facade.writer() as session: if session.query(Metric).filter( Metric.id == id, Metric.status == 'active').update( - {"status": "delete"}) == 0: + {"status": "delete", "resource_id": None}) == 0: raise indexer.NoSuchMetric(id) @staticmethod diff --git a/gnocchi/tests/test_indexer.py b/gnocchi/tests/test_indexer.py index c9d1d08d..2671bbfc 100644 --- a/gnocchi/tests/test_indexer.py +++ b/gnocchi/tests/test_indexer.py @@ -503,6 +503,32 @@ class TestIndexerDriver(tests_base.TestCase): self.assertIn('foo', metric_names) self.assertIn('bar', metric_names) + def test_update_resource_metrics_append_after_delete(self): + r1 = uuid.uuid4() + m1 = uuid.uuid4() + m2 = uuid.uuid4() + m3 = uuid.uuid4() + creator = str(uuid.uuid4()) + 
self.index.create_metric(m1, creator, + archive_policy_name="low") + self.index.create_metric(m2, creator, + archive_policy_name="low") + self.index.create_metric(m3, creator, + archive_policy_name="low") + self.index.create_resource('generic', r1, creator, + metrics={'foo': m1}) + rc = self.index.update_resource('generic', r1, metrics={'bar': m2}, + append_metrics=True) + self.index.delete_metric(m1) + rc = self.index.update_resource('generic', r1, metrics={'foo': m3}, + append_metrics=True) + r = self.index.get_resource('generic', r1, with_metrics=True) + self.assertEqual(rc, r) + metric_names = [m.name for m in rc.metrics] + self.assertEqual(2, len(metric_names)) + self.assertIn('foo', metric_names) + self.assertIn('bar', metric_names) + def test_update_resource_metrics_append_fail(self): r1 = uuid.uuid4() e1 = uuid.uuid4() -- GitLab From 84921cd6066302b14a7f57767859e6bcfbd50bde Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Wed, 31 Jan 2018 16:57:09 +0100 Subject: [PATCH 1212/1483] redis: concatenate bytes before unserializing This is actually faster than concatenating numpy arrays. 
--- gnocchi/incoming/redis.py | 6 ++---- gnocchi/storage/__init__.py | 2 +- 2 files changed, 3 insertions(+), 5 deletions(-) diff --git a/gnocchi/incoming/redis.py b/gnocchi/incoming/redis.py index c1684684..28a71f1d 100644 --- a/gnocchi/incoming/redis.py +++ b/gnocchi/incoming/redis.py @@ -110,10 +110,8 @@ class RedisStorage(incoming.IncomingDriver): # lrange is inclusive on both ends, decrease to grab exactly n items item_len = item_len - 1 if item_len else item_len - yield self._array_concatenate([ - self._unserialize_measures('%s-%s' % (metric_id, i), data) - for i, data in enumerate(self._client.lrange(key, 0, item_len)) - ]) + yield self._unserialize_measures(metric_id, b"".join( + self._client.lrange(key, 0, item_len))) # ltrim is inclusive, bump 1 to remove up to and including nth item self._client.ltrim(key, item_len + 1, -1) diff --git a/gnocchi/storage/__init__.py b/gnocchi/storage/__init__.py index e4678883..670658e3 100644 --- a/gnocchi/storage/__init__.py +++ b/gnocchi/storage/__init__.py @@ -514,7 +514,7 @@ class StorageDriver(object): LOG.debug("Skipping %s (already processed)", metric) return - measures.sort(order='timestamps') + measures = numpy.sort(measures, order='timestamps') agg_methods = list(metric.archive_policy.aggregation_methods) block_size = metric.archive_policy.max_block_size -- GitLab From b71de7c5884e43968a4591ccd0291cae41ec7e46 Mon Sep 17 00:00:00 2001 From: Lars Kellogg-Stedman Date: Mon, 29 Jan 2018 10:32:04 -0500 Subject: [PATCH 1213/1483] further work to make resource create docs match reality - Change documentation of user and project attributes based on comments in #698. - Link to timestamp format description when mentioning timestamps. 
- Minor rephrasing in the paragraph regaring the id attribute --- doc/source/rest.j2 | 19 ++++++++++++------- 1 file changed, 12 insertions(+), 7 deletions(-) diff --git a/doc/source/rest.j2 b/doc/source/rest.j2 index 4d895307..6a8eafcb 100644 --- a/doc/source/rest.j2 +++ b/doc/source/rest.j2 @@ -387,15 +387,18 @@ To create a generic |resource|: {{ scenarios['create-resource-generic']['doc'] }} -The *user_id* and *project_id* attributes must be UUIDs. The timestamp -describing the lifespan of the |resource| are optional, and *started_at* is by -default set to the current timestamp. +The *user_id* and *project_id* attributes may be any arbitrary string. The +:ref:`timestamps` describing the lifespan of the +|resource| are optional, and *started_at* is by default set to the current +timestamp. The *id* attribute may be a UUID or some other arbitrary string. If it is -not a UUID, the original value will be stored in the *original_resource_id* -attribute and Gnocchi will generate a new UUID that is unique for the user. -That is, if two users create resources with the same non-UUID *id* field, -the resources will have different UUIDs. +a UUID, Gnocchi will use it verbatim. If it is not a UUID, the original +value will be stored in the *original_resource_id* attribute and Gnocchi +will generate a new UUID that is unique for the user. That is, if two +users submit create requests with the same non-UUID *id* attribute, the +resulting resources will have different UUID values in their respective +*id* attributes. You may use either of the *id* or the *original_resource_id* attributes to refer to the |resource|. The value returned by the create operation @@ -986,6 +989,8 @@ reporting values such as the number of new |measures| to process for each {{ scenarios['get-status']['doc'] }} +.. 
_timestamp-format: + Timestamp format ================ -- GitLab From 5d68134e1f4894f62b242b94ef08dc2c5e2581db Mon Sep 17 00:00:00 2001 From: gord chung Date: Fri, 26 Jan 2018 21:55:03 +0000 Subject: [PATCH 1214/1483] use concatenate consistently it just does some validation we don't need then calls concatenate[1]. just call concatenate so we use it consistently everywhere. [1] https://github.com/numpy/numpy/blob/99019107cbde06294a356fd7fc859f9b31f87410/numpy/lib/function_base.py#L4326 --- gnocchi/carbonara.py | 2 +- gnocchi/incoming/ceph.py | 3 ++- gnocchi/incoming/file.py | 4 ++-- gnocchi/incoming/s3.py | 4 ++-- gnocchi/rest/aggregates/operations.py | 2 +- 5 files changed, 8 insertions(+), 7 deletions(-) diff --git a/gnocchi/carbonara.py b/gnocchi/carbonara.py index f003d63e..b583271f 100644 --- a/gnocchi/carbonara.py +++ b/gnocchi/carbonara.py @@ -96,7 +96,7 @@ def combine_timeseries(ts1, ts2): :param ts: The timeseries to combine. :return: A new timeseries. """ - ts = numpy.append(ts1, ts2) + ts = numpy.concatenate((ts1, ts2)) _, index = numpy.unique(ts['timestamps'], return_index=True) return ts[index] diff --git a/gnocchi/incoming/ceph.py b/gnocchi/incoming/ceph.py index f1bb1afa..09dbcff3 100644 --- a/gnocchi/incoming/ceph.py +++ b/gnocchi/incoming/ceph.py @@ -214,7 +214,8 @@ class CephStorage(incoming.IncomingDriver): measures = self._make_measures_array() for k, v in omaps: - measures = numpy.append(measures, self._unserialize_measures(k, v)) + measures = numpy.concatenate( + (measures, self._unserialize_measures(k, v))) processed_keys.append(k) yield measures diff --git a/gnocchi/incoming/file.py b/gnocchi/incoming/file.py index 4dafd5dc..d474fb77 100644 --- a/gnocchi/incoming/file.py +++ b/gnocchi/incoming/file.py @@ -169,8 +169,8 @@ class FileStorage(incoming.IncomingDriver): for f in files: abspath = self._build_measure_path(metric_id, f) with open(abspath, "rb") as e: - measures = numpy.append( - measures, self._unserialize_measures(f, e.read())) + 
measures = numpy.concatenate(( + measures, self._unserialize_measures(f, e.read()))) yield measures diff --git a/gnocchi/incoming/s3.py b/gnocchi/incoming/s3.py index b687582b..9af0e304 100644 --- a/gnocchi/incoming/s3.py +++ b/gnocchi/incoming/s3.py @@ -167,10 +167,10 @@ class S3Storage(incoming.IncomingDriver): response = self.s3.get_object( Bucket=self._bucket_name_measures, Key=f) - measures = numpy.append( + measures = numpy.concatenate(( measures, self._unserialize_measures(f, response['Body'].read()) - ) + )) yield measures diff --git a/gnocchi/rest/aggregates/operations.py b/gnocchi/rest/aggregates/operations.py index 26cd5f04..a090c253 100644 --- a/gnocchi/rest/aggregates/operations.py +++ b/gnocchi/rest/aggregates/operations.py @@ -191,7 +191,7 @@ def handle_resample(agg, granularity, timestamps, values, is_aggregated, if new_values is None: new_values = numpy.array([ts["values"]]) else: - new_values = numpy.append(new_values, [ts["values"]], axis=0) + new_values = numpy.concatenate((new_values, [ts["values"]])) return sampling, result_timestamps, new_values.T, is_aggregated -- GitLab From b50c9a02726d76593c111c4c406bd967481e9b43 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Sun, 28 Jan 2018 10:37:12 +0100 Subject: [PATCH 1215/1483] tox: only run doc8 in docs target The docs target is run in Travis so we should be good. No need to run it here. 
--- setup.cfg | 1 - tox.ini | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/setup.cfg b/setup.cfg index 77a6bd69..f4162bb5 100644 --- a/setup.cfg +++ b/setup.cfg @@ -67,7 +67,6 @@ test = testresources>=0.2.4 # Apache-2.0/BSD testtools>=0.9.38 WebTest>=2.0.16 - doc8 keystonemiddleware>=4.0.0,!=4.19.0 wsgi_intercept>=1.4.1 test-swift = diff --git a/tox.ini b/tox.ini index 44c727f7..adedd474 100644 --- a/tox.ini +++ b/tox.ini @@ -44,7 +44,6 @@ deps = .[test,redis,prometheus,{env:GNOCCHI_STORAGE_DEPS:},{env:GNOCCHI_INDEXER_ {env:GNOCCHI_TEST_TARBALLS:} cliff!=2.9.0 commands = - doc8 doc/source {toxinidir}/run-tests.sh {posargs} {toxinidir}/run-func-tests.sh {posargs} @@ -147,6 +146,7 @@ basepython = python3 # .[postgresql,doc] # setenv = GNOCCHI_STORAGE_DEPS=file deps = .[test,file,postgresql,doc] + doc8 setenv = GNOCCHI_TEST_DEBUG=1 commands = doc8 --ignore-path doc/source/rest.rst,doc/source/comparison-table.rst doc/source pifpaf -g GNOCCHI_INDEXER_URL run postgresql -- python setup.py build_sphinx -W -- GitLab From 441e6a581ac1147be01736b756df6bd13883e07d Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Tue, 23 Jan 2018 14:11:49 -0800 Subject: [PATCH 1216/1483] rest: load metric details for ACL Fixes: #464 --- gnocchi/rest/api.py | 5 ++--- gnocchi/tests/test_rest.py | 41 ++++++++++++++++++++++++-------------- 2 files changed, 28 insertions(+), 18 deletions(-) diff --git a/gnocchi/rest/api.py b/gnocchi/rest/api.py index 01d4fc2f..8795af46 100644 --- a/gnocchi/rest/api.py +++ b/gnocchi/rest/api.py @@ -564,10 +564,9 @@ class MetricsController(rest.RestController): except ValueError: abort(404, six.text_type(indexer.NoSuchMetric(id))) - # NOTE(sileht): Don't get detail for measure - details = len(remainder) == 0 + # Load details for ACL metrics = pecan.request.indexer.list_metrics( - attribute_filter={"=": {"id": metric_id}}, details=details) + attribute_filter={"=": {"id": metric_id}}, details=True) if not metrics: abort(404, 
six.text_type(indexer.NoSuchMetric(id))) return MetricController(metrics[0]), remainder diff --git a/gnocchi/tests/test_rest.py b/gnocchi/tests/test_rest.py index 20daf8c7..b34d6571 100644 --- a/gnocchi/tests/test_rest.py +++ b/gnocchi/tests/test_rest.py @@ -466,14 +466,16 @@ class MetricTest(RestTest): def test_get_measures_with_another_user_allowed(self): rid = str(uuid.uuid4()) - self.app.post_json("/v1/resource/generic", - params={ - "id": rid, - "project_id": TestingApp.PROJECT_ID_2, - "metrics": { - "disk": {"archive_policy_name": "low"}, - } - }) + result = self.app.post_json( + "/v1/resource/generic", + params={ + "id": rid, + "project_id": TestingApp.PROJECT_ID_2, + "metrics": { + "disk": {"archive_policy_name": "low"}, + } + }) + metric_id = result.json['metrics']['disk'] measures_url = "/v1/resource/generic/%s/metric/disk/measures" % rid self.app.post_json(measures_url, params=[{"timestamp": '2013-01-01 23:23:23', @@ -485,22 +487,31 @@ class MetricTest(RestTest): ['2013-01-01T23:00:00+00:00', 3600.0, 1234.2], ['2013-01-01T23:20:00+00:00', 300.0, 1234.2]], result.json) + result = self.app.get("/v1/metric/%s/measures" % metric_id) + self.assertEqual( + [['2013-01-01T00:00:00+00:00', 86400.0, 1234.2], + ['2013-01-01T23:00:00+00:00', 3600.0, 1234.2], + ['2013-01-01T23:20:00+00:00', 300.0, 1234.2]], + result.json) def test_get_measures_with_another_user_disallowed(self): rid = str(uuid.uuid4()) - self.app.post_json("/v1/resource/generic", - params={ - "id": rid, - "metrics": { - "disk": {"archive_policy_name": "low"}, - } - }) + result = self.app.post_json( + "/v1/resource/generic", + params={ + "id": rid, + "metrics": { + "disk": {"archive_policy_name": "low"}, + } + }) + metric_id = result.json['metrics']['disk'] measures_url = "/v1/resource/generic/%s/metric/disk/measures" % rid self.app.post_json(measures_url, params=[{"timestamp": '2013-01-01 23:23:23', "value": 1234.2}]) with self.app.use_another_user(): self.app.get(measures_url, status=403) + 
self.app.get("/v1/metric/%s/measures" % metric_id, status=403) @mock.patch.object(utils, 'utcnow') def test_get_measure_start_relative(self, utcnow): -- GitLab From 56ad038946346219aadb4f94ab07c09440a4df71 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Thu, 1 Feb 2018 10:13:04 +0100 Subject: [PATCH 1217/1483] redis: only check key existence if no keys are retrieved The current code starts by doing this, but actually the HSCAN call is a good hint as whether the metric exist or not. --- gnocchi/storage/redis.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/gnocchi/storage/redis.py b/gnocchi/storage/redis.py index 140bf7a4..c0bcd5ae 100644 --- a/gnocchi/storage/redis.py +++ b/gnocchi/storage/redis.py @@ -68,8 +68,6 @@ class RedisStorage(storage.StorageDriver): def _list_split_keys(self, metric, aggregation, granularity, version=3): key = self._metric_key(metric) - if not self._client.exists(key): - raise storage.MetricDoesNotExist(metric) split_keys = set() hashes = self._client.hscan_iter( key, match=self._aggregated_field_for_split( @@ -77,6 +75,8 @@ class RedisStorage(storage.StorageDriver): for f, __ in hashes: meta = f.decode("utf8").split(self.FIELD_SEP, 1) split_keys.add(meta[0]) + if not split_keys and not self._client.exists(key): + raise storage.MetricDoesNotExist(metric) return split_keys def _delete_metric_measures(self, metric, key, aggregation, version=3): -- GitLab From 383dbca95b83c7c82b0cbe769acb3957e407b4e9 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Thu, 1 Feb 2018 09:49:00 +0100 Subject: [PATCH 1218/1483] redis: hmget requires at least one args If no keys have to be retrieved, we can skip the backend code and just return an empty list. This is specially needed by redis that need at least one key for using hmget. 
--- gnocchi/storage/__init__.py | 2 ++ gnocchi/storage/redis.py | 2 ++ 2 files changed, 4 insertions(+) diff --git a/gnocchi/storage/__init__.py b/gnocchi/storage/__init__.py index 670658e3..bb0df8d0 100644 --- a/gnocchi/storage/__init__.py +++ b/gnocchi/storage/__init__.py @@ -230,6 +230,8 @@ class StorageDriver(object): for ts in agg_timeseries])) def _get_measures_and_unserialize(self, metric, keys, aggregation): + if not keys: + return [] raw_measures = self._get_measures(metric, keys, aggregation) results = [] for key, raw in six.moves.zip(keys, raw_measures): diff --git a/gnocchi/storage/redis.py b/gnocchi/storage/redis.py index c0bcd5ae..1c6c73b0 100644 --- a/gnocchi/storage/redis.py +++ b/gnocchi/storage/redis.py @@ -93,6 +93,8 @@ class RedisStorage(storage.StorageDriver): self._client.delete(self._metric_key(metric)) def _get_measures(self, metric, keys, aggregation, version=3): + if not keys: + return [] redis_key = self._metric_key(metric) fields = [ self._aggregated_field_for_split(aggregation, key, version) -- GitLab From 30ed3c0a4679cf80c7bc874b2433027f847fc580 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Fri, 2 Feb 2018 15:35:45 +0100 Subject: [PATCH 1219/1483] api: add some uwsgi flags --thunder-lock is recommended when we mix threads and processes. http://uwsgi-docs.readthedocs.io/en/latest/articles/SerializingAccept.html "--hook-master-start", "unix_signal:15 gracefully_kill_them_all make child receving sigterm instead of sigkill on exit. This should avoid having some connections still in TIME-WAIT after the process stop. 
--- gnocchi/cli/api.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/gnocchi/cli/api.py b/gnocchi/cli/api.py index f6bfb066..a4b8ab6d 100644 --- a/gnocchi/cli/api.py +++ b/gnocchi/cli/api.py @@ -87,6 +87,8 @@ def api(): conf.port or conf.api.port), "--master", "--enable-threads", + "--thunder-lock", "true", + "--hook-master-start", "unix_signal:15 gracefully_kill_them_all", "--die-on-term", # NOTE(jd) See https://github.com/gnocchixyz/gnocchi/issues/156 "--add-header", "Connection: close", -- GitLab From 5f1ac4fe82f17b3f7eed05269e3aad6df4c30bbe Mon Sep 17 00:00:00 2001 From: Thomas Goirand Date: Mon, 5 Feb 2018 14:27:04 +0100 Subject: [PATCH 1220/1483] Fixed gnocchi-api UWSGI startup. --- debian/changelog | 6 ++++++ debian/control | 4 ++++ debian/gnocchi-api.init.in | 5 +++++ 3 files changed, 15 insertions(+) diff --git a/debian/changelog b/debian/changelog index 5dba90cf..1faed95d 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +gnocchi (4.0.3-4) unstable; urgency=medium + + * Fixed gnocchi-api UWSGI startup. 
+ + -- Thomas Goirand Mon, 05 Feb 2018 14:25:47 +0100 + gnocchi (4.0.3-3) unstable; urgency=medium * python3-gnocchi breaks+replaces python-gnocchi, therefore allowing diff --git a/debian/control b/debian/control index 0f4ff6f8..76b11b93 100644 --- a/debian/control +++ b/debian/control @@ -84,10 +84,14 @@ Package: gnocchi-api Architecture: all Depends: adduser, + debconf, gnocchi-common (= ${binary:Version}), lsb-base, + python3-keystoneclient, python3-openstackclient, + python3-pastescript, q-text-as-data, + uwsgi-plugin-python3, ${misc:Depends}, ${python3:Depends}, Description: Metric as a Service - API daemon diff --git a/debian/gnocchi-api.init.in b/debian/gnocchi-api.init.in index b7adc13f..4599f2df 100644 --- a/debian/gnocchi-api.init.in +++ b/debian/gnocchi-api.init.in @@ -15,3 +15,8 @@ DESC="OpenStack Gnocchi Api" PROJECT_NAME=gnocchi NAME=${PROJECT_NAME}-api +DAEMON=/usr/bin/uwsgi_python3 +DAEMON_ARGS="--master --paste-logger --processes 4 --die-on-term --logto /var/log/gnocchi/gnocchi-api.log --stats localhost:9315 --http-socket :8041 --wsgi-file /usr/bin/gnocchi-api" +NO_OPENSTACK_CONFIG_FILE_DAEMON_ARG=yes +NO_OPENSTACK_LOGFILE_DAEMON_ARG=yes +USE_SYSLOG=no -- GitLab From cf1102b71ab4a039c4e5bdbe88220766df0bf169 Mon Sep 17 00:00:00 2001 From: Thomas Goirand Date: Mon, 5 Feb 2018 14:29:09 +0100 Subject: [PATCH 1221/1483] Now packaging 4.0.4 --- debian/changelog | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/debian/changelog b/debian/changelog index 1faed95d..1d83221c 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,5 +1,6 @@ -gnocchi (4.0.3-4) unstable; urgency=medium +gnocchi (4.0.4-1) unstable; urgency=medium + * New upstream release. * Fixed gnocchi-api UWSGI startup. 
-- Thomas Goirand Mon, 05 Feb 2018 14:25:47 +0100 -- GitLab From 1eebda74f380104e21066f337488b27ad519450c Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Sun, 4 Feb 2018 08:29:03 +0100 Subject: [PATCH 1222/1483] uwsgi: --thunder-lock doesn't take param in cmd --- gnocchi/cli/api.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/gnocchi/cli/api.py b/gnocchi/cli/api.py index a4b8ab6d..41ed966a 100644 --- a/gnocchi/cli/api.py +++ b/gnocchi/cli/api.py @@ -87,7 +87,7 @@ def api(): conf.port or conf.api.port), "--master", "--enable-threads", - "--thunder-lock", "true", + "--thunder-lock", "--hook-master-start", "unix_signal:15 gracefully_kill_them_all", "--die-on-term", # NOTE(jd) See https://github.com/gnocchixyz/gnocchi/issues/156 -- GitLab From 7b12ab77060c0bb3bfbdd01673bc80aed3022e83 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Sun, 4 Feb 2018 15:52:19 +0100 Subject: [PATCH 1223/1483] uwsgi: fix freeze, http/1.1 and keepalive Sometimes connection to gnocchi-api freeze because the body is not fully read. This may due to a Gnocchi returing an error before the wsgi.input is fully read, or a wrong/missing content-length. This change ensures we always read the entire wsgi.input with a Pecan after hook. And it switches back uwsgi router to "http" to ensure uwsgi acts as a HTTP Server (ensure correctness of the HTTP protocol, before passing the data to webob). Webob will always have the correct content-length. This also allows to re-enable keepalive of Gnocchi API by default. But this have a bad side-effect, the influxdb ingestion will not work out-of-the-box because it uses chunked enconding, and this is not supported with the uwsg "http" router [1]. To work around this, a new option "[api]/uwsgi_mode" allow to switch back to http-socket, with some doc to explain this mode requires a real HTTP Server on front of Gnocchi API for HTTP protocol correctness. 
[1] https://github.com/unbit/uwsgi/issues/1428 Closes-bug: #156 --- doc/source/influxdb.rst | 9 +++++++++ gnocchi/cli/api.py | 16 ++++++++++++---- gnocchi/opts.py | 9 +++++++++ gnocchi/rest/app.py | 10 ++++++++++ gnocchi/rest/influxdb.py | 3 +++ 5 files changed, 43 insertions(+), 4 deletions(-) diff --git a/doc/source/influxdb.rst b/doc/source/influxdb.rst index 10cb13d8..87e3deb4 100644 --- a/doc/source/influxdb.rst +++ b/doc/source/influxdb.rst @@ -39,5 +39,14 @@ configuration example:: http_headers = {"X-Gnocchi-InfluxDB-Tag-Resource-ID" = "host"} +Gnocchi configuration +===================== + +The default Gnocchi API server does not support the chunked encoding required +by the InfluxDB compatible endpoint. To enable chunked encoding, you must put a +real HTTP Server (Apache/NGINX/...) on front of Gnocchi API, and set +`[api]/uwsgi_mode = http-socket`. + + .. _`Telegraf`: https://github.com/influxdata/telegraf .. _`InfluxDB line protocol`: https://docs.influxdata.com/influxdb/v1.3/write_protocols/line_protocol_reference/ diff --git a/gnocchi/cli/api.py b/gnocchi/cli/api.py index 41ed966a..a444977b 100644 --- a/gnocchi/cli/api.py +++ b/gnocchi/cli/api.py @@ -81,17 +81,19 @@ def api(): workers = utils.get_default_workers() + # TODO(sileht): When uwsgi 2.1 will be release we should be able + # to use --wsgi-manage-chunked-input + # https://github.com/unbit/uwsgi/issues/1428 args = [ "--if-not-plugin", "python", "--plugin", "python", "--endif", - "--http-socket", "%s:%d" % (conf.host or conf.api.host, - conf.port or conf.api.port), + "--%s" % conf.api.uwsgi_mode, "%s:%d" % ( + conf.host or conf.api.host, + conf.port or conf.api.port), "--master", "--enable-threads", "--thunder-lock", "--hook-master-start", "unix_signal:15 gracefully_kill_them_all", "--die-on-term", - # NOTE(jd) See https://github.com/gnocchixyz/gnocchi/issues/156 - "--add-header", "Connection: close", "--processes", str(math.floor(workers * 1.5)), "--threads", str(workers), "--lazy-apps", @@ -99,6 
+101,12 @@ def api(): "--wsgi", "gnocchi.rest.wsgi", "--pyargv", " ".join(sys.argv[1:]), ] + if conf.api.uwsgi_mode == "http": + args.extend([ + "--so-keepalive", + "--http-keepalive", + "--add-header", "Connection: Keep-Alive" + ]) virtual_env = os.getenv("VIRTUAL_ENV") if virtual_env is not None: diff --git a/gnocchi/opts.py b/gnocchi/opts.py index 75250838..b17524af 100644 --- a/gnocchi/opts.py +++ b/gnocchi/opts.py @@ -64,6 +64,15 @@ API_OPTS = ( cfg.PortOpt('port', default=8041, help="Port to listen on"), + cfg.StrOpt('uwsgi-mode', + default='http', + choices=["http", "http-socket", "socket"], + help="""Socket type to use for uWSGI: +* http: support HTTP/1.1 and keepalive, + but not chunked encoding (InfluxDB) +* http-socket/socket: support chunked encoding, but require a upstream HTTP + Server for HTTP/1.1, keepalive and HTTP protocol correctness. +""") ) diff --git a/gnocchi/rest/app.py b/gnocchi/rest/app.py index fd02b637..3a837d15 100644 --- a/gnocchi/rest/app.py +++ b/gnocchi/rest/app.py @@ -62,6 +62,16 @@ class GnocchiHook(pecan.hooks.PecanHook): state.request.policy_enforcer = self.policy_enforcer state.request.auth_helper = self.auth_helper + @staticmethod + def after(state): + # NOTE(sileht): uwsgi expects the application to consume the wsgi.input + # fd. Otherwise the connection with the application freeze. In our + # case, if we raise an error before we read request.body_file, or if + # json.load(body_file) doesn't read the whole file the freeze can + # occurs. This will ensures we always read the full body_file. 
+ if state.request.content_length is not None: + state.request.body_file.read() + BACKEND_LOCKS = { 'coordinator': threading.Lock(), 'storage': threading.Lock(), diff --git a/gnocchi/rest/influxdb.py b/gnocchi/rest/influxdb.py index 22130e2e..29464103 100644 --- a/gnocchi/rest/influxdb.py +++ b/gnocchi/rest/influxdb.py @@ -142,6 +142,9 @@ class InfluxDBController(rest.RestController): def _write_get_lines(): encoding = pecan.request.headers.get('Transfer-Encoding', "").lower() if encoding == "chunked": + # TODO(sileht): Support reading chunk without uwsgi when + # pecan.request.environ['wsgi.input_terminated'] is set. + # https://github.com/unbit/uwsgi/issues/1428 if uwsgi is None: api.abort( 501, {"cause": "Not implemented error", -- GitLab From dfa14a7eb0bee4ae8e908b6cf3aa7576cef61da4 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Tue, 6 Feb 2018 10:59:57 +0100 Subject: [PATCH 1224/1483] doc: update conf for stable/4.2 --- doc/source/conf.py | 2 +- doc/source/releasenotes/4.2.rst | 6 ++++++ doc/source/releasenotes/index.rst | 1 + 3 files changed, 8 insertions(+), 1 deletion(-) create mode 100644 doc/source/releasenotes/4.2.rst diff --git a/doc/source/conf.py b/doc/source/conf.py index 3d3ae310..164785c8 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -180,7 +180,7 @@ html_theme_options = { # Multiversion docs scv_sort = ('semver',) scv_show_banner = True -scv_banner_main_ref = 'stable/4.1' +scv_banner_main_ref = 'stable/4.2' scv_priority = 'branches' scv_whitelist_branches = ('master', '^stable/([3-9]\.)') scv_whitelist_tags = ("^$",) diff --git a/doc/source/releasenotes/4.2.rst b/doc/source/releasenotes/4.2.rst new file mode 100644 index 00000000..f9387173 --- /dev/null +++ b/doc/source/releasenotes/4.2.rst @@ -0,0 +1,6 @@ +=================================== + 4.2 Series Release Notes +=================================== + +.. 
release-notes:: + :branch: origin/stable/4.2 diff --git a/doc/source/releasenotes/index.rst b/doc/source/releasenotes/index.rst index b7a4a627..d642caed 100644 --- a/doc/source/releasenotes/index.rst +++ b/doc/source/releasenotes/index.rst @@ -5,6 +5,7 @@ Release Notes :maxdepth: 2 unreleased + 4.2 4.1 4.0 3.1 -- GitLab From 1c2d0fa5218a5fa4c4212f5664d678fc6e559fce Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Tue, 16 Jan 2018 17:54:14 +0100 Subject: [PATCH 1225/1483] storage: pass Aggregation object to storage get_measures() This frees a bit more the storage engine from the archive policy system. --- gnocchi/archive_policy.py | 14 ++ gnocchi/rest/api.py | 34 ++++- gnocchi/storage/__init__.py | 25 ++-- gnocchi/tests/test_statsd.py | 26 ++-- gnocchi/tests/test_storage.py | 265 ++++++++++++++++++---------------- 5 files changed, 205 insertions(+), 159 deletions(-) diff --git a/gnocchi/archive_policy.py b/gnocchi/archive_policy.py index afa248ff..51cb1227 100644 --- a/gnocchi/archive_policy.py +++ b/gnocchi/archive_policy.py @@ -27,6 +27,9 @@ from gnocchi import aggregation from gnocchi import utils +ATTRGETTER_GRANULARITY = operator.attrgetter("granularity") + + class ArchivePolicy(object): DEFAULT_AGGREGATION_METHODS = () @@ -94,6 +97,17 @@ class ArchivePolicy(object): return aggregation.Aggregation( method, d.granularity, d.timespan) + def get_aggregations_for_method(self, method): + """Return a list of aggregation for a method. + + List is sorted by granularity, desc. + + :param method: Aggregation method. 
+ """ + return [aggregation.Aggregation(method, d.granularity, d.timespan) + for d in sorted(self.definition, + key=ATTRGETTER_GRANULARITY, reverse=True)] + @property def aggregations(self): return [aggregation.Aggregation(method, d.granularity, d.timespan) diff --git a/gnocchi/rest/api.py b/gnocchi/rest/api.py index 8795af46..d1c1de81 100644 --- a/gnocchi/rest/api.py +++ b/gnocchi/rest/api.py @@ -17,6 +17,7 @@ import collections import functools import itertools +import operator import uuid import jsonpatch @@ -50,6 +51,9 @@ except ImportError: PROMETHEUS_SUPPORTED = False +ATTRGETTER_GRANULARITY = operator.attrgetter("granularity") + + def arg_to_list(value): if isinstance(value, list): return value @@ -530,6 +534,17 @@ class MetricController(rest.RestController): }, }) + aggregations = [] + for g in sorted(granularity, reverse=True): + agg = self.metric.archive_policy.get_aggregation( + aggregation, g) + if agg is None: + abort(404, six.text_type( + storage.AggregationDoesNotExist( + self.metric, aggregation, g) + )) + aggregations.append(agg) + if (strtobool("refresh", refresh) and pecan.request.incoming.has_unprocessed(self.metric.id)): try: @@ -540,8 +555,7 @@ class MetricController(rest.RestController): abort(503, six.text_type(e)) try: return pecan.request.storage.get_measures( - self.metric, granularity, start, stop, aggregation, - resample) + self.metric, aggregations, start, stop, resample)[aggregation] except (storage.MetricDoesNotExist, storage.AggregationDoesNotExist) as e: abort(404, six.text_type(e)) @@ -1880,6 +1894,19 @@ class AggregationController(rest.RestController): for metric in metrics), 'No granularity match')) + aggregations = set() + for metric in metrics: + for g in granularity: + agg = metric.archive_policy.get_aggregation( + aggregation, g) + if agg is None: + abort(404, six.text_type( + storage.AggregationDoesNotExist(metric, aggregation, g) + )) + aggregations.add(agg) + aggregations = sorted(aggregations, key=ATTRGETTER_GRANULARITY, 
+ reverse=True) + operations = ["aggregate", reaggregation, []] if resample: operations[2].extend( @@ -1920,8 +1947,7 @@ class AggregationController(rest.RestController): }, }) return pecan.request.storage.get_measures( - metric, granularity, start, stop, aggregation, - resample) + metric, aggregations, start, stop, resample)[aggregation] return processor.get_measures( pecan.request.storage, [processor.MetricReference(m, aggregation) for m in metrics], diff --git a/gnocchi/storage/__init__.py b/gnocchi/storage/__init__.py index bb0df8d0..a2629cf4 100644 --- a/gnocchi/storage/__init__.py +++ b/gnocchi/storage/__init__.py @@ -39,6 +39,7 @@ LOG = daiquiri.getLogger(__name__) ITEMGETTER_1 = operator.itemgetter(1) +ATTRGETTER_AGG_METHOD = operator.attrgetter("aggregation_method") class StorageError(Exception): @@ -197,26 +198,17 @@ class StorageDriver(object): """ return name.split("_")[-1] == 'v%s' % v - def get_measures(self, metric, granularities, + def get_measures(self, metric, aggregations, from_timestamp=None, to_timestamp=None, - aggregation='mean', resample=None): + resample=None): """Get a measure to a metric. :param metric: The metric measured. - :param granularities: The granularities to retrieve. + :param aggregations: The aggregations to retrieve. :param from timestamp: The timestamp to get the measure from. :param to timestamp: The timestamp to get the measure to. - :param aggregation: The type of aggregation to retrieve. :param resample: The granularity to resample to. 
""" - - aggregations = [] - for g in sorted(granularities, reverse=True): - agg = metric.archive_policy.get_aggregation(aggregation, g) - if agg is None: - raise AggregationDoesNotExist(metric, aggregation, g) - aggregations.append(agg) - agg_timeseries = utils.parallel_map( self._get_measures_timeserie, ((metric, ag, from_timestamp, to_timestamp) @@ -226,8 +218,13 @@ class StorageDriver(object): agg_timeseries = list(map(lambda agg: agg.resample(resample), agg_timeseries)) - return list(itertools.chain(*[ts.fetch(from_timestamp, to_timestamp) - for ts in agg_timeseries])) + return { + aggmethod: list(itertools.chain( + *[ts.fetch(from_timestamp, to_timestamp) + for ts in aggts])) + for aggmethod, aggts in itertools.groupby(agg_timeseries, + ATTRGETTER_AGG_METHOD) + } def _get_measures_and_unserialize(self, metric, keys, aggregation): if not keys: diff --git a/gnocchi/tests/test_statsd.py b/gnocchi/tests/test_statsd.py index ddcbdd1e..7aa073fb 100644 --- a/gnocchi/tests/test_statsd.py +++ b/gnocchi/tests/test_statsd.py @@ -46,7 +46,7 @@ class TestStatsd(tests_base.TestCase): self.conf.set_override("archive_policy_name", self.STATSD_ARCHIVE_POLICY_NAME, "statsd") ap = self.ARCHIVE_POLICIES["medium"] - self.granularities = [d.granularity for d in ap.definition] + self.aggregations = ap.get_aggregations_for_method("mean") self.stats = statsd.Stats(self.conf) # Replace storage/indexer with correct ones that have been upgraded @@ -77,12 +77,12 @@ class TestStatsd(tests_base.TestCase): self.stats.indexer, self.stats.incoming, [str(metric.id)], sync=True) - measures = self.storage.get_measures(metric, self.granularities) - self.assertEqual([ + measures = self.storage.get_measures(metric, self.aggregations) + self.assertEqual({"mean": [ (datetime64(2015, 1, 7), numpy.timedelta64(1, 'D'), 1.0), (datetime64(2015, 1, 7, 13), numpy.timedelta64(1, 'h'), 1.0), (datetime64(2015, 1, 7, 13, 58), numpy.timedelta64(1, 'm'), 1.0) - ], measures) + ]}, measures) utcnow.return_value = 
utils.datetime_utc(2015, 1, 7, 13, 59, 37) # This one is going to be ignored @@ -98,13 +98,13 @@ class TestStatsd(tests_base.TestCase): self.stats.indexer, self.stats.incoming, [str(metric.id)], sync=True) - measures = self.storage.get_measures(metric, self.granularities) - self.assertEqual([ + measures = self.storage.get_measures(metric, self.aggregations) + self.assertEqual({"mean": [ (datetime64(2015, 1, 7), numpy.timedelta64(1, 'D'), 1.5), (datetime64(2015, 1, 7, 13), numpy.timedelta64(1, 'h'), 1.5), (datetime64(2015, 1, 7, 13, 58), numpy.timedelta64(1, 'm'), 1.0), (datetime64(2015, 1, 7, 13, 59), numpy.timedelta64(1, 'm'), 2.0) - ], measures) + ]}, measures) def test_gauge(self): self._test_gauge_or_ms("g") @@ -132,12 +132,12 @@ class TestStatsd(tests_base.TestCase): self.stats.indexer, self.stats.incoming, [str(metric.id)], sync=True) - measures = self.storage.get_measures(metric, self.granularities) - self.assertEqual([ + measures = self.storage.get_measures(metric, self.aggregations) + self.assertEqual({"mean": [ (datetime64(2015, 1, 7), numpy.timedelta64(1, 'D'), 1.0), (datetime64(2015, 1, 7, 13), numpy.timedelta64(1, 'h'), 1.0), (datetime64(2015, 1, 7, 13, 58), numpy.timedelta64(1, 'm'), 1.0) - ], measures) + ]}, measures) utcnow.return_value = utils.datetime_utc(2015, 1, 7, 13, 59, 37) self.server.datagram_received( @@ -152,13 +152,13 @@ class TestStatsd(tests_base.TestCase): self.stats.indexer, self.stats.incoming, [str(metric.id)], sync=True) - measures = self.storage.get_measures(metric, self.granularities) - self.assertEqual([ + measures = self.storage.get_measures(metric, self.aggregations) + self.assertEqual({"mean": [ (datetime64(2015, 1, 7), numpy.timedelta64(1, 'D'), 28), (datetime64(2015, 1, 7, 13), numpy.timedelta64(1, 'h'), 28), (datetime64(2015, 1, 7, 13, 58), numpy.timedelta64(1, 'm'), 1.0), (datetime64(2015, 1, 7, 13, 59), numpy.timedelta64(1, 'm'), 55.0) - ], measures) + ]}, measures) class TestStatsdArchivePolicyRule(TestStatsd): diff 
--git a/gnocchi/tests/test_storage.py b/gnocchi/tests/test_storage.py index e16bd1eb..fb647e0d 100644 --- a/gnocchi/tests/test_storage.py +++ b/gnocchi/tests/test_storage.py @@ -20,6 +20,7 @@ import mock import numpy import six.moves +from gnocchi import aggregation as gaggregation from gnocchi import archive_policy from gnocchi import carbonara from gnocchi import incoming @@ -81,13 +82,10 @@ class TestStorageDriver(tests_base.TestCase): side_effect=carbonara.InvalidData()): self.trigger_processing() - granularities = [ - numpy.timedelta64(1, 'D'), - numpy.timedelta64(1, 'h'), - numpy.timedelta64(5, 'm'), - ] - - m = self.storage.get_measures(self.metric, granularities) + m = self.storage.get_measures( + self.metric, + self.metric.archive_policy.get_aggregations_for_method('mean'), + )['mean'] self.assertIn((datetime64(2014, 1, 1), numpy.timedelta64(1, 'D'), 1), m) self.assertIn((datetime64(2014, 1, 1, 13), @@ -110,13 +108,11 @@ class TestStorageDriver(tests_base.TestCase): self.trigger_processing() self.assertFalse(LOG.error.called) - granularities = [ - numpy.timedelta64(1, 'D'), - numpy.timedelta64(1, 'h'), - numpy.timedelta64(5, 'm'), - ] + aggregations = ( + self.metric.archive_policy.get_aggregations_for_method("mean") + ) - m = self.storage.get_measures(self.metric, granularities) + m = self.storage.get_measures(self.metric, aggregations)['mean'] self.assertIn((datetime64(2014, 1, 1), numpy.timedelta64(1, 'D'), 5.0), m) self.assertIn((datetime64(2014, 1, 1, 12), @@ -143,11 +139,13 @@ class TestStorageDriver(tests_base.TestCase): self.trigger_processing() self.storage._delete_metric(self.metric) self.trigger_processing() - self.assertEqual([], self.storage.get_measures(self.metric, [ - numpy.timedelta64(1, 'D'), - numpy.timedelta64(1, 'h'), - numpy.timedelta64(5, 'm'), - ])) + + aggregations = ( + self.metric.archive_policy.get_aggregations_for_method("mean") + ) + + self.assertEqual({"mean": []}, self.storage.get_measures( + self.metric, aggregations)) 
self.assertRaises(storage.MetricDoesNotExist, self.storage._get_unaggregated_timeserie, self.metric) @@ -216,11 +214,12 @@ class TestStorageDriver(tests_base.TestCase): for i in six.moves.range(0, 60) for j in six.moves.range(0, 60)]) self.trigger_processing([str(m.id)]) - self.assertEqual(3661, len(self.storage.get_measures(m, [ - numpy.timedelta64(1, 'h'), - numpy.timedelta64(1, 'm'), - numpy.timedelta64(1, 's'), - ]))) + aggregations = ( + m.archive_policy.get_aggregations_for_method("mean") + ) + + self.assertEqual(3661, len( + self.storage.get_measures(m, aggregations)['mean'])) @mock.patch('gnocchi.carbonara.SplitKey.POINTS_PER_SPLIT', 48) def test_add_measures_update_subset_split(self): @@ -276,19 +275,17 @@ class TestStorageDriver(tests_base.TestCase): ]) self.trigger_processing() - granularities = [ - numpy.timedelta64(1, 'D'), - numpy.timedelta64(1, 'h'), - numpy.timedelta64(5, 'm'), - ] + aggregations = ( + self.metric.archive_policy.get_aggregations_for_method("mean") + ) - self.assertEqual([ + self.assertEqual({"mean": [ (datetime64(2014, 1, 1), numpy.timedelta64(1, 'D'), 39.75), (datetime64(2014, 1, 1, 12), numpy.timedelta64(1, 'h'), 39.75), (datetime64(2014, 1, 1, 12), numpy.timedelta64(5, 'm'), 69.0), (datetime64(2014, 1, 1, 12, 5), numpy.timedelta64(5, 'm'), 23.0), (datetime64(2014, 1, 1, 12, 10), numpy.timedelta64(5, 'm'), 44.0), - ], self.storage.get_measures(self.metric, granularities)) + ]}, self.storage.get_measures(self.metric, aggregations)) # One year later… self.incoming.add_measures(self.metric.id, [ @@ -296,11 +293,11 @@ class TestStorageDriver(tests_base.TestCase): ]) self.trigger_processing() - self.assertEqual([ + self.assertEqual({"mean": [ (datetime64(2015, 1, 1), numpy.timedelta64(1, 'D'), 69), (datetime64(2015, 1, 1, 12), numpy.timedelta64(1, 'h'), 69), (datetime64(2015, 1, 1, 12), numpy.timedelta64(5, 'm'), 69), - ], self.storage.get_measures(self.metric, granularities)) + ]}, self.storage.get_measures(self.metric, aggregations)) 
self.assertEqual({ carbonara.SplitKey(numpy.datetime64(1244160000, 's'), @@ -372,14 +369,15 @@ class TestStorageDriver(tests_base.TestCase): assertCompressedIfWriteFull( carbonara.AggregatedTimeSerie.is_compressed(data)) - self.assertEqual([ + aggregation = self.metric.archive_policy.get_aggregation( + "mean", numpy.timedelta64(1, 'm')) + + self.assertEqual({"mean": [ (datetime64(2016, 1, 1, 12), numpy.timedelta64(1, 'm'), 69), (datetime64(2016, 1, 2, 13, 7), numpy.timedelta64(1, 'm'), 42), (datetime64(2016, 1, 4, 14, 9), numpy.timedelta64(1, 'm'), 4), (datetime64(2016, 1, 6, 15, 12), numpy.timedelta64(1, 'm'), 44), - ], self.storage.get_measures( - self.metric, - granularities=[numpy.timedelta64(1, 'm')])) + ]}, self.storage.get_measures(self.metric, [aggregation])) # Now store brand new points that should force a rewrite of one of the # split (keep in mind the back window size in one hour here). We move @@ -429,16 +427,14 @@ class TestStorageDriver(tests_base.TestCase): assertCompressedIfWriteFull( carbonara.AggregatedTimeSerie.is_compressed(data)) - self.assertEqual([ + self.assertEqual({"mean": [ (datetime64(2016, 1, 1, 12), numpy.timedelta64(1, 'm'), 69), (datetime64(2016, 1, 2, 13, 7), numpy.timedelta64(1, 'm'), 42), (datetime64(2016, 1, 4, 14, 9), numpy.timedelta64(1, 'm'), 4), (datetime64(2016, 1, 6, 15, 12), numpy.timedelta64(1, 'm'), 44), (datetime64(2016, 1, 10, 16, 18), numpy.timedelta64(1, 'm'), 45), (datetime64(2016, 1, 10, 17, 12), numpy.timedelta64(1, 'm'), 46), - ], self.storage.get_measures( - self.metric, - granularities=[numpy.timedelta64(1, 'm')])) + ]}, self.storage.get_measures(self.metric, [aggregation])) def test_rewrite_measures_oldest_mutable_timestamp_eq_next_key(self): """See LP#1655422""" @@ -495,14 +491,15 @@ class TestStorageDriver(tests_base.TestCase): assertCompressedIfWriteFull( carbonara.AggregatedTimeSerie.is_compressed(data)) - self.assertEqual([ + aggregation = self.metric.archive_policy.get_aggregation( + "mean", 
numpy.timedelta64(1, 'm')) + + self.assertEqual({"mean": [ (datetime64(2016, 1, 1, 12), numpy.timedelta64(1, 'm'), 69), (datetime64(2016, 1, 2, 13, 7), numpy.timedelta64(1, 'm'), 42), (datetime64(2016, 1, 4, 14, 9), numpy.timedelta64(1, 'm'), 4), (datetime64(2016, 1, 6, 15, 12), numpy.timedelta64(1, 'm'), 44), - ], self.storage.get_measures( - self.metric, - granularities=[numpy.timedelta64(60, 's')])) + ]}, self.storage.get_measures(self.metric, [aggregation])) # Now store brand new points that should force a rewrite of one of the # split (keep in mind the back window size in one hour here). We move @@ -554,15 +551,13 @@ class TestStorageDriver(tests_base.TestCase): assertCompressedIfWriteFull( carbonara.AggregatedTimeSerie.is_compressed(data)) - self.assertEqual([ + self.assertEqual({"mean": [ (datetime64(2016, 1, 1, 12), numpy.timedelta64(1, 'm'), 69), (datetime64(2016, 1, 2, 13, 7), numpy.timedelta64(1, 'm'), 42), (datetime64(2016, 1, 4, 14, 9), numpy.timedelta64(1, 'm'), 4), (datetime64(2016, 1, 6, 15, 12), numpy.timedelta64(1, 'm'), 44), (datetime64(2016, 1, 10, 0, 12), numpy.timedelta64(1, 'm'), 45), - ], self.storage.get_measures( - self.metric, - granularities=[numpy.timedelta64(60, 's')])) + ]}, self.storage.get_measures(self.metric, [aggregation])) def test_rewrite_measures_corruption_missing_file(self): # Create an archive policy that spans on several splits. 
Each split @@ -619,7 +614,10 @@ class TestStorageDriver(tests_base.TestCase): assertCompressedIfWriteFull( carbonara.AggregatedTimeSerie.is_compressed(data)) - self.assertEqual([ + aggregation = self.metric.archive_policy.get_aggregation( + "mean", numpy.timedelta64(1, 'm')) + + self.assertEqual({"mean": [ (datetime64(2016, 1, 1, 12), numpy.timedelta64(1, 'm'), 69), (datetime64(2016, 1, 2, 13, 7), @@ -628,9 +626,7 @@ class TestStorageDriver(tests_base.TestCase): numpy.timedelta64(1, 'm'), 4), (datetime64(2016, 1, 6, 15, 12), numpy.timedelta64(1, 'm'), 44), - ], self.storage.get_measures( - self.metric, - granularities=[numpy.timedelta64(60, 's')])) + ]}, self.storage.get_measures(self.metric, [aggregation])) # Test what happens if we delete the latest split and then need to # compress it! @@ -704,14 +700,15 @@ class TestStorageDriver(tests_base.TestCase): assertCompressedIfWriteFull( carbonara.AggregatedTimeSerie.is_compressed(data)) - self.assertEqual([ + aggregation = self.metric.archive_policy.get_aggregation( + "mean", numpy.timedelta64(1, 'm')) + + self.assertEqual({"mean": [ (datetime64(2016, 1, 1, 12), numpy.timedelta64(1, 'm'), 69), (datetime64(2016, 1, 2, 13, 7), numpy.timedelta64(1, 'm'), 42), (datetime64(2016, 1, 4, 14, 9), numpy.timedelta64(1, 'm'), 4), (datetime64(2016, 1, 6, 15, 12), numpy.timedelta64(1, 'm'), 44), - ], self.storage.get_measures( - self.metric, - granularities=[numpy.timedelta64(1, 'm')])) + ]}, self.storage.get_measures(self.metric, [aggregation])) # Test what happens if we write garbage self.storage._store_metric_measures( @@ -738,18 +735,16 @@ class TestStorageDriver(tests_base.TestCase): ]) self.trigger_processing() - granularities = [ - numpy.timedelta64(1, 'D'), - numpy.timedelta64(1, 'h'), - numpy.timedelta64(5, 'm'), - ] + aggregations = ( + self.metric.archive_policy.get_aggregations_for_method("mean") + ) - self.assertEqual([ + self.assertEqual({"mean": [ (datetime64(2014, 1, 1), numpy.timedelta64(1, 'D'), 55.5), 
(datetime64(2014, 1, 1, 12), numpy.timedelta64(1, 'h'), 55.5), (datetime64(2014, 1, 1, 12), numpy.timedelta64(5, 'm'), 69), (datetime64(2014, 1, 1, 12, 5), numpy.timedelta64(5, 'm'), 42.0), - ], self.storage.get_measures(self.metric, granularities)) + ]}, self.storage.get_measures(self.metric, aggregations)) self.incoming.add_measures(self.metric.id, [ incoming.Measure(datetime64(2014, 1, 1, 12, 9, 31), 4), @@ -757,31 +752,37 @@ class TestStorageDriver(tests_base.TestCase): ]) self.trigger_processing() - self.assertEqual([ + self.assertEqual({"mean": [ (datetime64(2014, 1, 1), numpy.timedelta64(1, 'D'), 39.75), (datetime64(2014, 1, 1, 12), numpy.timedelta64(1, 'h'), 39.75), (datetime64(2014, 1, 1, 12), numpy.timedelta64(5, 'm'), 69.0), (datetime64(2014, 1, 1, 12, 5), numpy.timedelta64(5, 'm'), 23.0), (datetime64(2014, 1, 1, 12, 10), numpy.timedelta64(5, 'm'), 44.0), - ], self.storage.get_measures(self.metric, granularities)) + ]}, self.storage.get_measures(self.metric, aggregations)) - self.assertEqual([ + aggregations = ( + self.metric.archive_policy.get_aggregations_for_method("max") + ) + + self.assertEqual({"max": [ (datetime64(2014, 1, 1), numpy.timedelta64(1, 'D'), 69), (datetime64(2014, 1, 1, 12), numpy.timedelta64(1, 'h'), 69.0), (datetime64(2014, 1, 1, 12), numpy.timedelta64(5, 'm'), 69.0), (datetime64(2014, 1, 1, 12, 5), numpy.timedelta64(5, 'm'), 42.0), (datetime64(2014, 1, 1, 12, 10), numpy.timedelta64(5, 'm'), 44.0), - ], self.storage.get_measures(self.metric, - granularities, aggregation='max')) + ]}, self.storage.get_measures(self.metric, aggregations)) + + aggregations = ( + self.metric.archive_policy.get_aggregations_for_method("min") + ) - self.assertEqual([ + self.assertEqual({"min": [ (datetime64(2014, 1, 1), numpy.timedelta64(1, 'D'), 4), (datetime64(2014, 1, 1, 12), numpy.timedelta64(1, 'h'), 4), (datetime64(2014, 1, 1, 12), numpy.timedelta64(5, 'm'), 69.0), (datetime64(2014, 1, 1, 12, 5), numpy.timedelta64(5, 'm'), 4.0), (datetime64(2014, 1, 
1, 12, 10), numpy.timedelta64(5, 'm'), 44.0), - ], self.storage.get_measures(self.metric, - granularities, aggregation='min')) + ]}, self.storage.get_measures(self.metric, aggregations)) def test_add_and_get_measures(self): self.incoming.add_measures(self.metric.id, [ @@ -792,89 +793,91 @@ class TestStorageDriver(tests_base.TestCase): ]) self.trigger_processing() - granularities = [ - numpy.timedelta64(1, 'D'), - numpy.timedelta64(1, 'h'), - numpy.timedelta64(5, 'm'), - ] + aggregations = ( + self.metric.archive_policy.get_aggregations_for_method("mean") + ) - self.assertEqual([ + self.assertEqual({"mean": [ (datetime64(2014, 1, 1), numpy.timedelta64(1, 'D'), 39.75), (datetime64(2014, 1, 1, 12), numpy.timedelta64(1, 'h'), 39.75), (datetime64(2014, 1, 1, 12), numpy.timedelta64(5, 'm'), 69.0), (datetime64(2014, 1, 1, 12, 5), numpy.timedelta64(5, 'm'), 23.0), (datetime64(2014, 1, 1, 12, 10), numpy.timedelta64(5, 'm'), 44.0), - ], self.storage.get_measures(self.metric, granularities)) + ]}, self.storage.get_measures(self.metric, aggregations)) - self.assertEqual([ + self.assertEqual({"mean": [ (datetime64(2014, 1, 1), numpy.timedelta64(1, 'D'), 39.75), (datetime64(2014, 1, 1, 12), numpy.timedelta64(1, 'h'), 39.75), (datetime64(2014, 1, 1, 12, 10), numpy.timedelta64(5, 'm'), 44.0), - ], self.storage.get_measures( - self.metric, - granularities, + ]}, self.storage.get_measures( + self.metric, aggregations, from_timestamp=datetime64(2014, 1, 1, 12, 10, 0))) - self.assertEqual([ + self.assertEqual({"mean": [ (datetime64(2014, 1, 1), numpy.timedelta64(1, 'D'), 39.75), (datetime64(2014, 1, 1, 12), numpy.timedelta64(1, 'h'), 39.75), (datetime64(2014, 1, 1, 12), numpy.timedelta64(5, 'm'), 69.0), (datetime64(2014, 1, 1, 12, 5), numpy.timedelta64(5, 'm'), 23.0), - ], self.storage.get_measures( - self.metric, - granularities, + ]}, self.storage.get_measures( + self.metric, aggregations, to_timestamp=datetime64(2014, 1, 1, 12, 6, 0))) - self.assertEqual([ + 
self.assertEqual({"mean": [ (datetime64(2014, 1, 1), numpy.timedelta64(1, 'D'), 39.75), (datetime64(2014, 1, 1, 12), numpy.timedelta64(1, 'h'), 39.75), (datetime64(2014, 1, 1, 12, 10), numpy.timedelta64(5, 'm'), 44.0), - ], self.storage.get_measures( - self.metric, - granularities, + ]}, self.storage.get_measures( + self.metric, aggregations, to_timestamp=datetime64(2014, 1, 1, 12, 10, 10), from_timestamp=datetime64(2014, 1, 1, 12, 10, 10))) - self.assertEqual([ + self.assertEqual({"mean": [ (datetime64(2014, 1, 1), numpy.timedelta64(1, 'D'), 39.75), (datetime64(2014, 1, 1, 12), numpy.timedelta64(1, 'h'), 39.75), (datetime64(2014, 1, 1, 12), numpy.timedelta64(5, 'm'), 69.0), - ], self.storage.get_measures( - self.metric, - granularities, + ]}, self.storage.get_measures( + self.metric, aggregations, from_timestamp=datetime64(2014, 1, 1, 12, 0, 0), to_timestamp=datetime64(2014, 1, 1, 12, 0, 2))) - self.assertEqual([ + self.assertEqual({"mean": [ (datetime64(2014, 1, 1), numpy.timedelta64(1, 'D'), 39.75), (datetime64(2014, 1, 1, 12), numpy.timedelta64(1, 'h'), 39.75), (datetime64(2014, 1, 1, 12), numpy.timedelta64(5, 'm'), 69.0), - ], self.storage.get_measures( - self.metric, - granularities, + ]}, self.storage.get_measures( + self.metric, aggregations, from_timestamp=datetime64(2014, 1, 1, 12), to_timestamp=datetime64(2014, 1, 1, 12, 0, 2))) - self.assertEqual([ + aggregation_1h = ( + self.metric.archive_policy.get_aggregation( + "mean", numpy.timedelta64(1, 'h')) + ) + + self.assertEqual({"mean": [ (datetime64(2014, 1, 1, 12), numpy.timedelta64(1, 'h'), 39.75), - ], self.storage.get_measures( - self.metric, + ]}, self.storage.get_measures( + self.metric, [aggregation_1h], from_timestamp=datetime64(2014, 1, 1, 12, 0, 0), - to_timestamp=datetime64(2014, 1, 1, 12, 0, 2), - granularities=[numpy.timedelta64(1, 'h')])) + to_timestamp=datetime64(2014, 1, 1, 12, 0, 2))) - self.assertEqual([ + aggregation_5m = ( + self.metric.archive_policy.get_aggregation( + "mean", 
numpy.timedelta64(5, 'm')) + ) + + self.assertEqual({"mean": [ (datetime64(2014, 1, 1, 12), numpy.timedelta64(5, 'm'), 69.0), - ], self.storage.get_measures( - self.metric, + ]}, self.storage.get_measures( + self.metric, [aggregation_5m], from_timestamp=datetime64(2014, 1, 1, 12, 0, 0), - to_timestamp=datetime64(2014, 1, 1, 12, 0, 2), - granularities=[numpy.timedelta64(5, 'm')])) + to_timestamp=datetime64(2014, 1, 1, 12, 0, 2))) - self.assertRaises(storage.AggregationDoesNotExist, - self.storage.get_measures, - self.metric, - granularities=[numpy.timedelta64(42, 's')]) + self.assertEqual({"mean": []}, + self.storage.get_measures( + self.metric, + [gaggregation.Aggregation( + "mean", numpy.timedelta64(42, 's'), None)])) def test_get_measure_unknown_aggregation(self): self.incoming.add_measures(self.metric.id, [ @@ -883,14 +886,13 @@ class TestStorageDriver(tests_base.TestCase): incoming.Measure(datetime64(2014, 1, 1, 12, 9, 31), 4), incoming.Measure(datetime64(2014, 1, 1, 12, 12, 45), 44), ]) - granularities = [ - numpy.timedelta64(1, 'D'), - numpy.timedelta64(1, 'h'), - numpy.timedelta64(5, 'm'), - ] + + aggregations = ( + self.metric.archive_policy.get_aggregations_for_method("last") + ) + self.assertEqual( - [], self.storage.get_measures( - self.metric, granularities, aggregation='last')) + {"last": []}, self.storage.get_measures(self.metric, aggregations)) def test_find_measures(self): metric2, __ = self._create_metric() @@ -964,11 +966,15 @@ class TestStorageDriver(tests_base.TestCase): incoming.Measure(datetime64(2014, 1, 1, 12, 0, 10), 1), ]) self.trigger_processing([str(m.id)]) - self.assertEqual([ + + aggregation = m.archive_policy.get_aggregation( + "mean", numpy.timedelta64(5, 's')) + + self.assertEqual({"mean": [ (datetime64(2014, 1, 1, 12, 0, 0), numpy.timedelta64(5, 's'), 1), (datetime64(2014, 1, 1, 12, 0, 5), numpy.timedelta64(5, 's'), 1), (datetime64(2014, 1, 1, 12, 0, 10), numpy.timedelta64(5, 's'), 1), - ], self.storage.get_measures(m, 
[numpy.timedelta64(5, 's')])) + ]}, self.storage.get_measures(m, [aggregation])) # expand to more points self.index.update_archive_policy( name, [archive_policy.ArchivePolicyItem(granularity=5, points=6)]) @@ -977,27 +983,30 @@ class TestStorageDriver(tests_base.TestCase): incoming.Measure(datetime64(2014, 1, 1, 12, 0, 15), 1), ]) self.trigger_processing([str(m.id)]) - self.assertEqual([ - (datetime64(2014, 1, 1, 12, 0, 0), numpy.timedelta64(5, 's'), 1), + self.assertEqual({"mean": [ (datetime64(2014, 1, 1, 12, 0, 5), numpy.timedelta64(5, 's'), 1), (datetime64(2014, 1, 1, 12, 0, 10), numpy.timedelta64(5, 's'), 1), (datetime64(2014, 1, 1, 12, 0, 15), numpy.timedelta64(5, 's'), 1), - ], self.storage.get_measures(m, [numpy.timedelta64(5, 's')])) + ]}, self.storage.get_measures(m, [aggregation])) # shrink timespan self.index.update_archive_policy( name, [archive_policy.ArchivePolicyItem(granularity=5, points=2)]) m = self.index.list_metrics(attribute_filter={"=": {"id": m.id}})[0] - self.assertEqual([ + aggregation = m.archive_policy.get_aggregation( + "mean", numpy.timedelta64(5, 's')) + self.assertEqual({"mean": [ (datetime64(2014, 1, 1, 12, 0, 10), numpy.timedelta64(5, 's'), 1), (datetime64(2014, 1, 1, 12, 0, 15), numpy.timedelta64(5, 's'), 1), - ], self.storage.get_measures(m, [numpy.timedelta64(5, 's')])) + ]}, self.storage.get_measures(m, [aggregation])) def test_resample_no_metric(self): """https://github.com/gnocchixyz/gnocchi/issues/69""" - self.assertEqual([], + aggregation = self.metric.archive_policy.get_aggregation( + "mean", numpy.timedelta64(300, 's')) + self.assertEqual({"mean": []}, self.storage.get_measures( self.metric, - [numpy.timedelta64(300, 's')], + [aggregation], datetime64(2014, 1, 1), datetime64(2015, 1, 1), resample=numpy.timedelta64(1, 'h'))) -- GitLab From 82d938432910d17bf21d6dd83c36e31f5848b99f Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Tue, 6 Feb 2018 17:30:54 +0100 Subject: [PATCH 1226/1483] tests: test upgrade from Gnocchi 4.2 
--- .travis.yml | 2 ++ tox.ini | 24 ++++++++++++++++++++++++ 2 files changed, 26 insertions(+) diff --git a/.travis.yml b/.travis.yml index 84f554f7..9525de9f 100644 --- a/.travis.yml +++ b/.travis.yml @@ -18,6 +18,8 @@ env: - TARGET: py35-postgresql-file-upgrade-from-4.0 - TARGET: py27-mysql-ceph-upgrade-from-4.1 - TARGET: py35-postgresql-file-upgrade-from-4.1 + - TARGET: py27-mysql-ceph-upgrade-from-4.2 + - TARGET: py35-postgresql-file-upgrade-from-4.2 - TARGET: py27-mysql - TARGET: py35-mysql diff --git a/tox.ini b/tox.ini index adedd474..563043f8 100644 --- a/tox.ini +++ b/tox.ini @@ -119,6 +119,30 @@ deps = gnocchi[{env:GNOCCHI_VARIANT}]>=4.1,<4.2 pifpaf[ceph,gnocchi]>=0.13 commands = pifpaf --env-prefix INDEXER run mysql -- pifpaf --env-prefix STORAGE run ceph {toxinidir}/run-upgrade-tests.sh {posargs} +[testenv:py35-postgresql-file-upgrade-from-4.2] +# We should always recreate since the script upgrade +# Gnocchi we can't reuse the virtualenv +recreate = True +skip_install = True +usedevelop = False +setenv = GNOCCHI_VARIANT=test,postgresql,file +deps = gnocchi[{env:GNOCCHI_VARIANT}]>=4.2,<4.3 + pifpaf[gnocchi]>=0.13 + gnocchiclient>=2.8.0 +commands = pifpaf --env-prefix INDEXER run postgresql {toxinidir}/run-upgrade-tests.sh {posargs} + +[testenv:py27-mysql-ceph-upgrade-from-4.2] +# We should always recreate since the script upgrade +# Gnocchi we can't reuse the virtualenv +recreate = True +skip_install = True +usedevelop = False +setenv = GNOCCHI_VARIANT=test,mysql,ceph,ceph_recommended_lib +deps = gnocchi[{env:GNOCCHI_VARIANT}]>=4.2,<4.3 + gnocchiclient>=2.8.0 + pifpaf[ceph,gnocchi]>=0.13 +commands = pifpaf --env-prefix INDEXER run mysql -- pifpaf --env-prefix STORAGE run ceph {toxinidir}/run-upgrade-tests.sh {posargs} + [testenv:pep8] deps = hacking>=0.12,<0.13 bashate -- GitLab From 7cda69fd3695932130a83d04dff27f52ac073a98 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Tue, 6 Feb 2018 17:32:44 +0100 Subject: [PATCH 1227/1483] Stop testing upgrade 
from Gnocchi 3.1 --- .travis.yml | 2 -- tox.ini | 24 ------------------------ 2 files changed, 26 deletions(-) diff --git a/.travis.yml b/.travis.yml index 9525de9f..c63fc703 100644 --- a/.travis.yml +++ b/.travis.yml @@ -12,8 +12,6 @@ env: - TARGET: docs - TARGET: docs-gnocchi.xyz - - TARGET: py27-mysql-ceph-upgrade-from-3.1 - - TARGET: py35-postgresql-file-upgrade-from-3.1 - TARGET: py27-mysql-ceph-upgrade-from-4.0 - TARGET: py35-postgresql-file-upgrade-from-4.0 - TARGET: py27-mysql-ceph-upgrade-from-4.1 diff --git a/tox.ini b/tox.ini index 563043f8..be301704 100644 --- a/tox.ini +++ b/tox.ini @@ -47,30 +47,6 @@ commands = {toxinidir}/run-tests.sh {posargs} {toxinidir}/run-func-tests.sh {posargs} -[testenv:py35-postgresql-file-upgrade-from-3.1] -# We should always recreate since the script upgrade -# Gnocchi we can't reuse the virtualenv -recreate = True -skip_install = True -usedevelop = False -setenv = GNOCCHI_VARIANT=test,postgresql,file -deps = gnocchi[{env:GNOCCHI_VARIANT}]>=3.1,<3.2 - pifpaf[gnocchi]>=0.13 - gnocchiclient>=2.8.0 -commands = pifpaf --env-prefix INDEXER run postgresql {toxinidir}/run-upgrade-tests.sh {posargs} - -[testenv:py27-mysql-ceph-upgrade-from-3.1] -# We should always recreate since the script upgrade -# Gnocchi we can't reuse the virtualenv -recreate = True -skip_install = True -usedevelop = False -setenv = GNOCCHI_VARIANT=test,mysql,ceph,ceph_recommended_lib -deps = gnocchi[{env:GNOCCHI_VARIANT}]>=3.1,<3.2 - gnocchiclient>=2.8.0 - pifpaf[ceph,gnocchi]>=0.13 -commands = pifpaf --env-prefix INDEXER run mysql -- pifpaf --env-prefix STORAGE run ceph {toxinidir}/run-upgrade-tests.sh {posargs} - [testenv:py35-postgresql-file-upgrade-from-4.0] # We should always recreate since the script upgrade # Gnocchi we can't reuse the virtualenv -- GitLab From 9b1c506f0a69fb39d30d348418a83a975df259c8 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Wed, 7 Feb 2018 08:48:40 +0100 Subject: [PATCH 1228/1483] tox: don't install gnocchi twice We current 
install Gnocchi twice, once with no extra and develop mode, and a second time with extra but without develop mode. This change install gnocchi only once with extra and develop mode. --- tox.ini | 18 ++++-------------- 1 file changed, 4 insertions(+), 14 deletions(-) diff --git a/tox.ini b/tox.ini index be301704..8e14e49e 100644 --- a/tox.ini +++ b/tox.ini @@ -3,7 +3,7 @@ minversion = 2.4 envlist = py{35,27}-{postgresql,mysql}{,-file,-swift,-ceph,-s3},pep8 [testenv] -usedevelop = True +skip_install = True sitepackages = False passenv = LANG GNOCCHI_TEST_* AWS_* setenv = @@ -40,7 +40,9 @@ setenv = # NOTE(jd) Install redis as a test dependency since it is used as a # coordination driver in functional tests (--coordination-driver is passed to # pifpaf) -deps = .[test,redis,prometheus,{env:GNOCCHI_STORAGE_DEPS:},{env:GNOCCHI_INDEXER_DEPS:}] +deps = + -e + .[test,redis,prometheus,{env:GNOCCHI_STORAGE_DEPS:},{env:GNOCCHI_INDEXER_DEPS:}] {env:GNOCCHI_TEST_TARBALLS:} cliff!=2.9.0 commands = @@ -51,8 +53,6 @@ commands = # We should always recreate since the script upgrade # Gnocchi we can't reuse the virtualenv recreate = True -skip_install = True -usedevelop = False setenv = GNOCCHI_VARIANT=test,postgresql,file deps = gnocchi[{env:GNOCCHI_VARIANT}]>=4.0,<4.1 pifpaf[gnocchi]>=0.13 @@ -63,8 +63,6 @@ commands = pifpaf --env-prefix INDEXER run postgresql {toxinidir}/run-upgrade-te # We should always recreate since the script upgrade # Gnocchi we can't reuse the virtualenv recreate = True -skip_install = True -usedevelop = False setenv = GNOCCHI_VARIANT=test,mysql,ceph,ceph_recommended_lib deps = gnocchi[{env:GNOCCHI_VARIANT}]>=4.0,<4.1 gnocchiclient>=2.8.0 @@ -75,8 +73,6 @@ commands = pifpaf --env-prefix INDEXER run mysql -- pifpaf --env-prefix STORAGE # We should always recreate since the script upgrade # Gnocchi we can't reuse the virtualenv recreate = True -skip_install = True -usedevelop = False setenv = GNOCCHI_VARIANT=test,postgresql,file deps = 
gnocchi[{env:GNOCCHI_VARIANT}]>=4.1,<4.2 pifpaf[gnocchi]>=0.13 @@ -87,8 +83,6 @@ commands = pifpaf --env-prefix INDEXER run postgresql {toxinidir}/run-upgrade-te # We should always recreate since the script upgrade # Gnocchi we can't reuse the virtualenv recreate = True -skip_install = True -usedevelop = False setenv = GNOCCHI_VARIANT=test,mysql,ceph,ceph_recommended_lib deps = gnocchi[{env:GNOCCHI_VARIANT}]>=4.1,<4.2 gnocchiclient>=2.8.0 @@ -99,8 +93,6 @@ commands = pifpaf --env-prefix INDEXER run mysql -- pifpaf --env-prefix STORAGE # We should always recreate since the script upgrade # Gnocchi we can't reuse the virtualenv recreate = True -skip_install = True -usedevelop = False setenv = GNOCCHI_VARIANT=test,postgresql,file deps = gnocchi[{env:GNOCCHI_VARIANT}]>=4.2,<4.3 pifpaf[gnocchi]>=0.13 @@ -111,8 +103,6 @@ commands = pifpaf --env-prefix INDEXER run postgresql {toxinidir}/run-upgrade-te # We should always recreate since the script upgrade # Gnocchi we can't reuse the virtualenv recreate = True -skip_install = True -usedevelop = False setenv = GNOCCHI_VARIANT=test,mysql,ceph,ceph_recommended_lib deps = gnocchi[{env:GNOCCHI_VARIANT}]>=4.2,<4.3 gnocchiclient>=2.8.0 -- GitLab From d09401ba3bc1cc6bf556454a7a5f8600dcabb1ee Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Wed, 7 Feb 2018 09:23:54 +0100 Subject: [PATCH 1229/1483] travis: don't bootstrap useless pythonenv --- .travis.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index c63fc703..eaaa210e 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,4 +1,4 @@ -language: python +language: generic sudo: required services: -- GitLab From b1c4722a5df424363ce7515fc4a9db17777bcea1 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Tue, 6 Feb 2018 17:17:46 +0100 Subject: [PATCH 1230/1483] storage: stop requiring a coordinator The storage engine does not need a coordinator, except for locking sacks. 
However sacks do not belong to the storage driver itself, so it makes no sense to requires one. --- gnocchi/cli/manage.py | 5 +---- gnocchi/cli/metricd.py | 4 ++-- gnocchi/rest/api.py | 2 ++ gnocchi/rest/app.py | 3 +-- gnocchi/storage/__init__.py | 17 +++++++++-------- gnocchi/storage/ceph.py | 4 ++-- gnocchi/storage/file.py | 4 ++-- gnocchi/storage/redis.py | 4 ++-- gnocchi/storage/s3.py | 4 ++-- gnocchi/storage/swift.py | 4 ++-- gnocchi/tests/base.py | 2 +- gnocchi/tests/functional/fixtures.py | 10 ++++++---- gnocchi/tests/test_storage.py | 10 ++++++---- 13 files changed, 38 insertions(+), 35 deletions(-) diff --git a/gnocchi/cli/manage.py b/gnocchi/cli/manage.py index dd94ae03..7ecb1ef5 100644 --- a/gnocchi/cli/manage.py +++ b/gnocchi/cli/manage.py @@ -61,10 +61,7 @@ def upgrade(): LOG.info("Upgrading indexer %s", index) index.upgrade() if not conf.skip_storage: - # FIXME(jd) Pass None as coordinator because it's not needed in this - # case. This will be removed when the storage will stop requiring a - # coordinator object. 
- s = storage.get_driver(conf, None) + s = storage.get_driver(conf) LOG.info("Upgrading storage %s", s) s.upgrade() if not conf.skip_incoming: diff --git a/gnocchi/cli/metricd.py b/gnocchi/cli/metricd.py index 36da079a..353edf1d 100644 --- a/gnocchi/cli/metricd.py +++ b/gnocchi/cli/metricd.py @@ -68,7 +68,7 @@ class MetricProcessBase(cotyledon.Service): str(uuid.uuid4())) self.coord = get_coordinator_and_start(member_id, self.conf.coordination_url) - self.store = storage.get_driver(self.conf, self.coord) + self.store = storage.get_driver(self.conf) self.incoming = incoming.get_driver(self.conf) self.index = indexer.get_driver(self.conf) @@ -266,7 +266,7 @@ class MetricJanitor(MetricProcessBase): worker_id, conf, conf.metricd.metric_cleanup_delay) def _run_job(self): - self.store.expunge_metrics(self.incoming, self.index) + self.store.expunge_metrics(self.coord, self.incoming, self.index) LOG.debug("Metrics marked for deletion removed from backend") diff --git a/gnocchi/rest/api.py b/gnocchi/rest/api.py index d1c1de81..fc3f30f1 100644 --- a/gnocchi/rest/api.py +++ b/gnocchi/rest/api.py @@ -549,6 +549,7 @@ class MetricController(rest.RestController): pecan.request.incoming.has_unprocessed(self.metric.id)): try: pecan.request.storage.refresh_metric( + pecan.request.coordinator, pecan.request.indexer, pecan.request.incoming, self.metric, pecan.request.conf.api.operation_timeout) except storage.SackLockTimeoutError as e: @@ -1928,6 +1929,7 @@ class AggregationController(rest.RestController): for m in metrics_to_update: try: pecan.request.storage.refresh_metric( + pecan.request.coordinator, pecan.request.indexer, pecan.request.incoming, m, pecan.request.conf.api.operation_timeout) except storage.SackLockTimeoutError as e: diff --git a/gnocchi/rest/app.py b/gnocchi/rest/app.py index 3a837d15..9f9324b5 100644 --- a/gnocchi/rest/app.py +++ b/gnocchi/rest/app.py @@ -97,9 +97,8 @@ class GnocchiHook(pecan.hooks.PecanHook): self.conf.coordination_url) ) elif name == "storage": 
- coord = self._lazy_load("coordinator") self.backends[name] = ( - gnocchi_storage.get_driver(self.conf, coord) + gnocchi_storage.get_driver(self.conf) ) elif name == "incoming": self.backends[name] = ( diff --git a/gnocchi/storage/__init__.py b/gnocchi/storage/__init__.py index a2629cf4..32379287 100644 --- a/gnocchi/storage/__init__.py +++ b/gnocchi/storage/__init__.py @@ -112,16 +112,17 @@ class SackLockTimeoutError(StorageError): @utils.retry_on_exception_and_log("Unable to initialize storage driver") -def get_driver(conf, coord): +def get_driver(conf): """Return the configured driver.""" return utils.get_driver_class('gnocchi.storage', conf.storage)( - conf.storage, coord) + conf.storage) class StorageDriver(object): - def __init__(self, conf, coord): - self.coord = coord + @staticmethod + def __init__(conf): + pass @staticmethod def upgrade(): @@ -418,9 +419,9 @@ class StorageDriver(object): aggregation, granularity, version=3): raise NotImplementedError - def refresh_metric(self, indexer, incoming, metric, timeout): + def refresh_metric(self, coord, indexer, incoming, metric, timeout): s = incoming.sack_for_metric(metric.id) - lock = incoming.get_sack_lock(self.coord, s) + lock = incoming.get_sack_lock(coord, s) if not lock.acquire(blocking=timeout): raise SackLockTimeoutError( 'Unable to refresh metric: %s. Metric is locked. 
' @@ -431,7 +432,7 @@ class StorageDriver(object): finally: lock.release() - def expunge_metrics(self, incoming, index, sync=False): + def expunge_metrics(self, coord, incoming, index, sync=False): """Remove deleted metrics :param incoming: The incoming storage @@ -448,7 +449,7 @@ class StorageDriver(object): for sack, metrics in itertools.groupby( metrics_to_expunge, key=ITEMGETTER_1): try: - lock = incoming.get_sack_lock(self.coord, sack) + lock = incoming.get_sack_lock(coord, sack) if not lock.acquire(blocking=sync): # Retry later LOG.debug( diff --git a/gnocchi/storage/ceph.py b/gnocchi/storage/ceph.py index e0fac7c5..133ebe7e 100644 --- a/gnocchi/storage/ceph.py +++ b/gnocchi/storage/ceph.py @@ -43,8 +43,8 @@ rados = ceph.rados class CephStorage(storage.StorageDriver): WRITE_FULL = False - def __init__(self, conf, coord=None): - super(CephStorage, self).__init__(conf, coord) + def __init__(self, conf): + super(CephStorage, self).__init__(conf) self.rados, self.ioctx = ceph.create_rados_connection(conf) def __str__(self): diff --git a/gnocchi/storage/file.py b/gnocchi/storage/file.py index 7f217e11..9074e02b 100644 --- a/gnocchi/storage/file.py +++ b/gnocchi/storage/file.py @@ -35,8 +35,8 @@ OPTS = [ class FileStorage(storage.StorageDriver): WRITE_FULL = True - def __init__(self, conf, coord=None): - super(FileStorage, self).__init__(conf, coord) + def __init__(self, conf): + super(FileStorage, self).__init__(conf) self.basepath = conf.file_basepath self.basepath_tmp = os.path.join(self.basepath, 'tmp') diff --git a/gnocchi/storage/redis.py b/gnocchi/storage/redis.py index 1c6c73b0..1ab55031 100644 --- a/gnocchi/storage/redis.py +++ b/gnocchi/storage/redis.py @@ -27,8 +27,8 @@ class RedisStorage(storage.StorageDriver): STORAGE_PREFIX = b"timeseries" FIELD_SEP = '_' - def __init__(self, conf, coord=None): - super(RedisStorage, self).__init__(conf, coord) + def __init__(self, conf): + super(RedisStorage, self).__init__(conf) self._client = redis.get_client(conf) 
def __str__(self): diff --git a/gnocchi/storage/s3.py b/gnocchi/storage/s3.py index 3e8c9d1b..1063bf7b 100644 --- a/gnocchi/storage/s3.py +++ b/gnocchi/storage/s3.py @@ -68,8 +68,8 @@ class S3Storage(storage.StorageDriver): _consistency_wait = tenacity.wait_exponential(multiplier=0.1) - def __init__(self, conf, coord=None): - super(S3Storage, self).__init__(conf, coord) + def __init__(self, conf): + super(S3Storage, self).__init__(conf) self.s3, self._region_name, self._bucket_prefix = ( s3.get_connection(conf) ) diff --git a/gnocchi/storage/swift.py b/gnocchi/storage/swift.py index e13aa0de..7dc68225 100644 --- a/gnocchi/storage/swift.py +++ b/gnocchi/storage/swift.py @@ -83,8 +83,8 @@ class SwiftStorage(storage.StorageDriver): WRITE_FULL = True - def __init__(self, conf, coord=None): - super(SwiftStorage, self).__init__(conf, coord) + def __init__(self, conf): + super(SwiftStorage, self).__init__(conf) self.swift = swift.get_connection(conf) self._container_prefix = conf.swift_container_prefix diff --git a/gnocchi/tests/base.py b/gnocchi/tests/base.py index 4b01611a..45d8c8c9 100644 --- a/gnocchi/tests/base.py +++ b/gnocchi/tests/base.py @@ -352,7 +352,7 @@ class TestCase(BaseTestCase): self.conf.set_override("s3_bucket_prefix", str(uuid.uuid4())[:26], "storage") - self.storage = storage.get_driver(self.conf, self.coord) + self.storage = storage.get_driver(self.conf) self.incoming = incoming.get_driver(self.conf) if self.conf.storage.driver == 'redis': diff --git a/gnocchi/tests/functional/fixtures.py b/gnocchi/tests/functional/fixtures.py index 686af0f6..f7aa7dec 100644 --- a/gnocchi/tests/functional/fixtures.py +++ b/gnocchi/tests/functional/fixtures.py @@ -146,7 +146,7 @@ class ConfigFixture(fixture.GabbiFixture): self.coord = metricd.get_coordinator_and_start(str(uuid.uuid4()), conf.coordination_url) - s = storage.get_driver(conf, self.coord) + s = storage.get_driver(conf) s.upgrade() i = incoming.get_driver(conf) i.upgrade(128) @@ -170,7 +170,7 @@ class 
ConfigFixture(fixture.GabbiFixture): } # start up a thread to async process measures - self.metricd_thread = MetricdThread(index, s, i) + self.metricd_thread = MetricdThread(self.coord, index, s, i) self.metricd_thread.start() def stop_fixture(self): @@ -208,8 +208,9 @@ class ConfigFixture(fixture.GabbiFixture): class MetricdThread(threading.Thread): """Run metricd in a naive thread to process measures.""" - def __init__(self, index, storer, incoming, name='metricd'): + def __init__(self, coord, index, storer, incoming, name='metricd'): super(MetricdThread, self).__init__(name=name) + self.coord = coord self.index = index self.storage = storer self.incoming = incoming @@ -221,7 +222,8 @@ class MetricdThread(threading.Thread): metrics = self.index.list_metrics( attribute_filter={"in": {"id": metrics}}) for metric in metrics: - self.storage.refresh_metric(self.index, + self.storage.refresh_metric(self.coord, + self.index, self.incoming, metric, timeout=None) diff --git a/gnocchi/tests/test_storage.py b/gnocchi/tests/test_storage.py index fb647e0d..e0fba1d3 100644 --- a/gnocchi/tests/test_storage.py +++ b/gnocchi/tests/test_storage.py @@ -46,7 +46,7 @@ class TestStorageDriver(tests_base.TestCase): self.metric, __ = self._create_metric() def test_driver_str(self): - driver = storage.get_driver(self.conf, None) + driver = storage.get_driver(self.conf) if isinstance(driver, file.FileStorage): s = driver.basepath @@ -63,7 +63,7 @@ class TestStorageDriver(tests_base.TestCase): driver.__class__.__name__, s)) def test_get_driver(self): - driver = storage.get_driver(self.conf, None) + driver = storage.get_driver(self.conf) self.assertIsInstance(driver, storage.StorageDriver) def test_corrupted_data(self): @@ -158,7 +158,8 @@ class TestStorageDriver(tests_base.TestCase): self.trigger_processing() __, __, details = self.incoming._build_report(True) self.assertIn(str(self.metric.id), details) - self.storage.expunge_metrics(self.incoming, self.index, sync=True) + 
self.storage.expunge_metrics(self.coord, + self.incoming, self.index, sync=True) __, __, details = self.incoming._build_report(True) self.assertNotIn(str(self.metric.id), details) @@ -168,7 +169,8 @@ class TestStorageDriver(tests_base.TestCase): ]) self.trigger_processing() self.index.delete_metric(self.metric.id) - self.storage.expunge_metrics(self.incoming, self.index, sync=True) + self.storage.expunge_metrics(self.coord, + self.incoming, self.index, sync=True) self.assertRaises(indexer.NoSuchMetric, self.index.delete_metric, self.metric.id) -- GitLab From 903ee093f226c9209ad765c1fc4b216d9f84ede2 Mon Sep 17 00:00:00 2001 From: gord chung Date: Wed, 7 Feb 2018 15:14:46 +0000 Subject: [PATCH 1231/1483] cleanup storage exceptions - fix formatting - we don't lock metrics in v4.x --- gnocchi/storage/__init__.py | 10 +--------- 1 file changed, 1 insertion(+), 9 deletions(-) diff --git a/gnocchi/storage/__init__.py b/gnocchi/storage/__init__.py index 32379287..e056e229 100644 --- a/gnocchi/storage/__init__.py +++ b/gnocchi/storage/__init__.py @@ -92,14 +92,6 @@ class MetricAlreadyExists(StorageError): "Metric %s already exists" % metric) -class LockedMetric(StorageError): - """Error raised when this metric is already being handled by another.""" - - def __init__(self, metric): - self.metric = metric - super(LockedMetric, self).__init__("Metric %s is locked" % metric) - - class CorruptionError(ValueError, StorageError): """Data corrupted, damn it.""" @@ -108,7 +100,7 @@ class CorruptionError(ValueError, StorageError): class SackLockTimeoutError(StorageError): - pass + pass @utils.retry_on_exception_and_log("Unable to initialize storage driver") -- GitLab From 8fa8c5287c4521e01c52232fd5811a1b824280f4 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Wed, 7 Feb 2018 09:47:17 +0100 Subject: [PATCH 1232/1483] rest: use common function to validate some query arguments --- gnocchi/rest/api.py | 27 +++++--------------- gnocchi/tests/functional/gabbits/metric.yaml | 4 +-- 2 
files changed, 8 insertions(+), 23 deletions(-) diff --git a/gnocchi/rest/api.py b/gnocchi/rest/api.py index fc3f30f1..0a5aa6c2 100644 --- a/gnocchi/rest/api.py +++ b/gnocchi/rest/api.py @@ -494,18 +494,6 @@ class MetricController(rest.RestController): agg=aggregation, std=archive_policy.ArchivePolicy.VALID_AGGREGATION_METHODS)) - if start is not None: - try: - start = utils.to_timestamp(start) - except Exception: - abort(400, "Invalid value for start") - - if stop is not None: - try: - stop = utils.to_timestamp(stop) - except Exception: - abort(400, "Invalid value for stop") - if resample: if not granularity: abort(400, 'A granularity must be specified to resample') @@ -517,13 +505,11 @@ class MetricController(rest.RestController): if granularity is None: granularity = [d.granularity for d in self.metric.archive_policy.definition] + start, stop, _, _, _ = validate_qs( + start=start, stop=stop) else: - try: - granularity = [utils.to_timespan(granularity)] - except ValueError: - abort(400, {"cause": "Attribute value error", - "detail": "granularity", - "reason": "Invalid granularity"}) + start, stop, granularity, _, _ = validate_qs( + start=start, stop=stop, granularity=granularity) if aggregation not in self.metric.archive_policy.aggregation_methods: abort(404, { @@ -1774,9 +1760,8 @@ FillSchema = voluptuous.Schema( msg="Must be a float, 'dropna' or 'null'")) -# FIXME(sileht): should be in aggregates.api but we need to split all -# controllers to do this -def validate_qs(start, stop, granularity, needed_overlap, fill): +def validate_qs(start=None, stop=None, granularity=None, + needed_overlap=None, fill=None): if needed_overlap is not None: try: needed_overlap = float(needed_overlap) diff --git a/gnocchi/tests/functional/gabbits/metric.yaml b/gnocchi/tests/functional/gabbits/metric.yaml index fe934100..3c93af7f 100644 --- a/gnocchi/tests/functional/gabbits/metric.yaml +++ b/gnocchi/tests/functional/gabbits/metric.yaml @@ -207,8 +207,8 @@ tests: accept: 
application/json status: 400 response_json_paths: - $.description.cause: Attribute value error - $.description.reason: Invalid granularity + $.description.cause: Argument value error + $.description.reason: Unable to parse timespan $.description.detail: granularity - name: push measurements to metric again -- GitLab From c01de085128f3dca06fc0b066deaa6387aa8a197 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Thu, 1 Feb 2018 13:21:36 +0100 Subject: [PATCH 1233/1483] redis: use a Lua script to list split keys This improves latency --- gnocchi/storage/redis.py | 41 +++++++++++++++++++++++++++++++--------- 1 file changed, 32 insertions(+), 9 deletions(-) diff --git a/gnocchi/storage/redis.py b/gnocchi/storage/redis.py index 1ab55031..ce2a5db2 100644 --- a/gnocchi/storage/redis.py +++ b/gnocchi/storage/redis.py @@ -27,9 +27,36 @@ class RedisStorage(storage.StorageDriver): STORAGE_PREFIX = b"timeseries" FIELD_SEP = '_' + _SCRIPTS = { + "list_split_keys": """ +local metric_key = KEYS[1] +local ids = {} +local cursor = 0 +local substring = "([^" .. ARGV[2] .. 
"]*)" +repeat + local result = redis.call("HSCAN", metric_key, cursor, "MATCH", ARGV[1]) + cursor = tonumber(result[1]) + for i, v in ipairs(result[2]) do + -- Only return keys, not values + if i % 2 ~= 0 then + ids[#ids + 1] = v:gmatch(substring)() + end + end +until cursor == 0 +if #ids == 0 and redis.call("EXISTS", metric_key) == 0 then + return -1 +end +return ids +""", + } + def __init__(self, conf): super(RedisStorage, self).__init__(conf) self._client = redis.get_client(conf) + self._scripts = { + name: self._client.register_script(code) + for name, code in six.iteritems(self._SCRIPTS) + } def __str__(self): return "%s: %s" % (self.__class__.__name__, self._client) @@ -68,16 +95,12 @@ class RedisStorage(storage.StorageDriver): def _list_split_keys(self, metric, aggregation, granularity, version=3): key = self._metric_key(metric) - split_keys = set() - hashes = self._client.hscan_iter( - key, match=self._aggregated_field_for_split( - aggregation, '*', version, granularity)) - for f, __ in hashes: - meta = f.decode("utf8").split(self.FIELD_SEP, 1) - split_keys.add(meta[0]) - if not split_keys and not self._client.exists(key): + split_keys = self._scripts["list_split_keys"]( + keys=[key], args=[self._aggregated_field_for_split( + aggregation, '*', version, granularity), self.FIELD_SEP]) + if split_keys == -1: raise storage.MetricDoesNotExist(metric) - return split_keys + return set(split_keys) def _delete_metric_measures(self, metric, key, aggregation, version=3): field = self._aggregated_field_for_split(aggregation, key, version) -- GitLab From c97e1df70d2f8ca9bc71dd2bead216cb9454a0b7 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Thu, 8 Feb 2018 09:47:57 +0100 Subject: [PATCH 1234/1483] api: simplify archive policy validation The current validation code validates measure aggregates retrieval against the whole list of valid aggregation methods. 
That serves little purpose as it is again validated just after against the actual list of existing aggregation method defined in the archive policy attached to this/these metric(s). This patch removes that validation and adds a missing test for the aggregation endpoint. --- gnocchi/rest/api.py | 14 -------------- gnocchi/tests/functional/gabbits/aggregation.yaml | 8 ++++++++ gnocchi/tests/functional/gabbits/metric.yaml | 10 +++++++--- 3 files changed, 15 insertions(+), 17 deletions(-) diff --git a/gnocchi/rest/api.py b/gnocchi/rest/api.py index 0a5aa6c2..b71730c6 100644 --- a/gnocchi/rest/api.py +++ b/gnocchi/rest/api.py @@ -487,12 +487,6 @@ class MetricController(rest.RestController): granularity=None, resample=None, refresh=False, **param): self.enforce_metric("get measures") - if (aggregation not in - archive_policy.ArchivePolicy.VALID_AGGREGATION_METHODS): - msg = "Invalid aggregation value %(agg)s, must be one of %(std)s" - abort(400, msg % dict( - agg=aggregation, - std=archive_policy.ArchivePolicy.VALID_AGGREGATION_METHODS)) if resample: if not granularity: @@ -1834,14 +1828,6 @@ class AggregationController(rest.RestController): start, stop, granularity, needed_overlap, fill = validate_qs( start, stop, granularity, needed_overlap, fill) - if (aggregation - not in archive_policy.ArchivePolicy.VALID_AGGREGATION_METHODS): - abort( - 400, - 'Invalid aggregation value %s, must be one of %s' - % (aggregation, - archive_policy.ArchivePolicy.VALID_AGGREGATION_METHODS)) - if reaggregation is None: reaggregation = aggregation diff --git a/gnocchi/tests/functional/gabbits/aggregation.yaml b/gnocchi/tests/functional/gabbits/aggregation.yaml index abce43dc..c613fb55 100644 --- a/gnocchi/tests/functional/gabbits/aggregation.yaml +++ b/gnocchi/tests/functional/gabbits/aggregation.yaml @@ -113,6 +113,14 @@ tests: - ['2015-03-06T14:30:00+00:00', 300.0, 15.05] - ['2015-03-06T14:33:57+00:00', 1.0, 23.1] + - name: get measure aggregates with invalid aggregation method + GET: 
/v1/aggregation/metric?metric=$HISTORY['get metric list'].$RESPONSE['$[0].id']&metric=$HISTORY['get metric list'].$RESPONSE['$[1].id']&aggregation=wtf + request_headers: + accept: application/json + status: 404 + response_json_paths: + $.description: Aggregation method 'wtf' at granularity '1.0' for metric $HISTORY['get metric list'].$RESPONSE['$[0].id'] does not exist + - name: get measure aggregates and reaggregate GET: /v1/aggregation/metric?metric=$HISTORY['get metric list'].$RESPONSE['$[0].id']&metric=$HISTORY['get metric list'].$RESPONSE['$[1].id']&reaggregation=min poll: diff --git a/gnocchi/tests/functional/gabbits/metric.yaml b/gnocchi/tests/functional/gabbits/metric.yaml index 3c93af7f..89cf1613 100644 --- a/gnocchi/tests/functional/gabbits/metric.yaml +++ b/gnocchi/tests/functional/gabbits/metric.yaml @@ -178,9 +178,13 @@ tests: - name: get measurements invalid agg method GET: /v1/metric/$HISTORY['list valid metrics'].$RESPONSE['$[0].id']/measures?aggregation=wtf - status: 400 - response_strings: - - Invalid aggregation value + request_headers: + accept: application/json + status: 404 + response_json_paths: + $.description.cause: Aggregation method does not exist for this metric + $.description.detail.metric: $HISTORY['list valid metrics'].$RESPONSE['$[0].id'] + $.description.detail.aggregation_method: wtf - name: get measurements by start GET: /v1/metric/$HISTORY['list valid metrics'].$RESPONSE['$[0].id']/measures?refresh=true&start=2015-03-06T14:34 -- GitLab From 14e84563a40d15af3720fd4c2429f9193c4ec9b8 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Thu, 1 Feb 2018 13:48:21 +0100 Subject: [PATCH 1235/1483] redis: batch new measures retrieval with a Lua script --- gnocchi/common/redis.py | 16 +++++++++++++--- gnocchi/incoming/redis.py | 20 ++++++++++++++------ gnocchi/storage/redis.py | 6 +----- 3 files changed, 28 insertions(+), 14 deletions(-) diff --git a/gnocchi/common/redis.py b/gnocchi/common/redis.py index 275bd3ef..7f50d4c4 100644 --- 
a/gnocchi/common/redis.py +++ b/gnocchi/common/redis.py @@ -1,6 +1,6 @@ # -*- encoding: utf-8 -*- # -# Copyright © 2017 Red Hat +# Copyright © 2017-2018 Red Hat # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain @@ -17,6 +17,7 @@ from __future__ import absolute_import from oslo_config import cfg +import six from six.moves.urllib import parse try: @@ -106,7 +107,7 @@ OPTS = [ ] -def get_client(conf): +def get_client(conf, scripts=None): if redis is None: raise RuntimeError("Redis Python module is unavailable") parsed_url = parse.urlparse(conf.redis_url) @@ -156,4 +157,13 @@ def get_client(conf): # The master_client is a redis.StrictRedis using a # Sentinel managed connection pool. return master_client - return redis.StrictRedis(**kwargs) + + client = redis.StrictRedis(**kwargs) + + if scripts is not None: + scripts = { + name: client.register_script(code) + for name, code in six.iteritems(scripts) + } + + return client, scripts diff --git a/gnocchi/incoming/redis.py b/gnocchi/incoming/redis.py index 28a71f1d..d524c255 100644 --- a/gnocchi/incoming/redis.py +++ b/gnocchi/incoming/redis.py @@ -23,9 +23,18 @@ from gnocchi import incoming class RedisStorage(incoming.IncomingDriver): + _SCRIPTS = { + "process_measure_for_metric": """ +local llen = redis.call("LLEN", KEYS[1]) +-- lrange is inclusive on both ends, decrease to grab exactly n items +if llen > 0 then llen = llen - 1 end +return {llen, table.concat(redis.call("LRANGE", KEYS[1], 0, llen), "")} +""", + } + def __init__(self, conf, greedy=True): super(RedisStorage, self).__init__(conf) - self._client = redis.get_client(conf) + self._client, self._scripts = redis.get_client(conf, self._SCRIPTS) self.greedy = greedy def __str__(self): @@ -106,12 +115,11 @@ class RedisStorage(incoming.IncomingDriver): @contextlib.contextmanager def process_measure_for_metric(self, metric_id): key = self._build_measure_path(metric_id) - 
item_len = self._client.llen(key) - # lrange is inclusive on both ends, decrease to grab exactly n items - item_len = item_len - 1 if item_len else item_len + item_len, data = self._scripts['process_measure_for_metric']( + keys=[key], + ) - yield self._unserialize_measures(metric_id, b"".join( - self._client.lrange(key, 0, item_len))) + yield self._unserialize_measures(metric_id, data) # ltrim is inclusive, bump 1 to remove up to and including nth item self._client.ltrim(key, item_len + 1, -1) diff --git a/gnocchi/storage/redis.py b/gnocchi/storage/redis.py index ce2a5db2..e029368a 100644 --- a/gnocchi/storage/redis.py +++ b/gnocchi/storage/redis.py @@ -52,11 +52,7 @@ return ids def __init__(self, conf): super(RedisStorage, self).__init__(conf) - self._client = redis.get_client(conf) - self._scripts = { - name: self._client.register_script(code) - for name, code in six.iteritems(self._SCRIPTS) - } + self._client, self._scripts = redis.get_client(conf, self._SCRIPTS) def __str__(self): return "%s: %s" % (self.__class__.__name__, self._client) -- GitLab From a559f53037c19094dfdac43ac82f0656d12f875c Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Thu, 1 Feb 2018 14:25:56 +0100 Subject: [PATCH 1236/1483] redis: create metrics using a single hsetnx() operation --- gnocchi/storage/redis.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/gnocchi/storage/redis.py b/gnocchi/storage/redis.py index e029368a..b4cf4d3b 100644 --- a/gnocchi/storage/redis.py +++ b/gnocchi/storage/redis.py @@ -73,10 +73,9 @@ return ids return path + '_v%s' % version if version else path def _create_metric(self, metric): - key = self._metric_key(metric) - if self._client.exists(key): + if self._client.hsetnx( + self._metric_key(metric), self._unaggregated_field(), "") == 0: raise storage.MetricAlreadyExists(metric) - self._client.hset(key, self._unaggregated_field(), '') def _store_unaggregated_timeserie(self, metric, data, version=3): 
self._client.hset(self._metric_key(metric), -- GitLab From f30eea967ef43c06657e745b7ac00abefe6a72b9 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Thu, 1 Feb 2018 17:02:19 +0100 Subject: [PATCH 1237/1483] redis: use a Lua script to get measures --- gnocchi/storage/redis.py | 40 ++++++++++++++++++++++++++++------------ 1 file changed, 28 insertions(+), 12 deletions(-) diff --git a/gnocchi/storage/redis.py b/gnocchi/storage/redis.py index b4cf4d3b..6b19e666 100644 --- a/gnocchi/storage/redis.py +++ b/gnocchi/storage/redis.py @@ -1,6 +1,6 @@ # -*- encoding: utf-8 -*- # -# Copyright © 2017 Red Hat +# Copyright © 2017-2018 Red Hat # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain @@ -14,8 +14,6 @@ # License for the specific language governing permissions and limitations # under the License. -import six - from gnocchi.common import redis from gnocchi import storage from gnocchi import utils @@ -26,6 +24,7 @@ class RedisStorage(storage.StorageDriver): STORAGE_PREFIX = b"timeseries" FIELD_SEP = '_' + FIELD_SEP_B = b'_' _SCRIPTS = { "list_split_keys": """ @@ -47,6 +46,21 @@ if #ids == 0 and redis.call("EXISTS", metric_key) == 0 then return -1 end return ids +""", + "get_measures": """ +local results = redis.call("HMGET", KEYS[1], unpack(ARGV)) +local final = {} +for i, result in ipairs(results) do + if result == false then + local field = ARGV[i] + if redis.call("EXISTS", KEYS[1]) == 1 then + return {-1, field} + end + return {-2, field} + end + final[#final + 1] = result +end +return {0, final} """, } @@ -113,16 +127,18 @@ return ids def _get_measures(self, metric, keys, aggregation, version=3): if not keys: return [] - redis_key = self._metric_key(metric) fields = [ self._aggregated_field_for_split(aggregation, key, version) for key in keys ] - results = self._client.hmget(redis_key, fields) - for key, data in six.moves.zip(keys, results): - if data is None: - if not 
self._client.exists(redis_key): - raise storage.MetricDoesNotExist(metric) - raise storage.AggregationDoesNotExist( - metric, aggregation, key.sampling) - return results + code, result = self._scripts['get_measures']( + keys=[self._metric_key(metric)], + args=fields, + ) + if code == -1: + sampling = utils.to_timespan(result.split(self.FIELD_SEP_B)[2]) + raise storage.AggregationDoesNotExist( + metric, aggregation, sampling) + if code == -2: + raise storage.MetricDoesNotExist(metric) + return result -- GitLab From c2863d7be4e819611b75abe643419f520808025f Mon Sep 17 00:00:00 2001 From: Lars Kellogg-Stedman Date: Fri, 9 Feb 2018 10:36:03 -0500 Subject: [PATCH 1238/1483] improve documentation on tuning the number of sacks The documentation suggested that the number of sacks could be tuned via a configuration option the in gnocchi.conf file. This commit corrects the docs to indicate that the value can be changed only with the gnocchi-change-sack-size command. This commit also corrects spelling, grammar, and formatting issues in the same section. --- doc/source/operating.rst | 71 ++++++++++++++++++++-------------------- 1 file changed, 36 insertions(+), 35 deletions(-) diff --git a/doc/source/operating.rst b/doc/source/operating.rst index 31b5c905..fed78a3c 100644 --- a/doc/source/operating.rst +++ b/doc/source/operating.rst @@ -227,62 +227,63 @@ metricd daemon on any number of servers. How to scale measure processing ------------------------------- -Measurement data pushed to Gnocchi is divided into sacks for better -distribution. The number of partitions is controlled by the `sacks` option -under the `[incoming]` section. This value should be set based on the -number of active |metrics| the system will capture. Additionally, the number of -`sacks`, should be higher than the total number of active metricd workers. -distribution. Incoming |metrics| are pushed to specific sacks and each sack -is assigned to one or more `gnocchi-metricd` daemons for processing. 
+Measurement data pushed to Gnocchi is divided into "sacks" for better +distribution. Incoming |metrics| are pushed to specific sacks and +each sack is assigned to one or more `gnocchi-metricd` daemons for +processing. -How many sacks do we need to create ------------------------------------ - -This number of sacks enabled should be set based on the number of active -|metrics| the system will capture. Additionally, the number of sacks, should -be higher than the total number of active `gnocchi-metricd` workers. +The number of sacks should be set based on the number of active +|metrics| the system will capture. Additionally, the number of sacks +should be higher than the total number of active `gnocchi-metricd` +workers. In general, use the following equation to determine the appropriate `sacks` value to set:: sacks value = number of **active** metrics / 300 -If the estimated number of |metrics| is the absolute maximum, divide the value -by 500 instead. If the estimated number of active |metrics| is conservative and -expected to grow, divide the value by 100 instead to accommodate growth. +If the estimated number of |metrics| is the absolute maximum, divide +the value by 500 instead. If the estimated number of active |metrics| +is conservative and expected to grow, divide the value by 100 instead +to accommodate growth. How do we change sack size -------------------------- -In the event your system grows to capture signficantly more |metrics| than -originally anticipated, the number of sacks can be changed to maintain good -distribution. To avoid any loss of data when modifying `sacks` option. The -option should be changed in the following order:: +In the event your system grows to capture significantly more |metrics| +than originally anticipated, the number of sacks can be changed to +maintain good distribution. To avoid any loss of data when modifying +the number of `sacks`, the value should be changed in the following +order: - 1. 
Stop all input services (api, statsd) +1. Stop all input services (api, statsd). - 2. Stop all metricd services once backlog is cleared +2. Stop all metricd services once backlog is cleared. - 3. Run gnocchi-change-sack-size to set new sack size. Note - that sack value can only be changed if the backlog is empty. +3. Run ``gnocchi-change-sack-size `` to set new sack + size. Note that the sack value can only be changed if the backlog + is empty. - 4. Restart all gnocchi services (api, statsd, metricd) with new configuration +4. Restart all gnocchi services (api, statsd, metricd) with the new + configuration. -Alternatively, to minimise API downtime:: +Alternatively, to minimize API downtime: - 1. Run gnocchi-upgrade but use a new incoming storage target such as a new - ceph pool, file path, etc... Additionally, set |aggregate| storage to a - new target as well. +1. Run gnocchi-upgrade but use a new incoming storage target such as a new + ceph pool, file path, etc. Additionally, set |aggregate| storage to a + new target as well. - 2. Run gnocchi-change-sack-size against new target +2. Run ``gnocchi-change-sack-size `` against the new + target. - 3. Stop all input services (api, statsd) +3. Stop all input services (api, statsd). - 4. Restart all input services but target newly created incoming storage +4. Restart all input services but target the newly created incoming + storage. - 5. When done clearing backlog from original incoming storage, switch all - metricd datemons to target new incoming storage but maintain original - |aggregate| storage. +5. When done clearing backlog from original incoming storage, switch + all metricd daemons to target the new incoming storage but maintain + original |aggregate| storage. 
How to monitor Gnocchi ====================== -- GitLab From 80c6f56d3c88b40636c7e18275b2db80babadb04 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Tue, 6 Feb 2018 15:34:25 +0100 Subject: [PATCH 1239/1483] incoming: batch incoming measures retrieval This patches rename the context manager process_measure_for_metric to process_measure_for_metrics as it now yields a dict of several metrics at the same time. --- gnocchi/incoming/__init__.py | 4 +-- gnocchi/incoming/ceph.py | 66 +++++++++++++++++++----------------- gnocchi/incoming/file.py | 22 +++++++----- gnocchi/incoming/redis.py | 31 +++++++++++------ gnocchi/incoming/s3.py | 30 ++++++++-------- gnocchi/incoming/swift.py | 32 ++++++++++------- gnocchi/storage/__init__.py | 27 ++++++++------- 7 files changed, 121 insertions(+), 91 deletions(-) diff --git a/gnocchi/incoming/__init__.py b/gnocchi/incoming/__init__.py index bc59f3ad..7fe4c804 100644 --- a/gnocchi/incoming/__init__.py +++ b/gnocchi/incoming/__init__.py @@ -1,6 +1,6 @@ # -*- encoding: utf-8 -*- # -# Copyright © 2017 Red Hat, Inc. +# Copyright © 2017-2018 Red Hat, Inc. 
# Copyright © 2014-2015 eNovance # # Licensed under the Apache License, Version 2.0 (the "License"); you may @@ -159,7 +159,7 @@ class IncomingDriver(object): raise exceptions.NotImplementedError @staticmethod - def process_measure_for_metric(metric_id): + def process_measure_for_metrics(metric_id): raise exceptions.NotImplementedError @staticmethod diff --git a/gnocchi/incoming/ceph.py b/gnocchi/incoming/ceph.py index 09dbcff3..bed93bd7 100644 --- a/gnocchi/incoming/ceph.py +++ b/gnocchi/incoming/ceph.py @@ -190,40 +190,44 @@ class CephStorage(incoming.IncomingDriver): return bool(self._list_keys_to_process(sack, object_prefix)) @contextlib.contextmanager - def process_measure_for_metric(self, metric_id): - sack = self.sack_for_metric(metric_id) - key_prefix = self.MEASURE_PREFIX + "_" + str(metric_id) - - processed_keys = [] + def process_measure_for_metrics(self, metric_ids): + measures = {} + processed_keys = defaultdict(list) with rados.ReadOpCtx() as op: - omaps, ret = self.ioctx.get_omap_vals(op, "", key_prefix, -1) - self.ioctx.operate_read_op(op, self.get_sack_name(sack), - flag=self.OMAP_READ_FLAGS) - # NOTE(sileht): after reading the libradospy, I'm - # not sure that ret will have the correct value - # get_omap_vals transforms the C int to python int - # before operate_read_op is called, I dunno if the int - # content is copied during this transformation or if - # this is a pointer to the C int, I think it's copied... 
- try: - ceph.errno_to_exception(ret) - except rados.ObjectNotFound: - # Object has been deleted, so this is just a stalled entry - # in the OMAP listing, ignore - return - - measures = self._make_measures_array() - for k, v in omaps: - measures = numpy.concatenate( - (measures, self._unserialize_measures(k, v))) - processed_keys.append(k) + for metric_id in metric_ids: + sack = self.sack_for_metric(metric_id) + key_prefix = self.MEASURE_PREFIX + "_" + str(metric_id) + omaps, ret = self.ioctx.get_omap_vals(op, "", key_prefix, -1) + self.ioctx.operate_read_op(op, self.get_sack_name(sack), + flag=self.OMAP_READ_FLAGS) + # NOTE(sileht): after reading the libradospy, I'm + # not sure that ret will have the correct value + # get_omap_vals transforms the C int to python int + # before operate_read_op is called, I dunno if the int + # content is copied during this transformation or if + # this is a pointer to the C int, I think it's copied... + try: + ceph.errno_to_exception(ret) + except rados.ObjectNotFound: + # Object has been deleted, so this is just a stalled entry + # in the OMAP listing, ignore + continue + + m = self._make_measures_array() + for k, v in omaps: + m = numpy.concatenate( + (m, self._unserialize_measures(k, v))) + processed_keys[sack].append(k) + + measures[metric_id] = m yield measures # Now clean omap with rados.WriteOpCtx() as op: - # NOTE(sileht): come on Ceph, no return code - # for this operation ?!! - self.ioctx.remove_omap_keys(op, tuple(processed_keys)) - self.ioctx.operate_write_op(op, self.get_sack_name(sack), - flags=self.OMAP_WRITE_FLAGS) + for sack, keys in six.iteritems(processed_keys): + # NOTE(sileht): come on Ceph, no return code + # for this operation ?!! 
+ self.ioctx.remove_omap_keys(op, tuple(keys)) + self.ioctx.operate_write_op(op, self.get_sack_name(sack), + flags=self.OMAP_WRITE_FLAGS) diff --git a/gnocchi/incoming/file.py b/gnocchi/incoming/file.py index d474fb77..a7930c56 100644 --- a/gnocchi/incoming/file.py +++ b/gnocchi/incoming/file.py @@ -163,15 +163,19 @@ class FileStorage(incoming.IncomingDriver): return os.path.isdir(self._build_measure_path(metric_id)) @contextlib.contextmanager - def process_measure_for_metric(self, metric_id): - files = self._list_measures_container_for_metric(metric_id) - measures = self._make_measures_array() - for f in files: - abspath = self._build_measure_path(metric_id, f) - with open(abspath, "rb") as e: - measures = numpy.concatenate(( - measures, self._unserialize_measures(f, e.read()))) + def process_measure_for_metrics(self, metric_ids): + measures = {} + for metric_id in metric_ids: + files = self._list_measures_container_for_metric(metric_id) + m = self._make_measures_array() + for f in files: + abspath = self._build_measure_path(metric_id, f) + with open(abspath, "rb") as e: + m = numpy.concatenate(( + m, self._unserialize_measures(f, e.read()))) + measures[metric_id] = m yield measures - self._delete_measures_files_for_metric(metric_id, files) + for metric_id in metric_ids: + self._delete_measures_files_for_metric(metric_id, files) diff --git a/gnocchi/incoming/redis.py b/gnocchi/incoming/redis.py index d524c255..793eac7e 100644 --- a/gnocchi/incoming/redis.py +++ b/gnocchi/incoming/redis.py @@ -113,16 +113,27 @@ return {llen, table.concat(redis.call("LRANGE", KEYS[1], 0, llen), "")} return bool(self._client.exists(self._build_measure_path(metric_id))) @contextlib.contextmanager - def process_measure_for_metric(self, metric_id): - key = self._build_measure_path(metric_id) - item_len, data = self._scripts['process_measure_for_metric']( - keys=[key], - ) - - yield self._unserialize_measures(metric_id, data) - - # ltrim is inclusive, bump 1 to remove up to and including 
nth item - self._client.ltrim(key, item_len + 1, -1) + def process_measure_for_metrics(self, metric_ids): + measures = {} + pipe = self._client.pipeline(transaction=False) + for metric_id in metric_ids: + key = self._build_measure_path(metric_id) + self._scripts['process_measure_for_metric']( + keys=[key], + client=pipe, + ) + + results = pipe.execute() + for metric_id, (item_len, data) in six.moves.zip(metric_ids, results): + measures[metric_id] = self._unserialize_measures(metric_id, data) + + yield measures + + for metric_id, (item_len, data) in six.moves.zip(metric_ids, results): + key = self._build_measure_path(metric_id) + # ltrim is inclusive, bump 1 to remove up to and including nth item + pipe.ltrim(key, item_len + 1, -1) + pipe.execute() def iter_on_sacks_to_process(self): self._client.config_set("notify-keyspace-events", "K$") diff --git a/gnocchi/incoming/s3.py b/gnocchi/incoming/s3.py index 9af0e304..4a08f615 100644 --- a/gnocchi/incoming/s3.py +++ b/gnocchi/incoming/s3.py @@ -158,21 +158,23 @@ class S3Storage(incoming.IncomingDriver): return bool(self._list_measure_files_for_metric(sack, metric_id)) @contextlib.contextmanager - def process_measure_for_metric(self, metric_id): - sack = self.sack_for_metric(metric_id) - files = self._list_measure_files_for_metric(sack, metric_id) - - measures = self._make_measures_array() - for f in files: - response = self.s3.get_object( - Bucket=self._bucket_name_measures, - Key=f) - measures = numpy.concatenate(( - measures, - self._unserialize_measures(f, response['Body'].read()) - )) + def process_measure_for_metrics(self, metric_ids): + measures = defaultdict(self._make_measures_array) + all_files = [] + for metric_id in metric_ids: + sack = self.sack_for_metric(metric_id) + files = self._list_measure_files_for_metric(sack, metric_id) + all_files.extend(files) + for f in files: + response = self.s3.get_object( + Bucket=self._bucket_name_measures, + Key=f) + measures[metric_id] = numpy.concatenate(( + 
measures[metric_id], + self._unserialize_measures(f, response['Body'].read()) + )) yield measures # Now clean objects - s3.bulk_delete(self.s3, self._bucket_name_measures, files) + s3.bulk_delete(self.s3, self._bucket_name_measures, all_files) diff --git a/gnocchi/incoming/swift.py b/gnocchi/incoming/swift.py index 66681a25..4d621ad3 100644 --- a/gnocchi/incoming/swift.py +++ b/gnocchi/incoming/swift.py @@ -97,18 +97,24 @@ class SwiftStorage(incoming.IncomingDriver): return bool(self._list_measure_files_for_metric(sack, metric_id)) @contextlib.contextmanager - def process_measure_for_metric(self, metric_id): - sack = self.sack_for_metric(metric_id) - sack_name = self.get_sack_name(sack) - files = self._list_measure_files_for_metric(sack, metric_id) - - yield self._array_concatenate([ - self._unserialize_measures( - f['name'], - self.swift.get_object(sack_name, f['name'])[1], - ) - for f in files - ]) + def process_measure_for_metrics(self, metric_ids): + measures = {} + all_files = defaultdict(list) + for metric_id in metric_ids: + sack = self.sack_for_metric(metric_id) + sack_name = self.get_sack_name(sack) + files = self._list_measure_files_for_metric(sack, metric_id) + all_files[sack_name].extend(files) + measures[metric_id] = self._array_concatenate([ + self._unserialize_measures( + f['name'], + self.swift.get_object(sack_name, f['name'])[1], + ) + for f in files + ]) + + yield measures # Now clean objects - swift.bulk_delete(self.swift, sack_name, files) + for sack_name, files in six.iteritems(all_files): + swift.bulk_delete(self.swift, sack_name, files) diff --git a/gnocchi/storage/__init__.py b/gnocchi/storage/__init__.py index e056e229..752f2260 100644 --- a/gnocchi/storage/__init__.py +++ b/gnocchi/storage/__init__.py @@ -486,18 +486,21 @@ class StorageDriver(object): # measures will be skipped until cleaned by janitor. 
metrics = indexer.list_metrics( attribute_filter={"in": {"id": metrics_to_process}}) - for metric in metrics: - # NOTE(gordc): must lock at sack level - try: - LOG.debug("Processing measures for %s", metric) - with incoming.process_measure_for_metric(metric.id) \ - as measures: - self._compute_and_store_timeseries(metric, measures) - LOG.debug("Measures for metric %s processed", metric) - except Exception: - if sync: - raise - LOG.error("Error processing new measures", exc_info=True) + metrics_by_id = {m.id: m for m in metrics} + # NOTE(gordc): must lock at sack level + try: + LOG.debug("Processing measures for %s", metrics) + with incoming.process_measure_for_metrics([m.id for m in metrics]) \ + as metrics_and_measures: + for metric, measures in six.iteritems(metrics_and_measures): + self._compute_and_store_timeseries( + metrics_by_id[metric], measures + ) + LOG.debug("Measures for metric %s processed", metrics) + except Exception: + if sync: + raise + LOG.error("Error processing new measures", exc_info=True) def _compute_and_store_timeseries(self, metric, measures): # NOTE(mnaser): The metric could have been handled by -- GitLab From ddde3d2078edec2fd4bb9b03d9398a47974c4b64 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ond=C5=99ej=20Nov=C3=BD?= Date: Mon, 12 Feb 2018 10:34:52 +0100 Subject: [PATCH 1240/1483] d/control: Set Vcs-* to salsa.debian.org --- debian/changelog | 6 ++++++ debian/control | 4 ++-- 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/debian/changelog b/debian/changelog index 1d83221c..5402fe23 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +gnocchi (4.0.4-2) UNRELEASED; urgency=medium + + * d/control: Set Vcs-* to salsa.debian.org + + -- Ondřej Nový Mon, 12 Feb 2018 10:34:51 +0100 + gnocchi (4.0.4-1) unstable; urgency=medium * New upstream release. 
diff --git a/debian/control b/debian/control index 76b11b93..376e4e02 100644 --- a/debian/control +++ b/debian/control @@ -76,8 +76,8 @@ Build-Depends-Indep: subunit (>= 0.0.18), testrepository, Standards-Version: 4.1.1 -Vcs-Browser: https://anonscm.debian.org/cgit/openstack/services/gnocchi.git -Vcs-Git: https://anonscm.debian.org/git/openstack/services/gnocchi.git +Vcs-Browser: https://salsa.debian.org/openstack-team/services/gnocchi +Vcs-Git: https://salsa.debian.org/openstack-team/services/gnocchi.git Homepage: https://github.com/openstack/gnocchi Package: gnocchi-api -- GitLab From 8ce12654c70932562f246903edf14ab112a58c48 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Tue, 6 Feb 2018 18:20:58 +0100 Subject: [PATCH 1241/1483] Add Chef class to orchestrate complex actions Some of the code is currently tied to the Storage driver, whereas it interacts with many other components. This should be moved out. That patch introduces the Chef, a class responsible for doing complex actions that require interactions and orchestrations with different actors. --- gnocchi/chef.py | 94 +++++++++++++++++++++++++++++++++++ gnocchi/cli/metricd.py | 5 +- gnocchi/storage/__init__.py | 53 -------------------- gnocchi/tests/test_chef.py | 57 +++++++++++++++++++++ gnocchi/tests/test_storage.py | 24 --------- 5 files changed, 155 insertions(+), 78 deletions(-) create mode 100644 gnocchi/chef.py create mode 100644 gnocchi/tests/test_chef.py diff --git a/gnocchi/chef.py b/gnocchi/chef.py new file mode 100644 index 00000000..e146c331 --- /dev/null +++ b/gnocchi/chef.py @@ -0,0 +1,94 @@ +# -*- encoding: utf-8 -*- +# +# Copyright (c) 2018 Red Hat +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import itertools +import operator + +import daiquiri + +from gnocchi import indexer + + +ITEMGETTER_1 = operator.itemgetter(1) + +LOG = daiquiri.getLogger(__name__) + + +class Chef(object): + """A master of cooking gnocchi. + + Give it a few tools and it'll make you happy! + + The Chef is responsible for executing actions that requires several drivers + at the same time, such as the coordinator, the incoming and storage + drivers, or the indexer. + + """ + + def __init__(self, coord, incoming, index, storage): + self.coord = coord + self.incoming = incoming + self.index = index + self.storage = storage + + def expunge_metrics(self, sync=False): + """Remove deleted metrics. + + :param sync: If True, then delete everything synchronously and raise + on error + :type sync: bool + """ + # FIXME(jd) The indexer could return them sorted/grouped by directly + metrics_to_expunge = sorted( + ((m, self.incoming.sack_for_metric(m.id)) + for m in self.index.list_metrics(status='delete')), + key=ITEMGETTER_1) + for sack, metrics in itertools.groupby( + metrics_to_expunge, key=ITEMGETTER_1): + try: + lock = self.incoming.get_sack_lock(self.coord, sack) + if not lock.acquire(blocking=sync): + # Retry later + LOG.debug( + "Sack %s is locked, cannot expunge metrics", sack) + continue + # NOTE(gordc): no need to hold lock because the metric has been + # already marked as "deleted" in the indexer so no measure + # worker is going to process it anymore. 
+ lock.release() + except Exception: + if sync: + raise + LOG.error("Unable to lock sack %s for expunging metrics", + sack, exc_info=True) + else: + for metric, sack in metrics: + LOG.debug("Deleting metric %s", metric) + try: + self.incoming.delete_unprocessed_measures_for_metric( + metric.id) + self.storage._delete_metric(metric) + try: + self.index.expunge_metric(metric.id) + except indexer.NoSuchMetric: + # It's possible another process deleted or is + # deleting the metric, not a big deal + pass + except Exception: + if sync: + raise + LOG.error("Unable to expunge metric %s from storage", + metric, exc_info=True) diff --git a/gnocchi/cli/metricd.py b/gnocchi/cli/metricd.py index 353edf1d..c2e03fcd 100644 --- a/gnocchi/cli/metricd.py +++ b/gnocchi/cli/metricd.py @@ -28,6 +28,7 @@ import tenacity import tooz from tooz import coordination +from gnocchi import chef from gnocchi import exceptions from gnocchi import incoming from gnocchi import indexer @@ -71,6 +72,8 @@ class MetricProcessBase(cotyledon.Service): self.store = storage.get_driver(self.conf) self.incoming = incoming.get_driver(self.conf) self.index = indexer.get_driver(self.conf) + self.chef = chef.Chef(self.coord, self.incoming, + self.index, self.store) def run(self): self._configure() @@ -266,7 +269,7 @@ class MetricJanitor(MetricProcessBase): worker_id, conf, conf.metricd.metric_cleanup_delay) def _run_job(self): - self.store.expunge_metrics(self.coord, self.incoming, self.index) + self.chef.expunge_metrics() LOG.debug("Metrics marked for deletion removed from backend") diff --git a/gnocchi/storage/__init__.py b/gnocchi/storage/__init__.py index 752f2260..2af2abcb 100644 --- a/gnocchi/storage/__init__.py +++ b/gnocchi/storage/__init__.py @@ -24,7 +24,6 @@ from oslo_config import cfg import six from gnocchi import carbonara -from gnocchi import indexer from gnocchi import utils @@ -38,7 +37,6 @@ OPTS = [ LOG = daiquiri.getLogger(__name__) -ITEMGETTER_1 = operator.itemgetter(1) ATTRGETTER_AGG_METHOD 
= operator.attrgetter("aggregation_method") @@ -424,57 +422,6 @@ class StorageDriver(object): finally: lock.release() - def expunge_metrics(self, coord, incoming, index, sync=False): - """Remove deleted metrics - - :param incoming: The incoming storage - :param index: An indexer to be used for querying metrics - :param sync: If True, then delete everything synchronously and raise - on error - :type sync: bool - """ - # FIXME(jd) The indexer could return them sorted/grouped by directly - metrics_to_expunge = sorted( - ((m, incoming.sack_for_metric(m.id)) - for m in index.list_metrics(status='delete')), - key=ITEMGETTER_1) - for sack, metrics in itertools.groupby( - metrics_to_expunge, key=ITEMGETTER_1): - try: - lock = incoming.get_sack_lock(coord, sack) - if not lock.acquire(blocking=sync): - # Retry later - LOG.debug( - "Sack %s is locked, cannot expunge metrics", sack) - continue - # NOTE(gordc): no need to hold lock because the metric has been - # already marked as "deleted" in the indexer so no measure - # worker is going to process it anymore. - lock.release() - except Exception: - if sync: - raise - LOG.error("Unable to lock sack %s for expunging metrics", - sack, exc_info=True) - else: - for metric, sack in metrics: - LOG.debug("Deleting metric %s", metric) - try: - incoming.delete_unprocessed_measures_for_metric( - metric.id) - self._delete_metric(metric) - try: - index.expunge_metric(metric.id) - except indexer.NoSuchMetric: - # It's possible another process deleted or is - # deleting the metric, not a big deal - pass - except Exception: - if sync: - raise - LOG.error("Unable to expunge metric %s from storage", - metric, exc_info=True) - def process_new_measures(self, indexer, incoming, metrics_to_process, sync=False): """Process added measures in background. 
diff --git a/gnocchi/tests/test_chef.py b/gnocchi/tests/test_chef.py new file mode 100644 index 00000000..0370747a --- /dev/null +++ b/gnocchi/tests/test_chef.py @@ -0,0 +1,57 @@ +# -*- encoding: utf-8 -*- +# +# Copyright © 2018 Red Hat +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +import datetime + +import numpy + +from gnocchi import chef +from gnocchi import incoming +from gnocchi import indexer +from gnocchi.tests import base + + +def datetime64(*args): + return numpy.datetime64(datetime.datetime(*args)) + + +class TestChef(base.TestCase): + def setUp(self): + super(TestChef, self).setUp() + self.metric, __ = self._create_metric() + self.chef = chef.Chef(self.coord, self.incoming, + self.index, self.storage) + + def test_delete_nonempty_metric_unprocessed(self): + self.incoming.add_measures(self.metric.id, [ + incoming.Measure(datetime64(2014, 1, 1, 12, 0, 1), 69), + ]) + self.index.delete_metric(self.metric.id) + self.trigger_processing() + __, __, details = self.incoming._build_report(True) + self.assertIn(str(self.metric.id), details) + self.chef.expunge_metrics(sync=True) + __, __, details = self.incoming._build_report(True) + self.assertNotIn(str(self.metric.id), details) + + def test_delete_expunge_metric(self): + self.incoming.add_measures(self.metric.id, [ + incoming.Measure(datetime64(2014, 1, 1, 12, 0, 1), 69), + ]) + self.trigger_processing() + self.index.delete_metric(self.metric.id) + self.chef.expunge_metrics(sync=True) + 
self.assertRaises(indexer.NoSuchMetric, self.index.delete_metric, + self.metric.id) diff --git a/gnocchi/tests/test_storage.py b/gnocchi/tests/test_storage.py index e0fba1d3..d5429f1e 100644 --- a/gnocchi/tests/test_storage.py +++ b/gnocchi/tests/test_storage.py @@ -150,30 +150,6 @@ class TestStorageDriver(tests_base.TestCase): self.storage._get_unaggregated_timeserie, self.metric) - def test_delete_nonempty_metric_unprocessed(self): - self.incoming.add_measures(self.metric.id, [ - incoming.Measure(datetime64(2014, 1, 1, 12, 0, 1), 69), - ]) - self.index.delete_metric(self.metric.id) - self.trigger_processing() - __, __, details = self.incoming._build_report(True) - self.assertIn(str(self.metric.id), details) - self.storage.expunge_metrics(self.coord, - self.incoming, self.index, sync=True) - __, __, details = self.incoming._build_report(True) - self.assertNotIn(str(self.metric.id), details) - - def test_delete_expunge_metric(self): - self.incoming.add_measures(self.metric.id, [ - incoming.Measure(datetime64(2014, 1, 1, 12, 0, 1), 69), - ]) - self.trigger_processing() - self.index.delete_metric(self.metric.id) - self.storage.expunge_metrics(self.coord, - self.incoming, self.index, sync=True) - self.assertRaises(indexer.NoSuchMetric, self.index.delete_metric, - self.metric.id) - def test_measures_reporting_format(self): report = self.incoming.measures_report(True) self.assertIsInstance(report, dict) -- GitLab From 631384a06f7cd9ecd5cf47f444fc821133a8ed50 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Mon, 12 Feb 2018 14:45:43 +0100 Subject: [PATCH 1242/1483] storage: remove useless recomputing of values Those values are already computed earlier, no need to do it again here. 
--- gnocchi/storage/__init__.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/gnocchi/storage/__init__.py b/gnocchi/storage/__init__.py index 2af2abcb..cc5fe373 100644 --- a/gnocchi/storage/__init__.py +++ b/gnocchi/storage/__init__.py @@ -360,8 +360,6 @@ class StorageDriver(object): # First, check for old splits to delete if ap_def.timespan: - oldest_point_to_keep = ts.last - ap_def.timespan - oldest_key_to_keep = ts.get_split_key(oldest_point_to_keep) for key in list(existing_keys): # NOTE(jd) Only delete if the key is strictly inferior # the timestamp; we don't delete any timeserie split -- GitLab From 32ac11c0b44276f7c3144c97b491cf64753820ee Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Tue, 6 Feb 2018 18:54:16 +0100 Subject: [PATCH 1243/1483] chef: import process_new_measures and refresh_metric This moves those methods out of storage and where they belong: in the Chef! --- gnocchi/chef.py | 43 +++++++++++++++++++++++++ gnocchi/cli/metricd.py | 7 ++-- gnocchi/rest/api.py | 16 ++++------ gnocchi/rest/app.py | 8 +++++ gnocchi/storage/__init__.py | 48 ++-------------------------- gnocchi/tests/base.py | 6 ++-- gnocchi/tests/functional/fixtures.py | 20 ++++-------- gnocchi/tests/test_rest.py | 13 +++----- gnocchi/tests/test_statsd.py | 16 +++------- 9 files changed, 82 insertions(+), 95 deletions(-) diff --git a/gnocchi/chef.py b/gnocchi/chef.py index e146c331..90423ce6 100644 --- a/gnocchi/chef.py +++ b/gnocchi/chef.py @@ -18,6 +18,7 @@ import itertools import operator import daiquiri +import six from gnocchi import indexer @@ -27,6 +28,10 @@ ITEMGETTER_1 = operator.itemgetter(1) LOG = daiquiri.getLogger(__name__) +class SackLockTimeoutError(Exception): + pass + + class Chef(object): """A master of cooking gnocchi. 
@@ -92,3 +97,41 @@ class Chef(object): raise LOG.error("Unable to expunge metric %s from storage", metric, exc_info=True) + + def refresh_metric(self, metric, timeout): + s = self.incoming.sack_for_metric(metric.id) + lock = self.incoming.get_sack_lock(self.coord, s) + if not lock.acquire(blocking=timeout): + raise SackLockTimeoutError( + 'Unable to refresh metric: %s. Metric is locked. ' + 'Please try again.' % metric.id) + try: + self.process_new_measures([str(metric.id)]) + finally: + lock.release() + + def process_new_measures(self, metrics_to_process, sync=False): + """Process added measures in background. + + Some drivers might need to have a background task running that process + the measures sent to metrics. This is used for that. + """ + # process only active metrics. deleted metrics with unprocessed + # measures will be skipped until cleaned by janitor. + metrics = self.index.list_metrics( + attribute_filter={"in": {"id": metrics_to_process}}) + metrics_by_id = {m.id: m for m in metrics} + # NOTE(gordc): must lock at sack level + try: + LOG.debug("Processing measures for %s", metrics) + with self.incoming.process_measure_for_metrics( + [m.id for m in metrics]) as metrics_and_measures: + for metric, measures in six.iteritems(metrics_and_measures): + self.storage.compute_and_store_timeseries( + metrics_by_id[metric], measures + ) + LOG.debug("Measures for metric %s processed", metrics) + except Exception: + if sync: + raise + LOG.error("Error processing new measures", exc_info=True) diff --git a/gnocchi/cli/metricd.py b/gnocchi/cli/metricd.py index c2e03fcd..71a063ec 100644 --- a/gnocchi/cli/metricd.py +++ b/gnocchi/cli/metricd.py @@ -241,8 +241,7 @@ class MetricProcessor(MetricProcessBase): try: metrics = self.incoming.list_metric_with_measures_to_process(s) m_count += len(metrics) - self.store.process_new_measures( - self.index, self.incoming, metrics) + self.chef.process_new_measures(metrics) s_count += 1 self.incoming.finish_sack_processing(s) 
self.sacks_with_measures_to_process.discard(s) @@ -310,8 +309,8 @@ def metricd_tester(conf): metrics.update(inc.list_metric_with_measures_to_process(i)) if len(metrics) >= conf.stop_after_processing_metrics: break - s.process_new_measures( - index, inc, + c = chef.Chef(None, inc, index, s) + c.process_new_measures( list(metrics)[:conf.stop_after_processing_metrics], True) diff --git a/gnocchi/rest/api.py b/gnocchi/rest/api.py index b71730c6..cd19b289 100644 --- a/gnocchi/rest/api.py +++ b/gnocchi/rest/api.py @@ -33,6 +33,7 @@ import voluptuous import werkzeug.http from gnocchi import archive_policy +from gnocchi import chef from gnocchi.cli import metricd from gnocchi import incoming from gnocchi import indexer @@ -528,11 +529,10 @@ class MetricController(rest.RestController): if (strtobool("refresh", refresh) and pecan.request.incoming.has_unprocessed(self.metric.id)): try: - pecan.request.storage.refresh_metric( - pecan.request.coordinator, - pecan.request.indexer, pecan.request.incoming, self.metric, + pecan.request.chef.refresh_metric( + self.metric, pecan.request.conf.api.operation_timeout) - except storage.SackLockTimeoutError as e: + except chef.SackLockTimeoutError as e: abort(503, six.text_type(e)) try: return pecan.request.storage.get_measures( @@ -1899,11 +1899,9 @@ class AggregationController(rest.RestController): if pecan.request.incoming.has_unprocessed(m.id)] for m in metrics_to_update: try: - pecan.request.storage.refresh_metric( - pecan.request.coordinator, - pecan.request.indexer, pecan.request.incoming, m, - pecan.request.conf.api.operation_timeout) - except storage.SackLockTimeoutError as e: + pecan.request.chef.refresh_metric( + m, pecan.request.conf.api.operation_timeout) + except chef.SackLockTimeoutError as e: abort(503, six.text_type(e)) if number_of_metrics == 1: # NOTE(sileht): don't do the aggregation if we only have one diff --git a/gnocchi/rest/app.py b/gnocchi/rest/app.py index 9f9324b5..d3ab7717 100644 --- a/gnocchi/rest/app.py +++ 
b/gnocchi/rest/app.py @@ -1,5 +1,6 @@ # -*- encoding: utf-8 -*- # +# Copyright © 2018 Red Hat # Copyright © 2014-2016 eNovance # # Licensed under the Apache License, Version 2.0 (the "License"); you may @@ -28,6 +29,7 @@ from pecan import templating from stevedore import driver import webob.exc +from gnocchi import chef from gnocchi.cli import metricd from gnocchi import exceptions from gnocchi import incoming as gnocchi_incoming @@ -58,6 +60,12 @@ class GnocchiHook(pecan.hooks.PecanHook): state.request.storage = self._lazy_load('storage') state.request.indexer = self._lazy_load('indexer') state.request.incoming = self._lazy_load('incoming') + state.request.chef = chef.Chef( + state.request.coordinator, + state.request.incoming, + state.request.indexer, + state.request.storage, + ) state.request.conf = self.conf state.request.policy_enforcer = self.policy_enforcer state.request.auth_helper = self.auth_helper diff --git a/gnocchi/storage/__init__.py b/gnocchi/storage/__init__.py index cc5fe373..e94022b3 100644 --- a/gnocchi/storage/__init__.py +++ b/gnocchi/storage/__init__.py @@ -1,6 +1,6 @@ # -*- encoding: utf-8 -*- # -# Copyright © 2016-2017 Red Hat, Inc. +# Copyright © 2016-2018 Red Hat, Inc. # Copyright © 2014-2015 eNovance # # Licensed under the Apache License, Version 2.0 (the "License"); you may @@ -97,10 +97,6 @@ class CorruptionError(ValueError, StorageError): super(CorruptionError, self).__init__(message) -class SackLockTimeoutError(StorageError): - pass - - @utils.retry_on_exception_and_log("Unable to initialize storage driver") def get_driver(conf): """Return the configured driver.""" @@ -407,47 +403,7 @@ class StorageDriver(object): aggregation, granularity, version=3): raise NotImplementedError - def refresh_metric(self, coord, indexer, incoming, metric, timeout): - s = incoming.sack_for_metric(metric.id) - lock = incoming.get_sack_lock(coord, s) - if not lock.acquire(blocking=timeout): - raise SackLockTimeoutError( - 'Unable to refresh metric: %s. 
Metric is locked. ' - 'Please try again.' % metric.id) - try: - self.process_new_measures(indexer, incoming, - [six.text_type(metric.id)]) - finally: - lock.release() - - def process_new_measures(self, indexer, incoming, metrics_to_process, - sync=False): - """Process added measures in background. - - Some drivers might need to have a background task running that process - the measures sent to metrics. This is used for that. - """ - # process only active metrics. deleted metrics with unprocessed - # measures will be skipped until cleaned by janitor. - metrics = indexer.list_metrics( - attribute_filter={"in": {"id": metrics_to_process}}) - metrics_by_id = {m.id: m for m in metrics} - # NOTE(gordc): must lock at sack level - try: - LOG.debug("Processing measures for %s", metrics) - with incoming.process_measure_for_metrics([m.id for m in metrics]) \ - as metrics_and_measures: - for metric, measures in six.iteritems(metrics_and_measures): - self._compute_and_store_timeseries( - metrics_by_id[metric], measures - ) - LOG.debug("Measures for metric %s processed", metrics) - except Exception: - if sync: - raise - LOG.error("Error processing new measures", exc_info=True) - - def _compute_and_store_timeseries(self, metric, measures): + def compute_and_store_timeseries(self, metric, measures): # NOTE(mnaser): The metric could have been handled by # another worker, ignore if no measures. 
if len(measures) == 0: diff --git a/gnocchi/tests/base.py b/gnocchi/tests/base.py index 45d8c8c9..46b55ded 100644 --- a/gnocchi/tests/base.py +++ b/gnocchi/tests/base.py @@ -33,6 +33,7 @@ except ImportError: from testtools import testcase from gnocchi import archive_policy +from gnocchi import chef from gnocchi.cli import metricd from gnocchi import exceptions from gnocchi import incoming @@ -364,6 +365,8 @@ class TestCase(BaseTestCase): self.storage.upgrade() self.incoming.upgrade(128) + self.chef = chef.Chef( + self.coord, self.incoming, self.index, self.storage) def tearDown(self): self.index.disconnect() @@ -385,5 +388,4 @@ class TestCase(BaseTestCase): def trigger_processing(self, metrics=None): if metrics is None: metrics = [str(self.metric.id)] - self.storage.process_new_measures( - self.index, self.incoming, metrics, sync=True) + self.chef.process_new_measures(metrics, sync=True) diff --git a/gnocchi/tests/functional/fixtures.py b/gnocchi/tests/functional/fixtures.py index f7aa7dec..906e7235 100644 --- a/gnocchi/tests/functional/fixtures.py +++ b/gnocchi/tests/functional/fixtures.py @@ -34,6 +34,7 @@ from oslo_middleware import cors import sqlalchemy_utils import yaml +from gnocchi import chef from gnocchi.cli import metricd from gnocchi import incoming from gnocchi import indexer @@ -170,7 +171,7 @@ class ConfigFixture(fixture.GabbiFixture): } # start up a thread to async process measures - self.metricd_thread = MetricdThread(self.coord, index, s, i) + self.metricd_thread = MetricdThread(chef.Chef(self.coord, i, index, s)) self.metricd_thread.start() def stop_fixture(self): @@ -208,25 +209,18 @@ class ConfigFixture(fixture.GabbiFixture): class MetricdThread(threading.Thread): """Run metricd in a naive thread to process measures.""" - def __init__(self, coord, index, storer, incoming, name='metricd'): + def __init__(self, chef, name='metricd'): super(MetricdThread, self).__init__(name=name) - self.coord = coord - self.index = index - self.storage = storer - 
self.incoming = incoming + self.chef = chef self.flag = True def run(self): while self.flag: - metrics = utils.list_all_incoming_metrics(self.incoming) - metrics = self.index.list_metrics( + metrics = utils.list_all_incoming_metrics(self.chef.incoming) + metrics = self.chef.index.list_metrics( attribute_filter={"in": {"id": metrics}}) for metric in metrics: - self.storage.refresh_metric(self.coord, - self.index, - self.incoming, - metric, - timeout=None) + self.chef.refresh_metric(metric, timeout=None) time.sleep(0.1) def stop(self): diff --git a/gnocchi/tests/test_rest.py b/gnocchi/tests/test_rest.py index b34d6571..4a12ab79 100644 --- a/gnocchi/tests/test_rest.py +++ b/gnocchi/tests/test_rest.py @@ -61,9 +61,7 @@ class TestingApp(webtest.TestApp): def __init__(self, *args, **kwargs): self.auth_mode = kwargs.pop('auth_mode') - self.storage = kwargs.pop('storage') - self.indexer = kwargs.pop('indexer') - self.incoming = kwargs.pop('incoming') + self.chef = kwargs.pop('chef') super(TestingApp, self).__init__(*args, **kwargs) # Setup Keystone auth_token fake cache self.token = self.VALID_TOKEN @@ -129,9 +127,8 @@ class TestingApp(webtest.TestApp): elif self.auth_mode == "remoteuser": req.remote_user = self.user response = super(TestingApp, self).do_request(req, *args, **kwargs) - metrics = tests_utils.list_all_incoming_metrics(self.incoming) - self.storage.process_new_measures( - self.indexer, self.incoming, metrics, sync=True) + metrics = tests_utils.list_all_incoming_metrics(self.chef.incoming) + self.chef.process_new_measures(metrics, sync=True) return response @@ -178,9 +175,7 @@ class RestTest(tests_base.TestCase, testscenarios.TestWithScenarios): self.app = TestingApp(app.load_app(conf=self.conf, not_implemented_middleware=False), - storage=self.storage, - indexer=self.index, - incoming=self.incoming, + chef=self.chef, auth_mode=self.auth_mode) def _fake_lazy_load(self, name): diff --git a/gnocchi/tests/test_statsd.py b/gnocchi/tests/test_statsd.py index 
7aa073fb..3ddd2173 100644 --- a/gnocchi/tests/test_statsd.py +++ b/gnocchi/tests/test_statsd.py @@ -73,9 +73,7 @@ class TestStatsd(tests_base.TestCase): metric = r.get_metric(metric_key) - self.storage.process_new_measures( - self.stats.indexer, self.stats.incoming, - [str(metric.id)], sync=True) + self.chef.process_new_measures([str(metric.id)], sync=True) measures = self.storage.get_measures(metric, self.aggregations) self.assertEqual({"mean": [ @@ -94,9 +92,7 @@ class TestStatsd(tests_base.TestCase): ("127.0.0.1", 12345)) self.stats.flush() - self.storage.process_new_measures( - self.stats.indexer, self.stats.incoming, - [str(metric.id)], sync=True) + self.chef.process_new_measures([str(metric.id)], sync=True) measures = self.storage.get_measures(metric, self.aggregations) self.assertEqual({"mean": [ @@ -128,9 +124,7 @@ class TestStatsd(tests_base.TestCase): metric = r.get_metric(metric_key) self.assertIsNotNone(metric) - self.storage.process_new_measures( - self.stats.indexer, self.stats.incoming, - [str(metric.id)], sync=True) + self.chef.process_new_measures([str(metric.id)], sync=True) measures = self.storage.get_measures(metric, self.aggregations) self.assertEqual({"mean": [ @@ -148,9 +142,7 @@ class TestStatsd(tests_base.TestCase): ("127.0.0.1", 12345)) self.stats.flush() - self.storage.process_new_measures( - self.stats.indexer, self.stats.incoming, - [str(metric.id)], sync=True) + self.chef.process_new_measures([str(metric.id)], sync=True) measures = self.storage.get_measures(metric, self.aggregations) self.assertEqual({"mean": [ -- GitLab From 413b380a63c0def914825c01ff6834e5e3f202e8 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Wed, 7 Feb 2018 17:52:20 +0100 Subject: [PATCH 1244/1483] incoming: introduce a Sack object Rather than manipulating integers, let's use a Sack object to abstract properly our sack concept. This cleans the code in various places. 
--- gnocchi/cli/manage.py | 8 +-- gnocchi/cli/metricd.py | 12 ++--- gnocchi/incoming/__init__.py | 90 +++++++++++++++++++++++++++++----- gnocchi/incoming/ceph.py | 23 +++++---- gnocchi/incoming/file.py | 18 +++---- gnocchi/incoming/redis.py | 20 ++++---- gnocchi/incoming/s3.py | 23 ++++----- gnocchi/incoming/swift.py | 29 ++++++----- gnocchi/tests/base.py | 4 +- gnocchi/tests/test_incoming.py | 2 +- gnocchi/tests/utils.py | 5 +- 11 files changed, 146 insertions(+), 88 deletions(-) diff --git a/gnocchi/cli/manage.py b/gnocchi/cli/manage.py index 7ecb1ef5..5e89d52a 100644 --- a/gnocchi/cli/manage.py +++ b/gnocchi/cli/manage.py @@ -95,7 +95,7 @@ def change_sack_size(): LOG.error('Cannot change sack when non-empty backlog. Process ' 'remaining %s measures and try again', remainder) return - LOG.info("Changing sack size to: %s", conf.sacks_number) - old_num_sacks = s.NUM_SACKS - s.set_storage_settings(conf.sacks_number) - s.remove_sack_group(old_num_sacks) + LOG.info("Removing current %d sacks", s.NUM_SACKS) + s.remove_sacks() + LOG.info("Creating new %d sacks", conf.sacks_number) + s.upgrade(conf.sacks_number) diff --git a/gnocchi/cli/metricd.py b/gnocchi/cli/metricd.py index 71a063ec..c887cce8 100644 --- a/gnocchi/cli/metricd.py +++ b/gnocchi/cli/metricd.py @@ -23,7 +23,6 @@ import cotyledon from cotyledon import oslo_config_glue import daiquiri from oslo_config import cfg -import six import tenacity import tooz from tooz import coordination @@ -160,8 +159,7 @@ class MetricProcessor(MetricProcessBase): super(MetricProcessor, self)._configure() # create fallback in case paritioning fails or assigned no tasks - self.fallback_tasks = list( - six.moves.range(self.incoming.NUM_SACKS)) + self.fallback_tasks = list(self.incoming.iter_sacks()) try: self.partitioner = self.coord.join_partitioned_group( self.GROUP_ID, partitions=200) @@ -206,9 +204,9 @@ class MetricProcessor(MetricProcessBase): self.group_state != self.partitioner.ring.nodes): self.group_state = 
self.partitioner.ring.nodes.copy() self._tasks = [ - i for i in six.moves.range(self.incoming.NUM_SACKS) + sack for sack in self.incoming.iter_sacks() if self.partitioner.belongs_to_self( - i, replicas=self.conf.metricd.processing_replicas)] + sack, replicas=self.conf.metricd.processing_replicas)] except tooz.NotImplemented: # Do not log anything. If `run_watchers` is not implemented, it's # likely that partitioning is not implemented either, so it already @@ -305,8 +303,8 @@ def metricd_tester(conf): s = storage.get_driver(conf) inc = incoming.get_driver(conf) metrics = set() - for i in six.moves.range(inc.NUM_SACKS): - metrics.update(inc.list_metric_with_measures_to_process(i)) + for sack in inc.iter_sacks(): + metrics.update(inc.list_metric_with_measures_to_process(sack)) if len(metrics) >= conf.stop_after_processing_metrics: break c = chef.Chef(None, inc, index, s) diff --git a/gnocchi/incoming/__init__.py b/gnocchi/incoming/__init__.py index 7fe4c804..3fbc42ef 100644 --- a/gnocchi/incoming/__init__.py +++ b/gnocchi/incoming/__init__.py @@ -15,6 +15,9 @@ # License for the specific language governing permissions and limitations # under the License. import collections +import functools +import hashlib +import operator import daiquiri import numpy @@ -38,9 +41,66 @@ class SackDetectionError(Exception): pass +@functools.total_ordering +class Sack(object): + """A sack is a recipient that contains measures for a group of metrics. + + It is identified by a positive integer called `number`. + """ + + # Use slots to make them as small as possible since we can create a ton of + # those. + __slots__ = [ + "number", + "total", + "name", + ] + + def __init__(self, number, total, name): + """Create a new sack. + + :param number: The sack number, identifying it. + :param total: The total number of sacks. + :param name: The sack name. 
+ """ + self.number = number + self.total = total + self.name = name + + def __str__(self): + return self.name + + def __repr__(self): + return "<%s(%d/%d) %s>" % ( + self.__class__.__name__, self.number, self.total, str(self), + ) + + def _compare(self, op, other): + if isinstance(other, Sack): + if self.total != other.total: + raise TypeError( + "Cannot compare %s with different total number" % + self.__class__.__name__) + return op(self.number, other.number) + raise TypeError("Cannot compare %r with %r" % (self, other)) + + def __lt__(self, other): + return self._compare(operator.lt, other) + + def __eq__(self, other): + return self._compare(operator.eq, other) + + def __ne__(self, other): + # neither total_ordering nor py2 sets ne as the opposite of eq + return self._compare(operator.ne, other) + + def __hash__(self): + return hash(self.name) + + class IncomingDriver(object): MEASURE_PREFIX = "measure" - SACK_PREFIX = "incoming" + SACK_NAME_FORMAT = "incoming{total}-{number}" CFG_PREFIX = 'gnocchi-config' CFG_SACKS = 'sacks' @@ -53,13 +113,8 @@ class IncomingDriver(object): raise SackDetectionError(e) return self._num_sacks - @staticmethod - def __init__(conf, greedy=True): - pass - - def get_sack_prefix(self, num_sacks=None): - sacks = num_sacks if num_sacks else self.NUM_SACKS - return self.SACK_PREFIX + str(sacks) + '-%s' + def __init__(self, conf, greedy=True): + self._sacks = None def upgrade(self, num_sacks): try: @@ -82,7 +137,11 @@ class IncomingDriver(object): @staticmethod def get_sack_lock(coord, sack): - lock_name = b'gnocchi-sack-%s-lock' % str(sack).encode('ascii') + # FIXME(jd) Some tooz drivers have a limitation on lock name length + # (e.g. MySQL). This should be handled by tooz, but it's not yet. 
+ lock_name = hashlib.new( + 'sha1', + ('gnocchi-sack-%s-lock' % str(sack)).encode()).hexdigest().encode() return coord.get_lock(lock_name) def _make_measures_array(self): @@ -166,11 +225,18 @@ class IncomingDriver(object): def has_unprocessed(metric_id): raise exceptions.NotImplementedError + def _get_sack_name(self, number): + return self.SACK_NAME_FORMAT.format( + total=self.NUM_SACKS, number=number) + + def _make_sack(self, i): + return Sack(i, self.NUM_SACKS, self._get_sack_name(i)) + def sack_for_metric(self, metric_id): - return metric_id.int % self.NUM_SACKS + return self._make_sack(metric_id.int % self.NUM_SACKS) - def get_sack_name(self, sack): - return self.get_sack_prefix() % sack + def iter_sacks(self): + return (self._make_sack(i) for i in six.moves.range(self.NUM_SACKS)) @staticmethod def iter_on_sacks_to_process(): diff --git a/gnocchi/incoming/ceph.py b/gnocchi/incoming/ceph.py index bed93bd7..d46f34d4 100644 --- a/gnocchi/incoming/ceph.py +++ b/gnocchi/incoming/ceph.py @@ -70,11 +70,10 @@ class CephStorage(incoming.IncomingDriver): self.ioctx.write_full(self.CFG_PREFIX, json.dumps({self.CFG_SACKS: num_sacks}).encode()) - def remove_sack_group(self, num_sacks): - prefix = self.get_sack_prefix(num_sacks) - for i in six.moves.xrange(num_sacks): + def remove_sacks(self): + for sack in self.iter_sacks(): try: - self.ioctx.remove_object(prefix % i) + self.ioctx.remove_object(str(sack)) except rados.ObjectNotFound: pass @@ -86,7 +85,7 @@ class CephStorage(incoming.IncomingDriver): str(metric_id), str(uuid.uuid4()), datetime.datetime.utcnow().strftime("%Y%m%d_%H:%M:%S"))) - sack = self.get_sack_name(self.sack_for_metric(metric_id)) + sack = self.sack_for_metric(metric_id) data_by_sack[sack]['names'].append(name) data_by_sack[sack]['measures'].append( self._encode_measures(measures)) @@ -103,7 +102,7 @@ class CephStorage(incoming.IncomingDriver): self.ioctx.set_omap(op, tuple(data['names']), tuple(data['measures'])) 
ops.append(self.ioctx.operate_aio_write_op( - op, sack, flags=self.OMAP_WRITE_FLAGS)) + op, str(sack), flags=self.OMAP_WRITE_FLAGS)) while ops: op = ops.pop() op.wait_for_complete() @@ -112,11 +111,11 @@ class CephStorage(incoming.IncomingDriver): metrics = set() count = 0 metric_details = defaultdict(int) - for i in six.moves.range(self.NUM_SACKS): + for sack in self.iter_sacks(): marker = "" while True: names = list(self._list_keys_to_process( - i, marker=marker, limit=self.Q_LIMIT)) + sack, marker=marker, limit=self.Q_LIMIT)) if names and names[0] < marker: raise incoming.ReportGenerationError( "Unable to cleanly compute backlog.") @@ -138,7 +137,7 @@ class CephStorage(incoming.IncomingDriver): omaps, ret = self.ioctx.get_omap_vals(op, marker, prefix, limit) try: self.ioctx.operate_read_op( - op, self.get_sack_name(sack), flag=self.OMAP_READ_FLAGS) + op, str(sack), flag=self.OMAP_READ_FLAGS) except rados.ObjectNotFound: # API have still written nothing return () @@ -181,7 +180,7 @@ class CephStorage(incoming.IncomingDriver): # NOTE(sileht): come on Ceph, no return code # for this operation ?!! self.ioctx.remove_omap_keys(op, keys) - self.ioctx.operate_write_op(op, self.get_sack_name(sack), + self.ioctx.operate_write_op(op, str(sack), flags=self.OMAP_WRITE_FLAGS) def has_unprocessed(self, metric_id): @@ -198,7 +197,7 @@ class CephStorage(incoming.IncomingDriver): sack = self.sack_for_metric(metric_id) key_prefix = self.MEASURE_PREFIX + "_" + str(metric_id) omaps, ret = self.ioctx.get_omap_vals(op, "", key_prefix, -1) - self.ioctx.operate_read_op(op, self.get_sack_name(sack), + self.ioctx.operate_read_op(op, str(sack), flag=self.OMAP_READ_FLAGS) # NOTE(sileht): after reading the libradospy, I'm # not sure that ret will have the correct value @@ -229,5 +228,5 @@ class CephStorage(incoming.IncomingDriver): # NOTE(sileht): come on Ceph, no return code # for this operation ?!! 
self.ioctx.remove_omap_keys(op, tuple(keys)) - self.ioctx.operate_write_op(op, self.get_sack_name(sack), + self.ioctx.operate_write_op(op, str(sack), flags=self.OMAP_WRITE_FLAGS) diff --git a/gnocchi/incoming/file.py b/gnocchi/incoming/file.py index a7930c56..43d314be 100644 --- a/gnocchi/incoming/file.py +++ b/gnocchi/incoming/file.py @@ -49,16 +49,14 @@ class FileStorage(incoming.IncomingDriver): data = {self.CFG_SACKS: num_sacks} with open(os.path.join(self.basepath_tmp, self.CFG_PREFIX), 'w') as f: json.dump(data, f) - utils.ensure_paths([self._sack_path(i) - for i in six.moves.range(self.NUM_SACKS)]) + utils.ensure_paths((self._sack_path(s) for s in self.iter_sacks())) - def remove_sack_group(self, num_sacks): - prefix = self.get_sack_prefix(num_sacks) - for i in six.moves.xrange(num_sacks): - shutil.rmtree(os.path.join(self.basepath, prefix % i)) + def remove_sacks(self): + for sack in self.iter_sacks(): + shutil.rmtree(os.path.join(self.basepath, str(sack))) def _sack_path(self, sack): - return os.path.join(self.basepath, self.get_sack_name(sack)) + return os.path.join(self.basepath, str(sack)) def _measure_path(self, sack, metric_id): return os.path.join(self._sack_path(sack), six.text_type(metric_id)) @@ -108,9 +106,9 @@ class FileStorage(incoming.IncomingDriver): report_vars['measures'] += len( self._list_measures_container_for_metric_str(sack, metric)) - for i in six.moves.range(self.NUM_SACKS): - for metric in self.list_metric_with_measures_to_process(i): - build_metric_report(metric, i) + for sack in self.iter_sacks(): + for metric in self.list_metric_with_measures_to_process(sack): + build_metric_report(metric, sack) return (report_vars['metrics'] or len(report_vars['metric_details'].keys()), report_vars['measures'] or diff --git a/gnocchi/incoming/redis.py b/gnocchi/incoming/redis.py index 793eac7e..a163feb8 100644 --- a/gnocchi/incoming/redis.py +++ b/gnocchi/incoming/redis.py @@ -1,6 +1,6 @@ # -*- encoding: utf-8 -*- # -# Copyright © 2017 Red Hat 
+# Copyright © 2017-2018 Red Hat # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain @@ -47,7 +47,7 @@ return {llen, table.concat(redis.call("LRANGE", KEYS[1], 0, llen), "")} self._client.hset(self.CFG_PREFIX, self.CFG_SACKS, num_sacks) @staticmethod - def remove_sack_group(num_sacks): + def remove_sacks(): # NOTE(gordc): redis doesn't maintain keys with empty values pass @@ -56,13 +56,13 @@ return {llen, table.concat(redis.call("LRANGE", KEYS[1], 0, llen), "")} def _build_measure_path(self, metric_id): return self._build_measure_path_with_sack( - metric_id, self.get_sack_name(self.sack_for_metric(metric_id))) + metric_id, str(self.sack_for_metric(metric_id))) def add_measures_batch(self, metrics_and_measures): notified_sacks = set() pipe = self._client.pipeline(transaction=False) for metric_id, measures in six.iteritems(metrics_and_measures): - sack_name = self.get_sack_name(self.sack_for_metric(metric_id)) + sack_name = str(self.sack_for_metric(metric_id)) path = self._build_measure_path_with_sack(metric_id, sack_name) pipe.rpush(path, self._encode_measures(measures)) if self.greedy and sack_name not in notified_sacks: @@ -80,7 +80,7 @@ return {llen, table.concat(redis.call("LRANGE", KEYS[1], 0, llen), "")} report_vars['metric_details'].update( dict(six.moves.zip(m_list, results))) - match = redis.SEP.join([self.get_sack_name("*").encode(), b"*"]) + match = redis.SEP.join([self._get_sack_name("*").encode(), b"*"]) metrics = 0 m_list = [] pipe = self._client.pipeline() @@ -102,7 +102,7 @@ return {llen, table.concat(redis.call("LRANGE", KEYS[1], 0, llen), "")} report_vars['metric_details'] if details else None) def list_metric_with_measures_to_process(self, sack): - match = redis.SEP.join([self.get_sack_name(sack).encode(), b"*"]) + match = redis.SEP.join([str(sack).encode(), b"*"]) keys = self._client.scan_iter(match=match, count=1000) return 
set([k.split(redis.SEP)[1].decode("utf8") for k in keys]) @@ -140,15 +140,15 @@ return {llen, table.concat(redis.call("LRANGE", KEYS[1], 0, llen), "")} p = self._client.pubsub() db = self._client.connection_pool.connection_kwargs['db'] keyspace = b"__keyspace@" + str(db).encode() + b"__:" - pattern = keyspace + self.SACK_PREFIX.encode() + b"*" + pattern = keyspace + self._get_sack_name("*").encode() p.psubscribe(pattern) for message in p.listen(): if message['type'] == 'pmessage' and message['pattern'] == pattern: # FIXME(jd) This is awful, we need a better way to extract this - # Format is defined by get_sack_prefix: incoming128-17 - yield int(message['channel'].split(b"-")[-1]) + # Format is defined by _get_sack_name: incoming128-17 + yield self._make_sack(int(message['channel'].split(b"-")[-1])) def finish_sack_processing(self, sack): # Delete the sack key which handles no data but is used to get a SET # notification in iter_on_sacks_to_process - self._client.delete(self.get_sack_name(sack)) + self._client.delete(str(sack)) diff --git a/gnocchi/incoming/s3.py b/gnocchi/incoming/s3.py index 4a08f615..933ffd72 100644 --- a/gnocchi/incoming/s3.py +++ b/gnocchi/incoming/s3.py @@ -1,6 +1,6 @@ # -*- encoding: utf-8 -*- # -# Copyright © 2016 Red Hat, Inc. +# Copyright © 2016-2018 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain @@ -20,7 +20,6 @@ import json import uuid import numpy -import six from gnocchi.common import s3 from gnocchi import incoming @@ -31,6 +30,9 @@ botocore = s3.botocore class S3Storage(incoming.IncomingDriver): + # NOTE(gordc): override to follow s3 partitioning logic + SACK_NAME_FORMAT = "{number}-{total}/" + def __init__(self, conf, greedy=True): super(S3Storage, self).__init__(conf) self.s3, self._region_name, self._bucket_prefix = ( @@ -55,12 +57,8 @@ class S3Storage(incoming.IncomingDriver): Key=self.CFG_PREFIX, Body=json.dumps(data).encode()) - def get_sack_prefix(self, num_sacks=None): - # NOTE(gordc): override to follow s3 partitioning logic - return '%s-' + ('%s/' % (num_sacks if num_sacks else self.NUM_SACKS)) - @staticmethod - def remove_sack_group(num_sacks): + def remove_sacks(num_sacks): # nothing to cleanup since sacks are part of path pass @@ -80,9 +78,9 @@ class S3Storage(incoming.IncomingDriver): now = datetime.datetime.utcnow().strftime("_%Y%m%d_%H:%M:%S") self.s3.put_object( Bucket=self._bucket_name_measures, - Key=(self.get_sack_name(self.sack_for_metric(metric_id)) - + six.text_type(metric_id) + "/" - + six.text_type(uuid.uuid4()) + now), + Key=(str(self.sack_for_metric(metric_id)) + + str(metric_id) + "/" + + str(uuid.uuid4()) + now), Body=data) def _build_report(self, details): @@ -119,7 +117,7 @@ class S3Storage(incoming.IncomingDriver): kwargs = {} response = self.s3.list_objects_v2( Bucket=self._bucket_name_measures, - Prefix=self.get_sack_name(sack), + Prefix=str(sack), Delimiter="/", MaxKeys=limit, **kwargs) @@ -139,8 +137,7 @@ class S3Storage(incoming.IncomingDriver): kwargs = {} response = self.s3.list_objects_v2( Bucket=self._bucket_name_measures, - Prefix=(self.get_sack_name(sack) - + six.text_type(metric_id) + "/"), + Prefix=(str(sack) + str(metric_id) + "/"), **kwargs) for c in response.get('Contents', ()): diff --git a/gnocchi/incoming/swift.py b/gnocchi/incoming/swift.py index 4d621ad3..f38ea35b 100644 --- 
a/gnocchi/incoming/swift.py +++ b/gnocchi/incoming/swift.py @@ -42,35 +42,34 @@ class SwiftStorage(incoming.IncomingDriver): self.swift.put_container(self.CFG_PREFIX) self.swift.put_object(self.CFG_PREFIX, self.CFG_PREFIX, json.dumps({self.CFG_SACKS: num_sacks})) - for i in six.moves.range(num_sacks): - self.swift.put_container(self.get_sack_name(i)) + for sack in self.iter_sacks(): + self.swift.put_container(str(sack)) - def remove_sack_group(self, num_sacks): - prefix = self.get_sack_prefix(num_sacks) - for i in six.moves.xrange(num_sacks): - self.swift.delete_container(prefix % i) + def remove_sacks(self): + for sack in self.iter_sacks(): + self.swift.delete_container(str(sack)) def _store_new_measures(self, metric_id, data): now = datetime.datetime.utcnow().strftime("_%Y%m%d_%H:%M:%S") self.swift.put_object( - self.get_sack_name(self.sack_for_metric(metric_id)), - six.text_type(metric_id) + "/" + six.text_type(uuid.uuid4()) + now, + str(self.sack_for_metric(metric_id)), + str(metric_id) + "/" + str(uuid.uuid4()) + now, data) def _build_report(self, details): metric_details = defaultdict(int) nb_metrics = 0 measures = 0 - for i in six.moves.range(self.NUM_SACKS): + for sack in self.iter_sacks(): if details: headers, files = self.swift.get_container( - self.get_sack_name(i), full_listing=True) + str(sack), full_listing=True) for f in files: metric, __ = f['name'].split("/", 1) metric_details[metric] += 1 else: headers, files = self.swift.get_container( - self.get_sack_name(i), delimiter='/', full_listing=True) + str(sack), delimiter='/', full_listing=True) nb_metrics += len([f for f in files if 'subdir' in f]) measures += int(headers.get('x-container-object-count')) return (nb_metrics or len(metric_details), measures, @@ -78,19 +77,19 @@ class SwiftStorage(incoming.IncomingDriver): def list_metric_with_measures_to_process(self, sack): headers, files = self.swift.get_container( - self.get_sack_name(sack), delimiter='/', full_listing=True) + str(sack), 
delimiter='/', full_listing=True) return set(f['subdir'][:-1] for f in files if 'subdir' in f) def _list_measure_files_for_metric(self, sack, metric_id): headers, files = self.swift.get_container( - self.get_sack_name(sack), path=six.text_type(metric_id), + str(sack), path=six.text_type(metric_id), full_listing=True) return files def delete_unprocessed_measures_for_metric(self, metric_id): sack = self.sack_for_metric(metric_id) files = self._list_measure_files_for_metric(sack, metric_id) - swift.bulk_delete(self.swift, self.get_sack_name(sack), files) + swift.bulk_delete(self.swift, str(sack), files) def has_unprocessed(self, metric_id): sack = self.sack_for_metric(metric_id) @@ -102,7 +101,7 @@ class SwiftStorage(incoming.IncomingDriver): all_files = defaultdict(list) for metric_id in metric_ids: sack = self.sack_for_metric(metric_id) - sack_name = self.get_sack_name(sack) + sack_name = str(sack) files = self._list_measure_files_for_metric(sack, metric_id) all_files[sack_name].extend(files) measures[metric_id] = self._array_concatenate([ diff --git a/gnocchi/tests/base.py b/gnocchi/tests/base.py index 46b55ded..3d46dbe9 100644 --- a/gnocchi/tests/base.py +++ b/gnocchi/tests/base.py @@ -361,7 +361,9 @@ class TestCase(BaseTestCase): self.storage.STORAGE_PREFIX = str(uuid.uuid4()).encode() if self.conf.incoming.driver == 'redis': - self.incoming.SACK_PREFIX = str(uuid.uuid4()) + self.incoming.SACK_NAME_FORMAT = ( + str(uuid.uuid4()) + incoming.IncomingDriver.SACK_NAME_FORMAT + ) self.storage.upgrade() self.incoming.upgrade(128) diff --git a/gnocchi/tests/test_incoming.py b/gnocchi/tests/test_incoming.py index d67c0fac..b830df2e 100644 --- a/gnocchi/tests/test_incoming.py +++ b/gnocchi/tests/test_incoming.py @@ -43,7 +43,7 @@ class TestIncomingDriver(tests_base.TestCase): def _iter_on_sacks_to_process(): for sack in self.incoming.iter_on_sacks_to_process(): - self.assertIsInstance(sack, int) + self.assertIsInstance(sack, incoming.Sack) if sack == sack_to_find: 
found.set() break diff --git a/gnocchi/tests/utils.py b/gnocchi/tests/utils.py index 413264a8..3d197cd8 100644 --- a/gnocchi/tests/utils.py +++ b/gnocchi/tests/utils.py @@ -13,14 +13,13 @@ # under the License. from oslo_config import cfg from oslo_policy import opts as policy_opts -import six from gnocchi import opts def list_all_incoming_metrics(incoming): - return set.union(*[incoming.list_metric_with_measures_to_process(i) - for i in six.moves.range(incoming.NUM_SACKS)]) + return set.union(*[incoming.list_metric_with_measures_to_process(sack) + for sack in incoming.iter_sacks()]) def prepare_conf(): -- GitLab From 1d5d8a7f1caf8e93de12a796d0835d68fc5b8c6b Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Thu, 8 Feb 2018 11:20:54 +0100 Subject: [PATCH 1245/1483] chef: move get_sack_lock from incoming --- gnocchi/chef.py | 13 +++++++++++-- gnocchi/cli/metricd.py | 2 +- gnocchi/incoming/__init__.py | 10 ---------- 3 files changed, 12 insertions(+), 13 deletions(-) diff --git a/gnocchi/chef.py b/gnocchi/chef.py index 90423ce6..69840e06 100644 --- a/gnocchi/chef.py +++ b/gnocchi/chef.py @@ -14,6 +14,7 @@ # implied. # See the License for the specific language governing permissions and # limitations under the License. +import hashlib import itertools import operator @@ -64,7 +65,7 @@ class Chef(object): for sack, metrics in itertools.groupby( metrics_to_expunge, key=ITEMGETTER_1): try: - lock = self.incoming.get_sack_lock(self.coord, sack) + lock = self.get_sack_lock(sack) if not lock.acquire(blocking=sync): # Retry later LOG.debug( @@ -100,7 +101,7 @@ class Chef(object): def refresh_metric(self, metric, timeout): s = self.incoming.sack_for_metric(metric.id) - lock = self.incoming.get_sack_lock(self.coord, s) + lock = self.get_sack_lock(s) if not lock.acquire(blocking=timeout): raise SackLockTimeoutError( 'Unable to refresh metric: %s. Metric is locked. 
' @@ -135,3 +136,11 @@ class Chef(object): if sync: raise LOG.error("Error processing new measures", exc_info=True) + + def get_sack_lock(self, sack): + # FIXME(jd) Some tooz drivers have a limitation on lock name length + # (e.g. MySQL). This should be handled by tooz, but it's not yet. + lock_name = hashlib.new( + 'sha1', + ('gnocchi-sack-%s-lock' % str(sack)).encode()).hexdigest().encode() + return self.coord.get_lock(lock_name) diff --git a/gnocchi/cli/metricd.py b/gnocchi/cli/metricd.py index c887cce8..7bb2f771 100644 --- a/gnocchi/cli/metricd.py +++ b/gnocchi/cli/metricd.py @@ -232,7 +232,7 @@ class MetricProcessor(MetricProcessBase): for s in sacks: # TODO(gordc): support delay release lock so we don't # process a sack right after another process - lock = self.incoming.get_sack_lock(self.coord, s) + lock = self.chef.get_sack_lock(s) if not lock.acquire(blocking=False): continue diff --git a/gnocchi/incoming/__init__.py b/gnocchi/incoming/__init__.py index 3fbc42ef..436e8ac4 100644 --- a/gnocchi/incoming/__init__.py +++ b/gnocchi/incoming/__init__.py @@ -16,7 +16,6 @@ # under the License. import collections import functools -import hashlib import operator import daiquiri @@ -135,15 +134,6 @@ class IncomingDriver(object): """Return the number of sacks in storage. None if not set.""" raise exceptions.NotImplementedError - @staticmethod - def get_sack_lock(coord, sack): - # FIXME(jd) Some tooz drivers have a limitation on lock name length - # (e.g. MySQL). This should be handled by tooz, but it's not yet. - lock_name = hashlib.new( - 'sha1', - ('gnocchi-sack-%s-lock' % str(sack)).encode()).hexdigest().encode() - return coord.get_lock(lock_name) - def _make_measures_array(self): return numpy.array([], dtype=TIMESERIES_ARRAY_DTYPE) -- GitLab From d4a4accbe5f37469a4f742d0207d364cadb10978 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Wed, 14 Feb 2018 14:16:55 +0100 Subject: [PATCH 1246/1483] Remove devstack support This is not tested and not widely used. 
--- devstack/README.rst | 15 - devstack/apache-gnocchi.template | 10 - devstack/apache-ported-gnocchi.template | 23 -- devstack/plugin.sh | 469 ------------------------ devstack/settings | 64 ---- doc/source/install.rst | 21 -- tox.ini | 3 - 7 files changed, 605 deletions(-) delete mode 100644 devstack/README.rst delete mode 100644 devstack/apache-gnocchi.template delete mode 100644 devstack/apache-ported-gnocchi.template delete mode 100644 devstack/plugin.sh delete mode 100644 devstack/settings diff --git a/devstack/README.rst b/devstack/README.rst deleted file mode 100644 index 57eadc4b..00000000 --- a/devstack/README.rst +++ /dev/null @@ -1,15 +0,0 @@ -============================ -Enabling Gnocchi in DevStack -============================ - -1. Download DevStack:: - - git clone https://git.openstack.org/openstack-dev/devstack.git - cd devstack - -2. Add this repo as an external repository in ``local.conf`` file:: - - [[local|localrc]] - enable_plugin gnocchi https://github.com/gnocchixyz/gnocchi - -3. Run ``stack.sh``. 
diff --git a/devstack/apache-gnocchi.template b/devstack/apache-gnocchi.template deleted file mode 100644 index bc288755..00000000 --- a/devstack/apache-gnocchi.template +++ /dev/null @@ -1,10 +0,0 @@ - -WSGIDaemonProcess gnocchi lang='en_US.UTF-8' locale='en_US.UTF-8' user=%USER% display-name=%{GROUP} processes=%APIWORKERS% threads=32 %VIRTUALENV% -WSGIProcessGroup gnocchi -WSGIScriptAlias %SCRIPT_NAME% %WSGI% - - WSGIProcessGroup gnocchi - WSGIApplicationGroup %{GLOBAL} - - -WSGISocketPrefix /var/run/%APACHE_NAME% diff --git a/devstack/apache-ported-gnocchi.template b/devstack/apache-ported-gnocchi.template deleted file mode 100644 index fd346b20..00000000 --- a/devstack/apache-ported-gnocchi.template +++ /dev/null @@ -1,23 +0,0 @@ -Listen %GNOCCHI_PORT% - - - WSGIDaemonProcess gnocchi lang='en_US.UTF-8' locale='en_US.UTF-8' user=%USER% display-name=%{GROUP} processes=%APIWORKERS% threads=32 %VIRTUALENV% - WSGIProcessGroup gnocchi - WSGIScriptAlias / %WSGI% - WSGIApplicationGroup %{GLOBAL} - - Options FollowSymLinks - AllowOverride None - = 2.4> - Require all granted - - - Order allow,deny - Allow from all - - - ErrorLog /var/log/%APACHE_NAME%/gnocchi.log - CustomLog /var/log/%APACHE_NAME%/gnocchi-access.log combined - - -WSGISocketPrefix /var/run/%APACHE_NAME% diff --git a/devstack/plugin.sh b/devstack/plugin.sh deleted file mode 100644 index 3bf35931..00000000 --- a/devstack/plugin.sh +++ /dev/null @@ -1,469 +0,0 @@ -# Gnocchi devstack plugin -# Install and start **Gnocchi** service - -# To enable Gnocchi service, add the following to localrc: -# -# enable_plugin gnocchi https://github.com/gnocchixyz/gnocchi master -# -# This will turn on both gnocchi-api and gnocchi-metricd services. -# If you don't want one of those (you do) you can use the -# disable_service command in local.conf. 
- -# Dependencies: -# -# - functions -# - ``functions`` -# - ``DEST``, ``STACK_USER`` must be defined -# - ``APACHE_NAME`` for wsgi -# - ``SERVICE_{TENANT_NAME|PASSWORD}`` must be defined -# - ``SERVICE_HOST`` -# - ``OS_AUTH_URL``, ``KEYSTONE_SERVICE_URI`` for auth in api - -# stack.sh -# --------- -# - install_gnocchi -# - configure_gnocchi -# - init_gnocchi -# - start_gnocchi -# - stop_gnocchi -# - cleanup_gnocchi - -# Save trace setting -XTRACE=$(set +o | grep xtrace) -set -o xtrace - - -if [ -z "$GNOCCHI_DEPLOY" ]; then - # Default - GNOCCHI_DEPLOY=simple - - # Fallback to common wsgi devstack configuration - if [ "$ENABLE_HTTPD_MOD_WSGI_SERVICES" == "True" ]; then - GNOCCHI_DEPLOY=mod_wsgi - fi -fi - -# Functions -# --------- - -# Test if any Gnocchi services are enabled -# is_gnocchi_enabled -function is_gnocchi_enabled { - [[ ,${ENABLED_SERVICES} =~ ,"gnocchi-" ]] && return 0 - return 1 -} - -# Test if a Ceph services are enabled -# _is_ceph_enabled -function _is_ceph_enabled { - type is_ceph_enabled_for_service >/dev/null 2>&1 && return 0 - return 1 -} - -# create_gnocchi_accounts() - Set up common required gnocchi accounts - -# Project User Roles -# ------------------------------------------------------------------------- -# $SERVICE_TENANT_NAME gnocchi service -# gnocchi_swift gnocchi_swift ResellerAdmin (if Swift is enabled) -function create_gnocchi_accounts { - # Gnocchi - if [ "$GNOCCHI_USE_KEYSTONE" == "True" ] && is_service_enabled gnocchi-api ; then - # At this time, the /etc/openstack/clouds.yaml is available, - # we could leverage that by setting OS_CLOUD - OLD_OS_CLOUD=$OS_CLOUD - export OS_CLOUD='devstack-admin' - - create_service_user "gnocchi" - - local gnocchi_service=$(get_or_create_service "gnocchi" \ - "metric" "OpenStack Metric Service") - get_or_create_endpoint $gnocchi_service \ - "$REGION_NAME" \ - "$(gnocchi_service_url)" \ - "$(gnocchi_service_url)" \ - "$(gnocchi_service_url)" - - if is_service_enabled swift && [[ 
"$GNOCCHI_STORAGE_BACKEND" = 'swift' ]] ; then - get_or_create_project "gnocchi_swift" default - local gnocchi_swift_user=$(get_or_create_user "gnocchi_swift" \ - "$SERVICE_PASSWORD" default "gnocchi_swift@example.com") - get_or_add_user_project_role "ResellerAdmin" $gnocchi_swift_user "gnocchi_swift" - fi - - export OS_CLOUD=$OLD_OS_CLOUD - fi -} - -# return the service url for gnocchi -function gnocchi_service_url { - if [[ -n $GNOCCHI_SERVICE_PORT ]]; then - echo "$GNOCCHI_SERVICE_PROTOCOL://$GNOCCHI_SERVICE_HOST:$GNOCCHI_SERVICE_PORT" - else - echo "$GNOCCHI_SERVICE_PROTOCOL://$GNOCCHI_SERVICE_HOST$GNOCCHI_SERVICE_PREFIX" - fi -} - -# install redis -# NOTE(chdent): We shouldn't rely on ceilometer being present so cannot -# use its install_redis. There are enough packages now using redis -# that there should probably be something devstack itself for -# installing it. -function _gnocchi_install_redis { - if is_ubuntu; then - install_package redis-server - restart_service redis-server - else - # This will fail (correctly) where a redis package is unavailable - install_package redis - if is_suse; then - # opensuse intsall multi-instance version of redis - # and admin is expected to install the required conf - cp /etc/redis/default.conf.example /etc/redis/default.conf - restart_service redis@default - else - restart_service redis - fi - fi - - pip_install_gr redis -} - -function _gnocchi_install_grafana { - if is_ubuntu; then - local file=$(mktemp /tmp/grafanapkg-XXXXX) - wget -O "$file" "$GRAFANA_DEB_PKG" - sudo dpkg -i "$file" - rm $file - elif is_fedora; then - sudo yum install "$GRAFANA_RPM_PKG" - fi - sudo -u grafana mkdir -p /var/lib/grafana/plugins - sudo rm -rf /var/lib/grafana/plugins/grafana-gnocchi-datasource - if [ ! 
"$GRAFANA_PLUGIN_VERSION" ]; then - sudo grafana-cli plugins install gnocchixyz-gnocchi-datasource - elif [ "$GRAFANA_PLUGIN_VERSION" != "git" ]; then - tmpfile=/tmp/gnocchixyz-gnocchi-datasource-${GRAFANA_PLUGIN_VERSION}.tar.gz - wget https://github.com/gnocchixyz/grafana-gnocchi-datasource/releases/download/${GRAFANA_PLUGIN_VERSION}/gnocchixyz-gnocchi-datasource-${GRAFANA_PLUGIN_VERSION}.tar.gz -O $tmpfile - sudo -u grafana tar -xzf $tmpfile -C /var/lib/grafana/plugins - rm -f $file - else - git_clone ${GRAFANA_PLUGINS_REPO} ${GRAFANA_PLUGINS_DIR} - sudo ln -sf ${GRAFANA_PLUGINS_DIR}/dist /var/lib/grafana/plugins/grafana-gnocchi-datasource - # NOTE(sileht): This is long and have chance to fail, thx nodejs/npm - (cd /var/lib/grafana/plugins/grafana-gnocchi-datasource && npm install && ./run-tests.sh) || true - fi - sudo service grafana-server restart -} - -function _cleanup_gnocchi_apache_wsgi { - sudo rm -f $(apache_site_config_for gnocchi) -} - -# _config_gnocchi_apache_wsgi() - Set WSGI config files of Gnocchi -function _config_gnocchi_apache_wsgi { - local gnocchi_apache_conf=$(apache_site_config_for gnocchi) - local venv_path="" - local script_name=$GNOCCHI_SERVICE_PREFIX - - if [[ ${USE_VENV} = True ]]; then - venv_path="python-path=${PROJECT_VENV["gnocchi"]}/lib/$(python_version)/site-packages" - fi - - # Only run the API on a custom PORT if it has been specifically - # asked for. 
- if [[ -n $GNOCCHI_SERVICE_PORT ]]; then - sudo cp $GNOCCHI_DIR/devstack/apache-ported-gnocchi.template $gnocchi_apache_conf - sudo sed -e " - s|%GNOCCHI_PORT%|$GNOCCHI_SERVICE_PORT|g; - " -i $gnocchi_apache_conf - else - sudo cp $GNOCCHI_DIR/devstack/apache-gnocchi.template $gnocchi_apache_conf - sudo sed -e " - s|%SCRIPT_NAME%|$script_name|g; - " -i $gnocchi_apache_conf - fi - sudo sed -e " - s|%APACHE_NAME%|$APACHE_NAME|g; - s|%WSGI%|$GNOCCHI_BIN_DIR/gnocchi-api|g; - s|%USER%|$STACK_USER|g - s|%APIWORKERS%|$API_WORKERS|g - s|%VIRTUALENV%|$venv_path|g - " -i $gnocchi_apache_conf -} - - - -# cleanup_gnocchi() - Remove residual data files, anything left over from previous -# runs that a clean run would need to clean up -function cleanup_gnocchi { - if [ "$GNOCCHI_DEPLOY" == "mod_wsgi" ]; then - _cleanup_gnocchi_apache_wsgi - fi -} - -# configure_gnocchi() - Set config files, create data dirs, etc -function configure_gnocchi { - [ ! -d $GNOCCHI_DATA_DIR ] && sudo mkdir -m 755 -p $GNOCCHI_DATA_DIR - sudo chown $STACK_USER $GNOCCHI_DATA_DIR - - # Configure logging - iniset $GNOCCHI_CONF DEFAULT debug "$ENABLE_DEBUG_LOG_LEVEL" - iniset $GNOCCHI_CONF metricd metric_processing_delay "$GNOCCHI_METRICD_PROCESSING_DELAY" - - # Set up logging - if [ "$SYSLOG" != "False" ]; then - iniset $GNOCCHI_CONF DEFAULT use_syslog "True" - fi - - # Format logging - if [ "$LOG_COLOR" == "True" ] && [ "$SYSLOG" == "False" ] && [ "$GNOCCHI_DEPLOY" != "mod_wsgi" ]; then - setup_colorized_logging $GNOCCHI_CONF DEFAULT - fi - - if [ -n "$GNOCCHI_COORDINATOR_URL" ]; then - iniset $GNOCCHI_CONF DEFAULT coordination_url "$GNOCCHI_COORDINATOR_URL" - fi - - if is_service_enabled gnocchi-statsd ; then - iniset $GNOCCHI_CONF statsd resource_id $GNOCCHI_STATSD_RESOURCE_ID - iniset $GNOCCHI_CONF statsd creator $GNOCCHI_STATSD_CREATOR - fi - - # Configure the storage driver - if _is_ceph_enabled && [[ "$GNOCCHI_STORAGE_BACKEND" = 'ceph' ]] ; then - iniset $GNOCCHI_CONF storage driver ceph - iniset 
$GNOCCHI_CONF storage ceph_username ${GNOCCHI_CEPH_USER} - iniset $GNOCCHI_CONF storage ceph_secret $(awk '/key/{print $3}' ${CEPH_CONF_DIR}/ceph.client.${GNOCCHI_CEPH_USER}.keyring) - elif is_service_enabled swift && [[ "$GNOCCHI_STORAGE_BACKEND" = 'swift' ]] ; then - iniset $GNOCCHI_CONF storage driver swift - iniset $GNOCCHI_CONF storage swift_user gnocchi_swift - iniset $GNOCCHI_CONF storage swift_key $SERVICE_PASSWORD - iniset $GNOCCHI_CONF storage swift_project_name "gnocchi_swift" - iniset $GNOCCHI_CONF storage swift_auth_version 3 - iniset $GNOCCHI_CONF storage swift_authurl $KEYSTONE_SERVICE_URI_V3 - elif [[ "$GNOCCHI_STORAGE_BACKEND" = 'file' ]] ; then - iniset $GNOCCHI_CONF storage driver file - iniset $GNOCCHI_CONF storage file_basepath $GNOCCHI_DATA_DIR/ - elif [[ "$GNOCCHI_STORAGE_BACKEND" = 'redis' ]] ; then - iniset $GNOCCHI_CONF storage driver redis - iniset $GNOCCHI_CONF storage redis_url $GNOCCHI_REDIS_URL - else - echo "ERROR: could not configure storage driver" - exit 1 - fi - - if [ "$GNOCCHI_USE_KEYSTONE" == "True" ] ; then - # Configure auth token middleware - configure_auth_token_middleware $GNOCCHI_CONF gnocchi $GNOCCHI_AUTH_CACHE_DIR - iniset $GNOCCHI_CONF api auth_mode keystone - if is_service_enabled gnocchi-grafana; then - iniset $GNOCCHI_CONF cors allowed_origin ${GRAFANA_URL} - fi - else - inidelete $GNOCCHI_CONF api auth_mode - fi - - # Configure the indexer database - iniset $GNOCCHI_CONF indexer url `database_connection_url gnocchi` - - if [ "$GNOCCHI_DEPLOY" == "mod_wsgi" ]; then - _config_gnocchi_apache_wsgi - elif [ "$GNOCCHI_DEPLOY" == "uwsgi" ]; then - # iniset creates these files when it's called if they don't exist. 
- GNOCCHI_UWSGI_FILE=$GNOCCHI_CONF_DIR/uwsgi.ini - - rm -f "$GNOCCHI_UWSGI_FILE" - - iniset "$GNOCCHI_UWSGI_FILE" uwsgi http $GNOCCHI_SERVICE_HOST:$GNOCCHI_SERVICE_PORT - iniset "$GNOCCHI_UWSGI_FILE" uwsgi wsgi-file "$GNOCCHI_BIN_DIR/gnocchi-api" - # This is running standalone - iniset "$GNOCCHI_UWSGI_FILE" uwsgi master true - # Set die-on-term & exit-on-reload so that uwsgi shuts down - iniset "$GNOCCHI_UWSGI_FILE" uwsgi die-on-term true - iniset "$GNOCCHI_UWSGI_FILE" uwsgi exit-on-reload true - iniset "$GNOCCHI_UWSGI_FILE" uwsgi threads 32 - iniset "$GNOCCHI_UWSGI_FILE" uwsgi processes $API_WORKERS - iniset "$GNOCCHI_UWSGI_FILE" uwsgi enable-threads true - iniset "$GNOCCHI_UWSGI_FILE" uwsgi plugins python - # uwsgi recommends this to prevent thundering herd on accept. - iniset "$GNOCCHI_UWSGI_FILE" uwsgi thunder-lock true - # Override the default size for headers from the 4k default. - iniset "$GNOCCHI_UWSGI_FILE" uwsgi buffer-size 65535 - # Make sure the client doesn't try to re-use the connection. 
- iniset "$GNOCCHI_UWSGI_FILE" uwsgi add-header "Connection: close" - # Don't share rados resources and python-requests globals between processes - iniset "$GNOCCHI_UWSGI_FILE" uwsgi lazy-apps true - fi -} - -# configure_keystone_for_gnocchi() - Configure Keystone needs for Gnocchi -function configure_keystone_for_gnocchi { - if [ "$GNOCCHI_USE_KEYSTONE" == "True" ] ; then - if is_service_enabled gnocchi-grafana; then - # NOTE(sileht): keystone configuration have to be set before uwsgi - # is started - iniset $KEYSTONE_CONF cors allowed_origin ${GRAFANA_URL} - fi - fi -} - -# configure_ceph_gnocchi() - gnocchi config needs to come after gnocchi is set up -function configure_ceph_gnocchi { - # Configure gnocchi service options, ceph pool, ceph user and ceph key - sudo ceph -c ${CEPH_CONF_FILE} osd pool create ${GNOCCHI_CEPH_POOL} ${GNOCCHI_CEPH_POOL_PG} ${GNOCCHI_CEPH_POOL_PGP} - sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${GNOCCHI_CEPH_POOL} size ${CEPH_REPLICAS} - if [[ $CEPH_REPLICAS -ne 1 ]]; then - sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${GNOCCHI_CEPH_POOL} crush_ruleset ${RULE_ID} - - fi - sudo ceph -c ${CEPH_CONF_FILE} auth get-or-create client.${GNOCCHI_CEPH_USER} mon "allow r" osd "allow rwx pool=${GNOCCHI_CEPH_POOL}" | sudo tee ${CEPH_CONF_DIR}/ceph.client.${GNOCCHI_CEPH_USER}.keyring - sudo chown ${STACK_USER}:$(id -g -n $whoami) ${CEPH_CONF_DIR}/ceph.client.${GNOCCHI_CEPH_USER}.keyring -} - - -# init_gnocchi() - Initialize etc. 
-function init_gnocchi { - # Create cache dir - sudo mkdir -p $GNOCCHI_AUTH_CACHE_DIR - sudo chown $STACK_USER $GNOCCHI_AUTH_CACHE_DIR - rm -f $GNOCCHI_AUTH_CACHE_DIR/* - - if is_service_enabled mysql postgresql; then - recreate_database gnocchi - fi - $GNOCCHI_BIN_DIR/gnocchi-upgrade -} - -function preinstall_gnocchi { - if is_ubuntu; then - # libpq-dev is needed to build psycopg2 - # uuid-runtime is needed to use the uuidgen command - install_package libpq-dev uuid-runtime - else - install_package postgresql-devel - fi - if [[ "$GNOCCHI_STORAGE_BACKEND" = 'ceph' ]] ; then - install_package cython - install_package librados-dev - fi -} - -# install_gnocchi() - Collect source and prepare -function install_gnocchi { - if [[ "$GNOCCHI_STORAGE_BACKEND" = 'redis' ]] || [[ "${GNOCCHI_COORDINATOR_URL%%:*}" == "redis" ]]; then - _gnocchi_install_redis - fi - - if [[ "$GNOCCHI_STORAGE_BACKEND" = 'ceph' ]] ; then - pip_install cradox - fi - - if is_service_enabled gnocchi-grafana - then - _gnocchi_install_grafana - fi - - [ "$GNOCCHI_USE_KEYSTONE" == "True" ] && EXTRA_FLAVOR=,keystone - - # We don't use setup_package because we don't follow openstack/requirements - sudo -H pip install -e "$GNOCCHI_DIR"[test,$GNOCCHI_STORAGE_BACKEND,${DATABASE_TYPE}${EXTRA_FLAVOR}] - - if [ "$GNOCCHI_DEPLOY" == "mod_wsgi" ]; then - install_apache_wsgi - elif [ "$GNOCCHI_DEPLOY" == "uwsgi" ]; then - pip_install uwsgi - fi - - # Create configuration directory - [ ! 
-d $GNOCCHI_CONF_DIR ] && sudo mkdir -m 755 -p $GNOCCHI_CONF_DIR - sudo chown $STACK_USER $GNOCCHI_CONF_DIR -} - -# start_gnocchi() - Start running processes, including screen -function start_gnocchi { - - if [ "$GNOCCHI_DEPLOY" == "mod_wsgi" ]; then - enable_apache_site gnocchi - restart_apache_server - if [[ -n $GNOCCHI_SERVICE_PORT ]]; then - tail_log gnocchi /var/log/$APACHE_NAME/gnocchi.log - tail_log gnocchi-api /var/log/$APACHE_NAME/gnocchi-access.log - else - # NOTE(chdent): At the moment this is very noisy as it - # will tail the entire apache logs, not just the gnocchi - # parts. If you don't like this either USE_SCREEN=False - # or set GNOCCHI_SERVICE_PORT. - tail_log gnocchi /var/log/$APACHE_NAME/error[_\.]log - tail_log gnocchi-api /var/log/$APACHE_NAME/access[_\.]log - fi - elif [ "$GNOCCHI_DEPLOY" == "uwsgi" ]; then - run_process gnocchi-api "$GNOCCHI_BIN_DIR/uwsgi $GNOCCHI_UWSGI_FILE" - else - run_process gnocchi-api "$GNOCCHI_BIN_DIR/gnocchi-api --port $GNOCCHI_SERVICE_PORT" - fi - # only die on API if it was actually intended to be turned on - if is_service_enabled gnocchi-api; then - - echo "Waiting for gnocchi-api to start..." - if ! timeout $SERVICE_TIMEOUT sh -c "while ! 
curl -v --max-time 5 --noproxy '*' -s $(gnocchi_service_url)/v1/resource/generic ; do sleep 1; done"; then - die $LINENO "gnocchi-api did not start" - fi - fi - - # run metricd last so we are properly waiting for swift and friends - run_process gnocchi-metricd "$GNOCCHI_BIN_DIR/gnocchi-metricd -d --config-file $GNOCCHI_CONF" - run_process gnocchi-statsd "$GNOCCHI_BIN_DIR/gnocchi-statsd -d --config-file $GNOCCHI_CONF" -} - -# stop_gnocchi() - Stop running processes -function stop_gnocchi { - if [ "$GNOCCHI_DEPLOY" == "mod_wsgi" ]; then - disable_apache_site gnocchi - restart_apache_server - fi - # Kill the gnocchi screen windows - for serv in gnocchi-api gnocchi-metricd gnocchi-statsd; do - stop_process $serv - done -} - -if is_service_enabled gnocchi-api; then - if [[ "$1" == "stack" && "$2" == "pre-install" ]]; then - echo_summary "Configuring system services for Gnocchi" - preinstall_gnocchi - elif [[ "$1" == "stack" && "$2" == "install" ]]; then - echo_summary "Installing Gnocchi" - stack_install_service gnocchi - configure_keystone_for_gnocchi - elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then - echo_summary "Configuring Gnocchi" - if _is_ceph_enabled && [[ "$GNOCCHI_STORAGE_BACKEND" = 'ceph' ]] ; then - echo_summary "Configuring Gnocchi for Ceph" - configure_ceph_gnocchi - fi - configure_gnocchi - create_gnocchi_accounts - elif [[ "$1" == "stack" && "$2" == "extra" ]]; then - echo_summary "Initializing Gnocchi" - init_gnocchi - start_gnocchi - fi - - if [[ "$1" == "unstack" ]]; then - echo_summary "Stopping Gnocchi" - stop_gnocchi - fi - - if [[ "$1" == "clean" ]]; then - cleanup_gnocchi - fi -fi - -# Restore xtrace -$XTRACE - -# Tell emacs to use shell-script-mode -## Local variables: -## mode: shell-script -## End: diff --git a/devstack/settings b/devstack/settings deleted file mode 100644 index d7033b40..00000000 --- a/devstack/settings +++ /dev/null @@ -1,64 +0,0 @@ -enable_service gnocchi-api -enable_service gnocchi-metricd -enable_service 
gnocchi-statsd - -# Set up default directories -GNOCCHI_DIR=$DEST/gnocchi -GNOCCHI_CONF_DIR=/etc/gnocchi -GNOCCHI_CONF=$GNOCCHI_CONF_DIR/gnocchi.conf -GNOCCHI_LOG_DIR=/var/log/gnocchi -GNOCCHI_AUTH_CACHE_DIR=${GNOCCHI_AUTH_CACHE_DIR:-/var/cache/gnocchi} -GNOCCHI_WSGI_DIR=${GNOCCHI_WSGI_DIR:-/var/www/gnocchi} -GNOCCHI_DATA_DIR=${GNOCCHI_DATA_DIR:-${DATA_DIR}/gnocchi} -GNOCCHI_COORDINATOR_URL=${GNOCCHI_COORDINATOR_URL:-redis://localhost:6379} -GNOCCHI_METRICD_PROCESSING_DELAY=${GNOCCHI_METRICD_PROCESSING_DELAY:-5} - -# GNOCCHI_DEPLOY defines how Gnocchi is deployed, allowed values: -# - mod_wsgi : Run Gnocchi under Apache HTTPd mod_wsgi -# - simple : Run gnocchi-api -# - uwsgi : Run Gnocchi under uwsgi -# - : Fallback to ENABLE_HTTPD_MOD_WSGI_SERVICES -GNOCCHI_DEPLOY=${GNOCCHI_DEPLOY} - -# Toggle for deploying Gnocchi with/without Keystone -GNOCCHI_USE_KEYSTONE=$(trueorfalse True GNOCCHI_USE_KEYSTONE) - -# Support potential entry-points console scripts and venvs -if [[ ${USE_VENV} = True ]]; then - PROJECT_VENV["gnocchi"]=${GNOCCHI_DIR}.venv - GNOCCHI_BIN_DIR=${PROJECT_VENV["gnocchi"]}/bin -else - GNOCCHI_BIN_DIR=$(get_python_exec_prefix) -fi - - -# Gnocchi connection info. -GNOCCHI_SERVICE_PROTOCOL=http -# NOTE(chdent): If you are not using mod wsgi you need to set port! 
-GNOCCHI_SERVICE_PORT=${GNOCCHI_SERVICE_PORT:-8041} -GNOCCHI_SERVICE_PREFIX=${GNOCCHI_SERVICE_PREFIX:-'/metric'} -GNOCCHI_SERVICE_HOST=${GNOCCHI_SERVICE_HOST:-${SERVICE_HOST}} - -# Gnocchi statsd info -GNOCCHI_STATSD_RESOURCE_ID=${GNOCCHI_STATSD_RESOURCE_ID:-$(uuidgen)} -GNOCCHI_STATSD_CREATOR=${GNOCCHI_STATSD_CREATOR:-admin} - -# Ceph gnocchi info -GNOCCHI_CEPH_USER=${GNOCCHI_CEPH_USER:-gnocchi} -GNOCCHI_CEPH_POOL=${GNOCCHI_CEPH_POOL:-gnocchi} -GNOCCHI_CEPH_POOL_PG=${GNOCCHI_CEPH_POOL_PG:-8} -GNOCCHI_CEPH_POOL_PGP=${GNOCCHI_CEPH_POOL_PGP:-8} - -# Redis gnocchi info -GNOCCHI_REDIS_URL=${GNOCCHI_REDIS_URL:-redis://localhost:6379} - -# Gnocchi backend -GNOCCHI_STORAGE_BACKEND=${GNOCCHI_STORAGE_BACKEND:-redis} - -# Grafana settings -GRAFANA_RPM_PKG=${GRAFANA_RPM_PKG:-https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana-4.3.1-1.x86_64.rpm} -GRAFANA_DEB_PKG=${GRAFANA_DEB_PKG:-https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana_4.3.1_amd64.deb} -GRAFANA_PLUGIN_VERSION=${GRAFANA_PLUGIN_VERSION} -GRAFANA_PLUGINS_DIR=${GRAFANA_PLUGINS_DIR:-$DEST/grafana-gnocchi-datasource} -GRAFANA_PLUGINS_REPO=${GRAFANA_PLUGINS_REPO:-http://github.com/gnocchixyz/grafana-gnocchi-datasource.git} -GRAFANA_URL=${GRAFANA_URL:-http://$HOST_IP:3000} diff --git a/doc/source/install.rst b/doc/source/install.rst index 7c8e2f69..9b63715a 100644 --- a/doc/source/install.rst +++ b/doc/source/install.rst @@ -205,27 +205,6 @@ to build a Docker image containing Gnocchi latest version (fetched from PyPI). .. _gnocchi-openshift repository: https://github.com/gnocchixyz/gnocchi-openshift -Installation Using Devstack -=========================== - -To enable Gnocchi in `devstack`_, add the following to local.conf: - -:: - - enable_plugin gnocchi https://github.com/gnocchixyz/gnocchi master - -To enable Grafana support in devstack, you can also enable `gnocchi-grafana`:: - - enable_service gnocchi-grafana - -Then, you can start devstack: - -:: - - ./stack.sh - -.. 
_devstack: http://devstack.org - Gnocchi Configuration sample ============================ diff --git a/tox.ini b/tox.ini index 8e14e49e..3a678bd4 100644 --- a/tox.ini +++ b/tox.ini @@ -111,10 +111,7 @@ commands = pifpaf --env-prefix INDEXER run mysql -- pifpaf --env-prefix STORAGE [testenv:pep8] deps = hacking>=0.12,<0.13 - bashate -whitelist_externals = bash commands = flake8 - bashate -v devstack/plugin.sh [testenv:py27-cover] commands = pifpaf -g GNOCCHI_INDEXER_URL run postgresql -- python setup.py testr --coverage --testr-args="{posargs}" -- GitLab From fd1fff5cee3317f570b6a15d938cc2c49df60d48 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Thu, 15 Feb 2018 17:02:51 +0100 Subject: [PATCH 1247/1483] rest: stop using set to validate archive policies voluptuous does not know how to use set and now wants to check that the default value validates against the validators. --- .travis.yml | 17 ++++++++++++++++- gnocchi/archive_policy.py | 2 +- gnocchi/rest/api.py | 12 ++++++------ 3 files changed, 23 insertions(+), 8 deletions(-) diff --git a/.travis.yml b/.travis.yml index eaaa210e..79b10316 100644 --- a/.travis.yml +++ b/.travis.yml @@ -25,8 +25,13 @@ env: - TARGET: py35-postgresql before_script: - # Travis We need to fetch all tags/branches for documentation target + # NOTE(sileht): We need to fetch all tags/branches for documentation. + # For the multiversioning, we change all remotes refs to point to + # the pull request checkout. So the "master" branch will be the PR sha and not + # real "master" branch. This ensures the doc build use the PR code for initial + # doc setup. 
- if \[ "$TRAVIS_PULL_REQUEST" != "false" -o -n "$TRAVIS_TAG" \]; then + set -x; case $TARGET in docs*) git config --get-all remote.origin.fetch; @@ -36,6 +41,16 @@ before_script: git fetch --unshallow --tags; ;; esac ; + case $TARGET in + docs-gnocchi.xyz) + git branch -a | sed -n "/\/HEAD /d; /\/master$/d; s,remotes/origin/,,p;" | xargs -i git branch {} origin/{} ; + git branch -D master; + git checkout -b master; + git remote set-url origin file:///home/tester/src; + git ls-remote --heads --tags | grep heads; + ;; + esac ; + set +x; fi install: - if \[ "$TRAVIS_PULL_REQUEST" != "false" -o -n "$TRAVIS_TAG" \]; then diff --git a/gnocchi/archive_policy.py b/gnocchi/archive_policy.py index 51cb1227..2ace6994 100644 --- a/gnocchi/archive_policy.py +++ b/gnocchi/archive_policy.py @@ -88,7 +88,7 @@ class ArchivePolicy(object): if aggregation_methods is None: self.aggregation_methods = self.DEFAULT_AGGREGATION_METHODS else: - self.aggregation_methods = aggregation_methods + self.aggregation_methods = set(aggregation_methods) def get_aggregation(self, method, granularity): # Find the timespan diff --git a/gnocchi/rest/api.py b/gnocchi/rest/api.py index cd19b289..aa2d2358 100644 --- a/gnocchi/rest/api.py +++ b/gnocchi/rest/api.py @@ -222,10 +222,10 @@ def strtobool(varname, v): abort(400, "Unable to parse `%s': %s" % (varname, six.text_type(e))) -RESOURCE_DEFAULT_PAGINATION = ['revision_start:asc', - 'started_at:asc'] +RESOURCE_DEFAULT_PAGINATION = [u'revision_start:asc', + u'started_at:asc'] -METRIC_DEFAULT_PAGINATION = ['id:asc'] +METRIC_DEFAULT_PAGINATION = [u'id:asc'] def get_pagination_options(params, default): @@ -324,7 +324,7 @@ class ArchivePoliciesController(rest.RestController): enforce("create archive policy", {}) # NOTE(jd): Initialize this one at run-time because we rely on conf conf = pecan.request.conf - valid_agg_methods = ( + valid_agg_methods = list( archive_policy.ArchivePolicy.VALID_AGGREGATION_METHODS_VALUES ) ArchivePolicySchema = voluptuous.Schema({ 
@@ -335,8 +335,8 @@ class ArchivePoliciesController(rest.RestController): ), voluptuous.Required( "aggregation_methods", - default=set(conf.archive_policy.default_aggregation_methods)): - voluptuous.All(list(valid_agg_methods), voluptuous.Coerce(set)), + default=list(conf.archive_policy.default_aggregation_methods)): + valid_agg_methods, voluptuous.Required("definition"): ArchivePolicyDefinitionSchema, }) -- GitLab From 09265400012e3e466bd4d8694f8d49b7b0d3bfd4 Mon Sep 17 00:00:00 2001 From: Thomas Goirand Date: Mon, 19 Feb 2018 10:23:19 +0100 Subject: [PATCH 1248/1483] Now packaging 4.2.0 --- debian/changelog | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/debian/changelog b/debian/changelog index 5402fe23..9aaa1a41 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,8 +1,12 @@ -gnocchi (4.0.4-2) UNRELEASED; urgency=medium +gnocchi (4.2.0-1) UNRELEASED; urgency=medium + [ Ondřej Nový ] * d/control: Set Vcs-* to salsa.debian.org - -- Ondřej Nový Mon, 12 Feb 2018 10:34:51 +0100 + [ Thomas Goirand ] + * New upstream release. + + -- Thomas Goirand Mon, 19 Feb 2018 10:23:06 +0100 gnocchi (4.0.4-1) unstable; urgency=medium -- GitLab From a9be03ffb509fae70dd7896bee2bc3c35d783cf5 Mon Sep 17 00:00:00 2001 From: Thomas Goirand Date: Mon, 19 Feb 2018 10:34:14 +0100 Subject: [PATCH 1249/1483] Fixed (build-)depends for this release. --- debian/changelog | 1 + debian/control | 28 ++++++++++++++++------------ 2 files changed, 17 insertions(+), 12 deletions(-) diff --git a/debian/changelog b/debian/changelog index 9aaa1a41..a1e87c71 100644 --- a/debian/changelog +++ b/debian/changelog @@ -5,6 +5,7 @@ gnocchi (4.2.0-1) UNRELEASED; urgency=medium [ Thomas Goirand ] * New upstream release. + * Fixed (build-)depends for this release. 
-- Thomas Goirand Mon, 19 Feb 2018 10:23:06 +0100 diff --git a/debian/control b/debian/control index 376e4e02..2786ef8e 100644 --- a/debian/control +++ b/debian/control @@ -7,7 +7,7 @@ Uploaders: Build-Depends: debhelper (>= 10), dh-python, - openstack-pkg-tools (>= 54~), + openstack-pkg-tools (>= 66~), python-all, python-pbr, python-setuptools, @@ -22,17 +22,18 @@ Build-Depends-Indep: postgresql-server-dev-all, python3-boto3, python3-botocore (>= 1.5), + python3-cachetools, python3-cotyledon (>= 1.5.0), python3-coverage (>= 3.6), python3-daiquiri, python3-doc8, python3-fixtures, python3-future (>= 0.15), - python3-gabbi (>= 1.30), + python3-gabbi (>= 1.37.0), python3-iso8601, python3-jsonpatch, python3-keystoneclient (>= 1:1.6.0), - python3-keystonemiddleware (>= 4.0.0), + python3-keystonemiddleware (>= 4.19.0), python3-lz4 (>= 0.9.0), python3-mock, python3-monotonic, @@ -40,19 +41,20 @@ Build-Depends-Indep: python3-numpy, python3-os-testr, python3-oslo.config (>= 1:3.22.0), - python3-oslo.db (>= 4.8.0), + python3-oslo.db (>= 4.29.0), python3-oslo.middleware (>= 3.22.0), python3-oslo.policy, - python3-pandas, python3-paste, python3-pastedeploy, python3-pecan, python3-prettytable, + python3-protobuf, python3-psycopg2, python3-pymysql, + python3-pyparsing (>= 2.2.0), python3-redis, - python3-scipy, python3-six, + python3-snappy, python3-sphinx-bootstrap-theme, python3-sphinx-rtd-theme, python3-sphinxcontrib.httpdomain, @@ -61,7 +63,7 @@ Build-Depends-Indep: python3-stevedore, python3-swiftclient (>= 3.1.0), python3-sysv-ipc, - python3-tenacity (>= 3.1.0), + python3-tenacity (>= 4.6.0), python3-testresources, python3-testscenarios, python3-testtools (>= 0.9.38), @@ -136,6 +138,7 @@ Section: python Architecture: all Depends: alembic, + python3-cachetools, python3-boto3, python3-botocore (>= 1.5), python3-cotyledon (>= 1.5.0), @@ -144,31 +147,32 @@ Depends: python3-iso8601, python3-jsonpatch, python3-keystoneclient (>= 1:1.6.0), - python3-keystonemiddleware (>= 4.0.0), 
+ python3-keystonemiddleware (>= 4.19.0), python3-lz4 (>= 0.9.0), python3-monotonic, python3-numpy, python3-oslo.config (>= 1:3.22.0), - python3-oslo.db (>= 4.8.0), + python3-oslo.db (>= 4.29.0), python3-oslo.middleware (>= 3.22.0), python3-oslo.policy, python3-oslosphinx (>= 2.2.0.0), - python3-pandas, python3-paste, python3-pastedeploy, python3-pbr, python3-pecan, python3-prettytable, + python3-protobuf, python3-psycopg2, python3-pymysql, + python3-pyparsing (>= 2.2.0), python3-redis, - python3-scipy, python3-six, + python3-snappy, python3-sqlalchemy, python3-sqlalchemy-utils (>= 0.32.14), python3-stevedore, python3-swiftclient (>= 3.1.0), - python3-tenacity (>= 3.1.0), + python3-tenacity (>= 4.6.0), python3-tooz (>= 1.38), python3-ujson, python3-voluptuous, -- GitLab From 63fe5623d9b13e563e33a267fbc749fc3f6a851e Mon Sep 17 00:00:00 2001 From: Thomas Goirand Date: Mon, 19 Feb 2018 10:34:36 +0100 Subject: [PATCH 1250/1483] Standards-Version: 4.1.3 --- debian/changelog | 1 + debian/control | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/debian/changelog b/debian/changelog index a1e87c71..d093d0e9 100644 --- a/debian/changelog +++ b/debian/changelog @@ -6,6 +6,7 @@ gnocchi (4.2.0-1) UNRELEASED; urgency=medium [ Thomas Goirand ] * New upstream release. * Fixed (build-)depends for this release. + * Standards-Version is now 4.1.3. 
-- Thomas Goirand Mon, 19 Feb 2018 10:23:06 +0100 diff --git a/debian/control b/debian/control index 2786ef8e..82ff3ecb 100644 --- a/debian/control +++ b/debian/control @@ -77,7 +77,7 @@ Build-Depends-Indep: python3-yaml, subunit (>= 0.0.18), testrepository, -Standards-Version: 4.1.1 +Standards-Version: 4.1.3 Vcs-Browser: https://salsa.debian.org/openstack-team/services/gnocchi Vcs-Git: https://salsa.debian.org/openstack-team/services/gnocchi.git Homepage: https://github.com/openstack/gnocchi -- GitLab From 97dae5ae36e075b840fbaf9039157bd2d23feb3a Mon Sep 17 00:00:00 2001 From: Thomas Goirand Date: Mon, 19 Feb 2018 10:37:09 +0100 Subject: [PATCH 1251/1483] Add patch for Py3 compat, courtesy of James Page from Canonical. --- debian/changelog | 1 + debian/patches/py3-compat.patch | 42 +++++++++++++++++++++++++++++++++ debian/patches/series | 1 + 3 files changed, 44 insertions(+) create mode 100644 debian/patches/py3-compat.patch create mode 100644 debian/patches/series diff --git a/debian/changelog b/debian/changelog index d093d0e9..9adb4f91 100644 --- a/debian/changelog +++ b/debian/changelog @@ -7,6 +7,7 @@ gnocchi (4.2.0-1) UNRELEASED; urgency=medium * New upstream release. * Fixed (build-)depends for this release. * Standards-Version is now 4.1.3. + * Add patch for Py3 compat, courtesy of James Page from Canonical. 
-- Thomas Goirand Mon, 19 Feb 2018 10:23:06 +0100 diff --git a/debian/patches/py3-compat.patch b/debian/patches/py3-compat.patch new file mode 100644 index 00000000..bf6141e0 --- /dev/null +++ b/debian/patches/py3-compat.patch @@ -0,0 +1,42 @@ +Description: Ensure member_id is correctly encoded +Author: James Page +Forwarded: no + +Index: gnocchi/gnocchi/cli/metricd.py +=================================================================== +--- gnocchi.orig/gnocchi/cli/metricd.py ++++ gnocchi/gnocchi/cli/metricd.py +@@ -67,12 +67,14 @@ class MetricProcessBase(cotyledon.Servic + self._wake_up.set() + + def _configure(self): +- member_id = "%s.%s.%s" % (socket.gethostname(), +- self.worker_id, +- # NOTE(jd) Still use a uuid here so we're +- # sure there's no conflict in case of +- # crash/restart +- str(uuid.uuid4())) ++ member_id = str.encode( ++ "%s.%s.%s" % (socket.gethostname(), ++ self.worker_id, ++ # NOTE(jd) Still use a uuid here so we're ++ # sure there's no conflict in case of ++ # crash/restart ++ str(uuid.uuid4())) ++ ) + self.coord = get_coordinator_and_start(member_id, + self.conf.coordination_url) + self.store = storage.get_driver(self.conf, self.coord) +Index: gnocchi/gnocchi/rest/app.py +=================================================================== +--- gnocchi.orig/gnocchi/rest/app.py ++++ gnocchi/gnocchi/rest/app.py +@@ -93,7 +93,7 @@ class GnocchiHook(pecan.hooks.PecanHook) + # entirely. 
+ self.backends[name] = ( + metricd.get_coordinator_and_start( +- str(uuid.uuid4()), ++ str.encode(str(uuid.uuid4())), + self.conf.coordination_url) + ) + elif name == "storage": diff --git a/debian/patches/series b/debian/patches/series new file mode 100644 index 00000000..aff3d2ae --- /dev/null +++ b/debian/patches/series @@ -0,0 +1 @@ +py3-compat.patch -- GitLab From 16047440c8a54e00b63302f1c4e9d02427ab3723 Mon Sep 17 00:00:00 2001 From: Thomas Goirand Date: Mon, 19 Feb 2018 09:57:07 +0000 Subject: [PATCH 1252/1483] Fixed keystonemiddleware version --- debian/control | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/debian/control b/debian/control index 82ff3ecb..d08cc1c9 100644 --- a/debian/control +++ b/debian/control @@ -33,7 +33,7 @@ Build-Depends-Indep: python3-iso8601, python3-jsonpatch, python3-keystoneclient (>= 1:1.6.0), - python3-keystonemiddleware (>= 4.19.0), + python3-keystonemiddleware (>= 4.0.0), python3-lz4 (>= 0.9.0), python3-mock, python3-monotonic, @@ -147,7 +147,7 @@ Depends: python3-iso8601, python3-jsonpatch, python3-keystoneclient (>= 1:1.6.0), - python3-keystonemiddleware (>= 4.19.0), + python3-keystonemiddleware (>= 4.0.0), python3-lz4 (>= 0.9.0), python3-monotonic, python3-numpy, -- GitLab From 2eb485228685dacb62bee09abcee9679e9b1414a Mon Sep 17 00:00:00 2001 From: Thomas Goirand Date: Mon, 19 Feb 2018 10:32:23 +0000 Subject: [PATCH 1253/1483] Fixed diff with upstream tag. 
--- .travis.yml | 1 + gnocchi/cli/metricd.py | 7 ------- gnocchi/tests/functional/gabbits/metric-list.yaml | 2 +- 3 files changed, 2 insertions(+), 8 deletions(-) diff --git a/.travis.yml b/.travis.yml index 1a42176d..84f554f7 100644 --- a/.travis.yml +++ b/.travis.yml @@ -10,6 +10,7 @@ cache: env: - TARGET: pep8 - TARGET: docs + - TARGET: docs-gnocchi.xyz - TARGET: py27-mysql-ceph-upgrade-from-3.1 - TARGET: py35-postgresql-file-upgrade-from-3.1 diff --git a/gnocchi/cli/metricd.py b/gnocchi/cli/metricd.py index a25d8e80..36da079a 100644 --- a/gnocchi/cli/metricd.py +++ b/gnocchi/cli/metricd.py @@ -46,13 +46,6 @@ def get_coordinator_and_start(member_id, url): return coord -# Retry with exponential backoff for up to 1 minute -_wait_exponential = tenacity.wait_exponential(multiplier=0.5, max=60) - - -retry_on_exception = tenacity.Retrying(wait=_wait_exponential).call - - class MetricProcessBase(cotyledon.Service): def __init__(self, worker_id, conf, interval_delay=0): super(MetricProcessBase, self).__init__(worker_id) diff --git a/gnocchi/tests/functional/gabbits/metric-list.yaml b/gnocchi/tests/functional/gabbits/metric-list.yaml index 347e3c87..f71b2d10 100644 --- a/gnocchi/tests/functional/gabbits/metric-list.yaml +++ b/gnocchi/tests/functional/gabbits/metric-list.yaml @@ -89,7 +89,7 @@ tests: - name: list metrics GET: /v1/metric response_json_paths: - $.`len`: 2 + $.`len`: 4 - name: list metrics by id GET: /v1/metric?id=$HISTORY['create metric 1'].$RESPONSE['id'] -- GitLab From 8086999f8ee8327820695c8426b667aa0da7dd69 Mon Sep 17 00:00:00 2001 From: Thomas Goirand Date: Mon, 19 Feb 2018 10:33:26 +0000 Subject: [PATCH 1254/1483] Add missing python3-pytimeparse (b-)d --- debian/control | 2 ++ 1 file changed, 2 insertions(+) diff --git a/debian/control b/debian/control index d08cc1c9..69e67a65 100644 --- a/debian/control +++ b/debian/control @@ -52,6 +52,7 @@ Build-Depends-Indep: python3-psycopg2, python3-pymysql, python3-pyparsing (>= 2.2.0), + python3-pytimeparse, 
python3-redis, python3-six, python3-snappy, @@ -165,6 +166,7 @@ Depends: python3-psycopg2, python3-pymysql, python3-pyparsing (>= 2.2.0), + python3-pytimeparse, python3-redis, python3-six, python3-snappy, -- GitLab From d255f96f32da7db550e8a36dc7f45a003b60fdff Mon Sep 17 00:00:00 2001 From: Thomas Goirand Date: Mon, 19 Feb 2018 10:38:26 +0000 Subject: [PATCH 1255/1483] * Blacklist test_gnocchi_config_generator_run(), we don't care about it anyway, as we're using oslo-config-generator. --- debian/changelog | 2 ++ debian/rules | 3 +-- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/debian/changelog b/debian/changelog index 9adb4f91..4b5390c2 100644 --- a/debian/changelog +++ b/debian/changelog @@ -8,6 +8,8 @@ gnocchi (4.2.0-1) UNRELEASED; urgency=medium * Fixed (build-)depends for this release. * Standards-Version is now 4.1.3. * Add patch for Py3 compat, courtesy of James Page from Canonical. + * Blacklist test_gnocchi_config_generator_run(), we don't care about it + anyway, as we're using oslo-config-generator. -- Thomas Goirand Mon, 19 Feb 2018 10:23:06 +0100 diff --git a/debian/rules b/debian/rules index 75830e05..58dda945 100755 --- a/debian/rules +++ b/debian/rules @@ -3,8 +3,7 @@ UPSTREAM_GIT:=https://github.com/gnocchixyz/gnocchi include /usr/share/openstack-pkg-tools/pkgos.make -UNIT_TEST_BLACKLIST = test_carbonara.CarbonaraCmd.* - +UNIT_TEST_BLACKLIST = test_carbonara.CarbonaraCmd.*|.*test_bin\.BinTestCase\.test_gnocchi_config_generator_run.* %: dh $@ --buildsystem=python_distutils --with python3,sphinxdoc -- GitLab From f4345225719cb8e4d14380ecd4a6437653dca05c Mon Sep 17 00:00:00 2001 From: Thomas Goirand Date: Mon, 19 Feb 2018 10:49:10 +0000 Subject: [PATCH 1256/1483] Fixed wsgi install. 
--- debian/gnocchi-common.install | 1 - 1 file changed, 1 deletion(-) diff --git a/debian/gnocchi-common.install b/debian/gnocchi-common.install index a74fd66d..dd8f549d 100644 --- a/debian/gnocchi-common.install +++ b/debian/gnocchi-common.install @@ -1,3 +1,2 @@ gnocchi/rest/api-paste.ini /usr/share/gnocchi-common -gnocchi/rest/app.wsgi /usr/share/gnocchi-common gnocchi/rest/policy.json /usr/share/gnocchi-common -- GitLab From a155db68bc6368590d571157f1624e814fbb85e2 Mon Sep 17 00:00:00 2001 From: Thomas Goirand Date: Mon, 19 Feb 2018 10:55:28 +0000 Subject: [PATCH 1257/1483] dh_python3 --shebang=/usr/bin/python3 --- debian/rules | 3 +++ 1 file changed, 3 insertions(+) diff --git a/debian/rules b/debian/rules index 58dda945..78f8788f 100755 --- a/debian/rules +++ b/debian/rules @@ -82,6 +82,9 @@ override_dh_clean: dh_clean -O--buildsystem=python_distutils rm -rf build debian/gnocchi-common.postinst debian/gnocchi-common.config debian/gnocchi-api.config debian/gnocchi-api.postinst +override_dh_python3: + dh_python3 --shebang=/usr/bin/python3 + # Commands not to run override_dh_installcatalogs: override_dh_installemacsen override_dh_installifupdown: -- GitLab From e9f49610fe9c481701c6f6461bf7b7e8cd135a66 Mon Sep 17 00:00:00 2001 From: Thomas Goirand Date: Mon, 19 Feb 2018 11:07:50 +0000 Subject: [PATCH 1258/1483] Fixed uwsgi params. --- debian/changelog | 1 + debian/gnocchi-api.init.in | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/debian/changelog b/debian/changelog index 4b5390c2..846859e3 100644 --- a/debian/changelog +++ b/debian/changelog @@ -10,6 +10,7 @@ gnocchi (4.2.0-1) UNRELEASED; urgency=medium * Add patch for Py3 compat, courtesy of James Page from Canonical. * Blacklist test_gnocchi_config_generator_run(), we don't care about it anyway, as we're using oslo-config-generator. + * Fixed uwsgi params. 
-- Thomas Goirand Mon, 19 Feb 2018 10:23:06 +0100 diff --git a/debian/gnocchi-api.init.in b/debian/gnocchi-api.init.in index 4599f2df..4998c4c5 100644 --- a/debian/gnocchi-api.init.in +++ b/debian/gnocchi-api.init.in @@ -16,7 +16,7 @@ DESC="OpenStack Gnocchi Api" PROJECT_NAME=gnocchi NAME=${PROJECT_NAME}-api DAEMON=/usr/bin/uwsgi_python3 -DAEMON_ARGS="--master --paste-logger --processes 4 --die-on-term --logto /var/log/gnocchi/gnocchi-api.log --stats localhost:9315 --http-socket :8041 --wsgi-file /usr/bin/gnocchi-api" +DAEMON_ARGS="--master --enable-threads --thunder-lock --die-on-term --threads 4 --lazy-apps --wsgi gnocchi.rest.wsgi --paste-logger --processes 4 --die-on-term --logto /var/log/gnocchi/gnocchi-api.log --http-socket :8041" NO_OPENSTACK_CONFIG_FILE_DAEMON_ARG=yes NO_OPENSTACK_LOGFILE_DAEMON_ARG=yes USE_SYSLOG=no -- GitLab From e67116e1efb237e477cb983de56f3d662b710eb4 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Fri, 9 Feb 2018 13:42:56 +0100 Subject: [PATCH 1259/1483] storage: replace _store_metric_measures with _store_metric_splits This new method allow to store several splits at once for a metric. --- gnocchi/storage/__init__.py | 17 +++++++++++++---- gnocchi/storage/ceph.py | 17 +++++++++-------- gnocchi/storage/file.py | 13 +++++++------ gnocchi/storage/redis.py | 13 ++++++++----- gnocchi/storage/s3.py | 15 ++++++++------- gnocchi/storage/swift.py | 13 +++++++------ gnocchi/tests/test_storage.py | 17 +++++++++-------- 7 files changed, 61 insertions(+), 44 deletions(-) diff --git a/gnocchi/storage/__init__.py b/gnocchi/storage/__init__.py index e94022b3..ef6e9ad8 100644 --- a/gnocchi/storage/__init__.py +++ b/gnocchi/storage/__init__.py @@ -160,8 +160,17 @@ class StorageDriver(object): raise NotImplementedError @staticmethod - def _store_metric_measures(metric, timestamp_key, aggregation, - data, offset=None, version=3): + def _store_metric_splits(metric, keys_and_data_and_offset, aggregation, + version=3): + """Store metric split. 
+ + Store a bunch of splits for a metric. + + :param metric: The metric to store for + :param keys_and_data_and_offset: A list of (key, data, offset) tuples + :param aggregation: The aggregation method concerned + :param version: Storage engine format version. + """ raise NotImplementedError def _list_split_keys_for_metric(self, metric, aggregation, granularity, @@ -308,8 +317,8 @@ class StorageDriver(object): offset, data = split.serialize(key, compressed=write_full) - return self._store_metric_measures(metric, key, aggregation, - data, offset=offset) + return self._store_metric_splits( + metric, [(key, data, offset)], aggregation) def _add_measures(self, aggregation, ap_def, metric, grouped_serie, previous_oldest_mutable_timestamp, diff --git a/gnocchi/storage/ceph.py b/gnocchi/storage/ceph.py index 133ebe7e..8897207c 100644 --- a/gnocchi/storage/ceph.py +++ b/gnocchi/storage/ceph.py @@ -77,15 +77,16 @@ class CephStorage(storage.StorageDriver): else: self.ioctx.write_full(name, b"") - def _store_metric_measures(self, metric, key, aggregation, - data, offset=None, version=3): - name = self._get_object_name(metric, key, aggregation, version) - if offset is None: - self.ioctx.write_full(name, data) - else: - self.ioctx.write(name, data, offset=offset) + def _store_metric_splits(self, metric, keys_and_data_and_offset, + aggregation, version=3): with rados.WriteOpCtx() as op: - self.ioctx.set_omap(op, (name,), (b"",)) + for key, data, offset in keys_and_data_and_offset: + name = self._get_object_name(metric, key, aggregation, version) + if offset is None: + self.ioctx.write_full(name, data) + else: + self.ioctx.write(name, data, offset=offset) + self.ioctx.set_omap(op, (name,), (b"",)) self.ioctx.operate_write_op( op, self._build_unaggregated_timeserie_path(metric, 3)) diff --git a/gnocchi/storage/file.py b/gnocchi/storage/file.py index 9074e02b..7605edb3 100644 --- a/gnocchi/storage/file.py +++ b/gnocchi/storage/file.py @@ -124,12 +124,13 @@ class 
FileStorage(storage.StorageDriver): os.unlink(self._build_metric_path_for_split( metric, aggregation, key, version)) - def _store_metric_measures(self, metric, key, aggregation, - data, offset=None, version=3): - self._atomic_file_store( - self._build_metric_path_for_split( - metric, aggregation, key, version), - data) + def _store_metric_splits(self, metric, keys_and_data_and_offset, + aggregation, version=3): + for key, data, offset in keys_and_data_and_offset: + self._atomic_file_store( + self._build_metric_path_for_split( + metric, aggregation, key, version), + data) def _delete_metric(self, metric): path = self._build_metric_dir(metric) diff --git a/gnocchi/storage/redis.py b/gnocchi/storage/redis.py index 6b19e666..2475f29f 100644 --- a/gnocchi/storage/redis.py +++ b/gnocchi/storage/redis.py @@ -115,11 +115,14 @@ return {0, final} field = self._aggregated_field_for_split(aggregation, key, version) self._client.hdel(self._metric_key(metric), field) - def _store_metric_measures(self, metric, key, aggregation, - data, offset=None, version=3): - field = self._aggregated_field_for_split( - aggregation, key, version) - self._client.hset(self._metric_key(metric), field, data) + def _store_metric_splits(self, metric, keys_and_data_and_offset, + aggregation, version=3): + pipe = self._client.pipeline(transaction=False) + metric_key = self._metric_key(metric) + for key, data, offset in keys_and_data_and_offset: + key = self._aggregated_field_for_split(aggregation, key, version) + pipe.hset(metric_key, key, data) + pipe.execute() def _delete_metric(self, metric): self._client.delete(self._metric_key(metric)) diff --git a/gnocchi/storage/s3.py b/gnocchi/storage/s3.py index 1063bf7b..528c0b7e 100644 --- a/gnocchi/storage/s3.py +++ b/gnocchi/storage/s3.py @@ -122,13 +122,14 @@ class S3Storage(storage.StorageDriver): wait=self._consistency_wait, stop=self._consistency_stop)(_head) - def _store_metric_measures(self, metric, key, aggregation, - data, offset=0, version=3): - 
self._put_object_safe( - Bucket=self._bucket_name, - Key=self._prefix(metric) + self._object_name( - key, aggregation, version), - Body=data) + def _store_metric_splits(self, metric, keys_and_data_and_offset, + aggregation, version=3): + for key, data, offset in keys_and_data_and_offset: + self._put_object_safe( + Bucket=self._bucket_name, + Key=self._prefix(metric) + self._object_name( + key, aggregation, version), + Body=data) def _delete_metric_measures(self, metric, key, aggregation, version=3): diff --git a/gnocchi/storage/swift.py b/gnocchi/storage/swift.py index 7dc68225..8ab9a81c 100644 --- a/gnocchi/storage/swift.py +++ b/gnocchi/storage/swift.py @@ -112,12 +112,13 @@ class SwiftStorage(storage.StorageDriver): if resp['status'] == 204: raise storage.MetricAlreadyExists(metric) - def _store_metric_measures(self, metric, key, aggregation, - data, offset=None, version=3): - self.swift.put_object( - self._container_name(metric), - self._object_name(key, aggregation, version), - data) + def _store_metric_splits(self, metric, keys_and_data_and_offset, + aggregation, version=3): + for key, data, offset in keys_and_data_and_offset: + self.swift.put_object( + self._container_name(metric), + self._object_name(key, aggregation, version), + data) def _delete_metric_measures(self, metric, key, aggregation, version=3): self.swift.delete_object( diff --git a/gnocchi/tests/test_storage.py b/gnocchi/tests/test_storage.py index d5429f1e..2353d90c 100644 --- a/gnocchi/tests/test_storage.py +++ b/gnocchi/tests/test_storage.py @@ -212,7 +212,7 @@ class TestStorageDriver(tests_base.TestCase): self.incoming.add_measures(m.id, [ incoming.Measure(datetime64(2014, 1, 6, 1, 58, 1), 100)]) - with mock.patch.object(self.storage, '_store_metric_measures') as c: + with mock.patch.object(self.storage, '_store_metric_splits') as c: # should only resample last aggregate self.trigger_processing([str(m.id)]) count = 0 @@ -221,7 +221,7 @@ class TestStorageDriver(tests_base.TestCase): args = 
call[1] if (args[0] == m_sql and args[2] == 'mean' - and args[1].sampling == numpy.timedelta64(1, 'm')): + and args[1][0][0].sampling == numpy.timedelta64(1, 'm')): count += 1 self.assertEqual(1, count) @@ -689,12 +689,13 @@ class TestStorageDriver(tests_base.TestCase): ]}, self.storage.get_measures(self.metric, [aggregation])) # Test what happens if we write garbage - self.storage._store_metric_measures( - self.metric, carbonara.SplitKey( - numpy.datetime64(1451952000, 's'), - numpy.timedelta64(1, 'm'), - ), "mean", - b"oh really?") + self.storage._store_metric_splits( + self.metric, [ + (carbonara.SplitKey( + numpy.datetime64(1451952000, 's'), + numpy.timedelta64(1, 'm')), + b"oh really?", None) + ], "mean") # Now store brand new points that should force a rewrite of one of the # split (keep in mind the back window size in one hour here). We move -- GitLab From 657f041e6841277efa6b1195b90b5942edfc848d Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Thu, 8 Feb 2018 16:43:51 +0100 Subject: [PATCH 1260/1483] storage: replace _get_unaggregated_timeserie by _get_or_create_unaggregated_timeseries MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This replaces this method in each storage driver by a new one that can retrieve several unaggregated timeseries at the same time. The storage engine does not leverage it and only passes a list of one metric… for now. 
--- gnocchi/storage/__init__.py | 81 ++++++++++++++++------------------- gnocchi/storage/ceph.py | 11 +++-- gnocchi/storage/file.py | 20 +++++++-- gnocchi/storage/redis.py | 28 +++++++----- gnocchi/storage/s3.py | 18 ++++---- gnocchi/storage/swift.py | 15 ++++--- gnocchi/tests/test_storage.py | 6 +-- gnocchi/tests/test_utils.py | 10 +++++ gnocchi/utils.py | 19 ++++++++ 9 files changed, 129 insertions(+), 79 deletions(-) diff --git a/gnocchi/storage/__init__.py b/gnocchi/storage/__init__.py index ef6e9ad8..b5b9ea21 100644 --- a/gnocchi/storage/__init__.py +++ b/gnocchi/storage/__init__.py @@ -90,13 +90,6 @@ class MetricAlreadyExists(StorageError): "Metric %s already exists" % metric) -class CorruptionError(ValueError, StorageError): - """Data corrupted, damn it.""" - - def __init__(self, message): - super(CorruptionError, self).__init__(message) - - @utils.retry_on_exception_and_log("Unable to initialize storage driver") def get_driver(conf): """Return the configured driver.""" @@ -125,35 +118,31 @@ class StorageDriver(object): raise NotImplementedError @staticmethod - def _get_unaggregated_timeserie(metric, version=3): + def _get_or_create_unaggregated_timeseries_unbatched(metric, version=3): + """Get the unaggregated timeserie of metrics. + + If the metrics does not exist, it is created. + + :param metric: A metric. + :param version: The storage format version number. + """ raise NotImplementedError - def _get_unaggregated_timeserie_and_unserialize( - self, metric, block_size, back_window): - """Retrieve unaggregated timeserie for a metric and unserialize it. + def _get_or_create_unaggregated_timeseries(self, metrics, version=3): + """Get the unaggregated timeserie of metrics. - Returns a gnocchi.carbonara.BoundTimeSerie object. If the data cannot - be retrieved, returns None. + If the metrics does not exist, it is created. + :param metrics: A list of metrics. + :param version: The storage format version number. 
""" - with utils.StopWatch() as sw: - raw_measures = ( - self._get_unaggregated_timeserie( - metric) - ) - if not raw_measures: - return - LOG.debug( - "Retrieve unaggregated measures " - "for %s in %.2fs", - metric.id, sw.elapsed()) - try: - return carbonara.BoundTimeSerie.unserialize( - raw_measures, block_size, back_window) - except carbonara.InvalidData: - raise CorruptionError( - "Data corruption detected for %s " - "unaggregated timeserie" % metric.id) + return dict( + six.moves.zip( + metrics, + utils.parallel_map( + utils.return_none_on_failure( + self._get_or_create_unaggregated_timeseries_unbatched), + ((metric, version) for metric in metrics)))) @staticmethod def _store_unaggregated_timeserie(metric, data, version=3): @@ -430,19 +419,25 @@ class StorageDriver(object): if any(filter(lambda x: x.startswith("rate:"), agg_methods)): back_window += 1 - try: - ts = self._get_unaggregated_timeserie_and_unserialize( - metric, block_size=block_size, back_window=back_window) - except MetricDoesNotExist: - try: - self._create_metric(metric) - except MetricAlreadyExists: - # Created in the mean time, do not worry - pass - ts = None - except CorruptionError as e: - LOG.error(e) + with utils.StopWatch() as sw: + raw_measures = ( + self._get_or_create_unaggregated_timeseries( + [metric])[metric] + ) + LOG.debug("Retrieve unaggregated measures for %s in %.2fs", + metric.id, sw.elapsed()) + + if raw_measures is None: ts = None + else: + try: + ts = carbonara.BoundTimeSerie.unserialize( + raw_measures, block_size, back_window) + except carbonara.InvalidData: + LOG.error("Data corruption detected for %s " + "unaggregated timeserie, creating a new one", + metric.id) + ts = None if ts is None: # This is the first time we treat measures for this diff --git a/gnocchi/storage/ceph.py b/gnocchi/storage/ceph.py index 8897207c..42752961 100644 --- a/gnocchi/storage/ceph.py +++ b/gnocchi/storage/ceph.py @@ -184,12 +184,17 @@ class CephStorage(storage.StorageDriver): return 
(('gnocchi_%s_none' % metric.id) + ("_v%s" % version if version else "")) - def _get_unaggregated_timeserie(self, metric, version=3): + def _get_or_create_unaggregated_timeseries_unbatched( + self, metric, version=3): try: - return self._get_object_content( + contents = self._get_object_content( self._build_unaggregated_timeserie_path(metric, version)) except rados.ObjectNotFound: - raise storage.MetricDoesNotExist(metric) + self._create_metric(metric) + else: + # _create_metric writes "" so replace it by None to indicate + # emptiness instead. + return contents or None def _store_unaggregated_timeserie(self, metric, data, version=3): self.ioctx.write_full( diff --git a/gnocchi/storage/file.py b/gnocchi/storage/file.py index 7605edb3..d63959e2 100644 --- a/gnocchi/storage/file.py +++ b/gnocchi/storage/file.py @@ -31,6 +31,12 @@ OPTS = [ help='Path used to store gnocchi data files.'), ] +# Python 2 compatibility +try: + FileNotFoundError +except NameError: + FileNotFoundError = None + class FileStorage(storage.StorageDriver): WRITE_FULL = True @@ -95,15 +101,21 @@ class FileStorage(storage.StorageDriver): with open(dest, "wb") as f: f.write(data) - def _get_unaggregated_timeserie(self, metric, version=3): + def _get_or_create_unaggregated_timeseries_unbatched( + self, metric, version=3): path = self._build_unaggregated_timeserie_path(metric, version) try: with open(path, 'rb') as f: return f.read() + except FileNotFoundError: + pass except IOError as e: - if e.errno == errno.ENOENT: - raise storage.MetricDoesNotExist(metric) - raise + if e.errno != errno.ENOENT: + raise + try: + self._create_metric(metric) + except storage.MetricAlreadyExists: + pass def _list_split_keys(self, metric, aggregation, granularity, version=3): try: diff --git a/gnocchi/storage/redis.py b/gnocchi/storage/redis.py index 2475f29f..054a18d3 100644 --- a/gnocchi/storage/redis.py +++ b/gnocchi/storage/redis.py @@ -13,6 +13,7 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. +import six from gnocchi.common import redis from gnocchi import storage @@ -86,21 +87,26 @@ return {0, final} str(utils.timespan_total_seconds(granularity or key.sampling))]) return path + '_v%s' % version if version else path - def _create_metric(self, metric): - if self._client.hsetnx( - self._metric_key(metric), self._unaggregated_field(), "") == 0: - raise storage.MetricAlreadyExists(metric) - def _store_unaggregated_timeserie(self, metric, data, version=3): self._client.hset(self._metric_key(metric), self._unaggregated_field(version), data) - def _get_unaggregated_timeserie(self, metric, version=3): - data = self._client.hget(self._metric_key(metric), - self._unaggregated_field(version)) - if data is None: - raise storage.MetricDoesNotExist(metric) - return data + def _get_or_create_unaggregated_timeseries(self, metrics, version=3): + pipe = self._client.pipeline(transaction=False) + for metric in metrics: + metric_key = self._metric_key(metric) + unagg_key = self._unaggregated_field(version) + # Create the metric if it was not created + pipe.hsetnx(metric_key, unagg_key, "") + # Get the data + pipe.hget(metric_key, unagg_key) + ts = { + # Replace "" by None + metric: data or None + for metric, (created, data) + in six.moves.zip(metrics, utils.grouper(pipe.execute(), 2)) + } + return ts def _list_split_keys(self, metric, aggregation, granularity, version=3): key = self._metric_key(metric) diff --git a/gnocchi/storage/s3.py b/gnocchi/storage/s3.py index 528c0b7e..6216b97c 100644 --- a/gnocchi/storage/s3.py +++ b/gnocchi/storage/s3.py @@ -104,9 +104,6 @@ class S3Storage(storage.StorageDriver): def _prefix(metric): return str(metric.id) + '/' - def _create_metric(self, metric): - pass - def _put_object_safe(self, Bucket, Key, Body): put = self.s3.put_object(Bucket=Bucket, Key=Key, Body=Body) @@ -217,16 +214,17 @@ class S3Storage(storage.StorageDriver): return 
S3Storage._prefix(metric) + 'none' + ("_v%s" % version if version else "") - def _get_unaggregated_timeserie(self, metric, version=3): + def _get_or_create_unaggregated_timeseries_unbatched( + self, metric, version=3): + key = self._build_unaggregated_timeserie_path(metric, version) try: response = self.s3.get_object( - Bucket=self._bucket_name, - Key=self._build_unaggregated_timeserie_path(metric, version)) + Bucket=self._bucket_name, Key=key) except botocore.exceptions.ClientError as e: - if e.response['Error'].get('Code') == "NoSuchKey": - raise storage.MetricDoesNotExist(metric) - raise - return response['Body'].read() + if e.response['Error'].get('Code') != "NoSuchKey": + raise + else: + return response['Body'].read() def _store_unaggregated_timeserie(self, metric, data, version=3): self._put_object_safe( diff --git a/gnocchi/storage/swift.py b/gnocchi/storage/swift.py index 8ab9a81c..8fa19961 100644 --- a/gnocchi/storage/swift.py +++ b/gnocchi/storage/swift.py @@ -187,16 +187,21 @@ class SwiftStorage(storage.StorageDriver): def _build_unaggregated_timeserie_path(version): return 'none' + ("_v%s" % version if version else "") - def _get_unaggregated_timeserie(self, metric, version=3): + def _get_or_create_unaggregated_timeseries_unbatched( + self, metric, version=3): try: headers, contents = self.swift.get_object( self._container_name(metric), self._build_unaggregated_timeserie_path(version)) except swclient.ClientException as e: - if e.http_status == 404: - raise storage.MetricDoesNotExist(metric) - raise - return contents + if e.http_status != 404: + raise + try: + self._create_metric(metric) + except storage.MetricAlreadyExists: + pass + else: + return contents def _store_unaggregated_timeserie(self, metric, data, version=3): self.swift.put_object(self._container_name(metric), diff --git a/gnocchi/tests/test_storage.py b/gnocchi/tests/test_storage.py index 2353d90c..4f3c1287 100644 --- a/gnocchi/tests/test_storage.py +++ b/gnocchi/tests/test_storage.py @@ 
-146,9 +146,9 @@ class TestStorageDriver(tests_base.TestCase): self.assertEqual({"mean": []}, self.storage.get_measures( self.metric, aggregations)) - self.assertRaises(storage.MetricDoesNotExist, - self.storage._get_unaggregated_timeserie, - self.metric) + self.assertEqual( + {self.metric: None}, + self.storage._get_or_create_unaggregated_timeseries([self.metric])) def test_measures_reporting_format(self): report = self.incoming.measures_report(True) diff --git a/gnocchi/tests/test_utils.py b/gnocchi/tests/test_utils.py index db1a2b79..c000d070 100644 --- a/gnocchi/tests/test_utils.py +++ b/gnocchi/tests/test_utils.py @@ -138,3 +138,13 @@ class ParallelMap(tests_base.TestCase): utils.parallel_map(lambda x: x, [[1], [2], [3]])) sm.assert_not_called() + + +class ReturnNoneOnFailureTest(tests_base.TestCase): + def test_works(self): + + @utils.return_none_on_failure + def foobar(): + raise Exception("boom") + + self.assertIsNone(foobar()) diff --git a/gnocchi/utils.py b/gnocchi/utils.py index 0510d18c..c04cc7aa 100644 --- a/gnocchi/utils.py +++ b/gnocchi/utils.py @@ -314,6 +314,25 @@ def parallel_map(fn, list_of_args): parallel_map.MAX_WORKERS = get_default_workers() + +def return_none_on_failure(f): + try: + # Python 3 + fname = f.__qualname__ + except AttributeError: + fname = f.__name__ + + @six.wraps(f) + def _return_none_on_failure(*args, **kwargs): + try: + return f(*args, **kwargs) + except Exception as e: + LOG.critical("Unexpected error while calling %s: %s", + fname, e, exc_info=True) + + return _return_none_on_failure + + # Retry with exponential backoff for up to 1 minute wait_exponential = tenacity.wait_exponential(multiplier=0.5, max=60) -- GitLab From 368a178e0b7e6ab8239677722c509b8fe89541f3 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Mon, 19 Feb 2018 09:31:33 +0100 Subject: [PATCH 1261/1483] tests: run functional tests on real backends --- gnocchi/tests/functional/fixtures.py | 49 +++++++++++++++++++++++++--- 1 file changed, 45 insertions(+), 4 
deletions(-) diff --git a/gnocchi/tests/functional/fixtures.py b/gnocchi/tests/functional/fixtures.py index 906e7235..a8e4d5e3 100644 --- a/gnocchi/tests/functional/fixtures.py +++ b/gnocchi/tests/functional/fixtures.py @@ -18,6 +18,7 @@ from __future__ import absolute_import import os import shutil +import subprocess import tempfile import threading import time @@ -126,8 +127,35 @@ class ConfigFixture(fixture.GabbiFixture): if conf.indexer.url is None: raise case.SkipTest("No indexer configured") - conf.set_override('driver', 'file', 'storage') - conf.set_override('file_basepath', data_tmp_dir, 'storage') + storage_driver = os.getenv("GNOCCHI_TEST_STORAGE_DRIVER", "file") + + conf.set_override('driver', storage_driver, 'storage') + if conf.storage.driver == 'file': + conf.set_override('file_basepath', data_tmp_dir, 'storage') + elif conf.storage.driver == 'ceph': + conf.set_override('ceph_conffile', os.getenv("CEPH_CONF"), + 'storage') + pool_name = uuid.uuid4().hex + with open(os.devnull, 'w') as f: + subprocess.call("rados -c %s mkpool %s" % ( + os.getenv("CEPH_CONF"), pool_name), shell=True, + stdout=f, stderr=subprocess.STDOUT) + conf.set_override('ceph_pool', pool_name, 'storage') + elif conf.storage.driver == "s3": + conf.set_override('s3_endpoint_url', + os.getenv("GNOCCHI_STORAGE_HTTP_URL"), + group="storage") + conf.set_override('s3_access_key_id', "gnocchi", group="storage") + conf.set_override('s3_secret_access_key', "anythingworks", + group="storage") + conf.set_override("s3_bucket_prefix", str(uuid.uuid4())[:26], + "storage") + elif conf.storage.driver == "swift": + # NOTE(sileht): This fixture must start before any driver stuff + swift_fixture = fixtures.MockPatch( + 'swiftclient.client.Connection', + base.FakeSwiftClient) + swift_fixture.setUp() # NOTE(jd) All of that is still very SQL centric but we only support # SQL for now so let's say it's good enough. 
@@ -148,9 +176,16 @@ class ConfigFixture(fixture.GabbiFixture): self.coord = metricd.get_coordinator_and_start(str(uuid.uuid4()), conf.coordination_url) s = storage.get_driver(conf) - s.upgrade() i = incoming.get_driver(conf) - i.upgrade(128) + + if conf.storage.driver == 'redis': + # Create one prefix per test + s.STORAGE_PREFIX = str(uuid.uuid4()).encode() + + if conf.incoming.driver == 'redis': + i.SACK_NAME_FORMAT = ( + str(uuid.uuid4()) + incoming.IncomingDriver.SACK_NAME_FORMAT + ) self.fixtures = [ fixtures.MockPatch("gnocchi.storage.get_driver", @@ -166,10 +201,16 @@ class ConfigFixture(fixture.GabbiFixture): for f in self.fixtures: f.setUp() + if conf.storage.driver == 'swift': + self.fixtures.append(swift_fixture) + LOAD_APP_KWARGS = { 'conf': conf, } + s.upgrade() + i.upgrade(128) + # start up a thread to async process measures self.metricd_thread = MetricdThread(chef.Chef(self.coord, i, index, s)) self.metricd_thread.start() -- GitLab From 0b43edae55bffd27d855bc9ad069288fb7c0ef7d Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Fri, 9 Feb 2018 10:05:22 +0100 Subject: [PATCH 1262/1483] storage: replace _store_unaggregated_timeserie by _store_unaggregated_timeseries This allows to store several unaggregated timeseries at the same time. 
--- gnocchi/storage/__init__.py | 21 +++++++++++++++++++-- gnocchi/storage/ceph.py | 4 +++- gnocchi/storage/file.py | 5 +++-- gnocchi/storage/redis.py | 9 ++++++--- gnocchi/storage/s3.py | 5 +++-- gnocchi/storage/swift.py | 11 +++++++---- gnocchi/tests/test_storage.py | 2 +- 7 files changed, 42 insertions(+), 15 deletions(-) diff --git a/gnocchi/storage/__init__.py b/gnocchi/storage/__init__.py index b5b9ea21..3883a665 100644 --- a/gnocchi/storage/__init__.py +++ b/gnocchi/storage/__init__.py @@ -145,9 +145,26 @@ class StorageDriver(object): ((metric, version) for metric in metrics)))) @staticmethod - def _store_unaggregated_timeserie(metric, data, version=3): + def _store_unaggregated_timeseries_unbatched(metric, data, version=3): + """Store unaggregated timeseries. + + :param metric: A metric. + :param data: The data to store. + :param version: Storage engine data format version + """ raise NotImplementedError + def _store_unaggregated_timeseries(self, metrics_and_data, version=3): + """Store unaggregated timeseries. 
+ + :param metrics_and_data: A list of (metric, serialized_data) tuples + :param version: Storage engine data format version + """ + utils.parallel_map( + utils.return_none_on_failure( + self._store_unaggregated_timeseries_unbatched), + ((metric, data, version) for metric, data in metrics_and_data)) + @staticmethod def _store_metric_splits(metric, keys_and_data_and_offset, aggregation, version=3): @@ -490,7 +507,7 @@ class StorageDriver(object): "in %.2f seconds%s", metric.id, len(measures), elapsed, perf) - self._store_unaggregated_timeserie(metric, ts.serialize()) + self._store_unaggregated_timeseries([(metric, ts.serialize())]) def find_measure(self, metric, predicate, granularity, aggregation="mean", from_timestamp=None, to_timestamp=None): diff --git a/gnocchi/storage/ceph.py b/gnocchi/storage/ceph.py index 42752961..40cc5ef2 100644 --- a/gnocchi/storage/ceph.py +++ b/gnocchi/storage/ceph.py @@ -1,5 +1,6 @@ # -*- encoding: utf-8 -*- # +# Copyright © 2018 Red Hat # Copyright © 2014-2015 eNovance # # Licensed under the Apache License, Version 2.0 (the "License"); you may @@ -196,7 +197,8 @@ class CephStorage(storage.StorageDriver): # emptiness instead. return contents or None - def _store_unaggregated_timeserie(self, metric, data, version=3): + def _store_unaggregated_timeseries_unbatched( + self, metric, data, version=3): self.ioctx.write_full( self._build_unaggregated_timeserie_path(metric, version), data) diff --git a/gnocchi/storage/file.py b/gnocchi/storage/file.py index d63959e2..c169acb5 100644 --- a/gnocchi/storage/file.py +++ b/gnocchi/storage/file.py @@ -1,7 +1,7 @@ # -*- encoding: utf-8 -*- # # Copyright © 2014 Objectif Libre -# Copyright © 2015 Red Hat +# Copyright © 2015-2018 Red Hat # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain @@ -96,7 +96,8 @@ class FileStorage(storage.StorageDriver): if e.errno != errno.EEXIST: raise - def _store_unaggregated_timeserie(self, metric, data, version=3): + def _store_unaggregated_timeseries_unbatched( + self, metric, data, version=3): dest = self._build_unaggregated_timeserie_path(metric, version) with open(dest, "wb") as f: f.write(data) diff --git a/gnocchi/storage/redis.py b/gnocchi/storage/redis.py index 054a18d3..336fc212 100644 --- a/gnocchi/storage/redis.py +++ b/gnocchi/storage/redis.py @@ -87,9 +87,12 @@ return {0, final} str(utils.timespan_total_seconds(granularity or key.sampling))]) return path + '_v%s' % version if version else path - def _store_unaggregated_timeserie(self, metric, data, version=3): - self._client.hset(self._metric_key(metric), - self._unaggregated_field(version), data) + def _store_unaggregated_timeseries(self, metrics_and_data, version=3): + pipe = self._client.pipeline(transaction=False) + unagg_key = self._unaggregated_field(version) + for metric, data in metrics_and_data: + pipe.hset(self._metric_key(metric), unagg_key, data) + pipe.execute() def _get_or_create_unaggregated_timeseries(self, metrics, version=3): pipe = self._client.pipeline(transaction=False) diff --git a/gnocchi/storage/s3.py b/gnocchi/storage/s3.py index 6216b97c..450025d6 100644 --- a/gnocchi/storage/s3.py +++ b/gnocchi/storage/s3.py @@ -1,6 +1,6 @@ # -*- encoding: utf-8 -*- # -# Copyright © 2016-2017 Red Hat, Inc. +# Copyright © 2016-2018 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain @@ -226,7 +226,8 @@ class S3Storage(storage.StorageDriver): else: return response['Body'].read() - def _store_unaggregated_timeserie(self, metric, data, version=3): + def _store_unaggregated_timeseries_unbatched( + self, metric, data, version=3): self._put_object_safe( Bucket=self._bucket_name, Key=self._build_unaggregated_timeserie_path(metric, version), diff --git a/gnocchi/storage/swift.py b/gnocchi/storage/swift.py index 8fa19961..c01733e9 100644 --- a/gnocchi/storage/swift.py +++ b/gnocchi/storage/swift.py @@ -1,5 +1,6 @@ # -*- encoding: utf-8 -*- # +# Copyright © 2018 Red Hat # Copyright © 2014-2015 eNovance # # Licensed under the Apache License, Version 2.0 (the "License"); you may @@ -203,7 +204,9 @@ class SwiftStorage(storage.StorageDriver): else: return contents - def _store_unaggregated_timeserie(self, metric, data, version=3): - self.swift.put_object(self._container_name(metric), - self._build_unaggregated_timeserie_path(version), - data) + def _store_unaggregated_timeseries_unbatched( + self, metric, data, version=3): + self.swift.put_object( + self._container_name(metric), + self._build_unaggregated_timeserie_path(version), + data) diff --git a/gnocchi/tests/test_storage.py b/gnocchi/tests/test_storage.py index 4f3c1287..23b09569 100644 --- a/gnocchi/tests/test_storage.py +++ b/gnocchi/tests/test_storage.py @@ -97,7 +97,7 @@ class TestStorageDriver(tests_base.TestCase): self.incoming.add_measures(self.metric.id, [ incoming.Measure(datetime64(2014, 1, 1, 12, 0, 1), 5), ]) - with mock.patch.object(self.storage, '_store_unaggregated_timeserie', + with mock.patch.object(self.storage, '_store_unaggregated_timeseries', side_effect=Exception): try: self.trigger_processing() -- GitLab From 7f6258ae8b9a315b9883e94ac3dee519a29f580d Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Wed, 21 Feb 2018 14:02:49 +0100 Subject: [PATCH 1263/1483] Fix unicode error in filter error message --- gnocchi/rest/api.py | 2 +- 
gnocchi/tests/functional/gabbits/search.yaml | 6 ++++++ 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/gnocchi/rest/api.py b/gnocchi/rest/api.py index aa2d2358..9e6c4b86 100644 --- a/gnocchi/rest/api.py +++ b/gnocchi/rest/api.py @@ -1295,7 +1295,7 @@ class QueryStringSearchAttrFilter(object): try: parsed_query = cls.expr.parseString(query, parseAll=True)[0] except pyparsing.ParseException as e: - raise abort(400, "Invalid filter: %s" % six.text_type(e)) + raise abort(400, "Invalid filter: %s" % str(e)) return cls._parsed_query2dict(parsed_query) @classmethod diff --git a/gnocchi/tests/functional/gabbits/search.yaml b/gnocchi/tests/functional/gabbits/search.yaml index b3de8060..b4a56fce 100644 --- a/gnocchi/tests/functional/gabbits/search.yaml +++ b/gnocchi/tests/functional/gabbits/search.yaml @@ -139,6 +139,12 @@ tests: response_json_paths: $.`len`: 2 + - name: search invalid query string + POST: /v1/search/resource/generic?filter=id%20%3D%3D%20foobar + status: 400 + response_strings: + - "Invalid filter: Expected" + - name: search in_ query string POST: /v1/search/resource/generic?filter=id%20in%20%5Bfaef212f-0bf4-4030-a461-2186fef79be0%2C%20df7e5e75-6a1d-4ff7-85cb-38eb9d75da7e%5D response_json_paths: -- GitLab From e0f505ce5faf24dcffa8bfe9dfa07f2e5d4d266c Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Fri, 9 Feb 2018 14:10:54 +0100 Subject: [PATCH 1264/1483] storage: replace _store_timeserie_split by _store_timeserie_splits This implements a batched version of the split storage for a metric so that both the fetching of existing and storing of new splits are batched. 
--- gnocchi/storage/__init__.py | 110 ++++++++++++++++++++-------------- gnocchi/tests/test_storage.py | 4 +- 2 files changed, 67 insertions(+), 47 deletions(-) diff --git a/gnocchi/storage/__init__.py b/gnocchi/storage/__init__.py index 3883a665..790c664a 100644 --- a/gnocchi/storage/__init__.py +++ b/gnocchi/storage/__init__.py @@ -235,13 +235,15 @@ class StorageDriver(object): results = [] for key, raw in six.moves.zip(keys, raw_measures): try: - results.append(carbonara.AggregatedTimeSerie.unserialize( - raw, key, aggregation)) + ts = carbonara.AggregatedTimeSerie.unserialize( + raw, key, aggregation) except carbonara.InvalidData: LOG.error("Data corruption detected for %s " "aggregated `%s' timeserie, granularity `%s' " "around time `%s', ignoring.", metric.id, aggregation, key.sampling, key) + else: + results.append(ts) return results def _get_measures_timeserie(self, metric, aggregation, @@ -285,46 +287,62 @@ class StorageDriver(object): ts.truncate(aggregation.timespan) return ts - def _store_timeserie_split(self, metric, key, split, - aggregation, oldest_mutable_timestamp, - oldest_point_to_keep): - # NOTE(jd) We write the full split only if the driver works that way - # (self.WRITE_FULL) or if the oldest_mutable_timestamp is out of range. - write_full = self.WRITE_FULL or next(key) <= oldest_mutable_timestamp - if write_full: - try: - existing = self._get_measures_and_unserialize( - metric, [key], aggregation) - except AggregationDoesNotExist: - pass - else: + def _store_timeserie_splits(self, metric, keys_and_splits, + aggregation, oldest_mutable_timestamp, + oldest_point_to_keep): + keys_to_rewrite = [] + splits_to_rewrite = [] + for key, split in six.iteritems(keys_and_splits): + # NOTE(jd) We write the full split only if the driver works that + # way (self.WRITE_FULL) or if the oldest_mutable_timestamp is out + # of range. 
+ write_full = ( + self.WRITE_FULL or next(key) <= oldest_mutable_timestamp + ) + if write_full: + keys_to_rewrite.append(key) + splits_to_rewrite.append(split) + + # Update the splits that were passed as argument with the data already + # stored in the case that we need to rewrite them fully. + # First, fetch all those existing splits. + try: + existing_data = self._get_measures_and_unserialize( + metric, keys_to_rewrite, aggregation) + except AggregationDoesNotExist: + pass + else: + for key, split, existing in six.moves.zip( + keys_to_rewrite, splits_to_rewrite, existing_data): if existing: - existing = existing[0] if split is not None: existing.merge(split) - split = existing - - if split is None: - # `split' can be none if existing is None and no split was passed - # in order to rewrite and compress the data; in that case, it means - # the split key is present and listed, but some aggregation method - # or granularity is missing. That means data is corrupted, but it - # does not mean we have to fail, we can just do nothing and log a - # warning. - LOG.warning("No data found for metric %s, granularity %f " - "and aggregation method %s (split key %s): " - "possible data corruption", - metric, key.sampling, - aggregation, key) - return - - if oldest_point_to_keep is not None: - split.truncate(oldest_point_to_keep) - - offset, data = split.serialize(key, compressed=write_full) - - return self._store_metric_splits( - metric, [(key, data, offset)], aggregation) + keys_and_splits[key] = existing + + key_data_offset = [] + for key, split in six.iteritems(keys_and_splits): + if split is None: + # `split' can be none if existing is None and no split was + # passed in order to rewrite and compress the data; in that + # case, it means the split key is present and listed, but some + # aggregation method or granularity is missing. That means data + # is corrupted, but it does not mean we have to fail, we can + # just do nothing and log a warning. 
+ LOG.warning("No data found for metric %s, granularity %f " + "and aggregation method %s (split key %s): " + "possible data corruption", + metric, key.sampling, + aggregation, key) + continue + + if oldest_point_to_keep is not None: + split.truncate(oldest_point_to_keep) + + offset, data = split.serialize( + key, compressed=key in keys_to_rewrite) + key_data_offset.append((key, data, offset)) + + return self._store_metric_splits(metric, key_data_offset, aggregation) def _add_measures(self, aggregation, ap_def, metric, grouped_serie, previous_oldest_mutable_timestamp, @@ -357,6 +375,8 @@ class StorageDriver(object): oldest_point_to_keep = None oldest_key_to_keep = None + keys_and_split_to_store = {} + if previous_oldest_mutable_timestamp and (ap_def.timespan or need_rewrite): previous_oldest_mutable_key = ts.get_split_key( @@ -396,18 +416,18 @@ class StorageDriver(object): # NOTE(jd) Rewrite it entirely for fun (and later # for compression). For that, we just pass None as # split. - self._store_timeserie_split( - metric, key, None, aggregation, - oldest_mutable_timestamp, oldest_point_to_keep) + keys_and_split_to_store[key] = None for key, split in ts.split(): if oldest_key_to_keep is None or key >= oldest_key_to_keep: LOG.debug( "Storing split %s (%s) for metric %s", key, aggregation, metric) - self._store_timeserie_split( - metric, key, split, aggregation, oldest_mutable_timestamp, - oldest_point_to_keep) + keys_and_split_to_store[key] = split + + self._store_timeserie_splits( + metric, keys_and_split_to_store, aggregation, + oldest_mutable_timestamp, oldest_point_to_keep) @staticmethod def _delete_metric(metric): diff --git a/gnocchi/tests/test_storage.py b/gnocchi/tests/test_storage.py index 23b09569..7a47f3a6 100644 --- a/gnocchi/tests/test_storage.py +++ b/gnocchi/tests/test_storage.py @@ -480,12 +480,12 @@ class TestStorageDriver(tests_base.TestCase): ]}, self.storage.get_measures(self.metric, [aggregation])) # Now store brand new points that should force a 
rewrite of one of the - # split (keep in mind the back window size in one hour here). We move + # split (keep in mind the back window size is one hour here). We move # the BoundTimeSerie processing timeserie far away from its current # range. # Here we test a special case where the oldest_mutable_timestamp will - # be 2016-01-10TOO:OO:OO = 1452384000.0, our new split key. + # be 2016-01-10T00:00:00 = 1452384000.0, our new split key. self.incoming.add_measures(self.metric.id, [ incoming.Measure(datetime64(2016, 1, 10, 0, 12), 45), ]) -- GitLab From 6771ae4e864f662ae18f9c32c0b86402d3a4040f Mon Sep 17 00:00:00 2001 From: Thomas Goirand Date: Sat, 24 Feb 2018 23:48:27 +0100 Subject: [PATCH 1265/1483] Fixed debian/copyright holders list and years --- debian/copyright | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/debian/copyright b/debian/copyright index 3deff357..35773989 100644 --- a/debian/copyright +++ b/debian/copyright @@ -8,10 +8,13 @@ Copyright: (c) 2014-2015, Julien Danjou (c) 2014-2015, Mirantis INC. (c) 2014-2015, Red Hat INC. (c) 2014-2015, Objectif Libre + (c) 2010-2017, OpenStack Foundation + (c) 2014-2016, eNovance + (c) 2016, Prometheus Team License: Apache-2 Files: debian/* -Copyright: (c) 2014-2016, Thomas Goirand +Copyright: (c) 2014-2018, Thomas Goirand License: Apache-2 License: Apache-2 -- GitLab From 09836a127b8762075f8fb638585366a162b43987 Mon Sep 17 00:00:00 2001 From: Thomas Goirand Date: Sat, 24 Feb 2018 22:54:20 +0000 Subject: [PATCH 1266/1483] Some debian/rules clean-ups. 
--- debian/control | 3 --- debian/rules | 21 +++++++++++++++------ 2 files changed, 15 insertions(+), 9 deletions(-) diff --git a/debian/control b/debian/control index 69e67a65..61527cc8 100644 --- a/debian/control +++ b/debian/control @@ -8,9 +8,6 @@ Build-Depends: debhelper (>= 10), dh-python, openstack-pkg-tools (>= 66~), - python-all, - python-pbr, - python-setuptools, python3-all, python3-pbr, python3-setuptools, diff --git a/debian/rules b/debian/rules index 78f8788f..06b1adbf 100755 --- a/debian/rules +++ b/debian/rules @@ -4,13 +4,26 @@ UPSTREAM_GIT:=https://github.com/gnocchixyz/gnocchi include /usr/share/openstack-pkg-tools/pkgos.make UNIT_TEST_BLACKLIST = test_carbonara.CarbonaraCmd.*|.*test_bin\.BinTestCase\.test_gnocchi_config_generator_run.* + %: dh $@ --buildsystem=python_distutils --with python3,sphinxdoc +override_dh_clean: + dh_clean + rm -rf build debian/gnocchi-common.postinst debian/gnocchi-common.config debian/gnocchi-api.config debian/gnocchi-api.postinst + +override_dh_auto_clean: + python3 setup.py clean + +override_dh_auto_build: + echo "Do nothing..." + +override_dh_auto_build: + echo "Do nothing..." 
+ override_dh_auto_install: set -e ; for pyvers in $(PYTHON3S); do \ - python$$pyvers setup.py install --install-layout=deb \ - --root $(CURDIR)/debian/python3-gnocchi; \ + python$$pyvers setup.py install --install-layout=deb --root $(CURDIR)/debian/python3-gnocchi; \ done mkdir -p $(CURDIR)/debian/python3-gnocchi/usr/lib/python3/dist-packages/gnocchi/indexer cp -auxf gnocchi/indexer/alembic $(CURDIR)/debian/python3-gnocchi/usr/lib/python3/dist-packages/gnocchi/indexer @@ -78,10 +91,6 @@ override_dh_sphinxdoc: # $$BINDIR/pg_ctl stop -D $$PG_MYTMPDIR echo "Do nothing" -override_dh_clean: - dh_clean -O--buildsystem=python_distutils - rm -rf build debian/gnocchi-common.postinst debian/gnocchi-common.config debian/gnocchi-api.config debian/gnocchi-api.postinst - override_dh_python3: dh_python3 --shebang=/usr/bin/python3 -- GitLab From 625a2187145627406d12bfd4c77d99466c153fb7 Mon Sep 17 00:00:00 2001 From: Thomas Goirand Date: Sun, 25 Feb 2018 10:32:59 +0000 Subject: [PATCH 1267/1483] Some debian/rules clean-ups. 
--- debian/python3-gnocchi.install | 1 + debian/rules | 70 ++++++++++++++++------------------ debian/start_pg.sh | 0 3 files changed, 34 insertions(+), 37 deletions(-) create mode 100644 debian/python3-gnocchi.install mode change 100644 => 100755 debian/start_pg.sh diff --git a/debian/python3-gnocchi.install b/debian/python3-gnocchi.install new file mode 100644 index 00000000..028be4f6 --- /dev/null +++ b/debian/python3-gnocchi.install @@ -0,0 +1 @@ +/usr/lib/python3* \ No newline at end of file diff --git a/debian/rules b/debian/rules index 06b1adbf..baadaacf 100755 --- a/debian/rules +++ b/debian/rules @@ -16,22 +16,46 @@ override_dh_auto_clean: python3 setup.py clean override_dh_auto_build: + /usr/share/openstack-pkg-tools/pkgos_insert_include pkgos_func gnocchi-common.postinst + /usr/share/openstack-pkg-tools/pkgos_insert_include pkgos_func gnocchi-common.config + /usr/share/openstack-pkg-tools/pkgos_insert_include pkgos_func gnocchi-api.postinst + /usr/share/openstack-pkg-tools/pkgos_insert_include pkgos_func gnocchi-api.config + +override_dh_auto_install: echo "Do nothing..." -override_dh_auto_build: +override_dh_auto_test: echo "Do nothing..." 
-override_dh_auto_install: +override_dh_install: set -e ; for pyvers in $(PYTHON3S); do \ - python$$pyvers setup.py install --install-layout=deb --root $(CURDIR)/debian/python3-gnocchi; \ + python$$pyvers setup.py install --install-layout=deb --root $(CURDIR)/debian/tmp; \ done - mkdir -p $(CURDIR)/debian/python3-gnocchi/usr/lib/python3/dist-packages/gnocchi/indexer - cp -auxf gnocchi/indexer/alembic $(CURDIR)/debian/python3-gnocchi/usr/lib/python3/dist-packages/gnocchi/indexer + +ifeq (,$(findstring nocheck, $(DEB_BUILD_OPTIONS))) + echo "===> Starting PGSQL" + BINDIR=`pg_config --bindir` ; \ + PG_MYTMPDIR=`mktemp -d` ; \ + chown postgres:postgres $$PG_MYTMPDIR || true ; \ + export PGHOST=$$PG_MYTMPDIR ; \ + chmod +x debian/start_pg.sh ; \ + debian/start_pg.sh $$PG_MYTMPDIR ; \ + export GNOCCHI_INDEXER_URL="postgresql:///template1?host=$$PG_MYTMPDIR&port=9823" ; \ + export GNOCCHI_TEST_STORAGE_DRIVER=file ; \ + export PATH=$(PATH):$(CURDIR)/debian/bin && PYTHONPATH=$(CURDIR):$(CURDIR)/debian/bin ; \ + pkgos-dh_auto_test --no-py2 'gnocchi\.tests\.(?!.*('"$(UNIT_TEST_BLACKLIST)"'))' ; \ + echo "===> Stopping PGSQL" ; \ + $$BINDIR/pg_ctl stop -D $$PG_MYTMPDIR +endif + + + mkdir -p $(CURDIR)/debian/tmp/usr/lib/python3/dist-packages/gnocchi/indexer + cp -auxf gnocchi/indexer/alembic $(CURDIR)/debian/tmp/usr/lib/python3/dist-packages/gnocchi/indexer rm -rf $(CURDIR)/debian/python*-gnocchi/usr/lib/python*/dist-packages/*.pth rm -rf $(CURDIR)/debian/python*-gnocchi/usr/etc mkdir -p $(CURDIR)/debian/gnocchi-common/usr/share/gnocchi-common - PYTHONPATH=$(CURDIR)/debian/python3-gnocchi/usr/lib/python3/dist-packages python3-oslo-config-generator \ + PYTHONPATH=$(CURDIR)/debian/tmp/usr/lib/python3/dist-packages python3-oslo-config-generator \ --output-file $(CURDIR)/debian/gnocchi-common/usr/share/gnocchi-common/gnocchi.conf \ --wrap-width 140 \ --namespace gnocchi \ @@ -45,37 +69,9 @@ override_dh_auto_install: sed -i 's|^[ \t#]*url[ \t#]*=.*|url = 
sqlite:////var/lib/gnocchi/gnocchidb|' $(CURDIR)/debian/gnocchi-common/usr/share/gnocchi-common/gnocchi.conf sed -i 's|^[# \t]*auth_protocol[\t #]*=.*|auth_protocol = http|' $(CURDIR)/debian/gnocchi-common/usr/share/gnocchi-common/gnocchi.conf - /usr/share/openstack-pkg-tools/pkgos_insert_include pkgos_func gnocchi-common.postinst - /usr/share/openstack-pkg-tools/pkgos_insert_include pkgos_func gnocchi-common.config - /usr/share/openstack-pkg-tools/pkgos_insert_include pkgos_func gnocchi-api.postinst - /usr/share/openstack-pkg-tools/pkgos_insert_include pkgos_func gnocchi-api.config + dh_install + dh_missing --fail-missing -override_dh_auto_test: -ifeq (,$(findstring nocheck, $(DEB_BUILD_OPTIONS))) - @echo "===> Running tests" - set -e ; set -x ; for i in $(PYTHON3S) ; do \ - PYMAJOR=`echo $$i | cut -d'.' -f1` ; \ - echo "===> Starting PGSQL" ; \ - BINDIR=`pg_config --bindir` ; \ - PG_MYTMPDIR=`mktemp -d` ; \ - chown postgres:postgres $$PG_MYTMPDIR || true ; \ - export PGHOST=$$PG_MYTMPDIR ; \ - chmod +x debian/start_pg.sh ; \ - debian/start_pg.sh $$PG_MYTMPDIR ; \ - export GNOCCHI_INDEXER_URL="postgresql:///template1?host=$$PG_MYTMPDIR&port=9823" ; \ - export GNOCCHI_TEST_STORAGE_DRIVER=file ; \ - echo "===> Testing with python$$i (python$$PYMAJOR)" ; \ - rm -rf .testrepository ; \ - testr-python$$PYMAJOR init ; \ - TEMP_REZ=`mktemp -t` ; \ - export PATH=$(PATH):$(CURDIR)/debian/bin && PYTHONPATH=$(CURDIR):$(CURDIR)/debian/bin PYTHON=python$$i testr-python$$PYMAJOR run --subunit 'gnocchi\.tests\.(?!.*('"$(UNIT_TEST_BLACKLIST)"'))' | tee $$TEMP_REZ | subunit2pyunit ; \ - cat $$TEMP_REZ | subunit-filter -s --no-passthrough | subunit-stats ; \ - rm -f $$TEMP_REZ ; \ - testr-python$$PYMAJOR slowest ; \ - echo "===> Stopping PGSQL" ; \ - $$BINDIR/pg_ctl stop -D $$PG_MYTMPDIR ; \ - done -endif override_dh_sphinxdoc: # echo "===> Starting PGSQL" ; \ @@ -89,7 +85,7 @@ override_dh_sphinxdoc: # PYTHONPATH=. 
sphinx-build -b html doc/source debian/python3-gnocchi-doc/usr/share/doc/python3-gnocchi-doc/html ; \ # dh_sphinxdoc -O--buildsystem=python_distutils ; \ # $$BINDIR/pg_ctl stop -D $$PG_MYTMPDIR - echo "Do nothing" + echo "Do nothing..." override_dh_python3: dh_python3 --shebang=/usr/bin/python3 diff --git a/debian/start_pg.sh b/debian/start_pg.sh old mode 100644 new mode 100755 -- GitLab From 7fa02b1a9d95fc9f0aaaf8e6d9c54f19818e6185 Mon Sep 17 00:00:00 2001 From: Thomas Goirand Date: Sun, 25 Feb 2018 12:24:31 +0000 Subject: [PATCH 1268/1483] test --- debian/rules | 20 +++++++++++++++----- 1 file changed, 15 insertions(+), 5 deletions(-) diff --git a/debian/rules b/debian/rules index baadaacf..6bd2ad5a 100755 --- a/debian/rules +++ b/debian/rules @@ -33,8 +33,11 @@ override_dh_install: done ifeq (,$(findstring nocheck, $(DEB_BUILD_OPTIONS))) - echo "===> Starting PGSQL" - BINDIR=`pg_config --bindir` ; \ + @echo "===> Running tests" + set -e ; set -x ; for i in $(PYTHON3S) ; do \ + PYMAJOR=`echo $$i | cut -d'.' 
-f1` ; \ + echo "===> Starting PGSQL" ; \ + BINDIR=`pg_config --bindir` ; \ PG_MYTMPDIR=`mktemp -d` ; \ chown postgres:postgres $$PG_MYTMPDIR || true ; \ export PGHOST=$$PG_MYTMPDIR ; \ @@ -42,10 +45,17 @@ ifeq (,$(findstring nocheck, $(DEB_BUILD_OPTIONS))) debian/start_pg.sh $$PG_MYTMPDIR ; \ export GNOCCHI_INDEXER_URL="postgresql:///template1?host=$$PG_MYTMPDIR&port=9823" ; \ export GNOCCHI_TEST_STORAGE_DRIVER=file ; \ - export PATH=$(PATH):$(CURDIR)/debian/bin && PYTHONPATH=$(CURDIR):$(CURDIR)/debian/bin ; \ - pkgos-dh_auto_test --no-py2 'gnocchi\.tests\.(?!.*('"$(UNIT_TEST_BLACKLIST)"'))' ; \ + echo "===> Testing with python$$i (python$$PYMAJOR)" ; \ + rm -rf .testrepository ; \ + testr-python$$PYMAJOR init ; \ + TEMP_REZ=`mktemp -t` ; \ + export PATH=$(PATH):$(CURDIR)/debian/bin && PYTHONPATH=$(CURDIR):$(CURDIR)/debian/bin PYTHON=python$$i testr-python$$PYMAJOR run --subunit 'gnocchi\.tests\.(?!.*('"$(UNIT_TEST_BLACKLIST)"'))' | tee $$TEMP_REZ | subunit2p$ + cat $$TEMP_REZ | subunit-filter -s --no-passthrough | subunit-stats ; \ + rm -f $$TEMP_REZ ; \ + testr-python$$PYMAJOR slowest ; \ echo "===> Stopping PGSQL" ; \ - $$BINDIR/pg_ctl stop -D $$PG_MYTMPDIR + $$BINDIR/pg_ctl stop -D $$PG_MYTMPDIR ; \ + done endif -- GitLab From 966bed50253ed0473525a97a05d7b2f2de286c28 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Mon, 12 Feb 2018 14:05:04 +0100 Subject: [PATCH 1269/1483] storage: change _delete_metric_measures to _delete_metric_splits This new method allows to delete a bunch of splits at the same time for a single metric, batching delete. 
--- gnocchi/storage/__init__.py | 13 ++++++++++--- gnocchi/storage/ceph.py | 26 ++++++++++++++------------ gnocchi/storage/file.py | 3 ++- gnocchi/storage/redis.py | 10 +++++++--- gnocchi/storage/s3.py | 4 ++-- gnocchi/storage/swift.py | 3 ++- gnocchi/tests/test_storage.py | 6 +++--- 7 files changed, 40 insertions(+), 25 deletions(-) diff --git a/gnocchi/storage/__init__.py b/gnocchi/storage/__init__.py index 790c664a..1d862a52 100644 --- a/gnocchi/storage/__init__.py +++ b/gnocchi/storage/__init__.py @@ -391,6 +391,7 @@ class StorageDriver(object): # First, check for old splits to delete if ap_def.timespan: + deleted_keys = set() for key in list(existing_keys): # NOTE(jd) Only delete if the key is strictly inferior # the timestamp; we don't delete any timeserie split @@ -398,8 +399,10 @@ class StorageDriver(object): # bit more than deleting too much if key >= oldest_key_to_keep: break - self._delete_metric_measures(metric, key, aggregation) + deleted_keys.add(key) existing_keys.remove(key) + self._delete_metric_splits( + metric, deleted_keys, aggregation) # Rewrite all read-only splits just for fun (and compression). # This only happens if `previous_oldest_mutable_timestamp' @@ -434,10 +437,14 @@ class StorageDriver(object): raise NotImplementedError @staticmethod - def _delete_metric_measures(metric, timestamp_key, - aggregation, granularity, version=3): + def _delete_metric_splits_unbatched(metric, keys, aggregation, version=3): raise NotImplementedError + def _delete_metric_splits(self, metric, keys, aggregation, version=3): + utils.parallel_map( + utils.return_none_on_failure(self._delete_metric_splits_unbatched), + ((metric, key, aggregation) for key in keys)) + def compute_and_store_timeseries(self, metric, measures): # NOTE(mnaser): The metric could have been handled by # another worker, ignore if no measures. 
diff --git a/gnocchi/storage/ceph.py b/gnocchi/storage/ceph.py index 40cc5ef2..11ebc354 100644 --- a/gnocchi/storage/ceph.py +++ b/gnocchi/storage/ceph.py @@ -91,19 +91,21 @@ class CephStorage(storage.StorageDriver): self.ioctx.operate_write_op( op, self._build_unaggregated_timeserie_path(metric, 3)) - def _delete_metric_measures(self, metric, key, aggregation, version=3): - name = self._get_object_name(metric, key, aggregation, version) - - try: - self.ioctx.remove_object(name) - except rados.ObjectNotFound: - # It's possible that we already remove that object and then crashed - # before removing it from the OMAP key list; then no big deal - # anyway. - pass - + def _delete_metric_splits(self, metric, keys, aggregation, version=3): + names = tuple( + self._get_object_name(metric, key, aggregation, version) + for key in keys + ) with rados.WriteOpCtx() as op: - self.ioctx.remove_omap_keys(op, (name,)) + for name in names: + try: + self.ioctx.remove_object(name) + except rados.ObjectNotFound: + # It's possible that we already remove that object and then + # crashed before removing it from the OMAP key list; then + # no big deal anyway. 
+ pass + self.ioctx.remove_omap_keys(op, names) self.ioctx.operate_write_op( op, self._build_unaggregated_timeserie_path(metric, 3)) diff --git a/gnocchi/storage/file.py b/gnocchi/storage/file.py index c169acb5..16dd5228 100644 --- a/gnocchi/storage/file.py +++ b/gnocchi/storage/file.py @@ -133,7 +133,8 @@ class FileStorage(storage.StorageDriver): keys.add(meta[0]) return keys - def _delete_metric_measures(self, metric, key, aggregation, version=3): + def _delete_metric_splits_unbatched( + self, metric, key, aggregation, version=3): os.unlink(self._build_metric_path_for_split( metric, aggregation, key, version)) diff --git a/gnocchi/storage/redis.py b/gnocchi/storage/redis.py index 336fc212..2d47c9c9 100644 --- a/gnocchi/storage/redis.py +++ b/gnocchi/storage/redis.py @@ -120,9 +120,13 @@ return {0, final} raise storage.MetricDoesNotExist(metric) return set(split_keys) - def _delete_metric_measures(self, metric, key, aggregation, version=3): - field = self._aggregated_field_for_split(aggregation, key, version) - self._client.hdel(self._metric_key(metric), field) + def _delete_metric_splits(self, metric, keys, aggregation, version=3): + metric_key = self._metric_key(metric) + pipe = self._client.pipeline(transaction=False) + for key in keys: + pipe.hdel(metric_key, self._aggregated_field_for_split( + aggregation, key, version)) + pipe.execute() def _store_metric_splits(self, metric, keys_and_data_and_offset, aggregation, version=3): diff --git a/gnocchi/storage/s3.py b/gnocchi/storage/s3.py index 450025d6..969146fb 100644 --- a/gnocchi/storage/s3.py +++ b/gnocchi/storage/s3.py @@ -128,8 +128,8 @@ class S3Storage(storage.StorageDriver): key, aggregation, version), Body=data) - def _delete_metric_measures(self, metric, key, aggregation, - version=3): + def _delete_metric_splits_unbatched(self, metric, key, aggregation, + version=3): self.s3.delete_object( Bucket=self._bucket_name, Key=self._prefix(metric) + self._object_name( diff --git a/gnocchi/storage/swift.py 
b/gnocchi/storage/swift.py index c01733e9..bc34cff1 100644 --- a/gnocchi/storage/swift.py +++ b/gnocchi/storage/swift.py @@ -121,7 +121,8 @@ class SwiftStorage(storage.StorageDriver): self._object_name(key, aggregation, version), data) - def _delete_metric_measures(self, metric, key, aggregation, version=3): + def _delete_metric_splits_unbatched( + self, metric, key, aggregation, version=3): self.swift.delete_object( self._container_name(metric), self._object_name(key, aggregation, version)) diff --git a/gnocchi/tests/test_storage.py b/gnocchi/tests/test_storage.py index 7a47f3a6..2a1f34ff 100644 --- a/gnocchi/tests/test_storage.py +++ b/gnocchi/tests/test_storage.py @@ -608,11 +608,11 @@ class TestStorageDriver(tests_base.TestCase): # Test what happens if we delete the latest split and then need to # compress it! - self.storage._delete_metric_measures( - self.metric, carbonara.SplitKey( + self.storage._delete_metric_splits( + self.metric, [carbonara.SplitKey( numpy.datetime64(1451952000, 's'), numpy.timedelta64(1, 'm'), - ), 'mean') + )], 'mean') # Now store brand new points that should force a rewrite of one of the # split (keep in mind the back window size in one hour here). 
We move -- GitLab From f4e12be47778de8da8a2c8b3a4dec3fd08c77841 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Mon, 12 Feb 2018 15:11:33 +0100 Subject: [PATCH 1270/1483] storage: make _add_measures take an Aggregation object as argument --- gnocchi/archive_policy.py | 4 ++-- gnocchi/storage/__init__.py | 44 +++++++++++++++++++++---------------- 2 files changed, 27 insertions(+), 21 deletions(-) diff --git a/gnocchi/archive_policy.py b/gnocchi/archive_policy.py index 2ace6994..40105139 100644 --- a/gnocchi/archive_policy.py +++ b/gnocchi/archive_policy.py @@ -111,8 +111,8 @@ class ArchivePolicy(object): @property def aggregations(self): return [aggregation.Aggregation(method, d.granularity, d.timespan) - for method in self.aggregation_methods - for d in self.definition] + for d in sorted(self.definition, key=ATTRGETTER_GRANULARITY) + for method in self.aggregation_methods] @property def aggregation_methods(self): diff --git a/gnocchi/storage/__init__.py b/gnocchi/storage/__init__.py index 1d862a52..a4dc5003 100644 --- a/gnocchi/storage/__init__.py +++ b/gnocchi/storage/__init__.py @@ -38,6 +38,7 @@ LOG = daiquiri.getLogger(__name__) ATTRGETTER_AGG_METHOD = operator.attrgetter("aggregation_method") +ATTRGETTER_GRANULARITY = operator.attrgetter("granularity") class StorageError(Exception): @@ -344,18 +345,18 @@ class StorageDriver(object): return self._store_metric_splits(metric, key_data_offset, aggregation) - def _add_measures(self, aggregation, ap_def, metric, grouped_serie, + def _add_measures(self, metric, aggregation, grouped_serie, previous_oldest_mutable_timestamp, oldest_mutable_timestamp): - if aggregation.startswith("rate:"): + if aggregation.method.startswith("rate:"): grouped_serie = grouped_serie.derived() - aggregation_to_compute = aggregation[5:] + aggregation_to_compute = aggregation.method[5:] else: - aggregation_to_compute = aggregation + aggregation_to_compute = aggregation.method ts = carbonara.AggregatedTimeSerie.from_grouped_serie( - 
grouped_serie, ap_def.granularity, aggregation_to_compute) + grouped_serie, aggregation.granularity, aggregation_to_compute) # Don't do anything if the timeserie is empty if not ts: @@ -368,8 +369,8 @@ class StorageDriver(object): and previous_oldest_mutable_timestamp is not None ) - if ap_def.timespan: - oldest_point_to_keep = ts.last - ap_def.timespan + if aggregation.timespan: + oldest_point_to_keep = ts.last - aggregation.timespan oldest_key_to_keep = ts.get_split_key(oldest_point_to_keep) else: oldest_point_to_keep = None @@ -377,7 +378,7 @@ class StorageDriver(object): keys_and_split_to_store = {} - if previous_oldest_mutable_timestamp and (ap_def.timespan or + if previous_oldest_mutable_timestamp and (aggregation.timespan or need_rewrite): previous_oldest_mutable_key = ts.get_split_key( previous_oldest_mutable_timestamp) @@ -387,10 +388,10 @@ class StorageDriver(object): # object for an old object to be cleanup if previous_oldest_mutable_key != oldest_mutable_key: existing_keys = sorted(self._list_split_keys_for_metric( - metric, aggregation, ap_def.granularity)) + metric, aggregation.method, aggregation.granularity)) # First, check for old splits to delete - if ap_def.timespan: + if aggregation.timespan: deleted_keys = set() for key in list(existing_keys): # NOTE(jd) Only delete if the key is strictly inferior @@ -402,7 +403,7 @@ class StorageDriver(object): deleted_keys.add(key) existing_keys.remove(key) self._delete_metric_splits( - metric, deleted_keys, aggregation) + metric, deleted_keys, aggregation.method) # Rewrite all read-only splits just for fun (and compression). # This only happens if `previous_oldest_mutable_timestamp' @@ -415,7 +416,8 @@ class StorageDriver(object): if key >= oldest_mutable_key: break LOG.debug("Compressing previous split %s (%s) for " - "metric %s", key, aggregation, metric) + "metric %s", key, aggregation.method, + metric) # NOTE(jd) Rewrite it entirely for fun (and later # for compression). 
For that, we just pass None as # split. @@ -425,11 +427,11 @@ class StorageDriver(object): if oldest_key_to_keep is None or key >= oldest_key_to_keep: LOG.debug( "Storing split %s (%s) for metric %s", - key, aggregation, metric) + key, aggregation.method, metric) keys_and_split_to_store[key] = split self._store_timeserie_splits( - metric, keys_and_split_to_store, aggregation, + metric, keys_and_split_to_store, aggregation.method, oldest_mutable_timestamp, oldest_point_to_keep) @staticmethod @@ -505,17 +507,21 @@ class StorageDriver(object): tstamp = max(bound_timeserie.first, measures['timestamps'][0]) new_first_block_timestamp = bound_timeserie.first_block_timestamp() computed_points['number'] = len(bound_timeserie) - for d in definition: + + for granularity, aggregations in itertools.groupby( + # No need to sort the aggregation, they are already + metric.archive_policy.aggregations, + ATTRGETTER_GRANULARITY): ts = bound_timeserie.group_serie( - d.granularity, carbonara.round_timestamp( - tstamp, d.granularity)) + granularity, carbonara.round_timestamp( + tstamp, granularity)) utils.parallel_map( self._add_measures, - ((aggregation, d, metric, ts, + ((metric, aggregation, ts, current_first_block_timestamp, new_first_block_timestamp) - for aggregation in agg_methods)) + for aggregation in aggregations)) with utils.StopWatch() as sw: ts.set_values(measures, -- GitLab From d553452308341e29ccb3fb0d56ce719290886355 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Tue, 13 Feb 2018 14:34:02 +0100 Subject: [PATCH 1271/1483] carbonara: handle timeseries derivation for rate aggregations This moves the handling into Carbonara itself so the storage engine does not have to handle that. 
--- gnocchi/carbonara.py | 7 ++++++- gnocchi/storage/__init__.py | 9 +-------- 2 files changed, 7 insertions(+), 9 deletions(-) diff --git a/gnocchi/carbonara.py b/gnocchi/carbonara.py index b583271f..f290c057 100644 --- a/gnocchi/carbonara.py +++ b/gnocchi/carbonara.py @@ -610,7 +610,12 @@ class AggregatedTimeSerie(TimeSerie): @classmethod def from_grouped_serie(cls, grouped_serie, sampling, aggregation_method): - agg_name, q = cls._get_agg_method(aggregation_method) + if aggregation_method.startswith("rate:"): + grouped_serie = grouped_serie.derived() + aggregation_method_name = aggregation_method[5:] + else: + aggregation_method_name = aggregation_method + agg_name, q = cls._get_agg_method(aggregation_method_name) return cls(sampling, aggregation_method, ts=cls._resample_grouped(grouped_serie, agg_name, q)) diff --git a/gnocchi/storage/__init__.py b/gnocchi/storage/__init__.py index a4dc5003..61109843 100644 --- a/gnocchi/storage/__init__.py +++ b/gnocchi/storage/__init__.py @@ -348,15 +348,8 @@ class StorageDriver(object): def _add_measures(self, metric, aggregation, grouped_serie, previous_oldest_mutable_timestamp, oldest_mutable_timestamp): - - if aggregation.method.startswith("rate:"): - grouped_serie = grouped_serie.derived() - aggregation_to_compute = aggregation.method[5:] - else: - aggregation_to_compute = aggregation.method - ts = carbonara.AggregatedTimeSerie.from_grouped_serie( - grouped_serie, aggregation.granularity, aggregation_to_compute) + grouped_serie, aggregation.granularity, aggregation.method) # Don't do anything if the timeserie is empty if not ts: -- GitLab From 286c578a1cf459141eb99e7bca1589f5ace9ef54 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Mon, 26 Feb 2018 16:24:32 +0100 Subject: [PATCH 1272/1483] carbonara: use __slots__ on SplitKey Gnocchi might creates a lot of those, let's make them small. 
--- gnocchi/carbonara.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/gnocchi/carbonara.py b/gnocchi/carbonara.py index f290c057..18ff724c 100644 --- a/gnocchi/carbonara.py +++ b/gnocchi/carbonara.py @@ -452,6 +452,11 @@ class SplitKey(object): are regularly spaced. """ + __slots__ = ( + 'key', + 'sampling', + ) + POINTS_PER_SPLIT = 3600 def __init__(self, value, sampling): -- GitLab From c7631f3c197d1cd288d2d937b2d9b5bb0cb597b0 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Wed, 14 Feb 2018 16:08:07 +0100 Subject: [PATCH 1273/1483] carbonara: make fetch() return an AggregatedTimeSerie The current return format is very specific to the Gnocchi API, whereas there should not be any need to do the formatting at this low-level stage. --- gnocchi/carbonara.py | 20 +-- gnocchi/storage/__init__.py | 8 +- gnocchi/tests/test_carbonara.py | 220 ++++++++++++-------------------- 3 files changed, 99 insertions(+), 149 deletions(-) diff --git a/gnocchi/carbonara.py b/gnocchi/carbonara.py index 18ff724c..454e2677 100644 --- a/gnocchi/carbonara.py +++ b/gnocchi/carbonara.py @@ -17,7 +17,6 @@ """Time series data manipulation, better with pancetta.""" import functools -import itertools import math import operator import random @@ -217,13 +216,16 @@ class TimeSerie(object): ts = make_timeseries([], []) self.ts = ts + def __iter__(self): + return (tuple(i) for i in self.ts) + @classmethod def from_data(cls, timestamps=None, values=None): return cls(make_timeseries(timestamps, values)) def __eq__(self, other): return (isinstance(other, TimeSerie) and - numpy.all(self.ts == other.ts)) + numpy.array_equal(self.ts, other.ts)) def __getitem__(self, key): if isinstance(key, numpy.datetime64): @@ -607,11 +609,13 @@ class AggregatedTimeSerie(TimeSerie): def from_timeseries(cls, timeseries, sampling, aggregation_method): # NOTE(gordc): Indices must be unique across all timeseries. Also, # timeseries should be a list that is ordered within list and series. 
- if not timeseries: - timeseries = [make_timeseries([], [])] + if timeseries: + ts = numpy.concatenate([ts.ts for ts in timeseries]) + else: + ts = None return cls(sampling=sampling, aggregation_method=aggregation_method, - ts=numpy.concatenate(timeseries)) + ts=ts) @classmethod def from_grouped_serie(cls, grouped_serie, sampling, aggregation_method): @@ -760,10 +764,8 @@ class AggregatedTimeSerie(TimeSerie): from_ = None else: from_ = round_timestamp(from_timestamp, self.sampling) - points = self[from_:to_timestamp] - return six.moves.zip(points['timestamps'], - itertools.repeat(self.sampling), - points['values']) + return self.__class__(self.sampling, self.aggregation_method, + ts=self[from_:to_timestamp]) @classmethod def benchmark(cls): diff --git a/gnocchi/storage/__init__.py b/gnocchi/storage/__init__.py index 61109843..6667554b 100644 --- a/gnocchi/storage/__init__.py +++ b/gnocchi/storage/__init__.py @@ -223,7 +223,9 @@ class StorageDriver(object): return { aggmethod: list(itertools.chain( - *[ts.fetch(from_timestamp, to_timestamp) + *[[(timestamp, ts.sampling, value) + for timestamp, value + in ts.fetch(from_timestamp, to_timestamp)] for ts in aggts])) for aggmethod, aggts in itertools.groupby(agg_timeseries, ATTRGETTER_AGG_METHOD) @@ -544,8 +546,8 @@ class StorageDriver(object): timeserie = self._get_measures_timeserie( metric, agg, from_timestamp, to_timestamp) values = timeserie.fetch(from_timestamp, to_timestamp) - return [(timestamp, g, value) - for timestamp, g, value in values + return [(timestamp, granularity, value) + for timestamp, value in values if predicate(value)] diff --git a/gnocchi/tests/test_carbonara.py b/gnocchi/tests/test_carbonara.py index 48e0317f..281306af 100644 --- a/gnocchi/tests/test_carbonara.py +++ b/gnocchi/tests/test_carbonara.py @@ -132,33 +132,24 @@ class TestAggregatedTimeSerie(base.BaseTestCase): values=[3, 5, 6], sampling=numpy.timedelta64(1, 's')) self.assertEqual( - [(datetime64(2014, 1, 1, 12), - 
numpy.timedelta64(1000000, 'us'), 3), - (datetime64(2014, 1, 1, 12, 0, 4), - numpy.timedelta64(1000000, 'us'), 5), - (datetime64(2014, 1, 1, 12, 0, 9), - numpy.timedelta64(1000000, 'us'), 6)], + [(datetime64(2014, 1, 1, 12), 3), + (datetime64(2014, 1, 1, 12, 0, 4), 5), + (datetime64(2014, 1, 1, 12, 0, 9), 6)], list(ts.fetch())) self.assertEqual( - [(datetime64(2014, 1, 1, 12, 0, 4), - numpy.timedelta64(1000000, 'us'), 5), - (datetime64(2014, 1, 1, 12, 0, 9), - numpy.timedelta64(1000000, 'us'), 6)], + [(datetime64(2014, 1, 1, 12, 0, 4), 5), + (datetime64(2014, 1, 1, 12, 0, 9), 6)], list(ts.fetch( from_timestamp=datetime64(2014, 1, 1, 12, 0, 4)))) self.assertEqual( - [(datetime64(2014, 1, 1, 12, 0, 4), - numpy.timedelta64(1000000, 'us'), 5), - (datetime64(2014, 1, 1, 12, 0, 9), - numpy.timedelta64(1000000, 'us'), 6)], + [(datetime64(2014, 1, 1, 12, 0, 4), 5), + (datetime64(2014, 1, 1, 12, 0, 9), 6)], list(ts.fetch( from_timestamp=numpy.datetime64(iso8601.parse_date( "2014-01-01 12:00:04"))))) self.assertEqual( - [(datetime64(2014, 1, 1, 12, 0, 4), - numpy.timedelta64(1000000, 'us'), 5), - (datetime64(2014, 1, 1, 12, 0, 9), - numpy.timedelta64(1000000, 'us'), 6)], + [(datetime64(2014, 1, 1, 12, 0, 4), 5), + (datetime64(2014, 1, 1, 12, 0, 9), 6)], list(ts.fetch( from_timestamp=numpy.datetime64(iso8601.parse_date( "2014-01-01 13:00:04+01:00"))))) @@ -201,16 +192,11 @@ class TestAggregatedTimeSerie(base.BaseTestCase): self.assertEqual(5, len(ts)) self.assertEqual( - [(datetime64(2014, 1, 1, 12, 0, 0), - numpy.timedelta64(60, 's'), 5), - (datetime64(2014, 1, 1, 12, 1, 0), - numpy.timedelta64(60, 's'), 5), - (datetime64(2014, 1, 1, 12, 2, 0), - numpy.timedelta64(60, 's'), 11), - (datetime64(2014, 1, 1, 12, 3, 0), - numpy.timedelta64(60, 's'), -32), - (datetime64(2014, 1, 1, 12, 4, 0), - numpy.timedelta64(60, 's'), 16)], + [(datetime64(2014, 1, 1, 12, 0, 0), 5), + (datetime64(2014, 1, 1, 12, 1, 0), 5), + (datetime64(2014, 1, 1, 12, 2, 0), 11), + (datetime64(2014, 1, 1, 12, 
3, 0), -32), + (datetime64(2014, 1, 1, 12, 4, 0), 16)], list(ts.fetch( from_timestamp=datetime64(2014, 1, 1, 12)))) @@ -231,14 +217,10 @@ class TestAggregatedTimeSerie(base.BaseTestCase): self.assertEqual(4, len(ts)) self.assertEqual( - [(datetime64(2014, 1, 1, 12, 0, 0), - numpy.timedelta64(60, 's'), 5), - (datetime64(2014, 1, 1, 12, 1, 0), - numpy.timedelta64(60, 's'), 4), - (datetime64(2014, 1, 1, 12, 3, 0), - numpy.timedelta64(60, 's'), 92), - (datetime64(2014, 1, 1, 12, 4, 0), - numpy.timedelta64(60, 's'), 2)], + [(datetime64(2014, 1, 1, 12, 0, 0), 5), + (datetime64(2014, 1, 1, 12, 1, 0), 4), + (datetime64(2014, 1, 1, 12, 3, 0), 92), + (datetime64(2014, 1, 1, 12, 4, 0), 2)], list(ts.fetch( from_timestamp=datetime64(2014, 1, 1, 12)))) @@ -467,47 +449,28 @@ class TestAggregatedTimeSerie(base.BaseTestCase): self._resample_and_merge, agg_dict=ts)) self.assertEqual([ - (numpy.datetime64('2014-01-01T11:46:00.000000000'), - numpy.timedelta64(60, 's'), 4.0), - (numpy.datetime64('2014-01-01T11:47:00.000000000'), - numpy.timedelta64(60, 's'), 8.0), - (numpy.datetime64('2014-01-01T11:50:00.000000000'), - numpy.timedelta64(60, 's'), 50.0), - (datetime64(2014, 1, 1, 11, 54), - numpy.timedelta64(60000000000, 'ns'), 4.0), - (datetime64(2014, 1, 1, 11, 56), - numpy.timedelta64(60000000000, 'ns'), 4.0), - (datetime64(2014, 1, 1, 11, 57), - numpy.timedelta64(60000000000, 'ns'), 6.0), - (datetime64(2014, 1, 1, 11, 58), - numpy.timedelta64(60000000000, 'ns'), 5.0), - (datetime64(2014, 1, 1, 12, 1), - numpy.timedelta64(60000000000, 'ns'), 5.5), - (datetime64(2014, 1, 1, 12, 2), - numpy.timedelta64(60000000000, 'ns'), 8.0), - (datetime64(2014, 1, 1, 12, 3), - numpy.timedelta64(60000000000, 'ns'), 3.0), - (datetime64(2014, 1, 1, 12, 4), - numpy.timedelta64(60000000000, 'ns'), 7.0), - (datetime64(2014, 1, 1, 12, 5), - numpy.timedelta64(60000000000, 'ns'), 8.0), - (datetime64(2014, 1, 1, 12, 6), - numpy.timedelta64(60000000000, 'ns'), 4.0) + 
(numpy.datetime64('2014-01-01T11:46:00.000000000'), 4.0), + (numpy.datetime64('2014-01-01T11:47:00.000000000'), 8.0), + (numpy.datetime64('2014-01-01T11:50:00.000000000'), 50.0), + (datetime64(2014, 1, 1, 11, 54), 4.0), + (datetime64(2014, 1, 1, 11, 56), 4.0), + (datetime64(2014, 1, 1, 11, 57), 6.0), + (datetime64(2014, 1, 1, 11, 58), 5.0), + (datetime64(2014, 1, 1, 12, 1), 5.5), + (datetime64(2014, 1, 1, 12, 2), 8.0), + (datetime64(2014, 1, 1, 12, 3), 3.0), + (datetime64(2014, 1, 1, 12, 4), 7.0), + (datetime64(2014, 1, 1, 12, 5), 8.0), + (datetime64(2014, 1, 1, 12, 6), 4.0) ], list(ts['return'].fetch())) self.assertEqual([ - (datetime64(2014, 1, 1, 12, 1), - numpy.timedelta64(60000000000, 'ns'), 5.5), - (datetime64(2014, 1, 1, 12, 2), - numpy.timedelta64(60000000000, 'ns'), 8.0), - (datetime64(2014, 1, 1, 12, 3), - numpy.timedelta64(60000000000, 'ns'), 3.0), - (datetime64(2014, 1, 1, 12, 4), - numpy.timedelta64(60000000000, 'ns'), 7.0), - (datetime64(2014, 1, 1, 12, 5), - numpy.timedelta64(60000000000, 'ns'), 8.0), - (datetime64(2014, 1, 1, 12, 6), - numpy.timedelta64(60000000000, 'ns'), 4.0) + (datetime64(2014, 1, 1, 12, 1), 5.5), + (datetime64(2014, 1, 1, 12, 2), 8.0), + (datetime64(2014, 1, 1, 12, 3), 3.0), + (datetime64(2014, 1, 1, 12, 4), 7.0), + (datetime64(2014, 1, 1, 12, 5), 8.0), + (datetime64(2014, 1, 1, 12, 6), 4.0) ], list(ts['return'].fetch(datetime64(2014, 1, 1, 12, 0, 0)))) def test_fetch_agg_pct(self): @@ -526,19 +489,18 @@ class TestAggregatedTimeSerie(base.BaseTestCase): reference = [ (datetime64( 2014, 1, 1, 12, 0, 0 - ), 1.0, 3.9), + ), 3.9), (datetime64( 2014, 1, 1, 12, 0, 2 - ), 1.0, 4) + ), 4) ] self.assertEqual(len(reference), len(list(result))) for ref, res in zip(reference, result): self.assertEqual(ref[0], res[0]) - self.assertEqual(ref[1], res[1]) # Rounding \o/ - self.assertAlmostEqual(ref[2], res[2]) + self.assertAlmostEqual(ref[1], res[1]) tsb.set_values(numpy.array([ (datetime64(2014, 1, 1, 12, 0, 2, 113), 110)], @@ -550,19 +512,18 
@@ class TestAggregatedTimeSerie(base.BaseTestCase): reference = [ (datetime64( 2014, 1, 1, 12, 0, 0 - ), 1.0, 3.9), + ), 3.9), (datetime64( 2014, 1, 1, 12, 0, 2 - ), 1.0, 99.4) + ), 99.4) ] self.assertEqual(len(reference), len(list(result))) for ref, res in zip(reference, result): self.assertEqual(ref[0], res[0]) - self.assertEqual(ref[1], res[1]) # Rounding \o/ - self.assertAlmostEqual(ref[2], res[2]) + self.assertAlmostEqual(ref[1], res[1]) def test_fetch_nano(self): ts = {'sampling': numpy.timedelta64(200, 'ms'), @@ -586,15 +547,13 @@ class TestAggregatedTimeSerie(base.BaseTestCase): self._resample_and_merge, agg_dict=ts)) self.assertEqual([ - (datetime64(2014, 1, 1, 11, 46, 0, 200000), - numpy.timedelta64(200000000, 'ns'), 6.0), - (datetime64(2014, 1, 1, 11, 47, 0, 200000), - numpy.timedelta64(200000000, 'ns'), 50.0), - (datetime64(2014, 1, 1, 11, 48, 0, 400000), - numpy.timedelta64(200000000, 'ns'), 4.0), - (datetime64(2014, 1, 1, 11, 48, 0, 800000), - numpy.timedelta64(200000000, 'ns'), 4.5) + (datetime64(2014, 1, 1, 11, 46, 0, 200000), 6.0), + (datetime64(2014, 1, 1, 11, 47, 0, 200000), 50.0), + (datetime64(2014, 1, 1, 11, 48, 0, 400000), 4.0), + (datetime64(2014, 1, 1, 11, 48, 0, 800000), 4.5) ], list(ts['return'].fetch())) + self.assertEqual(numpy.timedelta64(200000000, 'ns'), + ts['return'].sampling) def test_fetch_agg_std(self): # NOTE (gordc): this is a good test to ensure we drop NaN entries @@ -613,10 +572,8 @@ class TestAggregatedTimeSerie(base.BaseTestCase): self._resample_and_merge, agg_dict=ts)) self.assertEqual([ - (datetime64(2014, 1, 1, 12, 1, 0), - numpy.timedelta64(60000000000, 'ns'), 2.1213203435596424), - (datetime64(2014, 1, 1, 12, 2, 0), - numpy.timedelta64(60000000000, 'ns'), 9.8994949366116654), + (datetime64(2014, 1, 1, 12, 1, 0), 2.1213203435596424), + (datetime64(2014, 1, 1, 12, 2, 0), 9.8994949366116654), ], list(ts['return'].fetch(datetime64(2014, 1, 1, 12, 0, 0)))) tsb.set_values(numpy.array([(datetime64(2014, 1, 1, 12, 2, 13), 
110)], @@ -625,10 +582,8 @@ class TestAggregatedTimeSerie(base.BaseTestCase): self._resample_and_merge, agg_dict=ts)) self.assertEqual([ - (datetime64(2014, 1, 1, 12, 1, 0), - numpy.timedelta64(60000000000, 'ns'), 2.1213203435596424), - (datetime64(2014, 1, 1, 12, 2, 0), - numpy.timedelta64(60000000000, 'ns'), 59.304300012730948), + (datetime64(2014, 1, 1, 12, 1, 0), 2.1213203435596424), + (datetime64(2014, 1, 1, 12, 2, 0), 59.304300012730948), ], list(ts['return'].fetch(datetime64(2014, 1, 1, 12, 0, 0)))) def test_fetch_agg_max(self): @@ -646,12 +601,9 @@ class TestAggregatedTimeSerie(base.BaseTestCase): self._resample_and_merge, agg_dict=ts)) self.assertEqual([ - (datetime64(2014, 1, 1, 12, 0, 0), - numpy.timedelta64(60000000000, 'ns'), 3), - (datetime64(2014, 1, 1, 12, 1, 0), - numpy.timedelta64(60000000000, 'ns'), 7), - (datetime64(2014, 1, 1, 12, 2, 0), - numpy.timedelta64(60000000000, 'ns'), 15), + (datetime64(2014, 1, 1, 12, 0, 0), 3), + (datetime64(2014, 1, 1, 12, 1, 0), 7), + (datetime64(2014, 1, 1, 12, 2, 0), 15), ], list(ts['return'].fetch(datetime64(2014, 1, 1, 12, 0, 0)))) tsb.set_values(numpy.array([(datetime64(2014, 1, 1, 12, 2, 13), 110)], @@ -660,12 +612,9 @@ class TestAggregatedTimeSerie(base.BaseTestCase): self._resample_and_merge, agg_dict=ts)) self.assertEqual([ - (datetime64(2014, 1, 1, 12, 0, 0), - numpy.timedelta64(60, 's'), 3), - (datetime64(2014, 1, 1, 12, 1, 0), - numpy.timedelta64(60, 's'), 7), - (datetime64(2014, 1, 1, 12, 2, 0), - numpy.timedelta64(60, 's'), 110), + (datetime64(2014, 1, 1, 12, 0, 0), 3), + (datetime64(2014, 1, 1, 12, 1, 0), 7), + (datetime64(2014, 1, 1, 12, 2, 0), 110), ], list(ts['return'].fetch(datetime64(2014, 1, 1, 12, 0, 0)))) def test_serialize(self): @@ -726,15 +675,9 @@ class TestAggregatedTimeSerie(base.BaseTestCase): self.assertEqual( [ - (datetime64( - 2014, 1, 1, 12, 0, 1 - ), numpy.timedelta64(1, 's'), 1.5), - (datetime64( - 2014, 1, 1, 12, 0, 2 - ), numpy.timedelta64(1, 's'), 3.5), - (datetime64( - 2014, 
1, 1, 12, 0, 3 - ), numpy.timedelta64(1, 's'), 2.5), + (datetime64(2014, 1, 1, 12, 0, 1), 1.5), + (datetime64(2014, 1, 1, 12, 0, 2), 3.5), + (datetime64(2014, 1, 1, 12, 0, 3), 2.5), ], list(ts['return'].fetch())) @@ -759,12 +702,9 @@ class TestAggregatedTimeSerie(base.BaseTestCase): self.assertEqual( [ - (datetime64(2014, 1, 1, 12, 0, 1), - numpy.timedelta64(1, 's'), 1.5), - (datetime64(2014, 1, 1, 12, 0, 2), - numpy.timedelta64(1, 's'), 3.5), - (datetime64(2014, 1, 1, 12, 0, 3), - numpy.timedelta64(1, 's'), 2.5), + (datetime64(2014, 1, 1, 12, 0, 1), 1.5), + (datetime64(2014, 1, 1, 12, 0, 2), 3.5), + (datetime64(2014, 1, 1, 12, 0, 3), 2.5), ], list(ts['return'].fetch())) @@ -776,12 +716,9 @@ class TestAggregatedTimeSerie(base.BaseTestCase): self.assertEqual( [ - (datetime64(2014, 1, 1, 12, 0, 1), - numpy.timedelta64(1, 's'), 1.5), - (datetime64(2014, 1, 1, 12, 0, 2), - numpy.timedelta64(1, 's'), 3.5), - (datetime64(2014, 1, 1, 12, 0, 3), - numpy.timedelta64(1, 's'), 2.5), + (datetime64(2014, 1, 1, 12, 0, 1), 1.5), + (datetime64(2014, 1, 1, 12, 0, 2), 3.5), + (datetime64(2014, 1, 1, 12, 0, 3), 2.5), ], list(ts['return'].fetch())) @@ -794,12 +731,9 @@ class TestAggregatedTimeSerie(base.BaseTestCase): self.assertEqual( [ - (datetime64(2014, 1, 1, 12, 0, 1), - numpy.timedelta64(1, 's'), 1.5), - (datetime64(2014, 1, 1, 12, 0, 2), - numpy.timedelta64(1, 's'), 3.5), - (datetime64(2014, 1, 1, 12, 0, 3), - numpy.timedelta64(1, 's'), 3.5), + (datetime64(2014, 1, 1, 12, 0, 1), 1.5), + (datetime64(2014, 1, 1, 12, 0, 2), 3.5), + (datetime64(2014, 1, 1, 12, 0, 3), 3.5), ], list(ts['return'].fetch())) @@ -950,3 +884,15 @@ class TestAggregatedTimeSerie(base.BaseTestCase): self.assertEqual(2, len(agg_ts)) self.assertEqual(5, agg_ts[0][1]) self.assertEqual(3, agg_ts[1][1]) + + def test_iter(self): + ts = carbonara.TimeSerie.from_data( + [datetime64(2014, 1, 1, 12, 0, 0), + datetime64(2014, 1, 1, 12, 0, 11), + datetime64(2014, 1, 1, 12, 0, 12)], + [3, 5, 6]) + self.assertEqual([ + 
(numpy.datetime64('2014-01-01T12:00:00'), 3.), + (numpy.datetime64('2014-01-01T12:00:11'), 5.), + (numpy.datetime64('2014-01-01T12:00:12'), 6.), + ], list(ts)) -- GitLab From 4c6ad5b3fd5b702ad499fc5a2fc05f44632e5098 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Mon, 26 Feb 2018 16:34:04 +0100 Subject: [PATCH 1274/1483] carbonara: allow eq and ne comparison to SplitKey While you can't sort and compare SplitKey with different sampling, you can say if they are equals or not easily. --- gnocchi/carbonara.py | 4 ++++ gnocchi/tests/test_carbonara.py | 29 +++++++++++++++++++++++++++++ 2 files changed, 33 insertions(+) diff --git a/gnocchi/carbonara.py b/gnocchi/carbonara.py index 454e2677..7622463d 100644 --- a/gnocchi/carbonara.py +++ b/gnocchi/carbonara.py @@ -498,6 +498,10 @@ class SplitKey(object): def _compare(self, op, other): if isinstance(other, SplitKey): if self.sampling != other.sampling: + if op == operator.eq: + return False + if op == operator.ne: + return True raise TypeError( "Cannot compare %s with different sampling" % self.__class__.__name__) diff --git a/gnocchi/tests/test_carbonara.py b/gnocchi/tests/test_carbonara.py index 281306af..1b550596 100644 --- a/gnocchi/tests/test_carbonara.py +++ b/gnocchi/tests/test_carbonara.py @@ -16,6 +16,7 @@ import datetime import functools import math +import operator import fixtures import iso8601 @@ -762,6 +763,7 @@ class TestAggregatedTimeSerie(base.BaseTestCase): dt1_1 = numpy.datetime64("2015-01-01T15:03") dt2 = numpy.datetime64("2015-01-05T15:03") td = numpy.timedelta64(60, 's') + td2 = numpy.timedelta64(300, 's') self.assertEqual( carbonara.SplitKey.from_timestamp_and_sampling(dt1, td), @@ -772,6 +774,9 @@ class TestAggregatedTimeSerie(base.BaseTestCase): self.assertNotEqual( carbonara.SplitKey.from_timestamp_and_sampling(dt1, td), carbonara.SplitKey.from_timestamp_and_sampling(dt2, td)) + self.assertNotEqual( + carbonara.SplitKey.from_timestamp_and_sampling(dt1, td), + 
carbonara.SplitKey.from_timestamp_and_sampling(dt1, td2)) self.assertLess( carbonara.SplitKey.from_timestamp_and_sampling(dt1, td), @@ -792,6 +797,7 @@ class TestAggregatedTimeSerie(base.BaseTestCase): dt1_1 = numpy.datetime64("2015-01-01T15:03") dt2 = numpy.datetime64("2015-01-05T15:03") td = numpy.timedelta64(60, 's') + td2 = numpy.timedelta64(300, 's') self.assertFalse( carbonara.SplitKey.from_timestamp_and_sampling(dt1, td) != @@ -802,6 +808,29 @@ class TestAggregatedTimeSerie(base.BaseTestCase): self.assertFalse( carbonara.SplitKey.from_timestamp_and_sampling(dt1, td) == carbonara.SplitKey.from_timestamp_and_sampling(dt2, td)) + self.assertFalse( + carbonara.SplitKey.from_timestamp_and_sampling(dt1, td) == + carbonara.SplitKey.from_timestamp_and_sampling(dt2, td2)) + self.assertRaises( + TypeError, + operator.le, + carbonara.SplitKey.from_timestamp_and_sampling(dt1, td), + carbonara.SplitKey.from_timestamp_and_sampling(dt2, td2)) + self.assertRaises( + TypeError, + operator.ge, + carbonara.SplitKey.from_timestamp_and_sampling(dt1, td), + carbonara.SplitKey.from_timestamp_and_sampling(dt2, td2)) + self.assertRaises( + TypeError, + operator.gt, + carbonara.SplitKey.from_timestamp_and_sampling(dt1, td), + carbonara.SplitKey.from_timestamp_and_sampling(dt2, td2)) + self.assertRaises( + TypeError, + operator.lt, + carbonara.SplitKey.from_timestamp_and_sampling(dt1, td), + carbonara.SplitKey.from_timestamp_and_sampling(dt2, td2)) self.assertFalse( carbonara.SplitKey.from_timestamp_and_sampling(dt1, td) >= -- GitLab From 933e776678823a2f8d6e93f11408732ed1c9efa9 Mon Sep 17 00:00:00 2001 From: Thomas Goirand Date: Tue, 27 Feb 2018 13:12:33 +0000 Subject: [PATCH 1275/1483] Releasing to unstable. 
--- debian/changelog | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/debian/changelog b/debian/changelog index 846859e3..88628f2d 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,4 +1,4 @@ -gnocchi (4.2.0-1) UNRELEASED; urgency=medium +gnocchi (4.2.0-1) unstable; urgency=medium [ Ondřej Nový ] * d/control: Set Vcs-* to salsa.debian.org @@ -12,7 +12,7 @@ gnocchi (4.2.0-1) UNRELEASED; urgency=medium anyway, as we're using oslo-config-generator. * Fixed uwsgi params. - -- Thomas Goirand Mon, 19 Feb 2018 10:23:06 +0100 + -- Thomas Goirand Tue, 27 Feb 2018 13:12:12 +0000 gnocchi (4.0.4-1) unstable; urgency=medium -- GitLab From 1024a01308580ccb569fca35348df8cd4c7e37f1 Mon Sep 17 00:00:00 2001 From: Thomas Goirand Date: Tue, 27 Feb 2018 13:15:44 +0000 Subject: [PATCH 1276/1483] Fixed for loop. --- debian/rules | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/debian/rules b/debian/rules index 6bd2ad5a..8917c360 100755 --- a/debian/rules +++ b/debian/rules @@ -54,7 +54,7 @@ ifeq (,$(findstring nocheck, $(DEB_BUILD_OPTIONS))) rm -f $$TEMP_REZ ; \ testr-python$$PYMAJOR slowest ; \ echo "===> Stopping PGSQL" ; \ - $$BINDIR/pg_ctl stop -D $$PG_MYTMPDIR ; \ + $$BINDIR/pg_ctl stop -D $$PG_MYTMPDIR ; done endif -- GitLab From d1f28187632a5643a7646b4d6a3789dd6e006cc5 Mon Sep 17 00:00:00 2001 From: Thomas Goirand Date: Tue, 27 Feb 2018 13:18:30 +0000 Subject: [PATCH 1277/1483] Reverse my commit. 
--- debian/rules | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/debian/rules b/debian/rules index 8917c360..6bd2ad5a 100755 --- a/debian/rules +++ b/debian/rules @@ -54,7 +54,7 @@ ifeq (,$(findstring nocheck, $(DEB_BUILD_OPTIONS))) rm -f $$TEMP_REZ ; \ testr-python$$PYMAJOR slowest ; \ echo "===> Stopping PGSQL" ; \ - $$BINDIR/pg_ctl stop -D $$PG_MYTMPDIR ; + $$BINDIR/pg_ctl stop -D $$PG_MYTMPDIR ; \ done endif -- GitLab From dca486ca529daa3e2ee2ce0ac8bd99c5ef996f46 Mon Sep 17 00:00:00 2001 From: Thomas Goirand Date: Tue, 27 Feb 2018 13:21:49 +0000 Subject: [PATCH 1278/1483] Fixed for loop for real this time. --- debian/rules | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/debian/rules b/debian/rules index 6bd2ad5a..9da24aea 100755 --- a/debian/rules +++ b/debian/rules @@ -49,7 +49,7 @@ ifeq (,$(findstring nocheck, $(DEB_BUILD_OPTIONS))) rm -rf .testrepository ; \ testr-python$$PYMAJOR init ; \ TEMP_REZ=`mktemp -t` ; \ - export PATH=$(PATH):$(CURDIR)/debian/bin && PYTHONPATH=$(CURDIR):$(CURDIR)/debian/bin PYTHON=python$$i testr-python$$PYMAJOR run --subunit 'gnocchi\.tests\.(?!.*('"$(UNIT_TEST_BLACKLIST)"'))' | tee $$TEMP_REZ | subunit2p$ + export PATH=$(PATH):$(CURDIR)/debian/bin && PYTHONPATH=$(CURDIR):$(CURDIR)/debian/bin PYTHON=python$$i testr-python$$PYMAJOR run --subunit 'gnocchi\.tests\.(?!.*('"$(UNIT_TEST_BLACKLIST)"'))' | tee $$TEMP_REZ | subunit2pyunit ; \ cat $$TEMP_REZ | subunit-filter -s --no-passthrough | subunit-stats ; \ rm -f $$TEMP_REZ ; \ testr-python$$PYMAJOR slowest ; \ -- GitLab From 88de5969cc21f9e29e1daadabf454a85acc6b5c5 Mon Sep 17 00:00:00 2001 From: Thomas Goirand Date: Tue, 27 Feb 2018 13:27:07 +0000 Subject: [PATCH 1279/1483] Removed space at end of lines during for loop, breaking syntax. 
--- debian/rules | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/debian/rules b/debian/rules index 9da24aea..66dd556c 100755 --- a/debian/rules +++ b/debian/rules @@ -34,9 +34,9 @@ override_dh_install: ifeq (,$(findstring nocheck, $(DEB_BUILD_OPTIONS))) @echo "===> Running tests" - set -e ; set -x ; for i in $(PYTHON3S) ; do \ - PYMAJOR=`echo $$i | cut -d'.' -f1` ; \ - echo "===> Starting PGSQL" ; \ + set -e ; set -x ; for i in $(PYTHON3S) ; do \ + PYMAJOR=`echo $$i | cut -d'.' -f1` ; \ + echo "===> Starting PGSQL" ; \ BINDIR=`pg_config --bindir` ; \ PG_MYTMPDIR=`mktemp -d` ; \ chown postgres:postgres $$PG_MYTMPDIR || true ; \ @@ -46,9 +46,9 @@ ifeq (,$(findstring nocheck, $(DEB_BUILD_OPTIONS))) export GNOCCHI_INDEXER_URL="postgresql:///template1?host=$$PG_MYTMPDIR&port=9823" ; \ export GNOCCHI_TEST_STORAGE_DRIVER=file ; \ echo "===> Testing with python$$i (python$$PYMAJOR)" ; \ - rm -rf .testrepository ; \ - testr-python$$PYMAJOR init ; \ - TEMP_REZ=`mktemp -t` ; \ + rm -rf .testrepository ; \ + testr-python$$PYMAJOR init ; \ + TEMP_REZ=`mktemp -t` ; \ export PATH=$(PATH):$(CURDIR)/debian/bin && PYTHONPATH=$(CURDIR):$(CURDIR)/debian/bin PYTHON=python$$i testr-python$$PYMAJOR run --subunit 'gnocchi\.tests\.(?!.*('"$(UNIT_TEST_BLACKLIST)"'))' | tee $$TEMP_REZ | subunit2pyunit ; \ cat $$TEMP_REZ | subunit-filter -s --no-passthrough | subunit-stats ; \ rm -f $$TEMP_REZ ; \ -- GitLab From d5d77d7d57602afca53bf4ba21b88eaab327545f Mon Sep 17 00:00:00 2001 From: Thomas Goirand Date: Tue, 27 Feb 2018 13:50:40 +0000 Subject: [PATCH 1280/1483] Starting pgsql directly from debian/rules --- debian/rules | 23 ++++++++++++++++------- 1 file changed, 16 insertions(+), 7 deletions(-) diff --git a/debian/rules b/debian/rules index 66dd556c..c60503f7 100755 --- a/debian/rules +++ b/debian/rules @@ -36,13 +36,22 @@ ifeq (,$(findstring nocheck, $(DEB_BUILD_OPTIONS))) @echo "===> Running tests" set -e ; set -x ; for i in $(PYTHON3S) ; do \ PYMAJOR=`echo 
$$i | cut -d'.' -f1` ; \ - echo "===> Starting PGSQL" ; \ - BINDIR=`pg_config --bindir` ; \ - PG_MYTMPDIR=`mktemp -d` ; \ - chown postgres:postgres $$PG_MYTMPDIR || true ; \ - export PGHOST=$$PG_MYTMPDIR ; \ - chmod +x debian/start_pg.sh ; \ - debian/start_pg.sh $$PG_MYTMPDIR ; \ + BINDIR=`pg_config --bindir` ; \ + PG_MYTMPDIR=`mktemp -d` ; \ + export LC_ALL="C" ; \ + export LANGUAGE=C ; \ + PGSQL_PORT=9823 ; \ + $$BINDIR/initdb -D $$PG_MYTMPDIR ; \ + $$BINDIR/pg_ctl -w -D $$PG_MYTMPDIR -o "-k $$PG_MYTMPDIR -p $$PGSQL_PORT" start > /dev/null ; \ + attempts=0 ; \ + while ! [ -e $$PG_MYTMPDIR/postmaster.pid ] ; do \ + if [ $$attempts -gt 10 ] ; then \ + echo "Exiting test: postgres pid file was not created after 30 seconds" ; \ + exit 1 ; \ + fi ; \ + attempts=$$((attempts+1)) ; \ + sleep 3 ; \ + done ; \ export GNOCCHI_INDEXER_URL="postgresql:///template1?host=$$PG_MYTMPDIR&port=9823" ; \ export GNOCCHI_TEST_STORAGE_DRIVER=file ; \ echo "===> Testing with python$$i (python$$PYMAJOR)" ; \ -- GitLab From 6ca1cdabd16b61035781d67b88984367b0583cc5 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Tue, 27 Feb 2018 09:42:50 +0100 Subject: [PATCH 1281/1483] doc: document filter parameter All resource search payload can also be passed in the filter parameter into other format. This is missing from the documentation while Grafana have to use this format. So documents it --- doc/source/rest.j2 | 57 ++++++++++++++++++++++++++++++++++++++------ doc/source/rest.yaml | 32 +++++++++++++++++++++++++ gnocchi/gendoc.py | 4 ++++ 3 files changed, 86 insertions(+), 7 deletions(-) diff --git a/doc/source/rest.j2 b/doc/source/rest.j2 index 6a8eafcb..41cfcbe3 100644 --- a/doc/source/rest.j2 +++ b/doc/source/rest.j2 @@ -601,30 +601,48 @@ Gnocchi's search API supports to the ability to execute a query across |resources| or |metrics|. This API provides a language to construct more complex matching contraints beyond basic filtering. 
-Usage ------ +Usage and format +---------------- You can specify a time range to look for by specifying the `start` and/or `stop` query parameter, and the |aggregation method| to use by specifying the `aggregation` query parameter. +Query can be expressed in two formats: `JSON` or `STRING`. + The supported operators are: equal to (`=`, `==` or `eq`), lesser than (`<` or `lt`), greater than (`>` or `gt`), less than or equal to (`<=`, `le` or `≤`) greater than or equal to (`>=`, `ge` or `≥`) not equal to (`!=`, `ne` or `≠`), addition (`+` or `add`), substraction (`-` or `sub`), multiplication (`*`, -`mul` or `×`), division (`/`, `div` or `÷`). These operations take either one -argument, and in this case the second argument passed is the value, or it. +`mul` or `×`), division (`/`, `div` or `÷`). In JSON format, these operations +take only one argument, the second argument being automatically set to the +field value. In STRING format, this is just ` +` The operators or (`or` or `∨`), and (`and` or `∧`) and `not` are also -supported, and take a list of arguments as parameters. +supported. In JSON format, it takes a list of arguments as parameters. Using +STRING, the format is ` and/or ` or `not `. With the STRING format, parenthesis can be used to create group. + +An example of the JSON format:: + + ["and", + ["=", ["host", "example1"]], + ["like", ["owner", "admin-%"]], + ] + +And its STRING format equivalent:: + + host = "example1" or owner like "admin-%" .. _search-resource: Resource -------- -It's possible to search for |resources| using a query mechanism, using the -`POST` method and uploading a JSON formatted query. +It's possible to search for |resources| using a query mechanism by using the +`POST` method and uploading a JSON formatted query or by passing a +STRING a formatted query URL-encoded in the ``filter`` parameter. 
Single filter ~~~~~~~~~~~~~ @@ -638,6 +656,11 @@ Or even: {{ scenarios['search-resource-for-host-like']['doc'] }} +For the ``filter`` parameter version, the value is the URL-encoded version of +``{{ scenarios['search-resource-for-host-like-filter']['filter'] }}`` + +{{ scenarios['search-resource-for-host-like-filter']['doc'] }} + Multiple filters ~~~~~~~~~~~~~~~~ @@ -645,6 +668,12 @@ Complex operators such as `and` and `or` are also available: {{ scenarios['search-resource-for-user-after-timestamp']['doc'] }} +``filter`` version is +``{{ scenarios['search-resource-for-user-after-timestamp-filter']['filter'] }}`` +URL-encoded. + +{{ scenarios['search-resource-for-user-after-timestamp-filter']['doc'] }} + With details ~~~~~~~~~~~~ @@ -678,6 +707,14 @@ The timerange of the history can be set, too: {{ scenarios['search-resource-history-partial']['doc'] }} +This can be done with the ``filter`` parameter too: + +``{{ scenarios['search-resource-history-partial-filter']['filter'] }}`` + + +{{ scenarios['search-resource-history-partial-filter']['doc'] }} + + Magic ~~~~~ @@ -932,6 +969,12 @@ such as the one described in the :ref:`resource search API `. 
{{ scenarios['get-across-metrics-measures-by-attributes-lookup']['doc'] }} +Like for searching resource, the query +``{{ scenarios['get-across-metrics-measures-by-attributes-lookup-filter']['filter'] }}`` +can be passed in ``filter`` parameter + +{{ scenarios['get-across-metrics-measures-by-attributes-lookup-filter']['doc'] }} + It is possible to group the |resource| search results by any attribute of the requested |resource| type, and then compute the aggregation: diff --git a/doc/source/rest.yaml b/doc/source/rest.yaml index b54c56fc..1209e46f 100644 --- a/doc/source/rest.yaml +++ b/doc/source/rest.yaml @@ -387,6 +387,12 @@ {"like": {"host": "compute%"}} +- name: search-resource-for-host-like-filter + filter: host like "compute%" + request: | + POST /v1/search/resource/instance?filter={{ scenarios['search-resource-for-host-like-filter']['filter'] | urlencode }} HTTP/1.1 + Content-Type: application/json + - name: search-resource-for-user-details request: | POST /v1/search/resource/generic?details=true HTTP/1.1 @@ -411,6 +417,12 @@ {">=": {"started_at": "2010-01-01"}} ]} +- name: search-resource-for-user-after-timestamp-filter + filter: user_id = "{{ scenarios['create-resource-instance']['response'].json['user_id'] }}" and started_at >= "2010-01-01" + request: | + POST /v1/search/resource/instance?filter={{ scenarios['search-resource-for-user-after-timestamp-filter']['filter'] | urlencode }} HTTP/1.1 + Content-Type: application/json + - name: search-resource-lifespan request: | POST /v1/search/resource/instance HTTP/1.1 @@ -543,6 +555,20 @@ {"=": {"revision_end": null}}]} ]} +- name: search-resource-history-partial-filter + filter: host = 'compute1' and revision_start >= "{{ scenarios['get-instance']['response'].json['revision_start'] }}" and (revision_end <= "{{ scenarios['get-patched-instance']['response'].json['revision_start'] }}" or revision_end == null) + request: | + POST /v1/search/resource/instance?filter={{ 
scenarios['search-resource-history-partial-filter']['filter'] | urlencode }} HTTP/1.1 + Content-Type: application/json + Accept: application/json; history=true + + {"and": [ + {"=": {"host": "compute1"}}, + {">=": {"revision_start": "{{ scenarios['get-instance']['response'].json['revision_start'] }}"}}, + {"or": [{"<=": {"revision_end": "{{ scenarios['get-patched-instance']['response'].json['revision_start'] }}"}}, + {"=": {"revision_end": null}}]} + ]} + - name: create-resource-instance-with-metrics request: | POST /v1/resource/instance HTTP/1.1 @@ -761,6 +787,12 @@ {"=": {"server_group": "my_autoscaling_group"}} +- name: get-across-metrics-measures-by-attributes-lookup-filter + filter: server_group = "my_autoscaling_group" + request: | + POST /v1/aggregation/resource/instance/metric/cpu.util?start=2014-10-06T14:34&aggregation=mean&filter={{ scenarios['get-across-metrics-measures-by-attributes-lookup-filter']['filter'] | urlencode }} HTTP/1.1 + Content-Type: application/json + - name: get-across-metrics-measures-by-attributes-lookup-groupby request: | POST /v1/aggregation/resource/instance/metric/cpu.util?groupby=host&groupby=flavor_id HTTP/1.1 diff --git a/gnocchi/gendoc.py b/gnocchi/gendoc.py index f38ac32e..50a427b7 100644 --- a/gnocchi/gendoc.py +++ b/gnocchi/gendoc.py @@ -200,6 +200,10 @@ def setup(app): try: for entry in scenarios: + if 'filter' in entry: + entry['filter'] = jinja2.Template(entry['filter']).render( + scenarios=scenarios) + template = jinja2.Template(entry['request']) fake_file = six.moves.cStringIO() content = template.render(scenarios=scenarios) -- GitLab From a9baf1d73139549467015e2247470c7f3ce8f316 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Tue, 27 Feb 2018 17:33:42 +0100 Subject: [PATCH 1282/1483] doc: add more doc on pull-request and Pastamaker Closes #766 --- doc/source/contributing.rst | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/doc/source/contributing.rst b/doc/source/contributing.rst index 
4d076845..f58095bd 100644 --- a/doc/source/contributing.rst +++ b/doc/source/contributing.rst @@ -33,7 +33,29 @@ When opening a pull-request, make sure that: `git rebase --interactive` and/or `git commit --amend`. * We recommend using `git pull-request`_ to send your pull-requests. +All sent pull-requests are checked using `Travis-CI`_, which is in charge of +running the tests suites. There are different scenarios being run: `PEP 8`_ +compliance tests, upgrade tests, unit and functional tests. + +All pull-requests must be reviewed by `members of the Gnocchi project`_. + +When a pull-request is approved by at least two of the members and when +Travis-CI confirms that all the tests run fine, the patch will be merged. + +The Gnocchi project leverages `Pastamaker`_ in order to schedule the merge of +the different pull-requests. Pastamaker is in charge of making sure that the +pull-request is up-to-date with respect to the `master` branch and that the +tests pass. Pull-requests are always merged in a serialized manner in order to +make sure that no pull-request can break another one. + +`Gnocchi's Pastamaker dashboard`_ shows the current status of the merge queue. + .. _`git pull-request`: https://github.com/jd/git-pull-request +.. _`PEP 8`: https://www.python.org/dev/peps/pep-0008/ +.. _`Travis-CI`: http://travis-ci.org +.. _`members of the Gnocchi project`: https://github.com/orgs/gnocchixyz/people +.. _`Pastamaker`: https://github.com/sileht/pastamaker +.. _`Gnocchi's Pastamaker dashboard`: https://pastamaker.gnocchi.xyz Running the Tests -- GitLab From 5d15bf79cd4b47b5e0136b7ffd1ffc10836899e7 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Thu, 15 Feb 2018 09:59:26 +0100 Subject: [PATCH 1283/1483] storage: truncate the whole AggregatedTimeSerie, not every split There's no reason to truncate each split if we truncate the AggregatedTimeSerie once and for all before splitting it. 
--- gnocchi/carbonara.py | 9 ++++++--- gnocchi/storage/__init__.py | 16 ++++++---------- 2 files changed, 12 insertions(+), 13 deletions(-) diff --git a/gnocchi/carbonara.py b/gnocchi/carbonara.py index 7622463d..59382a66 100644 --- a/gnocchi/carbonara.py +++ b/gnocchi/carbonara.py @@ -580,16 +580,17 @@ class AggregatedTimeSerie(TimeSerie): :param oldest_point: Oldest point to keep from, this excluded. :type oldest_point: numpy.datetime64 or numpy.timedelta64 + :return: The oldest point that could have been kept. """ last = self.last if last is None: - # There's nothing to truncate return if isinstance(oldest_point, numpy.timedelta64): oldest_point = last - oldest_point index = numpy.searchsorted(self.ts['timestamps'], oldest_point, side='right') self.ts = self.ts[index:] + return oldest_point def split(self): # NOTE(sileht): We previously use groupby with @@ -694,12 +695,14 @@ class AggregatedTimeSerie(TimeSerie): def get_split_key(self, timestamp=None): """Return the split key for a particular timestamp. - :param timestamp: If None, the first timestamp of the timeserie + :param timestamp: If None, the first timestamp of the timeseries is used. - :return: A SplitKey object. + :return: A SplitKey object or None if the timeseries is empty. 
""" if timestamp is None: timestamp = self.first + if timestamp is None: + return return SplitKey.from_timestamp_and_sampling( timestamp, self.sampling) diff --git a/gnocchi/storage/__init__.py b/gnocchi/storage/__init__.py index 6667554b..2d187700 100644 --- a/gnocchi/storage/__init__.py +++ b/gnocchi/storage/__init__.py @@ -291,8 +291,7 @@ class StorageDriver(object): return ts def _store_timeserie_splits(self, metric, keys_and_splits, - aggregation, oldest_mutable_timestamp, - oldest_point_to_keep): + aggregation, oldest_mutable_timestamp): keys_to_rewrite = [] splits_to_rewrite = [] for key, split in six.iteritems(keys_and_splits): @@ -338,9 +337,6 @@ class StorageDriver(object): aggregation, key) continue - if oldest_point_to_keep is not None: - split.truncate(oldest_point_to_keep) - offset, data = split.serialize( key, compressed=key in keys_to_rewrite) key_data_offset.append((key, data, offset)) @@ -365,11 +361,11 @@ class StorageDriver(object): ) if aggregation.timespan: - oldest_point_to_keep = ts.last - aggregation.timespan - oldest_key_to_keep = ts.get_split_key(oldest_point_to_keep) + oldest_point_to_keep = ts.truncate(aggregation.timespan) else: oldest_point_to_keep = None - oldest_key_to_keep = None + + oldest_key_to_keep = ts.get_split_key(oldest_point_to_keep) keys_and_split_to_store = {} @@ -419,7 +415,7 @@ class StorageDriver(object): keys_and_split_to_store[key] = None for key, split in ts.split(): - if oldest_key_to_keep is None or key >= oldest_key_to_keep: + if key >= oldest_key_to_keep: LOG.debug( "Storing split %s (%s) for metric %s", key, aggregation.method, metric) @@ -427,7 +423,7 @@ class StorageDriver(object): self._store_timeserie_splits( metric, keys_and_split_to_store, aggregation.method, - oldest_mutable_timestamp, oldest_point_to_keep) + oldest_mutable_timestamp) @staticmethod def _delete_metric(metric): -- GitLab From 739c5bc84491b7b5f4d206a6851d8843c38ebf57 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Tue, 27 Feb 2018 
11:54:59 +0100 Subject: [PATCH 1284/1483] carbonara: use Aggregation object to create AggregatedTimeSerie This makes usage of the Aggregation object directly into Carbonara so it's attached to an AggregatedTimeSerie. --- gnocchi/aggregation.py | 22 ------- gnocchi/archive_policy.py | 8 +-- gnocchi/carbonara.py | 86 ++++++++++++++------------- gnocchi/rest/aggregates/operations.py | 4 +- gnocchi/rest/aggregates/processor.py | 10 ++-- gnocchi/storage/__init__.py | 18 +++--- gnocchi/tests/test_aggregates.py | 8 ++- gnocchi/tests/test_carbonara.py | 46 +++++--------- gnocchi/tests/test_storage.py | 3 +- 9 files changed, 88 insertions(+), 117 deletions(-) delete mode 100644 gnocchi/aggregation.py diff --git a/gnocchi/aggregation.py b/gnocchi/aggregation.py deleted file mode 100644 index a1004364..00000000 --- a/gnocchi/aggregation.py +++ /dev/null @@ -1,22 +0,0 @@ -# -*- encoding: utf-8 -*- -# -# Copyright © 2018 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-import collections - - -Aggregation = collections.namedtuple( - "Aggregation", - ["method", "granularity", "timespan"], -) diff --git a/gnocchi/archive_policy.py b/gnocchi/archive_policy.py index 40105139..e9277331 100644 --- a/gnocchi/archive_policy.py +++ b/gnocchi/archive_policy.py @@ -23,7 +23,7 @@ from oslo_config import cfg from oslo_config import types import six -from gnocchi import aggregation +from gnocchi import carbonara from gnocchi import utils @@ -94,7 +94,7 @@ class ArchivePolicy(object): # Find the timespan for d in self.definition: if d.granularity == granularity: - return aggregation.Aggregation( + return carbonara.Aggregation( method, d.granularity, d.timespan) def get_aggregations_for_method(self, method): @@ -104,13 +104,13 @@ class ArchivePolicy(object): :param method: Aggregation method. """ - return [aggregation.Aggregation(method, d.granularity, d.timespan) + return [carbonara.Aggregation(method, d.granularity, d.timespan) for d in sorted(self.definition, key=ATTRGETTER_GRANULARITY, reverse=True)] @property def aggregations(self): - return [aggregation.Aggregation(method, d.granularity, d.timespan) + return [carbonara.Aggregation(method, d.granularity, d.timespan) for d in sorted(self.definition, key=ATTRGETTER_GRANULARITY) for method in self.aggregation_methods] diff --git a/gnocchi/carbonara.py b/gnocchi/carbonara.py index 59382a66..35e631c7 100644 --- a/gnocchi/carbonara.py +++ b/gnocchi/carbonara.py @@ -1,6 +1,6 @@ # -*- encoding: utf-8 -*- # -# Copyright © 2016-2017 Red Hat, Inc. +# Copyright © 2016-2018 Red Hat, Inc. # Copyright © 2014-2015 eNovance # # Licensed under the Apache License, Version 2.0 (the "License"); you may @@ -16,6 +16,7 @@ # under the License. 
"""Time series data manipulation, better with pancetta.""" +import collections import functools import math import operator @@ -532,6 +533,12 @@ class SplitKey(object): self.sampling) +Aggregation = collections.namedtuple( + "Aggregation", + ["method", "granularity", "timespan"], +) + + class AggregatedTimeSerie(TimeSerie): _AGG_METHOD_PCT_RE = re.compile(r"([1-9][0-9]?)pct") @@ -540,7 +547,7 @@ class AggregatedTimeSerie(TimeSerie): COMPRESSED_SERIAL_LEN = struct.calcsize("" % ( + return "<%s 0x%x granularity=%s agg_method=%s>" % ( self.__class__.__name__, id(self), - self.sampling, - self.aggregation_method, + self.aggregation.granularity, + self.aggregation.method, ) @staticmethod @@ -690,7 +696,7 @@ class AggregatedTimeSerie(TimeSerie): y = index * key.sampling + key.key x = everything['v'][index] - return cls.from_data(key.sampling, agg_method, y, x) + return cls.from_data(Aggregation(agg_method, key.sampling, None), y, x) def get_split_key(self, timestamp=None): """Return the split key for a particular timestamp. @@ -704,7 +710,7 @@ class AggregatedTimeSerie(TimeSerie): if timestamp is None: return return SplitKey.from_timestamp_and_sampling( - timestamp, self.sampling) + timestamp, self.aggregation.granularity) def serialize(self, start, compressed=True): """Serialize an aggregated timeserie. 
@@ -721,7 +727,7 @@ class AggregatedTimeSerie(TimeSerie): :return: a tuple of (offset, data) """ - offset_div = self.sampling + offset_div = self.aggregation.granularity # calculate how many seconds from start the series runs until and # initialize list to store alternating delimiter, float entries if compressed: @@ -770,9 +776,9 @@ class AggregatedTimeSerie(TimeSerie): if from_timestamp is None: from_ = None else: - from_ = round_timestamp(from_timestamp, self.sampling) - return self.__class__(self.sampling, self.aggregation_method, - ts=self[from_:to_timestamp]) + from_ = round_timestamp(from_timestamp, + self.aggregation.granularity) + return self.__class__(self.aggregation, ts=self[from_:to_timestamp]) @classmethod def benchmark(cls): @@ -810,7 +816,8 @@ class AggregatedTimeSerie(TimeSerie): ]: print(title) serialize_times = 50 - ts = cls.from_data(sampling, 'mean', timestamps, values) + aggregation = Aggregation("mean", sampling, None) + ts = cls.from_data(aggregation, timestamps, values) t0 = time.time() key = ts.get_split_key() for i in six.moves.range(serialize_times): @@ -857,7 +864,7 @@ class AggregatedTimeSerie(TimeSerie): # NOTE(sileht): propose a new series with half overload timestamps pts = ts.ts.copy() - tsbis = cls(ts=pts, sampling=sampling, aggregation_method='mean') + tsbis = cls(ts=pts, aggregation=aggregation) tsbis.ts['timestamps'] = ( tsbis.timestamps - numpy.timedelta64( sampling * points / 2, 's') @@ -872,8 +879,7 @@ class AggregatedTimeSerie(TimeSerie): for agg in ['mean', 'sum', 'max', 'min', 'std', 'median', 'first', 'last', 'count', '5pct', '90pct']: serialize_times = 3 if agg.endswith('pct') else 10 - ts = cls(ts=pts, sampling=sampling, - aggregation_method=agg) + ts = cls(ts=pts, aggregation=aggregation) t0 = time.time() for i in six.moves.range(serialize_times): ts.resample(resample) diff --git a/gnocchi/rest/aggregates/operations.py b/gnocchi/rest/aggregates/operations.py index a090c253..ebb3cea7 100644 --- 
a/gnocchi/rest/aggregates/operations.py +++ b/gnocchi/rest/aggregates/operations.py @@ -185,7 +185,9 @@ def handle_resample(agg, granularity, timestamps, values, is_aggregated, new_values = None result_timestamps = timestamps for ts in values.T: - ts = carbonara.AggregatedTimeSerie.from_data(None, agg, timestamps, ts) + ts = carbonara.AggregatedTimeSerie.from_data( + carbonara.Aggregation(agg, None, None), + timestamps, ts) ts = ts.resample(sampling) result_timestamps = ts["timestamps"] if new_values is None: diff --git a/gnocchi/rest/aggregates/processor.py b/gnocchi/rest/aggregates/processor.py index 8765e3db..1f87385c 100644 --- a/gnocchi/rest/aggregates/processor.py +++ b/gnocchi/rest/aggregates/processor.py @@ -136,10 +136,12 @@ def aggregated(refs_and_timeseries, operations, from_timestamp=None, lookup_keys = collections.defaultdict(list) for (ref, timeserie) in refs_and_timeseries: from_ = (None if from_timestamp is None else - carbonara.round_timestamp(from_timestamp, timeserie.sampling)) - references[timeserie.sampling].append(ref) - lookup_keys[timeserie.sampling].append(ref.lookup_key) - series[timeserie.sampling].append(timeserie[from_:to_timestamp]) + carbonara.round_timestamp( + from_timestamp, timeserie.aggregation.granularity)) + references[timeserie.aggregation.granularity].append(ref) + lookup_keys[timeserie.aggregation.granularity].append(ref.lookup_key) + series[timeserie.aggregation.granularity].append( + timeserie[from_:to_timestamp]) result = [] is_aggregated = False diff --git a/gnocchi/storage/__init__.py b/gnocchi/storage/__init__.py index 2d187700..901c3bac 100644 --- a/gnocchi/storage/__init__.py +++ b/gnocchi/storage/__init__.py @@ -37,7 +37,6 @@ OPTS = [ LOG = daiquiri.getLogger(__name__) -ATTRGETTER_AGG_METHOD = operator.attrgetter("aggregation_method") ATTRGETTER_GRANULARITY = operator.attrgetter("granularity") @@ -223,12 +222,13 @@ class StorageDriver(object): return { aggmethod: list(itertools.chain( - *[[(timestamp, ts.sampling, 
value) + *[[(timestamp, ts.aggregation.granularity, value) for timestamp, value in ts.fetch(from_timestamp, to_timestamp)] for ts in aggts])) - for aggmethod, aggts in itertools.groupby(agg_timeseries, - ATTRGETTER_AGG_METHOD) + for aggmethod, aggts + in itertools.groupby(agg_timeseries, + lambda v: v.aggregation.method) } def _get_measures_and_unserialize(self, metric, keys, aggregation): @@ -255,9 +255,7 @@ class StorageDriver(object): all_keys = self._list_split_keys_for_metric( metric, aggregation.method, aggregation.granularity) except MetricDoesNotExist: - return carbonara.AggregatedTimeSerie( - sampling=aggregation.granularity, - aggregation_method=aggregation.method) + return carbonara.AggregatedTimeSerie(aggregation) if from_timestamp: from_timestamp = carbonara.SplitKey.from_timestamp_and_sampling( @@ -275,9 +273,7 @@ class StorageDriver(object): metric, keys, aggregation.method) ts = carbonara.AggregatedTimeSerie.from_timeseries( - sampling=aggregation.granularity, - aggregation_method=aggregation.method, - timeseries=timeseries) + timeseries, aggregation) # We need to truncate because: # - If the driver is not in WRITE_FULL mode, then it might read too # much data that will be deleted once the split is rewritten. 
Just @@ -347,7 +343,7 @@ class StorageDriver(object): previous_oldest_mutable_timestamp, oldest_mutable_timestamp): ts = carbonara.AggregatedTimeSerie.from_grouped_serie( - grouped_serie, aggregation.granularity, aggregation.method) + grouped_serie, aggregation) # Don't do anything if the timeserie is empty if not ts: diff --git a/gnocchi/tests/test_aggregates.py b/gnocchi/tests/test_aggregates.py index 6eda5b88..4e1a443e 100644 --- a/gnocchi/tests/test_aggregates.py +++ b/gnocchi/tests/test_aggregates.py @@ -53,7 +53,10 @@ class TestAggregatedTimeseries(base.BaseTestCase): agg_dict['return'] = ( processor.MetricReference(metric, "mean", resource), carbonara.AggregatedTimeSerie.from_grouped_serie( - grouped, agg_dict['sampling'], agg_dict['agg'])) + grouped, + carbonara.Aggregation(agg_dict['agg'], + agg_dict['sampling'], + None))) if existing: existing[2].merge(agg_dict['return'][2]) agg_dict['return'] = existing @@ -88,8 +91,7 @@ class TestAggregatedTimeseries(base.BaseTestCase): 'size': 50, 'agg': 'mean'} tsb1 = carbonara.BoundTimeSerie(block_size=tsc1['sampling']) tsc2 = carbonara.AggregatedTimeSerie( - sampling=numpy.timedelta64(60, 's'), - aggregation_method='mean') + carbonara.Aggregation('mean', numpy.timedelta64(60, 's'), None)) tsb1.set_values(numpy.array([(datetime64(2014, 1, 1, 12, 3, 0), 4)], dtype=carbonara.TIMESERIES_ARRAY_DTYPE), diff --git a/gnocchi/tests/test_carbonara.py b/gnocchi/tests/test_carbonara.py index 1b550596..a2b8f219 100644 --- a/gnocchi/tests/test_carbonara.py +++ b/gnocchi/tests/test_carbonara.py @@ -105,21 +105,6 @@ class TestBoundTimeSerie(base.BaseTestCase): class TestAggregatedTimeSerie(base.BaseTestCase): - @staticmethod - def test_base(): - carbonara.AggregatedTimeSerie.from_data( - 3, 'mean', - [datetime64(2014, 1, 1, 12, 0, 0), - datetime64(2014, 1, 1, 12, 0, 4), - datetime64(2014, 1, 1, 12, 0, 9)], - [3, 5, 6]) - carbonara.AggregatedTimeSerie.from_data( - "4s", 'mean', - [datetime64(2014, 1, 1, 12, 0, 0), - datetime64(2014, 
1, 1, 12, 0, 4), - datetime64(2014, 1, 1, 12, 0, 9)], - [3, 5, 6]) - def test_benchmark(self): self.useFixture(fixtures.Timeout(300, gentle=True)) carbonara.AggregatedTimeSerie.benchmark() @@ -129,9 +114,9 @@ class TestAggregatedTimeSerie(base.BaseTestCase): timestamps=[datetime64(2014, 1, 1, 12, 0, 0), datetime64(2014, 1, 1, 12, 0, 4), datetime64(2014, 1, 1, 12, 0, 9)], - aggregation_method='mean', values=[3, 5, 6], - sampling=numpy.timedelta64(1, 's')) + aggregation=carbonara.Aggregation( + "mean", numpy.timedelta64(1, 's'), None)) self.assertEqual( [(datetime64(2014, 1, 1, 12), 3), (datetime64(2014, 1, 1, 12, 0, 4), 5), @@ -167,11 +152,12 @@ class TestAggregatedTimeSerie(base.BaseTestCase): @staticmethod def _resample(ts, sampling, agg, derived=False): + aggregation = carbonara.Aggregation(agg, sampling, None) grouped = ts.group_serie(sampling) if derived: grouped = grouped.derived() return carbonara.AggregatedTimeSerie.from_grouped_serie( - grouped, sampling, agg) + grouped, aggregation) def test_derived_mean(self): ts = carbonara.TimeSerie.from_data( @@ -344,13 +330,14 @@ class TestAggregatedTimeSerie(base.BaseTestCase): ts[datetime64(2014, 1, 1, 12, 0, 0)][1]) def test_different_length_in_timestamps_and_data(self): - self.assertRaises(ValueError, - carbonara.AggregatedTimeSerie.from_data, - 3, 'mean', - [datetime64(2014, 1, 1, 12, 0, 0), - datetime64(2014, 1, 1, 12, 0, 4), - datetime64(2014, 1, 1, 12, 0, 9)], - [3, 5]) + self.assertRaises( + ValueError, + carbonara.AggregatedTimeSerie.from_data, + carbonara.Aggregation('mean', numpy.timedelta64(3, 's'), None), + [datetime64(2014, 1, 1, 12, 0, 0), + datetime64(2014, 1, 1, 12, 0, 4), + datetime64(2014, 1, 1, 12, 0, 9)], + [3, 5]) def test_truncate(self): ts = carbonara.TimeSerie.from_data( @@ -413,7 +400,8 @@ class TestAggregatedTimeSerie(base.BaseTestCase): grouped = ts.group_serie(agg_dict['sampling']) existing = agg_dict.get('return') agg_dict['return'] = carbonara.AggregatedTimeSerie.from_grouped_serie( - 
grouped, agg_dict['sampling'], agg_dict['agg']) + grouped, carbonara.Aggregation( + agg_dict['agg'], agg_dict['sampling'], None)) if existing: existing.merge(agg_dict['return']) agg_dict['return'] = existing @@ -554,7 +542,7 @@ class TestAggregatedTimeSerie(base.BaseTestCase): (datetime64(2014, 1, 1, 11, 48, 0, 800000), 4.5) ], list(ts['return'].fetch())) self.assertEqual(numpy.timedelta64(200000000, 'ns'), - ts['return'].sampling) + ts['return'].aggregation.granularity) def test_fetch_agg_std(self): # NOTE (gordc): this is a good test to ensure we drop NaN entries @@ -894,9 +882,7 @@ class TestAggregatedTimeSerie(base.BaseTestCase): self.assertEqual(agg, carbonara.AggregatedTimeSerie.from_timeseries( - split, - sampling=agg.sampling, - aggregation_method=agg.aggregation_method)) + split, aggregation=agg.aggregation)) def test_resample(self): ts = carbonara.TimeSerie.from_data( diff --git a/gnocchi/tests/test_storage.py b/gnocchi/tests/test_storage.py index 2a1f34ff..672ff1ed 100644 --- a/gnocchi/tests/test_storage.py +++ b/gnocchi/tests/test_storage.py @@ -20,7 +20,6 @@ import mock import numpy import six.moves -from gnocchi import aggregation as gaggregation from gnocchi import archive_policy from gnocchi import carbonara from gnocchi import incoming @@ -855,7 +854,7 @@ class TestStorageDriver(tests_base.TestCase): self.assertEqual({"mean": []}, self.storage.get_measures( self.metric, - [gaggregation.Aggregation( + [carbonara.Aggregation( "mean", numpy.timedelta64(42, 's'), None)])) def test_get_measure_unknown_aggregation(self): -- GitLab From 73d98e45351ab0f5a9b3ab95669b86ea51f8b564 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Wed, 21 Feb 2018 13:49:02 +0100 Subject: [PATCH 1285/1483] Add filter format in aggregation The filter= argument is missing from the aggregation API. This change adds it. 
--- gnocchi/rest/api.py | 7 ++++--- .../gabbits/resource-aggregation.yaml | 20 +++++++++++++++++++ ...aram-for-aggregation-f68c47c59ca81dc0.yaml | 5 +++++ 3 files changed, 29 insertions(+), 3 deletions(-) create mode 100644 releasenotes/notes/filter-param-for-aggregation-f68c47c59ca81dc0.yaml diff --git a/gnocchi/rest/api.py b/gnocchi/rest/api.py index 9e6c4b86..5f893f7b 100644 --- a/gnocchi/rest/api.py +++ b/gnocchi/rest/api.py @@ -1705,7 +1705,8 @@ class AggregationResourceController(rest.RestController): @pecan.expose('json') def post(self, start=None, stop=None, aggregation='mean', reaggregation=None, granularity=None, needed_overlap=100.0, - groupby=None, fill=None, refresh=False, resample=None): + groupby=None, fill=None, refresh=False, resample=None, + **kwargs): # First, set groupby in the right format: a sorted list of unique # strings. groupby = sorted(set(arg_to_list(groupby))) @@ -1714,7 +1715,8 @@ class AggregationResourceController(rest.RestController): # groups when using itertools.groupby later. 
try: resources = SearchResourceTypeController( - self.resource_type)._search(sort=groupby) + self.resource_type)._search(sort=groupby, + filter=kwargs.get("filter")) except indexer.InvalidPagination: abort(400, "Invalid groupby attribute") except indexer.IndexerException as e: @@ -1748,7 +1750,6 @@ class AggregationResourceController(rest.RestController): return results - FillSchema = voluptuous.Schema( voluptuous.Any(voluptuous.Coerce(float), "null", "dropna", msg="Must be a float, 'dropna' or 'null'")) diff --git a/gnocchi/tests/functional/gabbits/resource-aggregation.yaml b/gnocchi/tests/functional/gabbits/resource-aggregation.yaml index 0d6fa963..b301008b 100644 --- a/gnocchi/tests/functional/gabbits/resource-aggregation.yaml +++ b/gnocchi/tests/functional/gabbits/resource-aggregation.yaml @@ -97,6 +97,26 @@ tests: value: 45.41 status: 202 + - name: aggregate metric with groupby on project_id with filter + POST: /v1/aggregation/resource/generic/metric/cpu.util?groupby=project_id&filter=user_id%3D%276c865dd0-7945-4e08-8b27-d0d7f1c2b667%27 + poll: + count: 10 + delay: 1 + response_json_paths: + $: + - measures: + - ["2015-03-06T14:30:00+00:00", 300.0, 21.525] + - ["2015-03-06T14:33:57+00:00", 1.0, 33.05] + - ["2015-03-06T14:34:12+00:00", 1.0, 10.0] + group: + project_id: c7f32f1f-c5ef-427a-8ecd-915b219c66e8 + - measures: + - ["2015-03-06T14:30:00+00:00", 300.0, 137.70499999999998] + - ["2015-03-06T14:33:57+00:00", 1.0, 230.0] + - ["2015-03-06T14:34:12+00:00", 1.0, 45.41] + group: + project_id: ee4cfc41-1cdc-4d2f-9a08-f94111d80171 + - name: aggregate metric with groupby on project_id POST: /v1/aggregation/resource/generic/metric/cpu.util?groupby=project_id data: diff --git a/releasenotes/notes/filter-param-for-aggregation-f68c47c59ca81dc0.yaml b/releasenotes/notes/filter-param-for-aggregation-f68c47c59ca81dc0.yaml new file mode 100644 index 00000000..156833c0 --- /dev/null +++ b/releasenotes/notes/filter-param-for-aggregation-f68c47c59ca81dc0.yaml @@ -0,0 +1,5 @@ 
+--- +features: + - | + /v1/aggregation/resources endpoint can now take the STRING format in + `filter` parameter instead of the JSON format into the request payload. -- GitLab From dea077da2323d6da09e7429a188c1bb8c46ced08 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Mon, 12 Feb 2018 17:53:47 +0100 Subject: [PATCH 1286/1483] storage: list_split_keys can now list several aggregations at once This should help to limit the number of calls make to list_split_keys in the future. --- gnocchi/storage/__init__.py | 29 +++---- gnocchi/storage/ceph.py | 32 +++++-- gnocchi/storage/file.py | 51 ++++++++--- gnocchi/storage/redis.py | 47 +++++++--- gnocchi/storage/s3.py | 64 +++++++------- gnocchi/storage/swift.py | 39 ++++++--- gnocchi/tests/test_storage.py | 158 ++++++++++++++++++++-------------- 7 files changed, 265 insertions(+), 155 deletions(-) diff --git a/gnocchi/storage/__init__.py b/gnocchi/storage/__init__.py index 901c3bac..9b2f6ddc 100644 --- a/gnocchi/storage/__init__.py +++ b/gnocchi/storage/__init__.py @@ -14,7 +14,6 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. -import functools import itertools import operator @@ -179,21 +178,21 @@ class StorageDriver(object): """ raise NotImplementedError - def _list_split_keys_for_metric(self, metric, aggregation, granularity, - version=3): - return set(map( - functools.partial(carbonara.SplitKey, sampling=granularity), - (numpy.array( - list(self._list_split_keys( - metric, aggregation, granularity, version)), - dtype=numpy.float) * 10e8).astype('datetime64[ns]'))) - @staticmethod - def _list_split_keys(metric, aggregation, granularity, version=3): + def _list_split_keys(metric, aggregations, version=3): + """List split keys for a metric. + + :param metric: The metric to look key for. + :param aggregations: List of Aggregations to look for. + :param version: Storage engine format version. 
+ :return: A dict where keys are Aggregation objects and values are + a set of SplitKey objects. + """ raise NotImplementedError @staticmethod def _version_check(name, v): + """Validate object matches expected version. Version should be last attribute and start with 'v' @@ -252,8 +251,8 @@ class StorageDriver(object): def _get_measures_timeserie(self, metric, aggregation, from_timestamp=None, to_timestamp=None): try: - all_keys = self._list_split_keys_for_metric( - metric, aggregation.method, aggregation.granularity) + all_keys = self._list_split_keys( + metric, [aggregation])[aggregation] except MetricDoesNotExist: return carbonara.AggregatedTimeSerie(aggregation) @@ -374,8 +373,8 @@ class StorageDriver(object): # only cleanup if there is a new object, as there must be a new # object for an old object to be cleanup if previous_oldest_mutable_key != oldest_mutable_key: - existing_keys = sorted(self._list_split_keys_for_metric( - metric, aggregation.method, aggregation.granularity)) + existing_keys = sorted(self._list_split_keys( + metric, [aggregation])[aggregation]) # First, check for old splits to delete if aggregation.timespan: diff --git a/gnocchi/storage/ceph.py b/gnocchi/storage/ceph.py index 11ebc354..d8a324f7 100644 --- a/gnocchi/storage/ceph.py +++ b/gnocchi/storage/ceph.py @@ -14,9 +14,12 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
+import collections from oslo_config import cfg +import six +from gnocchi import carbonara from gnocchi.common import ceph from gnocchi import storage from gnocchi import utils @@ -153,7 +156,7 @@ class CephStorage(storage.StorageDriver): else: raise storage.MetricDoesNotExist(metric) - def _list_split_keys(self, metric, aggregation, granularity, version=3): + def _list_split_keys(self, metric, aggregations, version=3): with rados.ReadOpCtx() as op: omaps, ret = self.ioctx.get_omap_vals(op, "", "", -1) try: @@ -173,13 +176,26 @@ class CephStorage(storage.StorageDriver): except rados.ObjectNotFound: raise storage.MetricDoesNotExist(metric) - keys = set() - granularity = str(utils.timespan_total_seconds(granularity)) - for name, value in omaps: - meta = name.split('_') - if (aggregation == meta[3] and granularity == meta[4] - and self._version_check(name, version)): - keys.add(meta[2]) + raw_keys = [name.split("_") + for name, value in omaps + if self._version_check(name, version)] + keys = collections.defaultdict(set) + if not raw_keys: + return keys + zipped = list(zip(*raw_keys)) + k_timestamps = utils.to_timestamps(zipped[2]) + k_methods = zipped[3] + k_granularities = list(map(utils.to_timespan, zipped[4])) + + for timestamp, method, granularity in six.moves.zip( + k_timestamps, k_methods, k_granularities): + for aggregation in aggregations: + if (aggregation.method == method + and aggregation.granularity == granularity): + keys[aggregation].add(carbonara.SplitKey( + timestamp, + sampling=granularity)) + break return keys @staticmethod diff --git a/gnocchi/storage/file.py b/gnocchi/storage/file.py index 16dd5228..90cbf726 100644 --- a/gnocchi/storage/file.py +++ b/gnocchi/storage/file.py @@ -14,13 +14,18 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
+import collections import errno +import itertools +import operator import os import shutil import tempfile from oslo_config import cfg +import six +from gnocchi import carbonara from gnocchi import storage from gnocchi import utils @@ -31,6 +36,8 @@ OPTS = [ help='Path used to store gnocchi data files.'), ] +ATTRGETTER_METHOD = operator.attrgetter("method") + # Python 2 compatibility try: FileNotFoundError @@ -118,19 +125,37 @@ class FileStorage(storage.StorageDriver): except storage.MetricAlreadyExists: pass - def _list_split_keys(self, metric, aggregation, granularity, version=3): - try: - files = os.listdir(self._build_metric_path(metric, aggregation)) - except OSError as e: - if e.errno == errno.ENOENT: - raise storage.MetricDoesNotExist(metric) - raise - keys = set() - granularity = str(utils.timespan_total_seconds(granularity)) - for f in files: - meta = f.split("_") - if meta[1] == granularity and self._version_check(f, version): - keys.add(meta[0]) + def _list_split_keys(self, metric, aggregations, version=3): + keys = collections.defaultdict(set) + for method, grouped_aggregations in itertools.groupby( + sorted(aggregations, key=ATTRGETTER_METHOD), + ATTRGETTER_METHOD): + try: + files = os.listdir( + self._build_metric_path(metric, method)) + except OSError as e: + if e.errno == errno.ENOENT: + raise storage.MetricDoesNotExist(metric) + raise + raw_keys = list(map( + lambda k: k.split("_"), + filter( + lambda f: self._version_check(f, version), + files))) + if not raw_keys: + continue + zipped = list(zip(*raw_keys)) + k_timestamps = utils.to_timestamps(zipped[0]) + k_granularities = list(map(utils.to_timespan, zipped[1])) + grouped_aggregations = list(grouped_aggregations) + for timestamp, granularity in six.moves.zip( + k_timestamps, k_granularities): + for agg in grouped_aggregations: + if granularity == agg.granularity: + keys[agg].add(carbonara.SplitKey( + timestamp, + sampling=granularity)) + break return keys def _delete_metric_splits_unbatched( 
diff --git a/gnocchi/storage/redis.py b/gnocchi/storage/redis.py index 2d47c9c9..37c2adbe 100644 --- a/gnocchi/storage/redis.py +++ b/gnocchi/storage/redis.py @@ -15,6 +15,7 @@ # under the License. import six +from gnocchi import carbonara from gnocchi.common import redis from gnocchi import storage from gnocchi import utils @@ -32,22 +33,20 @@ class RedisStorage(storage.StorageDriver): local metric_key = KEYS[1] local ids = {} local cursor = 0 -local substring = "([^" .. ARGV[2] .. "]*)" +local substring = "([^%s]*)%s([^%s]*)%s([^%s]*)" repeat local result = redis.call("HSCAN", metric_key, cursor, "MATCH", ARGV[1]) cursor = tonumber(result[1]) for i, v in ipairs(result[2]) do -- Only return keys, not values - if i % 2 ~= 0 then - ids[#ids + 1] = v:gmatch(substring)() + if i %% 2 ~= 0 then + local timestamp, method, granularity = v:gmatch(substring)() + ids[#ids + 1] = {timestamp, method, granularity} end end until cursor == 0 -if #ids == 0 and redis.call("EXISTS", metric_key) == 0 then - return -1 -end return ids -""", +""" % (FIELD_SEP, FIELD_SEP, FIELD_SEP, FIELD_SEP, FIELD_SEP), "get_measures": """ local results = redis.call("HMGET", KEYS[1], unpack(ARGV)) local final = {} @@ -111,14 +110,36 @@ return {0, final} } return ts - def _list_split_keys(self, metric, aggregation, granularity, version=3): + def _list_split_keys(self, metric, aggregations, version=3): key = self._metric_key(metric) - split_keys = self._scripts["list_split_keys"]( - keys=[key], args=[self._aggregated_field_for_split( - aggregation, '*', version, granularity), self.FIELD_SEP]) - if split_keys == -1: + pipe = self._client.pipeline(transaction=False) + pipe.exists(key) + for aggregation in aggregations: + self._scripts["list_split_keys"]( + keys=[key], args=[self._aggregated_field_for_split( + aggregation.method, "*", + version, aggregation.granularity)], + client=pipe, + ) + results = pipe.execute() + metric_exists_p = results.pop(0) + if not metric_exists_p: raise 
storage.MetricDoesNotExist(metric) - return set(split_keys) + keys = {} + for aggregation, k in six.moves.zip(aggregations, results): + if not k: + keys[aggregation] = set() + continue + timestamps, methods, granularities = list(zip(*k)) + timestamps = utils.to_timestamps(timestamps) + granularities = map(utils.to_timespan, granularities) + keys[aggregation] = { + carbonara.SplitKey(timestamp, + sampling=granularity) + for timestamp, granularity + in six.moves.zip(timestamps, granularities) + } + return keys def _delete_metric_splits(self, metric, keys, aggregation, version=3): metric_key = self._metric_key(metric) diff --git a/gnocchi/storage/s3.py b/gnocchi/storage/s3.py index 969146fb..e39b7c63 100644 --- a/gnocchi/storage/s3.py +++ b/gnocchi/storage/s3.py @@ -18,6 +18,7 @@ import os from oslo_config import cfg import tenacity +from gnocchi import carbonara from gnocchi.common import s3 from gnocchi import storage from gnocchi import utils @@ -176,37 +177,42 @@ class S3Storage(storage.StorageDriver): raise return response['Body'].read() - def _list_split_keys(self, metric, aggregation, granularity, version=3): + def _list_split_keys(self, metric, aggregations, version=3): bucket = self._bucket_name - keys = set() - response = {} - while response.get('IsTruncated', True): - if 'NextContinuationToken' in response: - kwargs = { - 'ContinuationToken': response['NextContinuationToken'] - } - else: - kwargs = {} - try: - response = self.s3.list_objects_v2( - Bucket=bucket, - Prefix=self._prefix(metric) + '%s_%s' % ( - aggregation, - utils.timespan_total_seconds(granularity), - ), - **kwargs) - except botocore.exceptions.ClientError as e: - if e.response['Error'].get('Code') == "NoSuchKey": - raise storage.MetricDoesNotExist(metric) - raise - for f in response.get('Contents', ()): + keys = {} + for aggregation in aggregations: + keys[aggregation] = set() + response = {} + while response.get('IsTruncated', True): + if 'NextContinuationToken' in response: + kwargs = { + 
'ContinuationToken': response['NextContinuationToken'] + } + else: + kwargs = {} try: - meta = f['Key'].split('_') - if (self._version_check(f['Key'], version)): - keys.add(meta[2]) - except (ValueError, IndexError): - # Might be "none", or any other file. Be resilient. - continue + response = self.s3.list_objects_v2( + Bucket=bucket, + Prefix=self._prefix(metric) + '%s_%s' % ( + aggregation.method, + utils.timespan_total_seconds( + aggregation.granularity), + ), + **kwargs) + except botocore.exceptions.ClientError as e: + if e.response['Error'].get('Code') == "NoSuchKey": + raise storage.MetricDoesNotExist(metric) + raise + for f in response.get('Contents', ()): + try: + if (self._version_check(f['Key'], version)): + meta = f['Key'].split('_') + keys[aggregation].add(carbonara.SplitKey( + utils.to_timestamp(meta[2]), + sampling=aggregation.granularity)) + except (ValueError, IndexError): + # Might be "none", or any other file. Be resilient. + continue return keys @staticmethod diff --git a/gnocchi/storage/swift.py b/gnocchi/storage/swift.py index bc34cff1..deb1efe1 100644 --- a/gnocchi/storage/swift.py +++ b/gnocchi/storage/swift.py @@ -14,9 +14,12 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
+import collections from oslo_config import cfg +import six +from gnocchi import carbonara from gnocchi.common import swift from gnocchi import storage from gnocchi import utils @@ -163,7 +166,7 @@ class SwiftStorage(storage.StorageDriver): raise return contents - def _list_split_keys(self, metric, aggregation, granularity, version=3): + def _list_split_keys(self, metric, aggregations, version=3): container = self._container_name(metric) try: headers, files = self.swift.get_container( @@ -172,17 +175,29 @@ class SwiftStorage(storage.StorageDriver): if e.http_status == 404: raise storage.MetricDoesNotExist(metric) raise - keys = set() - granularity = str(utils.timespan_total_seconds(granularity)) - for f in files: - try: - meta = f['name'].split('_') - if (aggregation == meta[1] and granularity == meta[2] - and self._version_check(f['name'], version)): - keys.add(meta[0]) - except (ValueError, IndexError): - # Might be "none", or any other file. Be resilient. - continue + + raw_keys = list(map( + lambda k: k.split("_"), + (f['name'] for f in files + if self._version_check(f['name'], version) + and not f['name'].startswith('none')))) + keys = collections.defaultdict(set) + if not raw_keys: + return keys + zipped = list(zip(*raw_keys)) + k_timestamps = utils.to_timestamps(zipped[0]) + k_methods = zipped[1] + k_granularities = list(map(utils.to_timespan, zipped[2])) + + for timestamp, method, granularity in six.moves.zip( + k_timestamps, k_methods, k_granularities): + for aggregation in aggregations: + if (aggregation.method == method + and aggregation.granularity == granularity): + keys[aggregation].add(carbonara.SplitKey( + timestamp, + sampling=granularity)) + break return keys @staticmethod diff --git a/gnocchi/tests/test_storage.py b/gnocchi/tests/test_storage.py index 672ff1ed..48dae3a7 100644 --- a/gnocchi/tests/test_storage.py +++ b/gnocchi/tests/test_storage.py @@ -276,21 +276,29 @@ class TestStorageDriver(tests_base.TestCase): (datetime64(2015, 1, 1, 12), 
numpy.timedelta64(5, 'm'), 69), ]}, self.storage.get_measures(self.metric, aggregations)) + agg = self.metric.archive_policy.get_aggregation( + "mean", numpy.timedelta64(1, 'D')) self.assertEqual({ - carbonara.SplitKey(numpy.datetime64(1244160000, 's'), - numpy.timedelta64(1, 'D')), - }, self.storage._list_split_keys_for_metric( - self.metric, "mean", numpy.timedelta64(1, 'D'))) + agg: {carbonara.SplitKey(numpy.datetime64(1244160000, 's'), + numpy.timedelta64(1, 'D'))}, + }, self.storage._list_split_keys( + self.metric, [agg])) + agg = self.metric.archive_policy.get_aggregation( + "mean", numpy.timedelta64(1, 'h')) self.assertEqual({ - carbonara.SplitKey(numpy.datetime64(1412640000, 's'), - numpy.timedelta64(1, 'h')), - }, self.storage._list_split_keys_for_metric( - self.metric, "mean", numpy.timedelta64(1, 'h'))) + agg: {carbonara.SplitKey(numpy.datetime64(1412640000, 's'), + numpy.timedelta64(1, 'h'))}, + }, self.storage._list_split_keys( + self.metric, [agg], + )) + agg = self.metric.archive_policy.get_aggregation( + "mean", numpy.timedelta64(5, 'm')) self.assertEqual({ - carbonara.SplitKey(numpy.datetime64(1419120000, 's'), - numpy.timedelta64(5, 'm')), - }, self.storage._list_split_keys_for_metric( - self.metric, "mean", numpy.timedelta64(5, 'm'))) + agg: {carbonara.SplitKey(numpy.datetime64(1419120000, 's'), + numpy.timedelta64(5, 'm'))}, + }, self.storage._list_split_keys( + self.metric, [agg], + )) def test_rewrite_measures(self): # Create an archive policy that spans on several splits. 
Each split @@ -311,15 +319,18 @@ class TestStorageDriver(tests_base.TestCase): ]) self.trigger_processing() + agg = self.metric.archive_policy.get_aggregation( + "mean", numpy.timedelta64(1, 'm')) self.assertEqual({ - carbonara.SplitKey(numpy.datetime64(1451520000, 's'), - numpy.timedelta64(1, 'm')), - carbonara.SplitKey(numpy.datetime64(1451736000, 's'), - numpy.timedelta64(1, 'm')), - carbonara.SplitKey(numpy.datetime64(1451952000, 's'), - numpy.timedelta64(1, 'm')), - }, self.storage._list_split_keys_for_metric( - self.metric, "mean", numpy.timedelta64(1, 'm'))) + agg: { + carbonara.SplitKey(numpy.datetime64(1451520000, 's'), + numpy.timedelta64(1, 'm')), + carbonara.SplitKey(numpy.datetime64(1451736000, 's'), + numpy.timedelta64(1, 'm')), + carbonara.SplitKey(numpy.datetime64(1451952000, 's'), + numpy.timedelta64(1, 'm')), + }, + }, self.storage._list_split_keys(self.metric, [agg])) if self.storage.WRITE_FULL: assertCompressedIfWriteFull = self.assertTrue @@ -366,17 +377,20 @@ class TestStorageDriver(tests_base.TestCase): ]) self.trigger_processing() + agg = self.metric.archive_policy.get_aggregation( + "mean", numpy.timedelta64(1, 'm')) self.assertEqual({ - carbonara.SplitKey(numpy.datetime64(1452384000, 's'), - numpy.timedelta64(1, 'm')), - carbonara.SplitKey(numpy.datetime64(1451736000, 's'), - numpy.timedelta64(1, 'm')), - carbonara.SplitKey(numpy.datetime64(1451520000, 's'), - numpy.timedelta64(1, 'm')), - carbonara.SplitKey(numpy.datetime64(1451952000, 's'), - numpy.timedelta64(1, 'm')), - }, self.storage._list_split_keys_for_metric( - self.metric, "mean", numpy.timedelta64(1, 'm'))) + agg: { + carbonara.SplitKey(numpy.datetime64(1452384000, 's'), + numpy.timedelta64(1, 'm')), + carbonara.SplitKey(numpy.datetime64(1451736000, 's'), + numpy.timedelta64(1, 'm')), + carbonara.SplitKey(numpy.datetime64(1451520000, 's'), + numpy.timedelta64(1, 'm')), + carbonara.SplitKey(numpy.datetime64(1451952000, 's'), + numpy.timedelta64(1, 'm')), + }, + }, 
self.storage._list_split_keys(self.metric, [agg])) data = self.storage._get_measures( self.metric, [carbonara.SplitKey( numpy.datetime64(1451520000, 's'), @@ -433,15 +447,18 @@ class TestStorageDriver(tests_base.TestCase): ]) self.trigger_processing() + agg = self.metric.archive_policy.get_aggregation( + "mean", numpy.timedelta64(1, 'm')) self.assertEqual({ - carbonara.SplitKey(numpy.datetime64(1451520000, 's'), - numpy.timedelta64(1, 'm')), - carbonara.SplitKey(numpy.datetime64(1451736000, 's'), - numpy.timedelta64(1, 'm')), - carbonara.SplitKey(numpy.datetime64(1451952000, 's'), - numpy.timedelta64(1, 'm')), - }, self.storage._list_split_keys_for_metric( - self.metric, "mean", numpy.timedelta64(1, 'm'))) + agg: { + carbonara.SplitKey(numpy.datetime64(1451520000, 's'), + numpy.timedelta64(1, 'm')), + carbonara.SplitKey(numpy.datetime64(1451736000, 's'), + numpy.timedelta64(1, 'm')), + carbonara.SplitKey(numpy.datetime64(1451952000, 's'), + numpy.timedelta64(1, 'm')), + }, + }, self.storage._list_split_keys(self.metric, [agg])) if self.storage.WRITE_FULL: assertCompressedIfWriteFull = self.assertTrue @@ -490,17 +507,20 @@ class TestStorageDriver(tests_base.TestCase): ]) self.trigger_processing() + agg = self.metric.archive_policy.get_aggregation( + "mean", numpy.timedelta64(1, 'm')) self.assertEqual({ - carbonara.SplitKey(numpy.datetime64('2016-01-10T00:00:00'), - numpy.timedelta64(1, 'm')), - carbonara.SplitKey(numpy.datetime64('2016-01-02T12:00:00'), - numpy.timedelta64(1, 'm')), - carbonara.SplitKey(numpy.datetime64('2015-12-31T00:00:00'), - numpy.timedelta64(1, 'm')), - carbonara.SplitKey(numpy.datetime64('2016-01-05T00:00:00'), - numpy.timedelta64(1, 'm')), - }, self.storage._list_split_keys_for_metric( - self.metric, "mean", numpy.timedelta64(1, 'm'))) + agg: { + carbonara.SplitKey(numpy.datetime64('2016-01-10T00:00:00'), + numpy.timedelta64(1, 'm')), + carbonara.SplitKey(numpy.datetime64('2016-01-02T12:00:00'), + numpy.timedelta64(1, 'm')), + 
carbonara.SplitKey(numpy.datetime64('2015-12-31T00:00:00'), + numpy.timedelta64(1, 'm')), + carbonara.SplitKey(numpy.datetime64('2016-01-05T00:00:00'), + numpy.timedelta64(1, 'm')), + } + }, self.storage._list_split_keys(self.metric, [agg])) data = self.storage._get_measures( self.metric, [carbonara.SplitKey( numpy.datetime64(1451520000, 's'), @@ -555,16 +575,21 @@ class TestStorageDriver(tests_base.TestCase): ]) self.trigger_processing() + agg = self.metric.archive_policy.get_aggregation( + "mean", numpy.timedelta64(1, 'm')) self.assertEqual({ - carbonara.SplitKey(numpy.datetime64('2015-12-31T00:00:00'), - numpy.timedelta64(1, 'm')), - carbonara.SplitKey(numpy.datetime64('2016-01-02T12:00:00'), - numpy.timedelta64(1, 'm')), - carbonara.SplitKey(numpy.datetime64('2016-01-05T00:00:00'), - numpy.timedelta64(1, 'm')), - }, self.storage._list_split_keys_for_metric( - self.metric, "mean", numpy.timedelta64(1, 'm'))) - + agg: { + carbonara.SplitKey(numpy.datetime64('2015-12-31T00:00:00'), + numpy.timedelta64(1, 'm')), + carbonara.SplitKey(numpy.datetime64('2016-01-02T12:00:00'), + numpy.timedelta64(1, 'm')), + carbonara.SplitKey(numpy.datetime64('2016-01-05T00:00:00'), + numpy.timedelta64(1, 'm')), + }, + }, self.storage._list_split_keys( + self.metric, + [agg], + )) if self.storage.WRITE_FULL: assertCompressedIfWriteFull = self.assertTrue else: @@ -642,15 +667,18 @@ class TestStorageDriver(tests_base.TestCase): ]) self.trigger_processing() + agg = self.metric.archive_policy.get_aggregation( + "mean", numpy.timedelta64(1, 'm')) self.assertEqual({ - carbonara.SplitKey(numpy.datetime64(1451520000, 's'), - numpy.timedelta64(1, 'm')), - carbonara.SplitKey(numpy.datetime64(1451736000, 's'), - numpy.timedelta64(1, 'm')), - carbonara.SplitKey(numpy.datetime64(1451952000, 's'), - numpy.timedelta64(1, 'm')), - }, self.storage._list_split_keys_for_metric( - self.metric, "mean", numpy.timedelta64(1, 'm'))) + agg: { + carbonara.SplitKey(numpy.datetime64(1451520000, 's'), + 
numpy.timedelta64(1, 'm')), + carbonara.SplitKey(numpy.datetime64(1451736000, 's'), + numpy.timedelta64(1, 'm')), + carbonara.SplitKey(numpy.datetime64(1451952000, 's'), + numpy.timedelta64(1, 'm')), + }, + }, self.storage._list_split_keys(self.metric, [agg])) if self.storage.WRITE_FULL: assertCompressedIfWriteFull = self.assertTrue -- GitLab From 6acccdb5fcb7d47a052917e5daf9639a184b4c44 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Thu, 1 Mar 2018 08:00:34 +0100 Subject: [PATCH 1287/1483] doc: Update setuptools for gnocchi.xyz --- tox.ini | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tox.ini b/tox.ini index 3a678bd4..014b867a 100644 --- a/tox.ini +++ b/tox.ini @@ -143,7 +143,9 @@ basepython = python2.7 whitelist_externals = bash rm setenv = GNOCCHI_STORAGE_DEPS=file GNOCCHI_TEST_DEBUG=1 -deps = {[testenv:docs]deps} +deps = -U + setuptools>=22.0 + {[testenv:docs]deps} sphinxcontrib-versioning # for <= 4.2 doc scipy -- GitLab From 88c612873a81dadffc2969c511294fc462edb645 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Thu, 1 Mar 2018 10:30:39 +0100 Subject: [PATCH 1288/1483] Set minimal setuptools version We use environment marker in requirement so we need at least setuptools 20.6.8. This must be a setup_requires because pbr use setuptools to read the requirement. 
--- setup.py | 2 +- tox.ini | 4 +--- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/setup.py b/setup.py index b96f524b..d1a140a8 100755 --- a/setup.py +++ b/setup.py @@ -17,5 +17,5 @@ import setuptools setuptools.setup( - setup_requires=['pbr'], + setup_requires=['pbr', 'setuptools>=20.6.8'], pbr=True) diff --git a/tox.ini b/tox.ini index 014b867a..3a678bd4 100644 --- a/tox.ini +++ b/tox.ini @@ -143,9 +143,7 @@ basepython = python2.7 whitelist_externals = bash rm setenv = GNOCCHI_STORAGE_DEPS=file GNOCCHI_TEST_DEBUG=1 -deps = -U - setuptools>=22.0 - {[testenv:docs]deps} +deps = {[testenv:docs]deps} sphinxcontrib-versioning # for <= 4.2 doc scipy -- GitLab From 7b371170f86ba1d56cf4d62fffa32a9fd2f5ad4d Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Thu, 1 Mar 2018 14:31:59 +0100 Subject: [PATCH 1289/1483] doc: don't build sdist By default tox built a sdist with the python OS packages. We already skip install/develop phase and manually use "pip install -e" The sdist is useless and skipping it allow to run tox on old OS that doesn't have a recent setuptools. 
--- tox.ini | 1 + 1 file changed, 1 insertion(+) diff --git a/tox.ini b/tox.ini index 3a678bd4..afac9dc4 100644 --- a/tox.ini +++ b/tox.ini @@ -1,6 +1,7 @@ [tox] minversion = 2.4 envlist = py{35,27}-{postgresql,mysql}{,-file,-swift,-ceph,-s3},pep8 +skipsdist = True [testenv] skip_install = True -- GitLab From 13a726e2047c9a570741572edb9bfc041c7a47e3 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Thu, 1 Mar 2018 08:23:57 +0100 Subject: [PATCH 1290/1483] tests: Ensure signal are propagated to subshell Closes #773 --- run-tests.sh | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/run-tests.sh b/run-tests.sh index 5f16b78c..320198ae 100755 --- a/run-tests.sh +++ b/run-tests.sh @@ -1,5 +1,17 @@ #!/bin/bash -x set -e + +# NOTE(sileht): Enable bash process tracking and send sigterm to the whole +# process group + +cleanup(){ + for PID in $PIDS; do + PGID=$(ps -o pgid "$PID" | grep [0-9] | tr -d ' ') + kill -- -$PGID + done +} +trap cleanup EXIT + PIDS="" GNOCCHI_TEST_STORAGE_DRIVERS=${GNOCCHI_TEST_STORAGE_DRIVERS:-file} GNOCCHI_TEST_INDEXER_DRIVERS=${GNOCCHI_TEST_INDEXER_DRIVERS:-postgresql} -- GitLab From 5549a91a16aa439a2e3ee29e0d5e21cd84a9c925 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Thu, 1 Mar 2018 14:08:31 +0100 Subject: [PATCH 1291/1483] storage/file: fix new incoming measures deletion The current code only delete one metric files :( --- gnocchi/incoming/file.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/gnocchi/incoming/file.py b/gnocchi/incoming/file.py index 43d314be..40b81255 100644 --- a/gnocchi/incoming/file.py +++ b/gnocchi/incoming/file.py @@ -163,8 +163,10 @@ class FileStorage(incoming.IncomingDriver): @contextlib.contextmanager def process_measure_for_metrics(self, metric_ids): measures = {} + processed_files = {} for metric_id in metric_ids: files = self._list_measures_container_for_metric(metric_id) + processed_files[metric_id] = files m = self._make_measures_array() for f in files: abspath = 
self._build_measure_path(metric_id, f) @@ -175,5 +177,5 @@ class FileStorage(incoming.IncomingDriver): yield measures - for metric_id in metric_ids: + for metric_id, files in six.iteritems(processed_files): self._delete_measures_files_for_metric(metric_id, files) -- GitLab From fab43ba22a42332941ee154423a3bf85cd555e14 Mon Sep 17 00:00:00 2001 From: gord chung Date: Fri, 2 Mar 2018 14:01:48 +0000 Subject: [PATCH 1292/1483] expand test to validate measures are deleted Related to #784 --- gnocchi/tests/test_storage.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/gnocchi/tests/test_storage.py b/gnocchi/tests/test_storage.py index 48dae3a7..d0a926b8 100644 --- a/gnocchi/tests/test_storage.py +++ b/gnocchi/tests/test_storage.py @@ -125,9 +125,14 @@ class TestStorageDriver(tests_base.TestCase): self.incoming.add_measures(self.metric.id, [ incoming.Measure(datetime64(2014, 1, 1, 12, 0, 1), 69), ]) + m2, __ = self._create_metric('medium') + self.incoming.add_measures(m2.id, [ + incoming.Measure(datetime64(2014, 1, 1, 12, 0, 1), 69), + ]) metrics = tests_utils.list_all_incoming_metrics(self.incoming) - self.assertEqual(set([str(self.metric.id)]), metrics) - self.trigger_processing() + m_list = [str(self.metric.id), str(m2.id)] + self.assertEqual(set(m_list), metrics) + self.trigger_processing(m_list) metrics = tests_utils.list_all_incoming_metrics(self.incoming) self.assertEqual(set([]), metrics) -- GitLab From a840b897a6171a5f7bff050beeddec6205d1b9fd Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Tue, 13 Feb 2018 09:01:10 +0100 Subject: [PATCH 1293/1483] storage: do not return empty data set on non-existing metric This decision is now available for the upper layer. This patch also fixes the S3 driver which was not checking properly for metric existence. Listing objects never returns a NoSuchKey error, at worse it returns an empty content list. 
We therefore need an extra check with a HEAD request on the unaggregated timeseries to be sure that the metric exists. It therefore needs to be created at _get_or_create_unaggregated_timeseries_unbatched() like the other drivers do. This was not caught earlier because of the return empty by default mechanism, but it is correctly tested. --- gnocchi/rest/aggregates/processor.py | 15 ++++--- gnocchi/rest/api.py | 16 +++++--- gnocchi/storage/__init__.py | 13 +++---- gnocchi/storage/s3.py | 58 ++++++++++++++++------------ gnocchi/tests/test_storage.py | 25 ++++++------ 5 files changed, 73 insertions(+), 54 deletions(-) diff --git a/gnocchi/rest/aggregates/processor.py b/gnocchi/rest/aggregates/processor.py index 1f87385c..6e0ef14c 100644 --- a/gnocchi/rest/aggregates/processor.py +++ b/gnocchi/rest/aggregates/processor.py @@ -52,11 +52,16 @@ class MetricReference(object): def _get_measures_timeserie(storage, ref, granularity, *args, **kwargs): - return (ref, storage._get_measures_timeserie( - ref.metric, - ref.metric.archive_policy.get_aggregation( - ref.aggregation, granularity), - *args, **kwargs)) + try: + data = storage._get_measures_timeserie( + ref.metric, + ref.metric.archive_policy.get_aggregation( + ref.aggregation, granularity), + *args, **kwargs) + except gnocchi_storage.MetricDoesNotExist: + data = carbonara.AggregatedTimeSerie( + carbonara.Aggregation(ref.aggregation, granularity, None)) + return (ref, data) def get_measures(storage, references, operations, diff --git a/gnocchi/rest/api.py b/gnocchi/rest/api.py index 5f893f7b..cee5ef7d 100644 --- a/gnocchi/rest/api.py +++ b/gnocchi/rest/api.py @@ -537,9 +537,10 @@ class MetricController(rest.RestController): try: return pecan.request.storage.get_measures( self.metric, aggregations, start, stop, resample)[aggregation] - except (storage.MetricDoesNotExist, - storage.AggregationDoesNotExist) as e: + except storage.AggregationDoesNotExist as e: abort(404, six.text_type(e)) + except storage.MetricDoesNotExist: 
+ return [] @pecan.expose() def delete(self): @@ -1918,8 +1919,12 @@ class AggregationController(rest.RestController): "aggregation_method": aggregation, }, }) - return pecan.request.storage.get_measures( - metric, aggregations, start, stop, resample)[aggregation] + try: + return pecan.request.storage.get_measures( + metric, aggregations, start, stop, resample + )[aggregation] + except storage.MetricDoesNotExist: + return [] return processor.get_measures( pecan.request.storage, [processor.MetricReference(m, aggregation) for m in metrics], @@ -1927,8 +1932,7 @@ class AggregationController(rest.RestController): granularity, needed_overlap, fill)["aggregated"] except exceptions.UnAggregableTimeseries as e: abort(400, e) - except (storage.MetricDoesNotExist, - storage.AggregationDoesNotExist) as e: + except storage.AggregationDoesNotExist as e: abort(404, six.text_type(e)) MetricIDsSchema = [utils.UUID] diff --git a/gnocchi/storage/__init__.py b/gnocchi/storage/__init__.py index 9b2f6ddc..d48de66f 100644 --- a/gnocchi/storage/__init__.py +++ b/gnocchi/storage/__init__.py @@ -250,11 +250,7 @@ class StorageDriver(object): def _get_measures_timeserie(self, metric, aggregation, from_timestamp=None, to_timestamp=None): - try: - all_keys = self._list_split_keys( - metric, [aggregation])[aggregation] - except MetricDoesNotExist: - return carbonara.AggregatedTimeSerie(aggregation) + all_keys = self._list_split_keys(metric, [aggregation])[aggregation] if from_timestamp: from_timestamp = carbonara.SplitKey.from_timestamp_and_sampling( @@ -534,8 +530,11 @@ class StorageDriver(object): if agg is None: raise AggregationDoesNotExist(metric, aggregation, granularity) - timeserie = self._get_measures_timeserie( - metric, agg, from_timestamp, to_timestamp) + try: + timeserie = self._get_measures_timeserie( + metric, agg, from_timestamp, to_timestamp) + except MetricDoesNotExist: + return [] values = timeserie.fetch(from_timestamp, to_timestamp) return [(timestamp, granularity, value) 
for timestamp, value in values diff --git a/gnocchi/storage/s3.py b/gnocchi/storage/s3.py index e39b7c63..2b5c54ef 100644 --- a/gnocchi/storage/s3.py +++ b/gnocchi/storage/s3.py @@ -165,18 +165,23 @@ class S3Storage(storage.StorageDriver): key, aggregation, version)) except botocore.exceptions.ClientError as e: if e.response['Error'].get('Code') == 'NoSuchKey': - try: - response = self.s3.list_objects_v2( - Bucket=self._bucket_name, Prefix=self._prefix(metric)) - except botocore.exceptions.ClientError as e: - if e.response['Error'].get('Code') == 'NoSuchKey': - raise storage.MetricDoesNotExist(metric) - raise - raise storage.AggregationDoesNotExist( - metric, aggregation, key.sampling) + if self._metric_exists_p(metric, version): + raise storage.AggregationDoesNotExist( + metric, aggregation, key.sampling) + raise storage.MetricDoesNotExist(metric) raise return response['Body'].read() + def _metric_exists_p(self, metric, version): + unaggkey = self._build_unaggregated_timeserie_path(metric, version) + try: + self.s3.head_object(Bucket=self._bucket_name, Key=unaggkey) + except botocore.exceptions.ClientError as e: + if e.response['Error'].get('Code') == "404": + return False + raise + return True + def _list_split_keys(self, metric, aggregations, version=3): bucket = self._bucket_name keys = {} @@ -190,20 +195,19 @@ class S3Storage(storage.StorageDriver): } else: kwargs = {} - try: - response = self.s3.list_objects_v2( - Bucket=bucket, - Prefix=self._prefix(metric) + '%s_%s' % ( - aggregation.method, - utils.timespan_total_seconds( - aggregation.granularity), - ), - **kwargs) - except botocore.exceptions.ClientError as e: - if e.response['Error'].get('Code') == "NoSuchKey": - raise storage.MetricDoesNotExist(metric) - raise - for f in response.get('Contents', ()): + response = self.s3.list_objects_v2( + Bucket=bucket, + Prefix=self._prefix(metric) + '%s_%s' % ( + aggregation.method, + utils.timespan_total_seconds( + aggregation.granularity), + ), + **kwargs) + # If 
response is empty then check that the metric exists + contents = response.get('Contents', ()) + if not contents and not self._metric_exists_p(metric, version): + raise storage.MetricDoesNotExist(metric) + for f in contents: try: if (self._version_check(f['Key'], version)): meta = f['Key'].split('_') @@ -227,10 +231,14 @@ class S3Storage(storage.StorageDriver): response = self.s3.get_object( Bucket=self._bucket_name, Key=key) except botocore.exceptions.ClientError as e: - if e.response['Error'].get('Code') != "NoSuchKey": + if e.response['Error'].get('Code') == "NoSuchKey": + # Create the metric with empty data + self._put_object_safe( + Bucket=self._bucket_name, Key=key, Body="") + else: raise else: - return response['Body'].read() + return response['Body'].read() or None def _store_unaggregated_timeseries_unbatched( self, metric, data, version=3): diff --git a/gnocchi/tests/test_storage.py b/gnocchi/tests/test_storage.py index d0a926b8..8d8264fa 100644 --- a/gnocchi/tests/test_storage.py +++ b/gnocchi/tests/test_storage.py @@ -148,8 +148,9 @@ class TestStorageDriver(tests_base.TestCase): self.metric.archive_policy.get_aggregations_for_method("mean") ) - self.assertEqual({"mean": []}, self.storage.get_measures( - self.metric, aggregations)) + self.assertRaises(storage.MetricDoesNotExist, + self.storage.get_measures, + self.metric, aggregations) self.assertEqual( {self.metric: None}, self.storage._get_or_create_unaggregated_timeseries([self.metric])) @@ -902,8 +903,10 @@ class TestStorageDriver(tests_base.TestCase): self.metric.archive_policy.get_aggregations_for_method("last") ) - self.assertEqual( - {"last": []}, self.storage.get_measures(self.metric, aggregations)) + self.assertRaises( + storage.MetricDoesNotExist, + self.storage.get_measures, + self.metric, aggregations) def test_find_measures(self): metric2, __ = self._create_metric() @@ -1014,13 +1017,13 @@ class TestStorageDriver(tests_base.TestCase): """https://github.com/gnocchixyz/gnocchi/issues/69""" 
aggregation = self.metric.archive_policy.get_aggregation( "mean", numpy.timedelta64(300, 's')) - self.assertEqual({"mean": []}, - self.storage.get_measures( - self.metric, - [aggregation], - datetime64(2014, 1, 1), - datetime64(2015, 1, 1), - resample=numpy.timedelta64(1, 'h'))) + self.assertRaises(storage.MetricDoesNotExist, + self.storage.get_measures, + self.metric, + [aggregation], + datetime64(2014, 1, 1), + datetime64(2015, 1, 1), + resample=numpy.timedelta64(1, 'h')) class TestMeasureQuery(tests_base.TestCase): -- GitLab From b529a1d093899f5ef1bd8b9379a149e12a0dd219 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Wed, 14 Feb 2018 16:48:34 +0100 Subject: [PATCH 1294/1483] storage: introduce get_aggregated_measures and deprecate get_measures --- gnocchi/rest/aggregates/processor.py | 9 +++-- gnocchi/storage/__init__.py | 55 +++++++++++++++++++--------- 2 files changed, 42 insertions(+), 22 deletions(-) diff --git a/gnocchi/rest/aggregates/processor.py b/gnocchi/rest/aggregates/processor.py index 6e0ef14c..b7268a10 100644 --- a/gnocchi/rest/aggregates/processor.py +++ b/gnocchi/rest/aggregates/processor.py @@ -52,12 +52,13 @@ class MetricReference(object): def _get_measures_timeserie(storage, ref, granularity, *args, **kwargs): + agg = ref.metric.archive_policy.get_aggregation( + ref.aggregation, granularity) try: - data = storage._get_measures_timeserie( + data = storage.get_aggregated_measures( ref.metric, - ref.metric.archive_policy.get_aggregation( - ref.aggregation, granularity), - *args, **kwargs) + [agg], + *args, **kwargs)[agg] except gnocchi_storage.MetricDoesNotExist: data = carbonara.AggregatedTimeSerie( carbonara.Aggregation(ref.aggregation, granularity, None)) diff --git a/gnocchi/storage/__init__.py b/gnocchi/storage/__init__.py index d48de66f..5d5e67ea 100644 --- a/gnocchi/storage/__init__.py +++ b/gnocchi/storage/__init__.py @@ -36,6 +36,7 @@ OPTS = [ LOG = daiquiri.getLogger(__name__) +ATTRGETTER_METHOD = operator.attrgetter("method") 
ATTRGETTER_GRANULARITY = operator.attrgetter("granularity") @@ -199,10 +200,30 @@ class StorageDriver(object): """ return name.split("_")[-1] == 'v%s' % v + def get_aggregated_measures(self, metric, aggregations, + from_timestamp=None, to_timestamp=None): + """Get aggregated measures from a metric. + + :param metric: The metric measured. + :param aggregations: The aggregations to retrieve. + :param from timestamp: The timestamp to get the measure from. + :param to timestamp: The timestamp to get the measure to. + """ + timeseries = utils.parallel_map( + self._get_measures_timeserie, + ((metric, agg, from_timestamp, to_timestamp) + for agg in aggregations)) + return { + agg: ts.fetch(from_timestamp, to_timestamp) + for agg, ts in six.moves.zip(aggregations, timeseries) + } + def get_measures(self, metric, aggregations, from_timestamp=None, to_timestamp=None, resample=None): - """Get a measure to a metric. + """Get aggregated measures from a metric. + + Deprecated. Use `get_aggregated_measures` instead. :param metric: The metric measured. :param aggregations: The aggregations to retrieve. @@ -210,24 +231,23 @@ class StorageDriver(object): :param to timestamp: The timestamp to get the measure to. :param resample: The granularity to resample to. 
""" - agg_timeseries = utils.parallel_map( - self._get_measures_timeserie, - ((metric, ag, from_timestamp, to_timestamp) - for ag in aggregations)) + timeseries = self.get_aggregated_measures( + metric, aggregations, from_timestamp, to_timestamp) if resample: - agg_timeseries = list(map(lambda agg: agg.resample(resample), - agg_timeseries)) + for agg, ts in six.iteritems(timeseries): + timeseries[agg] = ts.resample(resample) return { aggmethod: list(itertools.chain( - *[[(timestamp, ts.aggregation.granularity, value) + *[[(timestamp, timeseries[agg].aggregation.granularity, value) for timestamp, value - in ts.fetch(from_timestamp, to_timestamp)] - for ts in aggts])) - for aggmethod, aggts - in itertools.groupby(agg_timeseries, - lambda v: v.aggregation.method) + in timeseries[agg].fetch(from_timestamp, to_timestamp)] + for agg in sorted(aggs, + key=ATTRGETTER_GRANULARITY, + reverse=True)])) + for aggmethod, aggs in itertools.groupby(timeseries.keys(), + ATTRGETTER_METHOD) } def _get_measures_and_unserialize(self, metric, keys, aggregation): @@ -531,13 +551,12 @@ class StorageDriver(object): raise AggregationDoesNotExist(metric, aggregation, granularity) try: - timeserie = self._get_measures_timeserie( - metric, agg, from_timestamp, to_timestamp) + ts = self.get_aggregated_measures( + metric, [agg], from_timestamp, to_timestamp)[agg] except MetricDoesNotExist: return [] - values = timeserie.fetch(from_timestamp, to_timestamp) - return [(timestamp, granularity, value) - for timestamp, value in values + return [(timestamp, ts.aggregation.granularity, value) + for timestamp, value in ts if predicate(value)] -- GitLab From 2fd30f9ac928fbc72780a8383683bc99e2768f7a Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Mon, 5 Mar 2018 08:35:00 +0100 Subject: [PATCH 1295/1483] doc: fix grafana doc url --- doc/source/grafana.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/source/grafana.rst b/doc/source/grafana.rst index 16349ff5..93b1178c 100644 --- 
a/doc/source/grafana.rst +++ b/doc/source/grafana.rst @@ -52,6 +52,6 @@ steps: :alt: Grafana screenshot .. _`Grafana`: http://grafana.org -.. _`Documentation`: https://grafana.net/plugins/sileht-gnocchi-datasource +.. _`Documentation`: https://grafana.net/plugins/gnocchixyz-gnocchi-datasource .. _`Source`: https://github.com/gnocchixyz/grafana-gnocchi-datasource .. _`CORS`: https://en.wikipedia.org/wiki/Cross-origin_resource_sharing -- GitLab From 0c4095206f0bb6a33923ee0bfa6f21ff0984d546 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Wed, 14 Feb 2018 17:54:01 +0100 Subject: [PATCH 1296/1483] storage: list split keys for all aggregations in one call --- gnocchi/storage/__init__.py | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/gnocchi/storage/__init__.py b/gnocchi/storage/__init__.py index 5d5e67ea..8eaccf84 100644 --- a/gnocchi/storage/__init__.py +++ b/gnocchi/storage/__init__.py @@ -209,9 +209,10 @@ class StorageDriver(object): :param from timestamp: The timestamp to get the measure from. :param to timestamp: The timestamp to get the measure to. 
""" + keys = self._list_split_keys(metric, aggregations) timeseries = utils.parallel_map( self._get_measures_timeserie, - ((metric, agg, from_timestamp, to_timestamp) + ((metric, agg, keys[agg], from_timestamp, to_timestamp) for agg in aggregations)) return { agg: ts.fetch(from_timestamp, to_timestamp) @@ -268,10 +269,8 @@ class StorageDriver(object): results.append(ts) return results - def _get_measures_timeserie(self, metric, aggregation, + def _get_measures_timeserie(self, metric, aggregation, keys, from_timestamp=None, to_timestamp=None): - all_keys = self._list_split_keys(metric, [aggregation])[aggregation] - if from_timestamp: from_timestamp = carbonara.SplitKey.from_timestamp_and_sampling( from_timestamp, aggregation.granularity) @@ -280,7 +279,7 @@ class StorageDriver(object): to_timestamp = carbonara.SplitKey.from_timestamp_and_sampling( to_timestamp, aggregation.granularity) - keys = [key for key in sorted(all_keys) + keys = [key for key in sorted(keys) if ((not from_timestamp or key >= from_timestamp) and (not to_timestamp or key <= to_timestamp))] -- GitLab From 20ac7dda4229b3462c58cb67e725fbe96bc61610 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Wed, 14 Feb 2018 18:33:27 +0100 Subject: [PATCH 1297/1483] storage: allow to pass different aggregations to measures retrieval functions The list of keys passed as argument now embeds also its companion Aggregation object so it'll be possible to retrieve splits from different aggregations in one single call. 
--- gnocchi/storage/__init__.py | 31 +++++----- gnocchi/storage/ceph.py | 8 ++- gnocchi/storage/file.py | 4 +- gnocchi/storage/redis.py | 17 +++--- gnocchi/storage/s3.py | 6 +- gnocchi/storage/swift.py | 6 +- gnocchi/tests/test_storage.py | 108 +++++++++++++++++----------------- 7 files changed, 93 insertions(+), 87 deletions(-) diff --git a/gnocchi/storage/__init__.py b/gnocchi/storage/__init__.py index 8eaccf84..781770de 100644 --- a/gnocchi/storage/__init__.py +++ b/gnocchi/storage/__init__.py @@ -107,11 +107,11 @@ class StorageDriver(object): def upgrade(): pass - def _get_measures(self, metric, keys, aggregation, version=3): + def _get_measures(self, metric, keys_and_aggregations, version=3): return utils.parallel_map( self._get_measures_unbatched, ((metric, key, aggregation, version) - for key in keys)) + for key, aggregation in keys_and_aggregations)) @staticmethod def _get_measures_unbatched(metric, timestamp_key, aggregation, version=3): @@ -251,12 +251,13 @@ class StorageDriver(object): ATTRGETTER_METHOD) } - def _get_measures_and_unserialize(self, metric, keys, aggregation): - if not keys: + def _get_measures_and_unserialize(self, metric, keys_and_aggregations): + if not keys_and_aggregations: return [] - raw_measures = self._get_measures(metric, keys, aggregation) + raw_measures = self._get_measures(metric, keys_and_aggregations) results = [] - for key, raw in six.moves.zip(keys, raw_measures): + for (key, aggregation), raw in six.moves.zip( + keys_and_aggregations, raw_measures): try: ts = carbonara.AggregatedTimeSerie.unserialize( raw, key, aggregation) @@ -264,7 +265,7 @@ class StorageDriver(object): LOG.error("Data corruption detected for %s " "aggregated `%s' timeserie, granularity `%s' " "around time `%s', ignoring.", - metric.id, aggregation, key.sampling, key) + metric.id, aggregation.method, key.sampling, key) else: results.append(ts) return results @@ -279,12 +280,14 @@ class StorageDriver(object): to_timestamp = 
carbonara.SplitKey.from_timestamp_and_sampling( to_timestamp, aggregation.granularity) - keys = [key for key in sorted(keys) - if ((not from_timestamp or key >= from_timestamp) - and (not to_timestamp or key <= to_timestamp))] + keys_and_aggregations = [ + (key, aggregation) for key in sorted(keys) + if ((not from_timestamp or key >= from_timestamp) + and (not to_timestamp or key <= to_timestamp)) + ] timeseries = self._get_measures_and_unserialize( - metric, keys, aggregation.method) + metric, keys_and_aggregations) ts = carbonara.AggregatedTimeSerie.from_timeseries( timeseries, aggregation) @@ -320,7 +323,7 @@ class StorageDriver(object): # First, fetch all those existing splits. try: existing_data = self._get_measures_and_unserialize( - metric, keys_to_rewrite, aggregation) + metric, [(key, aggregation) for key in keys_to_rewrite]) except AggregationDoesNotExist: pass else: @@ -344,7 +347,7 @@ class StorageDriver(object): "and aggregation method %s (split key %s): " "possible data corruption", metric, key.sampling, - aggregation, key) + aggregation.method, key) continue offset, data = split.serialize( @@ -432,7 +435,7 @@ class StorageDriver(object): keys_and_split_to_store[key] = split self._store_timeserie_splits( - metric, keys_and_split_to_store, aggregation.method, + metric, keys_and_split_to_store, aggregation, oldest_mutable_timestamp) @staticmethod diff --git a/gnocchi/storage/ceph.py b/gnocchi/storage/ceph.py index d8a324f7..9d6cc8e9 100644 --- a/gnocchi/storage/ceph.py +++ b/gnocchi/storage/ceph.py @@ -85,7 +85,8 @@ class CephStorage(storage.StorageDriver): aggregation, version=3): with rados.WriteOpCtx() as op: for key, data, offset in keys_and_data_and_offset: - name = self._get_object_name(metric, key, aggregation, version) + name = self._get_object_name( + metric, key, aggregation.method, version) if offset is None: self.ioctx.write_full(name, data) else: @@ -146,13 +147,14 @@ class CephStorage(storage.StorageDriver): def 
_get_measures_unbatched(self, metric, key, aggregation, version=3): try: - name = self._get_object_name(metric, key, aggregation, version) + name = self._get_object_name( + metric, key, aggregation.method, version) return self._get_object_content(name) except rados.ObjectNotFound: if self._object_exists( self._build_unaggregated_timeserie_path(metric, 3)): raise storage.AggregationDoesNotExist( - metric, aggregation, key.sampling) + metric, aggregation.method, key.sampling) else: raise storage.MetricDoesNotExist(metric) diff --git a/gnocchi/storage/file.py b/gnocchi/storage/file.py index 90cbf726..2ebf7cad 100644 --- a/gnocchi/storage/file.py +++ b/gnocchi/storage/file.py @@ -168,7 +168,7 @@ class FileStorage(storage.StorageDriver): for key, data, offset in keys_and_data_and_offset: self._atomic_file_store( self._build_metric_path_for_split( - metric, aggregation, key, version), + metric, aggregation.method, key, version), data) def _delete_metric(self, metric): @@ -183,7 +183,7 @@ class FileStorage(storage.StorageDriver): def _get_measures_unbatched(self, metric, key, aggregation, version=3): path = self._build_metric_path_for_split( - metric, aggregation, key, version) + metric, aggregation.method, key, version) try: with open(path, 'rb') as aggregation_file: return aggregation_file.read() diff --git a/gnocchi/storage/redis.py b/gnocchi/storage/redis.py index 37c2adbe..bb43ec18 100644 --- a/gnocchi/storage/redis.py +++ b/gnocchi/storage/redis.py @@ -54,7 +54,7 @@ for i, result in ipairs(results) do if result == false then local field = ARGV[i] if redis.call("EXISTS", KEYS[1]) == 1 then - return {-1, field} + return {-1, i - 1} end return {-2, field} end @@ -154,28 +154,29 @@ return {0, final} pipe = self._client.pipeline(transaction=False) metric_key = self._metric_key(metric) for key, data, offset in keys_and_data_and_offset: - key = self._aggregated_field_for_split(aggregation, key, version) + key = self._aggregated_field_for_split( + aggregation.method, key, 
version) pipe.hset(metric_key, key, data) pipe.execute() def _delete_metric(self, metric): self._client.delete(self._metric_key(metric)) - def _get_measures(self, metric, keys, aggregation, version=3): - if not keys: + def _get_measures(self, metric, keys_and_aggregations, version=3): + if not keys_and_aggregations: return [] fields = [ - self._aggregated_field_for_split(aggregation, key, version) - for key in keys + self._aggregated_field_for_split(aggregation.method, key, version) + for key, aggregation in keys_and_aggregations ] code, result = self._scripts['get_measures']( keys=[self._metric_key(metric)], args=fields, ) if code == -1: - sampling = utils.to_timespan(result.split(self.FIELD_SEP_B)[2]) + missing_key, missing_agg = keys_and_aggregations[int(result)] raise storage.AggregationDoesNotExist( - metric, aggregation, sampling) + metric, missing_agg.method, missing_agg.granularity) if code == -2: raise storage.MetricDoesNotExist(metric) return result diff --git a/gnocchi/storage/s3.py b/gnocchi/storage/s3.py index 2b5c54ef..2dfbb461 100644 --- a/gnocchi/storage/s3.py +++ b/gnocchi/storage/s3.py @@ -126,7 +126,7 @@ class S3Storage(storage.StorageDriver): self._put_object_safe( Bucket=self._bucket_name, Key=self._prefix(metric) + self._object_name( - key, aggregation, version), + key, aggregation.method, version), Body=data) def _delete_metric_splits_unbatched(self, metric, key, aggregation, @@ -162,12 +162,12 @@ class S3Storage(storage.StorageDriver): response = self.s3.get_object( Bucket=self._bucket_name, Key=self._prefix(metric) + self._object_name( - key, aggregation, version)) + key, aggregation.method, version)) except botocore.exceptions.ClientError as e: if e.response['Error'].get('Code') == 'NoSuchKey': if self._metric_exists_p(metric, version): raise storage.AggregationDoesNotExist( - metric, aggregation, key.sampling) + metric, aggregation.method, key.sampling) raise storage.MetricDoesNotExist(metric) raise return response['Body'].read() diff 
--git a/gnocchi/storage/swift.py b/gnocchi/storage/swift.py index deb1efe1..41ed0b41 100644 --- a/gnocchi/storage/swift.py +++ b/gnocchi/storage/swift.py @@ -121,7 +121,7 @@ class SwiftStorage(storage.StorageDriver): for key, data, offset in keys_and_data_and_offset: self.swift.put_object( self._container_name(metric), - self._object_name(key, aggregation, version), + self._object_name(key, aggregation.method, version), data) def _delete_metric_splits_unbatched( @@ -152,7 +152,7 @@ class SwiftStorage(storage.StorageDriver): try: headers, contents = self.swift.get_object( self._container_name(metric), self._object_name( - key, aggregation, version)) + key, aggregation.method, version)) except swclient.ClientException as e: if e.http_status == 404: try: @@ -162,7 +162,7 @@ class SwiftStorage(storage.StorageDriver): raise storage.MetricDoesNotExist(metric) raise raise storage.AggregationDoesNotExist( - metric, aggregation, key.sampling) + metric, aggregation.method, key.sampling) raise return contents diff --git a/gnocchi/tests/test_storage.py b/gnocchi/tests/test_storage.py index 8d8264fa..a901b786 100644 --- a/gnocchi/tests/test_storage.py +++ b/gnocchi/tests/test_storage.py @@ -225,7 +225,7 @@ class TestStorageDriver(tests_base.TestCase): # policy is 60 points and split is 48. 
should only update 2nd half args = call[1] if (args[0] == m_sql - and args[2] == 'mean' + and args[2].method == 'mean' and args[1][0][0].sampling == numpy.timedelta64(1, 'm')): count += 1 self.assertEqual(1, count) @@ -343,29 +343,29 @@ class TestStorageDriver(tests_base.TestCase): else: assertCompressedIfWriteFull = self.assertFalse + aggregation = self.metric.archive_policy.get_aggregation( + "mean", numpy.timedelta64(1, 'm')) + data = self.storage._get_measures( - self.metric, [carbonara.SplitKey( + self.metric, [(carbonara.SplitKey( numpy.datetime64(1451520000, 's'), numpy.timedelta64(1, 'm'), - )], "mean")[0] + ), aggregation)])[0] self.assertTrue(carbonara.AggregatedTimeSerie.is_compressed(data)) data = self.storage._get_measures( - self.metric, [carbonara.SplitKey( + self.metric, [(carbonara.SplitKey( numpy.datetime64(1451736000, 's'), numpy.timedelta64(60, 's'), - )], "mean")[0] + ), aggregation)])[0] self.assertTrue(carbonara.AggregatedTimeSerie.is_compressed(data)) data = self.storage._get_measures( - self.metric, [carbonara.SplitKey( + self.metric, [(carbonara.SplitKey( numpy.datetime64(1451952000, 's'), numpy.timedelta64(60, 's'), - )], "mean")[0] + ), aggregation)])[0] assertCompressedIfWriteFull( carbonara.AggregatedTimeSerie.is_compressed(data)) - aggregation = self.metric.archive_policy.get_aggregation( - "mean", numpy.timedelta64(1, 'm')) - self.assertEqual({"mean": [ (datetime64(2016, 1, 1, 12), numpy.timedelta64(1, 'm'), 69), (datetime64(2016, 1, 2, 13, 7), numpy.timedelta64(1, 'm'), 42), @@ -398,29 +398,29 @@ class TestStorageDriver(tests_base.TestCase): }, }, self.storage._list_split_keys(self.metric, [agg])) data = self.storage._get_measures( - self.metric, [carbonara.SplitKey( + self.metric, [(carbonara.SplitKey( numpy.datetime64(1451520000, 's'), numpy.timedelta64(60, 's'), - )], "mean")[0] + ), aggregation)])[0] self.assertTrue(carbonara.AggregatedTimeSerie.is_compressed(data)) data = self.storage._get_measures( - self.metric, 
[carbonara.SplitKey( + self.metric, [(carbonara.SplitKey( numpy.datetime64(1451736000, 's'), numpy.timedelta64(60, 's'), - )], "mean")[0] + ), aggregation)])[0] self.assertTrue(carbonara.AggregatedTimeSerie.is_compressed(data)) data = self.storage._get_measures( - self.metric, [carbonara.SplitKey( + self.metric, [(carbonara.SplitKey( numpy.datetime64(1451952000, 's'), numpy.timedelta64(1, 'm'), - )], "mean")[0] + ), aggregation)])[0] # Now this one is compressed because it has been rewritten! self.assertTrue(carbonara.AggregatedTimeSerie.is_compressed(data)) data = self.storage._get_measures( - self.metric, [carbonara.SplitKey( + self.metric, [(carbonara.SplitKey( numpy.datetime64(1452384000, 's'), numpy.timedelta64(60, 's'), - )], "mean")[0] + ), aggregation)])[0] assertCompressedIfWriteFull( carbonara.AggregatedTimeSerie.is_compressed(data)) @@ -471,29 +471,29 @@ class TestStorageDriver(tests_base.TestCase): else: assertCompressedIfWriteFull = self.assertFalse + aggregation = self.metric.archive_policy.get_aggregation( + "mean", numpy.timedelta64(1, 'm')) + data = self.storage._get_measures( - self.metric, [carbonara.SplitKey( + self.metric, [(carbonara.SplitKey( numpy.datetime64(1451520000, 's'), numpy.timedelta64(1, 'm'), - )], "mean")[0] + ), aggregation)])[0] self.assertTrue(carbonara.AggregatedTimeSerie.is_compressed(data)) data = self.storage._get_measures( - self.metric, [carbonara.SplitKey( + self.metric, [(carbonara.SplitKey( numpy.datetime64(1451736000, 's'), numpy.timedelta64(1, 'm'), - )], "mean")[0] + ), aggregation)])[0] self.assertTrue(carbonara.AggregatedTimeSerie.is_compressed(data)) data = self.storage._get_measures( - self.metric, [carbonara.SplitKey( + self.metric, [(carbonara.SplitKey( numpy.datetime64(1451952000, 's'), numpy.timedelta64(1, 'm') - )], "mean")[0] + ), aggregation)])[0] assertCompressedIfWriteFull( carbonara.AggregatedTimeSerie.is_compressed(data)) - aggregation = self.metric.archive_policy.get_aggregation( - "mean", 
numpy.timedelta64(1, 'm')) - self.assertEqual({"mean": [ (datetime64(2016, 1, 1, 12), numpy.timedelta64(1, 'm'), 69), (datetime64(2016, 1, 2, 13, 7), numpy.timedelta64(1, 'm'), 42), @@ -528,29 +528,29 @@ class TestStorageDriver(tests_base.TestCase): } }, self.storage._list_split_keys(self.metric, [agg])) data = self.storage._get_measures( - self.metric, [carbonara.SplitKey( + self.metric, [(carbonara.SplitKey( numpy.datetime64(1451520000, 's'), numpy.timedelta64(1, 'm'), - )], "mean")[0] + ), agg)])[0] self.assertTrue(carbonara.AggregatedTimeSerie.is_compressed(data)) data = self.storage._get_measures( - self.metric, [carbonara.SplitKey( + self.metric, [(carbonara.SplitKey( numpy.datetime64(1451736000, 's'), numpy.timedelta64(1, 'm'), - )], "mean")[0] + ), agg)])[0] self.assertTrue(carbonara.AggregatedTimeSerie.is_compressed(data)) data = self.storage._get_measures( - self.metric, [carbonara.SplitKey( + self.metric, [(carbonara.SplitKey( numpy.datetime64(1451952000, 's'), numpy.timedelta64(60, 's') - )], "mean")[0] + ), agg)])[0] # Now this one is compressed because it has been rewritten! 
self.assertTrue(carbonara.AggregatedTimeSerie.is_compressed(data)) data = self.storage._get_measures( - self.metric, [carbonara.SplitKey( + self.metric, [(carbonara.SplitKey( numpy.datetime64(1452384000, 's'), numpy.timedelta64(1, 'm'), - )], "mean")[0] + ), agg)])[0] assertCompressedIfWriteFull( carbonara.AggregatedTimeSerie.is_compressed(data)) @@ -601,30 +601,30 @@ class TestStorageDriver(tests_base.TestCase): else: assertCompressedIfWriteFull = self.assertFalse + aggregation = self.metric.archive_policy.get_aggregation( + "mean", numpy.timedelta64(1, 'm')) + data = self.storage._get_measures( self.metric, - [carbonara.SplitKey( + [(carbonara.SplitKey( numpy.datetime64(1451520000, 's'), numpy.timedelta64(1, 'm'), - )], "mean")[0] + ), aggregation)])[0] self.assertTrue(carbonara.AggregatedTimeSerie.is_compressed(data)) data = self.storage._get_measures( - self.metric, [carbonara.SplitKey( + self.metric, [(carbonara.SplitKey( numpy.datetime64(1451736000, 's'), numpy.timedelta64(1, 'm') - )], "mean")[0] + ), aggregation)])[0] self.assertTrue(carbonara.AggregatedTimeSerie.is_compressed(data)) data = self.storage._get_measures( - self.metric, [carbonara.SplitKey( + self.metric, [(carbonara.SplitKey( numpy.datetime64(1451952000, 's'), numpy.timedelta64(1, 'm'), - )], "mean")[0] + ), aggregation)])[0] assertCompressedIfWriteFull( carbonara.AggregatedTimeSerie.is_compressed(data)) - aggregation = self.metric.archive_policy.get_aggregation( - "mean", numpy.timedelta64(1, 'm')) - self.assertEqual({"mean": [ (datetime64(2016, 1, 1, 12), numpy.timedelta64(1, 'm'), 69), @@ -691,29 +691,29 @@ class TestStorageDriver(tests_base.TestCase): else: assertCompressedIfWriteFull = self.assertFalse + aggregation = self.metric.archive_policy.get_aggregation( + "mean", numpy.timedelta64(1, 'm')) + data = self.storage._get_measures( - self.metric, [carbonara.SplitKey( + self.metric, [(carbonara.SplitKey( numpy.datetime64(1451520000, 's'), numpy.timedelta64(60, 's'), - )], "mean")[0] + ), 
aggregation)])[0] self.assertTrue(carbonara.AggregatedTimeSerie.is_compressed(data)) data = self.storage._get_measures( - self.metric, [carbonara.SplitKey( + self.metric, [(carbonara.SplitKey( numpy.datetime64(1451736000, 's'), numpy.timedelta64(1, 'm'), - )], "mean")[0] + ), aggregation)])[0] self.assertTrue(carbonara.AggregatedTimeSerie.is_compressed(data)) data = self.storage._get_measures( - self.metric, [carbonara.SplitKey( + self.metric, [(carbonara.SplitKey( numpy.datetime64(1451952000, 's'), numpy.timedelta64(1, 'm'), - )], "mean")[0] + ), aggregation)])[0] assertCompressedIfWriteFull( carbonara.AggregatedTimeSerie.is_compressed(data)) - aggregation = self.metric.archive_policy.get_aggregation( - "mean", numpy.timedelta64(1, 'm')) - self.assertEqual({"mean": [ (datetime64(2016, 1, 1, 12), numpy.timedelta64(1, 'm'), 69), (datetime64(2016, 1, 2, 13, 7), numpy.timedelta64(1, 'm'), 42), @@ -728,7 +728,7 @@ class TestStorageDriver(tests_base.TestCase): numpy.datetime64(1451952000, 's'), numpy.timedelta64(1, 'm')), b"oh really?", None) - ], "mean") + ], aggregation) # Now store brand new points that should force a rewrite of one of the # split (keep in mind the back window size in one hour here). We move -- GitLab From 4f4037db4d0e9322b402f35fcf5338035de921d7 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Thu, 15 Feb 2018 14:03:32 +0100 Subject: [PATCH 1298/1483] storage: stop raising AggregationDoesNotExist on measures retrieval Raising an exception at this point is damageable as when multiple SplitKeys are being fetched at the same time, only one being absent will cancel the call. The caller is already solid and know show to handle None as a return value so just use None to indicate the split is non-existing. 
--- gnocchi/storage/__init__.py | 32 ++++++++++++++++++-------------- gnocchi/storage/ceph.py | 6 ++---- gnocchi/storage/file.py | 3 +-- gnocchi/storage/redis.py | 13 +++++-------- gnocchi/storage/s3.py | 3 +-- gnocchi/storage/swift.py | 3 +-- 6 files changed, 28 insertions(+), 32 deletions(-) diff --git a/gnocchi/storage/__init__.py b/gnocchi/storage/__init__.py index 781770de..5fb586c9 100644 --- a/gnocchi/storage/__init__.py +++ b/gnocchi/storage/__init__.py @@ -251,7 +251,14 @@ class StorageDriver(object): ATTRGETTER_METHOD) } - def _get_measures_and_unserialize(self, metric, keys_and_aggregations): + def _get_splits_and_unserialize(self, metric, keys_and_aggregations): + """Get splits and unserialize them + + :param metric: The metric to retrieve. + :param keys_and_aggregations: A list of tuple (SplitKey, Aggregation) + to retrieve. + :return: A list of AggregatedTimeSerie. + """ if not keys_and_aggregations: return [] raw_measures = self._get_measures(metric, keys_and_aggregations) @@ -286,7 +293,7 @@ class StorageDriver(object): and (not to_timestamp or key <= to_timestamp)) ] - timeseries = self._get_measures_and_unserialize( + timeseries = self._get_splits_and_unserialize( metric, keys_and_aggregations) ts = carbonara.AggregatedTimeSerie.from_timeseries( @@ -321,18 +328,15 @@ class StorageDriver(object): # Update the splits that were passed as argument with the data already # stored in the case that we need to rewrite them fully. # First, fetch all those existing splits. 
- try: - existing_data = self._get_measures_and_unserialize( - metric, [(key, aggregation) for key in keys_to_rewrite]) - except AggregationDoesNotExist: - pass - else: - for key, split, existing in six.moves.zip( - keys_to_rewrite, splits_to_rewrite, existing_data): - if existing: - if split is not None: - existing.merge(split) - keys_and_splits[key] = existing + existing_data = self._get_splits_and_unserialize( + metric, [(key, aggregation) for key in keys_to_rewrite]) + + for key, split, existing in six.moves.zip( + keys_to_rewrite, splits_to_rewrite, existing_data): + if existing: + if split is not None: + existing.merge(split) + keys_and_splits[key] = existing key_data_offset = [] for key, split in six.iteritems(keys_and_splits): diff --git a/gnocchi/storage/ceph.py b/gnocchi/storage/ceph.py index 9d6cc8e9..71c885a9 100644 --- a/gnocchi/storage/ceph.py +++ b/gnocchi/storage/ceph.py @@ -153,10 +153,8 @@ class CephStorage(storage.StorageDriver): except rados.ObjectNotFound: if self._object_exists( self._build_unaggregated_timeserie_path(metric, 3)): - raise storage.AggregationDoesNotExist( - metric, aggregation.method, key.sampling) - else: - raise storage.MetricDoesNotExist(metric) + return + raise storage.MetricDoesNotExist(metric) def _list_split_keys(self, metric, aggregations, version=3): with rados.ReadOpCtx() as op: diff --git a/gnocchi/storage/file.py b/gnocchi/storage/file.py index 2ebf7cad..6649aa6f 100644 --- a/gnocchi/storage/file.py +++ b/gnocchi/storage/file.py @@ -190,7 +190,6 @@ class FileStorage(storage.StorageDriver): except IOError as e: if e.errno == errno.ENOENT: if os.path.exists(self._build_metric_dir(metric)): - raise storage.AggregationDoesNotExist( - metric, aggregation, key.sampling) + return raise storage.MetricDoesNotExist(metric) raise diff --git a/gnocchi/storage/redis.py b/gnocchi/storage/redis.py index bb43ec18..a5a9d5ec 100644 --- a/gnocchi/storage/redis.py +++ b/gnocchi/storage/redis.py @@ -50,13 +50,14 @@ return ids 
"get_measures": """ local results = redis.call("HMGET", KEYS[1], unpack(ARGV)) local final = {} +local metric_exists = false for i, result in ipairs(results) do if result == false then - local field = ARGV[i] - if redis.call("EXISTS", KEYS[1]) == 1 then - return {-1, i - 1} + if not metric_exists and redis.call("EXISTS", KEYS[1]) == 0 then + return {-2, false} + else + metric_exists = true end - return {-2, field} end final[#final + 1] = result end @@ -173,10 +174,6 @@ return {0, final} keys=[self._metric_key(metric)], args=fields, ) - if code == -1: - missing_key, missing_agg = keys_and_aggregations[int(result)] - raise storage.AggregationDoesNotExist( - metric, missing_agg.method, missing_agg.granularity) if code == -2: raise storage.MetricDoesNotExist(metric) return result diff --git a/gnocchi/storage/s3.py b/gnocchi/storage/s3.py index 2dfbb461..d20fbcbb 100644 --- a/gnocchi/storage/s3.py +++ b/gnocchi/storage/s3.py @@ -166,8 +166,7 @@ class S3Storage(storage.StorageDriver): except botocore.exceptions.ClientError as e: if e.response['Error'].get('Code') == 'NoSuchKey': if self._metric_exists_p(metric, version): - raise storage.AggregationDoesNotExist( - metric, aggregation.method, key.sampling) + return raise storage.MetricDoesNotExist(metric) raise return response['Body'].read() diff --git a/gnocchi/storage/swift.py b/gnocchi/storage/swift.py index 41ed0b41..b2202fb3 100644 --- a/gnocchi/storage/swift.py +++ b/gnocchi/storage/swift.py @@ -161,8 +161,7 @@ class SwiftStorage(storage.StorageDriver): if e.http_status == 404: raise storage.MetricDoesNotExist(metric) raise - raise storage.AggregationDoesNotExist( - metric, aggregation.method, key.sampling) + return raise return contents -- GitLab From 60e758550e259c567533e1e0e93eb8a0c82cc2bd Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Tue, 6 Mar 2018 11:04:41 +0100 Subject: [PATCH 1299/1483] storage: fix corruption scenario If data corruption occurs, the _get_splits_and_unserialize() method might not return 
the same number of arguments that it received. It's not a big deal when concatenating the splits to retrieve the data for reading. However, when requesting a bunch of splits for rewrite, that's a problem because the list is zipped with other data on iteration, possibly corrupting the results. --- gnocchi/storage/__init__.py | 4 ++-- gnocchi/tests/test_storage.py | 25 +++++++++++++++++++++++++ 2 files changed, 27 insertions(+), 2 deletions(-) diff --git a/gnocchi/storage/__init__.py b/gnocchi/storage/__init__.py index 5fb586c9..f4a53c8b 100644 --- a/gnocchi/storage/__init__.py +++ b/gnocchi/storage/__init__.py @@ -273,8 +273,8 @@ class StorageDriver(object): "aggregated `%s' timeserie, granularity `%s' " "around time `%s', ignoring.", metric.id, aggregation.method, key.sampling, key) - else: - results.append(ts) + ts = carbonara.AggregatedTimeSerie(aggregation) + results.append(ts) return results def _get_measures_timeserie(self, metric, aggregation, keys, diff --git a/gnocchi/tests/test_storage.py b/gnocchi/tests/test_storage.py index a901b786..7baf66f4 100644 --- a/gnocchi/tests/test_storage.py +++ b/gnocchi/tests/test_storage.py @@ -65,6 +65,31 @@ class TestStorageDriver(tests_base.TestCase): driver = storage.get_driver(self.conf) self.assertIsInstance(driver, storage.StorageDriver) + def test_corrupted_split(self): + self.incoming.add_measures(self.metric.id, [ + incoming.Measure(datetime64(2014, 1, 1, 12, 0, 1), 69), + ]) + self.trigger_processing() + + aggregation = self.metric.archive_policy.get_aggregation( + "mean", numpy.timedelta64(5, 'm')) + + with mock.patch('gnocchi.carbonara.AggregatedTimeSerie.unserialize', + side_effect=carbonara.InvalidData()): + results = self.storage._get_splits_and_unserialize( + self.metric, + [ + (carbonara.SplitKey( + numpy.datetime64(1387800000, 's'), + numpy.timedelta64(5, 'm')), + aggregation) + ]) + self.assertEqual(1, len(results)) + self.assertIsInstance(results[0], carbonara.AggregatedTimeSerie) + # Assert it's an 
empty one since corrupted + self.assertEqual(0, len(results[0])) + self.assertEqual(results[0].aggregation, aggregation) + def test_corrupted_data(self): self.incoming.add_measures(self.metric.id, [ incoming.Measure(datetime64(2014, 1, 1, 12, 0, 1), 69), -- GitLab From d8fd8e6a6d5f35ddb8e201b70df1fdcf00a825c7 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Mon, 5 Mar 2018 18:29:15 +0100 Subject: [PATCH 1300/1483] storage: remove metric existence check when retrieving splits Splits are either retrieve for reading from: - the REST API, where the splits are actually listed before being retrieved, we don't care about the metric existence or not, None is enough. - on rewriting the split (WRITE_FULL) and in that case we know the metric exists --- gnocchi/storage/ceph.py | 5 +-- gnocchi/storage/file.py | 4 +- gnocchi/storage/redis.py | 31 ++------------- gnocchi/storage/s3.py | 4 +- gnocchi/storage/swift.py | 6 --- gnocchi/tests/test_storage.py | 73 +++++++++++++++++++++++++++++++++++ 6 files changed, 80 insertions(+), 43 deletions(-) diff --git a/gnocchi/storage/ceph.py b/gnocchi/storage/ceph.py index 71c885a9..9c0f7827 100644 --- a/gnocchi/storage/ceph.py +++ b/gnocchi/storage/ceph.py @@ -151,10 +151,7 @@ class CephStorage(storage.StorageDriver): metric, key, aggregation.method, version) return self._get_object_content(name) except rados.ObjectNotFound: - if self._object_exists( - self._build_unaggregated_timeserie_path(metric, 3)): - return - raise storage.MetricDoesNotExist(metric) + return def _list_split_keys(self, metric, aggregations, version=3): with rados.ReadOpCtx() as op: diff --git a/gnocchi/storage/file.py b/gnocchi/storage/file.py index 6649aa6f..775e2cad 100644 --- a/gnocchi/storage/file.py +++ b/gnocchi/storage/file.py @@ -189,7 +189,5 @@ class FileStorage(storage.StorageDriver): return aggregation_file.read() except IOError as e: if e.errno == errno.ENOENT: - if os.path.exists(self._build_metric_dir(metric)): - return - raise 
storage.MetricDoesNotExist(metric) + return raise diff --git a/gnocchi/storage/redis.py b/gnocchi/storage/redis.py index a5a9d5ec..7f35e06d 100644 --- a/gnocchi/storage/redis.py +++ b/gnocchi/storage/redis.py @@ -47,22 +47,6 @@ repeat until cursor == 0 return ids """ % (FIELD_SEP, FIELD_SEP, FIELD_SEP, FIELD_SEP, FIELD_SEP), - "get_measures": """ -local results = redis.call("HMGET", KEYS[1], unpack(ARGV)) -local final = {} -local metric_exists = false -for i, result in ipairs(results) do - if result == false then - if not metric_exists and redis.call("EXISTS", KEYS[1]) == 0 then - return {-2, false} - else - metric_exists = true - end - end - final[#final + 1] = result -end -return {0, final} -""", } def __init__(self, conf): @@ -166,14 +150,7 @@ return {0, final} def _get_measures(self, metric, keys_and_aggregations, version=3): if not keys_and_aggregations: return [] - fields = [ - self._aggregated_field_for_split(aggregation.method, key, version) - for key, aggregation in keys_and_aggregations - ] - code, result = self._scripts['get_measures']( - keys=[self._metric_key(metric)], - args=fields, - ) - if code == -2: - raise storage.MetricDoesNotExist(metric) - return result + return self._client.hmget( + self._metric_key(metric), + [self._aggregated_field_for_split(aggregation.method, key, version) + for key, aggregation in keys_and_aggregations]) diff --git a/gnocchi/storage/s3.py b/gnocchi/storage/s3.py index d20fbcbb..06fb5825 100644 --- a/gnocchi/storage/s3.py +++ b/gnocchi/storage/s3.py @@ -165,9 +165,7 @@ class S3Storage(storage.StorageDriver): key, aggregation.method, version)) except botocore.exceptions.ClientError as e: if e.response['Error'].get('Code') == 'NoSuchKey': - if self._metric_exists_p(metric, version): - return - raise storage.MetricDoesNotExist(metric) + return raise return response['Body'].read() diff --git a/gnocchi/storage/swift.py b/gnocchi/storage/swift.py index b2202fb3..96d116ff 100644 --- a/gnocchi/storage/swift.py +++ 
b/gnocchi/storage/swift.py @@ -155,12 +155,6 @@ class SwiftStorage(storage.StorageDriver): key, aggregation.method, version)) except swclient.ClientException as e: if e.http_status == 404: - try: - self.swift.head_container(self._container_name(metric)) - except swclient.ClientException as e: - if e.http_status == 404: - raise storage.MetricDoesNotExist(metric) - raise return raise return contents diff --git a/gnocchi/tests/test_storage.py b/gnocchi/tests/test_storage.py index 7baf66f4..46804f2d 100644 --- a/gnocchi/tests/test_storage.py +++ b/gnocchi/tests/test_storage.py @@ -331,6 +331,79 @@ class TestStorageDriver(tests_base.TestCase): self.metric, [agg], )) + def test_get_measures_return(self): + self.incoming.add_measures(self.metric.id, [ + incoming.Measure(datetime64(2016, 1, 1, 12, 0, 1), 69), + incoming.Measure(datetime64(2016, 1, 2, 13, 7, 31), 42), + incoming.Measure(datetime64(2016, 1, 4, 14, 9, 31), 4), + incoming.Measure(datetime64(2016, 1, 6, 15, 12, 45), 44), + ]) + self.trigger_processing() + + aggregation = self.metric.archive_policy.get_aggregation( + "mean", numpy.timedelta64(5, 'm')) + + data = self.storage._get_measures( + self.metric, [(carbonara.SplitKey( + numpy.datetime64(1451520000, 's'), + numpy.timedelta64(5, 'm'), + ), aggregation)]) + self.assertEqual(1, len(data)) + self.assertIsInstance(data[0], bytes) + self.assertGreater(len(data[0]), 0) + existing = data[0] + + # Now retrieve an existing and a non-existing key + data = self.storage._get_measures( + self.metric, [ + (carbonara.SplitKey( + numpy.datetime64(1451520000, 's'), + numpy.timedelta64(5, 'm'), + ), aggregation), + (carbonara.SplitKey( + numpy.datetime64(1451520010, 's'), + numpy.timedelta64(5, 'm'), + ), aggregation), + ]) + self.assertEqual(2, len(data)) + self.assertIsInstance(data[0], bytes) + self.assertGreater(len(data[0]), 0) + self.assertEqual(existing, data[0]) + self.assertIsNone(data[1]) + + # Now retrieve a non-existing and an existing key + data = 
self.storage._get_measures( + self.metric, [ + (carbonara.SplitKey( + numpy.datetime64(155152000, 's'), + numpy.timedelta64(5, 'm'), + ), aggregation), + (carbonara.SplitKey( + numpy.datetime64(1451520000, 's'), + numpy.timedelta64(5, 'm'), + ), aggregation), + ]) + self.assertEqual(2, len(data)) + self.assertIsInstance(data[1], bytes) + self.assertGreater(len(data[1]), 0) + self.assertEqual(existing, data[1]) + self.assertIsNone(data[0]) + + m2, _ = self._create_metric() + # Now retrieve a non-existing (= no aggregated measures) metric + data = self.storage._get_measures( + m2, [ + (carbonara.SplitKey( + numpy.datetime64(1451520010, 's'), + numpy.timedelta64(5, 'm'), + ), aggregation), + (carbonara.SplitKey( + numpy.datetime64(1451520000, 's'), + numpy.timedelta64(5, 'm'), + ), aggregation), + ]) + self.assertEqual([None, None], data) + def test_rewrite_measures(self): # Create an archive policy that spans on several splits. Each split # being 3600 points, let's go for 36k points so we have 10 splits. -- GitLab From 192814bf97ad5a8bb875a6de1c5b5bf968ec4604 Mon Sep 17 00:00:00 2001 From: Thomas Goirand Date: Tue, 6 Mar 2018 17:04:56 +0000 Subject: [PATCH 1301/1483] Still unreleased. --- debian/changelog | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/debian/changelog b/debian/changelog index 88628f2d..ced5af79 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,4 +1,4 @@ -gnocchi (4.2.0-1) unstable; urgency=medium +gnocchi (4.2.0-1) UNRELEASED; urgency=medium [ Ondřej Nový ] * d/control: Set Vcs-* to salsa.debian.org -- GitLab From a093825a56c7603701af738b71168501fe5c74ac Mon Sep 17 00:00:00 2001 From: Thomas Goirand Date: Tue, 6 Mar 2018 17:14:24 +0000 Subject: [PATCH 1302/1483] Dropped debconf templates, using openstack-pkg-tools >= 70~. 
--- debian/changelog | 1 + debian/control | 2 +- debian/gnocchi-api.templates | 63 ------- debian/gnocchi-common.postrm | 30 --- debian/gnocchi-common.postrm.in | 16 ++ debian/gnocchi-common.prerm | 17 ++ debian/gnocchi-common.templates | 57 ------ debian/po/POTFILES.in | 2 - debian/po/cs.po | 314 -------------------------------- debian/po/da.po | 259 -------------------------- debian/po/de.po | 268 --------------------------- debian/po/es.po | 311 ------------------------------- debian/po/fr.po | 276 ---------------------------- debian/po/gl.po | 270 --------------------------- debian/po/it.po | 272 --------------------------- debian/po/ja.po | 275 ---------------------------- debian/po/nl.po | 280 ---------------------------- debian/po/pl.po | 274 ---------------------------- debian/po/pt.po | 258 -------------------------- debian/po/pt_BR.po | 276 ---------------------------- debian/po/ru.po | 281 ---------------------------- debian/po/sv.po | 281 ---------------------------- debian/po/templates.pot | 229 ----------------------- debian/po/zh_CN.po | 263 -------------------------- debian/rules | 4 +- 25 files changed, 38 insertions(+), 4541 deletions(-) delete mode 100644 debian/gnocchi-api.templates delete mode 100644 debian/gnocchi-common.postrm create mode 100644 debian/gnocchi-common.postrm.in create mode 100644 debian/gnocchi-common.prerm delete mode 100644 debian/gnocchi-common.templates delete mode 100644 debian/po/POTFILES.in delete mode 100644 debian/po/cs.po delete mode 100644 debian/po/da.po delete mode 100644 debian/po/de.po delete mode 100644 debian/po/es.po delete mode 100644 debian/po/fr.po delete mode 100644 debian/po/gl.po delete mode 100644 debian/po/it.po delete mode 100644 debian/po/ja.po delete mode 100644 debian/po/nl.po delete mode 100644 debian/po/pl.po delete mode 100644 debian/po/pt.po delete mode 100644 debian/po/pt_BR.po delete mode 100644 debian/po/ru.po delete mode 100644 debian/po/sv.po delete mode 100644 debian/po/templates.pot 
delete mode 100644 debian/po/zh_CN.po diff --git a/debian/changelog b/debian/changelog index ced5af79..ed77064f 100644 --- a/debian/changelog +++ b/debian/changelog @@ -11,6 +11,7 @@ gnocchi (4.2.0-1) UNRELEASED; urgency=medium * Blacklist test_gnocchi_config_generator_run(), we don't care about it anyway, as we're using oslo-config-generator. * Fixed uwsgi params. + * Dropped debconf templates, using openstack-pkg-tools >= 70~. -- Thomas Goirand Tue, 27 Feb 2018 13:12:12 +0000 diff --git a/debian/control b/debian/control index 61527cc8..14c506d4 100644 --- a/debian/control +++ b/debian/control @@ -7,7 +7,7 @@ Uploaders: Build-Depends: debhelper (>= 10), dh-python, - openstack-pkg-tools (>= 66~), + openstack-pkg-tools (>= 70~), python3-all, python3-pbr, python3-setuptools, diff --git a/debian/gnocchi-api.templates b/debian/gnocchi-api.templates deleted file mode 100644 index 5209747f..00000000 --- a/debian/gnocchi-api.templates +++ /dev/null @@ -1,63 +0,0 @@ -# These templates have been reviewed by the debian-l10n-english -# team -# -# If modifications/additions/rewording are needed, please ask -# debian-l10n-english@lists.debian.org for advice. -# -# Even minor modifications require translation updates and such -# changes should be coordinated with translators and reviewers. - -Template: gnocchi/register-endpoint -Type: boolean -Default: false -_Description: Register Gnocchi in the Keystone endpoint catalog? - Each OpenStack service (each API) should be registered in order to be - accessible. This is done using "keystone service-create" and "keystone - endpoint-create". This can be done automatically now. - . - Note that you will need to have an up and running Keystone server on which to - connect using a known admin project name, admin username and password. The - admin auth token is not used anymore. 
- -Template: gnocchi/keystone-ip -Type: string -_Description: Keystone server IP address: - Please enter the IP address of the Keystone server, so that gnocchi-api can - contact Keystone to do the Gnocchi service and endpoint creation. - -Template: gnocchi/keystone-admin-name -Type: string -Default: admin -_Description: Keystone admin name: - To register the service endpoint, this package needs to know the Admin login, - name, project name, and password to the Keystone server. - -Template: gnocchi/keystone-project-name -Type: string -Default: admin -_Description: Keystone admin project name: - To register the service endpoint, this package needs to know the Admin login, - name, project name, and password to the Keystone server. - -Template: gnocchi/keystone-admin-password -Type: password -_Description: Keystone admin password: - To register the service endpoint, this package needs to know the Admin login, - name, project name, and password to the Keystone server. - -Template: gnocchi/endpoint-ip -Type: string -_Description: Gnocchi endpoint IP address: - Please enter the IP address that will be used to contact Gnocchi. - . - This IP address should be accessible from the clients that will use this - service, so if you are installing a public cloud, this should be a public - IP address. - -Template: gnocchi/region-name -Type: string -Default: regionOne -_Description: Name of the region to register: - OpenStack supports using availability zones, with each region representing - a location. Please enter the zone that you wish to use when registering the - endpoint. diff --git a/debian/gnocchi-common.postrm b/debian/gnocchi-common.postrm deleted file mode 100644 index 531cc6de..00000000 --- a/debian/gnocchi-common.postrm +++ /dev/null @@ -1,30 +0,0 @@ -#!/bin/sh - -set -e - -if [ "$1" = "purge" ] ; then - if [ -f /usr/share/debconf/confmodule ] ; then - . 
/usr/share/debconf/confmodule - - db_get gnocchi/configure_db || true - if [ "$RET" = "true" ]; then - if [ -f /usr/share/dbconfig-common/dpkg/postrm ]; then - . /usr/share/dbconfig-common/dpkg/postrm - dbc_go gnocchi-common $@ - else - rm -f /etc/dbconfig-common/gnocchi-common.conf - if which ucf >/dev/null 2>&1; then - ucf --purge /etc/dbconfig-common/gnocchi-common.conf - ucfr --purge gnocchi-common /etc/dbconfig-common/gnocchi-common.conf - fi - fi - fi - fi - - rm -fr /etc/gnocchi - rm -rf /var/lib/gnocchi /var/log/gnocchi -fi - -#DEBHELPER# - -exit 0 diff --git a/debian/gnocchi-common.postrm.in b/debian/gnocchi-common.postrm.in new file mode 100644 index 00000000..4ff2cf2d --- /dev/null +++ b/debian/gnocchi-common.postrm.in @@ -0,0 +1,16 @@ +#!/bin/sh + +set -e + +#PKGOS-INCLUDE# + +if [ "$1" = "purge" ] ; then + pkgos_dbc_postrm gnocchi gnocchi-common + + rm -fr /etc/gnocchi + rm -rf /var/lib/gnocchi /var/log/gnocchi +fi + +#DEBHELPER# + +exit 0 diff --git a/debian/gnocchi-common.prerm b/debian/gnocchi-common.prerm new file mode 100644 index 00000000..8012850b --- /dev/null +++ b/debian/gnocchi-common.prerm @@ -0,0 +1,17 @@ +#!/bin/sh + +set -e + +if [ "${1}" = "remove" ] && [ -r /usr/share/debconf/confmodule ] && [ -r /usr/share/dbconfig-common/dpkg/prerm ] ; then + . /usr/share/debconf/confmodule + + db_get gnocchi/configure_db + if [ "$RET" = "true" ]; then + . /usr/share/dbconfig-common/dpkg/prerm + dbc_go ${DPKG_MAINTSCRIPT_PACKAGE} $@ + fi +fi + +#DEBHELPER# + +exit 0 diff --git a/debian/gnocchi-common.templates b/debian/gnocchi-common.templates deleted file mode 100644 index 1c2bdbb1..00000000 --- a/debian/gnocchi-common.templates +++ /dev/null @@ -1,57 +0,0 @@ -# These templates have been reviewed by the debian-l10n-english -# team -# -# If modifications/additions/rewording are needed, please ask -# debian-l10n-english@lists.debian.org for advice. 
-# -# Even minor modifications require translation updates and such -# changes should be coordinated with translators and reviewers. - -Template: gnocchi/auth-host -Type: string -Default: 127.0.0.1 -_Description: Authentication server hostname: - Please specify the hostname of the authentication server for Gnocchi. Typically - this is also the hostname of the OpenStack Identity Service (Keystone). - -Template: gnocchi/admin-tenant-name -Type: string -Default: admin -# Translators: a "tenant" in OpenStack world is -# an entity that contains one or more username/password couples. -# It's typically the tenant that will be used for billing. Having more than one -# username/password is very helpful in larger organization. -# You're advised to either keep "tenant" without translating it -# or keep it parenthezised. Example for French: -# locataire ("tenant") -_Description: Authentication server tenant name: - Please specify the authentication server tenant name. - -Template: gnocchi/admin-user -Type: string -Default: admin -_Description: Authentication server username: - Please specify the username to use with the authentication server. - -Template: gnocchi/admin-password -Type: password -_Description: Authentication server password: - Please specify the password to use with the authentication server. - -Template: gnocchi/configure_db -Type: boolean -Default: false -_Description: Set up a database for Gnocchi? - No database has been set up for Gnocchi to use. Before - continuing, you should make sure you have the following information: - . - * the type of database that you want to use; - * the database server hostname (that server must allow TCP connections from this - machine); - * a username and password to access the database. - . - If some of these requirements are missing, do not choose this option and run with - regular SQLite support. - . - You can change this setting later on by running "dpkg-reconfigure -plow - gnocchi-common". 
diff --git a/debian/po/POTFILES.in b/debian/po/POTFILES.in deleted file mode 100644 index d94abc48..00000000 --- a/debian/po/POTFILES.in +++ /dev/null @@ -1,2 +0,0 @@ -[type: gettext/rfc822deb] gnocchi-common.templates -[type: gettext/rfc822deb] gnocchi-api.templates diff --git a/debian/po/cs.po b/debian/po/cs.po deleted file mode 100644 index 98bdfd6d..00000000 --- a/debian/po/cs.po +++ /dev/null @@ -1,314 +0,0 @@ -# Czech PO debconf template translation of glance. -# Copyright (C) 2012 Michal Simunek -# This file is distributed under the same license as the glance package. -# Michal Simunek , 2012 - 2013. -# -msgid "" -msgstr "" -"Project-Id-Version: glance 2013.1.2-4\n" -"Report-Msgid-Bugs-To: gnocchi@packages.debian.org\n" -"POT-Creation-Date: 2016-03-29 13:10+0000\n" -"PO-Revision-Date: 2013-08-25 13:01+0200\n" -"Last-Translator: Michal Simunek \n" -"Language-Team: Czech \n" -"Language: cs\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=utf-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Type: string -#. Description -#: ../gnocchi-common.templates:2001 -#, fuzzy -#| msgid "Auth server hostname:" -msgid "Authentication server hostname:" -msgstr "Název hostitele autentizačního serveru:" - -#. Type: string -#. Description -#: ../gnocchi-common.templates:2001 -msgid "" -"Please specify the hostname of the authentication server for Gnocchi. " -"Typically this is also the hostname of the OpenStack Identity Service " -"(Keystone)." -msgstr "" -"Zadejte prosím URL autentizačního serveru pro Gnocchi. Většinou je to také " -"URL OpenStack Identity Service (Keystone)." - -#. Type: string -#. Description -#. Translators: a "tenant" in OpenStack world is -#. an entity that contains one or more username/password couples. -#. It's typically the tenant that will be used for billing. Having more than one -#. username/password is very helpful in larger organization. -#. You're advised to either keep "tenant" without translating it -#. or keep it parenthezised. 
Example for French: -#. locataire ("tenant") -#: ../gnocchi-common.templates:3001 -#, fuzzy -#| msgid "Auth server tenant name:" -msgid "Authentication server tenant name:" -msgstr "Název nájemce pro autentizační server:" - -#. Type: string -#. Description -#. Translators: a "tenant" in OpenStack world is -#. an entity that contains one or more username/password couples. -#. It's typically the tenant that will be used for billing. Having more than one -#. username/password is very helpful in larger organization. -#. You're advised to either keep "tenant" without translating it -#. or keep it parenthezised. Example for French: -#. locataire ("tenant") -#: ../gnocchi-common.templates:3001 -msgid "Please specify the authentication server tenant name." -msgstr "" - -#. Type: string -#. Description -#: ../gnocchi-common.templates:4001 -#, fuzzy -#| msgid "Auth server username:" -msgid "Authentication server username:" -msgstr "Uživatel autentizačního serveru:" - -#. Type: string -#. Description -#: ../gnocchi-common.templates:4001 -msgid "Please specify the username to use with the authentication server." -msgstr "" - -#. Type: password -#. Description -#: ../gnocchi-common.templates:5001 -#, fuzzy -#| msgid "Auth server password:" -msgid "Authentication server password:" -msgstr "Heslo autentizačního serveru:" - -#. Type: password -#. Description -#: ../gnocchi-common.templates:5001 -msgid "Please specify the password to use with the authentication server." -msgstr "" - -#. Type: boolean -#. Description -#: ../gnocchi-common.templates:6001 -#, fuzzy -#| msgid "Set up a database for glance?" -msgid "Set up a database for Gnocchi?" -msgstr "Nastavit databázi pro glance?" - -#. Type: boolean -#. Description -#: ../gnocchi-common.templates:6001 -#, fuzzy -#| msgid "" -#| "No database has been set up for glance-registry or glance-api to use. " -#| "Before continuing, you should make sure you have:" -msgid "" -"No database has been set up for Gnocchi to use. 
Before continuing, you " -"should make sure you have the following information:" -msgstr "" -"glance-registry, nebo glance-api, nemá nastavenu žádnou databázi k " -"používání. Před tím, než budete pokračovat se ujistěte že máte:" - -#. Type: boolean -#. Description -#: ../gnocchi-common.templates:6001 -#, fuzzy -#| msgid "" -#| " - the server host name (that server must allow TCP connections from " -#| "this\n" -#| " machine);\n" -#| " - a username and password to access the database.\n" -#| " - A database type that you want to use." -msgid "" -" * the type of database that you want to use;\n" -" * the database server hostname (that server must allow TCP connections from " -"this\n" -" machine);\n" -" * a username and password to access the database." -msgstr "" -" - název hostitelského serveru (tento server musí přijímat TCP spojení\n" -" z tohoto počítače);\n" -" - uživatelské jméno a heslo pro přístup k databázi.\n" -" - Typ databáze, kterou chcete používat." - -#. Type: boolean -#. Description -#: ../gnocchi-common.templates:6001 -#, fuzzy -#| msgid "" -#| "If some of these requirements are missing, reject this option and run " -#| "with regular sqlite support." -msgid "" -"If some of these requirements are missing, do not choose this option and run " -"with regular SQLite support." -msgstr "" -"Pokud některou z těchto povinných voleb neznáte, přeskočte ji a glance " -"spouštějte s běžnou podporou sqlite." - -#. Type: boolean -#. Description -#: ../gnocchi-common.templates:6001 -#, fuzzy -#| msgid "" -#| "You can change this setting later on by running 'dpkg-reconfigure -plow " -#| "glance-common'." -msgid "" -"You can change this setting later on by running \"dpkg-reconfigure -plow " -"gnocchi-common\"." -msgstr "" -"Toto nastavení můžete později změnit spuštěním 'dpkg-reconfigure -plow " -"glance-common'." - -#. Type: boolean -#. Description -#: ../gnocchi-api.templates:2001 -msgid "Register Gnocchi in the Keystone endpoint catalog?" 
-msgstr "Zaregistrovat Gnocchi v katalogu koncových bodů keystone?" - -#. Type: boolean -#. Description -#: ../gnocchi-api.templates:2001 -#, fuzzy -#| msgid "" -#| "Each Openstack services (each API) should be registered in order to be " -#| "accessible. This is done using \"keystone service-create\" and \"keystone " -#| "endpoint-create\". Select if you want to run these commands now." -msgid "" -"Each OpenStack service (each API) should be registered in order to be " -"accessible. This is done using \"keystone service-create\" and \"keystone " -"endpoint-create\". This can be done automatically now." -msgstr "" -"Aby byla každá služba Openstack (každé API) přístupná, musí být " -"zaregistrována. To se provádí pomocí příkazů \"keystone service-create\" a " -"\"keystone endpoint-create\". Zvolte si, zda-li se tyto příkazy mají nyní " -"spustit." - -#. Type: boolean -#. Description -#: ../gnocchi-api.templates:2001 -#, fuzzy -#| msgid "" -#| "Note that you will need to have an up and running keystone server on " -#| "which to connect using the Keystone auth token." -msgid "" -"Note that you will need to have an up and running Keystone server on which " -"to connect using a known admin project name, admin username and password. " -"The admin auth token is not used anymore." -msgstr "" -"Berte na vědomí, že musíte mít běžící server keystone, na který se lze " -"připojit pomocí ověřovacího klíče pro Keystone." - -#. Type: string -#. Description -#: ../gnocchi-api.templates:3001 -#, fuzzy -#| msgid "Keystone IP address:" -msgid "Keystone server IP address:" -msgstr "IP adresa serveru keystone:" - -#. Type: string -#. Description -#: ../gnocchi-api.templates:3001 -msgid "" -"Please enter the IP address of the Keystone server, so that gnocchi-api can " -"contact Keystone to do the Gnocchi service and endpoint creation." -msgstr "" -"Zadejte IP adresu serveru keystone, aby se mohlo glance-api spojit s " -"Keystone a provozovat službu Gnocchi a vytvářet koncové body." 
- -#. Type: string -#. Description -#: ../gnocchi-api.templates:4001 -#, fuzzy -#| msgid "Keystone Auth Token:" -msgid "Keystone admin name:" -msgstr "Autentizační klíč pro Keystone:" - -#. Type: string -#. Description -#. Type: string -#. Description -#. Type: password -#. Description -#: ../gnocchi-api.templates:4001 ../gnocchi-api.templates:5001 -#: ../gnocchi-api.templates:6001 -msgid "" -"To register the service endpoint, this package needs to know the Admin " -"login, name, project name, and password to the Keystone server." -msgstr "" - -#. Type: string -#. Description -#: ../gnocchi-api.templates:5001 -msgid "Keystone admin project name:" -msgstr "" - -#. Type: password -#. Description -#: ../gnocchi-api.templates:6001 -msgid "Keystone admin password:" -msgstr "" - -#. Type: string -#. Description -#: ../gnocchi-api.templates:7001 -msgid "Gnocchi endpoint IP address:" -msgstr "IP adresa koncového bodu Gnocchi:" - -#. Type: string -#. Description -#: ../gnocchi-api.templates:7001 -msgid "Please enter the IP address that will be used to contact Gnocchi." -msgstr "" -"Zadejte IP adresu, která se bude používat ke spojení s Gnocchi (např: IP " -"adresa koncového bodu Gnocchi)." - -#. Type: string -#. Description -#: ../gnocchi-api.templates:7001 -msgid "" -"This IP address should be accessible from the clients that will use this " -"service, so if you are installing a public cloud, this should be a public IP " -"address." -msgstr "" -"Tato IP adresa musí být přístupná z klientů, kteří budou tuto službu " -"používat, takže pokud instalujete veřejný cloud, musí to být veřejná IP " -"adresa." - -#. Type: string -#. Description -#: ../gnocchi-api.templates:8001 -msgid "Name of the region to register:" -msgstr "Název registrované oblasti:" - -#. Type: string -#. Description -#: ../gnocchi-api.templates:8001 -#, fuzzy -#| msgid "" -#| "Openstack can be used using availability zones, with each region " -#| "representing a location. 
Please enter the zone that you wish to use when " -#| "registering the endpoint." -msgid "" -"OpenStack supports using availability zones, with each region representing a " -"location. Please enter the zone that you wish to use when registering the " -"endpoint." -msgstr "" -"Openstack lze využívat pomocí oblastí dostupnosti, přičemž každá oblast " -"představuje místo. Zadejte prosím oblast, kterou chcete použít při " -"registraci koncového bodu." - -#, fuzzy -#~| msgid "" -#~| "To configure its endpoint in Keystone, glance-api needs the Keystone " -#~| "auth token." -#~ msgid "" -#~ "To configure its endpoint in Keystone, gnocchi-api needs the Keystone " -#~ "authentication token." -#~ msgstr "" -#~ "Aby mohlo glance-api nastavit v Keystone svůj koncový bod, potřebuje " -#~ "autentizační klíč pro Keystone." diff --git a/debian/po/da.po b/debian/po/da.po deleted file mode 100644 index 3aa6861e..00000000 --- a/debian/po/da.po +++ /dev/null @@ -1,259 +0,0 @@ -# Danish translation gnocchi. -# Copyright (C) 2016 gnocchi & nedenstående oversættere. -# This file is distributed under the same license as the gnocchi package. -# Joe Hansen (joedalton2@yahoo.dk), 2012, 2013, 2014, 2016. -# -msgid "" -msgstr "" -"Project-Id-Version: gnocchi\n" -"Report-Msgid-Bugs-To: gnocchi@packages.debian.org\n" -"POT-Creation-Date: 2016-03-29 13:10+0000\n" -"PO-Revision-Date: 2014-02-22 12:42+0000\n" -"Last-Translator: Joe Hansen \n" -"Language-Team: Danish \n" -"Language: da\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Type: string -#. Description -#: ../gnocchi-common.templates:2001 -msgid "Authentication server hostname:" -msgstr "Værtsnavn for godkendelsesserver:" - -#. Type: string -#. Description -#: ../gnocchi-common.templates:2001 -msgid "" -"Please specify the hostname of the authentication server for Gnocchi. " -"Typically this is also the hostname of the OpenStack Identity Service " -"(Keystone)." 
-msgstr "" -"Angiv venligst adressen for din godkendelsesserver for Gnocchi. Typisk er " -"dette også adressen for din OpenStack Identity Service (Keystone)." - -#. Type: string -#. Description -#. Translators: a "tenant" in OpenStack world is -#. an entity that contains one or more username/password couples. -#. It's typically the tenant that will be used for billing. Having more than one -#. username/password is very helpful in larger organization. -#. You're advised to either keep "tenant" without translating it -#. or keep it parenthezised. Example for French: -#. locataire ("tenant") -#: ../gnocchi-common.templates:3001 -msgid "Authentication server tenant name:" -msgstr "Lejenavn (tenant) for godkendelsesserver:" - -#. Type: string -#. Description -#. Translators: a "tenant" in OpenStack world is -#. an entity that contains one or more username/password couples. -#. It's typically the tenant that will be used for billing. Having more than one -#. username/password is very helpful in larger organization. -#. You're advised to either keep "tenant" without translating it -#. or keep it parenthezised. Example for French: -#. locataire ("tenant") -#: ../gnocchi-common.templates:3001 -msgid "Please specify the authentication server tenant name." -msgstr "Angiv venligst lejenavn (tenant) for godkendelsesserveren." - -#. Type: string -#. Description -#: ../gnocchi-common.templates:4001 -msgid "Authentication server username:" -msgstr "Brugernavn for godkendelsesserver:" - -#. Type: string -#. Description -#: ../gnocchi-common.templates:4001 -msgid "Please specify the username to use with the authentication server." -msgstr "Angiv venligst brugernavnet der skal bruges med godkendelsesserveren." - -#. Type: password -#. Description -#: ../gnocchi-common.templates:5001 -msgid "Authentication server password:" -msgstr "Adgangskode for godkendelsesserver:" - -#. Type: password -#. 
Description -#: ../gnocchi-common.templates:5001 -msgid "Please specify the password to use with the authentication server." -msgstr "Angiv venligst adgangskoden der skal bruges med godkendelsesserveren." - -#. Type: boolean -#. Description -#: ../gnocchi-common.templates:6001 -msgid "Set up a database for Gnocchi?" -msgstr "Opsæt en database for Gnocchi?" - -#. Type: boolean -#. Description -#: ../gnocchi-common.templates:6001 -msgid "" -"No database has been set up for Gnocchi to use. Before continuing, you " -"should make sure you have the following information:" -msgstr "" -"Ingen database er blevet opsat som Gnocchi kan bruge. Før du fortsætter, " -"skal du sikre dig, at du har den følgende information:" - -#. Type: boolean -#. Description -#: ../gnocchi-common.templates:6001 -msgid "" -" * the type of database that you want to use;\n" -" * the database server hostname (that server must allow TCP connections from " -"this\n" -" machine);\n" -" * a username and password to access the database." -msgstr "" -" * databasetypen som du ønsker at bruge\n" -" * serverens værtsnavn (denne server skal tillade TCP-forbindelser\n" -" fra denne maskine)\n" -" * et brugernavn og adgangskode til at tilgå databasen" - -#. Type: boolean -#. Description -#: ../gnocchi-common.templates:6001 -msgid "" -"If some of these requirements are missing, do not choose this option and run " -"with regular SQLite support." -msgstr "" -"Hvis nogle af disse krav mangler så afvis denne indstilling og kør med " -"normal SQLite-understøttelse." - -#. Type: boolean -#. Description -#: ../gnocchi-common.templates:6001 -msgid "" -"You can change this setting later on by running \"dpkg-reconfigure -plow " -"gnocchi-common\"." -msgstr "" -"Du kan ændre denne indstilling senere ved at køre »dpkg-reconfigure -plow " -"gnocchi-common«." - -#. Type: boolean -#. Description -#: ../gnocchi-api.templates:2001 -msgid "Register Gnocchi in the Keystone endpoint catalog?" 
-msgstr "Registrer Gnocchi i Keystones slutpunktskatalog?" - -#. Type: boolean -#. Description -#: ../gnocchi-api.templates:2001 -msgid "" -"Each OpenStack service (each API) should be registered in order to be " -"accessible. This is done using \"keystone service-create\" and \"keystone " -"endpoint-create\". This can be done automatically now." -msgstr "" -"Hver Openstacktjeneste (hver API) skal være registreret for at kunne tilgås. " -"Dette gøres med »keystone service-create« og »keystone endpoint-create«. " -"Dette kan gøres automatiks nu." - -#. Type: boolean -#. Description -#: ../gnocchi-api.templates:2001 -msgid "" -"Note that you will need to have an up and running Keystone server on which " -"to connect using a known admin project name, admin username and password. " -"The admin auth token is not used anymore." -msgstr "" -"Bemærk at du skal have en op og kørende Keystoneserver, som du skal forbinde " -"til via et kendt administratorprojektnavn, administratorbrugernavn og " -"adgangskode. Administratorens godkendelsessymbol bruges ikke længere." - -#. Type: string -#. Description -#: ../gnocchi-api.templates:3001 -msgid "Keystone server IP address:" -msgstr "IP-adresse for Keystoneserver:" - -#. Type: string -#. Description -#: ../gnocchi-api.templates:3001 -msgid "" -"Please enter the IP address of the Keystone server, so that gnocchi-api can " -"contact Keystone to do the Gnocchi service and endpoint creation." -msgstr "" -"Indtast venligst IP-adressen for Keystoneserveren, så at glance-api kan " -"kontakte Keystone for at udføre Gnocchitjenesten og slutpunktsoprettelse." - -#. Type: string -#. Description -#: ../gnocchi-api.templates:4001 -msgid "Keystone admin name:" -msgstr "Administratornavn for Keystone:" - -#. Type: string -#. Description -#. Type: string -#. Description -#. Type: password -#. 
Description -#: ../gnocchi-api.templates:4001 ../gnocchi-api.templates:5001 -#: ../gnocchi-api.templates:6001 -msgid "" -"To register the service endpoint, this package needs to know the Admin " -"login, name, project name, and password to the Keystone server." -msgstr "" -"For at registrere tjenesteslutpunkt skal denne pakke kende til " -"administratorlogind'et, navn, projektnavn og adgangskode for " -"Keystoneserveren." - -#. Type: string -#. Description -#: ../gnocchi-api.templates:5001 -msgid "Keystone admin project name:" -msgstr "Projektnavn for Keystoneadministratoren:" - -#. Type: password -#. Description -#: ../gnocchi-api.templates:6001 -msgid "Keystone admin password:" -msgstr "Adgangskode for Keystoneadministratoren:" - -#. Type: string -#. Description -#: ../gnocchi-api.templates:7001 -msgid "Gnocchi endpoint IP address:" -msgstr "IP-adresse for Gnochis slutpunkt:" - -#. Type: string -#. Description -#: ../gnocchi-api.templates:7001 -msgid "Please enter the IP address that will be used to contact Gnocchi." -msgstr "" -"Indtast venligst IP-adressen som vil blive brugt til at kontakte Gnocchi." - -#. Type: string -#. Description -#: ../gnocchi-api.templates:7001 -msgid "" -"This IP address should be accessible from the clients that will use this " -"service, so if you are installing a public cloud, this should be a public IP " -"address." -msgstr "" -"Denne IP-adresse skal være tilgængelig fra klienterne, som vil bruge denne " -"tjeneste, så hvis du installerer en offentlig sky, skal dette være en " -"offentlig IP-adresse." - -#. Type: string -#. Description -#: ../gnocchi-api.templates:8001 -msgid "Name of the region to register:" -msgstr "Navn på regionen der skal registreres:" - -#. Type: string -#. Description -#: ../gnocchi-api.templates:8001 -msgid "" -"OpenStack supports using availability zones, with each region representing a " -"location. Please enter the zone that you wish to use when registering the " -"endpoint." 
-msgstr "" -"OpenStack understøtter brug af tilgængelighedszoner, hvor hver region " -"repræsenterer et sted. Indtast venligst zonen du ønsker at bruge, når " -"slutpunktet registreres." - diff --git a/debian/po/de.po b/debian/po/de.po deleted file mode 100644 index 77e37f0d..00000000 --- a/debian/po/de.po +++ /dev/null @@ -1,268 +0,0 @@ -# German debconf translation of gnocchi. -# This file is distributed under the same license as the gnocchi package. -# Copyright (C) 2010 United States Government,2010-2011 OpenStack LLC. -# Copyright (C) of this file 2012-2016 Chris Leick . -# -msgid "" -msgstr "" -"Project-Id-Version: gnocchi 3.0.0-2\n" -"Report-Msgid-Bugs-To: gnocchi@packages.debian.org\n" -"POT-Creation-Date: 2016-03-29 13:10+0000\n" -"PO-Revision-Date: 2016-10-29 18:15+0100\n" -"Last-Translator: Chris Leick \n" -"Language-Team: German \n" -"Language: de\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Type: string -#. Description -#: ../gnocchi-common.templates:2001 -msgid "Authentication server hostname:" -msgstr "Rechnername des Authentifizierungsservers:" - -#. Type: string -#. Description -#: ../gnocchi-common.templates:2001 -msgid "" -"Please specify the hostname of the authentication server for Gnocchi. " -"Typically this is also the hostname of the OpenStack Identity Service " -"(Keystone)." -msgstr "" -"Bitte geben Sie den Rechnernamen des Gnocci-Authentifizierungsservers an. " -"Typischerweise ist das gleichzeitig der Rechnername Ihres OpenStack-" -"Identitätsdienstes (Keystone)." - -#. Type: string -#. Description -#. Translators: a "tenant" in OpenStack world is -#. an entity that contains one or more username/password couples. -#. It's typically the tenant that will be used for billing. Having more than one -#. username/password is very helpful in larger organization. -#. You're advised to either keep "tenant" without translating it -#. or keep it parenthezised. 
Example for French: -#. locataire ("tenant") -#: ../gnocchi-common.templates:3001 -msgid "Authentication server tenant name:" -msgstr "Tenant-Name des Authentifizierungsservers:" - -#. Type: string -#. Description -#. Translators: a "tenant" in OpenStack world is -#. an entity that contains one or more username/password couples. -#. It's typically the tenant that will be used for billing. Having more than one -#. username/password is very helpful in larger organization. -#. You're advised to either keep "tenant" without translating it -#. or keep it parenthezised. Example for French: -#. locataire ("tenant") -#: ../gnocchi-common.templates:3001 -msgid "Please specify the authentication server tenant name." -msgstr "Bitte geben Sie den Tenant-Namen des Authentifizierungsservers an." - -#. Type: string -#. Description -#: ../gnocchi-common.templates:4001 -msgid "Authentication server username:" -msgstr "Benutzername des Authentifizierungsservers:" - -#. Type: string -#. Description -#: ../gnocchi-common.templates:4001 -msgid "Please specify the username to use with the authentication server." -msgstr "" -"Bitte geben Sie den Benutzernamen an, der für den Authentifizierungsserver " -"benutzt wird." - -#. Type: password -#. Description -#: ../gnocchi-common.templates:5001 -msgid "Authentication server password:" -msgstr "Passwort des Authentifizierungsservers:" - -#. Type: password -#. Description -#: ../gnocchi-common.templates:5001 -msgid "Please specify the password to use with the authentication server." -msgstr "" -"Bitte geben Sie das Passwort an, der für den Authentifizierungsserver " -"benutzt wird." - -#. Type: boolean -#. Description -#: ../gnocchi-common.templates:6001 -msgid "Set up a database for Gnocchi?" -msgstr "Eine Datenbank für Gnocci einrichten?" - -#. Type: boolean -#. Description -#: ../gnocchi-common.templates:6001 -msgid "" -"No database has been set up for Gnocchi to use. 
Before continuing, you " -"should make sure you have the following information:" -msgstr "" -"Es wurde keine Datenbank für die Benutzung mit Gnocci eingerichtet. Bevor Sie " -"fortfahren, sollten Sie sicherstellen, dass Sie die folgenden Informationen " -"haben:" - -#. Type: boolean -#. Description -#: ../gnocchi-common.templates:6001 -msgid "" -" * the type of database that you want to use;\n" -" * the database server hostname (that server must allow TCP connections from " -"this\n" -" machine);\n" -" * a username and password to access the database." -msgstr "" -" * einen Datenbanktyp, den Sie verwenden möchten\n" -" * den Rechnernamen des Datenbankservers (dieser Server muss TCP-" -"Verbindungen\n" -" von diesem Rechner erlauben)\n" -" * einen Benutzernamen und ein Passwort, um auf die Datenbank zuzugreifen" - -#. Type: boolean -#. Description -#: ../gnocchi-common.templates:6001 -msgid "" -"If some of these requirements are missing, do not choose this option and run " -"with regular SQLite support." -msgstr "" -"Falls einige dieser Anforderungen nicht erfüllt sind, wählen Sie diese " -"Option nicht und verwenden Sie stattdessen die reguläre Sqlite-Unterstützung." - -#. Type: boolean -#. Description -#: ../gnocchi-common.templates:6001 -msgid "" -"You can change this setting later on by running \"dpkg-reconfigure -plow " -"gnocchi-common\"." -msgstr "" -"Sie können diese Einstellung später ändern, indem Sie »dpkg-reconfigure -" -"plow gnocci-common« ausführen." - -#. Type: boolean -#. Description -#: ../gnocchi-api.templates:2001 -msgid "Register Gnocchi in the Keystone endpoint catalog?" -msgstr "Gnocci im Keystone-Endpunktkatalog registrieren?" - -#. Type: boolean -#. Description -#: ../gnocchi-api.templates:2001 -msgid "" -"Each OpenStack service (each API) should be registered in order to be " -"accessible. This is done using \"keystone service-create\" and \"keystone " -"endpoint-create\". This can be done automatically now." 
-msgstr "" -"Jeder OpenStack-Dienst (jedes API) sollte registriert werden, damit darauf " -"zugegriffen werden kann. Dies wird mittels »keystone service-create« und " -"»keystone endpoint-create« erreicht und kann nun automatisch erledigt werden." - -#. Type: boolean -#. Description -#: ../gnocchi-api.templates:2001 -msgid "" -"Note that you will need to have an up and running Keystone server on which " -"to connect using a known admin project name, admin username and password. " -"The admin auth token is not used anymore." -msgstr "" -"Beachten Sie, dass Sie einen konfigurierten und laufenden Keystone-Server " -"haben müssen, mit dem Sie sich anhand eines bekannten " -"Administratorprojektnamens, Administratorbenutzernamens und Passworts " -"verbinden. Das Administratorauthentifizierungs-Token wird nicht mehr benutzt." - -#. Type: string -#. Description -#: ../gnocchi-api.templates:3001 -msgid "Keystone server IP address:" -msgstr "IP-Adresse des Keystone-Servers:" - -#. Type: string -#. Description -#: ../gnocchi-api.templates:3001 -msgid "" -"Please enter the IP address of the Keystone server, so that gnocchi-api can " -"contact Keystone to do the Gnocchi service and endpoint creation." -msgstr "" -"Bitte geben Sie die IP-Adresse des Keystone-Servers an, so dass Gnocci-API " -"Keystone kontaktieren kann, um den Gnocci-Dienst und den Endpunkt zu " -"erstellen." - -#. Type: string -#. Description -#: ../gnocchi-api.templates:4001 -msgid "Keystone admin name:" -msgstr "Keystone-Administratorname:" - -#. Type: string -#. Description -#. Type: string -#. Description -#. Type: password -#. Description -#: ../gnocchi-api.templates:4001 ../gnocchi-api.templates:5001 -#: ../gnocchi-api.templates:6001 -msgid "" -"To register the service endpoint, this package needs to know the Admin " -"login, name, project name, and password to the Keystone server." 
-msgstr "" -"Um den Dienstendpunkt zu registrieren, muss dieses Paket den " -"Administratoranmeldenamen, Namen, Projektnamen und das Passwort für den " -"Keystone-Server kennen." - -#. Type: string -#. Description -#: ../gnocchi-api.templates:5001 -msgid "Keystone admin project name:" -msgstr "Keystone-Administratorprojektname:" - -#. Type: password -#. Description -#: ../gnocchi-api.templates:6001 -msgid "Keystone admin password:" -msgstr "Keystone-Administratorpasswort:" - -#. Type: string -#. Description -#: ../gnocchi-api.templates:7001 -msgid "Gnocchi endpoint IP address:" -msgstr "IP-Adresse des Gnocci-Endpunkts" - -#. Type: string -#. Description -#: ../gnocchi-api.templates:7001 -msgid "Please enter the IP address that will be used to contact Gnocchi." -msgstr "" -"Bitte geben Sie die IP-Adresse ein, die zum Kontaktieren von Gnocci benutzt " -"wird." - -#. Type: string -#. Description -#: ../gnocchi-api.templates:7001 -msgid "" -"This IP address should be accessible from the clients that will use this " -"service, so if you are installing a public cloud, this should be a public IP " -"address." -msgstr "" -"Auf diese IP-Adresse sollte von den Clients, die diesen Dienst verwenden, " -"zugegriffen werden können, daher sollte sie, falls Sie eine öffentliche " -"Cloud installieren, eine öffentliche IP-Adresse sein." - -#. Type: string -#. Description -#: ../gnocchi-api.templates:8001 -msgid "Name of the region to register:" -msgstr "Name der Region, die registriert wird:" - -#. Type: string -#. Description -#: ../gnocchi-api.templates:8001 -msgid "" -"OpenStack supports using availability zones, with each region representing a " -"location. Please enter the zone that you wish to use when registering the " -"endpoint." -msgstr "" -"OpenStack unterstützt die Verwendung von Verfügbarkeitszonen, bei der jede " -"Region einen Ort repräsentiert. Bitte geben Sie die Zone, die Sie benutzen " -"möchten, bei der Registrierung des Endpunkts an." 
diff --git a/debian/po/es.po b/debian/po/es.po deleted file mode 100644 index b99bcbaf..00000000 --- a/debian/po/es.po +++ /dev/null @@ -1,311 +0,0 @@ -# glance po-debconf translation to Spanish -# Copyright (C) 2010 Software in the Public Interest -# This file is distributed under the same license as the glance package. -# -# Changes: -# - Initial translation -# Camaleón , 2012, 2013. -# -# - Updates -# -# -# Traductores, si no conocen el formato PO, merece la pena leer la -# documentación de gettext, especialmente las secciones dedicadas a este -# formato, por ejemplo ejecutando: -# info -n '(gettext)PO Files' -# info -n '(gettext)Header Entry' -# -# Equipo de traducción al español, por favor lean antes de traducir -# los siguientes documentos: -# -# - El proyecto de traducción de Debian al español -# http://www.debian.org/intl/spanish/ -# especialmente las notas y normas de traducción en -# http://www.debian.org/intl/spanish/notas -# -# - La guía de traducción de po's de debconf: -# /usr/share/doc/po-debconf/README-trans -# o http://www.debian.org/intl/l10n/po-debconf/README-trans -# -msgid "" -msgstr "" -"Project-Id-Version: glance 2012.1-3\n" -"Report-Msgid-Bugs-To: gnocchi@packages.debian.org\n" -"POT-Creation-Date: 2016-03-29 13:10+0000\n" -"PO-Revision-Date: 2013-10-19 11:01+0200\n" -"Last-Translator: Camaleón \n" -"Language-Team: Debian Spanish \n" -"Language: es\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" -"Plural-Forms: nplurals=2; plural=(n != 1);\n" -"X-Generator: Virtaal 0.7.1\n" - -#. Type: string -#. Description -#: ../gnocchi-common.templates:2001 -msgid "Authentication server hostname:" -msgstr "Nombre del equipo del servidor de autenticación:" - -#. Type: string -#. Description -#: ../gnocchi-common.templates:2001 -msgid "" -"Please specify the hostname of the authentication server for Gnocchi. " -"Typically this is also the hostname of the OpenStack Identity Service " -"(Keystone)." 
-msgstr "" -"Indique el nombre del equipo del servidor de autenticación de Gnocchi. Suele " -"ser el nombre del equipo del Servicio de Identidad de OpenStack (Keystone)." - -#. Type: string -#. Description -#. Translators: a "tenant" in OpenStack world is -#. an entity that contains one or more username/password couples. -#. It's typically the tenant that will be used for billing. Having more than one -#. username/password is very helpful in larger organization. -#. You're advised to either keep "tenant" without translating it -#. or keep it parenthezised. Example for French: -#. locataire ("tenant") -#: ../gnocchi-common.templates:3001 -msgid "Authentication server tenant name:" -msgstr "Nombre del inquilino («tenant») del servidor de autenticación:" - -#. Type: string -#. Description -#. Translators: a "tenant" in OpenStack world is -#. an entity that contains one or more username/password couples. -#. It's typically the tenant that will be used for billing. Having more than one -#. username/password is very helpful in larger organization. -#. You're advised to either keep "tenant" without translating it -#. or keep it parenthezised. Example for French: -#. locataire ("tenant") -#: ../gnocchi-common.templates:3001 -msgid "Please specify the authentication server tenant name." -msgstr "" -"Indique el nombre del inquilino («tenant») del servidor de autenticación." - -#. Type: string -#. Description -#: ../gnocchi-common.templates:4001 -msgid "Authentication server username:" -msgstr "Nombre de usuario del servidor de autenticación:" - -#. Type: string -#. Description -#: ../gnocchi-common.templates:4001 -msgid "Please specify the username to use with the authentication server." -msgstr "" -"Indique el nombre de usuario para usar con el servidor de autenticación." - -#. Type: password -#. Description -#: ../gnocchi-common.templates:5001 -msgid "Authentication server password:" -msgstr "Contraseña del servidor de autenticación:" - -#. Type: password -#. 
Description -#: ../gnocchi-common.templates:5001 -msgid "Please specify the password to use with the authentication server." -msgstr "Indique la contraseña para usar con del servidor de autenticación." - -#. Type: boolean -#. Description -#: ../gnocchi-common.templates:6001 -msgid "Set up a database for Gnocchi?" -msgstr "¿Desea configurar una base de datos para Gnocchi?" - -#. Type: boolean -#. Description -#: ../gnocchi-common.templates:6001 -#, fuzzy -#| msgid "" -#| "No database has been set up for glance-registry or glance-api to use. " -#| "Before continuing, you should make sure you have the following " -#| "information:" -msgid "" -"No database has been set up for Gnocchi to use. Before continuing, you " -"should make sure you have the following information:" -msgstr "" -"No se ha configurado ninguna base de datos para glance-registry o glance-" -"api. Antes de continuar debe asegurarse de que dispone de los siguientes " -"datos:" - -#. Type: boolean -#. Description -#: ../gnocchi-common.templates:6001 -msgid "" -" * the type of database that you want to use;\n" -" * the database server hostname (that server must allow TCP connections from " -"this\n" -" machine);\n" -" * a username and password to access the database." -msgstr "" -" * el tipo de base de datos que quiere utilizar;\n" -" * el nombre del equipo del servidor de la base de datos (el servidor debe " -"permitir conexiones TCP desde este equipo).\n" -" * el nombre de usuario y la contraseña para acceder a la base de datos." - -#. Type: boolean -#. Description -#: ../gnocchi-common.templates:6001 -msgid "" -"If some of these requirements are missing, do not choose this option and run " -"with regular SQLite support." -msgstr "" -"Si no dispone de alguno de estos datos, seleccione «no» en este apartado y " -"ejecute Gnocchi con SQLite." - -#. Type: boolean -#. 
Description -#: ../gnocchi-common.templates:6001 -#, fuzzy -#| msgid "" -#| "You can change this setting later on by running \"dpkg-reconfigure -plow " -#| "glance-common\"." -msgid "" -"You can change this setting later on by running \"dpkg-reconfigure -plow " -"gnocchi-common\"." -msgstr "" -"Podrá cambiar esta configuración más adelante ejecutando «dpkg-reconfigure -" -"plow glance-common»." - -#. Type: boolean -#. Description -#: ../gnocchi-api.templates:2001 -msgid "Register Gnocchi in the Keystone endpoint catalog?" -msgstr "¿Desea registrar Gnocchi en el catálogo de puntos finales de Keystone?" - -#. Type: boolean -#. Description -#: ../gnocchi-api.templates:2001 -msgid "" -"Each OpenStack service (each API) should be registered in order to be " -"accessible. This is done using \"keystone service-create\" and \"keystone " -"endpoint-create\". This can be done automatically now." -msgstr "" -"Debe registrar cada uno de los servicios OpenStack (cada API) para que sean " -"accesibles. Esto se lleva a cabo mediante las órdenes «keystone service-" -"create» y «keystone endpoint-create». Elija si desea ejecutar estas órdenes " -"ahora." - -#. Type: boolean -#. Description -#: ../gnocchi-api.templates:2001 -#, fuzzy -#| msgid "" -#| "Note that you will need to have an up and running Keystone server on " -#| "which to connect using the Keystone authentication token." -msgid "" -"Note that you will need to have an up and running Keystone server on which " -"to connect using a known admin project name, admin username and password. " -"The admin auth token is not used anymore." -msgstr "" -"Tenga en cuenta que necesitará disponer de un servidor Keystone en ejecución " -"al que conectarse utilizando el token de autenticación de Keystone." - -#. Type: string -#. Description -#: ../gnocchi-api.templates:3001 -msgid "Keystone server IP address:" -msgstr "Dirección IP del servidor Keystone:" - -#. Type: string -#. 
Description -#: ../gnocchi-api.templates:3001 -msgid "" -"Please enter the IP address of the Keystone server, so that gnocchi-api can " -"contact Keystone to do the Gnocchi service and endpoint creation." -msgstr "" -"Introduzca la dirección IP del servidor Keystone para que glance-api pueda " -"contactar con Keystone para realizar el servicio Gnocchi y crear el punto de " -"cierre." - -#. Type: string -#. Description -#: ../gnocchi-api.templates:4001 -#, fuzzy -#| msgid "Keystone authentication token:" -msgid "Keystone admin name:" -msgstr "Token de autenticación de Keystone:" - -#. Type: string -#. Description -#. Type: string -#. Description -#. Type: password -#. Description -#: ../gnocchi-api.templates:4001 ../gnocchi-api.templates:5001 -#: ../gnocchi-api.templates:6001 -msgid "" -"To register the service endpoint, this package needs to know the Admin " -"login, name, project name, and password to the Keystone server." -msgstr "" - -#. Type: string -#. Description -#: ../gnocchi-api.templates:5001 -msgid "Keystone admin project name:" -msgstr "" - -#. Type: password -#. Description -#: ../gnocchi-api.templates:6001 -msgid "Keystone admin password:" -msgstr "" - -#. Type: string -#. Description -#: ../gnocchi-api.templates:7001 -msgid "Gnocchi endpoint IP address:" -msgstr "Dirección IP del punto de cierre de Gnocchi:" - -#. Type: string -#. Description -#: ../gnocchi-api.templates:7001 -msgid "Please enter the IP address that will be used to contact Gnocchi." -msgstr "" -"Introduzca la dirección IP que se utilizará para contactar con Gnocchi." - -#. Type: string -#. Description -#: ../gnocchi-api.templates:7001 -msgid "" -"This IP address should be accessible from the clients that will use this " -"service, so if you are installing a public cloud, this should be a public IP " -"address." 
-msgstr "" -"Esta dirección IP debe ser accesible desde los clientes que usarán este " -"servicio, por lo que si está instalando una nube pública, debería ser una " -"dirección IP pública." - -#. Type: string -#. Description -#: ../gnocchi-api.templates:8001 -msgid "Name of the region to register:" -msgstr "Nombre de la región a registrar:" - -#. Type: string -#. Description -#: ../gnocchi-api.templates:8001 -msgid "" -"OpenStack supports using availability zones, with each region representing a " -"location. Please enter the zone that you wish to use when registering the " -"endpoint." -msgstr "" -"OpenStack puede utilizarse con zonas de disponibilidad, donde cada región " -"representa una ubicación. Introduzca la zona que desea utilizar cuando \n" -"registre un punto de cierre." - -#, fuzzy -#~| msgid "" -#~| "To configure its endpoint in Keystone, glance-api needs the Keystone " -#~| "authentication token." -#~ msgid "" -#~ "To configure its endpoint in Keystone, gnocchi-api needs the Keystone " -#~ "authentication token." -#~ msgstr "" -#~ "Para configurar su punto final en Keystone, glance-api necesita el token " -#~ "de autenticación de Keystone." diff --git a/debian/po/fr.po b/debian/po/fr.po deleted file mode 100644 index 0a5bf470..00000000 --- a/debian/po/fr.po +++ /dev/null @@ -1,276 +0,0 @@ -# Translation of glance debconf templates to French. -# Copyright (C) 2013, French l10n team -# This file is distributed under the same license as the GLANCE package. -# Julien Patriarca , 2013. -# -msgid "" -msgstr "" -"Project-Id-Version: glance\n" -"Report-Msgid-Bugs-To: gnocchi@packages.debian.org\n" -"POT-Creation-Date: 2016-03-29 13:10+0000\n" -"PO-Revision-Date: 2013-10-26 18:35+0100\n" -"Last-Translator: Julien Patriarca \n" -"Language-Team: FRENCH \n" -"Language: fr\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" -"X-Generator: Poedit 1.5.4\n" - -#. Type: string -#. 
Description -#: ../gnocchi-common.templates:2001 -msgid "Authentication server hostname:" -msgstr "Nom d'hôte du serveur d'authentification." - -#. Type: string -#. Description -#: ../gnocchi-common.templates:2001 -msgid "" -"Please specify the hostname of the authentication server for Gnocchi. " -"Typically this is also the hostname of the OpenStack Identity Service " -"(Keystone)." -msgstr "" -"Veuillez indiquer le nom d'hôte du serveur d'authentification pour Gnocchi. " -"Typiquement c'est également le nom d'hôte du Service d'Identité OpenStack " -"(Keystone)." - -#. Type: string -#. Description -#. Translators: a "tenant" in OpenStack world is -#. an entity that contains one or more username/password couples. -#. It's typically the tenant that will be used for billing. Having more than one -#. username/password is very helpful in larger organization. -#. You're advised to either keep "tenant" without translating it -#. or keep it parenthezised. Example for French: -#. locataire ("tenant") -#: ../gnocchi-common.templates:3001 -msgid "Authentication server tenant name:" -msgstr "Nom d'espace client du serveur d'authentification :" - -#. Type: string -#. Description -#. Translators: a "tenant" in OpenStack world is -#. an entity that contains one or more username/password couples. -#. It's typically the tenant that will be used for billing. Having more than one -#. username/password is very helpful in larger organization. -#. You're advised to either keep "tenant" without translating it -#. or keep it parenthezised. Example for French: -#. locataire ("tenant") -#: ../gnocchi-common.templates:3001 -msgid "Please specify the authentication server tenant name." -msgstr "" -"Veuillez indiquer le nom de l'espace client du serveur d'authentification." - -#. Type: string -#. Description -#: ../gnocchi-common.templates:4001 -msgid "Authentication server username:" -msgstr "Nom d'utilisateur pour le serveur d'authentification :" - -#. Type: string -#. 
Description -#: ../gnocchi-common.templates:4001 -msgid "Please specify the username to use with the authentication server." -msgstr "" -"Veuillez indiquer le nom d'utilisateur à utiliser sur le serveur " -"d'authentification." - -#. Type: password -#. Description -#: ../gnocchi-common.templates:5001 -msgid "Authentication server password:" -msgstr "Mot de passe pour le serveur d'authentification :" - -#. Type: password -#. Description -#: ../gnocchi-common.templates:5001 -msgid "Please specify the password to use with the authentication server." -msgstr "" -"Veuillez indiquer le mot de passe à utiliser sur le serveur " -"d'authentification." - -#. Type: boolean -#. Description -#: ../gnocchi-common.templates:6001 -msgid "Set up a database for Gnocchi?" -msgstr "Installer une base de données pour Gnocchi ?" - -#. Type: boolean -#. Description -#: ../gnocchi-common.templates:6001 -msgid "" -"No database has been set up for Gnocchi to use. Before continuing, you " -"should make sure you have the following information:" -msgstr "" -"Aucune base de données n'a été installée pour Gnocchi. Avant de continuer, " -"assurez-vous d'avoir :" - -#. Type: boolean -#. Description -#: ../gnocchi-common.templates:6001 -msgid "" -" * the type of database that you want to use;\n" -" * the database server hostname (that server must allow TCP connections from " -"this\n" -" machine);\n" -" * a username and password to access the database." -msgstr "" -" - Le type de base de données que vous souhaitez utiliser ;\n" -" - le nom d'hôte du serveur de base de données (ce serveur\n" -" doit accepter les connexions TCP depuis cette machine);\n" -" - un nom d'utilisateur et un mot de passe pour accéder\n" -" à cette base de données." - -#. Type: boolean -#. Description -#: ../gnocchi-common.templates:6001 -msgid "" -"If some of these requirements are missing, do not choose this option and run " -"with regular SQLite support." 
-msgstr "" -"Si certains de ces prérequis sont manquants, ignorer cette option et " -"exécutez l'application avec la gestion SQLite normale." - -#. Type: boolean -#. Description -#: ../gnocchi-common.templates:6001 -msgid "" -"You can change this setting later on by running \"dpkg-reconfigure -plow " -"gnocchi-common\"." -msgstr "" -"Vous pouvez modifier ce réglage plus tard en lançant « dpkg-reconfigure -" -"plow gnocchi-common »." - -#. Type: boolean -#. Description -#: ../gnocchi-api.templates:2001 -msgid "Register Gnocchi in the Keystone endpoint catalog?" -msgstr "Enregistrer Gnocchi dans le catalogue de points d'accès de Keystone ?" - -#. Type: boolean -#. Description -#: ../gnocchi-api.templates:2001 -msgid "" -"Each OpenStack service (each API) should be registered in order to be " -"accessible. This is done using \"keystone service-create\" and \"keystone " -"endpoint-create\". This can be done automatically now." -msgstr "" -"Chaque service OpenStack (chaque API) doit être enregistré pour être " -"accessible. Cela peut être fait en utilisant « keystone service-create » et " -"« keystone endpoint-create ». Cela peut maintenant être fait automatiquement." - -#. Type: boolean -#. Description -#: ../gnocchi-api.templates:2001 -#, fuzzy -#| msgid "" -#| "Note that you will need to have an up and running Keystone server on " -#| "which to connect using the Keystone authentication token." -msgid "" -"Note that you will need to have an up and running Keystone server on which " -"to connect using a known admin project name, admin username and password. " -"The admin auth token is not used anymore." -msgstr "" -"Veuillez noter que vous aurez besoin d'avoir un serveur Keystone fonctionnel " -"sur lequel se connecter pour utiliser le jeton d'authentification Keystone." - -#. Type: string -#. Description -#: ../gnocchi-api.templates:3001 -msgid "Keystone server IP address:" -msgstr "Adresse IP du serveur Keystone : " - -#. Type: string -#. 
Description -#: ../gnocchi-api.templates:3001 -msgid "" -"Please enter the IP address of the Keystone server, so that gnocchi-api can " -"contact Keystone to do the Gnocchi service and endpoint creation." -msgstr "" -"Veuillez indiquer l'adresse IP du serveur Keystone, pour que l'API de " -"Gnocchi puisse contacter Keystone pour établir le service Glance et créer le " -"point d'accès." - -#. Type: string -#. Description -#: ../gnocchi-api.templates:4001 -#, fuzzy -#| msgid "Keystone authentication token:" -msgid "Keystone admin name:" -msgstr "Jeton d'authentification Keystone : " - -#. Type: string -#. Description -#. Type: string -#. Description -#. Type: password -#. Description -#: ../gnocchi-api.templates:4001 ../gnocchi-api.templates:5001 -#: ../gnocchi-api.templates:6001 -msgid "" -"To register the service endpoint, this package needs to know the Admin " -"login, name, project name, and password to the Keystone server." -msgstr "" - -#. Type: string -#. Description -#: ../gnocchi-api.templates:5001 -msgid "Keystone admin project name:" -msgstr "" - -#. Type: password -#. Description -#: ../gnocchi-api.templates:6001 -msgid "Keystone admin password:" -msgstr "" - -#. Type: string -#. Description -#: ../gnocchi-api.templates:7001 -msgid "Gnocchi endpoint IP address:" -msgstr "Adresse IP du point d'accès Gnocchi : " - -#. Type: string -#. Description -#: ../gnocchi-api.templates:7001 -msgid "Please enter the IP address that will be used to contact Gnocchi." -msgstr "" -"Veuillez indiquer l'adresse IP qui sera utilisée pour contacter Gnocchi." - -#. Type: string -#. Description -#: ../gnocchi-api.templates:7001 -msgid "" -"This IP address should be accessible from the clients that will use this " -"service, so if you are installing a public cloud, this should be a public IP " -"address." 
-msgstr "" -"Cette adresse IP doit être accessible depuis les clients qui utiliseront ce " -"service, donc si vous installez un nuage public, ce devra être une adresse " -"IP publique." - -#. Type: string -#. Description -#: ../gnocchi-api.templates:8001 -msgid "Name of the region to register:" -msgstr "Nom de la région à enregistrer : " - -#. Type: string -#. Description -#: ../gnocchi-api.templates:8001 -msgid "" -"OpenStack supports using availability zones, with each region representing a " -"location. Please enter the zone that you wish to use when registering the " -"endpoint." -msgstr "" -"OpenStack gère l'utilisation de zones disponibles, avec chaque région " -"représentant un lieu. Veuillez entrer une zone que vous souhaitez utiliser " -"lors de l'enregistrement d'un point d'accès." - -#~ msgid "" -#~ "To configure its endpoint in Keystone, gnocchi-api needs the Keystone " -#~ "authentication token." -#~ msgstr "" -#~ "Pour configurer son point d'accès dans Keystone, l'API de Gnocchi a " -#~ "besoin du jeton d'authentification Keystone." diff --git a/debian/po/gl.po b/debian/po/gl.po deleted file mode 100644 index 9df1a0ad..00000000 --- a/debian/po/gl.po +++ /dev/null @@ -1,270 +0,0 @@ -# Galician translations for glance package. -# Copyright (C) 2012 THE glance'S COPYRIGHT HOLDER -# This file is distributed under the same license as the glance package. -# -# Jorge Barreiro Gonzalez , 2012. -msgid "" -msgstr "" -"Project-Id-Version: glance\n" -"Report-Msgid-Bugs-To: gnocchi@packages.debian.org\n" -"POT-Creation-Date: 2016-03-29 13:10+0000\n" -"PO-Revision-Date: 2012-06-23 12:02+0200\n" -"Last-Translator: Jorge Barreiro \n" -"Language-Team: Galician \n" -"Language: gl\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" -"X-Generator: Lokalize 1.0\n" -"Plural-Forms: nplurals=2; plural=n != 1;\n" - -#. Type: string -#. 
Description -#: ../gnocchi-common.templates:2001 -#, fuzzy -#| msgid "Auth server admin token:" -msgid "Authentication server hostname:" -msgstr "Token do administrador do servidor de autenticación:" - -#. Type: string -#. Description -#: ../gnocchi-common.templates:2001 -msgid "" -"Please specify the hostname of the authentication server for Gnocchi. " -"Typically this is also the hostname of the OpenStack Identity Service " -"(Keystone)." -msgstr "" -"Indique o URL do seu servidor de autenticación de Gnocchi. Normalmente isto " -"será tamén a URL do seu servizo de identidade OpenStack (Keystone)." - -#. Type: string -#. Description -#. Translators: a "tenant" in OpenStack world is -#. an entity that contains one or more username/password couples. -#. It's typically the tenant that will be used for billing. Having more than one -#. username/password is very helpful in larger organization. -#. You're advised to either keep "tenant" without translating it -#. or keep it parenthezised. Example for French: -#. locataire ("tenant") -#: ../gnocchi-common.templates:3001 -#, fuzzy -#| msgid "Auth server admin token:" -msgid "Authentication server tenant name:" -msgstr "Token do administrador do servidor de autenticación:" - -#. Type: string -#. Description -#. Translators: a "tenant" in OpenStack world is -#. an entity that contains one or more username/password couples. -#. It's typically the tenant that will be used for billing. Having more than one -#. username/password is very helpful in larger organization. -#. You're advised to either keep "tenant" without translating it -#. or keep it parenthezised. Example for French: -#. locataire ("tenant") -#: ../gnocchi-common.templates:3001 -msgid "Please specify the authentication server tenant name." -msgstr "" - -#. Type: string -#. 
Description -#: ../gnocchi-common.templates:4001 -#, fuzzy -#| msgid "Auth server admin token:" -msgid "Authentication server username:" -msgstr "Token do administrador do servidor de autenticación:" - -#. Type: string -#. Description -#: ../gnocchi-common.templates:4001 -msgid "Please specify the username to use with the authentication server." -msgstr "" - -#. Type: password -#. Description -#: ../gnocchi-common.templates:5001 -#, fuzzy -#| msgid "Auth server admin token:" -msgid "Authentication server password:" -msgstr "Token do administrador do servidor de autenticación:" - -#. Type: password -#. Description -#: ../gnocchi-common.templates:5001 -msgid "Please specify the password to use with the authentication server." -msgstr "" - -#. Type: boolean -#. Description -#: ../gnocchi-common.templates:6001 -#, fuzzy -#| msgid "Set up a database for glance-registry?" -msgid "Set up a database for Gnocchi?" -msgstr "Quere configurar unha base de datos para «glance-registry»?" - -#. Type: boolean -#. Description -#: ../gnocchi-common.templates:6001 -#, fuzzy -#| msgid "" -#| "No database has been set up for glance-registry to use. Before " -#| "continuing, you should make sure you have:" -msgid "" -"No database has been set up for Gnocchi to use. Before continuing, you " -"should make sure you have the following information:" -msgstr "" -"Non se configurou ningunha base de datos para que «glance-registry» a use. " -"Antes de continuar, debería asegurarse de que ten:" - -#. Type: boolean -#. Description -#: ../gnocchi-common.templates:6001 -#, fuzzy -#| msgid "" -#| " - the server host name (that server must allow TCP connections\n" -#| " from this machine);\n" -#| " - a username and password to access the database.\n" -#| " - A database type that you want to use." 
-msgid "" -" * the type of database that you want to use;\n" -" * the database server hostname (that server must allow TCP connections from " -"this\n" -" machine);\n" -" * a username and password to access the database." -msgstr "" -" - o nome do servidor (o servidor debe permitir conexións TCP\n" -" desde esta máquina);\n" -" - un nome de usuario e contrasinal para acceder á base de datos.\n" -" - O tipo de base de datos que quere usar." - -#. Type: boolean -#. Description -#: ../gnocchi-common.templates:6001 -#, fuzzy -#| msgid "" -#| "If some of these requirements are missing, reject this option and run " -#| "with regular sqlite support." -msgid "" -"If some of these requirements are missing, do not choose this option and run " -"with regular SQLite support." -msgstr "" -"Se non cumpre algún destes requisitos, rexeite esta opción e use a " -"infraestrutura «sqlite» normal." - -#. Type: boolean -#. Description -#: ../gnocchi-common.templates:6001 -#, fuzzy -#| msgid "" -#| "You can change this setting later on by running 'dpkg-reconfigure -plow " -#| "glance-registry" -msgid "" -"You can change this setting later on by running \"dpkg-reconfigure -plow " -"gnocchi-common\"." -msgstr "" -"Pode cambiar esta opción máis tarde executando «dpkg-reconfigure -plow " -"glance-registry»." - -#. Type: boolean -#. Description -#: ../gnocchi-api.templates:2001 -msgid "Register Gnocchi in the Keystone endpoint catalog?" -msgstr "" - -#. Type: boolean -#. Description -#: ../gnocchi-api.templates:2001 -msgid "" -"Each OpenStack service (each API) should be registered in order to be " -"accessible. This is done using \"keystone service-create\" and \"keystone " -"endpoint-create\". This can be done automatically now." -msgstr "" - -#. Type: boolean -#. Description -#: ../gnocchi-api.templates:2001 -msgid "" -"Note that you will need to have an up and running Keystone server on which " -"to connect using a known admin project name, admin username and password. 
" -"The admin auth token is not used anymore." -msgstr "" - -#. Type: string -#. Description -#: ../gnocchi-api.templates:3001 -msgid "Keystone server IP address:" -msgstr "" - -#. Type: string -#. Description -#: ../gnocchi-api.templates:3001 -msgid "" -"Please enter the IP address of the Keystone server, so that gnocchi-api can " -"contact Keystone to do the Gnocchi service and endpoint creation." -msgstr "" - -#. Type: string -#. Description -#: ../gnocchi-api.templates:4001 -msgid "Keystone admin name:" -msgstr "" - -#. Type: string -#. Description -#. Type: string -#. Description -#. Type: password -#. Description -#: ../gnocchi-api.templates:4001 ../gnocchi-api.templates:5001 -#: ../gnocchi-api.templates:6001 -msgid "" -"To register the service endpoint, this package needs to know the Admin " -"login, name, project name, and password to the Keystone server." -msgstr "" - -#. Type: string -#. Description -#: ../gnocchi-api.templates:5001 -msgid "Keystone admin project name:" -msgstr "" - -#. Type: password -#. Description -#: ../gnocchi-api.templates:6001 -msgid "Keystone admin password:" -msgstr "" - -#. Type: string -#. Description -#: ../gnocchi-api.templates:7001 -msgid "Gnocchi endpoint IP address:" -msgstr "" - -#. Type: string -#. Description -#: ../gnocchi-api.templates:7001 -msgid "Please enter the IP address that will be used to contact Gnocchi." -msgstr "" - -#. Type: string -#. Description -#: ../gnocchi-api.templates:7001 -msgid "" -"This IP address should be accessible from the clients that will use this " -"service, so if you are installing a public cloud, this should be a public IP " -"address." -msgstr "" - -#. Type: string -#. Description -#: ../gnocchi-api.templates:8001 -msgid "Name of the region to register:" -msgstr "" - -#. Type: string -#. Description -#: ../gnocchi-api.templates:8001 -msgid "" -"OpenStack supports using availability zones, with each region representing a " -"location. 
Please enter the zone that you wish to use when registering the " -"endpoint." -msgstr "" diff --git a/debian/po/it.po b/debian/po/it.po deleted file mode 100644 index 7055f7ce..00000000 --- a/debian/po/it.po +++ /dev/null @@ -1,272 +0,0 @@ -# Italian description of gnocchi debconf messages. -# Copyright (C) 2016, gnocchi package copyright holder. -# This file is distributed under the same license as the gnocchi package. -# Beatrice Torracca , 2012, 2013, 2014, 2016. -msgid "" -msgstr "" -"Project-Id-Version: glance\n" -"Report-Msgid-Bugs-To: gnocchi@packages.debian.org\n" -"POT-Creation-Date: 2016-03-29 13:10+0000\n" -"PO-Revision-Date: 2016-09-30 07:28+0200\n" -"Last-Translator: Beatrice Torracca \n" -"Language-Team: Italian \n" -"Language: it\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" -"Plural-Forms: nplurals=2; plural=(n != 1);\n" -"X-Generator: Virtaal 0.7.1\n" - -#. Type: string -#. Description -#: ../gnocchi-common.templates:2001 -msgid "Authentication server hostname:" -msgstr "Nome host del server di autenticazione:" - -#. Type: string -#. Description -#: ../gnocchi-common.templates:2001 -msgid "" -"Please specify the hostname of the authentication server for Gnocchi. " -"Typically this is also the hostname of the OpenStack Identity Service " -"(Keystone)." -msgstr "" -"Specificare il nome host del server di autenticazione per Gnocchi. " -"Tipicamente, è anche il nome host dell'OpenStack Identity Service (Keystone)." - -#. Type: string -#. Description -#. Translators: a "tenant" in OpenStack world is -#. an entity that contains one or more username/password couples. -#. It's typically the tenant that will be used for billing. Having more than one -#. username/password is very helpful in larger organization. -#. You're advised to either keep "tenant" without translating it -#. or keep it parenthezised. Example for French: -#. 
locataire ("tenant") -#: ../gnocchi-common.templates:3001 -msgid "Authentication server tenant name:" -msgstr "Nome del locatario («tenant») per il server di autenticazione:" - -#. Type: string -#. Description -#. Translators: a "tenant" in OpenStack world is -#. an entity that contains one or more username/password couples. -#. It's typically the tenant that will be used for billing. Having more than one -#. username/password is very helpful in larger organization. -#. You're advised to either keep "tenant" without translating it -#. or keep it parenthezised. Example for French: -#. locataire ("tenant") -#: ../gnocchi-common.templates:3001 -msgid "Please specify the authentication server tenant name." -msgstr "" -"Inserire il nome del locatario («tenant») per il server di autenticazione." - -#. Type: string -#. Description -#: ../gnocchi-common.templates:4001 -msgid "Authentication server username:" -msgstr "Nome utente per il server di autenticazione:" - -#. Type: string -#. Description -#: ../gnocchi-common.templates:4001 -msgid "Please specify the username to use with the authentication server." -msgstr "Inserire il nome utente da usare con il server di autenticazione." - -#. Type: password -#. Description -#: ../gnocchi-common.templates:5001 -msgid "Authentication server password:" -msgstr "Password per il server di autenticazione:" - -#. Type: password -#. Description -#: ../gnocchi-common.templates:5001 -msgid "Please specify the password to use with the authentication server." -msgstr "Inserire la password da usare con il server di autenticazione." - -#. Type: boolean -#. Description -#: ../gnocchi-common.templates:6001 -msgid "Set up a database for Gnocchi?" -msgstr "Impostare un database per Gnocchi?" - -#. Type: boolean -#. Description -#: ../gnocchi-common.templates:6001 -#| msgid "" -#| "No database has been set up for glance-registry or glance-api to use. 
" -#| "Before continuing, you should make sure you have the following " -#| "information:" -msgid "" -"No database has been set up for Gnocchi to use. Before continuing, you " -"should make sure you have the following information:" -msgstr "" -"Non è stato impostato alcun database per l'uso da parte di Gnocchi. Prima di " -"continuare, assicurarsi di avere le seguenti informazioni:" - -#. Type: boolean -#. Description -#: ../gnocchi-common.templates:6001 -msgid "" -" * the type of database that you want to use;\n" -" * the database server hostname (that server must allow TCP connections from " -"this\n" -" machine);\n" -" * a username and password to access the database." -msgstr "" -" * il tipo di database che si desidera usare;\n" -" * il nome host del server di database (che deve permettere le connessioni\n" -" TCP da questa macchina);\n" -" * un nome utente e una password per accedere al database." - -#. Type: boolean -#. Description -#: ../gnocchi-common.templates:6001 -msgid "" -"If some of these requirements are missing, do not choose this option and run " -"with regular SQLite support." -msgstr "" -"Se non si ha uno o più di questi requisiti, non scegliere questa opzione ed " -"eseguire con il regolare supporto per SQLite." - -#. Type: boolean -#. Description -#: ../gnocchi-common.templates:6001 -#| msgid "" -#| "You can change this setting later on by running \"dpkg-reconfigure -plow " -#| "glance-common\"." -msgid "" -"You can change this setting later on by running \"dpkg-reconfigure -plow " -"gnocchi-common\"." -msgstr "" -"È possibile cambiare questa impostazione successivamente eseguendo «dpkg-" -"reconfigure -plow gnocchi-common»." - -#. Type: boolean -#. Description -#: ../gnocchi-api.templates:2001 -msgid "Register Gnocchi in the Keystone endpoint catalog?" -msgstr "Registrare Gnocchi nel catalogo dei punti terminali di Keystone?" - -#. Type: boolean -#. 
Description -#: ../gnocchi-api.templates:2001 -msgid "" -"Each OpenStack service (each API) should be registered in order to be " -"accessible. This is done using \"keystone service-create\" and \"keystone " -"endpoint-create\". This can be done automatically now." -msgstr "" -"Ogni servizio OpenStack (ogni API) dovrebbe essere registrato per poter " -"essere accessibile. Ciò viene fatto usando «keystone service-create» e " -"«keystone endpoint-create». Ciò può essere fatto ora automaticamente." - -#. Type: boolean -#. Description -#: ../gnocchi-api.templates:2001 -#| msgid "" -#| "Note that you will need to have an up and running Keystone server on " -#| "which to connect using the Keystone authentication token." -msgid "" -"Note that you will need to have an up and running Keystone server on which " -"to connect using a known admin project name, admin username and password. " -"The admin auth token is not used anymore." -msgstr "" -"Notare che sarà necessario avere un server Keystone in funzione a cui " -"connettersi usando un nome di progetto di amministrazione conosciuto, un " -"nome utente e password di amministratore. Il token di autenticazione di " -"amministratore non è più usato." - -#. Type: string -#. Description -#: ../gnocchi-api.templates:3001 -msgid "Keystone server IP address:" -msgstr "Indirizzo IP del server Keystone:" - -#. Type: string -#. Description -#: ../gnocchi-api.templates:3001 -msgid "" -"Please enter the IP address of the Keystone server, so that gnocchi-api can " -"contact Keystone to do the Gnocchi service and endpoint creation." -msgstr "" -"Inserire l'indirizzo IP del server Keystone, in modo che glance-api possa " -"contattare Keystone per effettuare la creazione del servizio e del punto " -"terminale Gnocchi." - -#. Type: string -#. Description -#: ../gnocchi-api.templates:4001 -#| msgid "Keystone authentication token:" -msgid "Keystone admin name:" -msgstr "Nome amministratore Keystone:" - -#. Type: string -#. Description -#. 
Type: string -#. Description -#. Type: password -#. Description -#: ../gnocchi-api.templates:4001 ../gnocchi-api.templates:5001 -#: ../gnocchi-api.templates:6001 -msgid "" -"To register the service endpoint, this package needs to know the Admin " -"login, name, project name, and password to the Keystone server." -msgstr "" -"Per registrare il punto terminale del servizio questo pacchetto deve " -"conoscere il login, il nome, il nome del progetto e la password " -"dell'amministratore per il server Keystone." - -#. Type: string -#. Description -#: ../gnocchi-api.templates:5001 -msgid "Keystone admin project name:" -msgstr "Nome del progetto di amministrazione Keystone:" - -#. Type: password -#. Description -#: ../gnocchi-api.templates:6001 -msgid "Keystone admin password:" -msgstr "Password amministratore Keystone:" - -#. Type: string -#. Description -#: ../gnocchi-api.templates:7001 -msgid "Gnocchi endpoint IP address:" -msgstr "Indirizzo IP del punto terminale Gnocchi:" - -#. Type: string -#. Description -#: ../gnocchi-api.templates:7001 -msgid "Please enter the IP address that will be used to contact Gnocchi." -msgstr "Inserire l'indirizzo IP che verrà usato per contattare Gnocchi." - -#. Type: string -#. Description -#: ../gnocchi-api.templates:7001 -msgid "" -"This IP address should be accessible from the clients that will use this " -"service, so if you are installing a public cloud, this should be a public IP " -"address." -msgstr "" -"Questo indirizzo IP dovrebbe essere accessibile dai client che useranno il " -"servizio, perciò se si sta installando una cloud pubblica, questo dovrebbe " -"essere un indirizzo IP pubblico." - -#. Type: string -#. Description -#: ../gnocchi-api.templates:8001 -msgid "Name of the region to register:" -msgstr "Nome della regione da registrare:" - -#. Type: string -#. Description -#: ../gnocchi-api.templates:8001 -msgid "" -"OpenStack supports using availability zones, with each region representing a " -"location. 
Please enter the zone that you wish to use when registering the " -"endpoint." -msgstr "" -"OpenStack gestisce le zone di disponibilità, con ogni regione che " -"rappresenta una posizione. Inserire la zona che si desidera usare durante la " -"registrazione del punto terminale." diff --git a/debian/po/ja.po b/debian/po/ja.po deleted file mode 100644 index 09fafba0..00000000 --- a/debian/po/ja.po +++ /dev/null @@ -1,275 +0,0 @@ -# SOME DESCRIPTIVE TITLE. -# Copyright (C) YEAR THE PACKAGE'S COPYRIGHT HOLDER -# This file is distributed under the same license as the PACKAGE package. -# victory , 2012. -# Takuma Yamada , 2016. -# -msgid "" -msgstr "" -"Project-Id-Version: glance\n" -"Report-Msgid-Bugs-To: gnocchi@packages.debian.org\n" -"POT-Creation-Date: 2016-03-29 13:10+0000\n" -"PO-Revision-Date: 2016-04-07 13:15+0900\n" -"Last-Translator: Takuma Yamada \n" -"Language-Team: Japanese \n" -"Language: ja\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" -"Plural-Forms: nplurals=1; plural=0;\n" -"X-Generator: Gtranslator 2.91.6\n" - -#. Type: string -#. Description -#: ../gnocchi-common.templates:2001 -msgid "Authentication server hostname:" -msgstr "認証サーバのホスト名:" - -#. Type: string -#. Description -#: ../gnocchi-common.templates:2001 -msgid "" -"Please specify the hostname of the authentication server for Gnocchi. " -"Typically this is also the hostname of the OpenStack Identity Service " -"(Keystone)." -msgstr "" -"Gnocchi 用認証サーバのホスト名を指定してください。通常これは OpenStack " -"Identity Service (Keystone) のホスト名と同じです。" - -#. Type: string -#. Description -#. Translators: a "tenant" in OpenStack world is -#. an entity that contains one or more username/password couples. -#. It's typically the tenant that will be used for billing. Having more than one -#. username/password is very helpful in larger organization. -#. You're advised to either keep "tenant" without translating it -#. or keep it parenthezised. Example for French: -#. 
locataire ("tenant") -#: ../gnocchi-common.templates:3001 -msgid "Authentication server tenant name:" -msgstr "認証サーバのテナント (tenant) 名:" - -#. Type: string -#. Description -#. Translators: a "tenant" in OpenStack world is -#. an entity that contains one or more username/password couples. -#. It's typically the tenant that will be used for billing. Having more than one -#. username/password is very helpful in larger organization. -#. You're advised to either keep "tenant" without translating it -#. or keep it parenthezised. Example for French: -#. locataire ("tenant") -#: ../gnocchi-common.templates:3001 -msgid "Please specify the authentication server tenant name." -msgstr "認証サーバのテナント (tenant) 名を指定してください。" - -#. Type: string -#. Description -#: ../gnocchi-common.templates:4001 -msgid "Authentication server username:" -msgstr "認証サーバのユーザ名:" - -#. Type: string -#. Description -#: ../gnocchi-common.templates:4001 -msgid "Please specify the username to use with the authentication server." -msgstr "認証サーバで使用するユーザ名を指定してください。" - -#. Type: password -#. Description -#: ../gnocchi-common.templates:5001 -msgid "Authentication server password:" -msgstr "認証サーバのパスワード:" - -#. Type: password -#. Description -#: ../gnocchi-common.templates:5001 -msgid "Please specify the password to use with the authentication server." -msgstr "認証サーバで使用するパスワードを指定してください。" - -#. Type: boolean -#. Description -#: ../gnocchi-common.templates:6001 -msgid "Set up a database for Gnocchi?" -msgstr "Gnocchi 用のデータベースを用意しますか?" - -#. Type: boolean -#. Description -#: ../gnocchi-common.templates:6001 -msgid "" -"No database has been set up for Gnocchi to use. Before continuing, you " -"should make sure you have the following information:" -msgstr "" -"Gnocchi で使用するために設定されたデータベースがありません。続行する前に、以" -"下の情報が揃っていることを確認してください:" - -#. Type: boolean -#. 
Description -#: ../gnocchi-common.templates:6001 -msgid "" -" * the type of database that you want to use;\n" -" * the database server hostname (that server must allow TCP connections from " -"this\n" -" machine);\n" -" * a username and password to access the database." -msgstr "" -" * 使用するデータベースの種類\n" -" * データベースサーバのホスト名 (そのサーバは、このマシンからのTCP 接続を\n" -" 許可する必要があります)\n" -" * データベースにアクセスするためのユーザ名とパスワード" - -#. Type: boolean -#. Description -#: ../gnocchi-common.templates:6001 -msgid "" -"If some of these requirements are missing, do not choose this option and run " -"with regular SQLite support." -msgstr "" -"これらの要件が欠落している場合は、このオプションを選択しないでください。そし" -"て、標準 SQLite サポートで実行してください。" - -#. Type: boolean -#. Description -#: ../gnocchi-common.templates:6001 -msgid "" -"You can change this setting later on by running \"dpkg-reconfigure -plow " -"gnocchi-common\"." -msgstr "" -"この設定は、後で \"dpkg-reconfigure -plow gnocchi-common\" を実行することで変" -"更できます。" - -#. Type: boolean -#. Description -#: ../gnocchi-api.templates:2001 -msgid "Register Gnocchi in the Keystone endpoint catalog?" -msgstr "Gnocchi を Keystone のエンドポイントカタログに登録しますか?" - -#. Type: boolean -#. Description -#: ../gnocchi-api.templates:2001 -msgid "" -"Each OpenStack service (each API) should be registered in order to be " -"accessible. This is done using \"keystone service-create\" and \"keystone " -"endpoint-create\". This can be done automatically now." -msgstr "" -"各 OpenStack サービス (各 API) は、アクセス可能にするために登録する必要があり" -"ます。\"keystone service-create\" と \"keystone endpoint-create\" を使って登" -"録することができます。ここで自動的に行うことができます。" - -#. Type: boolean -#. Description -#: ../gnocchi-api.templates:2001 -#| msgid "" -#| "Note that you will need to have an up and running Keystone server on " -#| "which to connect using the Keystone authentication token." -msgid "" -"Note that you will need to have an up and running Keystone server on which " -"to connect using a known admin project name, admin username and password. 
" -"The admin auth token is not used anymore." -msgstr "" -"既知の管理プロジェクト名、管理者のユーザ名とパスワードを使用して接続するに" -"は、Keystone サーバの起動および実行が必要になりますので注意してください。管理" -"者認証トークンはもう使用されていません。" - -#. Type: string -#. Description -#: ../gnocchi-api.templates:3001 -msgid "Keystone server IP address:" -msgstr "Keystone サーバの IP アドレス:" - -#. Type: string -#. Description -#: ../gnocchi-api.templates:3001 -msgid "" -"Please enter the IP address of the Keystone server, so that gnocchi-api can " -"contact Keystone to do the Gnocchi service and endpoint creation." -msgstr "" -"Keystone サーバの IP アドレスを入力してください。それにより gnocchi-api は " -"Keystone と通信し、Gnocchi サービスやエンドポイントの作成ができるようになりま" -"す。" - -#. Type: string -#. Description -#: ../gnocchi-api.templates:4001 -#| msgid "Keystone authentication token:" -msgid "Keystone admin name:" -msgstr "Keystone 管理者名:" - -#. Type: string -#. Description -#. Type: string -#. Description -#. Type: password -#. Description -#: ../gnocchi-api.templates:4001 ../gnocchi-api.templates:5001 -#: ../gnocchi-api.templates:6001 -msgid "" -"To register the service endpoint, this package needs to know the Admin " -"login, name, project name, and password to the Keystone server." -msgstr "" -"サービスのエンドポイントを登録するには、このパッケージが Keystone サーバへの" -"管理者ログイン、名前、プロジェクト名、およびパスワードを知っている必要があり" -"ます。" - -#. Type: string -#. Description -#: ../gnocchi-api.templates:5001 -msgid "Keystone admin project name:" -msgstr "Keystone 管理プロジェクト名:" - -#. Type: password -#. Description -#: ../gnocchi-api.templates:6001 -msgid "Keystone admin password:" -msgstr "Keystone 管理者パスワード:" - -#. Type: string -#. Description -#: ../gnocchi-api.templates:7001 -msgid "Gnocchi endpoint IP address:" -msgstr "Gnocchi エンドポイントの IP アドレス:" - -#. Type: string -#. Description -#: ../gnocchi-api.templates:7001 -msgid "Please enter the IP address that will be used to contact Gnocchi." -msgstr "Gnocchi の通信に使用される IP アドレスを入力してください。" - -#. Type: string -#. 
Description -#: ../gnocchi-api.templates:7001 -msgid "" -"This IP address should be accessible from the clients that will use this " -"service, so if you are installing a public cloud, this should be a public IP " -"address." -msgstr "" -"この IP アドレスは、このサービスを利用するクライアントからアクセス可能でなけ" -"ればなりません。パブリッククラウドをインストールしている場合は、パブリック " -"IP アドレスにする必要があります。" - -#. Type: string -#. Description -#: ../gnocchi-api.templates:8001 -msgid "Name of the region to register:" -msgstr "登録するリージョンの名前:" - -#. Type: string -#. Description -#: ../gnocchi-api.templates:8001 -msgid "" -"OpenStack supports using availability zones, with each region representing a " -"location. Please enter the zone that you wish to use when registering the " -"endpoint." -msgstr "" -"OpenStack は、場所を表すリージョン毎に、アベイラビリティーゾーンの使用をサ" -"ポートします。エンドポイントを登録する際に、使用するゾーンを入力してくださ" -"い。" - -#~| msgid "" -#~| "To configure its endpoint in Keystone, glance-api needs the Keystone " -#~| "auth token." -#~ msgid "" -#~ "To configure its endpoint in Keystone, gnocchi-api needs the Keystone " -#~ "authentication token." -#~ msgstr "" -#~ "Keystone でエンドポイントを設定するには、glance-api は Keystone 認証トーク" -#~ "ンを必要とします。" diff --git a/debian/po/nl.po b/debian/po/nl.po deleted file mode 100644 index 7d0b79cb..00000000 --- a/debian/po/nl.po +++ /dev/null @@ -1,280 +0,0 @@ -# Dutch translation of gnocchi debconf templates. -# Copyright (C) 2012 THE PACKAGE'S COPYRIGHT HOLDER -# This file is distributed under the same license as the glance package. -# Jeroen Schot , 2012. -# Frans Spiesschaert , 2014, 2016. 
-# -msgid "" -msgstr "" -"Project-Id-Version: gnocchi_2.0.2-4\n" -"Report-Msgid-Bugs-To: gnocchi@packages.debian.org\n" -"POT-Creation-Date: 2016-03-29 13:10+0000\n" -"PO-Revision-Date: 2016-04-24 12:07+0200\n" -"Last-Translator: Frans Spiesschaert \n" -"Language-Team: Debian Dutch l10n Team \n" -"Language: nl\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" -"Plural-Forms: nplurals=2; plural=(n != 1);\n" -"X-Generator: Gtranslator 2.91.6\n" - -#. Type: string -#. Description -#: ../gnocchi-common.templates:2001 -msgid "Authentication server hostname:" -msgstr "Computernaam van de authenticatieserver:" - -#. Type: string -#. Description -#: ../gnocchi-common.templates:2001 -msgid "" -"Please specify the hostname of the authentication server for Gnocchi. " -"Typically this is also the hostname of the OpenStack Identity Service " -"(Keystone)." -msgstr "" -"Geef de computernaam van de authenticatieserver voor Gnocchi op. Meestal is " -"dit ook de computernaam van de OpenStack Identiteitsdienst (Keystone)." - -#. Type: string -#. Description -#. Translators: a "tenant" in OpenStack world is -#. an entity that contains one or more username/password couples. -#. It's typically the tenant that will be used for billing. Having more than one -#. username/password is very helpful in larger organization. -#. You're advised to either keep "tenant" without translating it -#. or keep it parenthezised. Example for French: -#. locataire ("tenant") -#: ../gnocchi-common.templates:3001 -msgid "Authentication server tenant name:" -msgstr "Naam van de cliëntruimte (tenant) op de authenticatieserver:" - -#. Type: string -#. Description -#. Translators: a "tenant" in OpenStack world is -#. an entity that contains one or more username/password couples. -#. It's typically the tenant that will be used for billing. Having more than one -#. username/password is very helpful in larger organization. -#. 
You're advised to either keep "tenant" without translating it -#. or keep it parenthezised. Example for French: -#. locataire ("tenant") -#: ../gnocchi-common.templates:3001 -msgid "Please specify the authentication server tenant name." -msgstr "" -"Geef de naam op van de cliëntruimte (tenant) op de authenticatieserver." - -#. Type: string -#. Description -#: ../gnocchi-common.templates:4001 -msgid "Authentication server username:" -msgstr "Gebruikersnaam op de authenticatieserver:" - -#. Type: string -#. Description -#: ../gnocchi-common.templates:4001 -msgid "Please specify the username to use with the authentication server." -msgstr "" -"Geef de gebruikersnaam op die op de authenticatieserver gebruikt moet worden." - -#. Type: password -#. Description -#: ../gnocchi-common.templates:5001 -msgid "Authentication server password:" -msgstr "Wachtwoord op de authenticatieserver:" - -#. Type: password -#. Description -#: ../gnocchi-common.templates:5001 -msgid "Please specify the password to use with the authentication server." -msgstr "" -"Geef het wachtwoord op dat op de authenticatieserver gebruikt moet worden." - -#. Type: boolean -#. Description -#: ../gnocchi-common.templates:6001 -msgid "Set up a database for Gnocchi?" -msgstr "Een database opzetten voor Gnocchi?" - -#. Type: boolean -#. Description -#: ../gnocchi-common.templates:6001 -msgid "" -"No database has been set up for Gnocchi to use. Before continuing, you " -"should make sure you have the following information:" -msgstr "" -"Er werd geen database opgezet om door Gnocchi gebruikt te worden. Voor u " -"doorgaat moet u beschikken over de volgende informatie:" - -#. Type: boolean -#. Description -#: ../gnocchi-common.templates:6001 -msgid "" -" * the type of database that you want to use;\n" -" * the database server hostname (that server must allow TCP connections from " -"this\n" -" machine);\n" -" * a username and password to access the database." 
-msgstr "" -" * het soort database dat u wilt gebruiken;\n" -" * de computernaam van de databaseserver (die server moet\n" -" TCP-verbindingen vanaf deze computer toestaan);\n" -" * een gebruikersnaam en een wachtwoord om toegang te krijgen tot de " -"database." - -#. Type: boolean -#. Description -#: ../gnocchi-common.templates:6001 -msgid "" -"If some of these requirements are missing, do not choose this option and run " -"with regular SQLite support." -msgstr "" -"Indien sommige van deze gegevens ontbreken, moet u deze optie niet kiezen en " -"de toepassing gebruiken met gewone SQLite-ondersteuning." - -#. Type: boolean -#. Description -#: ../gnocchi-common.templates:6001 -msgid "" -"You can change this setting later on by running \"dpkg-reconfigure -plow " -"gnocchi-common\"." -msgstr "" -"U kunt deze instelling later wijzigen door het uitvoeren van \"dpkg-" -"reconfigure -plow gnocchi-common\"." - -#. Type: boolean -#. Description -#: ../gnocchi-api.templates:2001 -msgid "Register Gnocchi in the Keystone endpoint catalog?" -msgstr "Gnocchi opnemen in de catalogus van Keystone-toegangspunten?" - -#. Type: boolean -#. Description -#: ../gnocchi-api.templates:2001 -msgid "" -"Each OpenStack service (each API) should be registered in order to be " -"accessible. This is done using \"keystone service-create\" and \"keystone " -"endpoint-create\". This can be done automatically now." -msgstr "" -"Elke dienst van OpenStack (elke API) moet geregistreerd staan om " -"toegankelijk te zijn. Dit gebeurt met de opdrachten \"keystone service-create" -"\" en \"keystone endpoint-create\". Dit kan nu automatisch uitgevoerd worden." - -#. Type: boolean -#. Description -#: ../gnocchi-api.templates:2001 -#| msgid "" -#| "Note that you will need to have an up and running Keystone server on " -#| "which to connect using the Keystone authentication token." 
-msgid "" -"Note that you will need to have an up and running Keystone server on which " -"to connect using a known admin project name, admin username and password. " -"The admin auth token is not used anymore." -msgstr "" -"Merk op dat u een functionerende Keystone-server moet hebben om er een " -"verbinding mee te maken met behulp van een gekende beheerdersprojectnaam, " -"beheerdersgebruikersnaam en wachtwoord. Het beheerderslegitimatiebewijs " -"wordt niet langer gebruikt." - -#. Type: string -#. Description -#: ../gnocchi-api.templates:3001 -msgid "Keystone server IP address:" -msgstr "IP-adres van de Keystone-server:" - -#. Type: string -#. Description -#: ../gnocchi-api.templates:3001 -msgid "" -"Please enter the IP address of the Keystone server, so that gnocchi-api can " -"contact Keystone to do the Gnocchi service and endpoint creation." -msgstr "" -"Geef het IP-adres van de Keystone-server op, zodat glance-api Keystone kan " -"contacteren om de Gnocchidienst en het toegangspunt aan te maken." - -#. Type: string -#. Description -#: ../gnocchi-api.templates:4001 -#| msgid "Keystone authentication token:" -msgid "Keystone admin name:" -msgstr "Naam van de beheerder voor Keystone:" - -#. Type: string -#. Description -#. Type: string -#. Description -#. Type: password -#. Description -#: ../gnocchi-api.templates:4001 ../gnocchi-api.templates:5001 -#: ../gnocchi-api.templates:6001 -msgid "" -"To register the service endpoint, this package needs to know the Admin " -"login, name, project name, and password to the Keystone server." -msgstr "" -"Om het toegangspunt van de dienst te registreren moet dit pakket de " -"inloggegevens voor de Keystone-server van de beheerder kennen, naam, " -"projectnaam en wachtwoord." - -#. Type: string -#. Description -#: ../gnocchi-api.templates:5001 -msgid "Keystone admin project name:" -msgstr "Naam van het project van de beheerder voor Keystone:" - -#. Type: password -#. 
Description -#: ../gnocchi-api.templates:6001 -msgid "Keystone admin password:" -msgstr "Wachtwoord van de beheerder voor Keystone:" - -#. Type: string -#. Description -#: ../gnocchi-api.templates:7001 -msgid "Gnocchi endpoint IP address:" -msgstr "IP-adres van het toegangspunt van Gnocchi:" - -#. Type: string -#. Description -#: ../gnocchi-api.templates:7001 -msgid "Please enter the IP address that will be used to contact Gnocchi." -msgstr "" -"Geef het IP-adres op dat gebruikt zal worden voor het contact met Gnocchi." - -#. Type: string -#. Description -#: ../gnocchi-api.templates:7001 -msgid "" -"This IP address should be accessible from the clients that will use this " -"service, so if you are installing a public cloud, this should be a public IP " -"address." -msgstr "" -"Dit IP-adres moet bereikbaar zijn voor de clients die deze dienst zullen " -"gebruiken. Indien u een openbare cloud installeert, moet dit dus een " -"algemeen bereikbaar IP-adres zijn." - -#. Type: string -#. Description -#: ../gnocchi-api.templates:8001 -msgid "Name of the region to register:" -msgstr "Naam van de registratieregio:" - -#. Type: string -#. Description -#: ../gnocchi-api.templates:8001 -msgid "" -"OpenStack supports using availability zones, with each region representing a " -"location. Please enter the zone that you wish to use when registering the " -"endpoint." -msgstr "" -"OpenStack ondersteunt het gebruik van zones van beschikbaarheid, waarbij " -"elke regio een locatie vertegenwoordigt. Geef aan welke zone u wenst te " -"gebruiken bij het registreren van het toegangspunt." - -#~| msgid "" -#~| "To configure its endpoint in Keystone, glance-api needs the Keystone " -#~| "authentication token." -#~ msgid "" -#~ "To configure its endpoint in Keystone, gnocchi-api needs the Keystone " -#~ "authentication token." -#~ msgstr "" -#~ "Om zijn toegangspunt te kunnen aanmaken in Keystone, heeft gnocchi-api " -#~ "het authenticatiebewijs voor Keystone nodig." 
diff --git a/debian/po/pl.po b/debian/po/pl.po deleted file mode 100644 index 22fb705b..00000000 --- a/debian/po/pl.po +++ /dev/null @@ -1,274 +0,0 @@ -# Translation of glance debconf templates to Polish. -# Copyright (C) 2012 -# This file is distributed under the same license as the glance package. -# -# Michał Kułach , 2012. -msgid "" -msgstr "" -"Project-Id-Version: glance\n" -"Report-Msgid-Bugs-To: gnocchi@packages.debian.org\n" -"POT-Creation-Date: 2016-03-29 13:10+0000\n" -"PO-Revision-Date: 2012-06-09 10:11+0200\n" -"Last-Translator: Michał Kułach \n" -"Language-Team: Polish \n" -"Language: pl\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" -"X-Generator: Lokalize 1.2\n" -"Plural-Forms: nplurals=3; plural=(n==1 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 " -"|| n%100>=20) ? 1 : 2);\n" - -#. Type: string -#. Description -#: ../gnocchi-common.templates:2001 -#, fuzzy -#| msgid "Auth server admin token:" -msgid "Authentication server hostname:" -msgstr "Token administratora serwera uwierzytelniania:" - -#. Type: string -#. Description -#: ../gnocchi-common.templates:2001 -msgid "" -"Please specify the hostname of the authentication server for Gnocchi. " -"Typically this is also the hostname of the OpenStack Identity Service " -"(Keystone)." -msgstr "" -"Proszę podać adres URL serwera uwierzytelniania Gnocchi. Z reguły jest to " -"adres OpenStack Identity Service (Keystone) danego użytkownika." - -#. Type: string -#. Description -#. Translators: a "tenant" in OpenStack world is -#. an entity that contains one or more username/password couples. -#. It's typically the tenant that will be used for billing. Having more than one -#. username/password is very helpful in larger organization. -#. You're advised to either keep "tenant" without translating it -#. or keep it parenthezised. Example for French: -#. 
locataire ("tenant") -#: ../gnocchi-common.templates:3001 -#, fuzzy -#| msgid "Auth server admin token:" -msgid "Authentication server tenant name:" -msgstr "Token administratora serwera uwierzytelniania:" - -#. Type: string -#. Description -#. Translators: a "tenant" in OpenStack world is -#. an entity that contains one or more username/password couples. -#. It's typically the tenant that will be used for billing. Having more than one -#. username/password is very helpful in larger organization. -#. You're advised to either keep "tenant" without translating it -#. or keep it parenthezised. Example for French: -#. locataire ("tenant") -#: ../gnocchi-common.templates:3001 -msgid "Please specify the authentication server tenant name." -msgstr "" - -#. Type: string -#. Description -#: ../gnocchi-common.templates:4001 -#, fuzzy -#| msgid "Auth server admin token:" -msgid "Authentication server username:" -msgstr "Token administratora serwera uwierzytelniania:" - -#. Type: string -#. Description -#: ../gnocchi-common.templates:4001 -msgid "Please specify the username to use with the authentication server." -msgstr "" - -#. Type: password -#. Description -#: ../gnocchi-common.templates:5001 -#, fuzzy -#| msgid "Auth server admin token:" -msgid "Authentication server password:" -msgstr "Token administratora serwera uwierzytelniania:" - -#. Type: password -#. Description -#: ../gnocchi-common.templates:5001 -msgid "Please specify the password to use with the authentication server." -msgstr "" - -#. Type: boolean -#. Description -#: ../gnocchi-common.templates:6001 -#, fuzzy -#| msgid "Set up a database for glance-registry?" -msgid "Set up a database for Gnocchi?" -msgstr "Skonfigurować bazę danych do glance-registry?" - -#. Type: boolean -#. Description -#: ../gnocchi-common.templates:6001 -#, fuzzy -#| msgid "" -#| "No database has been set up for glance-registry to use. 
Before " -#| "continuing, you should make sure you have:" -msgid "" -"No database has been set up for Gnocchi to use. Before continuing, you " -"should make sure you have the following information:" -msgstr "" -"Nie skonfigurowano bazy danych do użycia z glance-registry. Przed " -"kontynuowaniem, proszę upewnić się, że posiada się:" - -#. Type: boolean -#. Description -#: ../gnocchi-common.templates:6001 -#, fuzzy -#| msgid "" -#| " - the server host name (that server must allow TCP connections\n" -#| " from this machine);\n" -#| " - a username and password to access the database.\n" -#| " - A database type that you want to use." -msgid "" -" * the type of database that you want to use;\n" -" * the database server hostname (that server must allow TCP connections from " -"this\n" -" machine);\n" -" * a username and password to access the database." -msgstr "" -" - nazwę serwera (serwer musi pozwalać na połączenia TCP\n" -" z tego komputera),\n" -" - nazwę użytkownika i hasło dostępowe do bazy danych,\n" -" - typ bazy danych, który chce się wykorzystać." - -#. Type: boolean -#. Description -#: ../gnocchi-common.templates:6001 -#, fuzzy -#| msgid "" -#| "If some of these requirements are missing, reject this option and run " -#| "with regular sqlite support." -msgid "" -"If some of these requirements are missing, do not choose this option and run " -"with regular SQLite support." -msgstr "" -"Jeśli nie zna się któregoś z powyższych punktów, proszę wybrać \"nie\" i " -"skorzystać ze zwykłego trybu, używającego sqlite." - -#. Type: boolean -#. Description -#: ../gnocchi-common.templates:6001 -#, fuzzy -#| msgid "" -#| "You can change this setting later on by running 'dpkg-reconfigure -plow " -#| "glance-registry" -msgid "" -"You can change this setting later on by running \"dpkg-reconfigure -plow " -"gnocchi-common\"." -msgstr "" -"Można zmienić to ustawienie później, wykonując \"dpkg-reconfigure -plow " -"glance-registry\"." - -#. Type: boolean -#. 
Description -#: ../gnocchi-api.templates:2001 -msgid "Register Gnocchi in the Keystone endpoint catalog?" -msgstr "" - -#. Type: boolean -#. Description -#: ../gnocchi-api.templates:2001 -msgid "" -"Each OpenStack service (each API) should be registered in order to be " -"accessible. This is done using \"keystone service-create\" and \"keystone " -"endpoint-create\". This can be done automatically now." -msgstr "" - -#. Type: boolean -#. Description -#: ../gnocchi-api.templates:2001 -msgid "" -"Note that you will need to have an up and running Keystone server on which " -"to connect using a known admin project name, admin username and password. " -"The admin auth token is not used anymore." -msgstr "" - -#. Type: string -#. Description -#: ../gnocchi-api.templates:3001 -msgid "Keystone server IP address:" -msgstr "" - -#. Type: string -#. Description -#: ../gnocchi-api.templates:3001 -msgid "" -"Please enter the IP address of the Keystone server, so that gnocchi-api can " -"contact Keystone to do the Gnocchi service and endpoint creation." -msgstr "" - -#. Type: string -#. Description -#: ../gnocchi-api.templates:4001 -msgid "Keystone admin name:" -msgstr "" - -#. Type: string -#. Description -#. Type: string -#. Description -#. Type: password -#. Description -#: ../gnocchi-api.templates:4001 ../gnocchi-api.templates:5001 -#: ../gnocchi-api.templates:6001 -msgid "" -"To register the service endpoint, this package needs to know the Admin " -"login, name, project name, and password to the Keystone server." -msgstr "" - -#. Type: string -#. Description -#: ../gnocchi-api.templates:5001 -msgid "Keystone admin project name:" -msgstr "" - -#. Type: password -#. Description -#: ../gnocchi-api.templates:6001 -msgid "Keystone admin password:" -msgstr "" - -#. Type: string -#. Description -#: ../gnocchi-api.templates:7001 -msgid "Gnocchi endpoint IP address:" -msgstr "" - -#. Type: string -#. 
Description -#: ../gnocchi-api.templates:7001 -msgid "Please enter the IP address that will be used to contact Gnocchi." -msgstr "" - -#. Type: string -#. Description -#: ../gnocchi-api.templates:7001 -msgid "" -"This IP address should be accessible from the clients that will use this " -"service, so if you are installing a public cloud, this should be a public IP " -"address." -msgstr "" - -#. Type: string -#. Description -#: ../gnocchi-api.templates:8001 -msgid "Name of the region to register:" -msgstr "" - -#. Type: string -#. Description -#: ../gnocchi-api.templates:8001 -msgid "" -"OpenStack supports using availability zones, with each region representing a " -"location. Please enter the zone that you wish to use when registering the " -"endpoint." -msgstr "" - -#~ msgid "Pipeline flavor:" -#~ msgstr "Odmiana potoku:" diff --git a/debian/po/pt.po b/debian/po/pt.po deleted file mode 100644 index ddf0b979..00000000 --- a/debian/po/pt.po +++ /dev/null @@ -1,258 +0,0 @@ -# glance debconf portuguese messages -# Copyright (C) 2012 the glance'S COPYRIGHT HOLDER -# This file is distributed under the same license as the glance package. -# Pedro Ribeiro , 2012, 2017 -# -msgid "" -msgstr "" -"Project-Id-Version: gnocchi_3.0.4-4\n" -"Report-Msgid-Bugs-To: gnocchi@packages.debian.org\n" -"POT-Creation-Date: 2016-03-29 13:10+0000\n" -"PO-Revision-Date: 2017-09-11 10:43+0100\n" -"Last-Translator: Pedro Ribeiro \n" -"Language-Team: Potuguese \n" -"Language: pt\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Type: string -#. Description -#: ../gnocchi-common.templates:2001 -msgid "Authentication server hostname:" -msgstr "Nome do servidor de autenticação:" - -#. Type: string -#. Description -#: ../gnocchi-common.templates:2001 -msgid "" -"Please specify the hostname of the authentication server for Gnocchi. " -"Typically this is also the hostname of the OpenStack Identity Service " -"(Keystone)." 
-msgstr "" -"Indique o nome do seu servidor de autenticação para o Gnocchi. Normalmente, " -"é o nome do seu Serviço de Identidade OpenStack (Keystone)." - -#. Type: string -#. Description -#. Translators: a "tenant" in OpenStack world is -#. an entity that contains one or more username/password couples. -#. It's typically the tenant that will be used for billing. Having more than one -#. username/password is very helpful in larger organization. -#. You're advised to either keep "tenant" without translating it -#. or keep it parenthezised. Example for French: -#. locataire ("tenant") -#: ../gnocchi-common.templates:3001 -msgid "Authentication server tenant name:" -msgstr "Nome do 'tenant' do servidor de autenticação:" - -#. Type: string -#. Description -#. Translators: a "tenant" in OpenStack world is -#. an entity that contains one or more username/password couples. -#. It's typically the tenant that will be used for billing. Having more than one -#. username/password is very helpful in larger organization. -#. You're advised to either keep "tenant" without translating it -#. or keep it parenthezised. Example for French: -#. locataire ("tenant") -#: ../gnocchi-common.templates:3001 -msgid "Please specify the authentication server tenant name." -msgstr "Indique, por favor, o nome do 'tenant' do servidor de autenticação." - -#. Type: string -#. Description -#: ../gnocchi-common.templates:4001 -msgid "Authentication server username:" -msgstr "Nome de utilizador para o servidor de autenticação:" - -#. Type: string -#. Description -#: ../gnocchi-common.templates:4001 -msgid "Please specify the username to use with the authentication server." -msgstr "" -"Indique, por favor, o nome de utilizador para o servidor de autenticação." - -#. Type: password -#. Description -#: ../gnocchi-common.templates:5001 -msgid "Authentication server password:" -msgstr "Palavra chave do servidor de autenticação:" - -#. Type: password -#. 
Description -#: ../gnocchi-common.templates:5001 -msgid "Please specify the password to use with the authentication server." -msgstr "" -"Indique, por favor, a palavra-chave para usar no servidor de autenticação." - -#. Type: boolean -#. Description -#: ../gnocchi-common.templates:6001 -msgid "Set up a database for Gnocchi?" -msgstr "Configurar uma base de dados para o Gnocchi?" - -#. Type: boolean -#. Description -#: ../gnocchi-common.templates:6001 -msgid "" -"No database has been set up for Gnocchi to use. Before continuing, you " -"should make sure you have the following information:" -msgstr "" -"Não foi definida nenhuma base de dados para ser usada pelo Gnocchi. Antes de " -"continuar, certifique-se que tem a seguinte informação:" - -#. Type: boolean -#. Description -#: ../gnocchi-common.templates:6001 -msgid "" -" * the type of database that you want to use;\n" -" * the database server hostname (that server must allow TCP connections from " -"this\n" -" machine);\n" -" * a username and password to access the database." -msgstr "" -" * o tipo de base de dados que quer usar;\n" -" * o nome do servidor (esse servidor deve aceitar ligações TCP a partir\n" -"desta máquina);\n" -" * o nome de utilizador e palavra passe para aceder à base de dados." - -#. Type: boolean -#. Description -#: ../gnocchi-common.templates:6001 -msgid "" -"If some of these requirements are missing, do not choose this option and run " -"with regular SQLite support." -msgstr "" -"Se algum destes requisitos estiver em falta, rejeite esta opção e execute " -"com o suporte SQLite normal." - -#. Type: boolean -#. Description -#: ../gnocchi-common.templates:6001 -msgid "" -"You can change this setting later on by running \"dpkg-reconfigure -plow " -"gnocchi-common\"." -msgstr "" -"Pode mudar esta definição mais tarde ao executar \"dpkg-reconfigure -plow " -"gnocchi-common\"." - -#. Type: boolean -#. 
Description -#: ../gnocchi-api.templates:2001 -msgid "Register Gnocchi in the Keystone endpoint catalog?" -msgstr "Registar o Gnocchi no catálogo de pontos finais do Keystone?" - -#. Type: boolean -#. Description -#: ../gnocchi-api.templates:2001 -msgid "" -"Each OpenStack service (each API) should be registered in order to be " -"accessible. This is done using \"keystone service-create\" and \"keystone " -"endpoint-create\". This can be done automatically now." -msgstr "" -"Cada serviço Openstack (cada API) deve estar registado para que seja " -"acessível. Isto é feito com \"keystone service-create\" e \"keystone " -"endpoint-create\". Pode correr estes comandos agora." - -#. Type: boolean -#. Description -#: ../gnocchi-api.templates:2001 -msgid "" -"Note that you will need to have an up and running Keystone server on which " -"to connect using a known admin project name, admin username and password. " -"The admin auth token is not used anymore." -msgstr "" -"Note que irá necessitar de ter um servidor keystone a correr e pronto para " -"receber ligações autenticadas com um nome de administrador de projecto, nome " -"de utilizador e password. O token de autorização de admin já não é usado." - -#. Type: string -#. Description -#: ../gnocchi-api.templates:3001 -msgid "Keystone server IP address:" -msgstr "Endereço IP do keystone:" - -#. Type: string -#. Description -#: ../gnocchi-api.templates:3001 -msgid "" -"Please enter the IP address of the Keystone server, so that gnocchi-api can " -"contact Keystone to do the Gnocchi service and endpoint creation." -msgstr "" -"Indique o endereço IP do seu servidor keystone, de modo a que o glance-api " -"possa contactar o Keystone para criar o serviço e ponto final Gnocchi." - -#. Type: string -#. Description -#: ../gnocchi-api.templates:4001 -msgid "Keystone admin name:" -msgstr "Nome de administrador Keystone:" - -#. Type: string -#. Description -#. Type: string -#. Description -#. Type: password -#. 
Description -#: ../gnocchi-api.templates:4001 ../gnocchi-api.templates:5001 -#: ../gnocchi-api.templates:6001 -msgid "" -"To register the service endpoint, this package needs to know the Admin " -"login, name, project name, and password to the Keystone server." -msgstr "" -"Para registar o endpoint do serviço, este pacote necessita de saber o nome " -"de utilizador, nome, nome do projecto e password para o servidor Keystone." - -#. Type: string -#. Description -#: ../gnocchi-api.templates:5001 -msgid "Keystone admin project name:" -msgstr "Nome de projecto do administrador Keystone:" - -#. Type: password -#. Description -#: ../gnocchi-api.templates:6001 -msgid "Keystone admin password:" -msgstr "Password de administrador Keystone:" - -#. Type: string -#. Description -#: ../gnocchi-api.templates:7001 -msgid "Gnocchi endpoint IP address:" -msgstr "Endereço IP do ponto final Gnocchi:" - -#. Type: string -#. Description -#: ../gnocchi-api.templates:7001 -msgid "Please enter the IP address that will be used to contact Gnocchi." -msgstr "Indique o endereço IP que irá ser usado para contactar o Gnocchi." - -#. Type: string -#. Description -#: ../gnocchi-api.templates:7001 -msgid "" -"This IP address should be accessible from the clients that will use this " -"service, so if you are installing a public cloud, this should be a public IP " -"address." -msgstr "" -"Este endereço IP deve ser acessível a partir dos clientes que irão usar este " -"serviço, portanto se está a instalar uma cloud pública, este deve ser um " -"endereço IP público." - -#. Type: string -#. Description -#: ../gnocchi-api.templates:8001 -msgid "Name of the region to register:" -msgstr "Nome da região a registar:" - -#. Type: string -#. Description -#: ../gnocchi-api.templates:8001 -msgid "" -"OpenStack supports using availability zones, with each region representing a " -"location. Please enter the zone that you wish to use when registering the " -"endpoint." 
-msgstr "" -"O Openstack suporta a utilização de zonas de disponibilidade, com cada " -"região a representar uma localização. Por favor, indique a zona que quer " -"user ao registar um ponto final." diff --git a/debian/po/pt_BR.po b/debian/po/pt_BR.po deleted file mode 100644 index 5725212e..00000000 --- a/debian/po/pt_BR.po +++ /dev/null @@ -1,276 +0,0 @@ -# Debconf translations for gnocchi. -# Copyright (C) 2012 THE gnocchi'S COPYRIGHT HOLDER -# This file is distributed under the same license as the gnocchi package. -# Adriano Rafael Gomes , 2012-2016. -# -msgid "" -msgstr "" -"Project-Id-Version: gnocchi\n" -"Report-Msgid-Bugs-To: gnocchi@packages.debian.org\n" -"POT-Creation-Date: 2016-03-29 13:10+0000\n" -"PO-Revision-Date: 2016-04-30 16:34-0300\n" -"Last-Translator: Adriano Rafael Gomes \n" -"Language-Team: Brazilian Portuguese \n" -"Language: pt_BR\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Type: string -#. Description -#: ../gnocchi-common.templates:2001 -msgid "Authentication server hostname:" -msgstr "Nome de máquina do servidor de autenticação:" - -#. Type: string -#. Description -#: ../gnocchi-common.templates:2001 -msgid "" -"Please specify the hostname of the authentication server for Gnocchi. " -"Typically this is also the hostname of the OpenStack Identity Service " -"(Keystone)." -msgstr "" -"Por favor, especifique o nome de máquina do seu servidor de autenticação " -"para o Gnocchi. Tipicamente, esse é também o nome de máquina do Serviço de " -"Identidade do OpenStack (Keystone)." - -#. Type: string -#. Description -#. Translators: a "tenant" in OpenStack world is -#. an entity that contains one or more username/password couples. -#. It's typically the tenant that will be used for billing. Having more than one -#. username/password is very helpful in larger organization. -#. You're advised to either keep "tenant" without translating it -#. or keep it parenthezised. 
Example for French: -#. locataire ("tenant") -#: ../gnocchi-common.templates:3001 -msgid "Authentication server tenant name:" -msgstr "Nome de locatário (\"tenant\") do servidor de autenticação:" - -#. Type: string -#. Description -#. Translators: a "tenant" in OpenStack world is -#. an entity that contains one or more username/password couples. -#. It's typically the tenant that will be used for billing. Having more than one -#. username/password is very helpful in larger organization. -#. You're advised to either keep "tenant" without translating it -#. or keep it parenthezised. Example for French: -#. locataire ("tenant") -#: ../gnocchi-common.templates:3001 -msgid "Please specify the authentication server tenant name." -msgstr "" -"Por favor, especifique o nome de locatário (\"tenant\") do servidor de " -"autenticação." - -#. Type: string -#. Description -#: ../gnocchi-common.templates:4001 -msgid "Authentication server username:" -msgstr "Nome de usuário do servidor de autenticação:" - -#. Type: string -#. Description -#: ../gnocchi-common.templates:4001 -msgid "Please specify the username to use with the authentication server." -msgstr "" -"Por favor, especifique o nome de usuário para usar com o servidor de " -"autenticação." - -#. Type: password -#. Description -#: ../gnocchi-common.templates:5001 -msgid "Authentication server password:" -msgstr "Senha do servidor de autenticação:" - -#. Type: password -#. Description -#: ../gnocchi-common.templates:5001 -msgid "Please specify the password to use with the authentication server." -msgstr "" -"Por favor, especifique a senha para usar com o servidor de autenticação." - -#. Type: boolean -#. Description -#: ../gnocchi-common.templates:6001 -msgid "Set up a database for Gnocchi?" -msgstr "Configurar um banco de dados para o Gnocchi?" - -#. Type: boolean -#. Description -#: ../gnocchi-common.templates:6001 -msgid "" -"No database has been set up for Gnocchi to use. 
Before continuing, you " -"should make sure you have the following information:" -msgstr "" -"Nenhum banco de dados foi configurado para o Gnocchi utilizar. Antes de " -"continuar, você deve se certificar que você tem as seguintes informações:" - -#. Type: boolean -#. Description -#: ../gnocchi-common.templates:6001 -msgid "" -" * the type of database that you want to use;\n" -" * the database server hostname (that server must allow TCP connections from " -"this\n" -" machine);\n" -" * a username and password to access the database." -msgstr "" -" * o tipo de banco de dados que você quer usar;\n" -" * o nome de máquina do servidor de banco de dados (tal servidor deve\n" -" permitir conexões TCP a partir deste computador);\n" -" * um usuário e uma senha para acessar o banco de dados." - -#. Type: boolean -#. Description -#: ../gnocchi-common.templates:6001 -msgid "" -"If some of these requirements are missing, do not choose this option and run " -"with regular SQLite support." -msgstr "" -"Se algum desses requisitos estiver faltando, rejeite essa opção e execute " -"com suporte regular ao SQLite." - -#. Type: boolean -#. Description -#: ../gnocchi-common.templates:6001 -msgid "" -"You can change this setting later on by running \"dpkg-reconfigure -plow " -"gnocchi-common\"." -msgstr "" -"Você pode mudar essa configuração depois, executando \"dpkg-reconfigure -" -"plow gnocchi-common\"." - -#. Type: boolean -#. Description -#: ../gnocchi-api.templates:2001 -msgid "Register Gnocchi in the Keystone endpoint catalog?" -msgstr "Registrar o Gnocchi no catálogo de \"endpoint\" do Keystone?" - -#. Type: boolean -#. Description -#: ../gnocchi-api.templates:2001 -msgid "" -"Each OpenStack service (each API) should be registered in order to be " -"accessible. This is done using \"keystone service-create\" and \"keystone " -"endpoint-create\". This can be done automatically now." -msgstr "" -"Cada serviço OpenStack (cada API) deve ser registrado para ser acessível. 
" -"Isso é feito usando \"keystone service-create\" e \"keystone endpoint-create" -"\". Isso pode ser feito automaticamente agora." - -#. Type: boolean -#. Description -#: ../gnocchi-api.templates:2001 -msgid "" -"Note that you will need to have an up and running Keystone server on which " -"to connect using a known admin project name, admin username and password. " -"The admin auth token is not used anymore." -msgstr "" -"Note que você precisará ter um servidor Keystone configurado e em execução " -"no qual conectar usando um nome de projeto de admin, nome de usuário de " -"admin e senha conhecidos. O \"token\" de autenticação do admin não é mais " -"usado." - -#. Type: string -#. Description -#: ../gnocchi-api.templates:3001 -msgid "Keystone server IP address:" -msgstr "Endereço IP do servidor Keystone:" - -#. Type: string -#. Description -#: ../gnocchi-api.templates:3001 -msgid "" -"Please enter the IP address of the Keystone server, so that gnocchi-api can " -"contact Keystone to do the Gnocchi service and endpoint creation." -msgstr "" -"Por favor, informe o endereço IP do servidor Keystone, de forma que o glance-" -"api possa contatar o Keystone para efetuar a criação do \"endpoint\" e do " -"serviço Gnocchi." - -#. Type: string -#. Description -#: ../gnocchi-api.templates:4001 -msgid "Keystone admin name:" -msgstr "Nome de admin do Keystone:" - -#. Type: string -#. Description -#. Type: string -#. Description -#. Type: password -#. Description -#: ../gnocchi-api.templates:4001 ../gnocchi-api.templates:5001 -#: ../gnocchi-api.templates:6001 -msgid "" -"To register the service endpoint, this package needs to know the Admin " -"login, name, project name, and password to the Keystone server." -msgstr "" -"Para registrar o \"endpoint\" do serviço, esse pacote precisa saber o login, " -"nome, nome do projeto e senha do Admin no servidor Keystone." - -#. Type: string -#. 
Description -#: ../gnocchi-api.templates:5001 -msgid "Keystone admin project name:" -msgstr "Nome do projeto admin no Keystone:" - -#. Type: password -#. Description -#: ../gnocchi-api.templates:6001 -msgid "Keystone admin password:" -msgstr "Senha do admin no Keystone:" - -#. Type: string -#. Description -#: ../gnocchi-api.templates:7001 -msgid "Gnocchi endpoint IP address:" -msgstr "Endereço IP do \"endpoint\" Gnocchi:" - -#. Type: string -#. Description -#: ../gnocchi-api.templates:7001 -msgid "Please enter the IP address that will be used to contact Gnocchi." -msgstr "" -"Por favor, informe o endereço IP que será usado para contatar o Gnocchi." - -#. Type: string -#. Description -#: ../gnocchi-api.templates:7001 -msgid "" -"This IP address should be accessible from the clients that will use this " -"service, so if you are installing a public cloud, this should be a public IP " -"address." -msgstr "" -"Esse endereço IP deveria ser acessível a partir dos clientes que usarão esse " -"serviço, assim se você está instalando uma nuvem pública, ele deveria ser um " -"endereço IP público." - -#. Type: string -#. Description -#: ../gnocchi-api.templates:8001 -msgid "Name of the region to register:" -msgstr "Nome da região para registrar:" - -#. Type: string -#. Description -#: ../gnocchi-api.templates:8001 -msgid "" -"OpenStack supports using availability zones, with each region representing a " -"location. Please enter the zone that you wish to use when registering the " -"endpoint." -msgstr "" -"O OpenStack suporta usar zonas de disponibilidade, com cada região " -"representando uma localidade. Por favor, informe a zona que você deseja usar " -"ao registrar o \"endpoint\"." - -#~| msgid "" -#~| "To configure its endpoint in Keystone, glance-api needs the Keystone " -#~| "authentication token." -#~ msgid "" -#~ "To configure its endpoint in Keystone, gnocchi-api needs the Keystone " -#~ "authentication token." 
-#~ msgstr "" -#~ "Para configurar o seu \"endpoint\" no Keystone, o gnocchi-api precisa do " -#~ "\"token\" de autenticação do Keystone." diff --git a/debian/po/ru.po b/debian/po/ru.po deleted file mode 100644 index a6322aa4..00000000 --- a/debian/po/ru.po +++ /dev/null @@ -1,281 +0,0 @@ -# Copyright (C) YEAR THE PACKAGE'S COPYRIGHT HOLDER -# This file is distributed under the same license as the glance package. -# -# Yuri Kozlov , 2012, 2013. -msgid "" -msgstr "" -"Project-Id-Version: glance 2013.2-1\n" -"Report-Msgid-Bugs-To: gnocchi@packages.debian.org\n" -"POT-Creation-Date: 2016-03-29 13:10+0000\n" -"PO-Revision-Date: 2013-11-17 08:45+0400\n" -"Last-Translator: Yuri Kozlov \n" -"Language-Team: Russian \n" -"Language: ru\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" -"X-Generator: Lokalize 1.4\n" -"Plural-Forms: nplurals=3; plural=(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n" -"%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2);\n" - -#. Type: string -#. Description -#: ../gnocchi-common.templates:2001 -msgid "Authentication server hostname:" -msgstr "Имя узла сервера аутентификации:" - -#. Type: string -#. Description -#: ../gnocchi-common.templates:2001 -msgid "" -"Please specify the hostname of the authentication server for Gnocchi. " -"Typically this is also the hostname of the OpenStack Identity Service " -"(Keystone)." -msgstr "" -"Введите имя узла сервера аутентификации для Gnocchi. Данное имя обычно " -"совпадает с именем узла OpenStack Identity Service (Keystone)." - -#. Type: string -#. Description -#. Translators: a "tenant" in OpenStack world is -#. an entity that contains one or more username/password couples. -#. It's typically the tenant that will be used for billing. Having more than one -#. username/password is very helpful in larger organization. -#. You're advised to either keep "tenant" without translating it -#. or keep it parenthezised. Example for French: -#. 
locataire ("tenant") -#: ../gnocchi-common.templates:3001 -msgid "Authentication server tenant name:" -msgstr "Членское имя сервера аутентификации:" - -#. Type: string -#. Description -#. Translators: a "tenant" in OpenStack world is -#. an entity that contains one or more username/password couples. -#. It's typically the tenant that will be used for billing. Having more than one -#. username/password is very helpful in larger organization. -#. You're advised to either keep "tenant" without translating it -#. or keep it parenthezised. Example for French: -#. locataire ("tenant") -#: ../gnocchi-common.templates:3001 -msgid "Please specify the authentication server tenant name." -msgstr "Укажите членское (tenant) имя сервера аутентификации." - -#. Type: string -#. Description -#: ../gnocchi-common.templates:4001 -msgid "Authentication server username:" -msgstr "Имя пользователя для сервера аутентификации:" - -#. Type: string -#. Description -#: ../gnocchi-common.templates:4001 -msgid "Please specify the username to use with the authentication server." -msgstr "Введите имя пользователя для работы с сервером аутентификации." - -#. Type: password -#. Description -#: ../gnocchi-common.templates:5001 -msgid "Authentication server password:" -msgstr "Пароль для сервера аутентификации:" - -#. Type: password -#. Description -#: ../gnocchi-common.templates:5001 -msgid "Please specify the password to use with the authentication server." -msgstr "Введите пароль для работы с сервером аутентификации." - -#. Type: boolean -#. Description -#: ../gnocchi-common.templates:6001 -msgid "Set up a database for Gnocchi?" -msgstr "Настроить базу данных для Gnocchi?" - -#. Type: boolean -#. Description -#: ../gnocchi-common.templates:6001 -#, fuzzy -#| msgid "" -#| "No database has been set up for glance-registry or glance-api to use. " -#| "Before continuing, you should make sure you have the following " -#| "information:" -msgid "" -"No database has been set up for Gnocchi to use. 
Before continuing, you " -"should make sure you have the following information:" -msgstr "" -"Для использования glance-registry или glance-api требуется база данных, " -"которая пока не настроена. Перед тем как продолжить, проверьте:" - -#. Type: boolean -#. Description -#: ../gnocchi-common.templates:6001 -msgid "" -" * the type of database that you want to use;\n" -" * the database server hostname (that server must allow TCP connections from " -"this\n" -" machine);\n" -" * a username and password to access the database." -msgstr "" -" * тип базы данных, который хотите использовать;\n" -" * имя узла сервера базы данных (этот сервер должен принимать\n" -" TCP-соединения с этой машины);\n" -" * имя пользователя и пароль для доступа к базе данных." - -#. Type: boolean -#. Description -#: ../gnocchi-common.templates:6001 -msgid "" -"If some of these requirements are missing, do not choose this option and run " -"with regular SQLite support." -msgstr "" -"Если не хватает хотя бы одного параметра, ответьте отрицательно и включите " -"поддержку SQLite." - -#. Type: boolean -#. Description -#: ../gnocchi-common.templates:6001 -#, fuzzy -#| msgid "" -#| "You can change this setting later on by running \"dpkg-reconfigure -plow " -#| "glance-common\"." -msgid "" -"You can change this setting later on by running \"dpkg-reconfigure -plow " -"gnocchi-common\"." -msgstr "" -"Позднее, вы можете изменить эту настройку, запустив «dpkg-reconfigure -plow " -"glance-common»." - -#. Type: boolean -#. Description -#: ../gnocchi-api.templates:2001 -msgid "Register Gnocchi in the Keystone endpoint catalog?" -msgstr "Зарегистрировать Gnocchi в каталоге конечных точек Keystone?" - -#. Type: boolean -#. Description -#: ../gnocchi-api.templates:2001 -msgid "" -"Each OpenStack service (each API) should be registered in order to be " -"accessible. This is done using \"keystone service-create\" and \"keystone " -"endpoint-create\". This can be done automatically now." 
-msgstr "" -"Для доступа к службам Openstack (каждому API) их нужно регистрировать. Это " -"выполняется с помощью команды «keystone service-create» и «keystone endpoint-" -"create». Это может быть сделано автоматически прямо сейчас." - -#. Type: boolean -#. Description -#: ../gnocchi-api.templates:2001 -#, fuzzy -#| msgid "" -#| "Note that you will need to have an up and running Keystone server on " -#| "which to connect using the Keystone authentication token." -msgid "" -"Note that you will need to have an up and running Keystone server on which " -"to connect using a known admin project name, admin username and password. " -"The admin auth token is not used anymore." -msgstr "" -"Заметим, что у вас должен быть работающий сервер Keystone, к которому будет " -"произведено подключение с помощью токена аутентификации Keystone." - -#. Type: string -#. Description -#: ../gnocchi-api.templates:3001 -msgid "Keystone server IP address:" -msgstr "IP-адрес сервера Keystone:" - -#. Type: string -#. Description -#: ../gnocchi-api.templates:3001 -msgid "" -"Please enter the IP address of the Keystone server, so that gnocchi-api can " -"contact Keystone to do the Gnocchi service and endpoint creation." -msgstr "" -"Введите IP-адрес сервера Keystone для того, чтобы glance-api могла " -"подключиться к Keystone для запуска службы Gnocchi и создания конечной точки." - -#. Type: string -#. Description -#: ../gnocchi-api.templates:4001 -#, fuzzy -#| msgid "Keystone authentication token:" -msgid "Keystone admin name:" -msgstr "Токен аутентификации Keystone:" - -#. Type: string -#. Description -#. Type: string -#. Description -#. Type: password -#. Description -#: ../gnocchi-api.templates:4001 ../gnocchi-api.templates:5001 -#: ../gnocchi-api.templates:6001 -msgid "" -"To register the service endpoint, this package needs to know the Admin " -"login, name, project name, and password to the Keystone server." -msgstr "" - -#. Type: string -#. 
Description -#: ../gnocchi-api.templates:5001 -msgid "Keystone admin project name:" -msgstr "" - -#. Type: password -#. Description -#: ../gnocchi-api.templates:6001 -msgid "Keystone admin password:" -msgstr "" - -#. Type: string -#. Description -#: ../gnocchi-api.templates:7001 -msgid "Gnocchi endpoint IP address:" -msgstr "IP-адрес конечной точки Gnocchi:" - -#. Type: string -#. Description -#: ../gnocchi-api.templates:7001 -msgid "Please enter the IP address that will be used to contact Gnocchi." -msgstr "Введите IP-адрес, который будет использован для подключения к Gnocchi." - -#. Type: string -#. Description -#: ../gnocchi-api.templates:7001 -msgid "" -"This IP address should be accessible from the clients that will use this " -"service, so if you are installing a public cloud, this should be a public IP " -"address." -msgstr "" -"Этот IP-адрес должен быть доступен клиентам, которые будут использовать эту " -"службу, поэтому если вы разворачиваете открытое облако, то это должен быть " -"публичный IP-адрес." - -#. Type: string -#. Description -#: ../gnocchi-api.templates:8001 -msgid "Name of the region to register:" -msgstr "Название области для регистрации:" - -#. Type: string -#. Description -#: ../gnocchi-api.templates:8001 -msgid "" -"OpenStack supports using availability zones, with each region representing a " -"location. Please enter the zone that you wish to use when registering the " -"endpoint." -msgstr "" -"Openstack поддерживает разделение на зоны доступности, где каждая область " -"представляет определённое расположение. Введите зону, которую вы хотите " -"использовать при регистрации конечной точки." - -#, fuzzy -#~| msgid "" -#~| "To configure its endpoint in Keystone, glance-api needs the Keystone " -#~| "authentication token." -#~ msgid "" -#~ "To configure its endpoint in Keystone, gnocchi-api needs the Keystone " -#~ "authentication token." 
-#~ msgstr "" -#~ "Для настройки собственной конечной точки в Keystone glance-api требуется " -#~ "токен аутентификации Keystone." diff --git a/debian/po/sv.po b/debian/po/sv.po deleted file mode 100644 index 81249530..00000000 --- a/debian/po/sv.po +++ /dev/null @@ -1,281 +0,0 @@ -# Translation of glance debconf template to Swedish -# Copyright (C) 2012-2014 Martin Bagge -# This file is distributed under the same license as the glance package. -# -# Martin Bagge , 2012, 2014 -msgid "" -msgstr "" -"Project-Id-Version: glance\n" -"Report-Msgid-Bugs-To: gnocchi@packages.debian.org\n" -"POT-Creation-Date: 2016-03-29 13:10+0000\n" -"PO-Revision-Date: 2014-01-09 10:35+0100\n" -"Last-Translator: Martin Bagge / brother \n" -"Language-Team: Swedish \n" -"Language: Swedish\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" -"X-Generator: Poedit 1.5.4\n" - -#. Type: string -#. Description -#: ../gnocchi-common.templates:2001 -msgid "Authentication server hostname:" -msgstr "Värdnamn för identifieringsserver:Värdnamn för identifieringsserver:" - -#. Type: string -#. Description -#: ../gnocchi-common.templates:2001 -msgid "" -"Please specify the hostname of the authentication server for Gnocchi. " -"Typically this is also the hostname of the OpenStack Identity Service " -"(Keystone)." -msgstr "" -"Ange värdnamn till din Gnocchi-identifieringsserver. Detta är vanligtvis " -"samma värdnamn som till din OpenStack-identitetstjänst (Keystone)." - -#. Type: string -#. Description -#. Translators: a "tenant" in OpenStack world is -#. an entity that contains one or more username/password couples. -#. It's typically the tenant that will be used for billing. Having more than one -#. username/password is very helpful in larger organization. -#. You're advised to either keep "tenant" without translating it -#. or keep it parenthezised. Example for French: -#. 
locataire ("tenant") -#: ../gnocchi-common.templates:3001 -msgid "Authentication server tenant name:" -msgstr "Namn för \"tenant\" (administratör) på identifieringsservern:" - -#. Type: string -#. Description -#. Translators: a "tenant" in OpenStack world is -#. an entity that contains one or more username/password couples. -#. It's typically the tenant that will be used for billing. Having more than one -#. username/password is very helpful in larger organization. -#. You're advised to either keep "tenant" without translating it -#. or keep it parenthezised. Example for French: -#. locataire ("tenant") -#: ../gnocchi-common.templates:3001 -msgid "Please specify the authentication server tenant name." -msgstr "" -"Ange \"tenant\"-namn för identifieringsservern. (\"Tenant\" är ungefär " -"översättningsbart till \"administratör\")." - -#. Type: string -#. Description -#: ../gnocchi-common.templates:4001 -msgid "Authentication server username:" -msgstr "Användarnamn på identifieringsservern:" - -#. Type: string -#. Description -#: ../gnocchi-common.templates:4001 -msgid "Please specify the username to use with the authentication server." -msgstr "" -"Ange användarnamnet som ska användas för att komma åt identifieringsservern." - -#. Type: password -#. Description -#: ../gnocchi-common.templates:5001 -msgid "Authentication server password:" -msgstr "Lösenord på identifieringsservern:" - -#. Type: password -#. Description -#: ../gnocchi-common.templates:5001 -msgid "Please specify the password to use with the authentication server." -msgstr "" -"Ange lösenordet som ska användas för att komma åt identifieringsservern." - -#. Type: boolean -#. Description -#: ../gnocchi-common.templates:6001 -msgid "Set up a database for Gnocchi?" -msgstr "Ska en databas installeras för Gnocchi?" - -#. Type: boolean -#. Description -#: ../gnocchi-common.templates:6001 -#, fuzzy -#| msgid "" -#| "No database has been set up for glance-registry or glance-api to use. 
" -#| "Before continuing, you should make sure you have the following " -#| "information:" -msgid "" -"No database has been set up for Gnocchi to use. Before continuing, you " -"should make sure you have the following information:" -msgstr "" -"Ingen databas har installerats för glance-registry. Innan du fortsätter " -"behöver följande finnas tillgängligt:" - -#. Type: boolean -#. Description -#: ../gnocchi-common.templates:6001 -msgid "" -" * the type of database that you want to use;\n" -" * the database server hostname (that server must allow TCP connections from " -"this\n" -" machine);\n" -" * a username and password to access the database." -msgstr "" -" * vilken databastyp du vill använda.\n" -" * serverns värdnamn (som måste kunna ta emot TCP-anslutningar\n" -" från den här maskinen)\n" -" * användarnamn och lösenord för att komma åt databasen." - -#. Type: boolean -#. Description -#: ../gnocchi-common.templates:6001 -msgid "" -"If some of these requirements are missing, do not choose this option and run " -"with regular SQLite support." -msgstr "" -"Om något av dessa krav saknar bör du avböja detta alternativ och fortsätta " -"använda SQLite-stödet." - -#. Type: boolean -#. Description -#: ../gnocchi-common.templates:6001 -#, fuzzy -#| msgid "" -#| "You can change this setting later on by running \"dpkg-reconfigure -plow " -#| "glance-common\"." -msgid "" -"You can change this setting later on by running \"dpkg-reconfigure -plow " -"gnocchi-common\"." -msgstr "" -"Denna inställning kan ändras senare genom att köra \"dpkg-reconfigure -plow " -"neutron\"." - -#. Type: boolean -#. Description -#: ../gnocchi-api.templates:2001 -msgid "Register Gnocchi in the Keystone endpoint catalog?" -msgstr "Ska Gnocchi registreras i keystones katalog med ändpunkter?" - -#. Type: boolean -#. Description -#: ../gnocchi-api.templates:2001 -msgid "" -"Each OpenStack service (each API) should be registered in order to be " -"accessible. 
This is done using \"keystone service-create\" and \"keystone " -"endpoint-create\". This can be done automatically now." -msgstr "" -"Alla OpenStack-tjänster (varje API) ska registreras för att kunna användas. " -"Detta görs med kommandona \"keystone service-create\" och \"keystone " -"endpoint-create\". Detta kan göras automatiskt nu." - -#. Type: boolean -#. Description -#: ../gnocchi-api.templates:2001 -#, fuzzy -#| msgid "" -#| "Note that you will need to have an up and running Keystone server on " -#| "which to connect using the Keystone authentication token." -msgid "" -"Note that you will need to have an up and running Keystone server on which " -"to connect using a known admin project name, admin username and password. " -"The admin auth token is not used anymore." -msgstr "OBS. Du behöver ha en fungerande keystone-server att ansluta till." - -#. Type: string -#. Description -#: ../gnocchi-api.templates:3001 -msgid "Keystone server IP address:" -msgstr "IP-adress till Keystone:" - -#. Type: string -#. Description -#: ../gnocchi-api.templates:3001 -msgid "" -"Please enter the IP address of the Keystone server, so that gnocchi-api can " -"contact Keystone to do the Gnocchi service and endpoint creation." -msgstr "" -"Ange IP-adressen till din Keystone-server så att glance-api kan kontakta " -"Keystone för att lägga till Gnocchi-tjänsten som en ändpunkt." - -#. Type: string -#. Description -#: ../gnocchi-api.templates:4001 -#, fuzzy -#| msgid "Keystone authentication token:" -msgid "Keystone admin name:" -msgstr "Autetiseringsvärde för Keystone:" - -#. Type: string -#. Description -#. Type: string -#. Description -#. Type: password -#. Description -#: ../gnocchi-api.templates:4001 ../gnocchi-api.templates:5001 -#: ../gnocchi-api.templates:6001 -msgid "" -"To register the service endpoint, this package needs to know the Admin " -"login, name, project name, and password to the Keystone server." -msgstr "" - -#. Type: string -#. 
Description -#: ../gnocchi-api.templates:5001 -msgid "Keystone admin project name:" -msgstr "" - -#. Type: password -#. Description -#: ../gnocchi-api.templates:6001 -msgid "Keystone admin password:" -msgstr "" - -#. Type: string -#. Description -#: ../gnocchi-api.templates:7001 -msgid "Gnocchi endpoint IP address:" -msgstr "IP-adress för Gnocchi-ändpunkt:" - -#. Type: string -#. Description -#: ../gnocchi-api.templates:7001 -msgid "Please enter the IP address that will be used to contact Gnocchi." -msgstr "Ange den IP-adress som ska användas för att kontakta Gnocchi." - -#. Type: string -#. Description -#: ../gnocchi-api.templates:7001 -msgid "" -"This IP address should be accessible from the clients that will use this " -"service, so if you are installing a public cloud, this should be a public IP " -"address." -msgstr "" -"Denna IP-adress ska vara nåbar från klienterna som vill använda den här " -"tjänsten. Om detta är en publik molntjänst så ska det vara en publik IP-" -"adress." - -#. Type: string -#. Description -#: ../gnocchi-api.templates:8001 -msgid "Name of the region to register:" -msgstr "Regionnamn:" - -#. Type: string -#. Description -#: ../gnocchi-api.templates:8001 -msgid "" -"OpenStack supports using availability zones, with each region representing a " -"location. Please enter the zone that you wish to use when registering the " -"endpoint." -msgstr "" -"OpenStack kan användas med tillgänglighetszoner. Varje region representerar " -"en plats. Ange zonen som ska användas när ändpunkten registreras." - -#, fuzzy -#~| msgid "" -#~| "To configure its endpoint in Keystone, glance-api needs the Keystone " -#~| "authentication token." -#~ msgid "" -#~ "To configure its endpoint in Keystone, gnocchi-api needs the Keystone " -#~ "authentication token." -#~ msgstr "" -#~ "För att lägga till ändpunkt i Keystone behöver glance-api ett " -#~ "autentiseringsvärde för Keystone." 
diff --git a/debian/po/templates.pot b/debian/po/templates.pot deleted file mode 100644 index 5e60a281..00000000 --- a/debian/po/templates.pot +++ /dev/null @@ -1,229 +0,0 @@ -# SOME DESCRIPTIVE TITLE. -# Copyright (C) YEAR THE PACKAGE'S COPYRIGHT HOLDER -# This file is distributed under the same license as the gnocchi package. -# FIRST AUTHOR , YEAR. -# -#, fuzzy -msgid "" -msgstr "" -"Project-Id-Version: gnocchi\n" -"Report-Msgid-Bugs-To: gnocchi@packages.debian.org\n" -"POT-Creation-Date: 2016-03-29 13:10+0000\n" -"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" -"Last-Translator: FULL NAME \n" -"Language-Team: LANGUAGE \n" -"Language: \n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=CHARSET\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Type: string -#. Description -#: ../gnocchi-common.templates:2001 -msgid "Authentication server hostname:" -msgstr "" - -#. Type: string -#. Description -#: ../gnocchi-common.templates:2001 -msgid "" -"Please specify the hostname of the authentication server for Gnocchi. " -"Typically this is also the hostname of the OpenStack Identity Service " -"(Keystone)." -msgstr "" - -#. Type: string -#. Description -#. Translators: a "tenant" in OpenStack world is -#. an entity that contains one or more username/password couples. -#. It's typically the tenant that will be used for billing. Having more than one -#. username/password is very helpful in larger organization. -#. You're advised to either keep "tenant" without translating it -#. or keep it parenthezised. Example for French: -#. locataire ("tenant") -#: ../gnocchi-common.templates:3001 -msgid "Authentication server tenant name:" -msgstr "" - -#. Type: string -#. Description -#. Translators: a "tenant" in OpenStack world is -#. an entity that contains one or more username/password couples. -#. It's typically the tenant that will be used for billing. Having more than one -#. username/password is very helpful in larger organization. -#. 
You're advised to either keep "tenant" without translating it -#. or keep it parenthezised. Example for French: -#. locataire ("tenant") -#: ../gnocchi-common.templates:3001 -msgid "Please specify the authentication server tenant name." -msgstr "" - -#. Type: string -#. Description -#: ../gnocchi-common.templates:4001 -msgid "Authentication server username:" -msgstr "" - -#. Type: string -#. Description -#: ../gnocchi-common.templates:4001 -msgid "Please specify the username to use with the authentication server." -msgstr "" - -#. Type: password -#. Description -#: ../gnocchi-common.templates:5001 -msgid "Authentication server password:" -msgstr "" - -#. Type: password -#. Description -#: ../gnocchi-common.templates:5001 -msgid "Please specify the password to use with the authentication server." -msgstr "" - -#. Type: boolean -#. Description -#: ../gnocchi-common.templates:6001 -msgid "Set up a database for Gnocchi?" -msgstr "" - -#. Type: boolean -#. Description -#: ../gnocchi-common.templates:6001 -msgid "" -"No database has been set up for Gnocchi to use. Before continuing, you " -"should make sure you have the following information:" -msgstr "" - -#. Type: boolean -#. Description -#: ../gnocchi-common.templates:6001 -msgid "" -" * the type of database that you want to use;\n" -" * the database server hostname (that server must allow TCP connections from " -"this\n" -" machine);\n" -" * a username and password to access the database." -msgstr "" - -#. Type: boolean -#. Description -#: ../gnocchi-common.templates:6001 -msgid "" -"If some of these requirements are missing, do not choose this option and run " -"with regular SQLite support." -msgstr "" - -#. Type: boolean -#. Description -#: ../gnocchi-common.templates:6001 -msgid "" -"You can change this setting later on by running \"dpkg-reconfigure -plow " -"gnocchi-common\"." -msgstr "" - -#. Type: boolean -#. 
Description -#: ../gnocchi-api.templates:2001 -msgid "Register Gnocchi in the Keystone endpoint catalog?" -msgstr "" - -#. Type: boolean -#. Description -#: ../gnocchi-api.templates:2001 -msgid "" -"Each OpenStack service (each API) should be registered in order to be " -"accessible. This is done using \"keystone service-create\" and \"keystone " -"endpoint-create\". This can be done automatically now." -msgstr "" - -#. Type: boolean -#. Description -#: ../gnocchi-api.templates:2001 -msgid "" -"Note that you will need to have an up and running Keystone server on which " -"to connect using a known admin project name, admin username and password. " -"The admin auth token is not used anymore." -msgstr "" - -#. Type: string -#. Description -#: ../gnocchi-api.templates:3001 -msgid "Keystone server IP address:" -msgstr "" - -#. Type: string -#. Description -#: ../gnocchi-api.templates:3001 -msgid "" -"Please enter the IP address of the Keystone server, so that gnocchi-api can " -"contact Keystone to do the Gnocchi service and endpoint creation." -msgstr "" - -#. Type: string -#. Description -#: ../gnocchi-api.templates:4001 -msgid "Keystone admin name:" -msgstr "" - -#. Type: string -#. Description -#. Type: string -#. Description -#. Type: password -#. Description -#: ../gnocchi-api.templates:4001 ../gnocchi-api.templates:5001 -#: ../gnocchi-api.templates:6001 -msgid "" -"To register the service endpoint, this package needs to know the Admin " -"login, name, project name, and password to the Keystone server." -msgstr "" - -#. Type: string -#. Description -#: ../gnocchi-api.templates:5001 -msgid "Keystone admin project name:" -msgstr "" - -#. Type: password -#. Description -#: ../gnocchi-api.templates:6001 -msgid "Keystone admin password:" -msgstr "" - -#. Type: string -#. Description -#: ../gnocchi-api.templates:7001 -msgid "Gnocchi endpoint IP address:" -msgstr "" - -#. Type: string -#. 
Description -#: ../gnocchi-api.templates:7001 -msgid "Please enter the IP address that will be used to contact Gnocchi." -msgstr "" - -#. Type: string -#. Description -#: ../gnocchi-api.templates:7001 -msgid "" -"This IP address should be accessible from the clients that will use this " -"service, so if you are installing a public cloud, this should be a public IP " -"address." -msgstr "" - -#. Type: string -#. Description -#: ../gnocchi-api.templates:8001 -msgid "Name of the region to register:" -msgstr "" - -#. Type: string -#. Description -#: ../gnocchi-api.templates:8001 -msgid "" -"OpenStack supports using availability zones, with each region representing a " -"location. Please enter the zone that you wish to use when registering the " -"endpoint." -msgstr "" diff --git a/debian/po/zh_CN.po b/debian/po/zh_CN.po deleted file mode 100644 index 0fd9ec5d..00000000 --- a/debian/po/zh_CN.po +++ /dev/null @@ -1,263 +0,0 @@ -# SOME DESCRIPTIVE TITLE. -# Copyright (C) YEAR THE PACKAGE'S COPYRIGHT HOLDER -# This file is distributed under the same license as the PACKAGE package. -# FIRST AUTHOR , YEAR. -# -msgid "" -msgstr "" -"Project-Id-Version: glance\n" -"Report-Msgid-Bugs-To: gnocchi@packages.debian.org\n" -"POT-Creation-Date: 2016-03-29 13:10+0000\n" -"PO-Revision-Date: 2012-08-27 17:14+0800\n" -"Last-Translator: ben \n" -"Language-Team: LANGUAGE \n" -"Language: \n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Type: string -#. Description -#: ../gnocchi-common.templates:2001 -#, fuzzy -#| msgid "Auth server admin token:" -msgid "Authentication server hostname:" -msgstr "Auth 服务器管理token:" - -#. Type: string -#. Description -#: ../gnocchi-common.templates:2001 -msgid "" -"Please specify the hostname of the authentication server for Gnocchi. " -"Typically this is also the hostname of the OpenStack Identity Service " -"(Keystone)." 
-msgstr "" -"请指定您的Gnocchi认证服务器的URL。一般来说这个URL也是您的OpenStack身份服务的" -"URL(keystone)。" - -#. Type: string -#. Description -#. Translators: a "tenant" in OpenStack world is -#. an entity that contains one or more username/password couples. -#. It's typically the tenant that will be used for billing. Having more than one -#. username/password is very helpful in larger organization. -#. You're advised to either keep "tenant" without translating it -#. or keep it parenthezised. Example for French: -#. locataire ("tenant") -#: ../gnocchi-common.templates:3001 -#, fuzzy -#| msgid "Auth server admin token:" -msgid "Authentication server tenant name:" -msgstr "Auth 服务器管理token:" - -#. Type: string -#. Description -#. Translators: a "tenant" in OpenStack world is -#. an entity that contains one or more username/password couples. -#. It's typically the tenant that will be used for billing. Having more than one -#. username/password is very helpful in larger organization. -#. You're advised to either keep "tenant" without translating it -#. or keep it parenthezised. Example for French: -#. locataire ("tenant") -#: ../gnocchi-common.templates:3001 -msgid "Please specify the authentication server tenant name." -msgstr "" - -#. Type: string -#. Description -#: ../gnocchi-common.templates:4001 -#, fuzzy -#| msgid "Auth server admin token:" -msgid "Authentication server username:" -msgstr "Auth 服务器管理token:" - -#. Type: string -#. Description -#: ../gnocchi-common.templates:4001 -msgid "Please specify the username to use with the authentication server." -msgstr "" - -#. Type: password -#. Description -#: ../gnocchi-common.templates:5001 -#, fuzzy -#| msgid "Auth server admin token:" -msgid "Authentication server password:" -msgstr "Auth 服务器管理token:" - -#. Type: password -#. Description -#: ../gnocchi-common.templates:5001 -msgid "Please specify the password to use with the authentication server." -msgstr "" - -#. Type: boolean -#. 
Description -#: ../gnocchi-common.templates:6001 -#, fuzzy -#| msgid "Set up a database for glance-registry?" -msgid "Set up a database for Gnocchi?" -msgstr "为glance-registry设置数据库?" - -#. Type: boolean -#. Description -#: ../gnocchi-common.templates:6001 -#, fuzzy -#| msgid "" -#| "No database has been set up for glance-registry to use. Before " -#| "continuing, you should make sure you have:" -msgid "" -"No database has been set up for Gnocchi to use. Before continuing, you " -"should make sure you have the following information:" -msgstr "" -"未曾为glance-registry 设置数据库。如果你想现在设置,请确定你有以下信息:" - -#. Type: boolean -#. Description -#: ../gnocchi-common.templates:6001 -#, fuzzy -#| msgid "" -#| " - the server host name (that server must allow TCP connections\n" -#| " from this machine);\n" -#| " - a username and password to access the database.\n" -#| " - A database type that you want to use." -msgid "" -" * the type of database that you want to use;\n" -" * the database server hostname (that server must allow TCP connections from " -"this\n" -" machine);\n" -" * a username and password to access the database." -msgstr "" -" * 数据库服务器的主机名 (需要这台主机的TCP链接);\n" -" * 访问这个数据库的用户名及密码;\n" -" * 你希望使用的数据库管理软件的类型。" - -#. Type: boolean -#. Description -#: ../gnocchi-common.templates:6001 -#, fuzzy -#| msgid "" -#| "If some of these requirements are missing, reject this option and run " -#| "with regular sqlite support." -msgid "" -"If some of these requirements are missing, do not choose this option and run " -"with regular SQLite support." -msgstr "如果部分需求缺失,请运行通用的SQLite。" - -#. Type: boolean -#. Description -#: ../gnocchi-common.templates:6001 -#, fuzzy -#| msgid "" -#| "You can change this setting later on by running 'dpkg-reconfigure -plow " -#| "glance-registry" -msgid "" -"You can change this setting later on by running \"dpkg-reconfigure -plow " -"gnocchi-common\"." -msgstr "" -"您可以通过运行\"dpkg-reconfigure-plow glance-registry\" 命令来修改配置。" - -#. Type: boolean -#. 
Description -#: ../gnocchi-api.templates:2001 -msgid "Register Gnocchi in the Keystone endpoint catalog?" -msgstr "" - -#. Type: boolean -#. Description -#: ../gnocchi-api.templates:2001 -msgid "" -"Each OpenStack service (each API) should be registered in order to be " -"accessible. This is done using \"keystone service-create\" and \"keystone " -"endpoint-create\". This can be done automatically now." -msgstr "" - -#. Type: boolean -#. Description -#: ../gnocchi-api.templates:2001 -msgid "" -"Note that you will need to have an up and running Keystone server on which " -"to connect using a known admin project name, admin username and password. " -"The admin auth token is not used anymore." -msgstr "" - -#. Type: string -#. Description -#: ../gnocchi-api.templates:3001 -msgid "Keystone server IP address:" -msgstr "" - -#. Type: string -#. Description -#: ../gnocchi-api.templates:3001 -msgid "" -"Please enter the IP address of the Keystone server, so that gnocchi-api can " -"contact Keystone to do the Gnocchi service and endpoint creation." -msgstr "" - -#. Type: string -#. Description -#: ../gnocchi-api.templates:4001 -msgid "Keystone admin name:" -msgstr "" - -#. Type: string -#. Description -#. Type: string -#. Description -#. Type: password -#. Description -#: ../gnocchi-api.templates:4001 ../gnocchi-api.templates:5001 -#: ../gnocchi-api.templates:6001 -msgid "" -"To register the service endpoint, this package needs to know the Admin " -"login, name, project name, and password to the Keystone server." -msgstr "" - -#. Type: string -#. Description -#: ../gnocchi-api.templates:5001 -msgid "Keystone admin project name:" -msgstr "" - -#. Type: password -#. Description -#: ../gnocchi-api.templates:6001 -msgid "Keystone admin password:" -msgstr "" - -#. Type: string -#. Description -#: ../gnocchi-api.templates:7001 -msgid "Gnocchi endpoint IP address:" -msgstr "" - -#. Type: string -#. 
Description -#: ../gnocchi-api.templates:7001 -msgid "Please enter the IP address that will be used to contact Gnocchi." -msgstr "" - -#. Type: string -#. Description -#: ../gnocchi-api.templates:7001 -msgid "" -"This IP address should be accessible from the clients that will use this " -"service, so if you are installing a public cloud, this should be a public IP " -"address." -msgstr "" - -#. Type: string -#. Description -#: ../gnocchi-api.templates:8001 -msgid "Name of the region to register:" -msgstr "" - -#. Type: string -#. Description -#: ../gnocchi-api.templates:8001 -msgid "" -"OpenStack supports using availability zones, with each region representing a " -"location. Please enter the zone that you wish to use when registering the " -"endpoint." -msgstr "" diff --git a/debian/rules b/debian/rules index c60503f7..a2b7825a 100755 --- a/debian/rules +++ b/debian/rules @@ -11,6 +11,7 @@ UNIT_TEST_BLACKLIST = test_carbonara.CarbonaraCmd.*|.*test_bin\.BinTestCase\.tes override_dh_clean: dh_clean rm -rf build debian/gnocchi-common.postinst debian/gnocchi-common.config debian/gnocchi-api.config debian/gnocchi-api.postinst + rm -rf debian/CHANGEME-common.postrm debian/*.templates debian/po override_dh_auto_clean: python3 setup.py clean @@ -20,6 +21,8 @@ override_dh_auto_build: /usr/share/openstack-pkg-tools/pkgos_insert_include pkgos_func gnocchi-common.config /usr/share/openstack-pkg-tools/pkgos_insert_include pkgos_func gnocchi-api.postinst /usr/share/openstack-pkg-tools/pkgos_insert_include pkgos_func gnocchi-api.config + /usr/share/openstack-pkg-tools/pkgos_insert_include pkgos_postrm gnocchi-common.postrm + pkgos-merge-templates gnocchi-common gnocchi db ksat override_dh_auto_install: echo "Do nothing..." 
@@ -86,7 +89,6 @@ endif --namespace cotyledon \ --namespace keystonemiddleware.auth_token sed -i 's|^[ \t#]*url[ \t#]*=.*|url = sqlite:////var/lib/gnocchi/gnocchidb|' $(CURDIR)/debian/gnocchi-common/usr/share/gnocchi-common/gnocchi.conf - sed -i 's|^[# \t]*auth_protocol[\t #]*=.*|auth_protocol = http|' $(CURDIR)/debian/gnocchi-common/usr/share/gnocchi-common/gnocchi.conf dh_install dh_missing --fail-missing -- GitLab From 3f8a22a51bec3a60709bcbf26648c4fd6a66d2b8 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Mon, 19 Feb 2018 13:24:09 +0100 Subject: [PATCH 1303/1483] Move out from pbr All pbr interesting features (the seutp.cfg) have been implemented in setuptools since version 30.3.0 (8 Dec 2016). Also pbr will break us soon and force us to found a new solution to generate things during setup.py steps. https://github.com/gnocchixyz/gnocchi/issues/755 When you use pbr you cannot override most of the cmdclass. So we cannot make our hooks working anymore. Generation of ChangeLog and AUTHORS are done during egg_info. gnocchi-api script is created on the fly with the right python header Package file listing and package version is done by the pypa maintained setuptools_scm. This may impact downstream packaging as setuptools versions are a bit old: * xenial: 20.7 * zesty: 33.1.1 * centos7/rhel7: 0.9.8 ... really ? 
* rdo queen: 22.0 * rdo rocky plan to 38.6 Closes-bug: #755 --- .gitignore | 1 + MANIFEST.in | 5 +- doc/source/conf.py | 8 +-- gnocchi/__init__.py | 21 +++++++ gnocchi/cli/api.py | 5 ++ gnocchi/cli/manage.py | 11 +++- gnocchi/genconfig.py | 29 ---------- gnocchi/rest/gnocchi-api | 22 ------- gnocchi/setuptools.py | 122 +++++++++++++++++++++++++++++++++++++++ requirements.txt | 27 --------- run-upgrade-tests.sh | 2 +- setup.cfg | 66 +++++++++++++-------- setup.py | 31 +++++++++- tox.ini | 8 ++- 14 files changed, 243 insertions(+), 115 deletions(-) delete mode 100644 gnocchi/genconfig.py delete mode 100755 gnocchi/rest/gnocchi-api create mode 100644 gnocchi/setuptools.py delete mode 100644 requirements.txt diff --git a/.gitignore b/.gitignore index a773d3d4..415c6ca0 100644 --- a/.gitignore +++ b/.gitignore @@ -5,6 +5,7 @@ AUTHORS ChangeLog etc/gnocchi/gnocchi.conf +gnocchi/rest/gnocchi-api doc/build doc/source/rest.rst doc/source/gnocchi.conf.sample diff --git a/MANIFEST.in b/MANIFEST.in index 8f248e6e..54a0a8cb 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -1 +1,4 @@ -include etc/gnocchi/gnocchi.conf +include ChangeLog +include AUTHORS +exclude .gitignore +exclude .github diff --git a/doc/source/conf.py b/doc/source/conf.py index 164785c8..46837a49 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -12,7 +12,7 @@ import datetime import os -import subprocess +import pkg_resources # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the @@ -54,11 +54,7 @@ copyright = u'%s, The Gnocchi Developers' % datetime.date.today().year # built documents. # # The short X.Y version. -version = subprocess.Popen(['sh', '-c', 'cd ../..; python setup.py --version'], - stdout=subprocess.PIPE).stdout.read() -version = version.strip() -# The full version, including alpha/beta/rc tags. 
-release = version +release = pkg_resources.get_distribution('gnocchi').version # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. diff --git a/gnocchi/__init__.py b/gnocchi/__init__.py index e69de29b..f56466ff 100644 --- a/gnocchi/__init__.py +++ b/gnocchi/__init__.py @@ -0,0 +1,21 @@ +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import pkg_resources + +try: + __version__ = pkg_resources.get_distribution(__name__).version +except pkg_resources.DistributionNotFound: + # package is not installed + pass diff --git a/gnocchi/cli/api.py b/gnocchi/cli/api.py index a444977b..2a9c0fbb 100644 --- a/gnocchi/cli/api.py +++ b/gnocchi/cli/api.py @@ -24,6 +24,7 @@ from oslo_config import cfg from oslo_policy import opts as policy_opts from gnocchi import opts +from gnocchi.rest import app from gnocchi import service from gnocchi import utils @@ -48,6 +49,10 @@ def prepare_service(conf=None): return conf +def wsgi(): + return app.load_app(prepare_service()) + + def api(): # Compat with previous pbr script try: diff --git a/gnocchi/cli/manage.py b/gnocchi/cli/manage.py index 5e89d52a..c5220ef9 100644 --- a/gnocchi/cli/manage.py +++ b/gnocchi/cli/manage.py @@ -14,14 +14,15 @@ # See the License for the specific language governing permissions and # limitations under the License. 
import copy +import os import sys import daiquiri from oslo_config import cfg +from oslo_config import generator import six from gnocchi import archive_policy -from gnocchi import genconfig from gnocchi import incoming from gnocchi import indexer from gnocchi import service @@ -32,7 +33,13 @@ LOG = daiquiri.getLogger(__name__) def config_generator(): - return genconfig.prehook(None, sys.argv[1:]) + args = sys.argv[1:] + if args is None: + args = ['--output-file', 'etc/gnocchi/gnocchi.conf'] + return generator.main(['--config-file', + '%s/../gnocchi-config-generator.conf' % + os.path.dirname(__file__)] + + args) _SACK_NUMBER_OPT = cfg.IntOpt( diff --git a/gnocchi/genconfig.py b/gnocchi/genconfig.py deleted file mode 100644 index 0eba7359..00000000 --- a/gnocchi/genconfig.py +++ /dev/null @@ -1,29 +0,0 @@ -# -*- encoding: utf-8 -*- -# -# Copyright © 2016-2017 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-import os - - -def prehook(cmd, args=None): - if args is None: - args = ['--output-file', 'etc/gnocchi/gnocchi.conf'] - try: - from oslo_config import generator - generator.main( - ['--config-file', - '%s/gnocchi-config-generator.conf' % os.path.dirname(__file__)] - + args) - except Exception as e: - print("Unable to build sample configuration file: %s" % e) diff --git a/gnocchi/rest/gnocchi-api b/gnocchi/rest/gnocchi-api deleted file mode 100755 index 0663d1a5..00000000 --- a/gnocchi/rest/gnocchi-api +++ /dev/null @@ -1,22 +0,0 @@ -#!/usr/bin/python - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. -if __name__ == '__main__': - import sys - from gnocchi.cli import api - sys.exit(api.api()) -else: - from gnocchi.cli import api - from gnocchi.rest import app - application = app.load_app(api.prepare_service()) diff --git a/gnocchi/setuptools.py b/gnocchi/setuptools.py new file mode 100644 index 00000000..1cc63992 --- /dev/null +++ b/gnocchi/setuptools.py @@ -0,0 +1,122 @@ +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + + +from __future__ import absolute_import + +import os +import subprocess + +from distutils import version +from setuptools.command import develop +from setuptools.command import easy_install +from setuptools.command import egg_info +from setuptools.command import install_scripts + +# NOTE(sileht): We use a template to set the right +# python version in the sheban +SCRIPT_TMPL = """ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+import sys +from gnocchi.cli import api + +if __name__ == '__main__': + sys.exit(api.api()) +else: + application = api.wsgi() +""" + + +def git(*args): + p = subprocess.Popen(["git"] + list(args), + stdout=subprocess.PIPE, + stderr=subprocess.PIPE) + out, _ = p.communicate() + return out.strip().decode('utf-8', 'replace') + + +class local_egg_info(egg_info.egg_info): + def run(self): + if os.path.exists(".git"): + self._gen_changelog_and_authors() + egg_info.egg_info.run(self) + + @staticmethod + def _gen_changelog_and_authors(): + with open("AUTHORS", 'wb') as f: + authors = git('log', '--format=%aN <%aE>') + authors = sorted(set(authors.split("\n"))) + f.writelines([b"%s\n" % author.encode('utf8') + for author in authors]) + + with open("ChangeLog", "wb") as f: + f.write(b"CHANGES\n") + f.write(b"=======\n\n") + changelog = git('log', '--decorate=full', '--format=%s%x00%d') + for line in changelog.split('\n'): + msg, refname = line.split("\x00") + + if "refs/tags/" in refname: + refname = refname.strip()[1:-1] # remove wrapping ()'s + # If we start with "tag: refs/tags/1.2b1, tag: + # refs/tags/1.2" The first split gives us "['', '1.2b1, + # tag:', '1.2']" Which is why we do the second split below + # on the comma + for tag_string in refname.split("refs/tags/")[1:]: + # git tag does not allow : or " " in tag names, so we + # split on ", " which is the separator between elements + candidate = tag_string.split(", ")[0] + try: + version.StrictVersion(candidate) + except ValueError: + pass + else: + f.write(b"\n%s\n" % candidate.encode('utf8')) + f.write(b"%s\n\n" % (b"-" * len(candidate))) + + if msg.startswith("Merge "): + continue + if msg.endswith("."): + msg = msg[:-1] + msg = msg.replace('*', '\*') + msg = msg.replace('_', '\_') + msg = msg.replace('`', '\`') + f.write(b"* %s\n" % msg.encode("utf8")) + + +class local_install_scripts(install_scripts.install_scripts): + def run(self): + install_scripts.install_scripts.run(self) + header = 
easy_install.get_script_header( + "", easy_install.sys_executable, False) + self.write_script("gnocchi-api", header + SCRIPT_TMPL) + + +class local_develop(develop.develop): + def install_wrapper_scripts(self, dist): + develop.develop.install_wrapper_scripts(self, dist) + header = easy_install.get_script_header( + "", easy_install.sys_executable, False) + self.write_script("gnocchi-api", header + SCRIPT_TMPL) diff --git a/requirements.txt b/requirements.txt deleted file mode 100644 index ed334ba5..00000000 --- a/requirements.txt +++ /dev/null @@ -1,27 +0,0 @@ -pbr -numpy>=1.9.0 -iso8601 -oslo.config>=3.22.0 -oslo.policy>=0.3.0 -oslo.middleware>=3.22.0 -pytimeparse -pecan>=0.9 -futures; python_version < '3' -jsonpatch -cotyledon>=1.5.0 -six -stevedore -ujson -voluptuous>=0.8.10 -werkzeug -trollius; python_version < '3.4' -tenacity>=4.6.0 -WebOb>=1.4.1 -Paste -PasteDeploy -monotonic -daiquiri -pyparsing>=2.2.0 -lz4>=0.9.0 -tooz>=1.38 -cachetools diff --git a/run-upgrade-tests.sh b/run-upgrade-tests.sh index bd50bd1d..def061bf 100755 --- a/run-upgrade-tests.sh +++ b/run-upgrade-tests.sh @@ -77,7 +77,7 @@ pifpaf_stop new_version=$(python setup.py --version) echo "* Upgrading Gnocchi from $old_version to $new_version" -pip install -q -U .[${GNOCCHI_VARIANT}] +pip install -v -U .[${GNOCCHI_VARIANT}] eval $(pifpaf --debug run gnocchi --indexer-url $INDEXER_URL --storage-url $STORAGE_URL) # Gnocchi 3.1 uses basic auth by default diff --git a/setup.cfg b/setup.cfg index f4162bb5..9c06432c 100644 --- a/setup.cfg +++ b/setup.cfg @@ -1,11 +1,10 @@ [metadata] name = gnocchi -url = http://launchpad.net/gnocchi -summary = Metric as a Service -description-file = - README.rst +url = http://gnocchi.xyz +description = Metric as a Service +long_description = file: README.rst author = Gnocchi developers -home-page = http://gnocchi.xyz +author_email = invalid@gnocchi.xyz classifier = Intended Audience :: Information Technology Intended Audience :: System Administrators @@ -18,7 +17,42 
@@ classifier = Programming Language :: Python :: 3.5 Topic :: System :: Monitoring -[extras] +[options] +packages = + gnocchi + +include_package_data = true + +install_requires = + setuptools>=30.3 + numpy>=1.9.0 + iso8601 + oslo.config>=3.22.0 + oslo.policy>=0.3.0 + oslo.middleware>=3.22.0 + pytimeparse + pecan>=0.9 + futures; python_version < '3' + jsonpatch + cotyledon>=1.5.0 + six + stevedore + ujson + voluptuous>=0.8.10 + werkzeug + trollius; python_version < '3.4' + tenacity>=4.6.0 + WebOb>=1.4.1 + Paste + PasteDeploy + monotonic + daiquiri + pyparsing>=2.2.0 + lz4>=0.9.0 + tooz>=1.38 + cachetools + +[options.extras_require] keystone = keystonemiddleware>=4.0.0,!=4.19.0 mysql = @@ -72,21 +106,7 @@ test = test-swift = python-swiftclient -[global] -setup-hooks = - pbr.hooks.setup_hook - -[build_py] -pre-hook.build_config = gnocchi.genconfig.prehook - -[files] -packages = - gnocchi - -scripts = - gnocchi/rest/gnocchi-api - -[entry_points] +[options.entry_points] gnocchi.indexer.sqlalchemy.resource_type_attribute = string = gnocchi.indexer.sqlalchemy_extension:StringSchema uuid = gnocchi.indexer.sqlalchemy_extension:UUIDSchema @@ -137,5 +157,5 @@ all_files = 1 build-dir = doc/build source-dir = doc/source -[wheel] -universal = 1 +[bdist_wheel] +universal=1 diff --git a/setup.py b/setup.py index d1a140a8..74d11134 100755 --- a/setup.py +++ b/setup.py @@ -16,6 +16,33 @@ import setuptools +import gnocchi.setuptools + +cmdclass = { + 'egg_info': gnocchi.setuptools.local_egg_info, + 'develop': gnocchi.setuptools.local_develop, + 'install_scripts': gnocchi.setuptools.local_install_scripts, +} + +try: + from sphinx import setup_command + cmdclass['build_sphinx'] = setup_command.BuildDoc +except ImportError: + pass + + +def pbr_compat(v): + from setuptools_scm import version + # NOTE(sileht): this removes +g. to generate the same number as + # pbr. 
i don't get why yet but something call pbr even we don't depends on + # it anymore + v.dirty = False + v.node = None + return version.guess_next_dev_version(v) + + setuptools.setup( - setup_requires=['pbr', 'setuptools>=20.6.8'], - pbr=True) + setup_requires=['setuptools>=30.3.0', 'setuptools_scm'], + use_scm_version={'version_scheme': pbr_compat}, + cmdclass=cmdclass, +) diff --git a/tox.ini b/tox.ini index afac9dc4..f7339dc4 100644 --- a/tox.ini +++ b/tox.ini @@ -133,8 +133,10 @@ basepython = python3 # deps = {[testenv]deps} # .[postgresql,doc] # setenv = GNOCCHI_STORAGE_DEPS=file -deps = .[test,file,postgresql,doc] - doc8 +deps = + -e + .[test,file,postgresql,doc] + doc8 setenv = GNOCCHI_TEST_DEBUG=1 commands = doc8 --ignore-path doc/source/rest.rst,doc/source/comparison-table.rst doc/source pifpaf -g GNOCCHI_INDEXER_URL run postgresql -- python setup.py build_sphinx -W @@ -146,6 +148,8 @@ setenv = GNOCCHI_STORAGE_DEPS=file GNOCCHI_TEST_DEBUG=1 deps = {[testenv:docs]deps} sphinxcontrib-versioning +# for < 4.3 doc + pbr # for <= 4.2 doc scipy # for <= 4.1 doc -- GitLab From cefd859a8419263b37cc3915e8998c383a92d055 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Tue, 6 Mar 2018 10:02:47 +0100 Subject: [PATCH 1304/1483] storage: add some tests for get_measures_aggregated --- gnocchi/tests/test_storage.py | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/gnocchi/tests/test_storage.py b/gnocchi/tests/test_storage.py index 46804f2d..5ac0eb30 100644 --- a/gnocchi/tests/test_storage.py +++ b/gnocchi/tests/test_storage.py @@ -215,6 +215,21 @@ class TestStorageDriver(tests_base.TestCase): self.assertEqual(2, report['summary']['metrics']) self.assertEqual(120, report['summary']['measures']) + def test_get_aggregated_measures(self): + self.incoming.add_measures(self.metric.id, [ + incoming.Measure(datetime64(2014, 1, 1, 12, i, j), 100) + for i in six.moves.range(0, 60) for j in six.moves.range(0, 60)]) + self.trigger_processing([str(self.metric.id)]) + + 
aggregations = self.metric.archive_policy.aggregations + + measures = self.storage.get_aggregated_measures( + self.metric, aggregations) + self.assertEqual(len(aggregations), len(measures)) + self.assertGreater(len(measures[aggregations[0]]), 0) + for agg in aggregations: + self.assertEqual(agg, measures[agg].aggregation) + def test_add_measures_big(self): m, __ = self._create_metric('high') self.incoming.add_measures(m.id, [ -- GitLab From 109bc29bd685f6c9860fa5aa834b2869f7dbb905 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Tue, 6 Mar 2018 13:52:51 +0100 Subject: [PATCH 1305/1483] carbonara: pass Aggregation object to AggregatedTimeSerie.unserialize This is already what is passed as argument by gnocchi.storage, but there were no test doing any assert. It does not really matter as the final AggregatedTimeSerie is built from concatenating splits, but better be safe than sorry. --- gnocchi/carbonara.py | 6 +++--- gnocchi/tests/test_carbonara.py | 6 ++++-- 2 files changed, 7 insertions(+), 5 deletions(-) diff --git a/gnocchi/carbonara.py b/gnocchi/carbonara.py index 35e631c7..aec2da89 100644 --- a/gnocchi/carbonara.py +++ b/gnocchi/carbonara.py @@ -660,12 +660,12 @@ class AggregatedTimeSerie(TimeSerie): return six.indexbytes(serialized_data, 0) == ord("c") @classmethod - def unserialize(cls, data, key, agg_method): + def unserialize(cls, data, key, aggregation): """Unserialize an aggregated timeserie. :param data: Raw data buffer. :param key: A :class:`SplitKey` key. - :param agg_method: The aggregation method of this timeseries. + :param aggregation: The Aggregation object of this timeseries. """ x, y = [], [] @@ -696,7 +696,7 @@ class AggregatedTimeSerie(TimeSerie): y = index * key.sampling + key.key x = everything['v'][index] - return cls.from_data(Aggregation(agg_method, key.sampling, None), y, x) + return cls.from_data(aggregation, y, x) def get_split_key(self, timestamp=None): """Return the split key for a particular timestamp. 
diff --git a/gnocchi/tests/test_carbonara.py b/gnocchi/tests/test_carbonara.py index a2b8f219..bc13977a 100644 --- a/gnocchi/tests/test_carbonara.py +++ b/gnocchi/tests/test_carbonara.py @@ -226,7 +226,9 @@ class TestAggregatedTimeSerie(base.BaseTestCase): key = ts.get_split_key() o, s = ts.serialize(key) saved_ts = carbonara.AggregatedTimeSerie.unserialize( - s, key, '74pct') + s, key, ts.aggregation) + + self.assertEqual(ts.aggregation, saved_ts.aggregation) ts = carbonara.TimeSerie.from_data( [datetime64(2014, 1, 1, 12, 0, 0), @@ -624,7 +626,7 @@ class TestAggregatedTimeSerie(base.BaseTestCase): o, s = ts['return'].serialize(key) self.assertEqual(ts['return'], carbonara.AggregatedTimeSerie.unserialize( - s, key, 'mean')) + s, key, ts['return'].aggregation)) def test_no_truncation(self): ts = {'sampling': numpy.timedelta64(60, 's'), 'agg': 'mean'} -- GitLab From 83d7c1f413fd586e31d1292c1a7eb6b5b20f7493 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Tue, 6 Mar 2018 13:54:53 +0100 Subject: [PATCH 1306/1483] storage: add a basic test for _get_splits_and_unserialize --- gnocchi/tests/test_storage.py | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/gnocchi/tests/test_storage.py b/gnocchi/tests/test_storage.py index 5ac0eb30..a4ecd003 100644 --- a/gnocchi/tests/test_storage.py +++ b/gnocchi/tests/test_storage.py @@ -90,6 +90,29 @@ class TestStorageDriver(tests_base.TestCase): self.assertEqual(0, len(results[0])) self.assertEqual(results[0].aggregation, aggregation) + def test_get_splits_and_unserialize(self): + self.incoming.add_measures(self.metric.id, [ + incoming.Measure(datetime64(2014, 1, 1, 12, 0, 1), 69), + ]) + self.trigger_processing() + + aggregation = self.metric.archive_policy.get_aggregation( + "mean", numpy.timedelta64(5, 'm')) + + results = self.storage._get_splits_and_unserialize( + self.metric, + [ + (carbonara.SplitKey( + numpy.datetime64(1387800000, 's'), + numpy.timedelta64(5, 'm')), + aggregation) + ]) + 
self.assertEqual(1, len(results)) + self.assertIsInstance(results[0], carbonara.AggregatedTimeSerie) + # Assert it's not empty one since corrupted + self.assertGreater(len(results[0]), 0) + self.assertEqual(results[0].aggregation, aggregation) + def test_corrupted_data(self): self.incoming.add_measures(self.metric.id, [ incoming.Measure(datetime64(2014, 1, 1, 12, 0, 1), 69), -- GitLab From c487db41125c50509f8f832ef0c9e71d50617c90 Mon Sep 17 00:00:00 2001 From: Thomas Goirand Date: Wed, 7 Mar 2018 12:21:20 +0000 Subject: [PATCH 1307/1483] Releasing to unstable. --- debian/changelog | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/debian/changelog b/debian/changelog index ed77064f..8d224b82 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,4 +1,4 @@ -gnocchi (4.2.0-1) UNRELEASED; urgency=medium +gnocchi (4.2.0-1) unstable; urgency=medium [ Ondřej Nový ] * d/control: Set Vcs-* to salsa.debian.org -- GitLab From 08159a10f381577d1434d6750a1800ec409b4915 Mon Sep 17 00:00:00 2001 From: Thomas Goirand Date: Wed, 7 Mar 2018 12:23:41 +0000 Subject: [PATCH 1308/1483] Disabled unit testing. --- debian/rules | 65 ++++++++++++++++++++++++++-------------------------- 1 file changed, 33 insertions(+), 32 deletions(-) diff --git a/debian/rules b/debian/rules index a2b7825a..35bf2171 100755 --- a/debian/rules +++ b/debian/rules @@ -36,38 +36,39 @@ override_dh_install: done ifeq (,$(findstring nocheck, $(DEB_BUILD_OPTIONS))) - @echo "===> Running tests" - set -e ; set -x ; for i in $(PYTHON3S) ; do \ - PYMAJOR=`echo $$i | cut -d'.' -f1` ; \ - BINDIR=`pg_config --bindir` ; \ - PG_MYTMPDIR=`mktemp -d` ; \ - export LC_ALL="C" ; \ - export LANGUAGE=C ; \ - PGSQL_PORT=9823 ; \ - $$BINDIR/initdb -D $$PG_MYTMPDIR ; \ - $$BINDIR/pg_ctl -w -D $$PG_MYTMPDIR -o "-k $$PG_MYTMPDIR -p $$PGSQL_PORT" start > /dev/null ; \ - attempts=0 ; \ - while ! 
[ -e $$PG_MYTMPDIR/postmaster.pid ] ; do \ - if [ $$attempts -gt 10 ] ; then \ - echo "Exiting test: postgres pid file was not created after 30 seconds" ; \ - exit 1 ; \ - fi ; \ - attempts=$$((attempts+1)) ; \ - sleep 3 ; \ - done ; \ - export GNOCCHI_INDEXER_URL="postgresql:///template1?host=$$PG_MYTMPDIR&port=9823" ; \ - export GNOCCHI_TEST_STORAGE_DRIVER=file ; \ - echo "===> Testing with python$$i (python$$PYMAJOR)" ; \ - rm -rf .testrepository ; \ - testr-python$$PYMAJOR init ; \ - TEMP_REZ=`mktemp -t` ; \ - export PATH=$(PATH):$(CURDIR)/debian/bin && PYTHONPATH=$(CURDIR):$(CURDIR)/debian/bin PYTHON=python$$i testr-python$$PYMAJOR run --subunit 'gnocchi\.tests\.(?!.*('"$(UNIT_TEST_BLACKLIST)"'))' | tee $$TEMP_REZ | subunit2pyunit ; \ - cat $$TEMP_REZ | subunit-filter -s --no-passthrough | subunit-stats ; \ - rm -f $$TEMP_REZ ; \ - testr-python$$PYMAJOR slowest ; \ - echo "===> Stopping PGSQL" ; \ - $$BINDIR/pg_ctl stop -D $$PG_MYTMPDIR ; \ - done + echo "Not running unit tests until launching mysql or postgresql is fixed..." +# @echo "===> Running tests" +# set -e ; set -x ; for i in $(PYTHON3S) ; do \ +# PYMAJOR=`echo $$i | cut -d'.' -f1` ; \ +# BINDIR=`pg_config --bindir` ; \ +# PG_MYTMPDIR=`mktemp -d` ; \ +# export LC_ALL="C" ; \ +# export LANGUAGE=C ; \ +# PGSQL_PORT=9823 ; \ +# $$BINDIR/initdb -D $$PG_MYTMPDIR ; \ +# $$BINDIR/pg_ctl -w -D $$PG_MYTMPDIR -o "-k $$PG_MYTMPDIR -p $$PGSQL_PORT" start > /dev/null ; \ +# attempts=0 ; \ +# while ! 
[ -e $$PG_MYTMPDIR/postmaster.pid ] ; do \ +# if [ $$attempts -gt 10 ] ; then \ +# echo "Exiting test: postgres pid file was not created after 30 seconds" ; \ +# exit 1 ; \ +# fi ; \ +# attempts=$$((attempts+1)) ; \ +# sleep 3 ; \ +# done ; \ +# export GNOCCHI_INDEXER_URL="postgresql:///template1?host=$$PG_MYTMPDIR&port=9823" ; \ +# export GNOCCHI_TEST_STORAGE_DRIVER=file ; \ +# echo "===> Testing with python$$i (python$$PYMAJOR)" ; \ +# rm -rf .testrepository ; \ +# testr-python$$PYMAJOR init ; \ +# TEMP_REZ=`mktemp -t` ; \ +# export PATH=$(PATH):$(CURDIR)/debian/bin && PYTHONPATH=$(CURDIR):$(CURDIR)/debian/bin PYTHON=python$$i testr-python$$PYMAJOR run --subunit 'gnocchi\.tests\.(?!.*('"$(UNIT_TEST_BLACKLIST)"'))' | tee $$TEMP_REZ | subunit2pyunit ; \ +# cat $$TEMP_REZ | subunit-filter -s --no-passthrough | subunit-stats ; \ +# rm -f $$TEMP_REZ ; \ +# testr-python$$PYMAJOR slowest ; \ +# echo "===> Stopping PGSQL" ; \ +# $$BINDIR/pg_ctl stop -D $$PG_MYTMPDIR ; \ +# done endif -- GitLab From 8bf20e5f2fbbff087157ddfd8db1967de3069389 Mon Sep 17 00:00:00 2001 From: Thomas Goirand Date: Wed, 7 Mar 2018 12:27:31 +0000 Subject: [PATCH 1309/1483] Install /usr/bin/* in debian/python3-gnocchi.install --- debian/python3-gnocchi.install | 3 ++- debian/rules | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/debian/python3-gnocchi.install b/debian/python3-gnocchi.install index 028be4f6..7cd6c2a9 100644 --- a/debian/python3-gnocchi.install +++ b/debian/python3-gnocchi.install @@ -1 +1,2 @@ -/usr/lib/python3* \ No newline at end of file +/usr/lib/python3* +/usr/bin/* diff --git a/debian/rules b/debian/rules index 35bf2171..4bf66d8f 100755 --- a/debian/rules +++ b/debian/rules @@ -37,7 +37,7 @@ override_dh_install: ifeq (,$(findstring nocheck, $(DEB_BUILD_OPTIONS))) echo "Not running unit tests until launching mysql or postgresql is fixed..." 
-# @echo "===> Running tests" +# @echo "===> Running tests" # set -e ; set -x ; for i in $(PYTHON3S) ; do \ # PYMAJOR=`echo $$i | cut -d'.' -f1` ; \ # BINDIR=`pg_config --bindir` ; \ -- GitLab From c3b1b4b6522ca0b4f8dbcbed07b33f9f78c9c2e4 Mon Sep 17 00:00:00 2001 From: Thomas Goirand Date: Wed, 7 Mar 2018 12:31:21 +0000 Subject: [PATCH 1310/1483] Also copy templates for gnocchi-api. --- debian/rules | 1 + 1 file changed, 1 insertion(+) diff --git a/debian/rules b/debian/rules index 4bf66d8f..364a9ebf 100755 --- a/debian/rules +++ b/debian/rules @@ -22,6 +22,7 @@ override_dh_auto_build: /usr/share/openstack-pkg-tools/pkgos_insert_include pkgos_func gnocchi-api.postinst /usr/share/openstack-pkg-tools/pkgos_insert_include pkgos_func gnocchi-api.config /usr/share/openstack-pkg-tools/pkgos_insert_include pkgos_postrm gnocchi-common.postrm + pkgos-merge-templates gnocchi-api gnocchi endpoint pkgos-merge-templates gnocchi-common gnocchi db ksat override_dh_auto_install: -- GitLab From 5e7357b182d17df4f1278bcb8b861befc19fd1fc Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Thu, 15 Feb 2018 11:32:48 +0100 Subject: [PATCH 1311/1483] storage: _add_measures can handle multiple aggregations at the same time This allows to batch storage and deletion of splits for a metric. 
--- gnocchi/storage/__init__.py | 219 +++++++++++++++++----------------- gnocchi/storage/ceph.py | 15 ++- gnocchi/storage/file.py | 8 +- gnocchi/storage/redis.py | 12 +- gnocchi/storage/s3.py | 8 +- gnocchi/storage/swift.py | 8 +- gnocchi/tests/test_storage.py | 17 +-- 7 files changed, 143 insertions(+), 144 deletions(-) diff --git a/gnocchi/storage/__init__.py b/gnocchi/storage/__init__.py index f4a53c8b..e2fb71b2 100644 --- a/gnocchi/storage/__init__.py +++ b/gnocchi/storage/__init__.py @@ -166,15 +166,16 @@ class StorageDriver(object): ((metric, data, version) for metric, data in metrics_and_data)) @staticmethod - def _store_metric_splits(metric, keys_and_data_and_offset, aggregation, - version=3): + def _store_metric_splits(metric, keys_aggregations_data_offset, version=3): """Store metric split. Store a bunch of splits for a metric. :param metric: The metric to store for - :param keys_and_data_and_offset: A list of (key, data, offset) tuples - :param aggregation: The aggregation method concerned + :param keys_aggregations_data_offset: A list of + (key, aggregation, data, offset) + tuples + :param version: Storage engine format version. """ raise NotImplementedError @@ -310,11 +311,12 @@ class StorageDriver(object): ts.truncate(aggregation.timespan) return ts - def _store_timeserie_splits(self, metric, keys_and_splits, - aggregation, oldest_mutable_timestamp): + def _store_timeserie_splits(self, metric, keys_and_aggregations_and_splits, + oldest_mutable_timestamp): keys_to_rewrite = [] splits_to_rewrite = [] - for key, split in six.iteritems(keys_and_splits): + for (key, aggregation), split in six.iteritems( + keys_and_aggregations_and_splits): # NOTE(jd) We write the full split only if the driver works that # way (self.WRITE_FULL) or if the oldest_mutable_timestamp is out # of range. @@ -329,47 +331,32 @@ class StorageDriver(object): # stored in the case that we need to rewrite them fully. # First, fetch all those existing splits. 
existing_data = self._get_splits_and_unserialize( - metric, [(key, aggregation) for key in keys_to_rewrite]) + metric, [(key, split.aggregation) + for key, split + in six.moves.zip(keys_to_rewrite, splits_to_rewrite)]) for key, split, existing in six.moves.zip( keys_to_rewrite, splits_to_rewrite, existing_data): if existing: - if split is not None: - existing.merge(split) - keys_and_splits[key] = existing - - key_data_offset = [] - for key, split in six.iteritems(keys_and_splits): - if split is None: - # `split' can be none if existing is None and no split was - # passed in order to rewrite and compress the data; in that - # case, it means the split key is present and listed, but some - # aggregation method or granularity is missing. That means data - # is corrupted, but it does not mean we have to fail, we can - # just do nothing and log a warning. - LOG.warning("No data found for metric %s, granularity %f " - "and aggregation method %s (split key %s): " - "possible data corruption", - metric, key.sampling, - aggregation.method, key) - continue - - offset, data = split.serialize( - key, compressed=key in keys_to_rewrite) - key_data_offset.append((key, data, offset)) - - return self._store_metric_splits(metric, key_data_offset, aggregation) - - def _add_measures(self, metric, aggregation, grouped_serie, + existing.merge(split) + keys_and_aggregations_and_splits[ + (key, split.aggregation)] = existing + + keys_aggregations_data_offset = [] + for (key, aggregation), split in six.iteritems( + keys_and_aggregations_and_splits): + # Do not store the split if it's empty. 
+ if split: + offset, data = split.serialize( + key, compressed=key in keys_to_rewrite) + keys_aggregations_data_offset.append( + (key, split.aggregation, data, offset)) + + return self._store_metric_splits(metric, keys_aggregations_data_offset) + + def _add_measures(self, metric, aggregations, grouped_serie, previous_oldest_mutable_timestamp, oldest_mutable_timestamp): - ts = carbonara.AggregatedTimeSerie.from_grouped_serie( - grouped_serie, aggregation) - - # Don't do anything if the timeserie is empty - if not ts: - return - # We only need to check for rewrite if driver is not in WRITE_FULL mode # and if we already stored splits once need_rewrite = ( @@ -377,70 +364,84 @@ class StorageDriver(object): and previous_oldest_mutable_timestamp is not None ) - if aggregation.timespan: - oldest_point_to_keep = ts.truncate(aggregation.timespan) - else: - oldest_point_to_keep = None - - oldest_key_to_keep = ts.get_split_key(oldest_point_to_keep) - + # NOTE(jd) This dict uses (key, aggregation) tuples as keys because + # using just (key) would not carry the aggregation method and therefore + # would not be unique per aggregation! 
keys_and_split_to_store = {} + deleted_keys = set() + for aggregation in aggregations: + ts = carbonara.AggregatedTimeSerie.from_grouped_serie( + grouped_serie, aggregation) - if previous_oldest_mutable_timestamp and (aggregation.timespan or - need_rewrite): - previous_oldest_mutable_key = ts.get_split_key( - previous_oldest_mutable_timestamp) - oldest_mutable_key = ts.get_split_key(oldest_mutable_timestamp) - - # only cleanup if there is a new object, as there must be a new - # object for an old object to be cleanup - if previous_oldest_mutable_key != oldest_mutable_key: - existing_keys = sorted(self._list_split_keys( - metric, [aggregation])[aggregation]) - - # First, check for old splits to delete - if aggregation.timespan: - deleted_keys = set() - for key in list(existing_keys): - # NOTE(jd) Only delete if the key is strictly inferior - # the timestamp; we don't delete any timeserie split - # that contains our timestamp, so we prefer to keep a - # bit more than deleting too much - if key >= oldest_key_to_keep: - break - deleted_keys.add(key) - existing_keys.remove(key) - self._delete_metric_splits( - metric, deleted_keys, aggregation.method) - - # Rewrite all read-only splits just for fun (and compression). - # This only happens if `previous_oldest_mutable_timestamp' - # exists, which means we already wrote some splits at some - # point – so this is not the first time we treat this - # timeserie. - if need_rewrite: - for key in existing_keys: - if previous_oldest_mutable_key <= key: - if key >= oldest_mutable_key: - break - LOG.debug("Compressing previous split %s (%s) for " - "metric %s", key, aggregation.method, - metric) - # NOTE(jd) Rewrite it entirely for fun (and later - # for compression). For that, we just pass None as - # split. 
- keys_and_split_to_store[key] = None - - for key, split in ts.split(): - if key >= oldest_key_to_keep: - LOG.debug( - "Storing split %s (%s) for metric %s", - key, aggregation.method, metric) - keys_and_split_to_store[key] = split + # Don't do anything if the timeserie is empty + if not ts: + continue + if aggregation.timespan: + oldest_point_to_keep = ts.truncate(aggregation.timespan) + else: + oldest_point_to_keep = None + + oldest_key_to_keep = ts.get_split_key(oldest_point_to_keep) + + if previous_oldest_mutable_timestamp and (aggregation.timespan or + need_rewrite): + previous_oldest_mutable_key = ts.get_split_key( + previous_oldest_mutable_timestamp) + oldest_mutable_key = ts.get_split_key(oldest_mutable_timestamp) + + # only cleanup if there is a new object, as there must be a new + # object for an old object to be cleanup + if previous_oldest_mutable_key != oldest_mutable_key: + existing_keys = sorted(self._list_split_keys( + metric, [aggregation])[aggregation]) + + # First, check for old splits to delete + if aggregation.timespan: + for key in list(existing_keys): + # NOTE(jd) Only delete if the key is strictly + # inferior the timestamp; we don't delete any + # timeserie split that contains our timestamp, so + # we prefer to keep a bit more than deleting too + # much + if key >= oldest_key_to_keep: + break + deleted_keys.add((key, aggregation)) + existing_keys.remove(key) + + # Rewrite all read-only splits just for fun (and + # compression). This only happens if + # `previous_oldest_mutable_timestamp' exists, which means + # we already wrote some splits at some point – so this is + # not the first time we treat this timeserie. + if need_rewrite: + for key in existing_keys: + if previous_oldest_mutable_key <= key: + if key >= oldest_mutable_key: + break + LOG.debug( + "Compressing previous split %s (%s) for " + "metric %s", key, aggregation.method, + metric) + # NOTE(jd) Rewrite it entirely for fun (and + # later for compression). 
For that, we just + # pass an empty split. + keys_and_split_to_store[ + (key, aggregation)] = ( + carbonara.AggregatedTimeSerie( + aggregation) + ) + + for key, split in ts.split(): + if key >= oldest_key_to_keep: + LOG.debug( + "Storing split %s (%s) for metric %s", + key, aggregation.method, metric) + keys_and_split_to_store[(key, aggregation)] = split + + self._delete_metric_splits(metric, deleted_keys) self._store_timeserie_splits( - metric, keys_and_split_to_store, aggregation, - oldest_mutable_timestamp) + metric, keys_and_split_to_store, oldest_mutable_timestamp) @staticmethod def _delete_metric(metric): @@ -450,10 +451,11 @@ class StorageDriver(object): def _delete_metric_splits_unbatched(metric, keys, aggregation, version=3): raise NotImplementedError - def _delete_metric_splits(self, metric, keys, aggregation, version=3): + def _delete_metric_splits(self, metric, keys_and_aggregations, version=3): utils.parallel_map( utils.return_none_on_failure(self._delete_metric_splits_unbatched), - ((metric, key, aggregation) for key in keys)) + ((metric, key, aggregation) + for key, aggregation in keys_and_aggregations)) def compute_and_store_timeseries(self, metric, measures): # NOTE(mnaser): The metric could have been handled by @@ -524,12 +526,9 @@ class StorageDriver(object): granularity, carbonara.round_timestamp( tstamp, granularity)) - utils.parallel_map( - self._add_measures, - ((metric, aggregation, ts, - current_first_block_timestamp, - new_first_block_timestamp) - for aggregation in aggregations)) + self._add_measures(metric, aggregations, ts, + current_first_block_timestamp, + new_first_block_timestamp) with utils.StopWatch() as sw: ts.set_values(measures, diff --git a/gnocchi/storage/ceph.py b/gnocchi/storage/ceph.py index 9c0f7827..9d8233d6 100644 --- a/gnocchi/storage/ceph.py +++ b/gnocchi/storage/ceph.py @@ -81,12 +81,11 @@ class CephStorage(storage.StorageDriver): else: self.ioctx.write_full(name, b"") - def _store_metric_splits(self, metric, 
keys_and_data_and_offset, - aggregation, version=3): + def _store_metric_splits(self, metric, keys_aggregations_data_offset, + version=3): with rados.WriteOpCtx() as op: - for key, data, offset in keys_and_data_and_offset: - name = self._get_object_name( - metric, key, aggregation.method, version) + for key, agg, data, offset in keys_aggregations_data_offset: + name = self._get_object_name(metric, key, agg.method, version) if offset is None: self.ioctx.write_full(name, data) else: @@ -95,10 +94,10 @@ class CephStorage(storage.StorageDriver): self.ioctx.operate_write_op( op, self._build_unaggregated_timeserie_path(metric, 3)) - def _delete_metric_splits(self, metric, keys, aggregation, version=3): + def _delete_metric_splits(self, metric, keys_and_aggregations, version=3): names = tuple( - self._get_object_name(metric, key, aggregation, version) - for key in keys + self._get_object_name(metric, key, aggregation.method, version) + for key, aggregation in keys_and_aggregations ) with rados.WriteOpCtx() as op: for name in names: diff --git a/gnocchi/storage/file.py b/gnocchi/storage/file.py index 775e2cad..7f8a2712 100644 --- a/gnocchi/storage/file.py +++ b/gnocchi/storage/file.py @@ -161,11 +161,11 @@ class FileStorage(storage.StorageDriver): def _delete_metric_splits_unbatched( self, metric, key, aggregation, version=3): os.unlink(self._build_metric_path_for_split( - metric, aggregation, key, version)) + metric, aggregation.method, key, version)) - def _store_metric_splits(self, metric, keys_and_data_and_offset, - aggregation, version=3): - for key, data, offset in keys_and_data_and_offset: + def _store_metric_splits(self, metric, keys_aggregations_data_offset, + version=3): + for key, aggregation, data, offset in keys_aggregations_data_offset: self._atomic_file_store( self._build_metric_path_for_split( metric, aggregation.method, key, version), diff --git a/gnocchi/storage/redis.py b/gnocchi/storage/redis.py index 7f35e06d..4f2bf7e0 100644 --- 
a/gnocchi/storage/redis.py +++ b/gnocchi/storage/redis.py @@ -126,19 +126,19 @@ return ids } return keys - def _delete_metric_splits(self, metric, keys, aggregation, version=3): + def _delete_metric_splits(self, metric, keys_and_aggregations, version=3): metric_key = self._metric_key(metric) pipe = self._client.pipeline(transaction=False) - for key in keys: + for key, aggregation in keys_and_aggregations: pipe.hdel(metric_key, self._aggregated_field_for_split( - aggregation, key, version)) + aggregation.method, key, version)) pipe.execute() - def _store_metric_splits(self, metric, keys_and_data_and_offset, - aggregation, version=3): + def _store_metric_splits(self, metric, keys_aggregations_data_offset, + version=3): pipe = self._client.pipeline(transaction=False) metric_key = self._metric_key(metric) - for key, data, offset in keys_and_data_and_offset: + for key, aggregation, data, offset in keys_aggregations_data_offset: key = self._aggregated_field_for_split( aggregation.method, key, version) pipe.hset(metric_key, key, data) diff --git a/gnocchi/storage/s3.py b/gnocchi/storage/s3.py index 06fb5825..5c75e57f 100644 --- a/gnocchi/storage/s3.py +++ b/gnocchi/storage/s3.py @@ -120,9 +120,9 @@ class S3Storage(storage.StorageDriver): wait=self._consistency_wait, stop=self._consistency_stop)(_head) - def _store_metric_splits(self, metric, keys_and_data_and_offset, - aggregation, version=3): - for key, data, offset in keys_and_data_and_offset: + def _store_metric_splits(self, metric, keys_aggregations_data_offset, + version=3): + for key, aggregation, data, offset in keys_aggregations_data_offset: self._put_object_safe( Bucket=self._bucket_name, Key=self._prefix(metric) + self._object_name( @@ -134,7 +134,7 @@ class S3Storage(storage.StorageDriver): self.s3.delete_object( Bucket=self._bucket_name, Key=self._prefix(metric) + self._object_name( - key, aggregation, version)) + key, aggregation.method, version)) def _delete_metric(self, metric): bucket = self._bucket_name 
diff --git a/gnocchi/storage/swift.py b/gnocchi/storage/swift.py index 96d116ff..603a1b45 100644 --- a/gnocchi/storage/swift.py +++ b/gnocchi/storage/swift.py @@ -116,9 +116,9 @@ class SwiftStorage(storage.StorageDriver): if resp['status'] == 204: raise storage.MetricAlreadyExists(metric) - def _store_metric_splits(self, metric, keys_and_data_and_offset, - aggregation, version=3): - for key, data, offset in keys_and_data_and_offset: + def _store_metric_splits(self, metric, keys_aggregations_data_offset, + version=3): + for key, aggregation, data, offset in keys_aggregations_data_offset: self.swift.put_object( self._container_name(metric), self._object_name(key, aggregation.method, version), @@ -128,7 +128,7 @@ class SwiftStorage(storage.StorageDriver): self, metric, key, aggregation, version=3): self.swift.delete_object( self._container_name(metric), - self._object_name(key, aggregation, version)) + self._object_name(key, aggregation.method, version)) def _delete_metric(self, metric): container = self._container_name(metric) diff --git a/gnocchi/tests/test_storage.py b/gnocchi/tests/test_storage.py index a4ecd003..1d09f830 100644 --- a/gnocchi/tests/test_storage.py +++ b/gnocchi/tests/test_storage.py @@ -287,10 +287,11 @@ class TestStorageDriver(tests_base.TestCase): for call in c.mock_calls: # policy is 60 points and split is 48. should only update 2nd half args = call[1] - if (args[0] == m_sql - and args[2].method == 'mean' - and args[1][0][0].sampling == numpy.timedelta64(1, 'm')): - count += 1 + if args[0] == m_sql: + for key, aggregation, data, offset in args[1]: + if (key.sampling == numpy.timedelta64(1, 'm') + and aggregation.method == "mean"): + count += 1 self.assertEqual(1, count) def test_add_measures_update_subset(self): @@ -775,10 +776,10 @@ class TestStorageDriver(tests_base.TestCase): # Test what happens if we delete the latest split and then need to # compress it! 
self.storage._delete_metric_splits( - self.metric, [carbonara.SplitKey( + self.metric, [(carbonara.SplitKey( numpy.datetime64(1451952000, 's'), numpy.timedelta64(1, 'm'), - )], 'mean') + ), aggregation)]) # Now store brand new points that should force a rewrite of one of the # split (keep in mind the back window size in one hour here). We move @@ -863,8 +864,8 @@ class TestStorageDriver(tests_base.TestCase): (carbonara.SplitKey( numpy.datetime64(1451952000, 's'), numpy.timedelta64(1, 'm')), - b"oh really?", None) - ], aggregation) + aggregation, b"oh really?", None), + ]) # Now store brand new points that should force a rewrite of one of the # split (keep in mind the back window size in one hour here). We move -- GitLab From 4f12a089700801104a4f45afddd8ee647fe22d5d Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Mon, 26 Feb 2018 13:29:03 +0100 Subject: [PATCH 1312/1483] storage: do all listing for all aggregations at once in _add_measures This factorize the call to _list_split_keys() so all aggregations that needs the list of keys are retrieved in just one call. --- gnocchi/storage/__init__.py | 103 +++++++++++++++++++++--------------- 1 file changed, 59 insertions(+), 44 deletions(-) diff --git a/gnocchi/storage/__init__.py b/gnocchi/storage/__init__.py index e2fb71b2..ffb84237 100644 --- a/gnocchi/storage/__init__.py +++ b/gnocchi/storage/__init__.py @@ -364,11 +364,9 @@ class StorageDriver(object): and previous_oldest_mutable_timestamp is not None ) - # NOTE(jd) This dict uses (key, aggregation) tuples as keys because - # using just (key) would not carry the aggregation method and therefore - # would not be unique per aggregation! 
- keys_and_split_to_store = {} - deleted_keys = set() + timeseries = {} + aggregations_needing_list_of_keys = set() + for aggregation in aggregations: ts = carbonara.AggregatedTimeSerie.from_grouped_serie( grouped_serie, aggregation) @@ -376,14 +374,14 @@ class StorageDriver(object): # Don't do anything if the timeserie is empty if not ts: continue + # Otherwise, store it for the next iteration + timeseries[aggregation] = ts if aggregation.timespan: oldest_point_to_keep = ts.truncate(aggregation.timespan) else: oldest_point_to_keep = None - oldest_key_to_keep = ts.get_split_key(oldest_point_to_keep) - if previous_oldest_mutable_timestamp and (aggregation.timespan or need_rewrite): previous_oldest_mutable_key = ts.get_split_key( @@ -393,44 +391,61 @@ class StorageDriver(object): # only cleanup if there is a new object, as there must be a new # object for an old object to be cleanup if previous_oldest_mutable_key != oldest_mutable_key: - existing_keys = sorted(self._list_split_keys( - metric, [aggregation])[aggregation]) - - # First, check for old splits to delete - if aggregation.timespan: - for key in list(existing_keys): - # NOTE(jd) Only delete if the key is strictly - # inferior the timestamp; we don't delete any - # timeserie split that contains our timestamp, so - # we prefer to keep a bit more than deleting too - # much - if key >= oldest_key_to_keep: + aggregations_needing_list_of_keys.add(aggregation) + + all_existing_keys = self._list_split_keys( + metric, aggregations_needing_list_of_keys) + + # NOTE(jd) This dict uses (key, aggregation) tuples as keys because + # using just (key) would not carry the aggregation method and therefore + # would not be unique per aggregation! 
+ keys_and_split_to_store = {} + deleted_keys = set() + + for aggregation, ts in six.iteritems(timeseries): + oldest_key_to_keep = ts.get_split_key(oldest_point_to_keep) + + # If we listed the keys for the aggregation, that's because we need + # to check for cleanup and/or rewrite + if aggregation in all_existing_keys: + # FIXME(jd) This should be sorted by the driver and asserted it + # is in tests. It's likely backends already sort anyway. + existing_keys = sorted(all_existing_keys[aggregation]) + # First, check for old splits to delete + if aggregation.timespan: + for key in list(existing_keys): + # NOTE(jd) Only delete if the key is strictly + # inferior the timestamp; we don't delete any + # timeserie split that contains our timestamp, so + # we prefer to keep a bit more than deleting too + # much + if key >= oldest_key_to_keep: + break + deleted_keys.add((key, aggregation)) + existing_keys.remove(key) + + # Rewrite all read-only splits just for fun (and + # compression). This only happens if + # `previous_oldest_mutable_timestamp' exists, which means + # we already wrote some splits at some point – so this is + # not the first time we treat this timeserie. + if need_rewrite: + for key in existing_keys: + if previous_oldest_mutable_key <= key: + if key >= oldest_mutable_key: break - deleted_keys.add((key, aggregation)) - existing_keys.remove(key) - - # Rewrite all read-only splits just for fun (and - # compression). This only happens if - # `previous_oldest_mutable_timestamp' exists, which means - # we already wrote some splits at some point – so this is - # not the first time we treat this timeserie. - if need_rewrite: - for key in existing_keys: - if previous_oldest_mutable_key <= key: - if key >= oldest_mutable_key: - break - LOG.debug( - "Compressing previous split %s (%s) for " - "metric %s", key, aggregation.method, - metric) - # NOTE(jd) Rewrite it entirely for fun (and - # later for compression). For that, we just - # pass an empty split. 
- keys_and_split_to_store[ - (key, aggregation)] = ( - carbonara.AggregatedTimeSerie( - aggregation) - ) + LOG.debug( + "Compressing previous split %s (%s) for " + "metric %s", key, aggregation.method, + metric) + # NOTE(jd) Rewrite it entirely for fun (and + # later for compression). For that, we just + # pass an empty split. + keys_and_split_to_store[ + (key, aggregation)] = ( + carbonara.AggregatedTimeSerie( + aggregation) + ) for key, split in ts.split(): if key >= oldest_key_to_keep: -- GitLab From 872fea2076f03f57849e4001bd4fc605cb7fad26 Mon Sep 17 00:00:00 2001 From: Chris Lamb Date: Thu, 8 Mar 2018 16:09:54 -0800 Subject: [PATCH 1313/1483] Make the build reproducible Whilst working on the Reproducible Builds effort [0], we noticed that gnocchi could not be built reproducibly as it iterates over a Python set when generating documentation. This was originally filed in Debian as #892419. [0] https://reproducible-builds.org/ [1] https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=892419 Signed-off-by: Chris Lamb --- gnocchi/common/redis.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/gnocchi/common/redis.py b/gnocchi/common/redis.py index 7f50d4c4..f187b4ca 100644 --- a/gnocchi/common/redis.py +++ b/gnocchi/common/redis.py @@ -103,7 +103,7 @@ OPTS = [ - http://redis.io/topics/sentinel - http://redis.io/topics/cluster-spec -""" % "`, `".join(CLIENT_ARGS)), +""" % "`, `".join(sorted(CLIENT_ARGS))), ] -- GitLab From 480b61565153284d115d7631aa729ad33b9f1301 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Mon, 26 Feb 2018 13:52:07 +0100 Subject: [PATCH 1314/1483] storage: more batching of store/delete operations in add_measures Rather than emitting store and delete on each aggregation computing, this batches them all at once for a metric by returning the operations from `_add_measures` directly. 
--- gnocchi/storage/__init__.py | 57 ++++++++++++++++++++++++-------- gnocchi/tests/test_aggregates.py | 2 +- gnocchi/tests/test_carbonara.py | 2 +- 3 files changed, 46 insertions(+), 15 deletions(-) diff --git a/gnocchi/storage/__init__.py b/gnocchi/storage/__init__.py index ffb84237..7918b1f5 100644 --- a/gnocchi/storage/__init__.py +++ b/gnocchi/storage/__init__.py @@ -354,9 +354,29 @@ class StorageDriver(object): return self._store_metric_splits(metric, keys_aggregations_data_offset) - def _add_measures(self, metric, aggregations, grouped_serie, - previous_oldest_mutable_timestamp, - oldest_mutable_timestamp): + def _compute_split_operations(self, metric, aggregations, + grouped_serie, + previous_oldest_mutable_timestamp, + oldest_mutable_timestamp): + """Compute changes to a metric and return operations to be done. + + Based on an aggregations list and a grouped timeseries, this computes + what needs to be deleted and stored for a metric and returns it. + + :param metric: The metric + :param aggregations: The aggregations to compute for + :param grouped_serie: A grouped timeseries + :param previous_oldest_mutable_timestamp: The previous oldest storable + timestamp from the previous + backwindow. + :param oldest_mutable_timestamp: The current oldest storable timestamp + from the current backwindow. + :return: A tuple (keys_to_delete, keys_to_store) where keys_to_delete + is a set of `carbonara.SplitKey` to delete and where + keys_to_store is a dictionary of the form {key: aggts} + where key is a `carbonara.SplitKey` and aggts a + `carbonara.AggregatedTimeSerie` to be serialized. 
+ """ # We only need to check for rewrite if driver is not in WRITE_FULL mode # and if we already stored splits once need_rewrite = ( @@ -454,9 +474,7 @@ class StorageDriver(object): key, aggregation.method, metric) keys_and_split_to_store[(key, aggregation)] = split - self._delete_metric_splits(metric, deleted_keys) - self._store_timeserie_splits( - metric, keys_and_split_to_store, oldest_mutable_timestamp) + return (deleted_keys, keys_and_split_to_store) @staticmethod def _delete_metric(metric): @@ -524,7 +542,7 @@ class StorageDriver(object): # sorry. computed_points = {"number": 0} - def _map_add_measures(bound_timeserie): + def _map_compute_splits_operations(bound_timeserie): # NOTE (gordc): bound_timeserie is entire set of # unaggregated measures matching largest # granularity. the following takes only the points @@ -533,6 +551,9 @@ class StorageDriver(object): new_first_block_timestamp = bound_timeserie.first_block_timestamp() computed_points['number'] = len(bound_timeserie) + all_deleted_keys = set() + all_keys_and_splits_to_store = {} + for granularity, aggregations in itertools.groupby( # No need to sort the aggregation, they are already metric.archive_policy.aggregations, @@ -540,14 +561,24 @@ class StorageDriver(object): ts = bound_timeserie.group_serie( granularity, carbonara.round_timestamp( tstamp, granularity)) - - self._add_measures(metric, aggregations, ts, - current_first_block_timestamp, - new_first_block_timestamp) + deleted_keys, keys_and_splits_to_store = ( + self._compute_split_operations( + metric, aggregations, ts, + current_first_block_timestamp, + new_first_block_timestamp) + ) + all_deleted_keys = all_deleted_keys.union(deleted_keys) + all_keys_and_splits_to_store.update(keys_and_splits_to_store) + + self._delete_metric_splits(metric, all_deleted_keys) + self._store_timeserie_splits(metric, all_keys_and_splits_to_store, + new_first_block_timestamp) with utils.StopWatch() as sw: - ts.set_values(measures, - 
before_truncate_callback=_map_add_measures) + ts.set_values( + measures, + before_truncate_callback=_map_compute_splits_operations + ) number_of_operations = (len(agg_methods) * len(definition)) perf = "" diff --git a/gnocchi/tests/test_aggregates.py b/gnocchi/tests/test_aggregates.py index 4e1a443e..7b7dc919 100644 --- a/gnocchi/tests/test_aggregates.py +++ b/gnocchi/tests/test_aggregates.py @@ -44,7 +44,7 @@ def datetime64(*args): class TestAggregatedTimeseries(base.BaseTestCase): @staticmethod def _resample_and_merge(ts, agg_dict): - """Helper method that mimics _add_measures workflow.""" + """Helper method that mimics _compute_splits_operations workflow.""" grouped = ts.group_serie(agg_dict['sampling']) existing = agg_dict.get('return') name = agg_dict.get("name") diff --git a/gnocchi/tests/test_carbonara.py b/gnocchi/tests/test_carbonara.py index bc13977a..680140fd 100644 --- a/gnocchi/tests/test_carbonara.py +++ b/gnocchi/tests/test_carbonara.py @@ -398,7 +398,7 @@ class TestAggregatedTimeSerie(base.BaseTestCase): @staticmethod def _resample_and_merge(ts, agg_dict): - """Helper method that mimics _add_measures workflow.""" + """Helper method that mimics _compute_splits_operations workflow.""" grouped = ts.group_serie(agg_dict['sampling']) existing = agg_dict.get('return') agg_dict['return'] = carbonara.AggregatedTimeSerie.from_grouped_serie( -- GitLab From 344d0b3aec975cb39cc913ebc191c1eb7b253e66 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Mon, 26 Feb 2018 14:41:18 +0100 Subject: [PATCH 1315/1483] storage: merge all _compute_split_operations calls into one This makes sure that all operations are batched, including the _list_split_key that is now done only once for a metric! 
--- gnocchi/storage/__init__.py | 70 +++++++++++++++++++------------------ 1 file changed, 36 insertions(+), 34 deletions(-) diff --git a/gnocchi/storage/__init__.py b/gnocchi/storage/__init__.py index 7918b1f5..d018f617 100644 --- a/gnocchi/storage/__init__.py +++ b/gnocchi/storage/__init__.py @@ -354,8 +354,7 @@ class StorageDriver(object): return self._store_metric_splits(metric, keys_aggregations_data_offset) - def _compute_split_operations(self, metric, aggregations, - grouped_serie, + def _compute_split_operations(self, metric, aggregations_and_timeseries, previous_oldest_mutable_timestamp, oldest_mutable_timestamp): """Compute changes to a metric and return operations to be done. @@ -364,8 +363,8 @@ class StorageDriver(object): what needs to be deleted and stored for a metric and returns it. :param metric: The metric - :param aggregations: The aggregations to compute for - :param grouped_serie: A grouped timeseries + :param aggregations_and_timeseries: A dictionary of timeseries of the + form {aggregation: timeseries}. :param previous_oldest_mutable_timestamp: The previous oldest storable timestamp from the previous backwindow. 
@@ -384,18 +383,12 @@ class StorageDriver(object): and previous_oldest_mutable_timestamp is not None ) - timeseries = {} aggregations_needing_list_of_keys = set() - for aggregation in aggregations: - ts = carbonara.AggregatedTimeSerie.from_grouped_serie( - grouped_serie, aggregation) - - # Don't do anything if the timeserie is empty + for aggregation, ts in six.iteritems(aggregations_and_timeseries): + # Don't do anything if the timeseries is empty if not ts: continue - # Otherwise, store it for the next iteration - timeseries[aggregation] = ts if aggregation.timespan: oldest_point_to_keep = ts.truncate(aggregation.timespan) @@ -422,7 +415,11 @@ class StorageDriver(object): keys_and_split_to_store = {} deleted_keys = set() - for aggregation, ts in six.iteritems(timeseries): + for aggregation, ts in six.iteritems(aggregations_and_timeseries): + # Don't do anything if the timeseries is empty + if not ts: + continue + oldest_key_to_keep = ts.get_split_key(oldest_point_to_keep) # If we listed the keys for the aggregation, that's because we need @@ -551,27 +548,32 @@ class StorageDriver(object): new_first_block_timestamp = bound_timeserie.first_block_timestamp() computed_points['number'] = len(bound_timeserie) - all_deleted_keys = set() - all_keys_and_splits_to_store = {} - - for granularity, aggregations in itertools.groupby( - # No need to sort the aggregation, they are already - metric.archive_policy.aggregations, - ATTRGETTER_GRANULARITY): - ts = bound_timeserie.group_serie( - granularity, carbonara.round_timestamp( - tstamp, granularity)) - deleted_keys, keys_and_splits_to_store = ( - self._compute_split_operations( - metric, aggregations, ts, - current_first_block_timestamp, - new_first_block_timestamp) - ) - all_deleted_keys = all_deleted_keys.union(deleted_keys) - all_keys_and_splits_to_store.update(keys_and_splits_to_store) - - self._delete_metric_splits(metric, all_deleted_keys) - self._store_timeserie_splits(metric, all_keys_and_splits_to_store, + 
aggregations = metric.archive_policy.aggregations + + grouped_timeseries = { + granularity: bound_timeserie.group_serie( + granularity, + carbonara.round_timestamp(tstamp, granularity)) + for granularity, aggregations + # No need to sort the aggregation, they are already + in itertools.groupby(aggregations, ATTRGETTER_GRANULARITY) + } + + aggregations_and_timeseries = { + aggregation: carbonara.AggregatedTimeSerie.from_grouped_serie( + grouped_timeseries[aggregation.granularity], aggregation) + for aggregation in aggregations + } + + deleted_keys, keys_and_split_to_store = ( + self._compute_split_operations( + metric, aggregations_and_timeseries, + current_first_block_timestamp, + new_first_block_timestamp) + ) + + self._delete_metric_splits(metric, deleted_keys) + self._store_timeserie_splits(metric, keys_and_split_to_store, new_first_block_timestamp) with utils.StopWatch() as sw: -- GitLab From ac730e1d0412d11ced2493bac6b762a593bc2bf9 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Sat, 10 Mar 2018 13:35:51 +0100 Subject: [PATCH 1316/1483] ceph: change list_keys_to_process to return values This allows to return a dict that can be used by process_measure_for_metrics --- gnocchi/incoming/ceph.py | 36 ++++++++++-------------------------- 1 file changed, 10 insertions(+), 26 deletions(-) diff --git a/gnocchi/incoming/ceph.py b/gnocchi/incoming/ceph.py index d46f34d4..1c368d3d 100644 --- a/gnocchi/incoming/ceph.py +++ b/gnocchi/incoming/ceph.py @@ -140,7 +140,7 @@ class CephStorage(incoming.IncomingDriver): op, str(sack), flag=self.OMAP_READ_FLAGS) except rados.ObjectNotFound: # API have still written nothing - return () + return {} # NOTE(sileht): after reading the libradospy, I'm # not sure that ret will have the correct value # get_omap_vals transforms the C int to python int @@ -150,16 +150,16 @@ class CephStorage(incoming.IncomingDriver): try: ceph.errno_to_exception(ret) except rados.ObjectNotFound: - return () + return {} - return (k for k, v in omaps) + 
return dict(omaps) def list_metric_with_measures_to_process(self, sack): names = set() marker = "" while True: obj_names = list(self._list_keys_to_process( - sack, marker=marker, limit=self.Q_LIMIT)) + sack, marker=marker, limit=self.Q_LIMIT).keys()) names.update(name.split("_")[1] for name in obj_names) if len(obj_names) < self.Q_LIMIT: break @@ -170,7 +170,7 @@ class CephStorage(incoming.IncomingDriver): def delete_unprocessed_measures_for_metric(self, metric_id): sack = self.sack_for_metric(metric_id) key_prefix = self.MEASURE_PREFIX + "_" + str(metric_id) - keys = tuple(self._list_keys_to_process(sack, key_prefix)) + keys = tuple(self._list_keys_to_process(sack, key_prefix).keys()) if not keys: return @@ -191,32 +191,16 @@ class CephStorage(incoming.IncomingDriver): @contextlib.contextmanager def process_measure_for_metrics(self, metric_ids): measures = {} - processed_keys = defaultdict(list) + processed_keys = {} with rados.ReadOpCtx() as op: for metric_id in metric_ids: sack = self.sack_for_metric(metric_id) - key_prefix = self.MEASURE_PREFIX + "_" + str(metric_id) - omaps, ret = self.ioctx.get_omap_vals(op, "", key_prefix, -1) - self.ioctx.operate_read_op(op, str(sack), - flag=self.OMAP_READ_FLAGS) - # NOTE(sileht): after reading the libradospy, I'm - # not sure that ret will have the correct value - # get_omap_vals transforms the C int to python int - # before operate_read_op is called, I dunno if the int - # content is copied during this transformation or if - # this is a pointer to the C int, I think it's copied... 
- try: - ceph.errno_to_exception(ret) - except rados.ObjectNotFound: - # Object has been deleted, so this is just a stalled entry - # in the OMAP listing, ignore - continue - + processed_keys[sack] = self._list_keys_to_process( + sack, prefix=self.MEASURE_PREFIX + "_" + str(metric_id)) m = self._make_measures_array() - for k, v in omaps: + for k, v in six.iteritems(processed_keys[sack]): m = numpy.concatenate( (m, self._unserialize_measures(k, v))) - processed_keys[sack].append(k) measures[metric_id] = m @@ -227,6 +211,6 @@ class CephStorage(incoming.IncomingDriver): for sack, keys in six.iteritems(processed_keys): # NOTE(sileht): come on Ceph, no return code # for this operation ?!! - self.ioctx.remove_omap_keys(op, tuple(keys)) + self.ioctx.remove_omap_keys(op, tuple(keys.keys())) self.ioctx.operate_write_op(op, str(sack), flags=self.OMAP_WRITE_FLAGS) -- GitLab From 207ee6ff32a910a582e49efe66ce7906873d9c6d Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Mon, 5 Mar 2018 12:03:09 +0100 Subject: [PATCH 1317/1483] carbonara: make BoundTimeSerie.set_values return callback result --- gnocchi/carbonara.py | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/gnocchi/carbonara.py b/gnocchi/carbonara.py index aec2da89..79f63949 100644 --- a/gnocchi/carbonara.py +++ b/gnocchi/carbonara.py @@ -337,15 +337,25 @@ class BoundTimeSerie(TimeSerie): and self.back_window == other.back_window) def set_values(self, values, before_truncate_callback=None): - # NOTE: values must be sorted when passed in. + """Set the timestamps and values in this timeseries. + + :param values: A sorted timeseries array. + :param before_truncate_callback: A callback function to call before + truncating the BoundTimeSerie to its + maximum size. 
+ :return: None of the return value of before_truncate_callback + """ if self.block_size is not None and len(self.ts) != 0: index = numpy.searchsorted(values['timestamps'], self.first_block_timestamp()) values = values[index:] super(BoundTimeSerie, self).set_values(values) if before_truncate_callback: - before_truncate_callback(self) + return_value = before_truncate_callback(self) + else: + return_value = None self._truncate() + return return_value _SERIALIZATION_TIMESTAMP_VALUE_LEN = struct.calcsize(" Date: Mon, 12 Mar 2018 23:02:54 +0100 Subject: [PATCH 1318/1483] Running wrap-and-sort -bast --- debian/changelog | 6 ++++++ debian/control | 2 +- debian/python3-gnocchi.install | 2 +- 3 files changed, 8 insertions(+), 2 deletions(-) diff --git a/debian/changelog b/debian/changelog index 8d224b82..62721d50 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +gnocchi (4.2.0-2) UNRELEASED; urgency=medium + + * Running wrap-and-sort -bast + + -- Ondřej Nový Mon, 12 Mar 2018 23:02:54 +0100 + gnocchi (4.2.0-1) unstable; urgency=medium [ Ondřej Nový ] diff --git a/debian/control b/debian/control index 14c506d4..aa576e45 100644 --- a/debian/control +++ b/debian/control @@ -136,9 +136,9 @@ Section: python Architecture: all Depends: alembic, - python3-cachetools, python3-boto3, python3-botocore (>= 1.5), + python3-cachetools, python3-cotyledon (>= 1.5.0), python3-daiquiri, python3-future (>= 0.15), diff --git a/debian/python3-gnocchi.install b/debian/python3-gnocchi.install index 7cd6c2a9..fb3e5507 100644 --- a/debian/python3-gnocchi.install +++ b/debian/python3-gnocchi.install @@ -1,2 +1,2 @@ -/usr/lib/python3* /usr/bin/* +/usr/lib/python3* -- GitLab From 827ed4c3c8f8ed726692f7b1e4ce37777f906bb6 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Mon, 5 Mar 2018 15:20:46 +0100 Subject: [PATCH 1319/1483] storage: postpone splits deletion/storage after BoundTimeSerie update --- gnocchi/storage/__init__.py | 17 +++++++++++------ 1 file changed, 11 insertions(+), 
6 deletions(-) diff --git a/gnocchi/storage/__init__.py b/gnocchi/storage/__init__.py index d018f617..eaabc579 100644 --- a/gnocchi/storage/__init__.py +++ b/gnocchi/storage/__init__.py @@ -572,15 +572,20 @@ class StorageDriver(object): new_first_block_timestamp) ) - self._delete_metric_splits(metric, deleted_keys) - self._store_timeserie_splits(metric, keys_and_split_to_store, - new_first_block_timestamp) + return (new_first_block_timestamp, + deleted_keys, + keys_and_split_to_store) with utils.StopWatch() as sw: - ts.set_values( - measures, - before_truncate_callback=_map_compute_splits_operations + (new_first_block_timestamp, + deleted_keys, + keys_and_splits_to_store) = ts.set_values( + measures, + before_truncate_callback=_map_compute_splits_operations, ) + self._delete_metric_splits(metric, deleted_keys) + self._store_timeserie_splits(metric, keys_and_splits_to_store, + new_first_block_timestamp) number_of_operations = (len(agg_methods) * len(definition)) perf = "" -- GitLab From 395fc6c425d7b050abaeece981e0cdc5d6414c7e Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Tue, 13 Mar 2018 15:18:36 +0100 Subject: [PATCH 1320/1483] Don't use pbr to get current version --- gnocchi/rest/api.py | 4 ++-- gnocchi/rest/influxdb.py | 4 ++-- gnocchi/service.py | 7 +++---- gnocchi/tests/test_rest.py | 4 ++-- 4 files changed, 9 insertions(+), 10 deletions(-) diff --git a/gnocchi/rest/api.py b/gnocchi/rest/api.py index cee5ef7d..aa0a8e91 100644 --- a/gnocchi/rest/api.py +++ b/gnocchi/rest/api.py @@ -21,7 +21,6 @@ import operator import uuid import jsonpatch -import pbr.version import pecan from pecan import rest import pyparsing @@ -32,6 +31,7 @@ import tooz import voluptuous import werkzeug.http +import gnocchi from gnocchi import archive_policy from gnocchi import chef from gnocchi.cli import metricd @@ -2209,7 +2209,7 @@ class VersionsController(object): @pecan.expose('json') def index(): return { - "build": pbr.version.VersionInfo('gnocchi').version_string(), + 
"build": gnocchi.__version__, "versions": [ { "status": "CURRENT", diff --git a/gnocchi/rest/influxdb.py b/gnocchi/rest/influxdb.py index 29464103..58524f54 100644 --- a/gnocchi/rest/influxdb.py +++ b/gnocchi/rest/influxdb.py @@ -16,6 +16,7 @@ import collections import time +import gnocchi from gnocchi import incoming from gnocchi import indexer from gnocchi.rest import api @@ -23,7 +24,6 @@ from gnocchi import utils import daiquiri import numpy -import pbr.version import pecan from pecan import rest import pyparsing @@ -116,7 +116,7 @@ class InfluxDBController(rest.RestController): @pecan.expose() def ping(self): pecan.response.headers['X-Influxdb-Version'] = ( - "Gnocchi " + pbr.version.VersionInfo('gnocchi').version_string() + "Gnocchi " + gnocchi.__version__ ) @pecan.expose('json') diff --git a/gnocchi/service.py b/gnocchi/service.py index ce60d08e..b33d724c 100644 --- a/gnocchi/service.py +++ b/gnocchi/service.py @@ -19,9 +19,9 @@ import logging import daiquiri from oslo_config import cfg from oslo_db import options as db_options -import pbr.version from six.moves.urllib import parse as urlparse +import gnocchi from gnocchi import archive_policy from gnocchi import opts from gnocchi import utils @@ -48,10 +48,9 @@ def prepare_service(args=None, conf=None, conf.set_default("workers", workers, group="metricd") conf.set_default("parallel_operations", workers) - version = pbr.version.VersionInfo('gnocchi').version_string() conf(args, project='gnocchi', validate_default_values=True, default_config_files=default_config_files, - version=version) + version=gnocchi.__version__) utils.parallel_map.MAX_WORKERS = conf.parallel_operations @@ -101,7 +100,7 @@ def prepare_service(args=None, conf=None, conf.set_default("coordination_url", urlparse.urlunparse(parsed)) - LOG.info("Gnocchi version %s", version) + LOG.info("Gnocchi version %s", gnocchi.__version__) conf.log_opt_values(LOG, logging.DEBUG) return conf diff --git a/gnocchi/tests/test_rest.py 
b/gnocchi/tests/test_rest.py index 4a12ab79..d5c6df29 100644 --- a/gnocchi/tests/test_rest.py +++ b/gnocchi/tests/test_rest.py @@ -27,12 +27,12 @@ import fixtures import iso8601 from keystonemiddleware import fixture as ksm_fixture import mock -import pbr.version import six import testscenarios from testtools import testcase import webtest +import gnocchi from gnocchi import archive_policy from gnocchi.rest import api from gnocchi.rest import app @@ -217,7 +217,7 @@ class RootTest(RestTest): r = self.app.get("/") self.assertEqual( json.loads(r.text)['build'], - pbr.version.VersionInfo('gnocchi').version_string()) + gnocchi.__version__) def test_status(self): with self.app.use_admin_user(): -- GitLab From ea9531da7387fb38d72bc863134ca0ce863f8375 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Mon, 26 Feb 2018 16:23:01 +0100 Subject: [PATCH 1321/1483] incoming: introduce process_measures_for_sack This adds a new method process_measures_for_sack to incoming storage driver. It allows to read an entire sack for new measure rather than individual metrics. This avoids doing 2 listing to process new metrics and only does one to load the measures. The process_new_measures(metrics) is kept for the refresh_metric() use case for now. Some refactoring might be possible after this patch. The S3 storage driver is a bit modified to store the incoming measures in // rather than / so it's easier to list incoming sacks and metrics. 
--- gnocchi/chef.py | 29 ++++++++++++++ gnocchi/cli/metricd.py | 4 +- gnocchi/common/redis.py | 1 + gnocchi/incoming/__init__.py | 4 ++ gnocchi/incoming/ceph.py | 31 ++++++++++++++ gnocchi/incoming/file.py | 30 ++++++++++++++ gnocchi/incoming/redis.py | 46 +++++++++++++++++++++ gnocchi/incoming/s3.py | 78 ++++++++++++++++++++++-------------- gnocchi/incoming/swift.py | 27 +++++++++++++ gnocchi/tests/base.py | 6 ++- gnocchi/tests/test_chef.py | 4 +- 11 files changed, 223 insertions(+), 37 deletions(-) diff --git a/gnocchi/chef.py b/gnocchi/chef.py index 69840e06..3618a01c 100644 --- a/gnocchi/chef.py +++ b/gnocchi/chef.py @@ -137,6 +137,35 @@ class Chef(object): raise LOG.error("Error processing new measures", exc_info=True) + def process_new_measures_for_sack(self, sack, sync=False): + """Process added measures in background. + + :param sack: The sack to process new measures for. + :param sync: If True, raise any issue immediately otherwise just log it + :return: The number of metrics processed. + """ + # NOTE(gordc): must lock at sack level + LOG.debug("Processing measures for sack %s", sack) + try: + with self.incoming.process_measures_for_sack(sack) as measures: + # process only active metrics. deleted metrics with unprocessed + # measures will be skipped until cleaned by janitor. + metrics = self.index.list_metrics( + attribute_filter={ + "in": {"id": measures.keys()} + }) + for metric in metrics: + self.storage.compute_and_store_timeseries( + metric, measures[metric.id] + ) + LOG.debug("Measures for metric %s processed", metric) + return len(measures) + except Exception: + if sync: + raise + LOG.error("Error processing new measures", exc_info=True) + return 0 + def get_sack_lock(self, sack): # FIXME(jd) Some tooz drivers have a limitation on lock name length # (e.g. MySQL). This should be handled by tooz, but it's not yet. 
diff --git a/gnocchi/cli/metricd.py b/gnocchi/cli/metricd.py index 7bb2f771..6f7a19ff 100644 --- a/gnocchi/cli/metricd.py +++ b/gnocchi/cli/metricd.py @@ -237,9 +237,7 @@ class MetricProcessor(MetricProcessBase): continue try: - metrics = self.incoming.list_metric_with_measures_to_process(s) - m_count += len(metrics) - self.chef.process_new_measures(metrics) + m_count += self.chef.process_new_measures_for_sack(s) s_count += 1 self.incoming.finish_sack_processing(s) self.sacks_with_measures_to_process.discard(s) diff --git a/gnocchi/common/redis.py b/gnocchi/common/redis.py index f187b4ca..8f9ceeb8 100644 --- a/gnocchi/common/redis.py +++ b/gnocchi/common/redis.py @@ -30,6 +30,7 @@ except ImportError: from gnocchi import utils +SEP_S = ':' SEP = b':' CLIENT_ARGS = frozenset([ diff --git a/gnocchi/incoming/__init__.py b/gnocchi/incoming/__init__.py index 436e8ac4..795e50eb 100644 --- a/gnocchi/incoming/__init__.py +++ b/gnocchi/incoming/__init__.py @@ -211,6 +211,10 @@ class IncomingDriver(object): def process_measure_for_metrics(metric_id): raise exceptions.NotImplementedError + @staticmethod + def process_measures_for_sack(sack): + raise exceptions.NotImplementedError + @staticmethod def has_unprocessed(metric_id): raise exceptions.NotImplementedError diff --git a/gnocchi/incoming/ceph.py b/gnocchi/incoming/ceph.py index 1c368d3d..1842e1a2 100644 --- a/gnocchi/incoming/ceph.py +++ b/gnocchi/incoming/ceph.py @@ -13,6 +13,7 @@ # under the License. 
from collections import defaultdict import contextlib +import daiquiri import datetime import json import uuid @@ -25,6 +26,8 @@ from gnocchi import incoming rados = ceph.rados +LOG = daiquiri.getLogger(__name__) + class CephStorage(incoming.IncomingDriver): @@ -214,3 +217,31 @@ class CephStorage(incoming.IncomingDriver): self.ioctx.remove_omap_keys(op, tuple(keys.keys())) self.ioctx.operate_write_op(op, str(sack), flags=self.OMAP_WRITE_FLAGS) + + @contextlib.contextmanager + def process_measures_for_sack(self, sack): + measures = defaultdict(self._make_measures_array) + omaps = self._list_keys_to_process( + sack, prefix=self.MEASURE_PREFIX + "_") + for k, v in six.iteritems(omaps): + try: + metric_id = uuid.UUID(k.split("_")[1]) + except (ValueError, IndexError): + LOG.warning("Unable to parse measure object name %s", + k) + continue + measures[metric_id] = numpy.concatenate( + (measures[metric_id], self._unserialize_measures(k, v)) + ) + + yield measures + + # Now clean omap + processed_keys = tuple(omaps.keys()) + if processed_keys: + with rados.WriteOpCtx() as op: + # NOTE(sileht): come on Ceph, no return code + # for this operation ?!! 
+ self.ioctx.remove_omap_keys(op, tuple(processed_keys)) + self.ioctx.operate_write_op(op, str(sack), + flags=self.OMAP_WRITE_FLAGS) diff --git a/gnocchi/incoming/file.py b/gnocchi/incoming/file.py index 40b81255..89432d5e 100644 --- a/gnocchi/incoming/file.py +++ b/gnocchi/incoming/file.py @@ -20,12 +20,15 @@ import shutil import tempfile import uuid +import daiquiri import numpy import six from gnocchi import incoming from gnocchi import utils +LOG = daiquiri.getLogger(__name__) + class FileStorage(incoming.IncomingDriver): def __init__(self, conf, greedy=True): @@ -179,3 +182,30 @@ class FileStorage(incoming.IncomingDriver): for metric_id, files in six.iteritems(processed_files): self._delete_measures_files_for_metric(metric_id, files) + + @contextlib.contextmanager + def process_measures_for_sack(self, sack): + measures = {} + processed_files = {} + for metric_id in self._list_target(self._sack_path(sack)): + try: + metric_id = uuid.UUID(metric_id) + except ValueError: + LOG.error("Unable to parse %s as an UUID, ignoring metric", + metric_id) + continue + files = self._list_measures_container_for_metric_str( + sack, metric_id) + processed_files[metric_id] = files + m = self._make_measures_array() + for f in files: + abspath = self._build_measure_path(metric_id, f) + with open(abspath, "rb") as e: + m = numpy.concatenate(( + m, self._unserialize_measures(f, e.read()))) + measures[metric_id] = m + + yield measures + + for metric_id, files in six.iteritems(processed_files): + self._delete_measures_files_for_metric(metric_id, files) diff --git a/gnocchi/incoming/redis.py b/gnocchi/incoming/redis.py index a163feb8..55c47b64 100644 --- a/gnocchi/incoming/redis.py +++ b/gnocchi/incoming/redis.py @@ -14,13 +14,18 @@ # License for the specific language governing permissions and limitations # under the License. 
import contextlib +import uuid +import daiquiri import six from gnocchi.common import redis from gnocchi import incoming +LOG = daiquiri.getLogger(__name__) + + class RedisStorage(incoming.IncomingDriver): _SCRIPTS = { @@ -30,6 +35,23 @@ local llen = redis.call("LLEN", KEYS[1]) if llen > 0 then llen = llen - 1 end return {llen, table.concat(redis.call("LRANGE", KEYS[1], 0, llen), "")} """, + "process_measures_for_sack": """ +local results = {} +local metric_id_extractor = "[^%s]*%s([^%s]*)" +local metric_with_measures = redis.call("KEYS", KEYS[1] .. "%s*") +for i, sack_metric in ipairs(metric_with_measures) do + local llen = redis.call("LLEN", sack_metric) + local metric_id = sack_metric:gmatch(metric_id_extractor)() + -- lrange is inclusive on both ends, decrease to grab exactly n items + if llen > 0 then llen = llen - 1 end + results[#results + 1] = { + metric_id, + llen, + table.concat(redis.call("LRANGE", sack_metric, 0, llen), "") + } +end +return results +""" % (redis.SEP_S, redis.SEP_S, redis.SEP_S, redis.SEP_S), } def __init__(self, conf, greedy=True): @@ -135,6 +157,30 @@ return {llen, table.concat(redis.call("LRANGE", KEYS[1], 0, llen), "")} pipe.ltrim(key, item_len + 1, -1) pipe.execute() + @contextlib.contextmanager + def process_measures_for_sack(self, sack): + results = self._scripts['process_measures_for_sack'](keys=[str(sack)]) + + measures = {} + for metric_id, item_len, data in results: + try: + metric_id = uuid.UUID(metric_id.decode()) + except ValueError: + LOG.error("Unable to parse metric id %s, ignoring", + metric_id) + continue + measures[metric_id] = self._unserialize_measures(metric_id, data) + + yield measures + + pipe = self._client.pipeline() + for metric_id, item_len, data in results: + key = self._build_measure_path_with_sack( + metric_id.decode(), str(sack)) + # ltrim is inclusive, bump 1 to remove up to and including nth item + pipe.ltrim(key, item_len + 1, -1) + pipe.execute() + def iter_on_sacks_to_process(self): 
self._client.config_set("notify-keyspace-events", "K$") p = self._client.pubsub() diff --git a/gnocchi/incoming/s3.py b/gnocchi/incoming/s3.py index 933ffd72..2c0ed6e1 100644 --- a/gnocchi/incoming/s3.py +++ b/gnocchi/incoming/s3.py @@ -15,6 +15,7 @@ # under the License. from collections import defaultdict import contextlib +import daiquiri import datetime import json import uuid @@ -27,11 +28,13 @@ from gnocchi import incoming boto3 = s3.boto3 botocore = s3.botocore +LOG = daiquiri.getLogger(__name__) + class S3Storage(incoming.IncomingDriver): # NOTE(gordc): override to follow s3 partitioning logic - SACK_NAME_FORMAT = "{number}-{total}/" + SACK_NAME_FORMAT = "{number}-{total}" def __init__(self, conf, greedy=True): super(S3Storage, self).__init__(conf) @@ -78,9 +81,9 @@ class S3Storage(incoming.IncomingDriver): now = datetime.datetime.utcnow().strftime("_%Y%m%d_%H:%M:%S") self.s3.put_object( Bucket=self._bucket_name_measures, - Key=(str(self.sack_for_metric(metric_id)) - + str(metric_id) + "/" - + str(uuid.uuid4()) + now), + Key="/".join((str(self.sack_for_metric(metric_id)), + str(metric_id), + str(uuid.uuid4()) + now)), Body=data) def _build_report(self, details): @@ -103,48 +106,40 @@ class S3Storage(incoming.IncomingDriver): return (len(metric_details), sum(metric_details.values()), metric_details if details else None) - def list_metric_with_measures_to_process(self, sack): - limit = 1000 # 1000 is the default anyway - metrics = set() + def _list_files(self, path_items, **kwargs): response = {} # Handle pagination while response.get('IsTruncated', True): if 'NextContinuationToken' in response: - kwargs = { - 'ContinuationToken': response['NextContinuationToken'] - } + kwargs['ContinuationToken'] = response['NextContinuationToken'] else: - kwargs = {} + try: + del kwargs['ContinuationToken'] + except KeyError: + pass response = self.s3.list_objects_v2( Bucket=self._bucket_name_measures, - Prefix=str(sack), - Delimiter="/", - MaxKeys=limit, + 
Prefix="/".join(path_items) + "/", **kwargs) + yield response + + def list_metric_with_measures_to_process(self, sack): + metrics = set() + for response in self._list_files((str(sack),), Delimiter="/"): for p in response.get('CommonPrefixes', ()): metrics.add(p['Prefix'].split('/', 2)[1]) return metrics - def _list_measure_files_for_metric(self, sack, metric_id): + def _list_measure_files(self, path_items): files = set() - response = {} - while response.get('IsTruncated', True): - if 'NextContinuationToken' in response: - kwargs = { - 'ContinuationToken': response['NextContinuationToken'] - } - else: - kwargs = {} - response = self.s3.list_objects_v2( - Bucket=self._bucket_name_measures, - Prefix=(str(sack) + str(metric_id) + "/"), - **kwargs) - + for response in self._list_files(path_items): for c in response.get('Contents', ()): files.add(c['Key']) - return files + def _list_measure_files_for_metric(self, sack, metric_id): + return self._list_measure_files((str(sack), str(metric_id))) + def delete_unprocessed_measures_for_metric(self, metric_id): sack = self.sack_for_metric(metric_id) files = self._list_measure_files_for_metric(sack, metric_id) @@ -175,3 +170,28 @@ class S3Storage(incoming.IncomingDriver): # Now clean objects s3.bulk_delete(self.s3, self._bucket_name_measures, all_files) + + @contextlib.contextmanager + def process_measures_for_sack(self, sack): + measures = defaultdict(self._make_measures_array) + files = self._list_measure_files((str(sack),)) + for f in files: + try: + sack, metric_id, measure_id = f.split("/") + metric_id = uuid.UUID(metric_id) + except ValueError: + LOG.warning("Unable to parse measure file name %s", f) + continue + + response = self.s3.get_object( + Bucket=self._bucket_name_measures, + Key=f) + measures[metric_id] = numpy.concatenate(( + measures[metric_id], + self._unserialize_measures(f, response['Body'].read()) + )) + + yield measures + + # Now clean objects + s3.bulk_delete(self.s3, self._bucket_name_measures, files) 
diff --git a/gnocchi/incoming/swift.py b/gnocchi/incoming/swift.py index f38ea35b..06500f9d 100644 --- a/gnocchi/incoming/swift.py +++ b/gnocchi/incoming/swift.py @@ -13,6 +13,7 @@ # under the License. from collections import defaultdict import contextlib +import daiquiri import datetime import json import uuid @@ -25,6 +26,8 @@ from gnocchi import incoming swclient = swift.swclient swift_utils = swift.swift_utils +LOG = daiquiri.getLogger(__name__) + class SwiftStorage(incoming.IncomingDriver): def __init__(self, conf, greedy=True): @@ -117,3 +120,27 @@ class SwiftStorage(incoming.IncomingDriver): # Now clean objects for sack_name, files in six.iteritems(all_files): swift.bulk_delete(self.swift, sack_name, files) + + @contextlib.contextmanager + def process_measures_for_sack(self, sack): + measures = defaultdict(self._make_measures_array) + sack_name = str(sack) + headers, files = self.swift.get_container(sack_name, full_listing=True) + for f in files: + try: + metric_id, random_id = f['name'].split("/") + metric_id = uuid.UUID(metric_id) + except ValueError: + LOG.warning("Unable to parse measure file name %s", f) + continue + measures[metric_id] = self._array_concatenate([ + measures[metric_id], + self._unserialize_measures( + metric_id, + self.swift.get_object(sack_name, f['name'])[1], + ) + ]) + + yield measures + + swift.bulk_delete(self.swift, sack_name, files) diff --git a/gnocchi/tests/base.py b/gnocchi/tests/base.py index 3d46dbe9..f0d66b4e 100644 --- a/gnocchi/tests/base.py +++ b/gnocchi/tests/base.py @@ -389,5 +389,7 @@ class TestCase(BaseTestCase): def trigger_processing(self, metrics=None): if metrics is None: - metrics = [str(self.metric.id)] - self.chef.process_new_measures(metrics, sync=True) + self.chef.process_new_measures_for_sack( + self.incoming.sack_for_metric(self.metric.id), sync=True) + else: + self.chef.process_new_measures(metrics, sync=True) diff --git a/gnocchi/tests/test_chef.py b/gnocchi/tests/test_chef.py index 0370747a..0fd7a483 
100644 --- a/gnocchi/tests/test_chef.py +++ b/gnocchi/tests/test_chef.py @@ -41,10 +41,8 @@ class TestChef(base.TestCase): self.index.delete_metric(self.metric.id) self.trigger_processing() __, __, details = self.incoming._build_report(True) - self.assertIn(str(self.metric.id), details) - self.chef.expunge_metrics(sync=True) - __, __, details = self.incoming._build_report(True) self.assertNotIn(str(self.metric.id), details) + self.chef.expunge_metrics(sync=True) def test_delete_expunge_metric(self): self.incoming.add_measures(self.metric.id, [ -- GitLab From 5778cfb10132e1fc6b62e9634594bee9f367504e Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Sat, 3 Mar 2018 18:55:34 +0100 Subject: [PATCH 1322/1483] chef: lock sack when using process_new_measures_for_sack This makes it safer to call it without taking care of the lock externally. --- gnocchi/chef.py | 21 +++++++++++++++------ gnocchi/cli/metricd.py | 13 ++++--------- gnocchi/rest/api.py | 11 +++++++---- 3 files changed, 26 insertions(+), 19 deletions(-) diff --git a/gnocchi/chef.py b/gnocchi/chef.py index 3618a01c..65ac572a 100644 --- a/gnocchi/chef.py +++ b/gnocchi/chef.py @@ -29,8 +29,11 @@ ITEMGETTER_1 = operator.itemgetter(1) LOG = daiquiri.getLogger(__name__) -class SackLockTimeoutError(Exception): - pass +class SackAlreadyLocked(Exception): + def __init__(self, sack): + self.sack = sack + super(SackAlreadyLocked, self).__init__( + "Sack %s already locked" % sack) class Chef(object): @@ -103,9 +106,7 @@ class Chef(object): s = self.incoming.sack_for_metric(metric.id) lock = self.get_sack_lock(s) if not lock.acquire(blocking=timeout): - raise SackLockTimeoutError( - 'Unable to refresh metric: %s. Metric is locked. ' - 'Please try again.' % metric.id) + raise SackAlreadyLocked(s) try: self.process_new_measures([str(metric.id)]) finally: @@ -140,11 +141,17 @@ class Chef(object): def process_new_measures_for_sack(self, sack, sync=False): """Process added measures in background. 
+ Lock a sack and try to process measures from it. If the sack cannot be + locked, the method will raise `SackAlreadyLocked`. + :param sack: The sack to process new measures for. :param sync: If True, raise any issue immediately otherwise just log it :return: The number of metrics processed. + """ - # NOTE(gordc): must lock at sack level + lock = self.get_sack_lock(sack) + if not lock.acquire(blocking=False): + raise SackAlreadyLocked(sack) LOG.debug("Processing measures for sack %s", sack) try: with self.incoming.process_measures_for_sack(sack) as measures: @@ -165,6 +172,8 @@ class Chef(object): raise LOG.error("Error processing new measures", exc_info=True) return 0 + finally: + lock.release() def get_sack_lock(self, sack): # FIXME(jd) Some tooz drivers have a limitation on lock name length diff --git a/gnocchi/cli/metricd.py b/gnocchi/cli/metricd.py index 6f7a19ff..6e1155da 100644 --- a/gnocchi/cli/metricd.py +++ b/gnocchi/cli/metricd.py @@ -230,22 +230,17 @@ class MetricProcessor(MetricProcessBase): sacks = (self.sacks_with_measures_to_process.copy() or self._get_sacks_to_process()) for s in sacks: - # TODO(gordc): support delay release lock so we don't - # process a sack right after another process - lock = self.chef.get_sack_lock(s) - if not lock.acquire(blocking=False): - continue - try: - m_count += self.chef.process_new_measures_for_sack(s) + try: + m_count += self.chef.process_new_measures_for_sack(s) + except chef.SackAlreadyLocked: + continue s_count += 1 self.incoming.finish_sack_processing(s) self.sacks_with_measures_to_process.discard(s) except Exception: LOG.error("Unexpected error processing assigned job", exc_info=True) - finally: - lock.release() LOG.debug("%d metrics processed from %d sacks", m_count, s_count) if sacks == self._get_sacks_to_process(): # We just did a full scan of all sacks, reset the timer diff --git a/gnocchi/rest/api.py b/gnocchi/rest/api.py index aa0a8e91..c9f9c261 100644 --- a/gnocchi/rest/api.py +++ b/gnocchi/rest/api.py 
@@ -532,8 +532,9 @@ class MetricController(rest.RestController): pecan.request.chef.refresh_metric( self.metric, pecan.request.conf.api.operation_timeout) - except chef.SackLockTimeoutError as e: - abort(503, six.text_type(e)) + except chef.SackAlreadyLocked: + abort(503, 'Unable to refresh metric: %s. Metric is locked. ' + 'Please try again.' % self.metric.id) try: return pecan.request.storage.get_measures( self.metric, aggregations, start, stop, resample)[aggregation] @@ -1903,8 +1904,10 @@ class AggregationController(rest.RestController): try: pecan.request.chef.refresh_metric( m, pecan.request.conf.api.operation_timeout) - except chef.SackLockTimeoutError as e: - abort(503, six.text_type(e)) + except chef.SackAlreadyLocked: + abort(503, 'Unable to refresh metric: %s. ' + 'Metric is locked. ' + 'Please try again.' % m.id) if number_of_metrics == 1: # NOTE(sileht): don't do the aggregation if we only have one # metric -- GitLab From f7d992304430bc225fed5d7c4951f449295f9fd6 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Fri, 16 Mar 2018 17:48:34 +0100 Subject: [PATCH 1323/1483] tests: fix conflicting user_id UUID in test This user_id is also used in the resource-aggregation scenario, which might conflict when tests are run at the same time. 
--- .../gabbits/aggregates-with-resources.yaml | 24 +++++++++---------- 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/gnocchi/tests/functional/gabbits/aggregates-with-resources.yaml b/gnocchi/tests/functional/gabbits/aggregates-with-resources.yaml index cb8652e0..935213e3 100644 --- a/gnocchi/tests/functional/gabbits/aggregates-with-resources.yaml +++ b/gnocchi/tests/functional/gabbits/aggregates-with-resources.yaml @@ -37,7 +37,7 @@ tests: POST: /v1/resource/generic data: id: 1ed9c196-4c9f-4ba8-a5be-c9a71a82aac4 - user_id: 6c865dd0-7945-4e08-8b27-d0d7f1c2b667 + user_id: A50F549C-1F1C-4888-A71A-2C5473CCCEC1 project_id: c7f32f1f-c5ef-427a-8ecd-915b219c66e8 metrics: cpu.util: @@ -61,7 +61,7 @@ tests: POST: /v1/resource/generic data: id: 2447CD7E-48A6-4C50-A991-6677CC0D00E6 - user_id: 6c865dd0-7945-4e08-8b27-d0d7f1c2b667 + user_id: A50F549C-1F1C-4888-A71A-2C5473CCCEC1 project_id: c7f32f1f-c5ef-427a-8ecd-915b219c66e8 metrics: cpu.util: @@ -81,7 +81,7 @@ tests: POST: /v1/resource/generic data: id: 33333BC5-5948-4F29-B7DF-7DE607660452 - user_id: 6c865dd0-7945-4e08-8b27-d0d7f1c2b667 + user_id: A50F549C-1F1C-4888-A71A-2C5473CCCEC1 project_id: ee4cfc41-1cdc-4d2f-9a08-f94111d80171 metrics: cpu.util: @@ -124,7 +124,7 @@ tests: POST: /v1/aggregates?details=true data: resource_type: generic - search: "user_id = '6c865dd0-7945-4e08-8b27-d0d7f1c2b667'" + search: "user_id = 'A50F549C-1F1C-4888-A71A-2C5473CCCEC1'" operations: "(aggregate mean (metric cpu.util mean))" poll: count: 10 @@ -143,7 +143,7 @@ tests: POST: /v1/aggregates?details=true data: resource_type: generic - search: "user_id = '6c865dd0-7945-4e08-8b27-d0d7f1c2b667'" + search: "user_id = 'A50F549C-1F1C-4888-A71A-2C5473CCCEC1'" operations: "(metric (cpu.util mean) (cpu.idle mean))" poll: count: 10 @@ -171,7 +171,7 @@ tests: POST: /v1/aggregates?details=true data: resource_type: generic - search: "user_id = '6c865dd0-7945-4e08-8b27-d0d7f1c2b667'" + search: "user_id = 
'A50F549C-1F1C-4888-A71A-2C5473CCCEC1'" operations: "(metric cpu.util mean)" poll: count: 10 @@ -198,7 +198,7 @@ tests: POST: /v1/aggregates?details=true data: resource_type: generic - search: "user_id = '6c865dd0-7945-4e08-8b27-d0d7f1c2b667'" + search: "user_id = 'A50F549C-1F1C-4888-A71A-2C5473CCCEC1'" operations: "(metric (cpu.* mean) (*way mean))" poll: count: 10 @@ -227,7 +227,7 @@ tests: POST: /v1/aggregates?details=true data: resource_type: generic - search: "user_id = '6c865dd0-7945-4e08-8b27-d0d7f1c2b667'" + search: "user_id = 'A50F549C-1F1C-4888-A71A-2C5473CCCEC1'" operations: "(metric cpu.* mean)" poll: count: 10 @@ -272,7 +272,7 @@ tests: POST: /v1/aggregates?groupby=project_id&groupby=user_id&details=true data: resource_type: generic - search: "user_id = '6c865dd0-7945-4e08-8b27-d0d7f1c2b667'" + search: "user_id = 'A50F549C-1F1C-4888-A71A-2C5473CCCEC1'" operations: "(aggregate mean (metric cpu.util mean))" response_json_paths: $.`len`: 2 @@ -284,7 +284,7 @@ tests: - ['2015-03-06T14:33:57+00:00', 1.0, 33.05] - ['2015-03-06T14:34:12+00:00', 1.0, 10.0] $[0].group: - user_id: 6c865dd0-7945-4e08-8b27-d0d7f1c2b667 + user_id: A50F549C-1F1C-4888-A71A-2C5473CCCEC1 project_id: c7f32f1f-c5ef-427a-8ecd-915b219c66e8 $[1].measures.references.`len`: 1 $[1].measures.references[/id].[0]: $HISTORY['list resources'].$RESPONSE['$[2]'] @@ -293,7 +293,7 @@ tests: - ['2015-03-06T14:33:57+00:00', 1.0, 230.0] - ['2015-03-06T14:34:12+00:00', 1.0, 45.41] $[1].group: - user_id: 6c865dd0-7945-4e08-8b27-d0d7f1c2b667 + user_id: A50F549C-1F1C-4888-A71A-2C5473CCCEC1 project_id: ee4cfc41-1cdc-4d2f-9a08-f94111d80171 # Negative tests @@ -322,7 +322,7 @@ tests: authorization: "basic Zm9vYmFyOg==" data: resource_type: generic - search: "user_id = '6c865dd0-7945-4e08-8b27-d0d7f1c2b667'" + search: "user_id = 'A50F549C-1F1C-4888-A71A-2C5473CCCEC1'" operations: "(aggregate mean (metric (notexists mean) (foobar mean)))" status: 400 response_json_paths: -- GitLab From 
4cacc5354d4f88d54bb2965e2ac3443e5ef45036 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Mon, 5 Mar 2018 16:11:13 +0100 Subject: [PATCH 1324/1483] chef/storage: process all metrics from the sack in one single batch This allows in this first step to retrieve and store the unaggregated timeseries all at once. --- gnocchi/chef.py | 23 ++-- gnocchi/storage/__init__.py | 228 ++++++++++++++++++------------------ 2 files changed, 130 insertions(+), 121 deletions(-) diff --git a/gnocchi/chef.py b/gnocchi/chef.py index 65ac572a..359223d9 100644 --- a/gnocchi/chef.py +++ b/gnocchi/chef.py @@ -128,11 +128,13 @@ class Chef(object): LOG.debug("Processing measures for %s", metrics) with self.incoming.process_measure_for_metrics( [m.id for m in metrics]) as metrics_and_measures: - for metric, measures in six.iteritems(metrics_and_measures): - self.storage.compute_and_store_timeseries( - metrics_by_id[metric], measures - ) - LOG.debug("Measures for metric %s processed", metrics) + self.storage.add_measures_to_metrics({ + metrics_by_id[metric]: measures + for metric, measures + in six.iteritems(metrics_and_measures) + }) + LOG.debug("Measures for %d metrics processed", + len(metrics)) except Exception: if sync: raise @@ -161,11 +163,12 @@ class Chef(object): attribute_filter={ "in": {"id": measures.keys()} }) - for metric in metrics: - self.storage.compute_and_store_timeseries( - metric, measures[metric.id] - ) - LOG.debug("Measures for metric %s processed", metric) + self.storage.add_measures_to_metrics({ + metric: measures[metric.id] + for metric in metrics + }) + LOG.debug("Measures for %d metrics processed", + len(metrics)) return len(measures) except Exception: if sync: diff --git a/gnocchi/storage/__init__.py b/gnocchi/storage/__init__.py index eaabc579..9df33f6b 100644 --- a/gnocchi/storage/__init__.py +++ b/gnocchi/storage/__init__.py @@ -487,120 +487,126 @@ class StorageDriver(object): ((metric, key, aggregation) for key, aggregation in keys_and_aggregations)) - def 
compute_and_store_timeseries(self, metric, measures): - # NOTE(mnaser): The metric could have been handled by - # another worker, ignore if no measures. - if len(measures) == 0: - LOG.debug("Skipping %s (already processed)", metric) - return - - measures = numpy.sort(measures, order='timestamps') - - agg_methods = list(metric.archive_policy.aggregation_methods) - block_size = metric.archive_policy.max_block_size - back_window = metric.archive_policy.back_window - definition = metric.archive_policy.definition - # NOTE(sileht): We keep one more blocks to calculate rate of change - # correctly - if any(filter(lambda x: x.startswith("rate:"), agg_methods)): - back_window += 1 + def add_measures_to_metrics(self, metrics_and_measures): + """Update a metric with a new measures, computing new aggregations. + :param metrics_and_measures: A dict there keys are `storage.Metric` + objects and values are timeseries array of + the new measures. + """ with utils.StopWatch() as sw: - raw_measures = ( - self._get_or_create_unaggregated_timeseries( - [metric])[metric] - ) - LOG.debug("Retrieve unaggregated measures for %s in %.2fs", - metric.id, sw.elapsed()) - - if raw_measures is None: - ts = None - else: - try: - ts = carbonara.BoundTimeSerie.unserialize( - raw_measures, block_size, back_window) - except carbonara.InvalidData: - LOG.error("Data corruption detected for %s " - "unaggregated timeserie, creating a new one", - metric.id) + raw_measures = self._get_or_create_unaggregated_timeseries( + metrics_and_measures.keys()) + LOG.debug("Retrieve unaggregated measures for %d metric in %.2fs", + len(metrics_and_measures), sw.elapsed()) + + new_boundts = [] + + for metric, measures in six.iteritems(metrics_and_measures): + measures = numpy.sort(measures, order='timestamps') + + agg_methods = list(metric.archive_policy.aggregation_methods) + block_size = metric.archive_policy.max_block_size + back_window = metric.archive_policy.back_window + definition = 
metric.archive_policy.definition + # NOTE(sileht): We keep one more blocks to calculate rate of change + # correctly + if any(filter(lambda x: x.startswith("rate:"), agg_methods)): + back_window += 1 + + if raw_measures[metric] is None: ts = None - - if ts is None: - # This is the first time we treat measures for this - # metric, or data are corrupted, create a new one - ts = carbonara.BoundTimeSerie(block_size=block_size, - back_window=back_window) - current_first_block_timestamp = None - else: - current_first_block_timestamp = ts.first_block_timestamp() - - # NOTE(jd) This is Python where you need such - # hack to pass a variable around a closure, - # sorry. - computed_points = {"number": 0} - - def _map_compute_splits_operations(bound_timeserie): - # NOTE (gordc): bound_timeserie is entire set of - # unaggregated measures matching largest - # granularity. the following takes only the points - # affected by new measures for specific granularity - tstamp = max(bound_timeserie.first, measures['timestamps'][0]) - new_first_block_timestamp = bound_timeserie.first_block_timestamp() - computed_points['number'] = len(bound_timeserie) - - aggregations = metric.archive_policy.aggregations - - grouped_timeseries = { - granularity: bound_timeserie.group_serie( - granularity, - carbonara.round_timestamp(tstamp, granularity)) - for granularity, aggregations - # No need to sort the aggregation, they are already - in itertools.groupby(aggregations, ATTRGETTER_GRANULARITY) - } - - aggregations_and_timeseries = { - aggregation: carbonara.AggregatedTimeSerie.from_grouped_serie( - grouped_timeseries[aggregation.granularity], aggregation) - for aggregation in aggregations - } - - deleted_keys, keys_and_split_to_store = ( - self._compute_split_operations( - metric, aggregations_and_timeseries, - current_first_block_timestamp, - new_first_block_timestamp) - ) - - return (new_first_block_timestamp, - deleted_keys, - keys_and_split_to_store) - - with utils.StopWatch() as sw: - 
(new_first_block_timestamp, - deleted_keys, - keys_and_splits_to_store) = ts.set_values( - measures, - before_truncate_callback=_map_compute_splits_operations, - ) - self._delete_metric_splits(metric, deleted_keys) - self._store_timeserie_splits(metric, keys_and_splits_to_store, - new_first_block_timestamp) - - number_of_operations = (len(agg_methods) * len(definition)) - perf = "" - elapsed = sw.elapsed() - if elapsed > 0: - perf = " (%d points/s, %d measures/s)" % ( - ((number_of_operations * computed_points['number']) / - elapsed), - ((number_of_operations * len(measures)) / elapsed) - ) - LOG.debug("Computed new metric %s with %d new measures " - "in %.2f seconds%s", - metric.id, len(measures), elapsed, perf) - - self._store_unaggregated_timeseries([(metric, ts.serialize())]) + else: + try: + ts = carbonara.BoundTimeSerie.unserialize( + raw_measures[metric], block_size, back_window) + except carbonara.InvalidData: + LOG.error("Data corruption detected for %s " + "unaggregated timeserie, creating a new one", + metric.id) + ts = None + + if ts is None: + # This is the first time we treat measures for this + # metric, or data are corrupted, create a new one + ts = carbonara.BoundTimeSerie(block_size=block_size, + back_window=back_window) + current_first_block_timestamp = None + else: + current_first_block_timestamp = ts.first_block_timestamp() + + # NOTE(jd) This is Python where you need such + # hack to pass a variable around a closure, + # sorry. + computed_points = {"number": 0} + + def _map_compute_splits_operations(bound_timeserie): + # NOTE (gordc): bound_timeserie is entire set of + # unaggregated measures matching largest + # granularity. 
the following takes only the points + # affected by new measures for specific granularity + tstamp = max(bound_timeserie.first, measures['timestamps'][0]) + new_first_block_timestamp = ( + bound_timeserie.first_block_timestamp() + ) + computed_points['number'] = len(bound_timeserie) + + aggregations = metric.archive_policy.aggregations + + grouped_timeseries = { + granularity: bound_timeserie.group_serie( + granularity, + carbonara.round_timestamp(tstamp, granularity)) + for granularity, aggregations + # No need to sort the aggregation, they are already + in itertools.groupby(aggregations, ATTRGETTER_GRANULARITY) + } + + aggregations_and_timeseries = { + aggregation: + carbonara.AggregatedTimeSerie.from_grouped_serie( + grouped_timeseries[aggregation.granularity], + aggregation) + for aggregation in aggregations + } + + deleted_keys, keys_and_split_to_store = ( + self._compute_split_operations( + metric, aggregations_and_timeseries, + current_first_block_timestamp, + new_first_block_timestamp) + ) + + return (new_first_block_timestamp, + deleted_keys, + keys_and_split_to_store) + + with utils.StopWatch() as sw: + (new_first_block_timestamp, + deleted_keys, + keys_and_splits_to_store) = ts.set_values( + measures, + before_truncate_callback=_map_compute_splits_operations) + self._delete_metric_splits(metric, deleted_keys) + self._store_timeserie_splits(metric, keys_and_splits_to_store, + new_first_block_timestamp) + + new_boundts.append((metric, ts.serialize())) + + number_of_operations = (len(agg_methods) * len(definition)) + perf = "" + elapsed = sw.elapsed() + if elapsed > 0: + perf = " (%d points/s, %d measures/s)" % ( + ((number_of_operations * computed_points['number']) / + elapsed), + ((number_of_operations * len(measures)) / elapsed) + ) + LOG.debug("Computed new metric %s with %d new measures " + "in %.2f seconds%s", + metric.id, len(measures), elapsed, perf) + + self._store_unaggregated_timeseries(new_boundts) def find_measure(self, metric, predicate, 
granularity, aggregation="mean", from_timestamp=None, to_timestamp=None): -- GitLab From 3ab20112775ccc73120a5dad98982eb050d14f8f Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Tue, 20 Mar 2018 06:45:29 +0100 Subject: [PATCH 1325/1483] git: ignore setuptools_scm 1.16.0/1/2 SOURCES.txt generated by this version is wrong and broke bdist* commands. This change ignores these versions. --- setup.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/setup.py b/setup.py index 74d11134..03534032 100755 --- a/setup.py +++ b/setup.py @@ -42,7 +42,8 @@ def pbr_compat(v): setuptools.setup( - setup_requires=['setuptools>=30.3.0', 'setuptools_scm'], + setup_requires=['setuptools>=30.3.0', + 'setuptools_scm!=1.16.0,!=1.16.1,!=1.16.2'], use_scm_version={'version_scheme': pbr_compat}, cmdclass=cmdclass, ) -- GitLab From c054ab52c64c1bbccde0f8debc059315d5376896 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Wed, 7 Mar 2018 10:29:49 +0100 Subject: [PATCH 1326/1483] storage: _delete_metric_splits can now batch over multiple metrics --- gnocchi/storage/__init__.py | 12 ++++++++++-- gnocchi/storage/ceph.py | 35 +++++++++++++++++++---------------- gnocchi/storage/redis.py | 12 +++++++----- gnocchi/tests/test_storage.py | 4 ++-- 4 files changed, 38 insertions(+), 25 deletions(-) diff --git a/gnocchi/storage/__init__.py b/gnocchi/storage/__init__.py index 9df33f6b..4777f4a8 100644 --- a/gnocchi/storage/__init__.py +++ b/gnocchi/storage/__init__.py @@ -481,10 +481,18 @@ class StorageDriver(object): def _delete_metric_splits_unbatched(metric, keys, aggregation, version=3): raise NotImplementedError - def _delete_metric_splits(self, metric, keys_and_aggregations, version=3): + def _delete_metric_splits(self, metrics_keys_aggregations, version=3): + """Delete splits of metrics. + + :param metrics_keys_aggregations: A dict where keys are + `storage.Metric` and values are lists + of (key, aggregation) tuples. 
+ """ utils.parallel_map( utils.return_none_on_failure(self._delete_metric_splits_unbatched), ((metric, key, aggregation) + for metric, keys_and_aggregations + in six.iteritems(metrics_keys_aggregations) for key, aggregation in keys_and_aggregations)) def add_measures_to_metrics(self, metrics_and_measures): @@ -587,7 +595,7 @@ class StorageDriver(object): keys_and_splits_to_store) = ts.set_values( measures, before_truncate_callback=_map_compute_splits_operations) - self._delete_metric_splits(metric, deleted_keys) + self._delete_metric_splits({metric: deleted_keys}) self._store_timeserie_splits(metric, keys_and_splits_to_store, new_first_block_timestamp) diff --git a/gnocchi/storage/ceph.py b/gnocchi/storage/ceph.py index 9d8233d6..c2c914d4 100644 --- a/gnocchi/storage/ceph.py +++ b/gnocchi/storage/ceph.py @@ -94,23 +94,26 @@ class CephStorage(storage.StorageDriver): self.ioctx.operate_write_op( op, self._build_unaggregated_timeserie_path(metric, 3)) - def _delete_metric_splits(self, metric, keys_and_aggregations, version=3): - names = tuple( - self._get_object_name(metric, key, aggregation.method, version) - for key, aggregation in keys_and_aggregations - ) + def _delete_metric_splits(self, metrics_keys_aggregations, version=3): with rados.WriteOpCtx() as op: - for name in names: - try: - self.ioctx.remove_object(name) - except rados.ObjectNotFound: - # It's possible that we already remove that object and then - # crashed before removing it from the OMAP key list; then - # no big deal anyway. 
- pass - self.ioctx.remove_omap_keys(op, names) - self.ioctx.operate_write_op( - op, self._build_unaggregated_timeserie_path(metric, 3)) + for metric, keys_and_aggregations in six.iteritems( + metrics_keys_aggregations): + names = tuple( + self._get_object_name( + metric, key, aggregation.method, version) + for key, aggregation in keys_and_aggregations + ) + for name in names: + try: + self.ioctx.remove_object(name) + except rados.ObjectNotFound: + # It's possible that we already remove that object and + # then crashed before removing it from the OMAP key + # list; then no big deal anyway. + pass + self.ioctx.remove_omap_keys(op, names) + self.ioctx.operate_write_op( + op, self._build_unaggregated_timeserie_path(metric, 3)) def _delete_metric(self, metric): with rados.ReadOpCtx() as op: diff --git a/gnocchi/storage/redis.py b/gnocchi/storage/redis.py index 4f2bf7e0..2d0986af 100644 --- a/gnocchi/storage/redis.py +++ b/gnocchi/storage/redis.py @@ -126,12 +126,14 @@ return ids } return keys - def _delete_metric_splits(self, metric, keys_and_aggregations, version=3): - metric_key = self._metric_key(metric) + def _delete_metric_splits(self, metrics_keys_aggregations, version=3): pipe = self._client.pipeline(transaction=False) - for key, aggregation in keys_and_aggregations: - pipe.hdel(metric_key, self._aggregated_field_for_split( - aggregation.method, key, version)) + for metric, keys_and_aggregations in six.iteritems( + metrics_keys_aggregations): + metric_key = self._metric_key(metric) + for key, aggregation in keys_and_aggregations: + pipe.hdel(metric_key, self._aggregated_field_for_split( + aggregation.method, key, version)) pipe.execute() def _store_metric_splits(self, metric, keys_aggregations_data_offset, diff --git a/gnocchi/tests/test_storage.py b/gnocchi/tests/test_storage.py index 1d09f830..513c4e0e 100644 --- a/gnocchi/tests/test_storage.py +++ b/gnocchi/tests/test_storage.py @@ -776,10 +776,10 @@ class TestStorageDriver(tests_base.TestCase): # Test what 
happens if we delete the latest split and then need to # compress it! self.storage._delete_metric_splits( - self.metric, [(carbonara.SplitKey( + {self.metric: [(carbonara.SplitKey( numpy.datetime64(1451952000, 's'), numpy.timedelta64(1, 'm'), - ), aggregation)]) + ), aggregation)]}) # Now store brand new points that should force a rewrite of one of the # split (keep in mind the back window size in one hour here). We move -- GitLab From 16aa1e52fb151461aebd5d9f167f711d3a2e2666 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Wed, 7 Mar 2018 11:26:50 +0100 Subject: [PATCH 1327/1483] storage: batch split storage on multiple metrics --- gnocchi/storage/__init__.py | 39 +++++++++++++++++++++++++++-------- gnocchi/storage/ceph.py | 23 ++++++++++++--------- gnocchi/storage/file.py | 13 ++++++------ gnocchi/storage/redis.py | 14 +++++++------ gnocchi/storage/s3.py | 15 +++++++------- gnocchi/storage/swift.py | 13 ++++++------ gnocchi/tests/test_storage.py | 17 ++++++++------- 7 files changed, 79 insertions(+), 55 deletions(-) diff --git a/gnocchi/storage/__init__.py b/gnocchi/storage/__init__.py index 4777f4a8..10f86b7c 100644 --- a/gnocchi/storage/__init__.py +++ b/gnocchi/storage/__init__.py @@ -166,19 +166,39 @@ class StorageDriver(object): ((metric, data, version) for metric, data in metrics_and_data)) @staticmethod - def _store_metric_splits(metric, keys_aggregations_data_offset, version=3): - """Store metric split. + def _store_metric_splits_unbatched(metric, key, aggregation, data, offset, + version=3): + """Store a metric split. - Store a bunch of splits for a metric. + :param metric: A metric. + :param key: The `carbonara.SplitKey`. + :param aggregation: The `carbonara.Aggregation`. + :param data: The actual data to write. + :param offset: The offset to write to. + :param version: Storage engine format version. 
+ """ + raise NotImplementedError - :param metric: The metric to store for - :param keys_aggregations_data_offset: A list of - (key, aggregation, data, offset) - tuples + def _store_metric_splits(self, metrics_keys_aggregations_data_offset, + version=3): + """Store metric splits. + Store a bunch of splits for some metrics. + + :param metrics_keys_aggregations_data_offset: A dict where keys are + `storage.Metric` and + values are a list of + (key, aggregation, + data, offset) tuples. :param version: Storage engine format version. """ - raise NotImplementedError + utils.parallel_map( + self._store_metric_splits_unbatched, + ((metric, key, aggregation, data, offset, version) + for metric, keys_aggregations_data_offset + in six.iteritems(metrics_keys_aggregations_data_offset) + for key, aggregation, data, offset + in keys_aggregations_data_offset)) @staticmethod def _list_split_keys(metric, aggregations, version=3): @@ -352,7 +372,8 @@ class StorageDriver(object): keys_aggregations_data_offset.append( (key, split.aggregation, data, offset)) - return self._store_metric_splits(metric, keys_aggregations_data_offset) + return self._store_metric_splits( + {metric: keys_aggregations_data_offset}) def _compute_split_operations(self, metric, aggregations_and_timeseries, previous_oldest_mutable_timestamp, diff --git a/gnocchi/storage/ceph.py b/gnocchi/storage/ceph.py index c2c914d4..5b2ab6bb 100644 --- a/gnocchi/storage/ceph.py +++ b/gnocchi/storage/ceph.py @@ -81,18 +81,21 @@ class CephStorage(storage.StorageDriver): else: self.ioctx.write_full(name, b"") - def _store_metric_splits(self, metric, keys_aggregations_data_offset, + def _store_metric_splits(self, metrics_keys_aggregations_data_offset, version=3): with rados.WriteOpCtx() as op: - for key, agg, data, offset in keys_aggregations_data_offset: - name = self._get_object_name(metric, key, agg.method, version) - if offset is None: - self.ioctx.write_full(name, data) - else: - self.ioctx.write(name, data, offset=offset) - 
self.ioctx.set_omap(op, (name,), (b"",)) - self.ioctx.operate_write_op( - op, self._build_unaggregated_timeserie_path(metric, 3)) + for metric, keys_aggregations_data_offset in six.iteritems( + metrics_keys_aggregations_data_offset): + for key, agg, data, offset in keys_aggregations_data_offset: + name = self._get_object_name( + metric, key, agg.method, version) + if offset is None: + self.ioctx.write_full(name, data) + else: + self.ioctx.write(name, data, offset=offset) + self.ioctx.set_omap(op, (name,), (b"",)) + self.ioctx.operate_write_op( + op, self._build_unaggregated_timeserie_path(metric, 3)) def _delete_metric_splits(self, metrics_keys_aggregations, version=3): with rados.WriteOpCtx() as op: diff --git a/gnocchi/storage/file.py b/gnocchi/storage/file.py index 7f8a2712..010d851b 100644 --- a/gnocchi/storage/file.py +++ b/gnocchi/storage/file.py @@ -163,13 +163,12 @@ class FileStorage(storage.StorageDriver): os.unlink(self._build_metric_path_for_split( metric, aggregation.method, key, version)) - def _store_metric_splits(self, metric, keys_aggregations_data_offset, - version=3): - for key, aggregation, data, offset in keys_aggregations_data_offset: - self._atomic_file_store( - self._build_metric_path_for_split( - metric, aggregation.method, key, version), - data) + def _store_metric_splits_unbatched(self, metric, key, aggregation, data, + offset, version): + self._atomic_file_store( + self._build_metric_path_for_split( + metric, aggregation.method, key, version), + data) def _delete_metric(self, metric): path = self._build_metric_dir(metric) diff --git a/gnocchi/storage/redis.py b/gnocchi/storage/redis.py index 2d0986af..569eb94c 100644 --- a/gnocchi/storage/redis.py +++ b/gnocchi/storage/redis.py @@ -136,14 +136,16 @@ return ids aggregation.method, key, version)) pipe.execute() - def _store_metric_splits(self, metric, keys_aggregations_data_offset, + def _store_metric_splits(self, metrics_keys_aggregations_data_offset, version=3): pipe = 
self._client.pipeline(transaction=False) - metric_key = self._metric_key(metric) - for key, aggregation, data, offset in keys_aggregations_data_offset: - key = self._aggregated_field_for_split( - aggregation.method, key, version) - pipe.hset(metric_key, key, data) + for metric, keys_aggs_data_offset in six.iteritems( + metrics_keys_aggregations_data_offset): + metric_key = self._metric_key(metric) + for key, aggregation, data, offset in keys_aggs_data_offset: + key = self._aggregated_field_for_split( + aggregation.method, key, version) + pipe.hset(metric_key, key, data) pipe.execute() def _delete_metric(self, metric): diff --git a/gnocchi/storage/s3.py b/gnocchi/storage/s3.py index 5c75e57f..29134d63 100644 --- a/gnocchi/storage/s3.py +++ b/gnocchi/storage/s3.py @@ -120,14 +120,13 @@ class S3Storage(storage.StorageDriver): wait=self._consistency_wait, stop=self._consistency_stop)(_head) - def _store_metric_splits(self, metric, keys_aggregations_data_offset, - version=3): - for key, aggregation, data, offset in keys_aggregations_data_offset: - self._put_object_safe( - Bucket=self._bucket_name, - Key=self._prefix(metric) + self._object_name( - key, aggregation.method, version), - Body=data) + def _store_metric_splits_unbatched(self, metric, key, aggregation, data, + offset, version): + self._put_object_safe( + Bucket=self._bucket_name, + Key=self._prefix(metric) + self._object_name( + key, aggregation.method, version), + Body=data) def _delete_metric_splits_unbatched(self, metric, key, aggregation, version=3): diff --git a/gnocchi/storage/swift.py b/gnocchi/storage/swift.py index 603a1b45..287173dc 100644 --- a/gnocchi/storage/swift.py +++ b/gnocchi/storage/swift.py @@ -116,13 +116,12 @@ class SwiftStorage(storage.StorageDriver): if resp['status'] == 204: raise storage.MetricAlreadyExists(metric) - def _store_metric_splits(self, metric, keys_aggregations_data_offset, - version=3): - for key, aggregation, data, offset in keys_aggregations_data_offset: - 
self.swift.put_object( - self._container_name(metric), - self._object_name(key, aggregation.method, version), - data) + def _store_metric_splits_unbatched(self, metric, key, aggregation, data, + offset, version): + self.swift.put_object( + self._container_name(metric), + self._object_name(key, aggregation.method, version), + data) def _delete_metric_splits_unbatched( self, metric, key, aggregation, version=3): diff --git a/gnocchi/tests/test_storage.py b/gnocchi/tests/test_storage.py index 513c4e0e..9bc6a546 100644 --- a/gnocchi/tests/test_storage.py +++ b/gnocchi/tests/test_storage.py @@ -287,11 +287,12 @@ class TestStorageDriver(tests_base.TestCase): for call in c.mock_calls: # policy is 60 points and split is 48. should only update 2nd half args = call[1] - if args[0] == m_sql: - for key, aggregation, data, offset in args[1]: - if (key.sampling == numpy.timedelta64(1, 'm') - and aggregation.method == "mean"): - count += 1 + for metric, key_agg_data_offset in six.iteritems(args[0]): + if metric == m_sql: + for key, aggregation, data, offset in key_agg_data_offset: + if (key.sampling == numpy.timedelta64(1, 'm') + and aggregation.method == "mean"): + count += 1 self.assertEqual(1, count) def test_add_measures_update_subset(self): @@ -859,13 +860,13 @@ class TestStorageDriver(tests_base.TestCase): ]}, self.storage.get_measures(self.metric, [aggregation])) # Test what happens if we write garbage - self.storage._store_metric_splits( - self.metric, [ + self.storage._store_metric_splits({ + self.metric: [ (carbonara.SplitKey( numpy.datetime64(1451952000, 's'), numpy.timedelta64(1, 'm')), aggregation, b"oh really?", None), - ]) + ]}) # Now store brand new points that should force a rewrite of one of the # split (keep in mind the back window size in one hour here). 
We move -- GitLab From 7de72f7cacd481c03fd28ce971b5625c58703fa4 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Tue, 20 Mar 2018 17:15:07 +0100 Subject: [PATCH 1328/1483] tests: fix duplicate resource id between tests Some of the tests in another scenario use those id already, making a potential conflict. --- gnocchi/tests/functional/gabbits/resource-aggregation.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/gnocchi/tests/functional/gabbits/resource-aggregation.yaml b/gnocchi/tests/functional/gabbits/resource-aggregation.yaml index b301008b..644e82fe 100644 --- a/gnocchi/tests/functional/gabbits/resource-aggregation.yaml +++ b/gnocchi/tests/functional/gabbits/resource-aggregation.yaml @@ -80,7 +80,7 @@ tests: - name: create resource 3 POST: /v1/resource/generic data: - id: 33333BC5-5948-4F29-B7DF-7DE607660452 + id: 0FB0B7CD-9A41-4A76-8B2E-BFC02843506A user_id: 6c865dd0-7945-4e08-8b27-d0d7f1c2b667 project_id: ee4cfc41-1cdc-4d2f-9a08-f94111d80171 metrics: @@ -89,7 +89,7 @@ tests: status: 201 - name: post cpuutil measures 3 - POST: /v1/resource/generic/33333BC5-5948-4F29-B7DF-7DE607660452/metric/cpu.util/measures + POST: /v1/resource/generic/0FB0B7CD-9A41-4A76-8B2E-BFC02843506A/metric/cpu.util/measures data: - timestamp: "2015-03-06T14:33:57" value: 230 -- GitLab From 37df0ad641ffe05895b4907c06a2a626ff184038 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Wed, 7 Mar 2018 12:11:15 +0100 Subject: [PATCH 1329/1483] storage: update_metric_splits can batch several metrics at once --- gnocchi/storage/__init__.py | 107 +++++++++++++++++++++--------------- 1 file changed, 62 insertions(+), 45 deletions(-) diff --git a/gnocchi/storage/__init__.py b/gnocchi/storage/__init__.py index 10f86b7c..3e21d779 100644 --- a/gnocchi/storage/__init__.py +++ b/gnocchi/storage/__init__.py @@ -331,49 +331,64 @@ class StorageDriver(object): ts.truncate(aggregation.timespan) return ts - def _store_timeserie_splits(self, metric, keys_and_aggregations_and_splits, 
- oldest_mutable_timestamp): - keys_to_rewrite = [] - splits_to_rewrite = [] - for (key, aggregation), split in six.iteritems( - keys_and_aggregations_and_splits): - # NOTE(jd) We write the full split only if the driver works that - # way (self.WRITE_FULL) or if the oldest_mutable_timestamp is out - # of range. - write_full = ( - self.WRITE_FULL or next(key) <= oldest_mutable_timestamp - ) - if write_full: - keys_to_rewrite.append(key) - splits_to_rewrite.append(split) - - # Update the splits that were passed as argument with the data already - # stored in the case that we need to rewrite them fully. - # First, fetch all those existing splits. - existing_data = self._get_splits_and_unserialize( - metric, [(key, split.aggregation) - for key, split - in six.moves.zip(keys_to_rewrite, splits_to_rewrite)]) - - for key, split, existing in six.moves.zip( - keys_to_rewrite, splits_to_rewrite, existing_data): - if existing: - existing.merge(split) - keys_and_aggregations_and_splits[ - (key, split.aggregation)] = existing - - keys_aggregations_data_offset = [] - for (key, aggregation), split in six.iteritems( - keys_and_aggregations_and_splits): - # Do not store the split if it's empty. - if split: - offset, data = split.serialize( - key, compressed=key in keys_to_rewrite) - keys_aggregations_data_offset.append( - (key, split.aggregation, data, offset)) - - return self._store_metric_splits( - {metric: keys_aggregations_data_offset}) + def _update_metric_splits(self, metrics_keys_aggregations_splits): + """Store splits of `carbonara.`AggregatedTimeSerie` for a metric. + + This reads the existing split and merge it with the new one give as + argument, then writing it to the storage. 
+ + :param metrics_keys_aggregations_splits: A dict where keys are + `storage.Metric` and values + are tuples of the form + ({(key, aggregation): split}, + oldest_mutable_timestamp) + """ + metrics_splits_to_store = {} + + for metric, (keys_and_aggregations_and_splits, + oldest_mutable_timestamp) in six.iteritems( + metrics_keys_aggregations_splits): + keys_to_rewrite = [] + splits_to_rewrite = [] + for (key, aggregation), split in six.iteritems( + keys_and_aggregations_and_splits): + # NOTE(jd) We write the full split only if the driver works + # that way (self.WRITE_FULL) or if the oldest_mutable_timestamp + # is out of range. + write_full = ( + self.WRITE_FULL or next(key) <= oldest_mutable_timestamp + ) + if write_full: + keys_to_rewrite.append(key) + splits_to_rewrite.append(split) + + # Update the splits that were passed as argument with the data + # already stored in the case that we need to rewrite them fully. + # First, fetch all those existing splits. + existing_data = self._get_splits_and_unserialize( + metric, [(key, split.aggregation) + for key, split + in six.moves.zip(keys_to_rewrite, splits_to_rewrite)]) + + for key, split, existing in six.moves.zip( + keys_to_rewrite, splits_to_rewrite, existing_data): + if existing: + existing.merge(split) + keys_and_aggregations_and_splits[ + (key, split.aggregation)] = existing + + keys_aggregations_data_offset = [] + for (key, aggregation), split in six.iteritems( + keys_and_aggregations_and_splits): + # Do not store the split if it's empty. 
+ if split: + offset, data = split.serialize( + key, compressed=key in keys_to_rewrite) + keys_aggregations_data_offset.append( + (key, split.aggregation, data, offset)) + metrics_splits_to_store[metric] = keys_aggregations_data_offset + + return self._store_metric_splits(metrics_splits_to_store) def _compute_split_operations(self, metric, aggregations_and_timeseries, previous_oldest_mutable_timestamp, @@ -617,8 +632,10 @@ class StorageDriver(object): measures, before_truncate_callback=_map_compute_splits_operations) self._delete_metric_splits({metric: deleted_keys}) - self._store_timeserie_splits(metric, keys_and_splits_to_store, - new_first_block_timestamp) + self._update_metric_splits({ + metric: (keys_and_splits_to_store, + new_first_block_timestamp), + }) new_boundts.append((metric, ts.serialize())) -- GitLab From 8e041df2519e0c7bbd3bca949a06f70fc24b099d Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Wed, 7 Mar 2018 12:19:49 +0100 Subject: [PATCH 1330/1483] storage: update and delete all splits for all metrics at once --- gnocchi/storage/__init__.py | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/gnocchi/storage/__init__.py b/gnocchi/storage/__init__.py index 3e21d779..3093183a 100644 --- a/gnocchi/storage/__init__.py +++ b/gnocchi/storage/__init__.py @@ -545,6 +545,8 @@ class StorageDriver(object): len(metrics_and_measures), sw.elapsed()) new_boundts = [] + splits_to_delete = {} + splits_to_update = {} for metric, measures in six.iteritems(metrics_and_measures): measures = numpy.sort(measures, order='timestamps') @@ -630,12 +632,12 @@ class StorageDriver(object): deleted_keys, keys_and_splits_to_store) = ts.set_values( measures, - before_truncate_callback=_map_compute_splits_operations) - self._delete_metric_splits({metric: deleted_keys}) - self._update_metric_splits({ - metric: (keys_and_splits_to_store, - new_first_block_timestamp), - }) + before_truncate_callback=_map_compute_splits_operations, + ) + + 
splits_to_delete[metric] = deleted_keys + splits_to_update[metric] = (keys_and_splits_to_store, + new_first_block_timestamp) new_boundts.append((metric, ts.serialize())) @@ -652,6 +654,8 @@ class StorageDriver(object): "in %.2f seconds%s", metric.id, len(measures), elapsed, perf) + self._delete_metric_splits(splits_to_delete) + self._update_metric_splits(splits_to_update) self._store_unaggregated_timeseries(new_boundts) def find_measure(self, metric, predicate, granularity, aggregation="mean", -- GitLab From 72a7a39b3f7a26030c101b3e6b531bde1a8556aa Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Mon, 12 Mar 2018 17:05:11 +0100 Subject: [PATCH 1331/1483] metricd: publish statistics about processing speed Closes #600 --- gnocchi/cli/metricd.py | 2 + gnocchi/rest/api.py | 13 ++++- gnocchi/storage/__init__.py | 62 +++++++++++++--------- gnocchi/tests/functional/gabbits/base.yaml | 4 +- 4 files changed, 54 insertions(+), 27 deletions(-) diff --git a/gnocchi/cli/metricd.py b/gnocchi/cli/metricd.py index 6e1155da..859efea4 100644 --- a/gnocchi/cli/metricd.py +++ b/gnocchi/cli/metricd.py @@ -242,6 +242,8 @@ class MetricProcessor(MetricProcessBase): LOG.error("Unexpected error processing assigned job", exc_info=True) LOG.debug("%d metrics processed from %d sacks", m_count, s_count) + # Update statistics + self.coord.update_capabitilities(self.GROUP_ID, self.statistics) if sacks == self._get_sacks_to_process(): # We just did a full scan of all sacks, reset the timer self._last_full_sack_scan.reset() diff --git a/gnocchi/rest/api.py b/gnocchi/rest/api.py index c9f9c261..bfd42c5e 100644 --- a/gnocchi/rest/api.py +++ b/gnocchi/rest/api.py @@ -2005,9 +2005,20 @@ class StatusController(rest.RestController): report_dict["storage"]["measures_to_process"] = report['details'] report_dict['metricd'] = {} if members_req: - report_dict['metricd']['processors'] = members_req.get() + members = members_req.get() + caps = [ + pecan.request.coordinator.get_member_capabilities( + 
metricd.MetricProcessor.GROUP_ID, member) + for member in members + ] + report_dict['metricd']['processors'] = members + report_dict['metricd']['statistics'] = { + member: cap.get() + for member, cap in six.moves.zip(members, caps) + } else: report_dict['metricd']['processors'] = None + report_dict['metricd']['statistics'] = {} return report_dict diff --git a/gnocchi/storage/__init__.py b/gnocchi/storage/__init__.py index 3093183a..9c215212 100644 --- a/gnocchi/storage/__init__.py +++ b/gnocchi/storage/__init__.py @@ -14,6 +14,7 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. +import collections import itertools import operator @@ -97,11 +98,31 @@ def get_driver(conf): conf.storage) +class Statistics(collections.defaultdict): + class StatisticsTimeContext(object): + def __init__(self, stats, name): + self.stats = stats + self.name = name + " time" + + def __enter__(self): + self.sw = utils.StopWatch() + self.sw.start() + return self + + def __exit__(self, type, value, traceback): + self.stats[self.name] += self.sw.elapsed() + + def __init__(self): + super(Statistics, self).__init__(lambda: 0) + + def time(self, name): + return self.StatisticsTimeContext(self, name) + + class StorageDriver(object): - @staticmethod - def __init__(conf): - pass + def __init__(self, conf): + self.statistics = Statistics() @staticmethod def upgrade(): @@ -538,11 +559,12 @@ class StorageDriver(object): objects and values are timeseries array of the new measures. 
""" - with utils.StopWatch() as sw: + with self.statistics.time("raw measures fetch"): raw_measures = self._get_or_create_unaggregated_timeseries( metrics_and_measures.keys()) - LOG.debug("Retrieve unaggregated measures for %d metric in %.2fs", - len(metrics_and_measures), sw.elapsed()) + self.statistics["raw measures fetch"] += len(metrics_and_measures) + self.statistics["processed measures"] += sum( + map(len, metrics_and_measures.values())) new_boundts = [] splits_to_delete = {} @@ -554,7 +576,6 @@ class StorageDriver(object): agg_methods = list(metric.archive_policy.aggregation_methods) block_size = metric.archive_policy.max_block_size back_window = metric.archive_policy.back_window - definition = metric.archive_policy.definition # NOTE(sileht): We keep one more blocks to calculate rate of change # correctly if any(filter(lambda x: x.startswith("rate:"), agg_methods)): @@ -627,7 +648,7 @@ class StorageDriver(object): deleted_keys, keys_and_split_to_store) - with utils.StopWatch() as sw: + with self.statistics.time("aggregated measures compute"): (new_first_block_timestamp, deleted_keys, keys_and_splits_to_store) = ts.set_values( @@ -641,22 +662,15 @@ class StorageDriver(object): new_boundts.append((metric, ts.serialize())) - number_of_operations = (len(agg_methods) * len(definition)) - perf = "" - elapsed = sw.elapsed() - if elapsed > 0: - perf = " (%d points/s, %d measures/s)" % ( - ((number_of_operations * computed_points['number']) / - elapsed), - ((number_of_operations * len(measures)) / elapsed) - ) - LOG.debug("Computed new metric %s with %d new measures " - "in %.2f seconds%s", - metric.id, len(measures), elapsed, perf) - - self._delete_metric_splits(splits_to_delete) - self._update_metric_splits(splits_to_update) - self._store_unaggregated_timeseries(new_boundts) + with self.statistics.time("splits delete"): + self._delete_metric_splits(splits_to_delete) + self.statistics["splits delete"] += len(splits_to_delete) + with self.statistics.time("splits 
update"): + self._update_metric_splits(splits_to_update) + self.statistics["splits delete"] += len(splits_to_update) + with self.statistics.time("raw measures store"): + self._store_unaggregated_timeseries(new_boundts) + self.statistics["raw measures store"] += len(new_boundts) def find_measure(self, metric, predicate, granularity, aggregation="mean", from_timestamp=None, to_timestamp=None): diff --git a/gnocchi/tests/functional/gabbits/base.yaml b/gnocchi/tests/functional/gabbits/base.yaml index 43b46954..fdde5f49 100644 --- a/gnocchi/tests/functional/gabbits/base.yaml +++ b/gnocchi/tests/functional/gabbits/base.yaml @@ -132,7 +132,7 @@ tests: authorization: "basic YWRtaW46" response_json_paths: $.storage.`len`: 2 - $.metricd.`len`: 1 + $.metricd.`len`: 2 - name: get status, no details GET: /v1/status?details=False @@ -141,4 +141,4 @@ tests: authorization: "basic YWRtaW46" response_json_paths: $.storage.`len`: 1 - $.metricd.`len`: 1 + $.metricd.`len`: 2 -- GitLab From e424ea8e5bf66073ef76e6670152637aa5acba0f Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Tue, 13 Mar 2018 09:43:35 +0100 Subject: [PATCH 1332/1483] metricd: use process_new_measures_for_sack in test code --- gnocchi/cli/metricd.py | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/gnocchi/cli/metricd.py b/gnocchi/cli/metricd.py index 859efea4..9344d523 100644 --- a/gnocchi/cli/metricd.py +++ b/gnocchi/cli/metricd.py @@ -297,14 +297,15 @@ def metricd_tester(conf): index = indexer.get_driver(conf) s = storage.get_driver(conf) inc = incoming.get_driver(conf) - metrics = set() + c = chef.Chef(None, inc, index, s) + metrics_count = 0 for sack in inc.iter_sacks(): - metrics.update(inc.list_metric_with_measures_to_process(sack)) - if len(metrics) >= conf.stop_after_processing_metrics: + try: + metrics_count += c.process_new_measures_for_sack(s, True) + except chef.SackAlreadyLocked: + continue + if metrics_count >= conf.stop_after_processing_metrics: break - c = 
chef.Chef(None, inc, index, s) - c.process_new_measures( - list(metrics)[:conf.stop_after_processing_metrics], True) def metricd(): -- GitLab From 0e3cc19e884b0eb474a624bf8db84e99547b49bd Mon Sep 17 00:00:00 2001 From: Thomas Goirand Date: Mon, 26 Mar 2018 16:43:35 +0000 Subject: [PATCH 1333/1483] Fixed dbc postrm. --- debian/changelog | 8 ++++++-- debian/control | 2 +- debian/gnocchi-common.postrm.in | 2 +- 3 files changed, 8 insertions(+), 4 deletions(-) diff --git a/debian/changelog b/debian/changelog index 62721d50..c1e69760 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,8 +1,12 @@ -gnocchi (4.2.0-2) UNRELEASED; urgency=medium +gnocchi (4.2.0-2) unstable; urgency=medium + [ Ondřej Nový ] * Running wrap-and-sort -bast - -- Ondřej Nový Mon, 12 Mar 2018 23:02:54 +0100 + [ Thomas Goirand ] + * Fixed dbc postrm. + + -- Thomas Goirand Mon, 26 Mar 2018 16:43:11 +0000 gnocchi (4.2.0-1) unstable; urgency=medium diff --git a/debian/control b/debian/control index aa576e45..b322ba86 100644 --- a/debian/control +++ b/debian/control @@ -7,7 +7,7 @@ Uploaders: Build-Depends: debhelper (>= 10), dh-python, - openstack-pkg-tools (>= 70~), + openstack-pkg-tools (>= 74~), python3-all, python3-pbr, python3-setuptools, diff --git a/debian/gnocchi-common.postrm.in b/debian/gnocchi-common.postrm.in index 4ff2cf2d..b99233ea 100644 --- a/debian/gnocchi-common.postrm.in +++ b/debian/gnocchi-common.postrm.in @@ -5,7 +5,7 @@ set -e #PKGOS-INCLUDE# if [ "$1" = "purge" ] ; then - pkgos_dbc_postrm gnocchi gnocchi-common + pkgos_dbc_postrm gnocchi gnocchi-common $@ rm -fr /etc/gnocchi rm -rf /var/lib/gnocchi /var/log/gnocchi -- GitLab From 51ac20f06690f54933fb6f84ba3353ea0eae5170 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Tue, 13 Mar 2018 11:16:38 +0100 Subject: [PATCH 1334/1483] chef: merge refresh_metric and process_new_measures This simplifies the code base by just allowing to process new measures sack per sack with the existing process_new_measures_for_sack() or to use 
the new refresh_metrics() who takes a list of metrics. Since the list of metric must be known, there's no need to use list_metric_with_measures_to_process() anymore, so it can be removed. Since some of the tests (test_rest) need to process all sacks now, reduce the number of sack to a small number in order for each test not to take 10s. --- gnocchi/chef.py | 57 +++++++++++++++------------- gnocchi/incoming/__init__.py | 4 -- gnocchi/incoming/ceph.py | 13 ------- gnocchi/incoming/file.py | 5 +-- gnocchi/incoming/redis.py | 5 --- gnocchi/incoming/s3.py | 7 ---- gnocchi/incoming/swift.py | 5 --- gnocchi/rest/api.py | 8 ++-- gnocchi/tests/base.py | 7 ++-- gnocchi/tests/functional/fixtures.py | 7 +--- gnocchi/tests/test_aggregates.py | 24 ++++++------ gnocchi/tests/test_rest.py | 6 +-- gnocchi/tests/test_statsd.py | 8 ++-- gnocchi/tests/test_storage.py | 38 +++++-------------- gnocchi/tests/utils.py | 5 --- 15 files changed, 70 insertions(+), 129 deletions(-) diff --git a/gnocchi/chef.py b/gnocchi/chef.py index 359223d9..e26bc487 100644 --- a/gnocchi/chef.py +++ b/gnocchi/chef.py @@ -102,32 +102,31 @@ class Chef(object): LOG.error("Unable to expunge metric %s from storage", metric, exc_info=True) - def refresh_metric(self, metric, timeout): - s = self.incoming.sack_for_metric(metric.id) - lock = self.get_sack_lock(s) - if not lock.acquire(blocking=timeout): - raise SackAlreadyLocked(s) - try: - self.process_new_measures([str(metric.id)]) - finally: - lock.release() - - def process_new_measures(self, metrics_to_process, sync=False): - """Process added measures in background. + def refresh_metrics(self, metrics, timeout=None, sync=False): + """Process added measures in background for some metrics only. - Some drivers might need to have a background task running that process - the measures sent to metrics. This is used for that. + :param metrics: The list of `indexer.Metric` to refresh. + :param timeout: Time to wait for the process to happen. 
+ :param sync: If an error occurs, raise, otherwise just log it. """ # process only active metrics. deleted metrics with unprocessed # measures will be skipped until cleaned by janitor. - metrics = self.index.list_metrics( - attribute_filter={"in": {"id": metrics_to_process}}) metrics_by_id = {m.id: m for m in metrics} - # NOTE(gordc): must lock at sack level - try: - LOG.debug("Processing measures for %s", metrics) - with self.incoming.process_measure_for_metrics( - [m.id for m in metrics]) as metrics_and_measures: + metrics_to_refresh = sorted( + ((metric, self.incoming.sack_for_metric(metric.id)) + for metric in metrics), + key=ITEMGETTER_1) + for sack, metric_and_sack in itertools.groupby( + metrics_to_refresh, ITEMGETTER_1): + lock = self.get_sack_lock(sack) + # FIXME(jd) timeout should be global for all sack locking + if not lock.acquire(blocking=timeout): + raise SackAlreadyLocked(sack) + metrics = [m[0].id for m in metric_and_sack] + try: + LOG.debug("Processing measures for %d metrics", len(metrics)) + with self.incoming.process_measure_for_metrics( + metrics) as metrics_and_measures: self.storage.add_measures_to_metrics({ metrics_by_id[metric]: measures for metric, measures @@ -135,24 +134,28 @@ class Chef(object): }) LOG.debug("Measures for %d metrics processed", len(metrics)) - except Exception: - if sync: - raise - LOG.error("Error processing new measures", exc_info=True) + except Exception: + if sync: + raise + LOG.error("Error processing new measures", exc_info=True) + finally: + lock.release() - def process_new_measures_for_sack(self, sack, sync=False): + def process_new_measures_for_sack(self, sack, blocking=False, sync=False): """Process added measures in background. Lock a sack and try to process measures from it. If the sack cannot be locked, the method will raise `SackAlreadyLocked`. :param sack: The sack to process new measures for. + :param blocking: Block to be sure the sack is processed or raise + `SackAlreadyLocked` otherwise. 
:param sync: If True, raise any issue immediately otherwise just log it :return: The number of metrics processed. """ lock = self.get_sack_lock(sack) - if not lock.acquire(blocking=False): + if not lock.acquire(blocking=blocking): raise SackAlreadyLocked(sack) LOG.debug("Processing measures for sack %s", sack) try: diff --git a/gnocchi/incoming/__init__.py b/gnocchi/incoming/__init__.py index 795e50eb..f716adfa 100644 --- a/gnocchi/incoming/__init__.py +++ b/gnocchi/incoming/__init__.py @@ -199,10 +199,6 @@ class IncomingDriver(object): def _build_report(details): raise exceptions.NotImplementedError - @staticmethod - def list_metric_with_measures_to_process(sack): - raise exceptions.NotImplementedError - @staticmethod def delete_unprocessed_measures_for_metric(metric_id): raise exceptions.NotImplementedError diff --git a/gnocchi/incoming/ceph.py b/gnocchi/incoming/ceph.py index 1842e1a2..8a39fa8e 100644 --- a/gnocchi/incoming/ceph.py +++ b/gnocchi/incoming/ceph.py @@ -157,19 +157,6 @@ class CephStorage(incoming.IncomingDriver): return dict(omaps) - def list_metric_with_measures_to_process(self, sack): - names = set() - marker = "" - while True: - obj_names = list(self._list_keys_to_process( - sack, marker=marker, limit=self.Q_LIMIT).keys()) - names.update(name.split("_")[1] for name in obj_names) - if len(obj_names) < self.Q_LIMIT: - break - else: - marker = obj_names[-1] - return names - def delete_unprocessed_measures_for_metric(self, metric_id): sack = self.sack_for_metric(metric_id) key_prefix = self.MEASURE_PREFIX + "_" + str(metric_id) diff --git a/gnocchi/incoming/file.py b/gnocchi/incoming/file.py index 89432d5e..446807e8 100644 --- a/gnocchi/incoming/file.py +++ b/gnocchi/incoming/file.py @@ -110,7 +110,7 @@ class FileStorage(incoming.IncomingDriver): self._list_measures_container_for_metric_str(sack, metric)) for sack in self.iter_sacks(): - for metric in self.list_metric_with_measures_to_process(sack): + for metric in 
set(self._list_target(self._sack_path(sack))): build_metric_report(metric, sack) return (report_vars['metrics'] or len(report_vars['metric_details'].keys()), @@ -118,9 +118,6 @@ class FileStorage(incoming.IncomingDriver): sum(report_vars['metric_details'].values()), report_vars['metric_details'] if details else None) - def list_metric_with_measures_to_process(self, sack): - return set(self._list_target(self._sack_path(sack))) - def _list_measures_container_for_metric_str(self, sack, metric_id): return self._list_target(self._measure_path(sack, metric_id)) diff --git a/gnocchi/incoming/redis.py b/gnocchi/incoming/redis.py index 55c47b64..2a5191b4 100644 --- a/gnocchi/incoming/redis.py +++ b/gnocchi/incoming/redis.py @@ -123,11 +123,6 @@ return results return (metrics, report_vars['measures'], report_vars['metric_details'] if details else None) - def list_metric_with_measures_to_process(self, sack): - match = redis.SEP.join([str(sack).encode(), b"*"]) - keys = self._client.scan_iter(match=match, count=1000) - return set([k.split(redis.SEP)[1].decode("utf8") for k in keys]) - def delete_unprocessed_measures_for_metric(self, metric_id): self._client.delete(self._build_measure_path(metric_id)) diff --git a/gnocchi/incoming/s3.py b/gnocchi/incoming/s3.py index 2c0ed6e1..c4f7b45d 100644 --- a/gnocchi/incoming/s3.py +++ b/gnocchi/incoming/s3.py @@ -123,13 +123,6 @@ class S3Storage(incoming.IncomingDriver): **kwargs) yield response - def list_metric_with_measures_to_process(self, sack): - metrics = set() - for response in self._list_files((str(sack),), Delimiter="/"): - for p in response.get('CommonPrefixes', ()): - metrics.add(p['Prefix'].split('/', 2)[1]) - return metrics - def _list_measure_files(self, path_items): files = set() for response in self._list_files(path_items): diff --git a/gnocchi/incoming/swift.py b/gnocchi/incoming/swift.py index 06500f9d..4445eee4 100644 --- a/gnocchi/incoming/swift.py +++ b/gnocchi/incoming/swift.py @@ -78,11 +78,6 @@ class 
SwiftStorage(incoming.IncomingDriver): return (nb_metrics or len(metric_details), measures, metric_details if details else None) - def list_metric_with_measures_to_process(self, sack): - headers, files = self.swift.get_container( - str(sack), delimiter='/', full_listing=True) - return set(f['subdir'][:-1] for f in files if 'subdir' in f) - def _list_measure_files_for_metric(self, sack, metric_id): headers, files = self.swift.get_container( str(sack), path=six.text_type(metric_id), diff --git a/gnocchi/rest/api.py b/gnocchi/rest/api.py index bfd42c5e..42f1e764 100644 --- a/gnocchi/rest/api.py +++ b/gnocchi/rest/api.py @@ -529,8 +529,8 @@ class MetricController(rest.RestController): if (strtobool("refresh", refresh) and pecan.request.incoming.has_unprocessed(self.metric.id)): try: - pecan.request.chef.refresh_metric( - self.metric, + pecan.request.chef.refresh_metrics( + [self.metric], pecan.request.conf.api.operation_timeout) except chef.SackAlreadyLocked: abort(503, 'Unable to refresh metric: %s. Metric is locked. ' @@ -1902,8 +1902,8 @@ class AggregationController(rest.RestController): if pecan.request.incoming.has_unprocessed(m.id)] for m in metrics_to_update: try: - pecan.request.chef.refresh_metric( - m, pecan.request.conf.api.operation_timeout) + pecan.request.chef.refresh_metrics( + [m], pecan.request.conf.api.operation_timeout) except chef.SackAlreadyLocked: abort(503, 'Unable to refresh metric: %s. ' 'Metric is locked. 
' diff --git a/gnocchi/tests/base.py b/gnocchi/tests/base.py index f0d66b4e..d0214b2e 100644 --- a/gnocchi/tests/base.py +++ b/gnocchi/tests/base.py @@ -366,7 +366,7 @@ class TestCase(BaseTestCase): ) self.storage.upgrade() - self.incoming.upgrade(128) + self.incoming.upgrade(3) self.chef = chef.Chef( self.coord, self.incoming, self.index, self.storage) @@ -390,6 +390,7 @@ class TestCase(BaseTestCase): def trigger_processing(self, metrics=None): if metrics is None: self.chef.process_new_measures_for_sack( - self.incoming.sack_for_metric(self.metric.id), sync=True) + self.incoming.sack_for_metric(self.metric.id), + blocking=True, sync=True) else: - self.chef.process_new_measures(metrics, sync=True) + self.chef.refresh_metrics(metrics, timeout=True, sync=True) diff --git a/gnocchi/tests/functional/fixtures.py b/gnocchi/tests/functional/fixtures.py index a8e4d5e3..56ac32a3 100644 --- a/gnocchi/tests/functional/fixtures.py +++ b/gnocchi/tests/functional/fixtures.py @@ -257,11 +257,8 @@ class MetricdThread(threading.Thread): def run(self): while self.flag: - metrics = utils.list_all_incoming_metrics(self.chef.incoming) - metrics = self.chef.index.list_metrics( - attribute_filter={"in": {"id": metrics}}) - for metric in metrics: - self.chef.refresh_metric(metric, timeout=None) + for sack in self.chef.incoming.iter_sacks(): + self.chef.process_new_measures_for_sack(sack, blocking=True) time.sleep(0.1) def stop(self): diff --git a/gnocchi/tests/test_aggregates.py b/gnocchi/tests/test_aggregates.py index 7b7dc919..093361a7 100644 --- a/gnocchi/tests/test_aggregates.py +++ b/gnocchi/tests/test_aggregates.py @@ -1031,7 +1031,7 @@ class CrossMetricAggregated(base.TestCase): incoming.Measure(datetime64(2014, 1, 1, 12, 10, 31), 4), incoming.Measure(datetime64(2014, 1, 1, 12, 13, 10), 4), ]) - self.trigger_processing([str(self.metric.id), str(metric2.id)]) + self.trigger_processing([self.metric, metric2]) values = processor.get_measures( self.storage, @@ -1190,7 +1190,7 @@ class 
CrossMetricAggregated(base.TestCase): incoming.Measure(datetime64(2014, 1, 1, 12, 9, 31), 6), incoming.Measure(datetime64(2014, 1, 1, 12, 13, 10), 2), ]) - self.trigger_processing([str(self.metric.id), str(metric2.id)]) + self.trigger_processing([self.metric, metric2]) values = processor.get_measures( self.storage, @@ -1228,7 +1228,7 @@ class CrossMetricAggregated(base.TestCase): incoming.Measure(datetime64(2014, 1, 1, 14, 2, 31), 4), incoming.Measure(datetime64(2014, 1, 1, 15, 3, 10), 4), ]) - self.trigger_processing([str(self.metric.id), str(metric2.id)]) + self.trigger_processing([self.metric, metric2]) values = processor.get_measures( self.storage, @@ -1265,7 +1265,7 @@ class CrossMetricAggregated(base.TestCase): incoming.Measure(datetime64(2014, 1, 1, 14, 2, 31), 4), incoming.Measure(datetime64(2014, 1, 1, 15, 3, 10), 4), ]) - self.trigger_processing([str(self.metric.id), str(metric2.id)]) + self.trigger_processing([self.metric, metric2]) values = processor.get_measures( self.storage, @@ -1302,7 +1302,7 @@ class CrossMetricAggregated(base.TestCase): incoming.Measure(datetime64(2014, 1, 1, 14, 2, 31), 4), incoming.Measure(datetime64(2014, 1, 1, 15, 3, 10), 4), ]) - self.trigger_processing([str(self.metric.id), str(metric2.id)]) + self.trigger_processing([self.metric, metric2]) values = processor.get_measures( self.storage, @@ -1341,7 +1341,7 @@ class CrossMetricAggregated(base.TestCase): incoming.Measure(datetime64(2014, 1, 1, 12, 10, 31), 4), incoming.Measure(datetime64(2014, 1, 1, 12, 15, 10), 4), ]) - self.trigger_processing([str(self.metric.id), str(metric2.id)]) + self.trigger_processing([self.metric, metric2]) values = processor.get_measures( self.storage, @@ -1385,7 +1385,7 @@ class CrossMetricAggregated(base.TestCase): incoming.Measure(datetime64(2014, 1, 1, 14, 2, 31), 4), incoming.Measure(datetime64(2014, 1, 1, 15, 3, 10), 4), ]) - self.trigger_processing([str(self.metric.id), str(metric2.id)]) + self.trigger_processing([self.metric, metric2]) values 
= processor.get_measures( self.storage, @@ -1414,7 +1414,7 @@ class CrossMetricAggregated(base.TestCase): incoming.Measure(datetime64(2014, 1, 1, 14, 2, 31), 4), incoming.Measure(datetime64(2014, 1, 1, 15, 3, 45), 44), ]) - self.trigger_processing([str(self.metric.id)]) + self.trigger_processing() values = processor.get_measures( self.storage, [processor.MetricReference(self.metric, "mean")], @@ -1441,7 +1441,7 @@ class CrossMetricAggregated(base.TestCase): incoming.Measure(datetime64(2014, 1, 1, 14, 2, 31), 4), incoming.Measure(datetime64(2014, 1, 1, 15, 3, 45), 44), ]) - self.trigger_processing([str(self.metric.id)]) + self.trigger_processing() values = processor.get_measures( self.storage, [processor.MetricReference(self.metric, "mean")], @@ -1473,7 +1473,7 @@ class CrossMetricAggregated(base.TestCase): incoming.Measure(datetime64(2014, 1, 1, 14, 2, 31), 4), incoming.Measure(datetime64(2014, 1, 1, 15, 3, 10), 4), ]) - self.trigger_processing([str(self.metric.id), str(metric2.id)]) + self.trigger_processing([self.metric, metric2]) values = processor.get_measures( self.storage, @@ -1511,7 +1511,7 @@ class CrossMetricAggregated(base.TestCase): incoming.Measure(datetime64(2014, 1, 1, 14, 2, 31), 4), incoming.Measure(datetime64(2014, 1, 1, 15, 3, 10), 4), ]) - self.trigger_processing([str(self.metric.id), str(metric2.id)]) + self.trigger_processing([self.metric, metric2]) values = processor.get_measures( self.storage, @@ -1557,7 +1557,7 @@ class CrossMetricAggregated(base.TestCase): incoming.Measure(datetime64(2014, 1, 1, 14, 2, 31), 4), incoming.Measure(datetime64(2014, 1, 1, 15, 3, 10), -4), ]) - self.trigger_processing([str(self.metric.id), str(metric2.id)]) + self.trigger_processing([self.metric, metric2]) values = processor.get_measures( self.storage, diff --git a/gnocchi/tests/test_rest.py b/gnocchi/tests/test_rest.py index d5c6df29..01f26d90 100644 --- a/gnocchi/tests/test_rest.py +++ b/gnocchi/tests/test_rest.py @@ -37,7 +37,6 @@ from gnocchi import 
archive_policy from gnocchi.rest import api from gnocchi.rest import app from gnocchi.tests import base as tests_base -from gnocchi.tests import utils as tests_utils from gnocchi import utils @@ -127,8 +126,9 @@ class TestingApp(webtest.TestApp): elif self.auth_mode == "remoteuser": req.remote_user = self.user response = super(TestingApp, self).do_request(req, *args, **kwargs) - metrics = tests_utils.list_all_incoming_metrics(self.chef.incoming) - self.chef.process_new_measures(metrics, sync=True) + for sack in self.chef.incoming.iter_sacks(): + self.chef.process_new_measures_for_sack( + sack, blocking=True, sync=True) return response diff --git a/gnocchi/tests/test_statsd.py b/gnocchi/tests/test_statsd.py index 3ddd2173..2b4e8ea1 100644 --- a/gnocchi/tests/test_statsd.py +++ b/gnocchi/tests/test_statsd.py @@ -73,7 +73,7 @@ class TestStatsd(tests_base.TestCase): metric = r.get_metric(metric_key) - self.chef.process_new_measures([str(metric.id)], sync=True) + self.chef.refresh_metrics([metric], sync=True) measures = self.storage.get_measures(metric, self.aggregations) self.assertEqual({"mean": [ @@ -92,7 +92,7 @@ class TestStatsd(tests_base.TestCase): ("127.0.0.1", 12345)) self.stats.flush() - self.chef.process_new_measures([str(metric.id)], sync=True) + self.chef.refresh_metrics([metric], sync=True) measures = self.storage.get_measures(metric, self.aggregations) self.assertEqual({"mean": [ @@ -124,7 +124,7 @@ class TestStatsd(tests_base.TestCase): metric = r.get_metric(metric_key) self.assertIsNotNone(metric) - self.chef.process_new_measures([str(metric.id)], sync=True) + self.chef.refresh_metrics([metric], sync=True) measures = self.storage.get_measures(metric, self.aggregations) self.assertEqual({"mean": [ @@ -142,7 +142,7 @@ class TestStatsd(tests_base.TestCase): ("127.0.0.1", 12345)) self.stats.flush() - self.chef.process_new_measures([str(metric.id)], sync=True) + self.chef.refresh_metrics([metric], sync=True) measures = self.storage.get_measures(metric, 
self.aggregations) self.assertEqual({"mean": [ diff --git a/gnocchi/tests/test_storage.py b/gnocchi/tests/test_storage.py index 9bc6a546..ff7b0b35 100644 --- a/gnocchi/tests/test_storage.py +++ b/gnocchi/tests/test_storage.py @@ -31,7 +31,6 @@ from gnocchi.storage import redis from gnocchi.storage import s3 from gnocchi.storage import swift from gnocchi.tests import base as tests_base -from gnocchi.tests import utils as tests_utils def datetime64(*args): @@ -167,23 +166,6 @@ class TestStorageDriver(tests_base.TestCase): self.assertIn((datetime64(2014, 1, 1, 12), numpy.timedelta64(5, 'm'), 5.0), m) - def test_list_metric_with_measures_to_process(self): - metrics = tests_utils.list_all_incoming_metrics(self.incoming) - self.assertEqual(set(), metrics) - self.incoming.add_measures(self.metric.id, [ - incoming.Measure(datetime64(2014, 1, 1, 12, 0, 1), 69), - ]) - m2, __ = self._create_metric('medium') - self.incoming.add_measures(m2.id, [ - incoming.Measure(datetime64(2014, 1, 1, 12, 0, 1), 69), - ]) - metrics = tests_utils.list_all_incoming_metrics(self.incoming) - m_list = [str(self.metric.id), str(m2.id)] - self.assertEqual(set(m_list), metrics) - self.trigger_processing(m_list) - metrics = tests_utils.list_all_incoming_metrics(self.incoming) - self.assertEqual(set([]), metrics) - def test_delete_nonempty_metric(self): self.incoming.add_measures(self.metric.id, [ incoming.Measure(datetime64(2014, 1, 1, 12, 0, 1), 69), @@ -242,7 +224,7 @@ class TestStorageDriver(tests_base.TestCase): self.incoming.add_measures(self.metric.id, [ incoming.Measure(datetime64(2014, 1, 1, 12, i, j), 100) for i in six.moves.range(0, 60) for j in six.moves.range(0, 60)]) - self.trigger_processing([str(self.metric.id)]) + self.trigger_processing([self.metric]) aggregations = self.metric.archive_policy.aggregations @@ -258,7 +240,7 @@ class TestStorageDriver(tests_base.TestCase): self.incoming.add_measures(m.id, [ incoming.Measure(datetime64(2014, 1, 1, 12, i, j), 100) for i in 
six.moves.range(0, 60) for j in six.moves.range(0, 60)]) - self.trigger_processing([str(m.id)]) + self.trigger_processing([m]) aggregations = ( m.archive_policy.get_aggregations_for_method("mean") @@ -274,7 +256,7 @@ class TestStorageDriver(tests_base.TestCase): incoming.Measure(datetime64(2014, 1, 6, i, j, 0), 100) for i in six.moves.range(2) for j in six.moves.range(0, 60, 2)] self.incoming.add_measures(m.id, measures) - self.trigger_processing([str(m.id)]) + self.trigger_processing([m]) # add measure to end, in same aggregate time as last point. self.incoming.add_measures(m.id, [ @@ -282,13 +264,13 @@ class TestStorageDriver(tests_base.TestCase): with mock.patch.object(self.storage, '_store_metric_splits') as c: # should only resample last aggregate - self.trigger_processing([str(m.id)]) + self.trigger_processing([m]) count = 0 for call in c.mock_calls: # policy is 60 points and split is 48. should only update 2nd half args = call[1] for metric, key_agg_data_offset in six.iteritems(args[0]): - if metric == m_sql: + if metric.id == m_sql.id: for key, aggregation, data, offset in key_agg_data_offset: if (key.sampling == numpy.timedelta64(1, 'm') and aggregation.method == "mean"): @@ -301,14 +283,14 @@ class TestStorageDriver(tests_base.TestCase): incoming.Measure(datetime64(2014, 1, 6, i, j, 0), 100) for i in six.moves.range(2) for j in six.moves.range(0, 60, 2)] self.incoming.add_measures(m.id, measures) - self.trigger_processing([str(m.id)]) + self.trigger_processing([m]) # add measure to end, in same aggregate time as last point. 
new_point = datetime64(2014, 1, 6, 1, 58, 1) self.incoming.add_measures(m.id, [incoming.Measure(new_point, 100)]) with mock.patch.object(self.incoming, 'add_measures') as c: - self.trigger_processing([str(m.id)]) + self.trigger_processing([m]) for __, args, __ in c.mock_calls: self.assertEqual( list(args[3])[0][0], carbonara.round_timestamp( @@ -1062,7 +1044,7 @@ class TestStorageDriver(tests_base.TestCase): incoming.Measure(datetime64(2014, 1, 1, 12, 9, 31), 6), incoming.Measure(datetime64(2014, 1, 1, 12, 13, 10), 2), ]) - self.trigger_processing([str(self.metric.id), str(metric2.id)]) + self.trigger_processing([self.metric, metric2]) self.assertEqual( [ @@ -1117,7 +1099,7 @@ class TestStorageDriver(tests_base.TestCase): incoming.Measure(datetime64(2014, 1, 1, 12, 0, 5), 1), incoming.Measure(datetime64(2014, 1, 1, 12, 0, 10), 1), ]) - self.trigger_processing([str(m.id)]) + self.trigger_processing([m]) aggregation = m.archive_policy.get_aggregation( "mean", numpy.timedelta64(5, 's')) @@ -1134,7 +1116,7 @@ class TestStorageDriver(tests_base.TestCase): self.incoming.add_measures(m.id, [ incoming.Measure(datetime64(2014, 1, 1, 12, 0, 15), 1), ]) - self.trigger_processing([str(m.id)]) + self.trigger_processing([m]) self.assertEqual({"mean": [ (datetime64(2014, 1, 1, 12, 0, 5), numpy.timedelta64(5, 's'), 1), (datetime64(2014, 1, 1, 12, 0, 10), numpy.timedelta64(5, 's'), 1), diff --git a/gnocchi/tests/utils.py b/gnocchi/tests/utils.py index 3d197cd8..a9773ce9 100644 --- a/gnocchi/tests/utils.py +++ b/gnocchi/tests/utils.py @@ -17,11 +17,6 @@ from oslo_policy import opts as policy_opts from gnocchi import opts -def list_all_incoming_metrics(incoming): - return set.union(*[incoming.list_metric_with_measures_to_process(sack) - for sack in incoming.iter_sacks()]) - - def prepare_conf(): conf = cfg.ConfigOpts() -- GitLab From 60a46fd9393ab4520e9133e0141ac46e88e315eb Mon Sep 17 00:00:00 2001 From: gord chung Date: Sun, 1 Apr 2018 22:21:58 -0400 Subject: [PATCH 1335/1483] don't 
do work if no measures bail early if we know there's no work to be done. --- gnocchi/chef.py | 18 +++++++++++------- 1 file changed, 11 insertions(+), 7 deletions(-) diff --git a/gnocchi/chef.py b/gnocchi/chef.py index e26bc487..badceea5 100644 --- a/gnocchi/chef.py +++ b/gnocchi/chef.py @@ -127,13 +127,14 @@ class Chef(object): LOG.debug("Processing measures for %d metrics", len(metrics)) with self.incoming.process_measure_for_metrics( metrics) as metrics_and_measures: - self.storage.add_measures_to_metrics({ - metrics_by_id[metric]: measures - for metric, measures - in six.iteritems(metrics_and_measures) - }) - LOG.debug("Measures for %d metrics processed", - len(metrics)) + if metrics_and_measures: + self.storage.add_measures_to_metrics({ + metrics_by_id[metric]: measures + for metric, measures + in six.iteritems(metrics_and_measures) + }) + LOG.debug("Measures for %d metrics processed", + len(metrics)) except Exception: if sync: raise @@ -162,6 +163,9 @@ class Chef(object): with self.incoming.process_measures_for_sack(sack) as measures: # process only active metrics. deleted metrics with unprocessed # measures will be skipped until cleaned by janitor. 
+ if not measures: + return 0 + metrics = self.index.list_metrics( attribute_filter={ "in": {"id": measures.keys()} -- GitLab From 20efb4a9d96823f7c75c058a888d844700d858d1 Mon Sep 17 00:00:00 2001 From: gord chung Date: Sat, 7 Apr 2018 17:44:09 -0400 Subject: [PATCH 1336/1483] ignore webob 1.8.0, it's broke see: https://github.com/Pylons/webob/issues/355 Closes: #852 --- setup.cfg | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.cfg b/setup.cfg index 9c06432c..7e95c9c7 100644 --- a/setup.cfg +++ b/setup.cfg @@ -42,7 +42,7 @@ install_requires = werkzeug trollius; python_version < '3.4' tenacity>=4.6.0 - WebOb>=1.4.1 + WebOb>=1.4.1,!=1.8.0 Paste PasteDeploy monotonic -- GitLab From e003c37ec63f90db293bf7fbefb7f356abeeb832 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Tue, 10 Apr 2018 11:05:48 +0200 Subject: [PATCH 1337/1483] api: remove parameter passing via Accept header See https://github.com/Pylons/webob/issues/355 --- doc/source/rest.j2 | 4 -- doc/source/rest.yaml | 16 ++----- gnocchi/rest/aggregates/api.py | 2 +- gnocchi/rest/api.py | 47 ++++--------------- gnocchi/tests/functional/gabbits/history.yaml | 12 ++--- .../tests/functional/gabbits/resource.yaml | 20 -------- gnocchi/tests/functional/gabbits/search.yaml | 22 --------- gnocchi/tests/test_rest.py | 30 ++---------- ...ons-in-accept-header-7e5e074d8fccfb0f.yaml | 7 +++ setup.cfg | 2 +- tox.ini | 1 + 11 files changed, 30 insertions(+), 133 deletions(-) create mode 100644 releasenotes/notes/remove-options-in-accept-header-7e5e074d8fccfb0f.yaml diff --git a/doc/source/rest.j2 b/doc/source/rest.j2 index 41cfcbe3..fe2fc164 100644 --- a/doc/source/rest.j2 +++ b/doc/source/rest.j2 @@ -696,10 +696,6 @@ It's possible to search for old revisions of |resources| in the same ways: {{ scenarios['search-resource-history']['doc'] }} -It is also possible to send the *history* parameter in the *Accept* header: - -{{ scenarios['search-resource-history-in-accept']['doc'] }} - Time range `````````` 
diff --git a/doc/source/rest.yaml b/doc/source/rest.yaml index 1209e46f..32666e59 100644 --- a/doc/source/rest.yaml +++ b/doc/source/rest.yaml @@ -534,19 +534,11 @@ {"=": {"id": "{{ scenarios['create-resource-instance']['response'].json['id'] }}"}} -- name: search-resource-history-in-accept - request: | - POST /v1/search/resource/instance HTTP/1.1 - Content-Type: application/json - Accept: application/json; history=true - - {"=": {"id": "{{ scenarios['create-resource-instance']['response'].json['id'] }}"}} - - name: search-resource-history-partial request: | - POST /v1/search/resource/instance HTTP/1.1 + POST /v1/search/resource/instance?history=true HTTP/1.1 Content-Type: application/json - Accept: application/json; history=true + Accept: application/json {"and": [ {"=": {"host": "compute1"}}, @@ -558,9 +550,9 @@ - name: search-resource-history-partial-filter filter: host = 'compute1' and revision_start >= "{{ scenarios['get-instance']['response'].json['revision_start'] }}" and (revision_end <= "{{ scenarios['get-patched-instance']['response'].json['revision_start'] }}" or revision_end == null) request: | - POST /v1/search/resource/instance?filter={{ scenarios['search-resource-history-partial-filter']['filter'] | urlencode }} HTTP/1.1 + POST /v1/search/resource/instance?history=true&filter={{ scenarios['search-resource-history-partial-filter']['filter'] | urlencode }} HTTP/1.1 Content-Type: application/json - Accept: application/json; history=true + Accept: application/json {"and": [ {"=": {"host": "compute1"}}, diff --git a/gnocchi/rest/aggregates/api.py b/gnocchi/rest/aggregates/api.py index 6a411377..ef0b7a7e 100644 --- a/gnocchi/rest/aggregates/api.py +++ b/gnocchi/rest/aggregates/api.py @@ -196,7 +196,7 @@ class AggregatesController(rest.RestController): @pecan.expose("json") def post(self, start=None, stop=None, granularity=None, needed_overlap=None, fill=None, groupby=None, **kwargs): - details = api.get_details(kwargs) + details = 
api.get_bool_param('details', kwargs) if fill is None and needed_overlap is None: fill = "dropna" diff --git a/gnocchi/rest/api.py b/gnocchi/rest/api.py index 42f1e764..3ef19c7b 100644 --- a/gnocchi/rest/api.py +++ b/gnocchi/rest/api.py @@ -181,37 +181,8 @@ def Timespan(value): raise voluptuous.Invalid(e) -def get_header_option(name, params): - type, options = werkzeug.http.parse_options_header( - pecan.request.headers.get('Accept')) - return strtobool('Accept header' if name in options else name, - options.get(name, params.get(name, 'false'))) - - -def get_header_option_array(name, params): - type, options = werkzeug.http.parse_options_header( - pecan.request.headers.get('Accept')) - header_option = options.get(name, None) - post_option = params.get(name, None) - - if post_option: - return arg_to_list(post_option) - elif header_option: - return header_option.split('+') - else: - return None - - -def get_history(params): - return get_header_option('history', params) - - -def get_details(params): - return get_header_option('details', params) - - -def get_json_attrs(params): - return get_header_option_array('attrs', params) +def get_bool_param(name, params, default='false'): + return strtobool(name, params.get(name, default)) def strtobool(varname, v): @@ -796,7 +767,7 @@ class ResourceHistoryController(rest.RestController): @pecan.expose('json') def get(self, **kwargs): - details = get_details(kwargs) + details = get_bool_param('details', kwargs) pagination_opts = get_pagination_options( kwargs, RESOURCE_DEFAULT_PAGINATION) @@ -1144,11 +1115,11 @@ class ResourcesController(rest.RestController): @pecan.expose('json') def get_all(self, **kwargs): - details = get_details(kwargs) - history = get_history(kwargs) + details = get_bool_param('details', kwargs) + history = get_bool_param('history', kwargs) pagination_opts = get_pagination_options( kwargs, RESOURCE_DEFAULT_PAGINATION) - json_attrs = get_json_attrs(kwargs) + json_attrs = arg_to_list(kwargs.get('attrs', None)) 
policy_filter = pecan.request.auth_helper.get_resource_policy_filter( pecan.request, "list resource", self._resource_type) @@ -1377,8 +1348,8 @@ class SearchResourceTypeController(rest.RestController): else: attr_filter = None - details = get_details(kwargs) - history = get_history(kwargs) + details = get_bool_param('details', kwargs) + history = get_bool_param('history', kwargs) pagination_opts = get_pagination_options( kwargs, RESOURCE_DEFAULT_PAGINATION) @@ -1410,7 +1381,7 @@ class SearchResourceTypeController(rest.RestController): @pecan.expose('json') def post(self, **kwargs): - json_attrs = get_json_attrs(kwargs) + json_attrs = arg_to_list(kwargs.get('attrs', None)) try: return [r.jsonify(json_attrs) for r in self._search(**kwargs)] except indexer.IndexerException as e: diff --git a/gnocchi/tests/functional/gabbits/history.yaml b/gnocchi/tests/functional/gabbits/history.yaml index f7503d26..ad2de03e 100644 --- a/gnocchi/tests/functional/gabbits/history.yaml +++ b/gnocchi/tests/functional/gabbits/history.yaml @@ -73,9 +73,7 @@ tests: $[0].project_id: fe20a931-1012-4cc6-addc-39556ec60907 - name: list all resources with history - GET: $LAST_URL - request_headers: - accept: application/json; details=True; history=True + GET: $LAST_URL?details=true&history=true response_json_paths: $.`len`: 3 $[0].id: f93450f2-d8a5-4d67-9985-02511241e7d1 @@ -97,9 +95,7 @@ tests: status: 200 - name: list all resources with history no change after metrics update - GET: /v1/resource/generic - request_headers: - accept: application/json; details=True; history=True + GET: /v1/resource/generic?details=true&history=true response_json_paths: $.`len`: 3 $[0].id: f93450f2-d8a5-4d67-9985-02511241e7d1 @@ -123,9 +119,7 @@ tests: $[/name][1].resource_id: f93450f2-d8a5-4d67-9985-02511241e7d1 - name: list all resources with history no change after metrics creation - GET: /v1/resource/generic - request_headers: - accept: application/json; details=True; history=True + GET: 
/v1/resource/generic?history=true&details=true response_json_paths: $.`len`: 3 $[0].id: f93450f2-d8a5-4d67-9985-02511241e7d1 diff --git a/gnocchi/tests/functional/gabbits/resource.yaml b/gnocchi/tests/functional/gabbits/resource.yaml index ad23d462..424ced1d 100644 --- a/gnocchi/tests/functional/gabbits/resource.yaml +++ b/gnocchi/tests/functional/gabbits/resource.yaml @@ -378,26 +378,6 @@ tests: $[0].`len`: 13 $[1].`len`: 13 - - name: list generic resources with attrs header - GET: /v1/resource/generic - request_headers: - Accept: "application/json; attrs=id+started_at+user_id" - response_json_paths: - $[0].`len`: 3 - $[0].id: $RESPONSE['$[0].id'] - $[0].started_at: $RESPONSE['$[0].started_at'] - $[0].user_id: $RESPONSE['$[0].user_id'] - $[1].`len`: 3 - - - name: list generic resources with invalid attrs header - GET: /v1/resource/generic - request_headers: - Accept: "application/json; attrs=id+foo+bar" - response_json_paths: - $[0].`len`: 1 - $[0].id: $RESPONSE['$[0].id'] - $[1].`len`: 1 - - name: list generic resources without attrs header GET: /v1/resource/generic request_headers: diff --git a/gnocchi/tests/functional/gabbits/search.yaml b/gnocchi/tests/functional/gabbits/search.yaml index b4a56fce..ecfa2eaf 100644 --- a/gnocchi/tests/functional/gabbits/search.yaml +++ b/gnocchi/tests/functional/gabbits/search.yaml @@ -238,28 +238,6 @@ tests: $[0].`len`: 13 $[1].`len`: 13 - - name: search all resource with attrs header - POST: /v1/search/resource/generic - data: {} - request_headers: - Accept: "application/json; attrs=id+started_at+user_id" - response_json_paths: - $[0].`len`: 3 - $[0].id: $RESPONSE['$[0].id'] - $[0].started_at: $RESPONSE['$[0].started_at'] - $[0].user_id: $RESPONSE['$[0].user_id'] - $[1].`len`: 3 - - - name: search all resource with invalid attrs header - POST: /v1/search/resource/generic - data: {} - request_headers: - Accept: "application/json; attrs=id+foo+bar" - response_json_paths: - $[0].`len`: 1 - $[0].id: $RESPONSE['$[0].id'] - 
$[1].`len`: 1 - - name: search all resource without attrs header POST: /v1/search/resource/generic data: {} diff --git a/gnocchi/tests/test_rest.py b/gnocchi/tests/test_rest.py index 01f26d90..e2ece5a9 100644 --- a/gnocchi/tests/test_rest.py +++ b/gnocchi/tests/test_rest.py @@ -1150,8 +1150,8 @@ class ResourceTest(RestTest): # Check the history history = self.app.post_json( - "/v1/search/resource/" + self.resource_type, - headers={"Accept": "application/json; history=true"}, + "/v1/search/resource/" + self.resource_type + "?history=true", + headers={"Accept": "application/json"}, params={"=": {"id": result['id']}}, status=200) history = json.loads(history.text) @@ -1338,8 +1338,8 @@ class ResourceTest(RestTest): self.assertEqual(result, resources[0]) resources = self.app.post_json( - "/v1/search/resource/" + self.resource_type, - headers={"Accept": "application/json; history=true"}, + "/v1/search/resource/" + self.resource_type + "?history=true", + headers={"Accept": "application/json"}, params={"and": [ {"=": {"id": result['id']}}, {"or": [{">=": {"revision_end": '2014-01-03T02:02:02'}}, @@ -1492,16 +1492,6 @@ class ResourceTest(RestTest): b"Unable to parse `details': invalid truth value", result.body) - def test_list_resources_with_bad_details_in_accept(self): - result = self.app.get("/v1/resource/generic", - headers={ - "Accept": "application/json; details=foo", - }, - status=400) - self.assertIn( - b"Unable to parse `Accept header': invalid truth value", - result.body) - def _do_test_list_resources_with_detail(self, request): # NOTE(jd) So this test is a bit fuzzy right now as we uses the same # database for all tests and the tests are running concurrently, but @@ -1594,22 +1584,10 @@ class ResourceTest(RestTest): self._do_test_list_resources_with_detail( lambda: self.app.get("/v1/resource/generic?details=true")) - def test_list_resources_with_details_via_accept(self): - self._do_test_list_resources_with_detail( - lambda: self.app.get( - "/v1/resource/generic", 
- headers={"Accept": "application/json; details=true"})) - def test_search_resources_with_details(self): self._do_test_list_resources_with_detail( lambda: self.app.post("/v1/search/resource/generic?details=true")) - def test_search_resources_with_details_via_accept(self): - self._do_test_list_resources_with_detail( - lambda: self.app.post( - "/v1/search/resource/generic", - headers={"Accept": "application/json; details=true"})) - def test_get_res_named_metric_measure_aggregated_policies_invalid(self): result = self.app.post_json("/v1/metric", params={"archive_policy_name": "low"}) diff --git a/releasenotes/notes/remove-options-in-accept-header-7e5e074d8fccfb0f.yaml b/releasenotes/notes/remove-options-in-accept-header-7e5e074d8fccfb0f.yaml new file mode 100644 index 00000000..b8db5e95 --- /dev/null +++ b/releasenotes/notes/remove-options-in-accept-header-7e5e074d8fccfb0f.yaml @@ -0,0 +1,7 @@ +--- +upgrade: + - | + The API offered several features that accepted option via the the use of + the `Accept` header. This usage was not compatible with the RFC7231 and has + therefore been removed. This created compatibility problem with WebOb 1.8.0 + and above. diff --git a/setup.cfg b/setup.cfg index 7e95c9c7..9c06432c 100644 --- a/setup.cfg +++ b/setup.cfg @@ -42,7 +42,7 @@ install_requires = werkzeug trollius; python_version < '3.4' tenacity>=4.6.0 - WebOb>=1.4.1,!=1.8.0 + WebOb>=1.4.1 Paste PasteDeploy monotonic diff --git a/tox.ini b/tox.ini index f7339dc4..e66a0cf7 100644 --- a/tox.ini +++ b/tox.ini @@ -150,6 +150,7 @@ deps = {[testenv:docs]deps} sphinxcontrib-versioning # for < 4.3 doc pbr + WebOb<1.8 # for <= 4.2 doc scipy # for <= 4.1 doc -- GitLab From 074e717575f313907d1e097228cb24db35061581 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Wed, 28 Mar 2018 12:44:35 +0200 Subject: [PATCH 1338/1483] tests: replace refresh_metrics() with trigger_processing in statsd The current tests can fail just because the timeout it not set to None. 
The base method trigger_processing is meant to be used just for that! --- gnocchi/tests/test_statsd.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/gnocchi/tests/test_statsd.py b/gnocchi/tests/test_statsd.py index 2b4e8ea1..43c6ed50 100644 --- a/gnocchi/tests/test_statsd.py +++ b/gnocchi/tests/test_statsd.py @@ -73,7 +73,7 @@ class TestStatsd(tests_base.TestCase): metric = r.get_metric(metric_key) - self.chef.refresh_metrics([metric], sync=True) + self.trigger_processing([metric]) measures = self.storage.get_measures(metric, self.aggregations) self.assertEqual({"mean": [ @@ -92,7 +92,7 @@ class TestStatsd(tests_base.TestCase): ("127.0.0.1", 12345)) self.stats.flush() - self.chef.refresh_metrics([metric], sync=True) + self.trigger_processing([metric]) measures = self.storage.get_measures(metric, self.aggregations) self.assertEqual({"mean": [ @@ -124,7 +124,7 @@ class TestStatsd(tests_base.TestCase): metric = r.get_metric(metric_key) self.assertIsNotNone(metric) - self.chef.refresh_metrics([metric], sync=True) + self.trigger_processing([metric]) measures = self.storage.get_measures(metric, self.aggregations) self.assertEqual({"mean": [ @@ -142,7 +142,7 @@ class TestStatsd(tests_base.TestCase): ("127.0.0.1", 12345)) self.stats.flush() - self.chef.refresh_metrics([metric], sync=True) + self.trigger_processing([metric]) measures = self.storage.get_measures(metric, self.aggregations) self.assertEqual({"mean": [ -- GitLab From e60f37d411a23488696c52743f81472550d16ed8 Mon Sep 17 00:00:00 2001 From: gord chung Date: Sat, 7 Apr 2018 16:59:37 -0400 Subject: [PATCH 1339/1483] speed up iteration over ts MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit i have no idea why but: In [1]: import numpy In [2]: dates = numpy.array([numpy.datetime64('2018-01-01') + numpy.timedelta64(1, 'D') * i for i in range(1000)]) In [3]: values = numpy.random.rand(1000) In [4]: arr = numpy.empty(len(values), dtype=[('dates', ' 
Date: Fri, 13 Apr 2018 09:03:26 +0200 Subject: [PATCH 1340/1483] tests: refresh metric instead of polling Some times the fake metricd thread is not scheduled on time, to reduce failing tests when this occurs, this changes use refresh=true API with available. --- .../tests/functional/gabbits/aggregation.yaml | 30 ++++--------------- gnocchi/tests/functional/gabbits/async.yaml | 8 +---- .../gabbits/metric-granularity.yaml | 5 +--- .../gabbits/resource-aggregation.yaml | 20 +++---------- .../tests/functional/gabbits/resource.yaml | 5 +--- .../functional/gabbits/transformedids.yaml | 5 +--- 6 files changed, 14 insertions(+), 59 deletions(-) diff --git a/gnocchi/tests/functional/gabbits/aggregation.yaml b/gnocchi/tests/functional/gabbits/aggregation.yaml index c613fb55..be952b9e 100644 --- a/gnocchi/tests/functional/gabbits/aggregation.yaml +++ b/gnocchi/tests/functional/gabbits/aggregation.yaml @@ -94,20 +94,14 @@ tests: - ['2015-03-06T14:34:12+00:00', 1.0, 7.0] - name: get measure aggregates by granularity - GET: /v1/aggregation/metric?metric=$HISTORY['get metric list'].$RESPONSE['$[0].id']&metric=$HISTORY['get metric list'].$RESPONSE['$[1].id']&granularity=1 - poll: - count: 10 - delay: 1 + GET: /v1/aggregation/metric?metric=$HISTORY['get metric list'].$RESPONSE['$[0].id']&metric=$HISTORY['get metric list'].$RESPONSE['$[1].id']&granularity=1&refresh=true response_json_paths: $: - ['2015-03-06T14:33:57+00:00', 1.0, 23.1] - ['2015-03-06T14:34:12+00:00', 1.0, 7.0] - name: get measure aggregates by granularity with timestamps - GET: /v1/aggregation/metric?metric=$HISTORY['get metric list'].$RESPONSE['$[0].id']&metric=$HISTORY['get metric list'].$RESPONSE['$[1].id']&start=2015-03-06T15:33:57%2B01:00&stop=2015-03-06T15:34:00%2B01:00 - poll: - count: 10 - delay: 1 + GET: /v1/aggregation/metric?metric=$HISTORY['get metric list'].$RESPONSE['$[0].id']&metric=$HISTORY['get metric 
list'].$RESPONSE['$[1].id']&start=2015-03-06T15:33:57%2B01:00&stop=2015-03-06T15:34:00%2B01:00&refresh=true response_json_paths: $: - ['2015-03-06T14:30:00+00:00', 300.0, 15.05] @@ -122,10 +116,7 @@ tests: $.description: Aggregation method 'wtf' at granularity '1.0' for metric $HISTORY['get metric list'].$RESPONSE['$[0].id'] does not exist - name: get measure aggregates and reaggregate - GET: /v1/aggregation/metric?metric=$HISTORY['get metric list'].$RESPONSE['$[0].id']&metric=$HISTORY['get metric list'].$RESPONSE['$[1].id']&reaggregation=min - poll: - count: 10 - delay: 1 + GET: /v1/aggregation/metric?metric=$HISTORY['get metric list'].$RESPONSE['$[0].id']&metric=$HISTORY['get metric list'].$RESPONSE['$[1].id']&reaggregation=min&refresh=true response_json_paths: $: - ['2015-03-06T14:30:00+00:00', 300.0, 2.55] @@ -234,10 +225,7 @@ tests: - ['2015-03-06T14:34:12+00:00', 1.0, 7.0] - name: get measure aggregates by granularity from resources - POST: /v1/aggregation/resource/generic/metric/agg_meter?granularity=1 - poll: - count: 10 - delay: 1 + POST: /v1/aggregation/resource/generic/metric/agg_meter?granularity=1&refresh=true response_json_paths: $: - ['2015-03-06T14:33:57+00:00', 1.0, 23.1] @@ -297,10 +285,7 @@ tests: - A granularity must be specified to resample - name: get measure aggregates by granularity with timestamps from resources - POST: /v1/aggregation/resource/generic/metric/agg_meter?start=2015-03-06T15:33:57%2B01:00&stop=2015-03-06T15:34:00%2B01:00 - poll: - count: 10 - delay: 1 + POST: /v1/aggregation/resource/generic/metric/agg_meter?start=2015-03-06T15:33:57%2B01:00&stop=2015-03-06T15:34:00%2B01:00&refresh=true response_json_paths: $: - ['2015-03-06T14:30:00+00:00', 300.0, 15.05] @@ -321,10 +306,7 @@ tests: - ['2015-03-06T14:33:57+00:00', 1.0, 23.1] - name: get measure aggregates by granularity from resources and reaggregate - POST: /v1/aggregation/resource/generic/metric/agg_meter?granularity=1&reaggregation=min - poll: - count: 10 - delay: 1 + POST: 
/v1/aggregation/resource/generic/metric/agg_meter?granularity=1&reaggregation=min&refresh=true response_json_paths: $: - ['2015-03-06T14:33:57+00:00', 1.0, 3.1] diff --git a/gnocchi/tests/functional/gabbits/async.yaml b/gnocchi/tests/functional/gabbits/async.yaml index 64eb71ed..bc2ae3e8 100644 --- a/gnocchi/tests/functional/gabbits/async.yaml +++ b/gnocchi/tests/functional/gabbits/async.yaml @@ -48,14 +48,8 @@ tests: value: 12 status: 202 -# This requires a poll as the measures are not immediately -# aggregated. - - name: get some measures - GET: /v1/resource/generic/41937416-1644-497d-a0ed-b43d55a2b0ea/metric/some.counter/measures - poll: - count: 50 - delay: .1 + GET: /v1/resource/generic/41937416-1644-497d-a0ed-b43d55a2b0ea/metric/some.counter/measures?refresh=true response_strings: - "2015" response_json_paths: diff --git a/gnocchi/tests/functional/gabbits/metric-granularity.yaml b/gnocchi/tests/functional/gabbits/metric-granularity.yaml index 3cb3d1bb..79afba48 100644 --- a/gnocchi/tests/functional/gabbits/metric-granularity.yaml +++ b/gnocchi/tests/functional/gabbits/metric-granularity.yaml @@ -46,11 +46,8 @@ tests: - Aggregation method 'mean' at granularity '42.0' for metric $RESPONSE['$[0].id'] does not exist - name: get measurements granularity - GET: /v1/metric/$HISTORY['get metric list'].$RESPONSE['$[0].id']/measures?granularity=1 + GET: /v1/metric/$HISTORY['get metric list'].$RESPONSE['$[0].id']/measures?granularity=1&refresh=true status: 200 - poll: - count: 50 - delay: .1 response_json_paths: $: - ["2015-03-06T14:33:57+00:00", 1.0, 43.1] diff --git a/gnocchi/tests/functional/gabbits/resource-aggregation.yaml b/gnocchi/tests/functional/gabbits/resource-aggregation.yaml index 644e82fe..2796cbc0 100644 --- a/gnocchi/tests/functional/gabbits/resource-aggregation.yaml +++ b/gnocchi/tests/functional/gabbits/resource-aggregation.yaml @@ -43,7 +43,7 @@ tests: - name: get aggregation with no data desc: https://github.com/gnocchixyz/gnocchi/issues/69 - POST: 
/v1/aggregation/resource/generic/metric/cpu.util?stop=2012-03-06T00:00:00&fill=0&granularity=300&resample=3600 + POST: /v1/aggregation/resource/generic/metric/cpu.util?stop=2012-03-06T00:00:00&fill=0&granularity=300&resample=3600&refresh=true request_headers: x-user-id: 6c865dd0-7945-4e08-8b27-d0d7f1c2b667 x-project-id: c7f32f1f-c5ef-427a-8ecd-915b219c66e8 @@ -51,9 +51,6 @@ tests: data: =: id: 4ed9c196-4c9f-4ba8-a5be-c9a71a82aac4 - poll: - count: 10 - delay: 1 response_json_paths: $: [] @@ -98,10 +95,7 @@ tests: status: 202 - name: aggregate metric with groupby on project_id with filter - POST: /v1/aggregation/resource/generic/metric/cpu.util?groupby=project_id&filter=user_id%3D%276c865dd0-7945-4e08-8b27-d0d7f1c2b667%27 - poll: - count: 10 - delay: 1 + POST: /v1/aggregation/resource/generic/metric/cpu.util?groupby=project_id&filter=user_id%3D%276c865dd0-7945-4e08-8b27-d0d7f1c2b667%27&refresh=true response_json_paths: $: - measures: @@ -118,13 +112,10 @@ tests: project_id: ee4cfc41-1cdc-4d2f-9a08-f94111d80171 - name: aggregate metric with groupby on project_id - POST: /v1/aggregation/resource/generic/metric/cpu.util?groupby=project_id + POST: /v1/aggregation/resource/generic/metric/cpu.util?groupby=project_id&refresh=true data: =: user_id: 6c865dd0-7945-4e08-8b27-d0d7f1c2b667 - poll: - count: 10 - delay: 1 response_json_paths: $: - measures: @@ -150,13 +141,10 @@ tests: - Invalid groupby attribute - name: aggregate metric with groupby on project_id and user_id - POST: /v1/aggregation/resource/generic/metric/cpu.util?groupby=project_id&groupby=user_id + POST: /v1/aggregation/resource/generic/metric/cpu.util?groupby=project_id&groupby=user_id&refresh=true data: =: user_id: 6c865dd0-7945-4e08-8b27-d0d7f1c2b667 - poll: - count: 10 - delay: 1 response_json_paths: $: - measures: diff --git a/gnocchi/tests/functional/gabbits/resource.yaml b/gnocchi/tests/functional/gabbits/resource.yaml index 424ced1d..525d006f 100644 --- a/gnocchi/tests/functional/gabbits/resource.yaml 
+++ b/gnocchi/tests/functional/gabbits/resource.yaml @@ -510,10 +510,7 @@ tests: content-length: 0 - name: request cpuutil measures again - GET: $LAST_URL - poll: - count: 50 - delay: .1 + GET: $LAST_URL?refresh=true response_json_paths: $[0][0]: "2015-03-06T14:33:57+00:00" $[0][1]: 1.0 diff --git a/gnocchi/tests/functional/gabbits/transformedids.yaml b/gnocchi/tests/functional/gabbits/transformedids.yaml index 08a6238b..d5ae2893 100644 --- a/gnocchi/tests/functional/gabbits/transformedids.yaml +++ b/gnocchi/tests/functional/gabbits/transformedids.yaml @@ -141,10 +141,7 @@ tests: status: 202 - name: list two measures by external resource id - GET: $LAST_URL - poll: - count: 10 - delay: 1 + GET: $LAST_URL?refresh=true response_json_paths: $[0][2]: 43.1 $[1][2]: 12 -- GitLab From df38c791f5a8e9d823f823cf7d67b5d1b0931a46 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Fri, 13 Apr 2018 10:09:04 +0200 Subject: [PATCH 1341/1483] tests: Fix debug logging Even with tests debug disable, each test prints the configuration options. And when enable some logs are missing. This change changes the approach. Now by default, we sent log to a fakelogger. And we print the stdout/stderr/logs when it fails or when tests debugging is enabled. 
--- gnocchi/service.py | 8 +-- gnocchi/tests/base.py | 74 ++++++++++++++-------------- gnocchi/tests/functional/fixtures.py | 8 +-- gnocchi/tests/test_archive_policy.py | 6 ++- 4 files changed, 51 insertions(+), 45 deletions(-) diff --git a/gnocchi/service.py b/gnocchi/service.py index b33d724c..cdcc9ec0 100644 --- a/gnocchi/service.py +++ b/gnocchi/service.py @@ -31,7 +31,8 @@ LOG = daiquiri.getLogger(__name__) def prepare_service(args=None, conf=None, default_config_files=None, - log_to_std=False, logging_level=None): + log_to_std=False, logging_level=None, + skip_log_opts=False): if conf is None: conf = cfg.ConfigOpts() # FIXME(jd) Use the pkg_entry info to register the options of these libs @@ -100,7 +101,8 @@ def prepare_service(args=None, conf=None, conf.set_default("coordination_url", urlparse.urlunparse(parsed)) - LOG.info("Gnocchi version %s", gnocchi.__version__) - conf.log_opt_values(LOG, logging.DEBUG) + if not skip_log_opts: + LOG.info("Gnocchi version %s", gnocchi.__version__) + conf.log_opt_values(LOG, logging.DEBUG) return conf diff --git a/gnocchi/tests/base.py b/gnocchi/tests/base.py index d0214b2e..9a7e8396 100644 --- a/gnocchi/tests/base.py +++ b/gnocchi/tests/base.py @@ -202,6 +202,16 @@ class CaptureOutput(fixtures.Fixture): self.stderr = self.useFixture(self._stderr_fixture).stream self.useFixture(fixtures.MonkeyPatch('sys.stderr', self.stderr)) + self._logs_fixture = fixtures.StringStream('logs') + self.logs = self.useFixture(self._logs_fixture).stream + self.useFixture(fixtures.MonkeyPatch( + 'daiquiri.output.STDERR', daiquiri.output.Stream(self.logs))) + + @property + def output(self): + self.logs.seek(0) + return self.logs.read() + class BaseTestCase(testcase.TestCase): def setUp(self): @@ -268,36 +278,14 @@ class TestCase(BaseTestCase): ), } - @classmethod - def setUpClass(self): - super(TestCase, self).setUpClass() + def setUp(self): + super(TestCase, self).setUp() self.conf = service.prepare_service( [], conf=utils.prepare_conf(), 
default_config_files=[], - logging_level=logging.DEBUG) - - if not os.getenv("GNOCCHI_TEST_DEBUG"): - daiquiri.setup(outputs=[]) - - py_root = os.path.abspath(os.path.join(os.path.dirname(__file__), - '..',)) - self.conf.set_override('paste_config', - os.path.join(py_root, 'rest', 'api-paste.ini'), - group="api") - self.conf.set_override('policy_file', - os.path.join(py_root, 'rest', 'policy.json'), - group="oslo_policy") - - # NOTE(jd) This allows to test S3 on AWS - if not os.getenv("AWS_ACCESS_KEY_ID"): - self.conf.set_override('s3_endpoint_url', - os.getenv("GNOCCHI_STORAGE_HTTP_URL"), - group="storage") - self.conf.set_override('s3_access_key_id', "gnocchi", - group="storage") - self.conf.set_override('s3_secret_access_key', "anythingworks", - group="storage") + logging_level=logging.DEBUG, + skip_log_opts=True) self.index = indexer.get_driver(self.conf) @@ -321,15 +309,28 @@ class TestCase(BaseTestCase): except indexer.ArchivePolicyAlreadyExists: pass + py_root = os.path.abspath(os.path.join(os.path.dirname(__file__), + '..',)) + self.conf.set_override('paste_config', + os.path.join(py_root, 'rest', 'api-paste.ini'), + group="api") + self.conf.set_override('policy_file', + os.path.join(py_root, 'rest', 'policy.json'), + group="oslo_policy") + + # NOTE(jd) This allows to test S3 on AWS + if not os.getenv("AWS_ACCESS_KEY_ID"): + self.conf.set_override('s3_endpoint_url', + os.getenv("GNOCCHI_STORAGE_HTTP_URL"), + group="storage") + self.conf.set_override('s3_access_key_id', "gnocchi", + group="storage") + self.conf.set_override('s3_secret_access_key', "anythingworks", + group="storage") + storage_driver = os.getenv("GNOCCHI_TEST_STORAGE_DRIVER", "file") self.conf.set_override('driver', storage_driver, 'storage') - if storage_driver == 'ceph': - self.conf.set_override('ceph_conffile', - os.getenv("CEPH_CONF"), - 'storage') - def setUp(self): - super(TestCase, self).setUp() if swexc: self.useFixture(fixtures.MockPatch( 'swiftclient.client.Connection', @@ -341,6 
+342,9 @@ class TestCase(BaseTestCase): tempdir.path, 'storage') elif self.conf.storage.driver == 'ceph': + self.conf.set_override('ceph_conffile', + os.getenv("CEPH_CONF"), + 'storage') pool_name = uuid.uuid4().hex with open(os.devnull, 'w') as f: subprocess.call("rados -c %s mkpool %s" % ( @@ -372,13 +376,9 @@ class TestCase(BaseTestCase): def tearDown(self): self.index.disconnect() + self.coord.stop() super(TestCase, self).tearDown() - @classmethod - def tearDownClass(cls): - cls.coord.stop() - super(TestCase, cls).tearDownClass() - def _create_metric(self, archive_policy_name="low"): """Create a metric and return it""" m = indexer.Metric(uuid.uuid4(), diff --git a/gnocchi/tests/functional/fixtures.py b/gnocchi/tests/functional/fixtures.py index 56ac32a3..6bcdf9db 100644 --- a/gnocchi/tests/functional/fixtures.py +++ b/gnocchi/tests/functional/fixtures.py @@ -16,6 +16,7 @@ from __future__ import absolute_import +import logging import os import shutil import subprocess @@ -26,7 +27,6 @@ from unittest import case import uuid import warnings -import daiquiri import fixtures from gabbi import fixture import numpy @@ -101,9 +101,9 @@ class ConfigFixture(fixture.GabbiFixture): else: dcf = [] conf = service.prepare_service([], conf=utils.prepare_conf(), - default_config_files=dcf) - if not os.getenv("GNOCCHI_TEST_DEBUG"): - daiquiri.setup(outputs=[]) + default_config_files=dcf, + logging_level=logging.DEBUG, + skip_log_opts=True) py_root = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..',)) diff --git a/gnocchi/tests/test_archive_policy.py b/gnocchi/tests/test_archive_policy.py index 6ec9d71e..1fc95c88 100644 --- a/gnocchi/tests/test_archive_policy.py +++ b/gnocchi/tests/test_archive_policy.py @@ -11,6 +11,8 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
+import logging + import numpy from gnocchi import archive_policy @@ -30,7 +32,9 @@ class TestArchivePolicy(base.BaseTestCase): def test_aggregation_methods(self): conf = service.prepare_service([], - default_config_files=[]) + default_config_files=[], + logging_level=logging.DEBUG, + skip_log_opts=True) ap = archive_policy.ArchivePolicy("foobar", 0, -- GitLab From 66bd9718ddd8dcdbeb601573236bffbee2798957 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Fri, 13 Apr 2018 09:40:04 +0200 Subject: [PATCH 1342/1483] incoming: provide group_metrics_by_sack method This can be leveraged by the Chef to group the metrics by sack as it wishes. --- gnocchi/chef.py | 38 +++++++++++++----------------------- gnocchi/incoming/__init__.py | 17 ++++++++++++++++ 2 files changed, 31 insertions(+), 24 deletions(-) diff --git a/gnocchi/chef.py b/gnocchi/chef.py index badceea5..f90c29af 100644 --- a/gnocchi/chef.py +++ b/gnocchi/chef.py @@ -15,8 +15,6 @@ # See the License for the specific language governing permissions and # limitations under the License. 
import hashlib -import itertools -import operator import daiquiri import six @@ -24,8 +22,6 @@ import six from gnocchi import indexer -ITEMGETTER_1 = operator.itemgetter(1) - LOG = daiquiri.getLogger(__name__) @@ -60,13 +56,10 @@ class Chef(object): on error :type sync: bool """ - # FIXME(jd) The indexer could return them sorted/grouped by directly - metrics_to_expunge = sorted( - ((m, self.incoming.sack_for_metric(m.id)) - for m in self.index.list_metrics(status='delete')), - key=ITEMGETTER_1) - for sack, metrics in itertools.groupby( - metrics_to_expunge, key=ITEMGETTER_1): + metrics_to_expunge = self.index.list_metrics(status='delete') + metrics_by_id = {m.id: m for m in metrics_to_expunge} + for sack, metric_ids in self.incoming.group_metrics_by_sack( + metrics_by_id.keys()): try: lock = self.get_sack_lock(sack) if not lock.acquire(blocking=sync): @@ -84,7 +77,8 @@ class Chef(object): LOG.error("Unable to lock sack %s for expunging metrics", sack, exc_info=True) else: - for metric, sack in metrics: + for metric_id in metric_ids: + metric = metrics_by_id[metric_id] LOG.debug("Deleting metric %s", metric) try: self.incoming.delete_unprocessed_measures_for_metric( @@ -112,29 +106,25 @@ class Chef(object): # process only active metrics. deleted metrics with unprocessed # measures will be skipped until cleaned by janitor. 
metrics_by_id = {m.id: m for m in metrics} - metrics_to_refresh = sorted( - ((metric, self.incoming.sack_for_metric(metric.id)) - for metric in metrics), - key=ITEMGETTER_1) - for sack, metric_and_sack in itertools.groupby( - metrics_to_refresh, ITEMGETTER_1): + for sack, metric_ids in self.incoming.group_metrics_by_sack( + metrics_by_id.keys()): lock = self.get_sack_lock(sack) # FIXME(jd) timeout should be global for all sack locking if not lock.acquire(blocking=timeout): raise SackAlreadyLocked(sack) - metrics = [m[0].id for m in metric_and_sack] try: - LOG.debug("Processing measures for %d metrics", len(metrics)) + LOG.debug("Processing measures for %d metrics", + len(metric_ids)) with self.incoming.process_measure_for_metrics( - metrics) as metrics_and_measures: + metric_ids) as metrics_and_measures: if metrics_and_measures: self.storage.add_measures_to_metrics({ - metrics_by_id[metric]: measures - for metric, measures + metrics_by_id[metric_id]: measures + for metric_id, measures in six.iteritems(metrics_and_measures) }) LOG.debug("Measures for %d metrics processed", - len(metrics)) + len(metric_ids)) except Exception: if sync: raise diff --git a/gnocchi/incoming/__init__.py b/gnocchi/incoming/__init__.py index f716adfa..51b72d10 100644 --- a/gnocchi/incoming/__init__.py +++ b/gnocchi/incoming/__init__.py @@ -16,6 +16,7 @@ # under the License. import collections import functools +import itertools import operator import daiquiri @@ -32,6 +33,9 @@ LOG = daiquiri.getLogger(__name__) Measure = collections.namedtuple("Measure", ['timestamp', 'value']) +ITEMGETTER_1 = operator.itemgetter(1) + + class ReportGenerationError(Exception): pass @@ -156,6 +160,19 @@ class IncomingDriver(object): return numpy.array(list(measures), dtype=TIMESERIES_ARRAY_DTYPE).tobytes() + def group_metrics_by_sack(self, metrics): + """Iterate on a list of metrics, grouping them by sack. + + :param metrics: A list of metric uuid. + :return: An iterator yield (group, metrics). 
+ """ + metrics_and_sacks = sorted( + ((m, self.sack_for_metric(m)) for m in metrics), + key=ITEMGETTER_1) + for sack, metrics in itertools.groupby(metrics_and_sacks, + key=ITEMGETTER_1): + yield sack, [m[0] for m in metrics] + def add_measures(self, metric_id, measures): """Add a measure to a metric. -- GitLab From d4a5bfd9e1466b748031144488049bf1df231d22 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Mon, 16 Apr 2018 13:48:02 +0200 Subject: [PATCH 1343/1483] metricd: fix typo in update_capabilities Fixes #856 --- gnocchi/cli/metricd.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/gnocchi/cli/metricd.py b/gnocchi/cli/metricd.py index 9344d523..a8ada8db 100644 --- a/gnocchi/cli/metricd.py +++ b/gnocchi/cli/metricd.py @@ -243,7 +243,7 @@ class MetricProcessor(MetricProcessBase): exc_info=True) LOG.debug("%d metrics processed from %d sacks", m_count, s_count) # Update statistics - self.coord.update_capabitilities(self.GROUP_ID, self.statistics) + self.coord.update_capabilities(self.GROUP_ID, self.store.statistics) if sacks == self._get_sacks_to_process(): # We just did a full scan of all sacks, reset the timer self._last_full_sack_scan.reset() -- GitLab From 7fbdcbff643e02275e05d028decf3e3ac1e79f0e Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Tue, 13 Mar 2018 16:57:10 +0100 Subject: [PATCH 1344/1483] Add gnocchi-injector This allows to inject measures and maybe process them in a single tool. This is useful to generate load to test metricd and also profile the processing code. 
--- gnocchi/cli/injector.py | 121 ++++++++++++++++++ gnocchi/tests/test_injector.py | 30 +++++ .../notes/injector-af9e68fdfe02d322.yaml | 6 + setup.cfg | 1 + 4 files changed, 158 insertions(+) create mode 100644 gnocchi/cli/injector.py create mode 100644 gnocchi/tests/test_injector.py create mode 100644 releasenotes/notes/injector-af9e68fdfe02d322.yaml diff --git a/gnocchi/cli/injector.py b/gnocchi/cli/injector.py new file mode 100644 index 00000000..1de2bf93 --- /dev/null +++ b/gnocchi/cli/injector.py @@ -0,0 +1,121 @@ +# -*- encoding: utf-8 -*- +# +# Copyright (c) 2018 Red Hat +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+import random +import time +import uuid + +import daiquiri +import numpy +from oslo_config import cfg + +from gnocchi import chef +from gnocchi.cli import metricd +from gnocchi import incoming +from gnocchi import indexer +from gnocchi import service +from gnocchi import storage +from gnocchi import utils + +LOG = daiquiri.getLogger(__name__) + + +def injector(): + conf = cfg.ConfigOpts() + conf.register_cli_opts([ + cfg.IntOpt("--measures", + help="Measures per metric."), + cfg.IntOpt("--metrics", + help="Number of metrics to create."), + cfg.IntOpt("--archive-policy-name", + help="Name of archive policy to use.", + default="low"), + cfg.IntOpt("--interval", + help="Interval to sleep between metrics sending."), + cfg.BoolOpt("--process", default=False, + help="Process the ingested measures."), + ]) + return _inject(service.prepare_service(conf=conf, log_to_std=True), + metrics=conf.metrics, + measures=conf.measures, + archive_policy_name=conf.archive_policy_name, + process=conf.process, + interval=conf.interval) + + +def _inject_from_conf(conf, + metrics, measures, archive_policy_name="low", + process=False, interval=None): + inc = incoming.get_driver(conf) + coord = metricd.get_coordinator_and_start(str(uuid.uuid4()), + conf.coordination_url) + store = storage.get_driver(conf) + idx = indexer.get_driver(conf) + return _inject(inc, coord, store, idx, + metrics, measures, archive_policy_name, process, interval) + + +def _inject(inc, coord, store, idx, + metrics, measures, archive_policy_name="low", process=False, + interval=None): + LOG.info("Creating %d metrics", metrics) + with utils.StopWatch() as sw: + metrics = [ + idx.create_metric(uuid.uuid4(), "admin", + archive_policy_name).id + for _ in range(metrics) + ] + LOG.info("Created %d metrics in %.2fs", metrics, sw.elapsed()) + + LOG.info("Generating %d measures per metric for %d metrics… ", + measures, metrics) + now = numpy.datetime64(utils.utcnow()) + with utils.StopWatch() as sw: + measures = { + m_id: 
[incoming.Measure( + now + numpy.timedelta64(seconds=s), + random.randint(-999999, 999999)) for s in range(measures)] + for m_id in metrics + } + LOG.info("… done in %.2fs", sw.elapsed()) + + interval_timer = utils.StopWatch().start() + + while True: + interval_timer.reset() + with utils.StopWatch() as sw: + inc.add_measures_batch(measures) + total_measures = sum(map(len, measures.values())) + LOG.info("Pushed %d measures in %.2fs", + total_measures, + sw.elapsed()) + + if process: + c = chef.Chef(coord, inc, idx, store) + + with utils.StopWatch() as sw: + for s in inc.iter_sacks(): + c.process_new_measures_for_sack(s, blocking=True) + LOG.info("Processed %d sacks in %.2fs", + inc.NUM_SACKS, sw.elapsed()) + LOG.info("Speed: %.2f measures/s", + float(total_measures) / sw.elapsed()) + + if interval is None: + break + time.sleep(max(0, interval - interval_timer.elapsed())) + + return total_measures diff --git a/gnocchi/tests/test_injector.py b/gnocchi/tests/test_injector.py new file mode 100644 index 00000000..b01b7fdd --- /dev/null +++ b/gnocchi/tests/test_injector.py @@ -0,0 +1,30 @@ +# -*- encoding: utf-8 -*- +# +# Copyright © 2018 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from gnocchi.cli import injector +from gnocchi.tests import base + + +class InjectorTestCase(base.TestCase): + def test_inject(self): + self.assertEqual(100, injector._inject( + self.incoming, self.coord, self.storage, self.index, + measures=10, metrics=10)) + + def test_inject_process(self): + self.assertEqual(100, injector._inject( + self.incoming, self.coord, self.storage, self.index, + measures=10, metrics=10, process=True)) diff --git a/releasenotes/notes/injector-af9e68fdfe02d322.yaml b/releasenotes/notes/injector-af9e68fdfe02d322.yaml new file mode 100644 index 00000000..86e520d6 --- /dev/null +++ b/releasenotes/notes/injector-af9e68fdfe02d322.yaml @@ -0,0 +1,6 @@ +--- +features: + - | + The `gnocchi-injector` tool has been added. It allows to inject random + measures to a configured number of metrics in order to generate load for + `metricd`. diff --git a/setup.cfg b/setup.cfg index 9c06432c..a0464674 100644 --- a/setup.cfg +++ b/setup.cfg @@ -145,6 +145,7 @@ console_scripts = gnocchi-change-sack-size = gnocchi.cli.manage:change_sack_size gnocchi-statsd = gnocchi.cli.statsd:statsd gnocchi-metricd = gnocchi.cli.metricd:metricd + gnocchi-injector = gnocchi.cli.injector:injector oslo.config.opts = gnocchi = gnocchi.opts:list_opts -- GitLab From f65e745dc86d2304c864c7a57af1e3b629b95c15 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Mon, 23 Apr 2018 11:05:43 +0200 Subject: [PATCH 1345/1483] swift: pass the project domain We miss to pass the project domain to swiftclient. This change does it. 
--- gnocchi/common/swift.py | 1 + 1 file changed, 1 insertion(+) diff --git a/gnocchi/common/swift.py b/gnocchi/common/swift.py index ef57acdb..f961ba44 100644 --- a/gnocchi/common/swift.py +++ b/gnocchi/common/swift.py @@ -34,6 +34,7 @@ def get_connection(conf): 'endpoint_type': conf.swift_endpoint_type, 'service_type': conf.swift_service_type, 'user_domain_name': conf.swift_user_domain_name, + 'project_domain_name': conf.swift_project_domain_name, } if conf.swift_region: os_options['region_name'] = conf.swift_region -- GitLab From bfcfd36ea98736ca7cb1d2a635fba092eb7c2499 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Tue, 24 Apr 2018 13:20:27 +0200 Subject: [PATCH 1346/1483] indexer: drop/create missing constraint in slash reencoding migration script The current script cannot be executed if a resource has an history entry: ERROR gnocchi oslo_db.exception.DBReferenceError: (psycopg2.IntegrityError) update or delete on table "resource" violates foreign key constraint "fk_rh_id_resource_id" on table "resource_history" DEBUG [pifpaf.drivers] gnocchi-upgrade[71041] output: 2018-04-24 09:36:08.029 71041 ERROR gnocchi DETAIL: Key (id)=(e8bce9ff-5c30-524c-87b8-bfb1dce7855b) is still referenced from table "resource_history". DEBUG [pifpaf.drivers] gnocchi-upgrade[71041] output: 2018-04-24 09:36:08.029 71041 ERROR gnocchi [SQL: 'UPDATE resource SET id=%(param_1)s, original_resource_id=%(original_resource_id)s WHERE resource.id = %(id_1)s'] [parameters: {'param_1': 'bd6eac67-c1e9-5da0-9979-a797d776039e', 'original_resource_id': 'historized_resource', 'id_1': UUID('e8bce9ff-5c30-524c-87b8-bfb1dce7855b')}] (Background on this error at: http://sqlalche.me/e/gkpj) This drops and recreates the constraint at the end of the migration. 
--- .../versions/397987e38570_no_more_slash_and_reencode.py | 5 +++++ run-upgrade-tests.sh | 5 +++++ 2 files changed, 10 insertions(+) diff --git a/gnocchi/indexer/alembic/versions/397987e38570_no_more_slash_and_reencode.py b/gnocchi/indexer/alembic/versions/397987e38570_no_more_slash_and_reencode.py index 80b9416e..a671fc1d 100644 --- a/gnocchi/indexer/alembic/versions/397987e38570_no_more_slash_and_reencode.py +++ b/gnocchi/indexer/alembic/versions/397987e38570_no_more_slash_and_reencode.py @@ -87,6 +87,8 @@ def upgrade(): if rt.tablename != "generic" ) + op.drop_constraint("fk_rh_id_resource_id", "resource_history", + type_="foreignkey") op.drop_constraint("fk_metric_resource_id_resource_id", "metric", type_="foreignkey") for name, table in resource_type_tablenames.items(): @@ -172,6 +174,9 @@ def upgrade(): "metric", "resource", ("resource_id",), ("id",), ondelete="SET NULL") + op.create_foreign_key("fk_rh_id_resource_id", + "resource_history", "resource", + ("id",), ("id",), ondelete="CASCADE") for metric in connection.execute(metric_table.select().where( metric_table.c.name.like("%/%"))): diff --git a/run-upgrade-tests.sh b/run-upgrade-tests.sh index def061bf..bb0e405a 100755 --- a/run-upgrade-tests.sh +++ b/run-upgrade-tests.sh @@ -33,6 +33,11 @@ inject_data() { gnocchi resource create generic --attribute id:$resource_id -n metric:high > /dev/null done + # Create a resource with an history + gnocchi resource-type create ext --attribute someattr:string:false:max_length=32 > /dev/null + gnocchi resource create --type ext --attribute someattr:foobar -n metric:high historized_resource > /dev/null + gnocchi resource update --type ext --attribute someattr:foobaz historized_resource > /dev/null + { measures_sep="" MEASURES=$(python -c 'import datetime, random, json; now = datetime.datetime.utcnow(); print(json.dumps([{"timestamp": (now - datetime.timedelta(seconds=i)).isoformat(), "value": random.uniform(-100000, 100000)} for i in range(0, 288000, 10)]))') -- 
GitLab From 76cf9e396017a68e95143244593dc006e341833d Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Wed, 7 Mar 2018 14:31:57 +0100 Subject: [PATCH 1347/1483] storage: allow to batch splits retrieval for multiple metrics It also fixes the parallel_map call to be unique, making _get_splits a little bit faster for unbatched drivers. --- gnocchi/storage/__init__.py | 118 +++++++----- gnocchi/storage/ceph.py | 2 +- gnocchi/storage/file.py | 2 +- gnocchi/storage/redis.py | 36 +++- gnocchi/storage/s3.py | 2 +- gnocchi/storage/swift.py | 2 +- gnocchi/tests/test_storage.py | 349 +++++++++++++++++++--------------- 7 files changed, 294 insertions(+), 217 deletions(-) diff --git a/gnocchi/storage/__init__.py b/gnocchi/storage/__init__.py index 9c215212..6f29234b 100644 --- a/gnocchi/storage/__init__.py +++ b/gnocchi/storage/__init__.py @@ -128,14 +128,22 @@ class StorageDriver(object): def upgrade(): pass - def _get_measures(self, metric, keys_and_aggregations, version=3): - return utils.parallel_map( - self._get_measures_unbatched, - ((metric, key, aggregation, version) - for key, aggregation in keys_and_aggregations)) + def _get_splits(self, metrics_aggregations_keys, version=3): + results = collections.defaultdict( + lambda: collections.defaultdict(list)) + for metric, aggregation, split in utils.parallel_map( + lambda m, k, a, v: (m, a, self._get_splits_unbatched(m, k, a, v)), # noqa + ((metric, key, aggregation, version) + for metric, aggregations_and_keys + in six.iteritems(metrics_aggregations_keys) + for aggregation, keys + in six.iteritems(aggregations_and_keys) + for key in keys)): + results[metric][aggregation].append(split) + return results @staticmethod - def _get_measures_unbatched(metric, timestamp_key, aggregation, version=3): + def _get_splits_unbatched(metric, timestamp_key, aggregation, version=3): raise NotImplementedError @staticmethod @@ -293,30 +301,34 @@ class StorageDriver(object): ATTRGETTER_METHOD) } - def _get_splits_and_unserialize(self, 
metric, keys_and_aggregations): + def _get_splits_and_unserialize(self, metrics_aggregations_keys): """Get splits and unserialize them - :param metric: The metric to retrieve. - :param keys_and_aggregations: A list of tuple (SplitKey, Aggregation) - to retrieve. - :return: A list of AggregatedTimeSerie. + :param metrics_aggregations_keys: A dict where keys are + `storage.Metric` and values are dict + of {Aggregation: [SplitKey]} to + retrieve. + :return: A dict where keys are `storage.Metric` and values are dict + {aggregation: [`carbonara.AggregatedTimeSerie`]}. """ - if not keys_and_aggregations: - return [] - raw_measures = self._get_measures(metric, keys_and_aggregations) - results = [] - for (key, aggregation), raw in six.moves.zip( - keys_and_aggregations, raw_measures): - try: - ts = carbonara.AggregatedTimeSerie.unserialize( - raw, key, aggregation) - except carbonara.InvalidData: - LOG.error("Data corruption detected for %s " - "aggregated `%s' timeserie, granularity `%s' " - "around time `%s', ignoring.", - metric.id, aggregation.method, key.sampling, key) - ts = carbonara.AggregatedTimeSerie(aggregation) - results.append(ts) + raw_measures = self._get_splits(metrics_aggregations_keys) + results = collections.defaultdict( + lambda: collections.defaultdict(list)) + for metric, aggregations_and_raws in six.iteritems(raw_measures): + for aggregation, raws in six.iteritems(aggregations_and_raws): + for key, raw in six.moves.zip( + metrics_aggregations_keys[metric][aggregation], raws): + try: + ts = carbonara.AggregatedTimeSerie.unserialize( + raw, key, aggregation) + except carbonara.InvalidData: + LOG.error("Data corruption detected for %s " + "aggregated `%s' timeserie, granularity " + "`%s' around time `%s', ignoring.", + metric.id, aggregation.method, key.sampling, + key) + ts = carbonara.AggregatedTimeSerie(aggregation) + results[metric][aggregation].append(ts) return results def _get_measures_timeserie(self, metric, aggregation, keys, @@ -329,14 +341,15 
@@ class StorageDriver(object): to_timestamp = carbonara.SplitKey.from_timestamp_and_sampling( to_timestamp, aggregation.granularity) - keys_and_aggregations = [ - (key, aggregation) for key in sorted(keys) + keys = [ + key for key in sorted(keys) if ((not from_timestamp or key >= from_timestamp) and (not to_timestamp or key <= to_timestamp)) ] timeseries = self._get_splits_and_unserialize( - metric, keys_and_aggregations) + {metric: {aggregation: keys}} + )[metric][aggregation] ts = carbonara.AggregatedTimeSerie.from_timeseries( timeseries, aggregation) @@ -365,35 +378,37 @@ class StorageDriver(object): oldest_mutable_timestamp) """ metrics_splits_to_store = {} + keys_to_get = collections.defaultdict( + lambda: collections.defaultdict(list)) + splits_to_rewrite = collections.defaultdict( + lambda: collections.defaultdict(list)) for metric, (keys_and_aggregations_and_splits, oldest_mutable_timestamp) in six.iteritems( metrics_keys_aggregations_splits): - keys_to_rewrite = [] - splits_to_rewrite = [] for (key, aggregation), split in six.iteritems( keys_and_aggregations_and_splits): # NOTE(jd) We write the full split only if the driver works # that way (self.WRITE_FULL) or if the oldest_mutable_timestamp # is out of range. - write_full = ( - self.WRITE_FULL or next(key) <= oldest_mutable_timestamp - ) - if write_full: - keys_to_rewrite.append(key) - splits_to_rewrite.append(split) - - # Update the splits that were passed as argument with the data - # already stored in the case that we need to rewrite them fully. - # First, fetch all those existing splits. 
- existing_data = self._get_splits_and_unserialize( - metric, [(key, split.aggregation) - for key, split - in six.moves.zip(keys_to_rewrite, splits_to_rewrite)]) - - for key, split, existing in six.moves.zip( - keys_to_rewrite, splits_to_rewrite, existing_data): - if existing: + if self.WRITE_FULL or next(key) <= oldest_mutable_timestamp: + # Update the splits that were passed as argument with the + # data already stored in the case that we need to rewrite + # them fully. First, fetch all those existing splits. + keys_to_get[metric][aggregation].append(key) + splits_to_rewrite[metric][aggregation].append(split) + + existing_data = self._get_splits_and_unserialize(keys_to_get) + + for metric, (keys_and_aggregations_and_splits, + oldest_mutable_timestamp) in six.iteritems( + metrics_keys_aggregations_splits): + for aggregation, existing_list in six.iteritems( + existing_data[metric]): + for key, split, existing in six.moves.zip( + keys_to_get[metric][aggregation], + splits_to_rewrite[metric][aggregation], + existing_list): existing.merge(split) keys_and_aggregations_and_splits[ (key, split.aggregation)] = existing @@ -404,7 +419,8 @@ class StorageDriver(object): # Do not store the split if it's empty. 
if split: offset, data = split.serialize( - key, compressed=key in keys_to_rewrite) + key, + compressed=key in keys_to_get[metric][aggregation]) keys_aggregations_data_offset.append( (key, split.aggregation, data, offset)) metrics_splits_to_store[metric] = keys_aggregations_data_offset diff --git a/gnocchi/storage/ceph.py b/gnocchi/storage/ceph.py index 5b2ab6bb..6f682f85 100644 --- a/gnocchi/storage/ceph.py +++ b/gnocchi/storage/ceph.py @@ -150,7 +150,7 @@ class CephStorage(storage.StorageDriver): # It's possible that the object does not exists pass - def _get_measures_unbatched(self, metric, key, aggregation, version=3): + def _get_splits_unbatched(self, metric, key, aggregation, version=3): try: name = self._get_object_name( metric, key, aggregation.method, version) diff --git a/gnocchi/storage/file.py b/gnocchi/storage/file.py index 010d851b..56929a1a 100644 --- a/gnocchi/storage/file.py +++ b/gnocchi/storage/file.py @@ -180,7 +180,7 @@ class FileStorage(storage.StorageDriver): # measures) raise - def _get_measures_unbatched(self, metric, key, aggregation, version=3): + def _get_splits_unbatched(self, metric, key, aggregation, version=3): path = self._build_metric_path_for_split( metric, aggregation.method, key, version) try: diff --git a/gnocchi/storage/redis.py b/gnocchi/storage/redis.py index 569eb94c..4f691beb 100644 --- a/gnocchi/storage/redis.py +++ b/gnocchi/storage/redis.py @@ -13,6 +13,8 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
+import collections + import six from gnocchi import carbonara @@ -151,10 +153,30 @@ return ids def _delete_metric(self, metric): self._client.delete(self._metric_key(metric)) - def _get_measures(self, metric, keys_and_aggregations, version=3): - if not keys_and_aggregations: - return [] - return self._client.hmget( - self._metric_key(metric), - [self._aggregated_field_for_split(aggregation.method, key, version) - for key, aggregation in keys_and_aggregations]) + def _get_splits(self, metrics_aggregations_keys, version=3): + # Use a list of metric and aggregations with a constant sorting + metrics_aggregations = [ + (metric, aggregation) + for metric, aggregation_and_keys in six.iteritems( + metrics_aggregations_keys) + for aggregation, keys in six.iteritems(aggregation_and_keys) + # Do not send any fetch request if keys is empty + if keys + ] + + pipe = self._client.pipeline(transaction=False) + for metric, aggregation in metrics_aggregations: + pipe.hmget( + self._metric_key(metric), + [self._aggregated_field_for_split(aggregation.method, + key, version) + for key in metrics_aggregations_keys[metric][aggregation]]) + + results = collections.defaultdict( + lambda: collections.defaultdict(list)) + + for (metric, aggregation), result in six.moves.zip( + metrics_aggregations, pipe.execute()): + results[metric][aggregation] = result + + return results diff --git a/gnocchi/storage/s3.py b/gnocchi/storage/s3.py index 29134d63..c5a29ae7 100644 --- a/gnocchi/storage/s3.py +++ b/gnocchi/storage/s3.py @@ -156,7 +156,7 @@ class S3Storage(storage.StorageDriver): s3.bulk_delete(self.s3, bucket, [c['Key'] for c in response.get('Contents', ())]) - def _get_measures_unbatched(self, metric, key, aggregation, version=3): + def _get_splits_unbatched(self, metric, key, aggregation, version=3): try: response = self.s3.get_object( Bucket=self._bucket_name, diff --git a/gnocchi/storage/swift.py b/gnocchi/storage/swift.py index 287173dc..c1a1ff65 100644 --- a/gnocchi/storage/swift.py +++ 
b/gnocchi/storage/swift.py @@ -147,7 +147,7 @@ class SwiftStorage(storage.StorageDriver): # Deleted in the meantime? Whatever. raise - def _get_measures_unbatched(self, metric, key, aggregation, version=3): + def _get_splits_unbatched(self, metric, key, aggregation, version=3): try: headers, contents = self.swift.get_object( self._container_name(metric), self._object_name( diff --git a/gnocchi/tests/test_storage.py b/gnocchi/tests/test_storage.py index ff7b0b35..f60594df 100644 --- a/gnocchi/tests/test_storage.py +++ b/gnocchi/tests/test_storage.py @@ -75,14 +75,15 @@ class TestStorageDriver(tests_base.TestCase): with mock.patch('gnocchi.carbonara.AggregatedTimeSerie.unserialize', side_effect=carbonara.InvalidData()): - results = self.storage._get_splits_and_unserialize( - self.metric, - [ - (carbonara.SplitKey( - numpy.datetime64(1387800000, 's'), - numpy.timedelta64(5, 'm')), - aggregation) - ]) + results = self.storage._get_splits_and_unserialize({ + self.metric: { + aggregation: [ + carbonara.SplitKey( + numpy.datetime64(1387800000, 's'), + numpy.timedelta64(5, 'm')) + ], + }, + })[self.metric][aggregation] self.assertEqual(1, len(results)) self.assertIsInstance(results[0], carbonara.AggregatedTimeSerie) # Assert it's an empty one since corrupted @@ -98,14 +99,15 @@ class TestStorageDriver(tests_base.TestCase): aggregation = self.metric.archive_policy.get_aggregation( "mean", numpy.timedelta64(5, 'm')) - results = self.storage._get_splits_and_unserialize( - self.metric, - [ - (carbonara.SplitKey( - numpy.datetime64(1387800000, 's'), - numpy.timedelta64(5, 'm')), - aggregation) - ]) + results = self.storage._get_splits_and_unserialize({ + self.metric: { + aggregation: [ + carbonara.SplitKey( + numpy.datetime64(1387800000, 's'), + numpy.timedelta64(5, 'm')), + ], + }, + })[self.metric][aggregation] self.assertEqual(1, len(results)) self.assertIsInstance(results[0], carbonara.AggregatedTimeSerie) # Assert it's not empty one since corrupted @@ -365,28 +367,39 @@ 
class TestStorageDriver(tests_base.TestCase): aggregation = self.metric.archive_policy.get_aggregation( "mean", numpy.timedelta64(5, 'm')) - data = self.storage._get_measures( - self.metric, [(carbonara.SplitKey( - numpy.datetime64(1451520000, 's'), - numpy.timedelta64(5, 'm'), - ), aggregation)]) + data = self.storage._get_splits({ + self.metric: { + aggregation: [ + carbonara.SplitKey( + numpy.datetime64(1451520000, 's'), + numpy.timedelta64(5, 'm'), + )]}}) + self.assertEqual(1, len(data)) + data = data[self.metric] + self.assertEqual(1, len(data)) + data = data[aggregation] self.assertEqual(1, len(data)) self.assertIsInstance(data[0], bytes) self.assertGreater(len(data[0]), 0) existing = data[0] # Now retrieve an existing and a non-existing key - data = self.storage._get_measures( - self.metric, [ - (carbonara.SplitKey( - numpy.datetime64(1451520000, 's'), - numpy.timedelta64(5, 'm'), - ), aggregation), - (carbonara.SplitKey( - numpy.datetime64(1451520010, 's'), - numpy.timedelta64(5, 'm'), - ), aggregation), - ]) + data = self.storage._get_splits({ + self.metric: { + aggregation: [ + carbonara.SplitKey( + numpy.datetime64(1451520000, 's'), + numpy.timedelta64(5, 'm'), + ), + carbonara.SplitKey( + numpy.datetime64(1451520010, 's'), + numpy.timedelta64(5, 'm'), + ), + ]}}) + self.assertEqual(1, len(data)) + data = data[self.metric] + self.assertEqual(1, len(data)) + data = data[aggregation] self.assertEqual(2, len(data)) self.assertIsInstance(data[0], bytes) self.assertGreater(len(data[0]), 0) @@ -394,17 +407,22 @@ class TestStorageDriver(tests_base.TestCase): self.assertIsNone(data[1]) # Now retrieve a non-existing and an existing key - data = self.storage._get_measures( - self.metric, [ - (carbonara.SplitKey( - numpy.datetime64(155152000, 's'), - numpy.timedelta64(5, 'm'), - ), aggregation), - (carbonara.SplitKey( - numpy.datetime64(1451520000, 's'), - numpy.timedelta64(5, 'm'), - ), aggregation), - ]) + data = self.storage._get_splits({ + self.metric: { + 
aggregation: [ + carbonara.SplitKey( + numpy.datetime64(155152000, 's'), + numpy.timedelta64(5, 'm'), + ), + carbonara.SplitKey( + numpy.datetime64(1451520000, 's'), + numpy.timedelta64(5, 'm'), + ) + ]}}) + self.assertEqual(1, len(data)) + data = data[self.metric] + self.assertEqual(1, len(data)) + data = data[aggregation] self.assertEqual(2, len(data)) self.assertIsInstance(data[1], bytes) self.assertGreater(len(data[1]), 0) @@ -413,18 +431,19 @@ class TestStorageDriver(tests_base.TestCase): m2, _ = self._create_metric() # Now retrieve a non-existing (= no aggregated measures) metric - data = self.storage._get_measures( - m2, [ - (carbonara.SplitKey( - numpy.datetime64(1451520010, 's'), - numpy.timedelta64(5, 'm'), - ), aggregation), - (carbonara.SplitKey( - numpy.datetime64(1451520000, 's'), - numpy.timedelta64(5, 'm'), - ), aggregation), - ]) - self.assertEqual([None, None], data) + data = self.storage._get_splits({ + m2: { + aggregation: [ + carbonara.SplitKey( + numpy.datetime64(1451520010, 's'), + numpy.timedelta64(5, 'm'), + ), + carbonara.SplitKey( + numpy.datetime64(1451520000, 's'), + numpy.timedelta64(5, 'm'), + ) + ]}}) + self.assertEqual({m2: {aggregation: [None, None]}}, data) def test_rewrite_measures(self): # Create an archive policy that spans on several splits. 
Each split @@ -466,23 +485,26 @@ class TestStorageDriver(tests_base.TestCase): aggregation = self.metric.archive_policy.get_aggregation( "mean", numpy.timedelta64(1, 'm')) - data = self.storage._get_measures( - self.metric, [(carbonara.SplitKey( - numpy.datetime64(1451520000, 's'), - numpy.timedelta64(1, 'm'), - ), aggregation)])[0] + data = self.storage._get_splits({ + self.metric: { + aggregation: [carbonara.SplitKey( + numpy.datetime64(1451520000, 's'), + numpy.timedelta64(1, 'm'), + )]}})[self.metric][aggregation][0] self.assertTrue(carbonara.AggregatedTimeSerie.is_compressed(data)) - data = self.storage._get_measures( - self.metric, [(carbonara.SplitKey( - numpy.datetime64(1451736000, 's'), - numpy.timedelta64(60, 's'), - ), aggregation)])[0] + data = self.storage._get_splits({ + self.metric: { + aggregation: [carbonara.SplitKey( + numpy.datetime64(1451736000, 's'), + numpy.timedelta64(60, 's'), + )]}})[self.metric][aggregation][0] self.assertTrue(carbonara.AggregatedTimeSerie.is_compressed(data)) - data = self.storage._get_measures( - self.metric, [(carbonara.SplitKey( - numpy.datetime64(1451952000, 's'), - numpy.timedelta64(60, 's'), - ), aggregation)])[0] + data = self.storage._get_splits({ + self.metric: { + aggregation: [carbonara.SplitKey( + numpy.datetime64(1451952000, 's'), + numpy.timedelta64(60, 's'), + )]}})[self.metric][aggregation][0] assertCompressedIfWriteFull( carbonara.AggregatedTimeSerie.is_compressed(data)) @@ -517,30 +539,35 @@ class TestStorageDriver(tests_base.TestCase): numpy.timedelta64(1, 'm')), }, }, self.storage._list_split_keys(self.metric, [agg])) - data = self.storage._get_measures( - self.metric, [(carbonara.SplitKey( - numpy.datetime64(1451520000, 's'), - numpy.timedelta64(60, 's'), - ), aggregation)])[0] + data = self.storage._get_splits({ + self.metric: { + aggregation: [carbonara.SplitKey( + numpy.datetime64(1451520000, 's'), + numpy.timedelta64(60, 's'), + )]}})[self.metric][aggregation][0] 
self.assertTrue(carbonara.AggregatedTimeSerie.is_compressed(data)) - data = self.storage._get_measures( - self.metric, [(carbonara.SplitKey( - numpy.datetime64(1451736000, 's'), - numpy.timedelta64(60, 's'), - ), aggregation)])[0] + data = self.storage._get_splits({ + self.metric: { + aggregation: [carbonara.SplitKey( + numpy.datetime64(1451736000, 's'), + numpy.timedelta64(60, 's'), + )]}})[self.metric][aggregation][0] self.assertTrue(carbonara.AggregatedTimeSerie.is_compressed(data)) - data = self.storage._get_measures( - self.metric, [(carbonara.SplitKey( - numpy.datetime64(1451952000, 's'), - numpy.timedelta64(1, 'm'), - ), aggregation)])[0] + data = self.storage._get_splits({ + self.metric: { + aggregation: [carbonara.SplitKey( + numpy.datetime64(1451952000, 's'), + numpy.timedelta64(1, 'm'), + )]}})[self.metric][aggregation][0] # Now this one is compressed because it has been rewritten! self.assertTrue(carbonara.AggregatedTimeSerie.is_compressed(data)) - data = self.storage._get_measures( - self.metric, [(carbonara.SplitKey( - numpy.datetime64(1452384000, 's'), - numpy.timedelta64(60, 's'), - ), aggregation)])[0] + data = self.storage._get_splits({ + self.metric: { + aggregation: [ + carbonara.SplitKey( + numpy.datetime64(1452384000, 's'), + numpy.timedelta64(60, 's'), + )]}})[self.metric][aggregation][0] assertCompressedIfWriteFull( carbonara.AggregatedTimeSerie.is_compressed(data)) @@ -594,23 +621,25 @@ class TestStorageDriver(tests_base.TestCase): aggregation = self.metric.archive_policy.get_aggregation( "mean", numpy.timedelta64(1, 'm')) - data = self.storage._get_measures( - self.metric, [(carbonara.SplitKey( - numpy.datetime64(1451520000, 's'), - numpy.timedelta64(1, 'm'), - ), aggregation)])[0] + data = self.storage._get_splits( + {self.metric: { + aggregation: [carbonara.SplitKey( + numpy.datetime64(1451520000, 's'), + numpy.timedelta64(1, 'm'), + )]}})[self.metric][aggregation][0] self.assertTrue(carbonara.AggregatedTimeSerie.is_compressed(data)) - 
data = self.storage._get_measures( - self.metric, [(carbonara.SplitKey( - numpy.datetime64(1451736000, 's'), - numpy.timedelta64(1, 'm'), - ), aggregation)])[0] + data = self.storage._get_splits( + {self.metric: { + aggregation: [carbonara.SplitKey( + numpy.datetime64(1451736000, 's'), + numpy.timedelta64(1, 'm'), + )]}})[self.metric][aggregation][0] self.assertTrue(carbonara.AggregatedTimeSerie.is_compressed(data)) - data = self.storage._get_measures( - self.metric, [(carbonara.SplitKey( + data = self.storage._get_splits( + {self.metric: {aggregation: [carbonara.SplitKey( numpy.datetime64(1451952000, 's'), numpy.timedelta64(1, 'm') - ), aggregation)])[0] + )]}})[self.metric][aggregation][0] assertCompressedIfWriteFull( carbonara.AggregatedTimeSerie.is_compressed(data)) @@ -647,30 +676,34 @@ class TestStorageDriver(tests_base.TestCase): numpy.timedelta64(1, 'm')), } }, self.storage._list_split_keys(self.metric, [agg])) - data = self.storage._get_measures( - self.metric, [(carbonara.SplitKey( - numpy.datetime64(1451520000, 's'), - numpy.timedelta64(1, 'm'), - ), agg)])[0] + data = self.storage._get_splits({ + self.metric: { + agg: [carbonara.SplitKey( + numpy.datetime64(1451520000, 's'), + numpy.timedelta64(1, 'm'), + )]}})[self.metric][agg][0] self.assertTrue(carbonara.AggregatedTimeSerie.is_compressed(data)) - data = self.storage._get_measures( - self.metric, [(carbonara.SplitKey( - numpy.datetime64(1451736000, 's'), - numpy.timedelta64(1, 'm'), - ), agg)])[0] + data = self.storage._get_splits({ + self.metric: { + agg: [carbonara.SplitKey( + numpy.datetime64(1451736000, 's'), + numpy.timedelta64(1, 'm'), + )]}})[self.metric][agg][0] self.assertTrue(carbonara.AggregatedTimeSerie.is_compressed(data)) - data = self.storage._get_measures( - self.metric, [(carbonara.SplitKey( - numpy.datetime64(1451952000, 's'), - numpy.timedelta64(60, 's') - ), agg)])[0] + data = self.storage._get_splits({ + self.metric: { + agg: [carbonara.SplitKey( + numpy.datetime64(1451952000, 
's'), + numpy.timedelta64(60, 's') + )]}})[self.metric][agg][0] # Now this one is compressed because it has been rewritten! self.assertTrue(carbonara.AggregatedTimeSerie.is_compressed(data)) - data = self.storage._get_measures( - self.metric, [(carbonara.SplitKey( - numpy.datetime64(1452384000, 's'), - numpy.timedelta64(1, 'm'), - ), agg)])[0] + data = self.storage._get_splits({ + self.metric: { + agg: [carbonara.SplitKey( + numpy.datetime64(1452384000, 's'), + numpy.timedelta64(1, 'm'), + )]}})[self.metric][agg][0] assertCompressedIfWriteFull( carbonara.AggregatedTimeSerie.is_compressed(data)) @@ -724,24 +757,27 @@ class TestStorageDriver(tests_base.TestCase): aggregation = self.metric.archive_policy.get_aggregation( "mean", numpy.timedelta64(1, 'm')) - data = self.storage._get_measures( - self.metric, - [(carbonara.SplitKey( - numpy.datetime64(1451520000, 's'), - numpy.timedelta64(1, 'm'), - ), aggregation)])[0] + data = self.storage._get_splits({ + self.metric: { + aggregation: + [carbonara.SplitKey( + numpy.datetime64(1451520000, 's'), + numpy.timedelta64(1, 'm'), + )]}})[self.metric][aggregation][0] self.assertTrue(carbonara.AggregatedTimeSerie.is_compressed(data)) - data = self.storage._get_measures( - self.metric, [(carbonara.SplitKey( - numpy.datetime64(1451736000, 's'), - numpy.timedelta64(1, 'm') - ), aggregation)])[0] + data = self.storage._get_splits({ + self.metric: { + aggregation: [carbonara.SplitKey( + numpy.datetime64(1451736000, 's'), + numpy.timedelta64(1, 'm') + )]}})[self.metric][aggregation][0] self.assertTrue(carbonara.AggregatedTimeSerie.is_compressed(data)) - data = self.storage._get_measures( - self.metric, [(carbonara.SplitKey( - numpy.datetime64(1451952000, 's'), - numpy.timedelta64(1, 'm'), - ), aggregation)])[0] + data = self.storage._get_splits({ + self.metric: { + aggregation: [carbonara.SplitKey( + numpy.datetime64(1451952000, 's'), + numpy.timedelta64(1, 'm'), + )]}})[self.metric][aggregation][0] assertCompressedIfWriteFull( 
carbonara.AggregatedTimeSerie.is_compressed(data)) @@ -814,23 +850,26 @@ class TestStorageDriver(tests_base.TestCase): aggregation = self.metric.archive_policy.get_aggregation( "mean", numpy.timedelta64(1, 'm')) - data = self.storage._get_measures( - self.metric, [(carbonara.SplitKey( - numpy.datetime64(1451520000, 's'), - numpy.timedelta64(60, 's'), - ), aggregation)])[0] + data = self.storage._get_splits({ + self.metric: { + aggregation: [carbonara.SplitKey( + numpy.datetime64(1451520000, 's'), + numpy.timedelta64(60, 's'), + )]}})[self.metric][aggregation][0] self.assertTrue(carbonara.AggregatedTimeSerie.is_compressed(data)) - data = self.storage._get_measures( - self.metric, [(carbonara.SplitKey( - numpy.datetime64(1451736000, 's'), - numpy.timedelta64(1, 'm'), - ), aggregation)])[0] + data = self.storage._get_splits({ + self.metric: { + aggregation: [carbonara.SplitKey( + numpy.datetime64(1451736000, 's'), + numpy.timedelta64(1, 'm'), + )]}})[self.metric][aggregation][0] self.assertTrue(carbonara.AggregatedTimeSerie.is_compressed(data)) - data = self.storage._get_measures( - self.metric, [(carbonara.SplitKey( - numpy.datetime64(1451952000, 's'), - numpy.timedelta64(1, 'm'), - ), aggregation)])[0] + data = self.storage._get_splits({ + self.metric: { + aggregation: [carbonara.SplitKey( + numpy.datetime64(1451952000, 's'), + numpy.timedelta64(1, 'm'), + )]}})[self.metric][aggregation][0] assertCompressedIfWriteFull( carbonara.AggregatedTimeSerie.is_compressed(data)) @@ -916,7 +955,7 @@ class TestStorageDriver(tests_base.TestCase): (datetime64(2014, 1, 1, 12, 10), numpy.timedelta64(5, 'm'), 44.0), ]}, self.storage.get_measures(self.metric, aggregations)) - def test_add_and_get_measures(self): + def test_add_and_get_splits(self): self.incoming.add_measures(self.metric.id, [ incoming.Measure(datetime64(2014, 1, 1, 12, 0, 1), 69), incoming.Measure(datetime64(2014, 1, 1, 12, 7, 31), 42), -- GitLab From ea9004f27003ad8019ab339147271aa84750e6dc Mon Sep 17 00:00:00 2001 
From: Mehdi Abaakouk Date: Sun, 29 Apr 2018 11:40:05 +0200 Subject: [PATCH 1348/1483] Enable mergify --- .mergify.yml | 11 +++++++++++ 1 file changed, 11 insertions(+) create mode 100644 .mergify.yml diff --git a/.mergify.yml b/.mergify.yml new file mode 100644 index 00000000..4c8a9ecb --- /dev/null +++ b/.mergify.yml @@ -0,0 +1,11 @@ +rules: + default: + protection: + required_pull_request_reviews: + required_approving_review_count: 2 + branches: + stable/.*: + protection: + required_pull_request_reviews: + required_approving_review_count: 1 + -- GitLab From 4461385bb12722505b0993024fd9b76b4e55ddd5 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Wed, 2 May 2018 06:56:01 +0200 Subject: [PATCH 1349/1483] Update mergify configuration --- .mergify.yml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/.mergify.yml b/.mergify.yml index 4c8a9ecb..e183241f 100644 --- a/.mergify.yml +++ b/.mergify.yml @@ -1,6 +1,10 @@ rules: default: protection: + required_status_checks: + strict: True + contexts: + - continuous-integration/travis-ci required_pull_request_reviews: required_approving_review_count: 2 branches: -- GitLab From 5974a342f175db2fbe2a9c2b2c2c196e1e977e49 Mon Sep 17 00:00:00 2001 From: Thomas Goirand Date: Mon, 7 May 2018 09:24:47 +0200 Subject: [PATCH 1350/1483] Add missing gnocchi-statsd daemon package and init script. --- debian/changelog | 6 ++++++ debian/control | 14 ++++++++++++++ debian/gnocchi-statsd.init.in | 17 +++++++++++++++++ 3 files changed, 37 insertions(+) create mode 100644 debian/gnocchi-statsd.init.in diff --git a/debian/changelog b/debian/changelog index c1e69760..0fe6c0cb 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +gnocchi (4.2.0-3) unstable; urgency=medium + + * Add missing gnocchi-statsd daemon package and init script. 
+ + -- Thomas Goirand Mon, 07 May 2018 09:23:50 +0200 + gnocchi (4.2.0-2) unstable; urgency=medium [ Ondřej Nový ] diff --git a/debian/control b/debian/control index b322ba86..ace113f3 100644 --- a/debian/control +++ b/debian/control @@ -131,6 +131,20 @@ Description: Metric as a Service - metric daemon . This package contains the metric daemon. +Package: gnocchi-statsd +Architecture: all +Depends: + gnocchi-common (= ${binary:Version}), + lsb-base, + ${misc:Depends}, + ${python:Depends}, +Description: Metric as a Service - statsd daemon + Gnocchi is a service for managing a set of resources and storing metrics about + them, in a scalable and resilient way. Its functionalities are exposed over an + HTTP REST API. + . + This package contains the statsd daemon. + Package: python3-gnocchi Section: python Architecture: all diff --git a/debian/gnocchi-statsd.init.in b/debian/gnocchi-statsd.init.in new file mode 100644 index 00000000..29669355 --- /dev/null +++ b/debian/gnocchi-statsd.init.in @@ -0,0 +1,17 @@ +#!/bin/sh +### BEGIN INIT INFO +# Provides: gnocchi-statsd +# Required-Start: $network $local_fs $remote_fs $syslog +# Required-Stop: $remote_fs +# Should-Start: postgresql mysql keystone rabbitmq-server ntp mongodb +# Should-Stop: postgresql mysql keystone rabbitmq-server ntp mongodb +# Default-Start: 2 3 4 5 +# Default-Stop: 0 1 6 +# Short-Description: Gnocchi Statsd +# Description: Gnocchi Statsd +### END INIT INFO + +# Author: Thomas Goirand +DESC="OpenStack Gnocchi Statsd daemon" +PROJECT_NAME=gnocchi +NAME=${PROJECT_NAME}-statsd -- GitLab From 2a76e7371d06b3a5ed49b4a45bc5934d2422f6db Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Thu, 17 May 2018 10:06:19 +0200 Subject: [PATCH 1351/1483] injector: cleanup This removes unused method. And fixup the logging issue due to the shallowed "metrics" variable. 
--- gnocchi/cli/injector.py | 19 ++----------------- 1 file changed, 2 insertions(+), 17 deletions(-) diff --git a/gnocchi/cli/injector.py b/gnocchi/cli/injector.py index 1de2bf93..a624c419 100644 --- a/gnocchi/cli/injector.py +++ b/gnocchi/cli/injector.py @@ -23,11 +23,8 @@ import numpy from oslo_config import cfg from gnocchi import chef -from gnocchi.cli import metricd from gnocchi import incoming -from gnocchi import indexer from gnocchi import service -from gnocchi import storage from gnocchi import utils LOG = daiquiri.getLogger(__name__) @@ -56,24 +53,12 @@ def injector(): interval=conf.interval) -def _inject_from_conf(conf, - metrics, measures, archive_policy_name="low", - process=False, interval=None): - inc = incoming.get_driver(conf) - coord = metricd.get_coordinator_and_start(str(uuid.uuid4()), - conf.coordination_url) - store = storage.get_driver(conf) - idx = indexer.get_driver(conf) - return _inject(inc, coord, store, idx, - metrics, measures, archive_policy_name, process, interval) - - def _inject(inc, coord, store, idx, metrics, measures, archive_policy_name="low", process=False, interval=None): LOG.info("Creating %d metrics", metrics) with utils.StopWatch() as sw: - metrics = [ + metric_ids = [ idx.create_metric(uuid.uuid4(), "admin", archive_policy_name).id for _ in range(metrics) @@ -88,7 +73,7 @@ def _inject(inc, coord, store, idx, m_id: [incoming.Measure( now + numpy.timedelta64(seconds=s), random.randint(-999999, 999999)) for s in range(measures)] - for m_id in metrics + for m_id in metric_ids } LOG.info("… done in %.2fs", sw.elapsed()) -- GitLab From bc18ebded10bc763b98bcba46db925a3a5031ecd Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Tue, 22 May 2018 14:33:09 +0200 Subject: [PATCH 1352/1483] swift: avoid Connection aborted This change removes usage of threads with swift driver. This avoids to get "Connection aborted" because a thread is stuck and the server side decide to break the connection. 
Related-bug: #509 --- gnocchi/incoming/__init__.py | 12 +++++++----- gnocchi/incoming/swift.py | 5 +++++ gnocchi/storage/__init__.py | 16 ++++++++++------ gnocchi/storage/swift.py | 3 +++ gnocchi/utils.py | 6 +++++- 5 files changed, 30 insertions(+), 12 deletions(-) diff --git a/gnocchi/incoming/__init__.py b/gnocchi/incoming/__init__.py index 51b72d10..562a18ee 100644 --- a/gnocchi/incoming/__init__.py +++ b/gnocchi/incoming/__init__.py @@ -106,6 +106,9 @@ class IncomingDriver(object): SACK_NAME_FORMAT = "incoming{total}-{number}" CFG_PREFIX = 'gnocchi-config' CFG_SACKS = 'sacks' + # NOTE(sileht): By default we use threads, but some driver can disable + # threads by setting this to utils.sequencial_map + MAP_METHOD = staticmethod(utils.parallel_map) @property def NUM_SACKS(self): @@ -188,11 +191,10 @@ class IncomingDriver(object): and values are a list of :py:class:`gnocchi.incoming.Measure`. """ - utils.parallel_map( - self._store_new_measures, - ((metric_id, self._encode_measures(measures)) - for metric_id, measures - in six.iteritems(metrics_and_measures))) + self.MAP_METHOD(self._store_new_measures, + ((metric_id, self._encode_measures(measures)) + for metric_id, measures + in six.iteritems(metrics_and_measures))) @staticmethod def _store_new_measures(metric_id, data): diff --git a/gnocchi/incoming/swift.py b/gnocchi/incoming/swift.py index 4445eee4..b232bfd7 100644 --- a/gnocchi/incoming/swift.py +++ b/gnocchi/incoming/swift.py @@ -22,6 +22,7 @@ import six from gnocchi.common import swift from gnocchi import incoming +from gnocchi import utils swclient = swift.swclient swift_utils = swift.swift_utils @@ -30,6 +31,10 @@ LOG = daiquiri.getLogger(__name__) class SwiftStorage(incoming.IncomingDriver): + # NOTE(sileht): Using threads with swiftclient doesn't work + # as expected, so disable it + MAP_METHOD = staticmethod(utils.sequencial_map) + def __init__(self, conf, greedy=True): super(SwiftStorage, self).__init__(conf) self.swift = swift.get_connection(conf) 
diff --git a/gnocchi/storage/__init__.py b/gnocchi/storage/__init__.py index 6f29234b..79ec19db 100644 --- a/gnocchi/storage/__init__.py +++ b/gnocchi/storage/__init__.py @@ -121,6 +121,10 @@ class Statistics(collections.defaultdict): class StorageDriver(object): + # NOTE(sileht): By default we use threads, but some driver can disable + # threads by setting this to utils.sequencial_map + MAP_METHOD = staticmethod(utils.parallel_map) + def __init__(self, conf): self.statistics = Statistics() @@ -131,7 +135,7 @@ class StorageDriver(object): def _get_splits(self, metrics_aggregations_keys, version=3): results = collections.defaultdict( lambda: collections.defaultdict(list)) - for metric, aggregation, split in utils.parallel_map( + for metric, aggregation, split in self.MAP_METHOD( lambda m, k, a, v: (m, a, self._get_splits_unbatched(m, k, a, v)), # noqa ((metric, key, aggregation, version) for metric, aggregations_and_keys @@ -168,7 +172,7 @@ class StorageDriver(object): return dict( six.moves.zip( metrics, - utils.parallel_map( + self.MAP_METHOD( utils.return_none_on_failure( self._get_or_create_unaggregated_timeseries_unbatched), ((metric, version) for metric in metrics)))) @@ -189,7 +193,7 @@ class StorageDriver(object): :param metrics_and_data: A list of (metric, serialized_data) tuples :param version: Storage engine data format version """ - utils.parallel_map( + self.MAP_METHOD( utils.return_none_on_failure( self._store_unaggregated_timeseries_unbatched), ((metric, data, version) for metric, data in metrics_and_data)) @@ -221,7 +225,7 @@ class StorageDriver(object): data, offset) tuples. :param version: Storage engine format version. """ - utils.parallel_map( + self.MAP_METHOD( self._store_metric_splits_unbatched, ((metric, key, aggregation, data, offset, version) for metric, keys_aggregations_data_offset @@ -260,7 +264,7 @@ class StorageDriver(object): :param to timestamp: The timestamp to get the measure to. 
""" keys = self._list_split_keys(metric, aggregations) - timeseries = utils.parallel_map( + timeseries = self.MAP_METHOD( self._get_measures_timeserie, ((metric, agg, keys[agg], from_timestamp, to_timestamp) for agg in aggregations)) @@ -561,7 +565,7 @@ class StorageDriver(object): `storage.Metric` and values are lists of (key, aggregation) tuples. """ - utils.parallel_map( + self.MAP_METHOD( utils.return_none_on_failure(self._delete_metric_splits_unbatched), ((metric, key, aggregation) for metric, keys_and_aggregations diff --git a/gnocchi/storage/swift.py b/gnocchi/storage/swift.py index c1a1ff65..9f8a8cbd 100644 --- a/gnocchi/storage/swift.py +++ b/gnocchi/storage/swift.py @@ -86,6 +86,9 @@ OPTS = [ class SwiftStorage(storage.StorageDriver): WRITE_FULL = True + # NOTE(sileht): Using threads with swiftclient doesn't work + # as expected, so disable it + MAP_METHOD = staticmethod(utils.sequencial_map) def __init__(self, conf): super(SwiftStorage, self).__init__(conf) diff --git a/gnocchi/utils.py b/gnocchi/utils.py index c04cc7aa..ad2d2b88 100644 --- a/gnocchi/utils.py +++ b/gnocchi/utils.py @@ -299,11 +299,15 @@ def get_driver_class(namespace, conf): conf.driver).driver +def sequencial_map(fn, list_of_args): + return list(itertools.starmap(fn, list_of_args)) + + def parallel_map(fn, list_of_args): """Run a function in parallel.""" if parallel_map.MAX_WORKERS == 1: - return list(itertools.starmap(fn, list_of_args)) + return sequencial_map(fn, list_of_args) with futures.ThreadPoolExecutor( max_workers=parallel_map.MAX_WORKERS) as executor: -- GitLab From 432aa86c83f96a0e80ce7f209a8eaea377fa5d85 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Fri, 1 Jun 2018 13:08:03 +0200 Subject: [PATCH 1353/1483] Replace Pastamaker by Mergify --- doc/source/contributing.rst | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/doc/source/contributing.rst b/doc/source/contributing.rst index f58095bd..1cfc0b76 100644 --- a/doc/source/contributing.rst +++ 
b/doc/source/contributing.rst @@ -42,20 +42,20 @@ All pull-requests must be reviewed by `members of the Gnocchi project`_. When a pull-request is approved by at least two of the members and when Travis-CI confirms that all the tests run fine, the patch will be merged. -The Gnocchi project leverages `Pastamaker`_ in order to schedule the merge of -the different pull-requests. Pastamaker is in charge of making sure that the +The Gnocchi project leverages `Mergify`_ in order to schedule the merge of the +different pull-requests. Mergify is in charge of making sure that the pull-request is up-to-date with respect to the `master` branch and that the tests pass. Pull-requests are always merged in a serialized manner in order to make sure that no pull-request can break another one. -`Gnocchi's Pastamaker dashboard`_ shows the current status of the merge queue. +`Gnocchi's Mergify dashboard`_ shows the current status of the merge queue. .. _`git pull-request`: https://github.com/jd/git-pull-request .. _`PEP 8`: https://www.python.org/dev/peps/pep-0008/ .. _`Travis-CI`: http://travis-ci.org .. _`members of the Gnocchi project`: https://github.com/orgs/gnocchixyz/people -.. _`Pastamaker`: https://github.com/sileht/pastamaker -.. _`Gnocchi's Pastamaker dashboard`: https://pastamaker.gnocchi.xyz +.. _`Mergify`: https://mergify.io +.. _`Gnocchi's Mergify dashboard`: https://gh.mergify.io/gnocchixyz Running the Tests -- GitLab From bc852a178ac7e8888c446ece48a2a4af2950d478 Mon Sep 17 00:00:00 2001 From: Thomas Goirand Date: Sun, 3 Jun 2018 10:42:31 +0200 Subject: [PATCH 1354/1483] Add dependency on python3-distutils (Closes: #896594). --- debian/changelog | 6 ++++++ debian/control | 1 + 2 files changed, 7 insertions(+) diff --git a/debian/changelog b/debian/changelog index 0fe6c0cb..1521b27f 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +gnocchi (4.2.0-4) UNRELEASED; urgency=medium + + * Add dependency on python3-distutils (Closes: #896594). 
+ + -- Thomas Goirand Sun, 03 Jun 2018 10:41:46 +0200 + gnocchi (4.2.0-3) unstable; urgency=medium * Add missing gnocchi-statsd daemon package and init script. diff --git a/debian/control b/debian/control index ace113f3..b4be3ef3 100644 --- a/debian/control +++ b/debian/control @@ -150,6 +150,7 @@ Section: python Architecture: all Depends: alembic, + python3-distutils | libpython3.5-stdlib, python3-boto3, python3-botocore (>= 1.5), python3-cachetools, -- GitLab From 806f32621dacca33f5a77014940c3d101212634b Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Tue, 29 May 2018 11:58:16 +0200 Subject: [PATCH 1355/1483] file: allow to create a subdir to create less root directory Some filesystem have a limit in term of number of directory a directory can have. Also in performance point of view, some filesystem are slower when too much directories are created in one directory. To improve the situation is such case, this change adds an option to create subdirectories for storing metrics. for example file_subdir_len=16 will create: de3a72c4505543b4be/d1f9bb1395b52a/de3a72c4-5055-43b4-bed1-f9bb1395c52a --- gnocchi/storage/file.py | 61 ++++++++++++++++++- gnocchi/tests/test_storage.py | 23 +++++++ ...e-file-driver-layout-41c7a458160c4cb7.yaml | 8 +++ 3 files changed, 90 insertions(+), 2 deletions(-) create mode 100644 releasenotes/notes/change-file-driver-layout-41c7a458160c4cb7.yaml diff --git a/gnocchi/storage/file.py b/gnocchi/storage/file.py index 56929a1a..32ebd24d 100644 --- a/gnocchi/storage/file.py +++ b/gnocchi/storage/file.py @@ -17,11 +17,14 @@ import collections import errno import itertools +import json import operator import os import shutil import tempfile +import uuid +import daiquiri from oslo_config import cfg import six @@ -34,10 +37,16 @@ OPTS = [ cfg.StrOpt('file_basepath', default='/var/lib/gnocchi', help='Path used to store gnocchi data files.'), + cfg.IntOpt('file_subdir_len', + default=2, min=0, max=32, + help='if > 0, this create a subdirectory for 
every N bytes' + 'of the metric uuid') ] ATTRGETTER_METHOD = operator.attrgetter("method") +LOG = daiquiri.getLogger(__name__) + # Python 2 compatibility try: FileNotFoundError @@ -47,14 +56,54 @@ except NameError: class FileStorage(storage.StorageDriver): WRITE_FULL = True + CFG_PREFIX = 'gnocchi-storage-config' + CFG_SUBDIR_LEN = 'subdir_len' def __init__(self, conf): super(FileStorage, self).__init__(conf) self.basepath = conf.file_basepath self.basepath_tmp = os.path.join(self.basepath, 'tmp') + self.conf = conf + self._file_subdir_len = None + + @property + def SUBDIR_LEN(self): + if self._file_subdir_len is None: + config_path = os.path.join(self.basepath_tmp, self.CFG_PREFIX) + if os.path.exists(config_path): + with open(config_path, 'r') as f: + self._file_subdir_len = json.load(f)[self.CFG_SUBDIR_LEN] + elif self.is_old_directory_structure(): + self._file_subdir_len = 0 + else: + # Fresh install + self._file_subdir_len = self.conf.file_subdir_len + + if self._file_subdir_len != self.conf.file_subdir_len: + LOG.warning("Changing file_subdir_len is not supported, using " + "the stored value: %d", self._file_subdir_len) + return self._file_subdir_len + + def set_subdir_len(self, subdir_len): + data = {self.CFG_SUBDIR_LEN: subdir_len} + with open(os.path.join(self.basepath_tmp, self.CFG_PREFIX), 'w') as f: + json.dump(data, f) def upgrade(self): utils.ensure_paths([self.basepath_tmp]) + self.set_subdir_len(self.SUBDIR_LEN) + + def is_old_directory_structure(self): + # NOTE(sileht): We look for at least one metric directory + for p in os.listdir(self.basepath): + if os.path.isdir(p) and '-' in p: + try: + uuid.UUID(p) + except ValueError: + pass + else: + return True + return False def __str__(self): return "%s: %s" % (self.__class__.__name__, str(self.basepath)) @@ -68,7 +117,15 @@ class FileStorage(storage.StorageDriver): os.rename(tmpfile.name, dest) def _build_metric_dir(self, metric): - return os.path.join(self.basepath, str(metric.id)) + path_parts = 
[self.basepath] + if self.SUBDIR_LEN > 0: + metric_id = metric.id.hex + path_parts.extend( + [metric_id[start:start+self.SUBDIR_LEN] + for start in range(0, 32, self.SUBDIR_LEN) + ]) + path_parts.append(str(metric.id)) + return os.path.join(*path_parts) def _build_unaggregated_timeserie_path(self, metric, version=3): return os.path.join( @@ -91,7 +148,7 @@ class FileStorage(storage.StorageDriver): def _create_metric(self, metric): path = self._build_metric_dir(metric) try: - os.mkdir(path, 0o750) + os.makedirs(path, 0o750) except OSError as e: if e.errno == errno.EEXIST: raise storage.MetricAlreadyExists(metric) diff --git a/gnocchi/tests/test_storage.py b/gnocchi/tests/test_storage.py index f60594df..96b15849 100644 --- a/gnocchi/tests/test_storage.py +++ b/gnocchi/tests/test_storage.py @@ -64,6 +64,29 @@ class TestStorageDriver(tests_base.TestCase): driver = storage.get_driver(self.conf) self.assertIsInstance(driver, storage.StorageDriver) + def test_file_driver_subdir_len(self): + driver = storage.get_driver(self.conf) + if not isinstance(driver, file.FileStorage): + self.skipTest("not file driver") + + # Check the default + self.assertEqual(2, driver.SUBDIR_LEN) + + metric = mock.Mock(id=uuid.UUID("12345678901234567890123456789012")) + expected = (driver.basepath + "/12/34/56/78/90/12/34/56/78/90/12/34/56" + "/78/90/12/12345678-9012-3456-7890-123456789012") + self.assertEqual(expected, driver._build_metric_dir(metric)) + + driver._file_subdir_len = 16 + expected = (driver.basepath + "/1234567890123456/7890123456" + "789012/12345678-9012-3456-7890-123456789012") + self.assertEqual(expected, driver._build_metric_dir(metric)) + + driver._file_subdir_len = 15 + expected = (driver.basepath + "/123456789012345/67890123456" + "7890/12/12345678-9012-3456-7890-123456789012") + self.assertEqual(expected, driver._build_metric_dir(metric)) + def test_corrupted_split(self): self.incoming.add_measures(self.metric.id, [ incoming.Measure(datetime64(2014, 1, 1, 12, 0, 1), 69), 
diff --git a/releasenotes/notes/change-file-driver-layout-41c7a458160c4cb7.yaml b/releasenotes/notes/change-file-driver-layout-41c7a458160c4cb7.yaml new file mode 100644 index 00000000..84495857 --- /dev/null +++ b/releasenotes/notes/change-file-driver-layout-41c7a458160c4cb7.yaml @@ -0,0 +1,8 @@ +--- +features: + - | + By default, the file driver creates a subdirectory every two bytes of the metric + uuid. This avoid to reach limitation of certain filesystems and improve + performance in certain case. This is configurable with the option ``file_subdir_len``. + If the backend already have data coming from a previous version of Gnocchi, it + kept unchanged, ``file_subdir_len`` is set to 0. -- GitLab From 6004e99c4d661852258dd83b397f0f5c9079a755 Mon Sep 17 00:00:00 2001 From: Thomas Goirand Date: Mon, 4 Jun 2018 12:44:19 +0200 Subject: [PATCH 1356/1483] Add patch to remove dependency on distutils (Closes: #896594). --- debian/changelog | 4 +- debian/control | 1 - debian/patches/no-distutils-usage.diff | 56 ++++++++++++++++++++++++++ debian/patches/py3-compat.patch | 2 +- debian/patches/series | 1 + 5 files changed, 60 insertions(+), 4 deletions(-) create mode 100644 debian/patches/no-distutils-usage.diff diff --git a/debian/changelog b/debian/changelog index 1521b27f..ecd7fbc9 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,6 +1,6 @@ -gnocchi (4.2.0-4) UNRELEASED; urgency=medium +gnocchi (4.2.0-4) unstable; urgency=medium - * Add dependency on python3-distutils (Closes: #896594). + * Add patch to remove dependency on distutils (Closes: #896594). 
-- Thomas Goirand Sun, 03 Jun 2018 10:41:46 +0200 diff --git a/debian/control b/debian/control index b4be3ef3..ace113f3 100644 --- a/debian/control +++ b/debian/control @@ -150,7 +150,6 @@ Section: python Architecture: all Depends: alembic, - python3-distutils | libpython3.5-stdlib, python3-boto3, python3-botocore (>= 1.5), python3-cachetools, diff --git a/debian/patches/no-distutils-usage.diff b/debian/patches/no-distutils-usage.diff new file mode 100644 index 00000000..76b542a6 --- /dev/null +++ b/debian/patches/no-distutils-usage.diff @@ -0,0 +1,56 @@ +Index: b/gnocchi/cli/api.py +=================================================================== +--- a/gnocchi/cli/api.py ++++ b/gnocchi/cli/api.py +@@ -14,7 +14,7 @@ + # See the License for the specific language governing permissions and + # limitations under the License. + import copy +-from distutils import spawn ++import shutil + import math + import os + import sys +@@ -73,7 +73,7 @@ def api(): + "No need to pass `--' in gnocchi-api command line anymore, " + "please remove") + +- uwsgi = spawn.find_executable("uwsgi") ++ uwsgi = shutil.which("uwsgi") + if not uwsgi: + LOG.error("Unable to find `uwsgi'.\n" + "Be sure it is installed and in $PATH.") +Index: b/gnocchi/utils.py +=================================================================== +--- a/gnocchi/utils.py ++++ b/gnocchi/utils.py +@@ -15,7 +15,6 @@ + # License for the specific language governing permissions and limitations + # under the License. + import datetime +-import distutils.util + import errno + import itertools + import multiprocessing +@@ -203,10 +202,17 @@ def ensure_paths(paths): + raise + + +-def strtobool(v): +- if isinstance(v, bool): +- return v +- return bool(distutils.util.strtobool(v)) ++def strtobool(val): ++ if isinstance(val, bool): ++ return val ++ # copied from distutils.util ... 
++ val = val.lower() ++ if val in ('y', 'yes', 't', 'true', 'on', '1'): ++ return 1 ++ elif val in ('n', 'no', 'f', 'false', 'off', '0'): ++ return 0 ++ else: ++ raise ValueError("invalid truth value %r" % (val,)) + + + class StopWatch(object): diff --git a/debian/patches/py3-compat.patch b/debian/patches/py3-compat.patch index bf6141e0..7a82b51b 100644 --- a/debian/patches/py3-compat.patch +++ b/debian/patches/py3-compat.patch @@ -6,7 +6,7 @@ Index: gnocchi/gnocchi/cli/metricd.py =================================================================== --- gnocchi.orig/gnocchi/cli/metricd.py +++ gnocchi/gnocchi/cli/metricd.py -@@ -67,12 +67,14 @@ class MetricProcessBase(cotyledon.Servic +@@ -60,12 +60,14 @@ class MetricProcessBase(cotyledon.Servic self._wake_up.set() def _configure(self): diff --git a/debian/patches/series b/debian/patches/series index aff3d2ae..30a054d7 100644 --- a/debian/patches/series +++ b/debian/patches/series @@ -1 +1,2 @@ py3-compat.patch +no-distutils-usage.diff -- GitLab From 1057b6c4692815f782e9fa8cab75e6cdb881c23d Mon Sep 17 00:00:00 2001 From: Thomas Goirand Date: Mon, 4 Jun 2018 12:53:09 +0200 Subject: [PATCH 1357/1483] Add patch header --- debian/patches/no-distutils-usage.diff | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/debian/patches/no-distutils-usage.diff b/debian/patches/no-distutils-usage.diff index 76b542a6..c16c9f20 100644 --- a/debian/patches/no-distutils-usage.diff +++ b/debian/patches/no-distutils-usage.diff @@ -1,3 +1,8 @@ +Description: Avoid using distutils at runtime +Author: Matthias Klose +Forwarded: https://github.com/gnocchixyz/gnocchi/pull/904 +Last-Update: 2018-06-04 + Index: b/gnocchi/cli/api.py =================================================================== --- a/gnocchi/cli/api.py -- GitLab From 81f96df21fac9ce2bd9cc85a27f97f51150a3fab Mon Sep 17 00:00:00 2001 From: gord chung Date: Mon, 4 Jun 2018 12:18:33 -0400 Subject: [PATCH 1358/1483] redis: use hiredis parser redis-py has a python and c 
parser[1]. install the c parser. it offers ~10%-90% performance improvement depending on task using benchmark from redis-py[2]. [1] https://github.com/andymccurdy/redis-py#parsers [2] https://github.com/andymccurdy/redis-py/blob/master/benchmarks/basic_operations.py --- setup.cfg | 1 + 1 file changed, 1 insertion(+) diff --git a/setup.cfg b/setup.cfg index a0464674..bc975cbd 100644 --- a/setup.cfg +++ b/setup.cfg @@ -72,6 +72,7 @@ s3 = botocore>=1.5 redis = redis>=2.10.0 # MIT + hiredis swift = python-swiftclient>=3.1.0 ceph = -- GitLab From 08a750bdad582ebfbcdbd4e029e45fcbb45c6f97 Mon Sep 17 00:00:00 2001 From: Thomas Goirand Date: Tue, 5 Jun 2018 15:50:56 +0200 Subject: [PATCH 1359/1483] Switch gnocchi to openstack-pkg-tools >= 81~ style of uwsgi app. --- debian/changelog | 6 ++++ debian/control | 2 +- debian/gnocchi-api-uwsgi.ini | 60 ++++++++++++++++++++++++++++++++++++ debian/gnocchi-api.init.in | 8 ++--- debian/gnocchi-api.install | 1 + 5 files changed, 71 insertions(+), 6 deletions(-) create mode 100644 debian/gnocchi-api-uwsgi.ini create mode 100644 debian/gnocchi-api.install diff --git a/debian/changelog b/debian/changelog index ecd7fbc9..e3bebd42 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +gnocchi (4.2.0-5) unstable; urgency=medium + + * Switch gnocchi to openstack-pkg-tools >= 81~ style of uwsgi app. + + -- Thomas Goirand Tue, 05 Jun 2018 15:50:00 +0200 + gnocchi (4.2.0-4) unstable; urgency=medium * Add patch to remove dependency on distutils (Closes: #896594). 
diff --git a/debian/control b/debian/control index ace113f3..6e8f0a37 100644 --- a/debian/control +++ b/debian/control @@ -7,7 +7,7 @@ Uploaders: Build-Depends: debhelper (>= 10), dh-python, - openstack-pkg-tools (>= 74~), + openstack-pkg-tools (>= 81~), python3-all, python3-pbr, python3-setuptools, diff --git a/debian/gnocchi-api-uwsgi.ini b/debian/gnocchi-api-uwsgi.ini new file mode 100644 index 00000000..64d53d0f --- /dev/null +++ b/debian/gnocchi-api-uwsgi.ini @@ -0,0 +1,60 @@ +[uwsgi] +############################ +### Generic UWSGI config ### +############################ + +# Override the default size for headers from the 4k default. +buffer-size = 65535 + +# This avoids error 104: "Connection reset by peer" +#rem-header = Content-Length + +# This is running standalone +master = true + +# Threads and processes +enable-threads = true + +processes = 8 + +# uwsgi recommends this to prevent thundering herd on accept. +thunder-lock = true + +plugins = python3 + +# This ensures that file descriptors aren't shared between the WSGI application processes. +lazy-apps = true + +# Log from the wsgi application: needs python3-pastescript as runtime depends. +paste-logger = true + +# automatically kill workers if master dies +no-orphans = true + +# exit instead of brutal reload on SIGTERM +die-on-term = true + +################################## +### OpenStack service specific ### +################################## + +# This is the standard port for the WSGI application, listening on all available IPs +logto = /var/log/gnocchi/gnocchi-api.log +name = gnocchi-api +uid = gnocchi +gid = gnocchi +chdir = /var/lib/gnocchi +wsgi = gnocchi.rest.wsgi + +# This is controled by the init script using the --http-socket +# or using the --https thing. https will be activated if a file +# /etc/gnocchi/ssl/private/*.pem is found. In both case, port 9292 +# on all IPs will be used. 
+# The partern to search for the private key file is: +# find /etc/gnocchi/ssl/private -type f -iname '*.pem' | head -n 1 +# and for the certificate: +# find /etc/gnocchi/ssl/private -type f -iname '*.crt' | head -n 1 +# just drop files there and restart the daemon, and you'll have +# SSL up and running. +#http-socket = [::]:8041 +#https-socket = [::]:8041,foobar.crt,foobar.key diff --git a/debian/gnocchi-api.init.in b/debian/gnocchi-api.init.in index 4998c4c5..55225d60 100644 --- a/debian/gnocchi-api.init.in +++ b/debian/gnocchi-api.init.in @@ -15,8 +15,6 @@ DESC="OpenStack Gnocchi Api" PROJECT_NAME=gnocchi NAME=${PROJECT_NAME}-api -DAEMON=/usr/bin/uwsgi_python3 -DAEMON_ARGS="--master --enable-threads --thunder-lock --die-on-term --threads 4 --lazy-apps --wsgi gnocchi.rest.wsgi --paste-logger --processes 4 --die-on-term --logto /var/log/gnocchi/gnocchi-api.log --http-socket :8041" -NO_OPENSTACK_CONFIG_FILE_DAEMON_ARG=yes -NO_OPENSTACK_LOGFILE_DAEMON_ARG=yes -USE_SYSLOG=no +UWSGI_PORT=8041 +UWSGI_INI_PATH=/etc/gnocchi/gnocchi-api-uwsgi.ini +UWSGI_INI_APP=/usr/lib/python3/dist-packages/gnocchi/rest/wsgi.py diff --git a/debian/gnocchi-api.install b/debian/gnocchi-api.install new file mode 100644 index 00000000..15739f18 --- /dev/null +++ b/debian/gnocchi-api.install @@ -0,0 +1 @@ +debian/gnocchi-api-uwsgi.ini /etc/gnocchi -- GitLab From cda706e838ee9612879c5dfbb997b5fbe5fe83b8 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Thu, 8 Mar 2018 17:25:08 +0100 Subject: [PATCH 1360/1483] storage: allow to list split keys for different metric at the same time --- gnocchi/storage/__init__.py | 28 ++++++- gnocchi/storage/ceph.py | 2 +- gnocchi/storage/file.py | 2 +- gnocchi/storage/redis.py | 65 +++++++++------ gnocchi/storage/s3.py | 2 +- gnocchi/storage/swift.py | 2 +- gnocchi/tests/test_storage.py | 152 ++++++++++++++++++---------------- 7 files changed, 148 insertions(+), 105 deletions(-) diff --git a/gnocchi/storage/__init__.py b/gnocchi/storage/__init__.py index 
79ec19db..f9e5377d 100644 --- a/gnocchi/storage/__init__.py +++ b/gnocchi/storage/__init__.py @@ -234,7 +234,7 @@ class StorageDriver(object): in keys_aggregations_data_offset)) @staticmethod - def _list_split_keys(metric, aggregations, version=3): + def _list_split_keys_unbatched(self, metric, aggregations, version=3): """List split keys for a metric. :param metric: The metric to look key for. @@ -245,6 +245,28 @@ class StorageDriver(object): """ raise NotImplementedError + def _list_split_keys(self, metrics_and_aggregations, version=3): + """List split keys for metrics. + + :param metrics_and_aggregations: Dict of + {`storage.Metric`: + [`carbonara.Aggregation`]} + to look for. + :param version: Storage engine format version. + :return: A dict where keys are `storage.Metric` and values are dicts + where keys are `carbonara.Aggregation` objects and values are + a set of `carbonara.SplitKey` objects. + """ + metrics = list(metrics_and_aggregations.keys()) + r = utils.parallel_map( + self._list_split_keys_unbatched, + ((metric, metrics_and_aggregations[metric], version) + for metric in metrics)) + return { + metric: results + for metric, results in six.moves.zip(metrics, r) + } + @staticmethod def _version_check(name, v): @@ -263,7 +285,7 @@ class StorageDriver(object): :param from timestamp: The timestamp to get the measure from. :param to timestamp: The timestamp to get the measure to. 
""" - keys = self._list_split_keys(metric, aggregations) + keys = self._list_split_keys({metric: aggregations})[metric] timeseries = self.MAP_METHOD( self._get_measures_timeserie, ((metric, agg, keys[agg], from_timestamp, to_timestamp) @@ -484,7 +506,7 @@ class StorageDriver(object): aggregations_needing_list_of_keys.add(aggregation) all_existing_keys = self._list_split_keys( - metric, aggregations_needing_list_of_keys) + {metric: aggregations_needing_list_of_keys})[metric] # NOTE(jd) This dict uses (key, aggregation) tuples as keys because # using just (key) would not carry the aggregation method and therefore diff --git a/gnocchi/storage/ceph.py b/gnocchi/storage/ceph.py index 6f682f85..91793cd7 100644 --- a/gnocchi/storage/ceph.py +++ b/gnocchi/storage/ceph.py @@ -158,7 +158,7 @@ class CephStorage(storage.StorageDriver): except rados.ObjectNotFound: return - def _list_split_keys(self, metric, aggregations, version=3): + def _list_split_keys_unbatched(self, metric, aggregations, version=3): with rados.ReadOpCtx() as op: omaps, ret = self.ioctx.get_omap_vals(op, "", "", -1) try: diff --git a/gnocchi/storage/file.py b/gnocchi/storage/file.py index 56929a1a..bb3049b8 100644 --- a/gnocchi/storage/file.py +++ b/gnocchi/storage/file.py @@ -125,7 +125,7 @@ class FileStorage(storage.StorageDriver): except storage.MetricAlreadyExists: pass - def _list_split_keys(self, metric, aggregations, version=3): + def _list_split_keys_unbatched(self, metric, aggregations, version=3): keys = collections.defaultdict(set) for method, grouped_aggregations in itertools.groupby( sorted(aggregations, key=ATTRGETTER_METHOD), diff --git a/gnocchi/storage/redis.py b/gnocchi/storage/redis.py index 4f691beb..96aa212e 100644 --- a/gnocchi/storage/redis.py +++ b/gnocchi/storage/redis.py @@ -97,35 +97,46 @@ return ids } return ts - def _list_split_keys(self, metric, aggregations, version=3): - key = self._metric_key(metric) + def _list_split_keys(self, metrics_and_aggregations, version=3): pipe = 
self._client.pipeline(transaction=False) - pipe.exists(key) - for aggregation in aggregations: - self._scripts["list_split_keys"]( - keys=[key], args=[self._aggregated_field_for_split( - aggregation.method, "*", - version, aggregation.granularity)], - client=pipe, - ) + # Keep an ordered list of metrics + metrics = list(metrics_and_aggregations.keys()) + for metric in metrics: + key = self._metric_key(metric) + pipe.exists(key) + aggregations = metrics_and_aggregations[metric] + for aggregation in aggregations: + self._scripts["list_split_keys"]( + keys=[key], args=[self._aggregated_field_for_split( + aggregation.method, "*", + version, aggregation.granularity)], + client=pipe, + ) results = pipe.execute() - metric_exists_p = results.pop(0) - if not metric_exists_p: - raise storage.MetricDoesNotExist(metric) - keys = {} - for aggregation, k in six.moves.zip(aggregations, results): - if not k: - keys[aggregation] = set() - continue - timestamps, methods, granularities = list(zip(*k)) - timestamps = utils.to_timestamps(timestamps) - granularities = map(utils.to_timespan, granularities) - keys[aggregation] = { - carbonara.SplitKey(timestamp, - sampling=granularity) - for timestamp, granularity - in six.moves.zip(timestamps, granularities) - } + keys = collections.defaultdict(dict) + start = 0 + for metric in metrics: + metric_exists_p = results.pop(0) + if not metric_exists_p: + raise storage.MetricDoesNotExist(metric) + aggregations = metrics_and_aggregations[metric] + number_of_aggregations = len(aggregations) + keys_for_aggregations = results[start:number_of_aggregations] + start += number_of_aggregations + for aggregation, k in six.moves.zip( + aggregations, keys_for_aggregations): + if not k: + keys[metric][aggregation] = set() + continue + timestamps, methods, granularities = list(zip(*k)) + timestamps = utils.to_timestamps(timestamps) + granularities = map(utils.to_timespan, granularities) + keys[metric][aggregation] = { + carbonara.SplitKey(timestamp, + 
sampling=granularity) + for timestamp, granularity + in six.moves.zip(timestamps, granularities) + } return keys def _delete_metric_splits(self, metrics_keys_aggregations, version=3): diff --git a/gnocchi/storage/s3.py b/gnocchi/storage/s3.py index c5a29ae7..16b73e5c 100644 --- a/gnocchi/storage/s3.py +++ b/gnocchi/storage/s3.py @@ -178,7 +178,7 @@ class S3Storage(storage.StorageDriver): raise return True - def _list_split_keys(self, metric, aggregations, version=3): + def _list_split_keys_unbatched(self, metric, aggregations, version=3): bucket = self._bucket_name keys = {} for aggregation in aggregations: diff --git a/gnocchi/storage/swift.py b/gnocchi/storage/swift.py index 9f8a8cbd..50911395 100644 --- a/gnocchi/storage/swift.py +++ b/gnocchi/storage/swift.py @@ -161,7 +161,7 @@ class SwiftStorage(storage.StorageDriver): raise return contents - def _list_split_keys(self, metric, aggregations, version=3): + def _list_split_keys_unbatched(self, metric, aggregations, version=3): container = self._container_name(metric) try: headers, files = self.swift.get_container( diff --git a/gnocchi/tests/test_storage.py b/gnocchi/tests/test_storage.py index f60594df..e8a8c8ff 100644 --- a/gnocchi/tests/test_storage.py +++ b/gnocchi/tests/test_storage.py @@ -334,26 +334,27 @@ class TestStorageDriver(tests_base.TestCase): agg = self.metric.archive_policy.get_aggregation( "mean", numpy.timedelta64(1, 'D')) self.assertEqual({ - agg: {carbonara.SplitKey(numpy.datetime64(1244160000, 's'), - numpy.timedelta64(1, 'D'))}, - }, self.storage._list_split_keys( - self.metric, [agg])) + self.metric: { + agg: {carbonara.SplitKey(numpy.datetime64(1244160000, 's'), + numpy.timedelta64(1, 'D'))}, + }, + }, self.storage._list_split_keys({self.metric: [agg]})) agg = self.metric.archive_policy.get_aggregation( "mean", numpy.timedelta64(1, 'h')) self.assertEqual({ - agg: {carbonara.SplitKey(numpy.datetime64(1412640000, 's'), - numpy.timedelta64(1, 'h'))}, - }, self.storage._list_split_keys( - 
self.metric, [agg], - )) + self.metric: { + agg: {carbonara.SplitKey(numpy.datetime64(1412640000, 's'), + numpy.timedelta64(1, 'h'))}, + }, + }, self.storage._list_split_keys({self.metric: [agg]})) agg = self.metric.archive_policy.get_aggregation( "mean", numpy.timedelta64(5, 'm')) self.assertEqual({ - agg: {carbonara.SplitKey(numpy.datetime64(1419120000, 's'), - numpy.timedelta64(5, 'm'))}, - }, self.storage._list_split_keys( - self.metric, [agg], - )) + self.metric: { + agg: {carbonara.SplitKey(numpy.datetime64(1419120000, 's'), + numpy.timedelta64(5, 'm'))}, + } + }, self.storage._list_split_keys({self.metric: [agg]})) def test_get_measures_return(self): self.incoming.add_measures(self.metric.id, [ @@ -467,15 +468,17 @@ class TestStorageDriver(tests_base.TestCase): agg = self.metric.archive_policy.get_aggregation( "mean", numpy.timedelta64(1, 'm')) self.assertEqual({ - agg: { - carbonara.SplitKey(numpy.datetime64(1451520000, 's'), - numpy.timedelta64(1, 'm')), - carbonara.SplitKey(numpy.datetime64(1451736000, 's'), - numpy.timedelta64(1, 'm')), - carbonara.SplitKey(numpy.datetime64(1451952000, 's'), - numpy.timedelta64(1, 'm')), - }, - }, self.storage._list_split_keys(self.metric, [agg])) + self.metric: { + agg: { + carbonara.SplitKey(numpy.datetime64(1451520000, 's'), + numpy.timedelta64(1, 'm')), + carbonara.SplitKey(numpy.datetime64(1451736000, 's'), + numpy.timedelta64(1, 'm')), + carbonara.SplitKey(numpy.datetime64(1451952000, 's'), + numpy.timedelta64(1, 'm')), + }, + } + }, self.storage._list_split_keys({self.metric: [agg]})) if self.storage.WRITE_FULL: assertCompressedIfWriteFull = self.assertTrue @@ -528,17 +531,19 @@ class TestStorageDriver(tests_base.TestCase): agg = self.metric.archive_policy.get_aggregation( "mean", numpy.timedelta64(1, 'm')) self.assertEqual({ - agg: { - carbonara.SplitKey(numpy.datetime64(1452384000, 's'), - numpy.timedelta64(1, 'm')), - carbonara.SplitKey(numpy.datetime64(1451736000, 's'), - numpy.timedelta64(1, 'm')), - 
carbonara.SplitKey(numpy.datetime64(1451520000, 's'), - numpy.timedelta64(1, 'm')), - carbonara.SplitKey(numpy.datetime64(1451952000, 's'), - numpy.timedelta64(1, 'm')), + self.metric: { + agg: { + carbonara.SplitKey(numpy.datetime64(1452384000, 's'), + numpy.timedelta64(1, 'm')), + carbonara.SplitKey(numpy.datetime64(1451736000, 's'), + numpy.timedelta64(1, 'm')), + carbonara.SplitKey(numpy.datetime64(1451520000, 's'), + numpy.timedelta64(1, 'm')), + carbonara.SplitKey(numpy.datetime64(1451952000, 's'), + numpy.timedelta64(1, 'm')), + }, }, - }, self.storage._list_split_keys(self.metric, [agg])) + }, self.storage._list_split_keys({self.metric: [agg]})) data = self.storage._get_splits({ self.metric: { aggregation: [carbonara.SplitKey( @@ -603,15 +608,17 @@ class TestStorageDriver(tests_base.TestCase): agg = self.metric.archive_policy.get_aggregation( "mean", numpy.timedelta64(1, 'm')) self.assertEqual({ - agg: { - carbonara.SplitKey(numpy.datetime64(1451520000, 's'), - numpy.timedelta64(1, 'm')), - carbonara.SplitKey(numpy.datetime64(1451736000, 's'), - numpy.timedelta64(1, 'm')), - carbonara.SplitKey(numpy.datetime64(1451952000, 's'), - numpy.timedelta64(1, 'm')), + self.metric: { + agg: { + carbonara.SplitKey(numpy.datetime64(1451520000, 's'), + numpy.timedelta64(1, 'm')), + carbonara.SplitKey(numpy.datetime64(1451736000, 's'), + numpy.timedelta64(1, 'm')), + carbonara.SplitKey(numpy.datetime64(1451952000, 's'), + numpy.timedelta64(1, 'm')), + }, }, - }, self.storage._list_split_keys(self.metric, [agg])) + }, self.storage._list_split_keys({self.metric: [agg]})) if self.storage.WRITE_FULL: assertCompressedIfWriteFull = self.assertTrue @@ -665,17 +672,19 @@ class TestStorageDriver(tests_base.TestCase): agg = self.metric.archive_policy.get_aggregation( "mean", numpy.timedelta64(1, 'm')) self.assertEqual({ - agg: { - carbonara.SplitKey(numpy.datetime64('2016-01-10T00:00:00'), - numpy.timedelta64(1, 'm')), - carbonara.SplitKey(numpy.datetime64('2016-01-02T12:00:00'), 
- numpy.timedelta64(1, 'm')), - carbonara.SplitKey(numpy.datetime64('2015-12-31T00:00:00'), - numpy.timedelta64(1, 'm')), - carbonara.SplitKey(numpy.datetime64('2016-01-05T00:00:00'), - numpy.timedelta64(1, 'm')), - } - }, self.storage._list_split_keys(self.metric, [agg])) + self.metric: { + agg: { + carbonara.SplitKey(numpy.datetime64('2016-01-10T00:00:00'), + numpy.timedelta64(1, 'm')), + carbonara.SplitKey(numpy.datetime64('2016-01-02T12:00:00'), + numpy.timedelta64(1, 'm')), + carbonara.SplitKey(numpy.datetime64('2015-12-31T00:00:00'), + numpy.timedelta64(1, 'm')), + carbonara.SplitKey(numpy.datetime64('2016-01-05T00:00:00'), + numpy.timedelta64(1, 'm')), + }, + }, + }, self.storage._list_split_keys({self.metric: [agg]})) data = self.storage._get_splits({ self.metric: { agg: [carbonara.SplitKey( @@ -737,18 +746,17 @@ class TestStorageDriver(tests_base.TestCase): agg = self.metric.archive_policy.get_aggregation( "mean", numpy.timedelta64(1, 'm')) self.assertEqual({ - agg: { - carbonara.SplitKey(numpy.datetime64('2015-12-31T00:00:00'), - numpy.timedelta64(1, 'm')), - carbonara.SplitKey(numpy.datetime64('2016-01-02T12:00:00'), - numpy.timedelta64(1, 'm')), - carbonara.SplitKey(numpy.datetime64('2016-01-05T00:00:00'), - numpy.timedelta64(1, 'm')), + self.metric: { + agg: { + carbonara.SplitKey(numpy.datetime64('2015-12-31T00:00:00'), + numpy.timedelta64(1, 'm')), + carbonara.SplitKey(numpy.datetime64('2016-01-02T12:00:00'), + numpy.timedelta64(1, 'm')), + carbonara.SplitKey(numpy.datetime64('2016-01-05T00:00:00'), + numpy.timedelta64(1, 'm')), + }, }, - }, self.storage._list_split_keys( - self.metric, - [agg], - )) + }, self.storage._list_split_keys({self.metric: [agg]})) if self.storage.WRITE_FULL: assertCompressedIfWriteFull = self.assertTrue else: @@ -832,15 +840,17 @@ class TestStorageDriver(tests_base.TestCase): agg = self.metric.archive_policy.get_aggregation( "mean", numpy.timedelta64(1, 'm')) self.assertEqual({ - agg: { - 
carbonara.SplitKey(numpy.datetime64(1451520000, 's'), - numpy.timedelta64(1, 'm')), - carbonara.SplitKey(numpy.datetime64(1451736000, 's'), - numpy.timedelta64(1, 'm')), - carbonara.SplitKey(numpy.datetime64(1451952000, 's'), - numpy.timedelta64(1, 'm')), + self.metric: { + agg: { + carbonara.SplitKey(numpy.datetime64(1451520000, 's'), + numpy.timedelta64(1, 'm')), + carbonara.SplitKey(numpy.datetime64(1451736000, 's'), + numpy.timedelta64(1, 'm')), + carbonara.SplitKey(numpy.datetime64(1451952000, 's'), + numpy.timedelta64(1, 'm')), + }, }, - }, self.storage._list_split_keys(self.metric, [agg])) + }, self.storage._list_split_keys({self.metric: [agg]})) if self.storage.WRITE_FULL: assertCompressedIfWriteFull = self.assertTrue -- GitLab From 7e5288132334fbecb2e86b4897e517dbebee9780 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Thu, 7 Jun 2018 14:52:16 +0200 Subject: [PATCH 1361/1483] deps: fix min requirement of cradox Gnocchi requires 2.0.0 and not 1.2.0 since a while now. This is done even we will drop it soon for helping packager. --- setup.cfg | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.cfg b/setup.cfg index a0464674..5075c1a6 100644 --- a/setup.cfg +++ b/setup.cfg @@ -75,7 +75,7 @@ redis = swift = python-swiftclient>=3.1.0 ceph = - cradox>=1.2.0 + cradox>=2.0.0 ceph_alternative = python-rados>=12.2.0 # not available on pypi prometheus = -- GitLab From 60e608f70c8e13b17973809e84ec4d00c845da56 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Thu, 7 Jun 2018 15:28:12 +0200 Subject: [PATCH 1362/1483] gate fix requests 2.18.4 has requirement urllib3<1.23,>=1.21.1, but you'll have urllib3 1.23 which is incompatible. 
This explicits set the urllib3 version until requests release a fixed library --- setup.cfg | 1 + 1 file changed, 1 insertion(+) diff --git a/setup.cfg b/setup.cfg index a0464674..df26f93c 100644 --- a/setup.cfg +++ b/setup.cfg @@ -90,6 +90,7 @@ doc = reno>=1.6.2 test = pifpaf[ceph,gnocchi]>=1.0.1 + urllib3<1.23 # Temporary fix the gate, for unknown reason gabbi (which require 1.11) download 1.23 while requests requires <1.23 gabbi>=1.37.0 coverage>=3.6 fixtures -- GitLab From 9f31f6f50e70d4235e1adc8b16aaec50434e6a18 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Wed, 13 Jun 2018 10:26:13 +0200 Subject: [PATCH 1363/1483] Use rebase strategy to merge PR --- .mergify.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.mergify.yml b/.mergify.yml index e183241f..c9d2138d 100644 --- a/.mergify.yml +++ b/.mergify.yml @@ -7,6 +7,8 @@ rules: - continuous-integration/travis-ci required_pull_request_reviews: required_approving_review_count: 2 + merge_strategy: + method: rebase branches: stable/.*: protection: -- GitLab From 996fa2db7638bd8dfbcfb102e7cd1e514a108247 Mon Sep 17 00:00:00 2001 From: gord chung Date: Wed, 13 Jun 2018 11:45:06 -0400 Subject: [PATCH 1364/1483] new requests lib released remove workaround... 
or we can hit revert on original --- setup.cfg | 1 - 1 file changed, 1 deletion(-) diff --git a/setup.cfg b/setup.cfg index b3358657..a80e7b26 100644 --- a/setup.cfg +++ b/setup.cfg @@ -91,7 +91,6 @@ doc = reno>=1.6.2 test = pifpaf[ceph,gnocchi]>=1.0.1 - urllib3<1.23 # Temporary fix the gate, for unknown reason gabbi (which require 1.11) download 1.23 while requests requires <1.23 gabbi>=1.37.0 coverage>=3.6 fixtures -- GitLab From 30ed3cc4ff38ea8c3af8f8b39c5d220a016847cd Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Thu, 22 Mar 2018 15:19:58 +0100 Subject: [PATCH 1365/1483] storage: get_aggregated_measures can retrieve multiple metrics --- gnocchi/rest/aggregates/processor.py | 5 +- gnocchi/storage/__init__.py | 100 ++++++++++++++------------- gnocchi/tests/test_storage.py | 5 +- 3 files changed, 57 insertions(+), 53 deletions(-) diff --git a/gnocchi/rest/aggregates/processor.py b/gnocchi/rest/aggregates/processor.py index b7268a10..c7e7df94 100644 --- a/gnocchi/rest/aggregates/processor.py +++ b/gnocchi/rest/aggregates/processor.py @@ -56,9 +56,8 @@ def _get_measures_timeserie(storage, ref, granularity, *args, **kwargs): ref.aggregation, granularity) try: data = storage.get_aggregated_measures( - ref.metric, - [agg], - *args, **kwargs)[agg] + {ref.metric: [agg]}, + *args, **kwargs)[ref.metric][agg] except gnocchi_storage.MetricDoesNotExist: data = carbonara.AggregatedTimeSerie( carbonara.Aggregation(ref.aggregation, granularity, None)) diff --git a/gnocchi/storage/__init__.py b/gnocchi/storage/__init__.py index f9e5377d..04fdd8c6 100644 --- a/gnocchi/storage/__init__.py +++ b/gnocchi/storage/__init__.py @@ -258,7 +258,7 @@ class StorageDriver(object): a set of `carbonara.SplitKey` objects. 
""" metrics = list(metrics_and_aggregations.keys()) - r = utils.parallel_map( + r = self.MAP_METHOD( self._list_split_keys_unbatched, ((metric, metrics_and_aggregations[metric], version) for metric in metrics)) @@ -276,24 +276,60 @@ class StorageDriver(object): """ return name.split("_")[-1] == 'v%s' % v - def get_aggregated_measures(self, metric, aggregations, + def get_aggregated_measures(self, metrics_and_aggregations, from_timestamp=None, to_timestamp=None): """Get aggregated measures from a metric. - :param metric: The metric measured. - :param aggregations: The aggregations to retrieve. + :param metrics_and_aggregations: The metrics and aggregations to + retrieve in format + {metric: [aggregation, …]}. :param from timestamp: The timestamp to get the measure from. :param to timestamp: The timestamp to get the measure to. """ - keys = self._list_split_keys({metric: aggregations})[metric] - timeseries = self.MAP_METHOD( - self._get_measures_timeserie, - ((metric, agg, keys[agg], from_timestamp, to_timestamp) - for agg in aggregations)) - return { - agg: ts.fetch(from_timestamp, to_timestamp) - for agg, ts in six.moves.zip(aggregations, timeseries) - } + metrics_aggs_keys = self._list_split_keys(metrics_and_aggregations) + + for metric, aggregations_keys in six.iteritems(metrics_aggs_keys): + for aggregation, keys in six.iteritems(aggregations_keys): + start = ( + carbonara.SplitKey.from_timestamp_and_sampling( + from_timestamp, aggregation.granularity) + ) if from_timestamp else None + + stop = ( + carbonara.SplitKey.from_timestamp_and_sampling( + to_timestamp, aggregation.granularity) + ) if to_timestamp else None + + # Replace keys with filtered version + metrics_aggs_keys[metric][aggregation] = [ + key for key in sorted(keys) + if ((not start or key >= start) + and (not stop or key <= stop)) + ] + + metrics_aggregations_splits = self._get_splits_and_unserialize( + metrics_aggs_keys) + + results = collections.defaultdict(dict) + for metric, aggregations in 
six.iteritems(metrics_and_aggregations): + for aggregation in aggregations: + ts = carbonara.AggregatedTimeSerie.from_timeseries( + metrics_aggregations_splits[metric][aggregation], + aggregation) + # We need to truncate because: + # - If the driver is not in WRITE_FULL mode, then it might read + # too much data that will be deleted once the split is + # rewritten. Just truncate so we don't return it. + # - If the driver is in WRITE_FULL but the archive policy has + # been resized, we might still have too much points stored, + # which will be deleted at a later point when new points will + # be processed. Truncate to be sure we don't return them. + if aggregation.timespan is not None: + ts.truncate(aggregation.timespan) + results[metric][aggregation] = ts.fetch( + from_timestamp, to_timestamp) + + return results def get_measures(self, metric, aggregations, from_timestamp=None, to_timestamp=None, @@ -309,7 +345,7 @@ class StorageDriver(object): :param resample: The granularity to resample to. 
""" timeseries = self.get_aggregated_measures( - metric, aggregations, from_timestamp, to_timestamp) + {metric: aggregations}, from_timestamp, to_timestamp)[metric] if resample: for agg, ts in six.iteritems(timeseries): @@ -357,40 +393,6 @@ class StorageDriver(object): results[metric][aggregation].append(ts) return results - def _get_measures_timeserie(self, metric, aggregation, keys, - from_timestamp=None, to_timestamp=None): - if from_timestamp: - from_timestamp = carbonara.SplitKey.from_timestamp_and_sampling( - from_timestamp, aggregation.granularity) - - if to_timestamp: - to_timestamp = carbonara.SplitKey.from_timestamp_and_sampling( - to_timestamp, aggregation.granularity) - - keys = [ - key for key in sorted(keys) - if ((not from_timestamp or key >= from_timestamp) - and (not to_timestamp or key <= to_timestamp)) - ] - - timeseries = self._get_splits_and_unserialize( - {metric: {aggregation: keys}} - )[metric][aggregation] - - ts = carbonara.AggregatedTimeSerie.from_timeseries( - timeseries, aggregation) - # We need to truncate because: - # - If the driver is not in WRITE_FULL mode, then it might read too - # much data that will be deleted once the split is rewritten. Just - # truncate so we don't return it. - # - If the driver is in WRITE_FULL but the archive policy has been - # resized, we might still have too much points stored, which will be - # deleted at a later point when new points will be procecessed. - # Truncate to be sure we don't return them. - if aggregation.timespan is not None: - ts.truncate(aggregation.timespan) - return ts - def _update_metric_splits(self, metrics_keys_aggregations_splits): """Store splits of `carbonara.`AggregatedTimeSerie` for a metric. 
@@ -722,7 +724,7 @@ class StorageDriver(object): try: ts = self.get_aggregated_measures( - metric, [agg], from_timestamp, to_timestamp)[agg] + {metric: [agg]}, from_timestamp, to_timestamp)[metric][agg] except MetricDoesNotExist: return [] return [(timestamp, ts.aggregation.granularity, value) diff --git a/gnocchi/tests/test_storage.py b/gnocchi/tests/test_storage.py index 133e9538..e61ae9f7 100644 --- a/gnocchi/tests/test_storage.py +++ b/gnocchi/tests/test_storage.py @@ -254,7 +254,10 @@ class TestStorageDriver(tests_base.TestCase): aggregations = self.metric.archive_policy.aggregations measures = self.storage.get_aggregated_measures( - self.metric, aggregations) + {self.metric: aggregations}) + self.assertEqual(1, len(measures)) + self.assertIn(self.metric, measures) + measures = measures[self.metric] self.assertEqual(len(aggregations), len(measures)) self.assertGreater(len(measures[aggregations[0]]), 0) for agg in aggregations: -- GitLab From f21ea84af6b6de24e3e36e6ce64b792a7e51ea2b Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Tue, 19 Jun 2018 10:53:46 +0200 Subject: [PATCH 1366/1483] Add automatic backport labels --- .mergify.yml | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/.mergify.yml b/.mergify.yml index c9d2138d..14f682f1 100644 --- a/.mergify.yml +++ b/.mergify.yml @@ -9,6 +9,12 @@ rules: required_approving_review_count: 2 merge_strategy: method: rebase + automated_backport_labels: + backport-to-4.2: stable/4.2 + backport-to-4.1: stable/4.1 + backport-to-4.0: stable/4.0 + backport-to-3.1: stable/3.1 + backport-to-3.0: stable/3.0 branches: stable/.*: protection: -- GitLab From 11a2520a529a54b2926bb48c5e76656845325f5e Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Tue, 19 Jun 2018 08:46:10 +0200 Subject: [PATCH 1367/1483] api: avoid some indexer queries If no resource_id or no metric names are passed in the batch payload, we can skip some sql queries. This change does that. 
Closes-bug: #911 --- gnocchi/rest/api.py | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/gnocchi/rest/api.py b/gnocchi/rest/api.py index 3ef19c7b..bd9a8336 100644 --- a/gnocchi/rest/api.py +++ b/gnocchi/rest/api.py @@ -1549,9 +1549,14 @@ class ResourcesMetricsMeasuresBatchController(rest.RestController): attribute_filter = {"or": []} for original_resource_id, resource_id in body: names = list(body[(original_resource_id, resource_id)].keys()) - attribute_filter["or"].append({"and": [ - {"=": {"resource_id": resource_id}}, - {"in": {"name": names}}]}) + if names: + attribute_filter["or"].append({"and": [ + {"=": {"resource_id": resource_id}}, + {"in": {"name": names}}]}) + + if not attribute_filter["or"]: + pecan.response.status = 202 + return all_metrics = collections.defaultdict(list) for metric in pecan.request.indexer.list_metrics( -- GitLab From 2ad335bafbf5a7abfc83845b77e63882cea7f3db Mon Sep 17 00:00:00 2001 From: gordon chung Date: Sun, 3 Jun 2018 22:47:55 +0000 Subject: [PATCH 1368/1483] support resampling on calendar dates support ability to groupby year, half, quarter, month, and week. --- doc/source/rest.j2 | 12 ++ doc/source/rest.yaml | 3 + gnocchi/calendar.py | 100 ++++++++++++++ gnocchi/carbonara.py | 22 +++- gnocchi/rest/api.py | 7 +- .../functional/gabbits/resample-calendar.yaml | 122 ++++++++++++++++++ gnocchi/tests/test_calendar.py | 110 ++++++++++++++++ .../calendar-groups-1336b6d097c01b64.yaml | 6 + 8 files changed, 374 insertions(+), 8 deletions(-) create mode 100644 gnocchi/calendar.py create mode 100644 gnocchi/tests/functional/gabbits/resample-calendar.yaml create mode 100644 gnocchi/tests/test_calendar.py create mode 100644 releasenotes/notes/calendar-groups-1336b6d097c01b64.yaml diff --git a/doc/source/rest.j2 b/doc/source/rest.j2 index fe2fc164..678bee41 100644 --- a/doc/source/rest.j2 +++ b/doc/source/rest.j2 @@ -187,6 +187,18 @@ can be resampled to a new |granularity|. 
{{ scenarios['get-measures-resample']['doc'] }} +Time-series data can also be grouped by calendar dates beyond a standard day. +The resulting groupings are tied to the leading date of the group. For example, +grouping on month returns a monthly aggregate linked to the first of the month. +Available calendar groups are:: + + `Y` - by year. + `H` - by half. + `Q` - by quarter. + `M` - by month. + `W` - by week, starting on Sunday. + +{{ scenarios['get-measures-resample-calendar']['doc'] }} .. note:: diff --git a/doc/source/rest.yaml b/doc/source/rest.yaml index 32666e59..705dca52 100644 --- a/doc/source/rest.yaml +++ b/doc/source/rest.yaml @@ -311,6 +311,9 @@ - name: get-measures-resample request: GET /v1/metric/{{ scenarios['create-metric']['response'].json['id'] }}/measures?granularity=1&resample=5 HTTP/1.1 +- name: get-measures-resample-calendar + request: GET /v1/metric/{{ scenarios['create-metric']['response'].json['id'] }}/measures?granularity=1&resample=W HTTP/1.1 + - name: create-resource-generic request: | POST /v1/resource/generic HTTP/1.1 diff --git a/gnocchi/calendar.py b/gnocchi/calendar.py new file mode 100644 index 00000000..9134ea97 --- /dev/null +++ b/gnocchi/calendar.py @@ -0,0 +1,100 @@ +# -*- encoding: utf-8 -*- +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+"""Gregorian calendar grouping helpers.""" + +import numpy + + +def _month_of_year(datetimes): + return (datetimes.astype('datetime64[M]', copy=False) - + datetimes.astype('datetime64[Y]', copy=False) + 1) + + +def month_of_year(datetimes): + """Return the calendar month of given dates.""" + return _month_of_year(datetimes).astype(int) + + +def iso_week_of_year(datetimes): + """Return the ISO week of the year of given dates.""" + dates_offset = (datetimes.astype('datetime64[D]', copy=False) + + numpy.timedelta64(3, 'D')).astype( + 'datetime64[W]', copy=False) + return numpy.ceil( + (dates_offset.astype('datetime64[D]', copy=False) - + dates_offset.astype('datetime64[Y]', copy=False) + 1).astype(int) / + 7.0) + + +def week_and_year(datetimes): + """Return the week of the year, grouped on Sunday, for given dates.""" + return ((datetimes.astype('datetime64[D]', copy=False) + + numpy.timedelta64(4, 'D')).astype('datetime64[W]', copy=False) - + numpy.timedelta64(4, 'D')) + + +def day_of_year(datetimes): + """Return the day of the year of given dates.""" + return (datetimes.astype('datetime64[D]', copy=False) - + datetimes.astype('datetime64[Y]', copy=False)).astype(int) + + +def day_of_month(datetimes): + """Return the day of the month of given dates.""" + return (datetimes.astype('datetime64[D]', copy=False) - + datetimes.astype('datetime64[M]', copy=False) + 1).astype(int) + + +def day_of_week(datetimes): + """Return the day of the week of given dates. 
Sunday(0) to Saturday(6).""" + return (datetimes.astype('datetime64[D]', copy=False) + + numpy.timedelta64(4, 'D')).astype(int) % 7 + + +def month_and_year(datetimes): + """Return the month and year of given dates.""" + return datetimes.astype('datetime64[M]', copy=False) + + +def quarter_and_year(datetimes): + """Return the quarter per year of given dates.""" + return (((_month_of_year(datetimes) - 1) // 3) * 3) + year(datetimes) + + +def quarter_of_year(datetimes): + """Return the quarter of the year of given dates.""" + return ((_month_of_year(datetimes) - 1) // 3 + 1).astype(int) + + +def half_and_year(datetimes): + """Return the half per year of given dates.""" + return (((_month_of_year(datetimes) - 1) // 6) * 6) + year(datetimes) + + +def half_of_year(datetimes): + """Return the half of the year of given dates.""" + return ((_month_of_year(datetimes) - 1) // 6 + 1).astype(int) + + +def year(datetimes): + """Return the year of given dates.""" + return datetimes.astype('datetime64[Y]', copy=False) + + +GROUPINGS = { + 'Y': year, + 'H': half_and_year, + 'Q': quarter_and_year, + 'M': month_and_year, + 'W': week_and_year} diff --git a/gnocchi/carbonara.py b/gnocchi/carbonara.py index 47315a6d..02eeacca 100644 --- a/gnocchi/carbonara.py +++ b/gnocchi/carbonara.py @@ -29,6 +29,8 @@ import lz4.block import numpy import six +from gnocchi import calendar + UNIX_UNIVERSAL_START64 = numpy.datetime64("1970", 'ns') ONE_SECOND = numpy.timedelta64(1, 's') @@ -107,18 +109,23 @@ class GroupedTimeSeries(object): # duplicate timestamps, it uses numpy.unique that sorted list, but # we always assume the orderd to be the same as the input. 
self.granularity = granularity + self.can_derive = isinstance(granularity, numpy.timedelta64) self.start = start if start is None: self._ts = ts self._ts_for_derive = ts else: self._ts = ts[numpy.searchsorted(ts['timestamps'], start):] - start_derive = start - granularity - self._ts_for_derive = ts[ - numpy.searchsorted(ts['timestamps'], start_derive): - ] - - self.indexes = round_timestamp(self._ts['timestamps'], granularity) + if self.can_derive: + start_derive = start - granularity + self._ts_for_derive = ts[ + numpy.searchsorted(ts['timestamps'], start_derive): + ] + if self.can_derive: + self.indexes = round_timestamp(self._ts['timestamps'], granularity) + elif calendar.GROUPINGS.get(granularity): + self.indexes = calendar.GROUPINGS.get(granularity)( + self._ts['timestamps']) self.tstamps, self.counts = numpy.unique(self.indexes, return_counts=True) @@ -197,6 +204,9 @@ class GroupedTimeSeries(object): return make_timeseries(self.tstamps, values) def derived(self): + if not self.can_derive: + raise TypeError('Cannot derive aggregates on calendar ' + 'granularities.') timestamps = self._ts_for_derive['timestamps'][1:] values = numpy.diff(self._ts_for_derive['values']) # FIXME(sileht): create some alternative __init__ to avoid creating diff --git a/gnocchi/rest/api.py b/gnocchi/rest/api.py index bd9a8336..3066c103 100644 --- a/gnocchi/rest/api.py +++ b/gnocchi/rest/api.py @@ -33,6 +33,7 @@ import werkzeug.http import gnocchi from gnocchi import archive_policy +from gnocchi import calendar from gnocchi import chef from gnocchi.cli import metricd from gnocchi import incoming @@ -464,7 +465,8 @@ class MetricController(rest.RestController): if not granularity: abort(400, 'A granularity must be specified to resample') try: - resample = utils.to_timespan(resample) + resample = (resample if calendar.GROUPINGS.get(resample) else + utils.to_timespan(resample)) except ValueError as e: abort(400, six.text_type(e)) @@ -1821,7 +1823,8 @@ class 
AggregationController(rest.RestController): if not granularity: abort(400, 'A granularity must be specified to resample') try: - resample = utils.to_timespan(resample) + resample = (resample if calendar.GROUPINGS.get(resample) else + utils.to_timespan(resample)) except ValueError as e: abort(400, six.text_type(e)) diff --git a/gnocchi/tests/functional/gabbits/resample-calendar.yaml b/gnocchi/tests/functional/gabbits/resample-calendar.yaml new file mode 100644 index 00000000..5b6dc054 --- /dev/null +++ b/gnocchi/tests/functional/gabbits/resample-calendar.yaml @@ -0,0 +1,122 @@ +fixtures: + - ConfigFixture + +defaults: + request_headers: + content-type: application/json + # User foobar + authorization: "basic Zm9vYmFyOg==" + +tests: + - name: create archive policy + desc: for later use + POST: /v1/archive_policy + request_headers: + # User admin + authorization: "basic YWRtaW46" + data: + name: cookies + definition: + - granularity: 1 day + status: 201 + + - name: get metric empty + GET: /v1/metric + status: 200 + response_strings: + - "[]" + + - name: create valid metric + POST: /v1/metric + data: + archive_policy_name: cookies + status: 201 + response_json_paths: + $.archive_policy_name: cookies + + - name: get valid metric id + GET: /v1/metric/$RESPONSE['$.id'] + status: 200 + response_json_paths: + $.archive_policy.name: cookies + + - name: list valid metrics + GET: /v1/metric + response_json_paths: + $[0].archive_policy.name: cookies + + - name: push measurements to metric + POST: /v1/metric/$HISTORY['list valid metrics'].$RESPONSE['$[0].id']/measures + data: + - timestamp: "2015-03-01T14:34:12" + value: 10 + - timestamp: "2015-03-06T14:34:12" + value: 12 + - timestamp: "2015-04-01T14:34:12" + value: 2 + - timestamp: "2015-04-06T14:34:12" + value: 4 + - timestamp: "2015-10-06T14:34:12" + value: 7 + - timestamp: "2016-01-06T14:34:12" + value: 12 + - timestamp: "2016-02-06T14:34:12" + value: 4 + status: 202 + + - name: get measurements from metric + GET: 
/v1/metric/$HISTORY['list valid metrics'].$RESPONSE['$[0].id']/measures?refresh=true + response_json_paths: + $: + - ["2015-03-01T00:00:00+00:00", 86400.0, 10.0] + - ["2015-03-06T00:00:00+00:00", 86400.0, 12.0] + - ["2015-04-01T00:00:00+00:00", 86400.0, 2.0] + - ["2015-04-06T00:00:00+00:00", 86400.0, 4.0] + - ["2015-10-06T00:00:00+00:00", 86400.0, 7.0] + - ["2016-01-06T00:00:00+00:00", 86400.0, 12.0] + - ["2016-02-06T00:00:00+00:00", 86400.0, 4.0] + + - name: get measurements from metric and resample calendar year + GET: /v1/metric/$HISTORY['list valid metrics'].$RESPONSE['$[0].id']/measures?resample=Y&granularity=86400 + response_json_paths: + $: + - ["2015-01-01T00:00:00+00:00", "Y", 7.0] + - ["2016-01-01T00:00:00+00:00", "Y", 8.0] + + - name: get measurements from metric and resample calendar year-half + GET: /v1/metric/$HISTORY['list valid metrics'].$RESPONSE['$[0].id']/measures?resample=H&granularity=86400 + response_json_paths: + $: + - ["2015-01-01T00:00:00+00:00", "H", 7.0] + - ["2015-07-01T00:00:00+00:00", "H", 7.0] + - ["2016-01-01T00:00:00+00:00", "H", 8.0] + + - name: get measurements from metric and resample calendar year-quarter + GET: /v1/metric/$HISTORY['list valid metrics'].$RESPONSE['$[0].id']/measures?resample=Q&granularity=86400 + response_json_paths: + $: + - ["2015-01-01T00:00:00+00:00", "Q", 11.0] + - ["2015-04-01T00:00:00+00:00", "Q", 3.0] + - ["2015-10-01T00:00:00+00:00", "Q", 7.0] + - ["2016-01-01T00:00:00+00:00", "Q", 8.0] + + - name: get measurements from metric and resample calendar year-month + GET: /v1/metric/$HISTORY['list valid metrics'].$RESPONSE['$[0].id']/measures?resample=M&granularity=86400 + response_json_paths: + $: + - ["2015-03-01T00:00:00+00:00", "M", 11.0] + - ["2015-04-01T00:00:00+00:00", "M", 3.0] + - ["2015-10-01T00:00:00+00:00", "M", 7.0] + - ["2016-01-01T00:00:00+00:00", "M", 12.0] + - ["2016-02-01T00:00:00+00:00", "M", 4.0] + + - name: get measurements from metric and resample calendar year-week + GET: 
/v1/metric/$HISTORY['list valid metrics'].$RESPONSE['$[0].id']/measures?resample=W&granularity=86400 + response_json_paths: + $: + - ["2015-03-01T00:00:00+00:00", "W", 11.0] + - ["2015-03-29T00:00:00+00:00", "W", 2.0] + - ["2015-04-05T00:00:00+00:00", "W", 4.0] + - ["2015-10-04T00:00:00+00:00", "W", 7.0] + - ["2016-01-03T00:00:00+00:00", "W", 12.0] + - ["2016-01-31T00:00:00+00:00", "W", 4.0] diff --git a/gnocchi/tests/test_calendar.py b/gnocchi/tests/test_calendar.py new file mode 100644 index 00000000..297468dc --- /dev/null +++ b/gnocchi/tests/test_calendar.py @@ -0,0 +1,110 @@ +# -*- encoding: utf-8 -*- +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+import numpy +from numpy.testing import assert_equal + +from gnocchi import calendar +from gnocchi.tests import base as tests_base + + +class TestCalender(tests_base.TestCase): + + def test_get_year(self): + dates = numpy.array(['2018-01-01', '2019-01-01', '2020-01-01'], + dtype='datetime64[ns]') + assert_equal(numpy.array(['2018', '2019', '2020'], + dtype='datetime64[Y]'), + calendar.year(dates)) + + def test_half_of_year(self): + dates = numpy.arange('2018-01-01', '2018-12-31', dtype='datetime64[D]') + assert_equal(numpy.array([1] * 181 + [2] * 183), + calendar.half_of_year(dates)) + + def test_half_and_year(self): + dates = numpy.arange('2018-01-01', '2018-12-31', dtype='datetime64[D]') + assert_equal(numpy.array(['2018-01'] * 181 + ['2018-07'] * 183, + dtype='datetime64[M]'), + calendar.half_and_year(dates)) + + def test_quarter_of_year(self): + dates = numpy.arange('2018-01-01', '2018-12-31', dtype='datetime64[D]') + assert_equal(numpy.array([1] * 90 + [2] * 91 + [3] * 92 + [4] * 91), + calendar.quarter_of_year(dates)) + + def test_quarter_and_year(self): + dates = numpy.arange('2018-01-01', '2018-12-31', dtype='datetime64[D]') + assert_equal(numpy.array(['2018-01'] * 90 + ['2018-04'] * 91 + + ['2018-07'] * 92 + ['2018-10'] * 91, + dtype='datetime64[M]'), + calendar.quarter_and_year(dates)) + + def test_get_month_and_year(self): + dates = numpy.array(['2018-01-01', '2019-03-01', '2020-05-01'], + dtype='datetime64[ns]') + assert_equal(numpy.array(['2018-01', '2019-03', '2020-05'], + dtype='datetime64[M]'), + calendar.month_and_year(dates)) + + def test_day_of_week(self): + dates = numpy.arange('2010-01-01', '2020-12-31', dtype='datetime64[D]') + expected = numpy.array([i.isocalendar()[2] for i in + dates.astype('datetime64[ms]').astype(object)]) + # isocalendar sets sunday as 7, we set it as 0. 
+ expected[expected == 7] = 0 + assert_equal(expected, calendar.day_of_week(dates)) + + def test_day_of_month(self): + dates = numpy.array(['2017-12-29', '2017-12-30', '2017-12-31', + '2018-01-01', '2018-01-02', '2018-01-03'], + dtype='datetime64[ns]') + assert_equal(numpy.array([29, 30, 31, 1, 2, 3]), + calendar.day_of_month(dates)) + + def test_day_of_year(self): + dates = numpy.array(['2017-12-29', '2017-12-30', '2017-12-31', + '2018-01-01', '2018-01-02', '2018-01-03'], + dtype='datetime64[ns]') + assert_equal(numpy.array([362, 363, 364, 0, 1, 2]), + calendar.day_of_year(dates)) + dates = numpy.array(['2016-12-29', '2016-12-30', '2016-12-31'], + dtype='datetime64[ns]') + assert_equal(numpy.array([363, 364, 365]), + calendar.day_of_year(dates)) + + def test_iso_week_of_year(self): + dates = numpy.arange('2010-01-01', '2020-12-31', dtype='datetime64[D]') + expected = numpy.array([i.isocalendar()[1] for i in + dates.astype('datetime64[ms]').astype(object)]) + assert_equal(expected, calendar.iso_week_of_year(dates)) + + def test_week_and_year(self): + dates = numpy.array(['2017-12-29', '2017-12-30', '2017-12-31', + '2018-01-01', '2018-01-02', '2018-01-03'], + dtype='datetime64[ns]') + assert_equal(numpy.array(['2017-12-24', '2017-12-24', '2017-12-31', + '2017-12-31', '2017-12-31', '2017-12-31'], + dtype='datetime64[D]'), + calendar.week_and_year(dates)) + dates = numpy.array(['2016-02-27', '2016-02-28', '2016-02-29'], + dtype='datetime64[ns]') + assert_equal(numpy.array(['2016-02-21', '2016-02-28', '2016-02-28'], + dtype='datetime64[D]'), + calendar.week_and_year(dates)) + + def test_month_of_year(self): + dates = numpy.array(['2018-01-01', '2019-03-01', '2020-05-01'], + dtype='datetime64[ns]') + assert_equal(numpy.array([1, 3, 5]), + calendar.month_of_year(dates)) diff --git a/releasenotes/notes/calendar-groups-1336b6d097c01b64.yaml b/releasenotes/notes/calendar-groups-1336b6d097c01b64.yaml new file mode 100644 index 00000000..e5fffd9c --- /dev/null +++ 
b/releasenotes/notes/calendar-groups-1336b6d097c01b64.yaml @@ -0,0 +1,6 @@ +--- +features: + - | + Resampling based on calendar dates are now supported. Using the `resample` + parameter, users can specify grouping by: `Y`, `H`, `Q`, `M`, or `W`. + Details on each grouping can be found in docs. -- GitLab From cb8a0e46c8815e0bd1dc25ff01c58eb9debe79a0 Mon Sep 17 00:00:00 2001 From: Andrei Ozerov Date: Sun, 1 Jul 2018 20:23:07 +0300 Subject: [PATCH 1369/1483] Docs: add reference to a Go SDK Add notes about open source Go SDK implementation for the Gnocchi API with links and installation instruction. --- doc/source/client.rst | 18 +++++++++++++++++- 1 file changed, 17 insertions(+), 1 deletion(-) diff --git a/doc/source/client.rst b/doc/source/client.rst index 6aa428a1..4be6893c 100644 --- a/doc/source/client.rst +++ b/doc/source/client.rst @@ -2,7 +2,10 @@ Client ======== -Gnocchi currently only provides a Python client and SDK which can be installed +Python +------ + +Gnocchi officially provides a Python client and SDK which can be installed using *pip*:: pip install gnocchiclient @@ -10,4 +13,17 @@ using *pip*:: This package provides the `gnocchi` command line tool that can be used to send requests to Gnocchi. You can read the `full documentation online`_. +Go +-- + +There is an open source Go implementation for the SDK, provided by the +`Gophercloud` project. +It can be installed using *go get*:: + + go get github.com/gophercloud/utils/gnocchi + +This package provides the Go SDK only. You can read the `godoc reference`_. + .. _full documentation online: http://gnocchi.xyz/gnocchiclient +.. _Gophercloud: https://github.com/gophercloud +.. 
_godoc reference: https://godoc.org/github.com/gophercloud/utils -- GitLab From f8e83607a60772ae4b81cc05197caa44785caac0 Mon Sep 17 00:00:00 2001 From: gordon chung Date: Tue, 3 Jul 2018 03:58:52 +0000 Subject: [PATCH 1370/1483] use fromiter to build array incoming data isn't that large but if we don't need to build a list why build it. --- gnocchi/incoming/__init__.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/gnocchi/incoming/__init__.py b/gnocchi/incoming/__init__.py index 562a18ee..d3620e99 100644 --- a/gnocchi/incoming/__init__.py +++ b/gnocchi/incoming/__init__.py @@ -160,8 +160,8 @@ class IncomingDriver(object): raise def _encode_measures(self, measures): - return numpy.array(list(measures), - dtype=TIMESERIES_ARRAY_DTYPE).tobytes() + return numpy.fromiter(measures, + dtype=TIMESERIES_ARRAY_DTYPE).tobytes() def group_metrics_by_sack(self, metrics): """Iterate on a list of metrics, grouping them by sack. -- GitLab From 888ca2f1c95287a8da81f08e89457281b40be3f8 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Mon, 16 Jul 2018 18:11:18 +0200 Subject: [PATCH 1371/1483] redis: fix list_split_keys with multiple metrics The iteration over the results was wrongly computed and did not return data for the second metric and following. 
--- gnocchi/storage/redis.py | 8 +++++--- gnocchi/tests/test_storage.py | 28 ++++++++++++++++++++++++++++ 2 files changed, 33 insertions(+), 3 deletions(-) diff --git a/gnocchi/storage/redis.py b/gnocchi/storage/redis.py index 96aa212e..d65027af 100644 --- a/gnocchi/storage/redis.py +++ b/gnocchi/storage/redis.py @@ -116,13 +116,15 @@ return ids keys = collections.defaultdict(dict) start = 0 for metric in metrics: - metric_exists_p = results.pop(0) + metric_exists_p = results[start] if not metric_exists_p: raise storage.MetricDoesNotExist(metric) aggregations = metrics_and_aggregations[metric] number_of_aggregations = len(aggregations) - keys_for_aggregations = results[start:number_of_aggregations] - start += number_of_aggregations + keys_for_aggregations = results[ + start + 1:start + 1 + number_of_aggregations + ] + start += 1 + number_of_aggregations # 1 for metric_exists_p for aggregation, k in six.moves.zip( aggregations, keys_for_aggregations): if not k: diff --git a/gnocchi/tests/test_storage.py b/gnocchi/tests/test_storage.py index e61ae9f7..b75e27a6 100644 --- a/gnocchi/tests/test_storage.py +++ b/gnocchi/tests/test_storage.py @@ -263,6 +263,34 @@ class TestStorageDriver(tests_base.TestCase): for agg in aggregations: self.assertEqual(agg, measures[agg].aggregation) + def test_get_aggregated_measures_multiple(self): + self.incoming.add_measures(self.metric.id, [ + incoming.Measure(datetime64(2014, 1, 1, 12, i, j), 100) + for i in six.moves.range(0, 60) for j in six.moves.range(0, 60)]) + m2, __ = self._create_metric('medium') + self.incoming.add_measures(m2.id, [ + incoming.Measure(datetime64(2014, 1, 1, 12, i, j), 100) + for i in six.moves.range(0, 60) for j in six.moves.range(0, 60)]) + self.trigger_processing([self.metric, m2]) + + aggregations = self.metric.archive_policy.aggregations + + measures = self.storage.get_aggregated_measures( + {self.metric: aggregations, + m2: m2.archive_policy.aggregations}) + + self.assertEqual({self.metric, m2}, 
set(measures.keys())) + self.assertEqual(len(aggregations), len(measures[self.metric])) + self.assertGreater(len(measures[self.metric][aggregations[0]]), 0) + for agg in aggregations: + self.assertEqual(agg, measures[self.metric][agg].aggregation) + self.assertEqual(len(m2.archive_policy.aggregations), + len(measures[m2])) + self.assertGreater( + len(measures[m2][m2.archive_policy.aggregations[0]]), 0) + for agg in m2.archive_policy.aggregations: + self.assertEqual(agg, measures[m2][agg].aggregation) + def test_add_measures_big(self): m, __ = self._create_metric('high') self.incoming.add_measures(m.id, [ -- GitLab From 47d13c00245cdea8f96830e696ff0b76612f506a Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Mon, 16 Jul 2018 20:21:40 +0200 Subject: [PATCH 1372/1483] Test against python3.7 --- .travis.yml | 10 +++++----- tox.ini | 8 ++++---- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/.travis.yml b/.travis.yml index 79b10316..6f2fd7be 100644 --- a/.travis.yml +++ b/.travis.yml @@ -13,16 +13,16 @@ env: - TARGET: docs-gnocchi.xyz - TARGET: py27-mysql-ceph-upgrade-from-4.0 - - TARGET: py35-postgresql-file-upgrade-from-4.0 + - TARGET: py37-postgresql-file-upgrade-from-4.0 - TARGET: py27-mysql-ceph-upgrade-from-4.1 - - TARGET: py35-postgresql-file-upgrade-from-4.1 + - TARGET: py37-postgresql-file-upgrade-from-4.1 - TARGET: py27-mysql-ceph-upgrade-from-4.2 - - TARGET: py35-postgresql-file-upgrade-from-4.2 + - TARGET: py37-postgresql-file-upgrade-from-4.2 - TARGET: py27-mysql - - TARGET: py35-mysql + - TARGET: py37-mysql - TARGET: py27-postgresql - - TARGET: py35-postgresql + - TARGET: py37-postgresql before_script: # NOTE(sileht): We need to fetch all tags/branches for documentation. 
diff --git a/tox.ini b/tox.ini index e66a0cf7..7b4d5133 100644 --- a/tox.ini +++ b/tox.ini @@ -1,6 +1,6 @@ [tox] minversion = 2.4 -envlist = py{35,27}-{postgresql,mysql}{,-file,-swift,-ceph,-s3},pep8 +envlist = py{37,27}-{postgresql,mysql}{,-file,-swift,-ceph,-s3},pep8 skipsdist = True [testenv] @@ -50,7 +50,7 @@ commands = {toxinidir}/run-tests.sh {posargs} {toxinidir}/run-func-tests.sh {posargs} -[testenv:py35-postgresql-file-upgrade-from-4.0] +[testenv:py37-postgresql-file-upgrade-from-4.0] # We should always recreate since the script upgrade # Gnocchi we can't reuse the virtualenv recreate = True @@ -70,7 +70,7 @@ deps = gnocchi[{env:GNOCCHI_VARIANT}]>=4.0,<4.1 pifpaf[ceph,gnocchi]>=0.13 commands = pifpaf --env-prefix INDEXER run mysql -- pifpaf --env-prefix STORAGE run ceph {toxinidir}/run-upgrade-tests.sh {posargs} -[testenv:py35-postgresql-file-upgrade-from-4.1] +[testenv:py37-postgresql-file-upgrade-from-4.1] # We should always recreate since the script upgrade # Gnocchi we can't reuse the virtualenv recreate = True @@ -90,7 +90,7 @@ deps = gnocchi[{env:GNOCCHI_VARIANT}]>=4.1,<4.2 pifpaf[ceph,gnocchi]>=0.13 commands = pifpaf --env-prefix INDEXER run mysql -- pifpaf --env-prefix STORAGE run ceph {toxinidir}/run-upgrade-tests.sh {posargs} -[testenv:py35-postgresql-file-upgrade-from-4.2] +[testenv:py37-postgresql-file-upgrade-from-4.2] # We should always recreate since the script upgrade # Gnocchi we can't reuse the virtualenv recreate = True -- GitLab From 3111e92a855f0a17ea53a569581a9a42bbe275c8 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Thu, 19 Jul 2018 22:09:06 +0200 Subject: [PATCH 1373/1483] Update mergify configuration Regex must start be ^ now. 
--- .mergify.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.mergify.yml b/.mergify.yml index 14f682f1..29cafbb5 100644 --- a/.mergify.yml +++ b/.mergify.yml @@ -16,7 +16,7 @@ rules: backport-to-3.1: stable/3.1 backport-to-3.0: stable/3.0 branches: - stable/.*: + '^stable/.*': protection: required_pull_request_reviews: required_approving_review_count: 1 -- GitLab From c35201476ecf2ab8944e0a9b787040b59c6cb210 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Thu, 19 Jul 2018 16:54:04 +0200 Subject: [PATCH 1374/1483] doc: fix some syntax mistakes in Ceph paragraph --- doc/source/install.rst | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/doc/source/install.rst b/doc/source/install.rst index 9b63715a..31071729 100644 --- a/doc/source/install.rst +++ b/doc/source/install.rst @@ -53,9 +53,9 @@ be created for example with: ceph auth get-or-create client.gnocchi mon "allow r" osd "allow rwx pool=metrics" -Gnocchi leverages some _librados_ features (omap, async, operation context) -available in the Python binding only since python-rados >= 12.2.0. To handle -this, Gnocchi uses _cradox_ python library which has exactly the same API but +Gnocchi leverages some *librados* features (omap, async, operation context) +available in the Python binding only since *python-rados* >= 12.2.0. To handle +this, Gnocchi uses *cradox* python library which has exactly the same API but works with Ceph >= 0.80.0. 
If Ceph and python-rados are >= 12.2.0, the cradox Python library becomes -- GitLab From 1d64df451947a2d4748be34e3037d5bdab074123 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Tue, 24 Jul 2018 20:55:05 +0200 Subject: [PATCH 1375/1483] tests: disable warning for py27-mysql-ceph-upgrade-from-4.0 --- tox.ini | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tox.ini b/tox.ini index 7b4d5133..4c5563db 100644 --- a/tox.ini +++ b/tox.ini @@ -64,7 +64,9 @@ commands = pifpaf --env-prefix INDEXER run postgresql {toxinidir}/run-upgrade-te # We should always recreate since the script upgrade # Gnocchi we can't reuse the virtualenv recreate = True -setenv = GNOCCHI_VARIANT=test,mysql,ceph,ceph_recommended_lib +setenv = + GNOCCHI_VARIANT=test,mysql,ceph,ceph_recommended_lib + PYTHONWARNINGS=ignore deps = gnocchi[{env:GNOCCHI_VARIANT}]>=4.0,<4.1 gnocchiclient>=2.8.0 pifpaf[ceph,gnocchi]>=0.13 -- GitLab From c7c3c80733b32d0eaa535ea6b3496f8972408406 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Tue, 24 Jul 2018 16:47:16 +0200 Subject: [PATCH 1376/1483] Remove unused tox venv This from the old Zuul gate. 
--- tox.ini | 5 ----- 1 file changed, 5 deletions(-) diff --git a/tox.ini b/tox.ini index 4c5563db..13ba5eba 100644 --- a/tox.ini +++ b/tox.ini @@ -119,11 +119,6 @@ commands = flake8 [testenv:py27-cover] commands = pifpaf -g GNOCCHI_INDEXER_URL run postgresql -- python setup.py testr --coverage --testr-args="{posargs}" -[testenv:venv] -# This is used by the doc job on the gate -deps = {[testenv:docs]deps} -commands = pifpaf -g GNOCCHI_INDEXER_URL run postgresql -- {posargs} - [flake8] exclude = .tox,.eggs,doc,gnocchi/rest/prometheus/remote_pb2.py show-source = true -- GitLab From 9b9e84f641e160a5f33aeb71a2eb26b620f33181 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Thu, 12 Jul 2018 18:13:23 +0200 Subject: [PATCH 1377/1483] storage: make find_measures batched --- gnocchi/rest/api.py | 42 ++++++++------ gnocchi/storage/__init__.py | 32 ++++++----- .../functional/gabbits/search-metric.yaml | 6 ++ gnocchi/tests/test_storage.py | 57 ++++++++++--------- 4 files changed, 81 insertions(+), 56 deletions(-) diff --git a/gnocchi/rest/api.py b/gnocchi/rest/api.py index 3066c103..7c5c90cb 100644 --- a/gnocchi/rest/api.py +++ b/gnocchi/rest/api.py @@ -1493,27 +1493,35 @@ class SearchMetricController(rest.RestController): map(utils.to_timespan, arg_to_list(granularity)), reverse=True) - results = {} + metrics_and_aggregations = collections.defaultdict(list) + + for metric in metrics: + if granularity is None: + granularity = sorted(( + d.granularity + for d in metric.archive_policy.definition), + reverse=True) + for gr in granularity: + agg = metric.archive_policy.get_aggregation( + aggregation, gr) + if agg is None: + abort(400, + storage.AggregationDoesNotExist( + metric, aggregation, gr)) + metrics_and_aggregations[metric].append(agg) try: - for metric in metrics: - if granularity is None: - granularity = sorted(( - d.granularity - for d in metric.archive_policy.definition), - reverse=True) - results[str(metric.id)] = [] - for r in utils.parallel_map( - 
pecan.request.storage.find_measure, - ((metric, predicate, g, aggregation, - start, stop) - for g in granularity)): - results[str(metric.id)].extend(r) - except storage.AggregationDoesNotExist as e: + return { + str(metric.id): results + for metric, results in six.iteritems( + pecan.request.storage.find_measure( + metrics_and_aggregations, predicate, start, stop)) + } + except storage.MetricDoesNotExist as e: + # This can happen if all the metrics have been created but one + # doesn't have any measures yet. abort(400, e) - return results - class ResourcesMetricsMeasuresBatchController(rest.RestController): diff --git a/gnocchi/storage/__init__.py b/gnocchi/storage/__init__.py index 04fdd8c6..660c9204 100644 --- a/gnocchi/storage/__init__.py +++ b/gnocchi/storage/__init__.py @@ -57,6 +57,14 @@ class MetricDoesNotExist(StorageError): super(MetricDoesNotExist, self).__init__( "Metric %s does not exist" % metric) + def jsonify(self): + return { + "cause": "Metric does not exist", + "detail": { + "metric": self.metric, + }, + } + class AggregationDoesNotExist(StorageError): """Error raised when the aggregation method doesn't exists for a metric.""" @@ -716,20 +724,18 @@ class StorageDriver(object): self._store_unaggregated_timeseries(new_boundts) self.statistics["raw measures store"] += len(new_boundts) - def find_measure(self, metric, predicate, granularity, aggregation="mean", + def find_measure(self, metrics_and_aggregations, predicate, from_timestamp=None, to_timestamp=None): - agg = metric.archive_policy.get_aggregation(aggregation, granularity) - if agg is None: - raise AggregationDoesNotExist(metric, aggregation, granularity) - - try: - ts = self.get_aggregated_measures( - {metric: [agg]}, from_timestamp, to_timestamp)[metric][agg] - except MetricDoesNotExist: - return [] - return [(timestamp, ts.aggregation.granularity, value) - for timestamp, value in ts - if predicate(value)] + ts = self.get_aggregated_measures( + metrics_and_aggregations, + from_timestamp, 
to_timestamp) + return { + metric: [(timestamp, aggregation.granularity, value) + for aggregation, ts in six.iteritems(aggregations_and_ts) + for timestamp, value in ts + if predicate(value)] + for metric, aggregations_and_ts in six.iteritems(ts) + } class MeasureQuery(object): diff --git a/gnocchi/tests/functional/gabbits/search-metric.yaml b/gnocchi/tests/functional/gabbits/search-metric.yaml index 812f1d9a..f4c97d4f 100644 --- a/gnocchi/tests/functional/gabbits/search-metric.yaml +++ b/gnocchi/tests/functional/gabbits/search-metric.yaml @@ -78,12 +78,18 @@ tests: data: "=": 12 status: 200 + poll: + count: 10 + delay: 1 - name: search with multiple correct granularities POST: /v1/search/metric?metric_id=$HISTORY['get metric id'].$RESPONSE['$[0].id']&granularity=1second&granularity=2s data: "=": 12 status: 200 + poll: + count: 10 + delay: 1 - name: search with correct and incorrect granularities POST: /v1/search/metric?metric_id=$HISTORY['get metric id'].$RESPONSE['$[0].id']&granularity=1s&granularity=300 diff --git a/gnocchi/tests/test_storage.py b/gnocchi/tests/test_storage.py index b75e27a6..f1f7de1a 100644 --- a/gnocchi/tests/test_storage.py +++ b/gnocchi/tests/test_storage.py @@ -1150,46 +1150,51 @@ class TestStorageDriver(tests_base.TestCase): self.trigger_processing([self.metric, metric2]) self.assertEqual( - [ + {self.metric: [ (datetime64(2014, 1, 1), numpy.timedelta64(1, 'D'), 33), - ], - self.storage.find_measure( - self.metric, storage.MeasureQuery({u"≥": 30}), - numpy.timedelta64(1, 'D'))) + ]}, + self.storage.find_measure({ + self.metric: [self.metric.archive_policy.get_aggregation( + 'mean', numpy.timedelta64(1, 'D'))] + }, storage.MeasureQuery({u"≥": 30}))) self.assertEqual( - [ + {self.metric: [ (datetime64(2014, 1, 1, 12), numpy.timedelta64(5, 'm'), 69), (datetime64(2014, 1, 1, 12, 10), numpy.timedelta64(5, 'm'), 42) - ], - self.storage.find_measure( - self.metric, storage.MeasureQuery({u"≥": 30}), - numpy.timedelta64(5, 'm'))) + ]}, + 
self.storage.find_measure({ + self.metric: [self.metric.archive_policy.get_aggregation( + 'mean', numpy.timedelta64(5, 'm'))] + }, storage.MeasureQuery({u"≥": 30}))) self.assertEqual( - [], - self.storage.find_measure( - metric2, storage.MeasureQuery({u"≥": 30}), - numpy.timedelta64(5, 'm'))) + {metric2: []}, + self.storage.find_measure({ + metric2: [metric2.archive_policy.get_aggregation( + 'mean', numpy.timedelta64(5, 'm'))] + }, storage.MeasureQuery({u"≥": 30}))) self.assertEqual( - [], - self.storage.find_measure( - self.metric, storage.MeasureQuery({u"∧": [ - {u"eq": 100}, - {u"≠": 50}]}), - numpy.timedelta64(5, 'm'))) + {self.metric: []}, + self.storage.find_measure({ + self.metric: [self.metric.archive_policy.get_aggregation( + 'mean', numpy.timedelta64(5, 'm'))] + }, storage.MeasureQuery({u"∧": [ + {u"eq": 100}, + {u"≠": 50}]}))) self.assertEqual( - [], - self.storage.find_measure( - metric2, storage.MeasureQuery({u"∧": [ - {u"eq": 100}, - {u"≠": 50}]}), - numpy.timedelta64(5, 'm'))) + {metric2: []}, + self.storage.find_measure({ + metric2: [metric2.archive_policy.get_aggregation( + 'mean', numpy.timedelta64(5, 'm'))] + }, storage.MeasureQuery({u"∧": [ + {u"eq": 100}, + {u"≠": 50}]}))) def test_resize_policy(self): name = str(uuid.uuid4()) -- GitLab From e7c0a6d5b33ea6eb211b47da571a6bb24c6e7025 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Fri, 13 Jul 2018 14:14:13 +0200 Subject: [PATCH 1378/1483] storage: remove find_measures This is very specific to the API --- gnocchi/rest/api.py | 20 +++++++---- gnocchi/storage/__init__.py | 13 ------- gnocchi/tests/test_storage.py | 65 ----------------------------------- 3 files changed, 13 insertions(+), 85 deletions(-) diff --git a/gnocchi/rest/api.py b/gnocchi/rest/api.py index 7c5c90cb..782af8eb 100644 --- a/gnocchi/rest/api.py +++ b/gnocchi/rest/api.py @@ -1,6 +1,6 @@ # -*- encoding: utf-8 -*- # -# Copyright © 2016-2017 Red Hat, Inc. +# Copyright © 2016-2018 Red Hat, Inc. 
# Copyright © 2014-2015 eNovance # # Licensed under the Apache License, Version 2.0 (the "License"); you may @@ -1511,17 +1511,23 @@ class SearchMetricController(rest.RestController): metrics_and_aggregations[metric].append(agg) try: - return { - str(metric.id): results - for metric, results in six.iteritems( - pecan.request.storage.find_measure( - metrics_and_aggregations, predicate, start, stop)) - } + timeseries = pecan.request.storage.get_aggregated_measures( + metrics_and_aggregations, start, stop) except storage.MetricDoesNotExist as e: # This can happen if all the metrics have been created but one # doesn't have any measures yet. abort(400, e) + return { + str(metric.id): [ + (timestamp, aggregation.granularity, value) + for aggregation, ts in six.iteritems(aggregations_and_ts) + for timestamp, value in ts + if predicate(value) + ] + for metric, aggregations_and_ts in six.iteritems(timeseries) + } + class ResourcesMetricsMeasuresBatchController(rest.RestController): diff --git a/gnocchi/storage/__init__.py b/gnocchi/storage/__init__.py index 660c9204..7a05e2cf 100644 --- a/gnocchi/storage/__init__.py +++ b/gnocchi/storage/__init__.py @@ -724,19 +724,6 @@ class StorageDriver(object): self._store_unaggregated_timeseries(new_boundts) self.statistics["raw measures store"] += len(new_boundts) - def find_measure(self, metrics_and_aggregations, predicate, - from_timestamp=None, to_timestamp=None): - ts = self.get_aggregated_measures( - metrics_and_aggregations, - from_timestamp, to_timestamp) - return { - metric: [(timestamp, aggregation.granularity, value) - for aggregation, ts in six.iteritems(aggregations_and_ts) - for timestamp, value in ts - if predicate(value)] - for metric, aggregations_and_ts in six.iteritems(ts) - } - class MeasureQuery(object): binary_operators = { diff --git a/gnocchi/tests/test_storage.py b/gnocchi/tests/test_storage.py index f1f7de1a..d02c6e73 100644 --- a/gnocchi/tests/test_storage.py +++ b/gnocchi/tests/test_storage.py @@ -1131,71 
+1131,6 @@ class TestStorageDriver(tests_base.TestCase): self.storage.get_measures, self.metric, aggregations) - def test_find_measures(self): - metric2, __ = self._create_metric() - self.incoming.add_measures(self.metric.id, [ - incoming.Measure(datetime64(2014, 1, 1, 12, 0, 1,), 69), - incoming.Measure(datetime64(2014, 1, 1, 12, 7, 31), 42), - incoming.Measure(datetime64(2014, 1, 1, 12, 5, 31), 8), - incoming.Measure(datetime64(2014, 1, 1, 12, 9, 31), 4), - incoming.Measure(datetime64(2014, 1, 1, 12, 12, 45), 42), - ]) - - self.incoming.add_measures(metric2.id, [ - incoming.Measure(datetime64(2014, 1, 1, 12, 0, 5), 9), - incoming.Measure(datetime64(2014, 1, 1, 12, 7, 31), 2), - incoming.Measure(datetime64(2014, 1, 1, 12, 9, 31), 6), - incoming.Measure(datetime64(2014, 1, 1, 12, 13, 10), 2), - ]) - self.trigger_processing([self.metric, metric2]) - - self.assertEqual( - {self.metric: [ - (datetime64(2014, 1, 1), - numpy.timedelta64(1, 'D'), 33), - ]}, - self.storage.find_measure({ - self.metric: [self.metric.archive_policy.get_aggregation( - 'mean', numpy.timedelta64(1, 'D'))] - }, storage.MeasureQuery({u"≥": 30}))) - - self.assertEqual( - {self.metric: [ - (datetime64(2014, 1, 1, 12), - numpy.timedelta64(5, 'm'), 69), - (datetime64(2014, 1, 1, 12, 10), - numpy.timedelta64(5, 'm'), 42) - ]}, - self.storage.find_measure({ - self.metric: [self.metric.archive_policy.get_aggregation( - 'mean', numpy.timedelta64(5, 'm'))] - }, storage.MeasureQuery({u"≥": 30}))) - - self.assertEqual( - {metric2: []}, - self.storage.find_measure({ - metric2: [metric2.archive_policy.get_aggregation( - 'mean', numpy.timedelta64(5, 'm'))] - }, storage.MeasureQuery({u"≥": 30}))) - - self.assertEqual( - {self.metric: []}, - self.storage.find_measure({ - self.metric: [self.metric.archive_policy.get_aggregation( - 'mean', numpy.timedelta64(5, 'm'))] - }, storage.MeasureQuery({u"∧": [ - {u"eq": 100}, - {u"≠": 50}]}))) - - self.assertEqual( - {metric2: []}, - self.storage.find_measure({ - metric2: 
[metric2.archive_policy.get_aggregation( - 'mean', numpy.timedelta64(5, 'm'))] - }, storage.MeasureQuery({u"∧": [ - {u"eq": 100}, - {u"≠": 50}]}))) - def test_resize_policy(self): name = str(uuid.uuid4()) ap = archive_policy.ArchivePolicy(name, 0, [(3, 5)]) -- GitLab From 7c1ef428fe040d2a61e45695ec438295b9e14272 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Fri, 13 Jul 2018 14:21:36 +0200 Subject: [PATCH 1379/1483] storage: move MeasureQuery in REST API This is specific to the REST API. --- gnocchi/rest/api.py | 99 ++++++++++++++++++++++++++++++++++- gnocchi/storage/__init__.py | 97 ---------------------------------- gnocchi/tests/test_rest.py | 63 ++++++++++++++++++++++ gnocchi/tests/test_storage.py | 60 --------------------- 4 files changed, 160 insertions(+), 159 deletions(-) diff --git a/gnocchi/rest/api.py b/gnocchi/rest/api.py index 782af8eb..4be3f795 100644 --- a/gnocchi/rest/api.py +++ b/gnocchi/rest/api.py @@ -1457,6 +1457,101 @@ class SearchMetricController(rest.RestController): ) ) + class MeasureQuery(object): + binary_operators = { + u"=": operator.eq, + u"==": operator.eq, + u"eq": operator.eq, + + u"<": operator.lt, + u"lt": operator.lt, + + u">": operator.gt, + u"gt": operator.gt, + + u"<=": operator.le, + u"≤": operator.le, + u"le": operator.le, + + u">=": operator.ge, + u"≥": operator.ge, + u"ge": operator.ge, + + u"!=": operator.ne, + u"≠": operator.ne, + u"ne": operator.ne, + + u"%": operator.mod, + u"mod": operator.mod, + + u"+": operator.add, + u"add": operator.add, + + u"-": operator.sub, + u"sub": operator.sub, + + u"*": operator.mul, + u"×": operator.mul, + u"mul": operator.mul, + + u"/": operator.truediv, + u"÷": operator.truediv, + u"div": operator.truediv, + + u"**": operator.pow, + u"^": operator.pow, + u"pow": operator.pow, + } + + multiple_operators = { + u"or": any, + u"∨": any, + u"and": all, + u"∧": all, + } + + def __init__(self, tree): + self._eval = self.build_evaluator(tree) + + def __call__(self, value): + return 
self._eval(value) + + def build_evaluator(self, tree): + try: + operator, nodes = list(tree.items())[0] + except Exception: + return lambda value: tree + try: + op = self.multiple_operators[operator] + except KeyError: + try: + op = self.binary_operators[operator] + except KeyError: + raise self.InvalidQuery("Unknown operator %s" % operator) + return self._handle_binary_op(op, nodes) + return self._handle_multiple_op(op, nodes) + + def _handle_multiple_op(self, op, nodes): + elements = [self.build_evaluator(node) for node in nodes] + return lambda value: op((e(value) for e in elements)) + + def _handle_binary_op(self, op, node): + try: + iterator = iter(node) + except Exception: + return lambda value: op(value, node) + nodes = list(iterator) + if len(nodes) != 2: + raise self.InvalidQuery( + "Binary operator %s needs 2 arguments, %d given" % + (op, len(nodes))) + node0 = self.build_evaluator(node[0]) + node1 = self.build_evaluator(node[1]) + return lambda value: op(node0(value), node1(value)) + + class InvalidQuery(Exception): + pass + @pecan.expose('json') def post(self, metric_id, start=None, stop=None, aggregation='mean', granularity=None): @@ -1484,8 +1579,8 @@ class SearchMetricController(rest.RestController): abort(400, "Invalid value for stop") try: - predicate = storage.MeasureQuery(query) - except storage.InvalidQuery as e: + predicate = self.MeasureQuery(query) + except self.MeasureQuery.InvalidQuery as e: abort(400, six.text_type(e)) if granularity is not None: diff --git a/gnocchi/storage/__init__.py b/gnocchi/storage/__init__.py index 7a05e2cf..7d2e0a0d 100644 --- a/gnocchi/storage/__init__.py +++ b/gnocchi/storage/__init__.py @@ -45,10 +45,6 @@ class StorageError(Exception): pass -class InvalidQuery(StorageError): - pass - - class MetricDoesNotExist(StorageError): """Error raised when this metric does not exist.""" @@ -723,96 +719,3 @@ class StorageDriver(object): with self.statistics.time("raw measures store"): 
self._store_unaggregated_timeseries(new_boundts) self.statistics["raw measures store"] += len(new_boundts) - - -class MeasureQuery(object): - binary_operators = { - u"=": operator.eq, - u"==": operator.eq, - u"eq": operator.eq, - - u"<": operator.lt, - u"lt": operator.lt, - - u">": operator.gt, - u"gt": operator.gt, - - u"<=": operator.le, - u"≤": operator.le, - u"le": operator.le, - - u">=": operator.ge, - u"≥": operator.ge, - u"ge": operator.ge, - - u"!=": operator.ne, - u"≠": operator.ne, - u"ne": operator.ne, - - u"%": operator.mod, - u"mod": operator.mod, - - u"+": operator.add, - u"add": operator.add, - - u"-": operator.sub, - u"sub": operator.sub, - - u"*": operator.mul, - u"×": operator.mul, - u"mul": operator.mul, - - u"/": operator.truediv, - u"÷": operator.truediv, - u"div": operator.truediv, - - u"**": operator.pow, - u"^": operator.pow, - u"pow": operator.pow, - } - - multiple_operators = { - u"or": any, - u"∨": any, - u"and": all, - u"∧": all, - } - - def __init__(self, tree): - self._eval = self.build_evaluator(tree) - - def __call__(self, value): - return self._eval(value) - - def build_evaluator(self, tree): - try: - operator, nodes = list(tree.items())[0] - except Exception: - return lambda value: tree - try: - op = self.multiple_operators[operator] - except KeyError: - try: - op = self.binary_operators[operator] - except KeyError: - raise InvalidQuery("Unknown operator %s" % operator) - return self._handle_binary_op(op, nodes) - return self._handle_multiple_op(op, nodes) - - def _handle_multiple_op(self, op, nodes): - elements = [self.build_evaluator(node) for node in nodes] - return lambda value: op((e(value) for e in elements)) - - def _handle_binary_op(self, op, node): - try: - iterator = iter(node) - except Exception: - return lambda value: op(value, node) - nodes = list(iterator) - if len(nodes) != 2: - raise InvalidQuery( - "Binary operator %s needs 2 arguments, %d given" % - (op, len(nodes))) - node0 = self.build_evaluator(node[0]) - node1 
= self.build_evaluator(node[1]) - return lambda value: op(node0(value), node1(value)) diff --git a/gnocchi/tests/test_rest.py b/gnocchi/tests/test_rest.py index e2ece5a9..8825e383 100644 --- a/gnocchi/tests/test_rest.py +++ b/gnocchi/tests/test_rest.py @@ -1939,3 +1939,66 @@ class QueryStringSearchAttrFilterTest(tests_base.TestCase): ]}, {"=": {"foo": "quote"}}, ]}) + + +class TestMeasureQuery(tests_base.TestCase): + def test_equal(self): + q = api.SearchMetricController.MeasureQuery({"=": 4}) + self.assertTrue(q(4)) + self.assertFalse(q(40)) + + def test_gt(self): + q = api.SearchMetricController.MeasureQuery({">": 4}) + self.assertTrue(q(40)) + self.assertFalse(q(4)) + + def test_and(self): + q = api.SearchMetricController.MeasureQuery( + {"and": [{">": 4}, {"<": 10}]}) + self.assertTrue(q(5)) + self.assertFalse(q(40)) + self.assertFalse(q(1)) + + def test_or(self): + q = api.SearchMetricController.MeasureQuery( + {"or": [{"=": 4}, {"=": 10}]}) + self.assertTrue(q(4)) + self.assertTrue(q(10)) + self.assertFalse(q(-1)) + + def test_modulo(self): + q = api.SearchMetricController.MeasureQuery( + {"=": [{"%": 5}, 0]}) + self.assertTrue(q(5)) + self.assertTrue(q(10)) + self.assertFalse(q(-1)) + self.assertFalse(q(6)) + + def test_math(self): + q = api.SearchMetricController.MeasureQuery( + { + u"and": [ + # v+5 is bigger 0 + {u"≥": [{u"+": 5}, 0]}, + # v-6 is not 5 + {u"≠": [5, {u"-": 6}]}, + ], + } + ) + self.assertTrue(q(5)) + self.assertTrue(q(10)) + self.assertFalse(q(11)) + + def test_empty(self): + q = api.SearchMetricController.MeasureQuery({}) + self.assertFalse(q(5)) + self.assertFalse(q(10)) + + def test_bad_format(self): + self.assertRaises(api.SearchMetricController.MeasureQuery.InvalidQuery, + api.SearchMetricController.MeasureQuery, + {"foo": [{"=": 4}, {"=": 10}]}) + + self.assertRaises(api.SearchMetricController.MeasureQuery.InvalidQuery, + api.SearchMetricController.MeasureQuery, + {"=": [1, 2, 3]}) diff --git a/gnocchi/tests/test_storage.py 
b/gnocchi/tests/test_storage.py index d02c6e73..c65d6119 100644 --- a/gnocchi/tests/test_storage.py +++ b/gnocchi/tests/test_storage.py @@ -1187,63 +1187,3 @@ class TestStorageDriver(tests_base.TestCase): datetime64(2014, 1, 1), datetime64(2015, 1, 1), resample=numpy.timedelta64(1, 'h')) - - -class TestMeasureQuery(tests_base.TestCase): - def test_equal(self): - q = storage.MeasureQuery({"=": 4}) - self.assertTrue(q(4)) - self.assertFalse(q(40)) - - def test_gt(self): - q = storage.MeasureQuery({">": 4}) - self.assertTrue(q(40)) - self.assertFalse(q(4)) - - def test_and(self): - q = storage.MeasureQuery({"and": [{">": 4}, {"<": 10}]}) - self.assertTrue(q(5)) - self.assertFalse(q(40)) - self.assertFalse(q(1)) - - def test_or(self): - q = storage.MeasureQuery({"or": [{"=": 4}, {"=": 10}]}) - self.assertTrue(q(4)) - self.assertTrue(q(10)) - self.assertFalse(q(-1)) - - def test_modulo(self): - q = storage.MeasureQuery({"=": [{"%": 5}, 0]}) - self.assertTrue(q(5)) - self.assertTrue(q(10)) - self.assertFalse(q(-1)) - self.assertFalse(q(6)) - - def test_math(self): - q = storage.MeasureQuery( - { - u"and": [ - # v+5 is bigger 0 - {u"≥": [{u"+": 5}, 0]}, - # v-6 is not 5 - {u"≠": [5, {u"-": 6}]}, - ], - } - ) - self.assertTrue(q(5)) - self.assertTrue(q(10)) - self.assertFalse(q(11)) - - def test_empty(self): - q = storage.MeasureQuery({}) - self.assertFalse(q(5)) - self.assertFalse(q(10)) - - def test_bad_format(self): - self.assertRaises(storage.InvalidQuery, - storage.MeasureQuery, - {"foo": [{"=": 4}, {"=": 10}]}) - - self.assertRaises(storage.InvalidQuery, - storage.MeasureQuery, - {"=": [1, 2, 3]}) -- GitLab From a4230ed8e62e52c4cc8b4f4831f1b20ebcfb942a Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Mon, 23 Jul 2018 11:56:59 +0200 Subject: [PATCH 1380/1483] Added amqp1.0 collectd daemon This enables gnocchi-amqp daemon. Which listens to metrics from collectd amqp1 write plugin. 
--- doc/source/amqp1d.rst | 37 +++ doc/source/collectd.rst | 35 ++- doc/source/index.rst | 2 + doc/source/install.rst | 1 + gnocchi/amqp1d.py | 234 ++++++++++++++++++ gnocchi/cli/amqp1d.py | 19 ++ gnocchi/opts.py | 16 ++ gnocchi/tests/test_amqp1d.py | 97 ++++++++ .../notes/amqp1-driver-78a9401768df7367.yaml | 6 + setup.cfg | 3 + tox.ini | 2 +- 11 files changed, 450 insertions(+), 2 deletions(-) create mode 100644 doc/source/amqp1d.rst create mode 100644 gnocchi/amqp1d.py create mode 100644 gnocchi/cli/amqp1d.py create mode 100644 gnocchi/tests/test_amqp1d.py create mode 100644 releasenotes/notes/amqp1-driver-78a9401768df7367.yaml diff --git a/doc/source/amqp1d.rst b/doc/source/amqp1d.rst new file mode 100644 index 00000000..200cbf74 --- /dev/null +++ b/doc/source/amqp1d.rst @@ -0,0 +1,37 @@ +===================== +AMQP 1.0 Daemon Usage +===================== + +Gnocchi provides a daemon `gnocchi-amqp1d` that is compatible with the `AMQP +1.0`_ (Advanced Messaging Queuing Protocol 1.0 (ISO/IEC 19464)) protocol and +can listen to |metrics| sent over the network via the amqp1 `collectd`_ plugin +named `amqp1`_. + +.. _`amqp1`: https://github.com/collectd/collectd/blob/master/src/amqp1.c +.. _`collectd`: https://github.com/collectd/collectd +.. _`AMQP 1.0`: https://www.amqp.org/resources/specifications + +`amqp1` collectd write plugin enables collectd output to be sent to an Advanced +Messaging Queuing Protocol 1.0 intermediary such as the Apache Qpid Dispatch +Router or Apache Artemis Broker. + +How It Works? +============= +In order to enable amqp1d support in Gnocchi, you need to configure the +`[amqp1d]` option group in the configuration file. 
You need to provide a +host with port and topic name that amqp1 collectd plugin is publishing metric +to and a |resource| name that will be used as the main |resource| where all +the |metrics| will be attached with host name as an attribute, a user and +project id that will be associated with the |resource| and |metrics|, +and an |archive policy| name that will be used to create the |metrics|. + +All the |metrics| will be created dynamically as the |metrics| are sent to +`gnocchi-amqp1d`, and attached with the source host name to the |resource| +name you configured. + +To use it, Gnocchi must be installed with the `amqp1` flavor:: + + pip install -e .[postgresql,file,amqp1] + + +.. include:: include/term-substitution.rst diff --git a/doc/source/collectd.rst b/doc/source/collectd.rst index 0b91b448..6c80bd80 100644 --- a/doc/source/collectd.rst +++ b/doc/source/collectd.rst @@ -3,12 +3,45 @@ ================== `Collectd`_ can use Gnocchi to store its data through a plugin called -`collectd-gnocchi`. It can be installed with *pip*:: +`collectd-gnocchi` or via the `gnocchi-amqp1d` daemon. + + +collectd-gnocchi +================ + +It can be installed with *pip*:: pip install collectd-gnocchi `Sources and documentation`_ are also available. +gnocchi-amqp1d +============== + +You need first to setup the Collectd `amqp1 write plugin`:: + + + + Host "localhost" + Port "5672" + Address "collectd" + + Format JSON + + + + + +Then configure the AMQP 1.0 url in gnocchi.conf:: + + [amqp1d] + url = localhost:5672/u/collectd/telemetry + + .. _`Collectd`: https://www.collectd.org/ .. _`Sources and documentation`: https://github.com/gnocchixyz/collectd-gnocchi +.. _`amqp1 write plugin`: https://github.com/ajssmith/collectd/blob/d4cc32c4dddb01081c49a67d13ab4a737cda0ed0/src/collectd.conf.pod#plugin-amqp1 +.. 
TODO(sileht): Change the link when + https://collectd.org/documentation/manpages/collectd.conf.5.shtml will be + up2date diff --git a/doc/source/index.rst b/doc/source/index.rst index bd285924..63934d8e 100644 --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -26,6 +26,7 @@ Gnocchi's main features are: - Statsd protocol support - Collectd plugin support - InfluxDB line protocol ingestion support +- AMQP 1.0 protocol support Community --------- @@ -47,6 +48,7 @@ Documentation client rest statsd + amqp1d grafana prometheus influxdb diff --git a/doc/source/install.rst b/doc/source/install.rst index 31071729..079df6d7 100644 --- a/doc/source/install.rst +++ b/doc/source/install.rst @@ -27,6 +27,7 @@ The list of variants available is: * `ceph_alternative` – provides Ceph (>= 12.2.0) storage support * `redis` – provides Redis storage support * `prometheus` – provides Prometheus Remote Write support +* `amqp1` – provides AMQP 1.0 support * `doc` – documentation building support * `test` – unit and functional tests support diff --git a/gnocchi/amqp1d.py b/gnocchi/amqp1d.py new file mode 100644 index 00000000..8ccfac6e --- /dev/null +++ b/gnocchi/amqp1d.py @@ -0,0 +1,234 @@ +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import collections +import itertools +import uuid + +import daiquiri +import proton.handlers +import proton.reactor +import six +import ujson + +from gnocchi import incoming +from gnocchi import indexer +from gnocchi import service +from gnocchi import utils + +LOG = daiquiri.getLogger(__name__) + + +class BatchProcessor(object): + def __init__(self, conf): + self.conf = conf + self.incoming = incoming.get_driver(self.conf) + self.indexer = indexer.get_driver(self.conf) + self._ensure_resource_type_exists() + + self._hosts = {} + self._measures = collections.defaultdict( + lambda: collections.defaultdict(list)) + + def reset(self): + self._hosts.clear() + self._measures.clear() + + def add_measures(self, host, name, measures): + host_id = "%s:%s" % (self.conf.amqp1d.resource_type, + host.replace("/", "_")) + self._hosts[host_id] = host + self._measures[host_id][name].extend(measures) + + def flush(self): + try: + self._flush() + except Exception: + LOG.error("Unepected error during flush()", exc_info=True) + self.reset() + + def _flush(self): + archive_policies = {} + resources = self._get_resources(self._measures.keys()) + for host_id, measures_by_names in six.iteritems(self._measures): + resource = resources[host_id] + + names = set(measures_by_names.keys()) + for name in names: + if name not in archive_policies: + archive_policies[name] = ( + self.indexer.get_archive_policy_for_metric(name)) + known_metrics = self.indexer.list_metrics(attribute_filter={ + "and": [{"=": {"resource_id": resource.id}}, + {"in": {"name": names}}] + }) + known_names = set((m.name for m in known_metrics)) + already_exists_names = [] + for name in (names - known_names): + try: + m = self.indexer.create_metric( + uuid.uuid4(), + creator=self.conf.amqp1d.creator, + resource_id=resource.id, + name=name, + archive_policy_name=archive_policies[name].name) + except indexer.NamedMetricAlreadyExists as e: + already_exists_names.append(e.metric) + except indexer.IndexerException as e: + 
LOG.error("Unexpected error, dropping metric %s", + name, exc_info=True) + else: + known_metrics.append(m) + + if already_exists_names: + # Add metrics created in the meantime + known_names.extend(already_exists_names) + known_metrics.extend( + self.indexer.list_metrics(attribute_filter={ + "and": [{"=": {"resource_id": resource.id}}, + {"in": {"name": already_exists_names}}] + })) + + self.incoming.add_measures_batch( + dict((metric.id, + measures_by_names[metric.name]) + for metric in known_metrics)) + + def _get_resources(self, host_ids): + + resource_ids = set((utils.ResourceUUID(host_id, + self.conf.amqp1d.creator) + for host_id in host_ids)) + + resources = self.indexer.list_resources( + resource_type=self.conf.amqp1d.resource_type, + attribute_filter={"in": {"id": resource_ids}}) + + resources_by_host_id = {r.original_resource_id: r for r in resources} + + missing_host_ids = set(host_ids) - set(resources_by_host_id.keys()) + + for host_id in missing_host_ids: + resource_id = utils.ResourceUUID(host_id, + self.conf.amqp1d.creator) + try: + r = self.indexer.create_resource( + self.conf.amqp1d.resource_type, + resource_id, + self.conf.amqp1d.creator, + original_resource_id=host_id, + host=self._hosts[host_id]) + except indexer.ResourceAlreadyExists: + r = self.indexer.get_resource( + self.conf.amqp1d.resource_type, + resource_id) + resources_by_host_id[host_id] = r + + return resources_by_host_id + + def _ensure_resource_type_exists(self): + try: + self.resource_type = self.indexer.get_resource_type( + self.conf.amqp1d.resource_type) + except indexer.NoSuchResourceType: + try: + mgr = self.indexer.get_resource_type_schema() + rtype = mgr.resource_type_from_dict( + self.conf.amqp1d.resource_type, { + "host": {"type": "string", "required": True, + "min_length": 0, "max_length": 255}, + }, "creating") + self.indexer.create_resource_type(rtype) + except indexer.ResourceTypeAlreadyExists: + LOG.debug("Resource type %s already exists", + 
self.conf.amqp1d.resource_type) + else: + LOG.info("Created resource type %s", + self.conf.amqp1d.resource_type) + self.resource_type = self.indexer.get_resource_type( + self.conf.amqp1d.resource_type) + else: + LOG.info("Found resource type %s", + self.conf.amqp1d.resource_type) + + +class CollectdFormatHandler(object): + def __init__(self, processor): + self.processor = processor + + @staticmethod + def _serialize_identifier(index, message): + """Based of FORMAT_VL from collectd/src/daemon/common.h. + + The biggest difference is that we don't prepend the host and append the + index of the value, and don't use slash. + + """ + suffix = ("-%s" % message["dsnames"][index] + if len(message["dsnames"]) > 1 else "") + return (message["plugin"] + ("-" + message["plugin_instance"] + if message["plugin_instance"] else "") + + "@" + + message["type"] + ("-" + message["type_instance"] + if message["type_instance"] else "") + + suffix) + + def on_message(self, event): + json_message = ujson.loads(event.message.body) + timestamp = utils.dt_in_unix_ns(utils.utcnow()) + measures_by_host_and_name = sorted(( + (message["host"], + self._serialize_identifier(index, message), + value) + for message in json_message + for index, value in enumerate(message["values"]) + )) + for (host, name), values in itertools.groupby( + measures_by_host_and_name, key=lambda x: x[0:2]): + measures = (incoming.Measure(timestamp, v[2]) for v in values) + self.processor.add_measures(host, name, measures) + + +class AMQP1Server(proton.handlers.MessagingHandler): + + def __init__(self, conf): + super(AMQP1Server, self).__init__() + self.peer_close_is_error = True + self.conf = conf + + self.processor = BatchProcessor(conf) + + # Only collectd format is supported for now + self.data_source_handler = { + "collectd": CollectdFormatHandler + }[self.conf.amqp1d.data_source](self.processor) + + def on_start(self, event): + event.container.schedule(self.conf.amqp1d.flush_delay, self) + + def on_message(self, 
event): + self.data_source_handler.on_message(event) + + def on_timer_task(self, event): + event.container.schedule(self.conf.amqp1d.flush_delay, self) + self.processor.flush() + + +def start(): + conf = service.prepare_service() + server = proton.reactor.Container(AMQP1Server(conf)) + try: + server.run() + except KeyboardInterrupt: + pass diff --git a/gnocchi/cli/amqp1d.py b/gnocchi/cli/amqp1d.py new file mode 100644 index 00000000..7f3dedc3 --- /dev/null +++ b/gnocchi/cli/amqp1d.py @@ -0,0 +1,19 @@ +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from gnocchi import amqp1d as amqp1d_service + + +def amqp1d(): + amqp1d_service.start() diff --git a/gnocchi/opts.py b/gnocchi/opts.py index b17524af..d86ab519 100644 --- a/gnocchi/opts.py +++ b/gnocchi/opts.py @@ -218,6 +218,22 @@ def list_opts(): default=10, help='Delay between flushes'), )), + ("amqp1d", ( + cfg.StrOpt('url', + default='localhost:5672/u/collectd/telemetry', + help='AMQP 1.0 URL to listen to'), + cfg.StrOpt('data_source', + default='collectd', + choices=['collectd'], + help='Data source for amqp1d'), + cfg.StrOpt('resource_type', + default='collectd_amqp1d', + help='Resource type name to use to identify metrics'), + cfg.StrOpt('creator', help='Creator value to use to amqpd1'), + cfg.FloatOpt('flush_delay', + default=10, + help='Delay between flushes in seconds'), + )), ("archive_policy", gnocchi.archive_policy.OPTS), ] diff --git a/gnocchi/tests/test_amqp1d.py b/gnocchi/tests/test_amqp1d.py new file mode 100644 index 00000000..44028b61 --- /dev/null +++ b/gnocchi/tests/test_amqp1d.py @@ -0,0 +1,97 @@ +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+import datetime +import json +import uuid + +import mock +import numpy + +from gnocchi import amqp1d +from gnocchi.tests import base as tests_base +from gnocchi import utils + + +def datetime64(*args): + return numpy.datetime64(datetime.datetime(*args)) + + +class TestAmqp1d(tests_base.TestCase): + + AMQP1D_USER_ID = str(uuid.uuid4()) + AMQP1D_PROJECT_ID = str(uuid.uuid4()) + + def setUp(self): + super(TestAmqp1d, self).setUp() + self.conf.set_override("resource_type", + "collectd_amqp1d", "amqp1d") + self.conf.set_override("creator", + self.AMQP1D_USER_ID, "amqp1d") + + self.index.create_archive_policy_rule("rule-amqp", "*", "medium") + + self.server = amqp1d.AMQP1Server(self.conf) + self.server.processor.incoming = self.incoming + self.server.processor.indexer = self.index + + @mock.patch.object(utils, 'utcnow') + def test_amqp1d(self, utcnow): + utcnow.return_value = utils.datetime_utc(2017, 1, 10, 13, 58, 36) + + metrics = json.dumps([ + {u'dstypes': [u'gauge'], u'plugin': u'memory', u'dsnames': + [u'value'], u'interval': 10.0, u'host': u'www.gnocchi.test.com', + u'values': [9], u'time': 1506712460.824, u'plugin_instance': + u'', u'type_instance': u'free', u'type': u'memory'}, + {u'dstypes': [u'derive', u'derive'], u'plugin': u'interface', + u'dsnames': [u'rx', u'tx'], u'interval': 10.0, u'host': + u'www.gnocchi.test.com', u'values': [2, 5], u'time': + 1506712460.824, u'plugin_instance': u'ens2f1', u'type_instance': + u'', u'type': u'if_errors'} + ]) + + self.server.on_message(mock.Mock(message=mock.Mock(body=metrics))) + self.server.processor.flush() + + resources = self.index.list_resources( + self.conf.amqp1d.resource_type, + attribute_filter={"=": {"host": "www.gnocchi.test.com"}} + ) + self.assertEqual(1, len(resources)) + self.assertEqual("www.gnocchi.test.com", + resources[0].host) + + metrics = self.index.list_metrics(attribute_filter={ + '=': {"resource_id": resources[0].id} + }) + self.assertEqual(3, len(metrics)) + + self.trigger_processing(metrics) 
+ + expected_measures = { + "memory@memory-free": [ + (datetime64(2017, 1, 10, 13, 58), numpy.timedelta64(1, 'm'), 9) + ], + "interface-ens2f1@if_errors-rx": [ + (datetime64(2017, 1, 10, 13, 58), numpy.timedelta64(1, 'm'), 2) + ], + "interface-ens2f1@if_errors-tx": [ + (datetime64(2017, 1, 10, 13, 58), numpy.timedelta64(1, 'm'), 5) + ] + } + for metric in metrics: + aggregation = metric.archive_policy.get_aggregation( + "mean", numpy.timedelta64(1, 'm')) + measures = self.storage.get_measures(metric, [aggregation]) + self.assertEqual(expected_measures[metric.name], + measures["mean"]) diff --git a/releasenotes/notes/amqp1-driver-78a9401768df7367.yaml b/releasenotes/notes/amqp1-driver-78a9401768df7367.yaml new file mode 100644 index 00000000..85142ffe --- /dev/null +++ b/releasenotes/notes/amqp1-driver-78a9401768df7367.yaml @@ -0,0 +1,6 @@ +--- +features: + - | + Gnocchi provides a new service to receive metrics and measures from an AMQP 1.0. + The expected payload format is the one from Collectd write AMQP 1.0. The daemon is + called ``gnocchi-amqp1d``. 
diff --git a/setup.cfg b/setup.cfg index a80e7b26..5866f94b 100644 --- a/setup.cfg +++ b/setup.cfg @@ -82,6 +82,8 @@ ceph_alternative = prometheus = python-snappy protobuf +amqp1: + python-qpid-proton>=0.17.0 doc = sphinx<1.6.0 sphinx_rtd_theme @@ -145,6 +147,7 @@ console_scripts = gnocchi-upgrade = gnocchi.cli.manage:upgrade gnocchi-change-sack-size = gnocchi.cli.manage:change_sack_size gnocchi-statsd = gnocchi.cli.statsd:statsd + gnocchi-amqpd = gnocchi.cli.amqpd:amqpd gnocchi-metricd = gnocchi.cli.metricd:metricd gnocchi-injector = gnocchi.cli.injector:injector diff --git a/tox.ini b/tox.ini index 13ba5eba..bab5ff8c 100644 --- a/tox.ini +++ b/tox.ini @@ -43,7 +43,7 @@ setenv = # pifpaf) deps = -e - .[test,redis,prometheus,{env:GNOCCHI_STORAGE_DEPS:},{env:GNOCCHI_INDEXER_DEPS:}] + .[test,redis,prometheus,amqp1,{env:GNOCCHI_STORAGE_DEPS:},{env:GNOCCHI_INDEXER_DEPS:}] {env:GNOCCHI_TEST_TARBALLS:} cliff!=2.9.0 commands = -- GitLab From 74a0a38a8751eb2165aade704fd5bc2a9008080b Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Tue, 31 Jul 2018 08:59:35 +0200 Subject: [PATCH 1381/1483] Don't use xattr 0.9.4 --- setup.cfg | 1 + tox.ini | 6 ++++++ 2 files changed, 7 insertions(+) diff --git a/setup.cfg b/setup.cfg index 5866f94b..67fc2773 100644 --- a/setup.cfg +++ b/setup.cfg @@ -106,6 +106,7 @@ test = WebTest>=2.0.16 keystonemiddleware>=4.0.0,!=4.19.0 wsgi_intercept>=1.4.1 + xattr!=0.9.4 # https://github.com/gnocchixyz/gnocchi/issues/951 test-swift = python-swiftclient diff --git a/tox.ini b/tox.ini index bab5ff8c..d84e5928 100644 --- a/tox.ini +++ b/tox.ini @@ -58,6 +58,7 @@ setenv = GNOCCHI_VARIANT=test,postgresql,file deps = gnocchi[{env:GNOCCHI_VARIANT}]>=4.0,<4.1 pifpaf[gnocchi]>=0.13 gnocchiclient>=2.8.0 + xattr!=0.9.4 commands = pifpaf --env-prefix INDEXER run postgresql {toxinidir}/run-upgrade-tests.sh {posargs} [testenv:py27-mysql-ceph-upgrade-from-4.0] @@ -70,6 +71,7 @@ setenv = deps = gnocchi[{env:GNOCCHI_VARIANT}]>=4.0,<4.1 gnocchiclient>=2.8.0 
pifpaf[ceph,gnocchi]>=0.13 + xattr!=0.9.4 commands = pifpaf --env-prefix INDEXER run mysql -- pifpaf --env-prefix STORAGE run ceph {toxinidir}/run-upgrade-tests.sh {posargs} [testenv:py37-postgresql-file-upgrade-from-4.1] @@ -80,6 +82,7 @@ setenv = GNOCCHI_VARIANT=test,postgresql,file deps = gnocchi[{env:GNOCCHI_VARIANT}]>=4.1,<4.2 pifpaf[gnocchi]>=0.13 gnocchiclient>=2.8.0 + xattr!=0.9.4 commands = pifpaf --env-prefix INDEXER run postgresql {toxinidir}/run-upgrade-tests.sh {posargs} [testenv:py27-mysql-ceph-upgrade-from-4.1] @@ -90,6 +93,7 @@ setenv = GNOCCHI_VARIANT=test,mysql,ceph,ceph_recommended_lib deps = gnocchi[{env:GNOCCHI_VARIANT}]>=4.1,<4.2 gnocchiclient>=2.8.0 pifpaf[ceph,gnocchi]>=0.13 + xattr!=0.9.4 commands = pifpaf --env-prefix INDEXER run mysql -- pifpaf --env-prefix STORAGE run ceph {toxinidir}/run-upgrade-tests.sh {posargs} [testenv:py37-postgresql-file-upgrade-from-4.2] @@ -100,6 +104,7 @@ setenv = GNOCCHI_VARIANT=test,postgresql,file deps = gnocchi[{env:GNOCCHI_VARIANT}]>=4.2,<4.3 pifpaf[gnocchi]>=0.13 gnocchiclient>=2.8.0 + xattr!=0.9.4 commands = pifpaf --env-prefix INDEXER run postgresql {toxinidir}/run-upgrade-tests.sh {posargs} [testenv:py27-mysql-ceph-upgrade-from-4.2] @@ -110,6 +115,7 @@ setenv = GNOCCHI_VARIANT=test,mysql,ceph,ceph_recommended_lib deps = gnocchi[{env:GNOCCHI_VARIANT}]>=4.2,<4.3 gnocchiclient>=2.8.0 pifpaf[ceph,gnocchi]>=0.13 + xattr!=0.9.4 commands = pifpaf --env-prefix INDEXER run mysql -- pifpaf --env-prefix STORAGE run ceph {toxinidir}/run-upgrade-tests.sh {posargs} [testenv:pep8] -- GitLab From 88bfa6fe4e775bb77a321a387b29195c30d6678b Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Mon, 30 Jul 2018 15:42:44 +0200 Subject: [PATCH 1382/1483] Disable Web doc build on stable/4.3 branch --- .travis.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index 6f2fd7be..b8504b08 100644 --- a/.travis.yml +++ b/.travis.yml @@ -10,7 +10,6 @@ cache: env: - TARGET: pep8 - TARGET: docs - - TARGET: 
docs-gnocchi.xyz - TARGET: py27-mysql-ceph-upgrade-from-4.0 - TARGET: py37-postgresql-file-upgrade-from-4.0 -- GitLab From 377ff698cace52f7d52217dfc1dc603f7a057a0d Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Tue, 31 Jul 2018 08:59:35 +0200 Subject: [PATCH 1383/1483] Don't use xattr 0.9.4 --- setup.cfg | 1 + tox.ini | 6 ++++++ 2 files changed, 7 insertions(+) diff --git a/setup.cfg b/setup.cfg index a80e7b26..f0d6d083 100644 --- a/setup.cfg +++ b/setup.cfg @@ -104,6 +104,7 @@ test = WebTest>=2.0.16 keystonemiddleware>=4.0.0,!=4.19.0 wsgi_intercept>=1.4.1 + xattr!=0.9.4 # https://github.com/gnocchixyz/gnocchi/issues/951 test-swift = python-swiftclient diff --git a/tox.ini b/tox.ini index 13ba5eba..5ea890d8 100644 --- a/tox.ini +++ b/tox.ini @@ -58,6 +58,7 @@ setenv = GNOCCHI_VARIANT=test,postgresql,file deps = gnocchi[{env:GNOCCHI_VARIANT}]>=4.0,<4.1 pifpaf[gnocchi]>=0.13 gnocchiclient>=2.8.0 + xattr!=0.9.4 commands = pifpaf --env-prefix INDEXER run postgresql {toxinidir}/run-upgrade-tests.sh {posargs} [testenv:py27-mysql-ceph-upgrade-from-4.0] @@ -70,6 +71,7 @@ setenv = deps = gnocchi[{env:GNOCCHI_VARIANT}]>=4.0,<4.1 gnocchiclient>=2.8.0 pifpaf[ceph,gnocchi]>=0.13 + xattr!=0.9.4 commands = pifpaf --env-prefix INDEXER run mysql -- pifpaf --env-prefix STORAGE run ceph {toxinidir}/run-upgrade-tests.sh {posargs} [testenv:py37-postgresql-file-upgrade-from-4.1] @@ -80,6 +82,7 @@ setenv = GNOCCHI_VARIANT=test,postgresql,file deps = gnocchi[{env:GNOCCHI_VARIANT}]>=4.1,<4.2 pifpaf[gnocchi]>=0.13 gnocchiclient>=2.8.0 + xattr!=0.9.4 commands = pifpaf --env-prefix INDEXER run postgresql {toxinidir}/run-upgrade-tests.sh {posargs} [testenv:py27-mysql-ceph-upgrade-from-4.1] @@ -90,6 +93,7 @@ setenv = GNOCCHI_VARIANT=test,mysql,ceph,ceph_recommended_lib deps = gnocchi[{env:GNOCCHI_VARIANT}]>=4.1,<4.2 gnocchiclient>=2.8.0 pifpaf[ceph,gnocchi]>=0.13 + xattr!=0.9.4 commands = pifpaf --env-prefix INDEXER run mysql -- pifpaf --env-prefix STORAGE run ceph 
{toxinidir}/run-upgrade-tests.sh {posargs} [testenv:py37-postgresql-file-upgrade-from-4.2] @@ -100,6 +104,7 @@ setenv = GNOCCHI_VARIANT=test,postgresql,file deps = gnocchi[{env:GNOCCHI_VARIANT}]>=4.2,<4.3 pifpaf[gnocchi]>=0.13 gnocchiclient>=2.8.0 + xattr!=0.9.4 commands = pifpaf --env-prefix INDEXER run postgresql {toxinidir}/run-upgrade-tests.sh {posargs} [testenv:py27-mysql-ceph-upgrade-from-4.2] @@ -110,6 +115,7 @@ setenv = GNOCCHI_VARIANT=test,mysql,ceph,ceph_recommended_lib deps = gnocchi[{env:GNOCCHI_VARIANT}]>=4.2,<4.3 gnocchiclient>=2.8.0 pifpaf[ceph,gnocchi]>=0.13 + xattr!=0.9.4 commands = pifpaf --env-prefix INDEXER run mysql -- pifpaf --env-prefix STORAGE run ceph {toxinidir}/run-upgrade-tests.sh {posargs} [testenv:pep8] -- GitLab From b1de8d78a40dabdb2f2f4cddb3ca37786667d0f6 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Mon, 30 Jul 2018 15:41:52 +0200 Subject: [PATCH 1384/1483] Update doc and tests for Gnocchi 4.3.0 --- .mergify.yml | 1 + .travis.yml | 8 ++--- doc/source/conf.py | 2 +- doc/source/releasenotes/4.3.rst | 6 ++++ doc/source/releasenotes/index.rst | 1 + tox.ini | 54 +++---------------------------- 6 files changed, 15 insertions(+), 57 deletions(-) create mode 100644 doc/source/releasenotes/4.3.rst diff --git a/.mergify.yml b/.mergify.yml index 29cafbb5..5e9142bc 100644 --- a/.mergify.yml +++ b/.mergify.yml @@ -10,6 +10,7 @@ rules: merge_strategy: method: rebase automated_backport_labels: + backport-to-4.3: stable/4.3 backport-to-4.2: stable/4.2 backport-to-4.1: stable/4.1 backport-to-4.0: stable/4.0 diff --git a/.travis.yml b/.travis.yml index b8504b08..9194247d 100644 --- a/.travis.yml +++ b/.travis.yml @@ -11,12 +11,8 @@ env: - TARGET: pep8 - TARGET: docs - - TARGET: py27-mysql-ceph-upgrade-from-4.0 - - TARGET: py37-postgresql-file-upgrade-from-4.0 - - TARGET: py27-mysql-ceph-upgrade-from-4.1 - - TARGET: py37-postgresql-file-upgrade-from-4.1 - - TARGET: py27-mysql-ceph-upgrade-from-4.2 - - TARGET: 
py37-postgresql-file-upgrade-from-4.2 + - TARGET: py27-mysql-ceph-upgrade-from-4.3 + - TARGET: py37-postgresql-file-upgrade-from-4.3 - TARGET: py27-mysql - TARGET: py37-mysql diff --git a/doc/source/conf.py b/doc/source/conf.py index 46837a49..841ab0d4 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -176,7 +176,7 @@ html_theme_options = { # Multiversion docs scv_sort = ('semver',) scv_show_banner = True -scv_banner_main_ref = 'stable/4.2' +scv_banner_main_ref = 'stable/4.3' scv_priority = 'branches' scv_whitelist_branches = ('master', '^stable/([3-9]\.)') scv_whitelist_tags = ("^$",) diff --git a/doc/source/releasenotes/4.3.rst b/doc/source/releasenotes/4.3.rst new file mode 100644 index 00000000..c3da2577 --- /dev/null +++ b/doc/source/releasenotes/4.3.rst @@ -0,0 +1,6 @@ +=================================== + 4.3 Series Release Notes +=================================== + +.. release-notes:: + :branch: origin/stable/4.3 diff --git a/doc/source/releasenotes/index.rst b/doc/source/releasenotes/index.rst index d642caed..dee60b31 100644 --- a/doc/source/releasenotes/index.rst +++ b/doc/source/releasenotes/index.rst @@ -5,6 +5,7 @@ Release Notes :maxdepth: 2 unreleased + 4.3 4.2 4.1 4.0 diff --git a/tox.ini b/tox.ini index d84e5928..983e6def 100644 --- a/tox.ini +++ b/tox.ini @@ -50,69 +50,23 @@ commands = {toxinidir}/run-tests.sh {posargs} {toxinidir}/run-func-tests.sh {posargs} -[testenv:py37-postgresql-file-upgrade-from-4.0] +[testenv:py37-postgresql-file-upgrade-from-4.3] # We should always recreate since the script upgrade # Gnocchi we can't reuse the virtualenv recreate = True setenv = GNOCCHI_VARIANT=test,postgresql,file -deps = gnocchi[{env:GNOCCHI_VARIANT}]>=4.0,<4.1 +deps = gnocchi[{env:GNOCCHI_VARIANT}]>=4.3,<4.4 pifpaf[gnocchi]>=0.13 gnocchiclient>=2.8.0 xattr!=0.9.4 commands = pifpaf --env-prefix INDEXER run postgresql {toxinidir}/run-upgrade-tests.sh {posargs} -[testenv:py27-mysql-ceph-upgrade-from-4.0] -# We should always recreate since the 
script upgrade -# Gnocchi we can't reuse the virtualenv -recreate = True -setenv = - GNOCCHI_VARIANT=test,mysql,ceph,ceph_recommended_lib - PYTHONWARNINGS=ignore -deps = gnocchi[{env:GNOCCHI_VARIANT}]>=4.0,<4.1 - gnocchiclient>=2.8.0 - pifpaf[ceph,gnocchi]>=0.13 - xattr!=0.9.4 -commands = pifpaf --env-prefix INDEXER run mysql -- pifpaf --env-prefix STORAGE run ceph {toxinidir}/run-upgrade-tests.sh {posargs} - -[testenv:py37-postgresql-file-upgrade-from-4.1] -# We should always recreate since the script upgrade -# Gnocchi we can't reuse the virtualenv -recreate = True -setenv = GNOCCHI_VARIANT=test,postgresql,file -deps = gnocchi[{env:GNOCCHI_VARIANT}]>=4.1,<4.2 - pifpaf[gnocchi]>=0.13 - gnocchiclient>=2.8.0 - xattr!=0.9.4 -commands = pifpaf --env-prefix INDEXER run postgresql {toxinidir}/run-upgrade-tests.sh {posargs} - -[testenv:py27-mysql-ceph-upgrade-from-4.1] -# We should always recreate since the script upgrade -# Gnocchi we can't reuse the virtualenv -recreate = True -setenv = GNOCCHI_VARIANT=test,mysql,ceph,ceph_recommended_lib -deps = gnocchi[{env:GNOCCHI_VARIANT}]>=4.1,<4.2 - gnocchiclient>=2.8.0 - pifpaf[ceph,gnocchi]>=0.13 - xattr!=0.9.4 -commands = pifpaf --env-prefix INDEXER run mysql -- pifpaf --env-prefix STORAGE run ceph {toxinidir}/run-upgrade-tests.sh {posargs} - -[testenv:py37-postgresql-file-upgrade-from-4.2] -# We should always recreate since the script upgrade -# Gnocchi we can't reuse the virtualenv -recreate = True -setenv = GNOCCHI_VARIANT=test,postgresql,file -deps = gnocchi[{env:GNOCCHI_VARIANT}]>=4.2,<4.3 - pifpaf[gnocchi]>=0.13 - gnocchiclient>=2.8.0 - xattr!=0.9.4 -commands = pifpaf --env-prefix INDEXER run postgresql {toxinidir}/run-upgrade-tests.sh {posargs} - -[testenv:py27-mysql-ceph-upgrade-from-4.2] +[testenv:py27-mysql-ceph-upgrade-from-4.3] # We should always recreate since the script upgrade # Gnocchi we can't reuse the virtualenv recreate = True setenv = GNOCCHI_VARIANT=test,mysql,ceph,ceph_recommended_lib -deps = 
gnocchi[{env:GNOCCHI_VARIANT}]>=4.2,<4.3 +deps = gnocchi[{env:GNOCCHI_VARIANT}]>=4.3,<4.4 gnocchiclient>=2.8.0 pifpaf[ceph,gnocchi]>=0.13 xattr!=0.9.4 -- GitLab From 5d412f612226880304ebc6ceed301b41088cbbdf Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Mon, 30 Jul 2018 22:06:55 +0200 Subject: [PATCH 1385/1483] setuptools: Fix gnocchi-api sheban in wheel --- gnocchi/setuptools.py | 27 +++++++++++++++++++++------ 1 file changed, 21 insertions(+), 6 deletions(-) diff --git a/gnocchi/setuptools.py b/gnocchi/setuptools.py index 1cc63992..67c1c858 100644 --- a/gnocchi/setuptools.py +++ b/gnocchi/setuptools.py @@ -17,6 +17,7 @@ from __future__ import absolute_import import os import subprocess +import sys from distutils import version from setuptools.command import develop @@ -106,17 +107,31 @@ class local_egg_info(egg_info.egg_info): f.write(b"* %s\n" % msg.encode("utf8")) +# Can't use six in this file it's too early in the bootstrap process +PY3 = sys.version_info >= (3,) + + class local_install_scripts(install_scripts.install_scripts): def run(self): install_scripts.install_scripts.run(self) - header = easy_install.get_script_header( - "", easy_install.sys_executable, False) - self.write_script("gnocchi-api", header + SCRIPT_TMPL) + # NOTE(sileht): Build wheel embed custom script as data, and put sheban + # in script of the building machine. To workaround that build_scripts + # on bdist_whell return '#!python' and then during whl install it's + # replaced by the correct interpreter. We do the same here. 
+ bs_cmd = self.get_finalized_command('build_scripts') + executable = getattr(bs_cmd, 'executable', easy_install.sys_executable) + script = easy_install.get_script_header("", executable) + SCRIPT_TMPL + if PY3: + script = script.encode('ascii') + self.write_script("gnocchi-api", script, 'b') class local_develop(develop.develop): def install_wrapper_scripts(self, dist): develop.develop.install_wrapper_scripts(self, dist) - header = easy_install.get_script_header( - "", easy_install.sys_executable, False) - self.write_script("gnocchi-api", header + SCRIPT_TMPL) + if self.exclude_scripts: + return + script = easy_install.get_script_header("") + SCRIPT_TMPL + if PY3: + script = script.encode('ascii') + self.write_script("gnocchi-api", script, 'b') -- GitLab From 711e51f706dcc5bc97ad14ddc8108e501befee23 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Mon, 30 Jul 2018 22:06:55 +0200 Subject: [PATCH 1386/1483] setuptools: Fix gnocchi-api sheban in wheel (cherry picked from commit 5d412f612226880304ebc6ceed301b41088cbbdf) --- gnocchi/setuptools.py | 27 +++++++++++++++++++++------ 1 file changed, 21 insertions(+), 6 deletions(-) diff --git a/gnocchi/setuptools.py b/gnocchi/setuptools.py index 1cc63992..67c1c858 100644 --- a/gnocchi/setuptools.py +++ b/gnocchi/setuptools.py @@ -17,6 +17,7 @@ from __future__ import absolute_import import os import subprocess +import sys from distutils import version from setuptools.command import develop @@ -106,17 +107,31 @@ class local_egg_info(egg_info.egg_info): f.write(b"* %s\n" % msg.encode("utf8")) +# Can't use six in this file it's too early in the bootstrap process +PY3 = sys.version_info >= (3,) + + class local_install_scripts(install_scripts.install_scripts): def run(self): install_scripts.install_scripts.run(self) - header = easy_install.get_script_header( - "", easy_install.sys_executable, False) - self.write_script("gnocchi-api", header + SCRIPT_TMPL) + # NOTE(sileht): Build wheel embed custom script as data, and put sheban 
+ # in script of the building machine. To workaround that build_scripts + # on bdist_whell return '#!python' and then during whl install it's + # replaced by the correct interpreter. We do the same here. + bs_cmd = self.get_finalized_command('build_scripts') + executable = getattr(bs_cmd, 'executable', easy_install.sys_executable) + script = easy_install.get_script_header("", executable) + SCRIPT_TMPL + if PY3: + script = script.encode('ascii') + self.write_script("gnocchi-api", script, 'b') class local_develop(develop.develop): def install_wrapper_scripts(self, dist): develop.develop.install_wrapper_scripts(self, dist) - header = easy_install.get_script_header( - "", easy_install.sys_executable, False) - self.write_script("gnocchi-api", header + SCRIPT_TMPL) + if self.exclude_scripts: + return + script = easy_install.get_script_header("") + SCRIPT_TMPL + if PY3: + script = script.encode('ascii') + self.write_script("gnocchi-api", script, 'b') -- GitLab From 428b27da16b8d7314a9845c34b78194e82240cf3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ond=C5=99ej=20Nov=C3=BD?= Date: Fri, 3 Aug 2018 06:12:57 +0200 Subject: [PATCH 1387/1483] d/control: Use team+openstack@tracker.debian.org as maintainer --- debian/changelog | 6 ++++++ debian/control | 2 +- 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/debian/changelog b/debian/changelog index e3bebd42..c97f1d55 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +gnocchi (4.2.0-6) UNRELEASED; urgency=medium + + * d/control: Use team+openstack@tracker.debian.org as maintainer + + -- Ondřej Nový Fri, 03 Aug 2018 06:12:57 +0200 + gnocchi (4.2.0-5) unstable; urgency=medium * Switch gnocchi to openstack-pkg-tools >= 81~ style of uwsgi app. 
diff --git a/debian/control b/debian/control index 6e8f0a37..d54c85f2 100644 --- a/debian/control +++ b/debian/control @@ -1,7 +1,7 @@ Source: gnocchi Section: net Priority: optional -Maintainer: Debian OpenStack +Maintainer: Debian OpenStack Uploaders: Thomas Goirand , Build-Depends: -- GitLab From 8099dfc2a30ddf305d9f904de0106e4dd5e56147 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Fri, 3 Aug 2018 18:19:20 +0200 Subject: [PATCH 1388/1483] storage: Remove useless fetch() in get_measures() --- gnocchi/storage/__init__.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/gnocchi/storage/__init__.py b/gnocchi/storage/__init__.py index 7d2e0a0d..4376e45a 100644 --- a/gnocchi/storage/__init__.py +++ b/gnocchi/storage/__init__.py @@ -358,8 +358,7 @@ class StorageDriver(object): return { aggmethod: list(itertools.chain( *[[(timestamp, timeseries[agg].aggregation.granularity, value) - for timestamp, value - in timeseries[agg].fetch(from_timestamp, to_timestamp)] + for timestamp, value in timeseries[agg]] for agg in sorted(aggs, key=ATTRGETTER_GRANULARITY, reverse=True)])) -- GitLab From 342a449cb1a326c989b5a46b1a6468aeb7d97706 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Mon, 30 Jul 2018 15:42:44 +0200 Subject: [PATCH 1389/1483] Disable Web doc build on stable/4.3 branch --- .travis.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index 6f2fd7be..b8504b08 100644 --- a/.travis.yml +++ b/.travis.yml @@ -10,7 +10,6 @@ cache: env: - TARGET: pep8 - TARGET: docs - - TARGET: docs-gnocchi.xyz - TARGET: py27-mysql-ceph-upgrade-from-4.0 - TARGET: py37-postgresql-file-upgrade-from-4.0 -- GitLab From d100600da87aa561efb67705023e26f54f45d17a Mon Sep 17 00:00:00 2001 From: Nagasai Vinaykumar Kapalavai Date: Tue, 7 Aug 2018 23:37:30 -0400 Subject: [PATCH 1390/1483] This stops using deprecated storage.get_measures --- gnocchi/rest/api.py | 25 ++++-- gnocchi/storage/__init__.py | 5 +- gnocchi/tests/test_amqp1d.py | 19 ++++- 
gnocchi/tests/test_statsd.py | 31 ++++++- gnocchi/tests/test_storage.py | 147 +++++++++++++++++++++------------- 5 files changed, 161 insertions(+), 66 deletions(-) diff --git a/gnocchi/rest/api.py b/gnocchi/rest/api.py index 4be3f795..25993dfd 100644 --- a/gnocchi/rest/api.py +++ b/gnocchi/rest/api.py @@ -194,6 +194,19 @@ def strtobool(varname, v): abort(400, "Unable to parse `%s': %s" % (varname, six.text_type(e))) +def get_measures_list(measures_agg): + return { + aggmethod: list(itertools.chain( + *[[(timestamp, measures_agg[agg].aggregation.granularity, value) + for timestamp, value in measures_agg[agg]] + for agg in sorted(aggs, + key=storage.ATTRGETTER_GRANULARITY, + reverse=True)])) + for aggmethod, aggs in itertools.groupby(measures_agg.keys(), + storage.ATTRGETTER_METHOD) + } + + RESOURCE_DEFAULT_PAGINATION = [u'revision_start:asc', u'started_at:asc'] @@ -509,8 +522,10 @@ class MetricController(rest.RestController): abort(503, 'Unable to refresh metric: %s. Metric is locked. ' 'Please try again.' 
% self.metric.id) try: - return pecan.request.storage.get_measures( - self.metric, aggregations, start, stop, resample)[aggregation] + results = pecan.request.storage.get_aggregated_measures( + {self.metric: aggregations}, + start, stop, resample)[self.metric] + return get_measures_list(results)[aggregation] except storage.AggregationDoesNotExist as e: abort(404, six.text_type(e)) except storage.MetricDoesNotExist: @@ -2011,9 +2026,9 @@ class AggregationController(rest.RestController): }, }) try: - return pecan.request.storage.get_measures( - metric, aggregations, start, stop, resample - )[aggregation] + results = pecan.request.storage.get_aggregated_measures( + {metric: aggregations}, start, stop, resample)[metric] + return get_measures_list(results)[aggregation] except storage.MetricDoesNotExist: return [] return processor.get_measures( diff --git a/gnocchi/storage/__init__.py b/gnocchi/storage/__init__.py index 4376e45a..1372414d 100644 --- a/gnocchi/storage/__init__.py +++ b/gnocchi/storage/__init__.py @@ -281,7 +281,8 @@ class StorageDriver(object): return name.split("_")[-1] == 'v%s' % v def get_aggregated_measures(self, metrics_and_aggregations, - from_timestamp=None, to_timestamp=None): + from_timestamp=None, to_timestamp=None, + resample=None): """Get aggregated measures from a metric. :param metrics_and_aggregations: The metrics and aggregations to @@ -332,6 +333,8 @@ class StorageDriver(object): ts.truncate(aggregation.timespan) results[metric][aggregation] = ts.fetch( from_timestamp, to_timestamp) + if resample: + results[metric][aggregation] = ts.resample(resample) return results diff --git a/gnocchi/tests/test_amqp1d.py b/gnocchi/tests/test_amqp1d.py index 44028b61..90202c38 100644 --- a/gnocchi/tests/test_amqp1d.py +++ b/gnocchi/tests/test_amqp1d.py @@ -11,6 +11,7 @@ # License for the specific language governing permissions and limitations # under the License. 
import datetime +import itertools import json import uuid @@ -18,6 +19,7 @@ import mock import numpy from gnocchi import amqp1d +from gnocchi import storage from gnocchi.tests import base as tests_base from gnocchi import utils @@ -26,6 +28,19 @@ def datetime64(*args): return numpy.datetime64(datetime.datetime(*args)) +def get_measures_list(measures_agg): + return { + aggmethod: list(itertools.chain( + *[[(timestamp, measures_agg[agg].aggregation.granularity, value) + for timestamp, value in measures_agg[agg]] + for agg in sorted(aggs, + key=storage.ATTRGETTER_GRANULARITY, + reverse=True)])) + for aggmethod, aggs in itertools.groupby(measures_agg.keys(), + storage.ATTRGETTER_METHOD) + } + + class TestAmqp1d(tests_base.TestCase): AMQP1D_USER_ID = str(uuid.uuid4()) @@ -92,6 +107,8 @@ class TestAmqp1d(tests_base.TestCase): for metric in metrics: aggregation = metric.archive_policy.get_aggregation( "mean", numpy.timedelta64(1, 'm')) - measures = self.storage.get_measures(metric, [aggregation]) + results = self.storage.get_aggregated_measures( + {metric: [aggregation]})[metric] + measures = get_measures_list(results) self.assertEqual(expected_measures[metric.name], measures["mean"]) diff --git a/gnocchi/tests/test_statsd.py b/gnocchi/tests/test_statsd.py index 43c6ed50..bdd19a43 100644 --- a/gnocchi/tests/test_statsd.py +++ b/gnocchi/tests/test_statsd.py @@ -15,6 +15,7 @@ # License for the specific language governing permissions and limitations # under the License. 
import datetime +import itertools import uuid import mock @@ -22,6 +23,7 @@ import numpy from gnocchi import indexer from gnocchi import statsd +from gnocchi import storage from gnocchi.tests import base as tests_base from gnocchi import utils @@ -30,6 +32,19 @@ def datetime64(*args): return numpy.datetime64(datetime.datetime(*args)) +def get_measures_list(measures_agg): + return { + aggmethod: list(itertools.chain( + *[[(timestamp, measures_agg[agg].aggregation.granularity, value) + for timestamp, value in measures_agg[agg]] + for agg in sorted(aggs, + key=storage.ATTRGETTER_GRANULARITY, + reverse=True)])) + for aggmethod, aggs in itertools.groupby(measures_agg.keys(), + storage.ATTRGETTER_METHOD) + } + + class TestStatsd(tests_base.TestCase): STATSD_USER_ID = str(uuid.uuid4()) @@ -75,7 +90,9 @@ class TestStatsd(tests_base.TestCase): self.trigger_processing([metric]) - measures = self.storage.get_measures(metric, self.aggregations) + measures = self.storage.get_aggregated_measures( + {metric: self.aggregations})[metric] + measures = get_measures_list(measures) self.assertEqual({"mean": [ (datetime64(2015, 1, 7), numpy.timedelta64(1, 'D'), 1.0), (datetime64(2015, 1, 7, 13), numpy.timedelta64(1, 'h'), 1.0), @@ -94,7 +111,9 @@ class TestStatsd(tests_base.TestCase): self.trigger_processing([metric]) - measures = self.storage.get_measures(metric, self.aggregations) + measures = self.storage.get_aggregated_measures( + {metric: self.aggregations})[metric] + measures = get_measures_list(measures) self.assertEqual({"mean": [ (datetime64(2015, 1, 7), numpy.timedelta64(1, 'D'), 1.5), (datetime64(2015, 1, 7, 13), numpy.timedelta64(1, 'h'), 1.5), @@ -126,7 +145,9 @@ class TestStatsd(tests_base.TestCase): self.trigger_processing([metric]) - measures = self.storage.get_measures(metric, self.aggregations) + measures = self.storage.get_aggregated_measures( + {metric: self.aggregations})[metric] + measures = get_measures_list(measures) self.assertEqual({"mean": [ (datetime64(2015, 
1, 7), numpy.timedelta64(1, 'D'), 1.0), (datetime64(2015, 1, 7, 13), numpy.timedelta64(1, 'h'), 1.0), @@ -144,7 +165,9 @@ class TestStatsd(tests_base.TestCase): self.trigger_processing([metric]) - measures = self.storage.get_measures(metric, self.aggregations) + measures = self.storage.get_aggregated_measures( + {metric: self.aggregations})[metric] + measures = get_measures_list(measures) self.assertEqual({"mean": [ (datetime64(2015, 1, 7), numpy.timedelta64(1, 'D'), 28), (datetime64(2015, 1, 7, 13), numpy.timedelta64(1, 'h'), 28), diff --git a/gnocchi/tests/test_storage.py b/gnocchi/tests/test_storage.py index c65d6119..71ec3d88 100644 --- a/gnocchi/tests/test_storage.py +++ b/gnocchi/tests/test_storage.py @@ -14,6 +14,7 @@ # License for the specific language governing permissions and limitations # under the License. import datetime +import itertools import uuid import mock @@ -37,6 +38,19 @@ def datetime64(*args): return numpy.datetime64(datetime.datetime(*args)) +def get_measures_list(measures_agg): + return { + aggmethod: list(itertools.chain( + *[[(timestamp, measures_agg[agg].aggregation.granularity, value) + for timestamp, value in measures_agg[agg]] + for agg in sorted(aggs, + key=storage.ATTRGETTER_GRANULARITY, + reverse=True)])) + for aggmethod, aggs in itertools.groupby(measures_agg.keys(), + storage.ATTRGETTER_METHOD) + } + + class TestStorageDriver(tests_base.TestCase): def setUp(self): super(TestStorageDriver, self).setUp() @@ -153,10 +167,11 @@ class TestStorageDriver(tests_base.TestCase): side_effect=carbonara.InvalidData()): self.trigger_processing() - m = self.storage.get_measures( - self.metric, - self.metric.archive_policy.get_aggregations_for_method('mean'), - )['mean'] + m = self.storage.get_aggregated_measures( + {self.metric: + self.metric.archive_policy.get_aggregations_for_method( + 'mean')},)[self.metric] + m = get_measures_list(m)['mean'] self.assertIn((datetime64(2014, 1, 1), numpy.timedelta64(1, 'D'), 1), m) 
self.assertIn((datetime64(2014, 1, 1, 13), @@ -183,7 +198,9 @@ class TestStorageDriver(tests_base.TestCase): self.metric.archive_policy.get_aggregations_for_method("mean") ) - m = self.storage.get_measures(self.metric, aggregations)['mean'] + m = self.storage.get_aggregated_measures( + {self.metric: aggregations})[self.metric] + m = get_measures_list(m)['mean'] self.assertIn((datetime64(2014, 1, 1), numpy.timedelta64(1, 'D'), 5.0), m) self.assertIn((datetime64(2014, 1, 1, 12), @@ -204,11 +221,12 @@ class TestStorageDriver(tests_base.TestCase): ) self.assertRaises(storage.MetricDoesNotExist, - self.storage.get_measures, - self.metric, aggregations) + self.storage.get_aggregated_measures, + {self.metric: aggregations}) self.assertEqual( {self.metric: None}, - self.storage._get_or_create_unaggregated_timeseries([self.metric])) + self.storage._get_or_create_unaggregated_timeseries( + [self.metric])) def test_measures_reporting_format(self): report = self.incoming.measures_report(True) @@ -303,7 +321,8 @@ class TestStorageDriver(tests_base.TestCase): ) self.assertEqual(3661, len( - self.storage.get_measures(m, aggregations)['mean'])) + get_measures_list(self.storage.get_aggregated_measures( + {m: aggregations})[m])['mean'])) @mock.patch('gnocchi.carbonara.SplitKey.POINTS_PER_SPLIT', 48) def test_add_measures_update_subset_split(self): @@ -371,7 +390,8 @@ class TestStorageDriver(tests_base.TestCase): (datetime64(2014, 1, 1, 12), numpy.timedelta64(5, 'm'), 69.0), (datetime64(2014, 1, 1, 12, 5), numpy.timedelta64(5, 'm'), 23.0), (datetime64(2014, 1, 1, 12, 10), numpy.timedelta64(5, 'm'), 44.0), - ]}, self.storage.get_measures(self.metric, aggregations)) + ]}, get_measures_list(self.storage.get_aggregated_measures( + {self.metric: aggregations})[self.metric])) # One year later… self.incoming.add_measures(self.metric.id, [ @@ -383,7 +403,8 @@ class TestStorageDriver(tests_base.TestCase): (datetime64(2015, 1, 1), numpy.timedelta64(1, 'D'), 69), (datetime64(2015, 1, 1, 12), 
numpy.timedelta64(1, 'h'), 69), (datetime64(2015, 1, 1, 12), numpy.timedelta64(5, 'm'), 69), - ]}, self.storage.get_measures(self.metric, aggregations)) + ]}, get_measures_list(self.storage.get_aggregated_measures( + {self.metric: aggregations})[self.metric])) agg = self.metric.archive_policy.get_aggregation( "mean", numpy.timedelta64(1, 'D')) @@ -570,7 +591,8 @@ class TestStorageDriver(tests_base.TestCase): (datetime64(2016, 1, 2, 13, 7), numpy.timedelta64(1, 'm'), 42), (datetime64(2016, 1, 4, 14, 9), numpy.timedelta64(1, 'm'), 4), (datetime64(2016, 1, 6, 15, 12), numpy.timedelta64(1, 'm'), 44), - ]}, self.storage.get_measures(self.metric, [aggregation])) + ]}, get_measures_list(self.storage.get_aggregated_measures( + {self.metric: [aggregation]})[self.metric])) # Now store brand new points that should force a rewrite of one of the # split (keep in mind the back window size in one hour here). We move @@ -637,7 +659,8 @@ class TestStorageDriver(tests_base.TestCase): (datetime64(2016, 1, 6, 15, 12), numpy.timedelta64(1, 'm'), 44), (datetime64(2016, 1, 10, 16, 18), numpy.timedelta64(1, 'm'), 45), (datetime64(2016, 1, 10, 17, 12), numpy.timedelta64(1, 'm'), 46), - ]}, self.storage.get_measures(self.metric, [aggregation])) + ]}, get_measures_list(self.storage.get_aggregated_measures( + {self.metric: [aggregation]})[self.metric])) def test_rewrite_measures_oldest_mutable_timestamp_eq_next_key(self): """See LP#1655422""" @@ -709,7 +732,8 @@ class TestStorageDriver(tests_base.TestCase): (datetime64(2016, 1, 2, 13, 7), numpy.timedelta64(1, 'm'), 42), (datetime64(2016, 1, 4, 14, 9), numpy.timedelta64(1, 'm'), 4), (datetime64(2016, 1, 6, 15, 12), numpy.timedelta64(1, 'm'), 44), - ]}, self.storage.get_measures(self.metric, [aggregation])) + ]}, get_measures_list(self.storage.get_aggregated_measures( + {self.metric: [aggregation]})[self.metric])) # Now store brand new points that should force a rewrite of one of the # split (keep in mind the back window size is one hour here). 
We move @@ -776,7 +800,8 @@ class TestStorageDriver(tests_base.TestCase): (datetime64(2016, 1, 4, 14, 9), numpy.timedelta64(1, 'm'), 4), (datetime64(2016, 1, 6, 15, 12), numpy.timedelta64(1, 'm'), 44), (datetime64(2016, 1, 10, 0, 12), numpy.timedelta64(1, 'm'), 45), - ]}, self.storage.get_measures(self.metric, [aggregation])) + ]}, get_measures_list(self.storage.get_aggregated_measures( + {self.metric: [aggregation]})[self.metric])) def test_rewrite_measures_corruption_missing_file(self): # Create an archive policy that spans on several splits. Each split @@ -852,7 +877,8 @@ class TestStorageDriver(tests_base.TestCase): numpy.timedelta64(1, 'm'), 4), (datetime64(2016, 1, 6, 15, 12), numpy.timedelta64(1, 'm'), 44), - ]}, self.storage.get_measures(self.metric, [aggregation])) + ]}, get_measures_list(self.storage.get_aggregated_measures( + {self.metric: [aggregation]})[self.metric])) # Test what happens if we delete the latest split and then need to # compress it! @@ -942,7 +968,8 @@ class TestStorageDriver(tests_base.TestCase): (datetime64(2016, 1, 2, 13, 7), numpy.timedelta64(1, 'm'), 42), (datetime64(2016, 1, 4, 14, 9), numpy.timedelta64(1, 'm'), 4), (datetime64(2016, 1, 6, 15, 12), numpy.timedelta64(1, 'm'), 44), - ]}, self.storage.get_measures(self.metric, [aggregation])) + ]}, get_measures_list(self.storage.get_aggregated_measures( + {self.metric: [aggregation]})[self.metric])) # Test what happens if we write garbage self.storage._store_metric_splits({ @@ -979,7 +1006,8 @@ class TestStorageDriver(tests_base.TestCase): (datetime64(2014, 1, 1, 12), numpy.timedelta64(1, 'h'), 55.5), (datetime64(2014, 1, 1, 12), numpy.timedelta64(5, 'm'), 69), (datetime64(2014, 1, 1, 12, 5), numpy.timedelta64(5, 'm'), 42.0), - ]}, self.storage.get_measures(self.metric, aggregations)) + ]}, get_measures_list(self.storage.get_aggregated_measures( + {self.metric: aggregations})[self.metric])) self.incoming.add_measures(self.metric.id, [ incoming.Measure(datetime64(2014, 1, 1, 12, 9, 
31), 4), @@ -993,7 +1021,8 @@ class TestStorageDriver(tests_base.TestCase): (datetime64(2014, 1, 1, 12), numpy.timedelta64(5, 'm'), 69.0), (datetime64(2014, 1, 1, 12, 5), numpy.timedelta64(5, 'm'), 23.0), (datetime64(2014, 1, 1, 12, 10), numpy.timedelta64(5, 'm'), 44.0), - ]}, self.storage.get_measures(self.metric, aggregations)) + ]}, get_measures_list(self.storage.get_aggregated_measures( + {self.metric: aggregations})[self.metric])) aggregations = ( self.metric.archive_policy.get_aggregations_for_method("max") @@ -1005,7 +1034,8 @@ class TestStorageDriver(tests_base.TestCase): (datetime64(2014, 1, 1, 12), numpy.timedelta64(5, 'm'), 69.0), (datetime64(2014, 1, 1, 12, 5), numpy.timedelta64(5, 'm'), 42.0), (datetime64(2014, 1, 1, 12, 10), numpy.timedelta64(5, 'm'), 44.0), - ]}, self.storage.get_measures(self.metric, aggregations)) + ]}, get_measures_list(self.storage.get_aggregated_measures( + {self.metric: aggregations})[self.metric])) aggregations = ( self.metric.archive_policy.get_aggregations_for_method("min") @@ -1017,7 +1047,8 @@ class TestStorageDriver(tests_base.TestCase): (datetime64(2014, 1, 1, 12), numpy.timedelta64(5, 'm'), 69.0), (datetime64(2014, 1, 1, 12, 5), numpy.timedelta64(5, 'm'), 4.0), (datetime64(2014, 1, 1, 12, 10), numpy.timedelta64(5, 'm'), 44.0), - ]}, self.storage.get_measures(self.metric, aggregations)) + ]}, get_measures_list(self.storage.get_aggregated_measures( + {self.metric: aggregations})[self.metric])) def test_add_and_get_splits(self): self.incoming.add_measures(self.metric.id, [ @@ -1038,51 +1069,52 @@ class TestStorageDriver(tests_base.TestCase): (datetime64(2014, 1, 1, 12), numpy.timedelta64(5, 'm'), 69.0), (datetime64(2014, 1, 1, 12, 5), numpy.timedelta64(5, 'm'), 23.0), (datetime64(2014, 1, 1, 12, 10), numpy.timedelta64(5, 'm'), 44.0), - ]}, self.storage.get_measures(self.metric, aggregations)) + ]}, get_measures_list(self.storage.get_aggregated_measures( + {self.metric: aggregations})[self.metric])) 
self.assertEqual({"mean": [ (datetime64(2014, 1, 1), numpy.timedelta64(1, 'D'), 39.75), (datetime64(2014, 1, 1, 12), numpy.timedelta64(1, 'h'), 39.75), (datetime64(2014, 1, 1, 12, 10), numpy.timedelta64(5, 'm'), 44.0), - ]}, self.storage.get_measures( - self.metric, aggregations, - from_timestamp=datetime64(2014, 1, 1, 12, 10, 0))) + ]}, get_measures_list(self.storage.get_aggregated_measures( + {self.metric: aggregations}, + from_timestamp=datetime64(2014, 1, 1, 12, 10, 0))[self.metric])) self.assertEqual({"mean": [ (datetime64(2014, 1, 1), numpy.timedelta64(1, 'D'), 39.75), (datetime64(2014, 1, 1, 12), numpy.timedelta64(1, 'h'), 39.75), (datetime64(2014, 1, 1, 12), numpy.timedelta64(5, 'm'), 69.0), (datetime64(2014, 1, 1, 12, 5), numpy.timedelta64(5, 'm'), 23.0), - ]}, self.storage.get_measures( - self.metric, aggregations, - to_timestamp=datetime64(2014, 1, 1, 12, 6, 0))) + ]}, get_measures_list(self.storage.get_aggregated_measures( + {self.metric: aggregations}, + to_timestamp=datetime64(2014, 1, 1, 12, 6, 0))[self.metric])) self.assertEqual({"mean": [ (datetime64(2014, 1, 1), numpy.timedelta64(1, 'D'), 39.75), (datetime64(2014, 1, 1, 12), numpy.timedelta64(1, 'h'), 39.75), (datetime64(2014, 1, 1, 12, 10), numpy.timedelta64(5, 'm'), 44.0), - ]}, self.storage.get_measures( - self.metric, aggregations, + ]}, get_measures_list(self.storage.get_aggregated_measures( + {self.metric: aggregations}, to_timestamp=datetime64(2014, 1, 1, 12, 10, 10), - from_timestamp=datetime64(2014, 1, 1, 12, 10, 10))) + from_timestamp=datetime64(2014, 1, 1, 12, 10, 10))[self.metric])) self.assertEqual({"mean": [ (datetime64(2014, 1, 1), numpy.timedelta64(1, 'D'), 39.75), (datetime64(2014, 1, 1, 12), numpy.timedelta64(1, 'h'), 39.75), (datetime64(2014, 1, 1, 12), numpy.timedelta64(5, 'm'), 69.0), - ]}, self.storage.get_measures( - self.metric, aggregations, + ]}, get_measures_list(self.storage.get_aggregated_measures( + {self.metric: aggregations}, from_timestamp=datetime64(2014, 1, 1, 
12, 0, 0), - to_timestamp=datetime64(2014, 1, 1, 12, 0, 2))) + to_timestamp=datetime64(2014, 1, 1, 12, 0, 2))[self.metric])) self.assertEqual({"mean": [ (datetime64(2014, 1, 1), numpy.timedelta64(1, 'D'), 39.75), (datetime64(2014, 1, 1, 12), numpy.timedelta64(1, 'h'), 39.75), (datetime64(2014, 1, 1, 12), numpy.timedelta64(5, 'm'), 69.0), - ]}, self.storage.get_measures( - self.metric, aggregations, + ]}, get_measures_list(self.storage.get_aggregated_measures( + {self.metric: aggregations}, from_timestamp=datetime64(2014, 1, 1, 12), - to_timestamp=datetime64(2014, 1, 1, 12, 0, 2))) + to_timestamp=datetime64(2014, 1, 1, 12, 0, 2))[self.metric])) aggregation_1h = ( self.metric.archive_policy.get_aggregation( @@ -1091,10 +1123,10 @@ class TestStorageDriver(tests_base.TestCase): self.assertEqual({"mean": [ (datetime64(2014, 1, 1, 12), numpy.timedelta64(1, 'h'), 39.75), - ]}, self.storage.get_measures( - self.metric, [aggregation_1h], + ]}, get_measures_list(self.storage.get_aggregated_measures( + {self.metric: [aggregation_1h]}, from_timestamp=datetime64(2014, 1, 1, 12, 0, 0), - to_timestamp=datetime64(2014, 1, 1, 12, 0, 2))) + to_timestamp=datetime64(2014, 1, 1, 12, 0, 2))[self.metric])) aggregation_5m = ( self.metric.archive_policy.get_aggregation( @@ -1103,16 +1135,18 @@ class TestStorageDriver(tests_base.TestCase): self.assertEqual({"mean": [ (datetime64(2014, 1, 1, 12), numpy.timedelta64(5, 'm'), 69.0), - ]}, self.storage.get_measures( - self.metric, [aggregation_5m], + ]}, get_measures_list(self.storage.get_aggregated_measures( + {self.metric: [aggregation_5m]}, from_timestamp=datetime64(2014, 1, 1, 12, 0, 0), - to_timestamp=datetime64(2014, 1, 1, 12, 0, 2))) + to_timestamp=datetime64(2014, 1, 1, 12, 0, 2))[self.metric])) self.assertEqual({"mean": []}, - self.storage.get_measures( - self.metric, - [carbonara.Aggregation( - "mean", numpy.timedelta64(42, 's'), None)])) + get_measures_list( + self.storage.get_aggregated_measures( + {self.metric: + 
[carbonara.Aggregation( + "mean", numpy.timedelta64(42, 's'), + None)]})[self.metric])) def test_get_measure_unknown_aggregation(self): self.incoming.add_measures(self.metric.id, [ @@ -1128,8 +1162,8 @@ class TestStorageDriver(tests_base.TestCase): self.assertRaises( storage.MetricDoesNotExist, - self.storage.get_measures, - self.metric, aggregations) + self.storage.get_aggregated_measures, + {self.metric: aggregations}) def test_resize_policy(self): name = str(uuid.uuid4()) @@ -1151,7 +1185,8 @@ class TestStorageDriver(tests_base.TestCase): (datetime64(2014, 1, 1, 12, 0, 0), numpy.timedelta64(5, 's'), 1), (datetime64(2014, 1, 1, 12, 0, 5), numpy.timedelta64(5, 's'), 1), (datetime64(2014, 1, 1, 12, 0, 10), numpy.timedelta64(5, 's'), 1), - ]}, self.storage.get_measures(m, [aggregation])) + ]}, get_measures_list(self.storage.get_aggregated_measures( + {m: [aggregation]})[m])) # expand to more points self.index.update_archive_policy( name, [archive_policy.ArchivePolicyItem(granularity=5, points=6)]) @@ -1164,7 +1199,8 @@ class TestStorageDriver(tests_base.TestCase): (datetime64(2014, 1, 1, 12, 0, 5), numpy.timedelta64(5, 's'), 1), (datetime64(2014, 1, 1, 12, 0, 10), numpy.timedelta64(5, 's'), 1), (datetime64(2014, 1, 1, 12, 0, 15), numpy.timedelta64(5, 's'), 1), - ]}, self.storage.get_measures(m, [aggregation])) + ]}, get_measures_list(self.storage.get_aggregated_measures( + {m: [aggregation]})[m])) # shrink timespan self.index.update_archive_policy( name, [archive_policy.ArchivePolicyItem(granularity=5, points=2)]) @@ -1174,16 +1210,17 @@ class TestStorageDriver(tests_base.TestCase): self.assertEqual({"mean": [ (datetime64(2014, 1, 1, 12, 0, 10), numpy.timedelta64(5, 's'), 1), (datetime64(2014, 1, 1, 12, 0, 15), numpy.timedelta64(5, 's'), 1), - ]}, self.storage.get_measures(m, [aggregation])) + ]}, get_measures_list(self.storage.get_aggregated_measures( + {m: [aggregation]})[m])) def test_resample_no_metric(self): """https://github.com/gnocchixyz/gnocchi/issues/69""" 
aggregation = self.metric.archive_policy.get_aggregation( "mean", numpy.timedelta64(300, 's')) self.assertRaises(storage.MetricDoesNotExist, - self.storage.get_measures, - self.metric, - [aggregation], + self.storage.get_aggregated_measures, + {self.metric: + [aggregation]}, datetime64(2014, 1, 1), datetime64(2015, 1, 1), resample=numpy.timedelta64(1, 'h')) -- GitLab From 7e476ad268215727973eb58c96388a00b7681435 Mon Sep 17 00:00:00 2001 From: Nagasai Vinaykumar Kapalavai Date: Mon, 20 Aug 2018 16:14:55 -0400 Subject: [PATCH 1391/1483] Disable calling to get_measures_list in the get_measures rest call --- gnocchi/rest/api.py | 24 ++++++++------------- gnocchi/storage/__init__.py | 40 ++++------------------------------- gnocchi/tests/test_amqp1d.py | 16 +------------- gnocchi/tests/test_statsd.py | 16 +------------- gnocchi/tests/test_storage.py | 15 +------------ gnocchi/tests/test_utils.py | 14 ++++++++++++ 6 files changed, 30 insertions(+), 95 deletions(-) diff --git a/gnocchi/rest/api.py b/gnocchi/rest/api.py index 25993dfd..9f9bc098 100644 --- a/gnocchi/rest/api.py +++ b/gnocchi/rest/api.py @@ -194,19 +194,6 @@ def strtobool(varname, v): abort(400, "Unable to parse `%s': %s" % (varname, six.text_type(e))) -def get_measures_list(measures_agg): - return { - aggmethod: list(itertools.chain( - *[[(timestamp, measures_agg[agg].aggregation.granularity, value) - for timestamp, value in measures_agg[agg]] - for agg in sorted(aggs, - key=storage.ATTRGETTER_GRANULARITY, - reverse=True)])) - for aggmethod, aggs in itertools.groupby(measures_agg.keys(), - storage.ATTRGETTER_METHOD) - } - - RESOURCE_DEFAULT_PAGINATION = [u'revision_start:asc', u'started_at:asc'] @@ -525,7 +512,10 @@ class MetricController(rest.RestController): results = pecan.request.storage.get_aggregated_measures( {self.metric: aggregations}, start, stop, resample)[self.metric] - return get_measures_list(results)[aggregation] + return [(timestamp, results[key].aggregation.granularity, value) + for key in 
sorted(results.keys(), + reverse=True) + for timestamp, value in results[key]] except storage.AggregationDoesNotExist as e: abort(404, six.text_type(e)) except storage.MetricDoesNotExist: @@ -2028,7 +2018,11 @@ class AggregationController(rest.RestController): try: results = pecan.request.storage.get_aggregated_measures( {metric: aggregations}, start, stop, resample)[metric] - return get_measures_list(results)[aggregation] + return [(timestamp, results[key].aggregation.granularity, + value) + for key in sorted(results.keys(), + reverse=True) + for timestamp, value in results[key]] except storage.MetricDoesNotExist: return [] return processor.get_measures( diff --git a/gnocchi/storage/__init__.py b/gnocchi/storage/__init__.py index 1372414d..86383dda 100644 --- a/gnocchi/storage/__init__.py +++ b/gnocchi/storage/__init__.py @@ -331,44 +331,12 @@ class StorageDriver(object): # be processed. Truncate to be sure we don't return them. if aggregation.timespan is not None: ts.truncate(aggregation.timespan) - results[metric][aggregation] = ts.fetch( - from_timestamp, to_timestamp) - if resample: - results[metric][aggregation] = ts.resample(resample) - + results[metric][aggregation] = ts.resample(resample) if resample \ + else ts + results[metric][aggregation] = results[metric][ + aggregation].fetch(from_timestamp, to_timestamp) return results - def get_measures(self, metric, aggregations, - from_timestamp=None, to_timestamp=None, - resample=None): - """Get aggregated measures from a metric. - - Deprecated. Use `get_aggregated_measures` instead. - - :param metric: The metric measured. - :param aggregations: The aggregations to retrieve. - :param from timestamp: The timestamp to get the measure from. - :param to timestamp: The timestamp to get the measure to. - :param resample: The granularity to resample to. 
- """ - timeseries = self.get_aggregated_measures( - {metric: aggregations}, from_timestamp, to_timestamp)[metric] - - if resample: - for agg, ts in six.iteritems(timeseries): - timeseries[agg] = ts.resample(resample) - - return { - aggmethod: list(itertools.chain( - *[[(timestamp, timeseries[agg].aggregation.granularity, value) - for timestamp, value in timeseries[agg]] - for agg in sorted(aggs, - key=ATTRGETTER_GRANULARITY, - reverse=True)])) - for aggmethod, aggs in itertools.groupby(timeseries.keys(), - ATTRGETTER_METHOD) - } - def _get_splits_and_unserialize(self, metrics_aggregations_keys): """Get splits and unserialize them diff --git a/gnocchi/tests/test_amqp1d.py b/gnocchi/tests/test_amqp1d.py index 90202c38..527e3984 100644 --- a/gnocchi/tests/test_amqp1d.py +++ b/gnocchi/tests/test_amqp1d.py @@ -11,7 +11,6 @@ # License for the specific language governing permissions and limitations # under the License. import datetime -import itertools import json import uuid @@ -19,8 +18,8 @@ import mock import numpy from gnocchi import amqp1d -from gnocchi import storage from gnocchi.tests import base as tests_base +from gnocchi.tests.test_utils import get_measures_list from gnocchi import utils @@ -28,19 +27,6 @@ def datetime64(*args): return numpy.datetime64(datetime.datetime(*args)) -def get_measures_list(measures_agg): - return { - aggmethod: list(itertools.chain( - *[[(timestamp, measures_agg[agg].aggregation.granularity, value) - for timestamp, value in measures_agg[agg]] - for agg in sorted(aggs, - key=storage.ATTRGETTER_GRANULARITY, - reverse=True)])) - for aggmethod, aggs in itertools.groupby(measures_agg.keys(), - storage.ATTRGETTER_METHOD) - } - - class TestAmqp1d(tests_base.TestCase): AMQP1D_USER_ID = str(uuid.uuid4()) diff --git a/gnocchi/tests/test_statsd.py b/gnocchi/tests/test_statsd.py index bdd19a43..ce6c52c4 100644 --- a/gnocchi/tests/test_statsd.py +++ b/gnocchi/tests/test_statsd.py @@ -15,7 +15,6 @@ # License for the specific language governing 
permissions and limitations # under the License. import datetime -import itertools import uuid import mock @@ -23,8 +22,8 @@ import numpy from gnocchi import indexer from gnocchi import statsd -from gnocchi import storage from gnocchi.tests import base as tests_base +from gnocchi.tests.test_utils import get_measures_list from gnocchi import utils @@ -32,19 +31,6 @@ def datetime64(*args): return numpy.datetime64(datetime.datetime(*args)) -def get_measures_list(measures_agg): - return { - aggmethod: list(itertools.chain( - *[[(timestamp, measures_agg[agg].aggregation.granularity, value) - for timestamp, value in measures_agg[agg]] - for agg in sorted(aggs, - key=storage.ATTRGETTER_GRANULARITY, - reverse=True)])) - for aggmethod, aggs in itertools.groupby(measures_agg.keys(), - storage.ATTRGETTER_METHOD) - } - - class TestStatsd(tests_base.TestCase): STATSD_USER_ID = str(uuid.uuid4()) diff --git a/gnocchi/tests/test_storage.py b/gnocchi/tests/test_storage.py index 71ec3d88..874aac48 100644 --- a/gnocchi/tests/test_storage.py +++ b/gnocchi/tests/test_storage.py @@ -14,7 +14,6 @@ # License for the specific language governing permissions and limitations # under the License. 
import datetime -import itertools import uuid import mock @@ -32,25 +31,13 @@ from gnocchi.storage import redis from gnocchi.storage import s3 from gnocchi.storage import swift from gnocchi.tests import base as tests_base +from gnocchi.tests.test_utils import get_measures_list def datetime64(*args): return numpy.datetime64(datetime.datetime(*args)) -def get_measures_list(measures_agg): - return { - aggmethod: list(itertools.chain( - *[[(timestamp, measures_agg[agg].aggregation.granularity, value) - for timestamp, value in measures_agg[agg]] - for agg in sorted(aggs, - key=storage.ATTRGETTER_GRANULARITY, - reverse=True)])) - for aggmethod, aggs in itertools.groupby(measures_agg.keys(), - storage.ATTRGETTER_METHOD) - } - - class TestStorageDriver(tests_base.TestCase): def setUp(self): super(TestStorageDriver, self).setUp() diff --git a/gnocchi/tests/test_utils.py b/gnocchi/tests/test_utils.py index c000d070..50856a8b 100644 --- a/gnocchi/tests/test_utils.py +++ b/gnocchi/tests/test_utils.py @@ -19,6 +19,7 @@ import uuid import iso8601 import mock +from gnocchi import storage from gnocchi.tests import base as tests_base from gnocchi import utils @@ -148,3 +149,16 @@ class ReturnNoneOnFailureTest(tests_base.TestCase): raise Exception("boom") self.assertIsNone(foobar()) + + +def get_measures_list(measures_agg): + return { + aggmethod: list(itertools.chain( + *[[(timestamp, measures_agg[agg].aggregation.granularity, value) + for timestamp, value in measures_agg[agg]] + for agg in sorted(aggs, + key=storage.ATTRGETTER_GRANULARITY, + reverse=True)])) + for aggmethod, aggs in itertools.groupby(measures_agg.keys(), + storage.ATTRGETTER_METHOD) + } -- GitLab From 2d7d2f9c1f1a2ddbf1140b871b0c738d448e59a6 Mon Sep 17 00:00:00 2001 From: Thomas Goirand Date: Sat, 25 Aug 2018 15:19:10 +0200 Subject: [PATCH 1392/1483] * New upstream release. * Fixed (build-)depends for this release. 
--- debian/changelog | 9 +++++++-- debian/control | 1 + 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/debian/changelog b/debian/changelog index c97f1d55..ea42c699 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,8 +1,13 @@ -gnocchi (4.2.0-6) UNRELEASED; urgency=medium +gnocchi (4.3.1-1) experimental; urgency=medium + [ Ondřej Nový ] * d/control: Use team+openstack@tracker.debian.org as maintainer - -- Ondřej Nový Fri, 03 Aug 2018 06:12:57 +0200 + [ Thomas Goirand ] + * New upstream release. + * Fixed (build-)depends for this release. + + -- Thomas Goirand Sat, 25 Aug 2018 15:18:34 +0200 gnocchi (4.2.0-5) unstable; urgency=medium diff --git a/debian/control b/debian/control index d54c85f2..0ee58274 100644 --- a/debian/control +++ b/debian/control @@ -11,6 +11,7 @@ Build-Depends: python3-all, python3-pbr, python3-setuptools, + python3-setuptools-scm, python3-sphinx, Build-Depends-Indep: alembic, -- GitLab From 7bd2e497bd9e3c7b88371122f41b6364dcee55c7 Mon Sep 17 00:00:00 2001 From: Thomas Goirand Date: Sat, 25 Aug 2018 15:19:59 +0200 Subject: [PATCH 1393/1483] Refreshed patches. --- debian/changelog | 1 + debian/patches/no-distutils-usage.diff | 14 +++++++------- debian/patches/py3-compat.patch | 4 ++-- 3 files changed, 10 insertions(+), 9 deletions(-) diff --git a/debian/changelog b/debian/changelog index ea42c699..9030aebc 100644 --- a/debian/changelog +++ b/debian/changelog @@ -6,6 +6,7 @@ gnocchi (4.3.1-1) experimental; urgency=medium [ Thomas Goirand ] * New upstream release. * Fixed (build-)depends for this release. + * Refreshed patches. 
-- Thomas Goirand Sat, 25 Aug 2018 15:18:34 +0200 diff --git a/debian/patches/no-distutils-usage.diff b/debian/patches/no-distutils-usage.diff index c16c9f20..594439c9 100644 --- a/debian/patches/no-distutils-usage.diff +++ b/debian/patches/no-distutils-usage.diff @@ -3,10 +3,10 @@ Author: Matthias Klose Forwarded: https://github.com/gnocchixyz/gnocchi/pull/904 Last-Update: 2018-06-04 -Index: b/gnocchi/cli/api.py +Index: gnocchi/gnocchi/cli/api.py =================================================================== ---- a/gnocchi/cli/api.py -+++ b/gnocchi/cli/api.py +--- gnocchi.orig/gnocchi/cli/api.py ++++ gnocchi/gnocchi/cli/api.py @@ -14,7 +14,7 @@ # See the License for the specific language governing permissions and # limitations under the License. @@ -16,7 +16,7 @@ Index: b/gnocchi/cli/api.py import math import os import sys -@@ -73,7 +73,7 @@ def api(): +@@ -78,7 +78,7 @@ def api(): "No need to pass `--' in gnocchi-api command line anymore, " "please remove") @@ -25,10 +25,10 @@ Index: b/gnocchi/cli/api.py if not uwsgi: LOG.error("Unable to find `uwsgi'.\n" "Be sure it is installed and in $PATH.") -Index: b/gnocchi/utils.py +Index: gnocchi/gnocchi/utils.py =================================================================== ---- a/gnocchi/utils.py -+++ b/gnocchi/utils.py +--- gnocchi.orig/gnocchi/utils.py ++++ gnocchi/gnocchi/utils.py @@ -15,7 +15,6 @@ # License for the specific language governing permissions and limitations # under the License. 
diff --git a/debian/patches/py3-compat.patch b/debian/patches/py3-compat.patch index 7a82b51b..d877ef39 100644 --- a/debian/patches/py3-compat.patch +++ b/debian/patches/py3-compat.patch @@ -26,12 +26,12 @@ Index: gnocchi/gnocchi/cli/metricd.py + ) self.coord = get_coordinator_and_start(member_id, self.conf.coordination_url) - self.store = storage.get_driver(self.conf, self.coord) + self.store = storage.get_driver(self.conf) Index: gnocchi/gnocchi/rest/app.py =================================================================== --- gnocchi.orig/gnocchi/rest/app.py +++ gnocchi/gnocchi/rest/app.py -@@ -93,7 +93,7 @@ class GnocchiHook(pecan.hooks.PecanHook) +@@ -101,7 +101,7 @@ class GnocchiHook(pecan.hooks.PecanHook) # entirely. self.backends[name] = ( metricd.get_coordinator_and_start( -- GitLab From ad20fce5c1a040c8eb3bc6d40a5d70478eef5bac Mon Sep 17 00:00:00 2001 From: Thomas Goirand Date: Sat, 25 Aug 2018 15:33:55 +0200 Subject: [PATCH 1394/1483] export SETUPTOOLS_SCM_PRETEND_VERSION=. --- debian/changelog | 1 + debian/rules | 2 ++ 2 files changed, 3 insertions(+) diff --git a/debian/changelog b/debian/changelog index 9030aebc..5336d5fa 100644 --- a/debian/changelog +++ b/debian/changelog @@ -7,6 +7,7 @@ gnocchi (4.3.1-1) experimental; urgency=medium * New upstream release. * Fixed (build-)depends for this release. * Refreshed patches. + * export SETUPTOOLS_SCM_PRETEND_VERSION=. 
-- Thomas Goirand Sat, 25 Aug 2018 15:18:34 +0200 diff --git a/debian/rules b/debian/rules index 364a9ebf..6e108cff 100755 --- a/debian/rules +++ b/debian/rules @@ -3,6 +3,8 @@ UPSTREAM_GIT:=https://github.com/gnocchixyz/gnocchi include /usr/share/openstack-pkg-tools/pkgos.make +export SETUPTOOLS_SCM_PRETEND_VERSION=$(shell dpkg-parsechangelog -SVersion | sed -e 's/^[[:digit:]]*://' -e 's/[-].*//' -e 's/~/.0/' -e 's/+dfsg1//' | head -n 1) + UNIT_TEST_BLACKLIST = test_carbonara.CarbonaraCmd.*|.*test_bin\.BinTestCase\.test_gnocchi_config_generator_run.* %: -- GitLab From b96a67691e40152c4cc2785a2ec101beb1e27099 Mon Sep 17 00:00:00 2001 From: Thomas Goirand Date: Sat, 25 Aug 2018 15:38:07 +0200 Subject: [PATCH 1395/1483] Do not call setup.py clean. --- debian/changelog | 1 + debian/rules | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/debian/changelog b/debian/changelog index 5336d5fa..0a4ccfb0 100644 --- a/debian/changelog +++ b/debian/changelog @@ -8,6 +8,7 @@ gnocchi (4.3.1-1) experimental; urgency=medium * Fixed (build-)depends for this release. * Refreshed patches. * export SETUPTOOLS_SCM_PRETEND_VERSION=. + * Do not call setup.py clean. -- Thomas Goirand Sat, 25 Aug 2018 15:18:34 +0200 diff --git a/debian/rules b/debian/rules index 6e108cff..81c3f3d8 100755 --- a/debian/rules +++ b/debian/rules @@ -16,7 +16,7 @@ override_dh_clean: rm -rf debian/CHANGEME-common.postrm debian/*.templates debian/po override_dh_auto_clean: - python3 setup.py clean + echo "Do nothing ..." override_dh_auto_build: /usr/share/openstack-pkg-tools/pkgos_insert_include pkgos_func gnocchi-common.postinst -- GitLab From 83776ba5546d1bc221ec30aa4424e6828ef9e675 Mon Sep 17 00:00:00 2001 From: Thomas Goirand Date: Sat, 25 Aug 2018 20:45:38 +0000 Subject: [PATCH 1396/1483] Add install-missing-files.patch. 
--- debian/changelog | 1 + debian/patches/install-missing-files.patch | 7 +++++++ debian/patches/series | 1 + 3 files changed, 9 insertions(+) create mode 100644 debian/patches/install-missing-files.patch diff --git a/debian/changelog b/debian/changelog index 0a4ccfb0..bd4c5d15 100644 --- a/debian/changelog +++ b/debian/changelog @@ -9,6 +9,7 @@ gnocchi (4.3.1-1) experimental; urgency=medium * Refreshed patches. * export SETUPTOOLS_SCM_PRETEND_VERSION=. * Do not call setup.py clean. + * Add install-missing-files.patch. -- Thomas Goirand Sat, 25 Aug 2018 15:18:34 +0200 diff --git a/debian/patches/install-missing-files.patch b/debian/patches/install-missing-files.patch new file mode 100644 index 00000000..48bf7aaf --- /dev/null +++ b/debian/patches/install-missing-files.patch @@ -0,0 +1,7 @@ +--- MANIFEST.in 2018-08-25 20:43:59.663146409 +0000 ++++ ../MANIFEST.in 2018-08-25 20:44:33.634292853 +0000 +@@ -2,3 +2,4 @@ + include AUTHORS + exclude .gitignore + exclude .github ++recursive-include gnocchi * diff --git a/debian/patches/series b/debian/patches/series index 30a054d7..28ad9f4e 100644 --- a/debian/patches/series +++ b/debian/patches/series @@ -1,2 +1,3 @@ py3-compat.patch no-distutils-usage.diff +install-missing-files.patch -- GitLab From 7e1814e43b8f19af6efbc66e0a68d8603214e48b Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Tue, 28 Aug 2018 16:09:45 +0200 Subject: [PATCH 1397/1483] Change the way to mimic pbr version Currently we have some case where `python setup.py --version` add `+nnone` to the local version. Since we don't care of the local part just throw it. --- setup.py | 13 ++----------- 1 file changed, 2 insertions(+), 11 deletions(-) diff --git a/setup.py b/setup.py index 03534032..f3972f25 100755 --- a/setup.py +++ b/setup.py @@ -31,19 +31,10 @@ except ImportError: pass -def pbr_compat(v): - from setuptools_scm import version - # NOTE(sileht): this removes +g. to generate the same number as - # pbr. 
i don't get why yet but something call pbr even we don't depends on - # it anymore - v.dirty = False - v.node = None - return version.guess_next_dev_version(v) - - setuptools.setup( setup_requires=['setuptools>=30.3.0', 'setuptools_scm!=1.16.0,!=1.16.1,!=1.16.2'], - use_scm_version={'version_scheme': pbr_compat}, + # Remove any local stuff to mimic pbr + use_scm_version={'local_scheme': lambda v: ""}, cmdclass=cmdclass, ) -- GitLab From 768fc724f00eb63e84db1e9ec7f95aec226f2f9a Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Tue, 28 Aug 2018 16:09:45 +0200 Subject: [PATCH 1398/1483] Change the way to mimic pbr version Currently we have some case where `python setup.py --version` add `+nnone` to the local version. Since we don't care of the local part just throw it. (cherry picked from commit 7e1814e43b8f19af6efbc66e0a68d8603214e48b) --- setup.py | 13 ++----------- 1 file changed, 2 insertions(+), 11 deletions(-) diff --git a/setup.py b/setup.py index 03534032..f3972f25 100755 --- a/setup.py +++ b/setup.py @@ -31,19 +31,10 @@ except ImportError: pass -def pbr_compat(v): - from setuptools_scm import version - # NOTE(sileht): this removes +g. to generate the same number as - # pbr. i don't get why yet but something call pbr even we don't depends on - # it anymore - v.dirty = False - v.node = None - return version.guess_next_dev_version(v) - - setuptools.setup( setup_requires=['setuptools>=30.3.0', 'setuptools_scm!=1.16.0,!=1.16.1,!=1.16.2'], - use_scm_version={'version_scheme': pbr_compat}, + # Remove any local stuff to mimic pbr + use_scm_version={'local_scheme': lambda v: ""}, cmdclass=cmdclass, ) -- GitLab From 4f82422e8861cbfd4df0a9df944733caf39fb27b Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Wed, 22 Aug 2018 21:12:15 +0200 Subject: [PATCH 1399/1483] Revert "Disable Web doc build on stable/4.3 branch" This reverts commit 88bfa6fe4e775bb77a321a387b29195c30d6678b which was merged into master by mistake. 
--- .travis.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.travis.yml b/.travis.yml index 9194247d..f0ab30ae 100644 --- a/.travis.yml +++ b/.travis.yml @@ -10,6 +10,7 @@ cache: env: - TARGET: pep8 - TARGET: docs + - TARGET: docs-gnocchi.xyz - TARGET: py27-mysql-ceph-upgrade-from-4.3 - TARGET: py37-postgresql-file-upgrade-from-4.3 -- GitLab From cb9be14b64de59df74817940bf544cdffb6fbe3f Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Tue, 4 Sep 2018 13:51:30 +0200 Subject: [PATCH 1400/1483] Don't require setuptools as runtime --- setup.cfg | 1 - 1 file changed, 1 deletion(-) diff --git a/setup.cfg b/setup.cfg index 67fc2773..798e37e4 100644 --- a/setup.cfg +++ b/setup.cfg @@ -24,7 +24,6 @@ packages = include_package_data = true install_requires = - setuptools>=30.3 numpy>=1.9.0 iso8601 oslo.config>=3.22.0 -- GitLab From 29b924ecfdcaf216c840e54f6aba53935d4e5d9f Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Tue, 4 Sep 2018 13:51:30 +0200 Subject: [PATCH 1401/1483] Don't require setuptools as runtime (cherry picked from commit cb9be14b64de59df74817940bf544cdffb6fbe3f) --- setup.cfg | 1 - 1 file changed, 1 deletion(-) diff --git a/setup.cfg b/setup.cfg index f0d6d083..1cd1652a 100644 --- a/setup.cfg +++ b/setup.cfg @@ -24,7 +24,6 @@ packages = include_package_data = true install_requires = - setuptools>=30.3 numpy>=1.9.0 iso8601 oslo.config>=3.22.0 -- GitLab From d8ccb18c465ca7e4118f8acb7c897b0830e8d3e4 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Fri, 7 Sep 2018 11:29:39 +0200 Subject: [PATCH 1402/1483] docs: use remove sphinx < 1.6 limitation, disable gnocchi.xyz doc job This is needed by reno. 
docs-gnocchi.xyz job does not work see #953 --- .travis.yml | 2 +- setup.cfg | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.travis.yml b/.travis.yml index f0ab30ae..fe3b59e3 100644 --- a/.travis.yml +++ b/.travis.yml @@ -10,7 +10,7 @@ cache: env: - TARGET: pep8 - TARGET: docs - - TARGET: docs-gnocchi.xyz + # - TARGET: docs-gnocchi.xyz - TARGET: py27-mysql-ceph-upgrade-from-4.3 - TARGET: py37-postgresql-file-upgrade-from-4.3 diff --git a/setup.cfg b/setup.cfg index 798e37e4..e822ff30 100644 --- a/setup.cfg +++ b/setup.cfg @@ -84,7 +84,7 @@ prometheus = amqp1: python-qpid-proton>=0.17.0 doc = - sphinx<1.6.0 + sphinx sphinx_rtd_theme sphinxcontrib-httpdomain PyYAML -- GitLab From c8be6382a55f46d17d1b00fc643157859f94874f Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Thu, 6 Sep 2018 10:27:31 +0200 Subject: [PATCH 1403/1483] redis: fix sentinel Lua script registrations Fixes #966 --- gnocchi/common/redis.py | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/gnocchi/common/redis.py b/gnocchi/common/redis.py index 8f9ceeb8..8607aab8 100644 --- a/gnocchi/common/redis.py +++ b/gnocchi/common/redis.py @@ -154,12 +154,11 @@ def get_client(conf, scripts=None): del kwargs['sentinel'] if 'sentinel_fallback' in kwargs: del kwargs['sentinel_fallback'] - master_client = sentinel_server.master_for(sentinel_name, **kwargs) - # The master_client is a redis.StrictRedis using a + # The client is a redis.StrictRedis using a # Sentinel managed connection pool. 
- return master_client - - client = redis.StrictRedis(**kwargs) + client = sentinel_server.master_for(sentinel_name, **kwargs) + else: + client = redis.StrictRedis(**kwargs) if scripts is not None: scripts = { -- GitLab From ca67a8545e240769c9e9a36d958af5f0ca23cdac Mon Sep 17 00:00:00 2001 From: Vladyslav Drok Date: Tue, 11 Sep 2018 13:48:36 +0300 Subject: [PATCH 1404/1483] Fix splits update statistics dictionary key Change-Id: I9747a9bc9f9adea61bb6bcca571aa90c473172d5 --- gnocchi/storage/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/gnocchi/storage/__init__.py b/gnocchi/storage/__init__.py index 86383dda..4cea6ffb 100644 --- a/gnocchi/storage/__init__.py +++ b/gnocchi/storage/__init__.py @@ -685,7 +685,7 @@ class StorageDriver(object): self.statistics["splits delete"] += len(splits_to_delete) with self.statistics.time("splits update"): self._update_metric_splits(splits_to_update) - self.statistics["splits delete"] += len(splits_to_update) + self.statistics["splits update"] += len(splits_to_update) with self.statistics.time("raw measures store"): self._store_unaggregated_timeseries(new_boundts) self.statistics["raw measures store"] += len(new_boundts) -- GitLab From cf0ebc4060a915134a66d53130aa3fc2f60cf23f Mon Sep 17 00:00:00 2001 From: Vladyslav Drok Date: Fri, 17 Aug 2018 17:13:48 +0300 Subject: [PATCH 1405/1483] Fix computation for metrics with multiple granularities The high archive policy created by default contains three different granularity definitions - 1 second, 1 hour and one day. For such metrics the split computation has been done incorrectly as it was using oldest keys not taking into account those keys granularity. It caused TypeErrors when comparing two SplitKeys with different granularities. 
closes: gnocchixyz/gnocchi#959 --- gnocchi/storage/__init__.py | 24 +++++++++++++++++------- gnocchi/tests/test_storage.py | 28 ++++++++++++++++++++++++++++ 2 files changed, 45 insertions(+), 7 deletions(-) diff --git a/gnocchi/storage/__init__.py b/gnocchi/storage/__init__.py index 4cea6ffb..ff66d355 100644 --- a/gnocchi/storage/__init__.py +++ b/gnocchi/storage/__init__.py @@ -459,16 +459,17 @@ class StorageDriver(object): ) aggregations_needing_list_of_keys = set() + oldest_values = {} for aggregation, ts in six.iteritems(aggregations_and_timeseries): # Don't do anything if the timeseries is empty if not ts: continue - if aggregation.timespan: - oldest_point_to_keep = ts.truncate(aggregation.timespan) - else: - oldest_point_to_keep = None + agg_oldest_values = { + 'oldest_point_to_keep': ts.truncate(aggregation.timespan) + if aggregation.timespan else None, + 'prev_oldest_mutable_key': None, 'oldest_mutable_key': None} if previous_oldest_mutable_timestamp and (aggregation.timespan or need_rewrite): @@ -480,6 +481,12 @@ class StorageDriver(object): # object for an old object to be cleanup if previous_oldest_mutable_key != oldest_mutable_key: aggregations_needing_list_of_keys.add(aggregation) + agg_oldest_values['prev_oldest_mutable_key'] = ( + previous_oldest_mutable_key) + agg_oldest_values['oldest_mutable_key'] = ( + oldest_mutable_key) + + oldest_values[aggregation.granularity] = agg_oldest_values all_existing_keys = self._list_split_keys( {metric: aggregations_needing_list_of_keys})[metric] @@ -495,7 +502,10 @@ class StorageDriver(object): if not ts: continue - oldest_key_to_keep = ts.get_split_key(oldest_point_to_keep) + agg_oldest_values = oldest_values[aggregation.granularity] + + oldest_key_to_keep = ts.get_split_key( + agg_oldest_values['oldest_point_to_keep']) # If we listed the keys for the aggregation, that's because we need # to check for cleanup and/or rewrite @@ -523,8 +533,8 @@ class StorageDriver(object): # not the first time we treat this 
timeserie. if need_rewrite: for key in existing_keys: - if previous_oldest_mutable_key <= key: - if key >= oldest_mutable_key: + if agg_oldest_values['prev_oldest_mutable_key'] <= key: + if key >= agg_oldest_values['oldest_mutable_key']: break LOG.debug( "Compressing previous split %s (%s) for " diff --git a/gnocchi/tests/test_storage.py b/gnocchi/tests/test_storage.py index 874aac48..903f7e6c 100644 --- a/gnocchi/tests/test_storage.py +++ b/gnocchi/tests/test_storage.py @@ -649,6 +649,34 @@ class TestStorageDriver(tests_base.TestCase): ]}, get_measures_list(self.storage.get_aggregated_measures( {self.metric: [aggregation]})[self.metric])) + def test_rewrite_measures_multiple_granularities(self): + apname = str(uuid.uuid4()) + # Create an archive policy with two different granularities + ap = archive_policy.ArchivePolicy(apname, 0, [(36000, 60), (36000, 1)]) + self.index.create_archive_policy(ap) + self.metric = indexer.Metric(uuid.uuid4(), ap) + self.index.create_metric(self.metric.id, str(uuid.uuid4()), + apname) + + # First store some points + self.incoming.add_measures(self.metric.id, [ + incoming.Measure(datetime64(2016, 1, 6, 18, 15, 46), 43), + incoming.Measure(datetime64(2016, 1, 6, 18, 15, 47), 43), + incoming.Measure(datetime64(2016, 1, 6, 18, 15, 48), 43), + ]) + self.trigger_processing() + + # Add some more points, mocking out WRITE_FULL attribute of the current + # driver, so that rewrite happens + self.incoming.add_measures(self.metric.id, [ + incoming.Measure(datetime64(2016, 1, 7, 18, 15, 49), 43), + incoming.Measure(datetime64(2016, 1, 7, 18, 15, 50), 43), + incoming.Measure(datetime64(2016, 1, 7, 18, 18, 46), 43), + ]) + driver = storage.get_driver(self.conf) + with mock.patch.object(driver.__class__, 'WRITE_FULL', False): + self.trigger_processing() + def test_rewrite_measures_oldest_mutable_timestamp_eq_next_key(self): """See LP#1655422""" # Create an archive policy that spans on several splits. 
Each split -- GitLab From 6362ee85c3fe1796fcb958c735166863cb350469 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Fri, 7 Sep 2018 11:29:39 +0200 Subject: [PATCH 1406/1483] docs: use remove sphinx < 1.6 limitation, disable gnocchi.xyz doc job This is needed by reno. docs-gnocchi.xyz job does not work see #953 (cherry picked from commit d8ccb18c465ca7e4118f8acb7c897b0830e8d3e4) --- setup.cfg | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.cfg b/setup.cfg index 1cd1652a..57201b4f 100644 --- a/setup.cfg +++ b/setup.cfg @@ -82,7 +82,7 @@ prometheus = python-snappy protobuf doc = - sphinx<1.6.0 + sphinx sphinx_rtd_theme sphinxcontrib-httpdomain PyYAML -- GitLab From 799dfe110617f97a7a5e65338b7aeae0bb263fb4 Mon Sep 17 00:00:00 2001 From: Vladyslav Drok Date: Tue, 11 Sep 2018 13:48:36 +0300 Subject: [PATCH 1407/1483] Fix splits update statistics dictionary key Change-Id: I9747a9bc9f9adea61bb6bcca571aa90c473172d5 (cherry picked from commit ca67a8545e240769c9e9a36d958af5f0ca23cdac) --- gnocchi/storage/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/gnocchi/storage/__init__.py b/gnocchi/storage/__init__.py index 7d2e0a0d..48bd8eb5 100644 --- a/gnocchi/storage/__init__.py +++ b/gnocchi/storage/__init__.py @@ -715,7 +715,7 @@ class StorageDriver(object): self.statistics["splits delete"] += len(splits_to_delete) with self.statistics.time("splits update"): self._update_metric_splits(splits_to_update) - self.statistics["splits delete"] += len(splits_to_update) + self.statistics["splits update"] += len(splits_to_update) with self.statistics.time("raw measures store"): self._store_unaggregated_timeseries(new_boundts) self.statistics["raw measures store"] += len(new_boundts) -- GitLab From 8121b87677fde4e56464bdbc7ab6de4a1f7fa292 Mon Sep 17 00:00:00 2001 From: Vladyslav Drok Date: Fri, 17 Aug 2018 17:13:48 +0300 Subject: [PATCH 1408/1483] Fix computation for metrics with multiple granularities The high archive policy created 
by default contains three different granularity definitions - 1 second, 1 hour and one day. For such metrics the split computation has been done incorrectly as it was using oldest keys not taking into account those keys granularity. It caused TypeErrors when comparing two SplitKeys with different granularities. closes: gnocchixyz/gnocchi#959 (cherry picked from commit cf0ebc4060a915134a66d53130aa3fc2f60cf23f) --- gnocchi/storage/__init__.py | 24 +++++++++++++++++------- gnocchi/tests/test_storage.py | 28 ++++++++++++++++++++++++++++ 2 files changed, 45 insertions(+), 7 deletions(-) diff --git a/gnocchi/storage/__init__.py b/gnocchi/storage/__init__.py index 48bd8eb5..360828af 100644 --- a/gnocchi/storage/__init__.py +++ b/gnocchi/storage/__init__.py @@ -489,16 +489,17 @@ class StorageDriver(object): ) aggregations_needing_list_of_keys = set() + oldest_values = {} for aggregation, ts in six.iteritems(aggregations_and_timeseries): # Don't do anything if the timeseries is empty if not ts: continue - if aggregation.timespan: - oldest_point_to_keep = ts.truncate(aggregation.timespan) - else: - oldest_point_to_keep = None + agg_oldest_values = { + 'oldest_point_to_keep': ts.truncate(aggregation.timespan) + if aggregation.timespan else None, + 'prev_oldest_mutable_key': None, 'oldest_mutable_key': None} if previous_oldest_mutable_timestamp and (aggregation.timespan or need_rewrite): @@ -510,6 +511,12 @@ class StorageDriver(object): # object for an old object to be cleanup if previous_oldest_mutable_key != oldest_mutable_key: aggregations_needing_list_of_keys.add(aggregation) + agg_oldest_values['prev_oldest_mutable_key'] = ( + previous_oldest_mutable_key) + agg_oldest_values['oldest_mutable_key'] = ( + oldest_mutable_key) + + oldest_values[aggregation.granularity] = agg_oldest_values all_existing_keys = self._list_split_keys( {metric: aggregations_needing_list_of_keys})[metric] @@ -525,7 +532,10 @@ class StorageDriver(object): if not ts: continue - oldest_key_to_keep = 
ts.get_split_key(oldest_point_to_keep) + agg_oldest_values = oldest_values[aggregation.granularity] + + oldest_key_to_keep = ts.get_split_key( + agg_oldest_values['oldest_point_to_keep']) # If we listed the keys for the aggregation, that's because we need # to check for cleanup and/or rewrite @@ -553,8 +563,8 @@ class StorageDriver(object): # not the first time we treat this timeserie. if need_rewrite: for key in existing_keys: - if previous_oldest_mutable_key <= key: - if key >= oldest_mutable_key: + if agg_oldest_values['prev_oldest_mutable_key'] <= key: + if key >= agg_oldest_values['oldest_mutable_key']: break LOG.debug( "Compressing previous split %s (%s) for " diff --git a/gnocchi/tests/test_storage.py b/gnocchi/tests/test_storage.py index c65d6119..7d6846c3 100644 --- a/gnocchi/tests/test_storage.py +++ b/gnocchi/tests/test_storage.py @@ -639,6 +639,34 @@ class TestStorageDriver(tests_base.TestCase): (datetime64(2016, 1, 10, 17, 12), numpy.timedelta64(1, 'm'), 46), ]}, self.storage.get_measures(self.metric, [aggregation])) + def test_rewrite_measures_multiple_granularities(self): + apname = str(uuid.uuid4()) + # Create an archive policy with two different granularities + ap = archive_policy.ArchivePolicy(apname, 0, [(36000, 60), (36000, 1)]) + self.index.create_archive_policy(ap) + self.metric = indexer.Metric(uuid.uuid4(), ap) + self.index.create_metric(self.metric.id, str(uuid.uuid4()), + apname) + + # First store some points + self.incoming.add_measures(self.metric.id, [ + incoming.Measure(datetime64(2016, 1, 6, 18, 15, 46), 43), + incoming.Measure(datetime64(2016, 1, 6, 18, 15, 47), 43), + incoming.Measure(datetime64(2016, 1, 6, 18, 15, 48), 43), + ]) + self.trigger_processing() + + # Add some more points, mocking out WRITE_FULL attribute of the current + # driver, so that rewrite happens + self.incoming.add_measures(self.metric.id, [ + incoming.Measure(datetime64(2016, 1, 7, 18, 15, 49), 43), + incoming.Measure(datetime64(2016, 1, 7, 18, 15, 50), 43), + 
incoming.Measure(datetime64(2016, 1, 7, 18, 18, 46), 43), + ]) + driver = storage.get_driver(self.conf) + with mock.patch.object(driver.__class__, 'WRITE_FULL', False): + self.trigger_processing() + def test_rewrite_measures_oldest_mutable_timestamp_eq_next_key(self): """See LP#1655422""" # Create an archive policy that spans on several splits. Each split -- GitLab From dce5db9b9ff87bafd65b3c675c57b611cf29a6ff Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Thu, 6 Sep 2018 10:27:31 +0200 Subject: [PATCH 1409/1483] redis: fix sentinel Lua script registrations Fixes #966 (cherry picked from commit c8be6382a55f46d17d1b00fc643157859f94874f) --- gnocchi/common/redis.py | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/gnocchi/common/redis.py b/gnocchi/common/redis.py index 8f9ceeb8..8607aab8 100644 --- a/gnocchi/common/redis.py +++ b/gnocchi/common/redis.py @@ -154,12 +154,11 @@ def get_client(conf, scripts=None): del kwargs['sentinel'] if 'sentinel_fallback' in kwargs: del kwargs['sentinel_fallback'] - master_client = sentinel_server.master_for(sentinel_name, **kwargs) - # The master_client is a redis.StrictRedis using a + # The client is a redis.StrictRedis using a # Sentinel managed connection pool. 
- return master_client - - client = redis.StrictRedis(**kwargs) + client = sentinel_server.master_for(sentinel_name, **kwargs) + else: + client = redis.StrictRedis(**kwargs) if scripts is not None: scripts = { -- GitLab From ca28902e2dcb295505770d40be45abc5a839f77b Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Fri, 28 Sep 2018 11:52:21 +0200 Subject: [PATCH 1410/1483] Hide username/password in indexer __str__ representation Closes #980 --- gnocchi/indexer/sqlalchemy.py | 12 +++++++++++- gnocchi/tests/test_indexer.py | 6 +++++- 2 files changed, 16 insertions(+), 2 deletions(-) diff --git a/gnocchi/indexer/sqlalchemy.py b/gnocchi/indexer/sqlalchemy.py index bd0ab812..f9c0dbc0 100644 --- a/gnocchi/indexer/sqlalchemy.py +++ b/gnocchi/indexer/sqlalchemy.py @@ -38,6 +38,7 @@ try: except ImportError: pymysql = None import six +from six.moves.urllib import parse as urlparse import sqlalchemy from sqlalchemy.engine import url as sqlalchemy_url import sqlalchemy.exc @@ -291,7 +292,16 @@ class SQLAlchemyIndexer(indexer.IndexerDriver): self.facade = PerInstanceFacade(conf) def __str__(self): - return "%s: %s" % (self.__class__.__name__, self.conf.indexer.url) + parsed = urlparse.urlparse(self.conf.indexer.url) + url = urlparse.urlunparse(( + parsed.scheme, + "***:***@%s%s" % (parsed.hostname, + ":%s" % parsed.port if parsed.port else ""), + parsed.path, + parsed.params, + parsed.query, + parsed.fragment)) + return "%s: %s" % (self.__class__.__name__, url) def disconnect(self): self.facade.dispose() diff --git a/gnocchi/tests/test_indexer.py b/gnocchi/tests/test_indexer.py index 2671bbfc..1216eeba 100644 --- a/gnocchi/tests/test_indexer.py +++ b/gnocchi/tests/test_indexer.py @@ -40,7 +40,11 @@ class TestIndexerDriver(tests_base.TestCase): def test_str(self): self.assertEqual("%s: %s" % (self.index.__class__.__name__, - self.conf.indexer.url), str(self.index)) + self.conf.indexer.url.replace( + "root@", "").replace( + "localhost", "***:***@localhost" + )), + 
str(self.index)) def test_create_archive_policy_already_exists(self): # NOTE(jd) This archive policy -- GitLab From 54bae4ecb0ae7147732628817dc064f27a4e1f3e Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Fri, 28 Sep 2018 11:52:21 +0200 Subject: [PATCH 1411/1483] Hide username/password in indexer __str__ representation Closes #980 (cherry picked from commit ca28902e2dcb295505770d40be45abc5a839f77b) --- gnocchi/indexer/sqlalchemy.py | 12 +++++++++++- gnocchi/tests/test_indexer.py | 6 +++++- 2 files changed, 16 insertions(+), 2 deletions(-) diff --git a/gnocchi/indexer/sqlalchemy.py b/gnocchi/indexer/sqlalchemy.py index bd0ab812..f9c0dbc0 100644 --- a/gnocchi/indexer/sqlalchemy.py +++ b/gnocchi/indexer/sqlalchemy.py @@ -38,6 +38,7 @@ try: except ImportError: pymysql = None import six +from six.moves.urllib import parse as urlparse import sqlalchemy from sqlalchemy.engine import url as sqlalchemy_url import sqlalchemy.exc @@ -291,7 +292,16 @@ class SQLAlchemyIndexer(indexer.IndexerDriver): self.facade = PerInstanceFacade(conf) def __str__(self): - return "%s: %s" % (self.__class__.__name__, self.conf.indexer.url) + parsed = urlparse.urlparse(self.conf.indexer.url) + url = urlparse.urlunparse(( + parsed.scheme, + "***:***@%s%s" % (parsed.hostname, + ":%s" % parsed.port if parsed.port else ""), + parsed.path, + parsed.params, + parsed.query, + parsed.fragment)) + return "%s: %s" % (self.__class__.__name__, url) def disconnect(self): self.facade.dispose() diff --git a/gnocchi/tests/test_indexer.py b/gnocchi/tests/test_indexer.py index 2671bbfc..1216eeba 100644 --- a/gnocchi/tests/test_indexer.py +++ b/gnocchi/tests/test_indexer.py @@ -40,7 +40,11 @@ class TestIndexerDriver(tests_base.TestCase): def test_str(self): self.assertEqual("%s: %s" % (self.index.__class__.__name__, - self.conf.indexer.url), str(self.index)) + self.conf.indexer.url.replace( + "root@", "").replace( + "localhost", "***:***@localhost" + )), + str(self.index)) def 
test_create_archive_policy_already_exists(self): # NOTE(jd) This archive policy -- GitLab From fc0221b18cc32795539a85afaad7d43d37dea1df Mon Sep 17 00:00:00 2001 From: Mathias Laurin Date: Fri, 5 Oct 2018 11:06:07 +0200 Subject: [PATCH 1412/1483] Fixup rendering on Pypi The description of the project is currently not rendered on Pypi. Instead, Pypi shows raw rst. Checking the page with "readme_renderer" reports "undefined substitutions" for "time series" and "aggregates". This patch adds a before_deploy section to .travis.yml that uses sed to remove matching pairs of pipes from the README. The resulting README was tested again with the readme renderer. See Also: https://pypi.org/project/readme_renderer/ --- .travis.yml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/.travis.yml b/.travis.yml index fe3b59e3..ef3be1fb 100644 --- a/.travis.yml +++ b/.travis.yml @@ -66,6 +66,10 @@ notifications: channels: - "irc.freenode.org#gnocchi" +before_deploy: + # Remove |substitutions| to fix rendering on pypi. 
+ - sed -i -e 's/|\([a-zA-Z0-9 ]\+\)|/\1/g' README.rst + deploy: provider: pypi user: jd -- GitLab From 2d1f0585845d0fe2947ba34caeb4ab11aa706c4f Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Tue, 4 Sep 2018 23:07:26 +0200 Subject: [PATCH 1413/1483] docs: upgrade setuptools on doc build Fixes #953 --- tox.ini | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tox.ini b/tox.ini index 983e6def..7b3672c8 100644 --- a/tox.ini +++ b/tox.ini @@ -103,8 +103,10 @@ basepython = python2.7 whitelist_externals = bash rm setenv = GNOCCHI_STORAGE_DEPS=file GNOCCHI_TEST_DEBUG=1 +install_command = pip install -U {opts} {packages} deps = {[testenv:docs]deps} sphinxcontrib-versioning + setuptools # for < 4.3 doc pbr WebOb<1.8 -- GitLab From 6f060264c7fc3724b902fddb494edbef482a868a Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Fri, 5 Oct 2018 09:38:39 +0200 Subject: [PATCH 1414/1483] Disable sphinxcontrib-versioning This extension does not work anymore and is not maintained upstream. --- .travis.yml | 2 +- tools/validate_docs.sh | 16 ---------------- tox.ini | 16 +--------------- 3 files changed, 2 insertions(+), 32 deletions(-) delete mode 100755 tools/validate_docs.sh diff --git a/.travis.yml b/.travis.yml index ef3be1fb..7a22aa91 100644 --- a/.travis.yml +++ b/.travis.yml @@ -10,7 +10,7 @@ cache: env: - TARGET: pep8 - TARGET: docs - # - TARGET: docs-gnocchi.xyz + - TARGET: docs-gnocchi.xyz - TARGET: py27-mysql-ceph-upgrade-from-4.3 - TARGET: py37-postgresql-file-upgrade-from-4.3 diff --git a/tools/validate_docs.sh b/tools/validate_docs.sh deleted file mode 100755 index 98527ea2..00000000 --- a/tools/validate_docs.sh +++ /dev/null @@ -1,16 +0,0 @@ -#!/bin/bash - -set -e - -# NOTE(sileht): The flags -W with sphinx-versionning does not return 1 -# but when a build fail and the flags is present, the failed version does not appear -# in the version selector. This testchecks this. -ret=0 -for path in doc/build/html/stable*; do - version=$(basename $path) # stable_XXX - if ! 
grep -q $version doc/build/html/index.html ; then - echo "Version $version is missing" - ret=1 - fi -done -exit $ret diff --git a/tox.ini b/tox.ini index 7b3672c8..7e54929b 100644 --- a/tox.ini +++ b/tox.ini @@ -105,24 +105,10 @@ setenv = GNOCCHI_STORAGE_DEPS=file GNOCCHI_TEST_DEBUG=1 install_command = pip install -U {opts} {packages} deps = {[testenv:docs]deps} - sphinxcontrib-versioning setuptools -# for < 4.3 doc - pbr - WebOb<1.8 -# for <= 4.2 doc - scipy -# for <= 4.1 doc - pandas -# for 3.x doc - lz4>=0.9,<=0.13 - oslotest - oslosphinx - retrying commands = rm -rf doc/build/html - pifpaf -g GNOCCHI_INDEXER_URL run postgresql -- sphinx-versioning build doc/source doc/build/html - bash tools/validate_docs.sh + pifpaf -g GNOCCHI_INDEXER_URL run postgresql -- python setup.py build_sphinx [doc8] ignore-path = doc/source/rest.rst,doc/source/comparison-table.rst -- GitLab From 5f171bcd9e5d8c16fda373249de148ed5c9013d8 Mon Sep 17 00:00:00 2001 From: yunshi Date: Fri, 5 Oct 2018 09:34:59 +0200 Subject: [PATCH 1415/1483] api: Display voluptuous error details The error detail looks like: { 'cause': 'Invalid input', 'reason': 'boulet', 'detail': ['field'], } Note: Voluptuous prints `r` before data path for str(voluptuous.Invalid) in Python2. To simplify tests in Python2 and Python3, the error reasons are checked by regex. 
--- gnocchi/rest/api.py | 32 ++++++------ .../gabbits/aggregates-with-metric-ids.yaml | 44 ++++++++++------ .../functional/gabbits/archive-rule.yaml | 7 ++- gnocchi/tests/functional/gabbits/archive.yaml | 51 ++++++++++++++----- gnocchi/tests/functional/gabbits/metric.yaml | 12 +++-- .../functional/gabbits/resource-type.yaml | 44 +++++++++------- .../tests/functional/gabbits/resource.yaml | 17 +++++-- gnocchi/tests/functional/gabbits/search.yaml | 48 ++++++++++------- .../tests/functional_live/gabbits/live.yaml | 42 ++++++++++----- gnocchi/tests/test_rest.py | 25 ++++++--- .../api-user-input-invalid-09b045f5ab12c.yaml | 4 ++ 11 files changed, 212 insertions(+), 114 deletions(-) create mode 100644 releasenotes/notes/api-user-input-invalid-09b045f5ab12c.yaml diff --git a/gnocchi/rest/api.py b/gnocchi/rest/api.py index 9f9bc098..23d1fc3c 100644 --- a/gnocchi/rest/api.py +++ b/gnocchi/rest/api.py @@ -68,7 +68,14 @@ def abort(status_code, detail=''): """Like pecan.abort, but make sure detail is a string.""" if status_code == 404 and not detail: raise RuntimeError("http code 404 must have 'detail' set") - if isinstance(detail, Exception): + + if isinstance(detail, voluptuous.Invalid): + detail = { + 'cause': 'Invalid input', + 'reason': six.text_type(detail), + 'detail': [six.text_type(path) for path in detail.path], + } + elif isinstance(detail, Exception): detail = detail.jsonify() return pecan.abort(status_code, detail) @@ -154,25 +161,18 @@ def deserialize(expected_content_types=None): return params -def validate(schema, data, required=True, detailed_exc=False): +def validate(schema, data, required=True): try: return voluptuous.Schema(schema, required=required)(data) except voluptuous.Invalid as e: - if detailed_exc: - abort(400, {"cause": "Attribute value error", - "reason": str(e), - "detail": e.path}) - else: - abort(400, "Invalid input: %s" % e) + abort(400, e) def deserialize_and_validate(schema, required=True, - expected_content_types=None, - 
detailed_exc=False): + expected_content_types=None): return validate(schema, deserialize(expected_content_types=expected_content_types), - required, - detailed_exc) + required) def Timespan(value): @@ -449,8 +449,7 @@ class MetricController(rest.RestController): @pecan.expose('json') def post_measures(self): self.enforce_metric("post measures") - measures = deserialize_and_validate(MeasuresListSchema, - detailed_exc=True) + measures = deserialize_and_validate(MeasuresListSchema) if measures: pecan.request.incoming.add_measures(self.metric.id, measures) pecan.response.status = 202 @@ -1654,8 +1653,7 @@ class ResourcesMetricsMeasuresBatchController(rest.RestController): MeasuresBatchSchema = voluptuous.Schema( {functools.partial(ResourceID, creator=creator): {six.text_type: self.BackwardCompatibleMeasuresList}}) - body = deserialize_and_validate(MeasuresBatchSchema, - detailed_exc=True) + body = deserialize_and_validate(MeasuresBatchSchema) known_metrics = [] unknown_metrics = [] @@ -2243,7 +2241,7 @@ class PrometheusWriteController(rest.RestController): data = [{'timestamp': s.timestamp_ms / 1000.0, 'value': s.value} for s in ts.samples] measures_by_rid[original_rid][name] = validate( - MeasuresListSchema, data, detailed_exc=True) + MeasuresListSchema, data) creator = pecan.request.auth_helper.get_current_user(pecan.request) diff --git a/gnocchi/tests/functional/gabbits/aggregates-with-metric-ids.yaml b/gnocchi/tests/functional/gabbits/aggregates-with-metric-ids.yaml index 81835c90..8c004d80 100644 --- a/gnocchi/tests/functional/gabbits/aggregates-with-metric-ids.yaml +++ b/gnocchi/tests/functional/gabbits/aggregates-with-metric-ids.yaml @@ -579,8 +579,10 @@ tests: data: operations: [] status: 400 - response_strings: - - "Invalid input: Operation must not be empty" + response_json_paths: + $.description.cause: "Invalid input" + $.description.detail: ['operations'] + $.description.reason: "/^Operation must not be empty/" - name: get operations without list POST: 
/v1/aggregates @@ -592,8 +594,10 @@ tests: operations: foo: bar status: 400 - response_strings: - - "Invalid input: Expected a tuple/list, got a " + response_json_paths: + $.description.cause: "Invalid input" + $.description.detail: ['operations'] + $.description.reason: "/^Expected a tuple/list, got a/" - name: invalid operations string POST: /v1/aggregates @@ -607,7 +611,7 @@ tests: response_json_paths: $.code: 400 $.description.cause: "Invalid operations" - $.description.reason: "Fail to parse the operations string" + $.description.reason: "/^Fail to parse the operations string/" $.description.detail: "Expected \")\" (at char 15), (line:1, col:16)" - name: get invalid metric operations @@ -619,8 +623,10 @@ tests: data: operations: ["metric"] status: 400 - response_strings: - - "Invalid input: Operation need at least one argument for dictionary value" + response_json_paths: + $.description.cause: "Invalid input" + $.description.detail: ["operations"] + $.description.reason: "/^Operation need at least one argument for dictionary value/" - name: get unknown metrics POST: /v1/aggregates @@ -774,8 +780,10 @@ tests: data: operations: "(rolling blah 2 (metric $HISTORY['create metric1'].$RESPONSE['$.id'] mean))" status: 400 - response_strings: - - "Invalid input: 'rolling' operation invalid for dictionary value" + response_json_paths: + $.description.cause: "Invalid input" + $.description.reason: "/^'rolling' operation invalid for dictionary value/" + $.description.detail: ["operations"] - name: get rolling-mean missing window POST: /v1/aggregates @@ -786,8 +794,10 @@ tests: data: operations: "(rolling mean (metric $HISTORY['create metric1'].$RESPONSE['$.id'] mean))" status: 400 - response_strings: - - "Invalid input: 'rolling' operation invalid for dictionary value" + response_json_paths: + $.description.cause: "Invalid input" + $.description.reason: "/^'rolling' operation invalid for dictionary value/" + $.description.detail: ["operations"] - name: get measurements 
from metric and invalid operations POST: /v1/aggregates @@ -798,8 +808,10 @@ tests: data: operations: "(notexist (absolute (metric $HISTORY['create metric1'].$RESPONSE['$.id'] mean)))" status: 400 - response_strings: - - "Invalid input: 'notexist' operation invalid for dictionary value" + response_json_paths: + $.description.cause: "Invalid input" + $.description.reason: "/^'notexist' operation invalid for dictionary value/" + $.description.detail: ["operations"] - name: invalid resample POST: /v1/aggregates @@ -810,5 +822,7 @@ tests: data: operations: "(resample mean invalid (metric ($HISTORY['create metric1'].$RESPONSE['$.id'] mean) ($HISTORY['create metric2'].$RESPONSE['$.id'] mean)))" status: 400 - response_strings: - - "Invalid input: 'resample' operation invalid for dictionary value" + response_json_paths: + $.description.cause: "Invalid input" + $.description.reason: "/^'resample' operation invalid for dictionary value/" + $.description.detail: ["operations"] diff --git a/gnocchi/tests/functional/gabbits/archive-rule.yaml b/gnocchi/tests/functional/gabbits/archive-rule.yaml index 90b0f43f..3e7b357f 100644 --- a/gnocchi/tests/functional/gabbits/archive-rule.yaml +++ b/gnocchi/tests/functional/gabbits/archive-rule.yaml @@ -147,12 +147,15 @@ tests: POST: /v1/archive_policy_rule request_headers: # User admin + accept: application/json authorization: "basic YWRtaW46" data: whaa: foobar status: 400 - response_strings: - - "Invalid input: extra keys not allowed" + response_json_paths: + $.description.cause: "Invalid input" + $.description.reason: "/^extra keys not allowed @ data/" + $.description.detail: ["whaa"] # get an archive policy rules diff --git a/gnocchi/tests/functional/gabbits/archive.yaml b/gnocchi/tests/functional/gabbits/archive.yaml index c81ed03e..b84da5f6 100644 --- a/gnocchi/tests/functional/gabbits/archive.yaml +++ b/gnocchi/tests/functional/gabbits/archive.yaml @@ -91,59 +91,74 @@ tests: request_headers: # User admin authorization: "basic 
YWRtaW46" + accept: application/json data: cowsay: moo status: 400 - response_strings: - - "Invalid input: extra keys not allowed" + response_json_paths: + $.description.cause: "Invalid input" + $.description.reason: "/^extra keys not allowed/" + $.description.detail: ["cowsay"] - name: missing definition POST: /v1/archive_policy request_headers: # User admin authorization: "basic YWRtaW46" + accept: application/json data: name: medium status: 400 - response_strings: - - "Invalid input: required key not provided" + response_json_paths: + $.description.cause: "Invalid input" + $.description.reason: "/^required key not provided/" + $.description.detail: ["definition"] - name: empty definition POST: /v1/archive_policy request_headers: # User admin authorization: "basic YWRtaW46" + accept: application/json data: name: medium definition: [] status: 400 - response_strings: - - "Invalid input: length of value must be at least 1" + response_json_paths: + $.description.cause: "Invalid input" + $.description.reason: "/^length of value must be at least 1/" + $.description.detail: ["definition"] - name: wrong value definition POST: /v1/archive_policy request_headers: # User admin authorization: "basic YWRtaW46" + accept: application/json data: name: somename definition: foobar status: 400 - response_strings: - - "Invalid input: expected a list" + response_json_paths: + $.description.cause: "Invalid input" + $.description.reason: "/^expected a list/" + $.description.detail: ["definition"] - name: useless definition POST: /v1/archive_policy request_headers: # User admin authorization: "basic YWRtaW46" + accept: application/json data: name: medium definition: - cowsay: moo status: 400 - response_strings: - - "Invalid input: extra keys not allowed" + response_json_paths: + $.description.cause: "Invalid input" + $.description.reason: "/^extra keys not allowed/" + $.description.detail: ["definition", '0', "cowsay"] # Create a valid archive policy. 
@@ -434,28 +449,35 @@ tests: request_headers: # User admin authorization: "basic YWRtaW46" + accept: application/json data: name: complex definition: - granularity: 0 points: 60 status: 400 - response_strings: - - "Invalid input: Timespan must be positive for dictionary value" + response_json_paths: + $.description.cause: "Invalid input" + $.description.reason: "/^Timespan must be positive for dictionary value/" + $.description.detail: ["definition", '0', "granularity"] - name: create invalid points policy POST: /v1/archive_policy request_headers: # User admin authorization: "basic YWRtaW46" + accept: application/json data: name: complex definition: - granularity: 10 points: 0 status: 400 - response_strings: - - "Invalid input: value must be at least 1 for dictionary value " + response_json_paths: + $.description.cause: "Invalid input" + $.description.reason: "/^value must be at least 1 for dictionary value/" + $.description.detail: ["definition", '0', "points"] + - name: create identical granularities policy POST: /v1/archive_policy @@ -504,6 +526,7 @@ tests: request_headers: # User admin authorization: "basic YWRtaW46" + accept: application/json data: name: should-have-failed definition: diff --git a/gnocchi/tests/functional/gabbits/metric.yaml b/gnocchi/tests/functional/gabbits/metric.yaml index 89cf1613..2111a185 100644 --- a/gnocchi/tests/functional/gabbits/metric.yaml +++ b/gnocchi/tests/functional/gabbits/metric.yaml @@ -93,14 +93,16 @@ tests: - name: create metric with name and over length unit POST: /v1/metric + request_headers: + accept: application/json data: name: "disk.io.rate" unit: "over_length_unit_over_length_unit" status: 400 - response_strings: - # split to not match the u' in py2 - - "Invalid input: length of value must be at most 31 for dictionary value @ data[" - - "'unit']" + response_json_paths: + $.description.cause: "Invalid input" + $.description.reason: "/^length of value must be at most 31 for dictionary value @ data/" + 
$.description.detail: ["unit"] - name: create metric with name no rule POST: /v1/metric @@ -262,7 +264,7 @@ tests: - [ "2015-03-06T14:34:12", 12] status: 400 response_json_paths: - $.description.cause: "Attribute value error" + $.description.cause: "Invalid input" $.description.detail: [] $.description.reason: "unexpected measures format" diff --git a/gnocchi/tests/functional/gabbits/resource-type.yaml b/gnocchi/tests/functional/gabbits/resource-type.yaml index 8827ee62..babe6dcd 100644 --- a/gnocchi/tests/functional/gabbits/resource-type.yaml +++ b/gnocchi/tests/functional/gabbits/resource-type.yaml @@ -45,6 +45,7 @@ tests: request_headers: # User admin authorization: "basic YWRtaW46" + accept: application/json data: name: my_custom_resource attributes: @@ -54,12 +55,14 @@ tests: min_length: 5 noexist: foo status: 400 - response_strings: - # NOTE(sileht): We would prefer to have a better message but voluptuous seems a bit lost when - # an Any have many dict with the same key, here "type" - # - "Invalid input: extra keys not allowed @ data[u'attributes'][u'foo'][u'noexist']" - # - "Invalid input: not a valid value for dictionary value @ data[u'attributes'][u'foo'][u'type']" - - "Invalid input:" + response_json_paths: + $.description.cause: "Invalid input" + # NOTE(sileht): We would prefer to have a better message but voluptuous seems a bit lost when + # an Any have many dict with the same key, here "type" + # $.description.reason: "/^extra keys not allowed/" + # $.description.reason: "/^not a valid value for dictionary value @ data/" + # $.description.detail: ['attributes', 'foo', 'type'] + # $.description.detail: ['attributes', 'foo', 'noexist'] - name: post resource type bad min_length value POST: $LAST_URL @@ -233,29 +236,33 @@ tests: - name: post invalid resource POST: /v1/resource/my_custom_resource + request_headers: + accept: application/json data: id: d11edfca-4393-4fda-b94d-b05a3a1b3747 name: toolong!!! 
foobar: what uuid: 07eb339e-23c0-4be2-be43-cd8247afae3b status: 400 - response_strings: - # split to not match the u' in py2 - - "Invalid input: length of value must be at most 5 for dictionary value @ data[" - - "'name']" + response_json_paths: + $.description.cause: "Invalid input" + $.description.reason: "/^length of value must be at most 5 for dictionary value @ data/" + $.description.detail: ['name'] - name: post invalid resource uuid POST: $LAST_URL + request_headers: + accept: application/json data: id: d11edfca-4393-4fda-b94d-b05a3a1b3747 name: too foobar: what uuid: really! status: 400 - response_strings: - # split to not match the u' in py2 - - "Invalid input: badly formed hexadecimal UUID string for dictionary value @ data[" - - "'uuid']" + response_json_paths: + $.description.cause: "Invalid input" + $.description.reason: "/^badly formed hexadecimal UUID string for dictionary value @ data/" + $.description.detail: ['uuid'] # Good resources for this type @@ -506,7 +513,6 @@ tests: - name: post a new resource attribute with incorrect fill PATCH: /v1/resource_type/my_custom_resource - request_headers: request_headers: # User admin authorization: "basic YWRtaW46" @@ -771,6 +777,7 @@ tests: # User admin authorization: "basic YWRtaW46" content-type: application/json-patch+json + accept: application/json data: - op: replace path: /attributes/newstuff @@ -780,9 +787,10 @@ tests: min_length: 0 max_length: 255 status: 400 - response_strings: - - "Invalid input: not a valid value for dictionary value @ data[0][" - - "'op']" + response_json_paths: + $.description.cause: "Invalid input" + $.description.detail: ['0', 'op'] + $.description.reason: "/^not a valid value for dictionary value @ data/" - name: patch a resource attribute type not exist PATCH: /v1/resource_type/my_custom_resource diff --git a/gnocchi/tests/functional/gabbits/resource.yaml b/gnocchi/tests/functional/gabbits/resource.yaml index 525d006f..2b081f3d 100644 --- 
a/gnocchi/tests/functional/gabbits/resource.yaml +++ b/gnocchi/tests/functional/gabbits/resource.yaml @@ -186,12 +186,15 @@ tests: - name: patch generic resource with id PATCH: $LAST_URL + request_headers: + accept: application/json data: id: foobar status: 400 - response_strings: - - "Invalid input: extra keys not allowed @ data[" - - "'id']" + response_json_paths: + $.description.cause: "Invalid input" + $.description.reason: "/^extra keys not allowed @ data/" + $.description.detail: ["id"] - name: patch generic with metrics PATCH: $LAST_URL @@ -316,12 +319,16 @@ tests: - name: patch resource bad data desc: providing data that is not a dict is an error + request_headers: + accept: application/json PATCH: $LAST_URL status: 400 data: - Beer and pickles - response_strings: - - "Invalid input: expected a dictionary" + response_json_paths: + $.description.cause: "Invalid input" + $.description.reason: "/^expected a dictionary/" + $.description.detail: [] - name: patch noexit resource desc: "patching something that doesn't exist is a 404" diff --git a/gnocchi/tests/functional/gabbits/search.yaml b/gnocchi/tests/functional/gabbits/search.yaml index ecfa2eaf..d414acf1 100644 --- a/gnocchi/tests/functional/gabbits/search.yaml +++ b/gnocchi/tests/functional/gabbits/search.yaml @@ -30,40 +30,49 @@ tests: id: "cd9eef" - name: search invalid and value + request_headers: + accept: application/json desc: and should be followed by a list, not dict POST: /v1/search/resource/generic data: and: project_id: foobar status: 400 - response_strings: - - "expected a list for dictionary value @ data[" - - "'and']" + response_json_paths: + $.description.cause: "Invalid input" + $.description.reason: "/^expected a list for dictionary value @ data/" + $.description.detail: ["and"] - name: search like id + request_headers: + accept: application/json POST: /v1/search/resource/generic data: like: id: fa% status: 400 - response_strings: - - "Invalid input: extra keys not allowed @ data[" - - 
"'like'][" - - "'id']" + response_json_paths: + $.description.cause: "Invalid input" + $.description.reason: "/^extra keys not allowed @ data/" + $.description.detail: ["like", "id"] - name: search like list id + request_headers: + accept: application/json POST: /v1/search/resource/generic data: like: id: - fa% status: 400 - response_strings: - - "Invalid input: extra keys not allowed @ data[" - - "'like'][" - - "'id']" + response_json_paths: + $.description.cause: "Invalid input" + $.description.reason: "/^extra keys not allowed @ data/" + $.description.detail: ["like", "id"] - name: search invalid ne value + request_headers: + accept: application/json desc: attribute value for binary operator must not be dict or list POST: /v1/search/resource/generic data: @@ -71,21 +80,24 @@ tests: project_id: - foobar status: 400 - response_strings: - - "for dictionary value @ data[" - - "'ne'][" - - "'project_id']" + response_json_paths: + $.description.cause: "Invalid input" + $.description.reason: "/^expected (unicode|str) for dictionary value @ data/" + $.description.detail: ["ne", "project_id"] - name: search invalid not value + request_headers: + accept: application/json desc: uninary operator must follow by dict, not list POST: /v1/search/resource/generic data: not: - project_id: foobar status: 400 - response_strings: - - "expected a dictionary for dictionary value @ data[" - - "'not']" + response_json_paths: + $.description.cause: "Invalid input" + $.description.reason: "/^expected a dictionary for dictionary value @ data/" + $.description.detail: ["not"] - name: post generic resource POST: /v1/resource/generic diff --git a/gnocchi/tests/functional_live/gabbits/live.yaml b/gnocchi/tests/functional_live/gabbits/live.yaml index d63cb096..858d18c9 100644 --- a/gnocchi/tests/functional_live/gabbits/live.yaml +++ b/gnocchi/tests/functional_live/gabbits/live.yaml @@ -46,60 +46,75 @@ tests: desc: archive policy contains invalid key 'cowsay' POST: /v1/archive_policy 
request_headers: + accept: application/json content-type: application/json data: cowsay: moo status: 400 - response_strings: - - "Invalid input: extra keys not allowed" + response_json_paths: + $.description.cause: "Invalid input" + $.description.reason: "/^extra keys not allowed @ data/" + $.description.detail: ["cowsay"] - name: missing definition desc: archive policy is missing 'definition' keyword POST: /v1/archive_policy request_headers: + accept: application/json content-type: application/json data: name: medium status: 400 - response_strings: - - "Invalid input: required key not provided" + response_json_paths: + $.description.cause: "Invalid input" + $.description.reason: "/^required key not provided/" + $.description.detail: ["definition"] - name: empty definition desc: empty definition for archive policy POST: /v1/archive_policy request_headers: + accept: application/json content-type: application/json data: name: medium definition: [] status: 400 - response_strings: - - "Invalid input: length of value must be at least 1" + response_json_paths: + $.description.cause: "Invalid input" + $.description.reason: "/^length of value must be at least 1/" + $.description.detail: ["definition"] - name: wrong value definition desc: invalid type of 'definition' key POST: /v1/archive_policy request_headers: + accept: application/json content-type: application/json data: name: somename definition: foobar status: 400 - response_strings: - - "Invalid input: expected a list" + response_json_paths: + $.description.cause: "Invalid input" + $.description.reason: "/^expected a list/" + $.description.detail: ["definition"] - name: useless definition desc: invalid archive policy definition POST: /v1/archive_policy request_headers: + accept: application/json content-type: application/json data: name: medium definition: - cowsay: moo status: 400 - response_strings: - - "Invalid input: extra keys not allowed" + response_json_paths: + $.description.cause: "Invalid input" + 
$.description.reason: "/^extra keys not allowed/" + $.description.detail: ['definition', '0', 'cowsay'] # # Create archive policy @@ -320,12 +335,15 @@ tests: - name: bad archive policy rule body POST: /v1/archive_policy_rule request_headers: + accept: application/json content-type: application/json data: whaa: foobar status: 400 - response_strings: - - "Invalid input: extra keys not allowed" + response_json_paths: + $.description.cause: "Invalid input" + $.description.reason: "/^extra keys not allowed/" + $.description.detail: ["whaa"] # get an archive policy rules diff --git a/gnocchi/tests/test_rest.py b/gnocchi/tests/test_rest.py index 8825e383..74592e10 100644 --- a/gnocchi/tests/test_rest.py +++ b/gnocchi/tests/test_rest.py @@ -1230,10 +1230,15 @@ class ResourceTest(RestTest): "/v1/resource/" + self.resource_type + "/" + self.attributes['id'], params={'foobar': 123}, - status=400) - self.assertIn(b'Invalid input: extra keys not allowed @ data[' - + repr(u'foobar').encode('ascii') + b"]", - result.body) + status=400, + headers={"Accept": "application/json"} + ) + + result_description = result.json['description'] + self.assertEqual("Invalid input", result_description['cause']) + self.assertIn( + "extra keys not allowed @ data[", result_description['reason'] + ) def test_delete_resource(self): self.app.post_json("/v1/resource/" + self.resource_type, @@ -1864,11 +1869,15 @@ class GenericResourceTest(RestTest): result = self.app.post_json( "/v1/search/resource/generic", params={"wrongoperator": {"user_id": "bar"}}, - status=400) + status=400, + headers={"Accept": "application/json"}, + ) + + result_description = result.json['description'] + self.assertEqual("Invalid input", result_description['cause']) self.assertIn( - "Invalid input: extra keys not allowed @ data[" - + repr(u'wrongoperator') + "]", - result.text) + "extra keys not allowed @ data[", result_description['reason'] + ) class QueryStringSearchAttrFilterTest(tests_base.TestCase): diff --git 
a/releasenotes/notes/api-user-input-invalid-09b045f5ab12c.yaml b/releasenotes/notes/api-user-input-invalid-09b045f5ab12c.yaml new file mode 100644 index 00000000..bc5028e4 --- /dev/null +++ b/releasenotes/notes/api-user-input-invalid-09b045f5ab12c.yaml @@ -0,0 +1,4 @@ +--- +upgrade: + - | + The error format for many requests API has changed and the error is now reported in a better way. -- GitLab From c1c8332cdd776435f7c6c1eefe50c4877becbd09 Mon Sep 17 00:00:00 2001 From: gord chung Date: Tue, 23 Oct 2018 22:48:05 -0400 Subject: [PATCH 1416/1483] fix doc formatting the REST output seems to be clashing with the formatting of previous element. remove the formatting and move the REST output above. --- doc/source/rest.j2 | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/doc/source/rest.j2 b/doc/source/rest.j2 index 678bee41..8ff6b116 100644 --- a/doc/source/rest.j2 +++ b/doc/source/rest.j2 @@ -190,16 +190,17 @@ can be resampled to a new |granularity|. Time-series data can also be grouped by calendar dates beyond a standard day. The resulting groupings are tied to the leading date of the group. For example, grouping on month returns a monthly aggregate linked to the first of the month. -Available calendar groups are:: - - `Y` - by year. - `H` - by half. - `Q` - by quarter. - `M` - by month. - `W` - by week, starting on Sunday. {{ scenarios['get-measures-resample-calendar']['doc'] }} +Available calendar groups are: + +* `Y` – by year +* `H` – by half +* `Q` – by quarter +* `M` – by month +* `W` – by week, starting on Sunday + .. note:: If you plan to execute the query often, it is recommended for performance -- GitLab From c1c1e28b4e038991bba98aba0de359a6728d496a Mon Sep 17 00:00:00 2001 From: gord chung Date: Sun, 21 Oct 2018 22:44:39 -0400 Subject: [PATCH 1417/1483] ignore update_capabilites if unsupported catch and carry on. 
Closes: #996 --- gnocchi/cli/metricd.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/gnocchi/cli/metricd.py b/gnocchi/cli/metricd.py index a8ada8db..523c47a7 100644 --- a/gnocchi/cli/metricd.py +++ b/gnocchi/cli/metricd.py @@ -242,8 +242,12 @@ class MetricProcessor(MetricProcessBase): LOG.error("Unexpected error processing assigned job", exc_info=True) LOG.debug("%d metrics processed from %d sacks", m_count, s_count) - # Update statistics - self.coord.update_capabilities(self.GROUP_ID, self.store.statistics) + try: + # Update statistics + self.coord.update_capabilities(self.GROUP_ID, + self.store.statistics) + except tooz.NotImplemented: + pass if sacks == self._get_sacks_to_process(): # We just did a full scan of all sacks, reset the timer self._last_full_sack_scan.reset() -- GitLab From 1e506094949d68b452d044941c0d6f85ef997b9e Mon Sep 17 00:00:00 2001 From: gord chung Date: Sun, 21 Oct 2018 22:44:39 -0400 Subject: [PATCH 1418/1483] ignore update_capabilites if unsupported catch and carry on. 
Closes: #996 (cherry picked from commit c1c1e28b4e038991bba98aba0de359a6728d496a) --- gnocchi/cli/metricd.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/gnocchi/cli/metricd.py b/gnocchi/cli/metricd.py index a8ada8db..523c47a7 100644 --- a/gnocchi/cli/metricd.py +++ b/gnocchi/cli/metricd.py @@ -242,8 +242,12 @@ class MetricProcessor(MetricProcessBase): LOG.error("Unexpected error processing assigned job", exc_info=True) LOG.debug("%d metrics processed from %d sacks", m_count, s_count) - # Update statistics - self.coord.update_capabilities(self.GROUP_ID, self.store.statistics) + try: + # Update statistics + self.coord.update_capabilities(self.GROUP_ID, + self.store.statistics) + except tooz.NotImplemented: + pass if sacks == self._get_sacks_to_process(): # We just did a full scan of all sacks, reset the timer self._last_full_sack_scan.reset() -- GitLab From bcd9a8cdd7c37fb655764a3b12c7e567598d088d Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Fri, 5 Oct 2018 17:35:08 +0200 Subject: [PATCH 1419/1483] Switch from oslo.middleware to internal proxy converter --- gnocchi/gnocchi-config-generator.conf | 1 - gnocchi/opts.py | 3 +- gnocchi/rest/api-paste.ini | 10 +- gnocchi/rest/app.py | 4 +- gnocchi/rest/http_proxy_to_wsgi.py | 116 ++++++++++++++++++ gnocchi/tests/functional/fixtures.py | 2 + .../gabbits/http-proxy-to-wsgi.yaml | 16 +++ 7 files changed, 142 insertions(+), 10 deletions(-) create mode 100644 gnocchi/rest/http_proxy_to_wsgi.py create mode 100644 gnocchi/tests/functional/gabbits/http-proxy-to-wsgi.yaml diff --git a/gnocchi/gnocchi-config-generator.conf b/gnocchi/gnocchi-config-generator.conf index ab1752dd..5d0c4932 100644 --- a/gnocchi/gnocchi-config-generator.conf +++ b/gnocchi/gnocchi-config-generator.conf @@ -3,7 +3,6 @@ wrap_width = 79 namespace = gnocchi namespace = oslo.middleware.cors namespace = oslo.middleware.healthcheck -namespace = oslo.middleware.http_proxy_to_wsgi namespace = oslo.policy namespace = cotyledon 
namespace = keystonemiddleware.auth_token diff --git a/gnocchi/opts.py b/gnocchi/opts.py index d86ab519..e2d8f9c6 100644 --- a/gnocchi/opts.py +++ b/gnocchi/opts.py @@ -22,6 +22,7 @@ from oslo_config import cfg import gnocchi.archive_policy import gnocchi.common.redis import gnocchi.indexer +import gnocchi.rest.http_proxy_to_wsgi import gnocchi.storage import gnocchi.storage.ceph import gnocchi.storage.file @@ -192,7 +193,7 @@ def list_opts(): default=10, min=0, help='Number of seconds before timeout when attempting ' 'to do some operations.'), - ) + API_OPTS, + ) + API_OPTS + gnocchi.rest.http_proxy_to_wsgi.OPTS, ), ("storage", _STORAGE_OPTS), ("incoming", _INCOMING_OPTS), diff --git a/gnocchi/rest/api-paste.ini b/gnocchi/rest/api-paste.ini index 2b6df853..aa40553b 100644 --- a/gnocchi/rest/api-paste.ini +++ b/gnocchi/rest/api-paste.ini @@ -17,13 +17,13 @@ use = egg:Paste#urlmap /healthcheck = healthcheck [pipeline:gnocchiv1+noauth] -pipeline = http_proxy_to_wsgi gnocchiv1 +pipeline = gnocchiv1 [pipeline:gnocchiv1+keystone] -pipeline = http_proxy_to_wsgi keystone_authtoken gnocchiv1 +pipeline = keystone_authtoken gnocchiv1 [pipeline:gnocchiversions_pipeline] -pipeline = http_proxy_to_wsgi gnocchiversions +pipeline = gnocchiversions [app:gnocchiversions] paste.app_factory = gnocchi.rest.app:app_factory @@ -37,10 +37,6 @@ root = gnocchi.rest.api.V1Controller use = egg:keystonemiddleware#auth_token oslo_config_project = gnocchi -[filter:http_proxy_to_wsgi] -use = egg:oslo.middleware#http_proxy_to_wsgi -oslo_config_project = gnocchi - [app:healthcheck] use = egg:oslo.middleware#healthcheck oslo_config_project = gnocchi diff --git a/gnocchi/rest/app.py b/gnocchi/rest/app.py index d3ab7717..98384662 100644 --- a/gnocchi/rest/app.py +++ b/gnocchi/rest/app.py @@ -35,6 +35,7 @@ from gnocchi import exceptions from gnocchi import incoming as gnocchi_incoming from gnocchi import indexer as gnocchi_indexer from gnocchi import json +from gnocchi.rest import http_proxy_to_wsgi 
from gnocchi import storage as gnocchi_storage @@ -178,7 +179,8 @@ def load_app(conf, not_implemented_middleware=True): appname = "gnocchi+" + conf.api.auth_mode app = deploy.loadapp("config:" + cfg_path, name=appname, global_conf={'configkey': configkey}) - return cors.CORS(app, conf=conf) + return http_proxy_to_wsgi.HTTPProxyToWSGI( + cors.CORS(app, conf=conf), conf=conf) def _setup_app(root, conf, not_implemented_middleware): diff --git a/gnocchi/rest/http_proxy_to_wsgi.py b/gnocchi/rest/http_proxy_to_wsgi.py new file mode 100644 index 00000000..9b86360e --- /dev/null +++ b/gnocchi/rest/http_proxy_to_wsgi.py @@ -0,0 +1,116 @@ +# -*- encoding: utf-8 -*- +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. See the License for the specific language governing permissions and +# limitations under the License. +from oslo_config import cfg + +import webob.dec +import webob.request +import webob.response + + +OPTS = ( + cfg.BoolOpt('enable_proxy_headers_parsing', + deprecated_group="oslo_middleware", + default=False, + help="Whether the application is behind a proxy or not. " + "This determines if the middleware should parse the " + "headers or not."), +) + + +class NoContentTypeResponse(webob.response.Response): + + default_content_type = None # prevents webob assigning content type + + +class NoContentTypeRequest(webob.request.Request): + + ResponseClass = NoContentTypeResponse + + +class HTTPProxyToWSGI(object): + """HTTP proxy to WSGI termination middleware. + + This middleware overloads WSGI environment variables with the one provided + by the remote HTTP reverse proxy. 
+ + """ + + def __init__(self, application, conf=None): + """Base middleware constructor + + :param conf: a cfg.ConfigOpts object + """ + self.application = application + self.oslo_conf = conf + + @webob.dec.wsgify(RequestClass=NoContentTypeRequest) + def __call__(self, req): + self.process_request(req) + return req.get_response(self.application) + + @staticmethod + def _parse_rfc7239_header(header): + """Parses RFC7239 Forward headers. + + e.g. for=192.0.2.60;proto=http, for=192.0.2.60;by=203.0.113.43 + + """ + result = [] + for proxy in header.split(","): + entry = {} + for d in proxy.split(";"): + key, _, value = d.partition("=") + entry[key.lower().strip()] = value.strip() + result.append(entry) + return result + + def process_request(self, req): + if not self.oslo_conf.api.enable_proxy_headers_parsing: + return + fwd_hdr = req.environ.get("HTTP_FORWARDED") + if fwd_hdr: + proxies = self._parse_rfc7239_header(fwd_hdr) + # Let's use the value from the first proxy + if proxies: + proxy = proxies[0] + + forwarded_proto = proxy.get("proto") + if forwarded_proto: + req.environ['wsgi.url_scheme'] = forwarded_proto + + forwarded_host = proxy.get("host") + if forwarded_host: + req.environ['HTTP_HOST'] = forwarded_host + + forwarded_for = proxy.get("for") + if forwarded_for: + req.environ['REMOTE_ADDR'] = forwarded_for + + else: + # World before RFC7239 + forwarded_proto = req.environ.get("HTTP_X_FORWARDED_PROTO") + if forwarded_proto: + req.environ['wsgi.url_scheme'] = forwarded_proto + + forwarded_host = req.environ.get("HTTP_X_FORWARDED_HOST") + if forwarded_host: + req.environ['HTTP_HOST'] = forwarded_host + + forwarded_for = req.environ.get("HTTP_X_FORWARDED_FOR") + if forwarded_for: + req.environ['REMOTE_ADDR'] = forwarded_for + + v = req.environ.get("HTTP_X_FORWARDED_PREFIX") + if v: + req.environ['SCRIPT_NAME'] = v + req.environ['SCRIPT_NAME'] diff --git a/gnocchi/tests/functional/fixtures.py b/gnocchi/tests/functional/fixtures.py index 6bcdf9db..dc0146d9 100644 
--- a/gnocchi/tests/functional/fixtures.py +++ b/gnocchi/tests/functional/fixtures.py @@ -171,6 +171,8 @@ class ConfigFixture(fixture.GabbiFixture): # Set pagination to a testable value conf.set_override('max_limit', 7, 'api') + conf.set_override('enable_proxy_headers_parsing', True, group="api") + self.index = index self.coord = metricd.get_coordinator_and_start(str(uuid.uuid4()), diff --git a/gnocchi/tests/functional/gabbits/http-proxy-to-wsgi.yaml b/gnocchi/tests/functional/gabbits/http-proxy-to-wsgi.yaml new file mode 100644 index 00000000..368a620b --- /dev/null +++ b/gnocchi/tests/functional/gabbits/http-proxy-to-wsgi.yaml @@ -0,0 +1,16 @@ +fixtures: + - ConfigFixture + +defaults: + request_headers: + content-type: application/json + # User foobar + authorization: "basic Zm9vYmFyOg==" + +tests: + - name: test HTTP proxy headers + GET: / + request_headers: + Forwarded: for=192.0.2.60;proto=http;host=foobar + response_json_paths: + $.versions[0].links[0].href: http://foobar/gnocchi/v1/ -- GitLab From d5c815685d1272ed02a8651b6f88a7a1bd8a8ba1 Mon Sep 17 00:00:00 2001 From: Thomas Goirand Date: Wed, 14 Nov 2018 13:11:21 +0000 Subject: [PATCH 1420/1483] Uploading to unstable. --- debian/changelog | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/debian/changelog b/debian/changelog index bd4c5d15..5623ea6b 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,11 @@ +gnocchi (4.3.1-2) UNRELEASED; urgency=medium + + * Uploading to unstable: + - This makes the build reproducible (Closes: #892419). + - Fix FTBFS (Closes: #911405). + + -- Thomas Goirand Wed, 14 Nov 2018 13:10:39 +0000 + gnocchi (4.3.1-1) experimental; urgency=medium [ Ondřej Nový ] -- GitLab From bc21d33c55a4ccaf8ef40532f2b8e4009914fbfb Mon Sep 17 00:00:00 2001 From: Thomas Goirand Date: Wed, 14 Nov 2018 13:18:54 +0000 Subject: [PATCH 1421/1483] Fix gnocchi-statsd python3 depends. 
--- debian/changelog | 3 ++- debian/control | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/debian/changelog b/debian/changelog index 5623ea6b..5247aa9c 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,8 +1,9 @@ -gnocchi (4.3.1-2) UNRELEASED; urgency=medium +gnocchi (4.3.1-2) unstable; urgency=medium * Uploading to unstable: - This makes the build reproducible (Closes: #892419). - Fix FTBFS (Closes: #911405). + * Fix gnocchi-statsd python3 depends. -- Thomas Goirand Wed, 14 Nov 2018 13:10:39 +0000 diff --git a/debian/control b/debian/control index 0ee58274..1e5d93b3 100644 --- a/debian/control +++ b/debian/control @@ -138,7 +138,7 @@ Depends: gnocchi-common (= ${binary:Version}), lsb-base, ${misc:Depends}, - ${python:Depends}, + ${python3:Depends}, Description: Metric as a Service - statsd daemon Gnocchi is a service for managing a set of resources and storing metrics about them, in a scalable and resilient way. Its functionalities are exposed over an -- GitLab From e3170d5be1512223bbe0dfe2c58ee03bbdee1d98 Mon Sep 17 00:00:00 2001 From: Thomas Goirand Date: Wed, 14 Nov 2018 14:37:33 +0100 Subject: [PATCH 1422/1483] Add a gnocchi logrotate file. --- debian/changelog | 6 ++++++ debian/gnocchi-common.logrotate | 8 ++++++++ 2 files changed, 14 insertions(+) create mode 100644 debian/gnocchi-common.logrotate diff --git a/debian/changelog b/debian/changelog index 5247aa9c..d0b3a06a 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +gnocchi (4.3.1-3) unstable; urgency=medium + + * Add a gnocchi logrotate file. 
+ + -- Thomas Goirand Wed, 14 Nov 2018 14:37:07 +0100 + gnocchi (4.3.1-2) unstable; urgency=medium * Uploading to unstable: diff --git a/debian/gnocchi-common.logrotate b/debian/gnocchi-common.logrotate new file mode 100644 index 00000000..3e2bd66d --- /dev/null +++ b/debian/gnocchi-common.logrotate @@ -0,0 +1,8 @@ +/var/log/gnocchi/*.log { + daily + missingok + compress + delaycompress + notifempty + copytruncate +} -- GitLab From 914a00aca5b8ee432a67b700e5c778b5be39dd49 Mon Sep 17 00:00:00 2001 From: Nagasai Vinaykumar Kapalavai Date: Mon, 22 Oct 2018 15:02:10 -0400 Subject: [PATCH 1423/1483] Support for clip operation --- doc/source/rest.j2 | 3 + gnocchi/rest/aggregates/api.py | 11 ++++ gnocchi/rest/aggregates/operations.py | 55 +++++++++++++++++++ .../tests/functional/gabbits/aggregation.yaml | 24 ++++++++ gnocchi/tests/test_aggregates.py | 54 ++++++++++++++++++ 5 files changed, 147 insertions(+) diff --git a/doc/source/rest.j2 b/doc/source/rest.j2 index 8ff6b116..443c6ac3 100644 --- a/doc/source/rest.j2 +++ b/doc/source/rest.j2 @@ -909,6 +909,9 @@ Function operations (tan ()) (floor ()) (ceil ()) + (clip ()) + (clip_min ()) + (clip_max ()) (rateofchange ()) diff --git a/gnocchi/rest/aggregates/api.py b/gnocchi/rest/aggregates/api.py index ef0b7a7e..ebef7201 100644 --- a/gnocchi/rest/aggregates/api.py +++ b/gnocchi/rest/aggregates/api.py @@ -70,11 +70,22 @@ def MetricSchema(v): OperationsSchemaBase = [ MetricSchema, + voluptuous.ExactSequence( + [voluptuous.Any(*list( + agg_operations.ternary_operators.keys())), + _OperationsSubNodeSchema, _OperationsSubNodeSchema, + _OperationsSubNodeSchema] + ), voluptuous.ExactSequence( [voluptuous.Any(*list( agg_operations.binary_operators.keys())), _OperationsSubNodeSchema, _OperationsSubNodeSchema] ), + voluptuous.ExactSequence( + [voluptuous.Any(*list( + agg_operations.ternary_operators.keys())), + _OperationsSubNodeSchema, _OperationsSubNodeSchema] + ), voluptuous.ExactSequence( [voluptuous.Any(*list( 
agg_operations.unary_operators.keys())), diff --git a/gnocchi/rest/aggregates/operations.py b/gnocchi/rest/aggregates/operations.py index ebb3cea7..d085e26b 100644 --- a/gnocchi/rest/aggregates/operations.py +++ b/gnocchi/rest/aggregates/operations.py @@ -81,6 +81,13 @@ binary_operators = { u"^": numpy.power, u"pow": numpy.power, + u"clip_min": lambda array, value: numpy.clip(array, value, None), + u"clip_max": lambda array, value: numpy.clip(array, None, value), + +} + +ternary_operators = { + u"clip": numpy.clip, } # TODO(sileht): adds, numpy.around, but it take a decimal argument to handle @@ -150,6 +157,47 @@ def handle_binary_operator(nodes, granularity, timestamps, return granularity, timestamps, values, is_aggregated +def handle_ternary_operator(nodes, granularity, timestamps, + initial_values, is_aggregated, references): + op = nodes[0] + g1, t1, v1, is_a1 = evaluate(nodes[1], granularity, timestamps, + initial_values, is_aggregated, references) + g2, t2, v2, is_a2 = evaluate(nodes[2], granularity, timestamps, + initial_values, is_aggregated, references) + if len(nodes) > 3: + g3, t3, v3, is_a3 = evaluate(nodes[3], granularity, timestamps, + initial_values, is_aggregated, references) + else: + g3, t3, v3, is_a3 = g2, t2, None, is_a2 + + is_aggregated = is_a1 or is_a2 or is_a3 + if isinstance(v1, numpy.ndarray) and isinstance(v2, numpy.ndarray)\ + and isinstance(v3, numpy.ndarray): + if not numpy.array_equal(t1, t2) or g1 != g2: + if not numpy.array_equal(t2, t3) or g2 != g3: + raise exceptions.OperandsMismatch( + references, + "Can't compute timeseries with different " + "granularity %s <> %s <> %s" + % (nodes[1], nodes[2], nodes[3])) + timestamps = t1 + granularity = g1 + is_aggregated = True + + elif isinstance(v2, numpy.ndarray): + timestamps = t2 + granularity = g2 + elif isinstance(v3, numpy.ndarray): + timestamps = t3 + granularity = g3 + else: + timestamps = t1 + granularity = g1 + + values = ternary_operators[op](v1, v2, v3) + return granularity, 
timestamps, values, is_aggregated + + def handle_aggregate(agg, granularity, timestamps, values, is_aggregated, references): values = numpy.array([AGG_MAP[agg](values, axis=1)]).T @@ -254,10 +302,16 @@ def evaluate(nodes, granularity, timestamps, initial_values, is_aggregated, return handle_aggregation_operator(nodes, granularity, timestamps, initial_values, is_aggregated, references) + elif nodes[0] in ternary_operators: + return handle_ternary_operator(nodes, granularity, timestamps, + initial_values, is_aggregated, + references) + elif nodes[0] in binary_operators: return handle_binary_operator(nodes, granularity, timestamps, initial_values, is_aggregated, references) + elif (nodes[0] in unary_operators or nodes[0] in unary_operators_with_timestamps): return handle_unary_operator(nodes, granularity, timestamps, @@ -271,5 +325,6 @@ def evaluate(nodes, granularity, timestamps, initial_values, is_aggregated, indexes = [i for i, r in enumerate(references) if predicat(r)] return (granularity, timestamps, initial_values.T[indexes].T, is_aggregated) + else: raise RuntimeError("Operation node tree is malformed: %s" % nodes) diff --git a/gnocchi/tests/functional/gabbits/aggregation.yaml b/gnocchi/tests/functional/gabbits/aggregation.yaml index be952b9e..d85e85de 100644 --- a/gnocchi/tests/functional/gabbits/aggregation.yaml +++ b/gnocchi/tests/functional/gabbits/aggregation.yaml @@ -361,3 +361,27 @@ tests: - name: get measure aggregates with no resource name POST: /v1/aggregation/resource/generic/metric status: 405 + + - name: get measure aggregates with None as min in clip operation + POST: /v1/aggregates?fill=0&granularity=1 + data: + resource_type: generic + search: {} + operations: '(clip_min (metric agg_meter mean) 10)' + response_json_paths: + $.measures.bcd3441c-b5aa-4d1b-af9a-5a72322bb269.agg_meter.mean: + - ['2015-03-06T14:33:57+00:00', 1.0, 43.1] + - ['2015-03-06T14:34:12+00:00', 1.0, 12.0] + - ['2015-03-06T14:35:12+00:00', 1.0, 10.0] + + - name: get measure 
aggregates with None as max in clip operation + POST: /v1/aggregates?fill=0&granularity=1 + data: + resource_type: generic + search: {} + operations: '(clip_max (metric agg_meter mean) 10)' + response_json_paths: + $.measures.bcd3441c-b5aa-4d1b-af9a-5a72322bb269.agg_meter.mean: + - ['2015-03-06T14:33:57+00:00', 1.0, 10.0] + - ['2015-03-06T14:34:12+00:00', 1.0, 10.0] + - ['2015-03-06T14:35:12+00:00', 1.0, 0.0] diff --git a/gnocchi/tests/test_aggregates.py b/gnocchi/tests/test_aggregates.py index 093361a7..47de657e 100644 --- a/gnocchi/tests/test_aggregates.py +++ b/gnocchi/tests/test_aggregates.py @@ -1433,6 +1433,60 @@ class CrossMetricAggregated(base.TestCase): numpy.timedelta64(1, 'h'), 88)] }}, values) + def test_ternary_operator_clip_min_max_ts_on_left(self): + metric2, __ = self._create_metric() + self.incoming.add_measures(self.metric.id, [ + incoming.Measure(datetime64(2014, 1, 1, 12, 0, 1), 69), + incoming.Measure(datetime64(2014, 1, 1, 13, 1, 31), 42), + incoming.Measure(datetime64(2014, 1, 1, 14, 2, 31), 4), + incoming.Measure(datetime64(2014, 1, 1, 15, 3, 45), 44), + ]) + self.trigger_processing() + + values = processor.get_measures( + self.storage, [processor.MetricReference(self.metric, "mean")], + ["clip", ["metric", str(self.metric.id), "mean"], 5, 60], + granularities=[numpy.timedelta64(1, 'h')]) + + self.assertEqual({str(self.metric.id): { + "mean": [ + (datetime64(2014, 1, 1, 12, 0, 0), + numpy.timedelta64(1, 'h'), 60), + (datetime64(2014, 1, 1, 13, 0, 0), + numpy.timedelta64(1, 'h'), 42), + (datetime64(2014, 1, 1, 14, 0, 0), + numpy.timedelta64(1, 'h'), 5), + (datetime64(2014, 1, 1, 15, 0, 0), + numpy.timedelta64(1, 'h'), 44)] + }}, values) + + def test_ternary_operator_clip_min_ts_on_left(self): + metric2, __ = self._create_metric() + self.incoming.add_measures(self.metric.id, [ + incoming.Measure(datetime64(2014, 1, 1, 12, 0, 1), 69), + incoming.Measure(datetime64(2014, 1, 1, 13, 1, 31), 42), + incoming.Measure(datetime64(2014, 1, 1, 14, 2, 
31), 4), + incoming.Measure(datetime64(2014, 1, 1, 15, 3, 45), 44), + ]) + self.trigger_processing() + + values = processor.get_measures( + self.storage, [processor.MetricReference(self.metric, "mean")], + ["clip", ["metric", str(self.metric.id), "mean"], 50], + granularities=[numpy.timedelta64(1, 'h')]) + + self.assertEqual({str(self.metric.id): { + "mean": [ + (datetime64(2014, 1, 1, 12, 0, 0), + numpy.timedelta64(1, 'h'), 69), + (datetime64(2014, 1, 1, 13, 0, 0), + numpy.timedelta64(1, 'h'), 50), + (datetime64(2014, 1, 1, 14, 0, 0), + numpy.timedelta64(1, 'h'), 50), + (datetime64(2014, 1, 1, 15, 0, 0), + numpy.timedelta64(1, 'h'), 50)] + }}, values) + def test_binary_operator_ts_on_right(self): metric2, __ = self._create_metric() self.incoming.add_measures(self.metric.id, [ -- GitLab From 570e18be6bd0f216db31c3e604ca0b7e694fa8cd Mon Sep 17 00:00:00 2001 From: Marcin Juszkiewicz Date: Mon, 14 Jan 2019 18:14:10 +0100 Subject: [PATCH 1424/1483] use current Gnocchi upstream repository Gnocchi moved out of OpenStack project in 2017. http://lists.openstack.org/pipermail/openstack-dev/2017-March/114300.html --- debian/changelog | 7 +++++++ debian/control | 2 +- debian/copyright | 2 +- debian/watch | 2 +- 4 files changed, 10 insertions(+), 3 deletions(-) diff --git a/debian/changelog b/debian/changelog index d0b3a06a..f188f816 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,10 @@ +gnocchi (4.3.1-3.1) UNRELEASED; urgency=medium + + * Non-maintainer upload. + * Updated upstream URL in control/copyright/watch. + + -- Marcin Juszkiewicz Tue, 15 Jan 2019 09:47:28 +0100 + gnocchi (4.3.1-3) unstable; urgency=medium * Add a gnocchi logrotate file. 
diff --git a/debian/control b/debian/control index 1e5d93b3..c8969817 100644 --- a/debian/control +++ b/debian/control @@ -79,7 +79,7 @@ Build-Depends-Indep: Standards-Version: 4.1.3 Vcs-Browser: https://salsa.debian.org/openstack-team/services/gnocchi Vcs-Git: https://salsa.debian.org/openstack-team/services/gnocchi.git -Homepage: https://github.com/openstack/gnocchi +Homepage: https://gnocchi.xyz/ Package: gnocchi-api Architecture: all diff --git a/debian/copyright b/debian/copyright index 35773989..d9fdbca7 100644 --- a/debian/copyright +++ b/debian/copyright @@ -1,7 +1,7 @@ Format: https://www.debian.org/doc/packaging-manuals/copyright-format/1.0/ Upstream-Name: gnocchi Upstream-Contact: Julien Danjou -Source: https://github.com/openstack/gnocchi +Source: https://github.com/gnocchixyz/gnocchi Files: * Copyright: (c) 2014-2015, Julien Danjou diff --git a/debian/watch b/debian/watch index 3f8c9400..4336d842 100644 --- a/debian/watch +++ b/debian/watch @@ -1,4 +1,4 @@ version=3 opts="uversionmangle=s/\.(b|rc)/~$1/" \ -https://github.com/openstack/gnocchi/tags .*/(\d[\d\.]+)\.tar\.gz +https://github.com/gnocchixyz/gnocchi/tags .*/(\d[\d\.]+)\.tar\.gz -- GitLab From 6e501c7aa30f47df20f32358b1bf5d0d7b1026a4 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Thu, 4 Oct 2018 10:06:30 +0200 Subject: [PATCH 1425/1483] Update to Mergify v2 engine --- .mergify.yml | 99 ++++++++++++++++++++++++++++++++++++++++------------ 1 file changed, 76 insertions(+), 23 deletions(-) diff --git a/.mergify.yml b/.mergify.yml index 5e9142bc..e8e021f0 100644 --- a/.mergify.yml +++ b/.mergify.yml @@ -1,24 +1,77 @@ -rules: - default: - protection: - required_status_checks: - strict: True - contexts: - - continuous-integration/travis-ci - required_pull_request_reviews: - required_approving_review_count: 2 - merge_strategy: - method: rebase - automated_backport_labels: - backport-to-4.3: stable/4.3 - backport-to-4.2: stable/4.2 - backport-to-4.1: stable/4.1 - backport-to-4.0: stable/4.0 - 
backport-to-3.1: stable/3.1 - backport-to-3.0: stable/3.0 - branches: - '^stable/.*': - protection: - required_pull_request_reviews: - required_approving_review_count: 1 +pull_request_rules: + - name: automatic merge + actions: + merge: + method: rebase + rebase_fallback: merge + strict: true + conditions: + - label!=work-in-progress + - '#approved-reviews-by>=2' + - status-success=continuous-integration/travis-ci/pr + - name: merge backport to stable with one review + actions: + merge: + method: rebase + rebase_fallback: merge + strict: true + conditions: + - base~=^stable/.* + - label!=work-in-progress + - '#approved-reviews-by>=1' + - status-success=continuous-integration/travis-ci/pr + - name: automatic merge backports from Mergify + actions: + merge: + method: rebase + rebase_fallback: merge + strict: true + conditions: + - base~=^stable/.* + - label!=work-in-progress + - author=mergify[bot] + - status-success=continuous-integration/travis-ci/pr +# Backports to stable branches + - actions: + backport: + branches: + - stable/3.0 + conditions: + - label=backport-to-3.0 + name: backport stable/3.0 + - actions: + backport: + branches: + - stable/3.1 + conditions: + - label=backport-to-3.1 + name: backport stable/3.1 + - actions: + backport: + branches: + - stable/4.0 + conditions: + - label=backport-to-4.0 + name: backport stable/4.0 + - actions: + backport: + branches: + - stable/4.1 + conditions: + - label=backport-to-4.1 + name: backport stable/4.1 + - actions: + backport: + branches: + - stable/4.2 + conditions: + - label=backport-to-4.2 + name: backport stable/4.2 + - actions: + backport: + branches: + - stable/4.3 + conditions: + - label=backport-to-4.3 + name: backport stable/4.3 -- GitLab From 21f47fbebcdeda34b84facd5b1185370505edf8f Mon Sep 17 00:00:00 2001 From: gord chung Date: Tue, 29 Jan 2019 23:16:09 -0500 Subject: [PATCH 1426/1483] don't fail everything if one group missing metric it's possible that not all groups will contain a specific metric. 
rather than fail, return the groups that do have the matching metric. only if all groups don't have matching metrics, should we fail. Fixes: #1013 --- gnocchi/indexer/__init__.py | 6 +++ gnocchi/rest/aggregates/api.py | 32 ++++++++----- .../gabbits/aggregates-with-resources.yaml | 46 +++++++++++++++++++ 3 files changed, 73 insertions(+), 11 deletions(-) diff --git a/gnocchi/indexer/__init__.py b/gnocchi/indexer/__init__.py index 9e11a832..d8fd8854 100644 --- a/gnocchi/indexer/__init__.py +++ b/gnocchi/indexer/__init__.py @@ -133,6 +133,12 @@ class NoSuchMetric(IndexerException): metric) self.metric = metric + def jsonify(self): + return { + "cause": "Metrics not found", + "detail": self.metric, + } + class NoSuchResource(IndexerException): """Error raised when a resource does not exist.""" diff --git a/gnocchi/rest/aggregates/api.py b/gnocchi/rest/aggregates/api.py index ebef7201..ca6acfb6 100644 --- a/gnocchi/rest/aggregates/api.py +++ b/gnocchi/rest/aggregates/api.py @@ -246,21 +246,32 @@ class AggregatesController(rest.RestController): except indexer.IndexerException as e: api.abort(400, six.text_type(e)) if not groupby: - return self._get_measures_by_name( - resources, references, body["operations"], start, stop, - granularity, needed_overlap, fill, details=details) + try: + return self._get_measures_by_name( + resources, references, body["operations"], start, stop, + granularity, needed_overlap, fill, details=details) + except indexer.NoSuchMetric as e: + api.abort(400, e) def groupper(r): return tuple((attr, r[attr]) for attr in groupby) results = [] for key, resources in itertools.groupby(resources, groupper): - results.append({ - "group": dict(key), - "measures": self._get_measures_by_name( - resources, references, body["operations"], start, stop, - granularity, needed_overlap, fill, details=details) - }) + try: + results.append({ + "group": dict(key), + "measures": self._get_measures_by_name( + resources, references, body["operations"], + start, stop, 
granularity, needed_overlap, fill, + details=details) + }) + except indexer.NoSuchMetric: + pass + if not results: + api.abort( + 400, + indexer.NoSuchMetric(set((m for (m, a) in references)))) return results else: @@ -316,8 +327,7 @@ class AggregatesController(rest.RestController): ]) if not references: - api.abort(400, {"cause": "Metrics not found", - "detail": set((m for (m, a) in metric_wildcards))}) + raise indexer.NoSuchMetric(set((m for (m, a) in metric_wildcards))) response = { "measures": get_measures_or_abort( diff --git a/gnocchi/tests/functional/gabbits/aggregates-with-resources.yaml b/gnocchi/tests/functional/gabbits/aggregates-with-resources.yaml index 935213e3..625819cb 100644 --- a/gnocchi/tests/functional/gabbits/aggregates-with-resources.yaml +++ b/gnocchi/tests/functional/gabbits/aggregates-with-resources.yaml @@ -66,8 +66,19 @@ tests: metrics: cpu.util: archive_policy_name: low + unique.stuff: + archive_policy_name: low status: 201 + - name: post customstuff measures 1 + POST: /v1/resource/generic/2447CD7E-48A6-4C50-A991-6677CC0D00E6/metric/unique.stuff/measures + data: + - timestamp: "2015-03-06T14:33:57" + value: 23 + - timestamp: "2015-03-06T14:34:12" + value: 8 + status: 202 + - name: post cpuutil measures 2 POST: /v1/resource/generic/2447CD7E-48A6-4C50-A991-6677CC0D00E6/metric/cpu.util/measures data: @@ -296,6 +307,23 @@ tests: user_id: A50F549C-1F1C-4888-A71A-2C5473CCCEC1 project_id: ee4cfc41-1cdc-4d2f-9a08-f94111d80171 + - name: aggregate metric with groupby on id aggregates API + POST: /v1/aggregates?groupby=id&details=true + data: + resource_type: generic + search: "user_id = 'A50F549C-1F1C-4888-A71A-2C5473CCCEC1'" + operations: "(aggregate mean (metric unique.stuff mean))" + response_json_paths: + $.`len`: 1 + $[0].measures.references.`len`: 1 + $[0].measures.references[/id].[0]: $HISTORY['list resources'].$RESPONSE['$[1]'] + $[0].measures.measures.aggregated: + - ['2015-03-06T14:30:00+00:00', 300.0, 15.5] + - 
['2015-03-06T14:33:57+00:00', 1.0, 23.0] + - ['2015-03-06T14:34:12+00:00', 1.0, 8.0] + $[0].group: + id: 2447cd7e-48a6-4c50-a991-6677cc0d00e6 + # Negative tests - name: not matching granularity @@ -332,6 +360,24 @@ tests: - foobar - notexists + - name: not matching metrics in any group + POST: /v1/aggregates?groupby=id + request_headers: + accept: application/json + content-type: application/json + authorization: "basic Zm9vYmFyOg==" + data: + resource_type: generic + search: "user_id = 'A50F549C-1F1C-4888-A71A-2C5473CCCEC1'" + operations: "(aggregate mean (metric (notexists mean) (foobar mean)))" + status: 400 + response_json_paths: + $.code: 400 + $.description.cause: "Metrics not found" + $.description.detail.`sorted`: + - foobar + - notexists + - name: invalid groupby attribute POST: /v1/aggregates?groupby=unit request_headers: -- GitLab From c960f2934fe63f3bc67b78d7514c877ad17375f2 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Fri, 8 Feb 2019 14:48:47 +0100 Subject: [PATCH 1427/1483] Allow rate:XXX aggregations Old aggregate API was allowing rate:XXX format for rateofchange aggregation. This change restores it since we use the new internal API. 
Closes: #1016 --- gnocchi/rest/aggregates/operations.py | 17 +++++++++ .../gabbits/aggregates-with-metric-ids.yaml | 35 ++++++++++++++++++- 2 files changed, 51 insertions(+), 1 deletion(-) diff --git a/gnocchi/rest/aggregates/operations.py b/gnocchi/rest/aggregates/operations.py index d085e26b..5a54a720 100644 --- a/gnocchi/rest/aggregates/operations.py +++ b/gnocchi/rest/aggregates/operations.py @@ -36,6 +36,19 @@ AGG_MAP = { } +def rated_agg(agg): + def _inner_rated_agg(values, axis): + values = AGG_MAP[agg](values, axis) + values = numpy.diff(values) + return values + + return _inner_rated_agg + + +for agg in list(AGG_MAP): + AGG_MAP["rate:%s" % agg] = rated_agg(agg) + + # TODO(sileht): expose all operators in capability API binary_operators = { u"=": numpy.equal, @@ -204,6 +217,8 @@ def handle_aggregate(agg, granularity, timestamps, values, is_aggregated, if values.shape[1] != 1: raise RuntimeError("Unexpected resulting aggregated array shape: %s" % values) + if agg.startswith("rate:"): + timestamps = timestamps[1:] return (granularity, timestamps, values, True) @@ -223,6 +238,8 @@ def handle_rolling(agg, granularity, timestamps, values, is_aggregated, strides = values.strides + (values.strides[-1],) new_values = AGG_MAP[agg](as_strided(values, shape=shape, strides=strides), axis=-1) + if agg.startswith("rate:"): + timestamps = timestamps[1:] return granularity, timestamps, new_values.T, is_aggregated diff --git a/gnocchi/tests/functional/gabbits/aggregates-with-metric-ids.yaml b/gnocchi/tests/functional/gabbits/aggregates-with-metric-ids.yaml index 8c004d80..ac1826ae 100644 --- a/gnocchi/tests/functional/gabbits/aggregates-with-metric-ids.yaml +++ b/gnocchi/tests/functional/gabbits/aggregates-with-metric-ids.yaml @@ -319,7 +319,6 @@ tests: - ["2015-03-06T14:35:12+00:00", 1.0, 7.5] - ["2015-03-06T14:35:15+00:00", 1.0, 12.5] - - name: get one metric POST: /v1/aggregates?details=true data: @@ -337,6 +336,40 @@ tests: - ["2015-03-06T14:35:12+00:00", 1.0, 9.0] - 
["2015-03-06T14:35:15+00:00", 1.0, 11.0] + - name: get aggregates mean + POST: /v1/aggregates + data: + operations: + - aggregate + - mean + - ["metric", ["$HISTORY['create metric1'].$RESPONSE['$.id']", "mean"], ["$HISTORY['create metric2'].$RESPONSE['$.id']", "mean"]] + response_json_paths: + $.measures.aggregated: + - ["2015-03-06T14:33:00+00:00", 60.0, 22.55] + - ["2015-03-06T14:34:00+00:00", 60.0, 1.25] + - ["2015-03-06T14:35:00+00:00", 60.0, 11.25] + - ["2015-03-06T14:33:57+00:00", 1.0, 22.55] + - ["2015-03-06T14:34:12+00:00", 1.0, 8.0] + - ["2015-03-06T14:34:15+00:00", 1.0, -5.5] + - ["2015-03-06T14:35:12+00:00", 1.0, 9.5] + - ["2015-03-06T14:35:15+00:00", 1.0, 13.0] + + - name: get aggregates rate:mean + POST: /v1/aggregates + data: + operations: + - aggregate + - rate:mean + - ["metric", ["$HISTORY['create metric1'].$RESPONSE['$.id']", "mean"], ["$HISTORY['create metric2'].$RESPONSE['$.id']", "mean"]] + response_json_paths: + $.measures.aggregated: + - ["2015-03-06T14:34:00+00:00", 60.0, -21.30] + - ["2015-03-06T14:35:00+00:00", 60.0, 10.0] + - ["2015-03-06T14:34:12+00:00", 1.0, -14.55] + - ["2015-03-06T14:34:15+00:00", 1.0, -13.5] + - ["2015-03-06T14:35:12+00:00", 1.0, 15.0] + - ["2015-03-06T14:35:15+00:00", 1.0, 3.5] + - name: get aggregates one metric POST: /v1/aggregates?details=true data: -- GitLab From 81173aa212c737bb04707a22ba24ed1bc3c252af Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Fri, 8 Feb 2019 14:48:47 +0100 Subject: [PATCH 1428/1483] Allow rate:XXX aggregations Old aggregate API was allowing rate:XXX format for rateofchange aggregation. This change restores it since we use the new internal API. 
Closes: #1016 (cherry picked from commit c960f2934fe63f3bc67b78d7514c877ad17375f2) --- gnocchi/rest/aggregates/operations.py | 17 +++++++++ .../gabbits/aggregates-with-metric-ids.yaml | 35 ++++++++++++++++++- 2 files changed, 51 insertions(+), 1 deletion(-) diff --git a/gnocchi/rest/aggregates/operations.py b/gnocchi/rest/aggregates/operations.py index ebb3cea7..50fc679d 100644 --- a/gnocchi/rest/aggregates/operations.py +++ b/gnocchi/rest/aggregates/operations.py @@ -36,6 +36,19 @@ AGG_MAP = { } +def rated_agg(agg): + def _inner_rated_agg(values, axis): + values = AGG_MAP[agg](values, axis) + values = numpy.diff(values) + return values + + return _inner_rated_agg + + +for agg in list(AGG_MAP): + AGG_MAP["rate:%s" % agg] = rated_agg(agg) + + # TODO(sileht): expose all operators in capability API binary_operators = { u"=": numpy.equal, @@ -156,6 +169,8 @@ def handle_aggregate(agg, granularity, timestamps, values, is_aggregated, if values.shape[1] != 1: raise RuntimeError("Unexpected resulting aggregated array shape: %s" % values) + if agg.startswith("rate:"): + timestamps = timestamps[1:] return (granularity, timestamps, values, True) @@ -175,6 +190,8 @@ def handle_rolling(agg, granularity, timestamps, values, is_aggregated, strides = values.strides + (values.strides[-1],) new_values = AGG_MAP[agg](as_strided(values, shape=shape, strides=strides), axis=-1) + if agg.startswith("rate:"): + timestamps = timestamps[1:] return granularity, timestamps, new_values.T, is_aggregated diff --git a/gnocchi/tests/functional/gabbits/aggregates-with-metric-ids.yaml b/gnocchi/tests/functional/gabbits/aggregates-with-metric-ids.yaml index 81835c90..0295c245 100644 --- a/gnocchi/tests/functional/gabbits/aggregates-with-metric-ids.yaml +++ b/gnocchi/tests/functional/gabbits/aggregates-with-metric-ids.yaml @@ -319,7 +319,6 @@ tests: - ["2015-03-06T14:35:12+00:00", 1.0, 7.5] - ["2015-03-06T14:35:15+00:00", 1.0, 12.5] - - name: get one metric POST: /v1/aggregates?details=true data: @@ 
-337,6 +336,40 @@ tests: - ["2015-03-06T14:35:12+00:00", 1.0, 9.0] - ["2015-03-06T14:35:15+00:00", 1.0, 11.0] + - name: get aggregates mean + POST: /v1/aggregates + data: + operations: + - aggregate + - mean + - ["metric", ["$HISTORY['create metric1'].$RESPONSE['$.id']", "mean"], ["$HISTORY['create metric2'].$RESPONSE['$.id']", "mean"]] + response_json_paths: + $.measures.aggregated: + - ["2015-03-06T14:33:00+00:00", 60.0, 22.55] + - ["2015-03-06T14:34:00+00:00", 60.0, 1.25] + - ["2015-03-06T14:35:00+00:00", 60.0, 11.25] + - ["2015-03-06T14:33:57+00:00", 1.0, 22.55] + - ["2015-03-06T14:34:12+00:00", 1.0, 8.0] + - ["2015-03-06T14:34:15+00:00", 1.0, -5.5] + - ["2015-03-06T14:35:12+00:00", 1.0, 9.5] + - ["2015-03-06T14:35:15+00:00", 1.0, 13.0] + + - name: get aggregates rate:mean + POST: /v1/aggregates + data: + operations: + - aggregate + - rate:mean + - ["metric", ["$HISTORY['create metric1'].$RESPONSE['$.id']", "mean"], ["$HISTORY['create metric2'].$RESPONSE['$.id']", "mean"]] + response_json_paths: + $.measures.aggregated: + - ["2015-03-06T14:34:00+00:00", 60.0, -21.30] + - ["2015-03-06T14:35:00+00:00", 60.0, 10.0] + - ["2015-03-06T14:34:12+00:00", 1.0, -14.55] + - ["2015-03-06T14:34:15+00:00", 1.0, -13.5] + - ["2015-03-06T14:35:12+00:00", 1.0, 15.0] + - ["2015-03-06T14:35:15+00:00", 1.0, 3.5] + - name: get aggregates one metric POST: /v1/aggregates?details=true data: -- GitLab From c531da6ac555c0dd8936eecd27de2ab8ccb633ee Mon Sep 17 00:00:00 2001 From: gord chung Date: Tue, 29 Jan 2019 23:16:09 -0500 Subject: [PATCH 1429/1483] don't fail everything if one group missing metric it's possible that not all groups will contain a specific metric. rather than fail, return the groups that do have the matching metric. only if all groups don't have matching metrics, should we fail. 
Fixes: #1013 (cherry picked from commit 21f47fbebcdeda34b84facd5b1185370505edf8f) --- gnocchi/indexer/__init__.py | 6 +++ gnocchi/rest/aggregates/api.py | 32 ++++++++----- .../gabbits/aggregates-with-resources.yaml | 46 +++++++++++++++++++ 3 files changed, 73 insertions(+), 11 deletions(-) diff --git a/gnocchi/indexer/__init__.py b/gnocchi/indexer/__init__.py index 9e11a832..d8fd8854 100644 --- a/gnocchi/indexer/__init__.py +++ b/gnocchi/indexer/__init__.py @@ -133,6 +133,12 @@ class NoSuchMetric(IndexerException): metric) self.metric = metric + def jsonify(self): + return { + "cause": "Metrics not found", + "detail": self.metric, + } + class NoSuchResource(IndexerException): """Error raised when a resource does not exist.""" diff --git a/gnocchi/rest/aggregates/api.py b/gnocchi/rest/aggregates/api.py index ef0b7a7e..9f126477 100644 --- a/gnocchi/rest/aggregates/api.py +++ b/gnocchi/rest/aggregates/api.py @@ -235,21 +235,32 @@ class AggregatesController(rest.RestController): except indexer.IndexerException as e: api.abort(400, six.text_type(e)) if not groupby: - return self._get_measures_by_name( - resources, references, body["operations"], start, stop, - granularity, needed_overlap, fill, details=details) + try: + return self._get_measures_by_name( + resources, references, body["operations"], start, stop, + granularity, needed_overlap, fill, details=details) + except indexer.NoSuchMetric as e: + api.abort(400, e) def groupper(r): return tuple((attr, r[attr]) for attr in groupby) results = [] for key, resources in itertools.groupby(resources, groupper): - results.append({ - "group": dict(key), - "measures": self._get_measures_by_name( - resources, references, body["operations"], start, stop, - granularity, needed_overlap, fill, details=details) - }) + try: + results.append({ + "group": dict(key), + "measures": self._get_measures_by_name( + resources, references, body["operations"], + start, stop, granularity, needed_overlap, fill, + details=details) + }) + except 
indexer.NoSuchMetric: + pass + if not results: + api.abort( + 400, + indexer.NoSuchMetric(set((m for (m, a) in references)))) return results else: @@ -305,8 +316,7 @@ class AggregatesController(rest.RestController): ]) if not references: - api.abort(400, {"cause": "Metrics not found", - "detail": set((m for (m, a) in metric_wildcards))}) + raise indexer.NoSuchMetric(set((m for (m, a) in metric_wildcards))) response = { "measures": get_measures_or_abort( diff --git a/gnocchi/tests/functional/gabbits/aggregates-with-resources.yaml b/gnocchi/tests/functional/gabbits/aggregates-with-resources.yaml index 935213e3..625819cb 100644 --- a/gnocchi/tests/functional/gabbits/aggregates-with-resources.yaml +++ b/gnocchi/tests/functional/gabbits/aggregates-with-resources.yaml @@ -66,8 +66,19 @@ tests: metrics: cpu.util: archive_policy_name: low + unique.stuff: + archive_policy_name: low status: 201 + - name: post customstuff measures 1 + POST: /v1/resource/generic/2447CD7E-48A6-4C50-A991-6677CC0D00E6/metric/unique.stuff/measures + data: + - timestamp: "2015-03-06T14:33:57" + value: 23 + - timestamp: "2015-03-06T14:34:12" + value: 8 + status: 202 + - name: post cpuutil measures 2 POST: /v1/resource/generic/2447CD7E-48A6-4C50-A991-6677CC0D00E6/metric/cpu.util/measures data: @@ -296,6 +307,23 @@ tests: user_id: A50F549C-1F1C-4888-A71A-2C5473CCCEC1 project_id: ee4cfc41-1cdc-4d2f-9a08-f94111d80171 + - name: aggregate metric with groupby on id aggregates API + POST: /v1/aggregates?groupby=id&details=true + data: + resource_type: generic + search: "user_id = 'A50F549C-1F1C-4888-A71A-2C5473CCCEC1'" + operations: "(aggregate mean (metric unique.stuff mean))" + response_json_paths: + $.`len`: 1 + $[0].measures.references.`len`: 1 + $[0].measures.references[/id].[0]: $HISTORY['list resources'].$RESPONSE['$[1]'] + $[0].measures.measures.aggregated: + - ['2015-03-06T14:30:00+00:00', 300.0, 15.5] + - ['2015-03-06T14:33:57+00:00', 1.0, 23.0] + - ['2015-03-06T14:34:12+00:00', 1.0, 8.0] + 
$[0].group: + id: 2447cd7e-48a6-4c50-a991-6677cc0d00e6 + # Negative tests - name: not matching granularity @@ -332,6 +360,24 @@ tests: - foobar - notexists + - name: not matching metrics in any group + POST: /v1/aggregates?groupby=id + request_headers: + accept: application/json + content-type: application/json + authorization: "basic Zm9vYmFyOg==" + data: + resource_type: generic + search: "user_id = 'A50F549C-1F1C-4888-A71A-2C5473CCCEC1'" + operations: "(aggregate mean (metric (notexists mean) (foobar mean)))" + status: 400 + response_json_paths: + $.code: 400 + $.description.cause: "Metrics not found" + $.description.detail.`sorted`: + - foobar + - notexists + - name: invalid groupby attribute POST: /v1/aggregates?groupby=unit request_headers: -- GitLab From 55f90ae0d1b755b1ed2b495864ed834469415f92 Mon Sep 17 00:00:00 2001 From: Michal Arbet Date: Fri, 3 May 2019 17:37:34 +0200 Subject: [PATCH 1430/1483] Bump openstack-pkg-tools to version 99 --- debian/control | 3 ++- debian/copyright | 1 + 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/debian/control b/debian/control index c8969817..dd1762db 100644 --- a/debian/control +++ b/debian/control @@ -4,10 +4,11 @@ Priority: optional Maintainer: Debian OpenStack Uploaders: Thomas Goirand , + Michal Arbet , Build-Depends: debhelper (>= 10), dh-python, - openstack-pkg-tools (>= 81~), + openstack-pkg-tools (>= 99~), python3-all, python3-pbr, python3-setuptools, diff --git a/debian/copyright b/debian/copyright index d9fdbca7..74cdd6df 100644 --- a/debian/copyright +++ b/debian/copyright @@ -15,6 +15,7 @@ License: Apache-2 Files: debian/* Copyright: (c) 2014-2018, Thomas Goirand + (c) 2019, Michal Arbet License: Apache-2 License: Apache-2 -- GitLab From 1c7aa7b368356679b398664fc36e38018e2a9101 Mon Sep 17 00:00:00 2001 From: Michal Arbet Date: Fri, 3 May 2019 17:54:41 +0200 Subject: [PATCH 1431/1483] Remove python3- from oslo-config-generator --- debian/rules | 2 +- 1 file changed, 1 insertion(+), 1 
deletion(-) diff --git a/debian/rules b/debian/rules index 81c3f3d8..80769b34 100755 --- a/debian/rules +++ b/debian/rules @@ -81,7 +81,7 @@ endif rm -rf $(CURDIR)/debian/python*-gnocchi/usr/etc mkdir -p $(CURDIR)/debian/gnocchi-common/usr/share/gnocchi-common - PYTHONPATH=$(CURDIR)/debian/tmp/usr/lib/python3/dist-packages python3-oslo-config-generator \ + PYTHONPATH=$(CURDIR)/debian/tmp/usr/lib/python3/dist-packages oslo-config-generator \ --output-file $(CURDIR)/debian/gnocchi-common/usr/share/gnocchi-common/gnocchi.conf \ --wrap-width 140 \ --namespace gnocchi \ -- GitLab From 1722de0f38796b93c565f90908b935624bbab900 Mon Sep 17 00:00:00 2001 From: Michal Arbet Date: Fri, 3 May 2019 17:40:19 +0200 Subject: [PATCH 1432/1483] Release to experimental --- debian/changelog | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/debian/changelog b/debian/changelog index f188f816..1189f7fe 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,9 +1,16 @@ -gnocchi (4.3.1-3.1) UNRELEASED; urgency=medium +gnocchi (4.3.1-4) experimental; urgency=medium - * Non-maintainer upload. + [ Marcin Juszkiewicz ] * Updated upstream URL in control/copyright/watch. 
- -- Marcin Juszkiewicz Tue, 15 Jan 2019 09:47:28 +0100 + [ Michal Arbet ] + * d/control: + - Bump openstack-pkg-tools to version 99 + - Add me to uploaders field + * d/copyright: Add me to copyright file + * d/rules: Remove python3 from oslo-config-generator + + -- Michal Arbet Fri, 03 May 2019 17:39:11 +0200 gnocchi (4.3.1-3) unstable; urgency=medium -- GitLab From 84819f67923d1648452fb5bc869430cb07159e11 Mon Sep 17 00:00:00 2001 From: gord chung Date: Mon, 27 May 2019 00:17:34 +0000 Subject: [PATCH 1433/1483] fix gendoc gendoc throws: `AttributeError: 'Sphinx' object has no attribute 'info'` use logging provided by sphinx.util (not sure why it's using sphinx logger but meh) --- gnocchi/gendoc.py | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/gnocchi/gendoc.py b/gnocchi/gendoc.py index 50a427b7..ccdc4d79 100644 --- a/gnocchi/gendoc.py +++ b/gnocchi/gendoc.py @@ -24,11 +24,15 @@ import jinja2 from oslo_config import generator import six import six.moves +from sphinx.util import logging import webob.request import yaml from gnocchi.tests import test_rest + +LOG = logging.getLogger(__name__) + # HACK(jd) Not sure why but Sphinx setup this multiple times, so we just avoid # doing several times the requests by using this global variable :( _RUN = False @@ -223,8 +227,8 @@ def setup(app): else: request.body = fake_file.read(clen) - app.info("Doing request %s: %s" % (entry['name'], - six.text_type(request))) + LOG.info("Doing request %s: %s", + entry['name'], six.text_type(request)) with webapp.use_admin_user(): response = webapp.request(request) entry['response'] = response @@ -244,7 +248,7 @@ def setup(app): f.write(content) config_output_file = 'doc/source/gnocchi.conf.sample' - app.info("Generating %s" % config_output_file) + LOG.info("Generating %s", config_output_file) generator.main([ '--config-file', '%s/gnocchi-config-generator.conf' % os.path.dirname(__file__), -- GitLab From 0be1bc0431441d10d2844611d75e557640f2af48 Mon Sep 17 
00:00:00 2001 From: Julien Danjou Date: Mon, 27 May 2019 11:01:43 +0200 Subject: [PATCH 1434/1483] Reduce the number of required reviewers With the low activity going on, it might be hard to get two reviews. --- .mergify.yml | 11 ----------- 1 file changed, 11 deletions(-) diff --git a/.mergify.yml b/.mergify.yml index e8e021f0..3bc9ae1e 100644 --- a/.mergify.yml +++ b/.mergify.yml @@ -7,17 +7,6 @@ pull_request_rules: strict: true conditions: - label!=work-in-progress - - '#approved-reviews-by>=2' - - status-success=continuous-integration/travis-ci/pr - - name: merge backport to stable with one review - actions: - merge: - method: rebase - rebase_fallback: merge - strict: true - conditions: - - base~=^stable/.* - - label!=work-in-progress - '#approved-reviews-by>=1' - status-success=continuous-integration/travis-ci/pr - name: automatic merge backports from Mergify -- GitLab From 56ce28a04cd3c3d35a21df5fddb3ccfd15d83d10 Mon Sep 17 00:00:00 2001 From: gord chung Date: Sun, 26 May 2019 23:49:39 +0000 Subject: [PATCH 1435/1483] handle infinity like nan infinity and nan are not part of json standard. the default python json serialiser will dump `inf` and `nan` (unquoted) and load the them. we've historically returned `nan` and supported ignoring it by specifying `fill=dropna`. i don't believe `"inf"` or `"nan"` is significantly better and since i don't want to break existing behaviour of nan, i think it's best to just treat infinity as we do nan for now. anything more opinionated probably requires more discussion. 
related: #1023 --- gnocchi/rest/aggregates/processor.py | 6 ++- .../gabbits/aggregates-with-resources.yaml | 51 +++++++++++++++++++ 2 files changed, 55 insertions(+), 2 deletions(-) diff --git a/gnocchi/rest/aggregates/processor.py b/gnocchi/rest/aggregates/processor.py index c7e7df94..9bf3957e 100644 --- a/gnocchi/rest/aggregates/processor.py +++ b/gnocchi/rest/aggregates/processor.py @@ -202,7 +202,8 @@ def aggregated(refs_and_timeseries, operations, from_timestamp=None, for sampling in sorted(result, reverse=True): granularity, times, values, references = result[sampling] if fill == "dropna": - pos = ~numpy.isnan(values[0]) + pos = ~numpy.logical_or(numpy.isnan(values[0]), + numpy.isinf(values[0])) v = values[0][pos] t = times[pos] else: @@ -221,7 +222,8 @@ def aggregated(refs_and_timeseries, operations, from_timestamp=None, granularity, times, values, references = result[sampling] for i, ref in enumerate(references): if fill == "dropna": - pos = ~numpy.isnan(values[i]) + pos = ~numpy.logical_or(numpy.isnan(values[i]), + numpy.isinf(values[i])) v = values[i][pos] t = times[pos] else: diff --git a/gnocchi/tests/functional/gabbits/aggregates-with-resources.yaml b/gnocchi/tests/functional/gabbits/aggregates-with-resources.yaml index 625819cb..c5bcf041 100644 --- a/gnocchi/tests/functional/gabbits/aggregates-with-resources.yaml +++ b/gnocchi/tests/functional/gabbits/aggregates-with-resources.yaml @@ -324,6 +324,57 @@ tests: $[0].group: id: 2447cd7e-48a6-4c50-a991-6677cc0d00e6 + - name: aggregate and drop infinity from divide by zero + POST: /v1/aggregates?details=true + data: + resource_type: generic + search: "user_id = 'A50F549C-1F1C-4888-A71A-2C5473CCCEC1'" + operations: "(/ (* 100 (aggregate mean (metric cpu.util mean))) 0 )" + response_json_paths: + $.references.`len`: 3 + $.references[/id].[0]: $HISTORY['list resources'].$RESPONSE['$[0]'] + $.references[/id].[1]: $HISTORY['list resources'].$RESPONSE['$[1]'] + $.references[/id].[2]: $HISTORY['list 
resources'].$RESPONSE['$[2]'] + $.measures.aggregated: [] + + - name: aggregate and return infinity from divide by zero + POST: /v1/aggregates?details=true&fill=null + data: + resource_type: generic + search: "user_id = 'A50F549C-1F1C-4888-A71A-2C5473CCCEC1'" + operations: "(/ (* 100 (aggregate mean (metric cpu.util mean))) 0 )" + response_json_paths: + $.references.`len`: 3 + $.references[/id].[0]: $HISTORY['list resources'].$RESPONSE['$[0]'] + $.references[/id].[1]: $HISTORY['list resources'].$RESPONSE['$[1]'] + $.references[/id].[2]: $HISTORY['list resources'].$RESPONSE['$[2]'] + $.measures.aggregated: + - ['2015-03-06T14:30:00+00:00', 300.0, .inf] + - ['2015-03-06T14:33:57+00:00', 1.0, .inf] + - ['2015-03-06T14:34:12+00:00', 1.0, .inf] + + - name: aggregate metric with groupby on project_id and user_id drop infinity + POST: /v1/aggregates?groupby=project_id&groupby=user_id&details=true + data: + resource_type: generic + search: "user_id = 'A50F549C-1F1C-4888-A71A-2C5473CCCEC1'" + operations: "(/ (* 100 (aggregate mean (metric cpu.util mean))) 0 )" + response_json_paths: + $.`len`: 2 + $[0].measures.references.`len`: 2 + $[0].measures.references[/id].[0]: $HISTORY['list resources'].$RESPONSE['$[0]'] + $[0].measures.references[/id].[1]: $HISTORY['list resources'].$RESPONSE['$[1]'] + $[0].measures.measures.aggregated: [] + $[0].group: + user_id: A50F549C-1F1C-4888-A71A-2C5473CCCEC1 + project_id: c7f32f1f-c5ef-427a-8ecd-915b219c66e8 + $[1].measures.references.`len`: 1 + $[1].measures.references[/id].[0]: $HISTORY['list resources'].$RESPONSE['$[2]'] + $[1].measures.measures.aggregated: [] + $[1].group: + user_id: A50F549C-1F1C-4888-A71A-2C5473CCCEC1 + project_id: ee4cfc41-1cdc-4d2f-9a08-f94111d80171 + # Negative tests - name: not matching granularity -- GitLab From d12a3fb53651aefc51e3d05e07151ac5ac53d14e Mon Sep 17 00:00:00 2001 From: gord chung Date: Tue, 28 May 2019 01:42:06 +0000 Subject: [PATCH 1436/1483] cleanup README.rst to upload to pypi seems 
long_description needs to render cleanly to upload to pypi: https://github.com/pypa/warehouse/issues/5890 related: #1025 --- README.rst | 10 +++++----- setup.cfg | 1 + 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/README.rst b/README.rst index ee52b3e5..9cd8e67e 100644 --- a/README.rst +++ b/README.rst @@ -11,20 +11,20 @@ .. image:: doc/source/_static/gnocchi-logo.png -Gnocchi is an open-source |time series| database. +Gnocchi is an open-source time series database. -The problem that Gnocchi solves is the storage and indexing of |time series| +The problem that Gnocchi solves is the storage and indexing of time series data and resources at a large scale. This is useful in modern cloud platforms which are not only huge but also are dynamic and potentially multi-tenant. Gnocchi takes all of that into account. -Gnocchi has been designed to handle large amounts of |aggregates| being stored +Gnocchi has been designed to handle large amounts of aggregates being stored while being performant, scalable and fault-tolerant. While doing this, the goal was to be sure to not build any hard dependency on any complex storage system. -Gnocchi takes a unique approach to |time series| storage: rather than storing +Gnocchi takes a unique approach to time series storage: rather than storing raw data points, it aggregates them before storing them. This built-in feature -is different from most other |time series| databases, which usually support +is different from most other time series databases, which usually support this mechanism as an option and compute aggregation (average, minimum, etc.) at query time. 
diff --git a/setup.cfg b/setup.cfg index e822ff30..9a1fc1a3 100644 --- a/setup.cfg +++ b/setup.cfg @@ -3,6 +3,7 @@ name = gnocchi url = http://gnocchi.xyz description = Metric as a Service long_description = file: README.rst +long_description_content_type = text/x-rst author = Gnocchi developers author_email = invalid@gnocchi.xyz classifier = -- GitLab From d4ace01b816b2f9d970a8061969366b5f459d6bc Mon Sep 17 00:00:00 2001 From: gord chung Date: Tue, 14 May 2019 23:20:24 -0400 Subject: [PATCH 1437/1483] remove unused statistic code this was previously used to log the aggregation performance (points/time). this logging doesn't exist anymore since we moved to bulk saving so remove it. --- gnocchi/storage/__init__.py | 7 ------- 1 file changed, 7 deletions(-) diff --git a/gnocchi/storage/__init__.py b/gnocchi/storage/__init__.py index ff66d355..936710a7 100644 --- a/gnocchi/storage/__init__.py +++ b/gnocchi/storage/__init__.py @@ -630,11 +630,6 @@ class StorageDriver(object): else: current_first_block_timestamp = ts.first_block_timestamp() - # NOTE(jd) This is Python where you need such - # hack to pass a variable around a closure, - # sorry. - computed_points = {"number": 0} - def _map_compute_splits_operations(bound_timeserie): # NOTE (gordc): bound_timeserie is entire set of # unaggregated measures matching largest @@ -644,8 +639,6 @@ class StorageDriver(object): new_first_block_timestamp = ( bound_timeserie.first_block_timestamp() ) - computed_points['number'] = len(bound_timeserie) - aggregations = metric.archive_policy.aggregations grouped_timeseries = { -- GitLab From f7e15d3ed31ad302dd2f83eb7f25178bcb16fa39 Mon Sep 17 00:00:00 2001 From: Thomas Goirand Date: Wed, 17 Jul 2019 15:40:06 +0200 Subject: [PATCH 1438/1483] Uploading to unstable. 
--- debian/changelog | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/debian/changelog b/debian/changelog index 1189f7fe..6d8a3032 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,4 +1,4 @@ -gnocchi (4.3.1-4) experimental; urgency=medium +gnocchi (4.3.1-4) unstable; urgency=medium [ Marcin Juszkiewicz ] * Updated upstream URL in control/copyright/watch. -- GitLab From 31e8287333586311430d8bde8a1a323afca507d6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ond=C5=99ej=20Nov=C3=BD?= Date: Thu, 18 Jul 2019 18:33:35 +0200 Subject: [PATCH 1439/1483] Use debhelper-compat instead of debian/compat --- debian/changelog | 6 ++++++ debian/compat | 1 - debian/control | 2 +- 3 files changed, 7 insertions(+), 2 deletions(-) delete mode 100644 debian/compat diff --git a/debian/changelog b/debian/changelog index 6d8a3032..7551d43c 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +gnocchi (4.3.1-5) UNRELEASED; urgency=medium + + * Use debhelper-compat instead of debian/compat. 
+ + -- Ondřej Nový Thu, 18 Jul 2019 18:33:35 +0200 + gnocchi (4.3.1-4) unstable; urgency=medium [ Marcin Juszkiewicz ] diff --git a/debian/compat b/debian/compat deleted file mode 100644 index f599e28b..00000000 --- a/debian/compat +++ /dev/null @@ -1 +0,0 @@ -10 diff --git a/debian/control b/debian/control index dd1762db..ebb12a3e 100644 --- a/debian/control +++ b/debian/control @@ -6,7 +6,7 @@ Uploaders: Thomas Goirand , Michal Arbet , Build-Depends: - debhelper (>= 10), + debhelper-compat (= 10), dh-python, openstack-pkg-tools (>= 99~), python3-all, -- GitLab From f32285cfdb24a7ed6ae1beaca0356a44d3867c43 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ond=C5=99ej=20Nov=C3=BD?= Date: Thu, 18 Jul 2019 18:33:36 +0200 Subject: [PATCH 1440/1483] d/changelog: Remove trailing whitespaces --- debian/changelog | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/debian/changelog b/debian/changelog index 7551d43c..52eae857 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,13 +1,14 @@ gnocchi (4.3.1-5) UNRELEASED; urgency=medium * Use debhelper-compat instead of debian/compat. + * d/changelog: Remove trailing whitespaces. -- Ondřej Nový Thu, 18 Jul 2019 18:33:35 +0200 gnocchi (4.3.1-4) unstable; urgency=medium [ Marcin Juszkiewicz ] - * Updated upstream URL in control/copyright/watch. + * Updated upstream URL in control/copyright/watch. [ Michal Arbet ] * d/control: -- GitLab From 94ab1562d8887b788bed1685f04f0613c4a57172 Mon Sep 17 00:00:00 2001 From: gord chung Date: Mon, 27 May 2019 00:17:34 +0000 Subject: [PATCH 1441/1483] fix gendoc gendoc throws: `AttributeError: 'Sphinx' object has no attribute 'info'` use logging provided by sphinx.util (not sure why it's using sphinx logger but meh) (cherry picked from commit 84819f67923d1648452fb5bc869430cb07159e11) also - dropping migration tests from pre4.2 because no ones got time for it. 
- fix test --- .travis.yml | 4 ---- gnocchi/gendoc.py | 10 +++++++--- .../functional/gabbits/aggregates-with-metric-ids.yaml | 2 +- 3 files changed, 8 insertions(+), 8 deletions(-) diff --git a/.travis.yml b/.travis.yml index b8504b08..58f7b67c 100644 --- a/.travis.yml +++ b/.travis.yml @@ -11,10 +11,6 @@ env: - TARGET: pep8 - TARGET: docs - - TARGET: py27-mysql-ceph-upgrade-from-4.0 - - TARGET: py37-postgresql-file-upgrade-from-4.0 - - TARGET: py27-mysql-ceph-upgrade-from-4.1 - - TARGET: py37-postgresql-file-upgrade-from-4.1 - TARGET: py27-mysql-ceph-upgrade-from-4.2 - TARGET: py37-postgresql-file-upgrade-from-4.2 diff --git a/gnocchi/gendoc.py b/gnocchi/gendoc.py index 50a427b7..ccdc4d79 100644 --- a/gnocchi/gendoc.py +++ b/gnocchi/gendoc.py @@ -24,11 +24,15 @@ import jinja2 from oslo_config import generator import six import six.moves +from sphinx.util import logging import webob.request import yaml from gnocchi.tests import test_rest + +LOG = logging.getLogger(__name__) + # HACK(jd) Not sure why but Sphinx setup this multiple times, so we just avoid # doing several times the requests by using this global variable :( _RUN = False @@ -223,8 +227,8 @@ def setup(app): else: request.body = fake_file.read(clen) - app.info("Doing request %s: %s" % (entry['name'], - six.text_type(request))) + LOG.info("Doing request %s: %s", + entry['name'], six.text_type(request)) with webapp.use_admin_user(): response = webapp.request(request) entry['response'] = response @@ -244,7 +248,7 @@ def setup(app): f.write(content) config_output_file = 'doc/source/gnocchi.conf.sample' - app.info("Generating %s" % config_output_file) + LOG.info("Generating %s", config_output_file) generator.main([ '--config-file', '%s/gnocchi-config-generator.conf' % os.path.dirname(__file__), diff --git a/gnocchi/tests/functional/gabbits/aggregates-with-metric-ids.yaml b/gnocchi/tests/functional/gabbits/aggregates-with-metric-ids.yaml index 0295c245..e6ac5ac2 100644 --- 
a/gnocchi/tests/functional/gabbits/aggregates-with-metric-ids.yaml +++ b/gnocchi/tests/functional/gabbits/aggregates-with-metric-ids.yaml @@ -641,7 +641,7 @@ tests: $.code: 400 $.description.cause: "Invalid operations" $.description.reason: "Fail to parse the operations string" - $.description.detail: "Expected \")\" (at char 15), (line:1, col:16)" + $.description.detail: '/^Expected "\)", found end of text \(at char 15\), \(line:1, col:16\)/' - name: get invalid metric operations POST: /v1/aggregates -- GitLab From ae97c8f87a1f48995abd180950e5fc0c8291d92a Mon Sep 17 00:00:00 2001 From: gord chung Date: Tue, 23 Jul 2019 03:38:49 +0000 Subject: [PATCH 1442/1483] fix test it's failing. --- .../tests/functional/gabbits/aggregates-with-metric-ids.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/gnocchi/tests/functional/gabbits/aggregates-with-metric-ids.yaml b/gnocchi/tests/functional/gabbits/aggregates-with-metric-ids.yaml index ac1826ae..d2cff26a 100644 --- a/gnocchi/tests/functional/gabbits/aggregates-with-metric-ids.yaml +++ b/gnocchi/tests/functional/gabbits/aggregates-with-metric-ids.yaml @@ -645,7 +645,7 @@ tests: $.code: 400 $.description.cause: "Invalid operations" $.description.reason: "/^Fail to parse the operations string/" - $.description.detail: "Expected \")\" (at char 15), (line:1, col:16)" + $.description.detail: '/^Expected "\)", found end of text \(at char 15\), \(line:1, col:16\)/' - name: get invalid metric operations POST: /v1/aggregates -- GitLab From 7a3ddc4ceb64bf648a5f383814632f72b8700a8f Mon Sep 17 00:00:00 2001 From: James Page Date: Mon, 3 Sep 2018 11:40:32 +0100 Subject: [PATCH 1443/1483] py3: fix misc encoding issues Fix miscellanous encoding issue when running under Python 3: - Encoding of member_id prior to passing into tooz. - Decoding of member ID's during response processing for status API calls. 
--- gnocchi/cli/metricd.py | 14 ++++++++------ gnocchi/rest/api.py | 6 ++++-- gnocchi/rest/app.py | 2 +- 3 files changed, 13 insertions(+), 9 deletions(-) diff --git a/gnocchi/cli/metricd.py b/gnocchi/cli/metricd.py index 523c47a7..3e8edcde 100644 --- a/gnocchi/cli/metricd.py +++ b/gnocchi/cli/metricd.py @@ -60,12 +60,14 @@ class MetricProcessBase(cotyledon.Service): self._wake_up.set() def _configure(self): - member_id = "%s.%s.%s" % (socket.gethostname(), - self.worker_id, - # NOTE(jd) Still use a uuid here so we're - # sure there's no conflict in case of - # crash/restart - str(uuid.uuid4())) + member_id = ( + "%s.%s.%s" % (socket.gethostname(), + self.worker_id, + # NOTE(jd) Still use a uuid here so we're + # sure there's no conflict in case of + # crash/restart + str(uuid.uuid4())) + ).encode() self.coord = get_coordinator_and_start(member_id, self.conf.coordination_url) self.store = storage.get_driver(self.conf) diff --git a/gnocchi/rest/api.py b/gnocchi/rest/api.py index 23d1fc3c..02ddbaa5 100644 --- a/gnocchi/rest/api.py +++ b/gnocchi/rest/api.py @@ -2106,9 +2106,11 @@ class StatusController(rest.RestController): metricd.MetricProcessor.GROUP_ID, member) for member in members ] - report_dict['metricd']['processors'] = members + report_dict['metricd']['processors'] = [ + member.decode() for member in members + ] report_dict['metricd']['statistics'] = { - member: cap.get() + member.decode(): cap.get() for member, cap in six.moves.zip(members, caps) } else: diff --git a/gnocchi/rest/app.py b/gnocchi/rest/app.py index 98384662..c4a6f891 100644 --- a/gnocchi/rest/app.py +++ b/gnocchi/rest/app.py @@ -102,7 +102,7 @@ class GnocchiHook(pecan.hooks.PecanHook): # entirely. 
self.backends[name] = ( metricd.get_coordinator_and_start( - str(uuid.uuid4()), + str(uuid.uuid4()).encode(), self.conf.coordination_url) ) elif name == "storage": -- GitLab From cd2533404220a08f3af6ba8997faaaf2bfbc2a85 Mon Sep 17 00:00:00 2001 From: gord chung Date: Tue, 28 May 2019 01:42:06 +0000 Subject: [PATCH 1444/1483] cleanup README.rst to upload to pypi seems long_description needs to render cleanly to upload to pypi: https://github.com/pypa/warehouse/issues/5890 related: #1025 (cherry picked from commit d12a3fb53651aefc51e3d05e07151ac5ac53d14e) --- README.rst | 10 +++++----- setup.cfg | 1 + 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/README.rst b/README.rst index ee52b3e5..9cd8e67e 100644 --- a/README.rst +++ b/README.rst @@ -11,20 +11,20 @@ .. image:: doc/source/_static/gnocchi-logo.png -Gnocchi is an open-source |time series| database. +Gnocchi is an open-source time series database. -The problem that Gnocchi solves is the storage and indexing of |time series| +The problem that Gnocchi solves is the storage and indexing of time series data and resources at a large scale. This is useful in modern cloud platforms which are not only huge but also are dynamic and potentially multi-tenant. Gnocchi takes all of that into account. -Gnocchi has been designed to handle large amounts of |aggregates| being stored +Gnocchi has been designed to handle large amounts of aggregates being stored while being performant, scalable and fault-tolerant. While doing this, the goal was to be sure to not build any hard dependency on any complex storage system. -Gnocchi takes a unique approach to |time series| storage: rather than storing +Gnocchi takes a unique approach to time series storage: rather than storing raw data points, it aggregates them before storing them. 
This built-in feature -is different from most other |time series| databases, which usually support +is different from most other time series databases, which usually support this mechanism as an option and compute aggregation (average, minimum, etc.) at query time. diff --git a/setup.cfg b/setup.cfg index 57201b4f..a563a015 100644 --- a/setup.cfg +++ b/setup.cfg @@ -3,6 +3,7 @@ name = gnocchi url = http://gnocchi.xyz description = Metric as a Service long_description = file: README.rst +long_description_content_type = text/x-rst author = Gnocchi developers author_email = invalid@gnocchi.xyz classifier = -- GitLab From ef9da16f3d9bf1962bfa2eb4aa6ead5d993d41c8 Mon Sep 17 00:00:00 2001 From: gord chung Date: Sun, 21 Jul 2019 22:01:13 -0400 Subject: [PATCH 1445/1483] minor cleanup - don't concat the values when getting unique times across series - create a common sum function instead of making a sum timeseries and ripping out just the values. - fix test --- gnocchi/carbonara.py | 20 ++++++++++++-------- gnocchi/rest/aggregates/processor.py | 6 +++--- 2 files changed, 15 insertions(+), 11 deletions(-) diff --git a/gnocchi/carbonara.py b/gnocchi/carbonara.py index 02eeacca..1bc5c280 100644 --- a/gnocchi/carbonara.py +++ b/gnocchi/carbonara.py @@ -129,15 +129,19 @@ class GroupedTimeSeries(object): self.tstamps, self.counts = numpy.unique(self.indexes, return_counts=True) + @staticmethod + def _sum(values, counts): + return numpy.bincount( + numpy.repeat(numpy.arange(counts.size), counts), weights=values) + def mean(self): - series = self.sum() - series['values'] /= self.counts - return series + return make_timeseries( + self.tstamps, + self._sum(self._ts['values'], self.counts) / self.counts) def sum(self): - return make_timeseries(self.tstamps, numpy.bincount( - numpy.repeat(numpy.arange(self.counts.size), self.counts), - weights=self._ts['values'])) + return make_timeseries(self.tstamps, + self._sum(self._ts['values'], self.counts)) def min(self): ordered = 
self._ts['values'].argsort() @@ -165,9 +169,9 @@ class GroupedTimeSeries(object): self._ts['values'][ordered][mid_ceil]) / 2.0) def std(self): - mean_ts = self.mean() + values = self._sum(self._ts['values'], self.counts) / self.counts diff_sq = numpy.square(self._ts['values'] - - numpy.repeat(mean_ts['values'], self.counts)) + numpy.repeat(values, self.counts)) bin_sum = numpy.bincount( numpy.repeat(numpy.arange(self.counts.size), self.counts), weights=diff_sq) diff --git a/gnocchi/rest/aggregates/processor.py b/gnocchi/rest/aggregates/processor.py index 9bf3957e..e837df22 100644 --- a/gnocchi/rest/aggregates/processor.py +++ b/gnocchi/rest/aggregates/processor.py @@ -152,10 +152,10 @@ def aggregated(refs_and_timeseries, operations, from_timestamp=None, is_aggregated = False result = {} for sampling in sorted(series, reverse=True): - combine = numpy.concatenate(series[sampling]) # np.unique sorts results for us - times, indices = numpy.unique(combine['timestamps'], - return_inverse=True) + times, indices = numpy.unique( + numpy.concatenate([i['timestamps'] for i in series[sampling]]), + return_inverse=True) # create nd-array (unique series x unique times) and fill filler = (numpy.NaN if fill in [None, 'null', 'dropna'] -- GitLab From c3b54d7568d9eb01bb237b15ccfb120fd5d1ffb8 Mon Sep 17 00:00:00 2001 From: gord chung Date: Tue, 30 Jul 2019 23:49:25 -0400 Subject: [PATCH 1446/1483] try forcing newer version of six on to travis image it's not installing correct version when it installs twine during deploy --- .travis.yml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/.travis.yml b/.travis.yml index 58f7b67c..9f23b47a 100644 --- a/.travis.yml +++ b/.travis.yml @@ -65,6 +65,10 @@ notifications: channels: - "irc.freenode.org#gnocchi" +before_deploy: + - pip install --user --upgrade pip + - pip install --user --upgrade six + deploy: provider: pypi user: jd -- GitLab From f93bb294af25706e0caf08143bc2d3482ad0bdd8 Mon Sep 17 00:00:00 2001 From: gord chung Date: Wed, 31 
Jul 2019 08:45:45 -0400 Subject: [PATCH 1447/1483] fix travis deploy - force upgrade of six - remove README.rst cleanup as it's been done permanently (and i didn't realise this line existed when i did it) - add skip_existing[1]. each ENV runs the deploy so travis tries to upload multiple times and fails on each ENV except for the first [1] https://docs.travis-ci.com/user/deployment/pypi/#upload-artifacts-only-once --- .travis.yml | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/.travis.yml b/.travis.yml index 7a22aa91..6be4f67d 100644 --- a/.travis.yml +++ b/.travis.yml @@ -67,14 +67,15 @@ notifications: - "irc.freenode.org#gnocchi" before_deploy: - # Remove |substitutions| to fix rendering on pypi. - - sed -i -e 's/|\([a-zA-Z0-9 ]\+\)|/\1/g' README.rst + - pip install --user --upgrade pip + - pip install --user --upgrade six deploy: provider: pypi user: jd password: secure: c+Ccx3SHCWepiy0PUxDJ7XO9r3aNYnHjkzxF5c/kjV8QaCJayAJEgXJnBKhvjroqwgn7JPUgpD6QdSWdB4FqjbZYQ3I3oHOO1YL0vYYa8wHG5HuMsMp4J8qvzgs3QNQDECPI1mXsPevn3VMfGszUN+6BQrHB3FbZsTtOmE+Kmgok5NCT+obsfEhVea/UOD0XFUkVW9VJhPjQ2ytvYvFIc46/73GQf2Er/5DCa/4GGDEBSD++bDJgp3kQj438xslCAFeZWDwGsa+cTc43PI0Y0+E144ySVY7QyVbZ1B66a1BGWVrXJuM+gW/eIBCMN1FJXmD7CDdPa22azKI8dfMF7qaH3Oiv3cVovPWpubOvhTUHUFwG8+W7Fx+zUKktCWiLer/fZvEd3W8tcgby2kNOdcUfKfDB2ImZJ+P694/OJ4jJ8T5TQerruNoP2OstzcBMon77Ry0XawXR15SZd4JhbqhSi+h7XV6EYmct1UN4zoysA7fx/cWHcBxdnm2G6R0gzmOiiGUd74ptU8lZ3IlEP6EZckK/OZOdy1I8EQeUe7aiTooXZDAn07iPkDZliYRr2e36ij/xjtWCe1AjCksn/xdKfHOKJv5UVob495DU2GuNObe01ewXzexcnldjfp9Sb8SVEFuhHx6IvH5OC+vAq+BVYu2jwvMcVfXi3VSOkB4= + skip_existing: true on: all_branches: true tags: true -- GitLab From b6f4a3cfecde0610e60c18932e5383381eb4d722 Mon Sep 17 00:00:00 2001 From: Michal Arbet Date: Mon, 9 Sep 2019 13:21:25 +0200 Subject: [PATCH 1448/1483] Release to unstable --- debian/changelog | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/debian/changelog b/debian/changelog index 52eae857..a30add71 100644 
--- a/debian/changelog +++ b/debian/changelog @@ -1,9 +1,13 @@ -gnocchi (4.3.1-5) UNRELEASED; urgency=medium +gnocchi (4.3.4-1) unstable; urgency=medium + [ Ondřej Nový ] * Use debhelper-compat instead of debian/compat. * d/changelog: Remove trailing whitespaces. - -- Ondřej Nový Thu, 18 Jul 2019 18:33:35 +0200 + [ Michal Arbet ] + * New upstream version + + -- Michal Arbet Mon, 09 Sep 2019 13:19:28 +0200 gnocchi (4.3.1-4) unstable; urgency=medium -- GitLab From 6df99278ccea7cfb1d6608029471903031d842ca Mon Sep 17 00:00:00 2001 From: Thomas Goirand Date: Tue, 29 Oct 2019 00:09:45 +0100 Subject: [PATCH 1449/1483] Rebuilt source-only. --- debian/changelog | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/debian/changelog b/debian/changelog index 52eae857..648772bf 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,9 +1,13 @@ -gnocchi (4.3.1-5) UNRELEASED; urgency=medium +gnocchi (4.3.1-5) unstable; urgency=medium + [ Ondřej Nový ] * Use debhelper-compat instead of debian/compat. * d/changelog: Remove trailing whitespaces. - -- Ondřej Nový Thu, 18 Jul 2019 18:33:35 +0200 + [ Thomas Goirand ] + * Rebuilt source-only. + + -- Thomas Goirand Tue, 29 Oct 2019 00:09:14 +0100 gnocchi (4.3.1-4) unstable; urgency=medium -- GitLab From a933b0ea67dedd38067b453e12490a41257d8872 Mon Sep 17 00:00:00 2001 From: Michal Arbet Date: Fri, 1 Nov 2019 14:43:33 +0100 Subject: [PATCH 1450/1483] Fix changelog --- debian/changelog | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/debian/changelog b/debian/changelog index 648772bf..a30add71 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,13 +1,13 @@ -gnocchi (4.3.1-5) unstable; urgency=medium +gnocchi (4.3.4-1) unstable; urgency=medium [ Ondřej Nový ] * Use debhelper-compat instead of debian/compat. * d/changelog: Remove trailing whitespaces. - [ Thomas Goirand ] - * Rebuilt source-only. 
+ [ Michal Arbet ] + * New upstream version - -- Thomas Goirand Tue, 29 Oct 2019 00:09:14 +0100 + -- Michal Arbet Mon, 09 Sep 2019 13:19:28 +0200 gnocchi (4.3.1-4) unstable; urgency=medium -- GitLab From aa4d0d1af049699be7d8bf73654811222677ca1c Mon Sep 17 00:00:00 2001 From: Michal Arbet Date: Fri, 1 Nov 2019 14:45:16 +0100 Subject: [PATCH 1451/1483] Rebuild with source only and upload to unstable --- debian/changelog | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/debian/changelog b/debian/changelog index a30add71..0e7b7c1d 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +gnocchi (4.3.4-2) unstable; urgency=medium + + * Rebuilt source-only. + + -- Michal Arbet Fri, 01 Nov 2019 14:44:21 +0100 + gnocchi (4.3.4-1) unstable; urgency=medium [ Ondřej Nový ] -- GitLab From ff84aaf91ccb6f32d0ef32e8c5ddb1e922570e78 Mon Sep 17 00:00:00 2001 From: Thomas Goirand Date: Thu, 7 Nov 2019 21:03:43 +0100 Subject: [PATCH 1452/1483] Rebuild source-only. --- debian/changelog | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/debian/changelog b/debian/changelog index 0e7b7c1d..cd0a7103 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +gnocchi (4.3.4-3) unstable; urgency=medium + + * Rebuild source-only. + + -- Thomas Goirand Thu, 07 Nov 2019 21:03:26 +0100 + gnocchi (4.3.4-2) unstable; urgency=medium * Rebuilt source-only. -- GitLab From 4b670458e9a717ec325aa6bd725a1484b9346993 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Thu, 16 Jan 2020 11:46:08 +0100 Subject: [PATCH 1453/1483] Change URL to gnocchi.osci.io This is the current hosting platform now that the name has expired. 
--- .travis.yml | 4 ++-- README.rst | 2 +- doc/source/client.rst | 2 +- gnocchi/gendoc.py | 2 +- gnocchi/tests/functional_live/gabbits/search-resource.yaml | 2 +- setup.cfg | 3 +-- tox.ini | 2 +- 7 files changed, 8 insertions(+), 9 deletions(-) diff --git a/.travis.yml b/.travis.yml index 6be4f67d..0620d069 100644 --- a/.travis.yml +++ b/.travis.yml @@ -10,7 +10,7 @@ cache: env: - TARGET: pep8 - TARGET: docs - - TARGET: docs-gnocchi.xyz + - TARGET: docs-gnocchi-web - TARGET: py27-mysql-ceph-upgrade-from-4.3 - TARGET: py37-postgresql-file-upgrade-from-4.3 @@ -38,7 +38,7 @@ before_script: ;; esac ; case $TARGET in - docs-gnocchi.xyz) + docs-gnocchi-web) git branch -a | sed -n "/\/HEAD /d; /\/master$/d; s,remotes/origin/,,p;" | xargs -i git branch {} origin/{} ; git branch -D master; git checkout -b master; diff --git a/README.rst b/README.rst index 9cd8e67e..9012f716 100644 --- a/README.rst +++ b/README.rst @@ -31,4 +31,4 @@ query time. Because Gnocchi computes all the aggregations at ingestion, getting the data back is extremely fast, as it just needs to read back the pre-computed results. -You can read the full documentation online at http://gnocchi.xyz. +You can read the full documentation online at http://gnocchi.osci.io. diff --git a/doc/source/client.rst b/doc/source/client.rst index 4be6893c..a53be245 100644 --- a/doc/source/client.rst +++ b/doc/source/client.rst @@ -24,6 +24,6 @@ It can be installed using *go get*:: This package provides the Go SDK only. You can read the `godoc reference`_. -.. _full documentation online: http://gnocchi.xyz/gnocchiclient +.. _full documentation online: http://gnocchi.osci.io/gnocchiclient .. _Gophercloud: https://github.com/gophercloud .. 
_godoc reference: https://godoc.org/github.com/gophercloud/utils diff --git a/gnocchi/gendoc.py b/gnocchi/gendoc.py index ccdc4d79..bb49b413 100644 --- a/gnocchi/gendoc.py +++ b/gnocchi/gendoc.py @@ -175,7 +175,7 @@ def setup(app): if _RUN: return - # NOTE(sileht): On gnocchi.xyz, we build a multiversion of the docs + # NOTE(sileht): On gnocchi.osci.io, we build a multiversion of the docs # all versions are built with the master gnocchi.gendoc sphinx extension. # So the hack here run an other python script to generate the rest.rst # file of old version of the module. diff --git a/gnocchi/tests/functional_live/gabbits/search-resource.yaml b/gnocchi/tests/functional_live/gabbits/search-resource.yaml index fe254788..54573dd6 100644 --- a/gnocchi/tests/functional_live/gabbits/search-resource.yaml +++ b/gnocchi/tests/functional_live/gabbits/search-resource.yaml @@ -1,6 +1,6 @@ # # Tests to confirm resources are searchable. Run against a live setup. -# URL: http://gnocchi.xyz/rest.html#searching-for-resources +# URL: http://gnocchi.osci.io/rest.html#searching-for-resources # # Instance-ResourceID-1: a64ca14f-bc7c-45b0-aa85-42cd2179e1e2 # Instance-ResourceID-2: 7ccccfa0-92ce-4225-80ca-3ac9cb122d6a diff --git a/setup.cfg b/setup.cfg index 9a1fc1a3..9940280a 100644 --- a/setup.cfg +++ b/setup.cfg @@ -1,11 +1,10 @@ [metadata] name = gnocchi -url = http://gnocchi.xyz +url = http://gnocchi.osci.io description = Metric as a Service long_description = file: README.rst long_description_content_type = text/x-rst author = Gnocchi developers -author_email = invalid@gnocchi.xyz classifier = Intended Audience :: Information Technology Intended Audience :: System Administrators diff --git a/tox.ini b/tox.ini index 7e54929b..24ca7933 100644 --- a/tox.ini +++ b/tox.ini @@ -98,7 +98,7 @@ setenv = GNOCCHI_TEST_DEBUG=1 commands = doc8 --ignore-path doc/source/rest.rst,doc/source/comparison-table.rst doc/source pifpaf -g GNOCCHI_INDEXER_URL run postgresql -- python setup.py build_sphinx -W 
-[testenv:docs-gnocchi.xyz] +[testenv:docs-gnocchi-web] basepython = python2.7 whitelist_externals = bash rm setenv = GNOCCHI_STORAGE_DEPS=file -- GitLab From cd80f0a4bcdc64e1df4c26e926d4d0db9b228a81 Mon Sep 17 00:00:00 2001 From: pedro Date: Thu, 6 Feb 2020 17:56:03 -0300 Subject: [PATCH 1454/1483] Enhance rest api logging --- gnocchi/rest/api.py | 9 +++++- gnocchi/rest/exceptions.py | 30 +++++++++++++++++++ .../tests/functional/gabbits/resource.yaml | 9 ++++-- 3 files changed, 45 insertions(+), 3 deletions(-) create mode 100644 gnocchi/rest/exceptions.py diff --git a/gnocchi/rest/api.py b/gnocchi/rest/api.py index 02ddbaa5..31266381 100644 --- a/gnocchi/rest/api.py +++ b/gnocchi/rest/api.py @@ -17,6 +17,7 @@ import collections import functools import itertools +import logging import operator import uuid @@ -42,6 +43,7 @@ from gnocchi import json from gnocchi import resource_type from gnocchi.rest.aggregates import exceptions from gnocchi.rest.aggregates import processor +from gnocchi.rest import exceptions as rest_exceptions from gnocchi import storage from gnocchi import utils @@ -54,6 +56,7 @@ except ImportError: ATTRGETTER_GRANULARITY = operator.attrgetter("granularity") +LOG = logging.getLogger(__name__) def arg_to_list(value): @@ -77,6 +80,7 @@ def abort(status_code, detail=''): } elif isinstance(detail, Exception): detail = detail.jsonify() + LOG.debug("Aborting request. Code [%s]. 
Details [%s]", status_code, detail) return pecan.abort(status_code, detail) @@ -157,7 +161,10 @@ def deserialize(expected_content_types=None): try: params = json.load(pecan.request.body_file) except Exception as e: - abort(400, "Unable to decode body: " + six.text_type(e)) + details = rest_exceptions.UnableToDecodeBody(e, + pecan.request.body_file) + LOG.warning(details.jsonify()) + abort(400, details) return params diff --git a/gnocchi/rest/exceptions.py b/gnocchi/rest/exceptions.py new file mode 100644 index 00000000..1cc5eb88 --- /dev/null +++ b/gnocchi/rest/exceptions.py @@ -0,0 +1,30 @@ +# -*- encoding: utf-8 -*- +# +# Copyright © 2016-2020 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+import six + + +class UnableToDecodeBody(Exception): + def __init__(self, exception, body): + self.reason = six.text_type(exception) + self.body = body + super(UnableToDecodeBody, self).__init__(body) + + def jsonify(self): + return { + "cause": "Unable to decode body", + "reason": self.reason, + "detail": self.body + } diff --git a/gnocchi/tests/functional/gabbits/resource.yaml b/gnocchi/tests/functional/gabbits/resource.yaml index 2b081f3d..96d5fa73 100644 --- a/gnocchi/tests/functional/gabbits/resource.yaml +++ b/gnocchi/tests/functional/gabbits/resource.yaml @@ -312,10 +312,15 @@ tests: - name: patch resource no data desc: providing no data is an error + request_headers: + accept: application/json PATCH: /v1/resource/generic/75C44741-CC60-4033-804E-2D3098C7D2E9 status: 400 - response_strings: - - "Unable to decode body:" + response_json_paths: + $.description: + cause: "Unable to decode body" + reason: "Expected object or value" + detail: [] - name: patch resource bad data desc: providing data that is not a dict is an error -- GitLab From c0d5bfd4f0e6256e016e89f3d885e05070294485 Mon Sep 17 00:00:00 2001 From: Tobias Urdin Date: Sat, 11 Jul 2020 02:18:51 +0200 Subject: [PATCH 1455/1483] Remove wrong response data checks in gabbits --- gnocchi/tests/functional/gabbits/resource-type.yaml | 4 ---- 1 file changed, 4 deletions(-) diff --git a/gnocchi/tests/functional/gabbits/resource-type.yaml b/gnocchi/tests/functional/gabbits/resource-type.yaml index babe6dcd..91b2a584 100644 --- a/gnocchi/tests/functional/gabbits/resource-type.yaml +++ b/gnocchi/tests/functional/gabbits/resource-type.yaml @@ -768,8 +768,6 @@ tests: min_length: 0 max_length: 255 status: 400 - response_strings: - - "can't remove non-existent object 'what'" - name: patch a resource attribute replace PATCH: /v1/resource_type/my_custom_resource @@ -818,8 +816,6 @@ tests: - op: remove path: /attributes/unknown status: 400 - response_strings: - - "can't remove non-existent object 'unknown'" # Ensure we 
can't delete the type -- GitLab From ad58d6c6154fc0d8c5feb88d8523a35f481bf961 Mon Sep 17 00:00:00 2001 From: Tobias Urdin Date: Sat, 11 Jul 2020 02:25:23 +0200 Subject: [PATCH 1456/1483] Remove seconds keyword from numpy.timedelta64 --- gnocchi/cli/injector.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/gnocchi/cli/injector.py b/gnocchi/cli/injector.py index a624c419..1f9a1dac 100644 --- a/gnocchi/cli/injector.py +++ b/gnocchi/cli/injector.py @@ -71,7 +71,7 @@ def _inject(inc, coord, store, idx, with utils.StopWatch() as sw: measures = { m_id: [incoming.Measure( - now + numpy.timedelta64(seconds=s), + now + numpy.timedelta64(s, 's'), random.randint(-999999, 999999)) for s in range(measures)] for m_id in metric_ids } -- GitLab From a338a6a7ad0395c647f07e0b2dfd73f0b881f5c8 Mon Sep 17 00:00:00 2001 From: Tobias Urdin Date: Sat, 11 Jul 2020 02:30:04 +0200 Subject: [PATCH 1457/1483] Sphinx suppress ref.term warnings These are treated as error, suppress for now and we can go back and fix docs later. --- doc/source/conf.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/doc/source/conf.py b/doc/source/conf.py index 841ab0d4..3da13ee2 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -73,6 +73,9 @@ exclude_patterns = [] # The reST default role (used for this markup: `text`) to use for all documents. #default_role = None +# A list of warning types to suppress arbitrary warning messages. +suppress_warnings = ['ref.term'] + # If true, '()' will be appended to :func: etc. cross-reference text. 
#add_function_parentheses = True -- GitLab From 1ecc8038efb9ff56ef2c5e0941be34f4ac053b31 Mon Sep 17 00:00:00 2001 From: Tobias Urdin Date: Mon, 7 Sep 2020 23:27:15 +0200 Subject: [PATCH 1458/1483] Remove python2 testing --- .testr.conf | 2 +- .travis.yml | 4 +--- doc/source/contributing.rst | 10 +++++----- run-func-tests.sh | 4 +--- run-upgrade-tests.sh | 10 +++++----- tools/pretty_tox.sh | 4 ++-- tox.ini | 15 ++++++++------- 7 files changed, 23 insertions(+), 26 deletions(-) diff --git a/.testr.conf b/.testr.conf index 6e2e4a5e..e5caedb8 100644 --- a/.testr.conf +++ b/.testr.conf @@ -1,5 +1,5 @@ [DEFAULT] -test_command=${PYTHON:-python} -m subunit.run discover -t . ${GNOCCHI_TEST_PATH:-gnocchi/tests} $LISTOPT $IDOPTION +test_command=${PYTHON:-python3} -m subunit.run discover -t . ${GNOCCHI_TEST_PATH:-gnocchi/tests} $LISTOPT $IDOPTION test_id_option=--load-list $IDFILE test_list_option=--list group_regex=(gabbi\.suitemaker\.test_gabbi((_live_|_)([^_]+)))_ diff --git a/.travis.yml b/.travis.yml index 0620d069..2da9ac80 100644 --- a/.travis.yml +++ b/.travis.yml @@ -12,12 +12,10 @@ env: - TARGET: docs - TARGET: docs-gnocchi-web - - TARGET: py27-mysql-ceph-upgrade-from-4.3 + - TARGET: py37-mysql-ceph-upgrade-from-4.3 - TARGET: py37-postgresql-file-upgrade-from-4.3 - - TARGET: py27-mysql - TARGET: py37-mysql - - TARGET: py27-postgresql - TARGET: py37-postgresql before_script: diff --git a/doc/source/contributing.rst b/doc/source/contributing.rst index 1cfc0b76..feda4dfd 100644 --- a/doc/source/contributing.rst +++ b/doc/source/contributing.rst @@ -66,16 +66,16 @@ a virtual environment for each test environment, so make sure you are using an up to date version of `virtualenv `_. Different test environments and configurations can be found by running the -``tox -l`` command. For example, to run tests with Python 2.7, PostgreSQL as -indexer, and file as storage backend: +``tox -l`` command. 
For example, to run tests with PostgreSQL as indexer, +and file as storage backend: :: - tox -e py27-postgresql-file + tox -e py37-postgresql-file -To run tests with Python 2.7, MySQL as indexer, and Ceph as storage backend: +To run tests with MySQL as indexer, and Ceph as storage backend: :: - tox -e py35-mysql-ceph + tox -e py37-mysql-ceph diff --git a/run-func-tests.sh b/run-func-tests.sh index da261380..5e3ebc34 100755 --- a/run-func-tests.sh +++ b/run-func-tests.sh @@ -15,12 +15,10 @@ check_empty_var() { fi } -PYTHON_VERSION_MAJOR=$(python -c 'import sys; print(sys.version_info.major)') - GNOCCHI_TEST_STORAGE_DRIVERS=${GNOCCHI_TEST_STORAGE_DRIVERS:-file} GNOCCHI_TEST_INDEXER_DRIVERS=${GNOCCHI_TEST_INDEXER_DRIVERS:-postgresql} for storage in ${GNOCCHI_TEST_STORAGE_DRIVERS}; do - if [ "$storage" == "swift" ] && [ "$PYTHON_VERSION_MAJOR" == "3" ]; then + if [ "$storage" == "swift" ]; then echo "WARNING: swift does not support python 3 skipping" continue fi diff --git a/run-upgrade-tests.sh b/run-upgrade-tests.sh index bb0e405a..0dc1d300 100755 --- a/run-upgrade-tests.sh +++ b/run-upgrade-tests.sh @@ -3,7 +3,7 @@ set -e export GNOCCHI_DATA=$(mktemp -d -t gnocchi.XXXX) -old_version=$(pip freeze | sed -n '/gnocchi==/s/.*==\(.*\)/\1/p') +old_version=$(pip3 freeze | sed -n '/gnocchi==/s/.*==\(.*\)/\1/p') RESOURCE_IDS=( "5a301761-aaaa-46e2-8900-8b4f6fe6675a" @@ -40,7 +40,7 @@ inject_data() { { measures_sep="" - MEASURES=$(python -c 'import datetime, random, json; now = datetime.datetime.utcnow(); print(json.dumps([{"timestamp": (now - datetime.timedelta(seconds=i)).isoformat(), "value": random.uniform(-100000, 100000)} for i in range(0, 288000, 10)]))') + MEASURES=$(python3 -c 'import datetime, random, json; now = datetime.datetime.utcnow(); print(json.dumps([{"timestamp": (now - datetime.timedelta(seconds=i)).isoformat(), "value": random.uniform(-100000, 100000)} for i in range(0, 288000, 10)]))') echo -n '{' resource_sep="" for resource_id in ${RESOURCE_IDS[@]} 
$RESOURCE_ID_EXT; do @@ -80,9 +80,9 @@ inject_data $GNOCCHI_DATA dump_data $GNOCCHI_DATA/old pifpaf_stop -new_version=$(python setup.py --version) +new_version=$(python3 setup.py --version) echo "* Upgrading Gnocchi from $old_version to $new_version" -pip install -v -U .[${GNOCCHI_VARIANT}] +pip3 install -v -U .[${GNOCCHI_VARIANT}] eval $(pifpaf --debug run gnocchi --indexer-url $INDEXER_URL --storage-url $STORAGE_URL) # Gnocchi 3.1 uses basic auth by default @@ -100,5 +100,5 @@ echo "* Checking output difference between Gnocchi $old_version and $new_version # archive policy for old in $GNOCCHI_DATA/old/*.json; do new=$GNOCCHI_DATA/new/$(basename $old) - python -c "import json; old = json.load(open('$old')); new = json.load(open('$new')); assert all(i in old for i in new)" + python3 -c "import json; old = json.load(open('$old')); new = json.load(open('$new')); assert all(i in old for i in new)" done diff --git a/tools/pretty_tox.sh b/tools/pretty_tox.sh index 799ac184..5ff76e47 100755 --- a/tools/pretty_tox.sh +++ b/tools/pretty_tox.sh @@ -10,7 +10,7 @@ TESTRARGS=$1 # # this work around exists until that is addressed if [[ "$TESTARGS" =~ "until-failure" ]]; then - python setup.py testr --slowest --testr-args="$TESTRARGS" + python3 setup.py testr --slowest --testr-args="$TESTRARGS" else - python setup.py testr --slowest --testr-args="--subunit $TESTRARGS" | subunit-trace -f + python3 setup.py testr --slowest --testr-args="--subunit $TESTRARGS" | subunit-trace -f fi diff --git a/tox.ini b/tox.ini index 24ca7933..686ed918 100644 --- a/tox.ini +++ b/tox.ini @@ -1,6 +1,6 @@ [tox] minversion = 2.4 -envlist = py{37,27}-{postgresql,mysql}{,-file,-swift,-ceph,-s3},pep8 +envlist = py37-{postgresql,mysql}{,-file,-swift,-ceph,-s3},pep8 skipsdist = True [testenv] @@ -61,7 +61,7 @@ deps = gnocchi[{env:GNOCCHI_VARIANT}]>=4.3,<4.4 xattr!=0.9.4 commands = pifpaf --env-prefix INDEXER run postgresql {toxinidir}/run-upgrade-tests.sh {posargs} 
-[testenv:py27-mysql-ceph-upgrade-from-4.3] +[testenv:py37-mysql-ceph-upgrade-from-4.3] # We should always recreate since the script upgrade # Gnocchi we can't reuse the virtualenv recreate = True @@ -73,11 +73,12 @@ deps = gnocchi[{env:GNOCCHI_VARIANT}]>=4.3,<4.4 commands = pifpaf --env-prefix INDEXER run mysql -- pifpaf --env-prefix STORAGE run ceph {toxinidir}/run-upgrade-tests.sh {posargs} [testenv:pep8] +basepython = python3 deps = hacking>=0.12,<0.13 commands = flake8 -[testenv:py27-cover] -commands = pifpaf -g GNOCCHI_INDEXER_URL run postgresql -- python setup.py testr --coverage --testr-args="{posargs}" +[testenv:py37-cover] +commands = pifpaf -g GNOCCHI_INDEXER_URL run postgresql -- python3 setup.py testr --coverage --testr-args="{posargs}" [flake8] exclude = .tox,.eggs,doc,gnocchi/rest/prometheus/remote_pb2.py @@ -96,10 +97,10 @@ deps = doc8 setenv = GNOCCHI_TEST_DEBUG=1 commands = doc8 --ignore-path doc/source/rest.rst,doc/source/comparison-table.rst doc/source - pifpaf -g GNOCCHI_INDEXER_URL run postgresql -- python setup.py build_sphinx -W + pifpaf -g GNOCCHI_INDEXER_URL run postgresql -- python3 setup.py build_sphinx -W [testenv:docs-gnocchi-web] -basepython = python2.7 +basepython = python3 whitelist_externals = bash rm setenv = GNOCCHI_STORAGE_DEPS=file GNOCCHI_TEST_DEBUG=1 @@ -108,7 +109,7 @@ deps = {[testenv:docs]deps} setuptools commands = rm -rf doc/build/html - pifpaf -g GNOCCHI_INDEXER_URL run postgresql -- python setup.py build_sphinx + pifpaf -g GNOCCHI_INDEXER_URL run postgresql -- python3 setup.py build_sphinx [doc8] ignore-path = doc/source/rest.rst,doc/source/comparison-table.rst -- GitLab From 230b806284e5d5554f3b9cb7b664b68d01610237 Mon Sep 17 00:00:00 2001 From: Tobias Urdin Date: Tue, 8 Sep 2020 00:24:54 +0200 Subject: [PATCH 1459/1483] Fix upgrade testing command not found There is a issue when executing pifpaf directly from tox, something with psutil/subprocess interaction that makes it not find the run-upgrade-tests.sh script. 
--- run-upgrade-tests.sh | 14 ++++++++++++-- tox.ini | 4 ++-- 2 files changed, 14 insertions(+), 4 deletions(-) diff --git a/run-upgrade-tests.sh b/run-upgrade-tests.sh index 0dc1d300..380635fb 100755 --- a/run-upgrade-tests.sh +++ b/run-upgrade-tests.sh @@ -1,5 +1,15 @@ #!/bin/bash -set -e +set -xe + +if [ "$1" == "postgresql-file" ]; then + eval $(pifpaf --debug --env-prefix INDEXER run postgresql) +elif [ "$1" == "mysql-ceph" ]; then + eval $(pifpaf --debug --env-prefix INDEXER run mysql) + eval $(pifpaf --debug --env-prefix STORAGE run ceph) +else + echo "error: unsupported upgrade type" + exit 1 +fi export GNOCCHI_DATA=$(mktemp -d -t gnocchi.XXXX) @@ -72,7 +82,7 @@ else STORAGE_URL=file://$GNOCCHI_DATA fi -eval $(pifpaf run gnocchi --indexer-url $INDEXER_URL --storage-url $STORAGE_URL) +eval $(pifpaf --debug run gnocchi --indexer-url $INDEXER_URL --storage-url $STORAGE_URL) export OS_AUTH_TYPE=gnocchi-basic export GNOCCHI_USER=$GNOCCHI_USER_ID original_statsd_resource_id=$GNOCCHI_STATSD_RESOURCE_ID diff --git a/tox.ini b/tox.ini index 686ed918..4616c55a 100644 --- a/tox.ini +++ b/tox.ini @@ -59,7 +59,7 @@ deps = gnocchi[{env:GNOCCHI_VARIANT}]>=4.3,<4.4 pifpaf[gnocchi]>=0.13 gnocchiclient>=2.8.0 xattr!=0.9.4 -commands = pifpaf --env-prefix INDEXER run postgresql {toxinidir}/run-upgrade-tests.sh {posargs} +commands = {toxinidir}/run-upgrade-tests.sh postgresql-file [testenv:py37-mysql-ceph-upgrade-from-4.3] # We should always recreate since the script upgrade @@ -70,7 +70,7 @@ deps = gnocchi[{env:GNOCCHI_VARIANT}]>=4.3,<4.4 gnocchiclient>=2.8.0 pifpaf[ceph,gnocchi]>=0.13 xattr!=0.9.4 -commands = pifpaf --env-prefix INDEXER run mysql -- pifpaf --env-prefix STORAGE run ceph {toxinidir}/run-upgrade-tests.sh {posargs} +commands = {toxinidir}/run-upgrade-tests.sh mysql-ceph [testenv:pep8] basepython = python3 -- GitLab From 94de0057d1b85d16db5fd254ff7f7c86e5a11f2a Mon Sep 17 00:00:00 2001 From: Tobias Urdin Date: Tue, 8 Sep 2020 10:22:24 +0200 Subject: [PATCH 
1460/1483] Revert "Remove wrong response data checks in gabbits" This reverts commit df3ed0a4a2772ca6224cb4a9c3093a7f1d728c6f. --- gnocchi/tests/functional/gabbits/resource-type.yaml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/gnocchi/tests/functional/gabbits/resource-type.yaml b/gnocchi/tests/functional/gabbits/resource-type.yaml index 91b2a584..babe6dcd 100644 --- a/gnocchi/tests/functional/gabbits/resource-type.yaml +++ b/gnocchi/tests/functional/gabbits/resource-type.yaml @@ -768,6 +768,8 @@ tests: min_length: 0 max_length: 255 status: 400 + response_strings: + - "can't remove non-existent object 'what'" - name: patch a resource attribute replace PATCH: /v1/resource_type/my_custom_resource @@ -816,6 +818,8 @@ tests: - op: remove path: /attributes/unknown status: 400 + response_strings: + - "can't remove non-existent object 'unknown'" # Ensure we can't delete the type -- GitLab From d580b7ddd33a666af446d720519a1fa43e10a32f Mon Sep 17 00:00:00 2001 From: Tobias Urdin Date: Tue, 8 Sep 2020 10:25:16 +0200 Subject: [PATCH 1461/1483] Add back recommendations and remove debug Debug causes Travis to drop the job because there is to much output. --- .testr.conf | 2 +- doc/source/contributing.rst | 4 ++-- .../functional/gabbits/resource-type.yaml | 4 ++-- run-upgrade-tests.sh | 24 ++++++++++--------- tools/pretty_tox.sh | 4 ++-- tox.ini | 11 +++++---- 6 files changed, 26 insertions(+), 23 deletions(-) diff --git a/.testr.conf b/.testr.conf index e5caedb8..6e2e4a5e 100644 --- a/.testr.conf +++ b/.testr.conf @@ -1,5 +1,5 @@ [DEFAULT] -test_command=${PYTHON:-python3} -m subunit.run discover -t . ${GNOCCHI_TEST_PATH:-gnocchi/tests} $LISTOPT $IDOPTION +test_command=${PYTHON:-python} -m subunit.run discover -t . 
${GNOCCHI_TEST_PATH:-gnocchi/tests} $LISTOPT $IDOPTION test_id_option=--load-list $IDFILE test_list_option=--list group_regex=(gabbi\.suitemaker\.test_gabbi((_live_|_)([^_]+)))_ diff --git a/doc/source/contributing.rst b/doc/source/contributing.rst index feda4dfd..12f56e22 100644 --- a/doc/source/contributing.rst +++ b/doc/source/contributing.rst @@ -66,8 +66,8 @@ a virtual environment for each test environment, so make sure you are using an up to date version of `virtualenv `_. Different test environments and configurations can be found by running the -``tox -l`` command. For example, to run tests with PostgreSQL as indexer, -and file as storage backend: +``tox -l`` command. For example, to run tests with Python 3.7, PostgreSQL as +indexer, and file as storage backend: :: diff --git a/gnocchi/tests/functional/gabbits/resource-type.yaml b/gnocchi/tests/functional/gabbits/resource-type.yaml index babe6dcd..747b051e 100644 --- a/gnocchi/tests/functional/gabbits/resource-type.yaml +++ b/gnocchi/tests/functional/gabbits/resource-type.yaml @@ -769,7 +769,7 @@ tests: max_length: 255 status: 400 response_strings: - - "can't remove non-existent object 'what'" + - "can't remove a non-existent object 'what'" - name: patch a resource attribute replace PATCH: /v1/resource_type/my_custom_resource @@ -819,7 +819,7 @@ tests: path: /attributes/unknown status: 400 response_strings: - - "can't remove non-existent object 'unknown'" + - "can't remove a non-existent object 'unknown'" # Ensure we can't delete the type diff --git a/run-upgrade-tests.sh b/run-upgrade-tests.sh index 380635fb..76c80e06 100755 --- a/run-upgrade-tests.sh +++ b/run-upgrade-tests.sh @@ -1,11 +1,11 @@ #!/bin/bash -set -xe +set -e if [ "$1" == "postgresql-file" ]; then - eval $(pifpaf --debug --env-prefix INDEXER run postgresql) + eval $(pifpaf --env-prefix INDEXER run postgresql) elif [ "$1" == "mysql-ceph" ]; then - eval $(pifpaf --debug --env-prefix INDEXER run mysql) - eval $(pifpaf --debug --env-prefix 
STORAGE run ceph) + eval $(pifpaf --env-prefix INDEXER run mysql) + eval $(pifpaf --env-prefix STORAGE run ceph) else echo "error: unsupported upgrade type" exit 1 @@ -13,7 +13,7 @@ fi export GNOCCHI_DATA=$(mktemp -d -t gnocchi.XXXX) -old_version=$(pip3 freeze | sed -n '/gnocchi==/s/.*==\(.*\)/\1/p') +old_version=$(pip freeze | sed -n '/gnocchi==/s/.*==\(.*\)/\1/p') RESOURCE_IDS=( "5a301761-aaaa-46e2-8900-8b4f6fe6675a" @@ -50,7 +50,7 @@ inject_data() { { measures_sep="" - MEASURES=$(python3 -c 'import datetime, random, json; now = datetime.datetime.utcnow(); print(json.dumps([{"timestamp": (now - datetime.timedelta(seconds=i)).isoformat(), "value": random.uniform(-100000, 100000)} for i in range(0, 288000, 10)]))') + MEASURES=$(python -c 'import datetime, random, json; now = datetime.datetime.utcnow(); print(json.dumps([{"timestamp": (now - datetime.timedelta(seconds=i)).isoformat(), "value": random.uniform(-100000, 100000)} for i in range(0, 288000, 10)]))') echo -n '{' resource_sep="" for resource_id in ${RESOURCE_IDS[@]} $RESOURCE_ID_EXT; do @@ -71,6 +71,8 @@ pifpaf_stop(){ cleanup(){ pifpaf_stop rm -rf $GNOCCHI_DATA + indexer_stop || true + [ "$STORAGE_DAEMON" == "ceph" ] && storage_stop || true } trap cleanup EXIT @@ -82,7 +84,7 @@ else STORAGE_URL=file://$GNOCCHI_DATA fi -eval $(pifpaf --debug run gnocchi --indexer-url $INDEXER_URL --storage-url $STORAGE_URL) +eval $(pifpaf run gnocchi --indexer-url $INDEXER_URL --storage-url $STORAGE_URL) export OS_AUTH_TYPE=gnocchi-basic export GNOCCHI_USER=$GNOCCHI_USER_ID original_statsd_resource_id=$GNOCCHI_STATSD_RESOURCE_ID @@ -90,11 +92,11 @@ inject_data $GNOCCHI_DATA dump_data $GNOCCHI_DATA/old pifpaf_stop -new_version=$(python3 setup.py --version) +new_version=$(python setup.py --version) echo "* Upgrading Gnocchi from $old_version to $new_version" -pip3 install -v -U .[${GNOCCHI_VARIANT}] +pip install -v -U .[${GNOCCHI_VARIANT}] -eval $(pifpaf --debug run gnocchi --indexer-url $INDEXER_URL --storage-url 
$STORAGE_URL) +eval $(pifpaf run gnocchi --indexer-url $INDEXER_URL --storage-url $STORAGE_URL) # Gnocchi 3.1 uses basic auth by default export OS_AUTH_TYPE=gnocchi-basic export GNOCCHI_USER=$GNOCCHI_USER_ID @@ -110,5 +112,5 @@ echo "* Checking output difference between Gnocchi $old_version and $new_version # archive policy for old in $GNOCCHI_DATA/old/*.json; do new=$GNOCCHI_DATA/new/$(basename $old) - python3 -c "import json; old = json.load(open('$old')); new = json.load(open('$new')); assert all(i in old for i in new)" + python -c "import json; old = json.load(open('$old')); new = json.load(open('$new')); assert all(i in old for i in new)" done diff --git a/tools/pretty_tox.sh b/tools/pretty_tox.sh index 5ff76e47..799ac184 100755 --- a/tools/pretty_tox.sh +++ b/tools/pretty_tox.sh @@ -10,7 +10,7 @@ TESTRARGS=$1 # # this work around exists until that is addressed if [[ "$TESTARGS" =~ "until-failure" ]]; then - python3 setup.py testr --slowest --testr-args="$TESTRARGS" + python setup.py testr --slowest --testr-args="$TESTRARGS" else - python3 setup.py testr --slowest --testr-args="--subunit $TESTRARGS" | subunit-trace -f + python setup.py testr --slowest --testr-args="--subunit $TESTRARGS" | subunit-trace -f fi diff --git a/tox.ini b/tox.ini index 4616c55a..c48ca155 100644 --- a/tox.ini +++ b/tox.ini @@ -46,6 +46,7 @@ deps = .[test,redis,prometheus,amqp1,{env:GNOCCHI_STORAGE_DEPS:},{env:GNOCCHI_INDEXER_DEPS:}] {env:GNOCCHI_TEST_TARBALLS:} cliff!=2.9.0 + gnocchiclient>=2.8.0,!=7.0.7 commands = {toxinidir}/run-tests.sh {posargs} {toxinidir}/run-func-tests.sh {posargs} @@ -57,7 +58,7 @@ recreate = True setenv = GNOCCHI_VARIANT=test,postgresql,file deps = gnocchi[{env:GNOCCHI_VARIANT}]>=4.3,<4.4 pifpaf[gnocchi]>=0.13 - gnocchiclient>=2.8.0 + gnocchiclient>=2.8.0,!=7.0.7 xattr!=0.9.4 commands = {toxinidir}/run-upgrade-tests.sh postgresql-file @@ -67,7 +68,7 @@ commands = {toxinidir}/run-upgrade-tests.sh postgresql-file recreate = True setenv = 
GNOCCHI_VARIANT=test,mysql,ceph,ceph_recommended_lib deps = gnocchi[{env:GNOCCHI_VARIANT}]>=4.3,<4.4 - gnocchiclient>=2.8.0 + gnocchiclient>=2.8.0,!=7.0.7 pifpaf[ceph,gnocchi]>=0.13 xattr!=0.9.4 commands = {toxinidir}/run-upgrade-tests.sh mysql-ceph @@ -78,7 +79,7 @@ deps = hacking>=0.12,<0.13 commands = flake8 [testenv:py37-cover] -commands = pifpaf -g GNOCCHI_INDEXER_URL run postgresql -- python3 setup.py testr --coverage --testr-args="{posargs}" +commands = pifpaf -g GNOCCHI_INDEXER_URL run postgresql -- python setup.py testr --coverage --testr-args="{posargs}" [flake8] exclude = .tox,.eggs,doc,gnocchi/rest/prometheus/remote_pb2.py @@ -97,7 +98,7 @@ deps = doc8 setenv = GNOCCHI_TEST_DEBUG=1 commands = doc8 --ignore-path doc/source/rest.rst,doc/source/comparison-table.rst doc/source - pifpaf -g GNOCCHI_INDEXER_URL run postgresql -- python3 setup.py build_sphinx -W + pifpaf -g GNOCCHI_INDEXER_URL run postgresql -- python setup.py build_sphinx -W [testenv:docs-gnocchi-web] basepython = python3 @@ -109,7 +110,7 @@ deps = {[testenv:docs]deps} setuptools commands = rm -rf doc/build/html - pifpaf -g GNOCCHI_INDEXER_URL run postgresql -- python3 setup.py build_sphinx + pifpaf -g GNOCCHI_INDEXER_URL run postgresql -- python setup.py build_sphinx [doc8] ignore-path = doc/source/rest.rst,doc/source/comparison-table.rst -- GitLab From 8ce26aeb7183d112d8cb194fbaa3fab32efff69b Mon Sep 17 00:00:00 2001 From: James Page Date: Tue, 1 Oct 2019 11:46:03 +0100 Subject: [PATCH 1462/1483] Fix compatibility with MySQL >= 8 Under newer MySQL versions check constraints are supported and must have unique names. Drop and re-create the check constraints on the resource and resource_history tables to ensure that they are uniquely named for upgrades in existing deployments. Ensure that check constraints are created with unique names in new deployments. 
--- ...a72e4f90_rename_ck_started_before_ended.py | 55 +++++++++++++++++++ .../40c6aae14c3f_ck_started_before_ended.py | 4 +- gnocchi/indexer/sqlalchemy.py | 4 +- gnocchi/indexer/sqlalchemy_base.py | 11 +++- 4 files changed, 68 insertions(+), 6 deletions(-) create mode 100644 gnocchi/indexer/alembic/versions/04eba72e4f90_rename_ck_started_before_ended.py diff --git a/gnocchi/indexer/alembic/versions/04eba72e4f90_rename_ck_started_before_ended.py b/gnocchi/indexer/alembic/versions/04eba72e4f90_rename_ck_started_before_ended.py new file mode 100644 index 00000000..e449cc71 --- /dev/null +++ b/gnocchi/indexer/alembic/versions/04eba72e4f90_rename_ck_started_before_ended.py @@ -0,0 +1,55 @@ +# Copyright 2019 The Gnocchi Developers +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""rename ck_started_before_ended + +Revision ID: 04eba72e4f90 +Revises: 1e1a63d3d186 +Create Date: 2019-10-01 11:19:38.865522 + +""" + +from alembic import op +from sqlalchemy.engine.reflection import Inspector + +# revision identifiers, used by Alembic. 
+revision = '04eba72e4f90' +down_revision = '1e1a63d3d186' +branch_labels = None +depends_on = None + + +def upgrade(): + bind = op.get_bind() + inspector = Inspector.from_engine(bind) + + for table in ("resource", "resource_history"): + existing_cks = [ + c['name'] for c in inspector.get_check_constraints(table) + ] + if "ck_started_before_ended" in existing_cks: + # Drop non-uniquely named check constraints + # for consistency across DB types. + op.drop_constraint("ck_started_before_ended", + table, + type_="check") + + new_ck_name = "ck_{}_started_before_ended".format(table) + if new_ck_name not in existing_cks: + # Re-create check constraint with unique name + # if needed + op.create_check_constraint(new_ck_name, + table, + "started_at <= ended_at") diff --git a/gnocchi/indexer/alembic/versions/40c6aae14c3f_ck_started_before_ended.py b/gnocchi/indexer/alembic/versions/40c6aae14c3f_ck_started_before_ended.py index cf6922c9..4fa1ff06 100644 --- a/gnocchi/indexer/alembic/versions/40c6aae14c3f_ck_started_before_ended.py +++ b/gnocchi/indexer/alembic/versions/40c6aae14c3f_ck_started_before_ended.py @@ -31,9 +31,9 @@ from alembic import op def upgrade(): - op.create_check_constraint("ck_started_before_ended", + op.create_check_constraint("ck_resource_started_before_ended", "resource", "started_at <= ended_at") - op.create_check_constraint("ck_started_before_ended", + op.create_check_constraint("ck_resource_history_started_before_ended", "resource_history", "started_at <= ended_at") diff --git a/gnocchi/indexer/sqlalchemy.py b/gnocchi/indexer/sqlalchemy.py index f9c0dbc0..1265194a 100644 --- a/gnocchi/indexer/sqlalchemy.py +++ b/gnocchi/indexer/sqlalchemy.py @@ -880,7 +880,9 @@ class SQLAlchemyIndexer(indexer.IndexerDriver): session.flush() except exception.DBConstraintError as e: - if e.check_name == "ck_started_before_ended": + if e.check_name in ( + "ck_resource_started_before_ended", + "ck_resource_history_started_before_ended"): raise indexer.ResourceValueError( 
resource_type, "ended_at", ended_at) raise diff --git a/gnocchi/indexer/sqlalchemy_base.py b/gnocchi/indexer/sqlalchemy_base.py index 7def0cf4..6bd76439 100644 --- a/gnocchi/indexer/sqlalchemy_base.py +++ b/gnocchi/indexer/sqlalchemy_base.py @@ -221,9 +221,14 @@ class ResourceJsonifier(indexer.Resource): class ResourceMixin(ResourceJsonifier): @declarative.declared_attr def __table_args__(cls): - return (sqlalchemy.CheckConstraint('started_at <= ended_at', - name="ck_started_before_ended"), - COMMON_TABLES_ARGS) + return (sqlalchemy.CheckConstraint( + 'started_at <= ended_at', + name="ck_{}_started_before_ended".format( + cls.__tablename__ + ) + ), + COMMON_TABLES_ARGS + ) @declarative.declared_attr def type(cls): -- GitLab From d5a782e58fc5b3593672fb9c8ac86fddc0c0dedf Mon Sep 17 00:00:00 2001 From: Tobias Urdin Date: Wed, 16 Sep 2020 21:28:48 +0200 Subject: [PATCH 1463/1483] Use Bionic as dist in Travis --- .travis.yml | 9 +++++---- gnocchi/common/ceph.py | 2 +- gnocchi/tests/base.py | 15 ++++++++++++--- .../functional/gabbits/.metric-derived.yaml.swp | Bin 0 -> 1024 bytes tox.ini | 10 +++++----- 5 files changed, 23 insertions(+), 13 deletions(-) create mode 100644 gnocchi/tests/functional/gabbits/.metric-derived.yaml.swp diff --git a/.travis.yml b/.travis.yml index 2da9ac80..6a41c9de 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,3 +1,4 @@ +dist: bionic language: generic sudo: required @@ -12,11 +13,11 @@ env: - TARGET: docs - TARGET: docs-gnocchi-web - - TARGET: py37-mysql-ceph-upgrade-from-4.3 - - TARGET: py37-postgresql-file-upgrade-from-4.3 + - TARGET: py36-mysql-ceph-upgrade-from-4.3 + - TARGET: py36-postgresql-file-upgrade-from-4.3 - - TARGET: py37-mysql - - TARGET: py37-postgresql + - TARGET: py36-mysql + - TARGET: py36-postgresql before_script: # NOTE(sileht): We need to fetch all tags/branches for documentation. 
diff --git a/gnocchi/common/ceph.py b/gnocchi/common/ceph.py index 407aa44a..1d7513b5 100644 --- a/gnocchi/common/ceph.py +++ b/gnocchi/common/ceph.py @@ -20,7 +20,7 @@ import daiquiri LOG = daiquiri.getLogger(__name__) -for RADOS_MODULE_NAME in ('cradox', 'rados'): +for RADOS_MODULE_NAME in ('rados', 'cradox'): try: rados = __import__(RADOS_MODULE_NAME) except ImportError: diff --git a/gnocchi/tests/base.py b/gnocchi/tests/base.py index 9a7e8396..135d2a78 100644 --- a/gnocchi/tests/base.py +++ b/gnocchi/tests/base.py @@ -345,12 +345,12 @@ class TestCase(BaseTestCase): self.conf.set_override('ceph_conffile', os.getenv("CEPH_CONF"), 'storage') - pool_name = uuid.uuid4().hex + self.ceph_pool_name = uuid.uuid4().hex with open(os.devnull, 'w') as f: subprocess.call("rados -c %s mkpool %s" % ( - os.getenv("CEPH_CONF"), pool_name), shell=True, + os.getenv("CEPH_CONF"), self.ceph_pool_name), shell=True, stdout=f, stderr=subprocess.STDOUT) - self.conf.set_override('ceph_pool', pool_name, 'storage') + self.conf.set_override('ceph_pool', self.ceph_pool_name, 'storage') # Override the bucket prefix to be unique to avoid concurrent access # with any other test @@ -377,6 +377,15 @@ class TestCase(BaseTestCase): def tearDown(self): self.index.disconnect() self.coord.stop() + + if self.conf.storage.driver == 'ceph': + with open(os.devnull, 'w') as f: + ceph_rmpool_command = "rados -c %s rmpool %s %s \ +--yes-i-really-really-mean-it" % (os.getenv("CEPH_CONF"), self.ceph_pool_name, + self.ceph_pool_name) + subprocess.call(ceph_rmpool_command, shell=True, + stdout=f, stderr=subprocess.STDOUT) + super(TestCase, self).tearDown() def _create_metric(self, archive_policy_name="low"): diff --git a/gnocchi/tests/functional/gabbits/.metric-derived.yaml.swp b/gnocchi/tests/functional/gabbits/.metric-derived.yaml.swp new file mode 100644 index 0000000000000000000000000000000000000000..69aba64667bdeecb538f1715b82a2699cf4e70a5 GIT binary patch literal 1024 
zcmYc?$V<%2S1{5u)iY*50`t8X7)pvV)6-LnkVLUFi!)Mlaw@UQA&aKx=4.3,<4.4 xattr!=0.9.4 commands = {toxinidir}/run-upgrade-tests.sh postgresql-file -[testenv:py37-mysql-ceph-upgrade-from-4.3] +[testenv:py36-mysql-ceph-upgrade-from-4.3] # We should always recreate since the script upgrade # Gnocchi we can't reuse the virtualenv recreate = True @@ -78,7 +78,7 @@ basepython = python3 deps = hacking>=0.12,<0.13 commands = flake8 -[testenv:py37-cover] +[testenv:py36-cover] commands = pifpaf -g GNOCCHI_INDEXER_URL run postgresql -- python setup.py testr --coverage --testr-args="{posargs}" [flake8] -- GitLab From bf37e90edaa693880a6f025b1c9329579a7b9882 Mon Sep 17 00:00:00 2001 From: Matthias Runge Date: Mon, 7 Sep 2020 08:36:31 +0200 Subject: [PATCH 1464/1483] cradox is not supported anymore. Update the docs etc. to use rados directly. --- doc/source/install.rst | 7 +------ gnocchi/common/ceph.py | 19 +++++++------------ 2 files changed, 8 insertions(+), 18 deletions(-) diff --git a/doc/source/install.rst b/doc/source/install.rst index 079df6d7..bf661149 100644 --- a/doc/source/install.rst +++ b/doc/source/install.rst @@ -55,12 +55,7 @@ be created for example with: Gnocchi leverages some *librados* features (omap, async, operation context) -available in the Python binding only since *python-rados* >= 12.2.0. To handle -this, Gnocchi uses *cradox* python library which has exactly the same API but -works with Ceph >= 0.80.0. - -If Ceph and python-rados are >= 12.2.0, the cradox Python library becomes -optional but is still recommended. +available in the Ceph Python binding only since *python-rados* >= 12.2.0. 
Configuration diff --git a/gnocchi/common/ceph.py b/gnocchi/common/ceph.py index 1d7513b5..4d508cc8 100644 --- a/gnocchi/common/ceph.py +++ b/gnocchi/common/ceph.py @@ -19,15 +19,10 @@ import daiquiri LOG = daiquiri.getLogger(__name__) - -for RADOS_MODULE_NAME in ('rados', 'cradox'): - try: - rados = __import__(RADOS_MODULE_NAME) - except ImportError: - pass - else: - break -else: +RADOS_MODULE_NAME = 'rados' +try: + rados = __import__(RADOS_MODULE_NAME) +except ImportError: RADOS_MODULE_NAME = None rados = None @@ -48,12 +43,12 @@ def create_rados_connection(conf): options['client_mount_timeout'] = conf.ceph_timeout if not rados: - raise ImportError("No module named 'rados' nor 'cradox'") + raise ImportError("No module named 'rados'") if not hasattr(rados, 'OmapIterator'): raise ImportError("Your rados python module does not support " - "omap feature. Install 'cradox' (recommended) " - "or upgrade 'python-rados' >= 9.1.0 ") + "omap feature. Install or upgrade " + "'python-rados' >= 9.1.0 ") LOG.info("Ceph storage backend use '%s' python library", RADOS_MODULE_NAME) -- GitLab From 2574c9cdbbd2f64e0a54a38f4269c5e7ff636d96 Mon Sep 17 00:00:00 2001 From: Matthias Runge Date: Thu, 17 Sep 2020 14:57:54 +0200 Subject: [PATCH 1465/1483] Pull rados as dependency from distribution package --- .travis.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/.travis.yml b/.travis.yml index 6a41c9de..0a45307a 100644 --- a/.travis.yml +++ b/.travis.yml @@ -47,6 +47,9 @@ before_script: esac ; set +x; fi + + # pull python3-rados from distro packages + sudo apt install -y python3-rados install: - if \[ "$TRAVIS_PULL_REQUEST" != "false" -o -n "$TRAVIS_TAG" \]; then docker pull gnocchixyz/ci-tools:latest; -- GitLab From b2f4e5c877f59f27a5ccab635a34350ab0781679 Mon Sep 17 00:00:00 2001 From: Matthias Runge Date: Wed, 30 Sep 2020 12:33:46 +0200 Subject: [PATCH 1466/1483] Revert "Pull rados as dependency from distribution package" This reverts commit 
1daba130be03f8bcc2654677ad9b69db70bb74ce. --- .travis.yml | 3 --- 1 file changed, 3 deletions(-) diff --git a/.travis.yml b/.travis.yml index 0a45307a..6a41c9de 100644 --- a/.travis.yml +++ b/.travis.yml @@ -47,9 +47,6 @@ before_script: esac ; set +x; fi - - # pull python3-rados from distro packages - sudo apt install -y python3-rados install: - if \[ "$TRAVIS_PULL_REQUEST" != "false" -o -n "$TRAVIS_TAG" \]; then docker pull gnocchixyz/ci-tools:latest; -- GitLab From c774675db41e64ffa8f341caa345f3bdc47646a0 Mon Sep 17 00:00:00 2001 From: Tobias Urdin Date: Thu, 1 Oct 2020 22:44:12 +0200 Subject: [PATCH 1467/1483] Remove Cradox references --- gnocchi/carbonara.py | 5 ++--- setup.cfg | 2 +- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/gnocchi/carbonara.py b/gnocchi/carbonara.py index 1bc5c280..72747ed8 100644 --- a/gnocchi/carbonara.py +++ b/gnocchi/carbonara.py @@ -311,9 +311,8 @@ class TimeSerie(object): @staticmethod def _compress(payload): - # FIXME(jd) lz4 > 0.9.2 returns bytearray instead of bytes. But Cradox - # does not accept bytearray but only bytes, so make sure that we have a - # byte type returned. + # FIXME(jd) lz4 > 0.9.2 returns bytearray instead of bytes but we need + # a byte type returned. 
return memoryview(lz4.block.compress(payload)).tobytes() diff --git a/setup.cfg b/setup.cfg index 9940280a..ce41fd7b 100644 --- a/setup.cfg +++ b/setup.cfg @@ -75,7 +75,7 @@ redis = swift = python-swiftclient>=3.1.0 ceph = - cradox>=2.0.0 + # No deps - need rados dist package ceph_alternative = python-rados>=12.2.0 # not available on pypi prometheus = -- GitLab From 3764650c276dc09db631e1f8ef6cb00411df532e Mon Sep 17 00:00:00 2001 From: Tobias Urdin Date: Thu, 1 Oct 2020 22:45:24 +0200 Subject: [PATCH 1468/1483] Add release note about Cradox removed --- .../notes/remove-cradox-support-aa0e5f7546484bed.yaml | 5 +++++ 1 file changed, 5 insertions(+) create mode 100644 releasenotes/notes/remove-cradox-support-aa0e5f7546484bed.yaml diff --git a/releasenotes/notes/remove-cradox-support-aa0e5f7546484bed.yaml b/releasenotes/notes/remove-cradox-support-aa0e5f7546484bed.yaml new file mode 100644 index 00000000..e1819b96 --- /dev/null +++ b/releasenotes/notes/remove-cradox-support-aa0e5f7546484bed.yaml @@ -0,0 +1,5 @@ +--- +upgrade: + - | + Cradox is no longer maintained or supported, please use the python rados + module that can be installation from your distributions package manager. -- GitLab From bd2cafadd0961231748cf4b67efd28c6db277b04 Mon Sep 17 00:00:00 2001 From: Tobias Urdin Date: Sat, 11 Jul 2020 01:40:11 +0200 Subject: [PATCH 1469/1483] Ensure member statistics key is decoded In the StatusController the statistics for the members on python3 is a byte string which cannot be jsonified. This changes so it loops through the statistics for the member and ensures the key is a string and not a byte string. 
--- gnocchi/rest/api.py | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/gnocchi/rest/api.py b/gnocchi/rest/api.py index 31266381..c6812c97 100644 --- a/gnocchi/rest/api.py +++ b/gnocchi/rest/api.py @@ -2116,10 +2116,14 @@ class StatusController(rest.RestController): report_dict['metricd']['processors'] = [ member.decode() for member in members ] - report_dict['metricd']['statistics'] = { - member.decode(): cap.get() - for member, cap in six.moves.zip(members, caps) - } + members_data = {} + for member, cap in six.moves.zip(members, caps): + caps_data = { + six.ensure_str(k): v + for k, v in six.iteritems(cap.get()) + } + members_data[member.decode()] = caps_data + report_dict['metricd']['statistics'] = members_data else: report_dict['metricd']['processors'] = None report_dict['metricd']['statistics'] = {} -- GitLab From 64ff78fb27bff33fc8c284f7dd7267da0676c14d Mon Sep 17 00:00:00 2001 From: Tobias Urdin Date: Thu, 1 Oct 2020 23:42:24 +0200 Subject: [PATCH 1470/1483] Add release note about MySQL >= 8 support --- releasenotes/notes/mysql-relnote-a3713efbc8f4dd6b.yaml | 4 ++++ 1 file changed, 4 insertions(+) create mode 100644 releasenotes/notes/mysql-relnote-a3713efbc8f4dd6b.yaml diff --git a/releasenotes/notes/mysql-relnote-a3713efbc8f4dd6b.yaml b/releasenotes/notes/mysql-relnote-a3713efbc8f4dd6b.yaml new file mode 100644 index 00000000..395ee880 --- /dev/null +++ b/releasenotes/notes/mysql-relnote-a3713efbc8f4dd6b.yaml @@ -0,0 +1,4 @@ +--- +features: + - | + Gnocchi now supports MySQL >= 8 -- GitLab From eb7524fd2e844d2328a70f3f03dc416842ae2931 Mon Sep 17 00:00:00 2001 From: Thomas Goirand Date: Mon, 19 Oct 2020 23:06:35 +0200 Subject: [PATCH 1471/1483] Fixed debian/watch. 
--- debian/changelog | 7 +++++++ debian/watch | 5 ++--- 2 files changed, 9 insertions(+), 3 deletions(-) diff --git a/debian/changelog b/debian/changelog index cd0a7103..7a34f5be 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,10 @@ +gnocchi (4.4.0-1) unstable; urgency=medium + + * New upstream release. + * Fixed debian/watch. + + -- Thomas Goirand Mon, 19 Oct 2020 23:04:47 +0200 + gnocchi (4.3.4-3) unstable; urgency=medium * Rebuild source-only. diff --git a/debian/watch b/debian/watch index 4336d842..360af16c 100644 --- a/debian/watch +++ b/debian/watch @@ -1,4 +1,3 @@ version=3 -opts="uversionmangle=s/\.(b|rc)/~$1/" \ -https://github.com/gnocchixyz/gnocchi/tags .*/(\d[\d\.]+)\.tar\.gz - +opts="uversionmangle=s/\.0rc/~rc/;s/\.0b1/~b1/;s/\.0b2/~b2/;s/\.0b3/~b3/" \ +https://github.com/gnocchixyz/gnocchi/tags .*/(\d[brc\d\.]+)\.tar\.gz -- GitLab From eaa86740105b68ffc9e66a1fa0308f26f1f4efea Mon Sep 17 00:00:00 2001 From: Thomas Goirand Date: Mon, 19 Oct 2020 23:07:00 +0200 Subject: [PATCH 1472/1483] Add a debian/salsa-ci.yml. --- debian/changelog | 1 + debian/salsa-ci.yml | 3 +++ 2 files changed, 4 insertions(+) create mode 100644 debian/salsa-ci.yml diff --git a/debian/changelog b/debian/changelog index 7a34f5be..918219a6 100644 --- a/debian/changelog +++ b/debian/changelog @@ -2,6 +2,7 @@ gnocchi (4.4.0-1) unstable; urgency=medium * New upstream release. * Fixed debian/watch. + * Add a debian/salsa-ci.yml. 
-- Thomas Goirand Mon, 19 Oct 2020 23:04:47 +0200 diff --git a/debian/salsa-ci.yml b/debian/salsa-ci.yml new file mode 100644 index 00000000..0c22dc43 --- /dev/null +++ b/debian/salsa-ci.yml @@ -0,0 +1,3 @@ +include: + - https://salsa.debian.org/salsa-ci-team/pipeline/raw/master/salsa-ci.yml + - https://salsa.debian.org/salsa-ci-team/pipeline/raw/master/pipeline-jobs.yml -- GitLab From ea5648e671ee753d92077fb0843edacfc17885d0 Mon Sep 17 00:00:00 2001 From: Thomas Goirand Date: Mon, 19 Oct 2020 23:10:16 +0200 Subject: [PATCH 1473/1483] Removed py3-compat.patch. --- debian/changelog | 1 + debian/patches/py3-compat.patch | 42 --------------------------------- debian/patches/series | 1 - 3 files changed, 1 insertion(+), 43 deletions(-) delete mode 100644 debian/patches/py3-compat.patch diff --git a/debian/changelog b/debian/changelog index 918219a6..d6ebf2f0 100644 --- a/debian/changelog +++ b/debian/changelog @@ -3,6 +3,7 @@ gnocchi (4.4.0-1) unstable; urgency=medium * New upstream release. * Fixed debian/watch. * Add a debian/salsa-ci.yml. + * Removed py3-compat.patch. 
-- Thomas Goirand Mon, 19 Oct 2020 23:04:47 +0200 diff --git a/debian/patches/py3-compat.patch b/debian/patches/py3-compat.patch deleted file mode 100644 index d877ef39..00000000 --- a/debian/patches/py3-compat.patch +++ /dev/null @@ -1,42 +0,0 @@ -Description: Ensure member_id is correctly encoded -Author: James Page -Forwarded: no - -Index: gnocchi/gnocchi/cli/metricd.py -=================================================================== ---- gnocchi.orig/gnocchi/cli/metricd.py -+++ gnocchi/gnocchi/cli/metricd.py -@@ -60,12 +60,14 @@ class MetricProcessBase(cotyledon.Servic - self._wake_up.set() - - def _configure(self): -- member_id = "%s.%s.%s" % (socket.gethostname(), -- self.worker_id, -- # NOTE(jd) Still use a uuid here so we're -- # sure there's no conflict in case of -- # crash/restart -- str(uuid.uuid4())) -+ member_id = str.encode( -+ "%s.%s.%s" % (socket.gethostname(), -+ self.worker_id, -+ # NOTE(jd) Still use a uuid here so we're -+ # sure there's no conflict in case of -+ # crash/restart -+ str(uuid.uuid4())) -+ ) - self.coord = get_coordinator_and_start(member_id, - self.conf.coordination_url) - self.store = storage.get_driver(self.conf) -Index: gnocchi/gnocchi/rest/app.py -=================================================================== ---- gnocchi.orig/gnocchi/rest/app.py -+++ gnocchi/gnocchi/rest/app.py -@@ -101,7 +101,7 @@ class GnocchiHook(pecan.hooks.PecanHook) - # entirely. 
- self.backends[name] = ( - metricd.get_coordinator_and_start( -- str(uuid.uuid4()), -+ str.encode(str(uuid.uuid4())), - self.conf.coordination_url) - ) - elif name == "storage": diff --git a/debian/patches/series b/debian/patches/series index 28ad9f4e..2288badf 100644 --- a/debian/patches/series +++ b/debian/patches/series @@ -1,3 +1,2 @@ -py3-compat.patch no-distutils-usage.diff install-missing-files.patch -- GitLab From 1875394e45a70f3975f4356785776492a426ba75 Mon Sep 17 00:00:00 2001 From: Thomas Goirand Date: Mon, 19 Oct 2020 23:11:25 +0200 Subject: [PATCH 1474/1483] Fixed diff with upstream tag. --- gnocchi/tests/test_storage.py | 28 ---------------------------- 1 file changed, 28 deletions(-) diff --git a/gnocchi/tests/test_storage.py b/gnocchi/tests/test_storage.py index 8f53dd76..903f7e6c 100644 --- a/gnocchi/tests/test_storage.py +++ b/gnocchi/tests/test_storage.py @@ -677,34 +677,6 @@ class TestStorageDriver(tests_base.TestCase): with mock.patch.object(driver.__class__, 'WRITE_FULL', False): self.trigger_processing() - def test_rewrite_measures_multiple_granularities(self): - apname = str(uuid.uuid4()) - # Create an archive policy with two different granularities - ap = archive_policy.ArchivePolicy(apname, 0, [(36000, 60), (36000, 1)]) - self.index.create_archive_policy(ap) - self.metric = indexer.Metric(uuid.uuid4(), ap) - self.index.create_metric(self.metric.id, str(uuid.uuid4()), - apname) - - # First store some points - self.incoming.add_measures(self.metric.id, [ - incoming.Measure(datetime64(2016, 1, 6, 18, 15, 46), 43), - incoming.Measure(datetime64(2016, 1, 6, 18, 15, 47), 43), - incoming.Measure(datetime64(2016, 1, 6, 18, 15, 48), 43), - ]) - self.trigger_processing() - - # Add some more points, mocking out WRITE_FULL attribute of the current - # driver, so that rewrite happens - self.incoming.add_measures(self.metric.id, [ - incoming.Measure(datetime64(2016, 1, 7, 18, 15, 49), 43), - incoming.Measure(datetime64(2016, 1, 7, 18, 15, 50), 43), - 
incoming.Measure(datetime64(2016, 1, 7, 18, 18, 46), 43), - ]) - driver = storage.get_driver(self.conf) - with mock.patch.object(driver.__class__, 'WRITE_FULL', False): - self.trigger_processing() - def test_rewrite_measures_oldest_mutable_timestamp_eq_next_key(self): """See LP#1655422""" # Create an archive policy that spans on several splits. Each split -- GitLab From f719a534b6d1f69cbcdfd67ae92290acd1919cc8 Mon Sep 17 00:00:00 2001 From: Thomas Goirand Date: Sat, 21 Nov 2020 00:13:34 +0100 Subject: [PATCH 1475/1483] Add a debian/README.Debian telling the package cannot be used with SQLite. --- debian/README.Debian | 2 ++ debian/changelog | 6 ++++++ 2 files changed, 8 insertions(+) create mode 100644 debian/README.Debian diff --git a/debian/README.Debian b/debian/README.Debian new file mode 100644 index 00000000..cec7d65e --- /dev/null +++ b/debian/README.Debian @@ -0,0 +1,2 @@ +Note that even though dbconfig-common proposes such a choice, this package +currently doesn't support using SQLite. You've been warned. diff --git a/debian/changelog b/debian/changelog index d6ebf2f0..420fc202 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +gnocchi (4.4.0-2) UNRELEASED; urgency=medium + + * Add a debian/README.Debian telling the package cannot be used with SQLite. + + -- Thomas Goirand Sat, 21 Nov 2020 00:13:14 +0100 + gnocchi (4.4.0-1) unstable; urgency=medium * New upstream release. -- GitLab From 03947d3e279f45e01fab8f9b74e726ff8488310c Mon Sep 17 00:00:00 2001 From: Thomas Goirand Date: Wed, 16 Dec 2020 12:16:55 +0100 Subject: [PATCH 1476/1483] Add add-header = Connection: close in gnochi-api-uwsgi.ini. 
--- debian/changelog | 5 +++-- debian/gnocchi-api-uwsgi.ini | 1 + 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/debian/changelog b/debian/changelog index 420fc202..2e144bf1 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,8 +1,9 @@ -gnocchi (4.4.0-2) UNRELEASED; urgency=medium +gnocchi (4.4.0-2) unstable; urgency=medium * Add a debian/README.Debian telling the package cannot be used with SQLite. + * Add add-header = Connection: close in gnochi-api-uwsgi.ini. - -- Thomas Goirand Sat, 21 Nov 2020 00:13:14 +0100 + -- Thomas Goirand Wed, 16 Dec 2020 12:16:29 +0100 gnocchi (4.4.0-1) unstable; urgency=medium diff --git a/debian/gnocchi-api-uwsgi.ini b/debian/gnocchi-api-uwsgi.ini index 64d53d0f..19ccfa2f 100644 --- a/debian/gnocchi-api-uwsgi.ini +++ b/debian/gnocchi-api-uwsgi.ini @@ -37,6 +37,7 @@ die-on-term = true ################################## ### OpenStack service specific ### ################################## +add-header = Connection: close # This is the standard port for the WSGI application, listening on all available IPs logto = /var/log/gnocchi/gnocchi-api.log -- GitLab From 8be794bd7cc13385c0c72f4bd2c1b04192bcbf6b Mon Sep 17 00:00:00 2001 From: Thomas Goirand Date: Wed, 16 Dec 2020 12:17:54 +0100 Subject: [PATCH 1477/1483] Configure with threads = 32 by default (as per Gnocchi's doc). --- debian/changelog | 1 + debian/gnocchi-api-uwsgi.ini | 2 ++ 2 files changed, 3 insertions(+) diff --git a/debian/changelog b/debian/changelog index 2e144bf1..6f426bd5 100644 --- a/debian/changelog +++ b/debian/changelog @@ -2,6 +2,7 @@ gnocchi (4.4.0-2) unstable; urgency=medium * Add a debian/README.Debian telling the package cannot be used with SQLite. * Add add-header = Connection: close in gnochi-api-uwsgi.ini. + * Configure with threads = 32 by default (as per Gnocchi's doc). 
-- Thomas Goirand Wed, 16 Dec 2020 12:16:29 +0100 diff --git a/debian/gnocchi-api-uwsgi.ini b/debian/gnocchi-api-uwsgi.ini index 19ccfa2f..0c2f6f0e 100644 --- a/debian/gnocchi-api-uwsgi.ini +++ b/debian/gnocchi-api-uwsgi.ini @@ -15,6 +15,8 @@ master = true # Threads and processes enable-threads = true +threads = 32 + processes = 8 # uwsgi recommends this to prevent thundering herd on accept. -- GitLab From 275cf39efc8d8462dce293c888e0bbd2e8418050 Mon Sep 17 00:00:00 2001 From: Thomas Goirand Date: Mon, 1 Feb 2021 17:00:58 +0100 Subject: [PATCH 1478/1483] Add threads = 12 and listen = 100 in the default UWSGI config. --- debian/changelog | 6 ++++++ debian/gnocchi-api-uwsgi.ini | 4 ++++ 2 files changed, 10 insertions(+) diff --git a/debian/changelog b/debian/changelog index 6f426bd5..2192eea3 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +gnocchi (4.4.0-3) unstable; urgency=medium + + * Add threads = 12 and listen = 100 in the default UWSGI config. + + -- Thomas Goirand Mon, 01 Feb 2021 17:00:37 +0100 + gnocchi (4.4.0-2) unstable; urgency=medium * Add a debian/README.Debian telling the package cannot be used with SQLite. diff --git a/debian/gnocchi-api-uwsgi.ini b/debian/gnocchi-api-uwsgi.ini index 0c2f6f0e..3dbb29c9 100644 --- a/debian/gnocchi-api-uwsgi.ini +++ b/debian/gnocchi-api-uwsgi.ini @@ -41,6 +41,10 @@ die-on-term = true ################################## add-header = Connection: close +# This makes the Gnocchi API much more efficient: +threads = 12 +listen = 100 + # This is the standard port for the WSGI application, listening on all available IPs logto = /var/log/gnocchi/gnocchi-api.log name = gnocchi-api -- GitLab From 5c48ca7169ae72359e38b78cf63c755291f6608b Mon Sep 17 00:00:00 2001 From: Thomas Goirand Date: Mon, 1 Feb 2021 20:58:20 +0100 Subject: [PATCH 1479/1483] Fix changelog. 
--- debian/changelog | 18 ++++-------------- debian/gnocchi-api-uwsgi.ini | 1 - 2 files changed, 4 insertions(+), 15 deletions(-) diff --git a/debian/changelog b/debian/changelog index 2192eea3..b243ad44 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,23 +1,13 @@ -gnocchi (4.4.0-3) unstable; urgency=medium - - * Add threads = 12 and listen = 100 in the default UWSGI config. - - -- Thomas Goirand Mon, 01 Feb 2021 17:00:37 +0100 - -gnocchi (4.4.0-2) unstable; urgency=medium - - * Add a debian/README.Debian telling the package cannot be used with SQLite. - * Add add-header = Connection: close in gnochi-api-uwsgi.ini. - * Configure with threads = 32 by default (as per Gnocchi's doc). - - -- Thomas Goirand Wed, 16 Dec 2020 12:16:29 +0100 - gnocchi (4.4.0-1) unstable; urgency=medium * New upstream release. * Fixed debian/watch. * Add a debian/salsa-ci.yml. * Removed py3-compat.patch. + * Add a debian/README.Debian telling the package cannot be used with SQLite. + * Add add-header = Connection: close in gnochi-api-uwsgi.ini. + * Configure with threads = 32 by default (as per Gnocchi's doc). + * Add listen = 100 in the default UWSGI config. -- Thomas Goirand Mon, 19 Oct 2020 23:04:47 +0200 diff --git a/debian/gnocchi-api-uwsgi.ini b/debian/gnocchi-api-uwsgi.ini index 3dbb29c9..287156c5 100644 --- a/debian/gnocchi-api-uwsgi.ini +++ b/debian/gnocchi-api-uwsgi.ini @@ -42,7 +42,6 @@ die-on-term = true add-header = Connection: close # This makes the Gnocchi API much more efficient: -threads = 12 listen = 100 # This is the standard port for the WSGI application, listening on all available IPs -- GitLab From 1479ef31378f99c505686ac84bf8efb37c0726ae Mon Sep 17 00:00:00 2001 From: Thomas Goirand Date: Wed, 17 Mar 2021 13:33:09 +0100 Subject: [PATCH 1480/1483] Tune gnocchi-api-uwsgi.ini for performance. 
--- debian/changelog | 6 ++++++ debian/gnocchi-api-uwsgi.ini | 24 +++++++++++++++++------- 2 files changed, 23 insertions(+), 7 deletions(-) diff --git a/debian/changelog b/debian/changelog index b243ad44..0ecd088c 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +gnocchi (4.4.0-2) UNRELEASED; urgency=medium + + * Tune gnocchi-api-uwsgi.ini for performance. + + -- Thomas Goirand Wed, 17 Mar 2021 13:32:56 +0100 + gnocchi (4.4.0-1) unstable; urgency=medium * New upstream release. diff --git a/debian/gnocchi-api-uwsgi.ini b/debian/gnocchi-api-uwsgi.ini index 287156c5..45e17f16 100644 --- a/debian/gnocchi-api-uwsgi.ini +++ b/debian/gnocchi-api-uwsgi.ini @@ -15,10 +15,6 @@ master = true # Threads and processes enable-threads = true -threads = 32 - -processes = 8 - # uwsgi recommends this to prevent thundering herd on accept. thunder-lock = true @@ -36,14 +32,28 @@ no-orphans = true # exit instead of brutal reload on SIGTERM die-on-term = true +########################## +### Performance tuning ### +########################## +# Threads and processes +enable-threads = true + +# For max perf, set this to number of core*2 +processes = 8 + +# This was benchmarked as a good value +threads = 32 + +# This is the number of sockets in the queue. +# It improves a lot performances. This is comparable +# to the Apache ServerLimit/MaxClients option. +listen = 100 + ################################## ### OpenStack service specific ### ################################## add-header = Connection: close -# This makes the Gnocchi API much more efficient: -listen = 100 - # This is the standard port for the WSGI application, listening on all available IPs logto = /var/log/gnocchi/gnocchi-api.log name = gnocchi-api -- GitLab From 3e95a54e4587844857485cf3495d43c64b6feed4 Mon Sep 17 00:00:00 2001 From: Thomas Goirand Date: Mon, 16 Aug 2021 12:46:37 +0200 Subject: [PATCH 1481/1483] Upload to unstable. 
--- debian/changelog | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/debian/changelog b/debian/changelog index 0ecd088c..776516f6 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,6 +1,7 @@ -gnocchi (4.4.0-2) UNRELEASED; urgency=medium +gnocchi (4.4.0-2) unstable; urgency=medium * Tune gnocchi-api-uwsgi.ini for performance. + * Upload to unstable. -- Thomas Goirand Wed, 17 Mar 2021 13:32:56 +0100 -- GitLab From 284e250e988cb4ed627f8826779a6c78b37ee987 Mon Sep 17 00:00:00 2001 From: Debian Janitor Date: Thu, 9 Sep 2021 23:31:44 +0000 Subject: [PATCH 1482/1483] Bump debhelper from old 10 to 13. + Replace python_distutils buildsystem with pybuild. + debian/rules: Drop --fail-missing argument to dh_missing, which is now the default. Changes-By: lintian-brush Fixes: lintian: package-uses-old-debhelper-compat-version See-also: https://lintian.debian.org/tags/package-uses-old-debhelper-compat-version.html --- debian/changelog | 9 +++++++++ debian/control | 2 +- debian/rules | 4 ++-- 3 files changed, 12 insertions(+), 3 deletions(-) diff --git a/debian/changelog b/debian/changelog index 776516f6..bcbc4fa1 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,12 @@ +gnocchi (4.4.0-3) UNRELEASED; urgency=medium + + * Bump debhelper from old 10 to 13. + + Replace python_distutils buildsystem with pybuild. + + debian/rules: Drop --fail-missing argument to dh_missing, which is now the + default. + + -- Debian Janitor Thu, 09 Sep 2021 23:31:44 -0000 + gnocchi (4.4.0-2) unstable; urgency=medium * Tune gnocchi-api-uwsgi.ini for performance. 
diff --git a/debian/control b/debian/control index ebb12a3e..7aa42f46 100644 --- a/debian/control +++ b/debian/control @@ -6,7 +6,7 @@ Uploaders: Thomas Goirand , Michal Arbet , Build-Depends: - debhelper-compat (= 10), + debhelper-compat (= 13), dh-python, openstack-pkg-tools (>= 99~), python3-all, diff --git a/debian/rules b/debian/rules index 80769b34..ab68be10 100755 --- a/debian/rules +++ b/debian/rules @@ -8,7 +8,7 @@ export SETUPTOOLS_SCM_PRETEND_VERSION=$(shell dpkg-parsechangelog -SVersion | se UNIT_TEST_BLACKLIST = test_carbonara.CarbonaraCmd.*|.*test_bin\.BinTestCase\.test_gnocchi_config_generator_run.* %: - dh $@ --buildsystem=python_distutils --with python3,sphinxdoc + dh $@ --buildsystem=pybuild --with python3,sphinxdoc override_dh_clean: dh_clean @@ -95,7 +95,7 @@ endif sed -i 's|^[ \t#]*url[ \t#]*=.*|url = sqlite:////var/lib/gnocchi/gnocchidb|' $(CURDIR)/debian/gnocchi-common/usr/share/gnocchi-common/gnocchi.conf dh_install - dh_missing --fail-missing + dh_missing override_dh_sphinxdoc: -- GitLab From f657a02852b130d63c4b0ff51c2ab7fb94784b09 Mon Sep 17 00:00:00 2001 From: Debian Janitor Date: Thu, 9 Sep 2021 23:31:50 +0000 Subject: [PATCH 1483/1483] Set upstream metadata fields: Bug-Database, Bug-Submit, Repository, Repository-Browse. 
Changes-By: lintian-brush Fixes: lintian: upstream-metadata-file-is-missing See-also: https://lintian.debian.org/tags/upstream-metadata-file-is-missing.html Fixes: lintian: upstream-metadata-missing-bug-tracking See-also: https://lintian.debian.org/tags/upstream-metadata-missing-bug-tracking.html Fixes: lintian: upstream-metadata-missing-repository See-also: https://lintian.debian.org/tags/upstream-metadata-missing-repository.html --- debian/changelog | 2 ++ debian/upstream/metadata | 5 +++++ 2 files changed, 7 insertions(+) create mode 100644 debian/upstream/metadata diff --git a/debian/changelog b/debian/changelog index bcbc4fa1..a935e596 100644 --- a/debian/changelog +++ b/debian/changelog @@ -4,6 +4,8 @@ gnocchi (4.4.0-3) UNRELEASED; urgency=medium + Replace python_distutils buildsystem with pybuild. + debian/rules: Drop --fail-missing argument to dh_missing, which is now the default. + * Set upstream metadata fields: Bug-Database, Bug-Submit, Repository, + Repository-Browse. -- Debian Janitor Thu, 09 Sep 2021 23:31:44 -0000 diff --git a/debian/upstream/metadata b/debian/upstream/metadata new file mode 100644 index 00000000..05817f9b --- /dev/null +++ b/debian/upstream/metadata @@ -0,0 +1,5 @@ +--- +Bug-Database: https://github.com/gnocchixyz/gnocchi/issues +Bug-Submit: https://github.com/gnocchixyz/gnocchi/issues/new +Repository: https://github.com/gnocchixyz/gnocchi.git +Repository-Browse: https://github.com/gnocchixyz/gnocchi -- GitLab